| filename (string, length 4-198) | content (string, length 25-939k) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, length 2-3.9k) | lang (string, 3 classes) | constargcount (float64, 0-129, ⌀ = null) | variableargcount (float64, 0, ⌀ = null) | sentence (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
dmreader/trait_base.py | #------------------------------------------------------------------------------
#
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# Author: David C. Morrill
# Date: 06/21/2002
#
# Refactored into a separate module: 07/04/2003
#
#------------------------------------------------------------------------------
""" Defines common, low-level capabilities needed by the Traits package.
"""
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
import os
import sys
from os import getcwd
from os.path import dirname, exists, join
from . import _py2to3
from .etsconfig.api import ETSConfig
# backwards compatibility: trait_base used to provide a patched enumerate
enumerate = enumerate
# Set the Python version being used:
vi = sys.version_info
python_version = vi[0] + (float( vi[1] ) / 10.0)
#-------------------------------------------------------------------------------
# Constants:
#-------------------------------------------------------------------------------
ClassTypes = _py2to3.ClassTypes
SequenceTypes = ( list, tuple )
ComplexTypes = ( float, int )
TypeTypes = ( str, str, int, int, float, complex, list, tuple, dict, bool )
TraitNotifier = '__trait_notifier__'
# The standard Traits property cache prefix:
TraitsCache = '_traits_cache_'
#-------------------------------------------------------------------------------
# Singleton 'Uninitialized' object:
#-------------------------------------------------------------------------------
Uninitialized = None
class _Uninitialized(object):
""" The singleton value of this class represents the uninitialized state
of a trait and is specified as the 'old' value in the trait change
notification that occurs when the value of a trait is read before being
set.
"""
def __new__(cls):
if Uninitialized is not None:
return Uninitialized
else:
self = object.__new__(cls)
return self
def __repr__(self):
return '<uninitialized>'
def __reduce_ex__(self, protocol):
return (_Uninitialized, ())
#: When the first reference to a trait is a 'get' reference, the default value of
#: the trait is implicitly assigned and returned as the value of the trait.
#: Because of this implicit assignment, a trait change notification is
#: generated with the Uninitialized object as the 'old' value of the trait, and
#: the default trait value as the 'new' value. This allows other parts of the
#: traits package to recognize the assignment as the implicit default value
#: assignment, and treat it specially.
Uninitialized = _Uninitialized()
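# Illustrative sketch (not part of the original module): because __new__
# returns the already-created instance and __reduce_ex__ reconstructs the
# value through _Uninitialized(), the singleton keeps its identity, even
# across pickling:
#
#     >>> import pickle
#     >>> _Uninitialized() is Uninitialized
#     True
#     >>> pickle.loads(pickle.dumps(Uninitialized)) is Uninitialized
#     True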
#-------------------------------------------------------------------------------
# Singleton 'Undefined' object (used as undefined trait name and/or value):
#-------------------------------------------------------------------------------
Undefined = None
class _Undefined(object):
""" Singleton 'Undefined' object (used as undefined trait name and/or value)
"""
def __new__(cls):
if Undefined is not None:
return Undefined
else:
self = object.__new__(cls)
return self
def __repr__(self):
return '<undefined>'
def __reduce_ex__(self, protocol):
return (_Undefined, ())
def __eq__(self, other):
return type(self) is type(other)
def __hash__(self):
return hash(type(self))
def __ne__(self, other):
return type(self) is not type(other)
#: Singleton object that indicates that a trait attribute has not yet had a
#: value set (i.e., its value is undefined). This object is used instead of
#: None, because None often has other meanings, such as that a value is not
#: used. When a trait attribute is first assigned a value, and its associated
#: trait notification handlers are called, Undefined is passed as the *old*
#: parameter, to indicate that the attribute previously had no value.
Undefined = _Undefined()
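# Illustrative sketch (not part of the original module): _Undefined defines
# __eq__/__ne__/__hash__ in terms of the type, so any two instances compare
# equal and hash identically, while comparisons with other values are False:
#
#     >>> Undefined == _Undefined()
#     True
#     >>> Undefined == None
#     False
#     >>> hash(Undefined) == hash(_Undefined())
#     True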
# Tell the C-base code about singleton 'Undefined' and 'Uninitialized' objects:
#from dmreader import ctraits
#ctraits._undefined( Undefined, Uninitialized )
#-------------------------------------------------------------------------------
# Singleton 'Missing' object (used as missing method argument marker):
#-------------------------------------------------------------------------------
class Missing ( object ):
""" Singleton 'Missing' object (used as missing method argument marker).
"""
def __repr__ ( self ):
return '<missing>'
#: Singleton object that indicates that a method argument is missing from a
#: type-checked method signature.
Missing = Missing()
#-------------------------------------------------------------------------------
# Singleton 'Self' object (used as object reference to current 'object'):
#-------------------------------------------------------------------------------
class Self ( object ):
""" Singleton 'Self' object (used as object reference to current 'object').
"""
def __repr__ ( self ):
return '<self>'
#: Singleton object that references the current 'object'.
Self = Self()
#-------------------------------------------------------------------------------
# Define a special 'string' coercion function:
#-------------------------------------------------------------------------------
def strx ( arg ):
""" Wraps the built-in str() function to raise a TypeError if the
argument is not of a type in StringTypes.
"""
if isinstance( arg, StringTypes ):
return str( arg )
raise TypeError
#-------------------------------------------------------------------------------
# Constants:
#-------------------------------------------------------------------------------
StringTypes = ( str, str, int, int, float, complex )
#-------------------------------------------------------------------------------
# Define a mapping of coercable types:
#-------------------------------------------------------------------------------
# Mapping of coercable types.
CoercableTypes = {
int: ( 11, int, int ),
float: ( 11, float, int ),
complex: ( 11, complex, float, int ),
str: ( 11, str, str )
}
#-------------------------------------------------------------------------------
# Return a string containing the class name of an object with the correct
# article (a or an) preceding it (e.g. 'an Image', 'a PlotValue'):
#-------------------------------------------------------------------------------
def class_of ( object ):
""" Returns a string containing the class name of an object with the
correct indefinite article ('a' or 'an') preceding it (e.g., 'an Image',
'a PlotValue').
"""
if isinstance( object, str ):
return add_article( object )
return add_article( object.__class__.__name__ )
#-------------------------------------------------------------------------------
# Return a string containing the right article (i.e. 'a' or 'an') prefixed to
# a specified string:
#-------------------------------------------------------------------------------
def add_article ( name ):
""" Returns a string containing the correct indefinite article ('a' or 'an')
prefixed to the specified string.
"""
if name[:1].lower() in 'aeiou':
return 'an ' + name
return 'a ' + name
#----------------------------------------------------------------------------
# Return a 'user-friendly' name for a specified trait:
#----------------------------------------------------------------------------
def user_name_for ( name ):
""" Returns a "user-friendly" version of a string, with the first letter
capitalized and with underscore characters replaced by spaces. For example,
``user_name_for('user_name_for')`` returns ``'User name for'``.
"""
name = name.replace( '_', ' ' )
result = ''
last_lower = False
for c in name:
if c.isupper() and last_lower:
result += ' '
last_lower = c.islower()
result += c
return result.capitalize()
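# Illustrative examples (not part of the original module) for the string
# helpers above:
#
#     >>> class_of('image')
#     'an image'
#     >>> add_article('PlotValue')
#     'a PlotValue'
#     >>> user_name_for('user_name_for')
#     'User name for'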
#-------------------------------------------------------------------------------
# Gets the path to the traits home directory:
#-------------------------------------------------------------------------------
_traits_home = None
def traits_home ( ):
""" Gets the path to the Traits home directory.
"""
global _traits_home
if _traits_home is None:
_traits_home = verify_path( join( ETSConfig.application_data,
'traits' ) )
return _traits_home
#-------------------------------------------------------------------------------
# Verify that a specified path exists, and try to create it if it doesn't:
#-------------------------------------------------------------------------------
def verify_path ( path ):
""" Verify that a specified path exists, and try to create it if it
does not exist.
"""
if not exists( path ):
try:
os.mkdir( path )
except:
pass
return path
#-------------------------------------------------------------------------------
# Returns the name of the module the caller's caller is located in:
#-------------------------------------------------------------------------------
def get_module_name ( level = 2 ):
""" Returns the name of the module that the caller's caller is located in.
"""
return sys._getframe( level ).f_globals.get( '__name__', '__main__' )
#-------------------------------------------------------------------------------
# Returns a resource path calculated from the caller's stack:
#-------------------------------------------------------------------------------
def get_resource_path ( level = 2 ):
"""Returns a resource path calculated from the caller's stack.
"""
module = sys._getframe( level ).f_globals.get( '__name__', '__main__' )
path = None
if module != '__main__':
# Return the path to the module:
try:
path = dirname( getattr( sys.modules.get( module ), '__file__' ) )
except:
# Apparently 'module' is not a registered module...treat it like
# '__main__':
pass
if path is None:
# '__main__' is not a real module, so we need a work around:
for path in [ dirname( sys.argv[0] ), getcwd() ]:
if exists( path ):
break
# Handle application bundlers. Since the python source files may be placed
in a zip file and therefore won't be directly accessible using standard
# open/read commands, the app bundlers will look for resources (i.e. data
# files, images, etc.) in specific locations. For py2app, this is in the
# [myapp].app/Contents/Resources directory. For py2exe, this is the same
# directory as the [myapp].exe executable file generated by py2exe.
frozen = getattr(sys, 'frozen', False)
if frozen:
if frozen == 'macosx_app':
root = os.environ['RESOURCEPATH']
elif frozen in ('dll', 'windows_exe', 'console_exe'):
root = os.path.dirname(sys.executable)
else:
# Unknown app bundler, but try anyway
root = os.path.dirname(sys.executable)
if ".zip/" in path:
zippath, image_path = path.split(".zip/")
path = os.path.join(root, image_path)
return path
#-------------------------------------------------------------------------------
# Returns the value of an extended object attribute name of the form:
# name[.name2[.name3...]]:
#-------------------------------------------------------------------------------
def xgetattr( object, xname, default = Undefined ):
""" Returns the value of an extended object attribute name of the form:
name[.name2[.name3...]].
"""
names = xname.split( '.' )
for name in names[:-1]:
if default is Undefined:
object = getattr( object, name )
else:
object = getattr( object, name, None )
if object is None:
return default
if default is Undefined:
return getattr( object, names[-1] )
return getattr( object, names[-1], default )
#-------------------------------------------------------------------------------
# Sets the value of an extended object attribute name of the form:
# name[.name2[.name3...]]:
#-------------------------------------------------------------------------------
def xsetattr( object, xname, value ):
""" Sets the value of an extended object attribute name of the form:
name[.name2[.name3...]].
"""
names = xname.split( '.' )
for name in names[:-1]:
object = getattr( object, name )
setattr( object, names[-1], value )
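# Illustrative sketch (not part of the original module): xgetattr/xsetattr
# walk dotted attribute paths. The Holder class below is hypothetical and
# only used for illustration:
#
#     >>> class Holder(object): pass
#     >>> obj = Holder(); obj.child = Holder(); obj.child.value = 1
#     >>> xgetattr(obj, 'child.value')
#     1
#     >>> xsetattr(obj, 'child.value', 2); obj.child.value
#     2
#     >>> xgetattr(obj, 'child.missing', 'fallback')
#     'fallback'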
#-------------------------------------------------------------------------------
# Traits metadata selection functions:
#-------------------------------------------------------------------------------
def is_none ( value ):
return (value is None)
def not_none ( value ):
return (value is not None)
def not_false ( value ):
return (value is not False)
def not_event ( value ):
return (value != 'event')
def is_str ( value ):
return isinstance( value, str )
| [] | [] | ["RESOURCEPATH"] | [] | ["RESOURCEPATH"] | python | 1 | 0 | |
pjson_test.go | package pjson
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"testing"
"time"
"github.com/tidwall/lotsa"
"github.com/tidwall/pretty"
)
var json1 = `{
"widget": {
"debug": "on",
"window": {
"title": "Sample Konfabulator Widget",
"name": "main_window",
"width": 500,
"height": 500
},
"image": {
"src": "Images/Sun.png",
"hOffset": 250,
"vOffset": 250,
"alignment": "center"
},
"text": {
"data": "Click Here",
"size": 36,
"style": "bold",
"vOffset": 100,
"alignment": "center",
"onMouseUp": "sun1.opacity = (sun1.opacity / 100) * 90;"
}
}
}`
var json2 = `
{
"tagged": "OK",
"Tagged": "KO",
"NotTagged": true,
"unsettable": 101,
"Nested": {
"Yellow": "Green",
"yellow": "yellow"
},
"nestedTagged": {
"Green": "Green",
"Map": {
"this": "that",
"and": "the other thing"
},
"Ints": {
"Uint": 99,
"Uint16": 16,
"Uint32": 32,
"Uint64": 65
},
"Uints": {
"int": -99,
"Int": -98,
"Int16": -16,
"Int32": -32,
"int64": -64,
"Int64": -65
},
"Uints": {
"Float32": 32.32,
"Float64": 64.64
},
"Byte": 254,
"Bool": true
},
"LeftOut": "you shouldn't be here",
"SelfPtr": {"tagged":"OK","nestedTagged":{"Ints":{"Uint32":32}}},
"SelfSlice": [{"tagged":"OK","nestedTagged":{"Ints":{"Uint32":32}}}],
"SelfSlicePtr": [{"tagged":"OK","nestedTagged":{"Ints":{"Uint32":32}}}],
"SelfPtrSlice": [{"tagged":"OK","nestedTagged":{"Ints":{"Uint32":32}}}],
"interface": "Tile38 Rocks!",
"Interface": "Please Download",
"Array": [0,2,3,4,5],
"time": "2017-05-07T13:24:43-07:00",
"Binary": "R0lGODlhPQBEAPeo",
"NonBinary": [9,3,100,115]
}
`
func mustEqual(a, b string) {
if a != b {
panic("'" + a + "' != '" + b + "'")
}
}
func TestIter(t *testing.T) {
var json []byte
var out []byte
json = []byte(` { "hello" : [ 1, 2, 3 ], "jello" : [ 4, 5, 6 ] } `)
out = nil
Parse(json, 0, func(start, end, info int) int {
out = append(out, json[start:end]...)
return -1
})
mustEqual(string(out), "{}")
out = nil
Parse(json, 0, func(start, end, info int) int {
out = append(out, json[start:end]...)
return 0
})
mustEqual(string(out), "{")
out = nil
Parse(json, 0, func(start, end, info int) int {
out = append(out, json[start:end]...)
return -1
})
mustEqual(string(out), "{}")
out = nil
Parse(json, 0, func(start, end, info int) int {
out = append(out, json[start:end]...)
if info&Key == Key {
return 0
}
return 1
})
mustEqual(string(out), `{"hello"`)
out = nil
Parse(json, 0, func(start, end, info int) int {
out = append(out, json[start:end]...)
if info&Colon == Colon {
return 0
}
return 1
})
mustEqual(string(out), `{"hello":`)
out = nil
Parse(json, 0, func(start, end, info int) int {
out = append(out, json[start:end]...)
if info&(Open|Array) == Open|Array {
return -1
}
if info&Comma == Comma {
return 0
}
return 1
})
mustEqual(string(out), `{"hello":[],`)
out = nil
Parse(json, 0, func(start, end, info int) int {
out = append(out, json[start:end]...)
if info&(Open|Array) == Open|Array {
return -1
}
return 1
})
mustEqual(string(out), `{"hello":[],"jello":[]}`)
out = nil
Parse(json, 0, func(start, end, info int) int {
out = append(out, json[start:end]...)
if info&(Open|Array) == Open|Array {
return -1
}
if info&(Close|Object) == Close|Object {
return 0
}
return 1
})
mustEqual(string(out), `{"hello":[],"jello":[]}`)
out = nil
Parse(json, 0, func(start, end, info int) int {
if info&(Object|Start) == Object|Start {
out = append(out, json[start:end]...)
}
return 0
})
mustEqual(string(out), "{")
out = nil
Parse(json, 0, func(start, end, info int) int {
if info&(Object|Start|End) == Object|Start|End {
out = append(out, json[start:end]...)
}
return 0
})
mustEqual(string(out), "")
json = []byte(" [ 1,2,3 ] ")
out = nil
Parse(json, 0, func(start, end, info int) int {
out = append(out, json[start:end]...)
return 0
})
mustEqual(string(out), "[")
json = []byte(" [ 1,2,3 ] ")
out = nil
Parse(json, 0, func(start, end, info int) int {
out = append(out, json[start:end]...)
if info&Comma == Comma {
return 0
}
return 1
})
mustEqual(string(out), "[1,")
out = nil
Parse(json, 0, func(start, end, info int) int {
out = append(out, json[start:end]...)
return -1
})
mustEqual(string(out), "[]")
out = nil
Parse(json, 0, func(start, end, info int) int {
out = append(out, json[start:end]...)
if info&(Array|Close) == Array|Close {
return 0
}
return 1
})
mustEqual(string(out), "[1,2,3]")
out = nil
Parse(json, 0, func(start, end, info int) int {
if info&(Array|Start) == Array|Start {
out = append(out, json[start:end]...)
}
return 0
})
mustEqual(string(out), "[")
out = nil
Parse(json, 0, func(start, end, info int) int {
if info&(Array|Start|End) == Array|Start|End {
out = append(out, json[start:end]...)
}
return 0
})
mustEqual(string(out), "")
json = []byte(" true ")
out = nil
Parse(json, 0, func(start, end, info int) int {
out = append(out, json[start:end]...)
return 0
})
mustEqual(string(out), "true")
json = []byte(" true ")
out = nil
Parse(json, 0, func(start, end, info int) int {
if info&(Start|End) == Start|End {
out = append(out, json[start:end]...)
return 0
}
return 1
})
mustEqual(string(out), "true")
json = []byte(`{ "hi\nthere": "yo" }`)
out = nil
Parse(json, 0, func(start, end, info int) int {
if info&(Key) == Key {
out = append(out, json[start:end]...)
return 0
}
return 1
})
mustEqual(string(out), `"hi\nthere"`)
json = []byte(` { "a" : "b" , "c" : [ 1 , 2 , 3 ] } `)
out = nil
var index int
expect := []int{
Start | Open | Object,
Key | String,
Colon,
Value | String,
Comma,
Key | String,
Colon,
Value | Open | Array,
Value | Number,
Comma,
Value | Number,
Comma,
Value | Number,
Value | Close | Array,
End | Close | Object,
}
Parse(json, 0, func(start, end, info int) int {
if expect[index] != info {
t.Fatalf("expected %d, got %d (#%d)\n", expect[index], info, index)
return 0
}
index++
return 1
})
if index != 15 {
panic("!")
}
// mustEqual(string(out), "true")
}
func testreturnvalue(t *testing.T, json string, expect int) {
t.Helper()
e := Parse([]byte(json), 0, nil)
if e != expect {
t.Fatalf("expected '%d', got '%d'", expect, e)
}
}
func TestReturnValues(t *testing.T) {
testreturnvalue(t, "false", 5)
testreturnvalue(t, "false ", 6)
testreturnvalue(t, " false ", 7)
testreturnvalue(t, "", 0)
testreturnvalue(t, " ", -1)
testreturnvalue(t, " a", -1)
testreturnvalue(t, ` {"hel\y" : 1}`, -7)
}
func testvalid(t *testing.T, json string, expect bool) {
t.Helper()
e := Parse([]byte(json), 0, nil)
ok := e > 0
if ok != expect {
t.Fatal("mismatch")
}
}
func TestValidBasic(t *testing.T) {
testvalid(t, "false", true)
testvalid(t, "fals0", false)
testvalid(t, "-\n", false)
testvalid(t, "0", true)
testvalid(t, "00", false)
testvalid(t, "-00", false)
testvalid(t, "-.", false)
testvalid(t, "0.0", true)
testvalid(t, "10.0", true)
testvalid(t, "10e1", true)
testvalid(t, "10EE", false)
testvalid(t, "10E-", false)
testvalid(t, "10E+", false)
testvalid(t, "10E+1a", false)
testvalid(t, "10E123", true)
testvalid(t, "10E-123", true)
testvalid(t, "10E-0123", true)
testvalid(t, "", false)
testvalid(t, " ", false)
testvalid(t, "{}", true)
testvalid(t, "{", false)
testvalid(t, "-", false)
testvalid(t, "-1", true)
testvalid(t, "-1.", false)
testvalid(t, "-1.0", true)
testvalid(t, " -1.0", true)
testvalid(t, " -1.0 ", true)
testvalid(t, "-1.0 ", true)
testvalid(t, "-1.0 i", false)
testvalid(t, "-1.0 i", false)
testvalid(t, "true", true)
testvalid(t, " true", true)
testvalid(t, " true ", true)
testvalid(t, " True ", false)
testvalid(t, " tru", false)
testvalid(t, "false", true)
testvalid(t, " false", true)
testvalid(t, " false ", true)
testvalid(t, " False ", false)
testvalid(t, " fals", false)
testvalid(t, "null", true)
testvalid(t, " null", true)
testvalid(t, " null ", true)
testvalid(t, " Null ", false)
testvalid(t, " nul", false)
testvalid(t, " []", true)
testvalid(t, " [true]", true)
testvalid(t, " [ true, null ]", true)
testvalid(t, " [ true,]", false)
testvalid(t, `{"hello":"world"}`, true)
testvalid(t, `{ "hello": "world" }`, true)
testvalid(t, `{ "hello": "world", }`, false)
testvalid(t, `{"a":"b",}`, false)
testvalid(t, `{"a":"b","a"}`, false)
testvalid(t, `{"a":"b","a":}`, false)
testvalid(t, `{"a":"b","a":1}`, true)
testvalid(t, `{"a":"b",2"1":2}`, false)
testvalid(t, `{"a":"b","a": 1, "c":{"hi":"there"} }`, true)
testvalid(t, `{"a":"b","a": 1, "c":{"hi":"there", "easy":["going",`+
`{"mixed":"bag"}]} }`, true)
testvalid(t, `""`, true)
testvalid(t, `"`, false)
testvalid(t, `"\n"`, true)
testvalid(t, `"\"`, false)
testvalid(t, `"\\"`, true)
testvalid(t, `"a\\b"`, true)
testvalid(t, `"a\\b\\\"a"`, true)
testvalid(t, `"a\\b\\\uFFAAa"`, true)
testvalid(t, `"a\\b\\\uFFAZa"`, false)
testvalid(t, `"a\\b\\\uFFA"`, false)
testvalid(t, string(json1), true)
testvalid(t, string(json2), true)
testvalid(t, `"hello`+string(byte(0))+`world"`, false)
testvalid(t, `"hello world\`, false)
testvalid(t, `"hello world\i`, false)
testvalid(t, `"hello world\u8`, false)
testvalid(t, `[1`, false)
testvalid(t, `[1,`, false)
testvalid(t, `{"hi":"ya"`, false)
testvalid(t, `{"hi`, false)
testvalid(t, `{123:123}`, false)
testvalid(t, `123.a123`, false)
testvalid(t, `123.123e`, false)
}
// mustBeAGood parses JSON, stitches together a new JSON document, and checks
// that the new doc matches the original.
func mustBeAGood(json []byte) {
var out []byte
n := Parse(json, 0, func(start, end, info int) int {
out = append(out, json[start:end]...)
return 1
})
if n != len(json) {
panic(fmt.Sprintf("expected %d, got %d", len(json), n))
}
json = pretty.Ugly(json)
out = pretty.Ugly(out)
if string(out) != string(json) {
panic("mismatch")
}
}
// testFile tests if a JSON file is good
func testFile(path string) {
json, err := ioutil.ReadFile(path)
if err != nil {
panic(err)
}
mustBeAGood(json)
}
func TestFiles(t *testing.T) {
fis, err := ioutil.ReadDir("testfiles")
if err != nil {
panic(err)
}
for _, fi := range fis {
testFile(filepath.Join("testfiles", fi.Name()))
}
}
// lotsaOps performs lots of operations and prints the results.
func lotsaOps(tag string, N int, op func() int) {
start := time.Now()
fmt.Printf("%-24s ", tag)
var total int64
for i := 0; i < N; i++ {
total += int64(op())
}
var out bytes.Buffer
lotsa.WriteOutput(&out, N, 1, time.Since(start), 0)
fmt.Printf("%s, %.2f GB/sec\n", strings.TrimSpace(out.String()),
float64(total)/time.Since(start).Seconds()/1024/1024/1024)
}
func testSpeed(path string) {
baseName := filepath.Base(path)
defer fmt.Printf("\n")
var jdata []byte
if baseName == "random-numbers.json" {
jdata = makeRandomNumbersJSON()
} else {
var err error
jdata, err = ioutil.ReadFile(path)
if err != nil {
panic(err)
}
}
fmt.Printf("== %s == (%d bytes)\n", baseName, len(jdata))
N := 200000000 / len(jdata) / 10 * 10
lotsaOps("pjson.Parse (noop iter)", N, func() int {
if Parse(jdata, 0, func(start, end, info int) int {
return 1
}) < 0 {
panic("invalid")
}
return len(jdata)
})
lotsaOps("pjson.Parse (nil iter)", N, func() int {
if Parse(jdata, 0, nil) < 0 {
panic("invalid")
}
return len(jdata)
})
lotsaOps("json.Valid (stdlib)", N, func() int {
if !json.Valid(jdata) {
panic("invalid")
}
return len(jdata)
})
}
func TestSpeed(t *testing.T) {
if os.Getenv("SPEED_TEST") == "" {
fmt.Printf("Speed test disabled. Use SPEED_TEST=1\n")
return
}
fmt.Printf("%s %s/%s\n", runtime.Version(), runtime.GOOS, runtime.GOARCH)
fis, err := ioutil.ReadDir("testfiles")
if err != nil {
panic(err)
}
for _, fi := range fis {
t.Run(fi.Name(), func(t *testing.T) {
testSpeed(filepath.Join("testfiles", fi.Name()))
})
}
t.Run("random-numbers.json", func(t *testing.T) {
testSpeed(filepath.Join("testfiles", "random-numbers.json"))
})
}
func makeRandomNumbersJSON() []byte {
rand.Seed(time.Now().UnixNano())
N := 10000
var json []byte
json = append(json, '[')
for i := 0; i < N; i++ {
if i > 0 {
json = append(json, ',')
}
x := rand.Float64()
switch rand.Int() % 5 {
case 0:
x *= 1
case 1:
x *= 10
case 2:
x *= 100
case 3:
x *= 1000
case 4:
x *= 10000
}
switch rand.Int() % 2 {
case 0:
x *= -1
case 1:
x *= +1
}
switch rand.Int() % 6 {
case 0:
json = strconv.AppendFloat(json, x, 'f', -1, 64)
case 1:
json = strconv.AppendFloat(json, x, 'f', 0, 64)
case 2:
json = strconv.AppendFloat(json, x, 'f', 2, 64)
case 3:
json = strconv.AppendFloat(json, x, 'f', 4, 64)
case 4:
json = strconv.AppendFloat(json, x, 'f', 8, 64)
case 5:
json = strconv.AppendFloat(json, x, 'e', 8, 64)
}
}
json = append(json, ']')
return json
}
| ["\"SPEED_TEST\""] | [] | ["SPEED_TEST"] | [] | ["SPEED_TEST"] | go | 1 | 0 | |
superset/migrations/versions/96e99fb176a0_add_import_mixing_to_saved_query.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""add_import_mixing_to_saved_query
Revision ID: 96e99fb176a0
Revises: 585b0b1a7b18
Create Date: 2020-10-21 21:09:55.945956
"""
import os
from uuid import uuid4
import sqlalchemy as sa
from alembic import op
from sqlalchemy.exc import OperationalError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy_utils import UUIDType
from superset import db
from superset.migrations.shared.utils import assign_uuids
# revision identifiers, used by Alembic.
revision = "96e99fb176a0"
down_revision = "585b0b1a7b18"
Base = declarative_base()
class ImportMixin:
id = sa.Column(sa.Integer, primary_key=True)
uuid = sa.Column(UUIDType(binary=True), primary_key=False, default=uuid4)
class SavedQuery(Base, ImportMixin):
__tablename__ = "saved_query"
default_batch_size = int(os.environ.get("BATCH_SIZE", 200))
def upgrade():
bind = op.get_bind()
session = db.Session(bind=bind)
# Add uuid column
try:
with op.batch_alter_table("saved_query") as batch_op:
batch_op.add_column(
sa.Column(
"uuid",
UUIDType(binary=True),
primary_key=False,
default=uuid4,
),
)
except OperationalError:
# Ignore column update errors so that we can run upgrade multiple times
pass
assign_uuids(SavedQuery, session)
try:
# Add uniqueness constraint
with op.batch_alter_table("saved_query") as batch_op:
# Batch mode is required for sqlite
batch_op.create_unique_constraint("uq_saved_query_uuid", ["uuid"])
except OperationalError:
pass
def downgrade():
bind = op.get_bind()
session = db.Session(bind=bind)
# Remove uuid column
with op.batch_alter_table("saved_query") as batch_op:
batch_op.drop_constraint("uq_saved_query_uuid", type_="unique")
batch_op.drop_column("uuid")
| [] | [] | ["BATCH_SIZE"] | [] | ["BATCH_SIZE"] | python | 1 | 0 | |
authors/apps/articles/models.py | import os
from django.db import models
from django.utils.text import slugify
from django.contrib.postgres.fields import ArrayField
from django.db.models.signals import post_save
from django.dispatch import receiver
from notifications.signals import notify
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.utils.encoding import force_bytes, force_text
from authors.apps.authentication.models import User
from authors.apps.profiles.models import Profile
from authors.apps.core.email_with_celery import SendEmail
class TimestampedModel(models.Model):
''' Abstract base model that records when an instance is created and last
changed in the database, via auto-populated created_at and updated_at fields.'''
# Timestamp shows when an object was first created in the database
created_at = models.DateTimeField(auto_now_add=True)
# represents when an object was last changed
updated_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
# It is a good practice to have ordering in reverse chronology.
#
ordering = ['-created_at', '-updated_at']
class Article(TimestampedModel):
slug = models.SlugField(db_index=True, max_length=255, unique=True)
title = models.CharField(db_index=True, max_length=255)
description = models.TextField()
body = models.TextField()
tagList = ArrayField(models.CharField(
max_length=255), default=None, null=True, blank=True)
image = models.ImageField(
upload_to='myphoto/%Y/%m/%d/', null=True, max_length=255)
# blank = True
# a many-to-many field will map to a serializer field that
# requires at least one input, unless the model field has blank=True
like = models.ManyToManyField(User, blank=True, related_name='like')
# Define a related_name argument for 'Article.like' and 'Article.dislike'
# to ensure that the two fields do not conflict with each other.
dislike = models.ManyToManyField(User, blank=True, related_name='dislike')
# Bookmarked is set as False
bookmarked = models.BooleanField(default=False)
# An author is the creator of the article, usually the currently logged-in user.
# The foreign key relationship also makes it possible to fetch all articles
# written by a particular author.
author = models.ForeignKey(
'authentication.User', on_delete=models.CASCADE,
related_name='articles'
)
ratings_counter = models.IntegerField(default=0)
prepopulated_fields = {"slug": ("title",)}
def _get_unique_slug(self):
slug = slugify(self.title)
unique_slug = slug
num = 1
while Article.objects.filter(slug=unique_slug).exists():
unique_slug = '{}-{}'.format(slug, num)
num += 1
return unique_slug
def save(self, *args, **kwargs):
''' Creates a unique slug based on the article title.
Example:
Title: "My Article" -> slug "my-article"
A second article with the same title -> slug "my-article-1"
'''
self.slug = self._get_unique_slug()
super(Article, self).save(*args, **kwargs)
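# Illustrative sketch (not part of the original file): save() slugifies the
# title and _get_unique_slug() appends an increasing numeric suffix until the
# slug is unique. Assuming a configured Django database and a hypothetical
# `user` instance:
#
#     a1 = Article(title='My Article', description='d', body='b', author=user)
#     a1.save()   # a1.slug == 'my-article'
#     a2 = Article(title='My Article', description='d', body='b', author=user)
#     a2.save()   # a2.slug == 'my-article-1'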
def updaterate(self, rating):
'''Updates the article's cached ratings counter with the given rating.'''
self.ratings_counter = rating
def __str__(self):
''' Returns a title of the article as object representation'''
return self.title
class Comment(TimestampedModel):
'''
Comment class implementation
'''
body = models.TextField()
author = models.ForeignKey('authentication.User',
on_delete=models.CASCADE)
article = models.ForeignKey(Article, on_delete=models.CASCADE)
likes = models.ManyToManyField('authentication.User',
related_name='likes', blank=True)
dislikes = models.ManyToManyField('authentication.User',
related_name='dislikes', blank=True)
def __str__(self):
return self.body
class ArticleRating(models.Model):
"""
Defines the ratings fields for a rater
"""
rater = models.ForeignKey(
'authentication.User', on_delete=models.CASCADE,
related_name='articlesrating'
)
note = models.TextField()
article = models.ForeignKey(
Article, on_delete=models.CASCADE, related_name="articlerating")
rating = models.IntegerField()
def __str__(self):
return self.note
class Report(TimestampedModel):
"""Reporting an article model"""
body = models.TextField()
author = models.ForeignKey('authentication.User', on_delete=models.CASCADE)
article = models.ForeignKey(Article, on_delete=models.CASCADE)
def __str__(self):
return self.body
@receiver(post_save, sender=Article)
def send_notifications_to_all_users(sender,
instance,
created, *args, **kwargs):
"""Create a Signal that sends email to all users that follow the author.
Arguments:
sender {[type]} -- [Instance of ]
created {[type]} -- [If the article is posted.]
"""
if instance and created:
users_following = instance.author.profile.get_followers(
instance.author.profile)
users_follow = [u.user for u in users_following if u.get_notifications]
link = f'{os.getenv("HEROKU_BACKEND_URL")}/api/articles/{instance.slug}'
users_foll = [u.user.id for u in users_following]
if users_foll:
uuid = urlsafe_base64_encode(force_bytes(users_foll[0])
).decode("utf-8")
subscription = f'{os.getenv("HEROKU_BACKEND_URL")}/api/users/subscription/{uuid}/'
SendEmail(
template="create_article.html",
context={
"article": instance,
"author": instance.author,
"url_link": link,
"subscription": subscription
},
subject="New Article",
e_to=[u.email for u in users_follow],
).send()
@receiver(post_save, sender=Comment)
def send_notifications_to_all_users_on_comments(sender,
instance,
created,
*args, **kwargs):
"""Create a Signal that sends email to all users that follow the author.
Arguments:
sender {[type]} -- [Instance of ]
created {[type]} -- [If the article is posted.]
"""
if instance and created:
user_following = Profile.objects.all()
user_follow = [u.user for u in user_following if \
u.has_favorited(instance.article) and u.get_notifications]
author = User.objects.get(email=instance.author)
if author:
comment = Comment.objects.get(id=instance.id)
link = f'{os.getenv("HEROKU_BACKEND_URL")}/api/articles/{comment.article.slug}/comments/{instance.id}'
uuid = urlsafe_base64_encode(force_bytes(author.id)
).decode("utf-8")
subscription = f'{os.getenv("HEROKU_BACKEND_URL")}/api/users/subscription/{uuid}/'
SendEmail(
template="comment_notification.html",
context={
"article": instance.article,
"comment": instance,
"url_link": link,
"subscription": subscription
},
subject=" New Comment.",
e_to=[u.email for u in user_follow],
).send()
| [] | [] | ["HEROKU_BACKEND_URL"] | [] | ["HEROKU_BACKEND_URL"] | python | 1 | 0 | |
ci/v1/ci_test.py | """
Integration test of the CMPT 756 sample applicaton.
Result of test in program return code:
0: Test succeeded
1: Test failed
"""
# Standard library modules
import argparse
import os
import sys
# Installed packages
# Local modules
import create_tables
import music
# The services check only that we pass an authorization,
# not whether it's valid
DUMMY_AUTH = 'Bearer A'
def parse_args():
"""Parse the command-line arguments.
Returns
-------
namespace
A namespace of all the arguments, augmented with names
'user_url' and 'music_url'.
"""
argp = argparse.ArgumentParser(
'ci_test',
description='Integration test of CMPT 756 sample application'
)
argp.add_argument(
'user_address',
help="DNS name or IP address of user service."
)
argp.add_argument(
'user_port',
type=int,
help="Port number of user service."
)
argp.add_argument(
'music_address',
help="DNS name or IP address of music service."
)
argp.add_argument(
'music_port',
type=int,
help="Port number of music service."
)
argp.add_argument(
'playlist_address',
help="DNS name or IP address of playlist service."
)
argp.add_argument(
'playlist_port',
type=int,
help="Port number of playlist service."
)
argp.add_argument(
'table_suffix',
help="Suffix to add to table names (not including leading "
"'-'). If suffix is 'scp756-2022', the music table "
"will be 'Music-scp756-2022'."
)
args = argp.parse_args()
args.user_url = "http://{}:{}/api/v1/user/".format(
args.user_address, args.user_port)
args.music_url = "http://{}:{}/api/v1/music/".format(
args.music_address, args.music_port)
args.playlist_url = "http://{}:{}/api/v1/playlist/".format(
args.playlist_address, args.playlist_port)
return args
def get_env_vars(args):
"""Augment the arguments with environment variable values.
Parameters
----------
args: namespace
The command-line argument values.
Environment variables
---------------------
AWS_REGION, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY,
SVC_LOADER_TOKEN, DYNAMODB_URL: string
Environment variables specifying these AWS access parameters.
Modifies
-------
args
The args namespace augmented with the following names:
dynamodb_region, access_key_id, secret_access_key, loader_token,
dynamodb_url
These names contain the string values passed in the corresponding
environment variables.
Returns
-------
Nothing
"""
# These are required to be present
args.dynamodb_region = os.getenv('AWS_REGION')
args.access_key_id = os.getenv('AWS_ACCESS_KEY_ID')
args.secret_access_key = os.getenv('AWS_SECRET_ACCESS_KEY')
args.loader_token = os.getenv('SVC_LOADER_TOKEN')
args.dynamodb_url = os.getenv('DYNAMODB_URL')
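# Illustrative invocation (not part of the original test); every host name,
# port, credential, and URL below is a placeholder:
#
#     AWS_REGION=... AWS_ACCESS_KEY_ID=... AWS_SECRET_ACCESS_KEY=... \
#     SVC_LOADER_TOKEN=... DYNAMODB_URL=... \
#     python ci_test.py <user_addr> <user_port> <music_addr> <music_port> \
#         <playlist_addr> <playlist_port> <table_suffix>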
def setup(args):
"""Create the DynamoDB tables.
Parameters
----------
args: namespace
The arguments specifying the tables. Uses dynamodb_url,
dynamodb_region, access_key_id, secret_access_key, table_suffix.
"""
create_tables.create_tables(
args.dynamodb_url,
args.dynamodb_region,
args.access_key_id,
args.secret_access_key,
'Music-' + args.table_suffix,
'User-' + args.table_suffix,
'Playlist-' + args.table_suffix
)
def run_test(args):
"""Run the tests.
Parameters
----------
args: namespace
The arguments for the test. Uses music_url.
Prerequisites
-------------
The DyamoDB tables must already exist.
Returns
-------
number
An HTTP status code representing the test result.
Some "pseudo-HTTP" codes are defined in the 600 range
to indicate conditions that are not included in the HTTP
specification.
Notes
-----
This test is highly incomplete and needs substantial extension.
NEED TO ADD TESTS HERE
"""
_ = music.Music(args.music_url, DUMMY_AUTH)
'''
mserv = music.Music(args.music_url, DUMMY_AUTH)
artist, song = ('Mary Chapin Carpenter', 'John Doe No. 24')
trc, m_id = mserv.create(artist, song)
if trc != 200:
sys.exit(1)
trc, ra, rs = mserv.read(m_id)
if trc == 200:
if artist != ra or song != rs:
# Fake HTTP code to indicate error
trc = 601
mserv.delete(m_id)
return trc
'''
return 200
if __name__ == '__main__':
args = parse_args()
get_env_vars(args)
setup(args)
trc = run_test(args)
if trc != 200:
sys.exit(1)
| [] | [] | ["DYNAMODB_URL", "AWS_SECRET_ACCESS_KEY", "AWS_REGION", "SVC_LOADER_TOKEN", "AWS_ACCESS_KEY_ID"] | [] | ["DYNAMODB_URL", "AWS_SECRET_ACCESS_KEY", "AWS_REGION", "SVC_LOADER_TOKEN", "AWS_ACCESS_KEY_ID"] | python | 5 | 0 | |
go/examples/test/api_filters_test.go | package test
import (
"fmt"
"github.com/avinetworks/sdk/go/clients"
"github.com/avinetworks/sdk/go/session"
"os"
"testing"
)
func TestApiFilters(t *testing.T) {
aviClient, err := clients.NewAviClient(os.Getenv("controller"), "admin",
session.SetPassword(os.Getenv("password")),
session.SetTenant("admin"),
session.SetVersion(os.Getenv("version")),
session.SetInsecure)
if err != nil {
fmt.Println("Couldn't create session: ", err)
t.Fail()
}
cv, err := aviClient.AviSession.GetControllerVersion()
fmt.Printf("Avi Controller Version: %v:%v\n", cv, err)
params := map[string]string{
"page_size": "3",
"page": "1",
"tenant": "admin",
}
hmData, err := aviClient.HealthMonitor.GetAll(session.SetParams(params))
if err != nil {
fmt.Println("\n [ERROR] : ", err)
} else {
fmt.Println("\n Health monitors : ", hmData)
if len(hmData) != 3 {
t.Fail()
}
}
params1 := map[string]string{
"page": "1",
"tenant": "admin",
}
data, err := aviClient.HealthMonitor.GetObject(session.SetParams(params1), session.SetName("Test-Hm"))
if err != nil {
fmt.Println("\n [ERROR] : ", err)
} else {
fmt.Println("\n Health monitor object : ", *data)
}
}
| ["\"controller\"", "\"password\"", "\"version\""] | [] | ["password", "version", "controller"] | [] | ["password", "version", "controller"] | go | 3 | 0 | |
scripts/kconfig/menuconfig.py | #!/usr/bin/env python3
# Copyright (c) 2018, Nordic Semiconductor ASA and Ulf Magnusson
# SPDX-License-Identifier: ISC
"""
Overview
========
A curses-based menuconfig implementation. The interface should feel familiar to
people used to mconf ('make menuconfig').
Supports the same keys as mconf, and also supports a set of keybindings
inspired by Vi:
J/K : Down/Up
L : Enter menu/Toggle item
H : Leave menu
Ctrl-D/U: Page Down/Page Up
G/End : Jump to end of list
g/Home : Jump to beginning of list
The mconf feature where pressing a key jumps to a menu entry with that
character in it in the current menu isn't supported. A jump-to feature for
jumping directly to any symbol (including invisible symbols), choice, menu or
comment (as in a Kconfig 'comment "Foo"') is available instead.
Space and Enter are "smart" and try to do what you'd expect for the given
menu entry.
Running
=======
menuconfig.py can be run either as a standalone executable or by calling the
menuconfig() function with an existing Kconfig instance. The second option is a
bit inflexible in that it will still load and save .config, etc.
When run in standalone mode, the top-level Kconfig file to load can be passed
as a command-line argument. With no argument, it defaults to "Kconfig".
The KCONFIG_CONFIG environment variable specifies the .config file to load (if
it exists) and save. If KCONFIG_CONFIG is unset, ".config" is used.
$srctree is supported through Kconfiglib.
Color schemes
=============
It is possible to customize the color scheme by setting the MENUCONFIG_STYLE
environment variable. For example, setting it to 'aquatic' will enable an
alternative, less yellow, more 'make menuconfig'-like color scheme, contributed
by Mitja Horvat (pinkfluid).
This is the current list of built-in styles:
- default classic Kconfiglib theme with a yellow accent
- monochrome colorless theme (uses only bold and standout attributes);
this style is used if the terminal doesn't support colors
- aquatic blue tinted style loosely resembling the lxdialog theme
It is possible to customize the current style by changing colors of UI
elements on the screen. This is the list of elements that can be stylized:
- path Top row in the main display, with the menu path
- separator Separator lines between windows. Also used for the top line
in the symbol information display.
- list List of items, e.g. the main display
- selection Style for the selected item
- inv-list Like list, but for invisible items. Used in show-all mode.
- inv-selection Like selection, but for invisible items. Used in show-all
mode.
- help Help text windows at the bottom of various fullscreen
dialogs
- frame Frame around dialog boxes
- body Body of dialog boxes
- edit Edit box in pop-up dialogs
- jump-edit Edit box in jump-to dialog
- text Symbol information text
The color definition is a comma separated list of attributes:
- fg:COLOR / bg:COLOR Set the foreground/background colors. COLOR can be
one of the basic 16 colors (black, red, green, yellow, blue, magenta,
cyan, white and brighter versions, for example, brightred). On terminals
that support more than 8 colors, you can also directly put in a color
number, e.g. fg:123 (hexadecimal and octal constants are accepted as
well). Colors outside the range -1..curses.COLORS-1 (which is
terminal-dependent) are ignored (with a warning). The COLOR can also be
specified using an RGB value in the HTML notation, for example #RRGGBB.
If the terminal supports color changing, the color is rendered
accurately. Otherwise, the visually nearest color is used.
If the background or foreground color of an element is not
specified, it defaults to -1, representing the default
terminal foreground or background color.
Note: On some terminals a bright version of the color
implies bold.
- bold Use bold text
- underline Use underline text
- standout Standout text attribute (reverse color)
More often than not, some UI elements share the same color definition. In such
cases the right value may specify an UI element from which the color definition
will be copied. For example, "separator=help" will apply the current color
definition for "help" to "separator".
A keyword without the '=' is assumed to be a style template. The template name
is looked up in the built-in styles list and the style definition is expanded
in-place. With this, built-in styles can be used as basis for new styles.
For example, take the aquatic theme and give it a red selection bar:
MENUCONFIG_STYLE="aquatic selection=fg:white,bg:red"
If there's an error in the style definition or if a missing style is assigned
to, the assignment will be ignored, along with a warning being printed on
stderr.
The 'default' theme is always implicitly parsed first (or the 'monochrome'
theme if the terminal lacks colors), so the following two settings have the
same effect:
MENUCONFIG_STYLE="selection=fg:white,bg:red"
MENUCONFIG_STYLE="default selection=fg:white,bg:red"
Other features
==============
- Seamless terminal resizing
- No dependencies on *nix, as the 'curses' module is in the Python standard
library
- Unicode text entry
- Improved information screen compared to mconf:
* Expressions are split up by their top-level &&/|| operands to improve
readability
* Undefined symbols in expressions are pointed out
* Menus and comments have information displays
* Kconfig definitions are printed
* The include path is shown, listing the locations of the 'source'
statements that included the Kconfig file of the symbol (or other
item)
Limitations
===========
- Python 3 only
This is mostly due to Python 2 not having curses.get_wch(), which is needed
for Unicode support.
- Doesn't work out of the box on Windows
Has been tested to work with the wheels provided at
https://www.lfd.uci.edu/~gohlke/pythonlibs/#curses though.
"""
import curses
import errno
import locale
import os
import platform
import re
import sys
import textwrap
from kconfiglib import Symbol, Choice, MENU, COMMENT, MenuNode, \
BOOL, STRING, INT, HEX, UNKNOWN, \
AND, OR, \
expr_str, expr_value, split_expr, \
standard_sc_expr_str, \
TRI_TO_STR, TYPE_TO_STR, \
standard_kconfig, standard_config_filename
#
# Configuration variables
#
# If True, try to convert LC_CTYPE to a UTF-8 locale if it is set to the C
# locale (which implies ASCII). This fixes curses Unicode I/O issues on systems
# with bad defaults. ncurses configures itself from the locale settings.
#
# Related PEP: https://www.python.org/dev/peps/pep-0538/
_CONVERT_C_LC_CTYPE_TO_UTF8 = True
# How many steps an implicit submenu will be indented. Implicit submenus are
# created when an item depends on the symbol before it. Note that symbols
# defined with 'menuconfig' create a separate menu instead of indenting.
_SUBMENU_INDENT = 4
# Number of steps for Page Up/Down to jump
_PG_JUMP = 6
# How far the cursor needs to be from the edge of the window before it starts
# to scroll. Used for the main menu display, the information display, the
# search display, and for text boxes.
_SCROLL_OFFSET = 5
# Minimum width of dialogs that ask for text input
_INPUT_DIALOG_MIN_WIDTH = 30
# Number of arrows pointing up/down to draw when a window is scrolled
_N_SCROLL_ARROWS = 14
# Lines of help text shown at the bottom of the "main" display
_MAIN_HELP_LINES = """
[Space/Enter] Toggle/enter [ESC] Leave menu [S] Save
[O] Load [?] Symbol info [/] Jump to symbol
[A] Toggle show-all mode [C] Toggle show-name mode
[Q] Quit (prompts for save) [D] Save minimal config (advanced)
"""[1:-1].split("\n")
# Lines of help text shown at the bottom of the information dialog
_INFO_HELP_LINES = """
[ESC/q] Return to menu [/] Jump to symbol
"""[1:-1].split("\n")
# Lines of help text shown at the bottom of the search dialog
_JUMP_TO_HELP_LINES = """
Type text to narrow the search. Regexes are supported (via Python's 're'
module). The up/down cursor keys step in the list. [Enter] jumps to the
selected symbol. [ESC] aborts the search. Type multiple space-separated
strings/regexes to find entries that match all of them. Type Ctrl-F to
view the help of the selected item without leaving the dialog.
"""[1:-1].split("\n")
#
# Styling
#
_STYLES = {
"default": """
path=fg:black,bg:white,bold
separator=fg:black,bg:yellow,bold
list=fg:black,bg:white
selection=fg:white,bg:blue,bold
inv-list=fg:red,bg:white
inv-selection=fg:red,bg:blue
help=path
frame=fg:black,bg:yellow,bold
body=fg:white,bg:black
edit=fg:white,bg:blue
jump-edit=edit
text=list
""",
# This style is forced on terminals that do no support colors
"monochrome": """
path=bold
separator=bold,standout
list=
selection=bold,standout
inv-list=bold
inv-selection=bold,standout
help=bold
frame=bold,standout
body=
edit=standout
jump-edit=
text=
""",
# Blue tinted style loosely resembling lxdialog
"aquatic": """
path=fg:cyan,bg:blue,bold
separator=fg:white,bg:cyan,bold
help=path
frame=fg:white,bg:cyan,bold
body=fg:brightwhite,bg:blue
edit=fg:black,bg:white
"""
}
# Standard colors definition
_STYLE_STD_COLORS = {
# Basic colors
"black": curses.COLOR_BLACK,
"red": curses.COLOR_RED,
"green": curses.COLOR_GREEN,
"yellow": curses.COLOR_YELLOW,
"blue": curses.COLOR_BLUE,
"magenta": curses.COLOR_MAGENTA,
"cyan": curses.COLOR_CYAN,
"white": curses.COLOR_WHITE,
# Bright versions
"brightblack": curses.COLOR_BLACK + 8,
"brightred": curses.COLOR_RED + 8,
"brightgreen": curses.COLOR_GREEN + 8,
"brightyellow": curses.COLOR_YELLOW + 8,
"brightblue": curses.COLOR_BLUE + 8,
"brightmagenta": curses.COLOR_MAGENTA + 8,
"brightcyan": curses.COLOR_CYAN + 8,
"brightwhite": curses.COLOR_WHITE + 8,
# Aliases
"purple": curses.COLOR_MAGENTA,
"brightpurple": curses.COLOR_MAGENTA + 8,
}
def _rgb_to_6cube(rgb):
# Converts an 888 RGB color to a 3-tuple (nice in that it's hashable)
# representing the closest xterm 256-color 6x6x6 color cube color.
#
# The xterm 256-color extension uses a RGB color palette with components in
# the range 0-5 (a 6x6x6 cube). The catch is that the mapping is nonlinear.
# Index 0 in the 6x6x6 cube is mapped to 0, index 1 to 95, then 135, 175,
# etc., in increments of 40. See the links below:
#
# https://commons.wikimedia.org/wiki/File:Xterm_256color_chart.svg
# https://github.com/tmux/tmux/blob/master/colour.c
# 48 is the middle ground between 0 and 95.
return tuple(0 if x < 48 else int(round(max(1, (x - 55)/40))) for x in rgb)
def _6cube_to_rgb(r6g6b6):
# Returns the 888 RGB color for a 666 xterm color cube index
return tuple(0 if x == 0 else 40*x + 55 for x in r6g6b6)
def _rgb_to_gray(rgb):
# Converts an 888 RGB color to the index of an xterm 256-color grayscale
# color with approx. the same perceived brightness
# Calculate the luminance (gray intensity) of the color. See
# https://stackoverflow.com/questions/596216/formula-to-determine-brightness-of-rgb-color
# and
# https://www.w3.org/TR/AERT/#color-contrast
luma = 0.299*rgb[0] + 0.587*rgb[1] + 0.114*rgb[2]
# Closest index in the grayscale palette, which starts at RGB 0x080808,
# with stepping 0x0A0A0A
index = int(round((luma - 8)/10))
# Clamp the index to 0-23, corresponding to 232-255
return max(0, min(index, 23))
def _gray_to_rgb(index):
# Converts a grayscale index to the corresponding gray as an RGB triple
return 3*(10*index + 8,) # Returns a 3-tuple
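# Illustrative worked example (not part of the original script): the 6x6x6
# cube mapping is exact at the palette anchor values (0, 95, 135, 175, ...),
# and the grayscale mapping rounds to the nearest 10-step gray level:
#
#     >>> _rgb_to_6cube((95, 135, 0))
#     (1, 2, 0)
#     >>> _6cube_to_rgb((1, 2, 0))
#     (95, 135, 0)
#     >>> _rgb_to_gray((128, 128, 128))
#     12
#     >>> _gray_to_rgb(12)
#     (128, 128, 128)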
# Obscure Python: We never pass a value for rgb2index, and it keeps pointing to
# the same dict. This avoids a global.
def _alloc_rgb(rgb, rgb2index={}):
# Initialize a new entry in the xterm palette to the given RGB color,
# returning its index. If the color has already been initialized, the index
# of the existing entry is returned.
#
# ncurses is palette-based, so we need to overwrite palette entries to make
# new colors.
#
# The colors from 0 to 15 are user-defined, and there's no way to query
# their RGB values, so we better leave them untouched. Also leave any
# hypothetical colors above 255 untouched (though we're unlikely to
# allocate that many colors anyway).
if rgb in rgb2index:
return rgb2index[rgb]
# Many terminals allow the user to customize the first 16 colors. Avoid
# changing their values.
color_index = 16 + len(rgb2index)
if color_index >= 256:
_warn("Unable to allocate new RGB color ", rgb, ". Too many colors "
"allocated.")
return 0
# Map each RGB component from the range 0-255 to the range 0-1000, which is
# what curses uses
curses.init_color(color_index, *(int(round(1000*x/255)) for x in rgb))
rgb2index[rgb] = color_index
return color_index
def _color_from_num(num):
# Returns the index of a color that looks like color 'num' in the xterm
# 256-color palette (but that might not be 'num', if we're redefining
# colors)
# - _alloc_rgb() won't touch the first 16 colors or any (hypothetical)
# colors above 255, so we can always return them as-is
#
# - If the terminal doesn't support changing color definitions, or if
# curses.COLORS < 256, _alloc_rgb() won't touch any color, and all colors
# can be returned as-is
if num < 16 or num > 255 or not curses.can_change_color() or \
curses.COLORS < 256:
return num
# _alloc_rgb() might redefine colors, so emulate the xterm 256-color
# palette by allocating new colors instead of returning color numbers
# directly
if num < 232:
num -= 16
return _alloc_rgb(_6cube_to_rgb(((num//36)%6, (num//6)%6, num%6)))
return _alloc_rgb(_gray_to_rgb(num - 232))
def _color_from_rgb(rgb):
# Returns the index of a color matching the 888 RGB color 'rgb'. The
# returned color might be an ~exact match or an approximation, depending on
# terminal capabilities.
# Calculates the Euclidean distance between two RGB colors
def dist(r1, r2): return sum((x - y)**2 for x, y in zip(r1, r2))
if curses.COLORS >= 256:
# Assume we're dealing with xterm's 256-color extension
if curses.can_change_color():
# Best case -- the terminal supports changing palette entries via
# curses.init_color(). Initialize an unused palette entry and
# return it.
return _alloc_rgb(rgb)
# Second best case -- pick between the xterm 256-color extension colors
# Closest 6-cube "color" color
c6 = _rgb_to_6cube(rgb)
# Closest gray color
gray = _rgb_to_gray(rgb)
if dist(rgb, _6cube_to_rgb(c6)) < dist(rgb, _gray_to_rgb(gray)):
# Use the "color" color from the 6x6x6 color palette. Calculate the
# color number from the 6-cube index triplet.
return 16 + 36*c6[0] + 6*c6[1] + c6[2]
# Use the color from the gray palette
return 232 + gray
# Terminal not in xterm 256-color mode. This is probably the best we can
# do, or is it? Submit patches. :)
min_dist = float('inf')
best = -1
for color in range(curses.COLORS):
# ncurses uses the range 0..1000. Scale that down to 0..255.
d = dist(rgb, tuple(int(round(255*c/1000))
for c in curses.color_content(color)))
if d < min_dist:
min_dist = d
best = color
return best
# Dictionary mapping element types to the curses attributes used to display
# them
_style = {}
def _parse_style(style_str, parsing_default):
# Parses a string with '<element>=<style>' assignments. Anything not
# containing '=' is assumed to be a reference to a built-in style, which is
# treated as if all the assignments from the style were inserted at that
# point in the string.
#
# The parsing_default flag is set to True when we're implicitly parsing the
# 'default'/'monochrome' style, to prevent warnings.
for sline in style_str.split():
# Words without a "=" character represents a style template
if "=" in sline:
key, data = sline.split("=", 1)
# The 'default' style template is assumed to define all keys. We
# run _style_to_curses() for non-existing keys as well, so that we
# print warnings for errors to the right of '=' for those too.
if key not in _style and not parsing_default:
_warn("Ignoring non-existent style", key)
# If data is a reference to another key, copy its style
if data in _style:
_style[key] = _style[data]
else:
_style[key] = _style_to_curses(data)
elif sline in _STYLES:
# Recursively parse style template. Ignore styles that don't exist,
# for backwards/forwards compatibility.
_parse_style(_STYLES[sline], parsing_default)
else:
_warn("Ignoring non-existent style template", sline)
def _style_to_curses(style_def):
# Parses a style definition string (<element>=<style>), returning
# a (fg_color, bg_color, attributes) tuple.
def parse_color(color_def):
color_def = color_def.split(":", 1)[1]
if color_def in _STYLE_STD_COLORS:
return _color_from_num(_STYLE_STD_COLORS[color_def])
# HTML format, #RRGGBB
if re.match("#[A-Fa-f0-9]{6}", color_def):
return _color_from_rgb((
int(color_def[1:3], 16),
int(color_def[3:5], 16),
int(color_def[5:7], 16)))
try:
color_num = _color_from_num(int(color_def, 0))
except ValueError:
_warn("Ignoring color ", color_def, "that's neither predefined "
"nor a number")
return -1
if not -1 <= color_num < curses.COLORS:
_warn("Ignoring color {}, which is outside the range "
"-1..curses.COLORS-1 (-1..{})"
.format(color_def, curses.COLORS - 1))
return -1
return color_num
fg_color = -1
bg_color = -1
attrs = 0
if style_def:
for field in style_def.split(","):
if field.startswith("fg:"):
fg_color = parse_color(field)
elif field.startswith("bg:"):
bg_color = parse_color(field)
elif field == "bold":
# A_BOLD tends to produce faint and hard-to-read text on the
# Windows console, especially with the old color scheme, before
# the introduction of
# https://blogs.msdn.microsoft.com/commandline/2017/08/02/updating-the-windows-console-colors/
attrs |= curses.A_NORMAL if _IS_WINDOWS else curses.A_BOLD
elif field == "standout":
attrs |= curses.A_STANDOUT
elif field == "underline":
attrs |= curses.A_UNDERLINE
else:
_warn("Ignoring unknown style attribute", field)
return _style_attr(fg_color, bg_color, attrs)
def _init_styles():
if curses.has_colors():
curses.use_default_colors()
# Use the 'monochrome' style template as the base on terminals without
# color
_parse_style("default" if curses.has_colors() else "monochrome", True)
# Add any user-defined style from the environment
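    #
    # A hypothetical example value (the element names 'list' and 'selection'
    # come from the _style lookups in this file, and the color names assume
    # the standard entries in _STYLE_STD_COLORS):
    #
    #   MENUCONFIG_STYLE="list=fg:white,bg:blue selection=fg:black,bg:cyan,bold"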
if "MENUCONFIG_STYLE" in os.environ:
_parse_style(os.environ["MENUCONFIG_STYLE"], False)
# color_attribs holds the color pairs we've already created, indexed by a
# (<foreground color>, <background color>) tuple.
#
# Obscure Python: We never pass a value for color_attribs, and it keeps
# pointing to the same dict. This avoids a global.
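#
# A minimal, hypothetical illustration of the idiom (not part of the interface
# itself):
#
#   def count_calls(calls=[]):
#       calls.append(None)
#       return len(calls)
#
#   count_calls()  # -> 1
#   count_calls()  # -> 2, since the same default list object is reused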
def _style_attr(fg_color, bg_color, attribs, color_attribs={}):
# Returns an attribute with the specified foreground and background color
# and the attributes in 'attribs'. Reuses color pairs already created if
# possible, and creates a new color pair otherwise.
#
# Returns 'attribs' if colors aren't supported.
if not curses.has_colors():
return attribs
if (fg_color, bg_color) not in color_attribs:
# Create new color pair. Color pair number 0 is hardcoded and cannot be
# changed, hence the +1s.
curses.init_pair(len(color_attribs) + 1, fg_color, bg_color)
color_attribs[(fg_color, bg_color)] = \
curses.color_pair(len(color_attribs) + 1)
return color_attribs[(fg_color, bg_color)] | attribs
#
# Main application
#
# Used as the entry point in setup.py
def _main():
menuconfig(standard_kconfig())
def menuconfig(kconf):
"""
Launches the configuration interface, returning after the user exits.
kconf:
Kconfig instance to be configured
"""
global _kconf
global _config_filename
global _show_all
global _conf_changed
_kconf = kconf
_config_filename = standard_config_filename()
if os.path.exists(_config_filename):
_conf_changed = False
print("Using existing configuration '{}' as base"
.format(_config_filename))
_kconf.load_config(_config_filename)
else:
# Always prompt for save if the .config doesn't exist
_conf_changed = True
if kconf.defconfig_filename is not None:
print("Using default configuration found in '{}' as base"
.format(kconf.defconfig_filename))
_kconf.load_config(kconf.defconfig_filename)
else:
print("Using default symbol values as base")
# Any visible items in the top menu?
_show_all = False
if not _shown_nodes(_kconf.top_node):
# Nothing visible. Start in show-all mode and try again.
_show_all = True
if not _shown_nodes(_kconf.top_node):
# Give up. The implementation relies on always having a selected
# node.
print("Empty configuration -- nothing to configure.\n"
"Check that environment variables are set properly.")
return
# Disable warnings. They get mangled in curses mode, and we deal with
# errors ourselves.
_kconf.disable_warnings()
# Make curses use the locale settings specified in the environment
locale.setlocale(locale.LC_ALL, "")
# Try to fix Unicode issues on systems with bad defaults
if _CONVERT_C_LC_CTYPE_TO_UTF8:
_convert_c_lc_ctype_to_utf8()
# Get rid of the delay between pressing ESC and jumping to the parent menu,
# unless the user has set ESCDELAY (see ncurses(3)). This makes the UI much
# smoother to work with.
#
    # Note: Strictly speaking, this is pretty iffy, since escape codes for
    # e.g. cursor keys start with ESC, but I've never seen it cause problems
    # in practice
# (probably because it's unlikely that the escape code for a key would get
# split up across read()s, at least with a terminal emulator). Please
# report if you run into issues. Some suitable small default value could be
# used here instead in that case. Maybe it's silly to not put in the
# smallest imperceptible delay here already, though I don't like guessing.
#
# (From a quick glance at the ncurses source code, ESCDELAY might only be
# relevant for mouse events there, so maybe escapes are assumed to arrive
# in one piece already...)
os.environ.setdefault("ESCDELAY", "0")
# Enter curses mode. _menuconfig() returns a string to print on exit, after
# curses has been de-initialized.
print(curses.wrapper(_menuconfig))
# Global variables used below:
#
# _stdscr:
# stdscr from curses
#
# _cur_menu:
# Menu node of the menu (or menuconfig symbol, or choice) currently being
# shown
#
# _shown:
# List of items in _cur_menu that are shown (ignoring scrolling). In
# show-all mode, this list contains all items in _cur_menu. Otherwise, it
# contains just the visible items.
#
# _sel_node_i:
# Index in _shown of the currently selected node
#
# _menu_scroll:
# Index in _shown of the top row of the main display
#
# _parent_screen_rows:
# List/stack of the row numbers that the selections in the parent menus
# appeared on. This is used to prevent the scrolling from jumping around
# when going in and out of menus.
#
# _show_all:
# If True, "show-all" mode is on. Show-all mode shows all symbols and other
# items in the current menu, including those that lack a prompt or aren't
# currently visible.
#
# Invisible items are drawn in a different style to make them stand out.
#
# _show_name:
# If True, the names of all symbols are shown in addition to the prompt.
#
# _conf_changed:
# True if the configuration has been changed. If False, we don't bother
# showing the save-and-quit dialog.
#
# We reset this to False whenever the configuration is saved explicitly
# from the save dialog.
def _menuconfig(stdscr):
# Logic for the main display, with the list of symbols, etc.
global _stdscr
global _conf_changed
global _show_name
_stdscr = stdscr
_init()
while True:
_draw_main()
curses.doupdate()
c = _get_wch_compat(_menu_win)
if c == curses.KEY_RESIZE:
_resize_main()
elif c in (curses.KEY_DOWN, "j", "J"):
_select_next_menu_entry()
elif c in (curses.KEY_UP, "k", "K"):
_select_prev_menu_entry()
elif c in (curses.KEY_NPAGE, "\x04"): # Page Down/Ctrl-D
# Keep it simple. This way we get sane behavior for small windows,
# etc., for free.
for _ in range(_PG_JUMP):
_select_next_menu_entry()
elif c in (curses.KEY_PPAGE, "\x15"): # Page Up/Ctrl-U
for _ in range(_PG_JUMP):
_select_prev_menu_entry()
elif c in (curses.KEY_END, "G"):
_select_last_menu_entry()
elif c in (curses.KEY_HOME, "g"):
_select_first_menu_entry()
elif c in (curses.KEY_RIGHT, " ", "\n", "l", "L"):
# Do appropriate node action. Only Space is treated specially,
# preferring to toggle nodes rather than enter menus.
sel_node = _shown[_sel_node_i]
if sel_node.is_menuconfig and not \
(c == " " and _prefer_toggle(sel_node.item)):
_enter_menu(sel_node)
else:
_change_node(sel_node)
if _is_y_mode_choice_sym(sel_node.item) and not sel_node.list:
# Immediately jump to the parent menu after making a choice
# selection, like 'make menuconfig' does, except if the
# menu node has children (which can happen if a symbol
# 'depends on' a choice symbol that immediately precedes
# it).
_leave_menu()
elif c in ("n", "N"):
_set_sel_node_tri_val(0)
elif c in ("m", "M"):
_set_sel_node_tri_val(1)
elif c in ("y", "Y"):
_set_sel_node_tri_val(2)
elif c in (curses.KEY_LEFT, curses.KEY_BACKSPACE, _ERASE_CHAR,
"\x1B", # \x1B = ESC
"h", "H"):
if c == "\x1B" and _cur_menu is _kconf.top_node:
res = _quit_dialog()
if res:
return res
else:
_leave_menu()
elif c in ("o", "O"):
if _conf_changed:
c = _key_dialog(
"Load",
"You have unsaved changes. Load new\n"
"configuration anyway?\n"
"\n"
" (Y)es (C)ancel",
"yc")
if c is None or c == "c":
continue
if _load_dialog():
_conf_changed = False
elif c in ("s", "S"):
if _save_dialog(_kconf.write_config, _config_filename,
"configuration"):
_conf_changed = False
elif c in ("d", "D"):
_save_dialog(_kconf.write_min_config, "defconfig",
"minimal configuration")
elif c == "/":
_jump_to_dialog()
# The terminal might have been resized while the fullscreen jump-to
# dialog was open
_resize_main()
elif c == "?":
_info_dialog(_shown[_sel_node_i], False)
# The terminal might have been resized while the fullscreen info
# dialog was open
_resize_main()
elif c in ("a", "A"):
_toggle_show_all()
elif c in ("c", "C"):
_show_name = not _show_name
elif c in ("q", "Q"):
res = _quit_dialog()
if res:
return res
def _quit_dialog():
if not _conf_changed:
return "No changes to save"
while True:
c = _key_dialog(
"Quit",
" Save configuration?\n"
"\n"
"(Y)es (N)o (C)ancel",
"ync")
if c is None or c == "c":
return None
if c == "y":
if _try_save(_kconf.write_config, _config_filename,
"configuration"):
return "Configuration saved to '{}'" \
.format(_config_filename)
elif c == "n":
return "Configuration was not saved"
def _init():
# Initializes the main display with the list of symbols, etc. Also does
# misc. global initialization that needs to happen after initializing
# curses.
global _ERASE_CHAR
global _path_win
global _top_sep_win
global _menu_win
global _bot_sep_win
global _help_win
global _parent_screen_rows
global _cur_menu
global _shown
global _sel_node_i
global _menu_scroll
global _show_name
# Looking for this in addition to KEY_BACKSPACE (which is unreliable) makes
# backspace work with TERM=vt100. That makes it likely to work in sane
# environments.
#
# erasechar() returns a 'bytes' object. Since we use get_wch(), we need to
# decode it. Just give up and avoid crashing if it can't be decoded.
_ERASE_CHAR = curses.erasechar().decode("utf-8", "ignore")
_init_styles()
# Hide the cursor
_safe_curs_set(0)
# Initialize windows
# Top row, with menu path
_path_win = _styled_win("path")
# Separator below menu path, with title and arrows pointing up
_top_sep_win = _styled_win("separator")
# List of menu entries with symbols, etc.
_menu_win = _styled_win("list")
_menu_win.keypad(True)
# Row below menu list, with arrows pointing down
_bot_sep_win = _styled_win("separator")
# Help window with keys at the bottom
_help_win = _styled_win("help")
# The rows we'd like the nodes in the parent menus to appear on. This
# prevents the scroll from jumping around when going in and out of menus.
_parent_screen_rows = []
# Initial state
_cur_menu = _kconf.top_node
_shown = _shown_nodes(_cur_menu)
_sel_node_i = _menu_scroll = 0
_show_name = False
# Give windows their initial size
_resize_main()
def _resize_main():
# Resizes the main display, with the list of symbols, etc., to fill the
# terminal
global _menu_scroll
screen_height, screen_width = _stdscr.getmaxyx()
_path_win.resize(1, screen_width)
_top_sep_win.resize(1, screen_width)
_bot_sep_win.resize(1, screen_width)
help_win_height = len(_MAIN_HELP_LINES)
menu_win_height = screen_height - help_win_height - 3
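    # The three fixed rows are the menu path row, the top separator, and the
    # bottom separator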
if menu_win_height >= 1:
_menu_win.resize(menu_win_height, screen_width)
_help_win.resize(help_win_height, screen_width)
_top_sep_win.mvwin(1, 0)
_menu_win.mvwin(2, 0)
_bot_sep_win.mvwin(2 + menu_win_height, 0)
_help_win.mvwin(2 + menu_win_height + 1, 0)
else:
# Degenerate case. Give up on nice rendering and just prevent errors.
menu_win_height = 1
_menu_win.resize(1, screen_width)
_help_win.resize(1, screen_width)
for win in _top_sep_win, _menu_win, _bot_sep_win, _help_win:
win.mvwin(0, 0)
# Adjust the scroll so that the selected node is still within the window,
# if needed
if _sel_node_i - _menu_scroll >= menu_win_height:
_menu_scroll = _sel_node_i - menu_win_height + 1
def _menu_win_height():
# Returns the height of the menu display
return _menu_win.getmaxyx()[0]
def _prefer_toggle(item):
# For nodes with menus, determines whether Space should change the value of
# the node's item or enter its menu. We toggle symbols (which have menus
# when they're defined with 'menuconfig') and choices that can be in more
# than one mode (e.g. optional choices). In other cases, we enter the menu.
return isinstance(item, Symbol) or \
(isinstance(item, Choice) and len(item.assignable) > 1)
def _enter_menu(menu):
# Makes 'menu' the currently displayed menu
global _cur_menu
global _shown
global _sel_node_i
global _menu_scroll
shown_sub = _shown_nodes(menu)
# Never enter empty menus. We depend on having a current node.
if shown_sub:
# Remember where the current node appears on the screen, so we can try
# to get it to appear in the same place when we leave the menu
_parent_screen_rows.append(_sel_node_i - _menu_scroll)
# Jump into menu
_cur_menu = menu
_shown = shown_sub
_sel_node_i = _menu_scroll = 0
def _jump_to(node):
# Jumps directly to the menu node 'node'
global _cur_menu
global _shown
global _sel_node_i
global _menu_scroll
global _show_all
global _parent_screen_rows
# Clear remembered menu locations. We might not even have been in the
# parent menus before.
_parent_screen_rows = []
old_show_all = _show_all
jump_into = (isinstance(node.item, Choice) or node.item == MENU) and \
node.list
# If we're jumping to a non-empty choice or menu, jump to the first entry
# in it instead of jumping to its menu node
if jump_into:
_cur_menu = node
node = node.list
else:
_cur_menu = _parent_menu(node)
_shown = _shown_nodes(_cur_menu)
if node not in _shown:
# The node wouldn't be shown. Turn on show-all to show it.
_show_all = True
_shown = _shown_nodes(_cur_menu)
_sel_node_i = _shown.index(node)
if jump_into and not old_show_all and _show_all:
# If we're jumping into a choice or menu and were forced to turn on
# show-all because the first entry wasn't visible, try turning it off.
# That will land us at the first visible node if there are visible
# nodes, and is a no-op otherwise.
_toggle_show_all()
_center_vertically()
def _leave_menu():
# Jumps to the parent menu of the current menu. Does nothing if we're in
# the top menu.
global _cur_menu
global _shown
global _sel_node_i
global _menu_scroll
if _cur_menu is _kconf.top_node:
return
# Jump to parent menu
parent = _parent_menu(_cur_menu)
_shown = _shown_nodes(parent)
_sel_node_i = _shown.index(_cur_menu)
_cur_menu = parent
# Try to make the menu entry appear on the same row on the screen as it did
# before we entered the menu.
if _parent_screen_rows:
# The terminal might have shrunk since we were last in the parent menu
screen_row = min(_parent_screen_rows.pop(), _menu_win_height() - 1)
_menu_scroll = max(_sel_node_i - screen_row, 0)
else:
# No saved parent menu locations, meaning we jumped directly to some
# node earlier
_center_vertically()
def _select_next_menu_entry():
# Selects the menu entry after the current one, adjusting the scroll if
# necessary. Does nothing if we're already at the last menu entry.
global _sel_node_i
global _menu_scroll
if _sel_node_i < len(_shown) - 1:
# Jump to the next node
_sel_node_i += 1
# If the new node is sufficiently close to the edge of the menu window
# (as determined by _SCROLL_OFFSET), increase the scroll by one. This
# gives nice and non-jumpy behavior even when
# _SCROLL_OFFSET >= _menu_win_height().
if _sel_node_i >= _menu_scroll + _menu_win_height() - _SCROLL_OFFSET:
_menu_scroll = min(_menu_scroll + 1,
_max_scroll(_shown, _menu_win))
def _select_prev_menu_entry():
# Selects the menu entry before the current one, adjusting the scroll if
# necessary. Does nothing if we're already at the first menu entry.
global _sel_node_i
global _menu_scroll
if _sel_node_i > 0:
# Jump to the previous node
_sel_node_i -= 1
# See _select_next_menu_entry()
if _sel_node_i <= _menu_scroll + _SCROLL_OFFSET:
_menu_scroll = max(_menu_scroll - 1, 0)
def _select_last_menu_entry():
# Selects the last menu entry in the current menu
global _sel_node_i
global _menu_scroll
_sel_node_i = len(_shown) - 1
_menu_scroll = _max_scroll(_shown, _menu_win)
def _select_first_menu_entry():
# Selects the first menu entry in the current menu
global _sel_node_i
global _menu_scroll
_sel_node_i = _menu_scroll = 0
def _toggle_show_all():
# Toggles show-all mode on/off. If turning it off would give no visible
# items in the current menu, it is left on.
global _show_all
global _shown
global _sel_node_i
global _menu_scroll
# Row on the screen the cursor is on. Preferably we want the same row to
# stay highlighted.
old_row = _sel_node_i - _menu_scroll
_show_all = not _show_all
# List of new nodes to be shown after toggling _show_all
new_shown = _shown_nodes(_cur_menu)
# Find a good node to select. The selected node might disappear if show-all
# mode is turned off.
# If there are visible nodes before the previously selected node, select
# the closest one. This will select the previously selected node itself if
# it is still visible.
for node in reversed(_shown[:_sel_node_i + 1]):
if node in new_shown:
_sel_node_i = new_shown.index(node)
break
else:
# No visible nodes before the previously selected node. Select the
# closest visible node after it instead.
for node in _shown[_sel_node_i + 1:]:
if node in new_shown:
_sel_node_i = new_shown.index(node)
break
else:
# No visible nodes at all, meaning show-all was turned off inside
# an invisible menu. Don't allow that, as the implementation relies
# on always having a selected node.
_show_all = True
return
_shown = new_shown
# Try to make the cursor stay on the same row in the menu window. This
# might be impossible if too many nodes have disappeared above the node.
_menu_scroll = max(_sel_node_i - old_row, 0)
def _center_vertically():
# Centers the selected node vertically, if possible
global _menu_scroll
_menu_scroll = max(_sel_node_i - _menu_win_height()//2, 0)
def _draw_main():
# Draws the "main" display, with the list of symbols, the header, and the
# footer.
#
# This could be optimized to only update the windows that have actually
# changed, but keep it simple for now and let curses sort it out.
term_width = _stdscr.getmaxyx()[1]
#
# Update the separator row below the menu path
#
_top_sep_win.erase()
# Draw arrows pointing up if the symbol window is scrolled down. Draw them
# before drawing the title, so the title ends up on top for small windows.
if _menu_scroll > 0:
_safe_hline(_top_sep_win, 0, 4, curses.ACS_UARROW, _N_SCROLL_ARROWS)
# Add the 'mainmenu' text as the title, centered at the top
_safe_addstr(_top_sep_win,
0, max((term_width - len(_kconf.mainmenu_text))//2, 0),
_kconf.mainmenu_text)
_top_sep_win.noutrefresh()
# Note: The menu path at the top is deliberately updated last. See below.
#
# Update the symbol window
#
_menu_win.erase()
# Draw the _shown nodes starting from index _menu_scroll up to either as
# many as fit in the window, or to the end of _shown
for i in range(_menu_scroll,
min(_menu_scroll + _menu_win_height(), len(_shown))):
node = _shown[i]
# The 'not _show_all' test avoids showing invisible items in red
# outside show-all mode, which could look confusing/broken. Invisible
# symbols show up outside show-all mode if an invisible symbol has
# visible children in an implicit (indented) menu.
if not _show_all or (node.prompt and expr_value(node.prompt[1])):
style = _style["selection" if i == _sel_node_i else "list"]
else:
style = _style["inv-selection" if i == _sel_node_i else "inv-list"]
_safe_addstr(_menu_win, i - _menu_scroll, 0, _node_str(node), style)
_menu_win.noutrefresh()
#
# Update the bottom separator window
#
_bot_sep_win.erase()
# Draw arrows pointing down if the symbol window is scrolled up
if _menu_scroll < _max_scroll(_shown, _menu_win):
_safe_hline(_bot_sep_win, 0, 4, curses.ACS_DARROW, _N_SCROLL_ARROWS)
# Indicate when show-all and/or show-name mode is enabled
enabled_modes = []
if _show_all:
enabled_modes.append("show-all")
if _show_name:
enabled_modes.append("show-name")
if enabled_modes:
s = " and ".join(enabled_modes) + " mode enabled"
_safe_addstr(_bot_sep_win, 0, term_width - len(s) - 2, s)
_bot_sep_win.noutrefresh()
#
# Update the help window
#
_help_win.erase()
for i, line in enumerate(_MAIN_HELP_LINES):
_safe_addstr(_help_win, i, 0, line)
_help_win.noutrefresh()
#
# Update the top row with the menu path.
#
# Doing this last leaves the cursor on the top row, which avoids some minor
# annoying jumpiness in gnome-terminal when reducing the height of the
# terminal. It seems to happen whenever the row with the cursor on it
# disappears.
#
_path_win.erase()
# Draw the menu path ("(top menu) -> menu -> submenu -> ...")
menu_prompts = []
menu = _cur_menu
while menu is not _kconf.top_node:
# Promptless choices can be entered in show-all mode. Use
# standard_sc_expr_str() for them, so they show up as
# '<choice (name if any)>'.
menu_prompts.append(menu.prompt[0] if menu.prompt else
standard_sc_expr_str(menu.item))
menu = _parent_menu(menu)
menu_prompts.append("(top menu)")
menu_prompts.reverse()
# Hack: We can't put ACS_RARROW directly in the string. Temporarily
# represent it with NULL. Maybe using a Unicode character would be better.
menu_path_str = " \0 ".join(menu_prompts)
# Scroll the menu path to the right if needed to make the current menu's
# title visible
if len(menu_path_str) > term_width:
menu_path_str = menu_path_str[len(menu_path_str) - term_width:]
# Print the path with the arrows reinserted
split_path = menu_path_str.split("\0")
_safe_addstr(_path_win, split_path[0])
for s in split_path[1:]:
_safe_addch(_path_win, curses.ACS_RARROW)
_safe_addstr(_path_win, s)
_path_win.noutrefresh()
def _parent_menu(node):
# Returns the menu node of the menu that contains 'node'. In addition to
# proper 'menu's, this might also be a 'menuconfig' symbol or a 'choice'.
# "Menu" here means a menu in the interface.
menu = node.parent
while not menu.is_menuconfig:
menu = menu.parent
return menu
def _shown_nodes(menu):
# Returns the list of menu nodes from 'menu' (see _parent_menu()) that
# would be shown when entering it
def rec(node):
res = []
while node:
# If a node has children but doesn't have the is_menuconfig flag
# set, the children come from a submenu created implicitly from
# dependencies, and are shown (indented) in the same menu as the
# parent node
shown_children = \
rec(node.list) if node.list and not node.is_menuconfig else []
# Always show the node if it is the root of an implicit submenu
# with visible items, even when the node itself is invisible. This
# can happen e.g. if the symbol has an optional prompt
# ('prompt "foo" if COND') that is currently invisible.
if shown(node) or shown_children:
res.append(node)
res.extend(shown_children)
node = node.next
return res
def shown(node):
# Show the node if its prompt is visible. For menus, also check
# 'visible if'. In show-all mode, show everything.
return _show_all or \
(node.prompt and expr_value(node.prompt[1]) and not
(node.item == MENU and not expr_value(node.visibility)))
if isinstance(menu.item, Choice):
# For named choices defined in multiple locations, entering the choice
# at a particular menu node would normally only show the choice symbols
# defined there (because that's what the MenuNode tree looks like).
#
# That might look confusing, and makes extending choices by defining
# them in multiple locations less useful. Instead, gather all the child
# menu nodes for all the choices whenever a choice is entered. That
# makes all choice symbols visible at all locations.
#
# Choices can contain non-symbol items (people do all sorts of weird
# stuff with them), hence the generality here. We really need to
# preserve the menu tree at each choice location.
#
# Note: Named choices are pretty broken in the C tools, and this is
# super obscure, so you probably won't find much that relies on this.
return [node
for choice_node in menu.item.nodes
for node in rec(choice_node.list)]
return rec(menu.list)
def _change_node(node):
# Changes the value of the menu node 'node' if it is a symbol. Bools and
# tristates are toggled, while other symbol types pop up a text entry
# dialog.
if not isinstance(node.item, (Symbol, Choice)):
return
# This will hit for invisible symbols, which appear in show-all mode and
# when an invisible symbol has visible children (which can happen e.g. for
# symbols with optional prompts)
if not (node.prompt and expr_value(node.prompt[1])):
return
# sc = symbol/choice
sc = node.item
if sc.type in (INT, HEX, STRING):
s = sc.str_value
while True:
s = _input_dialog("{} ({})".format(
node.prompt[0], TYPE_TO_STR[sc.type]),
s, _range_info(sc))
if s is None:
break
if sc.type in (INT, HEX):
s = s.strip()
# 'make menuconfig' does this too. Hex values not starting with
# '0x' are accepted when loading .config files though.
if sc.type == HEX and not s.startswith(("0x", "0X")):
s = "0x" + s
if _check_validity(sc, s):
_set_val(sc, s)
break
elif len(sc.assignable) == 1:
# Handles choice symbols for choices in y mode, which are a special
# case: .assignable can be (2,) while .tri_value is 0.
_set_val(sc, sc.assignable[0])
else:
# Set the symbol to the value after the current value in
# sc.assignable, with wrapping
val_index = sc.assignable.index(sc.tri_value)
_set_val(sc, sc.assignable[(val_index + 1) % len(sc.assignable)])
def _set_sel_node_tri_val(tri_val):
# Sets the value of the currently selected menu entry to 'tri_val', if that
# value can be assigned
sc = _shown[_sel_node_i].item
if isinstance(sc, (Symbol, Choice)) and tri_val in sc.assignable:
_set_val(sc, tri_val)
def _set_val(sc, val):
# Wrapper around Symbol/Choice.set_value() for updating the menu state and
# _conf_changed
global _conf_changed
# Use the string representation of tristate values. This makes the format
# consistent for all symbol types.
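    # (TRI_TO_STR maps the tristate values 0, 1, and 2 to "n", "m", and "y")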
if val in TRI_TO_STR:
val = TRI_TO_STR[val]
if val != sc.str_value:
sc.set_value(val)
_conf_changed = True
# Changing the value of the symbol might have changed what items in the
# current menu are visible. Recalculate the state.
_update_menu()
def _update_menu():
# Updates the current menu after the value of a symbol or choice has been
# changed. Changing a value might change which items in the menu are
# visible.
#
# Tries to preserve the location of the cursor when items disappear above
# it.
global _shown
global _sel_node_i
global _menu_scroll
# Row on the screen the cursor was on
old_row = _sel_node_i - _menu_scroll
sel_node = _shown[_sel_node_i]
# New visible nodes
_shown = _shown_nodes(_cur_menu)
# New index of selected node
_sel_node_i = _shown.index(sel_node)
# Try to make the cursor stay on the same row in the menu window. This
# might be impossible if too many nodes have disappeared above the node.
_menu_scroll = max(_sel_node_i - old_row, 0)
def _input_dialog(title, initial_text, info_text=None):
# Pops up a dialog that prompts the user for a string
#
# title:
# Title to display at the top of the dialog window's border
#
# initial_text:
# Initial text to prefill the input field with
#
# info_text:
# String to show next to the input field. If None, just the input field
# is shown.
win = _styled_win("body")
win.keypad(True)
info_lines = info_text.split("\n") if info_text else []
# Give the input dialog its initial size
_resize_input_dialog(win, title, info_lines)
_safe_curs_set(2)
# Input field text
s = initial_text
# Cursor position
i = len(initial_text)
def edit_width():
return win.getmaxyx()[1] - 4
# Horizontal scroll offset
hscroll = max(i - edit_width() + 1, 0)
while True:
# Draw the "main" display with the menu, etc., so that resizing still
# works properly. This is like a stack of windows, only hardcoded for
# now.
_draw_main()
_draw_input_dialog(win, title, info_lines, s, i, hscroll)
curses.doupdate()
c = _get_wch_compat(win)
if c == curses.KEY_RESIZE:
# Resize the main display too. The dialog floats above it.
_resize_main()
_resize_input_dialog(win, title, info_lines)
elif c == "\n":
_safe_curs_set(0)
return s
elif c == "\x1B": # \x1B = ESC
_safe_curs_set(0)
return None
else:
s, i, hscroll = _edit_text(c, s, i, hscroll, edit_width())
def _resize_input_dialog(win, title, info_lines):
# Resizes the input dialog to a size appropriate for the terminal size
screen_height, screen_width = _stdscr.getmaxyx()
win_height = 5
if info_lines:
win_height += len(info_lines) + 1
win_height = min(win_height, screen_height)
win_width = max(_INPUT_DIALOG_MIN_WIDTH,
len(title) + 4,
*(len(line) + 4 for line in info_lines))
win_width = min(win_width, screen_width)
win.resize(win_height, win_width)
win.mvwin((screen_height - win_height)//2,
(screen_width - win_width)//2)
def _draw_input_dialog(win, title, info_lines, s, i, hscroll):
edit_width = win.getmaxyx()[1] - 4
win.erase()
# Note: Perhaps having a separate window for the input field would be nicer
visible_s = s[hscroll:hscroll + edit_width]
_safe_addstr(win, 2, 2, visible_s + " "*(edit_width - len(visible_s)),
_style["edit"])
for linenr, line in enumerate(info_lines):
_safe_addstr(win, 4 + linenr, 2, line)
# Draw the frame last so that it overwrites the body text for small windows
_draw_frame(win, title)
_safe_move(win, 2, 2 + i - hscroll)
win.noutrefresh()
def _load_dialog():
# Dialog for loading a new configuration
#
# Return value:
# True if a new configuration was loaded, and False if the user canceled
# the dialog
global _show_all
filename = ""
while True:
filename = _input_dialog("File to load", filename, _load_save_info())
if filename is None:
return False
filename = os.path.expanduser(filename)
if _try_load(filename):
sel_node = _shown[_sel_node_i]
# Turn on show-all mode if the current node is (no longer) visible
if not (sel_node.prompt and expr_value(sel_node.prompt[1])):
_show_all = True
_update_menu()
# The message dialog indirectly updates the menu display, so _msg()
# must be called after the new state has been initialized
_msg("Success", "Loaded {}".format(filename))
return True
def _try_load(filename):
# Tries to load a configuration file. Pops up an error and returns False on
# failure.
#
# filename:
# Configuration file to load
# Hack: strerror and errno are lost after we raise the custom IOError with
# troubleshooting help in Kconfig.load_config(). Adding them back to the
# exception loses the custom message. As a workaround, try opening the file
# separately first and report any errors.
try:
open(filename).close()
except OSError as e:
_error("Error loading {}\n\n{} (errno: {})"
.format(filename, e.strerror, errno.errorcode[e.errno]))
return False
try:
_kconf.load_config(filename)
return True
    except OSError as e:
        _error("Error loading {}\n\n{}".format(filename, e))
        return False
def _save_dialog(save_fn, default_filename, description):
# Dialog for saving the current configuration
#
# save_fn:
# Function to call with 'filename' to save the file
#
# default_filename:
# Prefilled filename in the input field
#
# description:
# String describing the thing being saved
#
# Return value:
# True if the configuration was saved, and False if the user canceled the
# dialog
filename = default_filename
while True:
filename = _input_dialog("Filename to save {} to".format(description),
filename, _load_save_info())
if filename is None:
return False
filename = os.path.expanduser(filename)
if _try_save(save_fn, filename, description):
_msg("Success", "{} saved to {}".format(description, filename))
return True
def _try_save(save_fn, filename, description):
# Tries to save a configuration file. Pops up an error and returns False on
# failure.
#
# save_fn:
# Function to call with 'filename' to save the file
#
# description:
# String describing the thing being saved
try:
save_fn(filename)
return True
except OSError as e:
_error("Error saving {} to '{}'\n\n{} (errno: {})"
.format(description, e.filename, e.strerror,
errno.errorcode[e.errno]))
return False
def _key_dialog(title, text, keys):
# Pops up a dialog that can be closed by pressing a key
#
# title:
# Title to display at the top of the dialog window's border
#
# text:
# Text to show in the dialog
#
# keys:
# List of keys that will close the dialog. Other keys (besides ESC) are
# ignored. The caller is responsible for providing a hint about which
# keys can be pressed in 'text'.
#
# Return value:
# The key that was pressed to close the dialog. Uppercase characters are
# converted to lowercase. ESC will always close the dialog, and returns
# None.
win = _styled_win("body")
win.keypad(True)
_resize_key_dialog(win, text)
while True:
# See _input_dialog()
_draw_main()
_draw_key_dialog(win, title, text)
curses.doupdate()
c = _get_wch_compat(win)
if c == curses.KEY_RESIZE:
# Resize the main display too. The dialog floats above it.
_resize_main()
_resize_key_dialog(win, text)
elif c == "\x1B": # \x1B = ESC
return None
elif isinstance(c, str):
c = c.lower()
if c in keys:
return c
def _resize_key_dialog(win, text):
# Resizes the key dialog to a size appropriate for the terminal size
screen_height, screen_width = _stdscr.getmaxyx()
lines = text.split("\n")
win_height = min(len(lines) + 4, screen_height)
win_width = min(max(len(line) for line in lines) + 4, screen_width)
win.resize(win_height, win_width)
win.mvwin((screen_height - win_height)//2,
(screen_width - win_width)//2)
def _draw_key_dialog(win, title, text):
win.erase()
for i, line in enumerate(text.split("\n")):
_safe_addstr(win, 2 + i, 2, line)
# Draw the frame last so that it overwrites the body text for small windows
_draw_frame(win, title)
win.noutrefresh()
def _draw_frame(win, title):
# Draw a frame around the inner edges of 'win', with 'title' at the top
win_height, win_width = win.getmaxyx()
win.attron(_style["frame"])
# Draw top/bottom edge
_safe_hline(win, 0, 0, " ", win_width)
_safe_hline(win, win_height - 1, 0, " ", win_width)
# Draw left/right edge
_safe_vline(win, 0, 0, " ", win_height)
_safe_vline(win, 0, win_width - 1, " ", win_height)
# Draw title
_safe_addstr(win, 0, max((win_width - len(title))//2, 0), title)
win.attroff(_style["frame"])
def _jump_to_dialog():
# Implements the jump-to dialog, where symbols can be looked up via
# incremental search and jumped to.
#
# Returns True if the user jumped to a symbol, and False if the dialog was
# canceled.
# Search text
s = ""
# Previous search text
prev_s = None
# Search text cursor position
s_i = 0
# Horizontal scroll offset
hscroll = 0
# Index of selected row
sel_node_i = 0
# Index in 'matches' of the top row of the list
scroll = 0
# Edit box at the top
edit_box = _styled_win("jump-edit")
edit_box.keypad(True)
# List of matches
matches_win = _styled_win("list")
# Bottom separator, with arrows pointing down
bot_sep_win = _styled_win("separator")
# Help window with instructions at the bottom
help_win = _styled_win("help")
# Give windows their initial size
_resize_jump_to_dialog(edit_box, matches_win, bot_sep_win, help_win,
sel_node_i, scroll)
_safe_curs_set(2)
# TODO: Code duplication with _select_{next,prev}_menu_entry(). Can this be
# factored out in some nice way?
def select_next_match():
nonlocal sel_node_i
nonlocal scroll
if sel_node_i < len(matches) - 1:
sel_node_i += 1
if sel_node_i >= scroll + matches_win.getmaxyx()[0] - _SCROLL_OFFSET:
scroll = min(scroll + 1, _max_scroll(matches, matches_win))
def select_prev_match():
nonlocal sel_node_i
nonlocal scroll
if sel_node_i > 0:
sel_node_i -= 1
if sel_node_i <= scroll + _SCROLL_OFFSET:
scroll = max(scroll - 1, 0)
while True:
if s != prev_s:
# The search text changed. Find new matching nodes.
prev_s = s
try:
# We could use re.IGNORECASE here instead of lower(), but this
# is noticeably less jerky while inputting regexes like
# '.*debug$' (though the '.*' is redundant there). Those
# probably have bad interactions with re.search(), which
# matches anywhere in the string.
#
# It's not horrible either way. Just a bit smoother.
regex_searches = [re.compile(regex).search
for regex in s.lower().split()]
# No exception thrown, so the regexes are okay
bad_re = None
# List of matching nodes
matches = []
# Search symbols and choices
for node in _sorted_sc_nodes():
# Symbol/choice
sc = node.item
for search in regex_searches:
# Both the name and the prompt might be missing, since
# we're searching both symbols and choices
# Does the regex match either the symbol name or the
# prompt (if any)?
if not (sc.name and search(sc.name.lower()) or
node.prompt and search(node.prompt[0].lower())):
# Give up on the first regex that doesn't match, to
# speed things up a bit when multiple regexes are
# entered
break
else:
matches.append(node)
# Search menus and comments
for node in _sorted_menu_comment_nodes():
for search in regex_searches:
if not search(node.prompt[0].lower()):
break
else:
matches.append(node)
except re.error as e:
# Bad regex. Remember the error message so we can show it.
bad_re = "Bad regular expression"
# re.error.msg was added in Python 3.5
if hasattr(e, "msg"):
bad_re += ": " + e.msg
matches = []
# Reset scroll and jump to the top of the list of matches
sel_node_i = scroll = 0
_draw_jump_to_dialog(edit_box, matches_win, bot_sep_win, help_win,
s, s_i, hscroll,
bad_re, matches, sel_node_i, scroll)
curses.doupdate()
c = _get_wch_compat(edit_box)
if c == "\n":
if matches:
_jump_to(matches[sel_node_i])
_safe_curs_set(0)
return True
elif c == "\x1B": # \x1B = ESC
_safe_curs_set(0)
return False
elif c == curses.KEY_RESIZE:
# We adjust the scroll so that the selected node stays visible in
# the list when the terminal is resized, hence the 'scroll'
# assignment
scroll = _resize_jump_to_dialog(
edit_box, matches_win, bot_sep_win, help_win,
sel_node_i, scroll)
elif c == "\x06": # \x06 = Ctrl-F
if matches:
_safe_curs_set(0)
_info_dialog(matches[sel_node_i], True)
_safe_curs_set(2)
scroll = _resize_jump_to_dialog(
edit_box, matches_win, bot_sep_win, help_win,
sel_node_i, scroll)
elif c == curses.KEY_DOWN:
select_next_match()
elif c == curses.KEY_UP:
select_prev_match()
elif c == curses.KEY_NPAGE: # Page Down
# Keep it simple. This way we get sane behavior for small windows,
# etc., for free.
for _ in range(_PG_JUMP):
select_next_match()
elif c == curses.KEY_PPAGE: # Page Up
for _ in range(_PG_JUMP):
select_prev_match()
else:
s, s_i, hscroll = _edit_text(c, s, s_i, hscroll,
edit_box.getmaxyx()[1] - 2)
# Obscure Python: We never pass a value for cached_nodes, and it keeps pointing
# to the same list. This avoids a global.
def _sorted_sc_nodes(cached_nodes=[]):
# Returns a sorted list of symbol and choice nodes to search. The symbol
# nodes appear first, sorted by name, and then the choice nodes, sorted by
# prompt and (secondarily) name.
if not cached_nodes:
# Add symbol nodes
for sym in sorted(_kconf.unique_defined_syms,
key=lambda sym: sym.name):
# += is in-place for lists
cached_nodes += sym.nodes
# Add choice nodes
choices = sorted(_kconf.unique_choices,
key=lambda choice: choice.name or "")
cached_nodes += sorted(
[node
for choice in choices
for node in choice.nodes],
key=lambda node: node.prompt[0] if node.prompt else "")
return cached_nodes
def _sorted_menu_comment_nodes(cached_nodes=[]):
# Returns a list of menu and comment nodes to search, sorted by prompt,
# with the menus first
if not cached_nodes:
def prompt_text(mc):
return mc.prompt[0]
cached_nodes += sorted(_kconf.menus, key=prompt_text) + \
sorted(_kconf.comments, key=prompt_text)
return cached_nodes
def _resize_jump_to_dialog(edit_box, matches_win, bot_sep_win, help_win,
sel_node_i, scroll):
# Resizes the jump-to dialog to fill the terminal.
#
# Returns the new scroll index. We adjust the scroll if needed so that the
# selected node stays visible.
screen_height, screen_width = _stdscr.getmaxyx()
bot_sep_win.resize(1, screen_width)
help_win_height = len(_JUMP_TO_HELP_LINES)
matches_win_height = screen_height - help_win_height - 4
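    # The four fixed rows are the three-row edit box at the top plus the
    # bottom separator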
if matches_win_height >= 1:
edit_box.resize(3, screen_width)
matches_win.resize(matches_win_height, screen_width)
help_win.resize(help_win_height, screen_width)
matches_win.mvwin(3, 0)
bot_sep_win.mvwin(3 + matches_win_height, 0)
help_win.mvwin(3 + matches_win_height + 1, 0)
else:
# Degenerate case. Give up on nice rendering and just prevent errors.
matches_win_height = 1
edit_box.resize(screen_height, screen_width)
matches_win.resize(1, screen_width)
help_win.resize(1, screen_width)
for win in matches_win, bot_sep_win, help_win:
win.mvwin(0, 0)
# Adjust the scroll so that the selected row is still within the window, if
# needed
if sel_node_i - scroll >= matches_win_height:
return sel_node_i - matches_win_height + 1
return scroll
def _draw_jump_to_dialog(edit_box, matches_win, bot_sep_win, help_win,
s, s_i, hscroll,
bad_re, matches, sel_node_i, scroll):
edit_width = edit_box.getmaxyx()[1] - 2
#
# Update list of matches
#
matches_win.erase()
if matches:
for i in range(scroll,
min(scroll + matches_win.getmaxyx()[0], len(matches))):
node = matches[i]
if isinstance(node.item, (Symbol, Choice)):
node_str = _name_and_val_str(node.item)
if node.prompt:
node_str += ' "{}"'.format(node.prompt[0])
elif node.item == MENU:
node_str = 'menu "{}"'.format(node.prompt[0])
else: # node.item == COMMENT
node_str = 'comment "{}"'.format(node.prompt[0])
_safe_addstr(matches_win, i - scroll, 0, node_str,
_style["selection" if i == sel_node_i else "list"])
else:
# bad_re holds the error message from the re.error exception on errors
_safe_addstr(matches_win, 0, 0, bad_re or "No matches")
matches_win.noutrefresh()
#
# Update bottom separator line
#
bot_sep_win.erase()
# Draw arrows pointing down if the symbol list is scrolled up
if scroll < _max_scroll(matches, matches_win):
_safe_hline(bot_sep_win, 0, 4, curses.ACS_DARROW, _N_SCROLL_ARROWS)
bot_sep_win.noutrefresh()
#
# Update help window at bottom
#
help_win.erase()
for i, line in enumerate(_JUMP_TO_HELP_LINES):
_safe_addstr(help_win, i, 0, line)
help_win.noutrefresh()
#
# Update edit box. We do this last since it makes it handy to position the
# cursor.
#
edit_box.erase()
_draw_frame(edit_box, "Jump to symbol")
# Draw arrows pointing up if the symbol list is scrolled down
if scroll > 0:
# TODO: Bit ugly that _style["frame"] is repeated here
_safe_hline(edit_box, 2, 4, curses.ACS_UARROW, _N_SCROLL_ARROWS,
_style["frame"])
visible_s = s[hscroll:hscroll + edit_width]
_safe_addstr(edit_box, 1, 1, visible_s)
_safe_move(edit_box, 1, 1 + s_i - hscroll)
edit_box.noutrefresh()
def _info_dialog(node, from_jump_to_dialog):
# Shows a fullscreen window with information about 'node'.
#
# If 'from_jump_to_dialog' is True, the information dialog was opened from
# within the jump-to-dialog. In this case, we make '/' from within the
# information dialog just return, to avoid a confusing recursive invocation
# of the jump-to-dialog.
    # Top row, with title and arrows pointing up
top_line_win = _styled_win("separator")
# Text display
text_win = _styled_win("text")
text_win.keypad(True)
# Bottom separator, with arrows pointing down
bot_sep_win = _styled_win("separator")
# Help window with keys at the bottom
help_win = _styled_win("help")
# Give windows their initial size
_resize_info_dialog(top_line_win, text_win, bot_sep_win, help_win)
# Get lines of help text
lines = _info_str(node).split("\n")
# Index of first row in 'lines' to show
scroll = 0
while True:
_draw_info_dialog(node, lines, scroll, top_line_win, text_win,
bot_sep_win, help_win)
curses.doupdate()
c = _get_wch_compat(text_win)
if c == curses.KEY_RESIZE:
_resize_info_dialog(top_line_win, text_win, bot_sep_win, help_win)
elif c in (curses.KEY_DOWN, "j", "J"):
if scroll < _max_scroll(lines, text_win):
scroll += 1
elif c in (curses.KEY_NPAGE, "\x04"): # Page Down/Ctrl-D
scroll = min(scroll + _PG_JUMP, _max_scroll(lines, text_win))
elif c in (curses.KEY_PPAGE, "\x15"): # Page Up/Ctrl-U
scroll = max(scroll - _PG_JUMP, 0)
elif c in (curses.KEY_END, "G"):
scroll = _max_scroll(lines, text_win)
elif c in (curses.KEY_HOME, "g"):
scroll = 0
elif c in (curses.KEY_UP, "k", "K"):
if scroll > 0:
scroll -= 1
elif c == "/":
# Support starting a search from within the information dialog
if from_jump_to_dialog:
# Avoid recursion
return
if _jump_to_dialog():
# Jumped to a symbol. Cancel the information dialog.
return
# Stay in the information dialog if the jump-to dialog was
# canceled. Resize it in case the terminal was resized while the
# fullscreen jump-to dialog was open.
_resize_info_dialog(top_line_win, text_win, bot_sep_win, help_win)
elif c in (curses.KEY_LEFT, curses.KEY_BACKSPACE, _ERASE_CHAR,
"\x1B", # \x1B = ESC
"q", "Q", "h", "H"):
return
def _resize_info_dialog(top_line_win, text_win, bot_sep_win, help_win):
# Resizes the info dialog to fill the terminal
screen_height, screen_width = _stdscr.getmaxyx()
top_line_win.resize(1, screen_width)
bot_sep_win.resize(1, screen_width)
help_win_height = len(_INFO_HELP_LINES)
text_win_height = screen_height - help_win_height - 2
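    # The two fixed rows are the top title row and the bottom separator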
if text_win_height >= 1:
text_win.resize(text_win_height, screen_width)
help_win.resize(help_win_height, screen_width)
text_win.mvwin(1, 0)
bot_sep_win.mvwin(1 + text_win_height, 0)
help_win.mvwin(1 + text_win_height + 1, 0)
else:
# Degenerate case. Give up on nice rendering and just prevent errors.
text_win.resize(1, screen_width)
help_win.resize(1, screen_width)
for win in text_win, bot_sep_win, help_win:
win.mvwin(0, 0)
def _draw_info_dialog(node, lines, scroll, top_line_win, text_win,
bot_sep_win, help_win):
text_win_height, text_win_width = text_win.getmaxyx()
# Note: The top row is deliberately updated last. See _draw_main().
#
# Update text display
#
text_win.erase()
for i, line in enumerate(lines[scroll:scroll + text_win_height]):
_safe_addstr(text_win, i, 0, line)
text_win.noutrefresh()
#
# Update bottom separator line
#
bot_sep_win.erase()
# Draw arrows pointing down if the symbol window is scrolled up
if scroll < _max_scroll(lines, text_win):
_safe_hline(bot_sep_win, 0, 4, curses.ACS_DARROW, _N_SCROLL_ARROWS)
bot_sep_win.noutrefresh()
#
# Update help window at bottom
#
help_win.erase()
for i, line in enumerate(_INFO_HELP_LINES):
_safe_addstr(help_win, i, 0, line)
help_win.noutrefresh()
#
# Update top row
#
top_line_win.erase()
# Draw arrows pointing up if the information window is scrolled down. Draw
# them before drawing the title, so the title ends up on top for small
# windows.
if scroll > 0:
_safe_hline(top_line_win, 0, 4, curses.ACS_UARROW, _N_SCROLL_ARROWS)
title = ("Symbol" if isinstance(node.item, Symbol) else
"Choice" if isinstance(node.item, Choice) else
"Menu" if node.item == MENU else
"Comment") + " information"
_safe_addstr(top_line_win, 0, max((text_win_width - len(title))//2, 0),
title)
top_line_win.noutrefresh()
def _info_str(node):
# Returns information about the menu node 'node' as a string.
#
# The helper functions are responsible for adding newlines. This allows
# them to return "" if they don't want to add any output.
if isinstance(node.item, Symbol):
sym = node.item
return (
_name_info(sym) +
_prompt_info(sym) +
"Type: {}\n".format(TYPE_TO_STR[sym.type]) +
_value_info(sym) +
_help_info(sym) +
_direct_dep_info(sym) +
_defaults_info(sym) +
_select_imply_info(sym) +
_kconfig_def_info(sym)
)
if isinstance(node.item, Choice):
choice = node.item
return (
_name_info(choice) +
_prompt_info(choice) +
"Type: {}\n".format(TYPE_TO_STR[choice.type]) +
'Mode: {}\n'.format(choice.str_value) +
_help_info(choice) +
_choice_syms_info(choice) +
_direct_dep_info(choice) +
_defaults_info(choice) +
_kconfig_def_info(choice)
)
# node.item in (MENU, COMMENT)
return _kconfig_def_info(node)
def _name_info(sc):
# Returns a string with the name of the symbol/choice. Names are optional
# for choices.
return "Name: {}\n".format(sc.name) if sc.name else ""
def _prompt_info(sc):
# Returns a string listing the prompts of 'sc' (Symbol or Choice)
s = ""
for node in sc.nodes:
if node.prompt:
s += "Prompt: {}\n".format(node.prompt[0])
return s
def _value_info(sym):
# Returns a string showing 'sym's value
# Only put quotes around the value for string symbols
return "Value: {}\n".format(
'"{}"'.format(sym.str_value)
if sym.orig_type == STRING
else sym.str_value)
def _choice_syms_info(choice):
# Returns a string listing the choice symbols in 'choice'. Adds
# "(selected)" next to the selected one.
s = "Choice symbols:\n"
for sym in choice.syms:
s += " - " + sym.name
if sym is choice.selection:
s += " (selected)"
s += "\n"
return s + "\n"
def _help_info(sc):
# Returns a string with the help text(s) of 'sc' (Symbol or Choice).
# Symbols and choices defined in multiple locations can have multiple help
# texts.
s = "\n"
for node in sc.nodes:
if node.help is not None:
s += "Help:\n\n{}\n\n" \
.format(textwrap.indent(node.help, " "))
return s
def _direct_dep_info(sc):
# Returns a string describing the direct dependencies of 'sc' (Symbol or
# Choice). The direct dependencies are the OR of the dependencies from each
# definition location. The dependencies at each definition location come
# from 'depends on' and dependencies inherited from parent items.
if sc.direct_dep is _kconf.y:
return ""
return 'Direct dependencies (={}):\n{}\n' \
.format(TRI_TO_STR[expr_value(sc.direct_dep)],
_split_expr_info(sc.direct_dep, 2))
def _defaults_info(sc):
# Returns a string describing the defaults of 'sc' (Symbol or Choice)
if not sc.defaults:
return ""
s = "Defaults:\n"
for val, cond in sc.defaults:
s += " - "
if isinstance(sc, Symbol):
s += _expr_str(val)
# Skip the tristate value hint if the expression is just a single
# symbol. _expr_str() already shows its value as a string.
#
# This also avoids showing the tristate value for string/int/hex
# defaults, which wouldn't make any sense.
if isinstance(val, tuple):
s += ' (={})'.format(TRI_TO_STR[expr_value(val)])
else:
# Don't print the value next to the symbol name for choice
# defaults, as it looks a bit confusing
s += val.name
s += "\n"
if cond is not _kconf.y:
s += " Condition (={}):\n{}" \
.format(TRI_TO_STR[expr_value(cond)],
_split_expr_info(cond, 4))
return s + "\n"
def _split_expr_info(expr, indent):
# Returns a string with 'expr' split into its top-level && or || operands,
# with one operand per line, together with the operand's value. This is
# usually enough to get something readable for long expressions. A fancier
# recursive thingy would be possible too.
#
# indent:
# Number of leading spaces to add before the split expression.
if len(split_expr(expr, AND)) > 1:
split_op = AND
op_str = "&&"
else:
split_op = OR
op_str = "||"
s = ""
for i, term in enumerate(split_expr(expr, split_op)):
s += "{}{} {}".format(" "*indent,
" " if i == 0 else op_str,
_expr_str(term))
# Don't bother showing the value hint if the expression is just a
# single symbol. _expr_str() already shows its value.
if isinstance(term, tuple):
s += " (={})".format(TRI_TO_STR[expr_value(term)])
s += "\n"
return s
def _select_imply_info(sym):
# Returns a string with information about which symbols 'select' or 'imply'
# 'sym'. The selecting/implying symbols are grouped according to which
# value they select/imply 'sym' to (n/m/y).
s = ""
def add_sis(expr, val, title):
nonlocal s
# sis = selects/implies
sis = [si for si in split_expr(expr, OR) if expr_value(si) == val]
if sis:
s += title
for si in sis:
s += " - {}\n".format(split_expr(si, AND)[0].name)
s += "\n"
if sym.rev_dep is not _kconf.n:
add_sis(sym.rev_dep, 2,
"Symbols currently y-selecting this symbol:\n")
add_sis(sym.rev_dep, 1,
"Symbols currently m-selecting this symbol:\n")
add_sis(sym.rev_dep, 0,
"Symbols currently n-selecting this symbol (no effect):\n")
if sym.weak_rev_dep is not _kconf.n:
add_sis(sym.weak_rev_dep, 2,
"Symbols currently y-implying this symbol:\n")
add_sis(sym.weak_rev_dep, 1,
"Symbols currently m-implying this symbol:\n")
add_sis(sym.weak_rev_dep, 0,
"Symbols currently n-implying this symbol (no effect):\n")
return s
def _kconfig_def_info(item):
# Returns a string with the definition of 'item' in Kconfig syntax,
# together with the definition location(s) and their include and menu paths
nodes = [item] if isinstance(item, MenuNode) else item.nodes
s = "Kconfig definition{}, with propagated dependencies\n" \
.format("s" if len(nodes) > 1 else "")
s += (len(s) - 1)*"="
for node in nodes:
s += "\n\n" \
"At {}:{}\n" \
"{}" \
"Menu path: {}\n\n" \
"{}" \
.format(node.filename, node.linenr,
_include_path_info(node),
_menu_path_info(node),
textwrap.indent(node.custom_str(_name_and_val_str), " "))
return s
def _include_path_info(node):
if not node.include_path:
# In the top-level Kconfig file
return ""
return "Included via {}\n".format(
" -> ".join("{}:{}".format(filename, linenr)
for filename, linenr in node.include_path))
def _menu_path_info(node):
# Returns a string describing the menu path leading up to 'node'
path = ""
node = _parent_menu(node)
while node is not _kconf.top_node:
# Promptless choices might appear among the parents. Use
# standard_sc_expr_str() for them, so that they show up as
# '<choice (name if any)>'.
path = " -> " + (node.prompt[0] if node.prompt else
standard_sc_expr_str(node.item)) + path
node = _parent_menu(node)
return "(top menu)" + path
def _name_and_val_str(sc):
# Custom symbol/choice printer that shows symbol values after symbols
# Show the values of non-constant (non-quoted) symbols that don't look like
# numbers. Things like 123 are actually symbol references, and only work as
# expected due to undefined symbols getting their name as their value.
# Showing the symbol value for those isn't helpful though.
if isinstance(sc, Symbol) and not sc.is_constant and not _is_num(sc.name):
if not sc.nodes:
# Undefined symbol reference
return "{}(undefined/n)".format(sc.name)
return '{}(={})'.format(sc.name, sc.str_value)
# For other items, use the standard format
return standard_sc_expr_str(sc)
def _expr_str(expr):
# Custom expression printer that shows symbol values
return expr_str(expr, _name_and_val_str)
def _styled_win(style):
# Returns a new curses window with style 'style' and space as the fill
# character. The initial dimensions are (1, 1), so the window needs to be
# sized and positioned separately.
win = curses.newwin(1, 1)
win.bkgdset(" ", _style[style])
return win
def _max_scroll(lst, win):
# Assuming 'lst' is a list of items to be displayed in 'win',
# returns the maximum number of steps 'win' can be scrolled down.
# We stop scrolling when the bottom item is visible.
return max(0, len(lst) - win.getmaxyx()[0])
def _edit_text(c, s, i, hscroll, width):
# Implements text editing commands for edit boxes. Takes a character (which
# could also be e.g. curses.KEY_LEFT) and the edit box state, and returns
# the new state after the character has been processed.
#
# c:
# Character from user
#
# s:
# Current contents of string
#
# i:
# Current cursor index in string
#
# hscroll:
# Index in s of the leftmost character in the edit box, for horizontal
# scrolling
#
# width:
# Width in characters of the edit box
#
# Return value:
# An (s, i, hscroll) tuple for the new state
if c == curses.KEY_LEFT:
if i > 0:
i -= 1
elif c == curses.KEY_RIGHT:
if i < len(s):
i += 1
elif c in (curses.KEY_HOME, "\x01"): # \x01 = CTRL-A
i = 0
elif c in (curses.KEY_END, "\x05"): # \x05 = CTRL-E
i = len(s)
elif c in (curses.KEY_BACKSPACE, _ERASE_CHAR):
if i > 0:
s = s[:i-1] + s[i:]
i -= 1
elif c == curses.KEY_DC:
s = s[:i] + s[i+1:]
elif c == "\x17": # \x17 = CTRL-W
# The \W removes characters like ',' one at a time
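        # E.g. with s == "foo bar" and the cursor at the end, this deletes
        # "bar" and leaves the cursor right after "foo "; with s == "foo bar,"
        # only the trailing "," is removed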
new_i = re.search(r"(?:\w*|\W)\s*$", s[:i]).start()
s = s[:new_i] + s[i:]
i = new_i
elif c == "\x0B": # \x0B = CTRL-K
s = s[:i]
elif c == "\x15": # \x15 = CTRL-U
s = s[i:]
i = 0
elif isinstance(c, str):
# Insert character
s = s[:i] + c + s[i:]
i += 1
# Adjust the horizontal scroll so that the cursor never touches the left or
# right edges of the edit box, except when it's at the beginning or the end
# of the string
if i < hscroll + _SCROLL_OFFSET:
hscroll = max(i - _SCROLL_OFFSET, 0)
elif i >= hscroll + width - _SCROLL_OFFSET:
max_scroll = max(len(s) - width + 1, 0)
hscroll = min(i - width + _SCROLL_OFFSET + 1, max_scroll)
return s, i, hscroll
def _load_save_info():
# Returns an information string for load/save dialog boxes
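    # os.path.join(os.getcwd(), "") appends a trailing path separator, so the
    # message reads e.g. "(Relative to /some/dir/)"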
return "(Relative to {})\n\nRefer to your home directory with ~" \
.format(os.path.join(os.getcwd(), ""))
def _msg(title, text):
# Pops up a message dialog that can be dismissed with Space/Enter/ESC
_key_dialog(title, text, " \n")
def _error(text):
# Pops up an error dialog that can be dismissed with Space/Enter/ESC
_msg("Error", text)
def _node_str(node):
# Returns the complete menu entry text for a menu node.
#
# Example return value: "[*] Support for X"
# Calculate the indent to print the item with by checking how many levels
# above it the closest 'menuconfig' item is (this includes menus and
# choices as well as menuconfig symbols)
indent = 0
parent = node.parent
while not parent.is_menuconfig:
indent += _SUBMENU_INDENT
parent = parent.parent
# This approach gives nice alignment for empty string symbols ("() Foo")
s = "{:{}}".format(_value_str(node), 3 + indent)
if _should_show_name(node):
if isinstance(node.item, Symbol):
s += " <{}>".format(node.item.name)
else:
# For choices, use standard_sc_expr_str(). That way they show up as
# '<choice (name if any)>'.
s += " " + standard_sc_expr_str(node.item)
if node.prompt:
if node.item == COMMENT:
s += " *** {} ***".format(node.prompt[0])
else:
s += " " + node.prompt[0]
if isinstance(node.item, Symbol):
sym = node.item
# Print "(NEW)" next to symbols without a user value (from e.g. a
# .config), but skip it for choice symbols in choices in y mode
if sym.user_value is None and \
not (sym.choice and sym.choice.tri_value == 2):
s += " (NEW)"
if isinstance(node.item, Choice) and node.item.tri_value == 2:
# Print the prompt of the selected symbol after the choice for
# choices in y mode
sym = node.item.selection
if sym:
for node_ in sym.nodes:
if node_.prompt:
s += " ({})".format(node_.prompt[0])
# Print "--->" next to nodes that have menus that can potentially be
# entered. Add "(empty)" if the menu is empty. We don't allow those to be
# entered.
if node.is_menuconfig:
s += " --->" if _shown_nodes(node) else " ---> (empty)"
return s
def _should_show_name(node):
# Returns True if 'node' is a symbol or choice whose name should be shown (if
# any, as names are optional for choices)
# The 'not node.prompt' case only hits in show-all mode, for promptless
# symbols and choices
return not node.prompt or \
(_show_name and isinstance(node.item, (Symbol, Choice)))
def _value_str(node):
# Returns the value part ("[*]", "<M>", "(foo)" etc.) of a menu node
item = node.item
if item in (MENU, COMMENT):
return ""
# Wouldn't normally happen, and generates a warning
if item.type == UNKNOWN:
return ""
if item.type in (STRING, INT, HEX):
return "({})".format(item.str_value)
# BOOL or TRISTATE
if _is_y_mode_choice_sym(item):
return "(X)" if item.choice.selection is item else "( )"
tri_val_str = (" ", "M", "*")[item.tri_value]
if len(item.assignable) <= 1:
# Pinned to a single value
return "" if isinstance(item, Choice) else "-{}-".format(tri_val_str)
if item.type == BOOL:
return "[{}]".format(tri_val_str)
# item.type == TRISTATE
if item.assignable == (1, 2):
return "{{{}}}".format(tri_val_str) # {M}/{*}
return "<{}>".format(tri_val_str)
def _is_y_mode_choice_sym(item):
# The choice mode is an upper bound on the visibility of choice symbols, so
# we can check the choice symbols' own visibility to see if the choice is
# in y mode
return isinstance(item, Symbol) and item.choice and item.visibility == 2
def _check_validity(sym, s):
# Returns True if the string 's' is a well-formed value for 'sym'.
# Otherwise, displays an error and returns False.
if sym.type not in (INT, HEX):
# Anything goes for non-int/hex symbols
return True
base = 10 if sym.type == INT else 16
try:
int(s, base)
except ValueError:
_error("'{}' is a malformed {} value"
.format(s, TYPE_TO_STR[sym.type]))
return False
for low_sym, high_sym, cond in sym.ranges:
if expr_value(cond):
low = int(low_sym.str_value, base)
val = int(s, base)
high = int(high_sym.str_value, base)
if not low <= val <= high:
_error("{} is outside the range {}-{}"
.format(s, low_sym.str_value, high_sym.str_value))
return False
break
return True
def _range_info(sym):
# Returns a string with information about the valid range for the symbol
# 'sym', or None if 'sym' doesn't have a range
if sym.type in (INT, HEX):
for low, high, cond in sym.ranges:
if expr_value(cond):
return "Range: {}-{}".format(low.str_value, high.str_value)
return None
def _is_num(name):
# Heuristic to see if a symbol name looks like a number, for nicer output
# when printing expressions. Things like 16 are actually symbol names, only
# they get their name as their value when the symbol is undefined.
try:
int(name)
except ValueError:
if not name.startswith(("0x", "0X")):
return False
try:
int(name, 16)
except ValueError:
return False
return True
def _get_wch_compat(win):
# Decent resizing behavior on PDCurses requires calling resize_term(0, 0)
# after receiving KEY_RESIZE, while NCURSES (usually) handles terminal
# resizing automatically in get(_w)ch() (see the end of the
# resizeterm(3NCURSES) man page).
#
# resize_term(0, 0) reliably fails and does nothing on NCURSES, so this
# hack gives NCURSES/PDCurses compatibility for resizing. I don't know
# whether it would cause trouble for other implementations.
c = win.get_wch()
if c == curses.KEY_RESIZE:
try:
curses.resize_term(0, 0)
except curses.error:
pass
return c
def _warn(*args):
# Temporarily returns from curses to shell mode and prints a warning to
# stderr. The warning would get lost in curses mode.
curses.endwin()
print("menuconfig warning: ", end="", file=sys.stderr)
print(*args, file=sys.stderr)
curses.doupdate()
# Ignore exceptions from some functions that might fail, e.g. for small
# windows. They usually do reasonable things anyway.
def _safe_curs_set(visibility):
try:
curses.curs_set(visibility)
except curses.error:
pass
def _safe_addstr(win, *args):
# Clip the line to avoid wrapping to the next line, which looks glitchy.
# addchstr() would do it for us, but it's not available in the 'curses'
# module.
attr = None
if isinstance(args[0], str):
y, x = win.getyx()
s = args[0]
if len(args) == 2:
attr = args[1]
else:
y, x, s = args[:3]
if len(args) == 4:
attr = args[3]
maxlen = win.getmaxyx()[1] - x
s = s.expandtabs()
try:
# The 'curses' module uses wattr_set() internally if you pass 'attr',
# overwriting the background style, so setting 'attr' to 0 in the first
# case won't do the right thing
if attr is None:
win.addnstr(y, x, s, maxlen)
else:
win.addnstr(y, x, s, maxlen, attr)
except curses.error:
pass
def _safe_addch(win, *args):
try:
win.addch(*args)
except curses.error:
pass
def _safe_hline(win, *args):
try:
win.hline(*args)
except curses.error:
pass
def _safe_vline(win, *args):
try:
win.vline(*args)
except curses.error:
pass
def _safe_move(win, *args):
try:
win.move(*args)
except curses.error:
pass
def _convert_c_lc_ctype_to_utf8():
# See _CONVERT_C_LC_CTYPE_TO_UTF8
if _IS_WINDOWS:
# Windows rarely has issues here, and the PEP 538 implementation avoids
# changing the locale on it. None of the UTF-8 locales below were
# supported in some quick testing either. Play it safe.
return
def _try_set_locale(loc):
try:
locale.setlocale(locale.LC_CTYPE, loc)
return True
except locale.Error:
return False
# Is LC_CTYPE set to the C locale?
if locale.setlocale(locale.LC_CTYPE, None) == "C":
# This list was taken from the PEP 538 implementation in the CPython
# code, in Python/pylifecycle.c
for loc in "C.UTF-8", "C.utf8", "UTF-8":
if _try_set_locale(loc):
print("Note: Your environment is configured to use ASCII. To "
"avoid Unicode issues, LC_CTYPE was changed from the "
"C locale to the {} locale.".format(loc))
break
# Are we running on Windows?
_IS_WINDOWS = (platform.system() == "Windows")
if __name__ == "__main__":
_main()
| []
| []
| [
"MENUCONFIG_STYLE"
]
| [] | ["MENUCONFIG_STYLE"] | python | 1 | 0 | |
objectScanner/objectScanner.go | package main
import (
"log"
"os"
"oss/apiServer/objects"
"oss/lib/es"
"oss/lib/utils"
"path/filepath"
"strings"
)
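// main walks every object file under $STORAGE_ROOT/objects, extracts the hash
// encoded in each file name, and verifies the stored content against it.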
func main() {
files, _ := filepath.Glob(os.Getenv("STORAGE_ROOT") + "/objects/*")
for i := range files {
hash := strings.Split(filepath.Base(files[i]), ".")[0]
verify(hash)
}
}
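// verify looks up the object's recorded size in ES, streams the stored object
// back, recomputes its hash, and logs a mismatch if the digests differ.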
func verify(hash string) {
log.Println("verify", hash)
size, e := es.SearchHashSize(hash)
if e != nil {
log.Println(e)
return
}
stream, e := objects.GetStream(hash, size)
if e != nil {
log.Println(e)
return
}
d := utils.CalculateHash(stream)
if d != hash {
log.Printf("object hash mismatch, calculated=%s, requested=%s", d, hash)
}
stream.Close()
}
| [
"\"STORAGE_ROOT\""
]
| []
| [
"STORAGE_ROOT"
]
| [] | ["STORAGE_ROOT"] | go | 1 | 0 | |
upup/pkg/fi/cloudup/aliup/ali_cloud.go | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aliup
import (
"errors"
"fmt"
"os"
"github.com/golang/glog"
"github.com/denverdino/aliyungo/common"
"github.com/denverdino/aliyungo/ecs"
"github.com/denverdino/aliyungo/ess"
"github.com/denverdino/aliyungo/ram"
"github.com/denverdino/aliyungo/slb"
prj "k8s.io/kops"
"k8s.io/kops/dnsprovider/pkg/dnsprovider"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/cloudinstances"
"k8s.io/kops/upup/pkg/fi"
)
const TagClusterName = "KubernetesCluster"
const TagNameRolePrefix = "k8s.io/role/"
const TagNameEtcdClusterPrefix = "k8s.io/etcd/"
const TagRoleMaster = "master"
// This is for statistic purpose.
var KubernetesKopsIdentity = fmt.Sprintf("Kubernetes.Kops/%s", prj.Version)
type ALICloud interface {
fi.Cloud
EcsClient() *ecs.Client
SlbClient() *slb.Client
RamClient() *ram.RamClient
EssClient() *ess.Client
Region() string
AddClusterTags(tags map[string]string)
GetTags(resourceId string, resourceType string) (map[string]string, error)
CreateTags(resourceId string, resourceType string, tags map[string]string) error
RemoveTags(resourceId string, resourceType string, tags map[string]string) error
GetClusterTags() map[string]string
FindClusterStatus(cluster *kops.Cluster) (*kops.ClusterStatus, error)
GetApiIngressStatus(cluster *kops.Cluster) ([]kops.ApiIngressStatus, error)
}
type aliCloudImplementation struct {
ecsClient *ecs.Client
slbClient *slb.Client
ramClient *ram.RamClient
essClient *ess.Client
region string
tags map[string]string
}
var _ fi.Cloud = &aliCloudImplementation{}
// NewALICloud returns a Cloud, expecting the env vars ALIYUN_ACCESS_KEY_ID and ALIYUN_ACCESS_KEY_SECRET.
// NewALICloud returns an error if these env vars are not defined.
func NewALICloud(region string, tags map[string]string) (ALICloud, error) {
c := &aliCloudImplementation{region: region}
accessKeyId := os.Getenv("ALIYUN_ACCESS_KEY_ID")
if accessKeyId == "" {
return nil, errors.New("ALIYUN_ACCESS_KEY_ID is required")
}
accessKeySecret := os.Getenv("ALIYUN_ACCESS_KEY_SECRET")
if accessKeySecret == "" {
return nil, errors.New("ALIYUN_ACCESS_KEY_SECRET is required")
}
c.ecsClient = ecs.NewClient(accessKeyId, accessKeySecret)
c.ecsClient.SetUserAgent(KubernetesKopsIdentity)
c.slbClient = slb.NewClient(accessKeyId, accessKeySecret)
ramclient := ram.NewClient(accessKeyId, accessKeySecret)
c.ramClient = ramclient.(*ram.RamClient)
c.essClient = ess.NewClient(accessKeyId, accessKeySecret)
c.tags = tags
return c, nil
}
func (c *aliCloudImplementation) EcsClient() *ecs.Client {
return c.ecsClient
}
func (c *aliCloudImplementation) SlbClient() *slb.Client {
return c.slbClient
}
func (c *aliCloudImplementation) RamClient() *ram.RamClient {
return c.ramClient
}
func (c *aliCloudImplementation) EssClient() *ess.Client {
return c.essClient
}
func (c *aliCloudImplementation) Region() string {
return c.region
}
func (c *aliCloudImplementation) ProviderID() kops.CloudProviderID {
return kops.CloudProviderALI
}
func (c *aliCloudImplementation) DNS() (dnsprovider.Interface, error) {
return nil, errors.New("DNS not implemented on aliCloud")
}
func (c *aliCloudImplementation) DeleteGroup(g *cloudinstances.CloudInstanceGroup) error {
return errors.New("DeleteGroup not implemented on aliCloud")
}
func (c *aliCloudImplementation) DeleteInstance(i *cloudinstances.CloudInstanceGroupMember) error {
return errors.New("DeleteInstance not implemented on aliCloud")
}
func (c *aliCloudImplementation) FindVPCInfo(id string) (*fi.VPCInfo, error) {
request := &ecs.DescribeVpcsArgs{
RegionId: common.Region(c.Region()),
VpcId: id,
}
vpcs, _, err := c.EcsClient().DescribeVpcs(request)
if err != nil {
return nil, fmt.Errorf("error listing VPCs: %v", err)
}
if len(vpcs) != 1 {
return nil, fmt.Errorf("found multiple VPCs for %q", id)
}
vpcInfo := &fi.VPCInfo{
CIDR: vpcs[0].CidrBlock,
}
describeVSwitchesArgs := &ecs.DescribeVSwitchesArgs{
VpcId: id,
RegionId: common.Region(c.Region()),
}
vswitchList, _, err := c.EcsClient().DescribeVSwitches(describeVSwitchesArgs)
if err != nil {
return nil, fmt.Errorf("error listing VSwitchs: %v", err)
}
for _, vswitch := range vswitchList {
s := &fi.SubnetInfo{
ID: vswitch.VSwitchId,
Zone: vswitch.ZoneId,
CIDR: vswitch.CidrBlock,
}
vpcInfo.Subnets = append(vpcInfo.Subnets, s)
}
return vpcInfo, nil
}
// GetTags will get the specified resource's tags.
func (c *aliCloudImplementation) GetTags(resourceId string, resourceType string) (map[string]string, error) {
if resourceId == "" {
return nil, errors.New("resourceId not provided to GetTags")
}
tags := map[string]string{}
request := &ecs.DescribeTagsArgs{
RegionId: common.Region(c.Region()),
ResourceType: ecs.TagResourceType(resourceType), //image, instance, snapshot or disk
ResourceId: resourceId,
}
responseTags, _, err := c.EcsClient().DescribeTags(request)
if err != nil {
return tags, fmt.Errorf("error getting tags on %v: %v", resourceId, err)
}
for _, tag := range responseTags {
tags[tag.TagKey] = tag.TagValue
}
return tags, nil
}
// AddClusterTags will add ClusterTags to resources (in ALI, only disk, instance, snapshot or image can be tagged)
func (c *aliCloudImplementation) AddClusterTags(tags map[string]string) {
if c.tags != nil && len(c.tags) != 0 && tags != nil {
for k, v := range c.tags {
tags[k] = v
}
}
}
// CreateTags will add tags to the specified resource.
func (c *aliCloudImplementation) CreateTags(resourceId string, resourceType string, tags map[string]string) error {
if len(tags) == 0 {
return nil
} else if len(tags) > 10 {
glog.V(4).Info("The number of specified resource's tags exceeds 10, resourceId:%q", resourceId)
}
if resourceId == "" {
return errors.New("resourceId not provided to CreateTags")
}
if resourceType == "" {
return errors.New("resourceType not provided to CreateTags")
}
request := &ecs.AddTagsArgs{
ResourceId: resourceId,
ResourceType: ecs.TagResourceType(resourceType), //image, instance, snapshot or disk
RegionId: common.Region(c.Region()),
Tag: tags,
}
err := c.EcsClient().AddTags(request)
if err != nil {
return fmt.Errorf("error creating tags on %v: %v", resourceId, err)
}
return nil
}
// RemoveTags will remove tags from the specified resource.
func (c *aliCloudImplementation) RemoveTags(resourceId string, resourceType string, tags map[string]string) error {
if len(tags) == 0 {
return nil
}
if resourceId == "" {
return errors.New("resourceId not provided to RemoveTags")
}
if resourceType == "" {
return errors.New("resourceType not provided to RemoveTags")
}
request := &ecs.RemoveTagsArgs{
ResourceId: resourceId,
ResourceType: ecs.TagResourceType(resourceType), //image, instance, snapshot or disk
RegionId: common.Region(c.Region()),
Tag: tags,
}
err := c.EcsClient().RemoveTags(request)
if err != nil {
return fmt.Errorf("error removing tags on %v: %v", resourceId, err)
}
return nil
}
// GetClusterTags will get the ClusterTags
func (c *aliCloudImplementation) GetClusterTags() map[string]string {
return c.tags
}
func (c *aliCloudImplementation) GetApiIngressStatus(cluster *kops.Cluster) ([]kops.ApiIngressStatus, error) {
var ingresses []kops.ApiIngressStatus
name := "api." + cluster.Name
describeLoadBalancersArgs := &slb.DescribeLoadBalancersArgs{
RegionId: common.Region(c.Region()),
LoadBalancerName: name,
}
responseLoadBalancers, err := c.SlbClient().DescribeLoadBalancers(describeLoadBalancersArgs)
if err != nil {
return nil, fmt.Errorf("error finding LoadBalancers: %v", err)
}
// No load balancer exists with the specified ClusterTags or Name.
if len(responseLoadBalancers) == 0 {
return nil, nil
}
if len(responseLoadBalancers) > 1 {
glog.V(4).Info("The number of specified loadbalancer with the same name exceeds 1, loadbalancerName:%q", name)
}
address := responseLoadBalancers[0].Address
ingresses = append(ingresses, kops.ApiIngressStatus{IP: address})
return ingresses, nil
}
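// ZoneToVSwitchID maps each zone to the VSwitch located in it, after checking that
// every given vswitchID belongs to the VPC and that no two VSwitches share a zone.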
func ZoneToVSwitchID(VPCID string, zones []string, vswitchIDs []string) (map[string]string, error) {
regionId, err := getRegionByZones(zones)
if err != nil {
return nil, err
}
res := make(map[string]string)
cloudTags := map[string]string{}
aliCloud, err := NewALICloud(regionId, cloudTags)
if err != nil {
return res, fmt.Errorf("error loading cloud: %v", err)
}
describeVpcsArgs := &ecs.DescribeVpcsArgs{
RegionId: common.Region(regionId),
VpcId: VPCID,
}
vpc, _, err := aliCloud.EcsClient().DescribeVpcs(describeVpcsArgs)
if err != nil {
return res, fmt.Errorf("error describing VPC: %v", err)
}
if vpc == nil || len(vpc) == 0 {
return res, fmt.Errorf("VPC %q not found", VPCID)
}
if len(vpc) != 1 {
return nil, fmt.Errorf("found multiple VPCs for %q", VPCID)
}
subnetByID := make(map[string]string)
for _, VSId := range vpc[0].VSwitchIds.VSwitchId {
subnetByID[VSId] = VSId
}
for _, VSwitchId := range vswitchIDs {
_, ok := subnetByID[VSwitchId]
if !ok {
return res, fmt.Errorf("vswitch %s not found in VPC %s", VSwitchId, VPCID)
}
describeVSwitchesArgs := &ecs.DescribeVSwitchesArgs{
VpcId: vpc[0].VpcId,
RegionId: common.Region(regionId),
VSwitchId: VSwitchId,
}
vswitchList, _, err := aliCloud.EcsClient().DescribeVSwitches(describeVSwitchesArgs)
if err != nil {
return nil, fmt.Errorf("error listing VSwitchs: %v", err)
}
if len(vswitchList) == 0 {
return nil, fmt.Errorf("VSwitch %q not found", VSwitchId)
}
if len(vswitchList) != 1 {
return nil, fmt.Errorf("found multiple VSwitchs for %q", VSwitchId)
}
zone := vswitchList[0].ZoneId
if res[zone] != "" {
return res, fmt.Errorf("vswitch %s and %s have the same zone", vswitchList[0].VSwitchId, zone)
}
res[zone] = vswitchList[0].VSwitchId
}
return res, nil
}
| [
"\"ALIYUN_ACCESS_KEY_ID\"",
"\"ALIYUN_ACCESS_KEY_SECRET\""
]
| []
| [
"ALIYUN_ACCESS_KEY_SECRET",
"ALIYUN_ACCESS_KEY_ID"
]
| [] | ["ALIYUN_ACCESS_KEY_SECRET", "ALIYUN_ACCESS_KEY_ID"] | go | 2 | 0 | |
services/configuration/env-service.go | // Package configuration implements configuration service required by the edge-core service
package configuration
import (
"fmt"
"os"
"strconv"
"strings"
commonErrors "github.com/micro-business/go-core/system/errors"
)
type envConfigurationService struct {
}
// NewEnvConfigurationService creates new instance of the EnvConfigurationService, setting up all dependencies and returns the instance
// Returns the new service or error if something goes wrong
func NewEnvConfigurationService() (ConfigurationContract, error) {
return &envConfigurationService{}, nil
}
// GetHttpHost returns HTTP host name
// Returns the HTTP host name
func (service *envConfigurationService) GetHttpHost() string {
return os.Getenv("HTTP_HOST")
}
// GetHttpPort returns HTTP port number
// Returns the HTTP port number or error if something goes wrong
func (service *envConfigurationService) GetHttpPort() (int, error) {
valueStr := os.Getenv("HTTP_PORT")
if strings.Trim(valueStr, " ") == "" {
return 0, commonErrors.NewUnknownError("HTTP_PORT is required")
}
value, err := strconv.Atoi(valueStr)
if err != nil {
return 0, commonErrors.NewUnknownErrorWithError("Failed to convert HTTP_PORT to integer", err)
}
return value, nil
}
// GetRunningNodeName returns the name of the node that is currently running the pod
// Returns the name of the node that is currently running the pod or error if something goes wrong
func (service *envConfigurationService) GetRunningNodeName() (string, error) {
value := os.Getenv("NODE_NAME")
if strings.Trim(value, " ") == "" {
return "", commonErrors.NewUnknownError("NODE_NAME is required")
}
return value, nil
}
// GetEdgeClusterType returns the type of edge cluster such as K3S
// Returns the type of edge cluster or error if something goes wrong
func (service *envConfigurationService) GetEdgeClusterType() (ClusterType, error) {
switch value := strings.Trim(os.Getenv("EDGE_CLUSTER_TYPE"), " "); value {
case "K3S":
return K3S, nil
case "":
return Unknown, commonErrors.NewUnknownError(
"EDGE_CLUSTER_TYPE is required")
default:
return Unknown, commonErrors.NewUnknownError(
fmt.Sprintf("Could not figure out the edge cluster type from the given EDGE_CLUSTER_TYPE (%s)", value))
}
}
// ShouldUpdatePublciIPAndGeolocationDetails determines whether the edge-core should periodically check
// for the node public IP address and geolocation details
// Returns true if the edge-core should periodically check for the node public IP address and
// geolocation details otherwise returns false
func (service *envConfigurationService) ShouldUpdatePublciIPAndGeolocationDetails() bool {
if value := strings.Trim(os.Getenv("UPDATE_PUBLIC_IP_GEOLOCATION_DETAILS"), " "); value == "true" {
return true
}
return false
}
// GetGeolocationUpdaterCronSpec returns Geolocation Updater updating interval
// Returns the Geolocation Updater updating interval or error if something goes wrong
func (service *envConfigurationService) GetGeolocationUpdaterCronSpec() (string, error) {
value := os.Getenv("GEOLOCATION_UPDATER_CRON_SPEC")
if strings.Trim(value, " ") == "" {
return "", commonErrors.NewUnknownError("GEOLOCATION_UPDATER_CRON_SPEC is required")
}
return value, nil
}
// GetIpinfoUrl returns the URL to the Ipinfo website that returns the node public IP address
// Returns the URL to the Ipinfo website that returns the node public IP address or error if something goes wrong
func (service *envConfigurationService) GetIpinfoUrl() (string, error) {
value := os.Getenv("IPINFO_URL")
if strings.Trim(value, " ") == "" {
return "https://ipinfo.io", nil
}
return value, nil
}
// GetIpinfoAccessToken returns the access token to be used when making request to the Ipinfo website
// to return the node public IP address
// Returns the access token to be used when making request to the Ipinfo website to return the node
// public IP address or error if something goes wrong
func (service *envConfigurationService) GetIpinfoAccessToken() (string, error) {
return os.Getenv("IPINFO_ACCESS_TOKEN"), nil
}
| [
"\"HTTP_HOST\"",
"\"HTTP_PORT\"",
"\"NODE_NAME\"",
"\"EDGE_CLUSTER_TYPE\"",
"\"UPDATE_PUBLIC_IP_GEOLOCATION_DETAILS\"",
"\"GEOLOCATION_UPDATER_CRON_SPEC\"",
"\"IPINFO_URL\"",
"\"IPINFO_ACCESS_TOKEN\""
]
| []
| [
"EDGE_CLUSTER_TYPE",
"IPINFO_ACCESS_TOKEN",
"HTTP_HOST",
"UPDATE_PUBLIC_IP_GEOLOCATION_DETAILS",
"GEOLOCATION_UPDATER_CRON_SPEC",
"NODE_NAME",
"IPINFO_URL",
"HTTP_PORT"
]
| [] | ["EDGE_CLUSTER_TYPE", "IPINFO_ACCESS_TOKEN", "HTTP_HOST", "UPDATE_PUBLIC_IP_GEOLOCATION_DETAILS", "GEOLOCATION_UPDATER_CRON_SPEC", "NODE_NAME", "IPINFO_URL", "HTTP_PORT"] | go | 8 | 0 | |
project/app/api/wage_hospitality_viz.py | from fastapi import APIRouter, HTTPException
import pandas as pd
import plotly.express as px
import json
from dotenv import load_dotenv
import os
import psycopg2
from sqlalchemy import create_engine
from sqlalchemy.types import Integer, Float, Text, String, DateTime
from fastapi.encoders import jsonable_encoder
from os.path import join as join_path
router = APIRouter()
@router.post('/wage_hospitality_viz/')
async def wage_hospitality_viz(user_queried_citystates: list):
"""
### Path Parameter (POST from front-end)
list: A list of city-states the user queried in this format: ["Albany, NY", "San Francisco, CA", "Chicago, IL"]
### Response
JSON string of all figures to render with [react-plotly.js](https://plotly.com/javascript/react/)
"""
def create_db_uri():
# give full path to .env
env_path = r'.env'
# LOAD environment variables
load_dotenv(dotenv_path=env_path, verbose=True)
# GET .env vars
DB_FLAVOR = os.getenv("DB_FLAVOR")
DB_PYTHON_LIBRARY = os.getenv("DB_PYTHON_LIBRARY")
DB_HOST = os.getenv("DB_HOST")
DB_NAME = os.getenv("DB_NAME")
DB_USER = os.getenv("DB_USER")
DB_PASS = os.getenv("DB_PASS")
DB_PORT = os.getenv("DB_PORT")
DB_URI = DB_FLAVOR + "+" + DB_PYTHON_LIBRARY + "://" + DB_USER + ":" + DB_PASS + "@" + DB_HOST + ":" + DB_PORT + "/" + DB_NAME
return DB_URI
DB_URI = create_db_uri()
# CONNECTION Engine with SQLAlchemy
engine = create_engine(DB_URI, echo=True)
def cc_json():
'''
Opens county_city.json file, converts to .json object and returns it
'''
with open(join_path('app', 'db', 'city-county.json')) as f:
data_to_encode = json.load(f)
encoded_json = jsonable_encoder(data_to_encode)
county_city_json = json.dumps(encoded_json)
return county_city_json
cc = cc_json()
cc = json.loads(cc)
# city_states_list = ["New York City, NY", "San Francisco, CA", "Chicago, IL"]
def get_county_from_city(city_states_list):
county_list = []
for city_state in city_states_list:
county_list.append(cc[city_state])
return county_list
county_list = get_county_from_city(user_queried_citystates)
def sql_query(county_list):
'''
Create a SQL query to grab only the user-queried counties' data from the jobs table in the DB.
Output: subset DF containing only the queried counties
'''
# get length of list of queried cities
list_length = len(county_list)
# Branch on the number of queried counties so the SQL IN clause gets the right number of parameters
if list_length == 1:
county1 = county_list[0]
query1 = 'SELECT * FROM jobs WHERE county_state IN (%(county1)s)'
subsetJ = pd.read_sql(sql = query1, columns = "county_state", params={"county1":county1}, con=engine, parse_dates=['created_at', 'updated_at'])
elif list_length == 2:
county1 = county_list[0]
county2 = county_list[1]
query2 = 'SELECT * FROM jobs WHERE county_state IN (%(county1)s, %(county2)s)'
subsetJ = pd.read_sql(sql = query2, columns = "county_state", params={"county1":county1, "county2":county2}, con=engine, parse_dates=['created_at', 'updated_at'])
elif list_length == 3:
county1 = county_list[0]
county2 = county_list[1]
county3 = county_list[2]
query3 = 'SELECT * FROM jobs WHERE "county_state" IN (%(county1)s, %(county2)s, %(county3)s)'
subsetJ = pd.read_sql(sql = query3, columns = "county_state", params={"county1":county1, "county2":county2, "county3":county3}, con=engine, parse_dates=['created_at', 'updated_at'])
else:
raise Exception("Please pass a list of 1-3 City-States")
return subsetJ
subsetJ = sql_query(county_list)
industry_list = ['Goods-producing', 'Natural resources and mining', 'Construction', 'Manufacturing', 'Service-providing', 'Trade, transportation, and utilities', 'Information', 'Financial activities', 'Professional and business services', 'Education and health services', 'Leisure and hospitality', 'Other services', 'Unclassified']
def create_wage_plots(df, industry_list, industry_name):
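# Build a Plotly line chart of average weekly wage over time for the given
# industry, one line per queried county, and return the figure as a JSON string.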
subsetJ['County, State'] = subsetJ['county_state']
subsetJ['date'] = pd.PeriodIndex(year=subsetJ['Year'], quarter=subsetJ['Qtr']).to_timestamp()
industry = subsetJ[subsetJ['Industry']==industry_name]
industry = industry.sort_values('date')
fig = px.line(industry, x='date', y='Average Weekly Wage', labels={'Average Weekly Wage': 'Average Weekly Wage ($)', 'date': 'Date'}, color='County, State', title=f"{industry_name}: Average Weekly Wage").for_each_trace(lambda t: t.update(name=t.name.split("=")[-1]))
fig.update_layout(legend=dict(orientation="h",yanchor="bottom",y=1.02,xanchor="right",x=1),
xaxis = dict(
tickmode = 'array',
tick0 = 1,
dtick = 1,
tickvals = [2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020],
ticktext = ['2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019', '2020']
)) # legend above graph top right
fig.write_image("fig1.png")
jobs_json = fig.to_json() # save figure to JSON object to pass to WEB
return jobs_json
wage_json = create_wage_plots(subsetJ, industry_list, industry_list[10])
return wage_json
| []
| []
| [
"DB_HOST",
"DB_PORT",
"DB_NAME",
"DB_PYTHON_LIBRARY",
"DB_PASS",
"DB_FLAVOR",
"DB_USER"
]
| [] | ["DB_HOST", "DB_PORT", "DB_NAME", "DB_PYTHON_LIBRARY", "DB_PASS", "DB_FLAVOR", "DB_USER"] | python | 7 | 0 | |
providers/digitalocean/digitalocean_provider.go | // Copyright 2019 The Terraformer Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package digitalocean
import (
"errors"
"os"
"github.com/GoogleCloudPlatform/terraformer/terraform_utils"
"github.com/GoogleCloudPlatform/terraformer/terraform_utils/provider_wrapper"
)
type DigitalOceanProvider struct {
terraform_utils.Provider
token string
}
func (p *DigitalOceanProvider) Init(args []string) error {
if os.Getenv("DIGITALOCEAN_TOKEN") == "" {
return errors.New("set DIGITALOCEAN_TOKEN env var")
}
p.token = os.Getenv("DIGITALOCEAN_TOKEN")
return nil
}
func (p *DigitalOceanProvider) GetName() string {
return "digitalocean"
}
func (p *DigitalOceanProvider) GetProviderData(arg ...string) map[string]interface{} {
return map[string]interface{}{
"provider": map[string]interface{}{
"digitalocean": map[string]interface{}{
"version": provider_wrapper.GetProviderVersion(p.GetName()),
"token": p.token,
},
},
}
}
func (DigitalOceanProvider) GetResourceConnections() map[string]map[string][]string {
return map[string]map[string][]string{}
}
func (p *DigitalOceanProvider) GetSupportedService() map[string]terraform_utils.ServiceGenerator {
return map[string]terraform_utils.ServiceGenerator{
"cdn": &CDNGenerator{},
"database_cluster": &DatabaseClusterGenerator{},
"domain": &DomainGenerator{},
"droplet": &DropletGenerator{},
"droplet_snapshot": &DropletSnapshotGenerator{},
"firewall": &FirewallGenerator{},
"floating_ip": &FloatingIPGenerator{},
"kubernetes_cluster": &KubernetesClusterGenerator{},
"loadbalancer": &LoadBalancerGenerator{},
"project": &ProjectGenerator{},
"ssh_key": &SSHKeyGenerator{},
"tag": &TagGenerator{},
"volume": &VolumeGenerator{},
"volume_snapshot": &VolumeSnapshotGenerator{},
}
}
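// InitService wires up the requested service generator and passes the API token
// through as a service argument; unknown service names return an error.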
func (p *DigitalOceanProvider) InitService(serviceName string) error {
var isSupported bool
if _, isSupported = p.GetSupportedService()[serviceName]; !isSupported {
return errors.New("digitalocean: " + serviceName + " not supported service")
}
p.Service = p.GetSupportedService()[serviceName]
p.Service.SetName(serviceName)
p.Service.SetProviderName(p.GetName())
p.Service.SetArgs(map[string]interface{}{
"token": p.token,
})
return nil
}
| [
"\"DIGITALOCEAN_TOKEN\"",
"\"DIGITALOCEAN_TOKEN\""
]
| []
| [
"DIGITALOCEAN_TOKEN"
]
| [] | ["DIGITALOCEAN_TOKEN"] | go | 1 | 0 | |
src/java/world/micks/Key.java | package world.micks;
public class Key {
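// Reads the token from the TOKEN environment variable once, at class-load time.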
private static final String KEY = System.getenv("TOKEN");
public static String get() { return KEY; }
}
| [
"\"TOKEN\""
]
| []
| [
"TOKEN"
]
| [] | ["TOKEN"] | java | 1 | 0 | |
reader_test.go | package kafka
import (
"bytes"
"context"
"fmt"
"io"
"math/rand"
"net"
"os"
"reflect"
"strconv"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestReader(t *testing.T) {
tests := []struct {
scenario string
function func(*testing.T, context.Context, *Reader)
}{
{
scenario: "calling Read with a context that has been canceled returns an error",
function: testReaderReadCanceled,
},
{
scenario: "all messages of the stream are returned when calling ReadMessage repeatedly",
function: testReaderReadMessages,
},
{
scenario: "test special offsets -1 and -2",
function: testReaderSetSpecialOffsets,
},
{
scenario: "setting the offset to random values returns the expected messages when Read is called",
function: testReaderSetRandomOffset,
},
{
scenario: "setting the offset by TimeStamp",
function: testReaderSetOffsetAt,
},
{
scenario: "calling Lag returns the lag of the last message read from kafka",
function: testReaderLag,
},
{
scenario: "calling ReadLag returns the current lag of a reader",
function: testReaderReadLag,
},
{ // https://github.com/segmentio/kafka-go/issues/30
scenario: "reading from an out-of-range offset waits until the context is cancelled",
function: testReaderOutOfRangeGetsCanceled,
},
}
for _, test := range tests {
testFunc := test.function
t.Run(test.scenario, func(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
r := NewReader(ReaderConfig{
Brokers: []string{"localhost:9092"},
Topic: makeTopic(),
MinBytes: 1,
MaxBytes: 10e6,
MaxWait: 100 * time.Millisecond,
})
defer r.Close()
testFunc(t, ctx, r)
})
}
}
func testReaderReadCanceled(t *testing.T, ctx context.Context, r *Reader) {
ctx, cancel := context.WithCancel(ctx)
cancel()
if _, err := r.ReadMessage(ctx); err != context.Canceled {
t.Error(err)
}
}
func testReaderReadMessages(t *testing.T, ctx context.Context, r *Reader) {
const N = 1000
prepareReader(t, ctx, r, makeTestSequence(N)...)
var offset int64
for i := 0; i != N; i++ {
m, err := r.ReadMessage(ctx)
if err != nil {
t.Error("reading message at offset", offset, "failed:", err)
return
}
offset = m.Offset + 1
v, _ := strconv.Atoi(string(m.Value))
if v != i {
t.Error("message at index", i, "has wrong value:", v)
return
}
}
}
func testReaderSetSpecialOffsets(t *testing.T, ctx context.Context, r *Reader) {
prepareReader(t, ctx, r, Message{Value: []byte("first")})
prepareReader(t, ctx, r, makeTestSequence(3)...)
go func() {
time.Sleep(1 * time.Second)
prepareReader(t, ctx, r, Message{Value: []byte("last")})
}()
for _, test := range []struct {
off, final int64
want string
}{
{FirstOffset, 1, "first"},
{LastOffset, 5, "last"},
} {
offset := test.off
if err := r.SetOffset(offset); err != nil {
t.Error("setting offset", offset, "failed:", err)
}
m, err := r.ReadMessage(ctx)
if err != nil {
t.Error("reading at offset", offset, "failed:", err)
}
if string(m.Value) != test.want {
t.Error("message at offset", offset, "has wrong value:", string(m.Value))
}
if off := r.Offset(); off != test.final {
t.Errorf("bad final offset: got %d, want %d", off, test.final)
}
}
}
func testReaderSetRandomOffset(t *testing.T, ctx context.Context, r *Reader) {
const N = 10
prepareReader(t, ctx, r, makeTestSequence(N)...)
for i := 0; i != 2*N; i++ {
offset := rand.Intn(N)
r.SetOffset(int64(offset))
m, err := r.ReadMessage(ctx)
if err != nil {
t.Error("seeking to offset", offset, "failed:", err)
return
}
v, _ := strconv.Atoi(string(m.Value))
if v != offset {
t.Error("message at offset", offset, "has wrong value:", v)
return
}
}
}
func testReaderSetOffsetAt(t *testing.T, ctx context.Context, r *Reader) {
// We make 2 batches of messages here with a brief 2 second pause
// to ensure messages 0...9 will be written a few seconds before messages 10...19
// We'll then fetch the timestamp for message offset 10 and use that timestamp to set
// our reader
const N = 10
prepareReader(t, ctx, r, makeTestSequence(N)...)
time.Sleep(time.Second * 2)
prepareReader(t, ctx, r, makeTestSequence(N)...)
var ts time.Time
for i := 0; i < N*2; i++ {
m, err := r.ReadMessage(ctx)
if err != nil {
t.Error("error reading message", err)
}
// grab the time for the 10th message
if i == 10 {
ts = m.Time
}
}
err := r.SetOffsetAt(ctx, ts)
if err != nil {
t.Fatal("error setting offset by timestamp", err)
}
m, err := r.ReadMessage(context.Background())
if err != nil {
t.Fatal("error reading message", err)
}
if m.Offset != 10 {
t.Errorf("expected offset of 10, received offset %d", m.Offset)
}
}
func testReaderLag(t *testing.T, ctx context.Context, r *Reader) {
const N = 5
prepareReader(t, ctx, r, makeTestSequence(N)...)
if lag := r.Lag(); lag != 0 {
t.Errorf("the initial lag value is %d but was expected to be 0", lag)
}
for i := 0; i != N; i++ {
r.ReadMessage(ctx)
expect := int64(N - (i + 1))
if lag := r.Lag(); lag != expect {
t.Errorf("the lag value at offset %d is %d but was expected to be %d", i, lag, expect)
}
}
}
func testReaderReadLag(t *testing.T, ctx context.Context, r *Reader) {
const N = 5
prepareReader(t, ctx, r, makeTestSequence(N)...)
if lag, err := r.ReadLag(ctx); err != nil {
t.Error(err)
} else if lag != N {
t.Errorf("the initial lag value is %d but was expected to be %d", lag, N)
}
for i := 0; i != N; i++ {
r.ReadMessage(ctx)
expect := int64(N - (i + 1))
if lag, err := r.ReadLag(ctx); err != nil {
t.Error(err)
} else if lag != expect {
t.Errorf("the lag value at offset %d is %d but was expected to be %d", i, lag, expect)
}
}
}
func testReaderOutOfRangeGetsCanceled(t *testing.T, ctx context.Context, r *Reader) {
prepareReader(t, ctx, r, makeTestSequence(10)...)
const D = 100 * time.Millisecond
t0 := time.Now()
ctx, cancel := context.WithTimeout(ctx, D)
defer cancel()
if err := r.SetOffset(42); err != nil {
t.Error(err)
}
_, err := r.ReadMessage(ctx)
if err != context.DeadlineExceeded {
t.Error("bad error:", err)
}
t1 := time.Now()
if d := t1.Sub(t0); d < D {
t.Error("ReadMessage returned too early after", d)
}
}
func createTopic(t *testing.T, topic string, partitions int) {
t.Helper()
t.Logf("createTopic(%s, %d)", topic, partitions)
conn, err := Dial("tcp", "localhost:9092")
if err != nil {
err = fmt.Errorf("createTopic, Dial: %w", err)
t.Fatal(err)
}
defer conn.Close()
controller, err := conn.Controller()
if err != nil {
err = fmt.Errorf("createTopic, conn.Controller: %w", err)
t.Fatal(err)
}
conn, err = Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
if err != nil {
t.Fatal(err)
}
conn.SetDeadline(time.Now().Add(10 * time.Second))
_, err = conn.createTopics(createTopicsRequestV0{
Topics: []createTopicsRequestV0Topic{
{
Topic: topic,
NumPartitions: int32(partitions),
ReplicationFactor: 1,
},
},
Timeout: milliseconds(time.Second),
})
switch err {
case nil:
// ok
case TopicAlreadyExists:
// ok
default:
err = fmt.Errorf("creaetTopic, conn.createtTopics: %w", err)
t.Error(err)
t.FailNow()
}
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancel()
waitForTopic(ctx, t, topic)
}
// Block until topic exists
func waitForTopic(ctx context.Context, t *testing.T, topic string) {
t.Helper()
for {
select {
case <-ctx.Done():
t.Fatalf("reached deadline before verifying topic existence")
default:
}
cli := &Client{
Addr: TCP("localhost:9092"),
Timeout: 5 * time.Second,
}
response, err := cli.Metadata(ctx, &MetadataRequest{
Addr: cli.Addr,
Topics: []string{topic},
})
if err != nil {
t.Fatalf("waitForTopic: error listing topics: %s", err.Error())
}
// Find a topic which has at least 1 partition in the metadata response
for _, top := range response.Topics {
if top.Name != topic {
continue
}
numPartitions := len(top.Partitions)
t.Logf("waitForTopic: found topic %q with %d partitions",
topic, numPartitions)
if numPartitions > 0 {
return
}
}
t.Logf("retrying after 1s")
time.Sleep(time.Second)
continue
}
}
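// deleteTopic removes the given topics via the cluster controller; tests call it
// in deferred cleanup after createTopic.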
func deleteTopic(t *testing.T, topic ...string) {
t.Helper()
conn, err := Dial("tcp", "localhost:9092")
if err != nil {
t.Fatal(err)
}
defer conn.Close()
controller, err := conn.Controller()
if err != nil {
t.Fatal(err)
}
conn, err = Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
if err != nil {
t.Fatal(err)
}
conn.SetDeadline(time.Now().Add(10 * time.Second))
if err := conn.DeleteTopics(topic...); err != nil {
t.Fatal(err)
}
}
func TestReaderOnNonZeroPartition(t *testing.T) {
tests := []struct {
scenario string
function func(*testing.T, context.Context, *Reader)
}{
{
scenario: "topic and partition should now be included in header",
function: testReaderSetsTopicAndPartition,
},
}
for _, test := range tests {
testFunc := test.function
t.Run(test.scenario, func(t *testing.T) {
t.Parallel()
topic := makeTopic()
createTopic(t, topic, 2)
defer deleteTopic(t, topic)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
r := NewReader(ReaderConfig{
Brokers: []string{"localhost:9092"},
Topic: topic,
Partition: 1,
MinBytes: 1,
MaxBytes: 10e6,
MaxWait: 100 * time.Millisecond,
})
defer r.Close()
testFunc(t, ctx, r)
})
}
}
func testReaderSetsTopicAndPartition(t *testing.T, ctx context.Context, r *Reader) {
const N = 3
prepareReader(t, ctx, r, makeTestSequence(N)...)
for i := 0; i != N; i++ {
m, err := r.ReadMessage(ctx)
if err != nil {
t.Error("reading message failed:", err)
return
}
if m.Topic == "" {
t.Error("expected topic to be set")
return
}
if m.Topic != r.config.Topic {
t.Errorf("expected message to contain topic, %v; got %v", r.config.Topic, m.Topic)
return
}
if m.Partition != r.config.Partition {
t.Errorf("expected partition to be set; expected 1, got %v", m.Partition)
return
}
}
}
// TestReadTruncatedMessages uses a configuration designed to get the Broker to
// return truncated messages. It exercises the case where an earlier bug caused
// reading to time out by attempting to read beyond the current response. This
// test is not perfect, but it is pretty reliable about reproducing the issue.
//
// NOTE : it currently only succeeds against kafka 0.10.1.0, so it will be
// skipped. It's here so that it can be manually run.
func TestReadTruncatedMessages(t *testing.T) {
// todo : it would be great to get it to work against 0.11.0.0 so we could
// include it in CI unit tests.
t.Skip()
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
r := NewReader(ReaderConfig{
Brokers: []string{"localhost:9092"},
Topic: makeTopic(),
MinBytes: 1,
MaxBytes: 100,
MaxWait: 100 * time.Millisecond,
})
defer r.Close()
n := 500
prepareReader(t, ctx, r, makeTestSequence(n)...)
for i := 0; i < n; i++ {
if _, err := r.ReadMessage(ctx); err != nil {
t.Fatal(err)
}
}
}
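// makeTestSequence builds n messages whose values are their indexes ("0".."n-1"),
// with monotonically increasing timestamps.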
func makeTestSequence(n int) []Message {
base := time.Now()
msgs := make([]Message, n)
for i := 0; i != n; i++ {
msgs[i] = Message{
Time: base.Add(time.Duration(i) * time.Millisecond).Truncate(time.Millisecond),
Value: []byte(strconv.Itoa(i)),
}
}
return msgs
}
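// prepareReader writes the given messages to the reader's topic/partition so the
// tests have data to consume, retrying the dial until the partition leader is available.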
func prepareReader(t *testing.T, ctx context.Context, r *Reader, msgs ...Message) {
config := r.Config()
var conn *Conn
var err error
for {
if conn, err = DialLeader(ctx, "tcp", "localhost:9092", config.Topic, config.Partition); err == nil {
break
}
select {
case <-time.After(time.Second):
case <-ctx.Done():
t.Fatal(ctx.Err())
}
}
defer conn.Close()
if _, err := conn.WriteMessages(msgs...); err != nil {
t.Fatal(err)
}
}
var (
benchmarkReaderOnce sync.Once
benchmarkReaderTopic = makeTopic()
benchmarkReaderPayload = make([]byte, 2*1024)
)
func BenchmarkReader(b *testing.B) {
const broker = "localhost:9092"
ctx := context.Background()
benchmarkReaderOnce.Do(func() {
conn, err := DialLeader(ctx, "tcp", broker, benchmarkReaderTopic, 0)
if err != nil {
b.Fatal(err)
}
defer conn.Close()
msgs := make([]Message, 1000)
for i := range msgs {
msgs[i].Value = benchmarkReaderPayload
}
for i := 0; i != 10; i++ { // put 10K messages
if _, err := conn.WriteMessages(msgs...); err != nil {
b.Fatal(err)
}
}
b.ResetTimer()
})
r := NewReader(ReaderConfig{
Brokers: []string{broker},
Topic: benchmarkReaderTopic,
Partition: 0,
MinBytes: 1e3,
MaxBytes: 1e6,
MaxWait: 100 * time.Millisecond,
})
for i := 0; i < b.N; i++ {
if (i % 10000) == 0 {
r.SetOffset(-1)
}
_, err := r.ReadMessage(ctx)
if err != nil {
b.Fatal(err)
}
}
r.Close()
b.SetBytes(int64(len(benchmarkReaderPayload)))
}
func TestCloseLeavesGroup(t *testing.T) {
if os.Getenv("KAFKA_VERSION") == "2.3.1" {
// There's a bug in 2.3.1 that causes the MemberMetadata to be in the wrong format and thus
// leads to an error when decoding the DescribeGroupsResponse.
//
// See https://issues.apache.org/jira/browse/KAFKA-9150 for details.
t.Skip("Skipping because kafka version is 2.3.1")
}
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
topic := makeTopic()
createTopic(t, topic, 1)
defer deleteTopic(t, topic)
groupID := makeGroupID()
r := NewReader(ReaderConfig{
Brokers: []string{"localhost:9092"},
Topic: topic,
GroupID: groupID,
MinBytes: 1,
MaxBytes: 10e6,
MaxWait: 100 * time.Millisecond,
RebalanceTimeout: time.Second,
})
prepareReader(t, ctx, r, Message{Value: []byte("test")})
conn, err := Dial("tcp", r.config.Brokers[0])
if err != nil {
t.Fatalf("error dialing: %v", err)
}
defer conn.Close()
client, shutdown := newLocalClient()
defer shutdown()
descGroups := func() DescribeGroupsResponse {
resp, err := client.DescribeGroups(
ctx,
&DescribeGroupsRequest{
GroupIDs: []string{groupID},
},
)
if err != nil {
t.Fatalf("error from describeGroups %v", err)
}
return *resp
}
_, err = r.ReadMessage(ctx)
if err != nil {
t.Fatalf("our reader never joind its group or couldn't read a message: %v", err)
}
resp := descGroups()
if len(resp.Groups) != 1 {
t.Fatalf("expected 1 group. got: %d", len(resp.Groups))
}
if len(resp.Groups[0].Members) != 1 {
t.Fatalf("expected group membership size of %d, but got %d", 1, len(resp.Groups[0].Members))
}
err = r.Close()
if err != nil {
t.Fatalf("unexpected error closing reader: %s", err.Error())
}
resp = descGroups()
if len(resp.Groups) != 1 {
t.Fatalf("expected 1 group. got: %d", len(resp.Groups))
}
if len(resp.Groups[0].Members) != 0 {
t.Fatalf("expected group membership size of %d, but got %d", 0, len(resp.Groups[0].Members))
}
}
func testConsumerGroupImmediateClose(t *testing.T, ctx context.Context, r *Reader) {
if err := r.Close(); err != nil {
t.Fatalf("bad err: %v", err)
}
}
func testConsumerGroupSimple(t *testing.T, ctx context.Context, r *Reader) {
if err := r.Close(); err != nil {
t.Fatalf("bad err: %v", err)
}
}
func TestReaderSetOffsetWhenConsumerGroupsEnabled(t *testing.T) {
r := &Reader{config: ReaderConfig{GroupID: "not-zero"}}
if err := r.SetOffset(LastOffset); err != errNotAvailableWithGroup {
t.Fatalf("expected %v; got %v", errNotAvailableWithGroup, err)
}
}
func TestReaderOffsetWhenConsumerGroupsEnabled(t *testing.T) {
r := &Reader{config: ReaderConfig{GroupID: "not-zero"}}
if offset := r.Offset(); offset != -1 {
t.Fatalf("expected -1; got %v", offset)
}
}
func TestReaderLagWhenConsumerGroupsEnabled(t *testing.T) {
r := &Reader{config: ReaderConfig{GroupID: "not-zero"}}
if offset := r.Lag(); offset != -1 {
t.Fatalf("expected -1; got %v", offset)
}
}
func TestReaderReadLagReturnsZeroLagWhenConsumerGroupsEnabled(t *testing.T) {
r := &Reader{config: ReaderConfig{GroupID: "not-zero"}}
lag, err := r.ReadLag(context.Background())
if err != errNotAvailableWithGroup {
t.Fatalf("expected %v; got %v", errNotAvailableWithGroup, err)
}
if lag != 0 {
t.Fatalf("expected 0; got %d", lag)
}
}
func TestReaderPartitionWhenConsumerGroupsEnabled(t *testing.T) {
invoke := func() (boom bool) {
defer func() {
if r := recover(); r != nil {
boom = true
}
}()
NewReader(ReaderConfig{
GroupID: "set",
Partition: 1,
})
return false
}
if !invoke() {
t.Fatalf("expected panic; but NewReader worked?!")
}
}
func TestExtractTopics(t *testing.T) {
testCases := map[string]struct {
Members []GroupMember
Topics []string
}{
"nil": {},
"single member, single topic": {
Members: []GroupMember{
{
ID: "a",
Topics: []string{"topic"},
},
},
Topics: []string{"topic"},
},
"two members, single topic": {
Members: []GroupMember{
{
ID: "a",
Topics: []string{"topic"},
},
{
ID: "b",
Topics: []string{"topic"},
},
},
Topics: []string{"topic"},
},
"two members, two topics": {
Members: []GroupMember{
{
ID: "a",
Topics: []string{"topic-1"},
},
{
ID: "b",
Topics: []string{"topic-2"},
},
},
Topics: []string{"topic-1", "topic-2"},
},
"three members, three shared topics": {
Members: []GroupMember{
{
ID: "a",
Topics: []string{"topic-1", "topic-2"},
},
{
ID: "b",
Topics: []string{"topic-2", "topic-3"},
},
{
ID: "c",
Topics: []string{"topic-3", "topic-1"},
},
},
Topics: []string{"topic-1", "topic-2", "topic-3"},
},
}
for label, tc := range testCases {
t.Run(label, func(t *testing.T) {
topics := extractTopics(tc.Members)
if !reflect.DeepEqual(tc.Topics, topics) {
t.Errorf("expected %v; got %v", tc.Topics, topics)
}
})
}
}
func TestReaderConsumerGroup(t *testing.T) {
tests := []struct {
scenario string
partitions int
commitInterval time.Duration
function func(*testing.T, context.Context, *Reader)
}{
{
scenario: "basic handshake",
partitions: 1,
function: testReaderConsumerGroupHandshake,
},
{
scenario: "verify offset committed",
partitions: 1,
function: testReaderConsumerGroupVerifyOffsetCommitted,
},
{
scenario: "verify offset committed when using interval committer",
partitions: 1,
commitInterval: 400 * time.Millisecond,
function: testReaderConsumerGroupVerifyPeriodicOffsetCommitter,
},
{
scenario: "rebalance across many partitions and consumers",
partitions: 8,
function: testReaderConsumerGroupRebalanceAcrossManyPartitionsAndConsumers,
},
{
scenario: "consumer group commits on close",
partitions: 3,
function: testReaderConsumerGroupVerifyCommitsOnClose,
},
{
scenario: "consumer group rebalance",
partitions: 3,
function: testReaderConsumerGroupRebalance,
},
{
scenario: "consumer group rebalance across topics",
partitions: 3,
function: testReaderConsumerGroupRebalanceAcrossTopics,
},
{
scenario: "consumer group reads content across partitions",
partitions: 3,
function: testReaderConsumerGroupReadContentAcrossPartitions,
},
{
scenario: "Close immediately after NewReader",
partitions: 1,
function: testConsumerGroupImmediateClose,
},
{
scenario: "Close immediately after NewReader",
partitions: 1,
function: testConsumerGroupSimple,
},
}
for _, test := range tests {
t.Run(test.scenario, func(t *testing.T) {
// It appears that some of the tests depend on all these tests being
// run concurrently to pass... this is brittle and should be fixed
// at some point.
t.Parallel()
topic := makeTopic()
createTopic(t, topic, test.partitions)
defer deleteTopic(t, topic)
groupID := makeGroupID()
r := NewReader(ReaderConfig{
Brokers: []string{"localhost:9092"},
Topic: topic,
GroupID: groupID,
HeartbeatInterval: 2 * time.Second,
CommitInterval: test.commitInterval,
RebalanceTimeout: 2 * time.Second,
RetentionTime: time.Hour,
MinBytes: 1,
MaxBytes: 1e6,
})
defer r.Close()
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
test.function(t, ctx, r)
})
}
}
func testReaderConsumerGroupHandshake(t *testing.T, ctx context.Context, r *Reader) {
prepareReader(t, context.Background(), r, makeTestSequence(5)...)
m, err := r.ReadMessage(ctx)
if err != nil {
t.Errorf("bad err: %v", err)
}
if m.Topic != r.config.Topic {
t.Errorf("topic not set")
}
if m.Offset != 0 {
t.Errorf("offset not set")
}
m, err = r.ReadMessage(ctx)
if err != nil {
t.Errorf("bad err: %v", err)
}
if m.Topic != r.config.Topic {
t.Errorf("topic not set")
}
if m.Offset != 1 {
t.Errorf("offset not set")
}
}
func testReaderConsumerGroupVerifyOffsetCommitted(t *testing.T, ctx context.Context, r *Reader) {
prepareReader(t, context.Background(), r, makeTestSequence(3)...)
if _, err := r.FetchMessage(ctx); err != nil {
t.Errorf("bad err: %v", err) // skip the first message
}
m, err := r.FetchMessage(ctx)
if err != nil {
t.Errorf("bad err: %v", err)
}
if err := r.CommitMessages(ctx, m); err != nil {
t.Errorf("bad commit message: %v", err)
}
offsets := getOffsets(t, r.config)
if expected := map[int]int64{0: m.Offset + 1}; !reflect.DeepEqual(expected, offsets) {
t.Errorf("expected %v; got %v", expected, offsets)
}
}
func testReaderConsumerGroupVerifyPeriodicOffsetCommitter(t *testing.T, ctx context.Context, r *Reader) {
prepareReader(t, context.Background(), r, makeTestSequence(3)...)
if _, err := r.FetchMessage(ctx); err != nil {
t.Errorf("bad err: %v", err) // skip the first message
}
m, err := r.FetchMessage(ctx)
if err != nil {
t.Errorf("bad err: %v", err)
}
started := time.Now()
if err := r.CommitMessages(ctx, m); err != nil {
t.Errorf("bad commit message: %v", err)
}
if elapsed := time.Since(started); elapsed > 10*time.Millisecond {
t.Errorf("background commits should happen nearly instantly")
}
// wait for committer to pick up the commits
time.Sleep(r.config.CommitInterval * 3)
offsets := getOffsets(t, r.config)
if expected := map[int]int64{0: m.Offset + 1}; !reflect.DeepEqual(expected, offsets) {
t.Errorf("expected %v; got %v", expected, offsets)
}
}
func testReaderConsumerGroupVerifyCommitsOnClose(t *testing.T, ctx context.Context, r *Reader) {
prepareReader(t, context.Background(), r, makeTestSequence(3)...)
if _, err := r.FetchMessage(ctx); err != nil {
t.Errorf("bad err: %v", err) // skip the first message
}
m, err := r.FetchMessage(ctx)
if err != nil {
t.Errorf("bad err: %v", err)
}
if err := r.CommitMessages(ctx, m); err != nil {
t.Errorf("bad commit message: %v", err)
}
if err := r.Close(); err != nil {
t.Errorf("bad Close: %v", err)
}
r2 := NewReader(r.config)
defer r2.Close()
offsets := getOffsets(t, r2.config)
if expected := map[int]int64{0: m.Offset + 1}; !reflect.DeepEqual(expected, offsets) {
t.Errorf("expected %v; got %v", expected, offsets)
}
}
func testReaderConsumerGroupReadContentAcrossPartitions(t *testing.T, ctx context.Context, r *Reader) {
const N = 12
client, shutdown := newLocalClient()
defer shutdown()
writer := &Writer{
Addr: TCP(r.config.Brokers...),
Topic: r.config.Topic,
Balancer: &RoundRobin{},
BatchSize: 1,
Transport: client.Transport,
}
if err := writer.WriteMessages(ctx, makeTestSequence(N)...); err != nil {
t.Fatalf("bad write messages: %v", err)
}
if err := writer.Close(); err != nil {
t.Fatalf("bad write err: %v", err)
}
partitions := map[int]struct{}{}
for i := 0; i < N; i++ {
m, err := r.FetchMessage(ctx)
if err != nil {
t.Errorf("bad error: %s", err)
}
partitions[m.Partition] = struct{}{}
}
if v := len(partitions); v != 3 {
t.Errorf("expected messages across 3 partitions; got messages across %v partitions", v)
}
}
func testReaderConsumerGroupRebalance(t *testing.T, ctx context.Context, r *Reader) {
r2 := NewReader(r.config)
defer r.Close()
const (
N = 12
partitions = 2
)
client, shutdown := newLocalClient()
defer shutdown()
// rebalance should result in 12 message in each of the partitions
writer := &Writer{
Addr: TCP(r.config.Brokers...),
Topic: r.config.Topic,
Balancer: &RoundRobin{},
BatchSize: 1,
Transport: client.Transport,
}
if err := writer.WriteMessages(ctx, makeTestSequence(N*partitions)...); err != nil {
t.Fatalf("bad write messages: %v", err)
}
if err := writer.Close(); err != nil {
t.Fatalf("bad write err: %v", err)
}
// after rebalance, each reader should have a partition to itself
for i := 0; i < N; i++ {
if _, err := r2.FetchMessage(ctx); err != nil {
t.Errorf("expect to read from reader 2")
}
if _, err := r.FetchMessage(ctx); err != nil {
t.Errorf("expect to read from reader 1")
}
}
}
func testReaderConsumerGroupRebalanceAcrossTopics(t *testing.T, ctx context.Context, r *Reader) {
// create a second reader that shares the groupID, but reads from a different topic
client, topic2, shutdown := newLocalClientAndTopic()
defer shutdown()
r2 := NewReader(ReaderConfig{
Brokers: r.config.Brokers,
Topic: topic2,
GroupID: r.config.GroupID,
HeartbeatInterval: r.config.HeartbeatInterval,
SessionTimeout: r.config.SessionTimeout,
RetentionTime: r.config.RetentionTime,
MinBytes: r.config.MinBytes,
MaxBytes: r.config.MaxBytes,
Logger: r.config.Logger,
})
defer r.Close()
prepareReader(t, ctx, r2, makeTestSequence(1)...)
const (
N = 12
)
// write messages across both partitions
writer := &Writer{
Addr: TCP(r.config.Brokers...),
Topic: r.config.Topic,
Balancer: &RoundRobin{},
BatchSize: 1,
Transport: client.Transport,
}
if err := writer.WriteMessages(ctx, makeTestSequence(N)...); err != nil {
t.Fatalf("bad write messages: %v", err)
}
if err := writer.Close(); err != nil {
t.Fatalf("bad write err: %v", err)
}
// after rebalance, r2 should read topic2 and r1 should read ALL of the original topic
if _, err := r2.FetchMessage(ctx); err != nil {
t.Errorf("expect to read from reader 2")
}
// all N messages on the original topic should be read by the original reader
for i := 0; i < N; i++ {
if _, err := r.FetchMessage(ctx); err != nil {
t.Errorf("expect to read from reader 1")
}
}
}
func testReaderConsumerGroupRebalanceAcrossManyPartitionsAndConsumers(t *testing.T, ctx context.Context, r *Reader) {
// I've rebalanced up to 100 servers, but the rebalance can take upwards
// of a minute and that seems too long for unit tests. Also, setting this
// to a larger number seems to make the kafka broker unresponsive.
// TODO research if there's a way to reduce rebalance time across many partitions
// svls: the described behavior is due to the thundering herd of readers
// hitting the rebalance timeout. introducing the 100ms sleep in the
// loop below in order to give time for the sync group to finish has
// greatly helped, though we still hit the timeout from time to time.
const N = 8
var readers []*Reader
for i := 0; i < N-1; i++ {
reader := NewReader(r.config)
readers = append(readers, reader)
time.Sleep(100 * time.Millisecond)
}
defer func() {
for _, r := range readers {
r.Close()
time.Sleep(100 * time.Millisecond)
}
}()
client, shutdown := newLocalClient()
defer shutdown()
// write messages across both partitions
writer := &Writer{
Addr: TCP(r.config.Brokers...),
Topic: r.config.Topic,
Balancer: &RoundRobin{},
BatchSize: 1,
Transport: client.Transport,
}
if err := writer.WriteMessages(ctx, makeTestSequence(N*3)...); err != nil {
t.Fatalf("bad write messages: %v", err)
}
if err := writer.Close(); err != nil {
t.Fatalf("bad write err: %v", err)
}
// all N messages on the original topic should be read by the original reader
for i := 0; i < N-1; i++ {
if _, err := readers[i].FetchMessage(ctx); err != nil {
t.Errorf("reader %v expected to read 1 message", i)
}
}
if _, err := r.FetchMessage(ctx); err != nil {
t.Errorf("expect to read from original reader")
}
}
func TestOffsetStash(t *testing.T) {
const topic = "topic"
newMessage := func(partition int, offset int64) Message {
return Message{
Topic: topic,
Partition: partition,
Offset: offset,
}
}
tests := map[string]struct {
Given offsetStash
Messages []Message
Expected offsetStash
}{
"nil": {},
"empty given, single message": {
Given: offsetStash{},
Messages: []Message{newMessage(0, 0)},
Expected: offsetStash{
topic: {0: 1},
},
},
"ignores earlier offsets": {
Given: offsetStash{
topic: {0: 2},
},
Messages: []Message{newMessage(0, 0)},
Expected: offsetStash{
topic: {0: 2},
},
},
"uses latest offset": {
Given: offsetStash{},
Messages: []Message{
newMessage(0, 2),
newMessage(0, 3),
newMessage(0, 1),
},
Expected: offsetStash{
topic: {0: 4},
},
},
"uses latest offset, across multiple topics": {
Given: offsetStash{},
Messages: []Message{
newMessage(0, 2),
newMessage(0, 3),
newMessage(0, 1),
newMessage(1, 5),
newMessage(1, 6),
},
Expected: offsetStash{
topic: {
0: 4,
1: 7,
},
},
},
}
for label, test := range tests {
t.Run(label, func(t *testing.T) {
test.Given.merge(makeCommits(test.Messages...))
if !reflect.DeepEqual(test.Expected, test.Given) {
t.Errorf("expected %v; got %v", test.Expected, test.Given)
}
})
}
}
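// mockOffsetCommitter fakes the group coordinator's offset-commit endpoint,
// counting invocations and failing the first failCount calls with io.EOF.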
type mockOffsetCommitter struct {
invocations int
failCount int
err error
}
func (m *mockOffsetCommitter) offsetCommit(request offsetCommitRequestV2) (offsetCommitResponseV2, error) {
m.invocations++
if m.failCount > 0 {
m.failCount--
return offsetCommitResponseV2{}, io.EOF
}
return offsetCommitResponseV2{}, nil
}
func TestValidateReader(t *testing.T) {
tests := []struct {
config ReaderConfig
errorOccured bool
}{
{config: ReaderConfig{}, errorOccured: true},
{config: ReaderConfig{Brokers: []string{"broker1"}}, errorOccured: true},
{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1"}, errorOccured: false},
{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1", Partition: -1}, errorOccured: true},
{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1", Partition: 1, MinBytes: -1}, errorOccured: true},
{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1", Partition: 1, MinBytes: 5, MaxBytes: -1}, errorOccured: true},
{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1", Partition: 1, MinBytes: 5, MaxBytes: 6}, errorOccured: false},
}
for _, test := range tests {
err := test.config.Validate()
if test.errorOccured && err == nil {
t.Fail()
}
if !test.errorOccured && err != nil {
t.Fail()
}
}
}
func TestCommitLoopImmediateFlushOnGenerationEnd(t *testing.T) {
t.Parallel()
var committedOffset int64
var commitCount int
gen := &Generation{
conn: mockCoordinator{
offsetCommitFunc: func(r offsetCommitRequestV2) (offsetCommitResponseV2, error) {
commitCount++
committedOffset = r.Topics[0].Partitions[0].Offset
return offsetCommitResponseV2{}, nil
},
},
done: make(chan struct{}),
log: func(func(Logger)) {},
logError: func(func(Logger)) {},
joined: make(chan struct{}),
}
// initialize commits so that the commitLoopImmediate select statement blocks
r := &Reader{stctx: context.Background(), commits: make(chan commitRequest, 100)}
for i := 0; i < 100; i++ {
cr := commitRequest{
commits: []commit{{
topic: "topic",
partition: 0,
offset: int64(i) + 1,
}},
errch: make(chan<- error, 1),
}
r.commits <- cr
}
gen.Start(func(ctx context.Context) {
r.commitLoopImmediate(ctx, gen)
})
gen.close()
if committedOffset != 100 {
t.Fatalf("expected commited offset to be 100 but got %d", committedOffset)
}
if commitCount >= 100 {
t.Fatalf("expected a single final commit on generation end got %d", commitCount)
}
}
func TestCommitOffsetsWithRetry(t *testing.T) {
offsets := offsetStash{"topic": {0: 0}}
tests := map[string]struct {
Fails int
Invocations int
HasError bool
}{
"happy path": {
Invocations: 1,
},
"1 retry": {
Fails: 1,
Invocations: 2,
},
"out of retries": {
Fails: defaultCommitRetries + 1,
Invocations: defaultCommitRetries,
HasError: true,
},
}
for label, test := range tests {
t.Run(label, func(t *testing.T) {
count := 0
gen := &Generation{
conn: mockCoordinator{
offsetCommitFunc: func(offsetCommitRequestV2) (offsetCommitResponseV2, error) {
count++
if count <= test.Fails {
return offsetCommitResponseV2{}, io.EOF
}
return offsetCommitResponseV2{}, nil
},
},
done: make(chan struct{}),
log: func(func(Logger)) {},
logError: func(func(Logger)) {},
}
r := &Reader{stctx: context.Background()}
err := r.commitOffsetsWithRetry(gen, offsets, defaultCommitRetries)
switch {
case test.HasError && err == nil:
t.Error("bad err: expected not nil; got nil")
case !test.HasError && err != nil:
t.Errorf("bad err: expected nil; got %v", err)
}
})
}
}
// Test that a reader won't continually rebalance when there are more consumers
// than partitions in a group.
// https://github.com/segmentio/kafka-go/issues/200
func TestRebalanceTooManyConsumers(t *testing.T) {
ctx := context.Background()
conf := ReaderConfig{
Brokers: []string{"localhost:9092"},
GroupID: makeGroupID(),
Topic: makeTopic(),
MaxWait: time.Second,
}
// Create the first reader and wait for it to become the leader.
r1 := NewReader(conf)
prepareReader(t, ctx, r1, makeTestSequence(1)...)
r1.ReadMessage(ctx)
// Clear the stats from the first rebalance.
r1.Stats()
// Second reader should cause one rebalance for each r1 and r2.
r2 := NewReader(conf)
// Wait for rebalances.
time.Sleep(5 * time.Second)
// Before the fix, r2 would cause continuous rebalances,
// as it tried to handshake() repeatedly.
rebalances := r1.Stats().Rebalances + r2.Stats().Rebalances
if rebalances > 2 {
t.Errorf("unexpected rebalances to first reader, got %d", rebalances)
}
}
func TestConsumerGroupWithMissingTopic(t *testing.T) {
t.Skip("this test doesn't work when the cluster is configured to auto-create topics")
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
conf := ReaderConfig{
Brokers: []string{"localhost:9092"},
GroupID: makeGroupID(),
Topic: makeTopic(),
MaxWait: time.Second,
PartitionWatchInterval: 100 * time.Millisecond,
WatchPartitionChanges: true,
}
r := NewReader(conf)
defer r.Close()
recvErr := make(chan error, 1)
go func() {
_, err := r.ReadMessage(ctx)
recvErr <- err
}()
time.Sleep(time.Second)
client, shutdown := newLocalClientWithTopic(conf.Topic, 1)
defer shutdown()
w := &Writer{
Addr: TCP(r.config.Brokers...),
Topic: r.config.Topic,
BatchTimeout: 10 * time.Millisecond,
BatchSize: 1,
Transport: client.Transport,
}
defer w.Close()
if err := w.WriteMessages(ctx, Message{}); err != nil {
t.Fatalf("write error: %+v", err)
}
if err := <-recvErr; err != nil {
t.Fatalf("read error: %+v", err)
}
nMsgs := r.Stats().Messages
if nMsgs != 1 {
t.Fatalf("expected to receive one message, but got %d", nMsgs)
}
}
func TestConsumerGroupWithTopic(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
conf := ReaderConfig{
Brokers: []string{"localhost:9092"},
GroupID: makeGroupID(),
Topic: makeTopic(),
MaxWait: time.Second,
PartitionWatchInterval: 100 * time.Millisecond,
WatchPartitionChanges: true,
Logger: newTestKafkaLogger(t, "Reader:"),
}
r := NewReader(conf)
defer r.Close()
recvErr := make(chan error, len(conf.GroupTopics))
go func() {
msg, err := r.ReadMessage(ctx)
t.Log(msg)
recvErr <- err
}()
time.Sleep(conf.MaxWait)
client, shutdown := newLocalClientWithTopic(conf.Topic, 1)
defer shutdown()
w := &Writer{
Addr: TCP(r.config.Brokers...),
Topic: conf.Topic,
BatchTimeout: 10 * time.Millisecond,
BatchSize: 1,
Transport: client.Transport,
Logger: newTestKafkaLogger(t, "Writer:"),
}
defer w.Close()
if err := w.WriteMessages(ctx, Message{Value: []byte(conf.Topic)}); err != nil {
t.Fatalf("write error: %+v", err)
}
if err := <-recvErr; err != nil {
t.Fatalf("read error: %+v", err)
}
nMsgs := r.Stats().Messages
if nMsgs != 1 {
t.Fatalf("expected to receive 1 message, but got %d", nMsgs)
}
}
func TestConsumerGroupWithGroupTopicsSingle(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
conf := ReaderConfig{
Brokers: []string{"localhost:9092"},
GroupID: makeGroupID(),
GroupTopics: []string{makeTopic()},
MaxWait: time.Second,
PartitionWatchInterval: 100 * time.Millisecond,
WatchPartitionChanges: true,
Logger: newTestKafkaLogger(t, "Reader:"),
}
r := NewReader(conf)
defer r.Close()
recvErr := make(chan error, len(conf.GroupTopics))
go func() {
msg, err := r.ReadMessage(ctx)
t.Log(msg)
recvErr <- err
}()
time.Sleep(conf.MaxWait)
for i, topic := range conf.GroupTopics {
client, shutdown := newLocalClientWithTopic(topic, 1)
defer shutdown()
w := &Writer{
Addr: TCP(r.config.Brokers...),
Topic: topic,
BatchTimeout: 10 * time.Millisecond,
BatchSize: 1,
Transport: client.Transport,
Logger: newTestKafkaLogger(t, fmt.Sprintf("Writer(%d):", i)),
}
defer w.Close()
if err := w.WriteMessages(ctx, Message{Value: []byte(topic)}); err != nil {
t.Fatalf("write error: %+v", err)
}
}
if err := <-recvErr; err != nil {
t.Fatalf("read error: %+v", err)
}
nMsgs := r.Stats().Messages
if nMsgs != int64(len(conf.GroupTopics)) {
t.Fatalf("expected to receive %d messages, but got %d", len(conf.GroupTopics), nMsgs)
}
}
func TestConsumerGroupWithGroupTopicsMultiple(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
client, shutdown := newLocalClient()
defer shutdown()
conf := ReaderConfig{
Brokers: []string{"localhost:9092"},
GroupID: makeGroupID(),
GroupTopics: []string{makeTopic(), makeTopic()},
MaxWait: time.Second,
PartitionWatchInterval: 100 * time.Millisecond,
WatchPartitionChanges: true,
Logger: newTestKafkaLogger(t, "Reader:"),
}
r := NewReader(conf)
w := &Writer{
Addr: TCP(r.config.Brokers...),
BatchTimeout: 10 * time.Millisecond,
BatchSize: 1,
Transport: client.Transport,
Logger: newTestKafkaLogger(t, "Writer:"),
}
defer w.Close()
time.Sleep(time.Second)
msgs := make([]Message, 0, len(conf.GroupTopics))
for _, topic := range conf.GroupTopics {
msgs = append(msgs, Message{Topic: topic})
}
if err := w.WriteMessages(ctx, msgs...); err != nil {
t.Logf("write error: %+v", err)
}
wg := new(sync.WaitGroup)
wg.Add(len(msgs))
go func() {
wg.Wait()
t.Log("closing reader")
r.Close()
}()
for {
msg, err := r.ReadMessage(ctx)
if err != nil {
if err == io.EOF {
t.Log("reader closed")
break
}
t.Fatalf("read error: %+v", err)
} else {
t.Logf("message read: %+v", msg)
wg.Done()
}
}
nMsgs := r.Stats().Messages
if nMsgs != int64(len(conf.GroupTopics)) {
t.Fatalf("expected to receive %d messages, but got %d", len(conf.GroupTopics), nMsgs)
}
}
func getOffsets(t *testing.T, config ReaderConfig) map[int]int64 {
// minimal config required to lookup coordinator
cg := ConsumerGroup{
config: ConsumerGroupConfig{
ID: config.GroupID,
Brokers: config.Brokers,
Dialer: config.Dialer,
},
}
conn, err := cg.coordinator()
if err != nil {
t.Errorf("unable to connect to coordinator: %v", err)
}
defer conn.Close()
offsets, err := conn.offsetFetch(offsetFetchRequestV1{
GroupID: config.GroupID,
Topics: []offsetFetchRequestV1Topic{{
Topic: config.Topic,
Partitions: []int32{0},
}},
})
if err != nil {
t.Errorf("bad fetchOffsets: %v", err)
}
m := map[int]int64{}
for _, r := range offsets.Responses {
if r.Topic == config.Topic {
for _, p := range r.PartitionResponses {
m[int(p.Partition)] = p.Offset
}
}
}
return m
}
const (
connTO = 1 * time.Second
connTestTO = 2 * connTO
)
func TestErrorCannotConnect(t *testing.T) {
r := NewReader(ReaderConfig{
Brokers: []string{"localhost:9093"},
Dialer: &Dialer{Timeout: connTO},
MaxAttempts: 1,
Topic: makeTopic(),
})
ctx, cancel := context.WithTimeout(context.Background(), connTestTO)
defer cancel()
_, err := r.FetchMessage(ctx)
if err == nil || ctx.Err() != nil {
t.Errorf("Reader.FetchMessage must fail when it cannot " +
"connect")
}
}
func TestErrorCannotConnectGroupSubscription(t *testing.T) {
r := NewReader(ReaderConfig{
Brokers: []string{"localhost:9093"},
Dialer: &Dialer{Timeout: 1 * time.Second},
GroupID: "foobar",
MaxAttempts: 1,
Topic: makeTopic(),
})
ctx, cancel := context.WithTimeout(context.Background(), connTestTO)
defer cancel()
_, err := r.FetchMessage(ctx)
if err == nil || ctx.Err() != nil {
t.Errorf("Reader.FetchMessage with a group subscription " +
"must fail when it cannot connect")
}
}
// Tests that the reader can handle messages where the response is truncated
// due to reaching MaxBytes.
//
// If MaxBytes is too small to fit 1 record then it will never truncate, so
// we start from a small message size and increase it until we are sure
// truncation has happened at some point.
func TestReaderTruncatedResponse(t *testing.T) {
topic := makeTopic()
createTopic(t, topic, 1)
defer deleteTopic(t, topic)
readerMaxBytes := 100
batchSize := 4
maxMsgPadding := 5
readContextTimeout := 10 * time.Second
var msgs []Message
// The key of each message
n := 0
// `i` is the amount of padding per message
for i := 0; i < maxMsgPadding; i++ {
bb := bytes.Buffer{}
for x := 0; x < i; x++ {
_, err := bb.WriteRune('0')
require.NoError(t, err)
}
padding := bb.Bytes()
// `j` is the number of times the message repeats
for j := 0; j < batchSize*4; j++ {
msgs = append(msgs, Message{
Key: []byte(fmt.Sprintf("%05d", n)),
Value: padding,
})
n++
}
}
wr := NewWriter(WriterConfig{
Brokers: []string{"localhost:9092"},
BatchSize: batchSize,
Async: false,
Topic: topic,
Balancer: &LeastBytes{},
})
err := wr.WriteMessages(context.Background(), msgs...)
require.NoError(t, err)
ctx, cancel := context.WithTimeout(context.Background(), readContextTimeout)
defer cancel()
r := NewReader(ReaderConfig{
Brokers: []string{"localhost:9092"},
Topic: topic,
MinBytes: 1,
MaxBytes: readerMaxBytes,
// Speed up testing
MaxWait: 100 * time.Millisecond,
})
defer r.Close()
expectedKeys := map[string]struct{}{}
for _, k := range msgs {
expectedKeys[string(k.Key)] = struct{}{}
}
keys := map[string]struct{}{}
for {
m, err := r.FetchMessage(ctx)
require.NoError(t, err)
keys[string(m.Key)] = struct{}{}
t.Logf("got key %s have %d keys expect %d\n", string(m.Key), len(keys), len(expectedKeys))
if len(keys) == len(expectedKeys) {
require.Equal(t, expectedKeys, keys)
return
}
}
}
// Tests that the reader can read record batches from log compacted topics
// where the batch ends with compacted records.
//
// This test forces varying sized chunks of duplicated messages along with
// configuring the topic with a minimal `segment.bytes` in order to
// guarantee that at least 1 batch can be compacted down to 0 "unread" messages
// with at least 1 "old" message otherwise the batch is skipped entirely.
func TestReaderReadCompactedMessage(t *testing.T) {
topic := makeTopic()
createTopicWithCompaction(t, topic, 1)
defer deleteTopic(t, topic)
msgs := makeTestDuplicateSequence()
writeMessagesForCompactionCheck(t, topic, msgs)
expectedKeys := map[string]int{}
for _, msg := range msgs {
expectedKeys[string(msg.Key)] = 1
}
// kafka 2.0.1 is extra slow
ctx, cancel := context.WithTimeout(context.Background(), time.Second*120)
defer cancel()
for {
success := func() bool {
r := NewReader(ReaderConfig{
Brokers: []string{"localhost:9092"},
Topic: topic,
MinBytes: 200,
MaxBytes: 200,
// Speed up testing
MaxWait: 100 * time.Millisecond,
})
defer r.Close()
keys := map[string]int{}
for {
m, err := r.FetchMessage(ctx)
if err != nil {
t.Logf("can't get message from compacted log: %v", err)
return false
}
keys[string(m.Key)]++
if len(keys) == countKeys(msgs) {
t.Logf("got keys: %+v", keys)
return reflect.DeepEqual(keys, expectedKeys)
}
}
}()
if success {
return
}
select {
case <-ctx.Done():
t.Fatal(ctx.Err())
default:
}
}
}
// writeMessagesForCompactionCheck writes messages with specific writer configuration
func writeMessagesForCompactionCheck(t *testing.T, topic string, msgs []Message) {
t.Helper()
wr := NewWriter(WriterConfig{
Brokers: []string{"localhost:9092"},
// Batch size must be large enough to have multiple compacted records
// for testing more edge cases.
BatchSize: 3,
Async: false,
Topic: topic,
Balancer: &LeastBytes{},
})
err := wr.WriteMessages(context.Background(), msgs...)
require.NoError(t, err)
}
// makeTestDuplicateSequence creates messages for compacted log testing
//
// All keys and values are 4 characters long to tightly control how many
// messages fit in each log segment.
func makeTestDuplicateSequence() []Message {
var msgs []Message
// `n` is an increasing counter so it is never compacted.
n := 0
// `i` determines how many compacted records to create
for i := 0; i < 5; i++ {
// `j` is how many times the current pattern repeats. We repeat because
// as long as we have a pattern that is slightly larger/smaller than
// the log segment size then if we repeat enough it will eventually
// try all configurations.
for j := 0; j < 30; j++ {
msgs = append(msgs, Message{
Key: []byte(fmt.Sprintf("%04d", n)),
Value: []byte(fmt.Sprintf("%04d", n)),
})
n++
// This produces the duplicated messages to compact.
for k := 0; k < i; k++ {
msgs = append(msgs, Message{
Key: []byte("dup_"),
Value: []byte("dup_"),
})
}
}
}
// "end markers" to force duplicate message outside of the last segment of
// the log so that they can all be compacted.
for i := 0; i < 10; i++ {
msgs = append(msgs, Message{
Key: []byte(fmt.Sprintf("e-%02d", i)),
Value: []byte(fmt.Sprintf("e-%02d", i)),
})
}
return msgs
}
// countKeys counts unique keys from given Message slice
func countKeys(msgs []Message) int {
m := make(map[string]struct{})
for _, msg := range msgs {
m[string(msg.Key)] = struct{}{}
}
return len(m)
}
func createTopicWithCompaction(t *testing.T, topic string, partitions int) {
t.Helper()
t.Logf("createTopic(%s, %d)", topic, partitions)
conn, err := Dial("tcp", "localhost:9092")
require.NoError(t, err)
defer conn.Close()
controller, err := conn.Controller()
require.NoError(t, err)
conn, err = Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
require.NoError(t, err)
conn.SetDeadline(time.Now().Add(10 * time.Second))
err = conn.CreateTopics(TopicConfig{
Topic: topic,
NumPartitions: partitions,
ReplicationFactor: 1,
ConfigEntries: []ConfigEntry{
{
ConfigName: "cleanup.policy",
ConfigValue: "compact",
},
{
ConfigName: "segment.bytes",
ConfigValue: "200",
},
},
})
switch err {
case nil:
// ok
case TopicAlreadyExists:
// ok
default:
require.NoError(t, err)
}
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancel()
waitForTopic(ctx, t, topic)
}
| [
"\"KAFKA_VERSION\""
]
| []
| [
"KAFKA_VERSION"
]
| [] | ["KAFKA_VERSION"] | go | 1 | 0 | |
evaluate_iou.py | import os
import time
import torch
import numpy as np
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from argparse import ArgumentParser
# user
from builders.model_builder import build_model
from builders.dataset_builder import build_dataset_test
from utils.utils import save_predict
from utils.iouEval import iouEval
def eval(args, test_loader, model):
"""
args:
      test_loader: DataLoader for the test dataset
      model: the model to evaluate
return: class IoU and mean IoU
"""
# evaluation or test mode
model.eval()
total_batches = len(test_loader)
iouEvalVal = iouEval(args.classes+1) # cityscapes
# iouEvalVal = iouEval(args.classes+1, ignoreIndex=11) #camvid
data_list = []
for i, (input, label, size, name) in enumerate(test_loader):
with torch.no_grad():
if args.cuda:
input_var = Variable(input).cuda()
else:
input_var = Variable(input)
#label = torch.from_numpy(np.array(label)).long().unsqueeze(0).cuda()
#label = label.long().unsqueeze(1).cuda()
start_time = time.time()
output = model(input_var)
torch.cuda.synchronize()
time_taken = time.time() - start_time
print('[%d/%d] time: %.2f' % (i + 1, total_batches, time_taken))
# print(output.max(1)[1].unsqueeze(1).dtype)
# print(label.dtype)
iouEvalVal.addBatch(output.max(1)[1].unsqueeze(1).data, label.unsqueeze(1))
output = output.cpu().data[0].numpy()
gt = np.asarray(label[0].numpy(), dtype=np.uint8)
output = output.transpose(1, 2, 0)
output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
# data_list.append([gt.flatten(), output.flatten()])
# save the predicted image
if args.save:
save_predict(output, gt, name[0], args.dataset, args.save_seg_dir,
output_grey=False, output_color=True, gt_color=False)
#iouVal, iou_classes = iouEvalVal.getIoU()
#meanIoU, per_class_iu = get_iou(data_list, args.classes)
meanIoU, per_class_iu = iouEvalVal.getIoU()
return meanIoU, per_class_iu
def eval_model(args):
"""
main function for testing
param args: global arguments
return: None
"""
print(args)
logFileLoc = 'log_test_' + args.model + '.txt'
logFileLoc = os.path.join(os.path.dirname(args.checkpoint), logFileLoc)
if os.path.isfile(logFileLoc):
logger = open(logFileLoc, 'a')
else:
logger = open(logFileLoc, 'w')
if args.cuda:
print("=====> use gpu id: '{}'".format(args.gpus))
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
if not torch.cuda.is_available():
raise Exception("no GPU found or wrong gpu id, please run without --cuda")
# build the model
model = build_model(args.model, num_classes=args.classes)
#print(model)
if args.cuda:
model = model.cuda() # using GPU for inference
cudnn.benchmark = True
if args.save:
if not os.path.exists(args.save_seg_dir):
os.makedirs(args.save_seg_dir)
# load the test set
datas, testLoader = build_dataset_test(args.dataset, args.num_workers)
if not args.best:
if args.checkpoint:
if os.path.isfile(args.checkpoint):
print("=====> loading checkpoint '{}'".format(args.checkpoint))
checkpoint = torch.load(args.checkpoint)
model.load_state_dict(checkpoint['model'])
# model.load_state_dict(convert_state_dict(checkpoint['model']))
# model.load_state_dict(convert_state_dict(checkpoint))
else:
print("=====> no checkpoint found at '{}'".format(args.checkpoint))
raise FileNotFoundError("no checkpoint found at '{}'".format(args.checkpoint))
print("=====> beginning validation")
print("validation set length: ", len(testLoader))
mIOU_val, per_class_iu = eval(args, testLoader, model)
print(mIOU_val)
print(per_class_iu)
    # Get the best test result among the last eval_num model checkpoints.
else:
if args.checkpoint:
if os.path.isfile(args.checkpoint):
dirname, basename = os.path.split(args.checkpoint)
epoch = int(os.path.splitext(basename)[0].split('_')[1])
mIOU_val = []
per_class_iu = []
min = epoch - args.eval_num + 1
max = epoch + 1
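                # evaluate the last eval_num checkpoints, i.e. epochs (epoch - eval_num + 1) .. epoch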
for i in range(min, max):
basename = 'model_' + str(i) + '.pth'
resume = os.path.join(dirname, basename)
checkpoint = torch.load(resume)
model.load_state_dict(checkpoint['model'])
print("=====> beginning test the " + basename)
print("validation set length: ", len(testLoader))
mIOU_val_0, per_class_iu_0 = eval(args, testLoader, model)
mIOU_val.append(mIOU_val_0)
per_class_iu.append(per_class_iu_0)
logger.write("%d\t%.4f\n" % (i, mIOU_val_0))
logger.flush()
index = list(range(min, max))[np.argmax(mIOU_val)]
print("The best mIoU among the last 10 models is", index)
print(mIOU_val)
per_class_iu = per_class_iu[np.argmax(mIOU_val)]
mIOU_val = np.max(mIOU_val)
print(mIOU_val)
print(per_class_iu)
else:
print("=====> no checkpoint found at '{}'".format(args.checkpoint))
raise FileNotFoundError("no checkpoint found at '{}'".format(args.checkpoint))
# Save the result
if not args.best:
model_path = os.path.splitext(os.path.basename(args.checkpoint))
args.logFile = 'log_test_' + model_path[0] + '.txt'
logFileLoc = os.path.join(os.path.dirname(args.checkpoint), args.logFile)
else:
args.logFile = 'log_test_' + 'best' + str(index) + '.txt'
logFileLoc = os.path.join(os.path.dirname(args.checkpoint), args.logFile)
# Save the result
if os.path.isfile(logFileLoc):
logger = open(logFileLoc, 'a')
else:
logger = open(logFileLoc, 'w')
logger.write("Mean IoU: %.4f" % mIOU_val)
logger.write("\nPer class IoU: ")
for i in range(len(per_class_iu)):
logger.write("%.4f\t" % per_class_iu[i])
logger.flush()
logger.close()
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--model', default="EACNet_ResNet-18-ENC", help="model name: Context Guided Network (CGNet)")
parser.add_argument('--dataset', default="cityscapes", help="dataset: cityscapes or camvid")
parser.add_argument('--num_workers', type=int, default=1, help="the number of parallel threads")
parser.add_argument('--batch_size', type=int, default=1,
help=" the batch_size is set to 1 when evaluating or testing")
parser.add_argument('--checkpoint', default="checkpoint/cityscapes/EACNet_ResNet-18-ENC/bs8_gpu1_train_adam_ohem/model_400.pth")
parser.add_argument('--eval_num', type=int, default=50)
# parser.add_argument('--checkpoint', type=str,
# default="./checkpoint/cityscapes/DABNet_cityscapes.pth",
# help="use the file to load the checkpoint for evaluating or testing ")
parser.add_argument('--save_seg_dir', type=str, default="./result/",
help="saving path of prediction result")
parser.add_argument('--best', action='store_true', default=True, help="Get the best result among last few checkpoints")
parser.add_argument('--save', action='store_true', default=False, help="Save the predicted image")
parser.add_argument('--cuda', default=True, help="run on CPU or GPU")
parser.add_argument("--gpus", default="0", type=str, help="gpu ids (default: 0)")
args = parser.parse_args()
args.save_seg_dir = os.path.join(args.save_seg_dir, args.dataset, args.model)
if args.dataset == 'cityscapes':
args.classes = 19
elif args.dataset == 'camvid':
args.classes = 11
else:
raise NotImplementedError(
"This repository now supports two datasets: cityscapes and camvid, %s is not included" % args.dataset)
eval_model(args)
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
pkg/dockerclient/shell.go | /*
Copyright 2017 The Nuclio Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockerclient
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"regexp"
"strconv"
"strings"
"time"
"github.com/nuclio/nuclio/pkg/cmdrunner"
"github.com/nuclio/nuclio/pkg/common"
"github.com/docker/distribution/reference"
"github.com/nuclio/errors"
"github.com/nuclio/logger"
"k8s.io/apimachinery/pkg/util/json"
)
// RestrictedNameChars collects the characters allowed to represent a network or endpoint name.
const restrictedNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]`
// RestrictedNamePattern is a regular expression to validate names against the collection of restricted characters.
// taken from moby and used to validate names (network, container, labels, endpoints)
var restrictedNameRegex = regexp.MustCompile(`^/?` + restrictedNameChars + `+$`)
var containerIDRegex = regexp.MustCompile(`^[\w+-\.]+$`)
// loose regexes, today just prohibit whitespaces
var restrictedBuildArgRegex = regexp.MustCompile(`^[\S]+$`)
var volumeNameRegex = regexp.MustCompile(`^[\S]+$`)
// this is an open issue https://github.com/kubernetes/kubernetes/issues/53201#issuecomment-534647130
// taking the loose approach,
var envVarNameRegex = regexp.MustCompile(`^[^=]+$`)
// ShellClient is a docker client that uses the shell to communicate with docker
type ShellClient struct {
logger logger.Logger
cmdRunner cmdrunner.CmdRunner
redactedValues []string
buildTimeout time.Duration
buildRetryInterval time.Duration
}
// NewShellClient creates a new docker client
func NewShellClient(parentLogger logger.Logger, runner cmdrunner.CmdRunner) (*ShellClient, error) {
var err error
newClient := &ShellClient{
logger: parentLogger.GetChild("docker"),
cmdRunner: runner,
buildTimeout: 1 * time.Hour,
buildRetryInterval: 3 * time.Second,
}
// set cmd runner
if newClient.cmdRunner == nil {
newClient.cmdRunner, err = cmdrunner.NewShellRunner(newClient.logger)
if err != nil {
return nil, errors.Wrap(err, "Failed to create command runner")
}
}
// verify
if _, err := newClient.GetVersion(false); err != nil {
return nil, errors.Wrap(err, "No docker client found")
}
return newClient, nil
}
// Build will build a docker image, given build options
func (c *ShellClient) Build(buildOptions *BuildOptions) error {
c.logger.DebugWith("Building image", "buildOptions", buildOptions)
if err := c.validateBuildOptions(buildOptions); err != nil {
return errors.Wrap(err, "Invalid build options passed")
}
// if context dir is not passed, use the dir containing the dockerfile
if buildOptions.ContextDir == "" && buildOptions.DockerfilePath != "" {
buildOptions.ContextDir = path.Dir(buildOptions.DockerfilePath)
}
// user can only specify context directory
if buildOptions.DockerfilePath == "" && buildOptions.ContextDir != "" {
buildOptions.DockerfilePath = path.Join(buildOptions.ContextDir, "Dockerfile")
}
buildArgs := ""
for buildArgName, buildArgValue := range buildOptions.BuildArgs {
buildArgs += fmt.Sprintf("--build-arg %s=%s ", buildArgName, buildArgValue)
}
if err := c.build(buildOptions, buildArgs); err != nil {
return errors.Wrap(err, "Failed to build")
}
c.logger.DebugWith("Successfully built image", "image", buildOptions.Image)
return nil
}
// CopyObjectsFromImage copies objects (files, directories) from a given image to local storage. it does
// this through an intermediate container which is deleted afterwards
func (c *ShellClient) CopyObjectsFromImage(imageName string,
objectsToCopy map[string]string,
allowCopyErrors bool) error {
// create container from image
containerID, err := c.createContainer(imageName)
if err != nil {
return errors.Wrapf(err, "Failed to create container from %s", imageName)
}
// delete once done copying objects
defer c.runCommand(nil, "docker rm -f %s", containerID) // nolint: errcheck
// copy objects
for objectImagePath, objectLocalPath := range objectsToCopy {
_, err = c.runCommand(nil, "docker cp %s:%s %s", containerID, objectImagePath, objectLocalPath)
if err != nil && !allowCopyErrors {
return errors.Wrapf(err, "Can't copy %s:%s -> %s", containerID, objectImagePath, objectLocalPath)
}
}
return nil
}
// PushImage pushes a local image to a remote docker repository
func (c *ShellClient) PushImage(imageName string, registryURL string) error {
taggedImage := common.CompileImageName(registryURL, imageName)
c.logger.InfoWith("Pushing image", "from", imageName, "to", taggedImage)
if _, err := reference.Parse(imageName); err != nil {
return errors.Wrap(err, "Invalid image name to tag/push")
}
if _, err := reference.Parse(taggedImage); err != nil {
return errors.Wrap(err, "Invalid tagged image name to tag/push")
}
_, err := c.runCommand(nil, "docker tag %s %s", imageName, taggedImage)
if err != nil {
return errors.Wrap(err, "Failed to tag image")
}
_, err = c.runCommand(nil, "docker push %s", taggedImage)
if err != nil {
return errors.Wrap(err, "Failed to push image")
}
return nil
}
// PullImage pulls an image from a remote docker repository
func (c *ShellClient) PullImage(imageURL string) error {
c.logger.InfoWith("Pulling image", "imageName", imageURL)
if _, err := reference.Parse(imageURL); err != nil {
return errors.Wrap(err, "Invalid image URL to pull")
}
_, err := c.runCommand(nil, "docker pull %s", imageURL)
return err
}
// RemoveImage will remove (delete) a local image
func (c *ShellClient) RemoveImage(imageName string) error {
c.logger.DebugWith("Removing image", "imageName", imageName)
if _, err := reference.Parse(imageName); err != nil {
return errors.Wrap(err, "Invalid image name to remove")
}
_, err := c.runCommand(nil, "docker rmi -f %s", imageName)
return err
}
// RunContainer will run a container based on an image and run options
func (c *ShellClient) RunContainer(imageName string, runOptions *RunOptions) (string, error) {
c.logger.DebugWith("Running container", "imageName", imageName, "runOptions", runOptions)
// validate the given run options against malicious contents
if err := c.validateRunOptions(imageName, runOptions); err != nil {
return "", errors.Wrap(err, "Invalid run options passed")
}
var dockerArguments []string
for localPort, dockerPort := range runOptions.Ports {
if localPort == RunOptionsNoPort {
dockerArguments = append(dockerArguments, fmt.Sprintf("-p %d", dockerPort))
} else {
dockerArguments = append(dockerArguments, fmt.Sprintf("-p %d:%d", localPort, dockerPort))
}
}
if runOptions.RestartPolicy != nil && runOptions.RestartPolicy.Name != RestartPolicyNameNo {
// sanity check
// https://docs.docker.com/engine/reference/run/#restart-policies---restart
// combining --restart (restart policy) with the --rm (clean up) flag results in an error.
if runOptions.Remove {
return "", errors.Errorf("Cannot combine restart policy with container removal")
}
restartMaxRetries := runOptions.RestartPolicy.MaximumRetryCount
restartPolicy := fmt.Sprintf("--restart %s", runOptions.RestartPolicy.Name)
if runOptions.RestartPolicy.Name == RestartPolicyNameOnFailure && restartMaxRetries >= 0 {
restartPolicy += fmt.Sprintf(":%d", restartMaxRetries)
}
dockerArguments = append(dockerArguments, restartPolicy)
}
if !runOptions.Attach {
dockerArguments = append(dockerArguments, "-d")
}
if runOptions.GPUs != "" {
dockerArguments = append(dockerArguments, fmt.Sprintf("--gpus %s", runOptions.GPUs))
}
if runOptions.Remove {
dockerArguments = append(dockerArguments, "--rm")
}
if runOptions.ContainerName != "" {
dockerArguments = append(dockerArguments, fmt.Sprintf("--name %s", runOptions.ContainerName))
}
if runOptions.Network != "" {
dockerArguments = append(dockerArguments, fmt.Sprintf("--net %s", runOptions.Network))
}
if runOptions.Labels != nil {
for labelName, labelValue := range runOptions.Labels {
dockerArguments = append(dockerArguments,
fmt.Sprintf("--label %s='%s'", labelName, c.replaceSingleQuotes(labelValue)))
}
}
if runOptions.Env != nil {
for envName, envValue := range runOptions.Env {
dockerArguments = append(dockerArguments, fmt.Sprintf("--env %s='%s'", envName, envValue))
}
}
if runOptions.Volumes != nil {
for volumeHostPath, volumeContainerPath := range runOptions.Volumes {
dockerArguments = append(dockerArguments,
fmt.Sprintf("--volume %s:%s ", volumeHostPath, volumeContainerPath))
}
}
if len(runOptions.MountPoints) > 0 {
for _, mountPoint := range runOptions.MountPoints {
readonly := ""
if !mountPoint.RW {
readonly = ",readonly"
}
dockerArguments = append(dockerArguments,
fmt.Sprintf("--mount source=%s,destination=%s%s",
mountPoint.Source,
mountPoint.Destination,
readonly))
}
}
if runOptions.RunAsUser != nil || runOptions.RunAsGroup != nil {
userStr := ""
if runOptions.RunAsUser != nil {
userStr += fmt.Sprintf("%d", *runOptions.RunAsUser)
}
if runOptions.RunAsGroup != nil {
userStr += fmt.Sprintf(":%d", *runOptions.RunAsGroup)
}
dockerArguments = append(dockerArguments, fmt.Sprintf("--user %s", userStr))
}
if runOptions.FSGroup != nil {
dockerArguments = append(dockerArguments, fmt.Sprintf("--group-add %d", *runOptions.FSGroup))
}
runResult, err := c.cmdRunner.Run(
&cmdrunner.RunOptions{LogRedactions: c.redactedValues},
"docker run %s %s %s",
strings.Join(dockerArguments, " "),
imageName,
runOptions.Command)
if err != nil {
c.logger.WarnWith("Failed to run container",
"err", err,
"stdout", runResult.Output,
"stderr", runResult.Stderr)
return "", err
}
// if user requested, set stdout / stderr
if runOptions.Stdout != nil {
*runOptions.Stdout = runResult.Output
}
if runOptions.Stderr != nil {
*runOptions.Stderr = runResult.Stderr
}
stdoutLines := strings.Split(runResult.Output, "\n")
lastStdoutLine := c.getLastNonEmptyLine(stdoutLines, 0)
// make sure there are no spaces in the ID, as normally we expect this command to only produce container ID
if strings.Contains(lastStdoutLine, " ") {
// if the image didn't exist prior to calling RunContainer, it will be pulled implicitly which will
// cause additional information to be outputted. if runOptions.ImageMayNotExist is false,
// this will result in an error.
if !runOptions.ImageMayNotExist {
return "", fmt.Errorf("Output from docker command includes more than just ID: %s", lastStdoutLine)
}
// if the implicit image pull was allowed and actually happened, the container ID will appear in the
// second to last line ¯\_(ツ)_/¯
lastStdoutLine = c.getLastNonEmptyLine(stdoutLines, 1)
}
return lastStdoutLine, err
}
// ExecInContainer will run a command in a container
func (c *ShellClient) ExecInContainer(containerID string, execOptions *ExecOptions) error {
c.logger.DebugWith("Executing in container", "containerID", containerID, "execOptions", execOptions)
// validate the given run options against malicious contents
if err := c.validateExecOptions(containerID, execOptions); err != nil {
return errors.Wrap(err, "Invalid exec options passed")
}
envArgument := ""
if execOptions.Env != nil {
for envName, envValue := range execOptions.Env {
envArgument += fmt.Sprintf("--env %s='%s' ", envName, envValue)
}
}
runResult, err := c.cmdRunner.Run(
&cmdrunner.RunOptions{LogRedactions: c.redactedValues},
"docker exec %s %s %s",
envArgument,
containerID,
execOptions.Command)
if err != nil {
c.logger.DebugWith("Failed to execute command in container",
"err", err,
"stdout", runResult.Output,
"stderr", runResult.Stderr)
return err
}
// if user requested, set stdout / stderr
if execOptions.Stdout != nil {
*execOptions.Stdout = runResult.Output
}
if execOptions.Stderr != nil {
*execOptions.Stderr = runResult.Stderr
}
return nil
}
// RemoveContainer removes a container given a container ID
func (c *ShellClient) RemoveContainer(containerID string) error {
c.logger.DebugWith("Removing container", "containerID", containerID)
// containerID is ID or name
if !containerIDRegex.MatchString(containerID) && !restrictedNameRegex.MatchString(containerID) {
return errors.New("Invalid container ID name in remove container")
}
_, err := c.runCommand(nil, "docker rm -f %s", containerID)
return err
}
// StopContainer stops a container given a container ID
func (c *ShellClient) StopContainer(containerID string) error {
c.logger.DebugWith("Stopping container", "containerID", containerID)
// containerID is ID or name
if !containerIDRegex.MatchString(containerID) && !restrictedNameRegex.MatchString(containerID) {
return errors.New("Invalid container ID to stop")
}
_, err := c.runCommand(nil, "docker stop %s", containerID)
return err
}
// StartContainer starts a container given a container ID
func (c *ShellClient) StartContainer(containerID string) error {
c.logger.DebugWith("Starting container", "containerID", containerID)
// containerID is ID or name
if !containerIDRegex.MatchString(containerID) && !restrictedNameRegex.MatchString(containerID) {
return errors.New("Invalid container ID to start")
}
_, err := c.runCommand(nil, "docker start %s", containerID)
return err
}
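// GetContainerPort returns the host port that the given container port is bound to,
// preferring an explicit port binding and falling back to the port assigned by the
// docker daemon (0 is returned when no mapping exists)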
func (c *ShellClient) GetContainerPort(container *Container, boundPort int) (int, error) {
functionHostPort := Port(fmt.Sprintf("%d/tcp", boundPort))
portBindings := container.HostConfig.PortBindings[functionHostPort]
ports := container.NetworkSettings.Ports[functionHostPort]
if len(portBindings) == 0 && len(ports) == 0 {
return 0, nil
}
	// prefer the port binding the user explicitly requested, if any
if len(portBindings) != 0 &&
portBindings[0].HostPort != "" && // docker version < 20.10
portBindings[0].HostPort != "0" { // on docker version >= 20.10, the host port would by 0 and not empty string.
return strconv.Atoi(portBindings[0].HostPort)
}
	// the port was not explicitly set by the user, take the port assigned by the docker daemon
if len(ports) != 0 && ports[0].HostPort != "" {
return strconv.Atoi(ports[0].HostPort)
}
	// the function might have failed during deployment and never been assigned a port
return 0, nil
}
// GetContainerLogs returns raw logs from a given container ID
// Concatenating stdout and stderr since there's no way to re-interlace them
func (c *ShellClient) GetContainerLogs(containerID string) (string, error) {
c.logger.DebugWith("Getting container logs", "containerID", containerID)
// containerID is ID or name
if !containerIDRegex.MatchString(containerID) && !restrictedNameRegex.MatchString(containerID) {
return "", errors.New("Invalid container ID to get logs from")
}
runOptions := &cmdrunner.RunOptions{
CaptureOutputMode: cmdrunner.CaptureOutputModeCombined,
}
runResult, err := c.runCommand(runOptions, "docker logs %s", containerID)
return runResult.Output, err
}
// AwaitContainerHealth blocks until the given container is healthy or the timeout passes
func (c *ShellClient) AwaitContainerHealth(containerID string, timeout *time.Duration) error {
c.logger.DebugWith("Awaiting container health", "containerID", containerID, "timeout", timeout)
if !containerIDRegex.MatchString(containerID) && !restrictedNameRegex.MatchString(containerID) {
return errors.New("Invalid container ID to await health for")
}
timedOut := false
containerHealthy := make(chan error, 1)
var timeoutChan <-chan time.Time
// if no timeout is given, create a channel that we'll never send on
if timeout == nil {
timeoutChan = make(<-chan time.Time, 1)
} else {
timeoutChan = time.After(*timeout)
}
go func() {
// start with a small interval between health checks, increasing it gradually
inspectInterval := 100 * time.Millisecond
for !timedOut {
containers, err := c.GetContainers(&GetContainerOptions{
ID: containerID,
Stopped: true,
})
if err == nil && len(containers) > 0 {
container := containers[0]
// container is healthy
if container.State.Health.Status == "healthy" {
containerHealthy <- nil
return
}
// container exited, bail out
if container.State.Status == "exited" {
containerHealthy <- errors.Errorf("Container exited with status: %d", container.State.ExitCode)
return
}
// container is dead, bail out
// https://docs.docker.com/engine/reference/commandline/ps/#filtering
if container.State.Status == "dead" {
containerHealthy <- errors.New("Container seems to be dead")
return
}
// wait a bit before retrying
c.logger.DebugWith("Container not healthy yet, retrying soon",
"timeout", timeout,
"containerID", containerID,
"containerState", container.State,
"nextCheckIn", inspectInterval)
}
time.Sleep(inspectInterval)
// increase the interval up to a cap
if inspectInterval < 800*time.Millisecond {
inspectInterval *= 2
}
}
}()
// wait for either the container to be healthy or the timeout
select {
case err := <-containerHealthy:
if err != nil {
return errors.Wrapf(err, "Container %s is not healthy", containerID)
}
c.logger.DebugWith("Container is healthy", "containerID", containerID)
case <-timeoutChan:
timedOut = true
containerLogs, err := c.GetContainerLogs(containerID)
if err != nil {
c.logger.ErrorWith("Container wasn't healthy within timeout (failed to get logs)",
"containerID", containerID,
"timeout", timeout,
"err", err)
} else {
c.logger.WarnWith("Container wasn't healthy within timeout",
"containerID", containerID,
"timeout", timeout,
"logs", containerLogs)
}
return errors.New("Container wasn't healthy in time")
}
return nil
}
// GetContainers returns a list of container IDs which match a certain criteria
func (c *ShellClient) GetContainers(options *GetContainerOptions) ([]Container, error) {
c.logger.DebugWith("Getting containers", "options", options)
if err := c.validateGetContainerOptions(options); err != nil {
return nil, errors.Wrap(err, "Invalid get container options passed")
}
stoppedContainersArgument := ""
if options.Stopped {
stoppedContainersArgument = "--all "
}
nameFilterArgument := ""
if options.Name != "" {
nameFilterArgument = fmt.Sprintf(`--filter "name=^/%s$" `, options.Name)
}
idFilterArgument := ""
if options.ID != "" {
idFilterArgument = fmt.Sprintf(`--filter "id=%s"`, options.ID)
}
labelFilterArgument := ""
for labelName, labelValue := range options.Labels {
labelFilterArgument += fmt.Sprintf(`--filter "label=%s=%s" `,
labelName,
labelValue)
}
runResult, err := c.runCommand(nil,
"docker ps --quiet %s %s %s %s",
stoppedContainersArgument,
idFilterArgument,
nameFilterArgument,
labelFilterArgument)
if err != nil {
return nil, errors.Wrap(err, "Failed to get containers")
}
containerIDsAsString := runResult.Output
if len(containerIDsAsString) == 0 {
return []Container{}, nil
}
runResult, err = c.runCommand(nil,
"docker inspect %s",
strings.ReplaceAll(containerIDsAsString, "\n", " "))
if err != nil {
return nil, errors.Wrap(err, "Failed to inspect containers")
}
containersInfoString := runResult.Output
var containersInfo []Container
// parse the result
if err := json.Unmarshal([]byte(containersInfoString), &containersInfo); err != nil {
return nil, errors.Wrap(err, "Failed to parse inspect response")
}
return containersInfo, nil
}
// GetContainerEvents returns a list of container events which occurred within a time range
func (c *ShellClient) GetContainerEvents(containerName string, since string, until string) ([]string, error) {
c.logger.DebugWith("Getting container events",
"containerName", containerName,
"since", since,
"until", until)
if !restrictedNameRegex.MatchString(containerName) {
return nil, errors.New("Invalid container name to get events for")
}
runResults, err := c.runCommand(nil, "docker events --filter container=%s --since %s --until %s",
containerName,
since,
until)
if err != nil {
return nil, errors.Wrap(err, "Failed to get container events")
}
return strings.Split(strings.TrimSpace(runResults.Output), "\n"), nil
}
// LogIn allows docker client to access secured registries
func (c *ShellClient) LogIn(options *LogInOptions) error {
// TODO: validate login URL
c.logger.DebugWith("Performing docker login", "URL", options.URL)
c.redactedValues = append(c.redactedValues, options.Password)
_, err := c.runCommand(nil, `docker login -u %s -p '%s' %s`,
options.Username,
options.Password,
options.URL)
return err
}
// CreateNetwork creates a docker network
func (c *ShellClient) CreateNetwork(options *CreateNetworkOptions) error {
c.logger.DebugWith("Creating docker network", "options", options)
// validate the given create network options against malicious contents
if err := c.validateCreateNetworkOptions(options); err != nil {
return errors.Wrap(err, "Invalid network creation options passed")
}
_, err := c.runCommand(nil, `docker network create %s`, options.Name)
return err
}
// DeleteNetwork deletes a docker network
func (c *ShellClient) DeleteNetwork(networkName string) error {
c.logger.DebugWith("Deleting docker network", "networkName", networkName)
if !restrictedNameRegex.MatchString(networkName) {
return errors.New("Invalid network name to delete")
}
_, err := c.runCommand(nil, `docker network rm %s`, networkName)
return err
}
// CreateVolume creates a docker volume
func (c *ShellClient) CreateVolume(options *CreateVolumeOptions) error {
c.logger.DebugWith("Creating docker volume", "options", options)
// validate the given create network options against malicious contents
if err := c.validateCreateVolumeOptions(options); err != nil {
return errors.Wrap(err, "Invalid volume creation options passed")
}
_, err := c.runCommand(nil, `docker volume create %s`, options.Name)
return err
}
// DeleteVolume deletes a docker volume
func (c *ShellClient) DeleteVolume(volumeName string) error {
c.logger.DebugWith("Deleting docker volume", "volumeName", volumeName)
if !volumeNameRegex.MatchString(volumeName) {
return errors.New("Invalid volume name to delete")
}
_, err := c.runCommand(nil, `docker volume rm --force %s`, volumeName)
return err
}
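// Save exports the given image to a tar archive at outPath using `docker save`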
func (c *ShellClient) Save(imageName string, outPath string) error {
c.logger.DebugWith("Docker saving to path", "outPath", outPath, "imageName", imageName)
if _, err := reference.Parse(imageName); err != nil {
return errors.Wrap(err, "Invalid image name to save")
}
_, err := c.runCommand(nil, `docker save --output %s %s`, outPath, imageName)
return err
}
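// Load imports images from the tar archive at inPath using `docker load`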
func (c *ShellClient) Load(inPath string) error {
c.logger.DebugWith("Docker loading from path", "inPath", inPath)
_, err := c.runCommand(nil, `docker load --input %s`, inPath)
return err
}
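// GetVersion returns the docker version information as JSON; when quiet is set,
// the underlying command is only logged on failure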
func (c *ShellClient) GetVersion(quiet bool) (string, error) {
runOptions := &cmdrunner.RunOptions{
LogOnlyOnFailure: quiet,
}
output, err := c.runCommand(runOptions, `docker version --format "{{json .}}"`)
if err != nil {
return "", errors.Wrap(err, "Failed to get docker version")
}
return output.Output, nil
}
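// GetContainerIPAddresses returns the IP addresses assigned to the container on
// its attached networks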
func (c *ShellClient) GetContainerIPAddresses(containerID string) ([]string, error) {
c.logger.DebugWith("Getting container IP addresses", "containerID", containerID)
runResults, err := c.runCommand(nil, `docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' %s`, containerID)
if err != nil {
return nil, errors.Wrap(err, "Failed to get container ip addresses")
}
return strings.Split(strings.TrimSpace(runResults.Output), "\n"), nil
}
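// GetContainerLogStream returns a reader over a container's logs, either following
// them as a live stream or returning the buffered output in one shot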
func (c *ShellClient) GetContainerLogStream(ctx context.Context,
containerID string,
logOptions *ContainerLogsOptions) (io.ReadCloser, error) {
if logOptions == nil {
logOptions = &ContainerLogsOptions{
Follow: true,
}
}
var cmdArgs []string
if logOptions.Since != "" {
cmdArgs = append(cmdArgs, fmt.Sprintf("--since %s", common.Quote(logOptions.Since)))
}
if logOptions.Tail != "" {
cmdArgs = append(cmdArgs, fmt.Sprintf("--tail %s", common.Quote(logOptions.Tail)))
}
if logOptions.Follow {
return c.streamCommand(ctx,
nil,
"docker logs %s --follow %s", strings.Join(cmdArgs, " "), containerID)
}
output, err := c.runCommand(&cmdrunner.RunOptions{
CaptureOutputMode: cmdrunner.CaptureOutputModeCombined,
}, "docker logs %s %s", strings.Join(cmdArgs, " "), containerID)
if err != nil {
return nil, errors.Wrap(err, "Failed to get container log stream")
}
return ioutil.NopCloser(strings.NewReader(output.Output)), nil
}
func (c *ShellClient) runCommand(runOptions *cmdrunner.RunOptions,
format string,
vars ...interface{}) (cmdrunner.RunResult, error) {
if runOptions == nil {
runOptions = &cmdrunner.RunOptions{
CaptureOutputMode: cmdrunner.CaptureOutputModeStdout,
}
}
runOptions.LogRedactions = append(runOptions.LogRedactions, c.redactedValues...)
runResult, err := c.cmdRunner.Run(runOptions, format, vars...)
if runOptions.CaptureOutputMode == cmdrunner.CaptureOutputModeStdout && runResult.Stderr != "" {
c.logger.WarnWith("Docker command outputted to stderr - this may result in errors",
"workingDir", runOptions.WorkingDir,
"cmd", cmdrunner.Redact(runOptions.LogRedactions, fmt.Sprintf(format, vars...)),
"stderr", runResult.Stderr)
}
return runResult, err
}
func (c *ShellClient) streamCommand(ctx context.Context,
runOptions *cmdrunner.RunOptions,
format string,
vars ...interface{}) (io.ReadCloser, error) {
return c.cmdRunner.Stream(ctx, runOptions, format, vars...)
}
func (c *ShellClient) getLastNonEmptyLine(lines []string, offset int) string {
numLines := len(lines)
// protect ourselves from overflows
if offset >= numLines {
offset = numLines - 1
} else if offset < 0 {
offset = 0
}
// iterate backwards over the lines
for idx := numLines - 1 - offset; idx >= 0; idx-- {
if lines[idx] != "" {
return lines[idx]
}
}
return ""
}
func (c *ShellClient) replaceSingleQuotes(input string) string {
return strings.ReplaceAll(input, "'", "’")
}
func (c *ShellClient) resolveDockerBuildNetwork() string {
// may contain none as a value
networkInterface := os.Getenv("NUCLIO_DOCKER_BUILD_NETWORK")
if networkInterface == "" {
networkInterface = common.GetEnvOrDefaultString("NUCLIO_BUILD_USE_HOST_NET", "host")
}
switch networkInterface {
case "host":
fallthrough
case "default":
fallthrough
case "none":
return fmt.Sprintf("--network %s", networkInterface)
default:
return ""
}
}
func (c *ShellClient) build(buildOptions *BuildOptions, buildArgs string) error {
var lastBuildErr error
cacheOption := ""
if buildOptions.NoCache {
cacheOption = "--no-cache"
}
pullOption := ""
if buildOptions.Pull {
pullOption = "--pull"
}
buildCommand := fmt.Sprintf("docker build %s --force-rm -t %s -f %s %s %s %s .",
c.resolveDockerBuildNetwork(),
buildOptions.Image,
buildOptions.DockerfilePath,
cacheOption,
pullOption,
buildArgs)
retryOnErrorMessages := []string{
// when one of the underlying image is gone (from cache)
"^No such image: sha256:",
"^unknown parent image ID sha256:",
"^failed to set parent sha256:",
"^failed to export image:",
// when overlay image is gone (from disk)
"^failed to get digest sha256:",
// when trying to reuse a missing nuclio-onbuild between functions
"^Unable to find image 'nuclio-onbuild-.*' locally",
}
runOptions := &cmdrunner.RunOptions{
CaptureOutputMode: cmdrunner.CaptureOutputModeStdout,
WorkingDir: &buildOptions.ContextDir,
}
	// retry the build on predefined errors that occur during race conditions and collisions between
// shared onbuild layers
common.RetryUntilSuccessfulOnErrorPatterns(c.buildTimeout, // nolint: errcheck
c.buildRetryInterval,
retryOnErrorMessages,
func() string {
runResults, err := c.runCommand(runOptions, buildCommand)
// preserve error
lastBuildErr = err
if err != nil {
return runResults.Stderr
}
return ""
})
return lastBuildErr
}
func (c *ShellClient) createContainer(imageName string) (string, error) {
var lastCreateContainerError error
var containerID string
if _, err := reference.Parse(imageName); err != nil {
return "", errors.Wrap(err, "Invalid image name to create container from")
}
retryOnErrorMessages := []string{
// sometimes, creating the container fails on not finding the image because
		// docker was under high load and had not yet updated its cache
fmt.Sprintf("^Unable to find image '%s.*' locally", imageName),
}
// retry in case docker daemon is under high load
// e.g.: between build and create, docker would need to update its cached manifest of built images
common.RetryUntilSuccessfulOnErrorPatterns(10*time.Second, // nolint: errcheck
2*time.Second,
retryOnErrorMessages,
func() string {
// create container from image
runResults, err := c.runCommand(nil, "docker create %s /bin/sh", imageName)
// preserve error
lastCreateContainerError = err
if err != nil {
return runResults.Stderr
}
containerID = runResults.Output
containerID = strings.TrimSpace(containerID)
return ""
})
return containerID, lastCreateContainerError
}
func (c *ShellClient) validateBuildOptions(buildOptions *BuildOptions) error {
if _, err := reference.Parse(buildOptions.Image); err != nil {
return errors.Wrap(err, "Invalid image name in build options")
}
for buildArgName, buildArgValue := range buildOptions.BuildArgs {
if !restrictedBuildArgRegex.MatchString(buildArgName) {
message := "Invalid build arg name supplied"
c.logger.WarnWith(message, "buildArgName", buildArgName)
return errors.New(message)
}
if !restrictedBuildArgRegex.MatchString(buildArgValue) {
message := "Invalid build arg value supplied"
c.logger.WarnWith(message, "buildArgValue", buildArgValue)
return errors.New(message)
}
}
return nil
}
func (c *ShellClient) validateRunOptions(imageName string, runOptions *RunOptions) error {
if _, err := reference.Parse(imageName); err != nil {
return errors.Wrap(err, "Invalid image name passed to run command")
}
	// the container name is optional, but when provided it must be a valid name
	if runOptions.ContainerName != "" && !restrictedNameRegex.MatchString(runOptions.ContainerName) {
		return errors.New("Invalid container name in run options")
	}
}
for envVarName := range runOptions.Env {
if !envVarNameRegex.MatchString(envVarName) {
return errors.New("Invalid env var name in run options")
}
}
for volumeHostPath, volumeContainerPath := range runOptions.Volumes {
if !volumeNameRegex.MatchString(volumeHostPath) {
return errors.New("Invalid volume host path in run options")
}
if !volumeNameRegex.MatchString(volumeContainerPath) {
return errors.New("Invalid volume container path in run options")
}
}
if runOptions.Network != "" && !restrictedNameRegex.MatchString(runOptions.Network) {
return errors.New("Invalid network name in run options")
}
return nil
}
func (c *ShellClient) validateExecOptions(containerID string, execOptions *ExecOptions) error {
// containerID is ID or name
if !containerIDRegex.MatchString(containerID) && !restrictedNameRegex.MatchString(containerID) {
return errors.New("Invalid container ID name in container exec")
}
for envVarName := range execOptions.Env {
if !envVarNameRegex.MatchString(envVarName) {
return errors.New("Invalid env var name in exec options")
}
}
return nil
}
func (c *ShellClient) validateCreateNetworkOptions(options *CreateNetworkOptions) error {
if !restrictedNameRegex.MatchString(options.Name) {
return errors.New("Invalid network name in network creation options")
}
return nil
}
func (c *ShellClient) validateCreateVolumeOptions(options *CreateVolumeOptions) error {
if !restrictedNameRegex.MatchString(options.Name) {
return errors.New("Invalid volume name in volume creation options")
}
return nil
}
func (c *ShellClient) validateGetContainerOptions(options *GetContainerOptions) error {
if options.Name != "" && !restrictedNameRegex.MatchString(options.Name) {
return errors.New("Invalid container name in get container options")
}
if options.ID != "" && !containerIDRegex.MatchString(options.ID) {
return errors.New("Invalid container ID in get container options")
}
return nil
}
| [
"\"NUCLIO_DOCKER_BUILD_NETWORK\""
]
| []
| [
"NUCLIO_DOCKER_BUILD_NETWORK"
]
| [] | ["NUCLIO_DOCKER_BUILD_NETWORK"] | go | 1 | 0 | |
main.py | from os import chdir
# Work from the local unet-master checkout so that the model/data modules and the
# relative data/membrane paths below resolve correctly.
chdir(r"C:\Users\jsalm\Documents\Python Scripts\Automated_Histo\unet-master\unet-master")
from model import *
from data import testGenerator,trainGenerator,saveResult

# Uncomment to pin training to GPU 0 (note: this needs `import os`, not just `from os import chdir`).
#os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# On-the-fly augmentation parameters for the training generator.
data_gen_args = dict(rotation_range=0.2,
                    width_shift_range=0.05,
                    height_shift_range=0.05,
                    shear_range=0.05,
                    zoom_range=0.05,
                    horizontal_flip=True,
                    fill_mode='nearest')

# Batch size 2; images in data/membrane/train/image with masks in data/membrane/train/label.
myGene = trainGenerator(2,'data/membrane/train','image','label',data_gen_args,save_to_dir = None)

# Build the U-Net and checkpoint only the best weights (lowest training loss).
model = unet()
model_checkpoint = ModelCheckpoint('unet_membrane.hdf5', monitor='loss',verbose=1, save_best_only=True)
model.fit_generator(myGene,steps_per_epoch=300,epochs=5,callbacks=[model_checkpoint])

# Predict masks for the test set and write the results back to the test folder.
testGene = testGenerator("data/membrane/test")
results = model.predict_generator(testGene,4549,verbose=1)
saveResult("data/membrane/test",results) | []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
sayit/wsgi.py | import os
# Fall back to the project's settings module unless one is already configured
# in the environment.
os.environ.setdefault(
    "DJANGO_SETTINGS_MODULE", "sayit.settings")
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise

# Static files live in the public/ directory one level above this package.
public_path = os.path.join(
    os.path.dirname(os.path.dirname(__file__)), 'public')

# Wrap the Django WSGI application with WhiteNoise so static files are served
# straight from the WSGI process, and expose everything under public/ at the
# site root.
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
application.add_files(public_path, prefix='/')
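# Hypothetical deployment note (not part of the original file): a WSGI server is
# normally pointed at this module's `application` object, for example:
#   gunicorn sayit.wsgi:application
# With the WhiteNoise wrapper above, that single process also serves the static
# files under public/ without a separate static file server.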
| []
| []
| []
| [] | [] | python | 0 | 0 | |
ssh/server.go | package ssh
import (
"fmt"
"io"
"log"
"net"
"os"
"time"
"github.com/aau-claaudia/citen/filter"
"github.com/aau-claaudia/citen/keyscan"
"github.com/fasmide/hostkeys"
"golang.org/x/crypto/ssh"
)
// Server represents a listening ssh server
type Server struct {
config *ssh.ServerConfig
}
// Serve will accept ssh connections
func (s *Server) Serve(l net.Listener) error {
var err error
s.config, err = DefaultConfig()
if err != nil {
return fmt.Errorf("unable to configure: %w", err)
}
for {
nConn, err := l.Accept()
if err != nil {
return fmt.Errorf("failed to accept incoming connection: %w", err)
}
go s.accept(nConn)
}
}
// DefaultConfig generates a default ssh.ServerConfig
func DefaultConfig() (*ssh.ServerConfig, error) {
config := &ssh.ServerConfig{
PublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
// store public key
s := &ssh.Permissions{
Extensions: map[string]string{
"publickey": string(key.Marshal()),
"publickey-fp": ssh.FingerprintSHA256(key),
},
}
return s, nil
},
}
// in the event that the environment variable is unset
// the manager will default to the current work directory
m := &hostkeys.Manager{
Directory: os.Getenv("CONFIGURATION_DIRECTORY"),
}
err := m.Manage(config)
return config, err
}
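// Minimal, hypothetical wiring of this server (not part of the original file):
// open a TCP listener and hand it to Serve. The address and error handling are
// illustrative only.
func exampleServe() {
	listener, err := net.Listen("tcp", "127.0.0.1:2222")
	if err != nil {
		log.Fatalf("unable to listen: %s", err)
	}
	server := &Server{}
	// Serve blocks, accepting and handling connections until the listener fails
	log.Fatal(server.Serve(listener))
}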
func (s *Server) accept(c net.Conn) {
// auth timeout
// give clients only 10 seconds to complete the ssh handshake and authenticate themselves
authTimer := time.AfterFunc(10*time.Second, func() {
c.Close()
})
// ssh handshake and auth
conn, chans, reqs, err := ssh.NewServerConn(c, s.config)
if err != nil {
log.Print("failed to handshake: ", err)
return
}
authTimer.Stop()
log.Printf("accepted session from %s@%s with key %s", conn.User(), conn.RemoteAddr(), conn.Permissions.Extensions["publickey-fp"])
// the incoming Request channel must be serviced.
// we will only respond to keepalive requests
go func(reqs <-chan *ssh.Request) {
for req := range reqs {
if req.Type == "[email protected]" {
req.Reply(true, nil)
continue
}
req.Reply(false, nil)
}
}(reqs)
// we should also send out keepalive requests
// the primary reason for this is to clean up dead connections
go func() {
// send keepalive requests every minute
ticker := time.NewTicker(time.Minute)
for range ticker.C {
// If this timer fires, the client didn't respond to our
// keepalive and we should tear down the session
timeout := time.AfterFunc(10*time.Second, func() {
// don't send any more keepalive requests
ticker.Stop()
// teardown the connection
conn.Close()
})
_, _, err := conn.SendRequest("[email protected]", true, nil)
// stop timeout, we did in fact receive something
timeout.Stop()
if err != nil {
// don't send any more keepalive requests
ticker.Stop()
// teardown the connection
conn.Close()
return
}
}
}()
// service the incoming Channel channel.
for channelRequest := range chans {
if channelRequest.ChannelType() != "direct-tcpip" {
channelRequest.Reject(ssh.Prohibited, fmt.Sprintf("no %s allowed, only direct-tcpip", channelRequest.ChannelType()))
continue
}
// parse request
forwardInfo := directTCPIP{}
err := ssh.Unmarshal(channelRequest.ExtraData(), &forwardInfo)
if err != nil {
log.Printf("unable to unmarshal forward information: %s", err)
channelRequest.Reject(ssh.UnknownChannelType, "failed to parse forward information")
continue
}
// filter target
if !filter.IsAllowed(forwardInfo.Addr) {
channelRequest.Reject(ssh.Prohibited, fmt.Sprintf("%s is not in my allowed forward list", forwardInfo.Addr))
continue
}
// keyscan target
if !keyscan.IsAllowed(forwardInfo.To(), conn.User(), []byte(conn.Permissions.Extensions["publickey"])) {
channelRequest.Reject(ssh.Prohibited, fmt.Sprintf("%s does not approve", forwardInfo.Addr))
continue
}
// dial target
forwardConnection, err := net.Dial("tcp", forwardInfo.To())
if err != nil {
log.Printf("unable to dial %s: %s", forwardInfo.To(), err)
channelRequest.Reject(ssh.ConnectionFailed, fmt.Sprintf("failed to dial %s: %s", forwardInfo.To(), err))
continue
}
// accept channel from ssh client
log.Printf("accepting forward to %s:%d", forwardInfo.Addr, forwardInfo.Rport)
channel, requests, err := channelRequest.Accept()
if err != nil {
log.Print("could not accept forward channel: ", err)
continue
}
go ssh.DiscardRequests(requests)
// pass traffic in both directions - close channel when io.Copy returns
go func() {
io.Copy(forwardConnection, channel)
channel.Close()
}()
go func() {
io.Copy(channel, forwardConnection)
channel.Close()
}()
}
log.Printf("session from %s@%s with key %s closed", conn.User(), conn.RemoteAddr(), conn.Permissions.Extensions["publickey-fp"])
}
| [
"\"CONFIGURATION_DIRECTORY\""
]
| []
| [
"CONFIGURATION_DIRECTORY"
]
| [] | ["CONFIGURATION_DIRECTORY"] | go | 1 | 0 | |
src/test/java/com/uber/cadence/workflow/WorkflowTest.java | /*
* Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Modifications copyright (C) 2017 Uber Technologies, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not
* use this file except in compliance with the License. A copy of the License is
* located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.uber.cadence.workflow;
import static com.uber.cadence.worker.NonDeterministicWorkflowPolicy.FailWorkflow;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import com.google.common.base.Strings;
import com.google.common.util.concurrent.UncheckedExecutionException;
import com.uber.cadence.BadRequestError;
import com.uber.cadence.CancellationAlreadyRequestedError;
import com.uber.cadence.DomainAlreadyExistsError;
import com.uber.cadence.DomainNotActiveError;
import com.uber.cadence.EntityNotExistsError;
import com.uber.cadence.FeatureFlags;
import com.uber.cadence.GetWorkflowExecutionHistoryResponse;
import com.uber.cadence.HistoryEvent;
import com.uber.cadence.Memo;
import com.uber.cadence.QueryConsistencyLevel;
import com.uber.cadence.QueryFailedError;
import com.uber.cadence.QueryRejectCondition;
import com.uber.cadence.SearchAttributes;
import com.uber.cadence.SignalExternalWorkflowExecutionFailedCause;
import com.uber.cadence.TimeoutType;
import com.uber.cadence.WorkflowExecution;
import com.uber.cadence.WorkflowExecutionAlreadyCompletedError;
import com.uber.cadence.WorkflowExecutionAlreadyStartedError;
import com.uber.cadence.WorkflowExecutionCloseStatus;
import com.uber.cadence.WorkflowIdReusePolicy;
import com.uber.cadence.activity.Activity;
import com.uber.cadence.activity.ActivityMethod;
import com.uber.cadence.activity.ActivityOptions;
import com.uber.cadence.activity.ActivityTask;
import com.uber.cadence.activity.LocalActivityOptions;
import com.uber.cadence.client.*;
import com.uber.cadence.common.CronSchedule;
import com.uber.cadence.common.MethodRetry;
import com.uber.cadence.common.RetryOptions;
import com.uber.cadence.converter.JsonDataConverter;
import com.uber.cadence.internal.common.WorkflowExecutionUtils;
import com.uber.cadence.internal.sync.DeterministicRunnerTest;
import com.uber.cadence.internal.worker.PollerOptions;
import com.uber.cadence.serviceclient.ClientOptions;
import com.uber.cadence.serviceclient.IWorkflowService;
import com.uber.cadence.serviceclient.WorkflowServiceTChannel;
import com.uber.cadence.testing.TestEnvironmentOptions;
import com.uber.cadence.testing.TestWorkflowEnvironment;
import com.uber.cadence.testing.WorkflowReplayer;
import com.uber.cadence.worker.*;
import com.uber.cadence.workflow.Functions.Func;
import com.uber.cadence.workflow.Functions.Func1;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.reflect.Type;
import java.text.SimpleDateFormat;
import java.time.Duration;
import java.time.LocalDateTime;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Queue;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.CancellationException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiPredicate;
import java.util.function.Function;
import java.util.function.Supplier;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
import org.junit.rules.TestWatcher;
import org.junit.rules.Timeout;
import org.junit.runner.Description;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameter;
import org.junit.runners.Parameterized.Parameters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@RunWith(Parameterized.class)
public class WorkflowTest {
/**
* When set to true, increases test, activity and workflow timeouts to large values to support
* stepping through code in a debugger without timing out.
*/
private static final boolean DEBUGGER_TIMEOUTS = false;
private static final String ANNOTATION_TASK_LIST = "WorkflowTest-testExecute[Docker]";
private TracingWorkflowInterceptorFactory tracer;
private static final boolean useDockerService =
Boolean.parseBoolean(System.getenv("USE_DOCKER_SERVICE"));
private static final boolean stickyOff = Boolean.parseBoolean(System.getenv("STICKY_OFF"));
@Parameters(name = "{1}")
public static Object[] data() {
if (!useDockerService) {
return new Object[][] {
{false, "TestService Sticky OFF", true}, {false, "TestService Sticky ON", false}
};
} else {
return new Object[][] {
{true, "Docker Sticky " + (stickyOff ? "OFF" : "ON"), stickyOff},
};
}
}
@Rule public TestName testName = new TestName();
@Rule
public Timeout globalTimeout =
Timeout.seconds(DEBUGGER_TIMEOUTS ? 500 : !useDockerService ? 15 : 30);
@Rule
public TestWatcher watchman =
new TestWatcher() {
@Override
protected void failed(Throwable e, Description description) {
if (tracer != null) {
System.err.println("TRACE:\n" + tracer.getTrace());
}
if (testEnvironment != null) {
System.err.println("HISTORIES:\n" + testEnvironment.getDiagnostics());
}
}
};
@Parameter public boolean useExternalService;
@Parameter(1)
public String testType;
@Parameter(2)
public boolean disableStickyExecution;
public static final String DOMAIN = "UnitTest";
private static final Logger log = LoggerFactory.getLogger(WorkflowTest.class);
private static String UUID_REGEXP =
"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}";
private String taskList;
private WorkerFactory workerFactory;
private Worker worker;
private TestActivitiesImpl activitiesImpl;
private WorkflowClient workflowClient;
private TestWorkflowEnvironment testEnvironment;
private ScheduledExecutorService scheduledExecutor;
private List<ScheduledFuture<?>> delayedCallbacks = new ArrayList<>();
private static final IWorkflowService service =
new WorkflowServiceTChannel(
ClientOptions.newBuilder()
.setFeatureFlags(
new FeatureFlags().setWorkflowExecutionAlreadyCompletedErrorEnabled(true))
.build());
@AfterClass
public static void closeService() {
service.close();
}
private static WorkflowOptions.Builder newWorkflowOptionsBuilder(String taskList) {
if (DEBUGGER_TIMEOUTS) {
return new WorkflowOptions.Builder()
.setExecutionStartToCloseTimeout(Duration.ofSeconds(1000))
.setTaskStartToCloseTimeout(Duration.ofSeconds(60))
.setTaskList(taskList);
} else {
return new WorkflowOptions.Builder()
.setExecutionStartToCloseTimeout(Duration.ofSeconds(30))
.setTaskStartToCloseTimeout(Duration.ofSeconds(5))
.setTaskList(taskList);
}
}
private static ActivityOptions newActivityOptions1(String taskList) {
if (DEBUGGER_TIMEOUTS) {
return new ActivityOptions.Builder()
.setTaskList(taskList)
.setScheduleToCloseTimeout(Duration.ofSeconds(1000))
.setHeartbeatTimeout(Duration.ofSeconds(1000))
.setScheduleToStartTimeout(Duration.ofSeconds(1000))
.setStartToCloseTimeout(Duration.ofSeconds(10000))
.build();
} else {
return new ActivityOptions.Builder()
.setTaskList(taskList)
.setScheduleToCloseTimeout(Duration.ofSeconds(5))
.setHeartbeatTimeout(Duration.ofSeconds(5))
.setScheduleToStartTimeout(Duration.ofSeconds(5))
.setStartToCloseTimeout(Duration.ofSeconds(10))
.build();
}
}
private static LocalActivityOptions newLocalActivityOptions1() {
if (DEBUGGER_TIMEOUTS) {
return new LocalActivityOptions.Builder()
.setScheduleToCloseTimeout(Duration.ofSeconds(1000))
.build();
} else {
return new LocalActivityOptions.Builder()
.setScheduleToCloseTimeout(Duration.ofSeconds(5))
.build();
}
}
private static ActivityOptions newActivityOptions2() {
return new ActivityOptions.Builder().setScheduleToCloseTimeout(Duration.ofSeconds(20)).build();
}
@Before
public void setUp() {
String testMethod = testName.getMethodName();
if (testMethod.startsWith("testExecute") || testMethod.startsWith("testStart")) {
taskList = ANNOTATION_TASK_LIST;
} else {
taskList = "WorkflowTest-" + testMethod + "-" + UUID.randomUUID().toString();
}
tracer = new TracingWorkflowInterceptorFactory();
// TODO: Create a version of TestWorkflowEnvironment that runs against a real service.
WorkflowClientOptions clientOptions =
WorkflowClientOptions.newBuilder().setDomain(DOMAIN).build();
if (useExternalService) {
workflowClient = WorkflowClient.newInstance(service, clientOptions);
WorkerFactoryOptions factoryOptions =
WorkerFactoryOptions.newBuilder()
.setDisableStickyExecution(disableStickyExecution)
.build();
workerFactory = new WorkerFactory(workflowClient, factoryOptions);
WorkerOptions workerOptions =
WorkerOptions.newBuilder()
.setActivityPollerOptions(PollerOptions.newBuilder().setPollThreadCount(5).build())
.setMaxConcurrentActivityExecutionSize(1000)
.setInterceptorFactory(tracer)
.build();
worker = workerFactory.newWorker(taskList, workerOptions);
scheduledExecutor = new ScheduledThreadPoolExecutor(1);
} else {
TestEnvironmentOptions testOptions =
new TestEnvironmentOptions.Builder()
.setWorkflowClientOptions(clientOptions)
.setInterceptorFactory(tracer)
.setWorkerFactoryOptions(
WorkerFactoryOptions.newBuilder()
.setDisableStickyExecution(disableStickyExecution)
.build())
.build();
testEnvironment = TestWorkflowEnvironment.newInstance(testOptions);
worker = testEnvironment.newWorker(taskList);
workflowClient = testEnvironment.newWorkflowClient();
}
ActivityCompletionClient completionClient = workflowClient.newActivityCompletionClient();
activitiesImpl = new TestActivitiesImpl(completionClient);
worker.registerActivitiesImplementations(activitiesImpl);
newWorkflowOptionsBuilder(taskList);
newActivityOptions1(taskList);
activitiesImpl.invocations.clear();
activitiesImpl.procResult.clear();
}
@After
public void tearDown() throws Throwable {
if (activitiesImpl != null) {
activitiesImpl.close();
}
if (testEnvironment != null) {
testEnvironment.close();
}
for (ScheduledFuture<?> result : delayedCallbacks) {
if (result.isDone() && !result.isCancelled()) {
try {
result.get();
} catch (InterruptedException e) {
} catch (ExecutionException e) {
throw e.getCause();
}
}
}
if (tracer != null) {
tracer.assertExpected();
}
}
private void startWorkerFor(Class<?>... workflowTypes) {
worker.registerWorkflowImplementationTypes(workflowTypes);
if (useExternalService) {
workerFactory.start();
} else {
testEnvironment.start();
}
}
// TODO: Refactor testEnvironment to support testing through the real service to avoid these
// conditional switches
void registerDelayedCallback(Duration delay, Runnable r) {
if (useExternalService) {
ScheduledFuture<?> result =
scheduledExecutor.schedule(r, delay.toMillis(), TimeUnit.MILLISECONDS);
delayedCallbacks.add(result);
} else {
testEnvironment.registerDelayedCallback(delay, r);
}
}
void sleep(Duration d) {
if (useExternalService) {
try {
Thread.sleep(d.toMillis());
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted", e);
}
} else {
testEnvironment.sleep(d);
}
}
long currentTimeMillis() {
if (useExternalService) {
return System.currentTimeMillis();
} else {
return testEnvironment.currentTimeMillis();
}
}
public interface TestWorkflow1 {
@WorkflowMethod
String execute(String taskList);
}
public interface TestWorkflowSignaled {
@WorkflowMethod
String execute();
@SignalMethod(name = "testSignal")
void signal1(String arg);
}
public interface TestWorkflow2 {
@WorkflowMethod(name = "testActivity")
String execute(boolean useExternalService);
@QueryMethod(name = "getTrace")
List<String> getTrace();
}
public interface TestWorkflow3 {
@WorkflowMethod
String execute(String taskList);
@SignalMethod(name = "testSignal")
void signal1(String arg);
@QueryMethod(name = "getState")
String getState();
}
public interface TestWorkflowQuery {
@WorkflowMethod()
String execute(String taskList);
@QueryMethod()
String query();
}
public static class TestSyncWorkflowImpl implements TestWorkflow1 {
@Override
public String execute(String taskList) {
TestActivities activities =
Workflow.newActivityStub(TestActivities.class, newActivityOptions1(taskList));
// Invoke synchronously in a separate thread for testing purposes only.
// In real workflows use
// Async.procedure(activities::activityWithDelay, 1000, true)
Promise<String> a1 = Async.function(() -> activities.activityWithDelay(1000, true));
Workflow.sleep(2000);
return activities.activity2(a1.get(), 10);
}
}
@Test
public void testSync() {
startWorkerFor(TestSyncWorkflowImpl.class);
TestWorkflow1 workflowStub =
workflowClient.newWorkflowStub(
TestWorkflow1.class, newWorkflowOptionsBuilder(taskList).build());
String result = workflowStub.execute(taskList);
assertEquals("activity10", result);
tracer.setExpected(
"sleep PT2S",
"executeActivity TestActivities::activityWithDelay",
"executeActivity TestActivities::activity2");
}
public interface TestMultipleTimers {
@WorkflowMethod
long execute();
}
public static class TestMultipleTimersImpl implements TestMultipleTimers {
@Override
public long execute() {
Promise<Void> t1 = Async.procedure(() -> Workflow.sleep(Duration.ofSeconds(1)));
Promise<Void> t2 = Async.procedure(() -> Workflow.sleep(Duration.ofSeconds(2)));
long start = Workflow.currentTimeMillis();
Promise.anyOf(t1, t2).get();
long elapsed = Workflow.currentTimeMillis() - start;
return elapsed;
}
}
@Test
public void testMultipleTimers() {
startWorkerFor(TestMultipleTimersImpl.class);
TestMultipleTimers workflowStub =
workflowClient.newWorkflowStub(
TestMultipleTimers.class, newWorkflowOptionsBuilder(taskList).build());
long result = workflowStub.execute();
assertTrue("should be around 1 second: " + result, result < 2000);
}
public static class TestActivityRetryWithMaxAttempts implements TestWorkflow1 {
@Override
@SuppressWarnings("Finally")
public String execute(String taskList) {
ActivityOptions options =
new ActivityOptions.Builder()
.setTaskList(taskList)
.setHeartbeatTimeout(Duration.ofSeconds(5))
.setScheduleToCloseTimeout(Duration.ofSeconds(3))
.setRetryOptions(
new RetryOptions.Builder()
.setMaximumInterval(Duration.ofSeconds(1))
.setInitialInterval(Duration.ofSeconds(1))
.setMaximumAttempts(3)
.setDoNotRetry(AssertionError.class)
.build())
.build();
TestActivities activities = Workflow.newActivityStub(TestActivities.class, options);
long start = Workflow.currentTimeMillis();
try {
activities.heartbeatAndThrowIO();
} finally {
if (Workflow.currentTimeMillis() - start < 2000) {
fail("Activity retried without delay");
}
}
return "ignored";
}
}
@Test
public void testActivityRetryWithMaxAttempts() {
startWorkerFor(TestActivityRetryWithMaxAttempts.class);
TestWorkflow1 workflowStub =
workflowClient.newWorkflowStub(
TestWorkflow1.class, newWorkflowOptionsBuilder(taskList).build());
try {
workflowStub.execute(taskList);
fail("unreachable");
} catch (WorkflowException e) {
assertTrue(e.getCause().getCause() instanceof IOException);
}
assertEquals(activitiesImpl.toString(), 3, activitiesImpl.invocations.size());
}
public static class TestActivityRetryWithExpiration implements TestWorkflow1 {
@Override
@SuppressWarnings("Finally")
public String execute(String taskList) {
ActivityOptions options =
new ActivityOptions.Builder()
.setTaskList(taskList)
.setHeartbeatTimeout(Duration.ofSeconds(5))
.setScheduleToCloseTimeout(Duration.ofSeconds(3))
.setRetryOptions(
new RetryOptions.Builder()
.setExpiration(Duration.ofSeconds(3))
.setMaximumInterval(Duration.ofSeconds(1))
.setInitialInterval(Duration.ofSeconds(1))
.setDoNotRetry(AssertionError.class)
.build())
.build();
TestActivities activities = Workflow.newActivityStub(TestActivities.class, options);
long start = Workflow.currentTimeMillis();
try {
activities.heartbeatAndThrowIO();
} finally {
if (Workflow.currentTimeMillis() - start < 2000) {
fail("Activity retried without delay");
}
}
return "ignored";
}
}
@Test
public void testActivityRetryWithExpiration() {
startWorkerFor(TestActivityRetryWithExpiration.class);
TestWorkflow1 workflowStub =
workflowClient.newWorkflowStub(
TestWorkflow1.class, newWorkflowOptionsBuilder(taskList).build());
try {
workflowStub.execute(taskList);
fail("unreachable");
} catch (WorkflowException e) {
assertTrue(e.getCause().getCause() instanceof IOException);
}
assertEquals(activitiesImpl.toString(), 3, activitiesImpl.invocations.size());
}
public static class TestLocalActivityRetry implements TestWorkflow1 {
@Override
@SuppressWarnings("Finally")
public String execute(String taskList) {
LocalActivityOptions options =
new LocalActivityOptions.Builder()
.setScheduleToCloseTimeout(Duration.ofSeconds(5))
.setRetryOptions(
new RetryOptions.Builder()
.setExpiration(Duration.ofSeconds(100))
.setMaximumInterval(Duration.ofSeconds(20))
.setInitialInterval(Duration.ofSeconds(1))
.setMaximumAttempts(5)
.setDoNotRetry(AssertionError.class)
.build())
.build();
TestActivities activities = Workflow.newLocalActivityStub(TestActivities.class, options);
activities.throwIO();
return "ignored";
}
}
@Test
public void testLocalActivityRetry() {
startWorkerFor(TestLocalActivityRetry.class);
TestWorkflow1 workflowStub =
workflowClient.newWorkflowStub(
TestWorkflow1.class,
newWorkflowOptionsBuilder(taskList)
.setTaskStartToCloseTimeout(Duration.ofSeconds(5))
.build());
try {
workflowStub.execute(taskList);
fail("unreachable");
} catch (WorkflowException e) {
assertTrue(e.getCause().getCause() instanceof IOException);
}
assertEquals(activitiesImpl.toString(), 5, activitiesImpl.invocations.size());
assertEquals("last attempt", 5, activitiesImpl.getLastAttempt());
}
public static class TestActivityRetryOnTimeout implements TestWorkflow1 {
@Override
@SuppressWarnings("Finally")
public String execute(String taskList) {
ActivityOptions options =
new ActivityOptions.Builder()
.setTaskList(taskList)
.setScheduleToCloseTimeout(Duration.ofSeconds(1))
.setRetryOptions(
new RetryOptions.Builder()
.setExpiration(Duration.ofSeconds(100))
.setMaximumInterval(Duration.ofSeconds(1))
.setInitialInterval(Duration.ofSeconds(1))
.setMaximumAttempts(3)
.setDoNotRetry(AssertionError.class)
.build())
.build();
TestActivities activities = Workflow.newActivityStub(TestActivities.class, options);
long start = Workflow.currentTimeMillis();
try {
activities.neverComplete(); // should timeout as scheduleToClose is 1 second
throw new IllegalStateException("unreachable");
} catch (ActivityTimeoutException e) {
long elapsed = Workflow.currentTimeMillis() - start;
if (elapsed < 5000) {
throw new RuntimeException("Activity retried without delay: " + elapsed);
}
throw e;
}
}
}
@Test
public void testActivityRetryOnTimeout() {
startWorkerFor(TestActivityRetryOnTimeout.class);
TestWorkflow1 workflowStub =
workflowClient.newWorkflowStub(
TestWorkflow1.class, newWorkflowOptionsBuilder(taskList).build());
// Wall time on purpose
long start = System.currentTimeMillis();
try {
workflowStub.execute(taskList);
fail("unreachable");
} catch (WorkflowException e) {
assertTrue(e.getCause() instanceof ActivityTimeoutException);
}
assertEquals(activitiesImpl.toString(), 3, activitiesImpl.invocations.size());
long elapsed = System.currentTimeMillis() - start;
if (testName.getMethodName().contains("TestService")) {
assertTrue("retry timer skips time", elapsed < 5000);
}
}
public static class TestActivityRetryOptionsChange implements TestWorkflow1 {
@Override
public String execute(String taskList) {
ActivityOptions.Builder options =
new ActivityOptions.Builder()
.setTaskList(taskList)
.setHeartbeatTimeout(Duration.ofSeconds(5))
.setScheduleToCloseTimeout(Duration.ofSeconds(5))
.setScheduleToStartTimeout(Duration.ofSeconds(5))
.setStartToCloseTimeout(Duration.ofSeconds(10));
RetryOptions retryOptions;
if (Workflow.isReplaying()) {
retryOptions =
new RetryOptions.Builder()
.setMaximumInterval(Duration.ofSeconds(1))
.setInitialInterval(Duration.ofSeconds(1))
.setExpiration(Duration.ofDays(1))
.setMaximumAttempts(3)
.build();
} else {
retryOptions =
new RetryOptions.Builder()
.setMaximumInterval(Duration.ofSeconds(1))
.setInitialInterval(Duration.ofSeconds(1))
.setExpiration(Duration.ofDays(1))
.setMaximumAttempts(2)
.build();
}
TestActivities activities = Workflow.newActivityStub(TestActivities.class, options.build());
Workflow.retry(retryOptions, () -> activities.throwIO());
return "ignored";
}
}
@Test
public void testActivityRetryOptionsChange() {
startWorkerFor(TestActivityRetryOptionsChange.class);
TestWorkflow1 workflowStub =
workflowClient.newWorkflowStub(
TestWorkflow1.class, newWorkflowOptionsBuilder(taskList).build());
try {
workflowStub.execute(taskList);
fail("unreachable");
} catch (WorkflowException e) {
assertTrue(e.getCause().getCause() instanceof IOException);
}
assertEquals(activitiesImpl.toString(), 2, activitiesImpl.invocations.size());
}
public static class TestUntypedActivityRetry implements TestWorkflow1 {
@Override
public String execute(String taskList) {
ActivityOptions options =
new ActivityOptions.Builder()
.setTaskList(taskList)
.setHeartbeatTimeout(Duration.ofSeconds(5))
.setScheduleToCloseTimeout(Duration.ofSeconds(5))
.setScheduleToStartTimeout(Duration.ofSeconds(5))
.setStartToCloseTimeout(Duration.ofSeconds(10))
.setRetryOptions(
new RetryOptions.Builder()
.setExpiration(Duration.ofSeconds(100))
.setMaximumInterval(Duration.ofSeconds(1))
.setInitialInterval(Duration.ofSeconds(1))
.setMaximumAttempts(3)
.build())
.build();
ActivityStub activities = Workflow.newUntypedActivityStub(options);
activities.execute("TestActivities::throwIO", Void.class);
return "ignored";
}
}
@Test
public void testUntypedActivityRetry() {
startWorkerFor(TestUntypedActivityRetry.class);
TestWorkflow1 workflowStub =
workflowClient.newWorkflowStub(
TestWorkflow1.class, newWorkflowOptionsBuilder(taskList).build());
try {
workflowStub.execute(taskList);
fail("unreachable");
} catch (WorkflowException e) {
assertTrue(e.getCause().getCause() instanceof IOException);
}
assertEquals(activitiesImpl.toString(), 3, activitiesImpl.invocations.size());
}
public static class TestActivityRetryAnnotated implements TestWorkflow1 {
private final TestActivities activities;
public TestActivityRetryAnnotated() {
this.activities = Workflow.newActivityStub(TestActivities.class);
}
@Override
public String execute(String taskList) {
activities.throwIOAnnotated();
return "ignored";
}
}
@Test
public void testActivityRetryAnnotated() {
startWorkerFor(TestActivityRetryAnnotated.class);
TestWorkflow1 workflowStub =
workflowClient.newWorkflowStub(
TestWorkflow1.class, newWorkflowOptionsBuilder(taskList).build());
try {
workflowStub.execute(taskList);
fail("unreachable");
} catch (WorkflowException e) {
assertTrue(e.getCause().getCause() instanceof IOException);
}
assertEquals(activitiesImpl.toString(), 3, activitiesImpl.invocations.size());
}
public static class TestAsyncActivityRetry implements TestWorkflow1 {
private TestActivities activities;
@Override
public String execute(String taskList) {
ActivityOptions options =
new ActivityOptions.Builder()
.setTaskList(taskList)
.setHeartbeatTimeout(Duration.ofSeconds(5))
.setScheduleToCloseTimeout(Duration.ofSeconds(5))
.setScheduleToStartTimeout(Duration.ofSeconds(5))
.setStartToCloseTimeout(Duration.ofSeconds(10))
.setRetryOptions(
new RetryOptions.Builder()
.setExpiration(Duration.ofSeconds(100))
.setMaximumInterval(Duration.ofSeconds(1))
.setInitialInterval(Duration.ofSeconds(1))
.setMaximumAttempts(3)
.build())
.build();
this.activities = Workflow.newActivityStub(TestActivities.class, options);
Async.procedure(activities::heartbeatAndThrowIO).get();
return "ignored";
}
}
@Test
public void testAsyncActivityRetry() {
startWorkerFor(TestAsyncActivityRetry.class);
TestWorkflow1 workflowStub =
workflowClient.newWorkflowStub(
TestWorkflow1.class, newWorkflowOptionsBuilder(taskList).build());
try {
workflowStub.execute(taskList);
fail("unreachable");
} catch (WorkflowException e) {
assertTrue(e.getCause().getCause() instanceof IOException);
}
assertEquals(activitiesImpl.toString(), 3, activitiesImpl.invocations.size());
}
/**
* Tests that history created before server-side retry was supported can still be replayed by a
* client that supports server-side retry.
*/
@Test
public void testAsyncActivityRetryReplay() throws Exception {
// Avoid executing 4 times
Assume.assumeFalse("skipping for docker tests", useExternalService);
Assume.assumeFalse("skipping for sticky off", disableStickyExecution);
WorkflowReplayer.replayWorkflowExecutionFromResource(
"testAsyncActivityRetryHistory.json", TestAsyncActivityRetry.class);
}
/**
* Tests that history whose markers were generated without headers (before the marker header
* change) still replays correctly with the current client.
*/
@Test
public void testMutableSideEffectReplay() throws Exception {
// Run only in the "Docker Sticky OFF" parameterization so the replay executes once, not 4 times
if (!testName.getMethodName().equals("testMutableSideEffectReplay[Docker Sticky OFF]")) {
return;
}
WorkflowReplayer.replayWorkflowExecutionFromResource(
"testMutableSideEffectBackwardCompatibility.json", TestMutableSideEffectWorkflowImpl.class);
}
public static class TestAsyncActivityRetryOptionsChange implements TestWorkflow1 {
private TestActivities activities;
@Override
public String execute(String taskList) {
ActivityOptions.Builder options =
new ActivityOptions.Builder()
.setTaskList(taskList)
.setHeartbeatTimeout(Duration.ofSeconds(5))
.setScheduleToCloseTimeout(Duration.ofSeconds(5))
.setScheduleToStartTimeout(Duration.ofSeconds(5))
.setStartToCloseTimeout(Duration.ofSeconds(10));
if (Workflow.isReplaying()) {
options.setRetryOptions(
new RetryOptions.Builder()
.setExpiration(Duration.ofSeconds(100))
.setMaximumInterval(Duration.ofSeconds(1))
.setInitialInterval(Duration.ofSeconds(1))
.setDoNotRetry(NullPointerException.class)
.setMaximumAttempts(3)
.build());
} else {
options.setRetryOptions(
new RetryOptions.Builder()
.setExpiration(Duration.ofSeconds(10))
.setMaximumInterval(Duration.ofSeconds(1))
.setInitialInterval(Duration.ofSeconds(1))
.setMaximumAttempts(2)
.setDoNotRetry(NullPointerException.class)
.build());
}
this.activities = Workflow.newActivityStub(TestActivities.class, options.build());
Async.procedure(activities::throwIO).get();
return "ignored";
}
}
@Test
public void testAsyncActivityRetryOptionsChange() {
startWorkerFor(TestAsyncActivityRetryOptionsChange.class);
TestWorkflow1 workflowStub =
workflowClient.newWorkflowStub(
TestWorkflow1.class, newWorkflowOptionsBuilder(taskList).build());
try {
workflowStub.execute(taskList);
fail("unreachable");
} catch (WorkflowException e) {
assertTrue(e.getCause().getCause() instanceof IOException);
}
assertEquals(activitiesImpl.toString(), 2, activitiesImpl.invocations.size());
}
public static class TestHeartbeatTimeoutDetails implements TestWorkflow1 {
@Override
public String execute(String taskList) {
ActivityOptions options =
new ActivityOptions.Builder()
.setTaskList(taskList)
.setHeartbeatTimeout(Duration.ofSeconds(1)) // short heartbeat timeout;
.setScheduleToCloseTimeout(Duration.ofSeconds(5))
.build();
TestActivities activities = Workflow.newActivityStub(TestActivities.class, options);
try {
// false for second argument means to heartbeat once to set details and then stop.
activities.activityWithDelay(5000, false);
} catch (ActivityTimeoutException e) {
assertEquals(TimeoutType.HEARTBEAT, e.getTimeoutType());
return e.getDetails(String.class);
}
throw new RuntimeException("unreachable");
}
}
@Test
public void testHeartbeatTimeoutDetails() {
startWorkerFor(TestHeartbeatTimeoutDetails.class);
TestWorkflow1 workflowStub =
workflowClient.newWorkflowStub(
TestWorkflow1.class, newWorkflowOptionsBuilder(taskList).build());
String result = workflowStub.execute(taskList);
assertEquals("heartbeatValue", result);
}
@Test
public void testSyncUntypedAndStackTrace() {
startWorkerFor(TestSyncWorkflowImpl.class);
WorkflowStub workflowStub =
workflowClient.newUntypedWorkflowStub(
"TestWorkflow1::execute", newWorkflowOptionsBuilder(taskList).build());
WorkflowExecution execution = workflowStub.start(taskList);
testUntypedAndStackTraceHelper(workflowStub, execution);
}
@Test
public void testUntypedAsyncStart() throws Exception {
startWorkerFor(TestSyncWorkflowImpl.class);
WorkflowStub workflowStub =
workflowClient.newUntypedWorkflowStub(
"TestWorkflow1::execute", newWorkflowOptionsBuilder(taskList).build());
CompletableFuture<WorkflowExecution> future = workflowStub.startAsync(taskList);
testUntypedAndStackTraceHelper(workflowStub, future.get());
}
@Test
public void testUntypedAsyncStartWithTimeout() throws Exception {
startWorkerFor(TestSyncWorkflowImpl.class);
WorkflowStub workflowStub =
workflowClient.newUntypedWorkflowStub(
"TestWorkflow1::execute", newWorkflowOptionsBuilder(taskList).build());
Long timeout = Long.valueOf(200);
CompletableFuture<WorkflowExecution> future =
workflowStub.startAsyncWithTimeout(timeout, TimeUnit.MILLISECONDS, taskList);
testUntypedAndStackTraceHelper(workflowStub, future.get());
}
@Test
public void testUntypedAsyncStartFailed() throws InterruptedException {
startWorkerFor(TestSyncWorkflowImpl.class);
String testWorkflowID = "test-untyped-async-failed";
WorkflowStub workflowStub =
workflowClient.newUntypedWorkflowStub(
"TestWorkflow1::execute",
newWorkflowOptionsBuilder(taskList).setWorkflowId(testWorkflowID).build());
workflowStub.start(taskList);
try {
workflowStub.startAsync(taskList);
fail("unreachable");
} catch (DuplicateWorkflowException e) {
// expected error from stub reuse
}
WorkflowStub newWorkflowStub =
workflowClient.newUntypedWorkflowStub(
"TestWorkflow1::execute",
newWorkflowOptionsBuilder(taskList).setWorkflowId(testWorkflowID).build());
CompletableFuture<WorkflowExecution> future = newWorkflowStub.startAsync(taskList);
try {
future.get();
} catch (ExecutionException e) {
assertTrue(e.getCause() instanceof WorkflowExecutionAlreadyStartedError);
}
}
@Test
public void testUntypedAsyncStartAndGetResult() throws InterruptedException {
startWorkerFor(TestSyncWorkflowImpl.class);
String wfType = "TestWorkflow1::execute";
long timeoutMillis = 5000;
long startTime = System.currentTimeMillis();
WorkflowStub startStub =
workflowClient.newUntypedWorkflowStub(wfType, newWorkflowOptionsBuilder(taskList).build());
CompletableFuture<String> resultFuture =
startStub
.startAsyncWithTimeout(timeoutMillis, TimeUnit.MILLISECONDS, taskList)
.thenCompose(
execution -> {
long remainingTimeMillis =
timeoutMillis - (System.currentTimeMillis() - startTime);
if (remainingTimeMillis <= 0) {
CompletableFuture<String> f = new CompletableFuture<>();
f.completeExceptionally(new TimeoutException());
return f;
}
WorkflowStub resultStub =
workflowClient.newUntypedWorkflowStub(execution, Optional.of(wfType));
return resultStub.getResultAsync(
remainingTimeMillis, TimeUnit.MILLISECONDS, String.class);
});
try {
assertEquals("activity10", resultFuture.get());
} catch (ExecutionException e) {
fail("unreachable");
}
}
private void testUntypedAndStackTraceHelper(
WorkflowStub workflowStub, WorkflowExecution execution) {
sleep(Duration.ofMillis(500));
String stackTrace = workflowStub.query(WorkflowClient.QUERY_TYPE_STACK_TRACE, String.class);
assertTrue(stackTrace, stackTrace.contains("WorkflowTest$TestSyncWorkflowImpl.execute"));
assertTrue(stackTrace, stackTrace.contains("activityWithDelay"));
// Test stub created from workflow execution.
workflowStub = workflowClient.newUntypedWorkflowStub(execution, workflowStub.getWorkflowType());
stackTrace = workflowStub.query(WorkflowClient.QUERY_TYPE_STACK_TRACE, String.class);
assertTrue(stackTrace, stackTrace.contains("WorkflowTest$TestSyncWorkflowImpl.execute"));
assertTrue(stackTrace, stackTrace.contains("activityWithDelay"));
String result = workflowStub.getResult(String.class);
assertEquals("activity10", result);
}
public static class TestCancellationForWorkflowsWithFailedPromises implements TestWorkflow1 {
@Override
public String execute(String taskList) {
Async.function(
() -> {
throw new UncheckedExecutionException(new Exception("Oh noo!"));
});
Async.function(
() -> {
throw new UncheckedExecutionException(new Exception("Oh noo again!"));
});
Workflow.await(() -> false);
fail("unreachable");
return "done";
}
}
@Test
public void workflowsWithFailedPromisesCanBeCancelled() {
startWorkerFor(TestCancellationForWorkflowsWithFailedPromises.class);
WorkflowStub client =
workflowClient.newUntypedWorkflowStub(
"TestWorkflow1::execute", newWorkflowOptionsBuilder(taskList).build());
client.start(taskList);
client.cancel();
try {
client.getResult(String.class);
fail("unreachable");
} catch (CancellationException ignored) {
}
}
@Test
public void testWorkflowCancellation() {
startWorkerFor(TestSyncWorkflowImpl.class);
WorkflowStub client =
workflowClient.newUntypedWorkflowStub(
"TestWorkflow1::execute", newWorkflowOptionsBuilder(taskList).build());
client.start(taskList);
client.cancel();
try {
client.getResult(String.class);
fail("unreachable");
} catch (CancellationException ignored) {
}
}
public static class TestCancellationScopePromise implements TestWorkflow1 {
@Override
public String execute(String taskList) {
Promise<String> cancellationRequest = CancellationScope.current().getCancellationRequest();
cancellationRequest.get();
return "done";
}
}
@Test
public void testWorkflowCancellationScopePromise() {
startWorkerFor(TestCancellationScopePromise.class);
WorkflowStub client =
workflowClient.newUntypedWorkflowStub(
"TestWorkflow1::execute", newWorkflowOptionsBuilder(taskList).build());
client.start(taskList);
client.cancel();
try {
client.getResult(String.class);
fail("unreachable");
} catch (CancellationException ignored) {
}
}
public static class TestDetachedCancellationScope implements TestWorkflow1 {
@Override
public String execute(String taskList) {
TestActivities testActivities =
Workflow.newActivityStub(TestActivities.class, newActivityOptions1(taskList));
try {
testActivities.activityWithDelay(100000, true);
fail("unreachable");
} catch (CancellationException e) {
Workflow.newDetachedCancellationScope(() -> assertEquals(1, testActivities.activity1(1)))
.run();
}
try {
Workflow.sleep(Duration.ofHours(1));
fail("unreachable");
} catch (CancellationException e) {
Workflow.newDetachedCancellationScope(
() -> assertEquals("a12", testActivities.activity2("a1", 2)))
.run();
}
try {
Workflow.newTimer(Duration.ofHours(1)).get();
fail("unreachable");
} catch (CancellationException e) {
Workflow.newDetachedCancellationScope(
() -> assertEquals("a123", testActivities.activity3("a1", 2, 3)))
.run();
}
return "result";
}
}
@Test
public void testDetachedScope() {
startWorkerFor(TestDetachedCancellationScope.class);
WorkflowStub client =
workflowClient.newUntypedWorkflowStub(
"TestWorkflow1::execute", newWorkflowOptionsBuilder(taskList).build());
client.start(taskList);
sleep(Duration.ofMillis(500)); // To let activityWithDelay start.
client.cancel();
try {
client.getResult(String.class);
fail("unreachable");
} catch (CancellationException ignored) {
}
activitiesImpl.assertInvocations("activityWithDelay", "activity1", "activity2", "activity3");
}
public interface TestContinueAsNew {
@WorkflowMethod
int execute(int count, String continueAsNewTaskList);
}
public static class TestContinueAsNewImpl implements TestContinueAsNew {
@Override
public int execute(int count, String continueAsNewTaskList) {
String taskList = Workflow.getWorkflowInfo().getTaskList();
if (count == 0) {
assertEquals(continueAsNewTaskList, taskList);
return 111;
}
ContinueAsNewOptions options =
new ContinueAsNewOptions.Builder().setTaskList(continueAsNewTaskList).build();
TestContinueAsNew next = Workflow.newContinueAsNewStub(TestContinueAsNew.class, options);
next.execute(count - 1, continueAsNewTaskList);
throw new RuntimeException("unreachable");
}
}
@Test
public void testContinueAsNew() {
Worker w2;
String continuedTaskList = this.taskList + "_continued";
if (useExternalService) {
w2 = workerFactory.newWorker(continuedTaskList);
} else {
w2 = testEnvironment.newWorker(continuedTaskList);
}
w2.registerWorkflowImplementationTypes(TestContinueAsNewImpl.class);
startWorkerFor(TestContinueAsNewImpl.class);
TestContinueAsNew client =
workflowClient.newWorkflowStub(
TestContinueAsNew.class, newWorkflowOptionsBuilder(this.taskList).build());
int result = client.execute(4, continuedTaskList);
assertEquals(111, result);
tracer.setExpected("continueAsNew", "continueAsNew", "continueAsNew", "continueAsNew");
}
public static class TestAsyncActivityWorkflowImpl implements TestWorkflow1 {
@Override
public String execute(String taskList) {
TestActivities testActivities =
Workflow.newActivityStub(TestActivities.class, newActivityOptions2());
Promise<String> a = Async.function(testActivities::activity);
Promise<Integer> a1 = Async.function(testActivities::activity1, 1);
Promise<String> a2 = Async.function(testActivities::activity2, "1", 2);
Promise<String> a3 = Async.function(testActivities::activity3, "1", 2, 3);
Promise<String> a4 = Async.function(testActivities::activity4, "1", 2, 3, 4);
Promise<String> a5 = Async.function(testActivities::activity5, "1", 2, 3, 4, 5);
Promise<String> a6 = Async.function(testActivities::activity6, "1", 2, 3, 4, 5, 6);
assertEquals("activity", a.get());
assertEquals(1, (int) a1.get());
assertEquals("12", a2.get());
assertEquals("123", a3.get());
assertEquals("1234", a4.get());
assertEquals("12345", a5.get());
assertEquals("123456", a6.get());
Async.procedure(testActivities::proc).get();
Async.procedure(testActivities::proc1, "1").get();
Async.procedure(testActivities::proc2, "1", 2).get();
Async.procedure(testActivities::proc3, "1", 2, 3).get();
Async.procedure(testActivities::proc4, "1", 2, 3, 4).get();
Async.procedure(testActivities::proc5, "1", 2, 3, 4, 5).get();
Async.procedure(testActivities::proc6, "1", 2, 3, 4, 5, 6).get();
// Test serialization of generic data structure
List<UUID> uuids = new ArrayList<>();
uuids.add(Workflow.randomUUID());
uuids.add(Workflow.randomUUID());
List<UUID> uuidsResult = Async.function(testActivities::activityUUIDList, uuids).get();
assertEquals(uuids, uuidsResult);
return "workflow";
}
}
@Test
public void testAsyncActivity() {
startWorkerFor(TestAsyncActivityWorkflowImpl.class);
TestWorkflow1 client =
workflowClient.newWorkflowStub(
TestWorkflow1.class, newWorkflowOptionsBuilder(taskList).build());
String result = client.execute(taskList);
assertEquals("workflow", result);
assertEquals("proc", activitiesImpl.procResult.get(0));
assertEquals("1", activitiesImpl.procResult.get(1));
assertEquals("12", activitiesImpl.procResult.get(2));
assertEquals("123", activitiesImpl.procResult.get(3));
assertEquals("1234", activitiesImpl.procResult.get(4));
assertEquals("12345", activitiesImpl.procResult.get(5));
assertEquals("123456", activitiesImpl.procResult.get(6));
}
public static class TestAsyncUtypedActivityWorkflowImpl implements TestWorkflow1 {
@Override
public String execute(String taskList) {
ActivityStub testActivities = Workflow.newUntypedActivityStub(newActivityOptions2());
Promise<String> a =
Async.function(testActivities::<String>execute, "TestActivities::activity", String.class);
Promise<String> a1 =
Async.function(
testActivities::<String>execute,
"customActivity1",
String.class,
"1"); // name overridden in annotation
Promise<String> a2 =
Async.function(
testActivities::<String>execute, "TestActivities::activity2", String.class, "1", 2);
Promise<String> a3 =
Async.function(
testActivities::<String>execute,
"TestActivities::activity3",
String.class,
"1",
2,
3);
Promise<String> a4 =
Async.function(
testActivities::<String>execute,
"TestActivities::activity4",
String.class,
"1",
2,
3,
4);
assertEquals("activity", a.get());
assertEquals("1", a1.get());
assertEquals("12", a2.get());
assertEquals("123", a3.get());
assertEquals("1234", a4.get());
Async.procedure(testActivities::<Void>execute, "TestActivities::proc", Void.class).get();
Async.procedure(testActivities::<Void>execute, "TestActivities::proc1", Void.class, "1")
.get();
Async.procedure(testActivities::<Void>execute, "TestActivities::proc2", Void.class, "1", 2)
.get();
Async.procedure(testActivities::<Void>execute, "TestActivities::proc3", Void.class, "1", 2, 3)
.get();
Async.procedure(
testActivities::<Void>execute, "TestActivities::proc4", Void.class, "1", 2, 3, 4)
.get();
return "workflow";
}
}
@Test
public void testAsyncUntypedActivity() {
startWorkerFor(TestAsyncUtypedActivityWorkflowImpl.class);
TestWorkflow1 client =
workflowClient.newWorkflowStub(
TestWorkflow1.class, newWorkflowOptionsBuilder(taskList).build());
String result = client.execute(taskList);
assertEquals("workflow", result);
assertEquals("proc", activitiesImpl.procResult.get(0));
assertEquals("1", activitiesImpl.procResult.get(1));
assertEquals("12", activitiesImpl.procResult.get(2));
assertEquals("123", activitiesImpl.procResult.get(3));
assertEquals("1234", activitiesImpl.procResult.get(4));
}
public static class TestAsyncUtypedActivity2WorkflowImpl implements TestWorkflow1 {
@Override
public String execute(String taskList) {
ActivityStub testActivities = Workflow.newUntypedActivityStub(newActivityOptions2());
Promise<String> a = testActivities.executeAsync("TestActivities::activity", String.class);
Promise<String> a1 =
testActivities.executeAsync(
"customActivity1", String.class, "1"); // name overridden in annotation
Promise<String> a2 =
testActivities.executeAsync("TestActivities::activity2", String.class, "1", 2);
Promise<String> a3 =
testActivities.executeAsync("TestActivities::activity3", String.class, "1", 2, 3);
Promise<String> a4 =
testActivities.executeAsync("TestActivities::activity4", String.class, "1", 2, 3, 4);
Promise<String> a5 =
testActivities.executeAsync("TestActivities::activity5", String.class, "1", 2, 3, 4, 5);
Promise<String> a6 =
testActivities.executeAsync(
"TestActivities::activity6", String.class, "1", 2, 3, 4, 5, 6);
assertEquals("activity", a.get());
assertEquals("1", a1.get());
assertEquals("12", a2.get());
assertEquals("123", a3.get());
assertEquals("1234", a4.get());
assertEquals("12345", a5.get());
assertEquals("123456", a6.get());
testActivities.executeAsync("TestActivities::proc", Void.class).get();
testActivities.executeAsync("TestActivities::proc1", Void.class, "1").get();
testActivities.executeAsync("TestActivities::proc2", Void.class, "1", 2).get();
testActivities.executeAsync("TestActivities::proc3", Void.class, "1", 2, 3).get();
testActivities.executeAsync("TestActivities::proc4", Void.class, "1", 2, 3, 4).get();
testActivities.executeAsync("TestActivities::proc5", Void.class, "1", 2, 3, 4, 5).get();
testActivities.executeAsync("TestActivities::proc6", Void.class, "1", 2, 3, 4, 5, 6).get();
return "workflow";
}
}
@Test
public void testAsyncUntyped2Activity() {
startWorkerFor(TestAsyncUtypedActivity2WorkflowImpl.class);
TestWorkflow1 client =
workflowClient.newWorkflowStub(
TestWorkflow1.class, newWorkflowOptionsBuilder(taskList).build());
String result = client.execute(taskList);
assertEquals("workflow", result);
assertEquals("proc", activitiesImpl.procResult.get(0));
assertEquals("1", activitiesImpl.procResult.get(1));
assertEquals("12", activitiesImpl.procResult.get(2));
assertEquals("123", activitiesImpl.procResult.get(3));
assertEquals("1234", activitiesImpl.procResult.get(4));
assertEquals("12345", activitiesImpl.procResult.get(5));
assertEquals("123456", activitiesImpl.procResult.get(6));
}
private void assertResult(String expected, WorkflowExecution execution) {
String result =
workflowClient.newUntypedWorkflowStub(execution, Optional.empty()).getResult(String.class);
assertEquals(expected, result);
}
private void assertResult(int expected, WorkflowExecution execution) {
int result =
workflowClient.newUntypedWorkflowStub(execution, Optional.empty()).getResult(int.class);
assertEquals(expected, result);
}
private void waitForProc(WorkflowExecution execution) {
workflowClient.newUntypedWorkflowStub(execution, Optional.empty()).getResult(Void.class);
}
@Test
public void testStart() {
startWorkerFor(TestMultiargsWorkflowsImpl.class);
WorkflowOptions workflowOptions = newWorkflowOptionsBuilder(taskList).build();
TestMultiargsWorkflowsFunc stubF =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsFunc.class, workflowOptions);
assertResult("func", WorkflowClient.start(stubF::func));
assertEquals("func", stubF.func()); // Check that duplicated start just returns the result.
TestMultiargsWorkflowsFunc1 stubF1 =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsFunc1.class);
if (!useExternalService) {
// Use worker that polls on a task list configured through @WorkflowMethod annotation of func1
assertResult(1, WorkflowClient.start(stubF1::func1, 1));
assertEquals(1, stubF1.func1(1)); // Check that duplicated start just returns the result.
}
// Check that duplicated start is not allowed for AllowDuplicate IdReusePolicy
TestMultiargsWorkflowsFunc2 stubF2 =
workflowClient.newWorkflowStub(
TestMultiargsWorkflowsFunc2.class,
newWorkflowOptionsBuilder(taskList)
.setWorkflowIdReusePolicy(WorkflowIdReusePolicy.AllowDuplicate)
.build());
assertResult("12", WorkflowClient.start(stubF2::func2, "1", 2));
try {
stubF2.func2("1", 2);
fail("unreachable");
} catch (DuplicateWorkflowException e) {
// expected
}
TestMultiargsWorkflowsFunc3 stubF3 =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsFunc3.class, workflowOptions);
assertResult("123", WorkflowClient.start(stubF3::func3, "1", 2, 3));
TestMultiargsWorkflowsFunc4 stubF4 =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsFunc4.class, workflowOptions);
assertResult("1234", WorkflowClient.start(stubF4::func4, "1", 2, 3, 4));
TestMultiargsWorkflowsFunc5 stubF5 =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsFunc5.class, workflowOptions);
assertResult("12345", WorkflowClient.start(stubF5::func5, "1", 2, 3, 4, 5));
TestMultiargsWorkflowsFunc6 stubF6 =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsFunc6.class, workflowOptions);
assertResult("123456", WorkflowClient.start(stubF6::func6, "1", 2, 3, 4, 5, 6));
TestMultiargsWorkflowsProc stubP =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsProc.class, workflowOptions);
waitForProc(WorkflowClient.start(stubP::proc));
TestMultiargsWorkflowsProc1 stubP1 =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsProc1.class, workflowOptions);
waitForProc(WorkflowClient.start(stubP1::proc1, "1"));
TestMultiargsWorkflowsProc2 stubP2 =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsProc2.class, workflowOptions);
waitForProc(WorkflowClient.start(stubP2::proc2, "1", 2));
TestMultiargsWorkflowsProc3 stubP3 =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsProc3.class, workflowOptions);
waitForProc(WorkflowClient.start(stubP3::proc3, "1", 2, 3));
TestMultiargsWorkflowsProc4 stubP4 =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsProc4.class, workflowOptions);
waitForProc(WorkflowClient.start(stubP4::proc4, "1", 2, 3, 4));
TestMultiargsWorkflowsProc5 stubP5 =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsProc5.class, workflowOptions);
waitForProc(WorkflowClient.start(stubP5::proc5, "1", 2, 3, 4, 5));
TestMultiargsWorkflowsProc6 stubP6 =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsProc6.class, workflowOptions);
waitForProc(WorkflowClient.start(stubP6::proc6, "1", 2, 3, 4, 5, 6));
assertEquals("proc", stubP.query());
assertEquals("1", stubP1.query());
assertEquals("12", stubP2.query());
assertEquals("123", stubP3.query());
assertEquals("1234", stubP4.query());
assertEquals("12345", stubP5.query());
assertEquals("123456", stubP6.query());
// Test execution from untyped stub.
workflowOptions =
newWorkflowOptionsBuilder(taskList).setWorkflowId(UUID.randomUUID().toString()).build();
TestMultiargsWorkflowsFunc stub2 =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsFunc.class, workflowOptions);
WorkflowStub untypedStub = WorkflowStub.fromTyped(stub2);
untypedStub.start();
String result = untypedStub.getResult(String.class);
assertEquals("func", result);
}
@Test
public void testMemo() {
if (testEnvironment != null) {
String testMemoKey = "testKey";
String testMemoValue = "testValue";
Map<String, Object> memo = new HashMap<>();
memo.put(testMemoKey, testMemoValue);
startWorkerFor(TestMultiargsWorkflowsImpl.class);
WorkflowOptions workflowOptions = newWorkflowOptionsBuilder(taskList).setMemo(memo).build();
TestMultiargsWorkflowsFunc stubF =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsFunc.class, workflowOptions);
WorkflowExecution executionF = WorkflowClient.start(stubF::func);
GetWorkflowExecutionHistoryResponse historyResp =
WorkflowExecutionUtils.getHistoryPage(
new byte[] {}, testEnvironment.getWorkflowService(), DOMAIN, executionF);
HistoryEvent startEvent = historyResp.history.getEvents().get(0);
Memo memoFromEvent = startEvent.workflowExecutionStartedEventAttributes.getMemo();
byte[] memoBytes = memoFromEvent.getFields().get(testMemoKey).array();
String memoRetrieved =
JsonDataConverter.getInstance().fromData(memoBytes, String.class, String.class);
assertEquals(testMemoValue, memoRetrieved);
}
}
@Test
public void testSearchAttributes() {
if (testEnvironment != null) {
String testKeyString = "CustomKeywordField";
String testValueString = "testKeyword";
String testKeyInteger = "CustomIntField";
Integer testValueInteger = 1;
String testKeyDateTime = "CustomDateTimeField";
LocalDateTime testValueDateTime = LocalDateTime.now();
String testKeyBool = "CustomBoolField";
Boolean testValueBool = true;
String testKeyDouble = "CustomDoubleField";
Double testValueDouble = 1.23;
      // Add more types to test as needed.
Map<String, Object> searchAttr = new HashMap<>();
searchAttr.put(testKeyString, testValueString);
searchAttr.put(testKeyInteger, testValueInteger);
searchAttr.put(testKeyDateTime, testValueDateTime);
searchAttr.put(testKeyBool, testValueBool);
searchAttr.put(testKeyDouble, testValueDouble);
startWorkerFor(TestMultiargsWorkflowsImpl.class);
WorkflowOptions workflowOptions =
newWorkflowOptionsBuilder(taskList).setSearchAttributes(searchAttr).build();
TestMultiargsWorkflowsFunc stubF =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsFunc.class, workflowOptions);
WorkflowExecution executionF = WorkflowClient.start(stubF::func);
GetWorkflowExecutionHistoryResponse historyResp =
WorkflowExecutionUtils.getHistoryPage(
new byte[] {}, testEnvironment.getWorkflowService(), DOMAIN, executionF);
HistoryEvent startEvent = historyResp.history.getEvents().get(0);
SearchAttributes searchAttrFromEvent =
startEvent.workflowExecutionStartedEventAttributes.getSearchAttributes();
byte[] searchAttrStringBytes =
searchAttrFromEvent.getIndexedFields().get(testKeyString).array();
String retrievedString =
JsonDataConverter.getInstance()
.fromData(searchAttrStringBytes, String.class, String.class);
assertEquals(testValueString, retrievedString);
byte[] searchAttrIntegerBytes =
searchAttrFromEvent.getIndexedFields().get(testKeyInteger).array();
Integer retrievedInteger =
JsonDataConverter.getInstance()
.fromData(searchAttrIntegerBytes, Integer.class, Integer.class);
assertEquals(testValueInteger, retrievedInteger);
byte[] searchAttrDateTimeBytes =
searchAttrFromEvent.getIndexedFields().get(testKeyDateTime).array();
LocalDateTime retrievedDateTime =
JsonDataConverter.getInstance()
.fromData(searchAttrDateTimeBytes, LocalDateTime.class, LocalDateTime.class);
assertEquals(testValueDateTime, retrievedDateTime);
byte[] searchAttrBoolBytes = searchAttrFromEvent.getIndexedFields().get(testKeyBool).array();
Boolean retrievedBool =
JsonDataConverter.getInstance()
.fromData(searchAttrBoolBytes, Boolean.class, Boolean.class);
assertEquals(testValueBool, retrievedBool);
byte[] searchAttrDoubleBytes =
searchAttrFromEvent.getIndexedFields().get(testKeyDouble).array();
Double retrievedDouble =
JsonDataConverter.getInstance()
.fromData(searchAttrDoubleBytes, Double.class, Double.class);
assertEquals(testValueDouble, retrievedDouble);
}
}
@Test
public void testExecute() throws ExecutionException, InterruptedException {
startWorkerFor(TestMultiargsWorkflowsImpl.class);
WorkflowOptions workflowOptions = newWorkflowOptionsBuilder(taskList).build();
TestMultiargsWorkflowsFunc stubF =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsFunc.class, workflowOptions);
assertEquals("func", WorkflowClient.execute(stubF::func).get());
TestMultiargsWorkflowsFunc1 stubF1 =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsFunc1.class, workflowOptions);
assertEquals(1, (int) WorkflowClient.execute(stubF1::func1, 1).get());
assertEquals(1, stubF1.func1(1)); // Check that duplicated start just returns the result.
TestMultiargsWorkflowsFunc2 stubF2 =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsFunc2.class, workflowOptions);
assertEquals("12", WorkflowClient.execute(stubF2::func2, "1", 2).get());
TestMultiargsWorkflowsFunc3 stubF3 =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsFunc3.class, workflowOptions);
assertEquals("123", WorkflowClient.execute(stubF3::func3, "1", 2, 3).get());
TestMultiargsWorkflowsFunc4 stubF4 =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsFunc4.class, workflowOptions);
assertEquals("1234", WorkflowClient.execute(stubF4::func4, "1", 2, 3, 4).get());
TestMultiargsWorkflowsFunc5 stubF5 =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsFunc5.class, workflowOptions);
assertEquals("12345", WorkflowClient.execute(stubF5::func5, "1", 2, 3, 4, 5).get());
TestMultiargsWorkflowsFunc6 stubF6 =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsFunc6.class, workflowOptions);
assertEquals("123456", WorkflowClient.execute(stubF6::func6, "1", 2, 3, 4, 5, 6).get());
TestMultiargsWorkflowsProc stubP =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsProc.class, workflowOptions);
WorkflowClient.execute(stubP::proc).get();
TestMultiargsWorkflowsProc1 stubP1 =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsProc1.class, workflowOptions);
WorkflowClient.execute(stubP1::proc1, "1").get();
TestMultiargsWorkflowsProc2 stubP2 =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsProc2.class, workflowOptions);
WorkflowClient.execute(stubP2::proc2, "1", 2).get();
TestMultiargsWorkflowsProc3 stubP3 =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsProc3.class, workflowOptions);
WorkflowClient.execute(stubP3::proc3, "1", 2, 3).get();
TestMultiargsWorkflowsProc4 stubP4 =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsProc4.class, workflowOptions);
WorkflowClient.execute(stubP4::proc4, "1", 2, 3, 4).get();
TestMultiargsWorkflowsProc5 stubP5 =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsProc5.class, workflowOptions);
WorkflowClient.execute(stubP5::proc5, "1", 2, 3, 4, 5).get();
TestMultiargsWorkflowsProc6 stubP6 =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsProc6.class, workflowOptions);
WorkflowClient.execute(stubP6::proc6, "1", 2, 3, 4, 5, 6).get();
assertEquals("proc", stubP.query());
assertEquals("1", stubP1.query());
assertEquals("12", stubP2.query());
assertEquals("123", stubP3.query());
assertEquals("1234", stubP4.query());
assertEquals("12345", stubP5.query());
assertEquals("123456", stubP6.query());
}
@Test
  public void testWorkflowIdReusePolicy() {
startWorkerFor(TestMultiargsWorkflowsImpl.class);
    // Without setting WorkflowIdReusePolicy, the default semantics are to return the result of the previous run.
String workflowID = UUID.randomUUID().toString();
WorkflowOptions workflowOptions =
newWorkflowOptionsBuilder(taskList).setWorkflowId(workflowID).build();
TestMultiargsWorkflowsFunc1 stubF1_1 =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsFunc1.class, workflowOptions);
assertEquals(1, stubF1_1.func1(1));
TestMultiargsWorkflowsFunc1 stubF1_2 =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsFunc1.class, workflowOptions);
assertEquals(1, stubF1_2.func1(2));
    // Setting WorkflowIdReusePolicy to AllowDuplicate triggers a new run.
workflowOptions =
newWorkflowOptionsBuilder(taskList)
.setWorkflowIdReusePolicy(WorkflowIdReusePolicy.AllowDuplicate)
.setWorkflowId(workflowID)
.build();
TestMultiargsWorkflowsFunc1 stubF1_3 =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsFunc1.class, workflowOptions);
assertEquals(2, stubF1_3.func1(2));
// Setting WorkflowIdReusePolicy to RejectDuplicate or AllowDuplicateFailedOnly does not work as
// expected. See https://github.com/uber/cadence-java-client/issues/295.
}
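  // A hypothetical, never-invoked sketch (not part of the original tests): per the documented
  // semantics, RejectDuplicate would be expected to fail a second start that reuses the same
  // workflow ID once https://github.com/uber/cadence-java-client/issues/295 is resolved. The
  // method name below is illustrative only; it reuses only APIs already exercised in this file.
  @SuppressWarnings("unused")
  private void workflowIdRejectDuplicateSketch() {
    startWorkerFor(TestMultiargsWorkflowsImpl.class);
    WorkflowOptions options =
        newWorkflowOptionsBuilder(taskList)
            .setWorkflowIdReusePolicy(WorkflowIdReusePolicy.RejectDuplicate)
            .setWorkflowId(UUID.randomUUID().toString())
            .build();
    TestMultiargsWorkflowsFunc1 first =
        workflowClient.newWorkflowStub(TestMultiargsWorkflowsFunc1.class, options);
    assertEquals(1, first.func1(1));
    TestMultiargsWorkflowsFunc1 second =
        workflowClient.newWorkflowStub(TestMultiargsWorkflowsFunc1.class, options);
    try {
      second.func1(2);
    } catch (WorkflowException e) {
      // Expected once the server rejects the duplicate workflow ID.
    }
  }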
public static class TestChildAsyncWorkflow implements TestWorkflow1 {
@Override
public String execute(String taskList) {
ChildWorkflowOptions workflowOptions =
new ChildWorkflowOptions.Builder().setTaskList(taskList).build();
TestMultiargsWorkflowsFunc stubF =
Workflow.newChildWorkflowStub(TestMultiargsWorkflowsFunc.class, workflowOptions);
assertEquals("func", Async.function(stubF::func).get());
TestMultiargsWorkflowsFunc1 stubF1 =
Workflow.newChildWorkflowStub(TestMultiargsWorkflowsFunc1.class, workflowOptions);
assertEquals(1, (int) Async.function(stubF1::func1, 1).get());
TestMultiargsWorkflowsFunc2 stubF2 =
Workflow.newChildWorkflowStub(TestMultiargsWorkflowsFunc2.class, workflowOptions);
assertEquals("12", Async.function(stubF2::func2, "1", 2).get());
TestMultiargsWorkflowsFunc3 stubF3 =
Workflow.newChildWorkflowStub(TestMultiargsWorkflowsFunc3.class, workflowOptions);
assertEquals("123", Async.function(stubF3::func3, "1", 2, 3).get());
TestMultiargsWorkflowsFunc4 stubF4 =
Workflow.newChildWorkflowStub(TestMultiargsWorkflowsFunc4.class, workflowOptions);
assertEquals("1234", Async.function(stubF4::func4, "1", 2, 3, 4).get());
TestMultiargsWorkflowsFunc5 stubF5 =
Workflow.newChildWorkflowStub(TestMultiargsWorkflowsFunc5.class, workflowOptions);
assertEquals("12345", Async.function(stubF5::func5, "1", 2, 3, 4, 5).get());
TestMultiargsWorkflowsFunc6 stubF6 =
Workflow.newChildWorkflowStub(TestMultiargsWorkflowsFunc6.class, workflowOptions);
assertEquals("123456", Async.function(stubF6::func6, "1", 2, 3, 4, 5, 6).get());
TestMultiargsWorkflowsProc stubP =
Workflow.newChildWorkflowStub(TestMultiargsWorkflowsProc.class, workflowOptions);
Async.procedure(stubP::proc).get();
TestMultiargsWorkflowsProc1 stubP1 =
Workflow.newChildWorkflowStub(TestMultiargsWorkflowsProc1.class, workflowOptions);
Async.procedure(stubP1::proc1, "1").get();
TestMultiargsWorkflowsProc2 stubP2 =
Workflow.newChildWorkflowStub(TestMultiargsWorkflowsProc2.class, workflowOptions);
Async.procedure(stubP2::proc2, "1", 2).get();
TestMultiargsWorkflowsProc3 stubP3 =
Workflow.newChildWorkflowStub(TestMultiargsWorkflowsProc3.class, workflowOptions);
Async.procedure(stubP3::proc3, "1", 2, 3).get();
TestMultiargsWorkflowsProc4 stubP4 =
Workflow.newChildWorkflowStub(TestMultiargsWorkflowsProc4.class, workflowOptions);
Async.procedure(stubP4::proc4, "1", 2, 3, 4).get();
TestMultiargsWorkflowsProc5 stubP5 =
Workflow.newChildWorkflowStub(TestMultiargsWorkflowsProc5.class, workflowOptions);
Async.procedure(stubP5::proc5, "1", 2, 3, 4, 5).get();
TestMultiargsWorkflowsProc6 stubP6 =
Workflow.newChildWorkflowStub(TestMultiargsWorkflowsProc6.class, workflowOptions);
Async.procedure(stubP6::proc6, "1", 2, 3, 4, 5, 6).get();
return null;
}
}
@Test
public void testChildAsyncWorkflow() {
startWorkerFor(TestChildAsyncWorkflow.class, TestMultiargsWorkflowsImpl.class);
WorkflowOptions.Builder options = new WorkflowOptions.Builder();
options.setExecutionStartToCloseTimeout(Duration.ofSeconds(200));
options.setTaskStartToCloseTimeout(Duration.ofSeconds(60));
options.setTaskList(taskList);
TestWorkflow1 client = workflowClient.newWorkflowStub(TestWorkflow1.class, options.build());
assertEquals(null, client.execute(taskList));
}
// This workflow is designed specifically for testing some internal logic in Async.procedure
// and ChildWorkflowStubImpl. See comments on testChildAsyncLambdaWorkflow for more details.
public interface WaitOnSignalWorkflow {
@WorkflowMethod()
void execute();
@SignalMethod
void signal(String value);
}
public static class TestWaitOnSignalWorkflowImpl implements WaitOnSignalWorkflow {
private final CompletablePromise<String> signal = Workflow.newPromise();
@Override
public void execute() {
signal.get();
}
@Override
public void signal(String value) {
Workflow.sleep(Duration.ofSeconds(1));
signal.complete(value);
}
}
public static class TestChildAsyncLambdaWorkflow implements TestWorkflow1 {
@Override
public String execute(String taskList) {
ChildWorkflowOptions workflowOptions =
new ChildWorkflowOptions.Builder()
.setExecutionStartToCloseTimeout(Duration.ofSeconds(100))
.setTaskStartToCloseTimeout(Duration.ofSeconds(60))
.setTaskList(taskList)
.build();
WaitOnSignalWorkflow child =
Workflow.newChildWorkflowStub(WaitOnSignalWorkflow.class, workflowOptions);
Promise<Void> promise = Async.procedure(child::execute);
Promise<WorkflowExecution> executionPromise = Workflow.getWorkflowExecution(child);
assertNotNull(executionPromise);
WorkflowExecution execution = executionPromise.get();
assertNotEquals("", execution.getWorkflowId());
assertNotEquals("", execution.getRunId());
child.signal("test");
promise.get();
return null;
}
}
  // The purpose of this test is to exercise the lambda execution logic inside Async.procedure(),
  // which executes on a different thread than the workflow main thread. This is different from
  // executing classes that implement the workflow method interface, which execute on the workflow
  // main thread.
@Test
public void testChildAsyncLambdaWorkflow() {
startWorkerFor(TestChildAsyncLambdaWorkflow.class, TestWaitOnSignalWorkflowImpl.class);
WorkflowOptions.Builder options = new WorkflowOptions.Builder();
options.setExecutionStartToCloseTimeout(Duration.ofSeconds(200));
options.setTaskStartToCloseTimeout(Duration.ofSeconds(60));
options.setTaskList(taskList);
TestWorkflow1 client = workflowClient.newWorkflowStub(TestWorkflow1.class, options.build());
assertEquals(null, client.execute(taskList));
}
public static class TestUntypedChildStubWorkflow implements TestWorkflow1 {
@Override
public String execute(String taskList) {
ChildWorkflowOptions workflowOptions =
new ChildWorkflowOptions.Builder().setTaskList(taskList).build();
ChildWorkflowStub stubF =
Workflow.newUntypedChildWorkflowStub("TestMultiargsWorkflowsFunc::func", workflowOptions);
assertEquals("func", stubF.execute(String.class));
// Workflow type overridden through the @WorkflowMethod.name
ChildWorkflowStub stubF1 = Workflow.newUntypedChildWorkflowStub("func1", workflowOptions);
assertEquals("1", stubF1.execute(String.class, "1"));
ChildWorkflowStub stubF2 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsFunc2::func2", workflowOptions);
assertEquals("12", stubF2.execute(String.class, "1", 2));
ChildWorkflowStub stubF3 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsFunc3::func3", workflowOptions);
assertEquals("123", stubF3.execute(String.class, "1", 2, 3));
ChildWorkflowStub stubF4 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsFunc4::func4", workflowOptions);
assertEquals("1234", stubF4.execute(String.class, "1", 2, 3, 4));
ChildWorkflowStub stubF5 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsFunc5::func5", workflowOptions);
assertEquals("12345", stubF5.execute(String.class, "1", 2, 3, 4, 5));
ChildWorkflowStub stubF6 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsFunc6::func6", workflowOptions);
assertEquals("123456", stubF6.execute(String.class, "1", 2, 3, 4, 5, 6));
ChildWorkflowStub stubP =
Workflow.newUntypedChildWorkflowStub("TestMultiargsWorkflowsProc::proc", workflowOptions);
stubP.execute(Void.class);
ChildWorkflowStub stubP1 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsProc1::proc1", workflowOptions);
stubP1.execute(Void.class, "1");
ChildWorkflowStub stubP2 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsProc2::proc2", workflowOptions);
stubP2.execute(Void.class, "1", 2);
ChildWorkflowStub stubP3 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsProc3::proc3", workflowOptions);
stubP3.execute(Void.class, "1", 2, 3);
ChildWorkflowStub stubP4 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsProc4::proc4", workflowOptions);
stubP4.execute(Void.class, "1", 2, 3, 4);
ChildWorkflowStub stubP5 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsProc5::proc5", workflowOptions);
stubP5.execute(Void.class, "1", 2, 3, 4, 5);
ChildWorkflowStub stubP6 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsProc6::proc6", workflowOptions);
stubP6.execute(Void.class, "1", 2, 3, 4, 5, 6);
return null;
}
}
@Test
public void testUntypedChildStubWorkflow() {
startWorkerFor(TestUntypedChildStubWorkflow.class, TestMultiargsWorkflowsImpl.class);
WorkflowOptions.Builder options = new WorkflowOptions.Builder();
options.setExecutionStartToCloseTimeout(Duration.ofSeconds(200));
options.setTaskStartToCloseTimeout(Duration.ofSeconds(60));
options.setTaskList(taskList);
TestWorkflow1 client = workflowClient.newWorkflowStub(TestWorkflow1.class, options.build());
assertEquals(null, client.execute(taskList));
}
public static class TestUntypedChildStubWorkflowAsync implements TestWorkflow1 {
@Override
public String execute(String taskList) {
ChildWorkflowOptions workflowOptions =
new ChildWorkflowOptions.Builder().setTaskList(taskList).build();
ChildWorkflowStub stubF =
Workflow.newUntypedChildWorkflowStub("TestMultiargsWorkflowsFunc::func", workflowOptions);
assertEquals("func", stubF.executeAsync(String.class).get());
// Workflow type overridden through the @WorkflowMethod.name
ChildWorkflowStub stubF1 = Workflow.newUntypedChildWorkflowStub("func1", workflowOptions);
assertEquals("1", stubF1.executeAsync(String.class, "1").get());
ChildWorkflowStub stubF2 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsFunc2::func2", workflowOptions);
assertEquals("12", stubF2.executeAsync(String.class, "1", 2).get());
ChildWorkflowStub stubF3 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsFunc3::func3", workflowOptions);
assertEquals("123", stubF3.executeAsync(String.class, "1", 2, 3).get());
ChildWorkflowStub stubF4 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsFunc4::func4", workflowOptions);
assertEquals("1234", stubF4.executeAsync(String.class, "1", 2, 3, 4).get());
ChildWorkflowStub stubF5 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsFunc5::func5", workflowOptions);
assertEquals("12345", stubF5.executeAsync(String.class, "1", 2, 3, 4, 5).get());
ChildWorkflowStub stubF6 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsFunc6::func6", workflowOptions);
assertEquals("123456", stubF6.executeAsync(String.class, "1", 2, 3, 4, 5, 6).get());
ChildWorkflowStub stubP =
Workflow.newUntypedChildWorkflowStub("TestMultiargsWorkflowsProc::proc", workflowOptions);
stubP.executeAsync(Void.class).get();
ChildWorkflowStub stubP1 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsProc1::proc1", workflowOptions);
stubP1.executeAsync(Void.class, "1").get();
ChildWorkflowStub stubP2 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsProc2::proc2", workflowOptions);
stubP2.executeAsync(Void.class, "1", 2).get();
ChildWorkflowStub stubP3 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsProc3::proc3", workflowOptions);
stubP3.executeAsync(Void.class, "1", 2, 3).get();
ChildWorkflowStub stubP4 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsProc4::proc4", workflowOptions);
stubP4.executeAsync(Void.class, "1", 2, 3, 4).get();
ChildWorkflowStub stubP5 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsProc5::proc5", workflowOptions);
stubP5.executeAsync(Void.class, "1", 2, 3, 4, 5).get();
ChildWorkflowStub stubP6 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsProc6::proc6", workflowOptions);
stubP6.executeAsync(Void.class, "1", 2, 3, 4, 5, 6).get();
return null;
}
}
@Test
public void testUntypedChildStubWorkflowAsync() {
startWorkerFor(TestUntypedChildStubWorkflowAsync.class, TestMultiargsWorkflowsImpl.class);
WorkflowOptions.Builder options = new WorkflowOptions.Builder();
options.setExecutionStartToCloseTimeout(Duration.ofSeconds(200));
options.setTaskStartToCloseTimeout(Duration.ofSeconds(60));
options.setTaskList(taskList);
TestWorkflow1 client = workflowClient.newWorkflowStub(TestWorkflow1.class, options.build());
assertEquals(null, client.execute(taskList));
}
public static class TestUntypedChildStubWorkflowAsyncInvoke implements TestWorkflow1 {
@Override
public String execute(String taskList) {
ChildWorkflowOptions workflowOptions =
new ChildWorkflowOptions.Builder().setTaskList(taskList).build();
ChildWorkflowStub stubF =
Workflow.newUntypedChildWorkflowStub("TestMultiargsWorkflowsFunc::func", workflowOptions);
assertEquals("func", Async.function(stubF::<String>execute, String.class).get());
// Workflow type overridden through the @WorkflowMethod.name
ChildWorkflowStub stubF1 = Workflow.newUntypedChildWorkflowStub("func1", workflowOptions);
assertEquals("1", Async.function(stubF1::<String>execute, String.class, "1").get());
ChildWorkflowStub stubF2 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsFunc2::func2", workflowOptions);
assertEquals("12", Async.function(stubF2::<String>execute, String.class, "1", 2).get());
ChildWorkflowStub stubF3 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsFunc3::func3", workflowOptions);
assertEquals("123", Async.function(stubF3::<String>execute, String.class, "1", 2, 3).get());
ChildWorkflowStub stubF4 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsFunc4::func4", workflowOptions);
assertEquals(
"1234", Async.function(stubF4::<String>execute, String.class, "1", 2, 3, 4).get());
ChildWorkflowStub stubF5 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsFunc5::func5", workflowOptions);
assertEquals(
"12345", Async.function(stubF5::<String>execute, String.class, "1", 2, 3, 4, 5).get());
ChildWorkflowStub stubP =
Workflow.newUntypedChildWorkflowStub("TestMultiargsWorkflowsProc::proc", workflowOptions);
Async.procedure(stubP::<Void>execute, Void.class).get();
ChildWorkflowStub stubP1 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsProc1::proc1", workflowOptions);
Async.procedure(stubP1::<Void>execute, Void.class, "1").get();
ChildWorkflowStub stubP2 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsProc2::proc2", workflowOptions);
Async.procedure(stubP2::<Void>execute, Void.class, "1", 2).get();
ChildWorkflowStub stubP3 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsProc3::proc3", workflowOptions);
Async.procedure(stubP3::<Void>execute, Void.class, "1", 2, 3).get();
ChildWorkflowStub stubP4 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsProc4::proc4", workflowOptions);
Async.procedure(stubP4::<Void>execute, Void.class, "1", 2, 3, 4).get();
ChildWorkflowStub stubP5 =
Workflow.newUntypedChildWorkflowStub(
"TestMultiargsWorkflowsProc5::proc5", workflowOptions);
Async.procedure(stubP5::<Void>execute, Void.class, "1", 2, 3, 4, 5).get();
return null;
}
}
@Test
public void testUntypedChildStubWorkflowAsyncInvoke() {
startWorkerFor(TestUntypedChildStubWorkflowAsyncInvoke.class, TestMultiargsWorkflowsImpl.class);
WorkflowOptions.Builder options = new WorkflowOptions.Builder();
options.setExecutionStartToCloseTimeout(Duration.ofSeconds(200));
options.setTaskStartToCloseTimeout(Duration.ofSeconds(60));
options.setTaskList(taskList);
TestWorkflow1 client = workflowClient.newWorkflowStub(TestWorkflow1.class, options.build());
assertEquals(null, client.execute(taskList));
}
public static class TestTimerWorkflowImpl implements TestWorkflow2 {
@Override
public String execute(boolean useExternalService) {
Promise<Void> timer1;
Promise<Void> timer2;
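      // Against a real Cadence service short real-time timers are used; the in-memory test
      // service skips time automatically, so much longer durations are used in that case.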
if (useExternalService) {
timer1 = Workflow.newTimer(Duration.ofMillis(700));
timer2 = Workflow.newTimer(Duration.ofMillis(1300));
} else {
timer1 = Workflow.newTimer(Duration.ofSeconds(700));
timer2 = Workflow.newTimer(Duration.ofSeconds(1300));
}
long time = Workflow.currentTimeMillis();
timer1
.thenApply(
r -> {
// Testing that timer can be created from a callback thread.
if (useExternalService) {
Workflow.newTimer(Duration.ofSeconds(10));
} else {
Workflow.newTimer(Duration.ofHours(10));
}
Workflow.currentTimeMillis(); // Testing that time is available here.
return r;
})
.get();
timer1.get();
long slept = Workflow.currentTimeMillis() - time;
// Also checks that rounding up to a second works.
assertTrue(String.valueOf(slept), slept > 1000);
timer2.get();
slept = Workflow.currentTimeMillis() - time;
assertTrue(String.valueOf(slept), slept > 2000);
return "testTimer";
}
@Override
public List<String> getTrace() {
throw new UnsupportedOperationException("not implemented");
}
}
@Test
public void testTimer() {
startWorkerFor(TestTimerWorkflowImpl.class);
WorkflowOptions options;
if (useExternalService) {
options = newWorkflowOptionsBuilder(taskList).build();
} else {
options =
newWorkflowOptionsBuilder(taskList)
.setExecutionStartToCloseTimeout(Duration.ofDays(1))
.build();
}
TestWorkflow2 client = workflowClient.newWorkflowStub(TestWorkflow2.class, options);
String result = client.execute(useExternalService);
assertEquals("testTimer", result);
if (useExternalService) {
tracer.setExpected(
"registerQuery getTrace", "newTimer PT0.7S", "newTimer PT1.3S", "newTimer PT10S");
} else {
tracer.setExpected(
"registerQuery getTrace", "newTimer PT11M40S", "newTimer PT21M40S", "newTimer PT10H");
}
}
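  // Retry options used by TestAsyncRetryWorkflowImpl below: a fixed 1-second interval that
  // expires after 2 seconds.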
private static final RetryOptions retryOptions =
new RetryOptions.Builder()
.setInitialInterval(Duration.ofSeconds(1))
.setMaximumInterval(Duration.ofSeconds(1))
.setExpiration(Duration.ofSeconds(2))
.setBackoffCoefficient(1)
.build();
public static class TestAsyncRetryWorkflowImpl implements TestWorkflow2 {
private final List<String> trace = new ArrayList<>();
@Override
public String execute(boolean useExternalService) {
trace.clear(); // clear because of replay
trace.add("started");
Async.retry(
retryOptions,
() -> {
trace.add("retry at " + Workflow.currentTimeMillis());
return Workflow.newFailedPromise(new IllegalThreadStateException("simulated"));
})
.get();
trace.add("beforeSleep");
Workflow.sleep(60000);
trace.add("done");
return "";
}
@Override
public List<String> getTrace() {
return trace;
}
}
/** @see DeterministicRunnerTest#testRetry() */
@Test
public void testAsyncRetry() {
startWorkerFor(TestAsyncRetryWorkflowImpl.class);
TestWorkflow2 client =
workflowClient.newWorkflowStub(
TestWorkflow2.class, newWorkflowOptionsBuilder(taskList).build());
String result = null;
try {
result = client.execute(useExternalService);
fail("unreachable");
} catch (WorkflowException e) {
assertTrue(e.getCause() instanceof IllegalThreadStateException);
assertEquals("simulated", e.getCause().getMessage());
}
assertNull(result);
List<String> trace = client.getTrace();
assertEquals(trace.toString(), 3, trace.size());
assertEquals("started", trace.get(0));
assertTrue(trace.get(1).startsWith("retry at "));
assertTrue(trace.get(2).startsWith("retry at "));
}
public static class TestAsyncRetryOptionsChangeWorkflow implements TestWorkflow2 {
private final List<String> trace = new ArrayList<>();
@Override
public String execute(boolean useExternalService) {
RetryOptions retryOptions;
if (Workflow.isReplaying()) {
retryOptions =
new RetryOptions.Builder()
.setMaximumInterval(Duration.ofSeconds(1))
.setInitialInterval(Duration.ofSeconds(1))
.setMaximumAttempts(3)
.build();
} else {
retryOptions =
new RetryOptions.Builder()
.setMaximumInterval(Duration.ofSeconds(1))
.setInitialInterval(Duration.ofSeconds(1))
.setMaximumAttempts(2)
.build();
}
trace.clear(); // clear because of replay
trace.add("started");
Async.retry(
retryOptions,
() -> {
trace.add("retry at " + Workflow.currentTimeMillis());
return Workflow.newFailedPromise(new IllegalThreadStateException("simulated"));
})
.get();
trace.add("beforeSleep");
Workflow.sleep(60000);
trace.add("done");
return "";
}
@Override
public List<String> getTrace() {
return trace;
}
}
/** @see DeterministicRunnerTest#testRetry() */
@Test
public void testAsyncRetryOptionsChange() {
startWorkerFor(TestAsyncRetryOptionsChangeWorkflow.class);
TestWorkflow2 client =
workflowClient.newWorkflowStub(
TestWorkflow2.class, newWorkflowOptionsBuilder(taskList).build());
String result = null;
try {
result = client.execute(useExternalService);
fail("unreachable");
} catch (WorkflowException e) {
assertTrue(e.getCause() instanceof IllegalThreadStateException);
assertEquals("simulated", e.getCause().getMessage());
}
assertNull(result);
List<String> trace = client.getTrace();
assertEquals(trace.toString(), 3, trace.size());
assertEquals("started", trace.get(0));
assertTrue(trace.get(1).startsWith("retry at "));
assertTrue(trace.get(2).startsWith("retry at "));
}
public interface TestExceptionPropagation {
@WorkflowMethod
void execute(String taskList);
}
public static class ThrowingChild implements TestWorkflow1 {
@Override
@SuppressWarnings("AssertionFailureIgnored")
public String execute(String taskList) {
TestActivities testActivities =
Workflow.newActivityStub(TestActivities.class, newActivityOptions2());
try {
testActivities.throwIO();
fail("unreachable");
return "ignored";
} catch (ActivityFailureException e) {
try {
assertTrue(e.getMessage().contains("TestActivities::throwIO"));
assertTrue(e.getCause() instanceof IOException);
assertEquals("simulated IO problem", e.getCause().getMessage());
} catch (AssertionError ae) {
// Errors cause decision to fail. But we want workflow to fail in this case.
throw new RuntimeException(ae);
}
Exception ee = new NumberFormatException();
ee.initCause(e);
throw Workflow.wrap(ee);
}
}
}
public static class TestExceptionPropagationImpl implements TestExceptionPropagation {
@Override
@SuppressWarnings("AssertionFailureIgnored")
public void execute(String taskList) {
ChildWorkflowOptions options =
new ChildWorkflowOptions.Builder()
.setExecutionStartToCloseTimeout(Duration.ofHours(1))
.build();
TestWorkflow1 child = Workflow.newChildWorkflowStub(TestWorkflow1.class, options);
try {
child.execute(taskList);
fail("unreachable");
} catch (RuntimeException e) {
try {
assertNoEmptyStacks(e);
assertTrue(e.getMessage().contains("TestWorkflow1::execute"));
assertTrue(e instanceof ChildWorkflowException);
assertTrue(e.getCause() instanceof NumberFormatException);
assertTrue(e.getCause().getCause() instanceof ActivityFailureException);
assertTrue(e.getCause().getCause().getCause() instanceof IOException);
assertEquals("simulated IO problem", e.getCause().getCause().getCause().getMessage());
} catch (AssertionError ae) {
// Errors cause decision to fail. But we want workflow to fail in this case.
throw new RuntimeException(ae);
}
Exception fnf = new FileNotFoundException();
fnf.initCause(e);
throw Workflow.wrap(fnf);
}
}
}
private static void assertNoEmptyStacks(RuntimeException e) {
// Check that there are no empty stacks
Throwable c = e;
while (c != null) {
assertTrue(c.getStackTrace().length > 0);
c = c.getCause();
}
}
/**
   * Test that an exception thrown in an activity executed from a child workflow results in the following
* chain of exceptions when an exception is received in an external client that executed workflow
* through a WorkflowClient:
*
* <pre>
* {@link WorkflowFailureException}
* ->{@link ChildWorkflowFailureException}
* ->{@link ActivityFailureException}
* ->OriginalActivityException
* </pre>
*
   * <p>This test also verifies that checked exception wrapping and unwrapping works, producing a
   * clean exception chain without the wrappers.
*/
@Test
public void testExceptionPropagation() {
startWorkerFor(ThrowingChild.class, TestExceptionPropagationImpl.class);
TestExceptionPropagation client =
workflowClient.newWorkflowStub(
TestExceptionPropagation.class, newWorkflowOptionsBuilder(taskList).build());
try {
client.execute(taskList);
fail("Unreachable");
} catch (WorkflowFailureException e) {
// Rethrow the assertion failure
if (e.getCause().getCause() instanceof AssertionError) {
throw (AssertionError) e.getCause().getCause();
}
assertNoEmptyStacks(e);
// Uncomment to see the actual trace.
// e.printStackTrace();
assertTrue(e.getMessage(), e.getMessage().contains("TestExceptionPropagation::execute"));
assertTrue(e.getStackTrace().length > 0);
assertTrue(e.getCause() instanceof FileNotFoundException);
assertTrue(e.getCause().getCause() instanceof ChildWorkflowException);
assertTrue(e.getCause().getCause().getCause() instanceof NumberFormatException);
assertTrue(e.getCause().getCause().getCause().getCause() instanceof ActivityFailureException);
assertTrue(e.getCause().getCause().getCause().getCause().getCause() instanceof IOException);
assertEquals(
"simulated IO problem",
e.getCause().getCause().getCause().getCause().getCause().getMessage());
}
}
public interface QueryableWorkflow {
@WorkflowMethod
String execute();
@QueryMethod
String getState();
@SignalMethod(name = "testSignal")
void mySignal(String value);
}
public static class TestSignalWorkflowImpl implements QueryableWorkflow {
String state = "initial";
List<String> signals = new ArrayList<>();
CompletablePromise<Void> promise = Workflow.newPromise();
@Override
public String execute() {
promise.get();
return signals.get(0) + signals.get(1);
}
@Override
public String getState() {
return state;
}
@Override
public void mySignal(String value) {
log.info("TestSignalWorkflowImpl.mySignal value=" + value);
state = value;
signals.add(value);
if (signals.size() == 2) {
promise.complete(null);
}
}
}
@Test
public void testSignal() {
// Test getTrace through replay by a local worker.
Worker queryWorker;
if (useExternalService) {
WorkerFactory workerFactory = WorkerFactory.newInstance(workflowClient);
queryWorker = workerFactory.newWorker(taskList);
} else {
queryWorker = testEnvironment.newWorker(taskList);
}
queryWorker.registerWorkflowImplementationTypes(TestSignalWorkflowImpl.class);
startWorkerFor(TestSignalWorkflowImpl.class);
WorkflowOptions.Builder optionsBuilder = newWorkflowOptionsBuilder(taskList);
String workflowId = UUID.randomUUID().toString();
optionsBuilder.setWorkflowId(workflowId);
QueryableWorkflow client =
workflowClient.newWorkflowStub(QueryableWorkflow.class, optionsBuilder.build());
    // To execute the workflow, client.execute() would suffice, but here we want to start the
    // workflow and return immediately.
WorkflowExecution execution = WorkflowClient.start(client::execute);
sleep(Duration.ofSeconds(1));
assertEquals(workflowId, execution.getWorkflowId());
    // Calls query multiple times to check that it doesn't leak threads.
assertEquals("initial", client.getState());
sleep(Duration.ofSeconds(1));
client.mySignal("Hello ");
sleep(Duration.ofSeconds(1));
// Test client created using WorkflowExecution
QueryableWorkflow client2 =
workflowClient.newWorkflowStub(
QueryableWorkflow.class, execution.getWorkflowId(), Optional.of(execution.getRunId()));
assertEquals("Hello ", client2.getState());
sleep(Duration.ofMillis(500));
client2.mySignal("World!");
sleep(Duration.ofMillis(500));
assertEquals("World!", client2.getState());
assertEquals(
"Hello World!",
workflowClient.newUntypedWorkflowStub(execution, Optional.empty()).getResult(String.class));
client2.execute();
}
  // A simple workflow that completes immediately after it starts. It is used to test sending
  // signals to already completed workflows.
public static class TestSimpleWorkflowImpl implements QueryableWorkflow {
@Override
public String execute() {
return "Execution complete";
}
@Override
public String getState() {
return "simple_state";
}
@Override
public void mySignal(String value) {
log.info("#### You should never see this line ####");
}
}
@Test
public void testSignalingCompletedWorkflow() {
Worker queryWorker;
if (useExternalService) {
WorkerFactory workerFactory = WorkerFactory.newInstance(workflowClient);
queryWorker = workerFactory.newWorker(taskList);
} else {
queryWorker = testEnvironment.newWorker(taskList);
}
queryWorker.registerWorkflowImplementationTypes(TestSimpleWorkflowImpl.class);
startWorkerFor(TestSimpleWorkflowImpl.class);
String workflowId = UUID.randomUUID().toString();
WorkflowOptions.Builder optionsBuilder = newWorkflowOptionsBuilder(taskList);
RetryOptions workflowRetryOptions =
new RetryOptions.Builder()
.setInitialInterval(Duration.ofSeconds(1))
.setExpiration(Duration.ofSeconds(1))
.setMaximumAttempts(1)
.setBackoffCoefficient(1.0)
.setDoNotRetry(
BadRequestError.class,
EntityNotExistsError.class,
WorkflowExecutionAlreadyCompletedError.class,
WorkflowExecutionAlreadyStartedError.class,
DomainAlreadyExistsError.class,
QueryFailedError.class,
DomainNotActiveError.class,
CancellationAlreadyRequestedError.class)
.build();
optionsBuilder.setRetryOptions(workflowRetryOptions);
optionsBuilder.setWorkflowId(workflowId);
QueryableWorkflow client =
workflowClient.newWorkflowStub(QueryableWorkflow.class, optionsBuilder.build());
    // This completes the workflow; it won't receive the signal after this point.
client.execute();
try {
client.mySignal("Hello!");
      fail("Signal call should throw an exception"); // assert may be disabled at runtime
} catch (Exception e) {
assertEquals(WorkflowExecutionAlreadyCompletedError.class, e.getCause().getClass());
}
}
public static class TestSignalWithStartWorkflowImpl implements QueryableWorkflow {
String state = "initial";
List<String> signals = new ArrayList<>();
CompletablePromise<Void> promise = Workflow.newPromise();
@Override
public String execute() {
promise.get();
return signals.get(0) + signals.get(1);
}
@Override
public String getState() {
return state;
}
@Override
public void mySignal(String value) {
log.info("TestSignalWorkflowImpl.mySignal value=" + value);
state = value;
signals.add(value);
if (signals.size() == 2) {
promise.complete(null);
}
}
}
@Test
public void testSignalWithStart() {
// Test getTrace through replay by a local worker.
Worker queryWorker;
if (useExternalService) {
WorkerFactory workerFactory = WorkerFactory.newInstance(workflowClient);
queryWorker = workerFactory.newWorker(taskList);
} else {
queryWorker = testEnvironment.newWorker(taskList);
}
queryWorker.registerWorkflowImplementationTypes(TestSignalWithStartWorkflowImpl.class);
startWorkerFor(TestSignalWorkflowImpl.class);
WorkflowOptions.Builder optionsBuilder = newWorkflowOptionsBuilder(taskList);
String workflowId = UUID.randomUUID().toString();
optionsBuilder.setWorkflowId(workflowId);
QueryableWorkflow client =
workflowClient.newWorkflowStub(QueryableWorkflow.class, optionsBuilder.build());
// SignalWithStart starts a workflow and delivers the signal to it.
BatchRequest batch = workflowClient.newSignalWithStartRequest();
batch.add(client::mySignal, "Hello ");
batch.add(client::execute);
WorkflowExecution execution = workflowClient.signalWithStart(batch);
sleep(Duration.ofSeconds(1));
// Test client created using WorkflowExecution
QueryableWorkflow client2 =
workflowClient.newWorkflowStub(QueryableWorkflow.class, optionsBuilder.build());
// SignalWithStart delivers the signal to the already running workflow.
BatchRequest batch2 = workflowClient.newSignalWithStartRequest();
batch2.add(client2::mySignal, "World!");
batch2.add(client2::execute);
WorkflowExecution execution2 = workflowClient.signalWithStart(batch2);
assertEquals(execution, execution2);
sleep(Duration.ofMillis(500));
assertEquals("World!", client2.getState());
assertEquals(
"Hello World!",
workflowClient.newUntypedWorkflowStub(execution, Optional.empty()).getResult(String.class));
}
public static class TestConsistentQueryImpl implements QueryableWorkflow {
String state = "initial";
@Override
public String execute() {
TestActivities testActivities =
Workflow.newActivityStub(TestActivities.class, newActivityOptions2());
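      // Loop until signaled with "exit": block until a signal changes the state, then run an
      // activity before re-checking the state.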
while (!state.equals("exit")) {
String oldState = state;
Workflow.await(() -> !Objects.equals(state, oldState));
testActivities.activity();
}
return "";
}
@Override
public String getState() {
return state;
}
@Override
public void mySignal(String newState) {
log.info("TestConsistentQueryImpl.mySignal newState=" + newState);
state = newState;
}
}
@Test
@Ignore("until version check is merged in server")
public void testConsistentQuery() throws Exception {
startWorkerFor(TestConsistentQueryImpl.class);
String workflowType = QueryableWorkflow.class.getSimpleName() + "::execute";
WorkflowOptions.Builder ob = newWorkflowOptionsBuilder(taskList);
WorkflowStub client = workflowClient.newUntypedWorkflowStub(workflowType, ob.build());
java.util.function.Function<QueryConsistencyLevel, String> query =
(consistencyLevel) ->
client.query(
"QueryableWorkflow::getState", String.class, String.class, null, consistencyLevel);
client.start();
client.signal("testSignal", "A");
assertEquals("A", query.apply(QueryConsistencyLevel.STRONG));
client.signal("testSignal", "B");
assertEquals("B", query.apply(QueryConsistencyLevel.STRONG));
client.signal("testSignal", "exit");
}
public static class TestNoQueryWorkflowImpl implements QueryableWorkflow {
CompletablePromise<Void> promise = Workflow.newPromise();
@Override
public String execute() {
promise.get();
return "done";
}
@Override
public String getState() {
return "some state";
}
@Override
public void mySignal(String value) {
promise.complete(null);
}
}
@Test
public void testNoQueryThreadLeak() throws InterruptedException {
startWorkerFor(TestNoQueryWorkflowImpl.class);
int threadCount = ManagementFactory.getThreadMXBean().getThreadCount();
WorkflowOptions.Builder optionsBuilder = newWorkflowOptionsBuilder(taskList);
QueryableWorkflow client =
workflowClient.newWorkflowStub(QueryableWorkflow.class, optionsBuilder.build());
WorkflowClient.start(client::execute);
sleep(Duration.ofSeconds(1));
    // Call query multiple times to check at the end of the method that it doesn't leak threads.
int queryCount = 100;
for (int i = 0; i < queryCount; i++) {
assertEquals("some state", client.getState());
if (useDockerService) {
// Sleep a little bit to avoid server throttling error.
Thread.sleep(50);
}
}
client.mySignal("Hello ");
client.execute();
// Ensures that no threads were leaked due to query
int threadsCreated = ManagementFactory.getThreadMXBean().getThreadCount() - threadCount;
assertTrue("query leaks threads: " + threadsCreated, threadsCreated < queryCount);
}
@Test
public void testQueryRejectionConditionDefault() {
startWorkerFor(TestNoQueryWorkflowImpl.class);
WorkflowOptions.Builder optionsBuilder = newWorkflowOptionsBuilder(taskList);
QueryableWorkflow client =
workflowClient.newWorkflowStub(QueryableWorkflow.class, optionsBuilder.build());
WorkflowClient.start(client::execute);
sleep(Duration.ofSeconds(1));
assertEquals("some state", client.getState());
client.mySignal("Hello ");
client.execute();
assertEquals("some state", client.getState());
}
@Test
public void testQueryRejectionConditionNotOpen() {
startWorkerFor(TestNoQueryWorkflowImpl.class);
WorkflowClientOptions clientOptions =
WorkflowClientOptions.newBuilder(workflowClient.getOptions())
.setQueryRejectCondition(QueryRejectCondition.NOT_OPEN)
.build();
WorkflowClient wc;
if (useExternalService) {
wc = WorkflowClient.newInstance(service, clientOptions);
} else {
wc = testEnvironment.newWorkflowClient(clientOptions);
}
WorkflowOptions.Builder optionsBuilder = newWorkflowOptionsBuilder(taskList);
QueryableWorkflow client = wc.newWorkflowStub(QueryableWorkflow.class, optionsBuilder.build());
WorkflowClient.start(client::execute);
sleep(Duration.ofSeconds(1));
assertEquals("some state", client.getState());
client.mySignal("Hello ");
client.execute();
try {
client.getState();
} catch (WorkflowQueryRejectedException e) {
assertEquals(WorkflowExecutionCloseStatus.COMPLETED, e.getWorkflowExecutionStatus());
}
}
@Test
public void testSignalUntyped() {
startWorkerFor(TestSignalWorkflowImpl.class);
String workflowType = QueryableWorkflow.class.getSimpleName() + "::execute";
AtomicReference<WorkflowExecution> execution = new AtomicReference<>();
WorkflowStub client =
workflowClient.newUntypedWorkflowStub(
workflowType, newWorkflowOptionsBuilder(taskList).build());
    // To execute the workflow, client.execute() would suffice, but here we want to start the
    // workflow and return immediately.
registerDelayedCallback(
Duration.ofSeconds(1),
() -> {
assertEquals("initial", client.query("QueryableWorkflow::getState", String.class));
client.signal("testSignal", "Hello ");
sleep(Duration.ofMillis(500));
while (!"Hello ".equals(client.query("QueryableWorkflow::getState", String.class))) {}
assertEquals("Hello ", client.query("QueryableWorkflow::getState", String.class));
client.signal("testSignal", "World!");
while (!"World!".equals(client.query("QueryableWorkflow::getState", String.class))) {}
assertEquals("World!", client.query("QueryableWorkflow::getState", String.class));
assertEquals(
"Hello World!",
workflowClient
.newUntypedWorkflowStub(execution.get(), Optional.of(workflowType))
.getResult(String.class));
});
execution.set(client.start());
assertEquals("Hello World!", client.getResult(String.class));
assertEquals("World!", client.query("QueryableWorkflow::getState", String.class));
try {
client.query("QueryableWorkflow::getState", String.class, QueryRejectCondition.NOT_OPEN);
fail("unreachable");
} catch (WorkflowQueryRejectedException e) {
assertEquals(WorkflowExecutionCloseStatus.COMPLETED, e.getWorkflowExecutionStatus());
}
}
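  // Shared state coordinating the test below with its workflow implementation: decisionCount
  // tracks decision attempts and sendSignal tells the test when it is safe to send the signal.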
static final AtomicInteger decisionCount = new AtomicInteger();
static CompletableFuture<Boolean> sendSignal;
public static class TestSignalDuringLastDecisionWorkflowImpl implements TestWorkflowSignaled {
private String signal;
@Override
public String execute() {
if (decisionCount.incrementAndGet() == 1) {
sendSignal.complete(true);
        // Never sleep in a real workflow using Thread.sleep.
        // Here it is used only to simulate a race condition.
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
return signal;
}
@Override
public void signal1(String arg) {
signal = arg;
}
}
@Test
public void testSignalDuringLastDecision() {
decisionCount.set(0);
sendSignal = new CompletableFuture<>();
startWorkerFor(TestSignalDuringLastDecisionWorkflowImpl.class);
WorkflowOptions.Builder options = newWorkflowOptionsBuilder(taskList);
options.setWorkflowId("testSignalDuringLastDecision-" + UUID.randomUUID().toString());
TestWorkflowSignaled client =
workflowClient.newWorkflowStub(TestWorkflowSignaled.class, options.build());
WorkflowExecution execution = WorkflowClient.start(client::execute);
registerDelayedCallback(
Duration.ofSeconds(1),
() -> {
try {
try {
sendSignal.get(2, TimeUnit.SECONDS);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
client.signal1("Signal Input");
} catch (TimeoutException | ExecutionException e) {
throw new RuntimeException(e);
}
assertEquals(
"Signal Input",
workflowClient
.newUntypedWorkflowStub(execution, Optional.empty())
.getResult(String.class));
});
sleep(Duration.ofSeconds(2));
}
public static class TestTimerCallbackBlockedWorkflowImpl implements TestWorkflow1 {
@Override
public String execute(String taskList) {
Promise<Void> timer1 = Workflow.newTimer(Duration.ZERO);
Promise<Void> timer2 = Workflow.newTimer(Duration.ofSeconds(1));
return timer1
.thenApply(
e -> {
timer2.get();
return "timer2Fired";
})
.get();
}
}
  /** Tests blocking (calling get on another promise) inside a timer callback thread. */
@Test
public void testTimerCallbackBlocked() {
startWorkerFor(TestTimerCallbackBlockedWorkflowImpl.class);
WorkflowOptions.Builder options = new WorkflowOptions.Builder();
options.setExecutionStartToCloseTimeout(Duration.ofSeconds(10));
options.setTaskStartToCloseTimeout(Duration.ofSeconds(1));
options.setTaskList(taskList);
TestWorkflow1 client = workflowClient.newWorkflowStub(TestWorkflow1.class, options.build());
String result = client.execute(taskList);
assertEquals("timer2Fired", result);
}
public interface ITestChild {
@WorkflowMethod
String execute(String arg, int delay);
}
public interface ITestNamedChild {
@WorkflowMethod(name = "namedChild")
String execute(String arg);
}
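  // Workflow ID assigned to the named child; tests set it before starting the parent workflow.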
private static String child2Id;
public static class TestParentWorkflow implements TestWorkflow1 {
private final ITestChild child1 = Workflow.newChildWorkflowStub(ITestChild.class);
private final ITestNamedChild child2;
public TestParentWorkflow() {
ChildWorkflowOptions.Builder options = new ChildWorkflowOptions.Builder();
options.setWorkflowId(child2Id);
child2 = Workflow.newChildWorkflowStub(ITestNamedChild.class, options.build());
}
@Override
public String execute(String taskList) {
Promise<String> r1 = Async.function(child1::execute, "Hello ", 0);
String r2 = child2.execute("World!");
assertEquals(child2Id, Workflow.getWorkflowExecution(child2).get().getWorkflowId());
return r1.get() + r2;
}
}
public static class TestParentWorkflowWithChildTimeout implements TestWorkflow1 {
private final ITestChild child;
public TestParentWorkflowWithChildTimeout() {
ChildWorkflowOptions.Builder options = new ChildWorkflowOptions.Builder();
options.setExecutionStartToCloseTimeout(Duration.ofSeconds(1));
child = Workflow.newChildWorkflowStub(ITestChild.class, options.build());
}
@Override
public String execute(String taskList) {
try {
child.execute("Hello ", (int) Duration.ofDays(1).toMillis());
} catch (Exception e) {
return e.getClass().getSimpleName();
}
throw new RuntimeException("not reachable");
}
}
public static class TestChild implements ITestChild {
@Override
public String execute(String arg, int delay) {
Workflow.sleep(delay);
return arg.toUpperCase();
}
}
public static class TestNamedChild implements ITestNamedChild {
@Override
public String execute(String arg) {
return arg.toUpperCase();
}
}
@Test
public void testChildWorkflow() {
child2Id = UUID.randomUUID().toString();
startWorkerFor(TestParentWorkflow.class, TestNamedChild.class, TestChild.class);
WorkflowOptions.Builder options = new WorkflowOptions.Builder();
options.setExecutionStartToCloseTimeout(Duration.ofSeconds(200));
options.setTaskStartToCloseTimeout(Duration.ofSeconds(60));
options.setTaskList(taskList);
TestWorkflow1 client = workflowClient.newWorkflowStub(TestWorkflow1.class, options.build());
assertEquals("HELLO WORLD!", client.execute(taskList));
}
@Test
public void testChildWorkflowTimeout() {
child2Id = UUID.randomUUID().toString();
startWorkerFor(TestParentWorkflowWithChildTimeout.class, TestChild.class);
WorkflowOptions.Builder options = new WorkflowOptions.Builder();
options.setExecutionStartToCloseTimeout(Duration.ofSeconds(200));
options.setTaskStartToCloseTimeout(Duration.ofSeconds(60));
options.setTaskList(taskList);
TestWorkflow1 client = workflowClient.newWorkflowStub(TestWorkflow1.class, options.build());
assertEquals("ChildWorkflowTimedOutException", client.execute(taskList));
}
public static class TestParentWorkflowContinueAsNew implements TestWorkflow1 {
private final ITestChild child1 =
Workflow.newChildWorkflowStub(
ITestChild.class,
new ChildWorkflowOptions.Builder()
.setWorkflowIdReusePolicy(WorkflowIdReusePolicy.RejectDuplicate)
.build());
private final TestWorkflow1 self = Workflow.newContinueAsNewStub(TestWorkflow1.class);
@Override
public String execute(String arg) {
child1.execute("Hello", 0);
if (arg.length() > 0) {
self.execute(""); // continue as new
}
return "foo";
}
}
  /** Reproduces a bug where a child of a continued-as-new workflow gets the same UUID-based ID. */
@Test
public void testParentContinueAsNew() {
child2Id = UUID.randomUUID().toString();
startWorkerFor(TestParentWorkflowContinueAsNew.class, TestChild.class);
WorkflowOptions.Builder options = new WorkflowOptions.Builder();
options.setExecutionStartToCloseTimeout(Duration.ofSeconds(200));
options.setTaskStartToCloseTimeout(Duration.ofSeconds(60));
options.setTaskList(taskList);
TestWorkflow1 client = workflowClient.newWorkflowStub(TestWorkflow1.class, options.build());
assertEquals("foo", client.execute("not empty"));
}
private static String childReexecuteId = UUID.randomUUID().toString();
public interface WorkflowIdReusePolicyParent {
@WorkflowMethod
String execute(boolean parallel, WorkflowIdReusePolicy policy);
}
public static class TestChildReexecuteWorkflow implements WorkflowIdReusePolicyParent {
public TestChildReexecuteWorkflow() {}
@Override
public String execute(boolean parallel, WorkflowIdReusePolicy policy) {
ChildWorkflowOptions options =
new ChildWorkflowOptions.Builder()
.setWorkflowId(childReexecuteId)
.setWorkflowIdReusePolicy(policy)
.build();
ITestNamedChild child1 = Workflow.newChildWorkflowStub(ITestNamedChild.class, options);
Promise<String> r1P = Async.function(child1::execute, "Hello ");
String r1 = null;
if (!parallel) {
r1 = r1P.get();
}
ITestNamedChild child2 = Workflow.newChildWorkflowStub(ITestNamedChild.class, options);
String r2 = child2.execute("World!");
if (parallel) {
r1 = r1P.get();
}
assertEquals(childReexecuteId, Workflow.getWorkflowExecution(child1).get().getWorkflowId());
assertEquals(childReexecuteId, Workflow.getWorkflowExecution(child2).get().getWorkflowId());
return r1 + r2;
}
}
@Test
public void testChildAlreadyRunning() {
startWorkerFor(TestChildReexecuteWorkflow.class, TestNamedChild.class);
WorkflowOptions.Builder options = new WorkflowOptions.Builder();
options.setExecutionStartToCloseTimeout(Duration.ofSeconds(200));
options.setTaskStartToCloseTimeout(Duration.ofSeconds(60));
options.setTaskList(taskList);
WorkflowIdReusePolicyParent client =
workflowClient.newWorkflowStub(WorkflowIdReusePolicyParent.class, options.build());
try {
client.execute(false, WorkflowIdReusePolicy.RejectDuplicate);
fail("unreachable");
} catch (WorkflowFailureException e) {
assertTrue(e.getCause() instanceof StartChildWorkflowFailedException);
}
}
@Test
public void testChildStartTwice() {
startWorkerFor(TestChildReexecuteWorkflow.class, TestNamedChild.class);
WorkflowOptions.Builder options = new WorkflowOptions.Builder();
options.setExecutionStartToCloseTimeout(Duration.ofSeconds(200));
options.setTaskStartToCloseTimeout(Duration.ofSeconds(60));
options.setTaskList(taskList);
WorkflowIdReusePolicyParent client =
workflowClient.newWorkflowStub(WorkflowIdReusePolicyParent.class, options.build());
try {
client.execute(true, WorkflowIdReusePolicy.RejectDuplicate);
fail("unreachable");
} catch (WorkflowFailureException e) {
assertTrue(e.getCause() instanceof StartChildWorkflowFailedException);
}
}
@Test
public void testChildReexecute() {
startWorkerFor(TestChildReexecuteWorkflow.class, TestNamedChild.class);
WorkflowOptions.Builder options = new WorkflowOptions.Builder();
options.setExecutionStartToCloseTimeout(Duration.ofSeconds(200));
options.setTaskStartToCloseTimeout(Duration.ofSeconds(60));
options.setTaskList(taskList);
WorkflowIdReusePolicyParent client =
workflowClient.newWorkflowStub(WorkflowIdReusePolicyParent.class, options.build());
assertEquals("HELLO WORLD!", client.execute(false, WorkflowIdReusePolicy.AllowDuplicate));
}
public static class TestChildWorkflowRetryWorkflow implements TestWorkflow1 {
private ITestChild child;
public TestChildWorkflowRetryWorkflow() {}
@Override
public String execute(String taskList) {
ChildWorkflowOptions options =
new ChildWorkflowOptions.Builder()
.setExecutionStartToCloseTimeout(Duration.ofSeconds(500))
.setTaskStartToCloseTimeout(Duration.ofSeconds(2))
.setTaskList(taskList)
.setRetryOptions(
new RetryOptions.Builder()
.setMaximumInterval(Duration.ofSeconds(1))
.setInitialInterval(Duration.ofSeconds(1))
.setMaximumAttempts(3)
.setExpiration(Duration.ofDays(1))
.build())
.build();
child = Workflow.newChildWorkflowStub(ITestChild.class, options);
return child.execute(taskList, 0);
}
}
public interface AngryChildActivity {
@ActivityMethod(scheduleToCloseTimeoutSeconds = 5)
void execute();
}
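  // Counts activity invocations so the test can verify how many times the child workflow was
  // retried.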
public static class AngryChildActivityImpl implements AngryChildActivity {
private long invocationCount;
@Override
public void execute() {
invocationCount++;
}
public long getInvocationCount() {
return invocationCount;
}
}
public static class AngryChild implements ITestChild {
@Override
public String execute(String taskList, int delay) {
AngryChildActivity activity =
Workflow.newActivityStub(
AngryChildActivity.class,
new ActivityOptions.Builder().setTaskList(taskList).build());
activity.execute();
throw new UnsupportedOperationException("simulated failure");
}
}
@Test
public void testChildWorkflowRetry() {
AngryChildActivityImpl angryChildActivity = new AngryChildActivityImpl();
worker.registerActivitiesImplementations(angryChildActivity);
startWorkerFor(TestChildWorkflowRetryWorkflow.class, AngryChild.class);
WorkflowOptions.Builder options = new WorkflowOptions.Builder();
options.setExecutionStartToCloseTimeout(Duration.ofSeconds(20));
options.setTaskStartToCloseTimeout(Duration.ofSeconds(2));
options.setTaskList(taskList);
AtomicReference<String> capturedWorkflowType = new AtomicReference<>();
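    // Use a client-side interceptor to capture the workflow type of the stub created below.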
WorkflowClientOptions clientOptions =
WorkflowClientOptions.newBuilder()
.setInterceptors(
new WorkflowClientInterceptorBase() {
@Override
public WorkflowStub newUntypedWorkflowStub(
String workflowType, WorkflowOptions options, WorkflowStub next) {
capturedWorkflowType.set(workflowType);
return next;
}
})
.setDomain(DOMAIN)
.build();
WorkflowClient wc;
if (useExternalService) {
wc = WorkflowClient.newInstance(service, clientOptions);
} else {
wc = testEnvironment.newWorkflowClient(clientOptions);
}
TestWorkflow1 client = wc.newWorkflowStub(TestWorkflow1.class, options.build());
try {
client.execute(taskList);
fail("unreachable");
} catch (WorkflowException e) {
assertTrue(e.toString(), e.getCause() instanceof ChildWorkflowFailureException);
assertTrue(e.toString(), e.getCause().getCause() instanceof UnsupportedOperationException);
assertEquals("simulated failure", e.getCause().getCause().getMessage());
}
assertEquals("TestWorkflow1::execute", capturedWorkflowType.get());
assertEquals(3, angryChildActivity.getInvocationCount());
}
  /**
   * Tests that a history created before server-side retry was supported remains backwards
   * compatible with a client that supports server-side retry.
   */
@Test
public void testChildWorkflowRetryReplay() throws Exception {
Assume.assumeFalse("skipping for docker tests", useExternalService);
Assume.assumeFalse("skipping for sticky off", disableStickyExecution);
WorkflowReplayer.replayWorkflowExecutionFromResource(
"testChildWorkflowRetryHistory.json", TestChildWorkflowRetryWorkflow.class);
}
public static class TestChildWorkflowExecutionPromiseHandler implements TestWorkflow1 {
private ITestNamedChild child;
@Override
public String execute(String taskList) {
child = Workflow.newChildWorkflowStub(ITestNamedChild.class);
Promise<String> childResult = Async.function(child::execute, "foo");
Promise<WorkflowExecution> executionPromise = Workflow.getWorkflowExecution(child);
CompletablePromise<String> result = Workflow.newPromise();
// Ensure that the callback can execute Workflow.* functions.
executionPromise.thenApply(
(we) -> {
Workflow.sleep(Duration.ofSeconds(1));
result.complete(childResult.get());
return null;
});
return result.get();
}
}
/** Tests that the handler of the WorkflowExecution promise is executed in a workflow thread. */
@Test
public void testChildWorkflowExecutionPromiseHandler() {
startWorkerFor(TestChildWorkflowExecutionPromiseHandler.class, TestNamedChild.class);
WorkflowOptions.Builder options = new WorkflowOptions.Builder();
options.setExecutionStartToCloseTimeout(Duration.ofSeconds(20));
options.setTaskStartToCloseTimeout(Duration.ofSeconds(2));
options.setTaskList(taskList);
WorkflowClient wc;
if (useExternalService) {
wc = workflowClient;
} else {
wc = testEnvironment.newWorkflowClient();
}
TestWorkflow1 client = wc.newWorkflowStub(TestWorkflow1.class, options.build());
String result = client.execute(taskList);
assertEquals("FOO", result);
}
public static class TestSignalExternalWorkflow implements TestWorkflowSignaled {
private final SignalingChild child = Workflow.newChildWorkflowStub(SignalingChild.class);
private final CompletablePromise<Object> fromSignal = Workflow.newPromise();
@Override
public String execute() {
Promise<String> result =
Async.function(child::execute, "Hello", Workflow.getWorkflowInfo().getWorkflowId());
return result.get() + " " + fromSignal.get() + "!";
}
@Override
public void signal1(String arg) {
fromSignal.complete(arg);
}
}
public interface SignalingChild {
@WorkflowMethod
String execute(String arg, String parentWorkflowID);
}
public static class SignalingChildImpl implements SignalingChild {
@Override
public String execute(String greeting, String parentWorkflowID) {
WorkflowExecution parentExecution = new WorkflowExecution().setWorkflowId(parentWorkflowID);
TestWorkflowSignaled parent =
Workflow.newExternalWorkflowStub(TestWorkflowSignaled.class, parentExecution);
parent.signal1("World");
return greeting;
}
}
@Test
public void testSignalExternalWorkflow() {
startWorkerFor(TestSignalExternalWorkflow.class, SignalingChildImpl.class);
WorkflowOptions.Builder options = new WorkflowOptions.Builder();
options.setExecutionStartToCloseTimeout(Duration.ofSeconds(2000));
options.setTaskStartToCloseTimeout(Duration.ofSeconds(60));
options.setTaskList(taskList);
TestWorkflowSignaled client =
workflowClient.newWorkflowStub(TestWorkflowSignaled.class, options.build());
assertEquals("Hello World!", client.execute());
tracer.setExpected(
"executeChildWorkflow SignalingChild::execute",
"signalExternalWorkflow " + UUID_REGEXP + " testSignal");
}
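/**
 * Same scenario as {@link TestSignalExternalWorkflow}, but the child is started through an untyped
 * ChildWorkflowStub and the child signals the parent through an untyped ExternalWorkflowStub.
 */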
public static class TestUntypedSignalExternalWorkflow implements TestWorkflowSignaled {
private final ChildWorkflowStub child =
Workflow.newUntypedChildWorkflowStub("SignalingChild::execute");
private final CompletablePromise<Object> fromSignal = Workflow.newPromise();
@Override
public String execute() {
Promise<String> result =
child.executeAsync(String.class, "Hello", Workflow.getWorkflowInfo().getWorkflowId());
return result.get() + " " + fromSignal.get() + "!";
}
@Override
public void signal1(String arg) {
fromSignal.complete(arg);
}
}
public static class UntypedSignalingChildImpl implements SignalingChild {
@Override
public String execute(String greeting, String parentWorkflowID) {
ExternalWorkflowStub parent = Workflow.newUntypedExternalWorkflowStub(parentWorkflowID);
parent.signal("testSignal", "World");
return greeting;
}
}
@Test
public void testUntypedSignalExternalWorkflow() {
startWorkerFor(TestUntypedSignalExternalWorkflow.class, UntypedSignalingChildImpl.class);
WorkflowOptions.Builder options = new WorkflowOptions.Builder();
options.setExecutionStartToCloseTimeout(Duration.ofSeconds(20));
options.setTaskStartToCloseTimeout(Duration.ofSeconds(2));
options.setTaskList(taskList);
TestWorkflowSignaled client =
workflowClient.newWorkflowStub(TestWorkflowSignaled.class, options.build());
assertEquals("Hello World!", client.execute());
}
public static class TestSignalExternalWorkflowFailure implements TestWorkflow1 {
@Override
public String execute(String taskList) {
WorkflowExecution parentExecution = new WorkflowExecution().setWorkflowId("invalid id");
TestWorkflowSignaled workflow =
Workflow.newExternalWorkflowStub(TestWorkflowSignaled.class, parentExecution);
workflow.signal1("World");
return "ignored";
}
}
@Test
public void testSignalExternalWorkflowFailure() {
startWorkerFor(TestSignalExternalWorkflowFailure.class);
WorkflowOptions.Builder options = new WorkflowOptions.Builder();
options.setExecutionStartToCloseTimeout(Duration.ofSeconds(20));
options.setTaskStartToCloseTimeout(Duration.ofSeconds(2));
options.setTaskList(taskList);
TestWorkflow1 client = workflowClient.newWorkflowStub(TestWorkflow1.class, options.build());
try {
client.execute(taskList);
fail("unreachable");
} catch (WorkflowFailureException e) {
assertTrue(e.getCause() instanceof SignalExternalWorkflowException);
assertEquals(
"invalid id",
((SignalExternalWorkflowException) e.getCause()).getSignaledExecution().getWorkflowId());
assertEquals(
SignalExternalWorkflowExecutionFailedCause.UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION,
((SignalExternalWorkflowException) e.getCause()).getFailureCause());
}
}
public static class TestSignalExternalWorkflowImmediateCancellation implements TestWorkflow1 {
@Override
public String execute(String taskList) {
WorkflowExecution parentExecution = new WorkflowExecution().setWorkflowId("invalid id");
TestWorkflowSignaled workflow =
Workflow.newExternalWorkflowStub(TestWorkflowSignaled.class, parentExecution);
CompletablePromise<Void> signal = Workflow.newPromise();
CancellationScope scope =
Workflow.newCancellationScope(
() -> signal.completeFrom(Async.procedure(workflow::signal1, "World")));
scope.run();
scope.cancel();
try {
signal.get();
} catch (IllegalArgumentException e) {
// expected
}
return "result";
}
}
@Test
public void testSignalExternalWorkflowImmediateCancellation() {
startWorkerFor(TestSignalExternalWorkflowImmediateCancellation.class);
WorkflowOptions.Builder options = new WorkflowOptions.Builder();
options.setExecutionStartToCloseTimeout(Duration.ofSeconds(20));
options.setTaskStartToCloseTimeout(Duration.ofSeconds(2));
options.setTaskList(taskList);
TestWorkflow1 client = workflowClient.newWorkflowStub(TestWorkflow1.class, options.build());
try {
client.execute(taskList);
fail("unreachable");
} catch (WorkflowFailureException e) {
assertTrue(e.getCause() instanceof CancellationException);
}
}
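/** Workflow that blocks in Workflow.await until signal1 sets the message and then returns it. */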
public static class TestSignalWorkflowAsync implements TestWorkflowSignaled {
private String message;
@Override
public String execute() {
Workflow.await(() -> !Strings.isNullOrEmpty(message));
return message;
}
@Override
public void signal1(String arg) {
message = arg;
}
}
@Test
public void testSignalWorkflowAsync() throws Exception {
startWorkerFor(TestSignalWorkflowAsync.class);
WorkflowStub workflowStub =
workflowClient.newUntypedWorkflowStub(
"TestWorkflowSignaled::execute", newWorkflowOptionsBuilder(taskList).build());
CompletableFuture<WorkflowExecution> future = workflowStub.startAsync(taskList);
future.get();
String testSignalInput = "hello";
CompletableFuture<String> resultFuture =
workflowStub
.signalAsync("testSignal", testSignalInput)
.thenCompose(
v -> {
return workflowStub.getResultAsync(String.class);
});
assertEquals(testSignalInput, resultFuture.get());
}
@Test
public void testSignalWorkflowAsyncWithTimeout() throws Exception {
startWorkerFor(TestSignalWorkflowAsync.class);
WorkflowStub workflowStub =
workflowClient.newUntypedWorkflowStub(
"TestWorkflowSignaled::execute", newWorkflowOptionsBuilder(taskList).build());
CompletableFuture<WorkflowExecution> future = workflowStub.startAsync(taskList);
future.get();
Long timeout = 200L;
String testSignalInput = "hello";
CompletableFuture<String> resultFuture =
workflowStub
.signalAsyncWithTimeout(timeout, TimeUnit.MILLISECONDS, "testSignal", testSignalInput)
.thenCompose(
v -> {
return workflowStub.getResultAsync(String.class);
});
assertEquals(testSignalInput, resultFuture.get());
}
@Test
public void testSignalWorkflowAsyncFailed() throws Exception {
startWorkerFor(TestSignalWorkflowAsync.class);
WorkflowStub workflowStub =
workflowClient.newUntypedWorkflowStub(
"TestWorkflowSignaled::execute", newWorkflowOptionsBuilder(taskList).build());
String testSignalInput = "hello";
try {
workflowStub.signalAsync("testSignal", testSignalInput).get();
fail("unreachable");
} catch (IllegalStateException e) {
// expected: the workflow must be started before it can be signalled
}
}
public static class TestChildWorkflowAsyncRetryWorkflow implements TestWorkflow1 {
private ITestChild child;
public TestChildWorkflowAsyncRetryWorkflow() {}
@Override
public String execute(String taskList) {
ChildWorkflowOptions options =
new ChildWorkflowOptions.Builder()
.setExecutionStartToCloseTimeout(Duration.ofSeconds(5))
.setTaskStartToCloseTimeout(Duration.ofSeconds(2))
.setTaskList(taskList)
.setRetryOptions(
new RetryOptions.Builder()
.setMaximumInterval(Duration.ofSeconds(1))
.setInitialInterval(Duration.ofSeconds(1))
.setExpiration(Duration.ofDays(1))
.setMaximumAttempts(3)
.build())
.build();
child = Workflow.newChildWorkflowStub(ITestChild.class, options);
return Async.function(child::execute, taskList, 0).get();
}
}
@Test
public void testChildWorkflowAsyncRetry() {
AngryChildActivityImpl angryChildActivity = new AngryChildActivityImpl();
worker.registerActivitiesImplementations(angryChildActivity);
startWorkerFor(TestChildWorkflowAsyncRetryWorkflow.class, AngryChild.class);
WorkflowOptions.Builder options = new WorkflowOptions.Builder();
options.setExecutionStartToCloseTimeout(Duration.ofSeconds(20));
options.setTaskStartToCloseTimeout(Duration.ofSeconds(2));
options.setTaskList(taskList);
TestWorkflow1 client = workflowClient.newWorkflowStub(TestWorkflow1.class, options.build());
try {
client.execute(taskList);
fail("unreachable");
} catch (WorkflowException e) {
assertTrue(e.getCause() instanceof ChildWorkflowFailureException);
assertTrue(e.getCause().getCause() instanceof UnsupportedOperationException);
assertEquals("simulated failure", e.getCause().getCause().getMessage());
}
assertEquals(3, angryChildActivity.getInvocationCount());
}
private static int testDecisionFailureBackoffReplayCount;
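/**
 * Workflow that throws an Error from its first two decision tasks. The failed decisions are
 * retried with a backoff, so the test below asserts that the eventual success takes longer than a
 * second instead of spinning on the failing decision.
 */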
public static class TestDecisionFailureBackoff implements TestWorkflow1 {
@Override
public String execute(String taskList) {
if (testDecisionFailureBackoffReplayCount++ < 2) {
throw new Error("simulated decision failure");
}
return "result1";
}
}
@Test
public void testDecisionFailureBackoff() {
testDecisionFailureBackoffReplayCount = 0;
startWorkerFor(TestDecisionFailureBackoff.class);
WorkflowOptions o =
new WorkflowOptions.Builder()
.setExecutionStartToCloseTimeout(Duration.ofSeconds(10))
.setTaskStartToCloseTimeout(Duration.ofSeconds(1))
.setTaskList(taskList)
.build();
TestWorkflow1 workflowStub = workflowClient.newWorkflowStub(TestWorkflow1.class, o);
long start = currentTimeMillis();
String result = workflowStub.execute(taskList);
long elapsed = currentTimeMillis() - start;
assertTrue("spinned on fail decision", elapsed > 1000);
assertEquals("result1", result);
}
public static class TestAwait implements TestWorkflow1 {
private int i;
private int j;
@Override
public String execute(String taskList) {
StringBuilder result = new StringBuilder();
Async.procedure(
() -> {
while (true) {
Workflow.await(() -> i > j);
result.append(" awoken i=" + i);
j++;
}
});
for (i = 1; i < 3; i++) {
Workflow.await(() -> j >= i);
result.append(" loop i=" + i);
}
assertFalse(Workflow.await(Duration.ZERO, () -> false));
return result.toString();
}
}
@Test
public void testAwait() {
startWorkerFor(TestAwait.class);
TestWorkflow1 workflowStub =
workflowClient.newWorkflowStub(
TestWorkflow1.class, newWorkflowOptionsBuilder(taskList).build());
String result = workflowStub.execute(taskList);
assertEquals(" awoken i=1 loop i=1 awoken i=2 loop i=2", result);
}
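/** Per-test attempt counters shared by the retry-related workflow implementations below. */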
private static Map<String, AtomicInteger> retryCount = new ConcurrentHashMap<>();
public interface TestWorkflowRetry {
@WorkflowMethod
String execute(String testName);
}
public static class TestWorkflowRetryImpl implements TestWorkflowRetry {
@Override
public String execute(String testName) {
AtomicInteger count = retryCount.get(testName);
if (count == null) {
count = new AtomicInteger();
retryCount.put(testName, count);
}
throw new IllegalStateException("simulated " + count.incrementAndGet());
}
}
@Test
public void testWorkflowRetry() {
startWorkerFor(TestWorkflowRetryImpl.class);
RetryOptions workflowRetryOptions =
new RetryOptions.Builder()
.setInitialInterval(Duration.ofSeconds(1))
.setExpiration(Duration.ofSeconds(10))
.setMaximumAttempts(3)
.setBackoffCoefficient(1.0)
.build();
TestWorkflowRetry workflowStub =
workflowClient.newWorkflowStub(
TestWorkflowRetry.class,
newWorkflowOptionsBuilder(taskList).setRetryOptions(workflowRetryOptions).build());
long start = currentTimeMillis();
try {
workflowStub.execute(testName.getMethodName());
fail("unreachable");
} catch (WorkflowException e) {
assertEquals(e.toString(), "simulated 3", e.getCause().getMessage());
} finally {
long elapsed = currentTimeMillis() - start;
assertTrue(String.valueOf(elapsed), elapsed >= 2000); // Ensure that retry delays the restart
}
}
public static class TestWorkflowRetryDoNotRetryException implements TestWorkflowRetry {
@Override
public String execute(String testName) {
AtomicInteger count = retryCount.get(testName);
if (count == null) {
count = new AtomicInteger();
retryCount.put(testName, count);
}
int c = count.incrementAndGet();
if (c < 3) {
throw new IllegalStateException("simulated " + c);
} else {
throw new IllegalArgumentException("simulated " + c);
}
}
}
@Test
public void testWorkflowRetryDoNotRetryException() {
startWorkerFor(TestWorkflowRetryDoNotRetryException.class);
RetryOptions workflowRetryOptions =
new RetryOptions.Builder()
.setInitialInterval(Duration.ofSeconds(1))
.setExpiration(Duration.ofSeconds(10))
.setDoNotRetry(IllegalArgumentException.class)
.setMaximumAttempts(100)
.setBackoffCoefficient(1.0)
.build();
TestWorkflowRetry workflowStub =
workflowClient.newWorkflowStub(
TestWorkflowRetry.class,
newWorkflowOptionsBuilder(taskList).setRetryOptions(workflowRetryOptions).build());
try {
workflowStub.execute(testName.getMethodName());
fail("unreachable");
} catch (WorkflowException e) {
assertTrue(e.getCause() instanceof IllegalArgumentException);
assertEquals("simulated 3", e.getCause().getMessage());
}
}
public interface TestWorkflowRetryWithMethodRetry {
@WorkflowMethod
@MethodRetry(
initialIntervalSeconds = 1,
maximumIntervalSeconds = 1,
maximumAttempts = 30,
expirationSeconds = 100,
doNotRetry = IllegalArgumentException.class
)
String execute(String testName);
}
public static class TestWorkflowRetryWithMethodRetryImpl
implements TestWorkflowRetryWithMethodRetry {
@Override
public String execute(String testName) {
AtomicInteger count = retryCount.get(testName);
if (count == null) {
count = new AtomicInteger();
retryCount.put(testName, count);
}
int c = count.incrementAndGet();
if (c < 3) {
throw new IllegalStateException("simulated " + c);
} else {
throw new IllegalArgumentException("simulated " + c);
}
}
}
@Test
public void testWorkflowRetryWithMethodRetryDoNotRetryException() {
startWorkerFor(TestWorkflowRetryWithMethodRetryImpl.class);
TestWorkflowRetryWithMethodRetry workflowStub =
workflowClient.newWorkflowStub(
TestWorkflowRetryWithMethodRetry.class, newWorkflowOptionsBuilder(taskList).build());
try {
workflowStub.execute(testName.getMethodName());
fail("unreachable");
} catch (WorkflowException e) {
assertTrue(e.getCause().toString(), e.getCause() instanceof IllegalArgumentException);
assertEquals("simulated 3", e.getCause().getMessage());
}
}
public interface TestWorkflowWithCronSchedule {
@WorkflowMethod
@CronSchedule("0 * * * *")
String execute(String testName);
}
static String lastCompletionResult;
public static class TestWorkflowWithCronScheduleImpl implements TestWorkflowWithCronSchedule {
@Override
public String execute(String testName) {
Logger log = Workflow.getLogger(TestWorkflowWithCronScheduleImpl.class);
if (CancellationScope.current().isCancelRequested()) {
log.debug("TestWorkflowWithCronScheduleImpl run cancelled.");
return null;
}
lastCompletionResult = Workflow.getLastCompletionResult(String.class);
AtomicInteger count = retryCount.get(testName);
if (count == null) {
count = new AtomicInteger();
retryCount.put(testName, count);
}
int c = count.incrementAndGet();
if (c == 3) {
throw new RuntimeException("simulated error");
}
SimpleDateFormat sdf = new SimpleDateFormat("MMM dd,yyyy HH:mm:ss.SSS");
Date now = new Date(Workflow.currentTimeMillis());
log.debug("TestWorkflowWithCronScheduleImpl run at " + sdf.format(now));
return "run " + c;
}
}
@Test
public void testWorkflowWithCronSchedule() {
// The minimum cron interval is 1 minute, so this is not run against the real service in Jenkins.
// Comment out the assumption below to run it against a real service locally.
Assume.assumeFalse("skipping as test will timeout", useExternalService);
startWorkerFor(TestWorkflowWithCronScheduleImpl.class);
WorkflowStub client =
workflowClient.newUntypedWorkflowStub(
"TestWorkflowWithCronSchedule::execute",
newWorkflowOptionsBuilder(taskList)
.setExecutionStartToCloseTimeout(Duration.ofHours(1))
.setCronSchedule("0 * * * *")
.build());
registerDelayedCallback(Duration.ofHours(3), client::cancel);
client.start(testName.getMethodName());
try {
client.getResult(String.class);
fail("unreachable");
} catch (CancellationException ignored) {
}
// Run 3 failed. So on run 4 we get the last completion result from run 2.
Assert.assertEquals("run 2", lastCompletionResult);
}
public static class TestCronParentWorkflow implements TestWorkflow1 {
private final TestWorkflowWithCronSchedule cronChild =
Workflow.newChildWorkflowStub(TestWorkflowWithCronSchedule.class);
@Override
public String execute(String taskList) {
return cronChild.execute(taskList);
}
}
@Test
public void testChildWorkflowWithCronSchedule() {
// The minimum cron interval is 1 minute, so this is not run against the real service in Jenkins.
// Comment out the assumption below to run it against a real service locally.
Assume.assumeFalse("skipping as test will timeout", useExternalService);
startWorkerFor(TestCronParentWorkflow.class, TestWorkflowWithCronScheduleImpl.class);
WorkflowStub client =
workflowClient.newUntypedWorkflowStub(
"TestWorkflow1::execute",
newWorkflowOptionsBuilder(taskList)
.setExecutionStartToCloseTimeout(Duration.ofHours(10))
.build());
client.start(testName.getMethodName());
testEnvironment.sleep(Duration.ofHours(3));
client.cancel();
try {
client.getResult(String.class);
fail("unreachable");
} catch (CancellationException ignored) {
}
// Run 3 failed. So on run 4 we get the last completion result from run 2.
Assert.assertEquals("run 2", lastCompletionResult);
}
public interface TestActivities {
String sleepActivity(long milliseconds, int input);
String activityWithDelay(long milliseconds, boolean heartbeatMoreThanOnce);
String activity();
@ActivityMethod(name = "customActivity1")
int activity1(int input);
String activity2(String a1, int a2);
String activity3(String a1, int a2, int a3);
String activity4(String a1, int a2, int a3, int a4);
String activity5(String a1, int a2, int a3, int a4, int a5);
String activity6(String a1, int a2, int a3, int a4, int a5, int a6);
void proc();
void proc1(String input);
void proc2(String a1, int a2);
void proc3(String a1, int a2, int a3);
void proc4(String a1, int a2, int a3, int a4);
void proc5(String a1, int a2, int a3, int a4, int a5);
void proc6(String a1, int a2, int a3, int a4, int a5, int a6);
void heartbeatAndThrowIO();
void throwIO();
void neverComplete();
@ActivityMethod(
scheduleToStartTimeoutSeconds = 5,
scheduleToCloseTimeoutSeconds = 5,
heartbeatTimeoutSeconds = 5,
startToCloseTimeoutSeconds = 10
)
@MethodRetry(
initialIntervalSeconds = 1,
maximumIntervalSeconds = 1,
maximumAttempts = 3,
expirationSeconds = 100
)
void throwIOAnnotated();
List<UUID> activityUUIDList(List<UUID> arg);
}
private static class TestActivitiesImpl implements TestActivities {
final ActivityCompletionClient completionClient;
final List<String> invocations = Collections.synchronizedList(new ArrayList<>());
final List<String> procResult = Collections.synchronizedList(new ArrayList<>());
final AtomicInteger heartbeatCounter = new AtomicInteger();
private final ThreadPoolExecutor executor =
new ThreadPoolExecutor(0, 100, 1, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
int lastAttempt;
private TestActivitiesImpl(ActivityCompletionClient completionClient) {
this.completionClient = completionClient;
}
void close() throws InterruptedException {
executor.shutdownNow();
executor.awaitTermination(1, TimeUnit.MINUTES);
}
void assertInvocations(String... expected) {
assertEquals(Arrays.asList(expected), invocations);
}
@Override
public String activityWithDelay(long delay, boolean heartbeatMoreThanOnce) {
byte[] taskToken = Activity.getTaskToken();
executor.execute(
() -> {
invocations.add("activityWithDelay");
long start = System.currentTimeMillis();
try {
int count = 0;
while (System.currentTimeMillis() - start < delay) {
if (heartbeatMoreThanOnce || count == 0) {
completionClient.heartbeat(taskToken, "heartbeatValue");
}
count++;
Thread.sleep(100);
}
completionClient.complete(taskToken, "activity");
} catch (InterruptedException e) {
throw new RuntimeException("unexpected", e);
} catch (ActivityNotExistsException | ActivityCancelledException e) {
completionClient.reportCancellation(taskToken, null);
}
});
Activity.doNotCompleteOnReturn();
return "ignored";
}
@Override
public String sleepActivity(long milliseconds, int input) {
try {
Thread.sleep(milliseconds);
} catch (InterruptedException e) {
throw Activity.wrap(new RuntimeException("interrupted"));
}
invocations.add("sleepActivity");
return "sleepActivity" + input;
}
@Override
public String activity() {
invocations.add("activity");
return "activity";
}
@Override
public int activity1(int a1) {
invocations.add("activity1");
return a1;
}
@Override
public String activity2(String a1, int a2) {
invocations.add("activity2");
return a1 + a2;
}
@Override
public String activity3(String a1, int a2, int a3) {
invocations.add("activity3");
return a1 + a2 + a3;
}
@Override
public String activity4(String a1, int a2, int a3, int a4) {
byte[] taskToken = Activity.getTaskToken();
executor.execute(
() -> {
invocations.add("activity4");
completionClient.complete(taskToken, a1 + a2 + a3 + a4);
});
Activity.doNotCompleteOnReturn();
return "ignored";
}
@Override
public String activity5(String a1, int a2, int a3, int a4, int a5) {
WorkflowExecution execution = Activity.getWorkflowExecution();
String id = Activity.getTask().getActivityId();
executor.execute(
() -> {
invocations.add("activity5");
completionClient.complete(execution, id, a1 + a2 + a3 + a4 + a5);
});
Activity.doNotCompleteOnReturn();
return "ignored";
}
@Override
public String activity6(String a1, int a2, int a3, int a4, int a5, int a6) {
invocations.add("activity6");
return a1 + a2 + a3 + a4 + a5 + a6;
}
@Override
public void proc() {
invocations.add("proc");
procResult.add("proc");
}
@Override
public void proc1(String a1) {
invocations.add("proc1");
procResult.add(a1);
}
@Override
public void proc2(String a1, int a2) {
invocations.add("proc2");
procResult.add(a1 + a2);
}
@Override
public void proc3(String a1, int a2, int a3) {
invocations.add("proc3");
procResult.add(a1 + a2 + a3);
}
@Override
public void proc4(String a1, int a2, int a3, int a4) {
invocations.add("proc4");
procResult.add(a1 + a2 + a3 + a4);
}
@Override
public void proc5(String a1, int a2, int a3, int a4, int a5) {
invocations.add("proc5");
procResult.add(a1 + a2 + a3 + a4 + a5);
}
@Override
public void proc6(String a1, int a2, int a3, int a4, int a5, int a6) {
invocations.add("proc6");
procResult.add(a1 + a2 + a3 + a4 + a5 + a6);
}
@Override
public void heartbeatAndThrowIO() {
ActivityTask task = Activity.getTask();
assertEquals(task.getAttempt(), heartbeatCounter.get());
invocations.add("throwIO");
Optional<Integer> heartbeatDetails = Activity.getHeartbeatDetails(int.class);
assertEquals(heartbeatCounter.get(), (int) heartbeatDetails.orElse(0));
Activity.heartbeat(heartbeatCounter.incrementAndGet());
assertEquals(heartbeatCounter.get(), (int) Activity.getHeartbeatDetails(int.class).get());
try {
throw new IOException("simulated IO problem");
} catch (IOException e) {
throw Activity.wrap(e);
}
}
@Override
public void throwIO() {
assertEquals(DOMAIN, Activity.getTask().getWorkflowDomain());
assertNotNull(Activity.getTask().getWorkflowExecution());
assertNotNull(Activity.getTask().getWorkflowExecution().getWorkflowId());
assertFalse(Activity.getTask().getWorkflowExecution().getWorkflowId().isEmpty());
assertFalse(Activity.getTask().getWorkflowExecution().getRunId().isEmpty());
lastAttempt = Activity.getTask().getAttempt();
invocations.add("throwIO");
try {
throw new IOException("simulated IO problem");
} catch (IOException e) {
throw Activity.wrap(e);
}
}
@Override
public void neverComplete() {
invocations.add("neverComplete");
Activity.doNotCompleteOnReturn(); // Simulate activity timeout
}
@Override
public void throwIOAnnotated() {
invocations.add("throwIOAnnotated");
try {
throw new IOException("simulated IO problem");
} catch (IOException e) {
throw Activity.wrap(e);
}
}
@Override
public List<UUID> activityUUIDList(List<UUID> arg) {
return arg;
}
public int getLastAttempt() {
return lastAttempt;
}
}
public interface ProcInvocationQueryable {
@QueryMethod(name = "getTrace")
String query();
}
public interface TestMultiargsWorkflowsFunc {
@WorkflowMethod
String func();
}
public interface TestMultiargsWorkflowsFunc1 {
@WorkflowMethod(
name = "func1",
taskList = ANNOTATION_TASK_LIST,
workflowIdReusePolicy = WorkflowIdReusePolicy.RejectDuplicate,
executionStartToCloseTimeoutSeconds = 10
)
int func1(int input);
}
public interface TestMultiargsWorkflowsFunc2 {
@WorkflowMethod
String func2(String a1, int a2);
}
public interface TestMultiargsWorkflowsFunc3 {
@WorkflowMethod
String func3(String a1, int a2, int a3);
}
public interface TestMultiargsWorkflowsFunc4 {
@WorkflowMethod
String func4(String a1, int a2, int a3, int a4);
}
public interface TestMultiargsWorkflowsFunc5 {
@WorkflowMethod
String func5(String a1, int a2, int a3, int a4, int a5);
}
public interface TestMultiargsWorkflowsFunc6 {
@WorkflowMethod
String func6(String a1, int a2, int a3, int a4, int a5, int a6);
}
public interface TestMultiargsWorkflowsProc extends ProcInvocationQueryable {
@WorkflowMethod
void proc();
}
public interface TestMultiargsWorkflowsProc1 extends ProcInvocationQueryable {
@WorkflowMethod
void proc1(String input);
}
public interface TestMultiargsWorkflowsProc2 extends ProcInvocationQueryable {
@WorkflowMethod
void proc2(String a1, int a2);
}
public interface TestMultiargsWorkflowsProc3 extends ProcInvocationQueryable {
@WorkflowMethod
void proc3(String a1, int a2, int a3);
}
public interface TestMultiargsWorkflowsProc4 extends ProcInvocationQueryable {
@WorkflowMethod
void proc4(String a1, int a2, int a3, int a4);
}
public interface TestMultiargsWorkflowsProc5 extends ProcInvocationQueryable {
@WorkflowMethod
void proc5(String a1, int a2, int a3, int a4, int a5);
}
public interface TestMultiargsWorkflowsProc6 extends ProcInvocationQueryable {
@WorkflowMethod
void proc6(String a1, int a2, int a3, int a4, int a5, int a6);
}
public static class TestMultiargsWorkflowsImpl
implements TestMultiargsWorkflowsFunc,
TestMultiargsWorkflowsFunc1,
TestMultiargsWorkflowsFunc2,
TestMultiargsWorkflowsFunc3,
TestMultiargsWorkflowsFunc4,
TestMultiargsWorkflowsFunc5,
TestMultiargsWorkflowsFunc6,
TestMultiargsWorkflowsProc,
TestMultiargsWorkflowsProc1,
TestMultiargsWorkflowsProc2,
TestMultiargsWorkflowsProc3,
TestMultiargsWorkflowsProc4,
TestMultiargsWorkflowsProc5,
TestMultiargsWorkflowsProc6 {
private String procResult;
@Override
public String func() {
return "func";
}
@Override
public int func1(int a1) {
return a1;
}
@Override
public String func2(String a1, int a2) {
return a1 + a2;
}
@Override
public String func3(String a1, int a2, int a3) {
return a1 + a2 + a3;
}
@Override
public String func4(String a1, int a2, int a3, int a4) {
return a1 + a2 + a3 + a4;
}
@Override
public String func5(String a1, int a2, int a3, int a4, int a5) {
return a1 + a2 + a3 + a4 + a5;
}
@Override
public String func6(String a1, int a2, int a3, int a4, int a5, int a6) {
return a1 + a2 + a3 + a4 + a5 + a6;
}
@Override
public void proc() {
procResult = "proc";
}
@Override
public void proc1(String a1) {
procResult = a1;
}
@Override
public void proc2(String a1, int a2) {
procResult = a1 + a2;
}
@Override
public void proc3(String a1, int a2, int a3) {
procResult = a1 + a2 + a3;
}
@Override
public void proc4(String a1, int a2, int a3, int a4) {
procResult = a1 + a2 + a3 + a4;
}
@Override
public void proc5(String a1, int a2, int a3, int a4, int a5) {
procResult = a1 + a2 + a3 + a4 + a5;
}
@Override
public void proc6(String a1, int a2, int a3, int a4, int a5, int a6) {
procResult = a1 + a2 + a3 + a4 + a5 + a6;
}
@Override
public String query() {
return procResult;
}
}
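/**
 * Exercises WorkflowThreadLocal versus WorkflowLocal: the thread-local updates made inside the
 * async procedures are not visible to the main workflow thread, while the workflow-local update is
 * visible to every thread of the run.
 */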
public static class TestWorkflowLocals implements TestWorkflow1 {
private final WorkflowThreadLocal<Integer> threadLocal =
WorkflowThreadLocal.withInitial(() -> 2);
private final WorkflowLocal<Integer> workflowLocal = WorkflowLocal.withInitial(() -> 5);
@Override
public String execute(String taskList) {
assertEquals(2, (int) threadLocal.get());
assertEquals(5, (int) workflowLocal.get());
Promise<Void> p1 =
Async.procedure(
() -> {
assertEquals(2, (int) threadLocal.get());
threadLocal.set(10);
Workflow.sleep(Duration.ofSeconds(1));
assertEquals(10, (int) threadLocal.get());
assertEquals(100, (int) workflowLocal.get());
});
Promise<Void> p2 =
Async.procedure(
() -> {
assertEquals(2, (int) threadLocal.get());
threadLocal.set(22);
workflowLocal.set(100);
assertEquals(22, (int) threadLocal.get());
});
p1.get();
p2.get();
return "result=" + threadLocal.get() + ", " + workflowLocal.get();
}
}
@Test
public void testWorkflowLocals() {
startWorkerFor(TestWorkflowLocals.class);
TestWorkflow1 workflowStub =
workflowClient.newWorkflowStub(
TestWorkflow1.class, newWorkflowOptionsBuilder(taskList).build());
String result = workflowStub.execute(taskList);
assertEquals("result=2, 100", result);
}
public static class TestSideEffectWorkflowImpl implements TestWorkflow1 {
@Override
public String execute(String taskList) {
TestActivities testActivities =
Workflow.newActivityStub(TestActivities.class, newActivityOptions1(taskList));
long workflowTime = Workflow.currentTimeMillis();
long time = Workflow.sideEffect(long.class, () -> workflowTime);
Workflow.sleep(Duration.ofSeconds(1));
String result;
if (workflowTime == time) {
result = "activity" + testActivities.activity1(1);
} else {
result = testActivities.activity2("activity2", 2);
}
return result;
}
}
@Test
public void testSideEffect() {
startWorkerFor(TestSideEffectWorkflowImpl.class);
TestWorkflow1 workflowStub =
workflowClient.newWorkflowStub(
TestWorkflow1.class, newWorkflowOptionsBuilder(taskList).build());
String result = workflowStub.execute(taskList);
assertEquals("activity1", result);
tracer.setExpected("sideEffect", "sleep PT1S", "executeActivity customActivity1");
}
private static final Map<String, Queue<Long>> mutableSideEffectValue =
Collections.synchronizedMap(new HashMap<>());
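/**
 * Workflow that records Workflow.mutableSideEffect four times with a validator that only accepts a
 * strictly larger value, so the smaller value polled from the queue is ignored and the previously
 * recorded value is returned instead.
 */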
public static class TestMutableSideEffectWorkflowImpl implements TestWorkflow1 {
@Override
public String execute(String taskList) {
StringBuilder result = new StringBuilder();
for (int i = 0; i < 4; i++) {
long value =
Workflow.mutableSideEffect(
"id1",
Long.class,
(o, n) -> n > o,
() -> mutableSideEffectValue.get(taskList).poll());
if (result.length() > 0) {
result.append(", ");
}
result.append(value);
// Sleep is here to ensure that mutableSideEffect works when replaying a history.
if (i >= 3) {
Workflow.sleep(Duration.ofSeconds(1));
}
}
return result.toString();
}
}
@Test
public void testMutableSideEffect() {
startWorkerFor(TestMutableSideEffectWorkflowImpl.class);
TestWorkflow1 workflowStub =
workflowClient.newWorkflowStub(
TestWorkflow1.class, newWorkflowOptionsBuilder(taskList).build());
ArrayDeque<Long> values = new ArrayDeque<>();
values.add(1234L);
values.add(1234L);
values.add(123L); // expected to be ignored as it is smaller than 1234.
values.add(3456L);
mutableSideEffectValue.put(taskList, values);
String result = workflowStub.execute(taskList);
assertEquals("1234, 1234, 1234, 3456", result);
}
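/**
 * Tracks which getVersion code paths have already executed once, letting the workflows below take
 * the non-replay branch on the first execution and the replay branch afterwards. A typical
 * versioned change, as exercised by these tests, looks like:
 *
 * <pre>{@code
 * int version = Workflow.getVersion("test_change", Workflow.DEFAULT_VERSION, 1);
 * if (version == Workflow.DEFAULT_VERSION) {
 *   // code path recorded by histories produced before the change
 * } else {
 *   // new code path
 * }
 * }</pre>
 */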
private static final Set<String> getVersionExecuted =
Collections.synchronizedSet(new HashSet<>());
public static class TestGetVersionWorkflowImpl implements TestWorkflow1 {
@Override
public String execute(String taskList) {
TestActivities testActivities =
Workflow.newActivityStub(TestActivities.class, newActivityOptions1(taskList));
// Test adding a version check in non-replay code.
int version = Workflow.getVersion("test_change", Workflow.DEFAULT_VERSION, 1);
assertEquals(version, 1);
String result = testActivities.activity2("activity2", 2);
// Test version change in non-replay code.
version = Workflow.getVersion("test_change", 1, 2);
assertEquals(version, 1);
result += "activity" + testActivities.activity1(1);
// Test adding a version check in replay code.
if (!getVersionExecuted.contains(taskList + "-test_change_2")) {
result += "activity" + testActivities.activity1(1); // This is executed in non-replay mode.
getVersionExecuted.add(taskList + "-test_change_2");
} else {
int version2 = Workflow.getVersion("test_change_2", Workflow.DEFAULT_VERSION, 1);
assertEquals(version2, Workflow.DEFAULT_VERSION);
result += "activity" + testActivities.activity1(1);
}
// Test get version in replay mode.
Workflow.sleep(1000);
version = Workflow.getVersion("test_change", 1, 2);
assertEquals(version, 1);
result += "activity" + testActivities.activity1(1);
return result;
}
}
@Test
public void testGetVersion() {
startWorkerFor(TestGetVersionWorkflowImpl.class);
TestWorkflow1 workflowStub =
workflowClient.newWorkflowStub(
TestWorkflow1.class, newWorkflowOptionsBuilder(taskList).build());
String result = workflowStub.execute(taskList);
assertEquals("activity22activity1activity1activity1", result);
tracer.setExpected(
"getVersion",
"executeActivity TestActivities::activity2",
"getVersion",
"executeActivity customActivity1",
"executeActivity customActivity1",
"sleep PT1S",
"getVersion",
"executeActivity customActivity1");
}
@Test
public void testDelayStart() {
Assume.assumeTrue("skipping for non docker tests", useExternalService);
int delaySeconds = 5;
startWorkerFor(TestGetVersionWorkflowImpl.class);
WorkflowOptions options =
newWorkflowOptionsBuilder(taskList).setDelayStart(Duration.ofSeconds(delaySeconds)).build();
LocalDateTime start = LocalDateTime.now();
System.out.printf("\n\nSTART: %s \n\n", start.toString());
TestWorkflow1 workflowStub = workflowClient.newWorkflowStub(TestWorkflow1.class, options);
String result = workflowStub.execute(taskList);
System.out.printf("\n\nRESULT: %s \n\n", result.toString());
LocalDateTime end = LocalDateTime.now();
System.out.printf("\n\nEND: %s \n\n", end.toString());
assertTrue(
"end time should be at least " + delaySeconds + " seconds after start",
start.plusSeconds(delaySeconds).isBefore(end));
}
public static class TestGetVersionWorkflow2Impl implements TestWorkflow1 {
@Override
public String execute(String taskList) {
// Test adding a version check in replay code.
if (!getVersionExecuted.contains(taskList + "-test_change_2")) {
getVersionExecuted.add(taskList + "-test_change_2");
Workflow.sleep(Duration.ofHours(1));
} else {
int version2 = Workflow.getVersion("test_change_2", Workflow.DEFAULT_VERSION, 1);
Workflow.sleep(Duration.ofHours(1));
int version3 = Workflow.getVersion("test_change_2", Workflow.DEFAULT_VERSION, 1);
assertEquals(version2, version3);
}
return "test";
}
}
@Test
public void testGetVersion2() {
Assume.assumeFalse("skipping for docker tests", useExternalService);
startWorkerFor(TestGetVersionWorkflow2Impl.class);
TestWorkflow1 workflowStub =
workflowClient.newWorkflowStub(
TestWorkflow1.class,
newWorkflowOptionsBuilder(taskList)
.setExecutionStartToCloseTimeout(Duration.ofHours(2))
.build());
workflowStub.execute(taskList);
}
static CompletableFuture<Boolean> executionStarted = new CompletableFuture<>();
public static class TestGetVersionWithoutDecisionEventWorkflowImpl implements TestWorkflow3 {
CompletablePromise<Boolean> signalReceived = Workflow.newPromise();
String result = "";
@Override
public String execute(String taskList) {
try {
if (!getVersionExecuted.contains("getVersionWithoutDecisionEvent")) {
// Execute getVersion in non-replay mode.
getVersionExecuted.add("getVersionWithoutDecisionEvent");
executionStarted.complete(true);
signalReceived.get();
} else {
// Execute getVersion in replay mode. In this case we have no decision event, only a
// signal.
int version = Workflow.getVersion("test_change", Workflow.DEFAULT_VERSION, 1);
if (version == Workflow.DEFAULT_VERSION) {
signalReceived.get();
result = "result 1";
} else {
result = "result 2";
}
return result;
}
} catch (Exception e) {
throw new RuntimeException("failed to get from signal");
}
throw new RuntimeException("unreachable");
}
@Override
public void signal1(String arg) {
signalReceived.complete(true);
}
@Override
public String getState() {
return result;
}
}
@Test
public void testGetVersionWithoutDecisionEvent() throws Exception {
Assume.assumeTrue("skipping as there will be no replay", disableStickyExecution);
executionStarted = new CompletableFuture<>();
getVersionExecuted.remove("getVersionWithoutDecisionEvent");
startWorkerFor(TestGetVersionWithoutDecisionEventWorkflowImpl.class);
TestWorkflow3 workflowStub =
workflowClient.newWorkflowStub(
TestWorkflow3.class, newWorkflowOptionsBuilder(taskList).build());
WorkflowClient.start(workflowStub::execute, taskList);
executionStarted.get();
workflowStub.signal1("test signal");
String result = workflowStub.execute(taskList);
assertEquals("result 1", result);
assertEquals("result 1", workflowStub.getState());
}
// The following test covers the scenario where the getVersion call is removed before a
// non-version-marker decision.
public static class TestGetVersionRemovedInReplayWorkflowImpl implements TestWorkflowQuery {
String result = "";
@Override
public String execute(String taskList) {
TestActivities testActivities =
Workflow.newActivityStub(TestActivities.class, newActivityOptions1(taskList));
// Test removing a version check in replay code.
if (!getVersionExecuted.contains(taskList)) {
int version = Workflow.getVersion("test_change", Workflow.DEFAULT_VERSION, 1);
if (version == Workflow.DEFAULT_VERSION) {
result = "activity" + testActivities.activity1(1);
} else {
result = testActivities.activity2("activity2", 2); // This is executed in non-replay mode.
}
getVersionExecuted.add(taskList);
} else {
result = testActivities.activity2("activity2", 2);
}
result += testActivities.activity();
return result;
}
@Override
public String query() {
return result;
}
}
@Test
public void testGetVersionRemovedInReplay() {
startWorkerFor(TestGetVersionRemovedInReplayWorkflowImpl.class);
TestWorkflowQuery workflowStub =
workflowClient.newWorkflowStub(
TestWorkflowQuery.class, newWorkflowOptionsBuilder(taskList).build());
String result = workflowStub.execute(taskList);
assertEquals("activity22activity", result);
tracer.setExpected(
"registerQuery TestWorkflowQuery::query",
"getVersion",
"executeActivity TestActivities::activity2",
"executeActivity TestActivities::activity");
assertEquals("activity22activity", workflowStub.query());
}
// The following test covers the scenario where the getVersion call is removed before another
// version-marker decision.
public static class TestGetVersionRemovedInReplay2WorkflowImpl implements TestWorkflowQuery {
String result = "";
@Override
public String execute(String taskList) {
TestActivities testActivities =
Workflow.newActivityStub(TestActivities.class, newActivityOptions1(taskList));
// Test removing a version check in replay code.
if (!getVersionExecuted.contains(taskList)) {
Workflow.getVersion("test_change", Workflow.DEFAULT_VERSION, 1);
Workflow.getVersion("test_change_2", Workflow.DEFAULT_VERSION, 2);
getVersionExecuted.add(taskList);
} else {
Workflow.getVersion("test_change_2", Workflow.DEFAULT_VERSION, 2);
}
result = testActivities.activity();
return result;
}
@Override
public String query() {
return result;
}
}
@Test
public void testGetVersionRemovedInReplay2() {
startWorkerFor(TestGetVersionRemovedInReplay2WorkflowImpl.class);
TestWorkflowQuery workflowStub =
workflowClient.newWorkflowStub(
TestWorkflowQuery.class, newWorkflowOptionsBuilder(taskList).build());
String result = workflowStub.execute(taskList);
assertEquals("activity", result);
tracer.setExpected(
"registerQuery TestWorkflowQuery::query",
"getVersion",
"getVersion",
"executeActivity TestActivities::activity");
assertEquals("activity", workflowStub.query());
}
// The following test covers the scenario where the getVersion call is removed before
// upsertSearchAttributes.
public static class TestGetVersionRemovedInReplay3WorkflowImpl implements TestWorkflow1 {
@Override
public String execute(String taskList) {
Map<String, Object> searchAttrMap = new HashMap<>();
searchAttrMap.put("CustomKeywordField", "abc");
// Test removing a version check in replay code.
if (!getVersionExecuted.contains(taskList)) {
Workflow.getVersion("test_change", Workflow.DEFAULT_VERSION, 1);
Workflow.upsertSearchAttributes(searchAttrMap);
getVersionExecuted.add(taskList);
} else {
Workflow.upsertSearchAttributes(searchAttrMap);
}
return "done";
}
}
@Test
public void testGetVersionRemovedInReplay3() {
startWorkerFor(TestGetVersionRemovedInReplay3WorkflowImpl.class);
TestWorkflow1 workflowStub =
workflowClient.newWorkflowStub(
TestWorkflow1.class, newWorkflowOptionsBuilder(taskList).build());
String result = workflowStub.execute(taskList);
assertEquals("done", result);
tracer.setExpected("getVersion", "upsertSearchAttributes");
}
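/**
 * Calling Workflow.getVersion with a minimum supported version higher than the one already
 * recorded in history throws an Error; the workflow below converts it into a regular failure so
 * the test can assert on the message.
 */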
public static class TestVersionNotSupportedWorkflowImpl implements TestWorkflow1 {
@Override
public String execute(String taskList) {
TestActivities testActivities =
Workflow.newActivityStub(TestActivities.class, newActivityOptions1(taskList));
// Test adding a version check in non-replay code.
int version = Workflow.getVersion("test_change", Workflow.DEFAULT_VERSION, 1);
String result = "";
if (version == Workflow.DEFAULT_VERSION) {
result += "activity" + testActivities.activity1(1);
} else {
result += testActivities.activity2("activity2", 2); // This is executed.
}
// Catching the Error thrown by getVersion is done here for unit-test purposes only.
// Never do this in production code.
try {
Workflow.getVersion("test_change", 2, 3);
} catch (Error e) {
throw Workflow.wrap(new Exception("unsupported change version"));
}
return result;
}
}
@Test
public void testVersionNotSupported() {
startWorkerFor(TestVersionNotSupportedWorkflowImpl.class);
TestWorkflow1 workflowStub =
workflowClient.newWorkflowStub(
TestWorkflow1.class, newWorkflowOptionsBuilder(taskList).build());
try {
workflowStub.execute(taskList);
fail("unreachable");
} catch (WorkflowException e) {
assertEquals("unsupported change version", e.getCause().getMessage());
}
}
public static class TestGetVersionAddedImpl implements TestWorkflow1 {
@Override
public String execute(String taskList) {
int versionNew = Workflow.getVersion("cid2", Workflow.DEFAULT_VERSION, 1);
assertEquals(-1, versionNew);
int version = Workflow.getVersion("cid1", Workflow.DEFAULT_VERSION, 1);
assertEquals(1, version);
TestActivities testActivities =
Workflow.newActivityStub(TestActivities.class, newActivityOptions1(taskList));
return "hello" + testActivities.activity1(1);
}
}
@Test
public void testGetVersionAdded() {
try {
WorkflowReplayer.replayWorkflowExecutionFromResource(
"testGetVersionHistory.json", TestGetVersionAddedImpl.class);
} catch (Exception e) {
e.printStackTrace();
fail();
}
}
@Test
public void testGetVersionAddedWithCadenceChangeVersion() {
try {
WorkflowReplayer.replayWorkflowExecutionFromResource(
"testGetVersionHistoryWithCadenceChangeVersion.json", TestGetVersionAddedImpl.class);
} catch (Exception e) {
e.printStackTrace();
fail();
}
}
public static class TestGetVersionRemovedImpl implements TestWorkflow1 {
@Override
public String execute(String taskList) {
// history contains cid1, but later getVersion is removed
TestActivities testActivities =
Workflow.newActivityStub(TestActivities.class, newActivityOptions1(taskList));
return "hello" + testActivities.activity1(1);
}
}
@Test
public void testGetVersionRemoved() {
try {
WorkflowReplayer.replayWorkflowExecutionFromResource(
"testGetVersionHistory.json", TestGetVersionRemovedImpl.class);
} catch (Exception e) {
e.printStackTrace();
fail();
}
}
public static class TestGetVersionRemoveAndAddImpl implements TestWorkflow1 {
@Override
public String execute(String taskList) {
int version = Workflow.getVersion("cid2", Workflow.DEFAULT_VERSION, 1);
assertEquals(-1, version);
TestActivities testActivities =
Workflow.newActivityStub(TestActivities.class, newActivityOptions1(taskList));
return "hello" + testActivities.activity1(1);
}
}
@Test
public void testGetVersionRemoveAndAdd() {
try {
WorkflowReplayer.replayWorkflowExecutionFromResource(
"testGetVersionHistory.json", TestGetVersionRemoveAndAddImpl.class);
} catch (Exception e) {
e.printStackTrace();
fail();
}
}
public interface DeterminismFailingWorkflow {
@WorkflowMethod
void execute(String taskList);
}
public static class DeterminismFailingWorkflowImpl implements DeterminismFailingWorkflow {
@Override
public void execute(String taskList) {
TestActivities activities =
Workflow.newActivityStub(TestActivities.class, newActivityOptions1(taskList));
if (!Workflow.isReplaying()) {
activities.activity1(1);
}
}
}
@Test
public void testNonDeterministicWorkflowPolicyBlockWorkflow() {
Assume.assumeTrue("skipping as no replay in sticky", disableStickyExecution);
startWorkerFor(DeterminismFailingWorkflowImpl.class);
WorkflowOptions options =
new WorkflowOptions.Builder()
.setExecutionStartToCloseTimeout(Duration.ofSeconds(10))
.setTaskStartToCloseTimeout(Duration.ofSeconds(1))
.setTaskList(taskList)
.build();
DeterminismFailingWorkflow workflowStub =
workflowClient.newWorkflowStub(DeterminismFailingWorkflow.class, options);
try {
workflowStub.execute(taskList);
fail("unreachable");
} catch (WorkflowTimedOutException e) {
// expected to time out as the workflow is going to get blocked.
}
int workflowRootThreads = 0;
ThreadInfo[] threads = ManagementFactory.getThreadMXBean().dumpAllThreads(false, false);
for (ThreadInfo thread : threads) {
if (thread.getThreadName().contains("workflow-root")) {
workflowRootThreads++;
}
}
assertTrue("workflow threads might leak", workflowRootThreads < 10);
}
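/**
 * Same non-deterministic workflow as above, but registered with the FailWorkflow policy so the
 * execution fails with a nondeterminism Error instead of blocking until the execution timeout.
 */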
@Test
public void testNonDeterministicWorkflowPolicyFailWorkflow() {
Assume.assumeTrue("skipping as no replay in sticky", disableStickyExecution);
WorkflowImplementationOptions implementationOptions =
new WorkflowImplementationOptions.Builder()
.setNonDeterministicWorkflowPolicy(FailWorkflow)
.build();
worker.registerWorkflowImplementationTypes(
implementationOptions, DeterminismFailingWorkflowImpl.class);
if (useExternalService) {
workerFactory.start();
} else {
testEnvironment.start();
}
WorkflowOptions options =
new WorkflowOptions.Builder()
.setExecutionStartToCloseTimeout(Duration.ofSeconds(1))
.setTaskStartToCloseTimeout(Duration.ofSeconds(1))
.setTaskList(taskList)
.build();
DeterminismFailingWorkflow workflowStub =
workflowClient.newWorkflowStub(DeterminismFailingWorkflow.class, options);
try {
workflowStub.execute(taskList);
fail("unreachable");
} catch (WorkflowFailureException e) {
// expected to fail with a nondeterminism error
assertTrue(e.getCause() instanceof Error);
String causeMsg = e.getCause().getMessage();
assertTrue(causeMsg, causeMsg.contains("nondeterministic"));
}
}
private static class TracingWorkflowInterceptorFactory
implements Function<WorkflowInterceptor, WorkflowInterceptor> {
private final FilteredTrace trace = new FilteredTrace();
private List<String> expected;
@Override
public WorkflowInterceptor apply(WorkflowInterceptor next) {
return new TracingWorkflowInterceptor(trace, next);
}
public String getTrace() {
return String.join("\n", trace.getImpl());
}
public void setExpected(String... expected) {
this.expected = Arrays.asList(expected);
}
public void assertExpected() {
if (expected != null) {
List<String> traceElements = trace.getImpl();
for (int i = 0; i < traceElements.size(); i++) {
String t = traceElements.get(i);
String expectedRegExp = expected.get(i);
Assert.assertTrue(t + " doesn't match " + expectedRegExp, t.matches(expectedRegExp));
}
}
}
}
public static class TestUUIDAndRandom implements TestWorkflow1 {
@Override
public String execute(String taskList) {
TestActivities activities =
Workflow.newActivityStub(TestActivities.class, newActivityOptions1(taskList));
Random rand1 = Workflow.newRandom();
int r11 = rand1.nextInt();
int r12 = r11 + rand1.nextInt();
int savedInt = Workflow.sideEffect(int.class, () -> r12);
String id = Workflow.randomUUID().toString() + "-" + Workflow.randomUUID().toString();
String savedId = Workflow.sideEffect(String.class, () -> id);
// Invoke activity in a blocking mode to ensure that asserts run after replay.
String result = activities.activity2("foo", 10);
// Assert that during replay values didn't change.
assertEquals(savedId, id);
assertEquals(savedInt, r12);
return result;
}
}
@Test
public void testUUIDAndRandom() {
startWorkerFor(TestUUIDAndRandom.class);
TestWorkflow1 workflowStub =
workflowClient.newWorkflowStub(
TestWorkflow1.class, newWorkflowOptionsBuilder(taskList).build());
String result = workflowStub.execute(taskList);
assertEquals("foo10", result);
tracer.setExpected("sideEffect", "sideEffect", "executeActivity TestActivities::activity2");
}
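/**
 * Activity taking generic collection arguments, used together with the workflow below to verify
 * that generic parameters, signals, queries, and results are serialized correctly.
 */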
public interface GenericParametersActivity {
List<UUID> execute(List<UUID> arg1, Set<UUID> arg2);
}
public static class GenericParametersActivityImpl implements GenericParametersActivity {
@Override
public List<UUID> execute(List<UUID> arg1, Set<UUID> arg2) {
List<UUID> result = new ArrayList<>();
result.addAll(arg1);
result.addAll(arg2);
return result;
}
}
public interface GenericParametersWorkflow {
@WorkflowMethod
List<UUID> execute(String taskList, List<UUID> arg1, Set<UUID> arg2);
@SignalMethod
void signal(List<UUID> arg);
@QueryMethod
List<UUID> query(List<UUID> arg);
}
public static class GenericParametersWorkflowImpl implements GenericParametersWorkflow {
private List<UUID> signaled;
private GenericParametersActivity activity;
@Override
public List<UUID> execute(String taskList, List<UUID> arg1, Set<UUID> arg2) {
Workflow.await(() -> signaled != null && signaled.isEmpty());
activity =
Workflow.newActivityStub(GenericParametersActivity.class, newActivityOptions1(taskList));
return activity.execute(arg1, arg2);
}
@Override
public void signal(List<UUID> arg) {
signaled = arg;
}
@Override
public List<UUID> query(List<UUID> arg) {
List<UUID> result = new ArrayList<>();
result.addAll(arg);
result.addAll(signaled);
return result;
}
}
@Test
public void testGenericParametersWorkflow() throws ExecutionException, InterruptedException {
worker.registerActivitiesImplementations(new GenericParametersActivityImpl());
startWorkerFor(GenericParametersWorkflowImpl.class);
GenericParametersWorkflow workflowStub =
workflowClient.newWorkflowStub(
GenericParametersWorkflow.class, newWorkflowOptionsBuilder(taskList).build());
List<UUID> uuidList = new ArrayList<>();
uuidList.add(UUID.randomUUID());
uuidList.add(UUID.randomUUID());
Set<UUID> uuidSet = new HashSet<>();
uuidSet.add(UUID.randomUUID());
uuidSet.add(UUID.randomUUID());
uuidSet.add(UUID.randomUUID());
CompletableFuture<List<UUID>> resultF =
WorkflowClient.execute(workflowStub::execute, taskList, uuidList, uuidSet);
// Test signal and query serialization
workflowStub.signal(uuidList);
sleep(Duration.ofSeconds(1));
List<UUID> queryArg = new ArrayList<>();
queryArg.add(UUID.randomUUID());
queryArg.add(UUID.randomUUID());
List<UUID> queryResult = workflowStub.query(queryArg);
List<UUID> expectedQueryResult = new ArrayList<>();
expectedQueryResult.addAll(queryArg);
expectedQueryResult.addAll(uuidList);
expectedQueryResult.sort(UUID::compareTo);
queryResult.sort(UUID::compareTo);
assertEquals(expectedQueryResult, queryResult);
workflowStub.signal(new ArrayList<>()); // empty list unblocks workflow await.
// test workflow result serialization
List<UUID> expectedResult = new ArrayList<>();
expectedResult.addAll(uuidList);
expectedResult.addAll(uuidSet);
List<UUID> result = resultF.get();
result.sort(UUID::compareTo);
expectedResult.sort(UUID::compareTo);
assertEquals(expectedResult, result);
// Workflow should still be queryable after completion, if QueryRejectionCondition is not set.
workflowStub.query(queryArg);
}
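/**
 * Exception holding an InputStream field that the JSON data converter cannot serialize. The tests
 * below verify that such failures still reach the caller with the exception name preserved in the
 * message rather than breaking the workflow.
 */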
public static class NonSerializableException extends RuntimeException {
@SuppressWarnings("unused")
private final InputStream file; // gson chokes on this field
public NonSerializableException() {
try {
file = new FileInputStream(File.createTempFile("foo", "bar"));
} catch (IOException e) {
throw Activity.wrap(e);
}
}
}
public interface NonSerializableExceptionActivity {
@ActivityMethod(scheduleToCloseTimeoutSeconds = 5)
void execute();
}
public static class NonSerializableExceptionActivityImpl
implements NonSerializableExceptionActivity {
@Override
public void execute() {
throw new NonSerializableException();
}
}
public static class TestNonSerializableExceptionInActivityWorkflow implements TestWorkflow1 {
@Override
public String execute(String taskList) {
NonSerializableExceptionActivity activity =
Workflow.newActivityStub(NonSerializableExceptionActivity.class);
try {
activity.execute();
} catch (ActivityFailureException e) {
return e.getMessage();
}
return "done";
}
}
@Test
public void testNonSerializableExceptionInActivity() {
worker.registerActivitiesImplementations(new NonSerializableExceptionActivityImpl());
startWorkerFor(TestNonSerializableExceptionInActivityWorkflow.class);
TestWorkflow1 workflowStub =
workflowClient.newWorkflowStub(
TestWorkflow1.class, newWorkflowOptionsBuilder(taskList).build());
String result = workflowStub.execute(taskList);
assertTrue(result.contains("NonSerializableException"));
}
public interface NonDeserializableArgumentsActivity {
@ActivityMethod(scheduleToCloseTimeoutSeconds = 5)
void execute(int arg);
}
public static class NonDeserializableExceptionActivityImpl
implements NonDeserializableArgumentsActivity {
@Override
public void execute(int arg) {}
}
public static class TestNonSerializableArgumentsInActivityWorkflow implements TestWorkflow1 {
@Override
public String execute(String taskList) {
StringBuilder result = new StringBuilder();
ActivityStub activity =
Workflow.newUntypedActivityStub(
new ActivityOptions.Builder()
.setScheduleToCloseTimeout(Duration.ofSeconds(5))
.build());
ActivityStub localActivity =
Workflow.newUntypedLocalActivityStub(
new LocalActivityOptions.Builder()
.setScheduleToCloseTimeout(Duration.ofSeconds(5))
.build());
try {
activity.execute("NonDeserializableArgumentsActivity::execute", Void.class, "boo");
} catch (ActivityFailureException e) {
result.append(e.getCause().getClass().getSimpleName());
}
result.append("-");
try {
localActivity.execute("NonDeserializableArgumentsActivity::execute", Void.class, "boo");
} catch (ActivityFailureException e) {
result.append(e.getCause().getClass().getSimpleName());
}
return result.toString();
}
}
@Test
public void testNonSerializableArgumentsInActivity() {
worker.registerActivitiesImplementations(new NonDeserializableExceptionActivityImpl());
startWorkerFor(TestNonSerializableArgumentsInActivityWorkflow.class);
TestWorkflow1 workflowStub =
workflowClient.newWorkflowStub(
TestWorkflow1.class, newWorkflowOptionsBuilder(taskList).build());
String result = workflowStub.execute(taskList);
assertEquals("DataConverterException-DataConverterException", result);
}
public interface NonSerializableExceptionChildWorkflow {
@WorkflowMethod
String execute(String taskList);
}
public static class NonSerializableExceptionChildWorkflowImpl
implements NonSerializableExceptionChildWorkflow {
@Override
public String execute(String taskList) {
throw new NonSerializableException();
}
}
public static class TestNonSerializableExceptionInChildWorkflow implements TestWorkflow1 {
@Override
public String execute(String taskList) {
NonSerializableExceptionChildWorkflow child =
Workflow.newChildWorkflowStub(NonSerializableExceptionChildWorkflow.class);
try {
child.execute(taskList);
} catch (ChildWorkflowFailureException e) {
return e.getMessage();
}
return "done";
}
}
@Test
public void testNonSerializableExceptionInChildWorkflow() {
startWorkerFor(
TestNonSerializableExceptionInChildWorkflow.class,
NonSerializableExceptionChildWorkflowImpl.class);
TestWorkflow1 workflowStub =
workflowClient.newWorkflowStub(
TestWorkflow1.class, newWorkflowOptionsBuilder(taskList).build());
String result = workflowStub.execute(taskList);
assertTrue(result.contains("NonSerializableException"));
}
public interface TestLargeWorkflow {
@WorkflowMethod
String execute(int activityCount, String taskList);
}
public interface TestLargeWorkflowActivity {
String activity();
}
public static class TestLargeWorkflowActivityImpl implements TestLargeWorkflowActivity {
@Override
public String activity() {
return "done";
}
}
public static class TestLargeHistory implements TestLargeWorkflow {
@Override
public String execute(int activityCount, String taskList) {
TestLargeWorkflowActivity activities =
Workflow.newActivityStub(TestLargeWorkflowActivity.class, newActivityOptions1(taskList));
List<Promise<String>> results = new ArrayList<>();
for (int i = 0; i < activityCount; i++) {
Promise<String> result = Async.function(activities::activity);
results.add(result);
}
Promise.allOf(results).get();
return "done";
}
}
@Test
@Ignore // Requires DEBUG_TIMEOUTS=true
public void testLargeHistory() {
final int activityCount = 1000;
worker.registerActivitiesImplementations(new TestLargeWorkflowActivityImpl());
startWorkerFor(TestLargeHistory.class);
TestLargeWorkflow workflowStub =
workflowClient.newWorkflowStub(
TestLargeWorkflow.class, newWorkflowOptionsBuilder(taskList).build());
long start = System.currentTimeMillis();
String result = workflowStub.execute(activityCount, taskList);
long duration = System.currentTimeMillis() - start;
log.info(testName.toString() + " duration is " + duration);
assertEquals("done", result);
}
public interface DecisionTimeoutWorkflow {
@WorkflowMethod(executionStartToCloseTimeoutSeconds = 10000)
String execute(String testName) throws InterruptedException;
}
public static class DecisionTimeoutWorkflowImpl implements DecisionTimeoutWorkflow {
@Override
public String execute(String testName) throws InterruptedException {
AtomicInteger count = retryCount.get(testName);
if (count == null) {
count = new AtomicInteger();
retryCount.put(testName, count);
Thread.sleep(2000);
}
return "some result";
}
}
@Test
public void testDecisionTimeoutWorkflow() throws InterruptedException {
startWorkerFor(DecisionTimeoutWorkflowImpl.class);
WorkflowOptions options =
new WorkflowOptions.Builder()
.setTaskList(taskList)
.setTaskStartToCloseTimeout(Duration.ofSeconds(1))
.build();
DecisionTimeoutWorkflow stub =
workflowClient.newWorkflowStub(DecisionTimeoutWorkflow.class, options);
String result = stub.execute(testName.getMethodName());
Assert.assertEquals("some result", result);
}
public static class TestLocalActivityWorkflowImpl implements TestWorkflow1 {
@Override
public String execute(String taskList) {
TestActivities localActivities =
Workflow.newLocalActivityStub(TestActivities.class, newLocalActivityOptions1());
try {
localActivities.throwIO();
} catch (ActivityFailureException e) {
try {
assertTrue(e.getMessage().contains("TestActivities::throwIO"));
assertTrue(e.getCause() instanceof IOException);
assertEquals("simulated IO problem", e.getCause().getMessage());
} catch (AssertionError ae) {
// Errors cause decision to fail. But we want workflow to fail in this case.
throw new RuntimeException(ae);
}
}
String laResult = localActivities.activity2("test", 123);
TestActivities normalActivities =
Workflow.newActivityStub(TestActivities.class, newActivityOptions1(taskList));
laResult = normalActivities.activity2(laResult, 123);
return laResult;
}
}
@Test
public void testLocalActivity() {
startWorkerFor(TestLocalActivityWorkflowImpl.class);
TestWorkflow1 workflowStub =
workflowClient.newWorkflowStub(
TestWorkflow1.class, newWorkflowOptionsBuilder(taskList).build());
String result = workflowStub.execute(taskList);
assertEquals("test123123", result);
assertEquals(activitiesImpl.toString(), 3, activitiesImpl.invocations.size());
}
public static class TestLocalActivityMultiBatchWorkflowImpl implements TestWorkflow1 {
@Override
public String execute(String taskList) {
TestActivities localActivities =
Workflow.newLocalActivityStub(TestActivities.class, newLocalActivityOptions1());
String result = "";
for (int i = 0; i < 5; i++) {
result += localActivities.sleepActivity(2000, i);
}
return result;
}
}
@Test
public void testLocalActivityMultipleBatches() {
startWorkerFor(TestLocalActivityMultiBatchWorkflowImpl.class);
WorkflowOptions options =
new WorkflowOptions.Builder()
.setExecutionStartToCloseTimeout(Duration.ofMinutes(5))
.setTaskStartToCloseTimeout(Duration.ofSeconds(5))
.setTaskList(taskList)
.build();
TestWorkflow1 workflowStub = workflowClient.newWorkflowStub(TestWorkflow1.class, options);
String result = workflowStub.execute(taskList);
assertEquals("sleepActivity0sleepActivity1sleepActivity2sleepActivity3sleepActivity4", result);
assertEquals(activitiesImpl.toString(), 5, activitiesImpl.invocations.size());
}
public static class TestParallelLocalActivityExecutionWorkflowImpl implements TestWorkflow1 {
@Override
public String execute(String taskList) {
TestActivities localActivities =
Workflow.newLocalActivityStub(TestActivities.class, newLocalActivityOptions1());
List<Promise<String>> results = new ArrayList<>(4);
for (int i = 1; i <= 4; i++) {
results.add(Async.function(localActivities::sleepActivity, (long) 1000 * i, i));
}
Promise<String> result2 =
Async.function(
() -> {
String result = "";
for (int i = 0; i < 3; i++) {
result += localActivities.sleepActivity(1000, 21);
}
return result;
});
return results.get(0).get()
+ results.get(1).get()
+ results.get(2).get()
+ results.get(3).get()
+ result2.get();
}
}
@Test
public void testParallelLocalActivityExecutionWorkflow() {
startWorkerFor(TestParallelLocalActivityExecutionWorkflowImpl.class);
WorkflowOptions options =
new WorkflowOptions.Builder()
.setExecutionStartToCloseTimeout(Duration.ofMinutes(5))
.setTaskStartToCloseTimeout(Duration.ofSeconds(5))
.setTaskList(taskList)
.build();
TestWorkflow1 workflowStub = workflowClient.newWorkflowStub(TestWorkflow1.class, options);
String result = workflowStub.execute(taskList);
assertEquals(
"sleepActivity1sleepActivity2sleepActivity3sleepActivity4sleepActivity21sleepActivity21sleepActivity21",
result);
}
public static final class TestLocalActivityAndQueryWorkflow implements TestWorkflowQuery {
String message = "initial value";
@Override
public String execute(String taskList) {
// Make sure decider is in the cache when we execute local activities.
TestActivities activities =
Workflow.newActivityStub(TestActivities.class, newActivityOptions1(taskList));
activities.activity();
TestActivities localActivities =
Workflow.newLocalActivityStub(TestActivities.class, newLocalActivityOptions1());
for (int i = 0; i < 5; i++) {
localActivities.sleepActivity(2000, i);
message = "run" + i;
}
return "done";
}
@Override
public String query() {
return message;
}
}
@Test
public void testLocalActivityAndQuery() throws InterruptedException {
Assume.assumeFalse("test for sticky on", disableStickyExecution);
startWorkerFor(TestLocalActivityAndQueryWorkflow.class);
WorkflowOptions options =
new WorkflowOptions.Builder()
.setExecutionStartToCloseTimeout(Duration.ofMinutes(5))
.setTaskStartToCloseTimeout(Duration.ofSeconds(5))
.setTaskList(taskList)
.build();
TestWorkflowQuery workflowStub =
workflowClient.newWorkflowStub(TestWorkflowQuery.class, options);
WorkflowClient.start(workflowStub::execute, taskList);
// Sleep for a while before querying, so that the first decision task is processed and cache is
// populated.
// This also makes sure that the query lands when the local activities are executing.
Thread.sleep(500);
// When sticky is on, query will block until the first batch completes (in 4sec),
// and the progress will be reflected in query result.
String queryResult = workflowStub.query();
assertEquals("run1", queryResult);
// By the time the next query processes, the next decision batch is complete.
// Again the progress will be reflected in query result.
assertEquals("run3", workflowStub.query());
String result = workflowStub.execute(taskList);
assertEquals("done", result);
assertEquals("run4", workflowStub.query());
activitiesImpl.assertInvocations(
"activity",
"sleepActivity",
"sleepActivity",
"sleepActivity",
"sleepActivity",
"sleepActivity");
}
@Test
public void testLocalActivityAndQueryStickyOff() throws InterruptedException {
Assume.assumeTrue("test for sticky off", disableStickyExecution);
startWorkerFor(TestLocalActivityAndQueryWorkflow.class);
WorkflowOptions options =
new WorkflowOptions.Builder()
.setExecutionStartToCloseTimeout(Duration.ofMinutes(5))
.setTaskStartToCloseTimeout(Duration.ofSeconds(5))
.setTaskList(taskList)
.build();
TestWorkflowQuery workflowStub =
workflowClient.newWorkflowStub(TestWorkflowQuery.class, options);
WorkflowClient.start(workflowStub::execute, taskList);
// Sleep for a while before querying, so that query is received while local activity is running.
Thread.sleep(500);
    // When sticky is off, the query is independent of the ongoing decision task, so it will
    // neither block nor see the current progress.
String queryResult = workflowStub.query();
assertEquals("initial value", queryResult);
// Sleep more to make sure the next query lands while we process the next batch of local
// activities.
// In this case only the progress from the first batch is reflected in the query.
Thread.sleep(4000);
queryResult = workflowStub.query();
assertEquals("run1", queryResult);
String result = workflowStub.execute(taskList);
assertEquals("done", result);
assertEquals("run4", workflowStub.query());
activitiesImpl.assertInvocations(
"activity",
"sleepActivity",
"sleepActivity",
"sleepActivity",
"sleepActivity",
"sleepActivity");
}
public interface SignalOrderingWorkflow {
@WorkflowMethod
List<String> run();
@SignalMethod(name = "testSignal")
void signal(String s);
}
public static class SignalOrderingWorkflowImpl implements SignalOrderingWorkflow {
private List<String> signals = new ArrayList<>();
@Override
public List<String> run() {
Workflow.await(() -> signals.size() == 3);
return signals;
}
@Override
public void signal(String s) {
signals.add(s);
}
}
@Test
public void testSignalOrderingWorkflow() {
startWorkerFor(SignalOrderingWorkflowImpl.class);
WorkflowOptions options =
new WorkflowOptions.Builder()
.setExecutionStartToCloseTimeout(Duration.ofMinutes(1))
.setTaskStartToCloseTimeout(Duration.ofSeconds(10))
.setTaskList(taskList)
.build();
SignalOrderingWorkflow workflowStub =
workflowClient.newWorkflowStub(SignalOrderingWorkflow.class, options);
WorkflowClient.start(workflowStub::run);
// Suspend polling so that all the signals will be received in the same decision task.
if (useExternalService) {
workerFactory.suspendPolling();
} else {
testEnvironment.getWorkerFactory().suspendPolling();
}
workflowStub.signal("test1");
workflowStub.signal("test2");
workflowStub.signal("test3");
if (useExternalService) {
workerFactory.resumePolling();
} else {
testEnvironment.getWorkerFactory().resumePolling();
}
List<String> result = workflowStub.run();
List<String> expected = Arrays.asList("test1", "test2", "test3");
assertEquals(expected, result);
}
public static class TestWorkflowResetReplayWorkflow implements TestWorkflow1 {
@Override
public String execute(String taskList) {
ChildWorkflowOptions workflowOptions =
new ChildWorkflowOptions.Builder()
.setTaskList(taskList)
.setRetryOptions(
new RetryOptions.Builder()
.setMaximumAttempts(3)
.setInitialInterval(Duration.ofSeconds(1))
.build())
.build();
ActivityOptions options =
new ActivityOptions.Builder()
.setTaskList(taskList)
.setHeartbeatTimeout(Duration.ofSeconds(5))
.setScheduleToCloseTimeout(Duration.ofSeconds(5))
.setScheduleToStartTimeout(Duration.ofSeconds(5))
.setStartToCloseTimeout(Duration.ofSeconds(10))
.build();
for (int i = 0; i < 10; i++) {
if (Workflow.newRandom().nextDouble() > 0.5) {
Workflow.getLogger("test").info("Execute child workflow");
TestMultiargsWorkflowsFunc stubF =
Workflow.newChildWorkflowStub(TestMultiargsWorkflowsFunc.class, workflowOptions);
stubF.func();
} else {
Workflow.getLogger("test").info("Execute activity");
TestActivities activities = Workflow.newActivityStub(TestActivities.class, options);
activities.activity();
}
}
return "done";
}
}
@Test
public void testWorkflowReset() throws Exception {
    // Leave the following code here; it can be uncommented to regenerate the history resource.
// startWorkerFor(TestWorkflowResetReplayWorkflow.class, TestMultiargsWorkflowsImpl.class);
// TestWorkflow1 workflowStub =
// workflowClient.newWorkflowStub(
// TestWorkflow1.class, newWorkflowOptionsBuilder(taskList).build());
// workflowStub.execute(taskList);
//
// try {
// Thread.sleep(60000000);
// } catch (InterruptedException e) {
// e.printStackTrace();
// }
// Avoid executing 4 times
Assume.assumeFalse("skipping for docker tests", useExternalService);
Assume.assumeFalse("skipping for sticky off", disableStickyExecution);
WorkflowReplayer.replayWorkflowExecutionFromResource(
"resetWorkflowHistory.json", TestWorkflowResetReplayWorkflow.class);
}
public interface GreetingWorkflow {
@WorkflowMethod
void createGreeting(String name);
}
public interface GreetingActivities {
@ActivityMethod(scheduleToCloseTimeoutSeconds = 60)
String composeGreeting(String string);
}
static class GreetingActivitiesImpl implements GreetingActivities {
@Override
public String composeGreeting(String string) {
try {
Thread.sleep(10000);
} catch (Exception e) {
System.out.println("Exception");
}
return "greetings: " + string;
}
}
/** GreetingWorkflow implementation that updates greeting after sleeping for 5 seconds. */
public static class TimerFiringWorkflowImpl implements GreetingWorkflow {
private final GreetingActivities activities =
Workflow.newActivityStub(GreetingActivities.class);
@Override
public void createGreeting(String name) {
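      // Overview: composeGreeting sleeps ~10 seconds, so promiseString1 completes late while
      // promiseString2 completes immediately. The 30-second await and the 20-second sleep below
      // rely on workflow timers; the replay test below exercises a recorded history in which a
      // timer fired earlier than its expected fire time.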
Promise<String> promiseString1 = Async.function(() -> activities.composeGreeting("1"));
Promise<String> promiseString2 = Async.function(() -> "aString2");
Set<Promise<String>> promiseSet = new HashSet<>();
promiseSet.add(promiseString1);
promiseSet.add(promiseString2);
Workflow.await(
Duration.ofSeconds(30), () -> promiseSet.stream().anyMatch(Promise::isCompleted));
promiseString1.get();
Workflow.sleep(Duration.ofSeconds(20));
promiseString2.get();
}
}
  // The server doesn't guarantee that the timer fire timestamp is greater than or equal to the
  // expected fire time. This test ensures that the client still fires the timer in this case.
@Test
public void testTimerFiringTimestampEarlierThanExpected() throws Exception {
// Avoid executing 4 times
Assume.assumeFalse("skipping for docker tests", useExternalService);
Assume.assumeFalse("skipping for sticky off", stickyOff);
WorkflowReplayer.replayWorkflowExecutionFromResource(
"timerfiring.json", TimerFiringWorkflowImpl.class);
}
private static class FilteredTrace {
private final List<String> impl = Collections.synchronizedList(new ArrayList<>());
public boolean add(String s) {
log.trace("FilteredTrace isReplaying=" + Workflow.isReplaying());
if (!Workflow.isReplaying()) {
return impl.add(s);
}
return true;
}
List<String> getImpl() {
return impl;
}
}
public interface TestCompensationWorkflow {
@WorkflowMethod
void compensate();
}
public static class TestMultiargsWorkflowsFuncImpl implements TestMultiargsWorkflowsFunc {
@Override
public String func() {
return "done";
}
}
public static class TestCompensationWorkflowImpl implements TestCompensationWorkflow {
@Override
public void compensate() {}
}
public interface TestSagaWorkflow {
@WorkflowMethod
String execute(String taskList, boolean parallelCompensation);
}
public static class TestSagaWorkflowImpl implements TestSagaWorkflow {
@Override
public String execute(String taskList, boolean parallelCompensation) {
TestActivities testActivities =
Workflow.newActivityStub(TestActivities.class, newActivityOptions1(taskList));
ChildWorkflowOptions workflowOptions =
new ChildWorkflowOptions.Builder().setTaskList(taskList).build();
TestMultiargsWorkflowsFunc stubF1 =
Workflow.newChildWorkflowStub(TestMultiargsWorkflowsFunc.class, workflowOptions);
Saga saga =
new Saga(
new Saga.Options.Builder().setParallelCompensation(parallelCompensation).build());
try {
testActivities.activity1(10);
saga.addCompensation(testActivities::activity2, "compensate", -10);
stubF1.func();
TestCompensationWorkflow compensationWorkflow =
Workflow.newChildWorkflowStub(TestCompensationWorkflow.class, workflowOptions);
saga.addCompensation(compensationWorkflow::compensate);
testActivities.throwIO();
saga.addCompensation(
() -> {
throw new RuntimeException("unreachable");
});
} catch (Exception e) {
saga.compensate();
}
return "done";
}
}
@Test
public void testSaga() {
startWorkerFor(
TestSagaWorkflowImpl.class,
TestMultiargsWorkflowsFuncImpl.class,
TestCompensationWorkflowImpl.class);
TestSagaWorkflow sagaWorkflow =
workflowClient.newWorkflowStub(
TestSagaWorkflow.class, newWorkflowOptionsBuilder(taskList).build());
sagaWorkflow.execute(taskList, false);
tracer.setExpected(
"executeActivity customActivity1",
"executeChildWorkflow TestMultiargsWorkflowsFunc::func",
"executeActivity TestActivities::throwIO",
"executeChildWorkflow TestCompensationWorkflow::compensate",
"executeActivity TestActivities::activity2");
}
@Test
public void testSagaParallelCompensation() {
startWorkerFor(
TestSagaWorkflowImpl.class,
TestMultiargsWorkflowsFuncImpl.class,
TestCompensationWorkflowImpl.class);
TestSagaWorkflow sagaWorkflow =
workflowClient.newWorkflowStub(
TestSagaWorkflow.class, newWorkflowOptionsBuilder(taskList).build());
sagaWorkflow.execute(taskList, true);
assertTrue(
tracer.getTrace().contains("executeChildWorkflow TestCompensationWorkflow::compensate"));
assertTrue(tracer.getTrace().contains("executeActivity TestActivities::activity2"));
}
public static class TestSignalExceptionWorkflowImpl implements TestWorkflowSignaled {
private boolean signaled = false;
@Override
public String execute() {
Workflow.await(() -> signaled);
return null;
}
@Override
public void signal1(String arg) {
for (int i = 0; i < 100; i++) {
Async.procedure(() -> System.out.println("test"));
}
throw new RuntimeException("exception in signal method");
}
}
@Test
public void testExceptionInSignal() throws InterruptedException {
startWorkerFor(TestSignalExceptionWorkflowImpl.class);
TestWorkflowSignaled signalWorkflow =
workflowClient.newWorkflowStub(
TestWorkflowSignaled.class, newWorkflowOptionsBuilder(taskList).build());
CompletableFuture<String> result = WorkflowClient.execute(signalWorkflow::execute);
signalWorkflow.signal1("test");
try {
result.get(1, TimeUnit.SECONDS);
fail("not reachable");
} catch (Exception e) {
// exception expected here.
}
// Suspend polling so that decision tasks are not retried. Otherwise it will affect our thread
// count.
if (useExternalService) {
workerFactory.suspendPolling();
} else {
testEnvironment.getWorkerFactory().suspendPolling();
}
// Wait for decision task retry to finish.
Thread.sleep(10000);
int workflowThreads = 0;
ThreadInfo[] threads = ManagementFactory.getThreadMXBean().dumpAllThreads(false, false);
for (ThreadInfo thread : threads) {
if (thread.getThreadName().startsWith("workflow")) {
workflowThreads++;
}
}
assertTrue(
"workflow threads might leak, #workflowThreads = " + workflowThreads, workflowThreads < 20);
}
public interface TestUpsertSearchAttributes {
@WorkflowMethod
String execute(String taskList, String keyword);
}
public static class TestUpsertSearchAttributesImpl implements TestUpsertSearchAttributes {
@Override
public String execute(String taskList, String keyword) {
SearchAttributes searchAttributes = Workflow.getWorkflowInfo().getSearchAttributes();
assertNull(searchAttributes);
Map<String, Object> searchAttrMap = new HashMap<>();
searchAttrMap.put("CustomKeywordField", keyword);
Workflow.upsertSearchAttributes(searchAttrMap);
searchAttributes = Workflow.getWorkflowInfo().getSearchAttributes();
assertEquals(
"testKey",
WorkflowUtils.getValueFromSearchAttributes(
searchAttributes, "CustomKeywordField", String.class));
      // Running the activity below ensures that we have one more decision task to be executed after
      // adding the search attributes. This helps with replaying the history one more time to check
      // against a possible NonDeterministicWorkflowError which could be caused by a missing
      // UpsertWorkflowSearchAttributes event in the history.
TestActivities activities =
Workflow.newActivityStub(TestActivities.class, newActivityOptions1(taskList));
activities.activity();
return "done";
}
}
@Test
public void testUpsertSearchAttributes() {
startWorkerFor(TestUpsertSearchAttributesImpl.class);
TestUpsertSearchAttributes testWorkflow =
workflowClient.newWorkflowStub(
TestUpsertSearchAttributes.class, newWorkflowOptionsBuilder(taskList).build());
String result = testWorkflow.execute(taskList, "testKey");
assertEquals("done", result);
tracer.setExpected("upsertSearchAttributes", "executeActivity TestActivities::activity");
}
public static class TestMultiargsWorkflowsFuncChild implements TestMultiargsWorkflowsFunc2 {
@Override
public String func2(String s, int i) {
WorkflowInfo wi = Workflow.getWorkflowInfo();
String parentId = wi.getParentWorkflowId();
return parentId;
}
}
public static class TestMultiargsWorkflowsFuncParent implements TestMultiargsWorkflowsFunc {
@Override
public String func() {
ChildWorkflowOptions workflowOptions =
new ChildWorkflowOptions.Builder()
.setExecutionStartToCloseTimeout(Duration.ofSeconds(100))
.setTaskStartToCloseTimeout(Duration.ofSeconds(60))
.build();
TestMultiargsWorkflowsFunc2 child =
Workflow.newChildWorkflowStub(TestMultiargsWorkflowsFunc2.class, workflowOptions);
String parentWorkflowId = Workflow.getWorkflowInfo().getParentWorkflowId();
String childsParentWorkflowId = child.func2(null, 0);
String result = String.format("%s - %s", parentWorkflowId, childsParentWorkflowId);
return result;
}
}
@Test
public void testParentWorkflowInfoInChildWorkflows() {
startWorkerFor(TestMultiargsWorkflowsFuncParent.class, TestMultiargsWorkflowsFuncChild.class);
String workflowId = "testParentWorkflowInfoInChildWorkflows";
WorkflowOptions workflowOptions =
newWorkflowOptionsBuilder(taskList).setWorkflowId(workflowId).build();
TestMultiargsWorkflowsFunc parent =
workflowClient.newWorkflowStub(TestMultiargsWorkflowsFunc.class, workflowOptions);
String result = parent.func();
String expected = String.format("%s - %s", null, workflowId);
assertEquals(expected, result);
}
private static class TracingWorkflowInterceptor implements WorkflowInterceptor {
private final FilteredTrace trace;
private final WorkflowInterceptor next;
private TracingWorkflowInterceptor(FilteredTrace trace, WorkflowInterceptor next) {
this.trace = trace;
this.next = Objects.requireNonNull(next);
}
@Override
public <R> Promise<R> executeActivity(
String activityName,
Class<R> resultClass,
Type resultType,
Object[] args,
ActivityOptions options) {
trace.add("executeActivity " + activityName);
return next.executeActivity(activityName, resultClass, resultType, args, options);
}
@Override
public <R> Promise<R> executeLocalActivity(
String activityName,
Class<R> resultClass,
Type resultType,
Object[] args,
LocalActivityOptions options) {
trace.add("executeLocalActivity " + activityName);
return next.executeLocalActivity(activityName, resultClass, resultType, args, options);
}
@Override
public <R> WorkflowResult<R> executeChildWorkflow(
String workflowType,
Class<R> resultClass,
Type resultType,
Object[] args,
ChildWorkflowOptions options) {
trace.add("executeChildWorkflow " + workflowType);
return next.executeChildWorkflow(workflowType, resultClass, resultType, args, options);
}
@Override
public Random newRandom() {
trace.add("newRandom");
return next.newRandom();
}
@Override
public Promise<Void> signalExternalWorkflow(
WorkflowExecution execution, String signalName, Object[] args) {
trace.add("signalExternalWorkflow " + execution.getWorkflowId() + " " + signalName);
return next.signalExternalWorkflow(execution, signalName, args);
}
@Override
public Promise<Void> cancelWorkflow(WorkflowExecution execution) {
trace.add("cancelWorkflow " + execution.getWorkflowId());
return next.cancelWorkflow(execution);
}
@Override
public void sleep(Duration duration) {
trace.add("sleep " + duration);
next.sleep(duration);
}
@Override
public boolean await(Duration timeout, String reason, Supplier<Boolean> unblockCondition) {
trace.add("await " + timeout + " " + reason);
return next.await(timeout, reason, unblockCondition);
}
@Override
public void await(String reason, Supplier<Boolean> unblockCondition) {
trace.add("await " + reason);
next.await(reason, unblockCondition);
}
@Override
public Promise<Void> newTimer(Duration duration) {
trace.add("newTimer " + duration);
return next.newTimer(duration);
}
@Override
public <R> R sideEffect(Class<R> resultClass, Type resultType, Func<R> func) {
trace.add("sideEffect");
return next.sideEffect(resultClass, resultType, func);
}
@Override
public <R> R mutableSideEffect(
String id, Class<R> resultClass, Type resultType, BiPredicate<R, R> updated, Func<R> func) {
trace.add("mutableSideEffect");
return next.mutableSideEffect(id, resultClass, resultType, updated, func);
}
@Override
public int getVersion(String changeID, int minSupported, int maxSupported) {
trace.add("getVersion");
return next.getVersion(changeID, minSupported, maxSupported);
}
@Override
public void continueAsNew(
Optional<String> workflowType, Optional<ContinueAsNewOptions> options, Object[] args) {
trace.add("continueAsNew");
next.continueAsNew(workflowType, options, args);
}
@Override
public void registerQuery(String queryType, Type[] argTypes, Func1<Object[], Object> callback) {
trace.add("registerQuery " + queryType);
next.registerQuery(queryType, argTypes, callback);
}
@Override
public UUID randomUUID() {
trace.add("randomUUID");
return next.randomUUID();
}
@Override
public void upsertSearchAttributes(Map<String, Object> searchAttributes) {
trace.add("upsertSearchAttributes");
next.upsertSearchAttributes(searchAttributes);
}
}
public static class TestGetVersionWorkflowRetryImpl implements TestWorkflow3 {
private String result = "";
@Override
public String execute(String taskList) {
int version = Workflow.getVersion("test_change", Workflow.DEFAULT_VERSION, 1);
int act = 0;
if (version == 1) {
ActivityOptions options =
new ActivityOptions.Builder()
.setTaskList(taskList)
.setHeartbeatTimeout(Duration.ofSeconds(5))
.setScheduleToCloseTimeout(Duration.ofSeconds(5))
.setScheduleToStartTimeout(Duration.ofSeconds(5))
.setStartToCloseTimeout(Duration.ofSeconds(10))
.setRetryOptions(
new RetryOptions.Builder()
.setMaximumAttempts(3)
.setInitialInterval(Duration.ofSeconds(1))
.build())
.build();
TestActivities testActivities = Workflow.newActivityStub(TestActivities.class, options);
act = testActivities.activity1(1);
}
result += "activity" + act;
return result;
}
@Override
public void signal1(String arg) {
Workflow.sleep(1000);
}
@Override
public String getState() {
return result;
}
}
@Test
public void testGetVersionRetry() throws ExecutionException, InterruptedException {
TestActivities activity = mock(TestActivities.class);
when(activity.activity1(1)).thenReturn(1);
worker.registerActivitiesImplementations(activity);
startWorkerFor(TestGetVersionWorkflowRetryImpl.class);
TestWorkflow3 workflowStub =
workflowClient.newWorkflowStub(
TestWorkflow3.class, newWorkflowOptionsBuilder(taskList).build());
CompletableFuture<String> result = WorkflowClient.execute(workflowStub::execute, taskList);
workflowStub.signal1("test");
assertEquals("activity1", result.get());
// test replay
assertEquals("activity1", workflowStub.getState());
}
@Test
public void testGetVersionWithRetryReplay() throws Exception {
// Avoid executing 4 times
Assume.assumeFalse("skipping for docker tests", useExternalService);
Assume.assumeFalse("skipping for sticky off", disableStickyExecution);
WorkflowReplayer.replayWorkflowExecutionFromResource(
"testGetVersionWithRetryHistory.json", TestGetVersionWorkflowRetryImpl.class);
}
}
| [
"\"USE_DOCKER_SERVICE\"",
"\"STICKY_OFF\""
]
| []
| [
"USE_DOCKER_SERVICE",
"STICKY_OFF"
]
| [] | ["USE_DOCKER_SERVICE", "STICKY_OFF"] | java | 2 | 0 | |
tests/network.py | from pyroute2 import netns, NDB, netlink, NSPopen
from contextlib import contextmanager
import ipaddress
import subprocess
import os
import os.path
"""
TODO: Add an introduction to network namespaces, veth interfaces, and bridges,
and explain why we use them here.
"""
BRIDGE_NF_CALL_IPTABLES = "/proc/sys/net/bridge/bridge-nf-call-iptables"
COMMAND_TIMEOUT = 60
@contextmanager
def managed_nspopen(*args, **kwds):
proc = NSPopen(*args, **kwds)
try:
yield proc
finally:
if proc.poll() is None:
# send SIGKILL to the process and wait for it to die if it's still
# running
proc.kill()
# If it's not dead after 2 seconds we throw an error
proc.communicate(timeout=2)
        # release proxy process resources
proc.release()
class VirtualLAN:
"""
    Helper class that creates a set of virtual nodes forming a simulated LAN.
    IP addresses are assigned to the nodes automatically from the given private
    subnet; the address of a virtual node is available as node.address.
    Internally, each node is a Linux network namespace whose veth peer is
    attached to a shared bridge, so all nodes can reach one another.
    Example (see also the usage sketch at the end of this module):
        lan = VirtualLAN("demo", "10.199.0.0/24")
        node_a = lan.create_node()
        node_b = lan.create_node()
        ...
        lan.destroy()
"""
def __init__(self, namePrefix, subnet):
ipnet = ipaddress.ip_network(subnet)
self.availableHosts = ipnet.hosts()
self.prefixLen = ipnet.prefixlen
self.namePrefix = namePrefix
self.nodes = []
# create the bridge
self.bridgeName = "%s-br" % (namePrefix,)
self.bridgeAddress = next(self.availableHosts)
self._add_bridge(self.bridgeName, self.bridgeAddress, self.prefixLen)
# Don't pass bridged IPv4 traffic to iptables' chains, so namespaces
        # can communicate irrespective of the host machine's iptables. This is
# needed in some docker instances (e.g. travis), where traffic was
# filtered at bridge level. See
# https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt
try:
with open(BRIDGE_NF_CALL_IPTABLES, "r") as f:
self.saved_bridge_nf_call_iptables = f.read()
with open(BRIDGE_NF_CALL_IPTABLES, "w") as f:
f.write("0\n")
except FileNotFoundError:
# In some environments this variable doesn't exist, we are ok with
# no changes in this case.
self.saved_bridge_nf_call_iptables = None
def create_node(self):
"""
Creates a VirtualNode which can access/be accessed from other nodes in
the virtual network.
"""
namespace = "%s-%s" % (self.namePrefix, len(self.nodes))
address = next(self.availableHosts)
node = VirtualNode(namespace, address, self.prefixLen)
self._add_interface_to_bridge(self.bridgeName, node.vethPeer)
self.nodes.append(node)
return node
def destroy(self):
"""
Destroys the objects created for the virtual network.
"""
for node in self.nodes:
node.destroy()
_remove_interface_if_exists(self.bridgeName)
if self.saved_bridge_nf_call_iptables is not None:
with open(BRIDGE_NF_CALL_IPTABLES, "w") as f:
f.write(self.saved_bridge_nf_call_iptables)
def _add_bridge(self, name, address, prefixLen):
"""
        Creates a bridge with the given name, address, and netmask prefix length.
"""
_remove_interface_if_exists(name)
with NDB() as ndb:
(
ndb.interfaces.create(ifname=name, kind="bridge", state="up")
.add_ip("%s/%s" % (address, prefixLen))
.commit()
)
def _add_interface_to_bridge(self, bridge, interface):
"""
        Adds the given interface to the bridge. In our use case, this interface
is usually the peer end of a veth pair with the other end inside a
network namespace, in which case after calling this function the namespace
will be able to communicate with the other nodes in the virtual network.
"""
with NDB() as ndb:
ndb.interfaces[bridge].add_port(interface).commit()
ndb.interfaces[interface].set(state="up").commit()
class VirtualNode:
"""
A virtual node inside a virtual network.
Internally, this corresponds to a Linux network namespace.
"""
def __init__(self, namespace, address, prefixLen):
self.namespace = namespace
self.address = address
self.prefixLen = prefixLen
self.vethPeer = namespace + "p"
self._add_namespace(namespace, address, prefixLen)
def destroy(self):
"""
Removes all objects created for the virtual node.
"""
_remove_interface_if_exists(self.vethPeer)
try:
netns.remove(self.namespace)
        except Exception:
# Namespace doesn't exist. Return silently.
pass
def run(self, command, user=os.getenv("USER")):
"""
        Executes a command as the given user inside this virtual node. Returns
        a context manager that yields an NSPopen object to control the process.
        NSPopen has the same API as subprocess.Popen.
"""
sudo_command = [
"sudo",
"-E",
"-u",
user,
"env",
"PATH=" + os.getenv("PATH"),
] + command
return managed_nspopen(
self.namespace,
sudo_command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
start_new_session=True,
)
def run_unmanaged(self, command, user=os.getenv("USER")):
"""
        Executes a command as the given user inside this virtual node. Returns
        an NSPopen object to control the process. NSPopen has the same API as
        subprocess.Popen. This NSPopen object needs to be released manually; in
        general you should prefer run(), where this is done automatically by the
        context manager.
"""
sudo_command = [
"sudo",
"-E",
"-u",
user,
"env",
"PATH=" + os.getenv("PATH"),
] + command
return NSPopen(
self.namespace,
sudo_command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
start_new_session=True,
)
def run_and_wait(self, command, name, timeout=COMMAND_TIMEOUT):
"""
        Waits for the command to exit successfully. If it exits with an error or
        times out, raises an exception containing the stdout and stderr of the process.
"""
with self.run(command) as proc:
try:
out, err = proc.communicate(timeout=timeout)
if proc.returncode > 0:
raise Exception(
"%s failed, out: %s\n, err: %s" % (name, out, err)
)
return out, err
except subprocess.TimeoutExpired:
proc.kill()
out, err = proc.communicate()
raise Exception(
"%s timed out after %d seconds. out: %s\n, err: %s"
% (name, timeout, out, err)
)
def _add_namespace(self, name, address, netmaskLength):
"""
Creates a namespace with the given name, and creates a veth interface
with one endpoint inside the namespace which has the given address and
        netmask length. The peer end of the veth interface can be used to connect the
namespace to a bridge.
"""
self._remove_namespace_if_exists(name)
netns.create(name)
veth_name = "veth0"
_remove_interface_if_exists(self.vethPeer)
with NDB() as ndb:
#
# Add netns to the NDB sources
#
# ndb.interfaces["lo"] is a short form of
# ndb.interfaces[{"target": "localhost", "ifname": "lo"}]
#
            # To address interfaces/addresses/routes within a netns, use
# ndb.interfaces[{"target": netns_name, "ifname": "lo"}]
ndb.sources.add(netns=name)
#
# Create veth
(
ndb.interfaces.create(
ifname=veth_name,
kind="veth",
peer=self.vethPeer,
state="up",
)
.commit()
.set(net_ns_fd=name)
.commit()
)
#
# .interfaces.wait() returns an interface object when
# it becomes available on the specified source
(
ndb.interfaces.wait(target=name, ifname=veth_name)
.set(state="up")
.add_ip("%s/%s" % (address, netmaskLength))
.commit()
)
#
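            # Bring the loopback interface inside the namespace up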
(
ndb.interfaces[{"target": name, "ifname": "lo"}]
.set(state="up")
.commit()
)
def _remove_namespace_if_exists(self, name):
"""
If the given namespace exists, removes it. Otherwise just returns
silently.
"""
try:
netns.remove(name)
except Exception:
# Namespace doesn't exist. Return silently.
pass
def ifdown(self):
"""
Bring the network interface down for this node
"""
with NDB() as ndb:
# bring it down and wait until success
ndb.interfaces[self.vethPeer].set(state="down").commit()
def ifup(self):
"""
Bring the network interface up for this node
"""
with NDB() as ndb:
# bring it up and wait until success
ndb.interfaces[self.vethPeer].set(state="up").commit()
def _remove_interface_if_exists(name):
"""
If the given interface exists, brings it down and removes it. Otherwise
just returns silently. A bridge is also an interface, so this can be
used for removing bridges too.
"""
with NDB() as ndb:
if name in ndb.interfaces:
try:
ndb.interfaces[name].remove().commit()
except netlink.exceptions.NetlinkError:
pass
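# A minimal usage sketch of the helpers above (illustrative only; it is not
# exercised by the test suite). It assumes the module is run with enough
# privileges to create namespaces and bridges, and that the 10.199.0.0/24
# subnet is unused on the host.
if __name__ == "__main__":
    lan = VirtualLAN("demo", "10.199.0.0/24")
    try:
        node_a = lan.create_node()
        node_b = lan.create_node()
        # Ping node_b from node_a through the shared bridge.
        out, _ = node_a.run_and_wait(
            ["ping", "-c", "1", str(node_b.address)], "ping"
        )
        print(out)
    finally:
        # Always clean up the namespaces, veth pairs and the bridge.
        lan.destroy()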
| []
| []
| [
"USER",
"PATH"
]
| [] | ["USER", "PATH"] | python | 2 | 0 | |
test/unit/models/test_proposals.py | import pytest
import sys
import os
import time
os.environ['SENTINEL_ENV'] = 'test'
os.environ['SENTINEL_CONFIG'] = os.path.normpath(os.path.join(os.path.dirname(__file__), '../../test_sentinel.conf'))
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../../../lib')))
import misc
import config
from models import GovernanceObject, Proposal, Vote
# clear DB tables before each execution
def setup():
# clear tables first
Vote.delete().execute()
Proposal.delete().execute()
GovernanceObject.delete().execute()
def teardown():
pass
# list of proposal govobjs to import for testing
@pytest.fixture
def go_list_proposals():
items = [
{u'AbsoluteYesCount': 1000,
u'AbstainCount': 7,
u'CollateralHash': u'acb67ec3f3566c9b94a26b70b36c1f74a010a37c0950c22d683cc50da324fdca',
u'DataHex': u'5b5b2270726f706f73616c222c207b22656e645f65706f6368223a20323132323532303430302c20226e616d65223a20226465616e2d6d696c6c65722d35343933222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a2032352e37352c202273746172745f65706f6368223a20313437343236313038362c202274797065223a20312c202275726c223a2022687474703a2f2f6461736863656e7472616c2e6f72672f6465616e2d6d696c6c65722d35343933227d5d5d',
u'DataString': u'[["proposal", {"end_epoch": 2122520400, "name": "dean-miller-5493", "payment_address": "yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui", "payment_amount": 25.75, "start_epoch": 1474261086, "type": 1, "url": "http://zaliascentral.org/dean-miller-5493"}]]',
u'Hash': u'dfd7d63979c0b62456b63d5fc5306dbec451180adee85876cbf5b28c69d1a86c',
u'IsValidReason': u'',
u'NoCount': 25,
u'YesCount': 1025,
u'fBlockchainValidity': True,
u'fCachedDelete': False,
u'fCachedEndorsed': False,
u'fCachedFunding': False,
u'fCachedValid': True},
{u'AbsoluteYesCount': 1000,
u'AbstainCount': 29,
u'CollateralHash': u'3efd23283aa98c2c33f80e4d9ed6f277d195b72547b6491f43280380f6aac810',
u'DataHex': u'5b5b2270726f706f73616c222c207b22656e645f65706f6368223a20323132323532303430302c20226e616d65223a20226665726e616e64657a2d37363235222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a2032352e37352c202273746172745f65706f6368223a20313437343236313038362c202274797065223a20312c202275726c223a2022687474703a2f2f6461736863656e7472616c2e6f72672f6665726e616e64657a2d37363235227d5d5d',
u'DataString': u'[["proposal", {"end_epoch": 2122520400, "name": "fernandez-7625", "payment_address": "yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui", "payment_amount": 25.75, "start_epoch": 1474261086, "type": 1, "url": "http://zaliascentral.org/fernandez-7625"}]]',
u'Hash': u'0523445762025b2e01a2cd34f1d10f4816cf26ee1796167e5b029901e5873630',
u'IsValidReason': u'',
u'NoCount': 56,
u'YesCount': 1056,
u'fBlockchainValidity': True,
u'fCachedDelete': False,
u'fCachedEndorsed': False,
u'fCachedFunding': False,
u'fCachedValid': True},
]
return items
# Proposal
@pytest.fixture
def proposal():
# NOTE: no governance_object_id is set
pobj = Proposal(
start_epoch=1483250400, # 2017-01-01
end_epoch=2122520400,
name="wine-n-cheeze-party",
url="https://zaliascentral.com/wine-n-cheeze-party",
payment_address="yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui",
payment_amount=13
)
# NOTE: this object is (intentionally) not saved yet.
    # We want to return a built, but unsaved, object
return pobj
def test_proposal_is_valid(proposal):
from zaliasd import ZaliasDaemon
import zaliaslib
zaliasd = ZaliasDaemon.from_zalias_conf(config.zalias_conf)
orig = Proposal(**proposal.get_dict()) # make a copy
# fixture as-is should be valid
assert proposal.is_valid() is True
# ============================================================
# ensure end_date not greater than start_date
# ============================================================
proposal.end_epoch = proposal.start_epoch
assert proposal.is_valid() is False
proposal.end_epoch = proposal.start_epoch - 1
assert proposal.is_valid() is False
proposal.end_epoch = proposal.start_epoch + 0
assert proposal.is_valid() is False
proposal.end_epoch = proposal.start_epoch + 1
assert proposal.is_valid() is True
# reset
proposal = Proposal(**orig.get_dict())
# ============================================================
# ensure valid proposal name
# ============================================================
proposal.name = ' heya!@209h '
assert proposal.is_valid() is False
proposal.name = "anything' OR 'x'='x"
assert proposal.is_valid() is False
proposal.name = ' '
assert proposal.is_valid() is False
proposal.name = ''
assert proposal.is_valid() is False
proposal.name = '0'
assert proposal.is_valid() is True
proposal.name = 'R66-Y'
assert proposal.is_valid() is True
# binary gibberish
proposal.name = zaliaslib.deserialise('22385c7530303933375c75303363375c75303232395c75303138635c75303064335c75303163345c75303264385c75303236615c75303134625c75303163335c75303063335c75303362385c75303266615c75303261355c75303266652f2b5c75303065395c75303164655c75303136655c75303338645c75303062385c75303138635c75303064625c75303064315c75303038325c75303133325c753032333222')
assert proposal.is_valid() is False
# reset
proposal = Proposal(**orig.get_dict())
# ============================================================
# ensure valid payment address
# ============================================================
proposal.payment_address = '7'
assert proposal.is_valid() is False
proposal.payment_address = 'YYE8KWYAUU5YSWSYMB3Q3RYX8XTUU9Y7UI'
assert proposal.is_valid() is False
proposal.payment_address = 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Uj'
assert proposal.is_valid() is False
proposal.payment_address = '221 B Baker St., London, United Kingdom'
assert proposal.is_valid() is False
# this is actually the Zalias foundation multisig address...
proposal.payment_address = '7gnwGHt17heGpG9Crfeh4KGpYNFugPhJdh'
assert proposal.is_valid() is False
proposal.payment_address = 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui'
assert proposal.is_valid() is True
# reset
proposal = Proposal(**orig.get_dict())
# validate URL
proposal.url = ' '
assert proposal.is_valid() is False
proposal.url = ' '
assert proposal.is_valid() is False
proposal.url = 'http://bit.ly/1e1EYJv'
assert proposal.is_valid() is True
proposal.url = 'https://example.com/resource.ext?param=1&other=2'
assert proposal.is_valid() is True
proposal.url = 'www.com'
assert proposal.is_valid() is True
proposal.url = 'v.ht/'
assert proposal.is_valid() is True
proposal.url = 'ipfs:///ipfs/QmPwwoytFU3gZYk5tSppumxaGbHymMUgHsSvrBdQH69XRx/'
assert proposal.is_valid() is True
proposal.url = '/ipfs/QmPwwoytFU3gZYk5tSppumxaGbHymMUgHsSvrBdQH69XRx/'
assert proposal.is_valid() is True
proposal.url = 's3://bucket/thing/anotherthing/file.pdf'
assert proposal.is_valid() is True
proposal.url = 'http://zqktlwi4fecvo6ri.onion/wiki/index.php/Main_Page'
assert proposal.is_valid() is True
proposal.url = 'ftp://ftp.funet.fi/pub/standards/RFC/rfc959.txt'
assert proposal.is_valid() is True
# gibberish URL
proposal.url = zaliaslib.deserialise('22687474703a2f2f5c75303330385c75303065665c75303362345c75303362315c75303266645c75303331345c625c75303134655c75303031615c75303139655c75303133365c75303264315c75303238655c75303364395c75303230665c75303363355c75303030345c75303336665c75303238355c75303165375c75303063635c75303139305c75303262615c75303239316a5c75303130375c75303362365c7530306562645c75303133335c75303335665c7530326562715c75303038655c75303332645c75303362645c75303064665c75303135654f365c75303237335c75303363645c7530333539275c75303165345c75303339615c75303365385c75303334345c75303130615c75303265662e5c75303231625c75303164356a5c75303232345c75303163645c75303336365c75303064625c75303339665c75303230305c75303337615c75303138395c75303263325c75303038345c75303066615c75303031335c75303233655c75303135345c75303165395c75303139635c75303239375c75303039355c75303038345c75303362305c7530306233435c75303135345c75303063665c75303163345c75303261335c75303362655c75303136305c75303139365c75303263665c75303131305c7530313031475c75303162645c75303338645c75303363325c75303138625c75303235625c75303266325c75303264635c75303139335c75303066665c75303066645c75303133625c75303234305c75303137615c75303062355c75303031645c75303238655c75303166315c75303232315c75303161615c75303265325c75303335625c75303333665c75303239345c75303335315c75303038345c75303339395c75303262385c75303132375c75303330357a5c75303263625c75303066305c75303062355c75303164335c75303338385c75303364385c75303130625c75303266325c75303137305c75303335315c75303030305c75303136385c75303039646d5c75303331315c75303236615c75303330375c75303332635c75303361635c665c75303363335c75303264365c75303238645c75303136395c7530323438635c75303163385c75303261355c75303164615c75303165375c75303337355c75303332645c7530333165755c75303131665c75303338375c75303135325c75303065325c75303135326c5c75303164325c75303164615c75303136645c75303061665c75303333375c75303264375c75303339375c75303139395c75303134635c75303165385c75303234315c75303336635c75303130645c75303230635c75303161615c75303339355c75303133315c75303064615c75303165615c75303336645c75303064325c75303337365c75303363315c75303132645c75303266305c75303064364f255c75303263635c75303162645c75303062385c75303238365c75303136395c75303337335c75303232335c75303336655c75303037665c75303062616b5c75303132365c75303233305c75303330645c75303362385c75303164355c75303166615c75303338395c75303062635c75303135325c75303334365c75303139645c75303135615c75303031395c75303061385c75303133615c75303338635c75303339625c75303261655c75303065395c75303362635c75303166385c75303031665c75303230615c75303263355c75303134335c75303361635c75303334355c75303236645c75303139365c75303362665c75303135615c75303137305c75303165395c75303231395c75303332665c75303232645c75303030365c75303066305c75303134665c75303337375c75303234325d5c75303164325c75303337655c75303265665c75303331395c75303261355c75303265385c75303338395c75303235645c75303334315c75303338395c7530323230585c75303062645c75303166365c75303238645c75303231375c75303066665c75303130385c75303331305c75303330335c75303031395c75303039635c75303363315c75303039615c75303334355c75303331305c75303162335c75303263315c75303132395c75303234335c75303038627c5c75303361335c75303261635c75303165655c75303030305c75303237615c75303038385c75303066355c75303232375c75303236635c75303236355c7530336336205c75303038615c7530333561787c735c75303336305c75303362655c75303235385c75303334345c75303264365c75303262355c75303361315c75303135345c75303131625c75303061625c75303038615c75303332655c75303238325c75303031393d5c75303263335c75303332655c75303163645c75303139305c75303231305c75303131365c75303334305c75303234665c75303162635c75303333645c75303135305c75303132335c7530323
3645c75303133345c75303062327a5c75303331635c75303136312a5c753032316522')
assert proposal.is_valid() is False
# reset
proposal = Proposal(**orig.get_dict())
# ============================================================
# ensure proposal can't request negative zalias
# ============================================================
proposal.payment_amount = -1
assert proposal.is_valid() is False
def test_proposal_is_expired(proposal):
cycle = 24 # testnet
now = misc.now()
proposal.start_epoch = now - (86400 * 2) # two days ago
proposal.end_epoch = now - (60 * 60) # expired one hour ago
assert proposal.is_expired(superblockcycle=cycle) is False
# fudge factor + a 24-block cycle == an expiry window of 9086, so...
proposal.end_epoch = now - 9085
assert proposal.is_expired(superblockcycle=cycle) is False
proposal.end_epoch = now - 9087
assert proposal.is_expired(superblockcycle=cycle) is True
def test_proposal_is_deletable(proposal):
now = misc.now()
assert proposal.is_deletable() is False
proposal.end_epoch = now - (86400 * 29)
assert proposal.is_deletable() is False
# add a couple seconds for time variance
proposal.end_epoch = now - ((86400 * 30) + 2)
assert proposal.is_deletable() is True
# deterministic ordering
def test_approved_and_ranked(go_list_proposals):
from zaliasd import ZaliasDaemon
zaliasd = ZaliasDaemon.from_zalias_conf(config.zalias_conf)
for item in go_list_proposals:
(go, subobj) = GovernanceObject.import_gobject_from_zaliasd(zaliasd, item)
prop_list = Proposal.approved_and_ranked(proposal_quorum=1, next_superblock_max_budget=60)
assert prop_list[0].object_hash == u'dfd7d63979c0b62456b63d5fc5306dbec451180adee85876cbf5b28c69d1a86c'
assert prop_list[1].object_hash == u'0523445762025b2e01a2cd34f1d10f4816cf26ee1796167e5b029901e5873630'
| []
| []
| [
"SENTINEL_ENV",
"SENTINEL_CONFIG"
]
| [] | ["SENTINEL_ENV", "SENTINEL_CONFIG"] | python | 2 | 0 | |
operators/matmul_autogenerated_test.go |
package operators
import (
"os"
"testing"
onnx "github.com/owulveryck/onnx-go"
"github.com/stretchr/testify/assert"
"gorgonia.org/gorgonia"
"gorgonia.org/tensor"
)
// TestMatmul_2d is autogenerated from test_matmul_2d
func TestMatmul_2d(t *testing.T) {
debug := os.Getenv("SKIP_NOT_IMPLEMENTED")
skip := true
if debug == "false" {
skip = false
}
assert := assert.New(t)
g := gorgonia.NewGraph()
op := &Matmul{}
attributes := []*onnx.AttributeProto{
}
if len(attributes) != 0 {
err := op.Init(attributes)
t.Logf("Info: operator %#v", op)
if err != nil {
_, ok := err.(*onnx.ErrNotImplemented)
if ok && skip {
t.Skip(err)
}
t.Fatal(err)
}
}
a := gorgonia.NodeFromAny(g,
tensor.New(
tensor.WithShape(3, 4),
tensor.WithBacking([]float32{1.7640524, 0.4001572, 0.978738, 2.2408931, 1.867558, -0.9772779, 0.95008844, -0.1513572, -0.10321885, 0.41059852, 0.14404356, 1.4542735})),
gorgonia.WithName("a"))
b := gorgonia.NodeFromAny(g,
tensor.New(
tensor.WithShape(4, 3),
tensor.WithBacking([]float32{0.7610377, 0.121675014, 0.44386324, 0.33367434, 1.4940791, -0.20515826, 0.3130677, -0.85409576, -2.5529897, 0.6536186, 0.8644362, -0.742165})),
gorgonia.WithName("b"))
cT := tensor.New(
tensor.WithShape(3, 3),
tensor.WithBacking([]float32{3.247133, 1.9136808, -3.4609182, 1.2937015, -2.1752005, -1.2837971, 1.0540882, 1.7350072, -1.5771054}))
c := new(gorgonia.Node)
o, err := op.Apply(
a,b,
)
if err != nil {
_, ok := err.(*onnx.ErrNotImplemented)
if ok && skip {
t.Skip(err)
}
_, ok = err.(*gorgonia.ErrNotImplemented)
if ok && skip {
t.Skip(err)
}
t.Fatal(err)
}
c = o[0]
machine := gorgonia.NewTapeMachine(g)
if err = machine.RunAll(); err != nil {
t.Fatal(err)
}
assert.Equal(cT.Shape(), c.Shape(), "Tensors should be the same")
assert.InDeltaSlice(cT.Data(), c.Value().Data(), 1e-5,"Tensors should be the same")
}
// TestMatmul_3d is autogenerated from test_matmul_3d
func TestMatmul_3d(t *testing.T) {
debug := os.Getenv("SKIP_NOT_IMPLEMENTED")
skip := true
if debug == "false" {
skip = false
}
assert := assert.New(t)
g := gorgonia.NewGraph()
op := &Matmul{}
attributes := []*onnx.AttributeProto{
}
if len(attributes) != 0 {
err := op.Init(attributes)
t.Logf("Info: operator %#v", op)
if err != nil {
_, ok := err.(*onnx.ErrNotImplemented)
if ok && skip {
t.Skip(err)
}
t.Fatal(err)
}
}
a := gorgonia.NodeFromAny(g,
tensor.New(
tensor.WithShape(2, 3, 4),
tensor.WithBacking([]float32{2.2697546, -1.4543657, 0.045758516, -0.18718386, 1.5327792, 1.4693588, 0.15494743, 0.37816253, -0.88778573, -1.9807965, -0.34791216, 0.15634897, 1.2302907, 1.2023798, -0.3873268, -0.30230275, -1.048553, -1.420018, -1.7062702, 1.9507754, -0.5096522, -0.4380743, -1.2527953, 0.7774904})),
gorgonia.WithName("a"))
b := gorgonia.NodeFromAny(g,
tensor.New(
tensor.WithShape(2, 4, 3),
tensor.WithBacking([]float32{-1.6138978, -0.21274029, -0.89546657, 0.3869025, -0.51080513, -1.1806322, -0.028182229, 0.42833188, 0.06651722, 0.3024719, -0.6343221, -0.36274117, -0.67246044, -0.35955316, -0.8131463, -1.7262826, 0.17742614, -0.40178093, -1.6301984, 0.46278226, -0.9072984, 0.051945396, 0.7290906, 0.12898292})),
gorgonia.WithName("b"))
cT := tensor.New(
tensor.WithShape(2, 3, 3),
tensor.WithBacking([]float32{-4.2837567, 0.3983639, -0.24447522, -1.7952335, -1.2501478, -3.2341933, 0.7235164, 0.9524713, 3.053718, -2.287253, -0.62867534, -1.1710705, 6.0393553, 0.7577226, 3.222876, 3.181653, 0.09261069, 1.8273739}))
c := new(gorgonia.Node)
o, err := op.Apply(
a,b,
)
if err != nil {
_, ok := err.(*onnx.ErrNotImplemented)
if ok && skip {
t.Skip(err)
}
_, ok = err.(*gorgonia.ErrNotImplemented)
if ok && skip {
t.Skip(err)
}
t.Fatal(err)
}
c = o[0]
machine := gorgonia.NewTapeMachine(g)
if err = machine.RunAll(); err != nil {
t.Fatal(err)
}
assert.Equal(cT.Shape(), c.Shape(), "Tensors should be the same")
assert.InDeltaSlice(cT.Data(), c.Value().Data(), 1e-5,"Tensors should be the same")
}
// TestMatmul_4d is autogenerated from test_matmul_4d
func TestMatmul_4d(t *testing.T) {
debug := os.Getenv("SKIP_NOT_IMPLEMENTED")
skip := true
if debug == "false" {
skip = false
}
assert := assert.New(t)
g := gorgonia.NewGraph()
op := &Matmul{}
attributes := []*onnx.AttributeProto{
}
if len(attributes) != 0 {
err := op.Init(attributes)
t.Logf("Info: operator %#v", op)
if err != nil {
_, ok := err.(*onnx.ErrNotImplemented)
if ok && skip {
t.Skip(err)
}
t.Fatal(err)
}
}
a := gorgonia.NodeFromAny(g,
tensor.New(
tensor.WithShape(1, 2, 3, 4),
tensor.WithBacking([]float32{1.1394007, -1.2348258, 0.40234163, -0.6848101, -0.87079716, -0.5788497, -0.31155252, 0.05616534, -1.1651498, 0.9008265, 0.46566245, -1.5362437, 1.4882522, 1.8958892, 1.1787796, -0.17992483, -1.0707526, 1.0544517, -0.40317693, 1.222445, 0.20827498, 0.97663903, 0.3563664, 0.7065732})),
gorgonia.WithName("a"))
b := gorgonia.NodeFromAny(g,
tensor.New(
tensor.WithShape(1, 2, 4, 3),
tensor.WithBacking([]float32{0.01050002, 1.7858706, 0.12691209, 0.40198937, 1.8831507, -1.347759, -1.270485, 0.9693967, -1.1731234, 1.9436212, -0.41361898, -0.7474548, 1.922942, 1.4805148, 1.867559, 0.90604466, -0.86122566, 1.9100649, -0.26800337, 0.8024564, 0.947252, -0.15501009, 0.61407936, 0.9222067})),
gorgonia.WithName("b"))
cT := tensor.New(
tensor.WithShape(1, 2, 3, 3),
tensor.WithBacking([]float32{-2.3266034, 0.38273817, 1.8487196, 0.26315218, -2.970441, 0.99314374, -3.2276042, 0.70241654, -0.75997543, 4.291556, 1.4060221, 7.3513436, -1.1850535, -2.0662394, 0.75981444, 1.080346, 0.1871081, 3.243585}))
c := new(gorgonia.Node)
o, err := op.Apply(
a,b,
)
if err != nil {
_, ok := err.(*onnx.ErrNotImplemented)
if ok && skip {
t.Skip(err)
}
_, ok = err.(*gorgonia.ErrNotImplemented)
if ok && skip {
t.Skip(err)
}
t.Fatal(err)
}
c = o[0]
machine := gorgonia.NewTapeMachine(g)
if err = machine.RunAll(); err != nil {
t.Fatal(err)
}
assert.Equal(cT.Shape(), c.Shape(), "Tensors should be the same")
assert.InDeltaSlice(cT.Data(), c.Value().Data(), 1e-5,"Tensors should be the same")
}
| [
"\"SKIP_NOT_IMPLEMENTED\"",
"\"SKIP_NOT_IMPLEMENTED\"",
"\"SKIP_NOT_IMPLEMENTED\""
]
| []
| [
"SKIP_NOT_IMPLEMENTED"
]
| [] | ["SKIP_NOT_IMPLEMENTED"] | go | 1 | 0 | |
openebs/pkg/volume/v1alpha1/snapshot.go | /*
Copyright 2017 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"github.com/golang/glog"
v1alpha1 "github.com/kubernetes-incubator/external-storage/openebs/pkg/apis/openebs.io/v1alpha1"
)
// CreateSnapshot creates a snapshot of a volume through an API call to m-apiserver
func (v CASVolume) CreateSnapshot(castype, volName, snapName, namespace string) (string, error) {
addr := os.Getenv("MAPI_ADDR")
if addr == "" {
err := errors.New("MAPI_ADDR environment variable not set")
return "Error getting maya-apiserver IP Address", err
}
var snap v1alpha1.CASSnapshot
snap.Namespace = namespace
snap.Name = snapName
snap.Spec.CasType = castype
snap.Spec.VolumeName = volName
url := addr + "/latest/snapshots/"
	// json.Marshal serializes the snapshot spec into a JSON document
snapBytes, _ := json.Marshal(snap)
glog.Infof("snapshot Spec Created:\n%v\n", string(snapBytes))
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(snapBytes))
	if err != nil {
		return "Error creating HTTP request for maya-apiserver", err
	}
	req.Header.Add("Content-Type", "application/json")
c := &http.Client{
Timeout: timeout,
}
resp, err := c.Do(req)
if err != nil {
glog.Errorf("Error when connecting maya-apiserver %v", err)
return "Could not connect to maya-apiserver", err
}
defer resp.Body.Close()
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
glog.Errorf("Unable to read response from maya-apiserver %v", err)
return "Unable to read response from maya-apiserver", err
}
code := resp.StatusCode
if err == nil && code != http.StatusOK {
return "HTTP Status error from maya-apiserver", fmt.Errorf(string(data))
}
if code != http.StatusOK {
glog.Errorf("Status error: %v\n", http.StatusText(code))
return "HTTP Status error from maya-apiserver", err
}
glog.Infof("Snapshot Successfully Created:\n%v\n", string(data))
return "Snapshot Successfully Created", nil
}
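// Illustrative usage sketch (not part of the original source; the names and values below are
// hypothetical, and MAPI_ADDR must point at a reachable maya-apiserver):
//   v := CASVolume{}
//   msg, err := v.CreateSnapshot("jiva", "pvc-1234", "snap-1", "default")
//   if err != nil {
//       glog.Errorf("%s: %v", msg, err)
//   }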
// ListSnapshot gets the list of snapshots for a volume through an API call to m-apiserver
func (v CASVolume) ListSnapshot(volName string, snapname string, namespace string, obj interface{}) error {
addr := os.Getenv("MAPI_ADDR")
if addr == "" {
err := errors.New("MAPI_ADDR environment variable not set")
return err
}
url := addr + "/latest/snapshots/"
glog.V(2).Infof("[DEBUG] Get details for Volume :%v", string(volName))
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return err
}
req.Header.Set("namespace", namespace)
c := &http.Client{
Timeout: timeout,
}
resp, err := c.Do(req)
if err != nil {
glog.Errorf("Error when connecting to maya-apiserver %v", err)
return err
}
defer resp.Body.Close()
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
glog.Errorf("Unable to read response from maya-apiserver %v", err)
return err
}
code := resp.StatusCode
if err == nil && code != http.StatusOK {
return fmt.Errorf(string(data))
}
if code != http.StatusOK {
glog.Errorf("HTTP Status error from maya-apiserver: %v\n", http.StatusText(code))
return err
}
glog.V(2).Info("volume Details Successfully Retrieved")
// resp.Body has already been consumed by ioutil.ReadAll above, so decode from the buffered bytes
return json.Unmarshal(data, obj)
}
// // RevertSnapshot revert a snapshot of volume by invoking the API call to m-apiserver
// func (v CASVolume) RevertSnapshot(volName string, snapName string) (string, error) {
// addr := os.Getenv("MAPI_ADDR")
// if addr == "" {
// err := errors.New("MAPI_ADDR environment variable not set")
// return "Error getting maya-apiserver IP Address", err
// }
// var snap mayav1.SnapshotAPISpec
// snap.Metadata.Name = snapName
// snap.Spec.VolumeName = volName
// url := addr + "/latest/snapshots/revert/"
// yamlValue, _ := yaml.Marshal(snap)
// req, err := http.NewRequest("POST", url, bytes.NewBuffer(yamlValue))
// req.Header.Add("Content-Type", "application/yaml")
// c := &http.Client{
// Timeout: timeout,
// }
// resp, err := c.Do(req)
// if err != nil {
// glog.Errorf("Error when connecting maya-apiserver %v", err)
// return "Could not connect to maya-apiserver", err
// }
// defer resp.Body.Close()
// data, err := ioutil.ReadAll(resp.Body)
// if err != nil {
// glog.Errorf("Unable to read response from maya-apiserver %v", err)
// return "Unable to read response from maya-apiserver", err
// }
// code := resp.StatusCode
// if err == nil && code != http.StatusOK {
// return "HTTP Status error from maya-apiserver", fmt.Errorf(string(data))
// }
// if code != http.StatusOK {
// glog.Errorf("Status error: %v\n", http.StatusText(code))
// return "HTTP Status error from maya-apiserver", err
// }
// glog.Infof("Snapshot Successfully restore:\n%v\n", string(data))
// return "Snapshot Successfully restore", nil
// }
func (v CASVolume) SnapshotInfo(volName string, snapName string) (string, error) {
return "Not implemented", nil
}
func (v CASVolume) DeleteSnapshot(castype, volName, snapName, namespace string) (string, error) {
addr := os.Getenv("MAPI_ADDR")
if addr == "" {
err := errors.New("MAPI_ADDR environment variable not set")
return "Error getting maya-apiserver IP Address", err
}
url := addr + "/latest/snapshots/" + snapName
req, err := http.NewRequest("DELETE", url, nil)
glog.Infof("Deleting snapshot %s of %s volume %s in namespace %s", snapName, castype, volName, namespace)
// Add query params
q := req.URL.Query()
q.Add("volume", volName)
q.Add("namespace", namespace)
q.Add("casType", castype)
// Add query params to req
req.URL.RawQuery = q.Encode()
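// For illustration only (values hypothetical), the resulting request URL looks roughly like:
//   <MAPI_ADDR>/latest/snapshots/snap-1?casType=jiva&namespace=default&volume=pvc-1234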
c := &http.Client{
Timeout: timeout,
}
resp, err := c.Do(req)
if err != nil {
glog.Errorf("Error when connecting maya-apiserver %v", err)
return "Could not connect to maya-apiserver", err
}
defer resp.Body.Close()
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
glog.Errorf("Unable to read response from maya-apiserver %v", err)
return "Unable to read response from maya-apiserver", err
}
code := resp.StatusCode
if err == nil && code != http.StatusOK {
return "HTTP Status error from maya-apiserver", fmt.Errorf(string(data))
}
if code != http.StatusOK {
glog.Errorf("Status error: %v\n", http.StatusText(code))
return "HTTP Status error from maya-apiserver", err
}
return string(data), nil
}
| [
"\"MAPI_ADDR\"",
"\"MAPI_ADDR\"",
"\"MAPI_ADDR\"",
"\"MAPI_ADDR\""
]
| []
| [
"MAPI_ADDR"
]
| [] | ["MAPI_ADDR"] | go | 1 | 0 | |
_unittests/ut_talk_examples/test_pydata2016_animation.py | """
@brief test log(time=20s)
"""
import sys
import os
import unittest
from pyquickhelper.loghelper import fLOG, run_cmd
from pyquickhelper.pycode import get_temp_folder, fix_tkinter_issues_virtualenv, skipif_appveyor, skipif_travis
from pyquickhelper.pycode import add_missing_development_version
class TestPyData2016Animation(unittest.TestCase):
@skipif_appveyor("no ffmpeg installed")
@skipif_travis("issue with datashader.bokeh_ext, skipping")
@skipif_appveyor("issue with pyproj")
def test_matplotlib_example(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
progs = ["ffmpeg"]
if not sys.platform.startswith("win"):
progs.append("avconv")
errs = []
prog = None
for prog in progs:
out, err = run_cmd(prog, wait=True, fLOG=fLOG)
exps = "usage:"
if (exps not in out and exps not in err) or err is None or len(err) == 0:
errs.append((prog, err))
else:
break
if len(errs) >= len(progs):
if sys.platform.startswith("win"):
fLOG("download ffmpeg")
add_missing_development_version(
["pyensae"], __file__, hide=True)
from pyensae.datasource import download_data
download_data("ffmpeg.zip", website="xd")
else:
raise FileNotFoundError(
"Unable to find '{1}'.\nPATH='{0}'\n--------\n[OUT]\n{2}\n[ERR]\n{3}".format(
os.environ["PATH"], prog, out,
"\n----\n".join("{0}:\n{1}".format(*_) for _ in errs)))
temp = get_temp_folder(__file__, "temp_example_example")
fix_tkinter_issues_virtualenv()
# update a distribution based on new data.
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as ss
from matplotlib.animation import FuncAnimation, writers
# To get the list of available writers
if not writers.is_available(prog):
writers.register(prog)
fLOG(writers.list())
class UpdateDist:
def __init__(self, ax, prob=0.5):
self.success = 0
self.prob = prob
self.line, = ax.plot([], [], 'k-')
self.x = np.linspace(0, 1, 200)
self.ax = ax
# Set up plot parameters
self.ax.set_xlim(0, 1)
self.ax.set_ylim(0, 15)
self.ax.grid(True)
# This vertical line represents the theoretical value, to
# which the plotted distribution should converge.
self.ax.axvline(prob, linestyle='--', color='black')
def init(self):
self.success = 0
self.line.set_data([], [])
return self.line,
def __call__(self, i):
# This way the plot can continuously run and we just keep
# watching new realizations of the process
if i == 0:
return self.init()
# Choose success based on exceed a threshold with a uniform
# pick
if np.random.rand(1,) < self.prob: # pylint: disable=W0143
self.success += 1
y = ss.beta.pdf(self.x, self.success + 1,
(i - self.success) + 1)
self.line.set_data(self.x, y)
return self.line,
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ud = UpdateDist(ax, prob=0.7)
anim = FuncAnimation(fig, ud, frames=np.arange(100), init_func=ud.init,
interval=100, blit=True)
try:
Writer = writers[prog]
except KeyError as e:
if prog == "avconv":
from matplotlib.animation import AVConvWriter
Writer = AVConvWriter
else:
raise e
writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
anim.save(os.path.join(temp, 'lines2.mp4'), writer=writer)
plt.close('all')
fLOG("end")
if __name__ == "__main__":
unittest.main()
| []
| []
| [
"PATH"
]
| [] | ["PATH"] | python | 1 | 0 | |
thinc/tests/test_examples.py | import os
from pathlib import Path
import pytest
@pytest.fixture
def test_files(nb_file):
pytest.importorskip("nbconvert")
pytest.importorskip("nbformat")
import nbconvert
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
if not Path(nb_file).exists():
return
kernel_name = os.environ.get("NOTEBOOK_KERNEL", "python3")
with open(nb_file) as f:
nb = nbformat.read(f, as_version=4)
proc = ExecutePreprocessor(timeout=600, kernel_name=kernel_name)
proc.allow_errors = True
proc.preprocess(nb, {"metadata": {"path": "/"}})
cells_with_outputs = [c for c in nb.cells if "outputs" in c]
for cell in cells_with_outputs:
for output in cell["outputs"]:
if output.output_type == "error":
for l in output.traceback:
print(l)
raise Exception(f"{output.ename}: {output.evalue}")
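# The kernel used to execute the notebooks can be overridden through the NOTEBOOK_KERNEL
# environment variable; an illustrative invocation (assumed, not from the original source):
#   NOTEBOOK_KERNEL=python3 pytest thinc/tests/test_examples.py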
@pytest.mark.parametrize(
"nb_file",
(
"examples/01_intro_model_definition_methods.ipynb",
"examples/05_benchmarking_layers.ipynb",
),
)
def test_ipython_notebooks(test_files: None):
...
@pytest.mark.skip(reason="these notebooks need special software or hardware")
@pytest.mark.parametrize(
"nb_file",
(
"examples/00_intro_to_thinc.ipynb",
"examples/02_transformers_tagger_bert.ipynb",
"examples/03_pos_tagger_basic_cnn.ipynb",
"examples/03_textcat_basic_neural_bow.ipynb",
"examples/04_configure_gpu_memory.ipynb",
"examples/04_parallel_training_ray.ipynb",
"examples/05_visualizing_models.ipynb",
"examples/06_predicting_like_terms.ipynb",
),
)
def test_ipython_notebooks_slow(test_files: None):
...
| []
| []
| [
"NOTEBOOK_KERNEL"
]
| [] | ["NOTEBOOK_KERNEL"] | python | 1 | 0 | |
examples/referseg/mynetwork.naive.nocap.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# File: deeplabFOV.py
# Author: Tao Hu <[email protected]>
import cv2
import tensorflow as tf
import argparse
from six.moves import zip
import os
import numpy as np
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
from tensorpack.dataflow import dataset
from tensorpack.utils.gpu import get_nr_gpu
from tensorpack.utils.segmentation.segmentation import predict_slider, visualize_label, predict_scaler
from tensorpack.utils.stats import MIoUStatistics
from tensorpack.utils import logger
from tensorpack.tfutils import optimizer
from tensorpack.tfutils.summary import add_moving_summary, add_param_summary
from tqdm import tqdm
from RMI_model_nocap import RMI_model
from data_loader import DataLoader
CLASS_NUM = DataLoader.class_num()
IMG_SIZE = 320
IGNORE_LABEL = 255
MAX_LENGTH = 15
VOCAB_SIZE = len(DataLoader(name = "train", max_length=MAX_LENGTH, img_size=IMG_SIZE).word_to_idx.keys())#3224#28645#24022 # careful about the VOCAB SIZE
# maximum length of caption(number of word). if caption is longer than max_length, deleted.
STEP_NUM = MAX_LENGTH+2 # equal Max Length
evaluate_every_n_epoch = 1
max_epoch = 10
init_lr = 2.5e-4
lr_schedule = [(3, 1e-4), (7, 1e-5)]
def softmax_cross_entropy_with_ignore_label(logits, label, class_num):
"""
This function accepts logits rather than predictions, and is more numerically stable than
:func:`class_balanced_cross_entropy`.
"""
with tf.name_scope('softmax_cross_entropy_with_ignore_label'):
tf.assert_equal(logits.get_shape()[1:3], label.get_shape()[1:3]) # shape assert
raw_prediction = tf.reshape(logits, [-1, class_num])
label = tf.reshape(label,[-1,])
#label_onehot = tf.one_hot(label, depth=class_num)
indices = tf.squeeze(tf.where(tf.less(label, class_num)), axis=1)
#raw_gt = tf.reshape(label_onehot, [-1, class_num])
gt = tf.gather(label, indices)
prediction = tf.gather(raw_prediction, indices)
# Pixel-wise softmax loss.
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction, labels=gt)
return loss
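# Illustrative shape sketch (added note, not in the original source): logits is expected to be
# [batch, H, W, class_num] and label [batch, H, W, 1]; pixels whose label value is >= class_num
# (e.g. IGNORE_LABEL = 255) are filtered out by tf.where above and contribute nothing to the loss.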
class Model(ModelDesc):
def _get_inputs(self):
return [InputDesc(tf.float32, [None, IMG_SIZE, IMG_SIZE, 3], 'image'),
InputDesc(tf.int32, [None, IMG_SIZE, IMG_SIZE, 1], 'gt'),
]
def _build_graph(self, inputs):
image, label = inputs
image = image - tf.constant([104, 116, 122], dtype='float32')
mode = "train" if get_current_tower_context().is_training else "val"
current_batch_size = args.batch_size if get_current_tower_context().is_training else 1
model = RMI_model(image, class_num=CLASS_NUM, batch_size=current_batch_size, num_steps= STEP_NUM, mode=mode, vocab_size=VOCAB_SIZE, weights="deeplab")
predict = model.up
label = tf.identity(label, name="label")
costs = []
prob = tf.identity(predict, name='prob')
prediction = tf.argmax(prob, axis=-1, name="prediction")
cost = softmax_cross_entropy_with_ignore_label(logits=prob, label=label,
class_num=CLASS_NUM)
cost = tf.reduce_mean(cost, name='cross_entropy_loss') # the average cross-entropy loss
costs.append(cost)
if get_current_tower_context().is_training:
wd_w = tf.train.exponential_decay(2e-4, get_global_step_var(),
80000, 0.7, True)
wd_cost = tf.multiply(wd_w, regularize_cost('.*/Waaaaaa', tf.nn.l2_loss), name='wd_cost') #TODO
#wd_cost = 0.0
costs.append(wd_cost)
self.cost = tf.add_n(costs, name='cost')
def _get_optimizer(self):
lr = tf.get_variable('learning_rate', initializer=init_lr, trainable=False)
opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)
return optimizer.apply_grad_processors(
opt, [gradproc.ScaleGradient(
[('aspp.*_conv.*/Wnnnnnn', 10),('aspp.*_conv.*/bnnnnn', 20), ('conv.*/bnnnnn', 2)])]) #TODO
def get_data(name, batch_size):
isTrain = True if 'train' in name else False
ds = DataLoader(name = name, max_length=MAX_LENGTH, img_size=IMG_SIZE,use_caption=False)
if isTrain:
ds = BatchData(ds, batch_size)
ds = PrefetchDataZMQ(ds, 1)
else:
ds = BatchData(ds, 1)
return ds
def view_data():
ds = RepeatedData(get_data('train',10), -1)
ds.reset_state()
for ims, labels,captions in ds.get_data():
for im, label,caption in zip(ims, labels,captions):
cv2.imshow("im", im)
cv2.imshow("color-label", visualize_label(label,class_num=CLASS_NUM))
print(caption)
cv2.waitKey(10000)
def get_config(batch_size):
logger.auto_set_dir()
dataset_train = get_data('train', batch_size)
steps_per_epoch = dataset_train.size()*2
callbacks = [
ModelSaver(),
GPUUtilizationTracker(),
EstimatedTimeLeft(),
ScheduledHyperParamSetter('learning_rate', lr_schedule),
PeriodicTrigger(CalculateMIoU(CLASS_NUM), every_k_epochs=evaluate_every_n_epoch),
ProgressBar(["cross_entropy_loss", "cost", "wd_cost"]), # uncomment it to debug for every step
# RunOp(lambda: tf.add_check_numerics_ops(), run_before=False, run_as_trigger=True, run_step=True)
]
return TrainConfig(
model=Model(),
dataflow=dataset_train,
callbacks=callbacks,
steps_per_epoch=steps_per_epoch,
max_epoch=max_epoch,
)
class CalculateMIoU(Callback):
def __init__(self, nb_class):
self.nb_class = nb_class
def _setup_graph(self):
self.pred = self.trainer.get_predictor(
['image'], ['prob'])
def _before_train(self):
pass
def _trigger(self):
global args
self.val_ds = get_data('test',batch_size=args.batch_size)
self.val_ds.reset_state()
self.stat = MIoUStatistics(self.nb_class)
for image, label in tqdm(self.val_ds.get_data()):
label = np.squeeze(label)
image = np.squeeze(image)
def mypredictor(input_img):
# input image: 1*H*W*3
# output : H*W*C
output = self.pred(input_img[np.newaxis, :, :, :])
return output[0][0]
prediction = mypredictor(image)
#prediction = predict_scaler(image, mypredictor, scales=[1], classes=CLASS_NUM, tile_size=IMG_SIZE, is_densecrf = False)
prediction = np.argmax(prediction, axis=2)
self.stat.feed(prediction, label)
self.trainer.monitors.put_scalar("mIoU", self.stat.mIoU)
self.trainer.monitors.put_scalar("mean_accuracy", self.stat.mean_accuracy)
self.trainer.monitors.put_scalar("accuracy", self.stat.accuracy)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', default='5', help='comma separated list of GPU(s) to use.')
parser.add_argument('--load', default="deeplab_resnet_init.ckpt" ,help='load model')
parser.add_argument('--view', help='view dataset', action='store_true')
parser.add_argument('--run', help='run model on images')
parser.add_argument('--batch_size', type=int, default = 5, help='batch_size')
parser.add_argument('--output', help='fused output filename. default to out-fused.png')
args = parser.parse_args()
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
if args.view:
view_data()
else:
config = get_config(args.batch_size)
if args.load:
config.session_init = get_model_loader(args.load)
launch_train_with_config(
config,
SyncMultiGPUTrainer(max(get_nr_gpu(), 1)))
| []
| []
| [
"CUDA_VISIBLE_DEVICES",
"TENSORPACK_TRAIN_API"
]
| [] | ["CUDA_VISIBLE_DEVICES", "TENSORPACK_TRAIN_API"] | python | 2 | 0 | |
src/tests/long_cluster_tests.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os, signal, sys, unittest
from testlib import TestBaseCluster
class LongClusterTests(TestBaseCluster):
"""Long/Soak cluster tests with async store ability"""
def test_LongCluster_01_DummyTest(self):
"""Dummy test - a placeholder for the first of the long/soak python cluster tests"""
pass
# Start the test here
if __name__ == '__main__':
if os.getenv("STORE_LIB") != None:
print "NOTE: Store enabled for the following tests:"
if not unittest.main(): sys.exit(1)
| []
| []
| [
"STORE_LIB"
]
| [] | ["STORE_LIB"] | python | 1 | 0 | |
for_012_fire_risk/contents/src/__init__.py | from __future__ import unicode_literals
import os
import sys
import datetime
import logging
import subprocess
import eeUtil
import requests
from bs4 import BeautifulSoup
import urllib.request
import time
import json
import re
# url for fire weather data
SOURCE_URL = 'https://portal.nccs.nasa.gov/datashare/GlobalFWI/v2.0/fwiCalcs.GEOS-5/Default/GPM.LATE.v5/{year}/FWI.GPM.LATE.v5.Daily.Default.{date}.nc'
# subdatasets to be converted to tif
# should be of the format 'NETCDF:"filename.nc":variable'
SDS_NAMES = ['NETCDF:"{fname}":GPM.LATE.v5_FWI', 'NETCDF:"{fname}":GPM.LATE.v5_BUI', 'NETCDF:"{fname}":GPM.LATE.v5_DC',
'NETCDF:"{fname}":GPM.LATE.v5_DMC', 'NETCDF:"{fname}":GPM.LATE.v5_FFMC', 'NETCDF:"{fname}":GPM.LATE.v5_ISI']
# filename format for GEE
FILENAME = 'for_012_fire_risk_{date}'
# nodata value for netcdf
NODATA_VALUE = None
# name of data directory in Docker container
DATA_DIR = 'data'
# name of folder to store data in Google Cloud Storage
GS_FOLDER = 'for_012_fire_risk'
# name of collection in GEE where we will upload the final data
EE_COLLECTION = '/projects/resource-watch-gee/for_012_fire_risk'
# do you want to delete everything currently in the GEE collection when you run this script?
CLEAR_COLLECTION_FIRST = False
# how many assets can be stored in the GEE collection before the oldest ones are deleted?
MAX_ASSETS = 15
# format of date (used in both the source data files and GEE)
DATE_FORMAT = '%Y%m%d'
# Resource Watch dataset API ID and GFW dataset API ID
# Important! Before testing this script:
# Please change this ID OR comment out the getLayerIDs(DATASET_ID) function in the script below
# Failing to do so will overwrite the last update date on a different dataset on Resource Watch
DATASET_IDS = {
'RW': 'c56ee507-9a3b-41d3-90ac-1406bee32c32',
'GFW': '3b850f92-c7e3-4103-9f24-ea7d41a94b84'}
'''
FUNCTIONS FOR ALL DATASETS
The functions below must go in every near real-time script.
Their format should not need to be changed.
'''
def lastUpdateDate(dataset, date):
'''
Given a Resource Watch dataset's API ID and a datetime,
this function will update the dataset's 'last update date' on the API with the given datetime
INPUT dataset: Resource Watch API dataset ID (string)
date: date to set as the 'last update date' for the input dataset (datetime)
'''
# generate the API url for this dataset
apiUrl = 'http://api.resourcewatch.org/v1/dataset/{0}'.format(dataset)
# create headers to send with the request to update the 'last update date'
headers = {
'Content-Type': 'application/json',
'Authorization': os.getenv('apiToken')
}
# create the json data to send in the request
body = {
"dataLastUpdated": date.isoformat() # date should be a string in the format 'YYYY-MM-DDTHH:MM:SS'
}
# send the request
try:
r = requests.patch(url = apiUrl, json = body, headers = headers)
logging.info('[lastUpdated]: SUCCESS, '+ date.isoformat() +' status code '+str(r.status_code))
return 0
except Exception as e:
logging.error('[lastUpdated]: '+str(e))
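# A minimal usage sketch (the date below is hypothetical; a valid Resource Watch token must be
# available in the 'apiToken' environment variable):
# lastUpdateDate(DATASET_IDS['RW'], datetime.datetime(2020, 3, 11))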
'''
FUNCTIONS FOR RASTER DATASETS
The functions below must go in every near real-time script for a RASTER dataset.
Their format should not need to be changed.
'''
def getLastUpdate(dataset):
'''
Given a Resource Watch dataset's API ID,
this function will get the current 'last update date' from the API
and return it as a datetime
INPUT dataset: Resource Watch API dataset ID (string)
RETURN lastUpdateDT: current 'last update date' for the input dataset (datetime)
'''
# generate the API url for this dataset
apiUrl = 'http://api.resourcewatch.org/v1/dataset/{}'.format(dataset)
# pull the dataset from the API
r = requests.get(apiUrl)
# find the 'last update date'
lastUpdateString=r.json()['data']['attributes']['dataLastUpdated']
# split this date into two pieces at the seconds decimal so that the datetime module can read it:
# ex: '2020-03-11T00:00:00.000Z' will become '2020-03-11T00:00:00' (nofrag) and '000Z' (frag)
nofrag, frag = lastUpdateString.split('.')
# generate a datetime object
nofrag_dt = datetime.datetime.strptime(nofrag, "%Y-%m-%dT%H:%M:%S")
# add back the microseconds to the datetime
lastUpdateDT = nofrag_dt.replace(microsecond=int(frag[:-1])*1000)
return lastUpdateDT
def getLayerIDs(dataset):
'''
Given a Resource Watch dataset's API ID,
this function will return a list of all the layer IDs associated with it
INPUT dataset: Resource Watch API dataset ID (string)
RETURN layerIDs: Resource Watch API layer IDs for the input dataset (list of strings)
'''
# generate the API url for this dataset - this must include the layers
apiUrl = 'http://api.resourcewatch.org/v1/dataset/{}?includes=layer'.format(dataset)
# pull the dataset from the API
r = requests.get(apiUrl)
#get a list of all the layers
layers = r.json()['data']['attributes']['layer']
# create an empty list to store the layer IDs
layerIDs =[]
# go through each layer and add its ID to the list
for layer in layers:
# only add layers that have Resource Watch listed as its application
if layer['attributes']['application']==['rw']:
layerIDs.append(layer['id'])
return layerIDs
def flushTileCache(layer_id):
"""
Given the API ID for a GEE layer on Resource Watch,
this function will clear the layer cache.
If the cache is not cleared, when you view the dataset on Resource Watch, old and new tiles will be mixed together.
INPUT layer_id: Resource Watch API layer ID (string)
"""
# generate the API url for this layer's cache
apiUrl = 'http://api.resourcewatch.org/v1/layer/{}/expire-cache'.format(layer_id)
# create headers to send with the request to clear the cache
headers = {
'Content-Type': 'application/json',
'Authorization': os.getenv('apiToken')
}
# clear the cache for the layer
# sometimes this fails, so we will try multiple times if it does
# specify that we are on the first try
try_num=1
# specify the maximum number of attempt we will make
tries = 4
while try_num<tries:
try:
# try to delete the cache
r = requests.delete(url = apiUrl, headers = headers, timeout=1000)
# if we get a 200, the cache has been deleted
# if we get a 504 (gateway timeout) - the tiles are still being deleted, but it worked
if r.ok or r.status_code==504:
logging.info('[Cache tiles deleted] for {}: status code {}'.format(layer_id, r.status_code))
return r.status_code
# if we don't get a 200 or 504:
else:
# if we are not on our last try, wait 60 seconds and try to clear the cache again
if try_num < (tries-1):
logging.info('Cache failed to flush: status code {}'.format(r.status_code))
time.sleep(60)
logging.info('Trying again.')
# if we are on our last try, log that the cache flush failed
else:
logging.error('Cache failed to flush: status code {}'.format(r.status_code))
logging.error('Aborting.')
try_num += 1
except Exception as e:
logging.error('Failed: {}'.format(e))
'''
FUNCTIONS FOR THIS DATASET
The functions below have been tailored to this specific dataset.
They should all be checked because their format likely will need to be changed.
'''
def getUrl(date):
'''
format source url with date
INPUT date: date in the format YYYYMMDD (string)
RETURN source url to download data, formatted for the input date (string)
'''
return SOURCE_URL.format(year=date[0:4], date=date)
def getAssetName(date):
'''
get asset name
INPUT date: date in the format of the DATE_FORMAT variable (string)
RETURN GEE asset name for input date (string)
'''
return os.path.join(EE_COLLECTION, FILENAME.format(date=date))
def getFilename(date):
'''
get netcdf filename to save source file as
INPUT date: date in the format of the DATE_FORMAT variable (string)
RETURN file name to save netcdf from source under (string)
'''
return os.path.join(DATA_DIR, '{}.nc'.format(FILENAME.format(date=date)))
def getDate(filename):
'''
get date from filename (last 8 characters of filename after removing extension)
INPUT filename: file name that ends in a date of the format YYYYMMDD (string)
RETURN date in the format YYYYMMDD (string)
'''
return os.path.splitext(os.path.basename(filename))[0][-8:]
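# Illustrative round trip for these helpers (the date is hypothetical):
# getFilename('20200311') -> 'data/for_012_fire_risk_20200311.nc'
# getDate('data/for_012_fire_risk_20200311.nc') -> '20200311'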
def getNewDates(exclude_dates):
'''
Get new dates we want to try to fetch data for
INPUT exclude_dates: list of dates that we already have in GEE, in the format of the DATE_FORMAT variable (list of strings)
RETURN new_dates: list of new dates we want to try to get, in the format of the DATE_FORMAT variable (list of strings)
'''
# create empty list to store dates we want to fetch
new_dates = []
# start with today's date
date = datetime.date.today()
# if anything is in the collection, check back until last uploaded date
if len(exclude_dates) > 0:
# if the date string is not the list of dates we already have, add it to the list of new dates to try and fetch
while (date.strftime(DATE_FORMAT) not in exclude_dates):
# generate a string from the date
datestr = date.strftime(DATE_FORMAT)
# add to list of new dates
new_dates.append(datestr)
# go back one day at a time
date -= datetime.timedelta(days=1)
#if the collection is empty, make list of most recent 10 days to check
else:
for i in range(10):
# generate a string from the date
datestr = date.strftime(DATE_FORMAT)
# add to list of new dates
new_dates.append(datestr)
# go back one day at a time
date -= datetime.timedelta(days=1)
return new_dates
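# Illustrative behaviour (dates hypothetical): if today is 2020-03-12 and
# exclude_dates == ['20200310'], this returns ['20200312', '20200311'].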
def convert(files):
'''
Convert netcdf files to tifs
INPUT files: list of file names for netcdfs that have already been downloaded (list of strings)
RETURN tifs: list of file names for tifs that have been generated (list of strings)
'''
# create an empty list to store the names of the tifs we generate from all input netcdf files
tifs = []
# go through each netcdf file and translate
for f in files:
# create an empty list to store the names of the tifs we generate from this netcdf file
band_tifs = []
# go through each variables to process in this netcdf file
for sds_name in SDS_NAMES:
# extract subdataset by name
sds_path = sds_name.format(fname=f)
# generate a name to save the tif file we will translate the netcdf file's subdataset into
band_tif = '{}_{}.tif'.format(os.path.splitext(f)[0], sds_name.split('_')[-1])
# translate the netcdf file's subdataset into a tif
cmd = ['gdal_translate','-q', '-a_nodata', str(NODATA_VALUE), '-a_srs', 'EPSG:4326', sds_path, band_tif]
logging.debug('Converting {} to {}'.format(f, band_tif))
subprocess.call(cmd)
# add the new subdataset tif files to the list of tifs generated from this netcdf file
band_tifs.append(band_tif)
# generate a name to save the tif file that will be produced by merging all the sub tifs from this netcdf
merged_tif = '{}.tif'.format(os.path.splitext(f)[0])
# merge all the sub tifs from this netcdf to create an overall tif representing all variables
merge_cmd = ['gdal_merge.py', '-seperate'] + band_tifs + ['-o', merged_tif]
subprocess.call(merge_cmd)
# add the new tif files to the list of tifs
tifs.append(merged_tif)
return tifs
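# For reference, the gdal_translate call above expands to roughly the following shell command
# (filename hypothetical; a NODATA_VALUE of None becomes the literal string 'None'):
#   gdal_translate -q -a_nodata None -a_srs EPSG:4326 \
#       NETCDF:"data/for_012_fire_risk_20200311.nc":GPM.LATE.v5_FWI \
#       data/for_012_fire_risk_20200311_FWI.tif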
def list_available_files(url, ext=''):
'''
Fetch a list of filenames from source url by year
INPUT url: url for data source where we want to check for download links (string)
ext: extension of file type we are checking for (string)
RETURN list of files available for download from source website (list of strings)
'''
# open and read the url
page = requests.get(url).text
# use BeautifulSoup to read the content as a nested data structure
soup = BeautifulSoup(page, 'html.parser')
# Extract all the <a> tags within the html content to find the files available for download marked with these tags.
# Get only the files that ends with input extension (if specified)
return [node.get('href') for node in soup.find_all('a') if type(node.get('href'))==str and node.get('href').endswith(ext)]
def fetch(new_dates):
'''
Fetch files by datestamp
INPUT dates: list of dates we want to try to fetch, in the format YYYYMMDD (list of strings)
RETURN files: list of file names for netcdfs that have been downloaded (list of strings)
'''
# make an empty list to store names of the files we downloaded
files = []
# go through each input date
for date in new_dates:
# get the url to download the file from the source for the given date
url = getUrl(date)
# get the filename we want to save the file under locally
f = getFilename(date)
# get the filename to download from the url
file_name = os.path.split(url)[1]
# get a list of available filenames from source website for the input date's year
file_list = list_available_files(os.path.split(url)[0], ext='.nc')
# check if the filename to download is present in the source website
if file_name in file_list:
logging.info('Retrieving {}'.format(file_name))
try:
# try to download the data
urllib.request.urlretrieve(url, f)
# if successful, add the file to the list of files we have downloaded
files.append(f)
logging.info('Successfully retrieved {}'.format(f))
except Exception as e:
# if unsuccessful, log that the file was not downloaded
logging.error('Unable to retrieve data from {}'.format(url))
logging.debug(e)
else:
# if the filename to download is not present in the source website,
# log that we are attempting to download a file that is not available yet
logging.info('{} not available yet'.format(file_name))
return files
def processNewData(existing_dates):
'''
fetch, process, upload, and clean new data
INPUT existing_dates: list of dates we already have in GEE, in the format of the DATE_FORMAT variable (list of strings)
RETURN assets: list of file names for netcdfs that have been downloaded (list of strings)
'''
# Get list of new dates we want to try to fetch data for
new_dates = getNewDates(existing_dates)
# Fetch new files
logging.info('Fetching files')
files = fetch(new_dates)
# If we have successfully been able to fetch new data files
if files:
# Convert new files from netcdf to tif files
logging.info('Converting files')
tifs = convert(files)
logging.info('Uploading files')
# Get a list of the dates we have to upload from the tif file names
dates = [getDate(tif) for tif in tifs]
# Get a list of datetimes from these dates for each of the dates we are uploading
datestamps = [datetime.datetime.strptime(date, DATE_FORMAT) for date in dates]
# Get a list of the names we want to use for the assets once we upload the files to GEE
assets = [getAssetName(date) for date in dates]
# Upload new files (tifs) to GEE
eeUtil.uploadAssets(tifs, assets, GS_FOLDER, datestamps)
# Delete local files
logging.info('Cleaning local files')
for tif in tifs:
os.remove(tif)
for f in files:
os.remove(f)
return assets
return []
def checkCreateCollection(collection):
'''
List assests in collection if it exists, else create new collection
INPUT collection: GEE collection to check or create (string)
RETURN list of assets in collection (list of strings)
'''
# if collection exists, return list of assets in collection
if eeUtil.exists(collection):
return eeUtil.ls(collection)
# if collection does not exist, create it and return an empty list (because no assets are in the collection)
else:
logging.info('{} does not exist, creating'.format(collection))
eeUtil.createFolder(collection, True, public=True)
return []
def deleteExcessAssets(dates, max_assets):
'''
Delete oldest assets, if more than specified in max_assets variable
INPUT dates: dates for all the assets currently in the GEE collection; dates should be in the format specified
in DATE_FORMAT variable (list of strings)
max_assets: maximum number of assets allowed in the collection (int)
'''
# sort the list of dates so that the oldest is first
dates.sort()
# if we have more dates of data than allowed,
if len(dates) > max_assets:
# go through each date, starting with the oldest, and delete until we only have the max number of assets left
for date in dates[:-max_assets]:
eeUtil.removeAsset(getAssetName(date))
def get_most_recent_date(collection):
'''
Get most recent date from the data in the GEE collection
INPUT collection: GEE collection to check dates for (string)
RETURN most_recent_date: most recent date in GEE collection (datetime)
'''
# get list of assets in collection
existing_assets = checkCreateCollection(collection)
# get a list of strings of dates in the collection
existing_dates = [getDate(a) for a in existing_assets]
# sort these dates oldest to newest
existing_dates.sort()
# get the most recent date (last in the list) and turn it into a datetime
most_recent_date = datetime.datetime.strptime(existing_dates[-1], DATE_FORMAT)
return most_recent_date
def create_headers():
'''
Create headers to perform authorized actions on API
'''
return {
'Content-Type': "application/json",
'Authorization': "{}".format(os.getenv('apiToken')),
}
def pull_layers_from_API(dataset_id):
'''
Pull dictionary of current layers from API
INPUT dataset_id: Resource Watch API dataset ID (string)
RETURN layer_dict: dictionary of layers (dictionary of strings)
'''
# generate url to access layer configs for this dataset in back office
rw_api_url = 'https://api.resourcewatch.org/v1/dataset/{}?includes=layer&page[size]=100'.format(dataset_id)
# request data
r = requests.get(rw_api_url)
# convert response into json and make dictionary of layers
layer_dict = json.loads(r.content.decode('utf-8'))['data']['attributes']['layer']
return layer_dict
def update_layer(layer, new_date):
'''
Update layers in Resource Watch back office.
INPUT layer: layer that will be updated (string)
new_date: date of asset to be shown in this layer (datetime)
'''
# get current layer titile
cur_title = layer['attributes']['name']
# get current end date being used from title by string manupulation
old_date_text = re.search(r'(January|February|March|April|May|June|July|August|September|October|November|December)\s+\d{1,2},\s+\d{4}', cur_title).group()
# use the most recent date of data in GEE as the new end date shown in the layer title
new_date_end = new_date
# get text for new date
new_date_text = datetime.datetime.strftime(new_date_end, "%B %d, %Y")
# replace date in layer's title with new date
layer['attributes']['name'] = layer['attributes']['name'].replace(old_date_text, new_date_text)
# send patch to API to replace layers
# generate url to patch layer
rw_api_url_layer = "https://api.resourcewatch.org/v1/dataset/{dataset_id}/layer/{layer_id}".format(
dataset_id=layer['attributes']['dataset'], layer_id=layer['id'])
# create payload with new title and layer configuration
payload = {
'name': layer['attributes']['name']
}
# patch API with updates
r = requests.request('PATCH', rw_api_url_layer, data=json.dumps(payload), headers=create_headers())
# check response
# if we get a 200, the layers have been replaced
# if we get a 504 (gateway timeout) - the layers are still being replaced, but it worked
if r.ok or r.status_code==504:
logging.info('Layer replaced: {}'.format(layer['id']))
else:
logging.error('Error replacing layer: {} ({})'.format(layer['id'], r.status_code))
def updateResourceWatch():
'''
This function should update Resource Watch to reflect the new data.
This may include updating the 'last update date', flushing the tile cache, and updating any dates on layers
'''
# Get the most recent date from the data in the GEE collection
most_recent_date = get_most_recent_date(EE_COLLECTION)
# update the layer dates on RW and GFW
for DATASET_ID in DATASET_IDS.values():
# Get the current 'last update date' from the dataset on Resource Watch
current_date = getLastUpdate(DATASET_ID)
# Update the dates on layer legends
logging.info('Updating {}'.format(DATASET_ID))
# pull dictionary of current layers from API
layer_dict = pull_layers_from_API(DATASET_ID)
# go through each layer, pull the definition and update
for layer in layer_dict:
# replace layer title with new dates
update_layer(layer, most_recent_date)
# If the most recent date from the GEE collection does not match the 'last update date' on the RW API, update it
if current_date != most_recent_date:
logging.info('Updating last update date and flushing cache.')
# Update dataset's last update date on Resource Watch
lastUpdateDate(DATASET_IDS["RW"], most_recent_date)
# get layer ids and flush tile cache for each
layer_ids = getLayerIDs(DATASET_IDS["RW"])
for layer_id in layer_ids:
flushTileCache(layer_id)
def main():
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
logging.info('STARTING')
# Initialize eeUtil
eeUtil.initJson()
# Clear the GEE collection, if specified above
if CLEAR_COLLECTION_FIRST:
if eeUtil.exists(EE_COLLECTION):
eeUtil.removeAsset(EE_COLLECTION, recursive=True)
# Check if collection exists, create it if it does not
# If it exists return the list of assets currently in the collection
existing_assets = checkCreateCollection(EE_COLLECTION)
# Get a list of the dates of data we already have in the collection
existing_dates = [getDate(a) for a in existing_assets]
# Fetch, process, and upload the new data
new_assets = processNewData(existing_dates)
# Get the dates of the new data we have added
new_dates = [getDate(a) for a in new_assets]
logging.info('Previous assets: {}, new: {}, max: {}'.format(
len(existing_dates), len(new_dates), MAX_ASSETS))
# Delete excess assets
deleteExcessAssets(existing_dates+new_dates, MAX_ASSETS)
# Update Resource Watch
updateResourceWatch()
logging.info('SUCCESS')
| []
| []
| [
"apiToken"
]
| [] | ["apiToken"] | python | 1 | 0 | |
examples/enclave_manager/tcf_enclave_manager/tcf_enclave_bridge.py | # Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import time
from ssl import SSLError
from requests.exceptions import Timeout
from requests.exceptions import HTTPError
import tcf_enclave_manager.ias_client as ias_client
import tcf_enclave_manager.tcf_enclave as enclave
import logging
logger = logging.getLogger(__name__)
TCFHOME = os.environ.get("TCF_HOME", "../../../")
send_to_sgx_worker = enclave.HandleWorkOrderRequest
get_enclave_public_info = enclave.UnsealEnclaveData
# -----------------------------------------------------------------
_tcf_enclave_info = None
_ias = None
_sig_rl_update_time = None
_sig_rl_update_period = 8 * 60 * 60 # in seconds every 8 hours
_epid_group = None
# ----------------------------------------------------------------
def __find_enclave_library(config):
"""
Find enclave library file from the parsed config
"""
enclave_file_name = config.get('enclave_library')
enclave_file_path = TCFHOME + "/" + config.get('enclave_library_path')
logger.info("Enclave Lib: %s", enclave_file_name)
if enclave_file_path:
enclave_file = os.path.join(enclave_file_path, enclave_file_name)
if os.path.exists(enclave_file):
logger.info("Enclave Lib Exists")
return enclave_file
else:
script_directory = os.path.abspath(
os.path.dirname(os.path.realpath(__file__)))
logger.info("Script directory - %s", script_directory)
search_path = [
script_directory,
os.path.abspath(os.path.join(script_directory, '..', 'lib')),
]
for path in search_path:
enclave_file = os.path.join(path, enclave_file_name)
if os.path.exists(enclave_file):
logger.info("Enclave Lib Exits")
return enclave_file
raise IOError("Could not find enclave shared object")
# -----------------------------------------------------------------
def __update_sig_rl():
"""
Update the signature revocation lists for EPID group on IAS server
"""
global _epid_group
global _sig_rl_update_time
global _sig_rl_update_period
if _epid_group is None:
_epid_group = _tcf_enclave_info.get_epid_group()
logger.info("EPID: " + _epid_group)
if not _sig_rl_update_time \
or (time.time() - _sig_rl_update_time) > _sig_rl_update_period:
sig_rl = ""
if (not enclave.is_sgx_simulator()):
sig_rl = _ias.get_signature_revocation_lists(_epid_group)
logger.debug("Received SigRl of {} bytes ".format(len(sig_rl)))
_tcf_enclave_info.set_signature_revocation_list(sig_rl)
_sig_rl_update_time = time.time()
# -----------------------------------------------------------------
def initialize_with_configuration(config):
"""
Create and Initialize a SGX enclave with passed config
"""
global _tcf_enclave_info
global _ias
global logger
enclave._SetLogger(logger)
# Ensure that the required keys are in the configuration
valid_keys = set(['spid', 'ias_url', 'ias_api_key'])
found_keys = set(config.keys())
missing_keys = valid_keys.difference(found_keys)
if missing_keys:
raise \
ValueError(
'TCF enclave config file missing the following keys: '
'{}'.format(
', '.join(sorted(list(missing_keys)))))
# IAS is not initialized in SGX SIM mode
if not _ias and not enclave.is_sgx_simulator():
_ias = \
ias_client.IasClient(
IasServer=config['ias_url'],
ApiKey=config['ias_api_key'],
Spid=config['spid'],
HttpsProxy=config.get('https_proxy', ""))
if not _tcf_enclave_info:
signed_enclave = __find_enclave_library(config)
logger.debug("Attempting to load enclave at: %s", signed_enclave)
_tcf_enclave_info = enclave.tcf_enclave_info(
signed_enclave, config['spid'], int(config['num_of_enclaves']))
logger.info("Basename: %s", get_enclave_basename())
logger.info("MRENCLAVE: %s", get_enclave_measurement())
sig_rl_updated = False
while not sig_rl_updated:
try:
__update_sig_rl()
sig_rl_updated = True
except (SSLError, Timeout, HTTPError) as e:
logger.warning(
"Failed to retrieve initial sig rl from IAS: %s", str(e))
logger.warning("Retrying in 60 sec")
time.sleep(60)
return get_enclave_basename(), get_enclave_measurement()
# -----------------------------------------------------------------
def shutdown():
global _tcf_enclave_info
global _ias
global _sig_rl_update_time
global _epid_group
_tcf_enclave_info = None
_ias = None
_sig_rl_update_time = None
_epid_group = None
# -----------------------------------------------------------------
def get_enclave_measurement():
global _tcf_enclave_info
return _tcf_enclave_info.mr_enclave \
if _tcf_enclave_info is not None else None
# -----------------------------------------------------------------
def get_enclave_basename():
global _tcf_enclave_info
return _tcf_enclave_info.basename \
if _tcf_enclave_info is not None else None
# -----------------------------------------------------------------
def verify_enclave_info(enclave_info, mr_enclave, originator_public_key_hash):
"""
Verifies enclave signup info
- enclave_info is a JSON serialised enclave signup info
along with IAS attestation report
- mr_enclave is enclave measurement value
"""
return enclave.VerifyEnclaveInfo(
enclave_info, mr_enclave, originator_public_key_hash)
# -----------------------------------------------------------------
def create_signup_info(originator_public_key_hash, nonce):
"""
Create enclave signup data
"""
# Part of what is returned with the signup data is an enclave quote, we
# want to update the revocation list first.
__update_sig_rl()
# Now, let the enclave create the signup data
signup_data = enclave.CreateEnclaveData(originator_public_key_hash)
if signup_data is None:
return None
# We don't really have any reason to call back down into the enclave
# as we have everything we now need. For other objects such as wait
# timer and certificate they are serialized into JSON down in C++ code.
#
# Start building up the signup info dictionary we will serialize
signup_info = {
'verifying_key': signup_data['verifying_key'],
'encryption_key': signup_data['encryption_key'],
'proof_data': 'Not present',
'enclave_persistent_id': 'Not present'
}
# If we are not running in the simulator, we are going to go and get
# an attestation verification report for our signup data.
if not enclave.is_sgx_simulator():
logger.debug("posting verification to IAS")
response = _ias.post_verify_attestation(
quote=signup_data['enclave_quote'], nonce=nonce)
logger.debug("posted verification to IAS")
# check verification report
if not _ias.verify_report_fields(
signup_data['enclave_quote'], response['verification_report']):
logger.debug("last error: " + _ias.last_verification_error())
if _ias.last_verification_error() == "GROUP_OUT_OF_DATE":
logger.warning("failure GROUP_OUT_OF_DATE " +
"(update your BIOS/microcode!!!) keep going")
else:
logger.error("invalid report fields")
return None
# ALL checks have passed
logger.info("report fields verified")
# Now put the proof data into the dictionary
signup_info['proof_data'] = \
json.dumps({
'verification_report': response['verification_report'],
'ias_report_signature': response['ias_signature'],
'ias_report_signing_certificate': response['ias_certificate']
})
# Grab the EPID pseudonym and put it in the enclave-persistent ID for
# the signup info
verification_report_dict = json.loads(response['verification_report'])
signup_info['enclave_persistent_id'] = \
verification_report_dict.get('epidPseudonym')
mr_enclave = get_enclave_measurement()
status = verify_enclave_info(
json.dumps(signup_info), mr_enclave, originator_public_key_hash)
if status != 0:
logger.error("Verification of enclave signup info failed")
else:
logger.info("Verification of enclave signup info passed")
# Now we can finally serialize the signup info and create a corresponding
# signup info object. Because we don't want the sealed signup data in the
# serialized version, we set it separately.
signup_info_obj = enclave.deserialize_signup_info(json.dumps(signup_info))
signup_info_obj.sealed_signup_data = signup_data['sealed_enclave_data']
# Now we can return the real object
return signup_info_obj
| []
| []
| [
"TCF_HOME"
]
| [] | ["TCF_HOME"] | python | 1 | 0 | |
profile_api/settings.py | """
Django settings for profile_api project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'z(-+$-0t&f&wi*ic(mu_v84t^eesqyd0=6fkz)9ir8se4g9lx_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(int(os.environ.get('DEBUG', 1)))
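# e.g. exporting DEBUG=0 in the environment disables debug mode for production (illustrative).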
ALLOWED_HOSTS = ['ec2-3-84-253-155.compute-1.amazonaws.com', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin', 'django.contrib.auth',
'django.contrib.contenttypes', 'django.contrib.sessions',
'django.contrib.messages', 'django.contrib.staticfiles', 'rest_framework',
'rest_framework.authtoken', 'profile_app'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profile_api.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profile_api.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME':
'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'EST'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profile_app.UserProfile'
STATIC_ROOT = 'static/'
| []
| []
| [
"DEBUG"
]
| [] | ["DEBUG"] | python | 1 | 0 | |
lrtest/lrintegrationtest/multifactor_test.go | package lrintegrationtest
import (
"os"
"testing"
lr "github.com/LoginRadius/go-sdk"
"github.com/LoginRadius/go-sdk/api/mfa"
"github.com/LoginRadius/go-sdk/lrerror"
lrjson "github.com/LoginRadius/go-sdk/lrjson"
)
// Tests in this file are skipped by default; they will only run with LoginRadius sites with MFA turned on
// If you enable MFA for your site, tests in authentication_test.go, social_test.go and phoneauthentication_test.go will
// no longer run
func TestPostMFAEmailLogin(t *testing.T) {
t.SkipNow()
_, _, _, testEmail, lrclient, teardownTestCase := setupAccount(t)
defer teardownTestCase(t)
testLogin := TestEmailLogin{testEmail, testEmail}
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).PostMFAEmailLogin(testLogin)
if err != nil {
t.Errorf("Error making PostMFAEmailLogin call: %v", err)
}
session, err := lrjson.DynamicUnmarshal(res.Body)
if err != nil || session["access_token"].(string) == "" {
t.Errorf("Error returned from PostMFAEmailLogin call: %v", err)
}
res, err = mfa.Loginradius(mfa.Loginradius{lrclient}).PostMFAEmailLogin(testLogin, map[string]string{"emailtemplate": "hello"})
if err != nil {
t.Errorf("Error making PostMFAEmailLogin call with optional queries: %v", err)
}
session, err = lrjson.DynamicUnmarshal(res.Body)
if err != nil || session["access_token"].(string) == "" {
t.Errorf("Error returned from PostMFAEmailLogin call with optional queries: %v", err)
}
}
func TestPostMFAEmailLoginInvalidBody(t *testing.T) {
t.SkipNow()
_, _, _, _, lrclient, teardownTestCase := setupAccount(t)
defer teardownTestCase(t)
invalid := struct{ foo string }{"bar"}
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).PostMFAEmailLogin(invalid)
if err.(lrerror.Error).Code() != "LoginradiusRespondedWithError" {
t.Errorf("PostMFAEmailLogin should fail with LoginradiusRespondedWithError but did not: %v", res.Body)
}
}
func TestPostMFAEmailLoginInvalidQuery(t *testing.T) {
t.SkipNow()
_, _, _, email, lrclient, teardownTestCase := setupAccount(t)
defer teardownTestCase(t)
user := TestEmailLogin{email, email}
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).PostMFAEmailLogin(user, map[string]string{"invalidparam": "value"})
if err.(lrerror.Error).Code() != "ValidationError" {
t.Errorf("PostMFAEmailLogin should fail with ValidationError but did not :%v, %+v", res.Body, err)
}
}
func TestPostMFAUsernameLogin(t *testing.T) {
t.SkipNow()
_, username, _, password, lrclient, teardownTestCase := setupAccount(t)
defer teardownTestCase(t)
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).PostMFAUsernameLogin(
map[string]string{"username": username, "password": password},
)
if err != nil {
t.Errorf("Error making PostMFAUsernameLogin call: %v", err)
}
session, err := lrjson.DynamicUnmarshal(res.Body)
if err != nil || session["access_token"].(string) == "" {
t.Errorf("Error returned from PostMFAUsernameLogin call: %v", err)
}
res, err = mfa.Loginradius(mfa.Loginradius{lrclient}).PostMFAUsernameLogin(
map[string]string{"username": username, "password": password},
map[string]string{"emailtemplate": "hello"},
)
if err != nil {
t.Errorf("Error making PostMFAUsernameLogin call with optional queries: %v", err)
}
session, err = lrjson.DynamicUnmarshal(res.Body)
if err != nil || session["access_token"].(string) == "" {
t.Errorf("Error returned from PostMFAUsernameLogin call with optional queries: %v", err)
}
}
func TestPostMFAUsernameLoginInvalidBody(t *testing.T) {
t.SkipNow()
_, _, _, _, lrclient, teardownTestCase := setupAccount(t)
defer teardownTestCase(t)
invalid := struct{ foo string }{"bar"}
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).PostMFAUsernameLogin(invalid)
if err.(lrerror.Error).Code() != "LoginradiusRespondedWithError" {
t.Errorf("PostMFAUsernameLogin should fail with LoginradiusRespondedWithError but did not: %v", res.Body)
}
}
func TestPostMFAUsernameLoginInvalidQuery(t *testing.T) {
t.SkipNow()
_, username, _, password, lrclient, teardownTestCase := setupAccount(t)
defer teardownTestCase(t)
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).PostMFAUsernameLogin(
map[string]string{"username": username, "password": password},
map[string]string{"invalidparam": "value"},
)
if err.(lrerror.Error).Code() != "ValidationError" {
t.Errorf("PostMFAUsernameLogin should fail with ValidationError but did not :%v, %+v", res.Body, err)
}
}
func TestPostMFAPhoneLogin(t *testing.T) {
t.SkipNow()
phone, _, _, password, lrclient, teardownTestCase := setupAccount(t)
defer teardownTestCase(t)
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).PostMFAPhoneLogin(
map[string]string{"phone": phone, "password": password},
)
if err != nil {
t.Errorf("Error making PostMFAPhoneLogin call: %v", err)
}
session, err := lrjson.DynamicUnmarshal(res.Body)
if err != nil || session["access_token"].(string) == "" {
t.Errorf("Error returned from PostMFAPhoneLogin call: %v", err)
}
res, err = mfa.Loginradius(mfa.Loginradius{lrclient}).PostMFAPhoneLogin(
map[string]string{"phone": phone, "password": password},
map[string]string{"emailtemplate": "hello"},
)
if err != nil {
t.Errorf("Error making PostMFAPhoneLogin call with optional queries: %v", err)
}
session, err = lrjson.DynamicUnmarshal(res.Body)
if err != nil || session["access_token"].(string) == "" {
t.Errorf("Error returned from PostMFAPhoneLogin call with optional queries: %v", err)
}
}
func TestPostMFAPhoneLoginInvalidBody(t *testing.T) {
t.SkipNow()
_, _, _, _, lrclient, teardownTestCase := setupAccount(t)
defer teardownTestCase(t)
invalid := struct{ foo string }{"bar"}
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).PostMFAPhoneLogin(invalid)
if err.(lrerror.Error).Code() != "LoginradiusRespondedWithError" {
t.Errorf("PostMFAPhoneLogin should fail with LoginradiusRespondedWithError but did not: %v", res.Body)
}
}
func TestPostMFAPhoneLoginInvalidQuery(t *testing.T) {
t.SkipNow()
phone, _, _, password, lrclient, teardownTestCase := setupAccount(t)
defer teardownTestCase(t)
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).PostMFAPhoneLogin(
map[string]string{"phone": phone, "password": password},
map[string]string{"invalidparam": "value"},
)
if err.(lrerror.Error).Code() != "ValidationError" {
t.Errorf("PostMFAPhoneLogin should fail with ValidationError but did not :%v, %+v", res.Body, err)
}
}
func TestGetMFAValidateAccessToken(t *testing.T) {
t.SkipNow()
_, _, _, _, _, lrclient, teardownTestCase := setupLogin(t)
defer teardownTestCase(t)
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).GetMFAValidateAccessToken()
if err != nil {
t.Errorf("Error making call to MFAValidateAccessToken: %v", err)
}
data, err := lrjson.DynamicUnmarshal(res.Body)
if err != nil || data["QRCode"].(string) == "" {
t.Errorf("Error returned from MFAValidateAccessToken: %v", err)
}
res, err = mfa.Loginradius(mfa.Loginradius{lrclient}).GetMFAValidateAccessToken(map[string]string{"smstemplate2fa": "hello"})
if err != nil {
t.Errorf("Error making call to MFAValidateAccessToken with optional query params: %v", err)
}
data, err = lrjson.DynamicUnmarshal(res.Body)
if err != nil || data["QRCode"].(string) == "" {
t.Errorf("Error returned from MFAValidateAccessToken with optional query params: %v", err)
}
}
// To run this test, comment out t.SkipNow(), set a manually created user with mfa turned on
// and a Google authenticator added, and enter the Google authenticator code in this test.
func TestPutMFAValidateGoogleAuthCode(t *testing.T) {
t.SkipNow()
SetTestEnv()
cfg := lr.Config{
ApiKey: os.Getenv("APIKEY"),
ApiSecret: os.Getenv("APISECRET"),
}
lrclient, _ := lr.NewLoginradius(&cfg)
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).PostMFAEmailLogin(
// Set user credentials here
map[string]string{"email": "", "password": ""},
)
if err != nil {
t.Errorf("Error making PostMFAEmailLogin call for PutMFAValidateGoogleAuthCode: %v", err)
}
data, err := lrjson.DynamicUnmarshal(res.Body)
if err != nil {
t.Errorf("Error returned from PostMFAEmailLogin call for PutMFAValidateGoogleAuthCode: %v", err)
}
code, ok := data["SecondFactorAuthentication"].(map[string]interface{})["SecondFactorAuthenticationToken"].(string)
if !ok {
t.Errorf("Returned response from SecondFactorAuthentication does not contain SecondFactorAuthenticationToken")
}
res, err = mfa.Loginradius(mfa.Loginradius{lrclient}).PutMFAValidateGoogleAuthCode(
map[string]string{"secondfactorauthenticationtoken": code},
// Set otp from Google Authenticator here
map[string]string{"googleauthenticatorcode": "246803"},
)
if err != nil {
t.Errorf("Error making call to PutMFAValidateGoogleAuthCode: %v", err)
}
data, err = lrjson.DynamicUnmarshal(res.Body)
if err != nil || data["access_token"].(string) == "" {
t.Errorf("Error returned from PutMFAValidateGoogleAuthCode: %v", err)
}
}
// To run this test, comment out t.SkipNow() and set a manually created user with mfa turned on.
// This test verifies that a valid request can be submitted to the LoginRadius endpoint
// and passes if a "The OTP code is invalid, please request for a new OTP." error is returned
// from LoginRadius.
func TestPutMFAValidateOTP(t *testing.T) {
t.SkipNow()
SetTestEnv()
cfg := lr.Config{
ApiKey: os.Getenv("APIKEY"),
ApiSecret: os.Getenv("APISECRET"),
}
lrclient, _ := lr.NewLoginradius(&cfg)
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).PostMFAEmailLogin(
// Set user credentials here
map[string]string{"email": "[email protected]", "password": "password"},
)
if err != nil {
t.Errorf("Error making PostMFAEmailLogin call for PutMFAValidateOTP: %v", err)
}
data, err := lrjson.DynamicUnmarshal(res.Body)
if err != nil {
t.Errorf("Error returned from PostMFAEmailLogin call for PutMFAValidateOTP: %v", err)
}
code, ok := data["SecondFactorAuthentication"].(map[string]interface{})["SecondFactorAuthenticationToken"].(string)
if !ok {
t.Errorf("Returned response from PutMFAValidateOTP does not contain SecondFactorAuthenticationToken")
}
_, err = mfa.Loginradius(mfa.Loginradius{lrclient}).PutMFAValidateOTP(
map[string]string{"secondfactorauthenticationtoken": code},
map[string]string{"otp": "123456"},
)
errMsg, _ := lrjson.DynamicUnmarshal(err.(lrerror.Error).OrigErr().Error())
if errMsg["Description"].(string) != "The OTP code is invalid, please request for a new OTP." {
t.Errorf("PutMFAValidateOTP was supposed to return invalid OTP error, but did not: %v", errMsg)
}
}
// To run this test, comment out t.SkipNow() and set a manually created user with mfa turned on,
// then obtain a valid secondfactorauthenticationtoken by completing an mfa login attempt
// and set the secondfactorauthenticationtoken and a phone number here
func TestPutMFAUpdatePhoneNumber(t *testing.T) {
t.SkipNow()
SetTestEnv()
cfg := lr.Config{
ApiKey: os.Getenv("APIKEY"),
ApiSecret: os.Getenv("APISECRET"),
}
lrclient, _ := lr.NewLoginradius(&cfg)
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).PostMFAEmailLogin(
// Set user credentials here
map[string]string{"email": "[email protected]", "password": "password"},
)
if err != nil {
t.Errorf("Error making PostMFAEmailLogin call for PutMFAUpdatePhoneNumber: %v", err)
}
data, err := lrjson.DynamicUnmarshal(res.Body)
if err != nil {
t.Errorf("Error returned from PostMFAEmailLogin call for PutMFAUpdatePhoneNumber: %v", err)
}
code, ok := data["SecondFactorAuthentication"].(map[string]interface{})["SecondFactorAuthenticationToken"].(string)
if !ok {
t.Errorf("Returned response from SecondFactorAuthentication does not contain SecondFactorAuthenticationToken")
}
res, err = mfa.Loginradius(mfa.Loginradius{lrclient}).PutMFAUpdatePhoneNumber(
// Set user here
map[string]string{"secondfactorauthenticationtoken": code},
map[string]string{"phoneno2fa": ""},
)
if err != nil {
t.Errorf("Error making call to PutMFAUpdatePhoneNumber: %v", err)
}
data, err = lrjson.DynamicUnmarshal(res.Body)
if err != nil {
t.Errorf("Error returned from PutMFAUpdatePhoneNumber: %v", err)
}
}
// To run this test, comment out t.SkipNow() and set a manually created user with mfa turned on,
// then obtain a valid access_token by completing an mfa login attempt
// and set the access_token and a phone number here
func TestPutMFAUpdatePhoneNumberByToken(t *testing.T) {
t.SkipNow()
SetTestEnv()
cfg := lr.Config{
ApiKey: os.Getenv("APIKEY"),
ApiSecret: os.Getenv("APISECRET"),
}
lrclient, _ := lr.NewLoginradius(&cfg)
// set valid access_token here
lrclient.Context.Token = "7f875c92-b7fe-4f55-8658-58b24387ed64"
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).PutMFAUpdatePhoneNumberByToken(
// Set user here
map[string]string{"phoneno2fa": "16047711536"},
)
if err != nil {
t.Errorf("Error making call to PutMFAUpdatePhoneNumber: %v", err)
}
data, err := lrjson.DynamicUnmarshal(res.Body)
if err != nil || data["Sid"] == "" {
t.Errorf("Error returned from PutMFAUpdatePhoneNumber: %v", err)
}
}
// To run this test, comment out t.SkipNow() and set a manually created user with mfa turned on,
// then obtain a valid access_token by completing an mfa login attempt.
// This test must be run with a user that has not called this endpoint previously
func TestGetMFABackUpCodeByAccessToken(t *testing.T) {
t.SkipNow()
SetTestEnv()
cfg := lr.Config{
ApiKey: os.Getenv("APIKEY"),
ApiSecret: os.Getenv("APISECRET"),
}
lrclient, _ := lr.NewLoginradius(&cfg)
// set valid access_token here
lrclient.Context.Token = "77aa9464-815c-4dbe-8eec-c6c9e28e43b2"
_, err := mfa.Loginradius(mfa.Loginradius{lrclient}).GetMFABackUpCodeByAccessToken()
if err != nil {
t.Errorf("Error making call to GetMFABackUpCodeByAccessToken: %v", err)
}
}
// To run this test, comment out t.SkipNow() and set a manually created user with mfa turned on,
// then obtain a valid access_token by completing an mfa login attempt
func TestGetMFAResetBackUpCodeByAccessToken(t *testing.T) {
t.SkipNow()
SetTestEnv()
cfg := lr.Config{
ApiKey: os.Getenv("APIKEY"),
ApiSecret: os.Getenv("APISECRET"),
}
lrclient, _ := lr.NewLoginradius(&cfg)
// set valid access_token here
lrclient.Context.Token = "77aa9464-815c-4dbe-8eec-c6c9e28e43b2"
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).GetMFAResetBackUpCodeByAccessToken()
if err != nil {
t.Errorf("Error making call to GetMFAResetBackUpCodeByAccessToken: %v", err)
}
codes, err := lrjson.DynamicUnmarshal(res.Body)
_, ok := codes["BackUpCodes"].([]interface{})
if err != nil || !ok {
t.Errorf("Error returned from GetMFAResetBackUpCodeByAccessToken:%v, %v", err, codes)
}
}
// To run this test, comment out t.SkipNow() and set a manually created user with mfa turned on,
// then obtain a valid access_token by completing an mfa login attempt
func TestPutMFAValidateBackupCode(t *testing.T) {
t.SkipNow()
SetTestEnv()
cfg := lr.Config{
ApiKey: os.Getenv("APIKEY"),
ApiSecret: os.Getenv("APISECRET"),
}
lrclient, _ := lr.NewLoginradius(&cfg)
// set valid access_token here
lrclient.Context.Token = "c3b8130e-e92d-40cc-8153-83da3744aa4b"
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).GetMFAResetBackUpCodeByAccessToken()
if err != nil {
t.Errorf("Error making call to GetMFAResetBackUpCodeByAccessToken for PutMFAValidateBackupCode: %v", err)
}
data, err := lrjson.DynamicUnmarshal(res.Body)
codes, ok := data["BackUpCodes"].([]interface{})
if err != nil || !ok {
t.Errorf("Error returned from GetMFAResetBackUpCodeByAccessToken for PutMFAValidateBackupCode:%v, %v", err, codes)
}
// Get secondfactorauthenticationtoken
res, err = mfa.Loginradius(mfa.Loginradius{lrclient}).PostMFAEmailLogin(
// Set user credentials here
map[string]string{"email": "[email protected]", "password": "password"},
)
if err != nil {
t.Errorf("Error making PostMFAEmailLogin call for PutMFAValidateBackupCode: %v", err)
}
data, err = lrjson.DynamicUnmarshal(res.Body)
if err != nil {
t.Errorf("Error returned from PostMFAEmailLogin call for PutMFAValidateBackupCode: %v", err)
}
token, ok := data["SecondFactorAuthentication"].(map[string]interface{})["SecondFactorAuthenticationToken"].(string)
if !ok {
t.Errorf("Returned response from PostMFAEmailLogin does not contain SecondFactorAuthenticationToken")
}
res, err = mfa.Loginradius(mfa.Loginradius{lrclient}).PutMFAValidateBackupCode(
map[string]string{"secondfactorauthenticationtoken": token},
map[string]string{"backupcode": codes[0].(string)},
)
if err != nil {
t.Errorf("Error making call to PutMFAValidateBackupCode: %v", err)
}
profile, err := lrjson.DynamicUnmarshal(res.Body)
if err != nil {
t.Errorf("Error returned from PutMFAValidateBackupCode: %v, %v", profile, err)
}
_, ok = profile["access_token"].(string)
if !ok {
t.Errorf("Error returned from PutMFAValidateBackupCode: %v, %v", profile, err)
}
}
// To run this test, comment out t.SkipNow(), set a manually created user with mfa turned on,
// and set that user's uid in this test.
// This test must be run with a user that has not called this endpoint previously
func TestGetMFABackUpCodeByUID(t *testing.T) {
t.SkipNow()
SetTestEnv()
cfg := lr.Config{
ApiKey: os.Getenv("APIKEY"),
ApiSecret: os.Getenv("APISECRET"),
}
lrclient, _ := lr.NewLoginradius(&cfg)
// set uid here
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).GetMFABackUpCodeByUID("3ca313699dc8423b9f7c8af9dff9d7f2")
if err != nil {
t.Errorf("Error making call to GetMFABackUpCodeByUID: %v", err)
}
codes, err := lrjson.DynamicUnmarshal(res.Body)
_, ok := codes["BackUpCodes"].([]interface{})
if err != nil || !ok {
t.Errorf("Error returned from GetMFABackUpCodeByUID:%v, %v", err, codes)
}
}
// To run this test, comment out t.SkipNow() and set a manually created user with mfa turned on
func TestGetMFAResetBackUpCodeByUID(t *testing.T) {
t.SkipNow()
SetTestEnv()
cfg := lr.Config{
ApiKey: os.Getenv("APIKEY"),
ApiSecret: os.Getenv("APISECRET"),
}
lrclient, _ := lr.NewLoginradius(&cfg)
// Set uid here
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).GetMFAResetBackUpCodeByUID("3ca313699dc8423b9f7c8af9dff9d7f2")
if err != nil {
t.Errorf("Error making call to GetMFAResetBackUpCodeByUID: %v", err)
}
codes, err := lrjson.DynamicUnmarshal(res.Body)
_, ok := codes["BackUpCodes"].([]interface{})
if err != nil || !ok {
t.Errorf("Error returned from GetMFAResetBackUpCodeByUID:%v, %v", err, codes)
}
}
// To run this test, comment out t.SkipNow() and set a manually created user with mfa turned on
// and google authenticator configured
func TestDeleteMFAResetGoogleAuthenticatorByUid(t *testing.T) {
t.SkipNow()
SetTestEnv()
cfg := lr.Config{
ApiKey: os.Getenv("APIKEY"),
ApiSecret: os.Getenv("APISECRET"),
}
lrclient, _ := lr.NewLoginradius(&cfg)
// Set uid here
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).DeleteMFAResetGoogleAuthenticatorByUid("3ca313699dc8423b9f7c8af9dff9d7f2")
if err != nil {
t.Errorf("Error making call to DeleteMFAResetGoogleAuthenticatorByUid: %v", err)
}
body, err := lrjson.DynamicUnmarshal(res.Body)
_, ok := body["IsDeleted"].(bool)
if err != nil || !ok {
t.Errorf("Error returned from DeleteMFAResetGoogleAuthenticatorByUid :%v, %v", err, body)
}
}
// To run this test, comment out t.SkipNow() and set a manually created user with mfa turned on
// and sms authentication configured
func TestDeleteMFAResetSMSAuthenticatorByUid(t *testing.T) {
t.SkipNow()
SetTestEnv()
cfg := lr.Config{
ApiKey: os.Getenv("APIKEY"),
ApiSecret: os.Getenv("APISECRET"),
}
lrclient, _ := lr.NewLoginradius(&cfg)
// Set uid here
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).DeleteMFAResetSMSAuthenticatorByUid("3ca313699dc8423b9f7c8af9dff9d7f2")
if err != nil {
t.Errorf("Error making call to DeleteMFAResetSMSAuthenticatorByUid: %v", err)
}
body, err := lrjson.DynamicUnmarshal(res.Body)
_, ok := body["IsDeleted"].(bool)
if err != nil || !ok {
t.Errorf("Error returned from DeleteMFAResetSMSAuthenticatorByUid:%v, %v", err, body)
}
}
// To run this test, comment out t.SkipNow() and set a manually created user with mfa turned on
// and google authenticator configured
func TestDeleteMFAResetGoogleAuthenticatorByToken(t *testing.T) {
t.SkipNow()
SetTestEnv()
cfg := lr.Config{
ApiKey: os.Getenv("APIKEY"),
ApiSecret: os.Getenv("APISECRET"),
}
lrclient, _ := lr.NewLoginradius(&cfg, map[string]string{"token": "01a67f99-8ab5-4176-a12b-a5c3d00859b5"})
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).DeleteMFAResetGoogleAuthenticatorByToken()
if err != nil {
t.Errorf("Error making call to DeleteMFAResetGoogleAuthenticatorByToken: %v", err)
}
body, err := lrjson.DynamicUnmarshal(res.Body)
_, ok := body["IsDeleted"].(bool)
if err != nil || !ok {
t.Errorf("Error returned from DeleteMFAResetGoogleAuthenticatorByToken :%v, %v", err, body)
}
}
// To run this test, comment out t.SkipNow() and set a manually created user with mfa turned on
// and sms authentication configured
func TestDeleteMFAResetSMSAuthenticatorByToken(t *testing.T) {
t.SkipNow()
SetTestEnv()
cfg := lr.Config{
ApiKey: os.Getenv("APIKEY"),
ApiSecret: os.Getenv("APISECRET"),
}
lrclient, _ := lr.NewLoginradius(&cfg, map[string]string{"token": "01a67f99-8ab5-4176-a12b-a5c3d00859b5"})
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).DeleteMFAResetSMSAuthenticatorByToken()
if err != nil {
t.Errorf("Error making call to DeleteMFAResetSMSAuthenticatorByToken: %v", err)
}
body, err := lrjson.DynamicUnmarshal(res.Body)
_, ok := body["IsDeleted"].(bool)
if err != nil || !ok {
t.Errorf("Error returned from DeleteMFAResetSMSAuthenticatorByToken :%v, %v", err, body)
}
}
// To run this test, comment out t.SkipNow() and set a manually created user with mfa turned on,
// then obtain a valid access_token and a google authenticator code
func TestPutMFAReauthenticateByGoogleAuthenticator(t *testing.T) {
t.SkipNow()
SetTestEnv()
cfg := lr.Config{
ApiKey: os.Getenv("APIKEY"),
ApiSecret: os.Getenv("APISECRET"),
}
lrclient, _ := lr.NewLoginradius(&cfg)
// set valid access_token here
lrclient.Context.Token = "aebbf205-c9b6-458d-9e70-c3dfdabdb2ef"
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).PutMFAReauthenticateByGoogleAuthenticator(
// set google authenticator code here
map[string]string{"googleauthenticatorcode": ""},
)
if err != nil {
t.Errorf("Error making call to PutMFAReauthenticateByGoogleAuthenticator: %v", err)
}
result, err := lrjson.DynamicUnmarshal(res.Body)
_, ok := result["SecondFactorValidationToken"].(string)
if err != nil || !ok {
t.Errorf("Error returned from PutMFAReauthenticateByGoogleAuthenticator:%v, %v", err, result)
}
}
// To run this test, comment out t.SkipNow() and set a manually created user with mfa turned on,
// then obtain a valid access_token and a valid backup code
func TestPutMFAReauthenticateByBackupCode(t *testing.T) {
t.SkipNow()
SetTestEnv()
cfg := lr.Config{
ApiKey: os.Getenv("APIKEY"),
ApiSecret: os.Getenv("APISECRET"),
}
lrclient, _ := lr.NewLoginradius(&cfg)
// set valid access_token here
lrclient.Context.Token = "aebbf205-c9b6-458d-9e70-c3dfdabdb2ef"
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).PutMFAReauthenticateByBackupCode(
// set backup code here
map[string]string{"backupcode": "53141-b07fb"},
)
if err != nil {
t.Errorf("Error making call to PutMFAReauthenticateByBackupCode: %v", err)
}
result, err := lrjson.DynamicUnmarshal(res.Body)
_, ok := result["SecondFactorValidationToken"].(string)
if err != nil || !ok {
t.Errorf("Error returned from PutMFAReauthenticateByBackupCode:%v, %v", err, result)
}
}
// To run this test, comment out t.SkipNow() and set a manually created user with mfa turned on,
// then obtain a valid access_token and a valid sms otp
func TestPutMFAReauthenticateByOTP(t *testing.T) {
t.SkipNow()
SetTestEnv()
cfg := lr.Config{
ApiKey: os.Getenv("APIKEY"),
ApiSecret: os.Getenv("APISECRET"),
}
lrclient, _ := lr.NewLoginradius(&cfg)
// set valid access_token here
lrclient.Context.Token = "d3d95123-b14c-43d6-99ef-51528051b3bd"
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).PutMFAReauthenticateByOTP(
// set otp here
map[string]string{"otp": "53141-b07fb"},
)
if err != nil {
t.Errorf("Error making call to PutMFAReauthenticateByOTP: %v", err)
}
result, err := lrjson.DynamicUnmarshal(res.Body)
_, ok := result["SecondFactorValidationToken"].(string)
if err != nil || !ok {
t.Errorf("Error returned from PutMFAReauthenticateByOTP:%v, %v", err, result)
}
}
// To run this test, comment out t.SkipNow() and set a manually created user with mfa turned on,
// then obtain a valid access_token and a valid password
func TestPutMFAReauthenticateByPassword(t *testing.T) {
t.SkipNow()
SetTestEnv()
cfg := lr.Config{
ApiKey: os.Getenv("APIKEY"),
ApiSecret: os.Getenv("APISECRET"),
}
lrclient, _ := lr.NewLoginradius(&cfg)
// set valid access_token here
lrclient.Context.Token = "d3d95123-b14c-43d6-99ef-51528051b3bd"
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).PutMFAReauthenticateByPassword(
// set Password here
map[string]string{"password": "password"},
)
if err != nil {
t.Errorf("Error making call to PutMFAReauthenticateByPassword: %v", err)
}
result, err := lrjson.DynamicUnmarshal(res.Body)
_, ok := result["SecondFactorValidationToken"].(string)
if err != nil || !ok {
t.Errorf("Error returned from PutMFAReauthenticateByPassword:%v, %v", err, result)
}
}
// To run this test, comment out t.SkipNow() and set a manually created user with mfa turned on,
// then obtain a valid access_token
func TestGetMFAReAuthenticate(t *testing.T) {
t.SkipNow()
SetTestEnv()
cfg := lr.Config{
ApiKey: os.Getenv("APIKEY"),
ApiSecret: os.Getenv("APISECRET"),
}
lrclient, _ := lr.NewLoginradius(&cfg)
// set valid access_token here
lrclient.Context.Token = "96688431-0945-4ed5-9115-733521a13a53"
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).GetMFAReAuthenticate()
if err != nil {
t.Errorf("Error making call to GetMFAReAuthenticate: %v", err)
}
result, err := lrjson.DynamicUnmarshal(res.Body)
if err != nil {
t.Errorf("Error returned from GetMFAReAuthenticate:%v, %v", err, result)
}
}
// To run this test, comment out t.SkipNow() and set a manually created user with mfa turned on,
// then obtain a valid access_token
func TestPutMFAUpdateSettings(t *testing.T) {
t.SkipNow()
SetTestEnv()
cfg := lr.Config{
ApiKey: os.Getenv("APIKEY"),
ApiSecret: os.Getenv("APISECRET"),
}
lrclient, _ := lr.NewLoginradius(&cfg)
// set valid access_token here
lrclient.Context.Token = "96688431-0945-4ed5-9115-733521a13a53"
res, err := mfa.Loginradius(mfa.Loginradius{lrclient}).PutMFAUpdateSettings(
// manually set otp obtained from sms authenticator here
map[string]string{"otp": "245212"},
)
if err != nil {
t.Errorf("Error making call to PutMFAUpdateSettings: %v", err)
}
result, err := lrjson.DynamicUnmarshal(res.Body)
if err != nil {
t.Errorf("Error returned from PutMFAUpdateSettings:%v, %v", err, result)
}
}
| [
"\"APIKEY\"",
"\"APISECRET\"",
"\"APIKEY\"",
"\"APISECRET\"",
"\"APIKEY\"",
"\"APISECRET\"",
"\"APIKEY\"",
"\"APISECRET\"",
"\"APIKEY\"",
"\"APISECRET\"",
"\"APIKEY\"",
"\"APISECRET\"",
"\"APIKEY\"",
"\"APISECRET\"",
"\"APIKEY\"",
"\"APISECRET\"",
"\"APIKEY\"",
"\"APISECRET\"",
"\"APIKEY\"",
"\"APISECRET\"",
"\"APIKEY\"",
"\"APISECRET\"",
"\"APIKEY\"",
"\"APISECRET\"",
"\"APIKEY\"",
"\"APISECRET\"",
"\"APIKEY\"",
"\"APISECRET\"",
"\"APIKEY\"",
"\"APISECRET\"",
"\"APIKEY\"",
"\"APISECRET\"",
"\"APIKEY\"",
"\"APISECRET\"",
"\"APIKEY\"",
"\"APISECRET\"",
"\"APIKEY\"",
"\"APISECRET\""
]
| []
| [
"APIKEY",
"APISECRET"
]
| [] | ["APIKEY", "APISECRET"] | go | 2 | 0 | |
utilities/autoware_launcher/src/autoware_launcher/core/myutils.py | import rospkg
import os
import yaml
def userhome(path = ""):
return os.path.abspath(os.path.join(os.path.expanduser("~"), path))
def package(path = ""):
rospack = rospkg.RosPack()
return os.path.join(rospack.get_path("autoware_launcher"), path)
def plugins():
return os.path.join(package(), "plugins")
def profile(profile = ""):
return os.path.join(package(), "profiles", profile)
def parentpath(path):
return os.path.dirname(path)
def makedirs(path, mode=0o777, exist_ok=False): # workaround in python2
if not (exist_ok and os.path.exists(path)): os.makedirs(path, mode)
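# Walk rootpath recursively and return a flat list of the files found; when
# relative=True the returned paths are relative to rootpath.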
def listfiles(rootpath, relative=False):
filelist = []
for currpath, dirnames, filenames in os.walk(rootpath):
if relative:
currpath = os.path.relpath(currpath, rootpath)
if currpath == ".":
currpath = ""
for filename in filenames:
filelist.append(os.path.join(currpath, filename))
return filelist
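# Replace a leading home-directory prefix in path with "$(env HOME)".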
def envpath(path):
patterns = \
[
(os.environ['HOME'], "$(env HOME)")
]
for pattern, replace in patterns:
if path.startswith(pattern):
return replace + path[len(pattern):]
return path
| []
| []
| [
"HOME"
]
| [] | ["HOME"] | python | 1 | 0 | |
cmd/minikube/cmd/root.go | /*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
goflag "flag"
"fmt"
"os"
"runtime"
"strings"
"github.com/golang/glog"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"k8s.io/kubectl/pkg/util/templates"
configCmd "k8s.io/minikube/cmd/minikube/cmd/config"
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/translate"
)
var dirs = [...]string{
localpath.MiniPath(),
localpath.MakeMiniPath("certs"),
localpath.MakeMiniPath("machines"),
localpath.MakeMiniPath("cache"),
localpath.MakeMiniPath("cache", "iso"),
localpath.MakeMiniPath("config"),
localpath.MakeMiniPath("addons"),
localpath.MakeMiniPath("files"),
localpath.MakeMiniPath("logs"),
}
var viperWhiteList = []string{
"alsologtostderr",
"log_dir",
"v",
}
// RootCmd represents the base command when called without any subcommands
var RootCmd = &cobra.Command{
Use: "minikube",
Short: "Minikube is a tool for managing local Kubernetes clusters.",
Long: `Minikube is a CLI tool that provisions and manages single-node Kubernetes clusters optimized for development workflows.`,
PersistentPreRun: func(cmd *cobra.Command, args []string) {
for _, path := range dirs {
if err := os.MkdirAll(path, 0777); err != nil {
exit.WithError("Error creating minikube directory", err)
}
}
logDir := pflag.Lookup("log_dir")
if !logDir.Changed {
if err := logDir.Value.Set(localpath.MakeMiniPath("logs")); err != nil {
exit.WithError("logdir set failed", err)
}
}
},
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once for the rootCmd.
func Execute() {
for _, c := range RootCmd.Commands() {
c.Short = translate.T(c.Short)
c.Long = translate.T(c.Long)
c.Flags().VisitAll(func(flag *pflag.Flag) {
flag.Usage = translate.T(flag.Usage)
})
c.SetUsageTemplate(usageTemplate())
}
RootCmd.Short = translate.T(RootCmd.Short)
RootCmd.Long = translate.T(RootCmd.Long)
RootCmd.Flags().VisitAll(func(flag *pflag.Flag) {
flag.Usage = translate.T(flag.Usage)
})
if runtime.GOOS != "windows" {
// add minikube binaries to the path
targetDir := localpath.MakeMiniPath("bin")
addToPath(targetDir)
}
// Universally ensure that we never speak to the wrong DOCKER_HOST
if err := oci.PointToHostDockerDaemon(); err != nil {
glog.Errorf("oci env: %v", err)
}
if err := RootCmd.Execute(); err != nil {
// Cobra already outputs the error, typically because the user provided an unknown command.
os.Exit(exit.BadUsage)
}
}
// usageTemplate just calls translate.T on the default usage template
// explicitly using the raw string instead of calling c.UsageTemplate()
// so the extractor can find this monstrosity of a string
func usageTemplate() string {
return fmt.Sprintf(`%s:{{if .Runnable}}
{{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
{{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
%s:
{{.NameAndAliases}}{{end}}{{if .HasExample}}
%s:
{{.Example}}{{end}}{{if .HasAvailableSubCommands}}
%s:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
{{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
%s:
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
%s:
{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}}
%s:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}}
{{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}}
%s{{end}}
`, translate.T("Usage"), translate.T("Aliases"), translate.T("Examples"), translate.T("Available Commands"), translate.T("Flags"), translate.T("Global Flags"), translate.T("Additional help topics"), translate.T(`Use "{{.CommandPath}} [command] --help" for more information about a command.`))
}
// Handle config values for flags used in external packages (e.g. glog)
// by setting them directly, using values from viper when not passed in as args
func setFlagsUsingViper() {
for _, config := range viperWhiteList {
var a = pflag.Lookup(config)
viper.SetDefault(a.Name, a.DefValue)
// If the flag is set, override viper value
if a.Changed {
viper.Set(a.Name, a.Value.String())
}
// Viper will give precedence first to calls to the Set command,
// then to values from the config.yml
if err := a.Value.Set(viper.GetString(a.Name)); err != nil {
exit.WithError(fmt.Sprintf("failed to set value for %q", a.Name), err)
}
a.Changed = true
}
}
func init() {
translate.DetermineLocale()
RootCmd.PersistentFlags().StringP(config.ProfileName, "p", constants.DefaultClusterName, `The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently.`)
RootCmd.PersistentFlags().StringP(configCmd.Bootstrapper, "b", "kubeadm", "The name of the cluster bootstrapper that will set up the kubernetes cluster.")
groups := templates.CommandGroups{
{
Message: translate.T("Basic Commands:"),
Commands: []*cobra.Command{
startCmd,
statusCmd,
stopCmd,
deleteCmd,
dashboardCmd,
pauseCmd,
unpauseCmd,
},
},
{
Message: translate.T("Images Commands:"),
Commands: []*cobra.Command{
dockerEnvCmd,
podmanEnvCmd,
cacheCmd,
},
},
{
Message: translate.T("Configuration and Management Commands:"),
Commands: []*cobra.Command{
configCmd.AddonsCmd,
configCmd.ConfigCmd,
configCmd.ProfileCmd,
updateContextCmd,
},
},
{
Message: translate.T("Networking and Connectivity Commands:"),
Commands: []*cobra.Command{
serviceCmd,
tunnelCmd,
},
},
{
Message: translate.T("Advanced Commands:"),
Commands: []*cobra.Command{
mountCmd,
sshCmd,
kubectlCmd,
nodeCmd,
},
},
{
Message: translate.T("Troubleshooting Commands:"),
Commands: []*cobra.Command{
sshKeyCmd,
ipCmd,
logsCmd,
updateCheckCmd,
versionCmd,
optionsCmd,
},
},
}
groups.Add(RootCmd)
// Ungrouped commands will show up in the "Other Commands" section
RootCmd.AddCommand(completionCmd)
templates.ActsAsRootCommand(RootCmd, []string{"options"}, groups...)
pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
if err := viper.BindPFlags(RootCmd.PersistentFlags()); err != nil {
exit.WithError("Unable to bind flags", err)
}
cobra.OnInitialize(initConfig)
}
// initConfig reads in config file and ENV variables if set.
func initConfig() {
configPath := localpath.ConfigFile()
viper.SetConfigFile(configPath)
viper.SetConfigType("json")
if err := viper.ReadInConfig(); err != nil {
// This config file is optional, so don't emit errors if missing
if _, ok := err.(viper.ConfigFileNotFoundError); !ok {
glog.Warningf("Error reading config file at %s: %v", configPath, err)
}
}
setupViper()
}
func setupViper() {
viper.SetEnvPrefix(minikubeEnvPrefix)
// Replaces '-' in flags with '_' in env variables
// e.g. iso-url => $ENVPREFIX_ISO_URL
viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
viper.AutomaticEnv()
viper.SetDefault(config.WantUpdateNotification, true)
viper.SetDefault(config.ReminderWaitPeriodInHours, 24)
viper.SetDefault(config.WantReportError, false)
viper.SetDefault(config.WantReportErrorPrompt, true)
viper.SetDefault(config.WantKubectlDownloadMsg, true)
viper.SetDefault(config.WantNoneDriverWarning, true)
viper.SetDefault(config.ShowDriverDeprecationNotification, true)
viper.SetDefault(config.ShowBootstrapperDeprecationNotification, true)
setFlagsUsingViper()
}
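// addToPath prepends dir to the current process's PATH environment variable.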
func addToPath(dir string) {
new := fmt.Sprintf("%s:%s", dir, os.Getenv("PATH"))
glog.Infof("Updating PATH: %s", dir)
os.Setenv("PATH", new)
}
| [
"\"PATH\""
]
| []
| [
"PATH"
]
| [] | ["PATH"] | go | 1 | 0 | |
examples/sn_to_info.py | import os
from ciscosupportsdk.api import CiscoSupportAPI
CS_API_KEY = os.getenv("CS_API_KEY")
CS_API_SECRET = os.getenv("CS_API_SECRET")
cs = CiscoSupportAPI(CS_API_KEY, CS_API_SECRET)
# Find whether a serial number is covered and when its warranty expires
for item in cs.serial_information.get_coverage_status(["FXS2130Q286"]):
print(f"{item.is_covered} {item.coverage_end_date}")
# Find whether a serial number is covered and when its contract expires
for item in cs.serial_information.get_coverage_summary_by_serial(
["FXS2130Q286"]
):
print(item)
# Get the coverage summary by instance number
for item in cs.serial_information.get_coverage_summary_by_instance(
["917280220"]
):
print(item)
# Get the orderable product IDs (PIDs) for a serial number
for item in cs.serial_information.get_orderable_pids(["FXS2130Q286"]):
print(item)
# Get the coverage and owner status for a serial number
for item in cs.serial_information.get_coverage_owner_status(["FXS2130Q286"]):
print(item)
| []
| []
| [
"CS_API_KEY",
"CS_API_SECRET"
]
| [] | ["CS_API_KEY", "CS_API_SECRET"] | python | 2 | 0 | |
unittests/test_cli.py | # Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import contextlib
import io
import itertools
import os
import pytest
import re
import sys
import reframe.core.environments as env
import reframe.frontend.runreport as runreport
import reframe.core.logging as logging
import reframe.core.runtime as rt
import unittests.fixtures as fixtures
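# Run the given function in-process with a temporary sys.argv, restoring the
# previous argv and environment afterwards and capturing exit code, stdout and stderr.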
def run_command_inline(argv, funct, *args, **kwargs):
# Save current execution context
argv_save = sys.argv
environ_save = env.snapshot()
sys.argv = argv
exitcode = None
captured_stdout = io.StringIO()
captured_stderr = io.StringIO()
print(*sys.argv)
with contextlib.redirect_stdout(captured_stdout):
with contextlib.redirect_stderr(captured_stderr):
try:
with rt.temp_runtime(None):
exitcode = funct(*args, **kwargs)
except SystemExit as e:
exitcode = e.code
finally:
# Restore execution context
environ_save.restore()
sys.argv = argv_save
return (exitcode,
captured_stdout.getvalue(),
captured_stderr.getvalue())
@pytest.fixture
def perflogdir(tmp_path):
dirname = tmp_path / '.rfm-perflogs'
yield dirname
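# Fixture returning a helper that assembles a reframe command line from the given
# options and runs it in-process via run_command_inline().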
@pytest.fixture
def run_reframe(tmp_path, perflogdir):
def _run_reframe(system='generic:default',
checkpath=['unittests/resources/checks/hellocheck.py'],
environs=['builtin'],
local=True,
action='run',
more_options=None,
mode=None,
config_file='unittests/resources/settings.py',
ignore_check_conflicts=True,
perflogdir=str(perflogdir)):
import reframe.frontend.cli as cli
# We always pass the --report-file option, because we don't want to
# pollute the user's home directory
argv = ['./bin/reframe', '--prefix', str(tmp_path), '--nocolor',
f'--report-file={tmp_path / "report.json"}']
if mode:
argv += ['--mode', mode]
if system:
argv += ['--system', system]
if config_file:
argv += ['-C', config_file]
argv += itertools.chain(*(['-c', c] for c in checkpath))
argv += itertools.chain(*(['-p', e] for e in environs))
if local:
argv += ['--force-local']
if action == 'run':
argv += ['-r']
elif action == 'list':
argv += ['-l']
elif action == 'list_detailed':
argv += ['-L']
elif action == 'help':
argv += ['-h']
if ignore_check_conflicts:
argv += ['--ignore-check-conflicts']
if perflogdir:
argv += ['--perflogdir', perflogdir]
if more_options:
argv += more_options
return run_command_inline(argv, cli.main)
return _run_reframe
@pytest.fixture
def temp_runtime(tmp_path):
def _temp_runtime(site_config, system=None, options={}):
options.update({'systems/prefix': tmp_path})
with rt.temp_runtime(site_config, system, options):
yield rt.runtime
yield _temp_runtime
@pytest.fixture
def user_exec_ctx(temp_runtime):
if fixtures.USER_CONFIG_FILE is None:
pytest.skip('no user configuration file supplied')
yield from temp_runtime(fixtures.USER_CONFIG_FILE, fixtures.USER_SYSTEM)
@pytest.fixture
def remote_exec_ctx(user_exec_ctx):
partition = fixtures.partition_by_scheduler()
if not partition:
pytest.skip('job submission not supported')
return partition, partition.environs[0]
def test_check_success(run_reframe, tmp_path):
returncode, stdout, _ = run_reframe(more_options=['--save-log-files'])
assert 'PASSED' in stdout
assert 'FAILED' not in stdout
assert returncode == 0
logfile = logging.log_files()[0]
assert os.path.exists(tmp_path / 'output' / logfile)
assert os.path.exists(tmp_path / 'report.json')
def test_check_restore_session_failed(run_reframe, tmp_path):
run_reframe(
checkpath=['unittests/resources/checks_unlisted/deps_complex.py'],
)
returncode, stdout, _ = run_reframe(
checkpath=[],
more_options=[
f'--restore-session={tmp_path}/report.json', '--failed'
]
)
report = runreport.load_report(f'{tmp_path}/report.json')
assert set(report.slice('name', when=('fail_phase', 'sanity'))) == {'T2'}
assert set(report.slice('name',
when=('fail_phase', 'startup'))) == {'T7', 'T9'}
assert set(report.slice('name', when=('fail_phase', 'setup'))) == {'T8'}
assert report['runs'][-1]['num_cases'] == 4
restored = {r['name'] for r in report['restored_cases']}
assert restored == {'T1', 'T6'}
def test_check_restore_session_succeeded_test(run_reframe, tmp_path):
run_reframe(
checkpath=['unittests/resources/checks_unlisted/deps_complex.py'],
more_options=['--keep-stage-files']
)
returncode, stdout, _ = run_reframe(
checkpath=[],
more_options=[
f'--restore-session={tmp_path}/report.json', '-n', 'T1'
]
)
report = runreport.load_report(f'{tmp_path}/report.json')
assert report['runs'][-1]['num_cases'] == 1
assert report['runs'][-1]['testcases'][0]['name'] == 'T1'
restored = {r['name'] for r in report['restored_cases']}
assert restored == {'T4', 'T5'}
def test_check_restore_session_check_search_path(run_reframe, tmp_path):
run_reframe(
checkpath=['unittests/resources/checks_unlisted/deps_complex.py']
)
returncode, stdout, _ = run_reframe(
checkpath=[f'{tmp_path}/foo'],
more_options=[
f'--restore-session={tmp_path}/report.json', '-n', 'T1', '-R'
],
action='list'
)
assert returncode == 0
assert 'Found 0 check(s)' in stdout
def test_check_success_force_local(run_reframe, tmp_path):
# We explicitly use a system here with a non-local scheduler and pass the
# `--force-local` option
returncode, stdout, _ = run_reframe(system='testsys:gpu', local=True)
assert 'PASSED' in stdout
assert 'FAILED' not in stdout
assert returncode == 0
def test_report_file_with_sessionid(run_reframe, tmp_path):
returncode, *_ = run_reframe(
more_options=[
f'--report-file={tmp_path / "rfm-report-{sessionid}.json"}'
]
)
assert returncode == 0
assert os.path.exists(tmp_path / 'rfm-report-0.json')
def test_report_ends_with_newline(run_reframe, tmp_path):
returncode, stdout, _ = run_reframe(
more_options=[
f'--report-file={tmp_path / "rfm-report.json"}'
]
)
assert returncode == 0
with open(tmp_path / 'rfm-report.json') as fp:
assert fp.read()[-1] == '\n'
def test_check_submit_success(run_reframe, remote_exec_ctx):
# This test will run on the auto-detected system
partition, environ = remote_exec_ctx
returncode, stdout, _ = run_reframe(
config_file=fixtures.USER_CONFIG_FILE,
local=False,
system=partition.fullname,
# Pick up the programming environment of the partition
# Prepend ^ and append $ so as to match exactly the given name
environs=[f'^{environ.name}$']
)
assert 'FAILED' not in stdout
assert 'PASSED' in stdout
# Assert that we have run only one test case
assert 'Ran 2/2 test case(s)' in stdout
assert 0 == returncode
def test_check_failure(run_reframe):
returncode, stdout, _ = run_reframe(
checkpath=['unittests/resources/checks/frontend_checks.py'],
more_options=['-t', 'BadSetupCheck']
)
assert 'FAILED' in stdout
assert returncode != 0
def test_check_setup_failure(run_reframe):
returncode, stdout, stderr = run_reframe(
checkpath=['unittests/resources/checks/frontend_checks.py'],
more_options=['-t', 'BadSetupCheckEarly'],
local=False,
)
assert 'Traceback' not in stdout
assert 'Traceback' not in stderr
assert 'FAILED' in stdout
assert returncode != 0
def test_check_kbd_interrupt(run_reframe):
returncode, stdout, stderr = run_reframe(
checkpath=[
'unittests/resources/checks_unlisted/kbd_interrupt.py'
],
more_options=['-t', 'KeyboardInterruptCheck'],
local=False,
)
assert 'Traceback' not in stdout
assert 'Traceback' not in stderr
assert 'FAILED' in stdout
assert returncode != 0
def test_check_sanity_failure(run_reframe, tmp_path):
returncode, stdout, stderr = run_reframe(
checkpath=['unittests/resources/checks/frontend_checks.py'],
more_options=['-t', 'SanityFailureCheck']
)
assert 'FAILED' in stdout
# This is a normal failure, it should not raise any exception
assert 'Traceback' not in stdout
assert 'Traceback' not in stderr
assert returncode != 0
assert os.path.exists(
tmp_path / 'stage' / 'generic' / 'default' /
'builtin' / 'SanityFailureCheck'
)
def test_dont_restage(run_reframe, tmp_path):
run_reframe(
checkpath=['unittests/resources/checks/frontend_checks.py'],
more_options=['-t', 'SanityFailureCheck']
)
# Place a random file in the test's stage directory and rerun with
# `--dont-restage` and `--max-retries`
stagedir = (tmp_path / 'stage' / 'generic' / 'default' /
'builtin' / 'SanityFailureCheck')
(stagedir / 'foobar').touch()
returncode, stdout, stderr = run_reframe(
checkpath=['unittests/resources/checks/frontend_checks.py'],
more_options=['-t', 'SanityFailureCheck',
'--dont-restage', '--max-retries=1']
)
assert os.path.exists(stagedir / 'foobar')
assert not os.path.exists(f'{stagedir}_retry1')
# And some standard assertions
assert 'Traceback' not in stdout
assert 'Traceback' not in stderr
assert returncode != 0
def test_checkpath_symlink(run_reframe, tmp_path):
# FIXME: This should move to test_loader.py
checks_symlink = tmp_path / 'checks_symlink'
os.symlink(os.path.abspath('unittests/resources/checks'),
checks_symlink)
returncode, stdout, _ = run_reframe(
action='list',
more_options=['-R'],
checkpath=['unittests/resources/checks', str(checks_symlink)]
)
num_checks_default = re.search(
r'Found (\d+) check', stdout, re.MULTILINE).group(1)
num_checks_in_checkdir = re.search(
r'Found (\d+) check', stdout, re.MULTILINE).group(1)
assert num_checks_in_checkdir == num_checks_default
def test_performance_check_failure(run_reframe, tmp_path, perflogdir):
returncode, stdout, stderr = run_reframe(
checkpath=['unittests/resources/checks/frontend_checks.py'],
more_options=['-t', 'PerformanceFailureCheck']
)
assert 'FAILED' in stdout
# This is a normal failure, it should not raise any exception
assert 'Traceback' not in stdout
assert 'Traceback' not in stderr
assert returncode != 0
assert os.path.exists(
tmp_path / 'stage' / 'generic' / 'default' /
'builtin' / 'PerformanceFailureCheck'
)
assert os.path.exists(perflogdir / 'generic' /
'default' / 'PerformanceFailureCheck.log')
def test_perflogdir_from_env(run_reframe, tmp_path, monkeypatch):
monkeypatch.setenv('FOODIR', str(tmp_path / 'perflogs'))
returncode, stdout, stderr = run_reframe(
checkpath=['unittests/resources/checks/frontend_checks.py'],
more_options=['-t', 'PerformanceFailureCheck'],
perflogdir='$FOODIR'
)
assert returncode == 1
assert 'Traceback' not in stdout
assert 'Traceback' not in stderr
assert os.path.exists(tmp_path / 'perflogs' / 'generic' /
'default' / 'PerformanceFailureCheck.log')
def test_performance_report(run_reframe):
returncode, stdout, _ = run_reframe(
checkpath=['unittests/resources/checks/frontend_checks.py'],
more_options=['-t', 'PerformanceFailureCheck', '--performance-report']
)
assert r'PERFORMANCE REPORT' in stdout
assert r'perf: 10 Gflop/s' in stdout
def test_skip_system_check_option(run_reframe):
returncode, stdout, _ = run_reframe(
checkpath=['unittests/resources/checks/frontend_checks.py'],
more_options=['--skip-system-check', '-t', 'NoSystemCheck']
)
assert 'PASSED' in stdout
assert returncode == 0
def test_skip_prgenv_check_option(run_reframe):
returncode, stdout, _ = run_reframe(
checkpath=['unittests/resources/checks/frontend_checks.py'],
more_options=['--skip-prgenv-check', '-t', 'NoPrgEnvCheck']
)
assert 'PASSED' in stdout
assert returncode == 0
def test_sanity_of_checks(run_reframe, tmp_path):
# This test will effectively load all the tests in the checks path and
# will force a syntactic and runtime check at least for the constructor
# of the checks
returncode, *_ = run_reframe(
action='list',
checkpath=[]
)
assert returncode == 0
def test_unknown_system(run_reframe):
returncode, stdout, stderr = run_reframe(
action='list',
system='foo',
checkpath=[]
)
assert 'Traceback' not in stdout
assert 'Traceback' not in stderr
assert returncode == 1
def test_sanity_of_optconfig(run_reframe):
# Test the sanity of the command line options configuration
returncode, *_ = run_reframe(
action='help',
checkpath=[]
)
assert returncode == 0
def test_checkpath_recursion(run_reframe):
_, stdout, _ = run_reframe(action='list', checkpath=[])
num_checks_default = re.search(r'Found (\d+) check', stdout).group(1)
_, stdout, _ = run_reframe(action='list',
checkpath=['checks/'],
more_options=['-R'])
num_checks_in_checkdir = re.search(r'Found (\d+) check', stdout).group(1)
assert num_checks_in_checkdir == num_checks_default
_, stdout, _ = run_reframe(action='list',
checkpath=['checks/'],
more_options=[])
num_checks_in_checkdir = re.search(r'Found (\d+) check', stdout).group(1)
assert num_checks_in_checkdir == '0'
def test_same_output_stage_dir(run_reframe, tmp_path):
output_dir = str(tmp_path / 'foo')
returncode, *_ = run_reframe(
more_options=['-o', output_dir, '-s', output_dir]
)
assert returncode == 1
# Retry with --keep-stage-files
returncode, *_ = run_reframe(
more_options=['-o', output_dir, '-s', output_dir, '--keep-stage-files']
)
assert returncode == 0
assert os.path.exists(output_dir)
def test_execution_modes(run_reframe):
returncode, stdout, stderr = run_reframe(
checkpath=[],
environs=[],
local=False,
mode='unittest'
)
assert 'Traceback' not in stdout
assert 'Traceback' not in stderr
assert 'FAILED' not in stdout
assert 'PASSED' in stdout
assert 'Ran 2/2 test case' in stdout
def test_no_ignore_check_conflicts(run_reframe):
returncode, *_ = run_reframe(
checkpath=['unittests/resources/checks'],
more_options=['-R'],
ignore_check_conflicts=False,
action='list'
)
assert returncode != 0
def test_timestamp_option(run_reframe):
from datetime import datetime
timefmt = datetime.now().strftime('xxx_%F')
returncode, stdout, _ = run_reframe(
checkpath=['unittests/resources/checks'],
ignore_check_conflicts=False,
action='list',
more_options=['-R', '--timestamp=xxx_%F']
)
assert returncode != 0
assert timefmt in stdout
def test_list_empty_prgenvs_check_and_options(run_reframe):
returncode, stdout, _ = run_reframe(
checkpath=['unittests/resources/checks/frontend_checks.py'],
action='list',
environs=[],
more_options=['-n', 'NoPrgEnvCheck'],
)
assert 'Found 0 check(s)' in stdout
assert returncode == 0
def test_list_check_with_empty_prgenvs(run_reframe):
returncode, stdout, _ = run_reframe(
checkpath=['unittests/resources/checks/frontend_checks.py'],
action='list',
environs=['foo'],
more_options=['-n', 'NoPrgEnvCheck']
)
assert 'Found 0 check(s)' in stdout
assert returncode == 0
def test_list_empty_prgenvs_in_check_and_options(run_reframe):
returncode, stdout, _ = run_reframe(
checkpath=['unittests/resources/checks/frontend_checks.py'],
action='list',
environs=[],
more_options=['-n', 'NoPrgEnvCheck']
)
assert 'Found 0 check(s)' in stdout
assert returncode == 0
def test_list_with_details(run_reframe):
returncode, stdout, stderr = run_reframe(
checkpath=['unittests/resources/checks/frontend_checks.py'],
action='list_detailed'
)
assert 'Traceback' not in stdout
assert 'Traceback' not in stderr
assert returncode == 0
def test_filtering_multiple_criteria(run_reframe):
returncode, stdout, stderr = run_reframe(
checkpath=['unittests/resources/checks'],
action='list',
more_options=['-t', 'foo', '-n', 'hellocheck',
'--ignore-check-conflicts']
)
assert 'Traceback' not in stdout
assert 'Traceback' not in stderr
assert 'Found 1 check(s)' in stdout
assert returncode == 0
def test_show_config_all(run_reframe):
# Just make sure that this option does not make the frontend crash
returncode, stdout, stderr = run_reframe(
more_options=['--show-config'],
system='testsys'
)
assert 'Traceback' not in stdout
assert 'Traceback' not in stderr
assert returncode == 0
def test_show_config_param(run_reframe):
# Just make sure that this option does not make the frontend crash
returncode, stdout, stderr = run_reframe(
more_options=['--show-config=systems'],
system='testsys'
)
assert 'Traceback' not in stdout
assert 'Traceback' not in stderr
assert returncode == 0
def test_show_config_unknown_param(run_reframe):
# Just make sure that this option does not make the frontend crash
returncode, stdout, stderr = run_reframe(
more_options=['--show-config=foo'],
system='testsys'
)
assert 'no such configuration parameter found' in stdout
assert 'Traceback' not in stdout
assert 'Traceback' not in stderr
assert returncode == 0
def test_verbosity(run_reframe):
returncode, stdout, stderr = run_reframe(
more_options=['-vvvvv'],
system='testsys',
action='list'
)
assert stdout != ''
assert 'Traceback' not in stdout
assert 'Traceback' not in stderr
assert returncode == 0
def test_verbosity_with_check(run_reframe):
returncode, stdout, stderr = run_reframe(
more_options=['-vvvvv'],
system='testsys',
action='list',
checkpath=['unittests/resources/checks/hellocheck.py']
)
assert '' != stdout
assert '--- Logging error ---' not in stdout
assert 'Traceback' not in stdout
assert 'Traceback' not in stderr
assert 0 == returncode
def test_load_user_modules(run_reframe, user_exec_ctx):
with rt.module_use('unittests/modules'):
returncode, stdout, stderr = run_reframe(
more_options=['-m testmod_foo'],
action='list'
)
assert stdout != ''
assert 'Traceback' not in stdout
assert 'Traceback' not in stderr
assert returncode == 0
def test_unload_module(run_reframe, user_exec_ctx):
# This test is mostly for ensuring coverage. `run_reframe()` restores
# the current environment, so it is not easy to verify that the modules
# are indeed unloaded. However, this functionality is tested elsewhere
# more exhaustively.
ms = rt.runtime().modules_system
if not fixtures.has_sane_modules_system():
pytest.skip('no modules system found')
with rt.module_use('unittests/modules'):
ms.load_module('testmod_foo')
returncode, stdout, stderr = run_reframe(
more_options=['-u testmod_foo'],
action='list'
)
ms.unload_module('testmod_foo')
assert stdout != ''
assert 'Traceback' not in stdout
assert 'Traceback' not in stderr
assert returncode == 0
def test_unuse_module_path(run_reframe, user_exec_ctx):
ms = rt.runtime().modules_system
if not fixtures.has_sane_modules_system():
pytest.skip('no modules system found')
module_path = 'unittests/modules'
ms.searchpath_add(module_path)
returncode, stdout, stderr = run_reframe(
more_options=[f'--module-path=-{module_path}', '--module=testmod_foo'],
config_file=fixtures.USER_CONFIG_FILE, action='run',
system=rt.runtime().system.name
)
ms.searchpath_remove(module_path)
assert "could not load module 'testmod_foo' correctly" in stdout
assert 'Traceback' not in stderr
assert returncode == 0
def test_use_module_path(run_reframe, user_exec_ctx):
ms = rt.runtime().modules_system
if not fixtures.has_sane_modules_system():
pytest.skip('no modules system found')
module_path = 'unittests/modules'
returncode, stdout, stderr = run_reframe(
more_options=[f'--module-path=+{module_path}', '--module=testmod_foo'],
config_file=fixtures.USER_CONFIG_FILE, action='run',
system=rt.runtime().system.name
)
assert 'Traceback' not in stdout
assert 'Traceback' not in stderr
assert "could not load module 'testmod_foo' correctly" not in stdout
assert returncode == 0
def test_overwrite_module_path(run_reframe, user_exec_ctx):
ms = rt.runtime().modules_system
if not fixtures.has_sane_modules_system():
pytest.skip('no modules system found')
module_path = 'unittests/modules'
with contextlib.suppress(KeyError):
module_path += f':{os.environ["MODULEPATH"]}'
returncode, stdout, stderr = run_reframe(
more_options=[f'--module-path={module_path}', '--module=testmod_foo'],
config_file=fixtures.USER_CONFIG_FILE, action='run',
system=rt.runtime().system.name
)
assert 'Traceback' not in stdout
assert 'Traceback' not in stderr
assert "could not load module 'testmod_foo' correctly" not in stdout
assert returncode == 0
def test_failure_stats(run_reframe):
returncode, stdout, stderr = run_reframe(
checkpath=['unittests/resources/checks/frontend_checks.py'],
more_options=['-t', 'SanityFailureCheck', '--failure-stats']
)
assert r'FAILURE STATISTICS' in stdout
assert r'sanity 1 [SanityFailureCheck' in stdout
assert 'Traceback' not in stdout
assert 'Traceback' not in stderr
assert returncode != 0
def test_maxfail_option(run_reframe):
returncode, stdout, stderr = run_reframe(
more_options=['--maxfail', '1'],
system='testsys',
checkpath=['unittests/resources/checks/hellocheck.py']
)
assert 'Traceback' not in stdout
assert 'Traceback' not in stderr
assert ('Ran 2/2 test case(s) from 2 check(s) '
'(0 failure(s), 0 skipped)') in stdout
assert returncode == 0
def test_maxfail_invalid_option(run_reframe):
returncode, stdout, stderr = run_reframe(
more_options=['--maxfail', 'foo'],
system='testsys',
checkpath=['unittests/resources/checks/hellocheck.py']
)
assert 'Traceback' not in stdout
assert 'Traceback' not in stderr
assert "--maxfail is not a valid integer: 'foo'" in stdout
assert returncode == 1
def test_maxfail_negative(run_reframe):
returncode, stdout, stderr = run_reframe(
more_options=['--maxfail', '-2'],
system='testsys',
checkpath=['unittests/resources/checks/hellocheck.py']
)
assert 'Traceback' not in stdout
assert 'Traceback' not in stderr
assert "--maxfail should be a non-negative integer: '-2'" in stdout
assert returncode == 1
| []
| []
| [
"MODULEPATH"
]
| [] | ["MODULEPATH"] | python | 1 | 0 | |
intel/reconcile.py | # Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import time
from kubernetes import config as k8sconfig, client as k8sclient
from . import config, proc, third_party, custom_resource, k8s, util
def reconcile(seconds, publish):
pod_name = os.environ["HOSTNAME"]
node_name = k8s.get_node_from_pod(None, pod_name)
configmap_name = "cmk-config-{}".format(node_name)
c = config.Config(configmap_name, pod_name)
report = None
if seconds is None:
seconds = 0
else:
seconds = int(seconds)
should_exit = (seconds <= 0)
while True:
c.lock()
report = generate_report(c)
print(report.json())
reclaim_cpu_lists(c, report)
c.unlock()
if publish and report is not None:
logging.debug("Publishing reconcile report to "
"Kubernetes API server")
k8sconfig.load_incluster_config()
v1beta = k8sclient.ExtensionsV1beta1Api()
version = util.parse_version(k8s.get_kube_version(None))
if version >= util.parse_version("v1.7.0"):
reconcile_report_type = \
custom_resource.CustomResourceDefinitionType(
v1beta,
"intel.com",
"cmk-reconcilereport",
["cmk-rr"]
)
node_name = os.getenv("NODE_NAME")
reconcile_report = reconcile_report_type.create(node_name)
reconcile_report.body["spec"]["report"] = report
reconcile_report.save()
else:
reconcile_report_type = third_party.ThirdPartyResourceType(
v1beta,
"cmk.intel.com",
"Reconcilereport")
node_name = os.getenv("NODE_NAME")
reconcile_report = reconcile_report_type.create(node_name)
reconcile_report.body["report"] = report
reconcile_report.save()
if should_exit:
break
logging.info(
"Waiting %d seconds until next reconciliation..." % seconds)
time.sleep(seconds)
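# Remove the pids recorded in the report's reclaimed CPU lists from their pool core lists.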
def reclaim_cpu_lists(conf, report):
for r in report["reclaimedCpuLists"]:
pool = conf.get_pool(r.pool())
cl = pool.get_core_list(r.cpus())
logging.info("Removing pid {} from cpu list \"{}\" in pool {}".format(
r.pid(), r.cpus(), r.pool()))
cl.remove_task(str(r.pid()))
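# Build a report of CPU-list entries whose owning process no longer exists.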
def generate_report(conf):
report = ReconcileReport()
for pool in conf.get_pools():
p = conf.get_pool(pool)
for cl in p.get_core_lists():
for task in cl.tasks:
p = proc.Process(task)
if not p.exists():
report.add_reclaimed_cpu_list(
p.pid,
pool,
cl.core_id)
return report
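# Dict-backed report collecting the CPU lists reclaimed during a reconcile pass.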
class ReconcileReport(dict):
def __init__(self):
self["reclaimedCpuLists"] = []
def reclaimed_cpu_lists(self):
return self["reclaimedCpuLists"]
def add_reclaimed_cpu_list(self, pid, pool_name, cpus):
self["reclaimedCpuLists"].append(Reclaimed(pid, pool_name, cpus))
def json(self):
def by_pid(item): return item.pid()
self.reclaimed_cpu_lists().sort(key=by_pid)
return json.dumps(self, sort_keys=True, indent=2)
class Reclaimed(dict):
def __init__(self, pid, pool_name, cpus):
self["pid"] = pid
self["pool"] = pool_name
self["cpus"] = cpus
def pid(self):
return self["pid"]
def pool(self):
return self["pool"]
def cpus(self):
return self["cpus"]
| []
| []
| [
"HOSTNAME",
"NODE_NAME"
]
| [] | ["HOSTNAME", "NODE_NAME"] | python | 2 | 0 | |
v3/integrations/nrmicro/example/client/client.go | // Copyright 2020 New Relic Corporation. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package main
import (
"context"
"fmt"
"os"
"time"
"github.com/micro/go-micro"
"github.com/newrelic/go-agent/v3/integrations/nrmicro"
proto "github.com/newrelic/go-agent/v3/integrations/nrmicro/example/proto"
newrelic "github.com/newrelic/go-agent/v3/newrelic"
)
func main() {
app, err := newrelic.NewApplication(
newrelic.ConfigAppName("Micro Client"),
newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")),
newrelic.ConfigDebugLogger(os.Stdout),
)
if nil != err {
panic(err)
}
err = app.WaitForConnection(10 * time.Second)
if nil != err {
panic(err)
}
defer app.Shutdown(10 * time.Second)
txn := app.StartTransaction("client")
defer txn.End()
service := micro.NewService(
// Add the New Relic wrapper to the client which will create External
// segments for each outgoing call.
micro.WrapClient(nrmicro.ClientWrapper()),
)
service.Init()
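// Put the transaction into the context so the nrmicro client wrapper can
// pick it up and record the outgoing call as an External segment.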
ctx := newrelic.NewContext(context.Background(), txn)
c := proto.NewGreeterService("greeter", service.Client())
rsp, err := c.Hello(ctx, &proto.HelloRequest{
Name: "John",
})
if err != nil {
fmt.Println(err)
return
}
fmt.Println(rsp.Greeting)
}
| [
"\"NEW_RELIC_LICENSE_KEY\""
]
| []
| [
"NEW_RELIC_LICENSE_KEY"
]
| [] | ["NEW_RELIC_LICENSE_KEY"] | go | 1 | 0 | |
Godeps/_workspace/src/github.com/godbus/dbus/conn.go | package dbus
import (
"errors"
"io"
"os"
"reflect"
"strings"
"sync"
)
const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket"
var (
systemBus *Conn
systemBusLck sync.Mutex
sessionBus *Conn
sessionBusLck sync.Mutex
)
// ErrClosed is the error returned by calls on a closed connection.
var ErrClosed = errors.New("dbus: connection closed by user")
// Conn represents a connection to a message bus (usually, the system or
// session bus).
//
// Connections are either shared or private. Shared connections
// are shared between calls to the functions that return them. As a result,
// the methods Close, Auth and Hello must not be called on them.
//
// Multiple goroutines may invoke methods on a connection simultaneously.
type Conn struct {
transport
busObj *Object
unixFD bool
uuid string
names []string
namesLck sync.RWMutex
serialLck sync.Mutex
nextSerial uint32
serialUsed map[uint32]bool
calls map[uint32]*Call
callsLck sync.RWMutex
handlers map[ObjectPath]map[string]interface{}
handlersLck sync.RWMutex
out chan *Message
closed bool
outLck sync.RWMutex
signals []chan<- *Signal
signalsLck sync.Mutex
eavesdropped chan<- *Message
eavesdroppedLck sync.Mutex
}
// SessionBus returns a shared connection to the session bus, connecting to it
// if not already done.
func SessionBus() (conn *Conn, err error) {
sessionBusLck.Lock()
defer sessionBusLck.Unlock()
if sessionBus != nil {
return sessionBus, nil
}
defer func() {
if conn != nil {
sessionBus = conn
}
}()
conn, err = SessionBusPrivate()
if err != nil {
return
}
if err = conn.Auth(nil); err != nil {
conn.Close()
conn = nil
return
}
if err = conn.Hello(); err != nil {
conn.Close()
conn = nil
}
return
}
// SessionBusPrivate returns a new private connection to the session bus.
func SessionBusPrivate() (*Conn, error) {
address := os.Getenv("DBUS_SESSION_BUS_ADDRESS")
if address != "" && address != "autolaunch:" {
return Dial(address)
}
return sessionBusPlatform()
}
// SystemBus returns a shared connection to the system bus, connecting to it if
// not already done.
func SystemBus() (conn *Conn, err error) {
systemBusLck.Lock()
defer systemBusLck.Unlock()
if systemBus != nil {
return systemBus, nil
}
defer func() {
if conn != nil {
systemBus = conn
}
}()
conn, err = SystemBusPrivate()
if err != nil {
return
}
if err = conn.Auth(nil); err != nil {
conn.Close()
conn = nil
return
}
if err = conn.Hello(); err != nil {
conn.Close()
conn = nil
}
return
}
// SystemBusPrivate returns a new private connection to the system bus.
func SystemBusPrivate() (*Conn, error) {
address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS")
if address != "" {
return Dial(address)
}
return Dial(defaultSystemBusAddress)
}
// Dial establishes a new private connection to the message bus specified by address.
func Dial(address string) (*Conn, error) {
tr, err := getTransport(address)
if err != nil {
return nil, err
}
return newConn(tr)
}
// NewConn creates a new private *Conn from an already established connection.
func NewConn(conn io.ReadWriteCloser) (*Conn, error) {
return newConn(genericTransport{conn})
}
// newConn creates a new *Conn from a transport.
func newConn(tr transport) (*Conn, error) {
conn := new(Conn)
conn.transport = tr
conn.calls = make(map[uint32]*Call)
conn.out = make(chan *Message, 10)
conn.handlers = make(map[ObjectPath]map[string]interface{})
conn.nextSerial = 1
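// Serial 0 is never valid for outgoing messages, so mark it as used up front.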
conn.serialUsed = map[uint32]bool{0: true}
conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")
return conn, nil
}
// BusObject returns the object owned by the bus daemon which handles
// administrative requests.
func (conn *Conn) BusObject() *Object {
return conn.busObj
}
// Close closes the connection. Any blocked operations will return with errors
// and the channels passed to Eavesdrop and Signal are closed. This method must
// not be called on shared connections.
func (conn *Conn) Close() error {
conn.outLck.Lock()
close(conn.out)
conn.closed = true
conn.outLck.Unlock()
conn.signalsLck.Lock()
for _, ch := range conn.signals {
close(ch)
}
conn.signalsLck.Unlock()
conn.eavesdroppedLck.Lock()
if conn.eavesdropped != nil {
close(conn.eavesdropped)
}
conn.eavesdroppedLck.Unlock()
return conn.transport.Close()
}
// Eavesdrop causes conn to send all incoming messages to the given channel
// without further processing. Method replies, errors and signals will not be
// sent to the appropriate channels and method calls will not be handled. If nil
// is passed, the normal behaviour is restored.
//
// The caller has to make sure that ch is sufficiently buffered;
// if a message arrives when a write to ch is not possible, the message is
// discarded.
func (conn *Conn) Eavesdrop(ch chan<- *Message) {
conn.eavesdroppedLck.Lock()
conn.eavesdropped = ch
conn.eavesdroppedLck.Unlock()
}
// getSerial returns an unused serial.
func (conn *Conn) getSerial() uint32 {
conn.serialLck.Lock()
defer conn.serialLck.Unlock()
n := conn.nextSerial
for conn.serialUsed[n] {
n++
}
conn.serialUsed[n] = true
conn.nextSerial = n + 1
return n
}
// Hello sends the initial org.freedesktop.DBus.Hello call. This method must be
// called after authentication, but before sending any other messages to the
// bus. Hello must not be called for shared connections.
func (conn *Conn) Hello() error {
var s string
err := conn.busObj.Call("org.freedesktop.DBus.Hello", 0).Store(&s)
if err != nil {
return err
}
conn.namesLck.Lock()
conn.names = make([]string, 1)
conn.names[0] = s
conn.namesLck.Unlock()
return nil
}
// inWorker runs in an own goroutine, reading incoming messages from the
// transport and dispatching them appropriately.
func (conn *Conn) inWorker() {
for {
msg, err := conn.ReadMessage()
if err == nil {
conn.eavesdroppedLck.Lock()
if conn.eavesdropped != nil {
select {
case conn.eavesdropped <- msg:
default:
}
conn.eavesdroppedLck.Unlock()
continue
}
conn.eavesdroppedLck.Unlock()
dest, _ := msg.Headers[FieldDestination].value.(string)
found := false
if dest == "" {
found = true
} else {
conn.namesLck.RLock()
if len(conn.names) == 0 {
found = true
}
for _, v := range conn.names {
if dest == v {
found = true
break
}
}
conn.namesLck.RUnlock()
}
if !found {
// Eavesdropped a message, but no channel for it is registered.
// Ignore it.
continue
}
switch msg.Type {
case TypeMethodReply, TypeError:
serial := msg.Headers[FieldReplySerial].value.(uint32)
conn.callsLck.Lock()
if c, ok := conn.calls[serial]; ok {
if msg.Type == TypeError {
name, _ := msg.Headers[FieldErrorName].value.(string)
c.Err = Error{name, msg.Body}
} else {
c.Body = msg.Body
}
c.Done <- c
conn.serialLck.Lock()
delete(conn.serialUsed, serial)
conn.serialLck.Unlock()
delete(conn.calls, serial)
}
conn.callsLck.Unlock()
case TypeSignal:
iface := msg.Headers[FieldInterface].value.(string)
member := msg.Headers[FieldMember].value.(string)
// as per http://dbus.freedesktop.org/doc/dbus-specification.html ,
// sender is optional for signals.
sender, _ := msg.Headers[FieldSender].value.(string)
if iface == "org.freedesktop.DBus" && member == "NameLost" &&
sender == "org.freedesktop.DBus" {
name, _ := msg.Body[0].(string)
conn.namesLck.Lock()
for i, v := range conn.names {
if v == name {
copy(conn.names[i:], conn.names[i+1:])
conn.names = conn.names[:len(conn.names)-1]
}
}
conn.namesLck.Unlock()
}
signal := &Signal{
Sender: sender,
Path: msg.Headers[FieldPath].value.(ObjectPath),
Name: iface + "." + member,
Body: msg.Body,
}
conn.signalsLck.Lock()
for _, ch := range conn.signals {
ch <- signal
}
conn.signalsLck.Unlock()
case TypeMethodCall:
go conn.handleCall(msg)
}
} else if _, ok := err.(InvalidMessageError); !ok {
// Some read error occurred (usually EOF); we can't really do
// anything but shut everything down and return errors to all
// pending replies.
conn.Close()
conn.callsLck.RLock()
for _, v := range conn.calls {
v.Err = err
v.Done <- v
}
conn.callsLck.RUnlock()
return
}
// invalid messages are ignored
}
}
// Names returns the list of all names that are currently owned by this
// connection. The slice is always at least one element long, the first element
// being the unique name of the connection.
func (conn *Conn) Names() []string {
conn.namesLck.RLock()
// copy the slice so it can't be modified
s := make([]string, len(conn.names))
copy(s, conn.names)
conn.namesLck.RUnlock()
return s
}
// Object returns the object identified by the given destination name and path.
func (conn *Conn) Object(dest string, path ObjectPath) *Object {
return &Object{conn, dest, path}
}
// outWorker runs in an own goroutine, encoding and sending messages that are
// sent to conn.out.
func (conn *Conn) outWorker() {
for msg := range conn.out {
err := conn.SendMessage(msg)
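// On failure, complete the pending call (if any) with the error and release
// the serial; on success only non-method-call messages release their serial,
// since method calls keep it reserved until the reply arrives.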
conn.callsLck.RLock()
if err != nil {
if c := conn.calls[msg.serial]; c != nil {
c.Err = err
c.Done <- c
}
conn.serialLck.Lock()
delete(conn.serialUsed, msg.serial)
conn.serialLck.Unlock()
} else if msg.Type != TypeMethodCall {
conn.serialLck.Lock()
delete(conn.serialUsed, msg.serial)
conn.serialLck.Unlock()
}
conn.callsLck.RUnlock()
}
}
// Send sends the given message to the message bus. You usually don't need to
// use this; use the higher-level equivalents (Call / Go, Emit and Export)
// instead. If msg is a method call and NoReplyExpected is not set, a non-nil
// call is returned and the same value is sent to ch (which must be buffered)
// once the call is complete. Otherwise, ch is ignored and a Call structure is
// returned of which only the Err member is valid.
func (conn *Conn) Send(msg *Message, ch chan *Call) *Call {
var call *Call
msg.serial = conn.getSerial()
if msg.Type == TypeMethodCall && msg.Flags&FlagNoReplyExpected == 0 {
if ch == nil {
ch = make(chan *Call, 5)
} else if cap(ch) == 0 {
panic("dbus: unbuffered channel passed to (*Conn).Send")
}
call = new(Call)
call.Destination, _ = msg.Headers[FieldDestination].value.(string)
call.Path, _ = msg.Headers[FieldPath].value.(ObjectPath)
iface, _ := msg.Headers[FieldInterface].value.(string)
member, _ := msg.Headers[FieldMember].value.(string)
call.Method = iface + "." + member
call.Args = msg.Body
call.Done = ch
conn.callsLck.Lock()
conn.calls[msg.serial] = call
conn.callsLck.Unlock()
conn.outLck.RLock()
if conn.closed {
call.Err = ErrClosed
call.Done <- call
} else {
conn.out <- msg
}
conn.outLck.RUnlock()
} else {
conn.outLck.RLock()
if conn.closed {
call = &Call{Err: ErrClosed}
} else {
conn.out <- msg
call = &Call{Err: nil}
}
conn.outLck.RUnlock()
}
return call
}
// sendError creates an error message corresponding to the parameters and sends
// it to conn.out.
func (conn *Conn) sendError(e Error, dest string, serial uint32) {
msg := new(Message)
msg.Type = TypeError
msg.serial = conn.getSerial()
msg.Headers = make(map[HeaderField]Variant)
if dest != "" {
msg.Headers[FieldDestination] = MakeVariant(dest)
}
msg.Headers[FieldErrorName] = MakeVariant(e.Name)
msg.Headers[FieldReplySerial] = MakeVariant(serial)
msg.Body = e.Body
if len(e.Body) > 0 {
msg.Headers[FieldSignature] = MakeVariant(SignatureOf(e.Body...))
}
conn.outLck.RLock()
if !conn.closed {
conn.out <- msg
}
conn.outLck.RUnlock()
}
// sendReply creates a method reply message corresponding to the parameters and
// sends it to conn.out.
func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) {
msg := new(Message)
msg.Type = TypeMethodReply
msg.serial = conn.getSerial()
msg.Headers = make(map[HeaderField]Variant)
if dest != "" {
msg.Headers[FieldDestination] = MakeVariant(dest)
}
msg.Headers[FieldReplySerial] = MakeVariant(serial)
msg.Body = values
if len(values) > 0 {
msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...))
}
conn.outLck.RLock()
if !conn.closed {
conn.out <- msg
}
conn.outLck.RUnlock()
}
// Signal registers the given channel to be passed all received signal messages.
// The caller has to make sure that ch is sufficiently buffered; if a message
// arrives when a write to ch is not possible, it is discarded.
//
// Multiple of these channels can be registered at the same time. Passing a
// channel that already is registered will remove it from the list of the
// registered channels.
//
// These channels are "overwritten" by Eavesdrop; i.e., if there currently is a
// channel for eavesdropped messages, this channel receives all signals, and
// none of the channels passed to Signal will receive any signals.
func (conn *Conn) Signal(ch chan<- *Signal) {
conn.signalsLck.Lock()
conn.signals = append(conn.signals, ch)
conn.signalsLck.Unlock()
}
// SupportsUnixFDs returns whether the underlying transport supports passing of
// unix file descriptors. If this is false, method calls containing unix file
// descriptors will return an error and emitted signals containing them will
// not be sent.
func (conn *Conn) SupportsUnixFDs() bool {
return conn.unixFD
}
// Error represents a D-Bus message of type Error.
type Error struct {
Name string
Body []interface{}
}
func NewError(name string, body []interface{}) *Error {
return &Error{name, body}
}
func (e Error) Error() string {
if len(e.Body) >= 1 {
s, ok := e.Body[0].(string)
if ok {
return s
}
}
return e.Name
}
// Signal represents a D-Bus message of type Signal. The name member is given in
// "interface.member" notation, e.g. org.freedesktop.D-Bus.NameLost.
type Signal struct {
Sender string
Path ObjectPath
Name string
Body []interface{}
}
// transport is a D-Bus transport.
type transport interface {
// Read and Write raw data (for example, for the authentication protocol).
io.ReadWriteCloser
// Send the initial null byte used for the EXTERNAL mechanism.
SendNullByte() error
// Returns whether this transport supports passing Unix FDs.
SupportsUnixFDs() bool
// Signal the transport that Unix FD passing is enabled for this connection.
EnableUnixFDs()
// Read / send a message, handling things like Unix FDs.
ReadMessage() (*Message, error)
SendMessage(*Message) error
}
var (
transports map[string]func(string) (transport, error) = make(map[string]func(string) (transport, error))
)
func getTransport(address string) (transport, error) {
var err error
var t transport
addresses := strings.Split(address, ";")
for _, v := range addresses {
i := strings.IndexRune(v, ':')
if i == -1 {
err = errors.New("dbus: invalid bus address (no transport)")
continue
}
f := transports[v[:i]]
if f == nil {
err = errors.New("dbus: invalid bus address (invalid or unsupported transport)")
// Skip this address; calling a nil transport factory would panic.
continue
}
t, err = f(v[i+1:])
if err == nil {
return t, nil
}
}
return nil, err
}
// dereferenceAll returns a slice that, assuming that vs is a slice of pointers
// of arbitrary types, contains the values that are obtained from dereferencing
// all elements in vs.
func dereferenceAll(vs []interface{}) []interface{} {
for i := range vs {
v := reflect.ValueOf(vs[i])
v = v.Elem()
vs[i] = v.Interface()
}
return vs
}
// getKey gets a key from the list of keys. Returns "" on error / not found...
func getKey(s, key string) string {
i := strings.Index(s, key)
if i == -1 {
return ""
}
if i+len(key)+1 >= len(s) || s[i+len(key)] != '=' {
return ""
}
j := strings.Index(s, ",")
if j == -1 {
j = len(s)
}
return s[i+len(key)+1 : j]
}
| [
"\"DBUS_SESSION_BUS_ADDRESS\"",
"\"DBUS_SYSTEM_BUS_ADDRESS\""
]
| []
| [
"DBUS_SYSTEM_BUS_ADDRESS",
"DBUS_SESSION_BUS_ADDRESS"
]
| [] | ["DBUS_SYSTEM_BUS_ADDRESS", "DBUS_SESSION_BUS_ADDRESS"] | go | 2 | 0 | |
edi_green_sensor.py | """Python class for interfacing with an EdiGreen Home Sensor."""
from socket import *
import json
import os
import requests
from requests.auth import HTTPDigestAuth
class RequestFailed(Exception):
"""Generic exception for failed requests to server."""
class EdiGreenSensor:
"""Represents an Edimax EdiGreen Home Sensor."""
def __init__(self, addr, mac, name="EdiMax", username="admin", password="1234"):
if not addr:
raise ValueError("Missing IP address")
if not mac:
raise ValueError("Missing MAC")
if not name:
raise ValueError("Missing name")
if not username:
raise ValueError("Missing username")
if not password:
raise ValueError("Missing password")
self.addr = addr
self.mac = mac
self.name = name
self.username = username
self.password = password
self._log_request = False
self._log_response = False
def __str__(self) -> str:
return "[EdiGreenSensor: {} {} {}]".format(self.name, self.addr, self.mac)
@staticmethod
def search(password):
# Create a UDP socket
sock = socket(AF_INET, SOCK_DGRAM)
sock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
sock.settimeout(5)
# TODO(wdm) What are these magic bytes?
payload = bytes.fromhex('ff ff ff ff ff ff 00 02 ff fd 00 00 00 00')
try:
sent = sock.sendto(payload, ('255.255.255.255', 24929))
print('Searching for EdiMax sensor...')
data, _ = sock.recvfrom(4096)
mac = data[0:6].hex().upper()
# TODO(wdm) What is this? data[6:10] == b'\x01\x02\xfe\xfd'
# uid = mac + data[10:14]
search = json.loads(data[14:])
# print('Debug', search)
sensor = EdiGreenSensor(search.get('search.ip'), mac, search.get(
'search.name'), 'admin', password)
if sensor:
print('Found: ', sensor)
return sensor
except Exception as err:
print(err)
finally:
sock.close()
def send_cmd(self, cmd):
"""Send command to Edimax and return its response."""
url = "http://{}:5678/edilife.cgi".format(self.addr)
data = {
"customer.name": "EDIMAX",
"mac": self.mac,
"cmd": cmd,
"uid": None, # Ignored?
}
if self._log_request:
print('debug: url={} data={}'.format(url, data))
response = requests.post(url, data=json.dumps(data),
auth=HTTPDigestAuth(self.username, self.password))
if response.status_code == 401:
raise ValueError("Invalid password")
if response.status_code != 200:
raise RequestFailed(
'Request failed with code: {}'.format(response.status_code))
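# The sensor obfuscates its JSON payload by bit-rotating every byte;
# undo the rotation before parsing.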
msg = _rotate_json(response.content)
# print('Debug: msg={}'.format(msg))
data = json.loads(msg)
if self._log_response:
print('debug: cmd_respone=', json.dumps(data, indent=2))
return data['cmd'][1]
def set_led(self, led_enable):
"""Set the led on or off."""
return self.send_cmd([{"id": "set"}, {"feature": {"led.enable": led_enable}}])
def get_readings(self):
return self.send_cmd([{"id": "get"}, {"status": {
"temperature": "0",
"moisture": 0,
"pm2.5": 0,
"pm10": 0,
"co2": "0",
"hcho": "0",
"tvoc": "0",
}}]).get("status", {})
def get_all(self):
return self.send_cmd([{"id": "get"}, {
"basic": {
"fwversion": None,
"mac": None,
"model": None,
"name.maxlen": None,
"name": None,
"owner.token": None,
"produce.country.code": None,
"protocolversion": None,
"uploaddata.enable": None,
"cloud.ddns.domain": None,
"cloud.ddns.port": None,
"cloud.push.domain": None,
"cloud.push.port": None,
"cloud.ota.domain": None,
"cloud.ota.port": None,
"cloud.upload.domain": None,
"cloud.upload.port": None,
"cloud.upload.cert": None,
# TODO(wdm) Any other fields?
# These fields return null:
"cloud.push.cert": None,
"cloud.ota.cert": None,
"cloud.upload.cert": None,
"cgiversion": None,
"model.id": None,
"upgrade.status": None,
},
"network": {
"http.port": None,
"ip.type": None,
"ip.dhcp.ip": None,
"ip.static.ip": None,
"ip.static.netmask": None,
"ip.static.gateway": None,
"ip.static.dns1": None,
"wifi.check": None,
"wifi.mode": None,
# These fields return values:
"wifi.ssid": None,
"wifi.auth": None,
"wifi.encryption": None,
"wifi.wpakey": None,
"wifi.bssid": None,
"wifi.channel": None,
"wifi.wepkeyIndex": None,
"wifi.wepkeyformat": None,
"wifi.wepkeylength": None,
"wifi.wepkey1": None,
"wifi.wepkey2": None,
"wifi.wepkey3": None,
"wifi.wepkey4": None,
},
"status": {
"systemtime": None,
"temperature": None,
"moisture": None,
"pm2.5": None,
"pm10": None,
"co2": None,
"hcho": None,
"tvoc": None
},
"events": {
"push.enable": None,
"pm2.5.max": None,
"pm10.max": None,
"co2.max": None,
"hcho.max": None,
"tvoc.max": None,
"temperature.min": None,
"temperature.max": None,
"moisture.min": None,
"moisture.max": None
},
"feature": {
"led.enable": None
}
}])
# Utility functions:
def _rotate_byte(byte, rotations):
"""Rotate byte to the left by the specified number of rotations."""
return (byte << rotations | byte >> (8-rotations)) & 0xFF
def _rotate_json(response):
"""Byte rotate the Edimax response into plain JSON."""
# First byte always decodes to '{' so the difference is the number of rotations.
rotations = response[0] - ord('{')
return '{' + ''.join(chr(_rotate_byte(byte, rotations)) for byte in response[1:])
# Testing:
def test():
"""Example invocations."""
password = os.environ.get('EDIMAX_PASSWORD')
if not password:
print('Please set environment variable EDIMAX_PASSWORD')
exit(1)
sensor = EdiGreenSensor.search(password)
if not sensor:
print('Failed to find a EdiMax sensor')
exit(1)
print(sensor.get_readings())
sensor._log_response = True
sensor._log_request = True
# sensor.get_all()
sensor.set_led(1)
sensor.set_led(0)
if __name__ == '__main__':
test()
| []
| []
| [
"EDIMAX_PASSWORD"
]
| [] | ["EDIMAX_PASSWORD"] | python | 1 | 0 | |
providers/ibm/ibm_is_vpn_gateway.go | // Copyright 2019 The Terraformer Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ibm
import (
"fmt"
"os"
"github.com/GoogleCloudPlatform/terraformer/terraformutils"
"github.com/IBM/go-sdk-core/v4/core"
"github.com/IBM/vpc-go-sdk/vpcv1"
)
// VPNGatewayGenerator ...
type VPNGatewayGenerator struct {
IBMService
}
func (g VPNGatewayGenerator) createVPNGatewayResources(vpngwID, vpngwName string) terraformutils.Resource {
var resources terraformutils.Resource
resources = terraformutils.NewSimpleResource(
vpngwID,
vpngwName,
"ibm_is_vpn_gateway",
"ibm",
[]string{})
return resources
}
func (g VPNGatewayGenerator) createVPNGatewayConnectionResources(vpngwID, vpngwConnectionID, vpngwConnectionName string, dependsOn []string) terraformutils.Resource {
var resources terraformutils.Resource
resources = terraformutils.NewResource(
fmt.Sprintf("%s/%s", vpngwID, vpngwConnectionID),
vpngwConnectionName,
"ibm_is_vpn_gateway_connections",
"ibm",
map[string]string{},
[]string{},
map[string]interface{}{
"depends_on": dependsOn,
})
return resources
}
// InitResources ...
func (g *VPNGatewayGenerator) InitResources() error {
var resourceGroup string
region := envFallBack([]string{"IC_REGION"}, "us-south")
apiKey := os.Getenv("IC_API_KEY")
if apiKey == "" {
return fmt.Errorf("No API key set")
}
rg := g.Args["resource_group"]
if rg != nil {
resourceGroup = rg.(string)
}
vpcurl := fmt.Sprintf("https://%s.iaas.cloud.ibm.com/v1", region)
vpcoptions := &vpcv1.VpcV1Options{
URL: envFallBack([]string{"IBMCLOUD_IS_API_ENDPOINT"}, vpcurl),
Authenticator: &core.IamAuthenticator{
ApiKey: apiKey,
},
}
vpcclient, err := vpcv1.NewVpcV1(vpcoptions)
if err != nil {
return err
}
start := ""
allrecs := []vpcv1.VPNGatewayIntf{}
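// Page through all VPN gateways; the API returns a Start token until the last page.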
for {
listVPNGatewaysOptions := &vpcv1.ListVPNGatewaysOptions{}
if start != "" {
listVPNGatewaysOptions.Start = &start
}
if resourceGroup != "" {
listVPNGatewaysOptions.ResourceGroupID = &resourceGroup
}
vpngws, response, err := vpcclient.ListVPNGateways(listVPNGatewaysOptions)
if err != nil {
return fmt.Errorf("Error Fetching VPN Gateways %s\n%s", err, response)
}
start = GetNext(vpngws.Next)
allrecs = append(allrecs, vpngws.VPNGateways...)
if start == "" {
break
}
}
for _, gw := range allrecs {
vpngw := gw.(*vpcv1.VPNGateway)
var dependsOn []string
dependsOn = append(dependsOn,
"ibm_is_vpn_gateway."+terraformutils.TfSanitize(*vpngw.Name))
g.Resources = append(g.Resources, g.createVPNGatewayResources(*vpngw.ID, *vpngw.Name))
listVPNGatewayConnectionsOptions := &vpcv1.ListVPNGatewayConnectionsOptions{
VPNGatewayID: vpngw.ID,
}
vpngwConnections, response, err := vpcclient.ListVPNGatewayConnections(listVPNGatewayConnectionsOptions)
if err != nil {
return fmt.Errorf("Error Fetching VPN Gateway Connections %s\n%s", err, response)
}
for _, connection := range vpngwConnections.Connections {
vpngwConnection := connection.(*vpcv1.VPNGatewayConnection)
g.Resources = append(g.Resources, g.createVPNGatewayConnectionResources(*vpngw.ID, *vpngwConnection.ID, *vpngwConnection.Name, dependsOn))
}
}
return nil
}
| [
"\"IC_API_KEY\""
]
| []
| [
"IC_API_KEY"
]
| [] | ["IC_API_KEY"] | go | 1 | 0 | |
pkg/jx/cmd/promote.go | package cmd
import (
"fmt"
"github.com/jenkins-x/jx/pkg/kube/services"
"io"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/blang/semver"
"github.com/jenkins-x/jx/pkg/apis/jenkins.io/v1"
typev1 "github.com/jenkins-x/jx/pkg/client/clientset/versioned/typed/jenkins.io/v1"
"github.com/jenkins-x/jx/pkg/gits"
"github.com/jenkins-x/jx/pkg/helm"
"github.com/jenkins-x/jx/pkg/jx/cmd/templates"
"github.com/jenkins-x/jx/pkg/kube"
"github.com/jenkins-x/jx/pkg/log"
"github.com/jenkins-x/jx/pkg/util"
"github.com/spf13/cobra"
"gopkg.in/AlecAivazis/survey.v1"
"gopkg.in/AlecAivazis/survey.v1/terminal"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
optionEnvironment = "env"
optionApplication = "app"
optionTimeout = "timeout"
optionPullRequestPollTime = "pull-request-poll-time"
gitStatusSuccess = "success"
)
var (
waitAfterPullRequestCreated = time.Second * 3
)
// PromoteOptions containers the CLI options
type PromoteOptions struct {
CommonOptions
Namespace string
Environment string
Application string
Pipeline string
Build string
Version string
ReleaseName string
LocalHelmRepoName string
HelmRepositoryURL string
NoHelmUpdate bool
AllAutomatic bool
NoMergePullRequest bool
NoPoll bool
NoWaitAfterMerge bool
IgnoreLocalFiles bool
Timeout string
PullRequestPollTime string
Filter string
Alias string
// allow git to be configured externally before a PR is created
ConfigureGitCallback ConfigureGitFolderFn
// for testing
FakePullRequests CreateEnvPullRequestFn
UseFakeHelm bool
// calculated fields
TimeoutDuration *time.Duration
PullRequestPollDuration *time.Duration
Activities typev1.PipelineActivityInterface
GitInfo *gits.GitRepositoryInfo
jenkinsURL string
releaseResource *v1.Release
ReleaseInfo *ReleaseInfo
}
type ReleaseInfo struct {
ReleaseName string
FullAppName string
Version string
PullRequestInfo *ReleasePullRequestInfo
}
type ReleasePullRequestInfo struct {
GitProvider gits.GitProvider
PullRequest *gits.GitPullRequest
PullRequestArguments *gits.GitPullRequestArguments
}
var (
promote_long = templates.LongDesc(`
Promotes a version of an application to zero to many permanent environments.
For more documentation see: [https://jenkins-x.io/about/features/#promotion](https://jenkins-x.io/about/features/#promotion)
`)
promote_example = templates.Examples(`
# Promote a version of the current application to staging
# discovering the application name from the source code
jx promote --version 1.2.3 --env staging
# Promote a version of the myapp application to production
jx promote myapp --version 1.2.3 --env production
# To search for all the available charts for a given name use -f.
# e.g. to find a redis chart to install
jx promote -f redis
# To promote a postgres chart using an alias
jx promote -f postgres --alias mydb
# To create or update a Preview Environment please see the 'jx preview' command
jx preview
`)
)
// NewCmdPromote creates the new command for: jx get prompt
func NewCmdPromote(f Factory, in terminal.FileReader, out terminal.FileWriter, errOut io.Writer) *cobra.Command {
options := &PromoteOptions{
CommonOptions: CommonOptions{
Factory: f,
In: in,
Out: out,
Err: errOut,
},
}
cmd := &cobra.Command{
Use: "promote [application]",
Short: "Promotes a version of an application to an Environment",
Long: promote_long,
Example: promote_example,
Run: func(cmd *cobra.Command, args []string) {
options.Cmd = cmd
options.Args = args
err := options.Run()
CheckErr(err)
},
}
options.addCommonFlags(cmd)
cmd.Flags().StringVarP(&options.Namespace, "namespace", "n", "", "The Namespace to promote to")
cmd.Flags().StringVarP(&options.Environment, optionEnvironment, "e", "", "The Environment to promote to")
cmd.Flags().BoolVarP(&options.AllAutomatic, "all-auto", "", false, "Promote to all automatic environments in order")
options.addPromoteOptions(cmd)
return cmd
}
func (options *PromoteOptions) addPromoteOptions(cmd *cobra.Command) {
cmd.Flags().StringVarP(&options.Application, optionApplication, "a", "", "The Application to promote")
cmd.Flags().StringVarP(&options.Filter, "filter", "f", "", "The search filter to find charts to promote")
cmd.Flags().StringVarP(&options.Alias, "alias", "", "", "The optional alias used in the 'requirements.yaml' file")
cmd.Flags().StringVarP(&options.Pipeline, "pipeline", "", "", "The Pipeline string in the form 'folderName/repoName/branch' which is used to update the PipelineActivity. If not specified it defaults to the value of the '$JOB_NAME' environment variable")
cmd.Flags().StringVarP(&options.Build, "build", "", "", "The Build number which is used to update the PipelineActivity. If not specified it defaults to the value of the '$BUILD_NUMBER' environment variable")
cmd.Flags().StringVarP(&options.Version, "version", "v", "", "The Version to promote")
cmd.Flags().StringVarP(&options.LocalHelmRepoName, "helm-repo-name", "r", kube.LocalHelmRepoName, "The name of the helm repository that contains the app")
cmd.Flags().StringVarP(&options.HelmRepositoryURL, "helm-repo-url", "u", helm.DefaultHelmRepositoryURL, "The Helm Repository URL to use for the App")
cmd.Flags().StringVarP(&options.ReleaseName, "release", "", "", "The name of the helm release")
cmd.Flags().StringVarP(&options.Timeout, optionTimeout, "t", "1h", "The timeout to wait for the promotion to succeed in the underlying Environment. The command fails if the timeout is exceeded or the promotion does not complete")
cmd.Flags().StringVarP(&options.PullRequestPollTime, optionPullRequestPollTime, "", "20s", "Poll time when waiting for a Pull Request to merge")
cmd.Flags().BoolVarP(&options.NoHelmUpdate, "no-helm-update", "", false, "Allows the 'helm repo update' command if you are sure your local helm cache is up to date with the version you wish to promote")
cmd.Flags().BoolVarP(&options.NoMergePullRequest, "no-merge", "", false, "Disables automatic merge of promote Pull Requests")
cmd.Flags().BoolVarP(&options.NoPoll, "no-poll", "", false, "Disables polling for Pull Request or Pipeline status")
cmd.Flags().BoolVarP(&options.NoWaitAfterMerge, "no-wait", "", false, "Disables waiting for completing promotion after the Pull request is merged")
cmd.Flags().BoolVarP(&options.IgnoreLocalFiles, "ignore-local-file", "", false, "Ignores the local file system when deducing the Git repository")
}
// Run implements this command
func (o *PromoteOptions) Run() error {
app := o.Application
if app == "" {
args := o.Args
if len(args) == 0 {
search := o.Filter
var err error
if search != "" {
app, err = o.SearchForChart(search)
} else {
app, err = o.DiscoverAppName()
}
if err != nil {
return err
}
} else {
app = args[0]
}
}
o.Application = app
jxClient, ns, err := o.JXClientAndDevNamespace()
if err != nil {
return err
}
if o.Environment == "" && !o.BatchMode {
names := []string{}
m, allEnvNames, err := kube.GetOrderedEnvironments(jxClient, ns)
if err != nil {
return err
}
for _, n := range allEnvNames {
env := m[n]
if env.Spec.Kind == v1.EnvironmentKindTypePermanent {
names = append(names, n)
}
}
o.Environment, err = kube.PickEnvironment(names, "", o.In, o.Out, o.Err)
if err != nil {
return err
}
}
if o.PullRequestPollTime != "" {
duration, err := time.ParseDuration(o.PullRequestPollTime)
if err != nil {
return fmt.Errorf("Invalid duration format %s for option --%s: %s", o.PullRequestPollTime, optionPullRequestPollTime, err)
}
o.PullRequestPollDuration = &duration
}
if o.Timeout != "" {
duration, err := time.ParseDuration(o.Timeout)
if err != nil {
return fmt.Errorf("Invalid duration format %s for option --%s: %s", o.Timeout, optionTimeout, err)
}
o.TimeoutDuration = &duration
}
targetNS, env, err := o.GetTargetNamespace(o.Namespace, o.Environment)
if err != nil {
return err
}
o.Activities = jxClient.JenkinsV1().PipelineActivities(ns)
releaseName := o.ReleaseName
if releaseName == "" {
releaseName = targetNS + "-" + app
o.ReleaseName = releaseName
}
if o.AllAutomatic {
return o.PromoteAllAutomatic()
}
if env == nil {
if o.Environment == "" {
return util.MissingOption(optionEnvironment)
}
env, err := jxClient.JenkinsV1().Environments(ns).Get(o.Environment, metav1.GetOptions{})
if err != nil {
return err
}
if env == nil {
return fmt.Errorf("Could not find an Environment called %s", o.Environment)
}
}
releaseInfo, err := o.Promote(targetNS, env, true)
if err != nil {
return err
}
o.ReleaseInfo = releaseInfo
if !o.NoPoll {
err = o.WaitForPromotion(targetNS, env, releaseInfo)
if err != nil {
return err
}
}
return err
}
func (o *PromoteOptions) PromoteAllAutomatic() error {
kubeClient, currentNs, err := o.KubeClient()
if err != nil {
return err
}
team, _, err := kube.GetDevNamespace(kubeClient, currentNs)
if err != nil {
return err
}
jxClient, _, err := o.JXClient()
if err != nil {
return err
}
envs, err := jxClient.JenkinsV1().Environments(team).List(metav1.ListOptions{})
if err != nil {
log.Warnf("No Environments found: %s/n", err)
return nil
}
environments := envs.Items
if len(environments) == 0 {
log.Warnf("No Environments have been created yet in team %s. Please create some via 'jx create env'\n", team)
return nil
}
kube.SortEnvironments(environments)
for _, env := range environments {
kind := env.Spec.Kind
if env.Spec.PromotionStrategy == v1.PromotionStrategyTypeAutomatic && kind.IsPermanent() {
ns := env.Spec.Namespace
if ns == "" {
return fmt.Errorf("No namespace for environment %s", env.Name)
}
releaseInfo, err := o.Promote(ns, &env, false)
if err != nil {
return err
}
o.ReleaseInfo = releaseInfo
err = o.WaitForPromotion(ns, &env, releaseInfo)
if err != nil {
return err
}
}
}
return nil
}
func (o *PromoteOptions) Promote(targetNS string, env *v1.Environment, warnIfAuto bool) (*ReleaseInfo, error) {
surveyOpts := survey.WithStdio(o.In, o.Out, o.Err)
app := o.Application
if app == "" {
log.Warnf("No application name could be detected so cannot promote via Helm. If the detection of the helm chart name is not working consider adding it with the --%s argument on the 'jx promomote' command\n", optionApplication)
return nil, nil
}
version := o.Version
info := util.ColorInfo
if version == "" {
log.Infof("Promoting latest version of app %s to namespace %s\n", info(app), info(targetNS))
} else {
log.Infof("Promoting app %s version %s to namespace %s\n", info(app), info(version), info(targetNS))
}
fullAppName := app
if o.LocalHelmRepoName != "" {
fullAppName = o.LocalHelmRepoName + "/" + app
}
releaseName := o.ReleaseName
if releaseName == "" {
releaseName = targetNS + "-" + app
o.ReleaseName = releaseName
}
releaseInfo := &ReleaseInfo{
ReleaseName: releaseName,
FullAppName: fullAppName,
Version: version,
}
if warnIfAuto && env != nil && env.Spec.PromotionStrategy == v1.PromotionStrategyTypeAutomatic && !o.BatchMode {
log.Infof("%s", util.ColorWarning(fmt.Sprintf("WARNING: The Environment %s is setup to promote automatically as part of the CI/CD Pipelines.\n\n", env.Name)))
confirm := &survey.Confirm{
Message: "Do you wish to promote anyway? :",
Default: false,
}
flag := false
err := survey.AskOne(confirm, &flag, nil, surveyOpts)
if err != nil {
return releaseInfo, err
}
if !flag {
return releaseInfo, nil
}
}
promoteKey := o.createPromoteKey(env)
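// Environments backed by a Git repository (GitOps) are promoted by raising a
// Pull Request; other environments are upgraded directly with helm below.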
if env != nil {
source := &env.Spec.Source
if source.URL != "" && env.Spec.Kind.IsPermanent() {
err := o.PromoteViaPullRequest(env, releaseInfo)
if err == nil {
startPromotePR := func(a *v1.PipelineActivity, s *v1.PipelineActivityStep, ps *v1.PromoteActivityStep, p *v1.PromotePullRequestStep) error {
kube.StartPromotionPullRequest(a, s, ps, p)
pr := releaseInfo.PullRequestInfo
if pr != nil && pr.PullRequest != nil && p.PullRequestURL == "" {
p.PullRequestURL = pr.PullRequest.URL
}
if version != "" && a.Spec.Version == "" {
a.Spec.Version = version
}
return nil
}
err = promoteKey.OnPromotePullRequest(o.Activities, startPromotePR)
if err != nil {
log.Warnf("Failed to update PipelineActivity: %s\n", err)
}
// lets sleep a little before we try poll for the PR status
time.Sleep(waitAfterPullRequestCreated)
}
return releaseInfo, err
}
}
var err error
if !o.UseFakeHelm {
err := o.verifyHelmConfigured()
if err != nil {
return releaseInfo, err
}
}
// lets do a helm update to ensure we can find the latest version
if !o.NoHelmUpdate {
log.Info("Updating the helm repositories to ensure we can find the latest versions...")
err = o.Helm().UpdateRepo()
if err != nil {
return releaseInfo, err
}
}
startPromote := func(a *v1.PipelineActivity, s *v1.PipelineActivityStep, ps *v1.PromoteActivityStep, p *v1.PromoteUpdateStep) error {
kube.StartPromotionUpdate(a, s, ps, p)
if version != "" && a.Spec.Version == "" {
a.Spec.Version = version
}
return nil
}
promoteKey.OnPromoteUpdate(o.Activities, startPromote)
err = o.Helm().UpgradeChart(fullAppName, releaseName, targetNS, &version, true, nil, false, true, nil, nil)
if err == nil {
err = o.commentOnIssues(targetNS, env, promoteKey)
if err != nil {
log.Warnf("Failed to comment on issues for release %s: %s\n", releaseName, err)
}
err = promoteKey.OnPromoteUpdate(o.Activities, kube.CompletePromotionUpdate)
} else {
err = promoteKey.OnPromoteUpdate(o.Activities, kube.FailedPromotionUpdate)
}
return releaseInfo, err
}
func (o *PromoteOptions) PromoteViaPullRequest(env *v1.Environment, releaseInfo *ReleaseInfo) error {
version := o.Version
versionName := version
if versionName == "" {
versionName = "latest"
}
app := o.Application
branchNameText := "promote-" + app + "-" + versionName
title := app + " to " + versionName
message := fmt.Sprintf("Promote %s to version %s", app, versionName)
modifyRequirementsFn := func(requirements *helm.Requirements) error {
var err error
if version == "" {
version, err = o.findLatestVersion(app)
if err != nil {
return err
}
}
requirements.SetAppVersion(app, version, o.HelmRepositoryURL, o.Alias)
return nil
}
if o.FakePullRequests != nil {
info, err := o.FakePullRequests(env, modifyRequirementsFn, branchNameText, title, message, releaseInfo.PullRequestInfo)
releaseInfo.PullRequestInfo = info
return err
} else {
info, err := o.createEnvironmentPullRequest(env, modifyRequirementsFn, branchNameText, title, message, releaseInfo.PullRequestInfo, o.ConfigureGitCallback)
releaseInfo.PullRequestInfo = info
return err
}
}
func (o *PromoteOptions) GetTargetNamespace(ns string, env string) (string, *v1.Environment, error) {
kubeClient, currentNs, err := o.KubeClient()
if err != nil {
return "", nil, err
}
team, _, err := kube.GetDevNamespace(kubeClient, currentNs)
if err != nil {
return "", nil, err
}
jxClient, _, err := o.JXClient()
if err != nil {
return "", nil, err
}
m, envNames, err := kube.GetEnvironments(jxClient, team)
if err != nil {
return "", nil, err
}
if len(envNames) == 0 {
return "", nil, fmt.Errorf("No Environments have been created yet in team %s. Please create some via 'jx create env'", team)
}
var envResource *v1.Environment
targetNS := currentNs
if env != "" {
envResource = m[env]
if envResource == nil {
return "", nil, util.InvalidOption(optionEnvironment, env, envNames)
}
targetNS = envResource.Spec.Namespace
if targetNS == "" {
return "", nil, fmt.Errorf("Environment %s does not have a namspace associated with it!", env)
}
} else if ns != "" {
targetNS = ns
}
labels := map[string]string{}
annotations := map[string]string{}
err = kube.EnsureNamespaceCreated(kubeClient, targetNS, labels, annotations)
if err != nil {
return "", nil, err
}
return targetNS, envResource, nil
}
func (o *PromoteOptions) WaitForPromotion(ns string, env *v1.Environment, releaseInfo *ReleaseInfo) error {
if o.TimeoutDuration == nil {
log.Infof("No --%s option specified on the 'jx promote' command so not waiting for the promotion to succeed\n", optionTimeout)
return nil
}
if o.PullRequestPollDuration == nil {
log.Infof("No --%s option specified on the 'jx promote' command so not waiting for the promotion to succeed\n", optionPullRequestPollTime)
return nil
}
duration := *o.TimeoutDuration
end := time.Now().Add(duration)
pullRequestInfo := releaseInfo.PullRequestInfo
if pullRequestInfo != nil {
promoteKey := o.createPromoteKey(env)
err := o.waitForGitOpsPullRequest(ns, env, releaseInfo, end, duration, promoteKey)
if err != nil {
// TODO based on if the PR completed or not fail the PR or the Promote?
promoteKey.OnPromotePullRequest(o.Activities, kube.FailedPromotionPullRequest)
return err
}
}
return nil
}
// TODO This could do with a refactor and some tests...
func (o *PromoteOptions) waitForGitOpsPullRequest(ns string, env *v1.Environment, releaseInfo *ReleaseInfo, end time.Time, duration time.Duration, promoteKey *kube.PromoteStepActivityKey) error {
pullRequestInfo := releaseInfo.PullRequestInfo
logMergeFailure := false
logNoMergeCommitSha := false
logHasMergeSha := false
logMergeStatusError := false
logNoMergeStatuses := false
urlStatusMap := map[string]string{}
urlStatusTargetURLMap := map[string]string{}
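// Track the last observed state and target URL per commit status so changes are
// only logged once and overall success can be detected.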
if pullRequestInfo != nil {
for {
pr := pullRequestInfo.PullRequest
gitProvider := pullRequestInfo.GitProvider
err := gitProvider.UpdatePullRequestStatus(pr)
if err != nil {
log.Warnf("Failed to query the Pull Request status for %s %s", pr.URL, err)
} else {
if pr.Merged != nil && *pr.Merged {
if pr.MergeCommitSHA == nil {
if !logNoMergeCommitSha {
logNoMergeCommitSha = true
log.Infof("Pull Request %s is merged but waiting for Merge SHA\n", util.ColorInfo(pr.URL))
}
} else {
mergeSha := *pr.MergeCommitSHA
if !logHasMergeSha {
logHasMergeSha = true
log.Infof("Pull Request %s is merged at sha %s\n", util.ColorInfo(pr.URL), util.ColorInfo(mergeSha))
mergedPR := func(a *v1.PipelineActivity, s *v1.PipelineActivityStep, ps *v1.PromoteActivityStep, p *v1.PromotePullRequestStep) error {
kube.CompletePromotionPullRequest(a, s, ps, p)
p.MergeCommitSHA = mergeSha
return nil
}
promoteKey.OnPromotePullRequest(o.Activities, mergedPR)
if o.NoWaitAfterMerge {
log.Infof("Pull requests are merged, No wait on promotion to complete")
return err
}
}
promoteKey.OnPromoteUpdate(o.Activities, kube.StartPromotionUpdate)
statuses, err := gitProvider.ListCommitStatus(pr.Owner, pr.Repo, mergeSha)
if err != nil {
if !logMergeStatusError {
logMergeStatusError = true
log.Warnf("Failed to query merge status of repo %s/%s with merge sha %s due to: %s\n", pr.Owner, pr.Repo, mergeSha, err)
}
} else {
if len(statuses) == 0 {
if !logNoMergeStatuses {
logNoMergeStatuses = true
log.Infof("Merge commit has not yet any statuses on repo %s/%s merge sha %s\n", pr.Owner, pr.Repo, mergeSha)
}
} else {
for _, status := range statuses {
if status.IsFailed() {
log.Warnf("merge status: %s URL: %s description: %s\n",
status.State, status.TargetURL, status.Description)
return fmt.Errorf("Status: %s URL: %s description: %s\n",
status.State, status.TargetURL, status.Description)
}
url := status.URL
state := status.State
if urlStatusMap[url] == "" || urlStatusMap[url] != gitStatusSuccess {
if urlStatusMap[url] != state {
urlStatusMap[url] = state
urlStatusTargetURLMap[url] = status.TargetURL
log.Infof("merge status: %s for URL %s with target: %s description: %s\n",
util.ColorInfo(state), util.ColorInfo(status.URL), util.ColorInfo(status.TargetURL), util.ColorInfo(status.Description))
}
}
}
prStatuses := []v1.GitStatus{}
keys := util.SortedMapKeys(urlStatusMap)
for _, url := range keys {
state := urlStatusMap[url]
targetURL := urlStatusTargetURLMap[url]
if targetURL == "" {
targetURL = url
}
prStatuses = append(prStatuses, v1.GitStatus{
URL: targetURL,
Status: state,
})
}
updateStatuses := func(a *v1.PipelineActivity, s *v1.PipelineActivityStep, ps *v1.PromoteActivityStep, p *v1.PromoteUpdateStep) error {
p.Statuses = prStatuses
return nil
}
promoteKey.OnPromoteUpdate(o.Activities, updateStatuses)
succeeded := true
for _, v := range urlStatusMap {
if v != gitStatusSuccess {
succeeded = false
}
}
if succeeded {
log.Infoln("Merge status checks all passed so the promotion worked!")
err = o.commentOnIssues(ns, env, promoteKey)
if err == nil {
err = promoteKey.OnPromoteUpdate(o.Activities, kube.CompletePromotionUpdate)
}
return err
}
}
}
}
} else {
if pr.IsClosed() {
log.Warnf("Pull Request %s is closed\n", util.ColorInfo(pr.URL))
return fmt.Errorf("Promotion failed as Pull Request %s is closed without merging", pr.URL)
}
// lets try merge if the status is good
status, err := gitProvider.PullRequestLastCommitStatus(pr)
if err != nil {
log.Warnf("Failed to query the Pull Request last commit status for %s ref %s %s\n", pr.URL, pr.LastCommitSha, err)
//return fmt.Errorf("Failed to query the Pull Request last commit status for %s ref %s %s", pr.URL, pr.LastCommitSha, err)
} else if status == "in-progress" {
log.Infoln("The build for the Pull Request last commit is currently in progress.")
} else {
if status == "success" {
if !o.NoMergePullRequest {
err = gitProvider.MergePullRequest(pr, "jx promote automatically merged promotion PR")
if err != nil {
if !logMergeFailure {
logMergeFailure = true
log.Warnf("Failed to merge the Pull Request %s due to %s maybe I don't have karma?\n", pr.URL, err)
}
}
}
} else if status == "error" || status == "failure" {
return fmt.Errorf("Pull request %s last commit has status %s for ref %s", pr.URL, status, pr.LastCommitSha)
}
}
}
if pr.Mergeable != nil && !*pr.Mergeable {
log.Infoln("Rebasing PullRequest due to conflict")
err = o.PromoteViaPullRequest(env, releaseInfo)
if releaseInfo.PullRequestInfo != nil {
pullRequestInfo = releaseInfo.PullRequestInfo
}
}
}
if time.Now().After(end) {
return fmt.Errorf("Timed out waiting for pull request %s to merge. Waited %s", pr.URL, duration.String())
}
time.Sleep(*o.PullRequestPollDuration)
}
}
return nil
}
func (o *PromoteOptions) findLatestVersion(app string) (string, error) {
versions, err := o.Helm().SearchChartVersions(app)
if err != nil {
return "", err
}
var maxSemVer *semver.Version
maxString := ""
for _, version := range versions {
sv, err := semver.Parse(version)
if err != nil {
log.Warnf("Invalid semantic version: %s %s\n", version, err)
if maxString == "" || strings.Compare(version, maxString) > 0 {
maxString = version
}
} else {
// Keep the highest version seen so far.
if maxSemVer == nil || maxSemVer.Compare(sv) < 0 {
maxSemVer = &sv
}
}
}
if maxSemVer != nil {
return maxSemVer.String(), nil
}
if maxString == "" {
return "", fmt.Errorf("Could not find a version of app %s in the helm repositories", app)
}
return maxString, nil
}
func (o *PromoteOptions) verifyHelmConfigured() error {
helmHomeDir := filepath.Join(util.HomeDir(), ".helm")
exists, err := util.FileExists(helmHomeDir)
if err != nil {
return err
}
if !exists {
log.Warnf("No helm home dir at %s so lets initialise helm client\n", helmHomeDir)
err = o.helmInit("")
if err != nil {
return err
}
}
_, ns, _ := o.KubeClient()
if err != nil {
return err
}
// lets add the releases chart
return o.registerLocalHelmRepo(o.LocalHelmRepoName, ns)
}
func (o *PromoteOptions) createPromoteKey(env *v1.Environment) *kube.PromoteStepActivityKey {
pipeline := o.Pipeline
build := o.Build
buildURL := os.Getenv("BUILD_URL")
buildLogsURL := os.Getenv("BUILD_LOG_URL")
releaseNotesURL := ""
gitInfo := o.GitInfo
if !o.IgnoreLocalFiles {
var err error
gitInfo, err = o.Git().Info("")
releaseName := o.ReleaseName
if o.releaseResource == nil && releaseName != "" {
jxClient, _, err := o.JXClient()
if err == nil && jxClient != nil {
release, err := jxClient.JenkinsV1().Releases(env.Spec.Namespace).Get(releaseName, metav1.GetOptions{})
if err == nil && release != nil {
o.releaseResource = release
}
}
}
if o.releaseResource != nil {
releaseNotesURL = o.releaseResource.Spec.ReleaseNotesURL
}
if err != nil {
log.Warnf("Could not discover the Git repository info %s\n", err)
} else {
o.GitInfo = gitInfo
}
}
if pipeline == "" {
pipeline, build = o.getPipelineName(gitInfo, pipeline, build, o.Application)
}
if pipeline != "" && build == "" {
log.Warnf("No $BUILD_NUMBER environment variable found so cannot record promotion activities into the PipelineActivity resources in kubernetes\n")
var err error
build, err = o.getLatestPipelineBuildByCRD(pipeline)
if err != nil {
log.Warnf("Could not discover the latest PipelineActivity build %s\n", err)
}
}
name := pipeline
if build != "" {
name += "-" + build
if buildURL == "" || buildLogsURL == "" {
jenkinsURL := o.getJenkinsURL()
if jenkinsURL != "" {
path := pipeline
if !strings.HasPrefix(path, "job/") && !strings.HasPrefix(path, "/job/") {
// lets split the path and prefix it with /job
path = strings.Join(strings.Split(path, "/"), "/job/")
path = util.UrlJoin("job", path)
}
path = util.UrlJoin(path, build)
if !strings.HasSuffix(path, "/") {
path += "/"
}
if buildURL == "" {
buildURL = util.UrlJoin(jenkinsURL, path)
}
if buildLogsURL == "" {
buildLogsURL = util.UrlJoin(buildURL, "console")
}
}
}
}
name = kube.ToValidName(name)
if o.Verbose {
log.Infof("Using pipeline: %s build: %s\n", util.ColorInfo(pipeline), util.ColorInfo("#"+build))
}
return &kube.PromoteStepActivityKey{
PipelineActivityKey: kube.PipelineActivityKey{
Name: name,
Pipeline: pipeline,
Build: build,
BuildURL: buildURL,
BuildLogsURL: buildLogsURL,
GitInfo: gitInfo,
ReleaseNotesURL: releaseNotesURL,
},
Environment: env.Name,
}
}
// getLatestPipelineBuildByCRD returns the build number of the latest PipelineActivity for the given pipeline
func (o *CommonOptions) getLatestPipelineBuildByCRD(pipeline string) (string, error) {
// lets find the latest build number
jxClient, ns, err := o.JXClientAndDevNamespace()
if err != nil {
return "", err
}
pipelines, err := jxClient.JenkinsV1().PipelineActivities(ns).List(metav1.ListOptions{})
if err != nil {
return "", err
}
buildNumber := 0
for _, p := range pipelines.Items {
if p.Spec.Pipeline == pipeline {
b := p.Spec.Build
if b != "" {
n, err := strconv.Atoi(b)
if err == nil {
if n > buildNumber {
buildNumber = n
}
}
}
}
}
if buildNumber > 0 {
return strconv.Itoa(buildNumber), nil
}
return "1", nil
}
func (o *CommonOptions) getPipelineName(gitInfo *gits.GitRepositoryInfo, pipeline string, build string, appName string) (string, string) {
if pipeline == "" {
pipeline = o.getJobName()
}
if build == "" {
build = o.getBuildNumber()
}
if gitInfo != nil && pipeline == "" {
// lets default the pipeline name from the Git repo
branch, err := o.Git().Branch(".")
if err != nil {
log.Warnf("Could not find the branch name: %s\n", err)
}
if branch == "" {
branch = "master"
}
pipeline = util.UrlJoin(gitInfo.Organisation, gitInfo.Name, branch)
}
if pipeline == "" && appName != "" {
suffix := appName + "/master"
// lets try deduce the pipeline name via the app name
jxClient, ns, err := o.JXClientAndDevNamespace()
if err == nil {
pipelineList, err := jxClient.JenkinsV1().PipelineActivities(ns).List(metav1.ListOptions{})
if err == nil {
for _, pipelineResource := range pipelineList.Items {
pipelineName := pipelineResource.Spec.Pipeline
if strings.HasSuffix(pipelineName, suffix) {
pipeline = pipelineName
break
}
}
}
}
}
if pipeline == "" {
// lets try find
log.Warnf("No $JOB_NAME environment variable found so cannot record promotion activities into the PipelineActivity resources in kubernetes\n")
} else if build == "" {
// lets validate and determine the current active pipeline branch
p, b, err := o.getLatestPipelineBuild(pipeline)
if err != nil {
log.Warnf("Failed to try detect the current Jenkins pipeline for %s due to %s\n", pipeline, err)
build = "1"
} else {
pipeline = p
build = b
}
}
return pipeline, build
}
// getLatestPipelineBuild tries to find the Jenkins Pipeline and the latest build for the given pipeline name
func (o *CommonOptions) getLatestPipelineBuild(pipeline string) (string, string, error) {
log.Infof("pipeline %s\n", pipeline)
build := ""
jxClient, ns, err := o.JXClientAndDevNamespace()
if err != nil {
return pipeline, build, err
}
kubeClient, _, err := o.KubeClient()
if err != nil {
return pipeline, build, err
}
devEnv, err := kube.GetEnrichedDevEnvironment(kubeClient, jxClient, ns)
webhookEngine := devEnv.Spec.WebHookEngine
if webhookEngine == v1.WebHookEngineProw {
return pipeline, build, nil
}
jenkins, err := o.JenkinsClient()
if err != nil {
return pipeline, build, err
}
paths := strings.Split(pipeline, "/")
job, err := jenkins.GetJobByPath(paths...)
if err != nil {
return pipeline, build, err
}
build = strconv.Itoa(job.LastBuild.Number)
return pipeline, build, nil
}
func (o *PromoteOptions) getJenkinsURL() string {
if o.jenkinsURL == "" {
o.jenkinsURL = os.Getenv("JENKINS_URL")
}
if o.jenkinsURL == "" {
o.jenkinsURL = os.Getenv("JENKINS_URL")
}
url, err := o.GetJenkinsURL()
if err != nil {
log.Warnf("Could not find Jenkins URL %s", err)
} else {
o.jenkinsURL = url
}
return o.jenkinsURL
}
// commentOnIssues comments on any issues for a release that the fix is available in the given environment
func (o *PromoteOptions) commentOnIssues(targetNS string, environment *v1.Environment, promoteKey *kube.PromoteStepActivityKey) error {
ens := environment.Spec.Namespace
envName := environment.Spec.Label
app := o.Application
version := o.Version
if ens == "" {
log.Warnf("Environment %s has no namespace\n", envName)
return nil
}
if app == "" {
log.Warnf("No application name so cannot comment on issues that they are now in %s\n", envName)
return nil
}
if version == "" {
log.Warnf("No version name so cannot comment on issues that they are now in %s\n", envName)
return nil
}
gitInfo := o.GitInfo
if gitInfo == nil {
log.Warnf("No GitInfo discovered so cannot comment on issues that they are now in %s\n", envName)
return nil
}
authConfigSvc, err := o.CreateGitAuthConfigService()
if err != nil {
return err
}
gitKind, err := o.GitServerKind(gitInfo)
if err != nil {
return err
}
provider, err := gitInfo.PickOrCreateProvider(authConfigSvc, "user name to comment on issues", o.BatchMode, gitKind, o.Git(), o.In, o.Out, o.Err)
if err != nil {
return err
}
releaseName := kube.ToValidNameWithDots(app + "-" + version)
jxClient, _, err := o.JXClient()
if err != nil {
return err
}
kubeClient, _, err := o.KubeClient()
if err != nil {
return err
}
appNames := []string{app, o.ReleaseName, ens + "-" + app}
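// The service may be exposed under any of these names depending on how it was
// released, so try each candidate until a URL is found.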
url := ""
for _, n := range appNames {
url, err = services.FindServiceURL(kubeClient, ens, n)
if url != "" {
break
}
}
if url == "" {
log.Warnf("Could not find the service URL in namespace %s for names %s\n", ens, strings.Join(appNames, ", "))
}
available := ""
if url != "" {
available = fmt.Sprintf(" and available [here](%s)", url)
}
if available == "" {
ing, err := kubeClient.ExtensionsV1beta1().Ingresses(ens).Get(app, metav1.GetOptions{})
if err != nil || ing == nil && o.ReleaseName != "" && o.ReleaseName != app {
ing, err = kubeClient.ExtensionsV1beta1().Ingresses(ens).Get(o.ReleaseName, metav1.GetOptions{})
}
if ing != nil {
if len(ing.Spec.Rules) > 0 {
hostname := ing.Spec.Rules[0].Host
if hostname != "" {
available = fmt.Sprintf(" and available at %s", hostname)
url = hostname
}
}
}
}
// lets try update the PipelineActivity
if url != "" && promoteKey.ApplicationURL == "" {
promoteKey.ApplicationURL = url
if o.Verbose {
log.Infof("Application is available at: %s\n", util.ColorInfo(url))
}
}
release, err := jxClient.JenkinsV1().Releases(ens).Get(releaseName, metav1.GetOptions{})
if err == nil && release != nil {
o.releaseResource = release
issues := release.Spec.Issues
versionMessage := version
if release.Spec.ReleaseNotesURL != "" {
versionMessage = "[" + version + "](" + release.Spec.ReleaseNotesURL + ")"
}
for _, issue := range issues {
if issue.IsClosed() {
log.Infof("Commenting that issue %s is now in %s\n", util.ColorInfo(issue.URL), util.ColorInfo(envName))
comment := fmt.Sprintf(":white_check_mark: the fix for this issue is now deployed to **%s** in version %s %s", envName, versionMessage, available)
id := issue.ID
if id != "" {
number, err := strconv.Atoi(id)
if err != nil {
log.Warnf("Could not parse issue id %s for URL %s\n", id, issue.URL)
} else {
if number > 0 {
err = provider.CreateIssueComment(gitInfo.Organisation, gitInfo.Name, number, comment)
if err != nil {
log.Warnf("Failed to add comment to issue %s: %s", issue.URL, err)
}
}
}
}
}
}
}
return nil
}
func (o *PromoteOptions) SearchForChart(filter string) (string, error) {
answer := ""
charts, err := o.Helm().SearchCharts(filter)
if err != nil {
return answer, err
}
if len(charts) == 0 {
return answer, fmt.Errorf("No charts available for search filter: %s", filter)
}
m := map[string]*helm.ChartSummary{}
names := []string{}
for i, chart := range charts {
text := chart.Name
if chart.Description != "" {
text = fmt.Sprintf("%-36s: %s", chart.Name, chart.Description)
}
names = append(names, text)
m[text] = &charts[i]
}
name, err := util.PickName(names, "Pick chart to promote: ", "", o.In, o.Out, o.Err)
if err != nil {
return answer, err
}
chart := m[name]
chartName := chart.Name
// TODO now we split the chart into name and repo
parts := strings.Split(chartName, "/")
if len(parts) != 2 {
return answer, fmt.Errorf("Invalid chart name '%s' was expecting single / character separating repo name and chart name", chartName)
}
repoName := parts[0]
appName := parts[1]
repos, err := o.Helm().ListRepos()
if err != nil {
return answer, err
}
repoUrl := repos[repoName]
if repoUrl == "" {
return answer, fmt.Errorf("Failed to find helm chart repo URL for '%s' when possible values are %s", repoName, util.SortedMapKeys(repos))
}
o.Version = chart.ChartVersion
o.HelmRepositoryURL = repoUrl
return appName, nil
}
| [
"\"BUILD_URL\"",
"\"BUILD_LOG_URL\"",
"\"JENKINS_URL\"",
"\"JENKINS_URL\""
]
| []
| [
"JENKINS_URL",
"BUILD_URL",
"BUILD_LOG_URL"
]
| [] | ["JENKINS_URL", "BUILD_URL", "BUILD_LOG_URL"] | go | 3 | 0 | |
nexttransit/bart/config.py | # Filename: config.py
"""
BART Configuration
"""
# Standard libraries
import os
from enum import Enum
STRFTIME = "%m/%d/%Y"
VALIDATION_KEY = os.getenv('BART_VALIDATION_KEY', None)
if VALIDATION_KEY is None:
raise ImportError("cannot find the value of $BART_VALIDATION_KEY")
URL = {
'etd': 'http://api.bart.gov/api/etd.aspx?cmd=etd&orig={station}&key={key}',
'etd_dir': 'http://api.bart.gov/api/etd.aspx?cmd=etd&orig={station}&'
'key={key}&dir={direction}',
'route_info': 'http://api.bart.gov/api/route.aspx?cmd=routeinfo&'
'route={number}&key={key}',
'routes': 'http://api.bart.gov/api/route.aspx?cmd=routes&date={date}&'
'key={key}',
'station': 'http://api.bart.gov/api/stn.aspx?cmd=stns&key={key}',
'station_info': 'http://api.bart.gov/api/stn.aspx?cmd=stninfo&'
'orig={station}&key={key}',
}
class GetResponseCodeEnum(Enum):
SUCCESS = 200
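
# --- Illustrative sketch added for clarity; not part of the original module. ---
# The URL entries above are plain format strings, so a request URL is built by
# filling in the placeholders. The station code and direction below are made up.
def _example_etd_url(station='MONT', direction='n'):
    """Return a sample estimated-departure URL (hypothetical station/direction)."""
    return URL['etd_dir'].format(station=station, key=VALIDATION_KEY, direction=direction)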
| []
| []
| [
"BART_VALIDATION_KEY"
]
| [] | ["BART_VALIDATION_KEY"] | python | 1 | 0 | |
tests/test_features.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import print_function
import warnings
import numpy as np
import pytest
import librosa
from test_core import load, srand
# Disable cache
import os
try:
os.environ.pop('LIBROSA_CACHE_DIR')
except KeyError:
pass
__EXAMPLE_FILE = os.path.join('tests', 'data', 'test1_22050.wav')
warnings.resetwarnings()
warnings.simplefilter('always')
warnings.filterwarnings('module', '.*', FutureWarning, 'scipy.*')
# utils submodule
@pytest.mark.parametrize('slope', np.linspace(-2, 2, num=6))
@pytest.mark.parametrize('xin', [np.vstack([np.arange(100.0)] * 3)])
@pytest.mark.parametrize('order', [1, pytest.mark.xfail(0)])
@pytest.mark.parametrize('width, axis', [pytest.mark.xfail((-1, 0)),
pytest.mark.xfail((-1, 1)),
pytest.mark.xfail((0, 0)),
pytest.mark.xfail((0, 1)),
pytest.mark.xfail((1, 0)),
pytest.mark.xfail((1, 1)),
pytest.mark.xfail((2, 0)),
pytest.mark.xfail((2, 1)),
(3, 0), (3, 1),
pytest.mark.xfail((4, 0)),
pytest.mark.xfail((4, 1)),
(5, 1), pytest.mark.xfail((5, 0)),
pytest.mark.xfail((6, 0)),
pytest.mark.xfail((6, 1)),
pytest.mark.xfail((7, 0)), (7, 1)])
@pytest.mark.parametrize('bias', [-10, 0, 10])
def test_delta(xin, width, slope, order, axis, bias):
x = slope * xin + bias
# Note: this test currently only checks first-order differences
# if width < 3 or np.mod(width, 2) != 1 or width > x.shape[axis]:
# pytest.raises(librosa.ParameterError)
delta = librosa.feature.delta(x,
width=width,
order=order,
axis=axis)
# Check that trimming matches the expected shape
assert x.shape == delta.shape
# Once we're sufficiently far into the signal (ie beyond half_len)
# (x + delta)[t] should approximate x[t+1] if x is actually linear
slice_orig = [slice(None)] * x.ndim
slice_out = [slice(None)] * delta.ndim
slice_orig[axis] = slice(width//2 + 1, -width//2 + 1)
slice_out[axis] = slice(width//2, -width//2)
assert np.allclose((x + delta)[tuple(slice_out)], x[tuple(slice_orig)])
def test_stack_memory():
def __test(n_steps, delay, data):
data_stack = librosa.feature.stack_memory(data,
n_steps=n_steps,
delay=delay)
# If we're one-dimensional, reshape for testing
if data.ndim == 1:
data = data.reshape((1, -1))
d, t = data.shape
assert data_stack.shape[0] == n_steps * d
assert data_stack.shape[1] == t
assert np.allclose(data_stack[0], data[0])
for i in range(d):
for step in range(1, n_steps):
if delay > 0:
assert np.allclose(data[i, :- step * delay],
data_stack[step * d + i, step * delay:])
else:
assert np.allclose(data[i, -step * delay:],
data_stack[step * d + i, :step * delay])
srand()
for ndim in [1, 2]:
data = np.random.randn(* ([5] * ndim))
for n_steps in [-1, 0, 1, 2, 3, 4]:
for delay in [-4, -2, -1, 0, 1, 2, 4]:
tf = __test
if n_steps < 1:
tf = pytest.mark.xfail(__test, raises=librosa.ParameterError)
if delay == 0:
tf = pytest.mark.xfail(__test, raises=librosa.ParameterError)
yield tf, n_steps, delay, data
# spectral submodule
def test_spectral_centroid_synthetic():
k = 5
def __test(S, freq, sr, n_fft):
cent = librosa.feature.spectral_centroid(S=S, freq=freq)
if freq is None:
freq = librosa.fft_frequencies(sr=sr, n_fft=n_fft)
assert np.allclose(cent, freq[k])
srand()
# construct a fake spectrogram
sr = 22050
n_fft = 1024
S = np.zeros((1 + n_fft // 2, 10))
S[k, :] = 1.0
yield __test, S, None, sr, n_fft
freq = librosa.fft_frequencies(sr=sr, n_fft=n_fft)
yield __test, S, freq, sr, n_fft
# And if we modify the frequencies
freq *= 3
yield __test, S, freq, sr, n_fft
# Or if we make up random frequencies for each frame
freq = np.random.randn(*S.shape)
yield __test, S, freq, sr, n_fft
def test_spectral_centroid_errors():
@pytest.mark.xfail(raises=librosa.ParameterError)
def __test(S):
librosa.feature.spectral_centroid(S=S)
S = - np.ones((513, 10))
yield __test, S
S = - np.ones((513, 10)) * 1.j
yield __test, S
def test_spectral_centroid_empty():
def __test(y, sr, S):
cent = librosa.feature.spectral_centroid(y=y, sr=sr, S=S)
assert not np.any(cent)
sr = 22050
y = np.zeros(3 * sr)
yield __test, y, sr, None
S = np.zeros((1025, 10))
yield __test, None, sr, S
def test_spectral_bandwidth_synthetic():
# This test ensures that a signal confined to a single frequency bin
# always achieves 0 bandwidth
k = 5
def __test(S, freq, sr, n_fft, norm, p):
bw = librosa.feature.spectral_bandwidth(S=S, freq=freq, norm=norm, p=p)
assert not np.any(bw)
srand()
# construct a fake spectrogram
sr = 22050
n_fft = 1024
S = np.zeros((1 + n_fft // 2, 10))
S[k, :] = 1.0
for norm in [False, True]:
for p in [1, 2]:
# With vanilla frequencies
yield __test, S, None, sr, n_fft, norm, p
# With explicit frequencies
freq = librosa.fft_frequencies(sr=sr, n_fft=n_fft)
yield __test, S, freq, sr, n_fft, norm, p
# And if we modify the frequencies
freq = 3 * librosa.fft_frequencies(sr=sr, n_fft=n_fft)
yield __test, S, freq, sr, n_fft, norm, p
# Or if we make up random frequencies for each frame
freq = np.random.randn(*S.shape)
yield __test, S, freq, sr, n_fft, norm, p
def test_spectral_bandwidth_onecol():
# This test checks for issue https://github.com/librosa/librosa/issues/552
# failure when the spectrogram has a single column
def __test(S, freq):
bw = librosa.feature.spectral_bandwidth(S=S, freq=freq)
assert bw.shape == (1, 1)
k = 5
srand()
# construct a fake spectrogram
sr = 22050
n_fft = 1024
S = np.zeros((1 + n_fft // 2, 1))
S[k, :] = 1.0
# With vanilla frequencies
yield __test, S, None
# With explicit frequencies
freq = librosa.fft_frequencies(sr=sr, n_fft=n_fft)
yield __test, S, freq
# And if we modify the frequencies
freq = 3 * librosa.fft_frequencies(sr=sr, n_fft=n_fft)
yield __test, S, freq
# Or if we make up random frequencies for each frame
freq = np.random.randn(*S.shape)
yield __test, S, freq
def test_spectral_bandwidth_errors():
@pytest.mark.xfail(raises=librosa.ParameterError)
def __test(S):
librosa.feature.spectral_bandwidth(S=S)
S = - np.ones((513, 10))
yield __test, S
S = - np.ones((513, 10)) * 1.j
yield __test, S
def test_spectral_rolloff_synthetic():
srand()
sr = 22050
n_fft = 2048
def __test(S, freq, pct):
rolloff = librosa.feature.spectral_rolloff(S=S, sr=sr, freq=freq,
roll_percent=pct)
if freq is None:
freq = librosa.fft_frequencies(sr=sr, n_fft=n_fft)
idx = np.floor(pct * freq.shape[0]).astype(int)
assert np.allclose(rolloff, freq[idx])
S = np.ones((1 + n_fft // 2, 10))
for pct in [0.25, 0.5, 0.95]:
# Implicit frequencies
yield __test, S, None, pct
# Explicit frequencies
freq = librosa.fft_frequencies(sr=sr, n_fft=n_fft)
yield __test, S, freq, pct
# And time-varying frequencies
freq = np.cumsum(np.abs(np.random.randn(*S.shape)), axis=0)
yield __test, S, freq, pct
def test_spectral_rolloff_errors():
@pytest.mark.xfail(raises=librosa.ParameterError)
def __test(S, p):
librosa.feature.spectral_rolloff(S=S, roll_percent=p)
S = - np.ones((513, 10))
yield __test, S, 0.95
S = - np.ones((513, 10)) * 1.j
yield __test, S, 0.95
S = np.ones((513, 10))
yield __test, S, -1
S = np.ones((513, 10))
yield __test, S, 2
def test_spectral_contrast_log():
# We already have a regression test for linear energy difference
# This test just does a sanity-check on the log-scaled version
y, sr = librosa.load(__EXAMPLE_FILE)
contrast = librosa.feature.spectral_contrast(y=y, sr=sr, linear=False)
assert not np.any(contrast < 0)
def test_spectral_contrast_errors():
@pytest.mark.xfail(raises=librosa.ParameterError)
def __test(S, freq, fmin, n_bands, quantile):
librosa.feature.spectral_contrast(S=S,
freq=freq,
fmin=fmin,
n_bands=n_bands,
quantile=quantile)
S = np.ones((1025, 10))
# ill-shaped frequency set: scalar
yield __test, S, 0, 200, 6, 0.02
# ill-shaped frequency set: wrong-length vector
yield __test, S, np.zeros((S.shape[0]+1,)), 200, 6, 0.02
# ill-shaped frequency set: matrix
yield __test, S, np.zeros(S.shape), 200, 6, 0.02
# negative fmin
yield __test, S, None, -1, 6, 0.02
# zero fmin
yield __test, S, None, 0, 6, 0.02
# negative n_bands
yield __test, S, None, 200, -1, 0.02
# bad quantile
yield __test, S, None, 200, 6, -1
# bad quantile
yield __test, S, None, 200, 6, 2
# bands exceed nyquist
yield __test, S, None, 200, 7, 0.02
def test_spectral_flatness_synthetic():
# to construct a spectrogram
n_fft = 2048
def __test(y, S, flatness_ref):
flatness = librosa.feature.spectral_flatness(y=y,
S=S,
n_fft=2048,
hop_length=512)
assert np.allclose(flatness, flatness_ref)
# comparison to a manual calculation result
S = np.array([[1, 3], [2, 1], [1, 2]])
flatness_ref = np.array([[0.7937005259, 0.7075558390]])
yield __test, None, S, flatness_ref
# ones
S = np.ones((1 + n_fft // 2, 10))
flatness_ones = np.ones((1, 10))
yield __test, None, S, flatness_ones
# zeros
S = np.zeros((1 + n_fft // 2, 10))
flatness_zeros = np.ones((1, 10))
yield __test, None, S, flatness_zeros
def test_spectral_flatness_errors():
@pytest.mark.xfail(raises=librosa.ParameterError)
def __test(S, amin):
librosa.feature.spectral_flatness(S=S,
amin=amin)
S = np.ones((1025, 10))
# zero amin
yield __test, S, 0
# negative amin
yield __test, S, -1
def test_rmse():
def __test(n):
S = np.ones((n, 5))
# RMSE of an all-ones band is 1
rmse = librosa.feature.rmse(S=S)
assert np.allclose(rmse, np.ones_like(rmse))
def __test_consistency(frame_length, hop_length, center):
y, sr = librosa.load(__EXAMPLE_FILE, sr=None)
# Ensure audio is divisible into frame size.
y = librosa.util.fix_length(y, y.size - y.size % frame_length)
assert y.size % frame_length == 0
# STFT magnitudes with a constant windowing function and no centering.
S = librosa.magphase(librosa.stft(y,
n_fft=frame_length,
hop_length=hop_length,
window=np.ones,
center=center))[0]
# Try both RMS methods.
rms1 = librosa.feature.rmse(S=S, frame_length=frame_length,
hop_length=hop_length)
rms2 = librosa.feature.rmse(y=y, frame_length=frame_length,
hop_length=hop_length, center=center)
assert rms1.shape == rms2.shape
# Normalize envelopes.
rms1 /= rms1.max()
rms2 /= rms2.max()
# Ensure results are similar.
np.testing.assert_allclose(rms1, rms2, rtol=5e-2)
for frame_length in [2048, 4096]:
for hop_length in [128, 512, 1024]:
for center in [False, True]:
yield __test_consistency, frame_length, hop_length, center
for n in range(10, 100, 10):
yield __test, n
def test_zcr_synthetic():
def __test_zcr(rate, y, frame_length, hop_length, center):
zcr = librosa.feature.zero_crossing_rate(y,
frame_length=frame_length,
hop_length=hop_length,
center=center)
# We don't care too much about the edges if there's padding
if center:
zcr = zcr[:, frame_length//2:-frame_length//2]
# We'll allow 1% relative error
assert np.allclose(zcr, rate, rtol=1e-2)
sr = 16384
for period in [32, 16, 8, 4, 2]:
y = np.ones(sr)
y[::period] = -1
# Every sign flip induces two crossings
rate = 2./period
# 1+2**k so that we get both sides of the last crossing
for frame_length in [513, 2049]:
for hop_length in [128, 256]:
for center in [False, True]:
yield __test_zcr, rate, y, frame_length, hop_length, center
def test_poly_features_synthetic():
srand()
sr = 22050
n_fft = 2048
def __test(S, coeffs, freq):
order = coeffs.shape[0] - 1
p = librosa.feature.poly_features(S=S, sr=sr, n_fft=n_fft,
order=order, freq=freq)
for i in range(S.shape[-1]):
assert np.allclose(coeffs, p[::-1, i].squeeze())
def __make_data(coeffs, freq):
S = np.zeros_like(freq)
for i, c in enumerate(coeffs):
S = S + c * freq**i
S = S.reshape((freq.shape[0], -1))
return S
for order in range(1, 3):
freq = librosa.fft_frequencies(sr=sr, n_fft=n_fft)
coeffs = np.atleast_1d(np.arange(1, 1+order))
# First test: vanilla
S = __make_data(coeffs, freq)
yield __test, S, coeffs, None
# And with explicit frequencies
yield __test, S, coeffs, freq
# And with alternate frequencies
freq = freq**2.0
S = __make_data(coeffs, freq)
yield __test, S, coeffs, freq
# And multi-dimensional
freq = np.cumsum(np.abs(np.random.randn(1 + n_fft//2, 2)), axis=0)
S = __make_data(coeffs, freq)
yield __test, S, coeffs, freq
def test_tonnetz():
y, sr = librosa.load(librosa.util.example_audio_file())
tonnetz_chroma = np.load(os.path.join('tests', "data", "feature-tonnetz-chroma.npy"))
tonnetz_msaf = np.load(os.path.join('tests', "data", "feature-tonnetz-msaf.npy"))
# Use cqt chroma
def __audio():
tonnetz = librosa.feature.tonnetz(y=y, sr=sr)
assert tonnetz.shape[0] == 6
# Use pre-computed chroma
def __stft():
tonnetz = librosa.feature.tonnetz(chroma=tonnetz_chroma)
assert tonnetz.shape[1] == tonnetz_chroma.shape[1]
assert tonnetz.shape[0] == 6
assert np.allclose(tonnetz_msaf, tonnetz)
def __cqt():
# Use high resolution cqt chroma
chroma_cqt = librosa.feature.chroma_cqt(y=y, sr=sr, n_chroma=24)
tonnetz = librosa.feature.tonnetz(chroma=chroma_cqt)
assert tonnetz.shape[1] == chroma_cqt.shape[1]
assert tonnetz.shape[0] == 6
# Using stft chroma won't generally match cqt chroma
# skip the equivalence check
# Call the function with not enough parameters
yield pytest.mark.xfail(librosa.feature.tonnetz, raises=librosa.ParameterError)
yield __audio
yield __stft
yield __cqt
def test_tempogram_fail():
@pytest.mark.xfail(raises=librosa.ParameterError)
def __test(y, sr, onset_envelope, hop_length, win_length, center, window, norm):
librosa.feature.tempogram(y=y,
sr=sr,
onset_envelope=onset_envelope,
hop_length=hop_length,
win_length=win_length,
center=center,
window=window,
norm=norm)
sr = 22050
hop_length = 512
duration = 10
y = np.zeros(duration * sr)
# Fail when no input is provided
yield __test, None, sr, None, hop_length, 384, True, 'hann', np.inf
# Fail when win_length is too small
for win_length in [-384, -1, 0]:
yield __test, y, sr, None, hop_length, win_length, True, 'hann', np.inf
# Fail when len(window) != win_length
yield __test, y, sr, None, hop_length, 384, True, np.ones(384 + 1), np.inf
def test_tempogram_audio():
def __test(y, sr, oenv, hop_length):
# Get the tempogram from audio
t1 = librosa.feature.tempogram(y=y, sr=sr,
onset_envelope=None,
hop_length=hop_length)
# Get the tempogram from oenv
t2 = librosa.feature.tempogram(y=None, sr=sr,
onset_envelope=oenv,
hop_length=hop_length)
# Make sure it works when both are provided
t3 = librosa.feature.tempogram(y=y, sr=sr,
onset_envelope=oenv,
hop_length=hop_length)
# And that oenv overrides y
t4 = librosa.feature.tempogram(y=0 * y, sr=sr,
onset_envelope=oenv,
hop_length=hop_length)
assert np.allclose(t1, t2)
assert np.allclose(t1, t3)
assert np.allclose(t1, t4)
y, sr = librosa.load(__EXAMPLE_FILE)
for hop_length in [512, 1024]:
oenv = librosa.onset.onset_strength(y=y,
sr=sr,
hop_length=hop_length)
yield __test, y, sr, oenv, hop_length
def test_tempogram_odf():
sr = 22050
hop_length = 512
duration = 8
def __test_equiv(tempo, center):
odf = np.zeros(duration * sr // hop_length)
spacing = sr * 60. // (hop_length * tempo)
odf[::int(spacing)] = 1
odf_ac = librosa.autocorrelate(odf)
tempogram = librosa.feature.tempogram(onset_envelope=odf,
sr=sr,
hop_length=hop_length,
win_length=len(odf),
window=np.ones,
center=center,
norm=None)
idx = 0
if center:
idx = len(odf)//2
assert np.allclose(odf_ac, tempogram[:, idx])
# Generate a synthetic onset envelope
def __test_peaks(tempo, win_length, window, norm):
# Generate an evenly-spaced pulse train
odf = np.zeros(duration * sr // hop_length)
spacing = sr * 60. // (hop_length * tempo)
odf[::int(spacing)] = 1
tempogram = librosa.feature.tempogram(onset_envelope=odf,
sr=sr,
hop_length=hop_length,
win_length=win_length,
window=window,
norm=norm)
# Check the shape of the output
assert tempogram.shape[0] == win_length
assert tempogram.shape[1] == len(odf)
# Mean over time to wash over the boundary padding effects
idx = np.where(librosa.util.localmax(tempogram.max(axis=1)))[0]
# Indices should all be non-zero integer multiples of spacing
assert np.allclose(idx, spacing * np.arange(1, 1 + len(idx)))
for tempo in [60, 90, 120, 160, 200]:
for center in [False, True]:
yield __test_equiv, tempo, center
for win_length in [192, 384]:
for window in ['hann', np.ones, np.ones(win_length)]:
for norm in [None, 1, 2, np.inf]:
yield __test_peaks, tempo, win_length, window, norm
def test_tempogram_odf_multi():
sr = 22050
hop_length = 512
duration = 8
# Generate a synthetic onset envelope
def __test(center, win_length, window, norm):
# Generate an evenly-spaced pulse train
odf = np.zeros((10, duration * sr // hop_length))
for i in range(10):
spacing = sr * 60. // (hop_length * (60 + 12 * i))
odf[i, ::int(spacing)] = 1
tempogram = librosa.feature.tempogram(onset_envelope=odf,
sr=sr,
hop_length=hop_length,
win_length=win_length,
window=window,
norm=norm)
for i in range(10):
tg_local = librosa.feature.tempogram(onset_envelope=odf[i],
sr=sr,
hop_length=hop_length,
win_length=win_length,
window=window,
norm=norm)
assert np.allclose(tempogram[i], tg_local)
for center in [False, True]:
for win_length in [192, 384]:
for window in ['hann', np.ones, np.ones(win_length)]:
for norm in [None, 1, 2, np.inf]:
yield __test, center, win_length, window, norm
def test_cens():
# load CQT data from Chroma Toolbox
ct_cqt = load(os.path.join('tests', 'data', 'features-CT-cqt.mat'))
fn_ct_chroma_cens = ['features-CT-CENS_9-2.mat',
'features-CT-CENS_21-5.mat',
'features-CT-CENS_41-1.mat']
cens_params = [(9, 2), (21, 5), (41, 1)]
for cur_test_case, cur_fn_ct_chroma_cens in enumerate(fn_ct_chroma_cens):
win_len_smooth = cens_params[cur_test_case][0]
downsample_smooth = cens_params[cur_test_case][1]
# plug into librosa cens computation
lr_chroma_cens = librosa.feature.chroma_cens(C=ct_cqt['f_cqt'],
win_len_smooth=win_len_smooth,
fmin=librosa.core.midi_to_hz(1),
bins_per_octave=12,
n_octaves=10)
# leaving out frames to match chroma toolbox behaviour
# lr_chroma_cens = librosa.resample(lr_chroma_cens, orig_sr=1, target_sr=1/downsample_smooth)
lr_chroma_cens = lr_chroma_cens[:, ::downsample_smooth]
# load CENS-41-1 features
ct_chroma_cens = load(os.path.join('tests', 'data', cur_fn_ct_chroma_cens))
maxdev = np.abs(ct_chroma_cens['f_CENS'] - lr_chroma_cens)
assert np.allclose(ct_chroma_cens['f_CENS'], lr_chroma_cens, rtol=1e-15, atol=1e-15), maxdev
def test_mfcc():
def __test(dct_type, norm, n_mfcc, S):
E_total = np.sum(S, axis=0)
mfcc = librosa.feature.mfcc(S=S, dct_type=dct_type, norm=norm, n_mfcc=n_mfcc)
assert mfcc.shape[0] == n_mfcc
assert mfcc.shape[1] == S.shape[1]
# In type-2 mode, DC component should be constant over all frames
if dct_type == 2:
assert np.var(mfcc[0] / E_total) <= 1e-30
S = librosa.power_to_db(np.random.randn(128, 100)**2, ref=np.max)
for n_mfcc in [13, 20]:
for dct_type in [1, 2, 3]:
for norm in [None, 'ortho']:
if dct_type == 1 and norm == 'ortho':
tf = pytest.mark.xfail(__test, raises=NotImplementedError)
else:
tf = __test
yield tf, dct_type, norm, n_mfcc, S
| []
| []
| []
| [] | [] | python | 0 | 0 | |
test.py | import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import os
from torch.autograd import Variable
import argparse
import numpy as np
from torch.optim.lr_scheduler import *
import csv
from model.resnet import resnet101
from data_pre.fashionTEST import fashiontest
parser = argparse.ArgumentParser()
parser.add_argument('--workers', type=int, default=2)
parser.add_argument('--batchSize', type=int, default=128)
parser.add_argument('--nepoch', type=int, default=1, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--gpu', type=str, default='7', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--attr', type=str, default='collar_design_labels')
opt = parser.parse_args()
print(opt)
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu
transform_test = transforms.Compose([
transforms.Scale((224, 224)),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
AttrNum = {
'coat_length_labels':8,
'collar_design_labels':5,
'lapel_design_labels':5,
'neck_design_labels':5,
'neckline_design_labels':10,
'pant_length_labels':6,
'skirt_length_labels':6,
'sleeve_length_labels':9,
}
testset=fashiontest('/home/yhf/Challenge/FashionAI/ex_STL_FashionAI/data/2rank/Tests/question.csv',transform_test,opt.attr)
testloader=torch.utils.data.DataLoader(testset,batch_size=opt.batchSize,shuffle=False,num_workers=opt.workers)
model=resnet101(pretrained=True)
model.fc=nn.Linear(2048,AttrNum[opt.attr])
model.load_state_dict(torch.load('ckp/model_task_%s.pth'%opt.attr))
model.cuda()
model.eval()
results=[]
for image,addrs in testloader:
image=Variable(image.cuda(),volatile=True)
out=model(image)
out=np.exp(out.cpu().data.numpy()).tolist()
results.extend([[j,opt.attr,";".join([str(ii) for ii in i])] for (i,j) in zip(out,addrs)])
eval_csv=os.path.join(os.path.expanduser('.'),'deploy',opt.attr+'_eval.csv')
with open(eval_csv,'w',newline='') as f:
writer=csv.writer(f,delimiter=',')
for x in results:
writer.writerow(x)
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
eval.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Calculate mAP for YOLO model on some annotation dataset
"""
import os, argparse, time
import numpy as np
import operator
from operator import mul
from functools import reduce
from PIL import Image
from collections import OrderedDict
import matplotlib.pyplot as plt
from tqdm import tqdm
from tensorflow.keras.models import load_model
import tensorflow.keras.backend as K
import tensorflow as tf
import MNN
import onnxruntime
from yolo3.postprocess_np import yolo3_postprocess_np
from yolo2.postprocess_np import yolo2_postprocess_np
from common.data_utils import preprocess_image
from common.utils import get_dataset, get_classes, get_anchors, get_colors, draw_boxes, optimize_tf_gpu, get_custom_objects
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
optimize_tf_gpu(tf, K)
def annotation_parse(annotation_lines, class_names):
'''
parse annotation lines to get image dict and ground truth class dict
image dict would be like:
annotation_records = {
'/path/to/000001.jpg': {'100,120,200,235':'dog', '85,63,156,128':'car', ...},
...
}
ground truth class dict would be like:
classes_records = {
'car': [
['000001.jpg','100,120,200,235'],
['000002.jpg','85,63,156,128'],
...
],
...
}
'''
annotation_records = OrderedDict()
classes_records = OrderedDict({class_name: [] for class_name in class_names})
for line in annotation_lines:
box_records = {}
image_name = line.split(' ')[0]
boxes = line.split(' ')[1:]
for box in boxes:
#strip box coordinate and class
class_name = class_names[int(box.split(',')[-1])]
coordinate = ','.join(box.split(',')[:-1])
box_records[coordinate] = class_name
#append or add ground truth class item
record = [os.path.basename(image_name), coordinate]
if class_name in classes_records:
classes_records[class_name].append(record)
else:
classes_records[class_name] = list([record])
annotation_records[image_name] = box_records
return annotation_records, classes_records
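
# --- Illustrative sketch added for clarity; not part of the original script. ---
# One annotation line "path/to/img.jpg xmin,ymin,xmax,ymax,class_id ..." maps to
# the two dicts documented above. The file name and boxes here are made up.
def _annotation_parse_example():
    lines = ['dog_and_car.jpg 100,120,200,235,0 85,63,156,128,1']
    ann, cls = annotation_parse(lines, ['dog', 'car'])
    # ann == {'dog_and_car.jpg': {'100,120,200,235': 'dog', '85,63,156,128': 'car'}}
    # cls['dog'] == [['dog_and_car.jpg', '100,120,200,235']]
    # cls['car'] == [['dog_and_car.jpg', '85,63,156,128']]
    return ann, cls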
def transform_gt_record(gt_records, class_names):
'''
Transform the Ground Truth records of a image to prediction format, in
order to show & compare in result pic.
Ground Truth records is a dict with format:
{'100,120,200,235':'dog', '85,63,156,128':'car', ...}
Prediction format:
(boxes, classes, scores)
'''
if gt_records is None or len(gt_records) == 0:
return [], [], []
gt_boxes = []
gt_classes = []
gt_scores = []
for (coordinate, class_name) in gt_records.items():
gt_box = [int(x) for x in coordinate.split(',')]
gt_class = class_names.index(class_name)
gt_boxes.append(gt_box)
gt_classes.append(gt_class)
gt_scores.append(1.0)
return np.array(gt_boxes), np.array(gt_classes), np.array(gt_scores)
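
# --- Illustrative sketch added for clarity; not part of the original script. ---
# Converting one made-up ground truth record dict into (boxes, classes, scores):
def _transform_gt_record_example():
    boxes, classes, scores = transform_gt_record({'100,120,200,235': 'dog'}, ['dog', 'car'])
    # boxes -> array([[100, 120, 200, 235]]), classes -> array([0]), scores -> array([1.0])
    return boxes, classes, scores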
def yolo_predict_tflite(interpreter, image, anchors, num_classes, conf_threshold):
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# check the type of the input tensor
#if input_details[0]['dtype'] == np.float32:
#floating_model = True
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]
model_image_size = (height, width)
image_data = preprocess_image(image, model_image_size)
#origin image shape, in (height, width) format
image_shape = tuple(reversed(image.size))
interpreter.set_tensor(input_details[0]['index'], image_data)
interpreter.invoke()
prediction = []
for output_detail in output_details:
output_data = interpreter.get_tensor(output_detail['index'])
prediction.append(output_data)
prediction.sort(key=lambda x: len(x[0]))
if len(anchors) == 5:
# YOLOv2 use 5 anchors and have only 1 prediction
assert len(prediction) == 1, 'invalid YOLOv2 prediction number.'
pred_boxes, pred_classes, pred_scores = yolo2_postprocess_np(prediction[0], image_shape, anchors, num_classes, model_image_size, max_boxes=100, confidence=conf_threshold)
else:
pred_boxes, pred_classes, pred_scores = yolo3_postprocess_np(prediction, image_shape, anchors, num_classes, model_image_size, max_boxes=100, confidence=conf_threshold)
return pred_boxes, pred_classes, pred_scores
def yolo_predict_mnn(interpreter, session, image, anchors, num_classes, conf_threshold):
# assume only 1 input tensor for image
input_tensor = interpreter.getSessionInput(session)
# get input shape
input_shape = input_tensor.getShape()
if input_tensor.getDimensionType() == MNN.Tensor_DimensionType_Tensorflow:
batch, height, width, channel = input_shape
elif input_tensor.getDimensionType() == MNN.Tensor_DimensionType_Caffe:
batch, channel, height, width = input_shape
else:
# should be MNN.Tensor_DimensionType_Caffe_C4, unsupported now
raise ValueError('unsupported input tensor dimension type')
model_image_size = (height, width)
# prepare input image
image_data = preprocess_image(image, model_image_size)
#origin image shape, in (height, width) format
image_shape = tuple(reversed(image.size))
# use a temp tensor to copy data
tmp_input = MNN.Tensor(input_shape, input_tensor.getDataType(),\
image_data, input_tensor.getDimensionType())
input_tensor.copyFrom(tmp_input)
interpreter.runSession(session)
def get_tensor_list(output_tensors):
# transform the output tensor dict to ordered tensor list, for further postprocess
#
# output tensor list should be like (for YOLOv3):
# [
# (name, tensor) for (13, 13, 3, num_classes+5),
# (name, tensor) for (26, 26, 3, num_classes+5),
# (name, tensor) for (52, 52, 3, num_classes+5)
# ]
output_list = []
for (output_tensor_name, output_tensor) in output_tensors.items():
tensor_shape = output_tensor.getShape()
dim_type = output_tensor.getDimensionType()
tensor_height, tensor_width = tensor_shape[2:4] if dim_type == MNN.Tensor_DimensionType_Caffe else tensor_shape[1:3]
if len(anchors) == 6:
# Tiny YOLOv3
if tensor_height == height//32:
output_list.insert(0, (output_tensor_name, output_tensor))
elif tensor_height == height//16:
output_list.insert(1, (output_tensor_name, output_tensor))
else:
raise ValueError('invalid tensor shape')
elif len(anchors) == 9:
# YOLOv3
if tensor_height == height//32:
output_list.insert(0, (output_tensor_name, output_tensor))
elif tensor_height == height//16:
output_list.insert(1, (output_tensor_name, output_tensor))
elif tensor_height == height//8:
output_list.insert(2, (output_tensor_name, output_tensor))
else:
raise ValueError('invalid tensor shape')
elif len(anchors) == 5:
# YOLOv2 use 5 anchors and have only 1 prediction
assert len(output_tensors) == 1, 'YOLOv2 model should have only 1 output tensor.'
output_list.insert(0, (output_tensor_name, output_tensor))
else:
raise ValueError('invalid anchor number')
return output_list
output_tensors = interpreter.getSessionOutputAll(session)
output_tensor_list = get_tensor_list(output_tensors)
prediction = []
for (output_tensor_name, output_tensor) in output_tensor_list:
output_shape = output_tensor.getShape()
output_elementsize = reduce(mul, output_shape)
assert output_tensor.getDataType() == MNN.Halide_Type_Float
# copy output tensor to host, for further postprocess
tmp_output = MNN.Tensor(output_shape, output_tensor.getDataType(),\
#np.zeros(output_shape, dtype=float), output_tensor.getDimensionType())
tuple(np.zeros(output_shape, dtype=float).reshape(output_elementsize, -1)), output_tensor.getDimensionType())
output_tensor.copyToHostTensor(tmp_output)
#tmp_output.printTensorData()
output_data = np.array(tmp_output.getData(), dtype=float).reshape(output_shape)
# our postprocess code based on TF channel last format, so if the output format
# doesn't match, we need to transpose
if output_tensor.getDimensionType() == MNN.Tensor_DimensionType_Caffe:
output_data = output_data.transpose((0,2,3,1))
elif output_tensor.getDimensionType() == MNN.Tensor_DimensionType_Caffe_C4:
raise ValueError('unsupported output tensor dimension type')
prediction.append(output_data)
prediction.sort(key=lambda x: len(x[0]))
if len(anchors) == 5:
# YOLOv2 use 5 anchors and have only 1 prediction
assert len(prediction) == 1, 'invalid YOLOv2 prediction number.'
pred_boxes, pred_classes, pred_scores = yolo2_postprocess_np(prediction[0], image_shape, anchors, num_classes, model_image_size, max_boxes=100, confidence=conf_threshold)
else:
pred_boxes, pred_classes, pred_scores = yolo3_postprocess_np(prediction, image_shape, anchors, num_classes, model_image_size, max_boxes=100, confidence=conf_threshold)
return pred_boxes, pred_classes, pred_scores
def yolo_predict_pb(model, image, anchors, num_classes, model_image_size, conf_threshold):
# NOTE: TF 1.x frozen pb graph need to specify input/output tensor name
# so we need to hardcode the input/output tensor names here to get them from model
if len(anchors) == 6:
output_tensor_names = ['graph/predict_conv_1/BiasAdd:0', 'graph/predict_conv_2/BiasAdd:0']
elif len(anchors) == 9:
output_tensor_names = ['graph/predict_conv_1/BiasAdd:0', 'graph/predict_conv_2/BiasAdd:0', 'graph/predict_conv_3/BiasAdd:0']
elif len(anchors) == 5:
# YOLOv2 use 5 anchors and have only 1 prediction
output_tensor_names = ['graph/predict_conv/BiasAdd:0']
else:
raise ValueError('invalid anchor number')
# assume only 1 input tensor for image
input_tensor_name = 'graph/image_input:0'
# get input/output tensors
image_input = model.get_tensor_by_name(input_tensor_name)
output_tensors = [model.get_tensor_by_name(output_tensor_name) for output_tensor_name in output_tensor_names]
batch, height, width, channel = image_input.shape
model_image_size = (int(height), int(width))
# prepare input image
image_data = preprocess_image(image, model_image_size)
#origin image shape, in (height, width) format
image_shape = tuple(reversed(image.size))
with tf.Session(graph=model) as sess:
prediction = sess.run(output_tensors, feed_dict={
image_input: image_data
})
prediction.sort(key=lambda x: len(x[0]))
if len(anchors) == 5:
# YOLOv2 use 5 anchors and have only 1 prediction
assert len(prediction) == 1, 'invalid YOLOv2 prediction number.'
pred_boxes, pred_classes, pred_scores = yolo2_postprocess_np(prediction[0], image_shape, anchors, num_classes, model_image_size, max_boxes=100, confidence=conf_threshold)
else:
pred_boxes, pred_classes, pred_scores = yolo3_postprocess_np(prediction, image_shape, anchors, num_classes, model_image_size, max_boxes=100, confidence=conf_threshold)
return pred_boxes, pred_classes, pred_scores
def yolo_predict_onnx(model, image, anchors, num_classes, conf_threshold):
input_tensors = []
for i, input_tensor in enumerate(model.get_inputs()):
input_tensors.append(input_tensor)
# assume only 1 input tensor for image
assert len(input_tensors) == 1, 'invalid input tensor number.'
batch, height, width, channel = input_tensors[0].shape
model_image_size = (height, width)
# prepare input image
image_data = preprocess_image(image, model_image_size)
#origin image shape, in (height, width) format
image_shape = tuple(reversed(image.size))
feed = {input_tensors[0].name: image_data}
prediction = model.run(None, feed)
prediction.sort(key=lambda x: len(x[0]))
if len(anchors) == 5:
# YOLOv2 use 5 anchors and have only 1 prediction
assert len(prediction) == 1, 'invalid YOLOv2 prediction number.'
pred_boxes, pred_classes, pred_scores = yolo2_postprocess_np(prediction[0], image_shape, anchors, num_classes, model_image_size, max_boxes=100, confidence=conf_threshold)
else:
pred_boxes, pred_classes, pred_scores = yolo3_postprocess_np(prediction, image_shape, anchors, num_classes, model_image_size, max_boxes=100, confidence=conf_threshold)
return pred_boxes, pred_classes, pred_scores
def yolo_predict_keras(model, image, anchors, num_classes, model_image_size, conf_threshold):
image_data = preprocess_image(image, model_image_size)
#origin image shape, in (height, width) format
image_shape = tuple(reversed(image.size))
prediction = model.predict([image_data])
if len(anchors) == 5:
# YOLOv2 use 5 anchors
pred_boxes, pred_classes, pred_scores = yolo2_postprocess_np(prediction, image_shape, anchors, num_classes, model_image_size, max_boxes=100, confidence=conf_threshold)
else:
pred_boxes, pred_classes, pred_scores = yolo3_postprocess_np(prediction, image_shape, anchors, num_classes, model_image_size, max_boxes=100, confidence=conf_threshold)
return pred_boxes, pred_classes, pred_scores
def get_prediction_class_records(model, model_format, annotation_records, anchors, class_names, model_image_size, conf_threshold, save_result):
'''
Do the predict with YOLO model on annotation images to get predict class dict
predict class dict would contain image_name, coordinary and score, and
sorted by score:
pred_classes_records = {
'car': [
['000001.jpg','94,115,203,232',0.98],
['000002.jpg','82,64,154,128',0.93],
...
],
...
}
'''
if model_format == 'MNN':
#MNN inference engine need create session
session = model.createSession()
# create txt file to save prediction result, with
# save format as annotation file but adding score, like:
#
# path/to/img1.jpg 50,100,150,200,0,0.86 30,50,200,120,3,0.95
#
os.makedirs('result', exist_ok=True)
result_file = open(os.path.join('result','detection_result.txt'), 'w')
pred_classes_records = OrderedDict()
pbar = tqdm(total=len(annotation_records), desc='Eval model')
for (image_name, gt_records) in annotation_records.items():
image = Image.open(image_name)
if image.mode != 'RGB':
image = image.convert('RGB')
image_array = np.array(image, dtype='uint8')
# support of tflite model
if model_format == 'TFLITE':
pred_boxes, pred_classes, pred_scores = yolo_predict_tflite(model, image, anchors, len(class_names), conf_threshold)
# support of MNN model
elif model_format == 'MNN':
pred_boxes, pred_classes, pred_scores = yolo_predict_mnn(model, session, image, anchors, len(class_names), conf_threshold)
# support of TF 1.x frozen pb model
elif model_format == 'PB':
pred_boxes, pred_classes, pred_scores = yolo_predict_pb(model, image, anchors, len(class_names), model_image_size, conf_threshold)
# support of ONNX model
elif model_format == 'ONNX':
pred_boxes, pred_classes, pred_scores = yolo_predict_onnx(model, image, anchors, len(class_names), conf_threshold)
# normal keras h5 model
elif model_format == 'H5':
pred_boxes, pred_classes, pred_scores = yolo_predict_keras(model, image, anchors, len(class_names), model_image_size, conf_threshold)
else:
raise ValueError('invalid model format')
#print('Found {} boxes for {}'.format(len(pred_boxes), image_name))
pbar.update(1)
# save prediction result to txt
result_file.write(image_name)
for box, cls, score in zip(pred_boxes, pred_classes, pred_scores):
xmin, ymin, xmax, ymax = box
box_annotation = " %d,%d,%d,%d,%d,%f" % (
xmin, ymin, xmax, ymax, cls, score)
result_file.write(box_annotation)
result_file.write('\n')
result_file.flush()
if save_result:
gt_boxes, gt_classes, gt_scores = transform_gt_record(gt_records, class_names)
result_dir=os.path.join('result','detection')
os.makedirs(result_dir, exist_ok=True)
colors = get_colors(class_names)
image_array = draw_boxes(image_array, gt_boxes, gt_classes, gt_scores, class_names, colors=None, show_score=False)
image_array = draw_boxes(image_array, pred_boxes, pred_classes, pred_scores, class_names, colors)
image = Image.fromarray(image_array)
# here we handle the RGBA image
if(len(image.split()) == 4):
r, g, b, a = image.split()
image = Image.merge("RGB", (r, g, b))
image.save(os.path.join(result_dir, image_name.split(os.path.sep)[-1]))
# Nothing detected
if pred_boxes is None or len(pred_boxes) == 0:
continue
for box, cls, score in zip(pred_boxes, pred_classes, pred_scores):
pred_class_name = class_names[cls]
xmin, ymin, xmax, ymax = box
coordinate = "{},{},{},{}".format(xmin, ymin, xmax, ymax)
#append or add predict class item
record = [os.path.basename(image_name), coordinate, score]
if pred_class_name in pred_classes_records:
pred_classes_records[pred_class_name].append(record)
else:
pred_classes_records[pred_class_name] = list([record])
# sort pred_classes_records for each class according to score
for pred_class_list in pred_classes_records.values():
pred_class_list.sort(key=lambda ele: ele[2], reverse=True)
pbar.close()
result_file.close()
return pred_classes_records
def box_iou(pred_box, gt_box):
'''
Calculate iou for predict box and ground truth box
Param
pred_box: predict box coordinate
(xmin,ymin,xmax,ymax) format
gt_box: ground truth box coordinate
(xmin,ymin,xmax,ymax) format
Return
iou value
'''
# get intersection box
inter_box = [max(pred_box[0], gt_box[0]), max(pred_box[1], gt_box[1]), min(pred_box[2], gt_box[2]), min(pred_box[3], gt_box[3])]
inter_w = max(0.0, inter_box[2] - inter_box[0] + 1)
inter_h = max(0.0, inter_box[3] - inter_box[1] + 1)
# compute overlap (IoU) = area of intersection / area of union
pred_area = (pred_box[2] - pred_box[0] + 1) * (pred_box[3] - pred_box[1] + 1)
gt_area = (gt_box[2] - gt_box[0] + 1) * (gt_box[3] - gt_box[1] + 1)
inter_area = inter_w * inter_h
union_area = pred_area + gt_area - inter_area
return 0 if union_area == 0 else float(inter_area) / float(union_area)
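
# --- Illustrative sketch added for clarity; not part of the original script. ---
# Worked example of the pixel-inclusive (+1) IoU above: two 100x100 boxes offset
# by 50 pixels share a 50x50 corner, so IoU = 2500 / (10000 + 10000 - 2500).
def _box_iou_example():
    iou = box_iou([0, 0, 99, 99], [50, 50, 149, 149])
    assert abs(iou - 2500.0 / 17500.0) < 1e-6
    return iou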
def match_gt_box(pred_record, gt_records, iou_threshold=0.5):
'''
Search gt_records list and try to find a matching box for the predict box
Param
pred_record: with format ['image_file', 'xmin,ymin,xmax,ymax', score]
gt_records: record list with format
[
['image_file', 'xmin,ymin,xmax,ymax', 'usage'],
['image_file', 'xmin,ymin,xmax,ymax', 'usage'],
...
]
iou_threshold:
pred_record and gt_records should be from same annotation image file
Return
matching gt_record index. -1 when there's no matching gt
'''
max_iou = 0.0
max_index = -1
#get predict box coordinate
pred_box = [float(x) for x in pred_record[1].split(',')]
for i, gt_record in enumerate(gt_records):
#get ground truth box coordinate
gt_box = [float(x) for x in gt_record[1].split(',')]
iou = box_iou(pred_box, gt_box)
# if the ground truth has been assigned to other
# prediction, we couldn't reuse it
if iou > max_iou and gt_record[2] == 'unused' and pred_record[0] == gt_record[0]:
max_iou = iou
max_index = i
# drop the prediction if couldn't match iou threshold
if max_iou < iou_threshold:
max_index = -1
return max_index
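
# --- Illustrative sketch added for clarity; not part of the original script. ---
# Matching one made-up prediction against two ground truth boxes of the same image:
# only the first box overlaps above the IoU threshold, so index 0 is returned.
def _match_gt_box_example():
    gt = [['img.jpg', '0,0,99,99', 'unused'], ['img.jpg', '200,200,299,299', 'unused']]
    pred = ['img.jpg', '10,10,109,109', 0.9]
    return match_gt_box(pred, gt, iou_threshold=0.5)  # -> 0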
def voc_ap(rec, prec):
"""
--- Official matlab code VOC2012---
mrec=[0 ; rec ; 1];
mpre=[0 ; prec ; 0];
for i=numel(mpre)-1:-1:1
mpre(i)=max(mpre(i),mpre(i+1));
end
i=find(mrec(2:end)~=mrec(1:end-1))+1;
ap=sum((mrec(i)-mrec(i-1)).*mpre(i));
"""
rec.insert(0, 0.0) # insert 0.0 at beginning of list
rec.append(1.0) # insert 1.0 at end of list
mrec = rec[:]
prec.insert(0, 0.0) # insert 0.0 at beginning of list
prec.append(0.0) # insert 0.0 at end of list
mpre = prec[:]
"""
This part makes the precision monotonically decreasing
(goes from the end to the beginning)
"""
# matlab indexes start in 1 but python in 0, so I have to do:
# range(start=(len(mpre) - 2), end=0, step=-1)
# also the python function range excludes the end, resulting in:
# range(start=(len(mpre) - 2), end=-1, step=-1)
for i in range(len(mpre) - 2, -1, -1):
mpre[i] = max(mpre[i], mpre[i + 1])
"""
This part creates a list of indexes where the recall changes
"""
# matlab: i=find(mrec(2:end)~=mrec(1:end-1))+1;
i_list = []
for i in range(1, len(mrec)):
if mrec[i] != mrec[i - 1]:
i_list.append(i) # if it was matlab would be i + 1
"""
The Average Precision (AP) is the area under the curve
(numerical integration)
"""
# matlab: ap=sum((mrec(i)-mrec(i-1)).*mpre(i));
ap = 0.0
for i in i_list:
ap += ((mrec[i] - mrec[i - 1]) * mpre[i])
return ap, mrec, mpre
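
# --- Illustrative sketch added for clarity; not part of the original script. ---
# Tiny worked example of the interpolation above: recall [0.5, 1.0] with precision
# [1.0, 0.5] gives AP = 0.5*1.0 + 0.5*0.5 = 0.75. Note that voc_ap mutates the
# lists passed in, so literals are used here.
def _voc_ap_example():
    ap, _, _ = voc_ap([0.5, 1.0], [1.0, 0.5])
    assert abs(ap - 0.75) < 1e-6
    return ap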
'''
def voc_ap(rec, prec, use_07_metric=False):
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
i = np.where(mrec[1:] != mrec[:-1])[0]
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap, mrec, mpre
'''
def get_rec_prec(true_positive, false_positive, gt_records):
'''
Calculate precision/recall based on true_positive, false_positive
result.
'''
cumsum = 0
for idx, val in enumerate(false_positive):
false_positive[idx] += cumsum
cumsum += val
cumsum = 0
for idx, val in enumerate(true_positive):
true_positive[idx] += cumsum
cumsum += val
rec = true_positive[:]
for idx, val in enumerate(true_positive):
rec[idx] = (float(true_positive[idx]) / len(gt_records)) if len(gt_records) != 0 else 0
prec = true_positive[:]
for idx, val in enumerate(true_positive):
prec[idx] = float(true_positive[idx]) / (false_positive[idx] + true_positive[idx])
return rec, prec
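
# --- Illustrative sketch added for clarity; not part of the original script. ---
# With 2 ground truth boxes and score-sorted detections [TP, FP, TP], the cumulative
# sums above give recall [0.5, 0.5, 1.0] and precision [1.0, 0.5, 2/3].
def _get_rec_prec_example():
    rec, prec = get_rec_prec([1, 0, 1], [0, 1, 0], ['gt1', 'gt2'])
    return rec, prec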
def draw_rec_prec(rec, prec, mrec, mprec, class_name, ap):
"""
Draw plot
"""
plt.plot(rec, prec, '-o')
# add a new penultimate point to the list (mrec[-2], 0.0)
# since the last line segment (and respective area) do not affect the AP value
area_under_curve_x = mrec[:-1] + [mrec[-2]] + [mrec[-1]]
area_under_curve_y = mprec[:-1] + [0.0] + [mprec[-1]]
plt.fill_between(area_under_curve_x, 0, area_under_curve_y, alpha=0.2, edgecolor='r')
# set window title
fig = plt.gcf() # gcf - get current figure
fig.canvas.set_window_title('AP ' + class_name)
# set plot title
plt.title('class: ' + class_name + ' AP = {}%'.format(ap*100))
#plt.suptitle('This is a somewhat long figure title', fontsize=16)
# set axis titles
plt.xlabel('Recall')
plt.ylabel('Precision')
# optional - set axes
axes = plt.gca() # gca - get current axes
axes.set_xlim([0.0,1.0])
axes.set_ylim([0.0,1.05]) # .05 to give some extra space
# Alternative option -> wait for button to be pressed
#while not plt.waitforbuttonpress(): pass # wait for key display
# Alternative option -> normal display
#plt.show()
# save the plot
rec_prec_plot_path = os.path.join('result','classes')
os.makedirs(rec_prec_plot_path, exist_ok=True)
fig.savefig(os.path.join(rec_prec_plot_path, class_name + ".png"))
plt.cla() # clear axes for next plot
def adjust_axes(r, t, fig, axes):
"""
Plot - adjust axes
"""
# get text width for re-scaling
bb = t.get_window_extent(renderer=r)
text_width_inches = bb.width / fig.dpi
# get figure width in inches
current_fig_width = fig.get_figwidth()
new_fig_width = current_fig_width + text_width_inches
proportion = new_fig_width / current_fig_width
# get axis limit
x_lim = axes.get_xlim()
axes.set_xlim([x_lim[0], x_lim[1]*proportion])
def draw_plot_func(dictionary, n_classes, window_title, plot_title, x_label, output_path, to_show, plot_color, true_p_bar):
"""
Draw plot using Matplotlib
"""
# sort the dictionary by decreasing value, into a list of tuples
sorted_dic_by_value = sorted(dictionary.items(), key=operator.itemgetter(1))
# unpacking the list of tuples into two lists
sorted_keys, sorted_values = zip(*sorted_dic_by_value)
#
if true_p_bar != "":
"""
Special case to draw in (green=true predictions) & (red=false predictions)
"""
fp_sorted = []
tp_sorted = []
for key in sorted_keys:
fp_sorted.append(dictionary[key] - true_p_bar[key])
tp_sorted.append(true_p_bar[key])
plt.barh(range(n_classes), fp_sorted, align='center', color='crimson', label='False Predictions')
plt.barh(range(n_classes), tp_sorted, align='center', color='forestgreen', label='True Predictions', left=fp_sorted)
# add legend
plt.legend(loc='lower right')
"""
Write number on side of bar
"""
fig = plt.gcf() # gcf - get current figure
axes = plt.gca()
r = fig.canvas.get_renderer()
for i, val in enumerate(sorted_values):
fp_val = fp_sorted[i]
tp_val = tp_sorted[i]
fp_str_val = " " + str(fp_val)
tp_str_val = fp_str_val + " " + str(tp_val)
# trick to paint multicolor with offset:
# first paint everything and then repaint the first number
t = plt.text(val, i, tp_str_val, color='forestgreen', va='center', fontweight='bold')
plt.text(val, i, fp_str_val, color='crimson', va='center', fontweight='bold')
if i == (len(sorted_values)-1): # largest bar
adjust_axes(r, t, fig, axes)
else:
plt.barh(range(n_classes), sorted_values, color=plot_color)
"""
Write number on side of bar
"""
fig = plt.gcf() # gcf - get current figure
axes = plt.gca()
r = fig.canvas.get_renderer()
for i, val in enumerate(sorted_values):
str_val = " " + str(val) # add a space before
if val < 1.0:
str_val = " {0:.2f}".format(val)
t = plt.text(val, i, str_val, color=plot_color, va='center', fontweight='bold')
# re-set axes to show number inside the figure
if i == (len(sorted_values)-1): # largest bar
adjust_axes(r, t, fig, axes)
# set window title
fig.canvas.set_window_title(window_title)
# write classes in y axis
tick_font_size = 12
plt.yticks(range(n_classes), sorted_keys, fontsize=tick_font_size)
"""
Re-scale height accordingly
"""
init_height = fig.get_figheight()
# compute the matrix height in points and inches
dpi = fig.dpi
height_pt = n_classes * (tick_font_size * 1.4) # 1.4 (some spacing)
height_in = height_pt / dpi
# compute the required figure height
top_margin = 0.15 # in percentage of the figure height
bottom_margin = 0.05 # in percentage of the figure height
figure_height = height_in / (1 - top_margin - bottom_margin)
# set new height
if figure_height > init_height:
fig.set_figheight(figure_height)
# set plot title
plt.title(plot_title, fontsize=14)
# set axis titles
# plt.xlabel('classes')
plt.xlabel(x_label, fontsize='large')
# adjust size of window
fig.tight_layout()
# save the plot
fig.savefig(output_path)
# show image
if to_show:
plt.show()
# close the plot
plt.close()
def calc_AP(gt_records, pred_records, class_name, iou_threshold, show_result):
'''
Calculate AP value for one class records
Param
gt_records: ground truth records list for one class, with format:
[
['image_file', 'xmin,ymin,xmax,ymax'],
['image_file', 'xmin,ymin,xmax,ymax'],
...
]
pred_record: predict records for one class, with format:
[
['image_file', 'xmin,ymin,xmax,ymax', score],
['image_file', 'xmin,ymin,xmax,ymax', score],
...
]
Return
AP value for the class
'''
# append usage flag in gt_records for matching gt search
gt_records = [gt_record + ['unused'] for gt_record in gt_records]
# init true_positive and false_positive list
nd = len(pred_records) # number of predict data
true_positive = [0] * nd
false_positive = [0] * nd
true_positive_count = 0
# assign predictions to ground truth objects
for idx, pred_record in enumerate(pred_records):
# filter out gt record from same image
image_gt_records = [ gt_record for gt_record in gt_records if gt_record[0] == pred_record[0]]
i = match_gt_box(pred_record, image_gt_records, iou_threshold=iou_threshold)
if i != -1:
# find a valid gt obj to assign, set
# true_positive list and mark image_gt_records.
#
# trick: gt_records will also be marked
# as 'used', since image_gt_records is a
# reference list
image_gt_records[i][2] = 'used'
true_positive[idx] = 1
true_positive_count += 1
else:
false_positive[idx] = 1
# compute precision/recall
rec, prec = get_rec_prec(true_positive, false_positive, gt_records)
ap, mrec, mprec = voc_ap(rec, prec)
if show_result:
draw_rec_prec(rec, prec, mrec, mprec, class_name, ap)
return ap, true_positive_count
def plot_Pascal_AP_result(count_images, count_true_positives, num_classes,
gt_counter_per_class, pred_counter_per_class,
precision_dict, recall_dict, mPrec, mRec,
APs, mAP, iou_threshold):
'''
Plot the total number of occurences of each class in the ground-truth
'''
window_title = "Ground-Truth Info"
plot_title = "Ground-Truth\n" + "(" + str(count_images) + " files and " + str(num_classes) + " classes)"
x_label = "Number of objects per class"
output_path = os.path.join('result','Ground-Truth_Info.png')
draw_plot_func(gt_counter_per_class, num_classes, window_title, plot_title, x_label, output_path, to_show=False, plot_color='forestgreen', true_p_bar='')
'''
Plot the total number of occurences of each class in the "predicted" folder
'''
window_title = "Predicted Objects Info"
# Plot title
plot_title = "Predicted Objects\n" + "(" + str(count_images) + " files and "
count_non_zero_values_in_dictionary = sum(int(x) > 0 for x in list(pred_counter_per_class.values()))
plot_title += str(count_non_zero_values_in_dictionary) + " detected classes)"
# end Plot title
x_label = "Number of objects per class"
output_path = os.path.join('result','Predicted_Objects_Info.png')
draw_plot_func(pred_counter_per_class, len(pred_counter_per_class), window_title, plot_title, x_label, output_path, to_show=False, plot_color='forestgreen', true_p_bar=count_true_positives)
'''
Draw mAP plot (Show AP's of all classes in decreasing order)
'''
window_title = "mAP"
plot_title = "mAP@IoU={0}: {1:.2f}%".format(iou_threshold, mAP)
x_label = "Average Precision"
output_path = os.path.join('result','mAP.png')
draw_plot_func(APs, num_classes, window_title, plot_title, x_label, output_path, to_show=False, plot_color='royalblue', true_p_bar='')
'''
Draw Precision plot (Show Precision of all classes in decreasing order)
'''
window_title = "Precision"
plot_title = "mPrec@IoU={0}: {1:.2f}%".format(iou_threshold, mPrec)
x_label = "Precision rate"
output_path = os.path.join('result','Precision.png')
draw_plot_func(precision_dict, len(precision_dict), window_title, plot_title, x_label, output_path, to_show=False, plot_color='royalblue', true_p_bar='')
'''
Draw Recall plot (Show Recall of all classes in decreasing order)
'''
window_title = "Recall"
plot_title = "mRec@IoU={0}: {1:.2f}%".format(iou_threshold, mRec)
x_label = "Recall rate"
output_path = os.path.join('result','Recall.png')
draw_plot_func(recall_dict, len(recall_dict), window_title, plot_title, x_label, output_path, to_show=False, plot_color='royalblue', true_p_bar='')
def get_mean_metric(metric_records, gt_classes_records):
'''
Calculate mean metric, but only count classes which have ground truth object
Param
metric_records: metric dict like:
metric_records = {
'aeroplane': 0.79,
'bicycle': 0.79,
...
'tvmonitor': 0.71,
}
gt_classes_records: ground truth class dict like:
gt_classes_records = {
'car': [
['000001.jpg','100,120,200,235'],
['000002.jpg','85,63,156,128'],
...
],
...
}
Return
mean_metric: float value of mean metric
'''
mean_metric = 0.0
count = 0
for (class_name, metric) in metric_records.items():
if (class_name in gt_classes_records) and (len(gt_classes_records[class_name]) != 0):
mean_metric += metric
count += 1
mean_metric = (mean_metric/count)*100 if count != 0 else 0.0
return mean_metric
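
# --- Illustrative sketch added for clarity; not part of the original script. ---
# Classes without ground truth objects are excluded from the mean, so with the
# made-up values below the result is (0.8 + 0.6) / 2 * 100 = 70.0.
def _get_mean_metric_example():
    metrics = {'car': 0.8, 'dog': 0.6, 'bicycle': 0.0}
    gts = {'car': [['a.jpg', '0,0,10,10']], 'dog': [['b.jpg', '0,0,10,10']]}
    return get_mean_metric(metrics, gts)  # -> 70.0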
def compute_mAP_PascalVOC(annotation_records, gt_classes_records, pred_classes_records, class_names, iou_threshold, show_result=True):
'''
Compute PascalVOC style mAP
'''
APs = {}
count_true_positives = {class_name: 0 for class_name in list(gt_classes_records.keys())}
#get AP value for each of the ground truth classes
for _, class_name in enumerate(class_names):
#if there's no gt obj for a class, record 0
if class_name not in gt_classes_records:
APs[class_name] = 0.
continue
gt_records = gt_classes_records[class_name]
#if we didn't detect any obj for a class, record 0
if class_name not in pred_classes_records:
APs[class_name] = 0.
continue
pred_records = pred_classes_records[class_name]
ap, true_positive_count = calc_AP(gt_records, pred_records, class_name, iou_threshold, show_result)
APs[class_name] = ap
count_true_positives[class_name] = true_positive_count
#sort AP result by value, in descending order
APs = OrderedDict(sorted(APs.items(), key=operator.itemgetter(1), reverse=True))
#get mAP percentage value
#mAP = np.mean(list(APs.values()))*100
mAP = get_mean_metric(APs, gt_classes_records)
#get GroundTruth count per class
gt_counter_per_class = {}
for (class_name, info_list) in gt_classes_records.items():
gt_counter_per_class[class_name] = len(info_list)
#get Precision count per class
pred_counter_per_class = {class_name: 0 for class_name in list(gt_classes_records.keys())}
for (class_name, info_list) in pred_classes_records.items():
pred_counter_per_class[class_name] = len(info_list)
#get the precision & recall
precision_dict = {}
recall_dict = {}
for (class_name, gt_count) in gt_counter_per_class.items():
if (class_name not in pred_counter_per_class) or (class_name not in count_true_positives) or pred_counter_per_class[class_name] == 0:
precision_dict[class_name] = 0.
else:
precision_dict[class_name] = float(count_true_positives[class_name]) / pred_counter_per_class[class_name]
if class_name not in count_true_positives or gt_count == 0:
recall_dict[class_name] = 0.
else:
recall_dict[class_name] = float(count_true_positives[class_name]) / gt_count
#get mPrec, mRec
#mPrec = np.mean(list(precision_dict.values()))*100
#mRec = np.mean(list(recall_dict.values()))*100
mPrec = get_mean_metric(precision_dict, gt_classes_records)
mRec = get_mean_metric(recall_dict, gt_classes_records)
if show_result:
plot_Pascal_AP_result(len(annotation_records), count_true_positives, len(gt_classes_records),
gt_counter_per_class, pred_counter_per_class,
precision_dict, recall_dict, mPrec, mRec,
APs, mAP, iou_threshold)
#show result
print('\nPascal VOC AP evaluation')
for (class_name, AP) in APs.items():
print('%s: AP %.4f, precision %.4f, recall %.4f' % (class_name, AP, precision_dict[class_name], recall_dict[class_name]))
print('mAP@IoU=%.2f result: %f' % (iou_threshold, mAP))
print('mPrec@IoU=%.2f result: %f' % (iou_threshold, mPrec))
print('mRec@IoU=%.2f result: %f' % (iou_threshold, mRec))
#return mAP percentage value
return mAP, APs
def compute_AP_COCO(annotation_records, gt_classes_records, pred_classes_records, class_names, show_result=True):
'''
Compute MSCOCO AP list on AP 0.5:0.05:0.95
'''
iou_threshold_list = np.arange(0.50, 1.00, 0.05)
APs = {}
pbar = tqdm(total=len(iou_threshold_list), desc='Eval COCO')
for iou_threshold in iou_threshold_list:
iou_threshold = round(iou_threshold, 2)
mAP, _ = compute_mAP_PascalVOC(annotation_records, gt_classes_records, pred_classes_records, class_names, iou_threshold, show_result=False)
APs[iou_threshold] = round(mAP, 6)
pbar.update(1)
pbar.close()
#sort AP result by value, in descending order
APs = OrderedDict(sorted(APs.items(), key=operator.itemgetter(1), reverse=True))
#get overall AP percentage value
AP = np.mean(list(APs.values()))
if show_result:
'''
Draw MS COCO AP plot
'''
os.makedirs('result', exist_ok=True)
window_title = "MSCOCO AP on different IOU"
plot_title = "COCO AP = {0:.2f}%".format(AP)
x_label = "Average Precision"
output_path = os.path.join('result','COCO_AP.png')
draw_plot_func(APs, len(APs), window_title, plot_title, x_label, output_path, to_show=False, plot_color='royalblue', true_p_bar='')
print('\nMS COCO AP evaluation')
for (iou_threshold, AP_value) in APs.items():
print('IOU %.2f: AP %f' % (iou_threshold, AP_value))
print('total AP: %f' % (AP))
#return AP percentage value
return AP, APs
def compute_AP_COCO_Scale(annotation_records, scale_gt_classes_records, pred_classes_records, class_names):
'''
    Compute MS COCO AP on different object scales: small, medium, large
'''
scale_APs = {}
for scale_key in ['small','medium','large']:
gt_classes_records = scale_gt_classes_records[scale_key]
scale_AP, _ = compute_AP_COCO(annotation_records, gt_classes_records, pred_classes_records, class_names, show_result=False)
scale_APs[scale_key] = round(scale_AP, 4)
#get overall AP percentage value
scale_mAP = np.mean(list(scale_APs.values()))
'''
Draw Scale AP plot
'''
os.makedirs('result', exist_ok=True)
window_title = "MSCOCO AP on different scale"
plot_title = "scale mAP = {0:.2f}%".format(scale_mAP)
x_label = "Average Precision"
output_path = os.path.join('result','COCO_scale_AP.png')
draw_plot_func(scale_APs, len(scale_APs), window_title, plot_title, x_label, output_path, to_show=False, plot_color='royalblue', true_p_bar='')
'''
Draw Scale Object Sum plot
'''
for scale_key in ['small','medium','large']:
gt_classes_records = scale_gt_classes_records[scale_key]
gt_classes_sum = {}
for _, class_name in enumerate(class_names):
            # summarize the gt object count for every class at this scale
gt_classes_sum[class_name] = np.sum(len(gt_classes_records[class_name])) if class_name in gt_classes_records else 0
total_sum = np.sum(list(gt_classes_sum.values()))
window_title = "{} object number".format(scale_key)
plot_title = "total {} object number = {}".format(scale_key, total_sum)
x_label = "Object Number"
output_path = os.path.join('result','{}_object_number.png'.format(scale_key))
draw_plot_func(gt_classes_sum, len(gt_classes_sum), window_title, plot_title, x_label, output_path, to_show=False, plot_color='royalblue', true_p_bar='')
print('\nMS COCO AP evaluation on different scale')
for (scale, AP_value) in scale_APs.items():
print('%s scale: AP %f' % (scale, AP_value))
print('total AP: %f' % (scale_mAP))
def add_gt_record(gt_records, gt_record, class_name):
# append or add ground truth class item
if class_name in gt_records:
gt_records[class_name].append(gt_record)
else:
gt_records[class_name] = list([gt_record])
return gt_records
def get_scale_gt_dict(gt_classes_records, class_names):
'''
Get ground truth class dict on different object scales, according to MS COCO metrics definition:
        small objects: area <= 32^2
        medium objects: 32^2 < area <= 96^2
        large objects: area > 96^2
input gt_classes_records would be like:
gt_classes_records = {
'car': [
['000001.jpg','100,120,200,235'],
['000002.jpg','85,63,156,128'],
...
],
...
}
return a record dict with following format, for AP/AR eval on different scale:
scale_gt_classes_records = {
'small': {
'car': [
['000001.jpg','100,120,200,235'],
['000002.jpg','85,63,156,128'],
...
],
...
},
'medium': {
'car': [
['000003.jpg','100,120,200,235'],
['000004.jpg','85,63,156,128'],
...
],
...
},
'large': {
'car': [
['000005.jpg','100,120,200,235'],
['000006.jpg','85,63,156,128'],
...
],
...
}
}
'''
scale_gt_classes_records = {}
small_gt_records = {}
medium_gt_records = {}
large_gt_records = {}
for _, class_name in enumerate(class_names):
gt_records = gt_classes_records[class_name]
for (image_file, box) in gt_records:
# get box area based on coordinate
box_coord = [int(p) for p in box.split(',')]
box_area = (box_coord[2] - box_coord[0]) * (box_coord[3] - box_coord[1])
# add to corresponding gt records dict according to area size
if box_area <= 32*32:
small_gt_records = add_gt_record(small_gt_records, [image_file, box], class_name)
elif box_area > 32*32 and box_area <= 96*96:
medium_gt_records = add_gt_record(medium_gt_records, [image_file, box], class_name)
elif box_area > 96*96:
large_gt_records = add_gt_record(large_gt_records, [image_file, box], class_name)
# form up scale_gt_classes_records
scale_gt_classes_records['small'] = small_gt_records
scale_gt_classes_records['medium'] = medium_gt_records
scale_gt_classes_records['large'] = large_gt_records
return scale_gt_classes_records
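# Illustrative check of the scale split above: the docstring's example box
# '100,120,200,235' has area (200-100)*(235-120) = 11500, which is larger than
# 96*96 = 9216, so get_scale_gt_dict would route it into the 'large' dict.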
def eval_AP(model, model_format, annotation_lines, anchors, class_names, model_image_size, eval_type, iou_threshold, conf_threshold, save_result):
'''
Compute AP for detection model on annotation dataset
'''
annotation_records, gt_classes_records = annotation_parse(annotation_lines, class_names)
pred_classes_records = get_prediction_class_records(model, model_format, annotation_records, anchors, class_names, model_image_size, conf_threshold, save_result)
AP = 0.0
if eval_type == 'VOC':
AP, _ = compute_mAP_PascalVOC(annotation_records, gt_classes_records, pred_classes_records, class_names, iou_threshold)
elif eval_type == 'COCO':
AP, _ = compute_AP_COCO(annotation_records, gt_classes_records, pred_classes_records, class_names)
# get AP for different scale: small, medium, large
scale_gt_classes_records = get_scale_gt_dict(gt_classes_records, class_names)
compute_AP_COCO_Scale(annotation_records, scale_gt_classes_records, pred_classes_records, class_names)
else:
raise ValueError('Unsupported evaluation type')
return AP
#load TF 1.x frozen pb graph
def load_graph(model_path):
# We parse the graph_def file
with tf.gfile.GFile(model_path, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
# We load the graph_def in the default graph
with tf.Graph().as_default() as graph:
tf.import_graph_def(
graph_def,
input_map=None,
return_elements=None,
name="graph",
op_dict=None,
producer_op_list=None
)
return graph
def load_eval_model(model_path):
# support of tflite model
if model_path.endswith('.tflite'):
from tensorflow.lite.python import interpreter as interpreter_wrapper
model = interpreter_wrapper.Interpreter(model_path=model_path)
model.allocate_tensors()
model_format = 'TFLITE'
# support of MNN model
elif model_path.endswith('.mnn'):
model = MNN.Interpreter(model_path)
model_format = 'MNN'
# support of TF 1.x frozen pb model
elif model_path.endswith('.pb'):
model = load_graph(model_path)
model_format = 'PB'
# support of ONNX model
elif model_path.endswith('.onnx'):
model = onnxruntime.InferenceSession(model_path)
model_format = 'ONNX'
# normal keras h5 model
elif model_path.endswith('.h5'):
custom_object_dict = get_custom_objects()
model = load_model(model_path, compile=False, custom_objects=custom_object_dict)
model_format = 'H5'
K.set_learning_phase(0)
else:
raise ValueError('invalid model file')
return model, model_format
def main():
# class YOLO defines the default value, so suppress any default here
parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS, description='evaluate YOLO model (h5/pb/onnx/tflite/mnn) with test dataset')
'''
Command line options
'''
parser.add_argument(
'--model_path', type=str, required=True,
help='path to model file')
parser.add_argument(
'--anchors_path', type=str, required=True,
help='path to anchor definitions')
parser.add_argument(
'--classes_path', type=str, required=False,
help='path to class definitions, default configs/voc_classes.txt', default=os.path.join('configs' , 'voc_classes.txt'))
parser.add_argument(
'--annotation_file', type=str, required=True,
help='test annotation txt file')
parser.add_argument(
'--eval_type', type=str,
help='evaluation type (VOC/COCO), default=VOC', default='VOC')
parser.add_argument(
'--iou_threshold', type=float,
help='IOU threshold for PascalVOC mAP, default=0.5', default=0.5)
parser.add_argument(
'--conf_threshold', type=float,
help='confidence threshold for filtering box in postprocess, default=0.001', default=0.001)
parser.add_argument(
'--model_image_size', type=str,
help='model image input size as <height>x<width>, default 416x416', default='416x416')
parser.add_argument(
'--save_result', default=False, action="store_true",
help='Save the detection result image in result/detection dir'
)
args = parser.parse_args()
# param parse
anchors = get_anchors(args.anchors_path)
class_names = get_classes(args.classes_path)
height, width = args.model_image_size.split('x')
model_image_size = (int(height), int(width))
assert (model_image_size[0]%32 == 0 and model_image_size[1]%32 == 0), 'model_image_size should be multiples of 32'
annotation_lines = get_dataset(args.annotation_file, shuffle=False)
model, model_format = load_eval_model(args.model_path)
start = time.time()
eval_AP(model, model_format, annotation_lines, anchors, class_names, model_image_size, args.eval_type, args.iou_threshold, args.conf_threshold, args.save_result)
end = time.time()
print("Evaluation time cost: {:.6f}s".format(end - start))
if __name__ == '__main__':
main()
| []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
formatter.go | package log
import (
"bytes"
"encoding/json"
"fmt"
"os"
"path"
"runtime"
"strconv"
"strings"
)
// RFC3339Milli defines an RFC3339 date format with milliseconds
const RFC3339Milli = "2006-01-02T15:04:05.000Z07:00"
const defaultTimestampFormat = RFC3339Milli
// FieldLabel is a type for defining label keys.
type FieldLabel string
// FieldMap allows customization of the key names for default fields.
type FieldMap map[FieldLabel]string
// Default key names for the default fields
const (
LabelCaller = "caller"
LabelData = "data"
LabelError = "error"
LabelHost = "host"
LabelLevel = "level"
LabelMsg = "msg"
LabelTime = "time"
LabelTrace = "trace"
)
func (f FieldMap) resolve(fieldLabel FieldLabel) string {
if definedLabel, ok := f[fieldLabel]; ok {
return definedLabel
}
return string(fieldLabel)
}
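// Illustrative note (not additional API): with a custom map such as
//
//	FieldMap{LabelTime: "@timestamp", LabelMsg: "message"}
//
// those two default keys are renamed in the output, while any label that is
// left unmapped falls back to its plain string value via resolve().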
type logData struct {
LabelCaller string `json:"-"`
LabelData string `json:"-"`
LabelError string `json:"-"`
LabelHost string `json:"-"`
LabelLevel string `json:"-"`
LabelMsg string `json:"-"`
LabelTime string `json:"-"`
LabelTrace string `json:"-"`
Color colors `json:"-"`
ErrData []string `json:"-"`
Caller string `json:"caller,omitempty"`
Data map[string]interface{} `json:"data,omitempty"`
Err interface{} `json:"error,omitempty"`
Hostname string `json:"host,omitempty"`
Level string `json:"level,omitempty"`
Message string `json:"msg,omitempty"`
Timestamp string `json:"time,omitempty"`
Trace []string `json:"trace,omitempty"`
}
// UnmarshalJSON implements the json.Unmarshaler interface.
func (l *logData) UnmarshalJSON(d []byte) error {
data := map[string]interface{}{}
err := json.Unmarshal(d, &data)
if nil != err {
return err
}
if _, ok := data["caller"]; ok {
l.Caller = data["caller"].(string)
}
if _, ok := data["data"]; ok {
if nil == l.Data {
l.Data = map[string]interface{}{}
}
for k, v := range data["data"].(map[string]interface{}) {
if e, ok := v.(error); ok {
l.Data[k] = e.(error)
} else {
l.Data[k] = v
}
}
//l.Data = data["data"].(map[string]interface{})
}
if _, ok := data["error"]; ok && "" != data["error"] {
l.Err = fmt.Errorf(data["error"].(string))
}
if _, ok := data["host"]; ok {
l.Hostname = data["host"].(string)
}
if _, ok := data["level"]; ok {
l.Level = data["level"].(string)
}
if _, ok := data["msg"]; ok {
l.Message = data["msg"].(string)
}
if _, ok := data["time"]; ok {
l.Timestamp = data["time"].(string)
}
if _, ok := data["trace"]; ok {
l.Trace = data["trace"].([]string)
}
return nil
}
// SetCallerLevel will adjust the relative caller level in log output.
func SetCallerLevel(level int) {
callerLevel = level
}
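// For example, if this package is wrapped by a thin project-local logging
// helper, the reported caller would be the helper itself; calling
// SetCallerLevel(1) shifts the reported frame up one level to compensate
// (illustrative note based on the runtime.Caller(a + callerLevel) usage below).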
var callerLevel int
func getCaller() string {
caller := ""
a := 0
for {
if pc, file, line, ok := runtime.Caller(a); ok {
if !strings.Contains(strings.ToLower(file), "github.com/bdlm/log") ||
strings.HasSuffix(strings.ToLower(file), "_test.go") {
if 0 == callerLevel {
caller = fmt.Sprintf("%s:%d %s", path.Base(file), line, runtime.FuncForPC(pc).Name())
} else {
if pc2, file2, line2, ok := runtime.Caller(a + callerLevel); ok {
caller = fmt.Sprintf("%s:%d %s", path.Base(file2), line2, runtime.FuncForPC(pc2).Name())
} else {
caller = fmt.Sprintf("%s:%d %s", path.Base(file), line, runtime.FuncForPC(pc).Name())
}
}
break
}
} else {
break
}
a++
}
return caller
}
func getTrace() []string {
trace := []string{}
a := 0
for {
if pc, file, line, ok := runtime.Caller(a); ok {
if !strings.Contains(strings.ToLower(file), "github.com/bdlm/log") ||
strings.HasSuffix(strings.ToLower(file), "_test.go") {
if 0 == callerLevel {
trace = append(trace, fmt.Sprintf("%s:%d %s", path.Base(file), line, runtime.FuncForPC(pc).Name()))
} else {
if pc2, file2, line2, ok := runtime.Caller(a + callerLevel); ok {
trace = append(trace, fmt.Sprintf("%s:%d %s", path.Base(file2), line2, runtime.FuncForPC(pc2).Name()))
} else {
trace = append(trace, fmt.Sprintf("%s:%d %s", path.Base(file), line, runtime.FuncForPC(pc).Name()))
}
}
}
} else {
break
}
a++
}
if len(trace) > 2 {
trace = trace[:len(trace)-2]
}
return trace
}
var (
// DEFAULTColor is the default TTY 'level' color.
DEFAULTColor = "\033[38;5;46m"
// ERRORColor is the TTY 'level' color for error messages.
ERRORColor = "\033[38;5;166m"
// FATALColor is the TTY 'level' color for fatal messages.
FATALColor = "\033[38;5;124m"
// PANICColor is the TTY 'level' color for panic messages.
PANICColor = "\033[38;5;196m"
// WARNColor is the TTY 'level' color for warning messages.
WARNColor = "\033[38;5;226m"
// DEBUGColor is the TTY 'level' color for debug messages.
DEBUGColor = "\033[38;5;245m"
// CallerColor is the TTY caller color.
CallerColor = "\033[38;5;244m"
// DataLabelColor is the TTY data label color.
DataLabelColor = "\033[38;5;111m"
// DataValueColor is the TTY data value color.
DataValueColor = "\033[38;5;180m"
// HostnameColor is the TTY hostname color.
HostnameColor = "\033[38;5;39m"
// TraceColor is the TTY trace color.
TraceColor = "\033[38;5;244m"
// TimestampColor is the TTY timestamp color.
TimestampColor = "\033[38;5;72m"
	// ResetColor resets the TTY color scheme to its default.
ResetColor = "\033[0m"
)
type colors struct {
Caller string
DataLabel string
DataValue string
Err string
Hostname string
Level string
Reset string
Timestamp string
Trace string
}
func escape(data interface{}, escapeHTML bool) string {
var result string
buf := new(bytes.Buffer)
encoder := json.NewEncoder(buf)
encoder.SetEscapeHTML(escapeHTML)
err := encoder.Encode(data)
if nil == err {
result = strings.Trim(buf.String(), "\n")
}
return result
}
// getData is a helper function that extracts log data from the Entry.
func getData(entry *Entry, fieldMap FieldMap, escapeHTML, isTTY bool) *logData {
var levelColor string
data := &logData{
Caller: getCaller(),
Data: map[string]interface{}{},
Err: entry.Err,
ErrData: []string{},
Hostname: os.Getenv("HOSTNAME"),
Level: LevelString(entry.Level),
Message: entry.Message,
Timestamp: entry.Time.Format(RFC3339Milli),
Trace: getTrace(),
}
data.LabelCaller = fieldMap.resolve(LabelCaller)
data.LabelData = fieldMap.resolve(LabelData)
data.LabelError = fieldMap.resolve(LabelError)
data.LabelHost = fieldMap.resolve(LabelHost)
data.LabelLevel = fieldMap.resolve(LabelLevel)
data.LabelMsg = fieldMap.resolve(LabelMsg)
data.LabelTime = fieldMap.resolve(LabelTime)
data.LabelTrace = fieldMap.resolve(LabelTrace)
if isTTY {
switch entry.Level {
case DebugLevel:
levelColor = DEBUGColor
case WarnLevel:
levelColor = WARNColor
case ErrorLevel:
levelColor = ERRORColor
case FatalLevel:
levelColor = FATALColor
case PanicLevel:
levelColor = PANICColor
default:
levelColor = DEFAULTColor
}
data.Color = colors{
Caller: CallerColor,
DataLabel: DataLabelColor,
DataValue: DataValueColor,
Err: ERRORColor,
Hostname: HostnameColor,
Level: levelColor,
Reset: ResetColor,
Timestamp: TimestampColor,
Trace: TraceColor,
}
}
remapData(entry, fieldMap, data)
return data
}
func remapData(entry *Entry, fieldMap FieldMap, data *logData) {
for k, v := range entry.Data {
switch k {
default:
switch v := v.(type) {
case string:
data.Data[strings.TrimPrefix(k, fieldMap.resolve(LabelData)+".")] = strings.Trim(strconv.QuoteToASCII(fmt.Sprintf("%v", v)), `"`)
default:
data.Data[strings.TrimPrefix(k, fieldMap.resolve(LabelData)+".")] = v
}
}
}
}
// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
//
// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
// * `entry.Data["time"]`. The timestamp.
// * `entry.Data["level"]`. The level the entry was logged at.
//
// Any additional fields added with `WithField` or `WithFields` are also in
// `entry.Data`. Format is expected to return an array of bytes which are then
// logged to `logger.Out`.
type Formatter interface {
Format(*Entry) ([]byte, error)
}
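// exampleKVFormatter is a minimal illustrative Formatter sketch, not part of
// the original package API; it relies only on Entry fields already used by
// getData above (Time, Level, Message, Data). Map iteration order of Data is
// not deterministic, which is acceptable for a sketch.
type exampleKVFormatter struct{}
// Format renders an entry as a single "key=value" line.
func (exampleKVFormatter) Format(entry *Entry) ([]byte, error) {
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "time=%s level=%s msg=%q",
		entry.Time.Format(RFC3339Milli), LevelString(entry.Level), entry.Message)
	for k, v := range entry.Data {
		fmt.Fprintf(&buf, " %s=%v", k, v)
	}
	buf.WriteByte('\n')
	return buf.Bytes(), nil
}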
// This is to not silently overwrite `time`, `msg` and `level` fields when
// dumping it. If this code wasn't there doing:
//
// WithField("level", 1).Info("hello")
//
// Would just silently drop the user provided level. Instead with this code
// it'll be logged as:
//
// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
//
// It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters.
func prefixFieldClashes(data Fields, fieldMap FieldMap) {
var key string
for _, field := range []FieldLabel{
LabelCaller,
LabelData,
LabelError,
LabelHost,
LabelLevel,
LabelMsg,
LabelTime,
} {
key = fieldMap.resolve(field)
if t, ok := data[key]; ok {
data[fieldMap.resolve(LabelData)+"."+key] = t
delete(data, key)
}
}
}
| [
"\"HOSTNAME\""
]
| []
| [
"HOSTNAME"
]
| [] | ["HOSTNAME"] | go | 1 | 0 | |
vocale/recognizer.py | import asyncio
import json
import os
import time
import warnings
import wave
from queue import Queue
from typing import Callable
import numpy as np
import pvporcupine
import sounddevice as sd
import vosk
from fluxhelper import osInterface
from speech_recognition import AudioFile, Recognizer, UnknownValueError
from tensorflow.keras.models import load_model
from vosk import SetLogLevel
RATE = 16000
DURATION = 0.5
CHANNELS = 1
CHUNK = 512
MAX_FREQ = 18
# Disable logging
warnings.filterwarnings("ignore")
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
SetLogLevel(-1)
class VAD:
"""
Main Voice activity detection class.
    This uses deep learning to predict whether a piece of audio contains speech or not.
Parameters
----------
`modelPath` : str
path to the model.h5 file.
    `sensitivity` : float
how sensitive the detection is.
Methods
-------
`isSpeech(stream: bytes)` :
returns True if the classified stream is a voice and False if not.
"""
def __init__(self, modelPath: str, sensitivity: float = 0.90):
self.model = load_model(modelPath)
self.buffer = []
self.sensitivity = sensitivity
async def _formatPredictions(self, predictions) -> list:
"""
Format the predictions into a more readable and easy to traverse format.
"""
predictions = [[i, float(r)] for i, r in enumerate(predictions)]
predictions.sort(key=lambda x: x[1], reverse=True)
return predictions
async def isSpeech(self, stream: bytes) -> bool:
"""
Makes a prediction from the given stream bytes.
Parameters
----------
`stream` : bytes
raw bytes stream (usually retrieved from pyaudio's .read function or sounddevice)
Returns True if the classified stream is a voice and False if not.
"""
# Convert the raw streams into a numpy array and get the decibels
arr = np.frombuffer(stream, dtype=np.int16)
db = 20 * np.log10(np.abs(np.fft.rfft(arr[:2048])))
# Collect decibel values from relevent frequencies (MAX_FREQ)
features = list(np.round(db[3:MAX_FREQ], 2))
self.buffer.append(features)
if len(self.buffer) == int(RATE / CHUNK * DURATION):
total = np.array([x for y in self.buffer for x in y])
self.buffer.clear()
# Make the prediction
predictions = self.model(np.array([total]))[0]
predictions = await self._formatPredictions(predictions)
index, probability = predictions[0]
if index == 1 and probability >= self.sensitivity:
# 1 is the index of speech and 0 is non speech
return True
return False
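# Minimal usage sketch for VAD (illustrative; the model path and the source of
# the raw CHUNK-sized int16 frames are assumptions, e.g. a sounddevice
# RawInputStream like the one used by SpeechRecognizer below):
#
#   vad = VAD("models/vad.h5", sensitivity=0.9)
#   if await vad.isSpeech(chunk_bytes):
#       print("voice activity detected")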
class SpeechRecognizer:
def __init__(
self,
wakewords: list,
wakewordSensitivities: list,
vadPath: str,
vadThreshold: float,
voskPath: str,
savePath: str,
callback: Callable,
loop: asyncio.BaseEventLoop,
offline: bool = False,
device: int = None,
**kwargs,
) -> None:
# Class parameters
self.wakewords = wakewords
self.offline = offline
self.savePath = savePath
self.voskPath = voskPath
self.device = device
self.loop = loop
self.sensitivities = wakewordSensitivities
self._callback = callback
# Class kwarg parameters
self.speechLengths = kwargs.get("speechLengths", (6.0, 0.9))
self.speechLengthMultiplier = kwargs.get("speechLengthMultiplier", 0.15)
self.beforeWokeBufferLimit = kwargs.get("beforeWokeBufferLimit", 200)
self.googleRecognizerKey = kwargs.get("googleRecognizerKey", None)
self.disableVosk = kwargs.get("disableVosk", False)
# Empty string convert to None
if self.googleRecognizerKey == "":
self.googleRecognizerKey = None
# Initialize vosk recognizer
if not self.disableVosk:
self.voskModel = vosk.Model(self.voskPath)
self.vosk = None
self.restartVosk()
# Initialize speechrecognition module
self.srRecognizer = Recognizer()
# Initialize other libraries
w = [x for x in self.wakewords if x in pvporcupine.KEYWORDS]
self.porcupine = None
if w:
self.porcupine = pvporcupine.create(
keywords=w, sensitivities=self.sensitivities
)
self.vad = VAD(vadPath, vadThreshold)
self.done = False
self.listen = True
self.woke = False
self._speechLength = self.speechLengths[0]
self._frames = {"beforeWoke": [], "afterWoke": []}
self._followup = False
self._q = Queue()
self._ready = False
self._speech = True
self._startSpeechLength = self.speechLengths[0]
self._realSpeechLength = self.speechLengths[1]
self._lastRecognizedTime = time.time()
self.__count = 0
self.__prevSpeaking = None
self.__length = 0
# User callback parameters
self.callbackParams = {}
def __callback(self, data, frames, time_, status) -> None:
self._q.put(bytes(data))
def _reset(self) -> None:
self._frames = {"beforeWoke": [], "afterWoke": []}
if not self.disableVosk:
self.vosk.FinalResult()
self.woke = False
self._speech = True
self._lastRecognizedTime = time.time()
self.__count = 0
self.__prevSpeaking = None
self.__length = 0
self._speechLength = self.speechLengths[0]
def multiplySpeechLength(self, multiplier: float) -> float:
"""
Dynamically update the speech length by multiplying it by a certain value.
"""
self._realSpeechLength = self.speechLengths[1] * multiplier
return self._realSpeechLength
def recognizeDone(self) -> None:
"""
Tells the recognizer that we are done recognizing.
"""
self._speech = False
def restartVosk(self) -> None:
"""
Restart just the Vosk recognizer.
"""
if not self.disableVosk:
self.vosk = vosk.KaldiRecognizer(self.voskModel, RATE)
async def recognize(self) -> dict:
if not self._speech:
if self.offline:
if not self.disableVosk:
text = json.loads(self.vosk.FinalResult())["text"]
return {"status": "recognized", "msg": text}
                return {"status": "error", "msg": "both disableVosk and offline are True. Can't recognize with nothing to recognize with.", "exception": None}
frames = self._frames["beforeWoke"][-10:] + self._frames["afterWoke"]
# First save the data gathered into a .wav file
wf = wave.open(self.savePath, "wb")
wf.setnchannels(CHANNELS)
wf.setsampwidth(2)
wf.setframerate(RATE)
wf.writeframes(b"".join(frames))
wf.close()
# Convert it into a AudioData object
try:
with AudioFile(self.savePath) as src:
audio = self.srRecognizer.record(src)
except Exception as e:
return {
"status": "error",
"msg": f"Failed to convert cache file to AudioData. ({e})",
"exception": e
}
# Finally attempt to recognize using google's recognizer from speechrecognition module
try:
content = self.srRecognizer.recognize_google(
audio, key=self.googleRecognizerKey
)
callback = {"status": "recognized", "msg": content}
except UnknownValueError:
callback = {"status": "unknown", "msg": "Unknown value."}
except Exception as e:
callback = {
"status": "error",
"msg": f"Failed to recognize audio. ({e})",
"exception": e
}
finally:
return callback
return {"status": "listening", "msg": "Appending frames."}
async def callback(self, *args, **kwargs) -> None:
await self._callback(*args, **kwargs, **self.callbackParams)
async def wakeUp(
self, followup: bool = False, emitCallback: bool = True, **kwargs
) -> None:
"""
        Wake up the speech recognizer.
        Parameters
        ----------
        `followup` : bool
            whether this wake-up is a follow-up interaction; the value is stored
            and passed through to the callback.
"""
self.woke = True
self._followup = followup
self.__prevSpeaking = time.time()
self.callbackParams = {"followup": followup, **kwargs}
if emitCallback:
await self.callback({"status": "woke", "msg": "woke"})
async def start(self, blocking: bool = False) -> None:
"""
Start the speech recognizer.
Parameters
----------
`blocking` : bool
if True, speech recognizer will block the program.
"""
if blocking:
return await self._start()
def f():
asyncio.set_event_loop(self.loop)
self.loop.run_until_complete(self._start())
osInterface.thread(f)
while not self._ready:
await asyncio.sleep(0.05)
async def wokeListen(self, data) -> bool:
"""
Starts listening for the provided wake words both using pvporcupine and vosk.
Vosk will not be used if self.disableVosk is True
"""
if not self.disableVosk:
# Get vosk information
self.vosk.AcceptWaveform(data)
partial = json.loads(self.vosk.PartialResult())
else:
partial = {"partial": ""}
# Get pvporcupine wake word information
p = -1
if self.porcupine:
p = self.porcupine.process(np.frombuffer(data, dtype=np.int16))
# Check if a wake word is recognized using both vosk and porcupine if porcupine is successfully initialized
if any(k in partial["partial"] for k in self.wakewords) or p >= 0:
if not self.disableVosk:
self.vosk.FinalResult()
return True
        # Constantly collect frames heard before the wake word
if len(self._frames["beforeWoke"]) > self.beforeWokeBufferLimit:
self._frames["beforeWoke"].pop(0)
self._frames["beforeWoke"].append(data)
if not self.disableVosk:
            # Prevent the active-listening partial result from growing too large; it would leak memory otherwise
if len(partial["partial"].split()) > 25:
self.vosk.FinalResult()
self.restartVosk()
vad = await self.vad.isSpeech(data)
if vad:
self.__prevSpeaking = time.time()
if not self.__prevSpeaking:
self.__prevSpeaking = time.time()
length = time.time() - self.__prevSpeaking
if length > 20.0:
if not self.disableVosk:
self.vosk.FinalResult()
self.restartVosk()
self.__prevSpeaking = time.time()
# Emit what the vosk recognizer is currently hearing
await self.callback(
{"status": "activeListeningPartial", "msg": partial["partial"]}
)
return False
async def _start(self) -> None:
with sd.RawInputStream(
samplerate=RATE,
blocksize=CHUNK,
device=self.device,
dtype="int16",
channels=CHANNELS,
callback=self.__callback,
):
self._ready = True
while not self.done:
data = self._q.get()
if self.listen:
# Wait for one of the wake words to be triggered
if not self.woke:
# There seems to be a bug wherein woke becomes True right after the speech is recognized, so we do a time check to prevent that. (pls fix) FIXME
woke = await self.wokeListen(data)
if (time.time() - self._lastRecognizedTime) < 1.8:
woke = False
# Now wake up the processor/recognizer
if woke and not self.woke:
await self.wakeUp()
if self.woke:
partial = None
if not self.disableVosk:
# Give vosk the speech data
self.vosk.AcceptWaveform(data)
# Realtime Partial data
partial = list(json.loads(self.vosk.PartialResult()).items())[
0
][1].strip()
if partial:
await self.callback(
{"status": "recognizedPartial", "msg": partial}
)
# Perform voice activity detection
vad = await self.vad.isSpeech(data)
if vad:
self.__count += 1
self.__prevSpeaking = time.time()
await self.callback(
{"status": "voiceActivity", "msg": "voiceActivity"}
)
# Perform previous voice activity checking.
if self.__prevSpeaking:
self.__length = time.time() - self.__prevSpeaking
comparator = self.__count == 0 or not partial
if self.disableVosk:
comparator = self.__count == 0
if comparator:
self._speechLength = self._startSpeechLength
else:
self._speechLength = self._realSpeechLength
# Current speech length has exceeded the provided speech length meaning we're done listening.
if self.__length > self._speechLength:
self.recognizeDone()
self._frames["afterWoke"].append(data)
recognized = await self.recognize()
await self.callback(recognized)
                        # Finally reset all the variables back to their defaults so that it's ready for the next time the listener gets woken.
if not self._speech:
self._reset()
async def callback(data, *args, **kwargs) -> None:
status = data.get("status", "listening")
if status == "recognizedPartial":
print(f"> {data['msg']} {recognizer._realSpeechLength}", end="\r")
if data["msg"].startswith("turn the lights off"):
recognizer.recognizeDone()
if data["msg"].endswith(("to", "two", "of", "and", "for")):
recognizer.multiplySpeechLength(2.8)
else:
recognizer.multiplySpeechLength(1)
if status == "recognized":
print(f"You: {data['msg']}")
if status == "woke":
print(f"\nI'm listening...")
if status == "activeListeningPartial":
print(f"Active: {data['msg']}", end="\r")
async def main(loop: asyncio.BaseEventLoop) -> None:
global recognizer
recognizer = SpeechRecognizer(
["jarvis"],
[1.0],
osInterface.joinPath("models/vad.h5"),
0.9,
osInterface.joinPath("models/vosk"),
osInterface.joinPath(".tmp/cache.wav"),
callback,
loop,
speechLengths=(5.0, 1.2),
offline=False,
disableVosk=True
)
await recognizer.start(blocking=True)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(main(loop))
except KeyboardInterrupt:
loop.stop()
| []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
encode_test.go | package yaml_test
import (
"bytes"
"fmt"
"math"
"strconv"
"strings"
"time"
"net"
"os"
. "gopkg.in/check.v1"
"gopkg.in/yaml.v2"
)
type jsonNumberT string
func (j jsonNumberT) Int64() (int64, error) {
val, err := strconv.Atoi(string(j))
if err != nil {
return 0, err
}
return int64(val), nil
}
func (j jsonNumberT) Float64() (float64, error) {
return strconv.ParseFloat(string(j), 64)
}
func (j jsonNumberT) String() string {
return string(j)
}
var marshalIntTest = 123
var marshalTests = []struct {
value interface{}
data string
}{
{
nil,
"null\n",
}, {
(*marshalerType)(nil),
"null\n",
}, {
&struct{}{},
"{}\n",
}, {
map[string]string{"v": "hi"},
"v: hi\n",
}, {
map[string]interface{}{"v": "hi"},
"v: hi\n",
}, {
map[string]string{"v": "true"},
"v: \"true\"\n",
}, {
map[string]string{"v": "false"},
"v: \"false\"\n",
}, {
map[string]interface{}{"v": true},
"v: true\n",
}, {
map[string]interface{}{"v": false},
"v: false\n",
}, {
map[string]interface{}{"v": 10},
"v: 10\n",
}, {
map[string]interface{}{"v": -10},
"v: -10\n",
}, {
map[string]uint{"v": 42},
"v: 42\n",
}, {
map[string]interface{}{"v": int64(4294967296)},
"v: 4294967296\n",
}, {
map[string]int64{"v": int64(4294967296)},
"v: 4294967296\n",
}, {
map[string]uint64{"v": 4294967296},
"v: 4294967296\n",
}, {
map[string]interface{}{"v": "10"},
"v: \"10\"\n",
}, {
map[string]interface{}{"v": 0.1},
"v: 0.1\n",
}, {
map[string]interface{}{"v": float64(0.1)},
"v: 0.1\n",
}, {
map[string]interface{}{"v": float32(0.99)},
"v: 0.99\n",
}, {
map[string]interface{}{"v": -0.1},
"v: -0.1\n",
}, {
map[string]interface{}{"v": math.Inf(+1)},
"v: .inf\n",
}, {
map[string]interface{}{"v": math.Inf(-1)},
"v: -.inf\n",
}, {
map[string]interface{}{"v": math.NaN()},
"v: .nan\n",
}, {
map[string]interface{}{"v": nil},
"v: null\n",
}, {
map[string]interface{}{"v": ""},
"v: \"\"\n",
}, {
map[string][]string{"v": []string{"A", "B"}},
"v:\n- A\n- B\n",
}, {
map[string][]string{"v": []string{"A", "B\nC"}},
"v:\n- A\n- |-\n B\n C\n",
}, {
map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}},
"v:\n- A\n- 1\n- B:\n - 2\n - 3\n",
}, {
map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
"a:\n b: c\n",
}, {
map[string]interface{}{"a": "-"},
"a: '-'\n",
},
// Simple values.
{
&marshalIntTest,
"123\n",
},
// Structures
{
&struct{ Hello string }{"world"},
"hello: world\n",
}, {
&struct {
A struct {
B string
}
}{struct{ B string }{"c"}},
"a:\n b: c\n",
}, {
&struct {
A *struct {
B string
}
}{&struct{ B string }{"c"}},
"a:\n b: c\n",
}, {
&struct {
A *struct {
B string
}
}{},
"a: null\n",
}, {
&struct{ A int }{1},
"a: 1\n",
}, {
&struct{ A []int }{[]int{1, 2}},
"a:\n- 1\n- 2\n",
}, {
&struct{ A [2]int }{[2]int{1, 2}},
"a:\n- 1\n- 2\n",
}, {
&struct {
B int "a"
}{1},
"a: 1\n",
}, {
&struct{ A bool }{true},
"a: true\n",
},
// Conditional flag
{
&struct {
A int "a,omitempty"
B int "b,omitempty"
}{1, 0},
"a: 1\n",
}, {
&struct {
A int "a,omitempty"
B int "b,omitempty"
}{0, 0},
"{}\n",
}, {
&struct {
A *struct{ X, y int } "a,omitempty,flow"
}{&struct{ X, y int }{1, 2}},
"a: {x: 1}\n",
}, {
&struct {
A *struct{ X, y int } "a,omitempty,flow"
}{nil},
"{}\n",
}, {
&struct {
A *struct{ X, y int } "a,omitempty,flow"
}{&struct{ X, y int }{}},
"a: {x: 0}\n",
}, {
&struct {
A struct{ X, y int } "a,omitempty,flow"
}{struct{ X, y int }{1, 2}},
"a: {x: 1}\n",
}, {
&struct {
A struct{ X, y int } "a,omitempty,flow"
}{struct{ X, y int }{0, 1}},
"{}\n",
}, {
&struct {
A float64 "a,omitempty"
B float64 "b,omitempty"
}{1, 0},
"a: 1\n",
},
{
&struct {
T1 time.Time "t1,omitempty"
T2 time.Time "t2,omitempty"
T3 *time.Time "t3,omitempty"
T4 *time.Time "t4,omitempty"
}{
T2: time.Date(2018, 1, 9, 10, 40, 47, 0, time.UTC),
T4: newTime(time.Date(2098, 1, 9, 10, 40, 47, 0, time.UTC)),
},
"t2: 2018-01-09T10:40:47Z\nt4: 2098-01-09T10:40:47Z\n",
},
// Nil interface that implements Marshaler.
{
map[string]yaml.Marshaler{
"a": nil,
},
"a: null\n",
},
// Flow flag
{
&struct {
A []int "a,flow"
}{[]int{1, 2}},
"a: [1, 2]\n",
}, {
&struct {
A map[string]string "a,flow"
}{map[string]string{"b": "c", "d": "e"}},
"a: {b: c, d: e}\n",
}, {
&struct {
A struct {
B, D string
} "a,flow"
}{struct{ B, D string }{"c", "e"}},
"a: {b: c, d: e}\n",
},
// Unexported field
{
&struct {
u int
A int
}{0, 1},
"a: 1\n",
},
// Ignored field
{
&struct {
A int
B int "-"
}{1, 2},
"a: 1\n",
},
// Struct inlining
{
&struct {
A int
C inlineB `yaml:",inline"`
}{1, inlineB{2, inlineC{3}}},
"a: 1\nb: 2\nc: 3\n",
},
// Map inlining
{
&struct {
A int
C map[string]int `yaml:",inline"`
}{1, map[string]int{"b": 2, "c": 3}},
"a: 1\nb: 2\nc: 3\n",
},
// Duration
{
map[string]time.Duration{"a": 3 * time.Second},
"a: 3s\n",
},
// Issue #24: bug in map merging logic.
{
map[string]string{"a": "<foo>"},
"a: <foo>\n",
},
// Issue #34: marshal unsupported base 60 floats quoted for compatibility
// with old YAML 1.1 parsers.
{
map[string]string{"a": "1:1"},
"a: \"1:1\"\n",
},
// Binary data.
{
map[string]string{"a": "\x00"},
"a: \"\\0\"\n",
}, {
map[string]string{"a": "\x80\x81\x82"},
"a: !!binary gIGC\n",
}, {
map[string]string{"a": strings.Repeat("\x90", 54)},
"a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
},
// Ordered maps.
{
&yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}},
"b: 2\na: 1\nd: 4\nc: 3\nsub:\n e: 5\n",
},
// Encode unicode as utf-8 rather than in escaped form.
{
map[string]string{"a": "你好"},
"a: 你好\n",
},
// Support encoding.TextMarshaler.
{
map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)},
"a: 1.2.3.4\n",
},
// time.Time gets a timestamp tag.
{
map[string]time.Time{"a": time.Date(2015, 2, 24, 18, 19, 39, 0, time.UTC)},
"a: 2015-02-24T18:19:39Z\n",
},
{
map[string]*time.Time{"a": newTime(time.Date(2015, 2, 24, 18, 19, 39, 0, time.UTC))},
"a: 2015-02-24T18:19:39Z\n",
},
{
// This is confirmed to be properly decoded in Python (libyaml) without a timestamp tag.
map[string]time.Time{"a": time.Date(2015, 2, 24, 18, 19, 39, 123456789, time.FixedZone("FOO", -3*60*60))},
"a: 2015-02-24T18:19:39.123456789-03:00\n",
},
// Ensure timestamp-like strings are quoted.
{
map[string]string{"a": "2015-02-24T18:19:39Z"},
"a: \"2015-02-24T18:19:39Z\"\n",
},
// Ensure strings containing ": " are quoted (reported as PR #43, but not reproducible).
{
map[string]string{"a": "b: c"},
"a: 'b: c'\n",
},
// Containing hash mark ('#') in string should be quoted
{
map[string]string{"a": "Hello #comment"},
"a: 'Hello #comment'\n",
},
{
map[string]string{"a": "你好 #comment"},
"a: '你好 #comment'\n",
},
{
map[string]interface{}{"a": jsonNumberT("5")},
"a: 5\n",
},
{
map[string]interface{}{"a": jsonNumberT("100.5")},
"a: 100.5\n",
},
{
map[string]interface{}{"a": jsonNumberT("bogus")},
"a: bogus\n",
},
}
func (s *S) TestLineWrapping(c *C) {
var v = map[string]string{
"a": "abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890 abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890 ",
}
data, err := yaml.Marshal(v)
c.Assert(err, IsNil)
c.Assert(string(data), Equals,
"a: 'abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890 abcdefghijklmnopqrstuvwxyz\n" +
" ABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890 '\n")
// The API does not allow this process to be reversed as it's intended
// for migration only. v3 drops this method and instead offers more
// control on a per encoding basis.
yaml.FutureLineWrap()
data, err = yaml.Marshal(v)
c.Assert(err, IsNil)
c.Assert(string(data), Equals,
"a: 'abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890 abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890 '\n")
}
func (s *S) TestMarshal(c *C) {
defer os.Setenv("TZ", os.Getenv("TZ"))
os.Setenv("TZ", "UTC")
for i, item := range marshalTests {
c.Logf("test %d: %q", i, item.data)
data, err := yaml.Marshal(item.value)
c.Assert(err, IsNil)
c.Assert(string(data), Equals, item.data)
}
}
func (s *S) TestEncoderSingleDocument(c *C) {
for i, item := range marshalTests {
c.Logf("test %d. %q", i, item.data)
var buf bytes.Buffer
enc := yaml.NewEncoder(&buf)
err := enc.Encode(item.value)
c.Assert(err, Equals, nil)
err = enc.Close()
c.Assert(err, Equals, nil)
c.Assert(buf.String(), Equals, item.data)
}
}
func (s *S) TestEncoderMultipleDocuments(c *C) {
var buf bytes.Buffer
enc := yaml.NewEncoder(&buf)
err := enc.Encode(map[string]string{"a": "b"})
c.Assert(err, Equals, nil)
err = enc.Encode(map[string]string{"c": "d"})
c.Assert(err, Equals, nil)
err = enc.Close()
c.Assert(err, Equals, nil)
c.Assert(buf.String(), Equals, "a: b\n---\nc: d\n")
}
func (s *S) TestEncoderWriteError(c *C) {
enc := yaml.NewEncoder(errorWriter{})
err := enc.Encode(map[string]string{"a": "b"})
c.Assert(err, ErrorMatches, `yaml: write error: some write error`) // Data not flushed yet
}
type errorWriter struct{}
func (errorWriter) Write([]byte) (int, error) {
return 0, fmt.Errorf("some write error")
}
var marshalErrorTests = []struct {
value interface{}
error string
panic string
}{{
value: &struct {
B int
inlineB ",inline"
}{1, inlineB{2, inlineC{3}}},
panic: `Duplicated key 'b' in struct struct \{ B int; .*`,
}, {
value: &struct {
A int
B map[string]int ",inline"
}{1, map[string]int{"a": 2}},
panic: `Can't have key "a" in inlined map; conflicts with struct field`,
}}
func (s *S) TestMarshalErrors(c *C) {
for _, item := range marshalErrorTests {
if item.panic != "" {
c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic)
} else {
_, err := yaml.Marshal(item.value)
c.Assert(err, ErrorMatches, item.error)
}
}
}
func (s *S) TestMarshalTypeCache(c *C) {
var data []byte
var err error
func() {
type T struct{ A int }
data, err = yaml.Marshal(&T{})
c.Assert(err, IsNil)
}()
func() {
type T struct{ B int }
data, err = yaml.Marshal(&T{})
c.Assert(err, IsNil)
}()
c.Assert(string(data), Equals, "b: 0\n")
}
var marshalerTests = []struct {
data string
value interface{}
}{
{"_:\n hi: there\n", map[interface{}]interface{}{"hi": "there"}},
{"_:\n- 1\n- A\n", []interface{}{1, "A"}},
{"_: 10\n", 10},
{"_: null\n", nil},
{"_: BAR!\n", "BAR!"},
}
type marshalerType struct {
value interface{}
}
func (o marshalerType) MarshalText() ([]byte, error) {
panic("MarshalText called on type with MarshalYAML")
}
func (o marshalerType) MarshalYAML() (interface{}, error) {
return o.value, nil
}
type marshalerValue struct {
Field marshalerType "_"
}
func (s *S) TestMarshaler(c *C) {
for _, item := range marshalerTests {
obj := &marshalerValue{}
obj.Field.value = item.value
data, err := yaml.Marshal(obj)
c.Assert(err, IsNil)
c.Assert(string(data), Equals, string(item.data))
}
}
func (s *S) TestMarshalerWholeDocument(c *C) {
obj := &marshalerType{}
obj.value = map[string]string{"hello": "world!"}
data, err := yaml.Marshal(obj)
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "hello: world!\n")
}
type failingMarshaler struct{}
func (ft *failingMarshaler) MarshalYAML() (interface{}, error) {
return nil, failingErr
}
func (s *S) TestMarshalerError(c *C) {
_, err := yaml.Marshal(&failingMarshaler{})
c.Assert(err, Equals, failingErr)
}
func (s *S) TestSortedOutput(c *C) {
order := []interface{}{
false,
true,
1,
uint(1),
1.0,
1.1,
1.2,
2,
uint(2),
2.0,
2.1,
"",
".1",
".2",
".a",
"1",
"2",
"a!10",
"a/0001",
"a/002",
"a/3",
"a/10",
"a/11",
"a/0012",
"a/100",
"a~10",
"ab/1",
"b/1",
"b/01",
"b/2",
"b/02",
"b/3",
"b/03",
"b1",
"b01",
"b3",
"c2.10",
"c10.2",
"d1",
"d7",
"d7abc",
"d12",
"d12a",
"e2b",
"e4b",
"e21a",
}
m := make(map[interface{}]int)
for _, k := range order {
m[k] = 1
}
data, err := yaml.Marshal(m)
c.Assert(err, IsNil)
out := "\n" + string(data)
last := 0
for i, k := range order {
repr := fmt.Sprint(k)
if s, ok := k.(string); ok {
if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil {
repr = `"` + repr + `"`
}
}
index := strings.Index(out, "\n"+repr+":")
if index == -1 {
c.Fatalf("%#v is not in the output: %#v", k, out)
}
if index < last {
c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out)
}
last = index
}
}
func newTime(t time.Time) *time.Time {
return &t
}
| [
"\"TZ\""
]
| []
| [
"TZ"
]
| [] | ["TZ"] | go | 1 | 0 | |
infrastructure/client/cloudstorages/gcs.go | package cloudstorages
import (
"fmt"
"cloud.google.com/go/storage"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"io"
"io/ioutil"
"net/http"
"os"
log "github.com/howood/imagereductor/infrastructure/logger"
)
// GcsBucketUploadfiles is bucket to upload
var GcsBucketUploadfiles = os.Getenv("GCS_BUKET")
// GcsProjectID is GCS Project ID
var GcsProjectID = os.Getenv("GcsProjectID")
// GCSInstance struct
type GCSInstance struct {
client *storage.Client
ctx context.Context
}
// NewGCS creates a new GCSInstance
func NewGCS(ctx context.Context) *GCSInstance {
log.Debug(ctx, "----GCS DNS----")
var I *GCSInstance
client, err := storage.NewClient(ctx)
if err != nil {
return nil
}
I = &GCSInstance{
client: client,
ctx: ctx,
}
I.init()
return I
}
func (gcsinstance *GCSInstance) init() {
if _, exitstserr := gcsinstance.client.Bucket(GcsBucketUploadfiles).Attrs(gcsinstance.ctx); exitstserr != nil {
if err := gcsinstance.client.Bucket(GcsBucketUploadfiles).Create(gcsinstance.ctx, GcsProjectID, nil); err != nil {
log.Debug(gcsinstance.ctx, "***CreateError****")
log.Debug(gcsinstance.ctx, err)
}
}
}
// Put puts to storage
func (gcsinstance *GCSInstance) Put(bucket string, path string, file io.ReadSeeker) error {
bytes, err := ioutil.ReadAll(file)
if err != nil {
return err
}
mimetype := http.DetectContentType(bytes)
object := gcsinstance.client.Bucket(bucket).Object(path)
writer := object.NewWriter(gcsinstance.ctx)
writer.ContentType = mimetype
writer.CacheControl = "no-cache"
defer writer.Close()
if _, err = writer.Write(bytes); err != nil {
return err
}
return nil
}
// Get gets from storage
func (gcsinstance *GCSInstance) Get(bucket string, key string) (string, []byte, error) {
log.Debug(gcsinstance.ctx, bucket)
log.Debug(gcsinstance.ctx, key)
reader, err := gcsinstance.client.Bucket(bucket).Object(key).NewReader(gcsinstance.ctx)
if err != nil {
return "", nil, err
}
defer reader.Close()
contenttype := reader.ContentType()
	// Read the object's contents from Cloud Storage
response, err := ioutil.ReadAll(reader)
if err != nil {
return "", nil, err
}
return contenttype, response, nil
}
// List gets a list of object keys from storage
func (gcsinstance *GCSInstance) List(bucket string, key string) ([]string, error) {
log.Debug(gcsinstance.ctx, fmt.Sprintf("ListDirectory %s : %s", bucket, key))
query := &storage.Query{Prefix: key}
var names []string
it := gcsinstance.client.Bucket(bucket).Objects(gcsinstance.ctx, query)
for {
attrs, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
return names, err
}
names = append(names, attrs.Name)
}
return names, nil
}
// Delete deletes from storage
func (gcsinstance *GCSInstance) Delete(bucket string, key string) error {
err := gcsinstance.client.Bucket(bucket).Object(key).Delete(gcsinstance.ctx)
return err
}
| [
"\"GCS_BUKET\"",
"\"GcsProjectID\""
]
| []
| [
"GcsProjectID",
"GCS_BUKET"
]
| [] | ["GcsProjectID", "GCS_BUKET"] | go | 2 | 0 | |
src/picalc.go | package main
import (
"fmt"
"log"
"net/http"
"os"
"strconv"
)
// Calculate pi using Gregory-Leibniz series: (4/1) - (4/3) + (4/5) - (4/7) + (4/9) - (4/11) + (4/13) - (4/15) ...
func calculatePi(iterations int) float64 {
var result float64 = 0.0
var sign float64 = 1.0
var denominator float64 = 1.0
for i := 0; i < iterations; i++ {
result = result + (sign * 4/denominator)
denominator = denominator + 2
sign = -sign
}
return result
}
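// Convergence note: by the alternating-series bound, the partial sum is within
// 4/(2*iterations+1) of pi, so e.g. calculatePi(1000) is accurate only to about
// 0.002; the error shrinks roughly like 1/iterations.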
func handler(w http.ResponseWriter, r *http.Request) {
log.Print("Pi calculator received a request.")
iterations, err := strconv.Atoi(r.URL.Query()["iterations"][0])
if err != nil {
fmt.Fprintf(w, "iterations parameter not valid\n")
return
}
fmt.Fprintf(w, "%.10f\n", calculatePi(iterations))
}
func main() {
log.Print("Pi calculator started.")
http.HandleFunc("/", handler)
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
log.Fatal(http.ListenAndServe(fmt.Sprintf(":%s", port), nil))
}
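// Example local invocation (a sketch assuming the default port of 8080):
//
//	go run picalc.go &
//	curl "http://localhost:8080/?iterations=100000"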
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
hdWebApp/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hdWebApp.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
tf_agents/experimental/examples/sac/haarnoja18/sac_train_eval.py | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Train and Eval SAC.
All hyperparameters come from the SAC paper
https://arxiv.org/pdf/1812.05905.pdf
"""
import functools
import os
from absl import app
from absl import flags
from absl import logging
import gin
import reverb
import tensorflow as tf
from tf_agents.agents.sac import sac_agent
from tf_agents.agents.sac import tanh_normal_projection_network
from tf_agents.environments import suite_mujoco
from tf_agents.keras_layers import inner_reshape
from tf_agents.metrics import py_metrics
from tf_agents.networks import nest_map
from tf_agents.networks import sequential
from tf_agents.policies import greedy_policy
from tf_agents.policies import py_tf_eager_policy
from tf_agents.policies import random_py_policy
from tf_agents.replay_buffers import reverb_replay_buffer
from tf_agents.replay_buffers import reverb_utils
from tf_agents.train import actor
from tf_agents.train import learner
from tf_agents.train import triggers
from tf_agents.train.utils import spec_utils
from tf_agents.train.utils import train_utils
FLAGS = flags.FLAGS
flags.DEFINE_string('root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),
'Root directory for writing logs/summaries/checkpoints.')
flags.DEFINE_integer(
'reverb_port', None,
'Port for reverb server, if None, use a randomly chosen unused port.')
flags.DEFINE_integer('num_iterations', 3000000,
'Total number train/eval iterations to perform.')
flags.DEFINE_integer(
'eval_interval', 10000,
'Number of train steps between evaluations. Set to 0 to skip.')
flags.DEFINE_multi_string('gin_file', None, 'Paths to the gin-config files.')
flags.DEFINE_multi_string('gin_bindings', None, 'Gin binding parameters.')
dense = functools.partial(
tf.keras.layers.Dense,
activation=tf.keras.activations.relu,
kernel_initializer='glorot_uniform')
def create_fc_network(layer_units):
return sequential.Sequential([dense(num_units) for num_units in layer_units])
def create_identity_layer():
return tf.keras.layers.Lambda(lambda x: x)
def create_sequential_critic_network(obs_fc_layer_units,
action_fc_layer_units,
joint_fc_layer_units):
"""Create a sequential critic network."""
# Split the inputs into observations and actions.
def split_inputs(inputs):
return {'observation': inputs[0], 'action': inputs[1]}
# Create an observation network.
obs_network = (create_fc_network(obs_fc_layer_units) if obs_fc_layer_units
else create_identity_layer())
# Create an action network.
action_network = (create_fc_network(action_fc_layer_units)
if action_fc_layer_units else create_identity_layer())
# Create a joint network.
joint_network = (create_fc_network(joint_fc_layer_units)
if joint_fc_layer_units else create_identity_layer())
# Final layer.
value_layer = tf.keras.layers.Dense(1, kernel_initializer='glorot_uniform')
return sequential.Sequential([
tf.keras.layers.Lambda(split_inputs),
nest_map.NestMap({
'observation': obs_network,
'action': action_network
}),
nest_map.NestFlatten(),
tf.keras.layers.Concatenate(),
joint_network,
value_layer,
inner_reshape.InnerReshape(current_shape=[1], new_shape=[])
], name='sequential_critic')
class _TanhNormalProjectionNetworkWrapper(
tanh_normal_projection_network.TanhNormalProjectionNetwork):
"""Wrapper to pass predefined `outer_rank` to underlying projection net."""
def __init__(self, sample_spec, predefined_outer_rank=1):
super(_TanhNormalProjectionNetworkWrapper, self).__init__(sample_spec)
self.predefined_outer_rank = predefined_outer_rank
def call(self, inputs, network_state=(), **kwargs):
kwargs['outer_rank'] = self.predefined_outer_rank
if 'step_type' in kwargs:
del kwargs['step_type']
return super(_TanhNormalProjectionNetworkWrapper,
self).call(inputs, **kwargs)
def create_sequential_actor_network(actor_fc_layers, action_tensor_spec):
"""Create a sequential actor network."""
def tile_as_nest(non_nested_output):
return tf.nest.map_structure(lambda _: non_nested_output,
action_tensor_spec)
return sequential.Sequential(
[dense(num_units) for num_units in actor_fc_layers] +
[tf.keras.layers.Lambda(tile_as_nest)] + [
nest_map.NestMap(
tf.nest.map_structure(_TanhNormalProjectionNetworkWrapper,
action_tensor_spec))
])
@gin.configurable
def train_eval(
root_dir,
env_name='HalfCheetah-v2',
# Training params
initial_collect_steps=10000,
num_iterations=3200000,
actor_fc_layers=(256, 256),
critic_obs_fc_layers=None,
critic_action_fc_layers=None,
critic_joint_fc_layers=(256, 256),
# Agent params
batch_size=256,
actor_learning_rate=3e-4,
critic_learning_rate=3e-4,
alpha_learning_rate=3e-4,
gamma=0.99,
target_update_tau=0.005,
target_update_period=1,
reward_scale_factor=0.1,
# Replay params
reverb_port=None,
replay_capacity=1000000,
# Others
policy_save_interval=10000,
replay_buffer_save_interval=100000,
eval_interval=10000,
eval_episodes=30,
debug_summaries=False,
summarize_grads_and_vars=False):
"""Trains and evaluates SAC."""
logging.info('Training SAC on: %s', env_name)
collect_env = suite_mujoco.load(env_name)
eval_env = suite_mujoco.load(env_name)
_, action_tensor_spec, time_step_tensor_spec = (
spec_utils.get_tensor_specs(collect_env))
train_step = train_utils.create_train_step()
actor_net = create_sequential_actor_network(
actor_fc_layers=actor_fc_layers, action_tensor_spec=action_tensor_spec)
critic_net = create_sequential_critic_network(
obs_fc_layer_units=critic_obs_fc_layers,
action_fc_layer_units=critic_action_fc_layers,
joint_fc_layer_units=critic_joint_fc_layers)
agent = sac_agent.SacAgent(
time_step_tensor_spec,
action_tensor_spec,
actor_network=actor_net,
critic_network=critic_net,
actor_optimizer=tf.keras.optimizers.Adam(
learning_rate=actor_learning_rate),
critic_optimizer=tf.keras.optimizers.Adam(
learning_rate=critic_learning_rate),
alpha_optimizer=tf.keras.optimizers.Adam(
learning_rate=alpha_learning_rate),
target_update_tau=target_update_tau,
target_update_period=target_update_period,
td_errors_loss_fn=tf.math.squared_difference,
gamma=gamma,
reward_scale_factor=reward_scale_factor,
gradient_clipping=None,
debug_summaries=debug_summaries,
summarize_grads_and_vars=summarize_grads_and_vars,
train_step_counter=train_step)
agent.initialize()
table_name = 'uniform_table'
table = reverb.Table(
table_name,
max_size=replay_capacity,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
rate_limiter=reverb.rate_limiters.MinSize(1))
reverb_checkpoint_dir = os.path.join(root_dir, learner.TRAIN_DIR,
learner.REPLAY_BUFFER_CHECKPOINT_DIR)
reverb_checkpointer = reverb.platform.checkpointers_lib.DefaultCheckpointer(
path=reverb_checkpoint_dir)
reverb_server = reverb.Server([table],
port=reverb_port,
checkpointer=reverb_checkpointer)
reverb_replay = reverb_replay_buffer.ReverbReplayBuffer(
agent.collect_data_spec,
sequence_length=2,
table_name=table_name,
local_server=reverb_server)
rb_observer = reverb_utils.ReverbAddTrajectoryObserver(
reverb_replay.py_client,
table_name,
sequence_length=2,
stride_length=1)
dataset = reverb_replay.as_dataset(
sample_batch_size=batch_size, num_steps=2).prefetch(50)
experience_dataset_fn = lambda: dataset
saved_model_dir = os.path.join(root_dir, learner.POLICY_SAVED_MODEL_DIR)
env_step_metric = py_metrics.EnvironmentSteps()
learning_triggers = [
triggers.PolicySavedModelTrigger(
saved_model_dir,
agent,
train_step,
interval=policy_save_interval,
metadata_metrics={triggers.ENV_STEP_METADATA_KEY: env_step_metric}),
triggers.ReverbCheckpointTrigger(
train_step,
interval=replay_buffer_save_interval,
reverb_client=reverb_replay.py_client),
# TODO(b/165023684): Add SIGTERM handler to checkpoint before preemption.
triggers.StepPerSecondLogTrigger(train_step, interval=1000),
]
agent_learner = learner.Learner(
root_dir,
train_step,
agent,
experience_dataset_fn,
triggers=learning_triggers)
random_policy = random_py_policy.RandomPyPolicy(
collect_env.time_step_spec(), collect_env.action_spec())
initial_collect_actor = actor.Actor(
collect_env,
random_policy,
train_step,
steps_per_run=initial_collect_steps,
observers=[rb_observer])
logging.info('Doing initial collect.')
initial_collect_actor.run()
tf_collect_policy = agent.collect_policy
collect_policy = py_tf_eager_policy.PyTFEagerPolicy(
tf_collect_policy, use_tf_function=True)
collect_actor = actor.Actor(
collect_env,
collect_policy,
train_step,
steps_per_run=1,
metrics=actor.collect_metrics(10),
summary_dir=os.path.join(root_dir, learner.TRAIN_DIR),
observers=[rb_observer, env_step_metric])
tf_greedy_policy = greedy_policy.GreedyPolicy(agent.policy)
eval_greedy_policy = py_tf_eager_policy.PyTFEagerPolicy(
tf_greedy_policy, use_tf_function=True)
eval_actor = actor.Actor(
eval_env,
eval_greedy_policy,
train_step,
episodes_per_run=eval_episodes,
metrics=actor.eval_metrics(eval_episodes),
summary_dir=os.path.join(root_dir, 'eval'),
)
if eval_interval:
logging.info('Evaluating.')
eval_actor.run_and_log()
logging.info('Training.')
for _ in range(num_iterations):
collect_actor.run()
agent_learner.run(iterations=1)
if eval_interval and agent_learner.train_step_numpy % eval_interval == 0:
logging.info('Evaluating.')
eval_actor.run_and_log()
rb_observer.close()
reverb_server.stop()
def main(_):
logging.set_verbosity(logging.INFO)
tf.compat.v1.enable_v2_behavior()
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_bindings)
train_eval(
FLAGS.root_dir,
num_iterations=FLAGS.num_iterations,
reverb_port=FLAGS.reverb_port,
eval_interval=FLAGS.eval_interval)
if __name__ == '__main__':
flags.mark_flag_as_required('root_dir')
app.run(main)
| []
| []
| [
"TEST_UNDECLARED_OUTPUTS_DIR"
]
| [] | ["TEST_UNDECLARED_OUTPUTS_DIR"] | python | 1 | 0 | |
smartsuites-server/src/test/java/com/smartsuites/WebDriverManager.java | /*
* Copyright (c) 2017. 联思智云(北京)科技有限公司. All rights reserved.
*/
package com.smartsuites;
import org.apache.commons.io.FileUtils;
import org.openqa.selenium.By;
import org.openqa.selenium.TimeoutException;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.chrome.ChromeDriver;
import org.openqa.selenium.firefox.FirefoxBinary;
import org.openqa.selenium.firefox.FirefoxDriver;
import org.openqa.selenium.firefox.FirefoxProfile;
import org.openqa.selenium.safari.SafariDriver;
import org.openqa.selenium.support.ui.ExpectedCondition;
import org.openqa.selenium.support.ui.WebDriverWait;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.net.URL;
import java.util.concurrent.TimeUnit;
import static org.junit.Assert.fail;
public class WebDriverManager {
public final static Logger LOG = LoggerFactory.getLogger(WebDriverManager.class);
private static String downLoadsDir = "";
public static WebDriver getWebDriver() {
WebDriver driver = null;
if (driver == null) {
try {
FirefoxBinary ffox = new FirefoxBinary();
if ("true".equals(System.getenv("TRAVIS"))) {
ffox.setEnvironmentProperty("DISPLAY", ":99"); // xvfb is supposed to
// run with DISPLAY 99
}
int firefoxVersion = WebDriverManager.getFirefoxVersion();
LOG.info("Firefox version " + firefoxVersion + " detected");
downLoadsDir = FileUtils.getTempDirectory().toString();
String tempPath = downLoadsDir + "/firebug/";
downloadFireBug(firefoxVersion, tempPath);
final String firebugPath = tempPath + "firebug.xpi";
final String firepathPath = tempPath + "firepath.xpi";
FirefoxProfile profile = new FirefoxProfile();
profile.setPreference("browser.download.folderList", 2);
profile.setPreference("browser.download.dir", downLoadsDir);
profile.setPreference("browser.helperApps.alwaysAsk.force", false);
profile.setPreference("browser.download.manager.showWhenStarting", false);
profile.setPreference("browser.download.manager.showAlertOnComplete", false);
profile.setPreference("browser.download.manager.closeWhenDone", true);
profile.setPreference("app.update.auto", false);
profile.setPreference("app.update.enabled", false);
profile.setPreference("dom.max_script_run_time", 0);
profile.setPreference("dom.max_chrome_script_run_time", 0);
profile.setPreference("browser.helperApps.neverAsk.saveToDisk", "application/x-ustar,application/octet-stream,application/zip,text/csv,text/plain");
profile.setPreference("network.proxy.type", 0);
// Commenting out installing extensions. See ZEPPELIN-2962.
// profile.addExtension(new File(firebugPath));
// profile.addExtension(new File(firepathPath));
driver = new FirefoxDriver(ffox, profile);
} catch (Exception e) {
LOG.error("Exception in WebDriverManager while FireFox Driver ", e);
}
}
if (driver == null) {
try {
driver = new ChromeDriver();
} catch (Exception e) {
LOG.error("Exception in WebDriverManager while ChromeDriver ", e);
}
}
if (driver == null) {
try {
driver = new SafariDriver();
} catch (Exception e) {
LOG.error("Exception in WebDriverManager while SafariDriver ", e);
}
}
String url;
if (System.getenv("url") != null) {
url = System.getenv("url");
} else {
url = "http://localhost:8080";
}
long start = System.currentTimeMillis();
boolean loaded = false;
driver.manage().timeouts().implicitlyWait(AbstractZeppelinIT.MAX_IMPLICIT_WAIT,
TimeUnit.SECONDS);
driver.get(url);
while (System.currentTimeMillis() - start < 60 * 1000) {
// wait for page load
try {
(new WebDriverWait(driver, 30)).until(new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver d) {
return d.findElement(By.xpath("//i[@uib-tooltip='WebSocket Connected']"))
.isDisplayed();
}
});
loaded = true;
break;
} catch (TimeoutException e) {
LOG.info("Exception in WebDriverManager while WebDriverWait ", e);
driver.navigate().to(url);
}
}
if (loaded == false) {
fail();
}
driver.manage().window().maximize();
return driver;
}
private static void downloadFireBug(int firefoxVersion, String tempPath) {
String firebugUrlString = null;
if (firefoxVersion < 23)
firebugUrlString = "http://getfirebug.com/releases/firebug/1.11/firebug-1.11.4.xpi";
else if (firefoxVersion >= 23 && firefoxVersion < 30)
firebugUrlString = "http://getfirebug.com/releases/firebug/1.12/firebug-1.12.8.xpi";
else if (firefoxVersion >= 30 && firefoxVersion < 33)
firebugUrlString = "http://getfirebug.com/releases/firebug/2.0/firebug-2.0.7.xpi";
else if (firefoxVersion >= 33)
firebugUrlString = "http://getfirebug.com/releases/firebug/2.0/firebug-2.0.17.xpi";
LOG.info("firebug version: " + firefoxVersion + ", will be downloaded to " + tempPath);
try {
File firebugFile = new File(tempPath + "firebug.xpi");
URL firebugUrl = new URL(firebugUrlString);
if (!firebugFile.exists()) {
FileUtils.copyURLToFile(firebugUrl, firebugFile);
}
File firepathFile = new File(tempPath + "firepath.xpi");
URL firepathUrl = new URL("https://addons.cdn.mozilla.net/user-media/addons/11900/firepath-0.9.7.1-fx.xpi");
if (!firepathFile.exists()) {
FileUtils.copyURLToFile(firepathUrl, firepathFile);
}
} catch (IOException e) {
LOG.error("Download of firebug version: " + firefoxVersion + ", falied in path " + tempPath);
}
LOG.info("Download of firebug version: " + firefoxVersion + ", successful");
}
public static int getFirefoxVersion() {
try {
String firefoxVersionCmd = "firefox -v";
if (System.getProperty("os.name").startsWith("Mac OS")) {
firefoxVersionCmd = "/Applications/Firefox.app/Contents/MacOS/" + firefoxVersionCmd;
}
String versionString = (String) CommandExecutor.executeCommandLocalHost(firefoxVersionCmd, false, ProcessData.Types_Of_Data.OUTPUT);
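// e.g. a versionString of "Mozilla Firefox 68.0.1" yields 68 (the version shown is only an example)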
return Integer.valueOf(versionString.replaceAll("Mozilla Firefox", "").trim().substring(0, 2));
} catch (Exception e) {
LOG.error("Exception in WebDriverManager while getWebDriver ", e);
return -1;
}
}
}
| [
"\"TRAVIS\"",
"\"url\"",
"\"url\""
]
| []
| [
"url",
"TRAVIS"
]
| [] | ["url", "TRAVIS"] | java | 2 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dojo.settings.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
server/graph/resolver.go | package graph
//go:generate go run github.com/99designs/gqlgen
import (
"os"
"sync"
"github.com/alexhans1/certainty_poker/graph/generated"
"github.com/alexhans1/certainty_poker/graph/model"
"github.com/go-redis/redis"
"github.com/sirupsen/logrus"
)
// This file will not be regenerated automatically.
//
// It serves as dependency injection for your app, add any dependencies you require here.
type Resolver struct {
games map[string]*model.Game
gameChannels map[string]map[string]chan *model.Game
mutex sync.Mutex
redisClient *redis.Client
logger *logrus.Logger
}
func NewResolver() generated.Config {
// logger
var log = logrus.New()
// redis client
redisClient := redis.NewClient(&redis.Options{
Addr: os.Getenv("REDISCLOUD_URL"),
Password: os.Getenv("REDISCLOUD_PASSWORD"),
DB: 0,
})
pong, err := redisClient.Ping().Result()
log.Println(pong, err)
log.SetFormatter(&logrus.JSONFormatter{})
return generated.Config{
Resolvers: &Resolver{
games: map[string]*model.Game{},
gameChannels: map[string]map[string]chan *model.Game{},
mutex: sync.Mutex{},
redisClient: redisClient,
logger: log,
},
}
}
func updateGameChannel(r *mutationResolver, game *model.Game) {
r.mutex.Lock()
for gameID, channels := range r.gameChannels {
if gameID == game.ID {
for _, gameChannel := range channels {
gameChannel <- game
}
}
}
r.mutex.Unlock()
}
| [
"\"REDISCLOUD_URL\"",
"\"REDISCLOUD_PASSWORD\""
]
| []
| [
"REDISCLOUD_PASSWORD",
"REDISCLOUD_URL"
]
| [] | ["REDISCLOUD_PASSWORD", "REDISCLOUD_URL"] | go | 2 | 0 | |
test/functional/test_framework/test_framework.py | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
import configparser
from enum import Enum
import logging
import argparse
import os
import pdb
import shutil
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .mininode import NetworkThread
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
p2p_port,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
TMPDIR_PREFIX = "breekcoin_func_test_"
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
class BitcoinTestMetaClass(type):
"""Metaclass for BitcoinTestFramework.
Ensures that any attempt to register a subclass of `BitcoinTestFramework`
adheres to a standard whereby the subclass overrides `set_test_params` and
`run_test` but DOES NOT override either `__init__` or `main`. If any of
those standards are violated, a ``TypeError`` is raised."""
def __new__(cls, clsname, bases, dct):
if not clsname == 'BitcoinTestFramework':
if not ('run_test' in dct and 'set_test_params' in dct):
raise TypeError("BitcoinTestFramework subclasses must override "
"'run_test' and 'set_test_params'")
if '__init__' in dct or 'main' in dct:
raise TypeError("BitcoinTestFramework subclasses may not override "
"'__init__' or 'main'")
return super().__new__(cls, clsname, bases, dct)
class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
"""Base class for a breekcoin test script.
Individual breekcoin test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
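# Illustrative sketch only (not part of the framework): a minimal subclass
# would provide just the two required overrides, for example:
#
#   class ExampleTest(BitcoinTestFramework):
#       def set_test_params(self):
#           self.num_nodes = 2
#           self.setup_clean_chain = True
#
#       def run_test(self):
#           self.nodes[0].generate(101)
#           self.sync_all()
#
# The class name above is hypothetical; real tests live under test/functional.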
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.setup_clean_chain = False
self.nodes = []
self.network_thread = None
self.rpc_timeout = 60 # Wait for up to 60 seconds for the RPC server to respond
self.supports_cli = False
self.bind_to_localhost_only = True
self.set_test_params()
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
parser = argparse.ArgumentParser(usage="%(prog)s [options]")
parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave breekcoinds and test.* datadir on exit or error")
parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop breekcoinds after the test execution")
parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs (default: %(default)s)")
parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int,
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_argument("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_argument("--configfile", dest="configfile",
default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../config.ini"),
help="Location of the test framework config file (default: %(default)s)")
parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_argument("--usecli", dest="usecli", default=False, action="store_true",
help="use breekcoin-cli instead of RPC for all commands")
parser.add_argument("--perf", dest="perf", default=False, action="store_true",
help="profile running nodes with perf for the duration of the test")
self.add_options(parser)
self.options = parser.parse_args()
PortSeed.n = self.options.port_seed
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
self.config = config
self.options.bitcoind = os.getenv("BREEKCOIND", default=config["environment"]["BUILDDIR"] + '/src/breekcoind' + config["environment"]["EXEEXT"])
self.options.bitcoincli = os.getenv("BREEKCOINCLI", default=config["environment"]["BUILDDIR"] + '/src/breekcoin-cli' + config["environment"]["EXEEXT"])
os.environ['PATH'] = os.pathsep.join([
os.path.join(config['environment']['BUILDDIR'], 'src'),
os.path.join(config['environment']['BUILDDIR'], 'src', 'qt'),
os.environ['PATH']
])
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX)
self._start_logging()
self.log.debug('Setting up network thread')
self.network_thread = NetworkThread()
self.network_thread.start()
success = TestStatus.FAILED
try:
if self.options.usecli:
if not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.skip_if_no_cli()
self.skip_test_if_missing_module()
self.setup_chain()
self.setup_network()
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
self.log.debug('Closing down network thread')
self.network_thread.close()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: breekcoinds were not stopped and may still be running")
should_clean_up = (
not self.options.nocleanup and
not self.options.noshutdown and
success != TestStatus.FAILED and
not self.options.perf
)
if should_clean_up:
self.log.info("Cleaning up {} on exit".format(self.options.tmpdir))
cleanup_tree_on_exit = True
elif self.options.perf:
self.log.warning("Not cleaning up dir {} due to perf data".format(self.options.tmpdir))
cleanup_tree_on_exit = False
else:
self.log.warning("Not cleaning up dir {}".format(self.options.tmpdir))
cleanup_tree_on_exit = False
if success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
exit_code = TEST_EXIT_FAILED
logging.shutdown()
if cleanup_tree_on_exit:
shutil.rmtree(self.options.tmpdir)
sys.exit(exit_code)
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def skip_test_if_missing_module(self):
"""Override this method to skip a test if a module is not compiled"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
self.import_deterministic_coinbase_privkeys()
if not self.setup_clean_chain:
for n in self.nodes:
assert_equal(n.getblockchaininfo()["blocks"], 199)
# To ensure that all nodes are out of IBD, the most recent block
# must have a timestamp not too old (see IsInitialBlockDownload()).
self.log.debug('Generate a block with current time')
block_hash = self.nodes[0].generate(1)[0]
block = self.nodes[0].getblock(blockhash=block_hash, verbosity=0)
for n in self.nodes:
n.submitblock(block)
chain_info = n.getblockchaininfo()
assert_equal(chain_info["blocks"], 200)
assert_equal(chain_info["initialblockdownload"], False)
def import_deterministic_coinbase_privkeys(self):
for n in self.nodes:
try:
n.getwalletinfo()
except JSONRPCException as e:
assert str(e).startswith('Method not found')
continue
n.importprivkey(privkey=n.get_deterministic_priv_key().key, label='coinbase')
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, *, rpchost=None, binary=None):
"""Instantiate TestNode objects.
Should only be called once after the nodes have been specified in
set_test_params()."""
if self.bind_to_localhost_only:
extra_confs = [["bind=127.0.0.1"]] * num_nodes
else:
extra_confs = [[]] * num_nodes
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [self.options.bitcoind] * num_nodes
assert_equal(len(extra_confs), num_nodes)
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(
i,
get_datadir_path(self.options.tmpdir, i),
rpchost=rpchost,
timewait=self.rpc_timeout,
bitcoind=binary[i],
bitcoin_cli=self.options.bitcoincli,
coverage_dir=self.options.coveragedir,
cwd=self.options.tmpdir,
extra_conf=extra_confs[i],
extra_args=extra_args[i],
use_cli=self.options.usecli,
start_perf=self.options.perf,
))
def start_node(self, i, *args, **kwargs):
"""Start a breekcoind"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple breekcoinds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i, expected_stderr='', wait=0):
"""Stop a breekcoind test node"""
self.nodes[i].stop_node(expected_stderr, wait=wait)
self.nodes[i].wait_until_stopped()
def stop_nodes(self, wait=0):
"""Stop multiple breekcoind test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node(wait=wait)
for node in self.nodes:
# Wait for nodes to stop
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log', encoding='utf-8')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stdout. The log level is controlled by --loglevel (default: INFO).
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 199-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache."""
assert self.num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
shutil.rmtree(get_datadir_path(self.options.cachedir, i))
# Create cache directories, run bitcoinds:
for i in range(MAX_NODES):
datadir = initialize_datadir(self.options.cachedir, i)
args = [self.options.bitcoind, "-datadir=" + datadir, '-disablewallet']
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
self.nodes.append(TestNode(
i,
get_datadir_path(self.options.cachedir, i),
extra_conf=["bind=127.0.0.1"],
extra_args=[],
rpchost=None,
timewait=self.rpc_timeout,
bitcoind=self.options.bitcoind,
bitcoin_cli=self.options.bitcoincli,
coverage_dir=None,
cwd=self.options.tmpdir,
))
self.nodes[i].args = args
self.start_node(i)
# Wait for RPC connections to be ready
for node in self.nodes:
node.wait_for_rpc_connection()
# Create a 199-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# The 4th node gets only 24 immature blocks so that the very last
# block in the cache does not age too much (have an old tip age).
# This is needed so that we are out of IBD when the test starts,
# see the tip age check in IsInitialBlockDownload().
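# In total this produces 7 * 25 + 24 = 199 blocks.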
for i in range(8):
self.nodes[0].generatetoaddress(25 if i != 7 else 24, self.nodes[i % 4].get_deterministic_priv_key().address)
sync_blocks(self.nodes)
for n in self.nodes:
assert_equal(n.getblockchaininfo()["blocks"], 199)
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
def cache_path(n, *paths):
return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)
for i in range(MAX_NODES):
os.rmdir(cache_path(i, 'wallets')) # Remove empty wallets dir
for entry in os.listdir(cache_path(i)):
if entry not in ['chainstate', 'blocks']:
os.remove(cache_path(i, entry))
for i in range(self.num_nodes):
from_dir = get_datadir_path(self.options.cachedir, i)
to_dir = get_datadir_path(self.options.tmpdir, i)
shutil.copytree(from_dir, to_dir)
initialize_datadir(self.options.tmpdir, i) # Overwrite port/rpcport in bitcoin.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
def skip_if_no_py3_zmq(self):
"""Attempt to import the zmq package and skip the test if the import fails."""
try:
import zmq # noqa
except ImportError:
raise SkipTest("python3-zmq module not available.")
def skip_if_no_bitcoind_zmq(self):
"""Skip the running test if bitcoind has not been compiled with zmq support."""
if not self.is_zmq_compiled():
raise SkipTest("breekcoind has not been built with zmq enabled.")
def skip_if_no_wallet(self):
"""Skip the running test if wallet has not been compiled."""
if not self.is_wallet_compiled():
raise SkipTest("wallet has not been compiled.")
def skip_if_no_cli(self):
"""Skip the running test if bitcoin-cli has not been compiled."""
if not self.is_cli_compiled():
raise SkipTest("breekcoin-cli has not been compiled.")
def is_cli_compiled(self):
"""Checks whether bitcoin-cli was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
return config["components"].getboolean("ENABLE_CLI")
def is_wallet_compiled(self):
"""Checks whether the wallet module was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
return config["components"].getboolean("ENABLE_WALLET")
def is_zmq_compiled(self):
"""Checks whether the zmq module was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
return config["components"].getboolean("ENABLE_ZMQ")
| []
| []
| [
"PATH",
"BREEKCOIND",
"BREEKCOINCLI"
]
| [] | ["PATH", "BREEKCOIND", "BREEKCOINCLI"] | python | 3 | 0 | |
images/deployer/kube-deploy/cmd/kube-deploy.go | package main
import (
"net/url"
"os"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
klatest "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
kubeclient "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/golang/glog"
latest "github.com/openshift/origin/pkg/api/latest"
osclient "github.com/openshift/origin/pkg/client"
deployapi "github.com/openshift/origin/pkg/deploy/api"
"gopkg.in/v1/yaml"
)
func main() {
util.InitLogs()
defer util.FlushLogs()
var masterServer string
if len(os.Getenv("KUBERNETES_MASTER")) > 0 {
masterServer = os.Getenv("KUBERNETES_MASTER")
} else {
masterServer = "http://localhost:8080"
}
_, err := url.Parse(masterServer)
if err != nil {
glog.Fatalf("Unable to parse %v as a URL\n", err)
}
client, err := kubeclient.New(masterServer, klatest.Version, nil)
if err != nil {
glog.Errorf("Unable to connect to kubernetes master: %v", err)
os.Exit(1)
}
osClient, err := osclient.New(masterServer, latest.Version, nil)
if err != nil {
glog.Errorf("Unable to connect to openshift master: %v", err)
os.Exit(1)
}
deployTarget(client, osClient)
}
func deployTarget(client *kubeclient.Client, osClient osclient.Interface) {
deploymentID := os.Getenv("KUBERNETES_DEPLOYMENT_ID")
if len(deploymentID) == 0 {
glog.Fatal("No deployment id was specified. Expected KUBERNETES_DEPLOYMENT_ID variable.")
return
}
glog.Infof("Retrieving deployment id: %v", deploymentID)
var deployment *deployapi.Deployment
var err error
if deployment, err = osClient.GetDeployment(deploymentID); err != nil {
glog.Fatalf("An error occurred retrieving the deployment object: %v", err)
return
}
selector, _ := labels.ParseSelector("deployment=" + deployment.ConfigID)
replicationControllers, err := client.ListReplicationControllers(selector)
if err != nil {
glog.Fatalf("Unable to get list of replication controllers %v\n", err)
return
}
controller := &api.ReplicationController{
DesiredState: deployment.ControllerTemplate,
Labels: map[string]string{"deployment": deployment.ConfigID},
}
if controller.DesiredState.PodTemplate.Labels == nil {
controller.DesiredState.PodTemplate.Labels = make(map[string]string)
}
controller.DesiredState.PodTemplate.Labels["deployment"] = deployment.ConfigID
glog.Info("Creating replication controller: ")
obj, _ := yaml.Marshal(controller)
glog.Info(string(obj))
if _, err := client.CreateReplicationController(controller); err != nil {
glog.Fatalf("An error occurred creating the replication controller: %v", err)
return
}
glog.Info("Create replication controller")
// For this simple deploy, remove previous replication controllers
for _, rc := range replicationControllers.Items {
glog.Info("Stopping replication controller: ")
obj, _ := yaml.Marshal(rc)
glog.Info(string(obj))
rcObj, err1 := client.GetReplicationController(rc.ID)
if err1 != nil {
glog.Fatalf("Unable to get replication controller %s - error: %#v\n", rc.ID, err1)
}
rcObj.DesiredState.Replicas = 0
_, err := client.UpdateReplicationController(rcObj)
if err != nil {
glog.Fatalf("Unable to stop replication controller %s - error: %#v\n", rc.ID, err)
}
}
for _, rc := range replicationControllers.Items {
glog.Infof("Deleting replication controller %s", rc.ID)
err := client.DeleteReplicationController(rc.ID)
if err != nil {
glog.Fatalf("Unable to remove replication controller %s - error: %#v\n", rc.ID, err)
}
}
}
| [
"\"KUBERNETES_MASTER\"",
"\"KUBERNETES_MASTER\"",
"\"KUBERNETES_DEPLOYMENT_ID\""
]
| []
| [
"KUBERNETES_DEPLOYMENT_ID",
"KUBERNETES_MASTER"
]
| [] | ["KUBERNETES_DEPLOYMENT_ID", "KUBERNETES_MASTER"] | go | 2 | 0 | |
pkg/cmd/codespace/create.go | package codespace
import (
"context"
"errors"
"fmt"
"os"
"time"
"github.com/AlecAivazis/survey/v2"
"github.com/cli/cli/v2/internal/codespaces"
"github.com/cli/cli/v2/internal/codespaces/api"
"github.com/cli/cli/v2/pkg/cmdutil"
"github.com/cli/cli/v2/utils"
"github.com/spf13/cobra"
)
const (
DEVCONTAINER_PROMPT_DEFAULT = "Default Codespaces configuration"
)
var (
DEFAULT_DEVCONTAINER_DEFINITIONS = []string{".devcontainer.json", ".devcontainer/devcontainer.json"}
)
type createOptions struct {
repo string
branch string
location string
machine string
showStatus bool
permissionsOptOut bool
devContainerPath string
idleTimeout time.Duration
}
func newCreateCmd(app *App) *cobra.Command {
opts := createOptions{}
createCmd := &cobra.Command{
Use: "create",
Short: "Create a codespace",
Args: noArgsConstraint,
RunE: func(cmd *cobra.Command, args []string) error {
return app.Create(cmd.Context(), opts)
},
}
createCmd.Flags().StringVarP(&opts.repo, "repo", "r", "", "repository name with owner: user/repo")
createCmd.Flags().StringVarP(&opts.branch, "branch", "b", "", "repository branch")
createCmd.Flags().StringVarP(&opts.location, "location", "l", "", "location: {EastUs|SouthEastAsia|WestEurope|WestUs2} (determined automatically if not provided)")
createCmd.Flags().StringVarP(&opts.machine, "machine", "m", "", "hardware specifications for the VM")
createCmd.Flags().BoolVarP(&opts.permissionsOptOut, "default-permissions", "", false, "do not prompt to accept additional permissions requested by the codespace")
createCmd.Flags().BoolVarP(&opts.showStatus, "status", "s", false, "show status of post-create command and dotfiles")
createCmd.Flags().DurationVar(&opts.idleTimeout, "idle-timeout", 0, "allowed inactivity before codespace is stopped, e.g. \"10m\", \"1h\"")
createCmd.Flags().StringVar(&opts.devContainerPath, "devcontainer-path", "", "path to the devcontainer.json file to use when creating codespace")
return createCmd
}
// Create creates a new Codespace
func (a *App) Create(ctx context.Context, opts createOptions) error {
// Overrides for Codespace developers to target test environments
vscsLocation := os.Getenv("VSCS_LOCATION")
vscsTarget := os.Getenv("VSCS_TARGET")
vscsTargetUrl := os.Getenv("VSCS_TARGET_URL")
userInputs := struct {
Repository string
Branch string
Location string
}{
Repository: opts.repo,
Branch: opts.branch,
Location: opts.location,
}
if userInputs.Repository == "" {
branchPrompt := "Branch (leave blank for default branch):"
if userInputs.Branch != "" {
branchPrompt = "Branch:"
}
questions := []*survey.Question{
{
Name: "repository",
Prompt: &survey.Input{
Message: "Repository:",
Help: "Search for repos by name. To search within an org or user, or to see private repos, enter at least ':user/'.",
Suggest: func(toComplete string) []string {
return getRepoSuggestions(ctx, a.apiClient, toComplete)
},
},
Validate: survey.Required,
},
{
Name: "branch",
Prompt: &survey.Input{
Message: branchPrompt,
Default: userInputs.Branch,
},
},
}
if err := ask(questions, &userInputs); err != nil {
return fmt.Errorf("failed to prompt: %w", err)
}
}
if userInputs.Location == "" && vscsLocation != "" {
userInputs.Location = vscsLocation
}
a.StartProgressIndicatorWithLabel("Fetching repository")
repository, err := a.apiClient.GetRepository(ctx, userInputs.Repository)
a.StopProgressIndicator()
if err != nil {
return fmt.Errorf("error getting repository: %w", err)
}
branch := userInputs.Branch
if branch == "" {
branch = repository.DefaultBranch
}
devContainerPath := opts.devContainerPath
// now that we have repo+branch, we can list available devcontainer.json files (if any)
if opts.devContainerPath == "" {
a.StartProgressIndicatorWithLabel("Fetching devcontainer.json files")
devcontainers, err := a.apiClient.ListDevContainers(ctx, repository.ID, branch, 100)
a.StopProgressIndicator()
if err != nil {
return fmt.Errorf("error getting devcontainer.json paths: %w", err)
}
if len(devcontainers) > 0 {
// if there is only one devcontainer.json file and it is one of the default paths we can auto-select it
if len(devcontainers) == 1 && utils.StringInSlice(devcontainers[0].Path, DEFAULT_DEVCONTAINER_DEFINITIONS) {
devContainerPath = devcontainers[0].Path
} else {
promptOptions := []string{}
if !utils.StringInSlice(devcontainers[0].Path, DEFAULT_DEVCONTAINER_DEFINITIONS) {
promptOptions = []string{DEVCONTAINER_PROMPT_DEFAULT}
}
for _, devcontainer := range devcontainers {
promptOptions = append(promptOptions, devcontainer.Path)
}
devContainerPathQuestion := &survey.Question{
Name: "devContainerPath",
Prompt: &survey.Select{
Message: "Devcontainer definition file:",
Options: promptOptions,
},
}
if err := ask([]*survey.Question{devContainerPathQuestion}, &devContainerPath); err != nil {
return fmt.Errorf("failed to prompt: %w", err)
}
}
}
if devContainerPath == DEVCONTAINER_PROMPT_DEFAULT {
// special arg allows users to opt out of devcontainer.json selection
devContainerPath = ""
}
}
machine, err := getMachineName(ctx, a.apiClient, repository.ID, opts.machine, branch, userInputs.Location)
if err != nil {
return fmt.Errorf("error getting machine type: %w", err)
}
if machine == "" {
return errors.New("there are no available machine types for this repository")
}
createParams := &api.CreateCodespaceParams{
RepositoryID: repository.ID,
Branch: branch,
Machine: machine,
Location: userInputs.Location,
VSCSTarget: vscsTarget,
VSCSTargetURL: vscsTargetUrl,
IdleTimeoutMinutes: int(opts.idleTimeout.Minutes()),
DevContainerPath: devContainerPath,
PermissionsOptOut: opts.permissionsOptOut,
}
a.StartProgressIndicatorWithLabel("Creating codespace")
codespace, err := a.apiClient.CreateCodespace(ctx, createParams)
a.StopProgressIndicator()
if err != nil {
var aerr api.AcceptPermissionsRequiredError
if !errors.As(err, &aerr) || aerr.AllowPermissionsURL == "" {
return fmt.Errorf("error creating codespace: %w", err)
}
codespace, err = a.handleAdditionalPermissions(ctx, createParams, aerr.AllowPermissionsURL)
if err != nil {
// this error could be a cmdutil.SilentError (in the case that the user opened the browser) so we don't want to wrap it
return err
}
}
if opts.showStatus {
if err := a.showStatus(ctx, codespace); err != nil {
return fmt.Errorf("show status: %w", err)
}
}
cs := a.io.ColorScheme()
fmt.Fprintln(a.io.Out, codespace.Name)
if a.io.IsStderrTTY() && codespace.IdleTimeoutNotice != "" {
fmt.Fprintln(a.io.ErrOut, cs.Yellow("Notice:"), codespace.IdleTimeoutNotice)
}
return nil
}
func (a *App) handleAdditionalPermissions(ctx context.Context, createParams *api.CreateCodespaceParams, allowPermissionsURL string) (*api.Codespace, error) {
var (
isInteractive = a.io.CanPrompt()
cs = a.io.ColorScheme()
displayURL = utils.DisplayURL(allowPermissionsURL)
)
fmt.Fprintf(a.io.ErrOut, "You must authorize or deny additional permissions requested by this codespace before continuing.\n")
if !isInteractive {
fmt.Fprintf(a.io.ErrOut, "%s in your browser to review and authorize additional permissions: %s\n", cs.Bold("Open this URL"), displayURL)
fmt.Fprintf(a.io.ErrOut, "Alternatively, you can run %q with the %q option to continue without authorizing additional permissions.\n", a.io.ColorScheme().Bold("create"), cs.Bold("--default-permissions"))
return nil, cmdutil.SilentError
}
choices := []string{
"Continue in browser to review and authorize additional permissions (Recommended)",
"Continue without authorizing additional permissions",
}
permsSurvey := []*survey.Question{
{
Name: "accept",
Prompt: &survey.Select{
Message: "What would you like to do?",
Options: choices,
Default: choices[0],
},
Validate: survey.Required,
},
}
var answers struct {
Accept string
}
if err := ask(permsSurvey, &answers); err != nil {
return nil, fmt.Errorf("error getting answers: %w", err)
}
// if the user chose to continue in the browser, open the URL
if answers.Accept == choices[0] {
fmt.Fprintln(a.io.ErrOut, "Please re-run the create request after accepting permissions in the browser.")
if err := a.browser.Browse(allowPermissionsURL); err != nil {
return nil, fmt.Errorf("error opening browser: %w", err)
}
// browser opened successfully but we do not know if they accepted the permissions
// so we must exit and wait for the user to attempt the create again
return nil, cmdutil.SilentError
}
// if the user chose to create the codespace without the permissions,
// we can continue with the create opting out of the additional permissions
createParams.PermissionsOptOut = true
a.StartProgressIndicatorWithLabel("Creating codespace")
codespace, err := a.apiClient.CreateCodespace(ctx, createParams)
a.StopProgressIndicator()
if err != nil {
return nil, fmt.Errorf("error creating codespace: %w", err)
}
return codespace, nil
}
// showStatus polls the codespace for a list of post create states and their status. It will keep polling
// until all states have finished. Once all states have finished, we poll once more to check if any new
// states have been introduced and stop polling otherwise.
func (a *App) showStatus(ctx context.Context, codespace *api.Codespace) error {
var (
lastState codespaces.PostCreateState
breakNextState bool
)
finishedStates := make(map[string]bool)
ctx, stopPolling := context.WithCancel(ctx)
defer stopPolling()
poller := func(states []codespaces.PostCreateState) {
var inProgress bool
for _, state := range states {
if _, found := finishedStates[state.Name]; found {
continue // skip this state as we've processed it already
}
if state.Name != lastState.Name {
a.StartProgressIndicatorWithLabel(state.Name)
if state.Status == codespaces.PostCreateStateRunning {
inProgress = true
lastState = state
break
}
finishedStates[state.Name] = true
a.StopProgressIndicator()
} else {
if state.Status == codespaces.PostCreateStateRunning {
inProgress = true
break
}
finishedStates[state.Name] = true
a.StopProgressIndicator()
lastState = codespaces.PostCreateState{} // reset the value
}
}
if !inProgress {
if breakNextState {
stopPolling()
return
}
breakNextState = true
}
}
err := codespaces.PollPostCreateStates(ctx, a, a.apiClient, codespace, poller)
if err != nil {
if errors.Is(err, context.Canceled) && breakNextState {
return nil // we cancelled the context to stop polling, we can ignore the error
}
return fmt.Errorf("failed to poll state changes from codespace: %w", err)
}
return nil
}
// getMachineName prompts the user to select the machine type, or validates the machine if non-empty.
func getMachineName(ctx context.Context, apiClient apiClient, repoID int, machine, branch, location string) (string, error) {
machines, err := apiClient.GetCodespacesMachines(ctx, repoID, branch, location)
if err != nil {
return "", fmt.Errorf("error requesting machine instance types: %w", err)
}
// if user supplied a machine type, it must be valid
// if no machine type was supplied, we don't error if there are no machine types for the current repo
if machine != "" {
for _, m := range machines {
if machine == m.Name {
return machine, nil
}
}
availableMachines := make([]string, len(machines))
for i := 0; i < len(machines); i++ {
availableMachines[i] = machines[i].Name
}
return "", fmt.Errorf("there is no such machine for the repository: %s\nAvailable machines: %v", machine, availableMachines)
} else if len(machines) == 0 {
return "", nil
}
if len(machines) == 1 {
// VS Code does not prompt for machine if there is only one, this makes us consistent with that behavior
return machines[0].Name, nil
}
machineNames := make([]string, 0, len(machines))
machineByName := make(map[string]*api.Machine)
for _, m := range machines {
machineName := buildDisplayName(m.DisplayName, m.PrebuildAvailability)
machineNames = append(machineNames, machineName)
machineByName[machineName] = m
}
machineSurvey := []*survey.Question{
{
Name: "machine",
Prompt: &survey.Select{
Message: "Choose Machine Type:",
Options: machineNames,
Default: machineNames[0],
},
Validate: survey.Required,
},
}
var machineAnswers struct{ Machine string }
if err := ask(machineSurvey, &machineAnswers); err != nil {
return "", fmt.Errorf("error getting machine: %w", err)
}
selectedMachine := machineByName[machineAnswers.Machine]
return selectedMachine.Name, nil
}
func getRepoSuggestions(ctx context.Context, apiClient apiClient, partialSearch string) []string {
searchParams := api.RepoSearchParameters{
// The prompt shows 7 items so 7 effectively turns off scrolling which is similar behavior to other clients
MaxRepos: 7,
Sort: "repo",
}
repos, err := apiClient.GetCodespaceRepoSuggestions(ctx, partialSearch, searchParams)
if err != nil {
return nil
}
return repos
}
// buildDisplayName returns display name to be used in the machine survey prompt.
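// For example, a displayName of "4 cores, 8 GB RAM" with prebuildAvailability "pool"
// yields "4 cores, 8 GB RAM (Prebuild ready)" (the display name here is illustrative).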
func buildDisplayName(displayName string, prebuildAvailability string) string {
prebuildText := ""
if prebuildAvailability == "blob" || prebuildAvailability == "pool" {
prebuildText = " (Prebuild ready)"
}
return fmt.Sprintf("%s%s", displayName, prebuildText)
}
| [
"\"VSCS_LOCATION\"",
"\"VSCS_TARGET\"",
"\"VSCS_TARGET_URL\""
]
| []
| [
"VSCS_TARGET",
"VSCS_TARGET_URL",
"VSCS_LOCATION"
]
| [] | ["VSCS_TARGET", "VSCS_TARGET_URL", "VSCS_LOCATION"] | go | 3 | 0 | |
util/test/send_email.py | #!/usr/bin/env python
"""Portable email sender. Acts as replacement for mail, Mail, mailx,
email (cygwin). Message body is taken from stdin.
"""
from __future__ import print_function
import email.mime.text
import getpass
import logging
import optparse
import os
import smtplib
import socket
import sys
def main():
"""Parse command line arguments and send email!"""
args = _parse_args()
_setup_logging(args.verbose)
body = sys.stdin.read()
# Send the email!
send_email(args.recipients, body, args.subject, args.header, args.sender, args.smtp_host)
def send_email(recipients, body, subject=None, headers=None, sender=None, smtp_host=None):
"""Send email!
:arg recipients: list of recipients. If only one, may be a string.
:arg body: The email message body.
:arg subject: Optional subject. Defaults to ''.
:arg headers: Optional dict of headers to add.
:arg sender: Optional sender address. Defaults to <user>@<fqdn>
:arg smtp_host: Optional SMTP host. Defaults to 'localhost'.
"""
if isinstance(recipients, basestring):
recipients = [recipients]
sender = sender or _default_sender()
subject = subject or ''
smtp_host = smtp_host or _default_smtp_host()
msg = email.mime.text.MIMEText(body)
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = ','.join(recipients)
if headers:
for key, value in headers.iteritems():
msg[key] = value
logging.debug('Opening connection to: {0}'.format(smtp_host))
smtp = smtplib.SMTP(smtp_host)
try:
logging.info('Sending email to: {0} from: {1} subject: {2}'.format(
','.join(recipients), sender, subject))
logging.debug('Email headers: {0}'.format(headers))
logging.debug('Email body length: {0}'.format(len(body)))
smtp.sendmail(sender, recipients, msg.as_string())
finally:
smtp.quit()
def _parse_headers(option, opt, value, parser, *args, **kwargs):
"""OptionParser callback function for parsing header values passed by user.
It takes values that have commas (e.g. the user specified
[email protected],Precedence=bulk), breaks them apart and adds the
individual name/value pairs to the dict of values.
"""
# Get the existing values the parser knows about for this particular
# option.
value_dict = getattr(parser.values, option.dest, None) or {}
# Split the value provided.
parsed_vals = value.split(',')
for v in parsed_vals:
key, value = v.split('=')
value_dict[key] = value
# Set the updated dict to the option value.
setattr(parser.values, option.dest, value_dict)
def _default_sender():
"""Return default sender address, which is <user>@<hostname>."""
return '{0}@{1}'.format(getpass.getuser(), socket.getfqdn())
def _default_smtp_host():
"""Return default smtp host, which is localhost unless CHPL_UTIL_SMTP_HOST is
set in environment.
"""
return os.environ.get('CHPL_UTIL_SMTP_HOST', 'localhost')
def _parse_args():
"""Parse and return command line arguments."""
class NoWrapHelpFormatter(optparse.IndentedHelpFormatter):
"""Help formatter that does not wrap the description text."""
def _format_text(self, text):
return text
parser = optparse.OptionParser(
usage='usage: %prog [options] recipient_email [...]',
description=__doc__,
formatter=NoWrapHelpFormatter()
)
parser.add_option(
'-v', '--verbose',
action='store_true',
help='Verbose output.'
)
mail_group = optparse.OptionGroup(parser, 'Mail Options')
mail_group.add_option(
'-s', '--subject',
default=None,
help='Email subject.'
)
mail_group.add_option(
'-H', '--header',
action='callback', type='string',
callback=_parse_headers,
help=('Email header(s) of form NAME=VALUE. '
'Specify more than one with comma delimited list.')
)
mail_group.add_option(
'-S', '--sender',
default=_default_sender(),
help='Sender email address. (default: %default)'
)
mail_group.add_option(
'--smtp-host',
default=_default_smtp_host(),
help='SMTP host to use when sending email. (default: %default)'
)
parser.add_option_group(mail_group)
opts, args = parser.parse_args()
# Add all positional arguments as recipients.
opts.recipients = args
return opts
def _setup_logging(verbose=False):
"""Initialize logging and set level based on verbose.
:type verbose: bool
:arg verbose: When True, set log level to DEBUG.
"""
log_level = logging.DEBUG if verbose else logging.WARN
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',
level=log_level)
logging.debug('Verbose output enabled.')
if __name__ == '__main__':
main()
| []
| []
| [
"CHPL_UTIL_SMTP_HOST"
]
| [] | ["CHPL_UTIL_SMTP_HOST"] | python | 1 | 0 | |
src/scraping_constantino/scraper.py | import os
import requests
import re
from bs4 import BeautifulSoup
from dotenv import load_dotenv
load_dotenv()
base_url = os.getenv("BASEURL")
archive_url = os.getenv("ARCHIVEURL")
def get_latest_link(nth=1):
archive_page = requests.get(archive_url)
soup = BeautifulSoup(archive_page.content, 'html.parser')
links = []
for link in soup.findAll('a', attrs={'href': re.compile("^/audio")}):
links.append(link.get('href'))
latest_link = f'{base_url}{links[1-nth]}'
return latest_link
def get_song_list():
latest_link = get_latest_link()
latest_page = requests.get(latest_link)
soup = BeautifulSoup(latest_page.content, 'html.parser')
song_raw_list = soup.findAll(
'div', attrs={'class': 'aodHtmlDescription'})[0]
song_list = [i.replace('\xa0', '').replace('\r', '') for i in song_raw_list.text.split(
'\n') if(i != 'PLAY' and len(i) > 0 and '\t' not in i)]
return song_list
if __name__ == '__main__':
print(get_song_list())
| []
| []
| [
"ARCHIVEURL",
"BASEURL"
]
| [] | ["ARCHIVEURL", "BASEURL"] | python | 2 | 0 | |
spec/fixtures/go-app/src/stuffs/main.go | package main
import (
"fmt"
"net/http"
"os"
)
func main() {
port := os.Getenv("PORT")
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.Write([]byte(`{"foo": "bar"}`))
})
http.ListenAndServe(fmt.Sprintf(":%s", port), nil)
}
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
internal/server/start.go | package server
import (
"database/sql"
"net/http"
"os"
// importing mysql driver for "database/sql" package
_ "github.com/go-sql-driver/mysql"
"github.com/spbu-timetable/api/internal/store/sqlstore"
)
// Start creates a new db connection and starts the server on the port declared in the "config.env" file
func Start() error {
db, err := newDB()
if err != nil {
return err
}
defer db.Close()
store := sqlstore.New(db)
server := newServer(store)
port := ":" + os.Getenv("PORT")
return http.ListenAndServe(port, server)
}
func newDB() (*sql.DB, error) {
db, err := sql.Open("mysql", os.Getenv("CLEARDB_DATABASE_URL"))
if err != nil {
return nil, err
}
if err := db.Ping(); err != nil {
return nil, err
}
return db, nil
}
| [
"\"PORT\"",
"\"CLEARDB_DATABASE_URL\""
]
| []
| [
"PORT",
"CLEARDB_DATABASE_URL"
]
| [] | ["PORT", "CLEARDB_DATABASE_URL"] | go | 2 | 0 | |
python/setup.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import shutil
import subprocess
import sys
from setuptools import setup, find_packages, Distribution
import setuptools.command.build_ext as _build_ext
# Ideally, we could include these files by putting them in a
# MANIFEST.in or using the package_data argument to setup, but the
# MANIFEST.in gets applied at the very beginning when setup.py runs
# before these files have been created, so we have to move the files
# manually.
# NOTE: The lists below must be kept in sync with ray/BUILD.bazel.
ray_files = [
"ray/core/src/ray/thirdparty/redis/src/redis-server",
"ray/core/src/ray/gcs/redis_module/libray_redis_module.so",
"ray/core/src/plasma/plasma_store_server", "ray/_raylet.so",
"ray/core/src/ray/raylet/raylet_monitor", "ray/core/src/ray/raylet/raylet",
"ray/dashboard/dashboard.py", "ray/dashboard/index.html",
"ray/dashboard/res/main.css", "ray/dashboard/res/main.js"
]
# These are the directories where automatically generated Python protobuf
# bindings are created.
generated_python_directories = [
"ray/core/generated",
]
optional_ray_files = []
ray_autoscaler_files = [
"ray/autoscaler/aws/example-full.yaml",
"ray/autoscaler/gcp/example-full.yaml",
"ray/autoscaler/local/example-full.yaml",
]
if "RAY_USE_NEW_GCS" in os.environ and os.environ["RAY_USE_NEW_GCS"] == "on":
ray_files += [
"ray/core/src/credis/build/src/libmember.so",
"ray/core/src/credis/build/src/libmaster.so",
"ray/core/src/credis/redis/src/redis-server"
]
optional_ray_files += ray_autoscaler_files
extras = {
"rllib": [
"pyyaml", "gym[atari]", "opencv-python-headless", "lz4", "scipy"
],
"debug": ["psutil", "setproctitle", "py-spy"],
"dashboard": ["psutil", "aiohttp"],
}
class build_ext(_build_ext.build_ext):
def run(self):
# Note: We are passing in sys.executable so that we use the same
# version of Python to build pyarrow inside the build.sh script. Note
# that certain flags will not be passed along such as --user or sudo.
# TODO(rkn): Fix this.
command = ["../build.sh", "-p", sys.executable]
if os.getenv("RAY_INSTALL_JAVA") == "1":
# Also build binaries for Java if the above env variable exists.
command += ["-l", "python,java"]
subprocess.check_call(command)
# We also need to install pyarrow along with Ray, so make sure that the
# relevant non-Python pyarrow files get copied.
pyarrow_files = []
for (root, dirs, filenames) in os.walk("./ray/pyarrow_files/pyarrow"):
for name in filenames:
pyarrow_files.append(os.path.join(root, name))
# Make sure the relevant files for modin get copied.
modin_files = []
for (root, dirs, filenames) in os.walk("./ray/modin"):
for name in filenames:
modin_files.append(os.path.join(root, name))
files_to_include = ray_files + pyarrow_files + modin_files
# Copy over the autogenerated protobuf Python bindings.
for directory in generated_python_directories:
for filename in os.listdir(directory):
if filename[-3:] == ".py":
files_to_include.append(os.path.join(directory, filename))
for filename in files_to_include:
self.move_file(filename)
# Try to copy over the optional files.
for filename in optional_ray_files:
try:
self.move_file(filename)
except Exception:
print("Failed to copy optional file {}. This is ok."
.format(filename))
def move_file(self, filename):
# TODO(rkn): This feels very brittle. It may not handle all cases. See
# https://github.com/apache/arrow/blob/master/python/setup.py for an
# example.
source = filename
destination = os.path.join(self.build_lib, filename)
# Create the target directory if it doesn't already exist.
parent_directory = os.path.dirname(destination)
if not os.path.exists(parent_directory):
os.makedirs(parent_directory)
if not os.path.exists(destination):
print("Copying {} to {}.".format(source, destination))
shutil.copy(source, destination)
class BinaryDistribution(Distribution):
def has_ext_modules(self):
return True
def find_version(*filepath):
# Extract version information from filepath
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, *filepath)) as fp:
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
fp.read(), re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
requires = [
"numpy >= 1.14",
"filelock",
"funcsigs",
"click",
"colorama",
"pytest",
"pyyaml",
"redis>=3.3.2",
# NOTE: Don't upgrade the version of six! Doing so causes installation
# problems. See https://github.com/ray-project/ray/issues/4169.
"six >= 1.0.0",
"faulthandler;python_version<'3.3'",
"protobuf >= 3.8.0",
]
setup(
name="ray",
version=find_version("ray", "__init__.py"),
author="Ray Team",
author_email="[email protected]",
description=("A system for parallel and distributed Python that unifies "
"the ML ecosystem."),
long_description=open("../README.rst").read(),
url="https://github.com/ray-project/ray",
keywords=("ray distributed parallel machine-learning "
"reinforcement-learning deep-learning python"),
packages=find_packages(),
cmdclass={"build_ext": build_ext},
# The BinaryDistribution argument triggers build_ext.
distclass=BinaryDistribution,
install_requires=requires,
setup_requires=["cython >= 0.29"],
extras_require=extras,
entry_points={
"console_scripts": [
"ray=ray.scripts.scripts:main",
"rllib=ray.rllib.scripts:cli [rllib]", "tune=ray.tune.scripts:cli"
]
},
include_package_data=True,
zip_safe=False,
license="Apache 2.0")
| []
| []
| [
"RAY_INSTALL_JAVA",
"RAY_USE_NEW_GCS"
]
| [] | ["RAY_INSTALL_JAVA", "RAY_USE_NEW_GCS"] | python | 2 | 0 | |
services/main.go | package main
import (
"github.com/dzc15331066/agenda-cs/services/service"
flag "github.com/spf13/pflag"
"os"
)
const (
PORT string = "8080"
)
func main() {
port := os.Getenv("PORT")
if len(port) == 0 {
port = PORT
}
pPort := flag.StringP("port", "p", "", "PORT for httpd listening")
flag.Parse()
if len(*pPort) != 0 {
port = *pPort
}
server := service.NewServer()
server.Run(":" + port)
}
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
orderer/common/server/main.go | /*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package server
import (
"bytes"
"context"
"fmt"
"github.com/hyperledger/fabric/orderer/common/channelparticipation"
"io/ioutil"
"net"
"net/http"
_ "net/http/pprof" // This is essentially the main package for the orderer
"os"
"os/signal"
"sync"
"syscall"
"time"
"github.com/golang/protobuf/proto"
"github.com/hyperledger/fabric-lib-go/healthz"
cb "github.com/hyperledger/fabric-protos-go/common"
ab "github.com/hyperledger/fabric-protos-go/orderer"
"github.com/hyperledger/fabric/bccsp"
"github.com/hyperledger/fabric/bccsp/factory"
"github.com/hyperledger/fabric/common/channelconfig"
"github.com/hyperledger/fabric/common/crypto"
"github.com/hyperledger/fabric/common/flogging"
floggingmetrics "github.com/hyperledger/fabric/common/flogging/metrics"
"github.com/hyperledger/fabric/common/grpclogging"
"github.com/hyperledger/fabric/common/grpcmetrics"
"github.com/hyperledger/fabric/common/ledger/blockledger"
"github.com/hyperledger/fabric/common/metrics"
"github.com/hyperledger/fabric/common/metrics/disabled"
"github.com/hyperledger/fabric/common/tools/protolator"
"github.com/hyperledger/fabric/core/operations"
"github.com/hyperledger/fabric/internal/pkg/comm"
"github.com/hyperledger/fabric/internal/pkg/identity"
"github.com/hyperledger/fabric/msp"
"github.com/hyperledger/fabric/orderer/common/bootstrap/file"
"github.com/hyperledger/fabric/orderer/common/cluster"
"github.com/hyperledger/fabric/orderer/common/localconfig"
"github.com/hyperledger/fabric/orderer/common/metadata"
"github.com/hyperledger/fabric/orderer/common/multichannel"
"github.com/hyperledger/fabric/orderer/consensus"
"github.com/hyperledger/fabric/orderer/consensus/etcdraft"
"github.com/hyperledger/fabric/orderer/consensus/kafka"
"github.com/hyperledger/fabric/orderer/consensus/solo"
"github.com/hyperledger/fabric/protoutil"
"go.uber.org/zap/zapcore"
"google.golang.org/grpc"
"gopkg.in/alecthomas/kingpin.v2"
)
var logger = flogging.MustGetLogger("orderer.common.server")
// command-line flags
var (
app = kingpin.New("orderer", "Hyperledger Fabric orderer node")
_ = app.Command("start", "Start the orderer node").Default() // preserved for cli compatibility
version = app.Command("version", "Show version information")
clusterTypes = map[string]struct{}{"etcdraft": {}}
)
// Main is the entry point of orderer process
func Main() {
fullCmd := kingpin.MustParse(app.Parse(os.Args[1:]))
// "version" command
if fullCmd == version.FullCommand() {
fmt.Println(metadata.GetVersionInfo())
return
}
conf, err := localconfig.Load()
if err != nil {
logger.Error("failed to parse config: ", err)
os.Exit(1)
}
initializeLogging()
prettyPrintStruct(conf)
cryptoProvider := factory.GetDefault()
signer, signErr := loadLocalMSP(conf).GetDefaultSigningIdentity()
if signErr != nil {
logger.Panicf("Failed to get local MSP identity: %s", signErr)
}
opsSystem := newOperationsSystem(conf.Operations, conf.Metrics)
metricsProvider := opsSystem.Provider
logObserver := floggingmetrics.NewObserver(metricsProvider)
flogging.SetObserver(logObserver)
serverConfig := initializeServerConfig(conf, metricsProvider)
grpcServer := initializeGrpcServer(conf, serverConfig)
caMgr := &caManager{
appRootCAsByChain: make(map[string][][]byte),
ordererRootCAsByChain: make(map[string][][]byte),
clientRootCAs: serverConfig.SecOpts.ClientRootCAs,
}
lf, _, err := createLedgerFactory(conf, metricsProvider)
if err != nil {
logger.Panicf("Failed to create ledger factory: %v", err)
}
var clusterBootBlock *cb.Block
	// Configure the following artifacts properly if the orderer is of cluster type.
var r *replicationInitiator
clusterServerConfig := serverConfig
clusterGRPCServer := grpcServer // by default, cluster shares the same grpc server
var clusterClientConfig comm.ClientConfig
var clusterDialer *cluster.PredicateDialer
var clusterType, reuseGrpcListener bool
var serversToUpdate []*comm.GRPCServer
bootstrapMethod := conf.General.BootstrapMethod
if bootstrapMethod == "file" || bootstrapMethod == "none" {
bootstrapBlock := extractBootstrapBlock(conf)
if bootstrapBlock == nil {
bootstrapBlock = extractSystemChannel(lf, cryptoProvider)
}
if bootstrapBlock != nil {
if err := ValidateBootstrapBlock(bootstrapBlock, cryptoProvider); err != nil {
logger.Panicf("Failed validating bootstrap block: %v", err)
}
sysChanLastConfigBlock := extractSysChanLastConfig(lf, bootstrapBlock)
clusterBootBlock = selectClusterBootBlock(bootstrapBlock, sysChanLastConfigBlock)
typ := consensusType(bootstrapBlock, cryptoProvider)
clusterType = isClusterType(clusterBootBlock, cryptoProvider)
if clusterType {
logger.Infof("Setting up cluster for orderer type %s", typ)
clusterClientConfig = initializeClusterClientConfig(conf)
clusterDialer = &cluster.PredicateDialer{
Config: clusterClientConfig,
}
r = createReplicator(lf, bootstrapBlock, conf, clusterClientConfig.SecOpts, signer, cryptoProvider)
// Only clusters that are equipped with a recent config block can replicate.
if conf.General.BootstrapMethod == "file" {
r.replicateIfNeeded(bootstrapBlock)
}
if reuseGrpcListener = reuseListener(conf, typ); !reuseGrpcListener {
clusterServerConfig, clusterGRPCServer = configureClusterListener(conf, serverConfig, ioutil.ReadFile)
}
// If we have a separate gRPC server for the cluster,
// we need to update its TLS CA certificate pool.
serversToUpdate = append(serversToUpdate, clusterGRPCServer)
}
// Are we bootstrapping?
if len(lf.ChannelIDs()) == 0 {
initializeBootstrapChannel(clusterBootBlock, lf)
} else {
logger.Info("Not bootstrapping because of existing channels")
}
}
}
identityBytes, err := signer.Serialize()
if err != nil {
logger.Panicf("Failed serializing signing identity: %v", err)
}
expirationLogger := flogging.MustGetLogger("certmonitor")
crypto.TrackExpiration(
serverConfig.SecOpts.UseTLS,
serverConfig.SecOpts.Certificate,
[][]byte{clusterClientConfig.SecOpts.Certificate},
identityBytes,
expirationLogger.Warnf, // This can be used to piggyback a metric event in the future
time.Now(),
time.AfterFunc)
	// If the cluster is reusing the client-facing server, then it has already
	// been appended to serversToUpdate at this point.
if grpcServer.MutualTLSRequired() && !reuseGrpcListener {
serversToUpdate = append(serversToUpdate, grpcServer)
}
tlsCallback := func(bundle *channelconfig.Bundle) {
logger.Debug("Executing callback to update root CAs")
caMgr.updateTrustedRoots(bundle, serversToUpdate...)
if clusterType {
caMgr.updateClusterDialer(
clusterDialer,
clusterClientConfig.SecOpts.ServerRootCAs,
)
}
}
manager := initializeMultichannelRegistrar(
clusterBootBlock,
r,
clusterDialer,
clusterServerConfig,
clusterGRPCServer,
conf,
signer,
metricsProvider,
opsSystem,
lf,
cryptoProvider,
tlsCallback,
)
opsSystem.RegisterHandler(
channelparticipation.URLBaseV1,
channelparticipation.NewHTTPHandler(conf.ChannelParticipation, manager),
)
if err = opsSystem.Start(); err != nil {
logger.Panicf("failed to start operations subsystem: %s", err)
}
defer opsSystem.Stop()
mutualTLS := serverConfig.SecOpts.UseTLS && serverConfig.SecOpts.RequireClientCert
server := NewServer(
manager,
metricsProvider,
&conf.Debug,
conf.General.Authentication.TimeWindow,
mutualTLS,
conf.General.Authentication.NoExpirationChecks,
)
logger.Infof("Starting %s", metadata.GetVersionInfo())
handleSignals(addPlatformSignals(map[os.Signal]func(){
syscall.SIGTERM: func() {
grpcServer.Stop()
if clusterGRPCServer != grpcServer {
clusterGRPCServer.Stop()
}
},
}))
if !reuseGrpcListener && clusterType {
logger.Info("Starting cluster listener on", clusterGRPCServer.Address())
go clusterGRPCServer.Start()
}
if conf.General.Profile.Enabled {
go initializeProfilingService(conf)
}
ab.RegisterAtomicBroadcastServer(grpcServer.Server(), server)
logger.Info("Beginning to serve requests")
grpcServer.Start()
}
func reuseListener(conf *localconfig.TopLevel, typ string) bool {
clusterConf := conf.General.Cluster
// If listen address is not configured, and the TLS certificate isn't configured,
// it means we use the general listener of the node.
if clusterConf.ListenPort == 0 && clusterConf.ServerCertificate == "" && clusterConf.ListenAddress == "" && clusterConf.ServerPrivateKey == "" {
logger.Info("Cluster listener is not configured, defaulting to use the general listener on port", conf.General.ListenPort)
if !conf.General.TLS.Enabled {
logger.Panicf("TLS is required for running ordering nodes of type %s.", typ)
}
return true
}
	// Otherwise, at least one of the above is defined, so all four properties must be defined together.
if clusterConf.ListenPort == 0 || clusterConf.ServerCertificate == "" || clusterConf.ListenAddress == "" || clusterConf.ServerPrivateKey == "" {
logger.Panic("Options: General.Cluster.ListenPort, General.Cluster.ListenAddress, General.Cluster.ServerCertificate," +
" General.Cluster.ServerPrivateKey, should be defined altogether.")
}
return false
}
// Extract system channel last config block
func extractSysChanLastConfig(lf blockledger.Factory, bootstrapBlock *cb.Block) *cb.Block {
// Are we bootstrapping?
channelCount := len(lf.ChannelIDs())
if channelCount == 0 {
logger.Info("Bootstrapping because no existing channels")
return nil
}
logger.Infof("Not bootstrapping because of %d existing channels", channelCount)
systemChannelName, err := protoutil.GetChannelIDFromBlock(bootstrapBlock)
if err != nil {
logger.Panicf("Failed extracting system channel name from bootstrap block: %v", err)
}
systemChannelLedger, err := lf.GetOrCreate(systemChannelName)
if err != nil {
logger.Panicf("Failed getting system channel ledger: %v", err)
}
height := systemChannelLedger.Height()
lastConfigBlock := multichannel.ConfigBlock(systemChannelLedger)
logger.Infof("System channel: name=%s, height=%d, last config block number=%d",
systemChannelName, height, lastConfigBlock.Header.Number)
return lastConfigBlock
}
// extractSystemChannel loops through all channels and returns the last
// config block for the system channel. It returns nil if no system channel
// was found.
func extractSystemChannel(lf blockledger.Factory, bccsp bccsp.BCCSP) *cb.Block {
for _, cID := range lf.ChannelIDs() {
channelLedger, err := lf.GetOrCreate(cID)
if err != nil {
logger.Panicf("Failed getting channel %v's ledger: %v", cID, err)
}
channelConfigBlock := multichannel.ConfigBlock(channelLedger)
err = ValidateBootstrapBlock(channelConfigBlock, bccsp)
if err == nil {
return channelConfigBlock
}
}
return nil
}
// Select cluster boot block
func selectClusterBootBlock(bootstrapBlock, sysChanLastConfig *cb.Block) *cb.Block {
if sysChanLastConfig == nil {
logger.Debug("Selected bootstrap block, because system channel last config block is nil")
return bootstrapBlock
}
if sysChanLastConfig.Header.Number > bootstrapBlock.Header.Number {
logger.Infof("Cluster boot block is system channel last config block; Blocks Header.Number system-channel=%d, bootstrap=%d",
sysChanLastConfig.Header.Number, bootstrapBlock.Header.Number)
return sysChanLastConfig
}
logger.Infof("Cluster boot block is bootstrap (genesis) block; Blocks Header.Number system-channel=%d, bootstrap=%d",
sysChanLastConfig.Header.Number, bootstrapBlock.Header.Number)
return bootstrapBlock
}
func createReplicator(
lf blockledger.Factory,
bootstrapBlock *cb.Block,
conf *localconfig.TopLevel,
secOpts comm.SecureOptions,
signer identity.SignerSerializer,
bccsp bccsp.BCCSP,
) *replicationInitiator {
logger := flogging.MustGetLogger("orderer.common.cluster")
vl := &verifierLoader{
verifierFactory: &cluster.BlockVerifierAssembler{Logger: logger, BCCSP: bccsp},
onFailure: func(block *cb.Block) {
protolator.DeepMarshalJSON(os.Stdout, block)
},
ledgerFactory: lf,
logger: logger,
}
systemChannelName, err := protoutil.GetChannelIDFromBlock(bootstrapBlock)
if err != nil {
logger.Panicf("Failed extracting system channel name from bootstrap block: %v", err)
}
// System channel is not verified because we trust the bootstrap block
// and use backward hash chain verification.
verifiersByChannel := vl.loadVerifiers()
verifiersByChannel[systemChannelName] = &cluster.NoopBlockVerifier{}
vr := &cluster.VerificationRegistry{
LoadVerifier: vl.loadVerifier,
Logger: logger,
VerifiersByChannel: verifiersByChannel,
VerifierFactory: &cluster.BlockVerifierAssembler{Logger: logger, BCCSP: bccsp},
}
ledgerFactory := &ledgerFactory{
Factory: lf,
onBlockCommit: vr.BlockCommitted,
}
return &replicationInitiator{
registerChain: vr.RegisterVerifier,
verifierRetriever: vr,
logger: logger,
secOpts: secOpts,
conf: conf,
lf: ledgerFactory,
signer: signer,
cryptoProvider: bccsp,
}
}
func initializeLogging() {
loggingSpec := os.Getenv("FABRIC_LOGGING_SPEC")
loggingFormat := os.Getenv("FABRIC_LOGGING_FORMAT")
flogging.Init(flogging.Config{
Format: loggingFormat,
Writer: os.Stderr,
LogSpec: loggingSpec,
})
}
// Start the profiling service if enabled.
func initializeProfilingService(conf *localconfig.TopLevel) {
logger.Info("Starting Go pprof profiling service on:", conf.General.Profile.Address)
// The ListenAndServe() call does not return unless an error occurs.
logger.Panic("Go pprof service failed:", http.ListenAndServe(conf.General.Profile.Address, nil))
}
func handleSignals(handlers map[os.Signal]func()) {
var signals []os.Signal
for sig := range handlers {
signals = append(signals, sig)
}
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, signals...)
go func() {
for sig := range signalChan {
logger.Infof("Received signal: %d (%s)", sig, sig)
handlers[sig]()
}
}()
}
type loadPEMFunc func(string) ([]byte, error)
// configureClusterListener returns a new ServerConfig and a new gRPC server (with its own TLS listener).
func configureClusterListener(conf *localconfig.TopLevel, generalConf comm.ServerConfig, loadPEM loadPEMFunc) (comm.ServerConfig, *comm.GRPCServer) {
clusterConf := conf.General.Cluster
cert, err := loadPEM(clusterConf.ServerCertificate)
if err != nil {
logger.Panicf("Failed to load cluster server certificate from '%s' (%s)", clusterConf.ServerCertificate, err)
}
key, err := loadPEM(clusterConf.ServerPrivateKey)
if err != nil {
logger.Panicf("Failed to load cluster server key from '%s' (%s)", clusterConf.ServerPrivateKey, err)
}
port := fmt.Sprintf("%d", clusterConf.ListenPort)
bindAddr := net.JoinHostPort(clusterConf.ListenAddress, port)
var clientRootCAs [][]byte
for _, serverRoot := range conf.General.Cluster.RootCAs {
rootCACert, err := loadPEM(serverRoot)
if err != nil {
logger.Panicf("Failed to load CA cert file '%s' (%s)", serverRoot, err)
}
clientRootCAs = append(clientRootCAs, rootCACert)
}
serverConf := comm.ServerConfig{
StreamInterceptors: generalConf.StreamInterceptors,
UnaryInterceptors: generalConf.UnaryInterceptors,
ConnectionTimeout: generalConf.ConnectionTimeout,
ServerStatsHandler: generalConf.ServerStatsHandler,
Logger: generalConf.Logger,
KaOpts: generalConf.KaOpts,
SecOpts: comm.SecureOptions{
TimeShift: conf.General.Cluster.TLSHandshakeTimeShift,
CipherSuites: comm.DefaultTLSCipherSuites,
ClientRootCAs: clientRootCAs,
RequireClientCert: true,
Certificate: cert,
UseTLS: true,
Key: key,
},
}
srv, err := comm.NewGRPCServer(bindAddr, serverConf)
if err != nil {
logger.Panicf("Failed creating gRPC server on %s:%d due to %v", clusterConf.ListenAddress, clusterConf.ListenPort, err)
}
return serverConf, srv
}
func initializeClusterClientConfig(conf *localconfig.TopLevel) comm.ClientConfig {
cc := comm.ClientConfig{
AsyncConnect: true,
KaOpts: comm.DefaultKeepaliveOptions,
Timeout: conf.General.Cluster.DialTimeout,
SecOpts: comm.SecureOptions{},
}
if conf.General.Cluster.ClientCertificate == "" {
return cc
}
certFile := conf.General.Cluster.ClientCertificate
certBytes, err := ioutil.ReadFile(certFile)
if err != nil {
logger.Fatalf("Failed to load client TLS certificate file '%s' (%s)", certFile, err)
}
keyFile := conf.General.Cluster.ClientPrivateKey
keyBytes, err := ioutil.ReadFile(keyFile)
if err != nil {
logger.Fatalf("Failed to load client TLS key file '%s' (%s)", keyFile, err)
}
var serverRootCAs [][]byte
for _, serverRoot := range conf.General.Cluster.RootCAs {
rootCACert, err := ioutil.ReadFile(serverRoot)
if err != nil {
logger.Fatalf("Failed to load ServerRootCAs file '%s' (%s)", serverRoot, err)
}
serverRootCAs = append(serverRootCAs, rootCACert)
}
cc.SecOpts = comm.SecureOptions{
TimeShift: conf.General.Cluster.TLSHandshakeTimeShift,
RequireClientCert: true,
CipherSuites: comm.DefaultTLSCipherSuites,
ServerRootCAs: serverRootCAs,
Certificate: certBytes,
Key: keyBytes,
UseTLS: true,
}
return cc
}
func initializeServerConfig(conf *localconfig.TopLevel, metricsProvider metrics.Provider) comm.ServerConfig {
// secure server config
secureOpts := comm.SecureOptions{
UseTLS: conf.General.TLS.Enabled,
RequireClientCert: conf.General.TLS.ClientAuthRequired,
}
// check to see if TLS is enabled
if secureOpts.UseTLS {
msg := "TLS"
// load crypto material from files
serverCertificate, err := ioutil.ReadFile(conf.General.TLS.Certificate)
if err != nil {
logger.Fatalf("Failed to load server Certificate file '%s' (%s)",
conf.General.TLS.Certificate, err)
}
serverKey, err := ioutil.ReadFile(conf.General.TLS.PrivateKey)
if err != nil {
logger.Fatalf("Failed to load PrivateKey file '%s' (%s)",
conf.General.TLS.PrivateKey, err)
}
var serverRootCAs, clientRootCAs [][]byte
for _, serverRoot := range conf.General.TLS.RootCAs {
root, err := ioutil.ReadFile(serverRoot)
if err != nil {
logger.Fatalf("Failed to load ServerRootCAs file '%s' (%s)",
err, serverRoot)
}
serverRootCAs = append(serverRootCAs, root)
}
if secureOpts.RequireClientCert {
for _, clientRoot := range conf.General.TLS.ClientRootCAs {
root, err := ioutil.ReadFile(clientRoot)
if err != nil {
logger.Fatalf("Failed to load ClientRootCAs file '%s' (%s)",
err, clientRoot)
}
clientRootCAs = append(clientRootCAs, root)
}
msg = "mutual TLS"
}
secureOpts.Key = serverKey
secureOpts.Certificate = serverCertificate
secureOpts.ServerRootCAs = serverRootCAs
secureOpts.ClientRootCAs = clientRootCAs
logger.Infof("Starting orderer with %s enabled", msg)
}
kaOpts := comm.DefaultKeepaliveOptions
// keepalive settings
// ServerMinInterval must be greater than 0
if conf.General.Keepalive.ServerMinInterval > time.Duration(0) {
kaOpts.ServerMinInterval = conf.General.Keepalive.ServerMinInterval
}
kaOpts.ServerInterval = conf.General.Keepalive.ServerInterval
kaOpts.ServerTimeout = conf.General.Keepalive.ServerTimeout
commLogger := flogging.MustGetLogger("core.comm").With("server", "Orderer")
if metricsProvider == nil {
metricsProvider = &disabled.Provider{}
}
return comm.ServerConfig{
SecOpts: secureOpts,
KaOpts: kaOpts,
Logger: commLogger,
ServerStatsHandler: comm.NewServerStatsHandler(metricsProvider),
ConnectionTimeout: conf.General.ConnectionTimeout,
StreamInterceptors: []grpc.StreamServerInterceptor{
grpcmetrics.StreamServerInterceptor(grpcmetrics.NewStreamMetrics(metricsProvider)),
grpclogging.StreamServerInterceptor(flogging.MustGetLogger("comm.grpc.server").Zap()),
},
UnaryInterceptors: []grpc.UnaryServerInterceptor{
grpcmetrics.UnaryServerInterceptor(grpcmetrics.NewUnaryMetrics(metricsProvider)),
grpclogging.UnaryServerInterceptor(
flogging.MustGetLogger("comm.grpc.server").Zap(),
grpclogging.WithLeveler(grpclogging.LevelerFunc(grpcLeveler)),
),
},
}
}
func grpcLeveler(ctx context.Context, fullMethod string) zapcore.Level {
switch fullMethod {
case "/orderer.Cluster/Step":
return flogging.DisabledLevel
default:
return zapcore.InfoLevel
}
}
func extractBootstrapBlock(conf *localconfig.TopLevel) *cb.Block {
var bootstrapBlock *cb.Block
// Select the bootstrapping mechanism
switch conf.General.BootstrapMethod {
case "file": // For now, "file" is the only supported genesis method
bootstrapBlock = file.New(conf.General.BootstrapFile).GenesisBlock()
case "none": // simply honor the configuration value
return nil
default:
logger.Panic("Unknown genesis method:", conf.General.BootstrapMethod)
}
return bootstrapBlock
}
func initializeBootstrapChannel(genesisBlock *cb.Block, lf blockledger.Factory) {
channelID, err := protoutil.GetChannelIDFromBlock(genesisBlock)
if err != nil {
logger.Fatal("Failed to parse channel ID from genesis block:", err)
}
gl, err := lf.GetOrCreate(channelID)
if err != nil {
logger.Fatal("Failed to create the system channel:", err)
}
if err := gl.Append(genesisBlock); err != nil {
logger.Fatal("Could not write genesis block to ledger:", err)
}
}
func isClusterType(genesisBlock *cb.Block, bccsp bccsp.BCCSP) bool {
_, exists := clusterTypes[consensusType(genesisBlock, bccsp)]
return exists
}
func consensusType(genesisBlock *cb.Block, bccsp bccsp.BCCSP) string {
if genesisBlock == nil || genesisBlock.Data == nil || len(genesisBlock.Data.Data) == 0 {
logger.Fatalf("Empty genesis block")
}
env := &cb.Envelope{}
if err := proto.Unmarshal(genesisBlock.Data.Data[0], env); err != nil {
logger.Fatalf("Failed to unmarshal the genesis block's envelope: %v", err)
}
bundle, err := channelconfig.NewBundleFromEnvelope(env, bccsp)
if err != nil {
logger.Fatalf("Failed creating bundle from the genesis block: %v", err)
}
ordConf, exists := bundle.OrdererConfig()
if !exists {
logger.Fatalf("Orderer config doesn't exist in bundle derived from genesis block")
}
return ordConf.ConsensusType()
}
func initializeGrpcServer(conf *localconfig.TopLevel, serverConfig comm.ServerConfig) *comm.GRPCServer {
lis, err := net.Listen("tcp", fmt.Sprintf("%s:%d", conf.General.ListenAddress, conf.General.ListenPort))
if err != nil {
logger.Fatal("Failed to listen:", err)
}
// Create GRPC server - return if an error occurs
grpcServer, err := comm.NewGRPCServerFromListener(lis, serverConfig)
if err != nil {
logger.Fatal("Failed to return new GRPC server:", err)
}
return grpcServer
}
func loadLocalMSP(conf *localconfig.TopLevel) msp.MSP {
// MUST call GetLocalMspConfig first, so that default BCCSP is properly
// initialized prior to LoadByType.
mspConfig, err := msp.GetLocalMspConfig(conf.General.LocalMSPDir, conf.General.BCCSP, conf.General.LocalMSPID)
if err != nil {
logger.Panicf("Failed to get local msp config: %v", err)
}
typ := msp.ProviderTypeToString(msp.FABRIC)
opts, found := msp.Options[typ]
if !found {
logger.Panicf("MSP option for type %s is not found", typ)
}
localmsp, err := msp.New(opts, factory.GetDefault())
if err != nil {
logger.Panicf("Failed to load local MSP: %v", err)
}
if err = localmsp.Setup(mspConfig); err != nil {
logger.Panicf("Failed to setup local msp with config: %v", err)
}
return localmsp
}
//go:generate counterfeiter -o mocks/health_checker.go -fake-name HealthChecker . healthChecker
// healthChecker defines the contract for a health checker
type healthChecker interface {
RegisterChecker(component string, checker healthz.HealthChecker) error
}
func initializeMultichannelRegistrar(
bootstrapBlock *cb.Block,
ri *replicationInitiator,
clusterDialer *cluster.PredicateDialer,
srvConf comm.ServerConfig,
srv *comm.GRPCServer,
conf *localconfig.TopLevel,
signer identity.SignerSerializer,
metricsProvider metrics.Provider,
healthChecker healthChecker,
lf blockledger.Factory,
bccsp bccsp.BCCSP,
callbacks ...channelconfig.BundleActor,
) *multichannel.Registrar {
registrar := multichannel.NewRegistrar(*conf, lf, signer, metricsProvider, bccsp, callbacks...)
consenters := map[string]consensus.Consenter{}
var icr etcdraft.InactiveChainRegistry
if conf.General.BootstrapMethod == "file" || conf.General.BootstrapMethod == "none" {
if bootstrapBlock != nil && isClusterType(bootstrapBlock, bccsp) {
etcdConsenter := initializeEtcdraftConsenter(consenters, conf, lf, clusterDialer, bootstrapBlock, ri, srvConf, srv, registrar, metricsProvider, bccsp)
icr = etcdConsenter.InactiveChainRegistry
}
}
consenters["solo"] = solo.New()
var kafkaMetrics *kafka.Metrics
consenters["kafka"], kafkaMetrics = kafka.New(conf.Kafka, metricsProvider, healthChecker, icr, registrar.CreateChain)
	// Note: we pass a 'nil' channel here; we could pass a channel that
	// closes if we wished to clean up this routine on exit.
go kafkaMetrics.PollGoMetricsUntilStop(time.Minute, nil)
registrar.Initialize(consenters)
return registrar
}
func initializeEtcdraftConsenter(
consenters map[string]consensus.Consenter,
conf *localconfig.TopLevel,
lf blockledger.Factory,
clusterDialer *cluster.PredicateDialer,
bootstrapBlock *cb.Block,
ri *replicationInitiator,
srvConf comm.ServerConfig,
srv *comm.GRPCServer,
registrar *multichannel.Registrar,
metricsProvider metrics.Provider,
bccsp bccsp.BCCSP,
) *etcdraft.Consenter {
replicationRefreshInterval := conf.General.Cluster.ReplicationBackgroundRefreshInterval
if replicationRefreshInterval == 0 {
replicationRefreshInterval = defaultReplicationBackgroundRefreshInterval
}
systemChannelName, err := protoutil.GetChannelIDFromBlock(bootstrapBlock)
if err != nil {
ri.logger.Panicf("Failed extracting system channel name from bootstrap block: %v", err)
}
systemLedger, err := lf.GetOrCreate(systemChannelName)
if err != nil {
ri.logger.Panicf("Failed obtaining system channel (%s) ledger: %v", systemChannelName, err)
}
getConfigBlock := func() *cb.Block {
return multichannel.ConfigBlock(systemLedger)
}
exponentialSleep := exponentialDurationSeries(replicationBackgroundInitialRefreshInterval, replicationRefreshInterval)
ticker := newTicker(exponentialSleep)
icr := &inactiveChainReplicator{
logger: logger,
scheduleChan: ticker.C,
quitChan: make(chan struct{}),
replicator: ri,
chains2CreationCallbacks: make(map[string]chainCreation),
retrieveLastSysChannelConfigBlock: getConfigBlock,
registerChain: ri.registerChain,
}
// Use the inactiveChainReplicator as a channel lister, since it has knowledge
// of all inactive chains.
	// This is to prevent us from pulling the entire system chain when attempting
	// to enumerate the channels in the system.
ri.channelLister = icr
go icr.run()
raftConsenter := etcdraft.New(clusterDialer, conf, srvConf, srv, registrar, icr, metricsProvider, bccsp)
consenters["etcdraft"] = raftConsenter
return raftConsenter
}
func newOperationsSystem(ops localconfig.Operations, metrics localconfig.Metrics) *operations.System {
return operations.NewSystem(operations.Options{
Logger: flogging.MustGetLogger("orderer.operations"),
ListenAddress: ops.ListenAddress,
Metrics: operations.MetricsOptions{
Provider: metrics.Provider,
Statsd: &operations.Statsd{
Network: metrics.Statsd.Network,
Address: metrics.Statsd.Address,
WriteInterval: metrics.Statsd.WriteInterval,
Prefix: metrics.Statsd.Prefix,
},
},
TLS: operations.TLS{
Enabled: ops.TLS.Enabled,
CertFile: ops.TLS.Certificate,
KeyFile: ops.TLS.PrivateKey,
ClientCertRequired: ops.TLS.ClientAuthRequired,
ClientCACertFiles: ops.TLS.ClientRootCAs,
},
Version: metadata.Version,
})
}
// caMgr manages certificate authorities scoped by channel
type caManager struct {
sync.Mutex
appRootCAsByChain map[string][][]byte
ordererRootCAsByChain map[string][][]byte
clientRootCAs [][]byte
}
func (mgr *caManager) updateTrustedRoots(
cm channelconfig.Resources,
servers ...*comm.GRPCServer,
) {
mgr.Lock()
defer mgr.Unlock()
appRootCAs := [][]byte{}
ordererRootCAs := [][]byte{}
appOrgMSPs := make(map[string]struct{})
ordOrgMSPs := make(map[string]struct{})
if ac, ok := cm.ApplicationConfig(); ok {
		// loop through app orgs and build map of MSPIDs
for _, appOrg := range ac.Organizations() {
appOrgMSPs[appOrg.MSPID()] = struct{}{}
}
}
if ac, ok := cm.OrdererConfig(); ok {
		// loop through orderer orgs and build map of MSPIDs
for _, ordOrg := range ac.Organizations() {
ordOrgMSPs[ordOrg.MSPID()] = struct{}{}
}
}
if cc, ok := cm.ConsortiumsConfig(); ok {
for _, consortium := range cc.Consortiums() {
			// loop through consortium orgs and build map of MSPIDs
for _, consortiumOrg := range consortium.Organizations() {
appOrgMSPs[consortiumOrg.MSPID()] = struct{}{}
}
}
}
cid := cm.ConfigtxValidator().ChannelID()
logger.Debugf("updating root CAs for channel [%s]", cid)
msps, err := cm.MSPManager().GetMSPs()
if err != nil {
logger.Errorf("Error getting root CAs for channel %s (%s)", cid, err)
return
}
for k, v := range msps {
// check to see if this is a FABRIC MSP
if v.GetType() == msp.FABRIC {
for _, root := range v.GetTLSRootCerts() {
				// check to see if this is an app org MSP
if _, ok := appOrgMSPs[k]; ok {
logger.Debugf("adding app root CAs for MSP [%s]", k)
appRootCAs = append(appRootCAs, root)
}
				// check to see if this is an orderer org MSP
if _, ok := ordOrgMSPs[k]; ok {
logger.Debugf("adding orderer root CAs for MSP [%s]", k)
ordererRootCAs = append(ordererRootCAs, root)
}
}
for _, intermediate := range v.GetTLSIntermediateCerts() {
				// check to see if this is an app org MSP
if _, ok := appOrgMSPs[k]; ok {
logger.Debugf("adding app root CAs for MSP [%s]", k)
appRootCAs = append(appRootCAs, intermediate)
}
				// check to see if this is an orderer org MSP
if _, ok := ordOrgMSPs[k]; ok {
logger.Debugf("adding orderer root CAs for MSP [%s]", k)
ordererRootCAs = append(ordererRootCAs, intermediate)
}
}
}
}
mgr.appRootCAsByChain[cid] = appRootCAs
mgr.ordererRootCAsByChain[cid] = ordererRootCAs
// now iterate over all roots for all app and orderer chains
trustedRoots := [][]byte{}
for _, roots := range mgr.appRootCAsByChain {
trustedRoots = append(trustedRoots, roots...)
}
for _, roots := range mgr.ordererRootCAsByChain {
trustedRoots = append(trustedRoots, roots...)
}
// also need to append statically configured root certs
if len(mgr.clientRootCAs) > 0 {
trustedRoots = append(trustedRoots, mgr.clientRootCAs...)
}
// now update the client roots for the gRPC server
for _, srv := range servers {
err = srv.SetClientRootCAs(trustedRoots)
if err != nil {
msg := "Failed to update trusted roots for orderer from latest config " +
"block. This orderer may not be able to communicate " +
"with members of channel %s (%s)"
logger.Warningf(msg, cm.ConfigtxValidator().ChannelID(), err)
}
}
}
func (mgr *caManager) updateClusterDialer(
clusterDialer *cluster.PredicateDialer,
localClusterRootCAs [][]byte,
) {
mgr.Lock()
defer mgr.Unlock()
// Iterate over all orderer root CAs for all chains and add them
// to the root CAs
var clusterRootCAs [][]byte
for _, roots := range mgr.ordererRootCAsByChain {
clusterRootCAs = append(clusterRootCAs, roots...)
}
// Add the local root CAs too
clusterRootCAs = append(clusterRootCAs, localClusterRootCAs...)
	// Update the cluster dialer with the new root CAs
clusterDialer.UpdateRootCAs(clusterRootCAs)
}
func prettyPrintStruct(i interface{}) {
params := localconfig.Flatten(i)
var buffer bytes.Buffer
for i := range params {
buffer.WriteString("\n\t")
buffer.WriteString(params[i])
}
logger.Infof("Orderer config values:%s\n", buffer.String())
}
| [
"\"FABRIC_LOGGING_SPEC\"",
"\"FABRIC_LOGGING_FORMAT\""
]
| []
| [
"FABRIC_LOGGING_SPEC",
"FABRIC_LOGGING_FORMAT"
]
| [] | ["FABRIC_LOGGING_SPEC", "FABRIC_LOGGING_FORMAT"] | go | 2 | 0 | |
python-dsl/buck_parser/buck.py | # Copyright 2018-present Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, with_statement
import abc
import collections
import contextlib
import functools
import imp
import inspect
import json
import optparse
import os
import os.path
import platform
import re
import sys
import time
import traceback
import types
from pathlib import Path, PurePath
from select import select as _select
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Pattern,
Set,
Tuple,
TypeVar,
Union,
)
import pywatchman
from pywatchman import WatchmanError
from six import PY3, iteritems, itervalues, string_types
# On Python 2.6 and 2.7, use the iterator-based filter from Python 3
from six.moves import builtins, filter
from .deterministic_set import DeterministicSet
from .glob_internal import glob_internal
from .glob_watchman import SyncCookieState, glob_watchman
from .json_encoder import BuckJSONEncoder
from .module_whitelist import ImportWhitelistManager
from .profiler import Profiler, Tracer, emit_trace, scoped_trace, traced
from .select_support import SelectorList, SelectorValue
from .struct import create_struct_class, struct
from .util import (
Diagnostic,
cygwin_adjusted_path,
get_caller_frame,
is_in_dir,
is_special,
)
# When build files are executed, the functions in this file tagged with
# @provide_for_build will be provided in the build file's local symbol table.
# Those tagged with @provide_as_native_rule will be present unless
# explicitly disabled by parser.native_rules_enabled_in_build_files
#
# When these functions are called from a build file, they will be passed
# a keyword parameter, build_env, which is an object with information about
# the environment of the build file which is currently being processed.
# It contains the following attributes:
#
# "dirname" - The directory containing the build file.
#
# "base_path" - The base path of the build file.
#
# "cell_name" - The cell name the build file is in.
BUILD_FUNCTIONS = [] # type: List[Callable]
NATIVE_FUNCTIONS = [] # type: List[Callable]
# Wait this many seconds on recv() or send() in the pywatchman client
# if not otherwise specified in .buckconfig
DEFAULT_WATCHMAN_QUERY_TIMEOUT = 60.0 # type: float
# Globals that should not be copied from one module into another
_HIDDEN_GLOBALS = {"include_defs", "load"} # type: Set[str]
ORIGINAL_IMPORT = builtins.__import__
_LOAD_TARGET_PATH_RE = re.compile(
r"^(?P<root>(?P<cell>@?[\w\-.]+)?//)?(?P<package>.*):(?P<target>.*)$"
) # type: Pattern[str]
# matches anything equivalent to recursive glob on all dirs
# e.g. "**/", "*/**/", "*/*/**/"
_RECURSIVE_GLOB_PATTERN = re.compile(r"^(\*/)*\*\*/")  # type: Pattern[str]
class AbstractContext(object):
"""Superclass of execution contexts."""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def includes(self):
# type: () -> Set[str]
raise NotImplementedError()
@abc.abstractproperty
def used_configs(self):
# type: () -> Dict[str, Dict[str, str]]
raise NotImplementedError()
@abc.abstractproperty
def used_env_vars(self):
# type: () -> Dict[str, str]
raise NotImplementedError()
@abc.abstractproperty
def diagnostics(self):
# type: () -> List[Diagnostic]
raise NotImplementedError()
def merge(self, other):
# type: (AbstractContext) -> None
"""Merge the context of an included file into the current context.
:param AbstractContext other: the include context to merge.
:rtype: None
"""
self.includes.update(other.includes)
self.diagnostics.extend(other.diagnostics)
self.used_configs.update(other.used_configs)
self.used_env_vars.update(other.used_env_vars)
class BuildFileContext(AbstractContext):
"""The build context used when processing a build file."""
def __init__(
self,
project_root,
base_path,
path,
dirname,
cell_name,
allow_empty_globs,
ignore_paths,
watchman_client,
watchman_watch_root,
watchman_project_prefix,
sync_cookie_state,
watchman_glob_stat_results,
watchman_use_glob_generator,
implicit_package_symbols,
):
self.globals = {}
self._includes = set()
self._used_configs = collections.defaultdict(dict)
self._used_env_vars = {}
self._diagnostics = []
self.rules = {}
self.project_root = project_root
self.base_path = base_path
self.path = path
self.cell_name = cell_name
self.dirname = dirname
self.allow_empty_globs = allow_empty_globs
self.ignore_paths = ignore_paths
self.watchman_client = watchman_client
self.watchman_watch_root = watchman_watch_root
self.watchman_project_prefix = watchman_project_prefix
self.sync_cookie_state = sync_cookie_state
self.watchman_glob_stat_results = watchman_glob_stat_results
self.watchman_use_glob_generator = watchman_use_glob_generator
self.implicit_package_symbols = implicit_package_symbols
@property
def includes(self):
return self._includes
@property
def used_configs(self):
return self._used_configs
@property
def used_env_vars(self):
return self._used_env_vars
@property
def diagnostics(self):
return self._diagnostics
class IncludeContext(AbstractContext):
"""The build context used when processing an include."""
def __init__(self, cell_name, path):
# type: (str, str) -> None
"""
:param cell_name: a cell name of the current context. Note that this cell name can be
different from the one BUCK file is evaluated in, since it can load extension files
from other cells, which should resolve their loads relative to their own location.
"""
self.cell_name = cell_name
self.path = path
self.globals = {}
self._includes = set()
self._used_configs = collections.defaultdict(dict)
self._used_env_vars = {}
self._diagnostics = []
@property
def includes(self):
return self._includes
@property
def used_configs(self):
return self._used_configs
@property
def used_env_vars(self):
return self._used_env_vars
@property
def diagnostics(self):
return self._diagnostics
# Generic context type that should be used in places where return and parameter
# types are the same but could be either of the concrete contexts.
_GCT = TypeVar("_GCT", IncludeContext, BuildFileContext)
LoadStatement = Dict[str, Union[str, Dict[str, str]]]
BuildInclude = collections.namedtuple("BuildInclude", ["cell_name", "path"])
class LazyBuildEnvPartial(object):
"""Pairs a function with a build environment in which it will be executed.
    Note that while the function is specified via the constructor, the build
    environment must be assigned after construction, to whichever build
    environment is currently in use.
To call the function with its build environment, use the invoke() method of
this class, which will forward the arguments from invoke() to the
underlying function.
"""
def __init__(self, func):
# type: (Callable) -> None
self.func = func
self.build_env = None
def invoke(self, *args, **kwargs):
"""Invokes the bound function injecting 'build_env' into **kwargs."""
updated_kwargs = kwargs.copy()
updated_kwargs.update({"build_env": self.build_env})
try:
return self.func(*args, **updated_kwargs)
except TypeError:
missing_args, extra_args = get_mismatched_args(
self.func, args, updated_kwargs
)
if missing_args or extra_args:
name = "[missing]"
if "name" in updated_kwargs:
name = updated_kwargs["name"]
elif len(args) > 0:
# Optimistically hope that name is the first arg. It generally is...
name = args[0]
raise IncorrectArgumentsException(
self.func.func_name, name, missing_args, extra_args
)
raise
HostInfoOs = collections.namedtuple(
"HostInfoOs", ["is_linux", "is_macos", "is_windows", "is_freebsd", "is_unknown"]
)
HostInfoArch = collections.namedtuple(
"HostInfoArch",
[
"is_aarch64",
"is_arm",
"is_armeb",
"is_i386",
"is_mips",
"is_mips64",
"is_mipsel",
"is_mipsel64",
"is_powerpc",
"is_ppc64",
"is_unknown",
"is_x86_64",
],
)
HostInfo = collections.namedtuple("HostInfo", ["os", "arch"])
__supported_oses = {
"darwin": "macos",
"windows": "windows",
"linux": "linux",
"freebsd": "freebsd",
} # type: Dict[str, str]
# Pulled from com.facebook.buck.util.environment.Architecture.java as
# possible values. amd64 and arm64 are remapped, but they may not
# actually be present on most systems
__supported_archs = {
"aarch64": "aarch64",
"arm": "arm",
"armeb": "armeb",
"i386": "i386",
"mips": "mips",
"mips64": "mips64",
"mipsel": "mipsel",
"mipsel64": "mipsel64",
"powerpc": "powerpc",
"ppc64": "ppc64",
"unknown": "unknown",
"x86_64": "x86_64",
"amd64": "x86_64",
"arm64": "aarch64",
} # type: Dict[str, str]
def host_info(platform_system=platform.system, platform_machine=platform.machine):
host_arch = __supported_archs.get(platform_machine().lower(), "unknown")
host_os = __supported_oses.get(platform_system().lower(), "unknown")
return HostInfo(
os=HostInfoOs(
is_linux=(host_os == "linux"),
is_macos=(host_os == "macos"),
is_windows=(host_os == "windows"),
is_freebsd=(host_os == "freebsd"),
is_unknown=(host_os == "unknown"),
),
arch=HostInfoArch(
is_aarch64=(host_arch == "aarch64"),
is_arm=(host_arch == "arm"),
is_armeb=(host_arch == "armeb"),
is_i386=(host_arch == "i386"),
is_mips=(host_arch == "mips"),
is_mips64=(host_arch == "mips64"),
is_mipsel=(host_arch == "mipsel"),
is_mipsel64=(host_arch == "mipsel64"),
is_powerpc=(host_arch == "powerpc"),
is_ppc64=(host_arch == "ppc64"),
is_unknown=(host_arch == "unknown"),
is_x86_64=(host_arch == "x86_64"),
),
)
_cached_host_info = host_info()
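# Illustrative sketch, not used by Buck itself: host_info() takes the platform
# probes as parameters, so callers (e.g. tests) can inject fakes instead of
# relying on the real platform module. The "Linux"/"arm64" values are made up.
def _example_host_info_with_fakes():
    info = host_info(
        platform_system=lambda: "Linux",
        platform_machine=lambda: "arm64",  # remapped to aarch64 by __supported_archs
    )
    return info.os.is_linux and info.arch.is_aarch64  # -> True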
def get_mismatched_args(func, actual_args, actual_kwargs):
argspec = inspect.getargspec(func)
required_args = set()
all_acceptable_args = []
for i, arg in enumerate(argspec.args):
if i < (len(argspec.args) - len(argspec.defaults)):
required_args.add(arg)
all_acceptable_args.append(arg)
extra_kwargs = set(actual_kwargs) - set(all_acceptable_args)
for k in set(actual_kwargs) - extra_kwargs:
all_acceptable_args.remove(k)
not_supplied_args = all_acceptable_args[len(actual_args) :]
missing_args = [arg for arg in not_supplied_args if arg in required_args]
return missing_args, sorted(list(extra_kwargs))
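# Illustrative sketch, not used by Buck itself: get_mismatched_args() reports
# which required arguments are missing and which kwargs are unknown. The
# sample rule function and its arguments below are hypothetical.
def _example_get_mismatched_args():
    def sample_rule(name, srcs, visibility=None):
        pass
    missing, extra = get_mismatched_args(sample_rule, (), {"name": "foo", "bogus": 1})
    return missing == ["srcs"] and extra == ["bogus"]  # -> True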
class IncorrectArgumentsException(TypeError):
def __init__(self, func_name, name_arg, missing_args, extra_args):
self.missing_args = missing_args
self.extra_args = extra_args
message = "Incorrect arguments to %s with name %s:" % (func_name, name_arg)
if missing_args:
message += " Missing required args: %s" % (", ".join(missing_args),)
if extra_args:
message += " Extra unknown kwargs: %s" % (", ".join(extra_args),)
super(IncorrectArgumentsException, self).__init__(message)
class BuildFileFailError(Exception):
pass
def provide_as_native_rule(func):
# type: (Callable) -> Callable
NATIVE_FUNCTIONS.append(func)
return func
def provide_for_build(func):
# type: (Callable) -> Callable
BUILD_FUNCTIONS.append(func)
return func
def add_rule(rule, build_env):
# type: (Dict, BuildFileContext) -> None
"""Record a rule in the current context.
This should be invoked by rule functions generated by the Java code.
:param dict rule: dictionary of the rule's fields.
:param build_env: the current context.
"""
assert isinstance(
build_env, BuildFileContext
), "Cannot use `{}()` at the top-level of an included file.".format(
rule["buck.type"]
)
# Include the base path of the BUCK file so the reader consuming this
# output will know which BUCK file the rule came from.
if "name" not in rule:
raise ValueError("rules must contain the field 'name'. Found %s." % rule)
rule_name = rule["name"]
if not isinstance(rule_name, string_types):
raise ValueError("rules 'name' field must be a string. Found %s." % rule_name)
if rule_name in build_env.rules:
raise ValueError(
"Duplicate rule definition '%s' found. Found %s and %s"
% (rule_name, rule, build_env.rules[rule_name])
)
rule["buck.base_path"] = build_env.base_path
build_env.rules[rule_name] = rule
@traced(stats_key="Glob")
def glob(
includes, excludes=None, include_dotfiles=False, build_env=None, search_base=None
):
# type: (List[str], Optional[List[str]], bool, BuildFileContext, str) -> List[str]
if excludes is None:
excludes = []
assert isinstance(
build_env, BuildFileContext
), "Cannot use `glob()` at the top-level of an included file."
# Ensure the user passes lists of strings rather than just a string.
assert not isinstance(
includes, string_types
), "The first argument to glob() must be a list of strings."
assert not isinstance(
excludes, string_types
), "The excludes argument must be a list of strings."
if search_base is None:
search_base = Path(build_env.dirname)
if build_env.dirname == build_env.project_root and any(
_RECURSIVE_GLOB_PATTERN.match(pattern) for pattern in includes
):
fail(
"Recursive globs are prohibited at top-level directory", build_env=build_env
)
results = None
if not includes:
results = []
elif build_env.watchman_client:
results = glob_watchman(
includes,
excludes,
include_dotfiles,
build_env.base_path,
build_env.watchman_watch_root,
build_env.watchman_project_prefix,
build_env.sync_cookie_state,
build_env.watchman_client,
build_env.diagnostics,
build_env.watchman_glob_stat_results,
build_env.watchman_use_glob_generator,
)
if results:
            # glob should consistently return paths of type str, but the
            # watchman client returns unicode strings in Python 2 instead.
            # An extra check is added to make this conversion resilient to
            # watchman API changes.
results = [
res.encode("utf-8") if not isinstance(res, str) else res
for res in results
]
if results is None:
results = glob_internal(
includes,
excludes,
build_env.ignore_paths,
include_dotfiles,
search_base,
build_env.project_root,
)
assert build_env.allow_empty_globs or results, (
"glob(includes={includes}, excludes={excludes}, include_dotfiles={include_dotfiles}) "
+ "returned no results. (allow_empty_globs is set to false in the Buck "
+ "configuration)"
).format(includes=includes, excludes=excludes, include_dotfiles=include_dotfiles)
return results
def merge_maps(*header_maps):
result = {}
for header_map in header_maps:
for key in header_map:
if key in result and result[key] != header_map[key]:
assert False, (
"Conflicting header files in header search paths. "
+ '"%s" maps to both "%s" and "%s".'
% (key, result[key], header_map[key])
)
result[key] = header_map[key]
return result
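# Illustrative sketch, not used by Buck itself: merge_maps() unions header maps
# and asserts when the same key maps to two different paths. The header names
# and paths below are made up.
def _example_merge_maps():
    merged = merge_maps({"a.h": "lib1/a.h"}, {"b.h": "lib2/b.h"})
    return merged == {"a.h": "lib1/a.h", "b.h": "lib2/b.h"}  # -> True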
def single_subdir_glob(
dirpath, glob_pattern, excludes=None, prefix=None, build_env=None, search_base=None
):
if excludes is None:
excludes = []
results = {}
files = glob(
[os.path.join(dirpath, glob_pattern)],
excludes=excludes,
build_env=build_env,
search_base=search_base,
)
for f in files:
if dirpath:
key = f[len(dirpath) + 1 :]
else:
key = f
if prefix:
# `f` is a string, but we need to create correct platform-specific Path.
# This method is called by tests for both posix style paths and
# windows style paths.
# When running tests, search_base is always set
# and happens to have the correct platform-specific Path type.
cls = PurePath if not search_base else type(search_base)
key = str(cls(prefix) / cls(key))
results[key] = f
return results
def subdir_glob(
glob_specs, excludes=None, prefix=None, build_env=None, search_base=None
):
"""
Given a list of tuples, the form of (relative-sub-directory, glob-pattern),
return a dict of sub-directory relative paths to full paths. Useful for
defining header maps for C/C++ libraries which should be relative the given
sub-directory.
    If prefix is not None, it is prepended to each key in the dictionary.
"""
if excludes is None:
excludes = []
results = []
for dirpath, glob_pattern in glob_specs:
results.append(
single_subdir_glob(
dirpath, glob_pattern, excludes, prefix, build_env, search_base
)
)
return merge_maps(*results)
def _get_package_name(func_name, build_env=None):
"""The name of the package being evaluated.
For example, in the BUCK file "some/package/BUCK", its value will be
"some/package".
If the BUCK file calls a function defined in a *.bzl file, package_name()
will return the package of the calling BUCK file. For example, if there is
a BUCK file at "some/package/BUCK" and "some/other/package/ext.bzl"
extension file, when BUCK file calls a function inside of ext.bzl file
it will still return "some/package" and not "some/other/package".
This function is intended to be used from within a build defs file that
likely contains macros that could be called from any build file.
Such macros may need to know the base path of the file in which they
are defining new build rules.
:return: a string, such as "java/com/facebook". Note there is no
trailing slash. The return value will be "" if called from
the build file in the root of the project.
:rtype: str
"""
assert isinstance(build_env, BuildFileContext), (
"Cannot use `%s()` at the top-level of an included file." % func_name
)
return build_env.base_path
@provide_for_build
def get_base_path(build_env=None):
"""Get the base path to the build file that was initially evaluated.
This function is intended to be used from within a build defs file that
likely contains macros that could be called from any build file.
Such macros may need to know the base path of the file in which they
are defining new build rules.
:return: a string, such as "java/com/facebook". Note there is no
trailing slash. The return value will be "" if called from
the build file in the root of the project.
:rtype: str
"""
return _get_package_name("get_base_path", build_env=build_env)
@provide_for_build
def package_name(build_env=None):
"""The name of the package being evaluated.
For example, in the BUCK file "some/package/BUCK", its value will be
"some/package".
If the BUCK file calls a function defined in a *.bzl file, package_name()
will return the package of the calling BUCK file. For example, if there is
a BUCK file at "some/package/BUCK" and "some/other/package/ext.bzl"
extension file, when BUCK file calls a function inside of ext.bzl file
it will still return "some/package" and not "some/other/package".
This function is intended to be used from within a build defs file that
likely contains macros that could be called from any build file.
Such macros may need to know the base path of the file in which they
are defining new build rules.
:return: a string, such as "java/com/facebook". Note there is no
trailing slash. The return value will be "" if called from
the build file in the root of the project.
:rtype: str
"""
return _get_package_name("package_name", build_env=build_env)
@provide_for_build
def fail(message, attr=None, build_env=None):
"""Raises a parse error.
:param message: Error message to display for the user.
The object is converted to a string.
:param attr: Optional name of the attribute that caused the error.
"""
attribute_prefix = "attribute " + attr + ": " if attr is not None else ""
msg = attribute_prefix + str(message)
raise BuildFileFailError(msg)
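# Illustrative sketch, not used by Buck itself: fail() raises BuildFileFailError,
# optionally prefixing the message with the offending attribute name. The
# attribute name and message below are made up.
def _example_fail_message():
    try:
        fail("must not be empty", attr="srcs")
    except BuildFileFailError as e:
        return str(e)  # -> "attribute srcs: must not be empty"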
@provide_for_build
def get_cell_name(build_env=None):
"""Get the cell name of the build file that was initially evaluated.
This function is intended to be used from within a build defs file that
likely contains macros that could be called from any build file.
Such macros may need to know the base path of the file in which they
are defining new build rules.
:return: a string, such as "cell". The return value will be "" if
the build file does not have a cell
:rtype: str
"""
assert isinstance(
build_env, BuildFileContext
), "Cannot use `get_cell_name()` at the top-level of an included file."
return build_env.cell_name
@provide_for_build
def select(conditions, no_match_message=None, build_env=None):
"""Allows to provide a configurable value for an attribute"""
return SelectorList([SelectorValue(conditions, no_match_message)])
@provide_as_native_rule
def repository_name(build_env=None):
"""
Get the repository (cell) name of the build file that was initially
evaluated.
This function is intended to be used from within a build defs file that
likely contains macros that could be called from any build file.
Such macros may need to know the base path of the file in which they
are defining new build rules.
:return: a string, such as "@cell". The return value will be "@" if
the build file is in the main (standalone) repository.
:rtype: str
"""
assert isinstance(
build_env, BuildFileContext
), "Cannot use `repository_name()` at the top-level of an included file."
return "@" + build_env.cell_name
@provide_as_native_rule
def rule_exists(name, build_env=None):
"""
:param name: name of the build rule
:param build_env: current build environment
:return: True if a rule with provided name has already been defined in
current file.
"""
assert isinstance(
build_env, BuildFileContext
), "Cannot use `rule_exists()` at the top-level of an included file."
return name in build_env.rules
def flatten_list_of_dicts(list_of_dicts):
"""Flatten the given list of dictionaries by merging l[1:] onto
l[0], one at a time. Key/Value pairs which appear in later list entries
will override those that appear in earlier entries
:param list_of_dicts: the list of dict objects to flatten.
:return: a single dict containing the flattened list
"""
return_value = {}
for d in list_of_dicts:
for k, v in iteritems(d):
return_value[k] = v
return return_value
@provide_for_build
def flatten_dicts(*args, **_):
"""Flatten the given list of dictionaries by merging args[1:] onto
args[0], one at a time.
:param *args: the list of dict objects to flatten.
:param **_: ignore the build_env kwarg
:return: a single dict containing the flattened list
"""
return flatten_list_of_dicts(args)
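# Illustrative sketch, not used by Buck itself: when keys collide, dictionaries
# later in the argument list win. The keys and values below are made up.
def _example_flatten_dicts():
    return flatten_dicts({"x": 1, "y": 2}, {"y": 3}) == {"x": 1, "y": 3}  # -> True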
@provide_for_build
def depset(elements, build_env=None):
"""Creates an instance of sets with deterministic iteration order.
:param elements: the list of elements constituting the returned depset.
:rtype: DeterministicSet
"""
return DeterministicSet(elements)
GENDEPS_SIGNATURE = re.compile(
r"^#@# GENERATED FILE: DO NOT MODIFY ([a-f0-9]{40}) #@#\n$"
)
class BuildFileProcessor(object):
"""Handles the processing of a single build file.
:type _current_build_env: AbstractContext | None
"""
SAFE_MODULES_CONFIG = {
"os": ["environ", "getenv", "path", "sep", "pathsep", "linesep"],
"os.path": [
"basename",
"commonprefix",
"dirname",
"isabs",
"join",
"normcase",
"relpath",
"split",
"splitdrive",
"splitext",
"sep",
"pathsep",
],
"pipes": ["quote"],
}
def __init__(
self,
project_root,
cell_roots,
cell_name,
build_file_name,
allow_empty_globs,
watchman_client,
watchman_glob_stat_results,
watchman_use_glob_generator,
project_import_whitelist=None,
implicit_includes=None,
extra_funcs=None,
configs=None,
env_vars=None,
ignore_paths=None,
disable_implicit_native_rules=False,
warn_about_deprecated_syntax=True,
):
if project_import_whitelist is None:
project_import_whitelist = []
if implicit_includes is None:
implicit_includes = []
if extra_funcs is None:
extra_funcs = []
if configs is None:
configs = {}
if env_vars is None:
env_vars = {}
if ignore_paths is None:
ignore_paths = []
self._include_cache = {}
self._current_build_env = None
self._sync_cookie_state = SyncCookieState()
self._project_root = project_root
self._cell_roots = cell_roots
self._cell_name = cell_name
self._build_file_name = build_file_name
self._implicit_includes = implicit_includes
self._allow_empty_globs = allow_empty_globs
self._watchman_client = watchman_client
self._watchman_glob_stat_results = watchman_glob_stat_results
self._watchman_use_glob_generator = watchman_use_glob_generator
self._configs = configs
self._env_vars = env_vars
self._ignore_paths = ignore_paths
self._disable_implicit_native_rules = disable_implicit_native_rules
self._warn_about_deprecated_syntax = warn_about_deprecated_syntax
lazy_global_functions = {}
lazy_native_functions = {}
for func in BUILD_FUNCTIONS + extra_funcs:
func_with_env = LazyBuildEnvPartial(func)
lazy_global_functions[func.__name__] = func_with_env
for func in NATIVE_FUNCTIONS:
func_with_env = LazyBuildEnvPartial(func)
lazy_native_functions[func.__name__] = func_with_env
self._global_functions = lazy_global_functions
self._native_functions = lazy_native_functions
self._native_module_class_for_extension = self._create_native_module_class(
self._global_functions, self._native_functions
)
self._native_module_class_for_build_file = self._create_native_module_class(
self._global_functions,
[] if self._disable_implicit_native_rules else self._native_functions,
)
self._import_whitelist_manager = ImportWhitelistManager(
import_whitelist=self._create_import_whitelist(project_import_whitelist),
safe_modules_config=self.SAFE_MODULES_CONFIG,
path_predicate=lambda path: is_in_dir(path, self._project_root),
)
# Set of helpers callable from the child environment.
self._default_globals_for_extension = self._create_default_globals(False, False)
self._default_globals_for_implicit_include = self._create_default_globals(
False, True
)
self._default_globals_for_build_file = self._create_default_globals(True, False)
def _create_default_globals(self, is_build_file, is_implicit_include):
        # type: (bool, bool) -> Dict[str, Callable]
return {
"include_defs": functools.partial(self._include_defs, is_implicit_include),
"add_build_file_dep": self._add_build_file_dep,
"read_config": self._read_config,
"implicit_package_symbol": self._implicit_package_symbol,
"allow_unsafe_import": self._import_whitelist_manager.allow_unsafe_import,
"glob": self._glob,
"subdir_glob": self._subdir_glob,
"load": functools.partial(self._load, is_implicit_include),
"struct": struct,
"provider": self._provider,
"host_info": self._host_info,
"native": self._create_native_module(is_build_file=is_build_file),
}
def _create_native_module(self, is_build_file):
"""
Creates a native module exposing built-in Buck rules.
This module allows clients to refer to built-in Buck rules using
"native.<native_rule>" syntax in their build files. For example,
"native.java_library(...)" will use a native Java library rule.
:return: 'native' module struct.
"""
native_globals = {}
self._install_builtins(native_globals, force_native_rules=not is_build_file)
assert "glob" not in native_globals
assert "host_info" not in native_globals
assert "implicit_package_symbol" not in native_globals
assert "read_config" not in native_globals
native_globals["glob"] = self._glob
native_globals["host_info"] = self._host_info
native_globals["implicit_package_symbol"] = self._implicit_package_symbol
native_globals["read_config"] = self._read_config
return (
self._native_module_class_for_build_file(**native_globals)
if is_build_file
else self._native_module_class_for_extension(**native_globals)
)
@staticmethod
def _create_native_module_class(global_functions, native_functions):
"""
Creates a native module class.
:return: namedtuple instance for native module
"""
return collections.namedtuple(
"native",
list(global_functions)
+ list(native_functions)
+ ["glob", "host_info", "read_config", "implicit_package_symbol"],
)
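    # Illustrative sketch of the namedtuple-as-module idea (hypothetical names,
    # not part of this file):
    #
    #   native_cls = collections.namedtuple("native", ["glob", "host_info"])
    #   native = native_cls(glob=lambda *a, **kw: [], host_info=lambda: {})
    #   native.glob(["*.java"])  # attribute access, read-only like a module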
def _wrap_env_var_read(self, read, real):
"""
Return wrapper around function that reads an environment variable so
that the read is recorded.
"""
@functools.wraps(real)
def wrapper(varname, *arg, **kwargs):
self._record_env_var(varname, read(varname))
return real(varname, *arg, **kwargs)
# Save the real function for restoration.
wrapper._real = real
return wrapper
@contextlib.contextmanager
def _with_env_interceptor(self, read, obj, *attrs):
"""
        Wrap each function found at `obj.<attr>` (for the given attrs), which
        reads an environment variable, in a new function that records the read.
"""
orig = []
for attr in attrs:
real = getattr(obj, attr)
wrapped = self._wrap_env_var_read(read, real)
setattr(obj, attr, wrapped)
orig.append((attr, real))
try:
yield
finally:
for attr, real in orig:
setattr(obj, attr, real)
@contextlib.contextmanager
def with_env_interceptors(self):
"""
Install environment variable read interceptors into all known ways that
a build file can access the environment.
"""
# Use a copy of the env to provide a function to get at the low-level
# environment. The wrappers will use this when recording the env var.
read = dict(os.environ).get
# Install interceptors into the main ways a user can read the env.
with self._with_env_interceptor(
read, os.environ, "__contains__", "__getitem__", "get"
):
yield
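    # Minimal standalone sketch of the interception pattern (independent of this
    # class; names are illustrative only):
    #
    #   recorded = {}
    #   real_get = os.environ.get
    #   def recording_get(name, *args):
    #       recorded[name] = real_get(name)
    #       return real_get(name, *args)
    #   # setattr(os.environ, "get", recording_get)  # install; restore real_get later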
@staticmethod
def _merge_explicit_globals(src, dst, whitelist=None, whitelist_mapping=None):
# type: (types.ModuleType, Dict[str, Any], Tuple[str], Dict[str, str]) -> None
"""Copy explicitly requested global definitions from one globals dict to another.
If whitelist is set, only globals from the whitelist will be pulled in.
        If whitelist_mapping is set, globals are exported under the keyword's name. For
        example, foo="bar" means that a variable named "bar" in the imported file will be
        available as "foo" in the current file.
"""
if whitelist is not None:
for symbol in whitelist:
if symbol not in src.__dict__:
raise KeyError('"%s" is not defined in %s' % (symbol, src.__name__))
dst[symbol] = src.__dict__[symbol]
if whitelist_mapping is not None:
for exported_name, symbol in iteritems(whitelist_mapping):
if symbol not in src.__dict__:
raise KeyError('"%s" is not defined in %s' % (symbol, src.__name__))
dst[exported_name] = src.__dict__[symbol]
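    # Example of the mapping behavior (hypothetical module contents): if the
    # imported file defines bar = 1, then calling
    #   _merge_explicit_globals(mod, dst, whitelist_mapping={"foo": "bar"})
    # leaves dst["foo"] == 1; requesting a symbol the module lacks raises KeyError.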
def _merge_globals(self, mod, dst):
# type: (types.ModuleType, Dict[str, Any]) -> None
"""Copy the global definitions from one globals dict to another.
Ignores special attributes and attributes starting with '_', which
typically denote module-level private attributes.
"""
keys = getattr(mod, "__all__", mod.__dict__.keys())
for key in keys:
# Block copying modules unless they were specified in '__all__'
block_copying_module = not hasattr(mod, "__all__") and isinstance(
mod.__dict__[key], types.ModuleType
)
if (
not key.startswith("_")
and key not in _HIDDEN_GLOBALS
and not block_copying_module
):
dst[key] = mod.__dict__[key]
def _update_functions(self, build_env):
"""
Updates the build functions to use the given build context when called.
"""
for function in itervalues(self._global_functions):
function.build_env = build_env
for function in itervalues(self._native_functions):
function.build_env = build_env
def _install_builtins(self, namespace, force_native_rules=False):
"""
Installs the build functions, by their name, into the given namespace.
"""
for name, function in iteritems(self._global_functions):
namespace[name] = function.invoke
if not self._disable_implicit_native_rules or force_native_rules:
for name, function in iteritems(self._native_functions):
namespace[name] = function.invoke
@contextlib.contextmanager
def with_builtins(self, namespace):
"""
Installs the build functions for the duration of a `with` block.
"""
original_namespace = namespace.copy()
self._install_builtins(namespace)
try:
yield
finally:
namespace.clear()
namespace.update(original_namespace)
def _resolve_include(self, name):
# type: (str) -> BuildInclude
"""Resolve the given include def name to a BuildInclude metadata."""
match = re.match(r"^([A-Za-z0-9_]*)//(.*)$", name)
if match is None:
raise ValueError(
"include_defs argument {} should be in the form of "
"//path or cellname//path".format(name)
)
cell_name = match.group(1)
relative_path = match.group(2)
if len(cell_name) > 0:
cell_root = self._cell_roots.get(cell_name)
if cell_root is None:
raise KeyError(
"include_defs argument {} references an unknown cell named {} "
"known cells: {!r}".format(name, cell_name, self._cell_roots)
)
return BuildInclude(
cell_name=cell_name,
path=os.path.normpath(os.path.join(cell_root, relative_path)),
)
else:
return BuildInclude(
cell_name=cell_name,
path=os.path.normpath(os.path.join(self._project_root, relative_path)),
)
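    # Resolution examples (cell roots are hypothetical):
    #   "//tools/DEFS"      -> BuildInclude(cell_name="", path=<project_root>/tools/DEFS)
    #   "other//tools/DEFS" -> BuildInclude(cell_name="other", path=<root of cell "other">/tools/DEFS)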
def _get_load_path(self, label):
# type: (str) -> BuildInclude
"""Resolve the given load function label to a BuildInclude metadata."""
match = _LOAD_TARGET_PATH_RE.match(label)
if match is None:
raise ValueError(
"load label {} should be in the form of "
"//path:file or cellname//path:file".format(label)
)
cell_name = match.group("cell")
if cell_name:
if cell_name.startswith("@"):
cell_name = cell_name[1:]
elif self._warn_about_deprecated_syntax:
self._emit_warning(
'{} has a load label "{}" that uses a deprecated cell format. '
'"{}" should instead be "@{}".'.format(
self._current_build_env.path, label, cell_name, cell_name
),
"load function",
)
else:
cell_name = self._current_build_env.cell_name
relative_path = match.group("package")
file_name = match.group("target")
label_root = match.group("root")
if not label_root:
# relative include. e.g. :foo.bzl
if "/" in file_name:
raise ValueError(
"Relative loads work only for files in the same directory. "
+ "Please use absolute label instead ([cell]//pkg[/pkg]:target)."
)
callee_dir = os.path.dirname(self._current_build_env.path)
return BuildInclude(
cell_name=cell_name,
path=os.path.normpath(os.path.join(callee_dir, file_name)),
)
elif cell_name:
cell_root = self._cell_roots.get(cell_name)
if cell_root is None:
raise KeyError(
"load label {} references an unknown cell named {} "
"known cells: {!r}".format(label, cell_name, self._cell_roots)
)
return BuildInclude(
cell_name=cell_name,
path=os.path.normpath(
os.path.join(cell_root, relative_path, file_name)
),
)
else:
return BuildInclude(
cell_name=cell_name,
path=os.path.normpath(
os.path.join(self._project_root, relative_path, file_name)
),
)
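    # Load label forms handled above (paths are illustrative):
    #   ":defs.bzl"            -> file in the same directory as the calling file
    #   "//pkg/sub:defs.bzl"   -> resolved within the current cell (the project root
    #                             when the current cell is unnamed)
    #   "@cell//pkg:defs.bzl"  -> resolved against that cell's configured root
    #   "cell//pkg:defs.bzl"   -> accepted, but may warn about the deprecated (no "@") form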
def _read_config(self, section, field, default=None):
# type: (str, str, Any) -> Any
"""
Lookup a setting from `.buckconfig`.
This method is meant to be installed into the globals of any files or
includes that we process.
"""
# Grab the current build context from the top of the stack.
build_env = self._current_build_env
# Lookup the value and record it in this build file's context.
key = section, field
value = self._configs.get(key)
if value is not None and not isinstance(value, str):
# Python 2 returns unicode values from parsed JSON configs, but
# only str types should be exposed to clients
value = value.encode("utf-8")
# replace raw values to avoid decoding for frequently used configs
self._configs[key] = value
build_env.used_configs[section][field] = value
# If no config setting was found, return the default.
if value is None:
return default
return value
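    # Typical use from a build file (assumes a [cxx] section exists in .buckconfig):
    #   compiler = read_config("cxx", "compiler", "gcc")
    # The read is recorded in build_env.used_configs so the parse result can report
    # which config values the file depended on.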
def _implicit_package_symbol(self, symbol, default=None):
# type: (str, Any) -> Any
"""
Gives access to a symbol that has been implicitly loaded for the package of the
build file that is currently being evaluated. If the symbol was not present,
`default` will be returned.
"""
build_env = self._current_build_env
return build_env.implicit_package_symbols.get(symbol, default)
def _glob(
self,
includes,
excludes=None,
include_dotfiles=False,
search_base=None,
exclude=None,
):
assert exclude is None or excludes is None, (
"Mixing 'exclude' and 'excludes' attributes is not allowed. Please replace your "
"exclude and excludes arguments with a single 'excludes = %r'."
% (exclude + excludes)
)
excludes = excludes or exclude
build_env = self._current_build_env # type: BuildFileContext
return glob(
includes,
excludes=excludes,
include_dotfiles=include_dotfiles,
search_base=search_base,
build_env=build_env,
)
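    # Typical use from a build file (file patterns are hypothetical):
    #   srcs = glob(["src/**/*.java"], excludes=["src/**/test/**"])
    # Passing both 'exclude' and 'excludes' is rejected by the assertion above.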
def _subdir_glob(self, glob_specs, excludes=None, prefix=None, search_base=None):
build_env = self._current_build_env
return subdir_glob(
glob_specs,
excludes=excludes,
prefix=prefix,
search_base=search_base,
build_env=build_env,
)
def _record_env_var(self, name, value):
# type: (str, Any) -> None
"""
Record a read of an environment variable.
This method is meant to wrap methods in `os.environ` when called from
any files or includes that we process.
"""
# Grab the current build context from the top of the stack.
build_env = self._current_build_env
# Lookup the value and record it in this build file's context.
build_env.used_env_vars[name] = value
def _called_from_project_file(self):
# type: () -> bool
"""
Returns true if the function was called from a project file.
"""
frame = get_caller_frame(skip=[__name__])
filename = inspect.getframeinfo(frame).filename
return is_in_dir(filename, self._project_root)
def _include_defs(self, is_implicit_include, name, namespace=None):
# type: (bool, str, Optional[str]) -> None
"""Pull the named include into the current caller's context.
This method is meant to be installed into the globals of any files or
includes that we process.
"""
# Grab the current build context from the top of the stack.
build_env = self._current_build_env
# Resolve the named include to its path and process it to get its
# build context and module.
build_include = self._resolve_include(name)
inner_env, mod = self._process_include(build_include, is_implicit_include)
# Look up the caller's stack frame and merge the include's globals
        # into its symbol table.
frame = get_caller_frame(skip=["_functools", __name__])
if namespace is not None:
# If using a fresh namespace, create a fresh module to populate.
fresh_module = imp.new_module(namespace)
fresh_module.__file__ = mod.__file__
self._merge_globals(mod, fresh_module.__dict__)
frame.f_globals[namespace] = fresh_module
else:
self._merge_globals(mod, frame.f_globals)
# Pull in the include's accounting of its own referenced includes
# into the current build context.
build_env.includes.add(build_include.path)
build_env.merge(inner_env)
def _load(self, is_implicit_include, name, *symbols, **symbol_kwargs):
# type: (bool, str, *str, **str) -> None
"""Pull the symbols from the named include into the current caller's context.
This method is meant to be installed into the globals of any files or
includes that we process.
"""
assert symbols or symbol_kwargs, "expected at least one symbol to load"
# Grab the current build context from the top of the stack.
build_env = self._current_build_env
# Resolve the named include to its path and process it to get its
# build context and module.
build_include = self._get_load_path(name)
inner_env, module = self._process_include(build_include, is_implicit_include)
# Look up the caller's stack frame and merge the include's globals
        # into its symbol table.
frame = get_caller_frame(skip=["_functools", __name__])
BuildFileProcessor._merge_explicit_globals(
module, frame.f_globals, symbols, symbol_kwargs
)
# Pull in the include's accounting of its own referenced includes
# into the current build context.
build_env.includes.add(build_include.path)
build_env.merge(inner_env)
def _load_package_implicit(self, build_env, package_implicit_load):
"""
Updates `build_env` to contain all symbols from `package_implicit_load`
Args:
build_env: The build environment on which to modify includes /
implicit_package_symbols properties
            package_implicit_load: A dictionary with "load_path", the first argument of
                                   a `load` statement, and "load_symbols", a dictionary
                                   that works like the **symbols argument of `load`
"""
# Resolve the named include to its path and process it to get its
# build context and module.
build_include = self._get_load_path(package_implicit_load["load_path"])
inner_env, module = self._process_include(build_include, True)
# Validate that symbols that are requested explicitly by config are present
# in the .bzl file
for key, value in iteritems(package_implicit_load["load_symbols"]):
try:
build_env.implicit_package_symbols[key] = getattr(module, value)
except AttributeError:
raise BuildFileFailError(
"Could not find symbol '{}' in implicitly loaded extension '{}'".format(
value, package_implicit_load["load_path"]
)
)
# Pull in the include's accounting of its own referenced includes
# into the current build context.
build_env.includes.add(build_include.path)
build_env.merge(inner_env)
@staticmethod
def _provider(doc="", fields=None):
# type: (str, Union[List[str], Dict[str, str]]) -> Callable
"""Creates a declared provider factory.
The return value of this function can be used to create "struct-like"
values. Example:
SomeInfo = provider()
def foo():
return 3
info = SomeInfo(x = 2, foo = foo)
print(info.x + info.foo()) # prints 5
Optional fields can be used to restrict the set of allowed fields.
Example:
SomeInfo = provider(fields=["data"])
info = SomeInfo(data="data") # valid
info = SomeInfo(foo="bar") # runtime exception
"""
if fields:
return create_struct_class(fields)
return struct
def _add_build_file_dep(self, name):
# type: (str) -> None
"""
Explicitly specify a dependency on an external file.
For instance, this can be used to specify a dependency on an external
executable that will be invoked, or some other external configuration
file.
"""
# Grab the current build context from the top of the stack.
build_env = self._current_build_env
cell_name, path = self._resolve_include(name)
build_env.includes.add(path)
@staticmethod
def _host_info():
return _cached_host_info
@contextlib.contextmanager
def _set_build_env(self, build_env):
# type: (AbstractContext) -> Iterator[None]
"""Set the given build context as the current context, unsetting it upon exit."""
old_env = self._current_build_env
self._current_build_env = build_env
self._update_functions(self._current_build_env)
try:
yield
finally:
self._current_build_env = old_env
self._update_functions(self._current_build_env)
def _emit_warning(self, message, source):
# type: (str, str) -> None
"""
Add a warning to the current build_env's diagnostics.
"""
if self._current_build_env is not None:
self._current_build_env.diagnostics.append(
Diagnostic(
message=message, level="warning", source=source, exception=None
)
)
@staticmethod
def _create_import_whitelist(project_import_whitelist):
# type: (List[str]) -> Set[str]
"""
Creates import whitelist by joining the global whitelist with the project specific one
defined in '.buckconfig'.
"""
global_whitelist = [
"copy",
"re",
"functools",
"itertools",
"json",
"hashlib",
"types",
"string",
"ast",
"__future__",
"collections",
"operator",
"fnmatch",
"copy_reg",
]
return set(global_whitelist + project_import_whitelist)
def _file_access_wrapper(self, real):
"""
        Return a wrapper around a function so that accessing a file produces a warning
        if the file is not a known dependency.
"""
@functools.wraps(real)
def wrapper(filename, *arg, **kwargs):
# Restore original 'open' because it is used by 'inspect.currentframe()' in
# '_called_from_project_file()'
with self._wrap_file_access(wrap=False):
if self._called_from_project_file():
path = os.path.abspath(filename)
if path not in self._current_build_env.includes:
dep_path = "//" + os.path.relpath(path, self._project_root)
warning_message = (
"Access to a non-tracked file detected! {0} is not a ".format(
path
)
+ "known dependency and it should be added using 'add_build_file_dep' "
+ "function before trying to access the file, e.g.\n"
+ "'add_build_file_dep('{0}')'\n".format(dep_path)
+ "The 'add_build_file_dep' function is documented at "
+ "https://buck.build/function/add_build_file_dep.html\n"
)
self._emit_warning(warning_message, "sandboxing")
return real(filename, *arg, **kwargs)
# Save the real function for restoration.
wrapper._real = real
return wrapper
@contextlib.contextmanager
def _wrap_fun_for_file_access(self, obj, attr, wrap=True):
"""
Wrap a function to check if accessed files are known dependencies.
"""
real = getattr(obj, attr)
if wrap:
# Don't wrap again
if not hasattr(real, "_real"):
wrapped = self._file_access_wrapper(real)
setattr(obj, attr, wrapped)
elif hasattr(real, "_real"):
# Restore real function if it was wrapped
setattr(obj, attr, real._real)
try:
yield
finally:
setattr(obj, attr, real)
def _wrap_file_access(self, wrap=True):
"""
        Wrap 'open' so that it checks whether accessed files are known dependencies.
        If 'wrap' is False, restore the original function instead.
"""
return self._wrap_fun_for_file_access(builtins, "open", wrap)
@contextlib.contextmanager
def _build_file_sandboxing(self):
"""
Creates a context that sandboxes build file processing.
"""
with self._wrap_file_access():
with self._import_whitelist_manager.allow_unsafe_import(False):
yield
@traced(stats_key="Process")
def _process(self, build_env, path, is_implicit_include, package_implicit_load):
# type: (_GCT, str, bool, Optional[LoadStatement]) -> Tuple[_GCT, types.ModuleType]
"""Process a build file or include at the given path.
:param build_env: context of the file to process.
:param path: target-like path to the file to process.
:param is_implicit_include: whether the file being processed is an implicit include, or was
included from an implicit include.
        :param package_implicit_load: if provided, a dictionary containing the path to
            load for this given package, and the symbols to load from that .bzl file.
:returns: build context (potentially different if retrieved from cache) and loaded module.
"""
if isinstance(build_env, IncludeContext):
default_globals = (
self._default_globals_for_implicit_include
if is_implicit_include
else self._default_globals_for_extension
)
else:
default_globals = self._default_globals_for_build_file
emit_trace(path)
# Install the build context for this input as the current context.
with self._set_build_env(build_env):
# Don't include implicit includes if the current file being
# processed is an implicit include
if not is_implicit_include:
for include in self._implicit_includes:
build_include = self._resolve_include(include)
inner_env, mod = self._process_include(build_include, True)
self._merge_globals(mod, default_globals)
build_env.includes.add(build_include.path)
build_env.merge(inner_env)
if package_implicit_load:
self._load_package_implicit(build_env, package_implicit_load)
# Build a new module for the given file, using the default globals
# created above.
module = imp.new_module(path)
module.__file__ = path
module.__dict__.update(default_globals)
# We don't open this file as binary, as we assume it's a textual source
# file.
with scoped_trace("IO", stats_key="IO"):
with self._wrap_file_access(wrap=False):
with open(path, "r") as f:
contents = f.read()
with scoped_trace("Compile", stats_key="Compile"):
# Enable absolute imports. This prevents the compiler from
# trying to do a relative import first, and warning that
# this module doesn't exist in sys.modules.
future_features = absolute_import.compiler_flag
code = compile(contents, path, "exec", future_features, 1)
# Execute code with build file sandboxing
with self._build_file_sandboxing():
exec(code, module.__dict__)
return build_env, module
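    # Per-file flow implemented above: pick the default globals, apply implicit
    # includes (unless this file is itself an implicit include), build a fresh
    # module, then compile and exec the source inside the sandbox.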
def _process_include(self, build_include, is_implicit_include):
# type: (BuildInclude, bool) -> Tuple[AbstractContext, types.ModuleType]
"""Process the include file at the given path.
:param build_include: build include metadata (cell_name and path).
:param is_implicit_include: whether the file being processed is an implicit include, or was
included from an implicit include.
"""
# First check the cache.
cached = self._include_cache.get(build_include.path)
if cached is not None:
return cached
build_env = IncludeContext(
cell_name=build_include.cell_name, path=build_include.path
)
build_env, mod = self._process(
build_env,
build_include.path,
is_implicit_include=is_implicit_include,
package_implicit_load=None,
)
self._include_cache[build_include.path] = build_env, mod
return build_env, mod
def _process_build_file(
self, watch_root, project_prefix, path, package_implicit_load
):
# type: (str, str, str, Optional[LoadStatement]) -> Tuple[BuildFileContext, types.ModuleType]
"""Process the build file at the given path."""
# Create the build file context, including the base path and directory
# name of the given path.
relative_path_to_build_file = os.path.relpath(path, self._project_root).replace(
"\\", "/"
)
len_suffix = -len(self._build_file_name) - 1
base_path = relative_path_to_build_file[:len_suffix]
dirname = os.path.dirname(path)
build_env = BuildFileContext(
self._project_root,
base_path,
path,
dirname,
self._cell_name,
self._allow_empty_globs,
self._ignore_paths,
self._watchman_client,
watch_root,
project_prefix,
self._sync_cookie_state,
self._watchman_glob_stat_results,
self._watchman_use_glob_generator,
{},
)
return self._process(
build_env,
path,
is_implicit_include=False,
package_implicit_load=package_implicit_load,
)
def process(
self, watch_root, project_prefix, path, diagnostics, package_implicit_load
):
# type: (str, Optional[str], str, List[Diagnostic], Optional[LoadStatement]) -> List[Dict[str, Any]]
"""Process a build file returning a dict of its rules and includes."""
build_env, mod = self._process_build_file(
watch_root,
project_prefix,
os.path.join(self._project_root, path),
package_implicit_load=package_implicit_load,
)
# Initialize the output object to a map of the parsed rules.
values = list(itervalues(build_env.rules))
# Add in tracked included files as a special meta rule.
values.append({"__includes": [path] + sorted(build_env.includes)})
# Add in tracked used config settings as a special meta rule.
values.append({"__configs": build_env.used_configs})
# Add in used environment variables as a special meta rule.
values.append({"__env": build_env.used_env_vars})
diagnostics.extend(build_env.diagnostics)
return values
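    # Shape of the returned list (values are hypothetical):
    #   [ {...parsed rule dicts...},
    #     {"__includes": ["pkg/BUCK", "/abs/path/DEFS"]},
    #     {"__configs": {"cxx": {"compiler": "clang"}}},
    #     {"__env": {"PATH": "/usr/bin"}} ]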
class InvalidSignatureError(Exception):
pass
def format_traceback(tb):
formatted = []
for entry in traceback.extract_tb(tb):
(filename, line_number, function_name, text) = entry
formatted.append(
{
"filename": filename,
"line_number": line_number,
"function_name": function_name,
"text": text,
}
)
return formatted
def format_exception_info(exception_info):
(exc_type, exc_value, exc_traceback) = exception_info
formatted = {
"type": exc_type.__name__,
"value": str(exc_value),
"traceback": format_traceback(exc_traceback),
}
if exc_type is SyntaxError:
formatted["filename"] = exc_value.filename
formatted["lineno"] = exc_value.lineno
formatted["offset"] = exc_value.offset
formatted["text"] = exc_value.text
return formatted
def encode_result(values, diagnostics, profile):
# type: (List[Dict[str, object]], List[Diagnostic], Optional[str]) -> str
result = {
"values": [
{k: v for k, v in iteritems(value) if v is not None} for value in values
]
}
json_encoder = BuckJSONEncoder()
if diagnostics:
encoded_diagnostics = []
for d in diagnostics:
encoded = {"message": d.message, "level": d.level, "source": d.source}
if d.exception:
encoded["exception"] = format_exception_info(d.exception)
encoded_diagnostics.append(encoded)
result["diagnostics"] = encoded_diagnostics
if profile is not None:
result["profile"] = profile
try:
return json_encoder.encode(result)
except Exception as e:
# Try again without the values
result["values"] = []
if "diagnostics" not in result:
result["diagnostics"] = []
result["diagnostics"].append(
{
"message": str(e),
"level": "fatal",
"source": "parse",
"exception": format_exception_info(sys.exc_info()),
}
)
return json_encoder.encode(result)
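# Example of the encoded payload (contents are hypothetical):
#   {"values": [...], "diagnostics": [{"message": "...", "level": "warning",
#    "source": "parse"}], "profile": "..."}
# If encoding fails, values are dropped and a "fatal" parse diagnostic is appended.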
def process_with_diagnostics(build_file_query, build_file_processor, to_parent):
start_time = time.time()
build_file = build_file_query.get("buildFile")
watch_root = build_file_query.get("watchRoot")
project_prefix = build_file_query.get("projectPrefix")
package_implicit_load = build_file_query.get("packageImplicitLoad")
build_file = cygwin_adjusted_path(build_file)
watch_root = cygwin_adjusted_path(watch_root)
if project_prefix is not None:
project_prefix = cygwin_adjusted_path(project_prefix)
diagnostics = []
values = []
try:
values = build_file_processor.process(
watch_root,
project_prefix,
build_file,
diagnostics=diagnostics,
package_implicit_load=package_implicit_load,
)
except BaseException as e:
        # sys.exit() should not emit diagnostics.
        if not isinstance(e, SystemExit):
if isinstance(e, WatchmanError):
source = "watchman"
message = e.msg
else:
source = "parse"
message = str(e)
diagnostics.append(
Diagnostic(
message=message,
level="fatal",
source=source,
exception=sys.exc_info(),
)
)
raise
finally:
java_process_send_result(to_parent, values, diagnostics, None)
end_time = time.time()
return end_time - start_time
def java_process_send_result(to_parent, values, diagnostics, profile_result):
"""Sends result to the Java process"""
data = encode_result(values, diagnostics, profile_result)
if PY3:
# in Python 3 write expects bytes instead of string
data = data.encode("utf-8")
to_parent.write(data)
to_parent.flush()
def silent_excepthook(exctype, value, tb):
# We already handle all exceptions by writing them to the parent, so
# no need to dump them again to stderr.
pass
def _optparse_store_kv(option, opt_str, value, parser):
"""Optparse option callback which parses input as K=V, and store into dictionary.
:param optparse.Option option: Option instance
:param str opt_str: string representation of option flag
:param str value: argument value
:param optparse.OptionParser parser: parser instance
"""
result = value.split("=", 1)
if len(result) != 2:
raise optparse.OptionError(
"Expected argument of to be in the form of X=Y".format(opt_str), option
)
(k, v) = result
# Get or create the dictionary
dest_dict = getattr(parser.values, option.dest)
if dest_dict is None:
dest_dict = {}
setattr(parser.values, option.dest, dest_dict)
dest_dict[k] = v
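# Example (hypothetical path): passing "--cell_root foo=/repo/foo" on the command
# line (as wired up in main() below) results in options.cell_roots == {"foo": "/repo/foo"}.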
# Inexplicably, this script appears to run faster when the arguments passed
# into it are absolute paths. However, we want the "buck.base_path" property
# of each rule to be printed out to be the base path of the build target that
# identifies the rule. That means that when parsing a BUCK file, we must know
# its path relative to the root of the project to produce the base path.
#
# To that end, the first argument to this script must be an absolute path to
# the project root. It must be followed by one or more absolute paths to
# BUCK files under the project root. If no paths to BUCK files are
# specified, then it will traverse the project root for BUCK files, excluding
# directories of generated files produced by Buck.
#
# All of the build rules that are parsed from the BUCK files will be printed
# to stdout encoded in JSON. That means that printing out other information
# for debugging purposes will break the JSON encoding, so be careful!
def main():
# Our parent expects to read JSON from our stdout, so if anyone
# uses print, buck will complain with a helpful "but I wanted an
# array!" message and quit. Redirect stdout to stderr so that
# doesn't happen. Actually dup2 the file handle so that writing
# to file descriptor 1, os.system, and so on work as expected too.
    # 'w' mode is used instead of 'a' because of https://bugs.python.org/issue27805
to_parent = os.fdopen(os.dup(sys.stdout.fileno()), "wb")
os.dup2(sys.stderr.fileno(), sys.stdout.fileno())
parser = optparse.OptionParser()
parser.add_option(
"--project_root", action="store", type="string", dest="project_root"
)
parser.add_option(
"--cell_root",
action="callback",
type="string",
dest="cell_roots",
metavar="NAME=PATH",
help="Cell roots that can be referenced by includes.",
callback=_optparse_store_kv,
default={},
)
parser.add_option("--cell_name", action="store", type="string", dest="cell_name")
parser.add_option(
"--build_file_name", action="store", type="string", dest="build_file_name"
)
parser.add_option(
"--allow_empty_globs",
action="store_true",
dest="allow_empty_globs",
help="Tells the parser not to raise an error when glob returns no results.",
)
parser.add_option(
"--use_watchman_glob",
action="store_true",
dest="use_watchman_glob",
help="Invokes `watchman query` to get lists of files instead of globbing in-process.",
)
parser.add_option(
"--watchman_use_glob_generator",
action="store_true",
dest="watchman_use_glob_generator",
help="Uses Watchman glob generator to speed queries",
)
parser.add_option(
"--watchman_glob_stat_results",
action="store_true",
dest="watchman_glob_stat_results",
help="Invokes `stat()` to sanity check result of `watchman query`.",
)
parser.add_option(
"--watchman_socket_path",
action="store",
type="string",
dest="watchman_socket_path",
help="Path to Unix domain socket/named pipe as returned by `watchman get-sockname`.",
)
parser.add_option(
"--watchman_query_timeout_ms",
action="store",
type="int",
dest="watchman_query_timeout_ms",
help="Maximum time in milliseconds to wait for watchman query to respond.",
)
parser.add_option("--include", action="append", dest="include")
parser.add_option("--config", help="BuckConfig settings available at parse time.")
parser.add_option("--ignore_paths", help="Paths that should be ignored.")
parser.add_option(
"--quiet",
action="store_true",
dest="quiet",
help="Stifles exception backtraces printed to stderr during parsing.",
)
parser.add_option(
"--profile", action="store_true", help="Profile every buck file execution"
)
parser.add_option(
"--build_file_import_whitelist",
action="append",
dest="build_file_import_whitelist",
)
parser.add_option(
"--disable_implicit_native_rules",
action="store_true",
help="Do not allow native rules in build files, only included ones",
)
parser.add_option(
"--warn_about_deprecated_syntax",
action="store_true",
help="Warn about deprecated syntax usage.",
)
(options, args) = parser.parse_args()
# Even though project_root is absolute path, it may not be concise. For
# example, it might be like "C:\project\.\rule".
#
# Under cygwin, the project root will be invoked from buck as C:\path, but
# the cygwin python uses UNIX-style paths. They can be converted using
# cygpath, which is necessary because abspath will treat C:\path as a
# relative path.
options.project_root = cygwin_adjusted_path(options.project_root)
project_root = os.path.abspath(options.project_root)
cell_roots = {
k: os.path.abspath(cygwin_adjusted_path(v))
for k, v in iteritems(options.cell_roots)
}
watchman_client = None
if options.use_watchman_glob:
client_args = {"sendEncoding": "json", "recvEncoding": "json"}
if options.watchman_query_timeout_ms is not None:
# pywatchman expects a timeout as a nonnegative floating-point
# value in seconds.
client_args["timeout"] = max(
0.0, options.watchman_query_timeout_ms / 1000.0
)
else:
client_args["timeout"] = DEFAULT_WATCHMAN_QUERY_TIMEOUT
if options.watchman_socket_path is not None:
client_args["sockpath"] = options.watchman_socket_path
client_args["transport"] = "local"
watchman_client = pywatchman.client(**client_args)
configs = {}
if options.config is not None:
with open(options.config, "rb") as f:
for section, contents in iteritems(json.load(f)):
for field, value in iteritems(contents):
configs[(section, field)] = value
ignore_paths = []
if options.ignore_paths is not None:
with open(options.ignore_paths, "rb") as f:
ignore_paths = [make_glob(i) for i in json.load(f)]
build_file_processor = BuildFileProcessor(
project_root,
cell_roots,
options.cell_name,
options.build_file_name,
options.allow_empty_globs,
watchman_client,
options.watchman_glob_stat_results,
options.watchman_use_glob_generator,
project_import_whitelist=options.build_file_import_whitelist or [],
implicit_includes=options.include or [],
configs=configs,
ignore_paths=ignore_paths,
disable_implicit_native_rules=options.disable_implicit_native_rules,
warn_about_deprecated_syntax=options.warn_about_deprecated_syntax,
)
# While processing, we'll write exceptions as diagnostic messages
# to the parent then re-raise them to crash the process. While
# doing so, we don't want Python's default unhandled exception
# behavior of writing to stderr.
orig_excepthook = None
if options.quiet:
orig_excepthook = sys.excepthook
sys.excepthook = silent_excepthook
# Process the build files with the env var interceptors and builtins
# installed.
with build_file_processor.with_env_interceptors():
with build_file_processor.with_builtins(builtins.__dict__):
processed_build_file = []
profiler = None
if options.profile:
profiler = Profiler(True)
profiler.start()
Tracer.enable()
for build_file in args:
query = {
"buildFile": build_file,
"watchRoot": project_root,
"projectPrefix": project_root,
}
duration = process_with_diagnostics(
query, build_file_processor, to_parent
)
processed_build_file.append(
{"buildFile": build_file, "duration": duration}
)
# From https://docs.python.org/2/using/cmdline.html :
#
# Note that there is internal buffering in file.readlines()
# and File Objects (for line in sys.stdin) which is not
# influenced by this option. To work around this, you will
# want to use file.readline() inside a while 1: loop.
for line in wait_and_read_build_file_query():
if line == "":
break
build_file_query = json.loads(line)
if build_file_query.get("command") == "report_profile":
report_profile(options, to_parent, processed_build_file, profiler)
else:
duration = process_with_diagnostics(
build_file_query, build_file_processor, to_parent
)
processed_build_file.append(
{
"buildFile": build_file_query["buildFile"],
"duration": duration,
}
)
if options.quiet:
sys.excepthook = orig_excepthook
# Python tries to flush/close stdout when it quits, and if there's a dead
# pipe on the other end, it will spit some warnings to stderr. This breaks
# tests sometimes. Prevent that by explicitly catching the error.
try:
to_parent.close()
except IOError:
pass
def wait_build_file_query():
_select([sys.stdin], [], [])
def wait_and_read_build_file_query():
def default_wait():
return
wait = default_wait
if sys.platform != "win32":
# wait_build_file_query() is useful to attribute time waiting for queries.
# Since select.select() is not supported on Windows, we currently don't have
        # a reliable way to measure it on this platform, so we skip it there.
wait = wait_build_file_query
while True:
wait()
line = sys.stdin.readline()
if not line:
return
yield line
def report_profile(options, to_parent, processed_build_file, profiler):
if options.profile:
try:
profiler.stop()
profile_result = profiler.generate_report()
extra_result = "Total: {:.2f} sec\n\n\n".format(profiler.total_time)
extra_result += "# Parsed {} files".format(len(processed_build_file))
processed_build_file.sort(
key=lambda current_child: current_child["duration"], reverse=True
)
# Only show the top ten buck files
if len(processed_build_file) > 10:
processed_build_file = processed_build_file[:10]
extra_result += ", {} slower BUCK files:\n".format(
len(processed_build_file)
)
else:
extra_result += "\n"
for info in processed_build_file:
extra_result += "Parsed {}: {:.2f} sec \n".format(
info["buildFile"], info["duration"]
)
extra_result += "\n\n"
profile_result = extra_result + profile_result
profile_result += Tracer.get_all_traces_and_reset()
java_process_send_result(to_parent, [], [], profile_result)
except Exception:
trace = traceback.format_exc()
print(str(trace))
raise
else:
java_process_send_result(to_parent, [], [], None)
def make_glob(pat):
# type: (str) -> str
if is_special(pat):
return pat
return pat + "/**"
# import autogenerated rule instances for effect.
try:
import generated_rules
except ImportError:
    # If running this script directly or via its Python tests, this is not an error.
sys.stderr.write("Failed to load buck generated rules module.\n")

acceptance/acceptance_test.go

//go:build acceptance
// +build acceptance
package acceptance
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"regexp"
"runtime"
"strings"
"testing"
"time"
dockertypes "github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/ghodss/yaml"
"github.com/google/go-containerregistry/pkg/name"
"github.com/pelletier/go-toml"
"github.com/sclevine/spec"
"github.com/sclevine/spec/report"
"github.com/buildpacks/pack/acceptance/assertions"
"github.com/buildpacks/pack/acceptance/buildpacks"
"github.com/buildpacks/pack/acceptance/config"
"github.com/buildpacks/pack/acceptance/invoke"
"github.com/buildpacks/pack/acceptance/managers"
"github.com/buildpacks/pack/internal/cache"
"github.com/buildpacks/pack/internal/style"
"github.com/buildpacks/pack/pkg/archive"
h "github.com/buildpacks/pack/testhelpers"
)
const (
runImage = "pack-test/run"
buildImage = "pack-test/build"
)
var (
dockerCli client.CommonAPIClient
registryConfig *h.TestRegistryConfig
suiteManager *SuiteManager
imageManager managers.ImageManager
assertImage assertions.ImageAssertionManager
)
func TestAcceptance(t *testing.T) {
var err error
h.RequireDocker(t)
rand.Seed(time.Now().UTC().UnixNano())
assert := h.NewAssertionManager(t)
dockerCli, err = client.NewClientWithOpts(client.FromEnv, client.WithVersion("1.38"))
assert.Nil(err)
imageManager = managers.NewImageManager(t, dockerCli)
registryConfig = h.RunRegistry(t)
defer registryConfig.RmRegistry(t)
assertImage = assertions.NewImageAssertionManager(t, imageManager, registryConfig)
inputConfigManager, err := config.NewInputConfigurationManager()
assert.Nil(err)
assetsConfig := config.ConvergedAssetManager(t, assert, inputConfigManager)
suiteManager = &SuiteManager{out: t.Logf}
suite := spec.New("acceptance suite", spec.Report(report.Terminal{}))
if inputConfigManager.Combinations().IncludesCurrentSubjectPack() {
suite("p_current", func(t *testing.T, when spec.G, it spec.S) {
testWithoutSpecificBuilderRequirement(
t,
when,
it,
assetsConfig.NewPackAsset(config.Current),
)
}, spec.Report(report.Terminal{}))
}
for _, combo := range inputConfigManager.Combinations() {
// see https://github.com/golang/go/wiki/CommonMistakes#using-reference-to-loop-iterator-variable
combo := combo
t.Logf(`setting up run combination %s: %s`,
style.Symbol(combo.String()),
combo.Describe(assetsConfig),
)
suite(combo.String(), func(t *testing.T, when spec.G, it spec.S) {
testAcceptance(
t,
when,
it,
assetsConfig.NewPackAsset(combo.Pack),
assetsConfig.NewPackAsset(combo.PackCreateBuilder),
assetsConfig.NewLifecycleAsset(combo.Lifecycle),
)
}, spec.Report(report.Terminal{}))
}
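	// Each registered combination pairs a pack under test, a pack used to create
	// builders, and a lifecycle version, so suite.Run below exercises
	// cross-version compatibility with the same test body.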
suite.Run(t)
assert.Nil(suiteManager.CleanUp())
}
// These tests either (a) do not require a builder or (b) do not require a specific builder to be provided
// in order to test compatibility.
// They should only be run against the "current" (i.e., main) version of pack.
func testWithoutSpecificBuilderRequirement(
t *testing.T,
when spec.G,
it spec.S,
packConfig config.PackAsset,
) {
var (
pack *invoke.PackInvoker
assert = h.NewAssertionManager(t)
buildpackManager buildpacks.BuildpackManager
)
it.Before(func() {
pack = invoke.NewPackInvoker(t, assert, packConfig, registryConfig.DockerConfigDir)
pack.EnableExperimental()
buildpackManager = buildpacks.NewBuildpackManager(t, assert)
})
it.After(func() {
pack.Cleanup()
})
when("invalid subcommand", func() {
it("prints usage", func() {
output, err := pack.Run("some-bad-command")
assert.NotNil(err)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsCommandUnknown("some-bad-command")
assertOutput.IncludesUsagePrompt()
})
})
when("build with default builders not set", func() {
it("informs the user", func() {
output, err := pack.Run(
"build", "some/image",
"-p", filepath.Join("testdata", "mock_app"),
)
assert.NotNil(err)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.IncludesMessageToSetDefaultBuilder()
assertOutput.IncludesPrefixedGoogleBuilder()
assertOutput.IncludesPrefixedHerokuBuilders()
assertOutput.IncludesPrefixedPaketoBuilders()
})
})
when("buildpack", func() {
when("package", func() {
var (
tmpDir string
buildpackManager buildpacks.BuildpackManager
simplePackageConfigFixtureName = "package.toml"
)
it.Before(func() {
var err error
tmpDir, err = ioutil.TempDir("", "buildpack-package-tests")
assert.Nil(err)
buildpackManager = buildpacks.NewBuildpackManager(t, assert)
buildpackManager.PrepareBuildpacks(tmpDir, buildpacks.SimpleLayersParent, buildpacks.SimpleLayers)
})
it.After(func() {
assert.Nil(os.RemoveAll(tmpDir))
})
generateAggregatePackageToml := func(buildpackURI, nestedPackageName, os string) string {
t.Helper()
packageTomlFile, err := ioutil.TempFile(tmpDir, "package_aggregate-*.toml")
assert.Nil(err)
pack.FixtureManager().TemplateFixtureToFile(
"package_aggregate.toml",
packageTomlFile,
map[string]interface{}{
"BuildpackURI": buildpackURI,
"PackageName": nestedPackageName,
"OS": os,
},
)
assert.Nil(packageTomlFile.Close())
return packageTomlFile.Name()
}
when("no --format is provided", func() {
it("creates the package as image", func() {
packageName := "test/package-" + h.RandString(10)
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, imageManager.HostOS())
output := pack.RunSuccessfully("buildpack", "package", packageName, "-c", packageTomlPath)
assertions.NewOutputAssertionManager(t, output).ReportsPackageCreation(packageName)
defer imageManager.CleanupImages(packageName)
assertImage.ExistsLocally(packageName)
})
})
when("--format image", func() {
it("creates the package", func() {
t.Log("package w/ only buildpacks")
nestedPackageName := "test/package-" + h.RandString(10)
packageName := "test/package-" + h.RandString(10)
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, imageManager.HostOS())
aggregatePackageToml := generateAggregatePackageToml("simple-layers-parent-buildpack.tgz", nestedPackageName, imageManager.HostOS())
packageBuildpack := buildpacks.NewPackageImage(
t,
pack,
packageName,
aggregatePackageToml,
buildpacks.WithRequiredBuildpacks(
buildpacks.SimpleLayersParent,
buildpacks.NewPackageImage(
t,
pack,
nestedPackageName,
packageTomlPath,
buildpacks.WithRequiredBuildpacks(buildpacks.SimpleLayers),
),
),
)
buildpackManager.PrepareBuildpacks(tmpDir, packageBuildpack)
defer imageManager.CleanupImages(nestedPackageName, packageName)
assertImage.ExistsLocally(nestedPackageName)
assertImage.ExistsLocally(packageName)
})
when("--publish", func() {
it("publishes image to registry", func() {
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, imageManager.HostOS())
nestedPackageName := registryConfig.RepoName("test/package-" + h.RandString(10))
nestedPackage := buildpacks.NewPackageImage(
t,
pack,
nestedPackageName,
packageTomlPath,
buildpacks.WithRequiredBuildpacks(buildpacks.SimpleLayers),
buildpacks.WithPublish(),
)
buildpackManager.PrepareBuildpacks(tmpDir, nestedPackage)
aggregatePackageToml := generateAggregatePackageToml("simple-layers-parent-buildpack.tgz", nestedPackageName, imageManager.HostOS())
packageName := registryConfig.RepoName("test/package-" + h.RandString(10))
output := pack.RunSuccessfully(
"buildpack", "package", packageName,
"-c", aggregatePackageToml,
"--publish",
)
defer imageManager.CleanupImages(packageName)
assertions.NewOutputAssertionManager(t, output).ReportsPackagePublished(packageName)
assertImage.NotExistsLocally(packageName)
assertImage.CanBePulledFromRegistry(packageName)
})
})
when("--pull-policy=never", func() {
it("should use local image", func() {
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, imageManager.HostOS())
nestedPackageName := "test/package-" + h.RandString(10)
nestedPackage := buildpacks.NewPackageImage(
t,
pack,
nestedPackageName,
packageTomlPath,
buildpacks.WithRequiredBuildpacks(buildpacks.SimpleLayers),
)
buildpackManager.PrepareBuildpacks(tmpDir, nestedPackage)
defer imageManager.CleanupImages(nestedPackageName)
aggregatePackageToml := generateAggregatePackageToml("simple-layers-parent-buildpack.tgz", nestedPackageName, imageManager.HostOS())
packageName := registryConfig.RepoName("test/package-" + h.RandString(10))
defer imageManager.CleanupImages(packageName)
pack.JustRunSuccessfully(
"buildpack", "package", packageName,
"-c", aggregatePackageToml,
"--pull-policy", "never")
assertImage.ExistsLocally(packageName)
})
it("should not pull image from registry", func() {
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, imageManager.HostOS())
nestedPackageName := registryConfig.RepoName("test/package-" + h.RandString(10))
nestedPackage := buildpacks.NewPackageImage(
t,
pack,
nestedPackageName,
packageTomlPath,
buildpacks.WithPublish(),
buildpacks.WithRequiredBuildpacks(buildpacks.SimpleLayers),
)
buildpackManager.PrepareBuildpacks(tmpDir, nestedPackage)
aggregatePackageToml := generateAggregatePackageToml("simple-layers-parent-buildpack.tgz", nestedPackageName, imageManager.HostOS())
packageName := registryConfig.RepoName("test/package-" + h.RandString(10))
output, err := pack.Run(
"buildpack", "package", packageName,
"-c", aggregatePackageToml,
"--pull-policy", "never",
)
assert.NotNil(err)
assertions.NewOutputAssertionManager(t, output).ReportsImageNotExistingOnDaemon(nestedPackageName)
})
})
})
when("--format file", func() {
when("the file extension is .cnb", func() {
it("creates the package with the same extension", func() {
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, imageManager.HostOS())
destinationFile := filepath.Join(tmpDir, "package.cnb")
output := pack.RunSuccessfully(
"buildpack", "package", destinationFile,
"--format", "file",
"-c", packageTomlPath,
)
assertions.NewOutputAssertionManager(t, output).ReportsPackageCreation(destinationFile)
h.AssertTarball(t, destinationFile)
})
})
when("the file extension is empty", func() {
it("creates the package with a .cnb extension", func() {
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, imageManager.HostOS())
destinationFile := filepath.Join(tmpDir, "package")
expectedFile := filepath.Join(tmpDir, "package.cnb")
output := pack.RunSuccessfully(
"buildpack", "package", destinationFile,
"--format", "file",
"-c", packageTomlPath,
)
assertions.NewOutputAssertionManager(t, output).ReportsPackageCreation(expectedFile)
h.AssertTarball(t, expectedFile)
})
})
when("the file extension is not .cnb", func() {
it("creates the package with the given extension but shows a warning", func() {
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, simplePackageConfigFixtureName, imageManager.HostOS())
destinationFile := filepath.Join(tmpDir, "package.tar.gz")
output := pack.RunSuccessfully(
"buildpack", "package", destinationFile,
"--format", "file",
"-c", packageTomlPath,
)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsPackageCreation(destinationFile)
assertOutput.ReportsInvalidExtension(".gz")
h.AssertTarball(t, destinationFile)
})
})
})
when("package.toml is invalid", func() {
it("displays an error", func() {
output, err := pack.Run(
"buildpack", "package", "some-package",
"-c", pack.FixtureManager().FixtureLocation("invalid_package.toml"),
)
assert.NotNil(err)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsReadingConfig()
})
})
})
when("inspect", func() {
var tmpDir string
it.Before(func() {
var err error
tmpDir, err = ioutil.TempDir("", "buildpack-inspect-tests")
assert.Nil(err)
})
it.After(func() {
assert.Succeeds(os.RemoveAll(tmpDir))
})
when("buildpack archive", func() {
it("succeeds", func() {
packageFileLocation := filepath.Join(
tmpDir,
fmt.Sprintf("buildpack-%s.cnb", h.RandString(8)),
)
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, "package_for_build_cmd.toml", imageManager.HostOS())
packageFile := buildpacks.NewPackageFile(
t,
pack,
packageFileLocation,
packageTomlPath,
buildpacks.WithRequiredBuildpacks(
buildpacks.FolderSimpleLayersParent,
buildpacks.FolderSimpleLayers,
),
)
buildpackManager.PrepareBuildpacks(tmpDir, packageFile)
expectedOutput := pack.FixtureManager().TemplateFixture(
"inspect_buildpack_output.txt",
map[string]interface{}{
"buildpack_source": "LOCAL ARCHIVE",
"buildpack_name": packageFileLocation,
},
)
output := pack.RunSuccessfully("buildpack", "inspect", packageFileLocation)
assert.TrimmedEq(output, expectedOutput)
})
})
when("buildpack image", func() {
when("inspect", func() {
it("succeeds", func() {
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, "package_for_build_cmd.toml", imageManager.HostOS())
packageImageName := registryConfig.RepoName("buildpack-" + h.RandString(8))
packageImage := buildpacks.NewPackageImage(
t,
pack,
packageImageName,
packageTomlPath,
buildpacks.WithRequiredBuildpacks(
buildpacks.FolderSimpleLayersParent,
buildpacks.FolderSimpleLayers,
),
)
defer imageManager.CleanupImages(packageImageName)
buildpackManager.PrepareBuildpacks(tmpDir, packageImage)
expectedOutput := pack.FixtureManager().TemplateFixture(
"inspect_buildpack_output.txt",
map[string]interface{}{
"buildpack_source": "LOCAL IMAGE",
"buildpack_name": packageImageName,
},
)
output := pack.RunSuccessfully("buildpack", "inspect", packageImageName)
assert.TrimmedEq(output, expectedOutput)
})
})
})
})
})
when("builder", func() {
when("suggest", func() {
it("displays suggested builders", func() {
output := pack.RunSuccessfully("builder", "suggest")
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.IncludesSuggestedBuildersHeading()
assertOutput.IncludesPrefixedGoogleBuilder()
assertOutput.IncludesPrefixedHerokuBuilders()
assertOutput.IncludesPrefixedPaketoBuilders()
})
})
})
when("config", func() {
when("default-builder", func() {
it("sets the default builder in ~/.pack/config.toml", func() {
builderName := "paketobuildpacks/builder:base"
output := pack.RunSuccessfully("config", "default-builder", builderName)
assertions.NewOutputAssertionManager(t, output).ReportsSettingDefaultBuilder(builderName)
})
})
when("trusted-builders", func() {
it("prints list of trusted builders", func() {
output := pack.RunSuccessfully("config", "trusted-builders")
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.IncludesTrustedBuildersHeading()
assertOutput.IncludesHerokuBuilders()
assertOutput.IncludesGoogleBuilder()
assertOutput.IncludesPaketoBuilders()
})
when("add", func() {
it("sets the builder as trusted in ~/.pack/config.toml", func() {
builderName := "some-builder" + h.RandString(10)
pack.JustRunSuccessfully("config", "trusted-builders", "add", builderName)
assert.Contains(pack.ConfigFileContents(), builderName)
})
})
when("remove", func() {
it("removes the previously trusted builder from ~/${PACK_HOME}/config.toml", func() {
builderName := "some-builder" + h.RandString(10)
pack.JustRunSuccessfully("config", "trusted-builders", "add", builderName)
assert.Contains(pack.ConfigFileContents(), builderName)
pack.JustRunSuccessfully("config", "trusted-builders", "remove", builderName)
assert.NotContains(pack.ConfigFileContents(), builderName)
})
})
when("list", func() {
it("prints list of trusted builders", func() {
output := pack.RunSuccessfully("config", "trusted-builders", "list")
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.IncludesTrustedBuildersHeading()
assertOutput.IncludesHerokuBuilders()
assertOutput.IncludesGoogleBuilder()
assertOutput.IncludesPaketoBuilders()
})
it("shows a builder trusted by pack config trusted-builders add", func() {
builderName := "some-builder" + h.RandString(10)
pack.JustRunSuccessfully("config", "trusted-builders", "add", builderName)
output := pack.RunSuccessfully("config", "trusted-builders", "list")
assert.Contains(output, builderName)
})
})
})
})
when("stack", func() {
when("suggest", func() {
it("displays suggested stacks", func() {
output, err := pack.Run("stack", "suggest")
assert.Nil(err)
assertions.NewOutputAssertionManager(t, output).IncludesSuggestedStacksHeading()
})
})
})
when("report", func() {
when("default builder is set", func() {
it("redacts default builder", func() {
pack.RunSuccessfully("config", "default-builder", "paketobuildpacks/builder:base")
output := pack.RunSuccessfully("report")
version := pack.Version()
expectedOutput := pack.FixtureManager().TemplateFixture(
"report_output.txt",
map[string]interface{}{
"DefaultBuilder": "[REDACTED]",
"Version": version,
"OS": runtime.GOOS,
"Arch": runtime.GOARCH,
},
)
assert.Equal(output, expectedOutput)
})
it("explicit mode doesn't redact", func() {
pack.RunSuccessfully("config", "default-builder", "paketobuildpacks/builder:base")
output := pack.RunSuccessfully("report", "--explicit")
version := pack.Version()
expectedOutput := pack.FixtureManager().TemplateFixture(
"report_output.txt",
map[string]interface{}{
"DefaultBuilder": "paketobuildpacks/builder:base",
"Version": version,
"OS": runtime.GOOS,
"Arch": runtime.GOARCH,
},
)
assert.Equal(output, expectedOutput)
})
})
})
}
func testAcceptance(
t *testing.T,
when spec.G,
it spec.S,
subjectPackConfig, createBuilderPackConfig config.PackAsset,
lifecycle config.LifecycleAsset,
) {
var (
pack, createBuilderPack *invoke.PackInvoker
buildpackManager buildpacks.BuildpackManager
bpDir = buildpacksDir(lifecycle.EarliestBuildpackAPIVersion())
assert = h.NewAssertionManager(t)
)
it.Before(func() {
pack = invoke.NewPackInvoker(t, assert, subjectPackConfig, registryConfig.DockerConfigDir)
pack.EnableExperimental()
createBuilderPack = invoke.NewPackInvoker(t, assert, createBuilderPackConfig, registryConfig.DockerConfigDir)
createBuilderPack.EnableExperimental()
buildpackManager = buildpacks.NewBuildpackManager(
t,
assert,
buildpacks.WithBuildpackAPIVersion(lifecycle.EarliestBuildpackAPIVersion()),
)
})
it.After(func() {
pack.Cleanup()
createBuilderPack.Cleanup()
})
when("stack is created", func() {
var (
runImageMirror string
stackBaseImages = map[string][]string{
"linux": {"ubuntu:bionic"},
"windows": {"mcr.microsoft.com/windows/nanoserver:1809", "golang:1.17-nanoserver-1809"},
}
)
it.Before(func() {
value, err := suiteManager.RunTaskOnceString("create-stack",
func() (string, error) {
runImageMirror := registryConfig.RepoName(runImage)
err := createStack(t, dockerCli, runImageMirror)
if err != nil {
return "", err
}
return runImageMirror, nil
})
assert.Nil(err)
baseStackNames := stackBaseImages[imageManager.HostOS()]
suiteManager.RegisterCleanUp("remove-stack-images", func() error {
imageManager.CleanupImages(baseStackNames...)
imageManager.CleanupImages(runImage, buildImage, value)
return nil
})
runImageMirror = value
})
when("builder is created", func() {
var builderName string
it.Before(func() {
key := taskKey(
"create-builder",
append(
[]string{runImageMirror, createBuilderPackConfig.Path(), lifecycle.Identifier()},
createBuilderPackConfig.FixturePaths()...,
)...,
)
value, err := suiteManager.RunTaskOnceString(key, func() (string, error) {
return createBuilder(t, assert, createBuilderPack, lifecycle, buildpackManager, runImageMirror)
})
assert.Nil(err)
suiteManager.RegisterCleanUp("clean-"+key, func() error {
imageManager.CleanupImages(value)
return nil
})
builderName = value
})
when("complex builder", func() {
it.Before(func() {
// create our nested builder
h.SkipIf(t, imageManager.HostOS() == "windows", "These tests are not yet compatible with Windows-based containers")
					// Build a task key so the suite manager creates the complex builder only
					// once and reuses it across test combinations (tasks are de-duped by key).
key := taskKey(
"create-complex-builder",
append(
[]string{runImageMirror, createBuilderPackConfig.Path(), lifecycle.Identifier()},
createBuilderPackConfig.FixturePaths()...,
)...,
)
value, err := suiteManager.RunTaskOnceString(key, func() (string, error) {
return createComplexBuilder(
t,
assert,
createBuilderPack,
lifecycle,
buildpackManager,
runImageMirror,
)
})
assert.Nil(err)
// register task to be run to 'clean up' a task
suiteManager.RegisterCleanUp("clean-"+key, func() error {
imageManager.CleanupImages(value)
return nil
})
builderName = value
output := pack.RunSuccessfully(
"config", "run-image-mirrors", "add", "pack-test/run", "--mirror", "some-registry.com/pack-test/run1")
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsSuccesfulRunImageMirrorsAdd("pack-test/run", "some-registry.com/pack-test/run1")
})
when("builder has duplicate buildpacks", func() {
it("buildpack layers have no duplication", func() {
assertImage.DoesNotHaveDuplicateLayers(builderName)
})
})
})
when("builder.toml is invalid", func() {
it("displays an error", func() {
builderConfigPath := createBuilderPack.FixtureManager().FixtureLocation("invalid_builder.toml")
output, err := createBuilderPack.Run(
"builder", "create", "some-builder:build",
"--config", builderConfigPath,
)
assert.NotNil(err)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsInvalidBuilderToml()
})
})
when("build", func() {
var repo, repoName string
it.Before(func() {
repo = "some-org/" + h.RandString(10)
repoName = registryConfig.RepoName(repo)
pack.JustRunSuccessfully("config", "lifecycle-image", lifecycle.Image())
})
it.After(func() {
imageManager.CleanupImages(repoName)
ref, err := name.ParseReference(repoName, name.WeakValidation)
assert.Nil(err)
cacheImage := cache.NewImageCache(ref, dockerCli)
buildCacheVolume := cache.NewVolumeCache(ref, "build", dockerCli)
launchCacheVolume := cache.NewVolumeCache(ref, "launch", dockerCli)
cacheImage.Clear(context.TODO())
buildCacheVolume.Clear(context.TODO())
launchCacheVolume.Clear(context.TODO())
})
when("builder is untrusted", func() {
var untrustedBuilderName string
it.Before(func() {
var err error
untrustedBuilderName, err = createBuilder(
t,
assert,
createBuilderPack,
lifecycle,
buildpackManager,
runImageMirror,
)
assert.Nil(err)
suiteManager.RegisterCleanUp("remove-lifecycle-"+lifecycle.Image(), func() error {
img := imageManager.GetImageID(lifecycle.Image())
imageManager.CleanupImages(img)
return nil
})
})
it.After(func() {
imageManager.CleanupImages(untrustedBuilderName)
})
when("daemon", func() {
it("uses the 5 phases", func() {
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"-B", untrustedBuilderName,
)
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName)
assertOutput := assertions.NewLifecycleOutputAssertionManager(t, output)
assertOutput.IncludesLifecycleImageTag(lifecycle.Image())
assertOutput.IncludesSeparatePhases()
})
})
when("--publish", func() {
it("uses the 5 phases", func() {
buildArgs := []string{
repoName,
"-p", filepath.Join("testdata", "mock_app"),
"-B", untrustedBuilderName,
"--publish",
}
if imageManager.HostOS() != "windows" {
buildArgs = append(buildArgs, "--network", "host")
}
output := pack.RunSuccessfully("build", buildArgs...)
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName)
assertOutput := assertions.NewLifecycleOutputAssertionManager(t, output)
assertOutput.IncludesLifecycleImageTag(lifecycle.Image())
assertOutput.IncludesSeparatePhases()
})
})
when("additional tags", func() {
var additionalRepoName string
it.Before(func() {
additionalRepoName = fmt.Sprintf("%s_additional", repoName)
})
it.After(func() {
imageManager.CleanupImages(additionalRepoName)
})
it("pushes image to additional tags", func() {
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"-B", untrustedBuilderName,
"--tag", additionalRepoName,
)
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName)
assert.Contains(output, additionalRepoName)
})
})
})
when("default builder is set", func() {
it.Before(func() {
pack.RunSuccessfully("config", "default-builder", builderName)
pack.JustRunSuccessfully("config", "trusted-builders", "add", builderName)
})
it("creates a runnable, rebuildable image on daemon from app dir", func() {
appPath := filepath.Join("testdata", "mock_app")
output := pack.RunSuccessfully(
"build", repoName,
"-p", appPath,
)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsSuccessfulImageBuild(repoName)
assertOutput.ReportsUsingBuildCacheVolume()
assertOutput.ReportsSelectingRunImageMirror(runImageMirror)
t.Log("app is runnable")
assertImage.RunsWithOutput(repoName, "Launch Dep Contents", "Cached Dep Contents")
t.Log("it uses the run image as a base image")
assertImage.HasBaseImage(repoName, runImage)
t.Log("sets the run image metadata")
assertImage.HasLabelWithData(repoName, "io.buildpacks.lifecycle.metadata", fmt.Sprintf(`"stack":{"runImage":{"image":"%s","mirrors":["%s"]}}}`, runImage, runImageMirror))
t.Log("sets the source metadata")
assertImage.HasLabelWithData(repoName, "io.buildpacks.project.metadata", (`{"source":{"type":"project","version":{"declared":"1.0.2"},"metadata":{"url":"https://github.com/buildpacks/pack"}}}`))
t.Log("registry is empty")
assertImage.NotExistsInRegistry(repo)
t.Log("add a local mirror")
localRunImageMirror := registryConfig.RepoName("pack-test/run-mirror")
imageManager.TagImage(runImage, localRunImageMirror)
defer imageManager.CleanupImages(localRunImageMirror)
pack.JustRunSuccessfully("config", "run-image-mirrors", "add", runImage, "-m", localRunImageMirror)
t.Log("rebuild")
output = pack.RunSuccessfully(
"build", repoName,
"-p", appPath,
)
assertOutput = assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsSuccessfulImageBuild(repoName)
assertOutput.ReportsSelectingRunImageMirrorFromLocalConfig(localRunImageMirror)
cachedLaunchLayer := "simple/layers:cached-launch-layer"
assertLifecycleOutput := assertions.NewLifecycleOutputAssertionManager(t, output)
assertLifecycleOutput.ReportsRestoresCachedLayer(cachedLaunchLayer)
assertLifecycleOutput.ReportsExporterReusingUnchangedLayer(cachedLaunchLayer)
assertLifecycleOutput.ReportsCacheReuse(cachedLaunchLayer)
t.Log("app is runnable")
assertImage.RunsWithOutput(repoName, "Launch Dep Contents", "Cached Dep Contents")
t.Log("rebuild with --clear-cache")
output = pack.RunSuccessfully("build", repoName, "-p", appPath, "--clear-cache")
assertOutput = assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsSuccessfulImageBuild(repoName)
assertLifecycleOutput = assertions.NewLifecycleOutputAssertionManager(t, output)
assertLifecycleOutput.ReportsExporterReusingUnchangedLayer(cachedLaunchLayer)
assertLifecycleOutput.ReportsCacheCreation(cachedLaunchLayer)
t.Log("cacher adds layers")
assert.Matches(output, regexp.MustCompile(`(?i)Adding cache layer 'simple/layers:cached-launch-layer'`))
t.Log("inspecting image")
inspectCmd := "inspect"
if !pack.Supports("inspect") {
inspectCmd = "inspect-image"
}
var (
webCommand string
helloCommand string
helloArgs []string
helloArgsPrefix string
)
if imageManager.HostOS() == "windows" {
webCommand = ".\\run"
helloCommand = "cmd"
helloArgs = []string{"/c", "echo hello world"}
helloArgsPrefix = " "
} else {
webCommand = "./run"
helloCommand = "echo"
helloArgs = []string{"hello", "world"}
helloArgsPrefix = ""
}
formats := []compareFormat{
{
extension: "txt",
compareFunc: assert.TrimmedEq,
outputArg: "human-readable",
},
{
extension: "json",
compareFunc: assert.EqualJSON,
outputArg: "json",
},
{
extension: "yaml",
compareFunc: assert.EqualYAML,
outputArg: "yaml",
},
{
extension: "toml",
compareFunc: assert.EqualTOML,
outputArg: "toml",
},
}
for _, format := range formats {
t.Logf("inspecting image %s format", format.outputArg)
output = pack.RunSuccessfully(inspectCmd, repoName, "--output", format.outputArg)
expectedOutput := pack.FixtureManager().TemplateFixture(
fmt.Sprintf("inspect_image_local_output.%s", format.extension),
map[string]interface{}{
"image_name": repoName,
"base_image_id": h.ImageID(t, runImageMirror),
"base_image_top_layer": h.TopLayerDiffID(t, runImageMirror),
"run_image_local_mirror": localRunImageMirror,
"run_image_mirror": runImageMirror,
"web_command": webCommand,
"hello_command": helloCommand,
"hello_args": helloArgs,
"hello_args_prefix": helloArgsPrefix,
},
)
format.compareFunc(output, expectedOutput)
}
})
when("--no-color", func() {
it("doesn't have color", func() {
appPath := filepath.Join("testdata", "mock_app")
// --no-color is set as a default option in our tests, and doesn't need to be explicitly provided
output := pack.RunSuccessfully("build", repoName, "-p", appPath)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsSuccessfulImageBuild(repoName)
assertOutput.WithoutColors()
})
})
when("--quiet", func() {
it("only logs app name and sha", func() {
appPath := filepath.Join("testdata", "mock_app")
pack.SetVerbose(false)
defer pack.SetVerbose(true)
output := pack.RunSuccessfully("build", repoName, "-p", appPath, "--quiet")
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportSuccessfulQuietBuild(repoName)
})
})
it("supports building app from a zip file", func() {
appPath := filepath.Join("testdata", "mock_app.zip")
output := pack.RunSuccessfully("build", repoName, "-p", appPath)
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName)
})
when("--network", func() {
var tmpDir string
it.Before(func() {
h.SkipIf(t, imageManager.HostOS() == "windows", "temporarily disabled on WCOW due to CI flakiness")
var err error
tmpDir, err = ioutil.TempDir("", "archive-buildpacks-")
assert.Nil(err)
buildpackManager.PrepareBuildpacks(tmpDir, buildpacks.InternetCapable)
})
it.After(func() {
h.SkipIf(t, imageManager.HostOS() == "windows", "temporarily disabled on WCOW due to CI flakiness")
assert.Succeeds(os.RemoveAll(tmpDir))
})
when("the network mode is not provided", func() {
it("reports buildpack access to internet", func() {
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--buildpack", buildpacks.InternetCapable.FullPathIn(tmpDir),
)
assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output)
assertBuildpackOutput.ReportsConnectedToInternet()
})
})
when("the network mode is set to default", func() {
it("reports buildpack access to internet", func() {
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--buildpack", buildpacks.InternetCapable.FullPathIn(tmpDir),
"--network", "default",
)
assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output)
assertBuildpackOutput.ReportsConnectedToInternet()
})
})
when("the network mode is set to none", func() {
it("reports buildpack disconnected from internet", func() {
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--buildpack", buildpacks.InternetCapable.FullPathIn(tmpDir),
"--network", "none",
)
assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output)
assertBuildpackOutput.ReportsDisconnectedFromInternet()
})
})
})
when("--volume", func() {
var (
volumeRoot = "/"
slash = "/"
tmpDir string
tmpVolumeSrc string
)
it.Before(func() {
h.SkipIf(t, os.Getenv("DOCKER_HOST") != "", "cannot mount volume when DOCKER_HOST is set")
if imageManager.HostOS() == "windows" {
volumeRoot = `c:\`
slash = `\`
}
var err error
tmpDir, err = ioutil.TempDir("", "volume-buildpack-tests-")
assert.Nil(err)
buildpackManager.PrepareBuildpacks(tmpDir, buildpacks.ReadVolume, buildpacks.ReadWriteVolume)
tmpVolumeSrc, err = ioutil.TempDir("", "volume-mount-source")
assert.Nil(err)
assert.Succeeds(os.Chmod(tmpVolumeSrc, 0777)) // Override umask
// Some OSes (like macOS) use symlinks for the standard temp dir.
// Resolve it so it can be properly mounted by the Docker daemon.
tmpVolumeSrc, err = filepath.EvalSymlinks(tmpVolumeSrc)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tmpVolumeSrc, "some-file"), []byte("some-content\n"), 0777)
assert.Nil(err)
})
it.After(func() {
_ = os.RemoveAll(tmpDir)
_ = os.RemoveAll(tmpVolumeSrc)
})
when("volume is read-only", func() {
it("mounts the provided volume in the detect and build phases", func() {
volumeDest := volumeRoot + "platform" + slash + "volume-mount-target"
testFilePath := volumeDest + slash + "some-file"
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--volume", fmt.Sprintf("%s:%s", tmpVolumeSrc, volumeDest),
"--buildpack", buildpacks.ReadVolume.FullPathIn(tmpDir),
"--env", "TEST_FILE_PATH="+testFilePath,
)
bpOutputAsserts := assertions.NewTestBuildpackOutputAssertionManager(t, output)
bpOutputAsserts.ReportsReadingFileContents("Detect", testFilePath, "some-content")
bpOutputAsserts.ReportsReadingFileContents("Build", testFilePath, "some-content")
})
it("should fail to write", func() {
volumeDest := volumeRoot + "platform" + slash + "volume-mount-target"
testDetectFilePath := volumeDest + slash + "detect-file"
testBuildFilePath := volumeDest + slash + "build-file"
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--volume", fmt.Sprintf("%s:%s", tmpVolumeSrc, volumeDest),
"--buildpack", buildpacks.ReadWriteVolume.FullPathIn(tmpDir),
"--env", "DETECT_TEST_FILE_PATH="+testDetectFilePath,
"--env", "BUILD_TEST_FILE_PATH="+testBuildFilePath,
)
bpOutputAsserts := assertions.NewTestBuildpackOutputAssertionManager(t, output)
bpOutputAsserts.ReportsFailingToWriteFileContents("Detect", testDetectFilePath)
bpOutputAsserts.ReportsFailingToWriteFileContents("Build", testBuildFilePath)
})
})
when("volume is read-write", func() {
it("can be written to", func() {
volumeDest := volumeRoot + "volume-mount-target"
testDetectFilePath := volumeDest + slash + "detect-file"
testBuildFilePath := volumeDest + slash + "build-file"
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--volume", fmt.Sprintf("%s:%s:rw", tmpVolumeSrc, volumeDest),
"--buildpack", buildpacks.ReadWriteVolume.FullPathIn(tmpDir),
"--env", "DETECT_TEST_FILE_PATH="+testDetectFilePath,
"--env", "BUILD_TEST_FILE_PATH="+testBuildFilePath,
)
bpOutputAsserts := assertions.NewTestBuildpackOutputAssertionManager(t, output)
bpOutputAsserts.ReportsWritingFileContents("Detect", testDetectFilePath)
bpOutputAsserts.ReportsReadingFileContents("Detect", testDetectFilePath, "some-content")
bpOutputAsserts.ReportsWritingFileContents("Build", testBuildFilePath)
bpOutputAsserts.ReportsReadingFileContents("Build", testBuildFilePath, "some-content")
})
})
})
when("--default-process", func() {
it("sets the default process from those in the process list", func() {
pack.RunSuccessfully(
"build", repoName,
"--default-process", "hello",
"-p", filepath.Join("testdata", "mock_app"),
)
assertImage.RunsWithLogs(repoName, "hello world")
})
})
when("--buildpack", func() {
when("the argument is an ID", func() {
it("adds the buildpacks to the builder if necessary and runs them", func() {
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--buildpack", "simple/layers", // can omit version if only one
"--buildpack", "[email protected]",
)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertTestAppOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output)
assertTestAppOutput.ReportsBuildStep("Simple Layers Buildpack")
assertTestAppOutput.ReportsBuildStep("NOOP Buildpack")
assertOutput.ReportsSuccessfulImageBuild(repoName)
t.Log("app is runnable")
assertImage.RunsWithOutput(
repoName,
"Launch Dep Contents",
"Cached Dep Contents",
)
})
})
when("the argument is an archive", func() {
var tmpDir string
it.Before(func() {
var err error
tmpDir, err = ioutil.TempDir("", "archive-buildpack-tests-")
assert.Nil(err)
})
it.After(func() {
assert.Succeeds(os.RemoveAll(tmpDir))
})
it("adds the buildpack to the builder and runs it", func() {
buildpackManager.PrepareBuildpacks(tmpDir, buildpacks.ArchiveNotInBuilder)
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--buildpack", buildpacks.ArchiveNotInBuilder.FullPathIn(tmpDir),
)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsAddingBuildpack("local/bp", "local-bp-version")
assertOutput.ReportsSuccessfulImageBuild(repoName)
assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output)
assertBuildpackOutput.ReportsBuildStep("Local Buildpack")
})
})
when("the argument is directory", func() {
var tmpDir string
it.Before(func() {
var err error
tmpDir, err = ioutil.TempDir("", "folder-buildpack-tests-")
assert.Nil(err)
})
it.After(func() {
_ = os.RemoveAll(tmpDir)
})
it("adds the buildpacks to the builder and runs it", func() {
h.SkipIf(t, runtime.GOOS == "windows", "buildpack directories not supported on windows")
buildpackManager.PrepareBuildpacks(tmpDir, buildpacks.FolderNotInBuilder)
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--buildpack", buildpacks.FolderNotInBuilder.FullPathIn(tmpDir),
)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsAddingBuildpack("local/bp", "local-bp-version")
assertOutput.ReportsSuccessfulImageBuild(repoName)
assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output)
assertBuildpackOutput.ReportsBuildStep("Local Buildpack")
})
})
when("the argument is a buildpackage image", func() {
var (
tmpDir string
packageImageName string
)
it.After(func() {
imageManager.CleanupImages(packageImageName)
_ = os.RemoveAll(tmpDir)
})
it("adds the buildpacks to the builder and runs them", func() {
packageImageName = registryConfig.RepoName("buildpack-" + h.RandString(8))
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, "package_for_build_cmd.toml", imageManager.HostOS())
packageImage := buildpacks.NewPackageImage(
t,
pack,
packageImageName,
packageTomlPath,
buildpacks.WithRequiredBuildpacks(
buildpacks.FolderSimpleLayersParent,
buildpacks.FolderSimpleLayers,
),
)
buildpackManager.PrepareBuildpacks(tmpDir, packageImage)
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--buildpack", packageImageName,
)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsAddingBuildpack(
"simple/layers/parent",
"simple-layers-parent-version",
)
assertOutput.ReportsAddingBuildpack("simple/layers", "simple-layers-version")
assertOutput.ReportsSuccessfulImageBuild(repoName)
assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output)
assertBuildpackOutput.ReportsBuildStep("Simple Layers Buildpack")
})
})
when("the argument is a buildpackage file", func() {
var tmpDir string
it.Before(func() {
var err error
tmpDir, err = ioutil.TempDir("", "package-file")
assert.Nil(err)
})
it.After(func() {
assert.Succeeds(os.RemoveAll(tmpDir))
})
it("adds the buildpacks to the builder and runs them", func() {
packageFileLocation := filepath.Join(
tmpDir,
fmt.Sprintf("buildpack-%s.cnb", h.RandString(8)),
)
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, "package_for_build_cmd.toml", imageManager.HostOS())
packageFile := buildpacks.NewPackageFile(
t,
pack,
packageFileLocation,
packageTomlPath,
buildpacks.WithRequiredBuildpacks(
buildpacks.FolderSimpleLayersParent,
buildpacks.FolderSimpleLayers,
),
)
buildpackManager.PrepareBuildpacks(tmpDir, packageFile)
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--buildpack", packageFileLocation,
)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsAddingBuildpack(
"simple/layers/parent",
"simple-layers-parent-version",
)
assertOutput.ReportsAddingBuildpack("simple/layers", "simple-layers-version")
assertOutput.ReportsSuccessfulImageBuild(repoName)
assertBuildpackOutput := assertions.NewTestBuildpackOutputAssertionManager(t, output)
assertBuildpackOutput.ReportsBuildStep("Simple Layers Buildpack")
})
})
when("the buildpack stack doesn't match the builder", func() {
var otherStackBuilderTgz string
it.Before(func() {
otherStackBuilderTgz = h.CreateTGZ(t, filepath.Join(bpDir, "other-stack-buildpack"), "./", 0755)
})
it.After(func() {
assert.Succeeds(os.Remove(otherStackBuilderTgz))
})
it("errors", func() {
output, err := pack.Run(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--buildpack", otherStackBuilderTgz,
)
assert.NotNil(err)
assert.Contains(output, "other/stack/bp")
assert.Contains(output, "other-stack-version")
assert.Contains(output, "does not support stack 'pack.test.stack'")
})
})
})
when("--env-file", func() {
var envPath string
it.Before(func() {
envfile, err := ioutil.TempFile("", "envfile")
assert.Nil(err)
defer envfile.Close()
err = os.Setenv("ENV2_CONTENTS", "Env2 Layer Contents From Environment")
assert.Nil(err)
envfile.WriteString(`
DETECT_ENV_BUILDPACK=true
ENV1_CONTENTS=Env1 Layer Contents From File
ENV2_CONTENTS
`)
envPath = envfile.Name()
})
it.After(func() {
assert.Succeeds(os.Unsetenv("ENV2_CONTENTS"))
assert.Succeeds(os.RemoveAll(envPath))
})
it("provides the env vars to the build and detect steps", func() {
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--env-file", envPath,
)
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName)
assertImage.RunsWithOutput(
repoName,
"Env2 Layer Contents From Environment",
"Env1 Layer Contents From File",
)
})
})
when("--env", func() {
it.Before(func() {
assert.Succeeds(os.Setenv("ENV2_CONTENTS", "Env2 Layer Contents From Environment"))
})
it.After(func() {
assert.Succeeds(os.Unsetenv("ENV2_CONTENTS"))
})
it("provides the env vars to the build and detect steps", func() {
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--env", "DETECT_ENV_BUILDPACK=true",
"--env", `ENV1_CONTENTS="Env1 Layer Contents From Command Line"`,
"--env", "ENV2_CONTENTS",
)
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName)
assertImage.RunsWithOutput(
repoName,
"Env2 Layer Contents From Environment",
"Env1 Layer Contents From Command Line",
)
})
})
when("--run-image", func() {
var runImageName string
when("the run-image has the correct stack ID", func() {
it.Before(func() {
user := func() string {
if imageManager.HostOS() == "windows" {
return "ContainerAdministrator"
}
return "root"
}
runImageName = h.CreateImageOnRemote(t, dockerCli, registryConfig, "custom-run-image"+h.RandString(10), fmt.Sprintf(`
FROM %s
USER %s
RUN echo "custom-run" > /custom-run.txt
USER pack
`, runImage, user()))
})
it.After(func() {
imageManager.CleanupImages(runImageName)
})
it("uses the run image as the base image", func() {
output := pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--run-image", runImageName,
)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsSuccessfulImageBuild(repoName)
assertOutput.ReportsPullingImage(runImageName)
t.Log("app is runnable")
assertImage.RunsWithOutput(
repoName,
"Launch Dep Contents",
"Cached Dep Contents",
)
t.Log("uses the run image as the base image")
assertImage.HasBaseImage(repoName, runImageName)
})
})
when("the run image has the wrong stack ID", func() {
it.Before(func() {
runImageName = h.CreateImageOnRemote(t, dockerCli, registryConfig, "custom-run-image"+h.RandString(10), fmt.Sprintf(`
FROM %s
LABEL io.buildpacks.stack.id=other.stack.id
USER pack
`, runImage))
})
it.After(func() {
imageManager.CleanupImages(runImageName)
})
it("fails with a message", func() {
output, err := pack.Run(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--run-image", runImageName,
)
assert.NotNil(err)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsRunImageStackNotMatchingBuilder(
"other.stack.id",
"pack.test.stack",
)
})
})
})
when("--publish", func() {
it("creates image on the registry", func() {
buildArgs := []string{
repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--publish",
}
if imageManager.HostOS() != "windows" {
buildArgs = append(buildArgs, "--network", "host")
}
output := pack.RunSuccessfully("build", buildArgs...)
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName)
t.Log("checking that registry has contents")
assertImage.ExistsInRegistryCatalog(repo)
cmdName := "inspect"
if !pack.Supports("inspect") {
cmdName = "inspect-image"
}
t.Log("inspect-image")
var (
webCommand string
helloCommand string
helloArgs []string
helloArgsPrefix string
)
if imageManager.HostOS() == "windows" {
webCommand = ".\\run"
helloCommand = "cmd"
helloArgs = []string{"/c", "echo hello world"}
helloArgsPrefix = " "
} else {
webCommand = "./run"
helloCommand = "echo"
helloArgs = []string{"hello", "world"}
helloArgsPrefix = ""
}
formats := []compareFormat{
{
extension: "txt",
compareFunc: assert.TrimmedEq,
outputArg: "human-readable",
},
{
extension: "json",
compareFunc: assert.EqualJSON,
outputArg: "json",
},
{
extension: "yaml",
compareFunc: assert.EqualYAML,
outputArg: "yaml",
},
{
extension: "toml",
compareFunc: assert.EqualTOML,
outputArg: "toml",
},
}
for _, format := range formats {
t.Logf("inspecting image %s format", format.outputArg)
output = pack.RunSuccessfully(cmdName, repoName, "--output", format.outputArg)
expectedOutput := pack.FixtureManager().TemplateFixture(
fmt.Sprintf("inspect_image_published_output.%s", format.extension),
map[string]interface{}{
"image_name": repoName,
"base_image_ref": strings.Join([]string{runImageMirror, h.Digest(t, runImageMirror)}, "@"),
"base_image_top_layer": h.TopLayerDiffID(t, runImageMirror),
"run_image_mirror": runImageMirror,
"web_command": webCommand,
"hello_command": helloCommand,
"hello_args": helloArgs,
"hello_args_prefix": helloArgsPrefix,
},
)
format.compareFunc(output, expectedOutput)
}
imageManager.PullImage(repoName, registryConfig.RegistryAuth())
t.Log("app is runnable")
assertImage.RunsWithOutput(
repoName,
"Launch Dep Contents",
"Cached Dep Contents",
)
})
when("additional tags are specified with --tag", func() {
var additionalRepo string
var additionalRepoName string
it.Before(func() {
additionalRepo = fmt.Sprintf("%s_additional", repo)
additionalRepoName = fmt.Sprintf("%s_additional", repoName)
})
it.After(func() {
imageManager.CleanupImages(additionalRepoName)
})
it("creates additional tags on the registry", func() {
buildArgs := []string{
repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--publish",
"--tag", additionalRepoName,
}
if imageManager.HostOS() != "windows" {
buildArgs = append(buildArgs, "--network", "host")
}
output := pack.RunSuccessfully("build", buildArgs...)
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName)
t.Log("checking that registry has contents")
assertImage.ExistsInRegistryCatalog(repo)
assertImage.ExistsInRegistryCatalog(additionalRepo)
imageManager.PullImage(repoName, registryConfig.RegistryAuth())
imageManager.PullImage(additionalRepoName, registryConfig.RegistryAuth())
t.Log("additional app is runnable")
assertImage.RunsWithOutput(
additionalRepoName,
"Launch Dep Contents",
"Cached Dep Contents",
)
imageDigest := h.Digest(t, repoName)
additionalDigest := h.Digest(t, additionalRepoName)
assert.Equal(imageDigest, additionalDigest)
})
})
})
when("--cache-image", func() {
var cacheImageName string
it.Before(func() {
cacheImageName = fmt.Sprintf("%s-cache", repoName)
})
it("creates image and cache image on the registry", func() {
buildArgs := []string{
repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--publish",
"--cache-image",
cacheImageName,
}
if imageManager.HostOS() != "windows" {
buildArgs = append(buildArgs, "--network", "host")
}
output := pack.RunSuccessfully("build", buildArgs...)
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulImageBuild(repoName)
cacheImageRef, err := name.ParseReference(cacheImageName, name.WeakValidation)
assert.Nil(err)
t.Log("checking that registry has contents")
assertImage.CanBePulledFromRegistry(repoName)
if imageManager.HostOS() == "windows" {
// Cache images are automatically Linux container images, and therefore can't be pulled
// and inspected correctly on WCOW systems
// https://github.com/buildpacks/lifecycle/issues/529
imageManager.PullImage(cacheImageRef.Name(), registryConfig.RegistryAuth())
} else {
assertImage.CanBePulledFromRegistry(cacheImageRef.Name())
}
defer imageManager.CleanupImages(cacheImageRef.Name())
})
})
when("ctrl+c", func() {
it("stops the execution", func() {
var buf = new(bytes.Buffer)
command := pack.StartWithWriter(
buf,
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
)
go command.TerminateAtStep("DETECTING")
err := command.Wait()
assert.NotNil(err)
assert.NotContains(buf.String(), "Successfully built image")
})
})
when("--descriptor", func() {
when("using an included buildpack", func() {
var tempAppDir, tempWorkingDir, origWorkingDir string
it.Before(func() {
h.SkipIf(t, runtime.GOOS == "windows", "buildpack directories not supported on windows")
var err error
tempAppDir, err = ioutil.TempDir("", "descriptor-app")
assert.Nil(err)
tempWorkingDir, err = ioutil.TempDir("", "descriptor-app")
assert.Nil(err)
origWorkingDir, err = os.Getwd()
assert.Nil(err)
// Create test directories and files:
//
// ├── cookie.jar
// ├── descriptor-buildpack/...
// ├── media
// │ ├── mountain.jpg
// │ └── person.png
// └── test.sh
assert.Succeeds(os.Mkdir(filepath.Join(tempAppDir, "descriptor-buildpack"), os.ModePerm))
h.RecursiveCopy(t, filepath.Join(bpDir, "descriptor-buildpack"), filepath.Join(tempAppDir, "descriptor-buildpack"))
err = os.Mkdir(filepath.Join(tempAppDir, "media"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "media", "mountain.jpg"), []byte("fake image bytes"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "media", "person.png"), []byte("fake image bytes"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "cookie.jar"), []byte("chocolate chip"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "test.sh"), []byte("echo test"), 0755)
assert.Nil(err)
projectToml := `
[project]
name = "exclude test"
[[project.licenses]]
type = "MIT"
[build]
exclude = [ "*.sh", "media/person.png", "descriptor-buildpack" ]
[[build.buildpacks]]
uri = "descriptor-buildpack"
`
excludeDescriptorPath := filepath.Join(tempAppDir, "project.toml")
err = ioutil.WriteFile(excludeDescriptorPath, []byte(projectToml), 0755)
assert.Nil(err)
// set working dir to be outside of the app we are building
assert.Succeeds(os.Chdir(tempWorkingDir))
})
it.After(func() {
os.RemoveAll(tempAppDir)
if origWorkingDir != "" {
assert.Succeeds(os.Chdir(origWorkingDir))
}
})
it("uses buildpack specified by descriptor", func() {
output := pack.RunSuccessfully(
"build",
repoName,
"-p", tempAppDir,
)
assert.NotContains(output, "person.png")
assert.NotContains(output, "test.sh")
})
})
when("exclude and include", func() {
var buildpackTgz, tempAppDir string
it.Before(func() {
buildpackTgz = h.CreateTGZ(t, filepath.Join(bpDir, "descriptor-buildpack"), "./", 0755)
var err error
tempAppDir, err = ioutil.TempDir("", "descriptor-app")
assert.Nil(err)
// Create test directories and files:
//
// ├── cookie.jar
// ├── other-cookie.jar
// ├── nested-cookie.jar
// ├── nested
// │ └── nested-cookie.jar
// ├── secrets
// │ ├── api_keys.json
// │   └── user_token
// ├── media
// │ ├── mountain.jpg
// │ └── person.png
// └── test.sh
err = os.Mkdir(filepath.Join(tempAppDir, "secrets"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "secrets", "api_keys.json"), []byte("{}"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "secrets", "user_token"), []byte("token"), 0755)
assert.Nil(err)
err = os.Mkdir(filepath.Join(tempAppDir, "nested"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "nested", "nested-cookie.jar"), []byte("chocolate chip"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "other-cookie.jar"), []byte("chocolate chip"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "nested-cookie.jar"), []byte("chocolate chip"), 0755)
assert.Nil(err)
err = os.Mkdir(filepath.Join(tempAppDir, "media"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "media", "mountain.jpg"), []byte("fake image bytes"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "media", "person.png"), []byte("fake image bytes"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "cookie.jar"), []byte("chocolate chip"), 0755)
assert.Nil(err)
err = ioutil.WriteFile(filepath.Join(tempAppDir, "test.sh"), []byte("echo test"), 0755)
assert.Nil(err)
})
it.After(func() {
assert.Succeeds(os.RemoveAll(tempAppDir))
})
it("should exclude ALL specified files and directories", func() {
projectToml := `
[project]
name = "exclude test"
[[project.licenses]]
type = "MIT"
[build]
exclude = [ "*.sh", "secrets/", "media/metadata", "/other-cookie.jar" ,"/nested-cookie.jar"]
`
excludeDescriptorPath := filepath.Join(tempAppDir, "exclude.toml")
err := ioutil.WriteFile(excludeDescriptorPath, []byte(projectToml), 0755)
assert.Nil(err)
output := pack.RunSuccessfully(
"build",
repoName,
"-p", tempAppDir,
"--buildpack", buildpackTgz,
"--descriptor", excludeDescriptorPath,
)
assert.NotContains(output, "api_keys.json")
assert.NotContains(output, "user_token")
assert.NotContains(output, "test.sh")
assert.NotContains(output, "other-cookie.jar")
assert.Contains(output, "cookie.jar")
assert.Contains(output, "nested-cookie.jar")
assert.Contains(output, "mountain.jpg")
assert.Contains(output, "person.png")
})
it("should ONLY include specified files and directories", func() {
projectToml := `
[project]
name = "include test"
[[project.licenses]]
type = "MIT"
[build]
include = [ "*.jar", "media/mountain.jpg", "/media/person.png", ]
`
includeDescriptorPath := filepath.Join(tempAppDir, "include.toml")
err := ioutil.WriteFile(includeDescriptorPath, []byte(projectToml), 0755)
assert.Nil(err)
output := pack.RunSuccessfully(
"build",
repoName,
"-p", tempAppDir,
"--buildpack", buildpackTgz,
"--descriptor", includeDescriptorPath,
)
assert.NotContains(output, "api_keys.json")
assert.NotContains(output, "user_token")
assert.NotContains(output, "test.sh")
assert.Contains(output, "cookie.jar")
assert.Contains(output, "mountain.jpg")
assert.Contains(output, "person.png")
})
})
})
})
})
when("inspecting builder", func() {
when("inspecting a nested builder", func() {
it.Before(func() {
// create our nested builder
h.SkipIf(t, imageManager.HostOS() == "windows", "These tests are not yet compatible with Windows-based containers")
// create a task, handled by a 'task manager' which executes our pack commands during tests.
// the task key de-duplicates identical tasks across the suite.
key := taskKey(
"create-complex-builder",
append(
[]string{runImageMirror, createBuilderPackConfig.Path(), lifecycle.Identifier()},
createBuilderPackConfig.FixturePaths()...,
)...,
)
// run the task via the suite manager and save its output, so future calls with the same key reuse it.
// note: builder creation should go through createBuilderPack.
value, err := suiteManager.RunTaskOnceString(key, func() (string, error) {
return createComplexBuilder(
t,
assert,
createBuilderPack,
lifecycle,
buildpackManager,
runImageMirror,
)
})
assert.Nil(err)
// register task to be run to 'clean up' a task
suiteManager.RegisterCleanUp("clean-"+key, func() error {
imageManager.CleanupImages(value)
return nil
})
builderName = value
output := pack.RunSuccessfully(
"config", "run-image-mirrors", "add", "pack-test/run", "--mirror", "some-registry.com/pack-test/run1")
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsSuccesfulRunImageMirrorsAdd("pack-test/run", "some-registry.com/pack-test/run1")
})
it("displays nested Detection Order groups", func() {
var output string
if pack.Supports("builder inspect") {
output = pack.RunSuccessfully("builder", "inspect", builderName)
} else {
output = pack.RunSuccessfully("inspect-builder", builderName)
}
deprecatedBuildpackAPIs,
supportedBuildpackAPIs,
deprecatedPlatformAPIs,
supportedPlatformAPIs := lifecycle.OutputForAPIs()
expectedOutput := pack.FixtureManager().TemplateVersionedFixture(
"inspect_%s_builder_nested_output.txt",
createBuilderPack.SanitizedVersion(),
"inspect_builder_nested_output.txt",
map[string]interface{}{
"builder_name": builderName,
"lifecycle_version": lifecycle.Version(),
"deprecated_buildpack_apis": deprecatedBuildpackAPIs,
"supported_buildpack_apis": supportedBuildpackAPIs,
"deprecated_platform_apis": deprecatedPlatformAPIs,
"supported_platform_apis": supportedPlatformAPIs,
"run_image_mirror": runImageMirror,
"pack_version": createBuilderPack.Version(),
"trusted": "No",
// set previous pack template fields
"buildpack_api_version": lifecycle.EarliestBuildpackAPIVersion(),
"platform_api_version": lifecycle.EarliestPlatformAPIVersion(),
},
)
assert.TrimmedEq(output, expectedOutput)
})
it("provides nested detection output up to depth", func() {
depth := "1"
var output string
if pack.Supports("builder inspect") {
output = pack.RunSuccessfully("builder", "inspect", "--depth", depth, builderName)
} else {
output = pack.RunSuccessfully("inspect-builder", "--depth", depth, builderName)
}
deprecatedBuildpackAPIs,
supportedBuildpackAPIs,
deprecatedPlatformAPIs,
supportedPlatformAPIs := lifecycle.OutputForAPIs()
expectedOutput := pack.FixtureManager().TemplateVersionedFixture(
"inspect_%s_builder_nested_depth_2_output.txt",
createBuilderPack.SanitizedVersion(),
"inspect_builder_nested_depth_2_output.txt",
map[string]interface{}{
"builder_name": builderName,
"lifecycle_version": lifecycle.Version(),
"deprecated_buildpack_apis": deprecatedBuildpackAPIs,
"supported_buildpack_apis": supportedBuildpackAPIs,
"deprecated_platform_apis": deprecatedPlatformAPIs,
"supported_platform_apis": supportedPlatformAPIs,
"run_image_mirror": runImageMirror,
"pack_version": createBuilderPack.Version(),
"trusted": "No",
// set previous pack template fields
"buildpack_api_version": lifecycle.EarliestBuildpackAPIVersion(),
"platform_api_version": lifecycle.EarliestPlatformAPIVersion(),
},
)
assert.TrimmedEq(output, expectedOutput)
})
when("output format is toml", func() {
it("prints builder information in toml format", func() {
var output string
if pack.Supports("builder inspect") {
output = pack.RunSuccessfully("builder", "inspect", builderName, "--output", "toml")
} else {
output = pack.RunSuccessfully("inspect-builder", builderName, "--output", "toml")
}
err := toml.NewDecoder(strings.NewReader(string(output))).Decode(&struct{}{})
assert.Nil(err)
deprecatedBuildpackAPIs,
supportedBuildpackAPIs,
deprecatedPlatformAPIs,
supportedPlatformAPIs := lifecycle.TOMLOutputForAPIs()
expectedOutput := pack.FixtureManager().TemplateVersionedFixture(
"inspect_%s_builder_nested_output_toml.txt",
createBuilderPack.SanitizedVersion(),
"inspect_builder_nested_output_toml.txt",
map[string]interface{}{
"builder_name": builderName,
"lifecycle_version": lifecycle.Version(),
"deprecated_buildpack_apis": deprecatedBuildpackAPIs,
"supported_buildpack_apis": supportedBuildpackAPIs,
"deprecated_platform_apis": deprecatedPlatformAPIs,
"supported_platform_apis": supportedPlatformAPIs,
"run_image_mirror": runImageMirror,
"pack_version": createBuilderPack.Version(),
},
)
assert.TrimmedEq(string(output), expectedOutput)
})
})
when("output format is yaml", func() {
it("prints builder information in yaml format", func() {
var output string
if pack.Supports("builder inspect") {
output = pack.RunSuccessfully("builder", "inspect", builderName, "--output", "yaml")
} else {
output = pack.RunSuccessfully("inspect-builder", builderName, "--output", "yaml")
}
err := yaml.Unmarshal([]byte(output), &struct{}{})
assert.Nil(err)
deprecatedBuildpackAPIs,
supportedBuildpackAPIs,
deprecatedPlatformAPIs,
supportedPlatformAPIs := lifecycle.YAMLOutputForAPIs(14)
expectedOutput := pack.FixtureManager().TemplateVersionedFixture(
"inspect_%s_builder_nested_output_yaml.txt",
createBuilderPack.SanitizedVersion(),
"inspect_builder_nested_output_yaml.txt",
map[string]interface{}{
"builder_name": builderName,
"lifecycle_version": lifecycle.Version(),
"deprecated_buildpack_apis": deprecatedBuildpackAPIs,
"supported_buildpack_apis": supportedBuildpackAPIs,
"deprecated_platform_apis": deprecatedPlatformAPIs,
"supported_platform_apis": supportedPlatformAPIs,
"run_image_mirror": runImageMirror,
"pack_version": createBuilderPack.Version(),
},
)
assert.TrimmedEq(string(output), expectedOutput)
})
})
when("output format is json", func() {
it("prints builder information in json format", func() {
var output string
if pack.Supports("builder inspect") {
output = pack.RunSuccessfully("builder", "inspect", builderName, "--output", "json")
} else {
output = pack.RunSuccessfully("inspect-builder", builderName, "--output", "json")
}
err := json.Unmarshal([]byte(output), &struct{}{})
assert.Nil(err)
var prettifiedOutput bytes.Buffer
err = json.Indent(&prettifiedOutput, []byte(output), "", " ")
assert.Nil(err)
deprecatedBuildpackAPIs,
supportedBuildpackAPIs,
deprecatedPlatformAPIs,
supportedPlatformAPIs := lifecycle.JSONOutputForAPIs(8)
expectedOutput := pack.FixtureManager().TemplateVersionedFixture(
"inspect_%s_builder_nested_output_json.txt",
createBuilderPack.SanitizedVersion(),
"inspect_builder_nested_output_json.txt",
map[string]interface{}{
"builder_name": builderName,
"lifecycle_version": lifecycle.Version(),
"deprecated_buildpack_apis": deprecatedBuildpackAPIs,
"supported_buildpack_apis": supportedBuildpackAPIs,
"deprecated_platform_apis": deprecatedPlatformAPIs,
"supported_platform_apis": supportedPlatformAPIs,
"run_image_mirror": runImageMirror,
"pack_version": createBuilderPack.Version(),
},
)
assert.Equal(prettifiedOutput.String(), expectedOutput)
})
})
})
it("displays configuration for a builder (local and remote)", func() {
output := pack.RunSuccessfully(
"config", "run-image-mirrors", "add", "pack-test/run", "--mirror", "some-registry.com/pack-test/run1",
)
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsSuccesfulRunImageMirrorsAdd("pack-test/run", "some-registry.com/pack-test/run1")
if pack.Supports("builder inspect") {
output = pack.RunSuccessfully("builder", "inspect", builderName)
} else {
output = pack.RunSuccessfully("inspect-builder", builderName)
}
deprecatedBuildpackAPIs,
supportedBuildpackAPIs,
deprecatedPlatformAPIs,
supportedPlatformAPIs := lifecycle.OutputForAPIs()
expectedOutput := pack.FixtureManager().TemplateVersionedFixture(
"inspect_%s_builder_output.txt",
createBuilderPack.SanitizedVersion(),
"inspect_builder_output.txt",
map[string]interface{}{
"builder_name": builderName,
"lifecycle_version": lifecycle.Version(),
"deprecated_buildpack_apis": deprecatedBuildpackAPIs,
"supported_buildpack_apis": supportedBuildpackAPIs,
"deprecated_platform_apis": deprecatedPlatformAPIs,
"supported_platform_apis": supportedPlatformAPIs,
"run_image_mirror": runImageMirror,
"pack_version": createBuilderPack.Version(),
"trusted": "No",
// set previous pack template fields
"buildpack_api_version": lifecycle.EarliestBuildpackAPIVersion(),
"platform_api_version": lifecycle.EarliestPlatformAPIVersion(),
},
)
assert.TrimmedEq(output, expectedOutput)
})
it("indicates builder is trusted", func() {
pack.JustRunSuccessfully("config", "trusted-builders", "add", builderName)
pack.JustRunSuccessfully("config", "run-image-mirrors", "add", "pack-test/run", "--mirror", "some-registry.com/pack-test/run1")
var output string
if pack.Supports("builder inspect") {
output = pack.RunSuccessfully("builder", "inspect", builderName)
} else {
output = pack.RunSuccessfully("inspect-builder", builderName)
}
deprecatedBuildpackAPIs,
supportedBuildpackAPIs,
deprecatedPlatformAPIs,
supportedPlatformAPIs := lifecycle.OutputForAPIs()
expectedOutput := pack.FixtureManager().TemplateVersionedFixture(
"inspect_%s_builder_output.txt",
createBuilderPack.SanitizedVersion(),
"inspect_builder_output.txt",
map[string]interface{}{
"builder_name": builderName,
"lifecycle_version": lifecycle.Version(),
"deprecated_buildpack_apis": deprecatedBuildpackAPIs,
"supported_buildpack_apis": supportedBuildpackAPIs,
"deprecated_platform_apis": deprecatedPlatformAPIs,
"supported_platform_apis": supportedPlatformAPIs,
"run_image_mirror": runImageMirror,
"pack_version": createBuilderPack.Version(),
"trusted": "Yes",
// set previous pack template fields
"buildpack_api_version": lifecycle.EarliestBuildpackAPIVersion(),
"platform_api_version": lifecycle.EarliestPlatformAPIVersion(),
},
)
assert.TrimmedEq(output, expectedOutput)
})
})
when("rebase", func() {
var repoName, runBefore, origID string
var buildRunImage func(string, string, string)
it.Before(func() {
pack.JustRunSuccessfully("config", "trusted-builders", "add", builderName)
repoName = registryConfig.RepoName("some-org/" + h.RandString(10))
runBefore = registryConfig.RepoName("run-before/" + h.RandString(10))
buildRunImage = func(newRunImage, contents1, contents2 string) {
user := func() string {
if imageManager.HostOS() == "windows" {
return "ContainerAdministrator"
}
return "root"
}
h.CreateImage(t, dockerCli, newRunImage, fmt.Sprintf(`
FROM %s
USER %s
RUN echo %s > /contents1.txt
RUN echo %s > /contents2.txt
USER pack
`, runImage, user(), contents1, contents2))
}
buildRunImage(runBefore, "contents-before-1", "contents-before-2")
pack.RunSuccessfully(
"build", repoName,
"-p", filepath.Join("testdata", "mock_app"),
"--builder", builderName,
"--run-image", runBefore,
"--pull-policy", "never",
)
origID = h.ImageID(t, repoName)
assertImage.RunsWithOutput(
repoName,
"contents-before-1",
"contents-before-2",
)
})
it.After(func() {
imageManager.CleanupImages(origID, repoName, runBefore)
ref, err := name.ParseReference(repoName, name.WeakValidation)
assert.Nil(err)
buildCacheVolume := cache.NewVolumeCache(ref, "build", dockerCli)
launchCacheVolume := cache.NewVolumeCache(ref, "launch", dockerCli)
assert.Succeeds(buildCacheVolume.Clear(context.TODO()))
assert.Succeeds(launchCacheVolume.Clear(context.TODO()))
})
when("daemon", func() {
when("--run-image", func() {
var runAfter string
it.Before(func() {
runAfter = registryConfig.RepoName("run-after/" + h.RandString(10))
buildRunImage(runAfter, "contents-after-1", "contents-after-2")
})
it.After(func() {
imageManager.CleanupImages(runAfter)
})
it("uses provided run image", func() {
output := pack.RunSuccessfully(
"rebase", repoName,
"--run-image", runAfter,
"--pull-policy", "never",
)
assert.Contains(output, fmt.Sprintf("Successfully rebased image '%s'", repoName))
assertImage.RunsWithOutput(
repoName,
"contents-after-1",
"contents-after-2",
)
})
})
when("local config has a mirror", func() {
var localRunImageMirror string
it.Before(func() {
localRunImageMirror = registryConfig.RepoName("run-after/" + h.RandString(10))
buildRunImage(localRunImageMirror, "local-mirror-after-1", "local-mirror-after-2")
pack.JustRunSuccessfully("config", "run-image-mirrors", "add", runImage, "-m", localRunImageMirror)
})
it.After(func() {
imageManager.CleanupImages(localRunImageMirror)
})
it("prefers the local mirror", func() {
output := pack.RunSuccessfully("rebase", repoName, "--pull-policy", "never")
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsSelectingRunImageMirrorFromLocalConfig(localRunImageMirror)
assertOutput.ReportsSuccessfulRebase(repoName)
assertImage.RunsWithOutput(
repoName,
"local-mirror-after-1",
"local-mirror-after-2",
)
})
})
when("image metadata has a mirror", func() {
it.Before(func() {
// clean up existing mirror first to avoid leaking images
imageManager.CleanupImages(runImageMirror)
buildRunImage(runImageMirror, "mirror-after-1", "mirror-after-2")
})
it("selects the best mirror", func() {
output := pack.RunSuccessfully("rebase", repoName, "--pull-policy", "never")
assertOutput := assertions.NewOutputAssertionManager(t, output)
assertOutput.ReportsSelectingRunImageMirror(runImageMirror)
assertOutput.ReportsSuccessfulRebase(repoName)
assertImage.RunsWithOutput(
repoName,
"mirror-after-1",
"mirror-after-2",
)
})
})
})
when("--publish", func() {
it.Before(func() {
assert.Succeeds(h.PushImage(dockerCli, repoName, registryConfig))
})
when("--run-image", func() {
var runAfter string
it.Before(func() {
runAfter = registryConfig.RepoName("run-after/" + h.RandString(10))
buildRunImage(runAfter, "contents-after-1", "contents-after-2")
assert.Succeeds(h.PushImage(dockerCli, runAfter, registryConfig))
})
it.After(func() {
imageManager.CleanupImages(runAfter)
})
it("uses provided run image", func() {
output := pack.RunSuccessfully("rebase", repoName, "--publish", "--run-image", runAfter)
assertions.NewOutputAssertionManager(t, output).ReportsSuccessfulRebase(repoName)
assertImage.CanBePulledFromRegistry(repoName)
assertImage.RunsWithOutput(
repoName,
"contents-after-1",
"contents-after-2",
)
})
})
})
})
})
})
}
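// buildpacksDir returns the testdata directory that holds the mock buildpacks
// written against the given buildpack API version.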
func buildpacksDir(bpAPIVersion string) string {
return filepath.Join("testdata", "mock_buildpacks", bpAPIVersion)
}
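// createComplexBuilder builds a builder image whose buildpacks include nested
// buildpack package images (nested-level-1 -> nested-level-2 -> simple-layers),
// renders the nested_builder.toml fixture, creates the builder, and pushes it
// to the test registry. It returns the builder image name.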
func createComplexBuilder(t *testing.T,
assert h.AssertionManager,
pack *invoke.PackInvoker,
lifecycle config.LifecycleAsset,
buildpackManager buildpacks.BuildpackManager,
runImageMirror string,
) (string, error) {
t.Log("creating complex builder image...")
// CREATE TEMP WORKING DIR
tmpDir, err := ioutil.TempDir("", "create-complex-test-builder")
if err != nil {
return "", err
}
defer os.RemoveAll(tmpDir)
// ARCHIVE BUILDPACKS
builderBuildpacks := []buildpacks.TestBuildpack{
buildpacks.Noop,
buildpacks.Noop2,
buildpacks.OtherStack,
buildpacks.ReadEnv,
}
templateMapping := map[string]interface{}{
"run_image_mirror": runImageMirror,
}
packageImageName := registryConfig.RepoName("nested-level-1-buildpack-" + h.RandString(8))
nestedLevelTwoBuildpackName := registryConfig.RepoName("nested-level-2-buildpack-" + h.RandString(8))
simpleLayersBuildpackName := registryConfig.RepoName("simple-layers-buildpack-" + h.RandString(8))
simpleLayersBuildpackDifferentShaName := registryConfig.RepoName("simple-layers-buildpack-different-name-" + h.RandString(8))
templateMapping["package_id"] = "simple/nested-level-1"
templateMapping["package_image_name"] = packageImageName
templateMapping["nested_level_1_buildpack"] = packageImageName
templateMapping["nested_level_2_buildpack"] = nestedLevelTwoBuildpackName
templateMapping["simple_layers_buildpack"] = simpleLayersBuildpackName
templateMapping["simple_layers_buildpack_different_sha"] = simpleLayersBuildpackDifferentShaName
fixtureManager := pack.FixtureManager()
nestedLevelOneConfigFile, err := ioutil.TempFile(tmpDir, "nested-level-1-package.toml")
assert.Nil(err)
fixtureManager.TemplateFixtureToFile(
"nested-level-1-buildpack_package.toml",
nestedLevelOneConfigFile,
templateMapping,
)
err = nestedLevelOneConfigFile.Close()
assert.Nil(err)
nestedLevelTwoConfigFile, err := ioutil.TempFile(tmpDir, "nested-level-2-package.toml")
assert.Nil(err)
fixtureManager.TemplateFixtureToFile(
"nested-level-2-buildpack_package.toml",
nestedLevelTwoConfigFile,
templateMapping,
)
err = nestedLevelTwoConfigFile.Close()
assert.Nil(err)
packageImageBuildpack := buildpacks.NewPackageImage(
t,
pack,
packageImageName,
nestedLevelOneConfigFile.Name(),
buildpacks.WithRequiredBuildpacks(
buildpacks.NestedLevelOne,
buildpacks.NewPackageImage(
t,
pack,
nestedLevelTwoBuildpackName,
nestedLevelTwoConfigFile.Name(),
buildpacks.WithRequiredBuildpacks(
buildpacks.NestedLevelTwo,
buildpacks.NewPackageImage(
t,
pack,
simpleLayersBuildpackName,
fixtureManager.FixtureLocation("simple-layers-buildpack_package.toml"),
buildpacks.WithRequiredBuildpacks(buildpacks.SimpleLayers),
),
),
),
),
)
simpleLayersDifferentShaBuildpack := buildpacks.NewPackageImage(
t,
pack,
simpleLayersBuildpackDifferentShaName,
fixtureManager.FixtureLocation("simple-layers-buildpack-different-sha_package.toml"),
buildpacks.WithRequiredBuildpacks(buildpacks.SimpleLayersDifferentSha),
)
defer imageManager.CleanupImages(packageImageName, nestedLevelTwoBuildpackName, simpleLayersBuildpackName, simpleLayersBuildpackDifferentShaName)
builderBuildpacks = append(
builderBuildpacks,
packageImageBuildpack,
simpleLayersDifferentShaBuildpack,
)
buildpackManager.PrepareBuildpacks(tmpDir, builderBuildpacks...)
// ADD lifecycle
if lifecycle.HasLocation() {
lifecycleURI := lifecycle.EscapedPath()
t.Logf("adding lifecycle path '%s' to builder config", lifecycleURI)
templateMapping["lifecycle_uri"] = lifecycleURI
} else {
lifecycleVersion := lifecycle.Version()
t.Logf("adding lifecycle version '%s' to builder config", lifecycleVersion)
templateMapping["lifecycle_version"] = lifecycleVersion
}
// RENDER builder.toml
builderConfigFile, err := ioutil.TempFile(tmpDir, "nested_builder.toml")
if err != nil {
return "", err
}
pack.FixtureManager().TemplateFixtureToFile("nested_builder.toml", builderConfigFile, templateMapping)
err = builderConfigFile.Close()
if err != nil {
return "", err
}
// NAME BUILDER
bldr := registryConfig.RepoName("test/builder-" + h.RandString(10))
// CREATE BUILDER
output := pack.RunSuccessfully(
"builder", "create", bldr,
"-c", builderConfigFile.Name(),
"--no-color",
)
assert.Contains(output, fmt.Sprintf("Successfully created builder image '%s'", bldr))
assert.Succeeds(h.PushImage(dockerCli, bldr, registryConfig))
return bldr, nil
}
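// createBuilder builds a basic builder image from the builder.toml fixture,
// packaging the simple-layers buildpack as a package image, then pushes the
// builder to the test registry and returns its name.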
func createBuilder(
t *testing.T,
assert h.AssertionManager,
pack *invoke.PackInvoker,
lifecycle config.LifecycleAsset,
buildpackManager buildpacks.BuildpackManager,
runImageMirror string,
) (string, error) {
t.Log("creating builder image...")
// CREATE TEMP WORKING DIR
tmpDir, err := ioutil.TempDir("", "create-test-builder")
assert.Nil(err)
defer os.RemoveAll(tmpDir)
templateMapping := map[string]interface{}{
"run_image_mirror": runImageMirror,
}
// ARCHIVE BUILDPACKS
builderBuildpacks := []buildpacks.TestBuildpack{
buildpacks.Noop,
buildpacks.Noop2,
buildpacks.OtherStack,
buildpacks.ReadEnv,
}
packageTomlPath := generatePackageTomlWithOS(t, assert, pack, tmpDir, "package.toml", imageManager.HostOS())
packageImageName := registryConfig.RepoName("simple-layers-package-image-buildpack-" + h.RandString(8))
packageImageBuildpack := buildpacks.NewPackageImage(
t,
pack,
packageImageName,
packageTomlPath,
buildpacks.WithRequiredBuildpacks(buildpacks.SimpleLayers),
)
defer imageManager.CleanupImages(packageImageName)
builderBuildpacks = append(builderBuildpacks, packageImageBuildpack)
templateMapping["package_image_name"] = packageImageName
templateMapping["package_id"] = "simple/layers"
buildpackManager.PrepareBuildpacks(tmpDir, builderBuildpacks...)
// ADD lifecycle
var lifecycleURI string
var lifecycleVersion string
if lifecycle.HasLocation() {
lifecycleURI = lifecycle.EscapedPath()
t.Logf("adding lifecycle path '%s' to builder config", lifecycleURI)
templateMapping["lifecycle_uri"] = lifecycleURI
} else {
lifecycleVersion = lifecycle.Version()
t.Logf("adding lifecycle version '%s' to builder config", lifecycleVersion)
templateMapping["lifecycle_version"] = lifecycleVersion
}
// RENDER builder.toml
configFileName := "builder.toml"
builderConfigFile, err := ioutil.TempFile(tmpDir, "builder.toml")
assert.Nil(err)
pack.FixtureManager().TemplateFixtureToFile(
configFileName,
builderConfigFile,
templateMapping,
)
err = builderConfigFile.Close()
assert.Nil(err)
// NAME BUILDER
bldr := registryConfig.RepoName("test/builder-" + h.RandString(10))
// CREATE BUILDER
output := pack.RunSuccessfully(
"builder", "create", bldr,
"-c", builderConfigFile.Name(),
"--no-color",
)
assert.Contains(output, fmt.Sprintf("Successfully created builder image '%s'", bldr))
assert.Succeeds(h.PushImage(dockerCli, bldr, registryConfig))
return bldr, nil
}
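// generatePackageTomlWithOS renders the named package.toml fixture with the
// given OS into a temp file and returns the path to that file.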
func generatePackageTomlWithOS(
t *testing.T,
assert h.AssertionManager,
pack *invoke.PackInvoker,
tmpDir string,
fixtureName string,
platform_os string,
) string {
t.Helper()
packageTomlFile, err := ioutil.TempFile(tmpDir, "package-*.toml")
assert.Nil(err)
pack.FixtureManager().TemplateFixtureToFile(
fixtureName,
packageTomlFile,
map[string]interface{}{
"OS": platform_os,
},
)
assert.Nil(packageTomlFile.Close())
return packageTomlFile.Name()
}
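// createStack builds the mock run and build stack images for the host OS,
// tags the run image as the given mirror, and pushes the mirror to the test registry.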
func createStack(t *testing.T, dockerCli client.CommonAPIClient, runImageMirror string) error {
t.Helper()
t.Log("creating stack images...")
stackBaseDir := filepath.Join("testdata", "mock_stack", imageManager.HostOS())
if err := createStackImage(dockerCli, runImage, filepath.Join(stackBaseDir, "run")); err != nil {
return err
}
if err := createStackImage(dockerCli, buildImage, filepath.Join(stackBaseDir, "build")); err != nil {
return err
}
imageManager.TagImage(runImage, runImageMirror)
if err := h.PushImage(dockerCli, runImageMirror, registryConfig); err != nil {
return err
}
return nil
}
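// createStackImage builds a single stack image from the Dockerfile in dir and
// tags it as repoName.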
func createStackImage(dockerCli client.CommonAPIClient, repoName string, dir string) error {
defaultFilterFunc := func(file string) bool { return true }
ctx := context.Background()
buildContext := archive.ReadDirAsTar(dir, "/", 0, 0, -1, true, false, defaultFilterFunc)
return h.CheckImageBuildResult(dockerCli.ImageBuild(ctx, buildContext, dockertypes.ImageBuildOptions{
Tags: []string{repoName},
Remove: true,
ForceRemove: true,
}))
}
// taskKey creates a key from the prefix and all arguments to be unique
func taskKey(prefix string, args ...string) string {
hash := sha256.New()
for _, v := range args {
hash.Write([]byte(v))
}
return fmt.Sprintf("%s-%s", prefix, hex.EncodeToString(hash.Sum(nil)))
}
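// Illustrative note (added for clarity, not part of the original helpers): the key is
// deterministic, so the same prefix and arguments always map to the same task. For
// example, taskKey("create-builder", "linux") returns "create-builder-" followed by
// the hex-encoded SHA-256 of "linux"; with several arguments the hash covers their
// concatenation.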
type compareFormat struct {
extension string
compareFunc func(string, string)
outputArg string
}
| ["\"DOCKER_HOST\""] | [] | ["DOCKER_HOST"] | [] | ["DOCKER_HOST"] | go | 1 | 0 |
spotfix/spotfix/wsgi.py | """
WSGI config for spotfix project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "spotfix.settings")
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 |
tests/apitests/python/test_user_group.py | # coding: utf-8
"""
Harbor API
These APIs provide services for manipulating Harbor project.
OpenAPI spec version: 1.4.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
sys.path.append(os.environ["SWAGGER_CLIENT_PATH"])
import unittest
import testutils
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.user_group import UserGroup
from swagger_client.models.configurations import Configurations
from pprint import pprint
#Testcase
#12-01-LDAP-usergroup-add
#12-02-LDAP-usergroup-update
#12-03-LDAP-usergroup-delete
class TestUserGroup(unittest.TestCase):
"""UserGroup unit test stubs"""
product_api = testutils.GetProductApi("admin", "Harbor12345")
groupId = 0
def setUp(self):
result = self.product_api.configurations_put(configurations=Configurations(ldap_group_attribute_name="cn", ldap_group_base_dn="ou=groups,dc=example,dc=com", ldap_group_search_filter="objectclass=groupOfNames", ldap_group_search_scope=2))
pprint(result)
pass
def tearDown(self):
if self.groupId > 0 :
self.product_api.usergroups_group_id_delete(group_id=self.groupId)
pass
def testAddUpdateUserGroup(self):
"""Test UserGroup"""
user_group = UserGroup(group_name="harbor_group123", group_type=1, ldap_group_dn="cn=harbor_group,ou=groups,dc=example,dc=com")
result = self.product_api.usergroups_post(usergroup=user_group)
pprint(result)
user_groups = self.product_api.usergroups_get()
found = False
for ug in user_groups :
if ug.group_name == "harbor_group123" :
found = True
print("Found usergroup")
pprint(ug)
self.groupId = ug.id
self.assertTrue(found)
result = self.product_api.usergroups_group_id_put(self.groupId, usergroup = UserGroup(group_name = "newharbor_group"))
new_user_group = self.product_api.usergroups_group_id_get(group_id=self.groupId)
self.assertEqual("newharbor_group", new_user_group.group_name)
pass
if __name__ == '__main__':
unittest.main()
| [] | [] | ["SWAGGER_CLIENT_PATH"] | [] | ["SWAGGER_CLIENT_PATH"] | python | 1 | 0 |
sdk/python/lib/pulumi/runtime/settings.py | # Copyright 2016-2018, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Runtime settings and configuration.
"""
import asyncio
import os
from typing import Optional, Union, Any, TYPE_CHECKING
import grpc
from ..runtime.proto import engine_pb2_grpc, resource_pb2, resource_pb2_grpc
from ..errors import RunError
if TYPE_CHECKING:
from ..resource import Resource
# _MAX_RPC_MESSAGE_SIZE raises the gRPC Max Message size from `4194304` (4mb) to `419430400` (400mb)
_MAX_RPC_MESSAGE_SIZE = 1024 * 1024 * 400
_GRPC_CHANNEL_OPTIONS = [('grpc.max_receive_message_length', _MAX_RPC_MESSAGE_SIZE)]
# excessive_debug_output enables, well, pretty excessive debug output pertaining to resources and properties.
excessive_debug_output = False
class Settings:
monitor: Optional[Union[resource_pb2_grpc.ResourceMonitorStub, Any]]
engine: Optional[Union[engine_pb2_grpc.EngineStub, Any]]
project: Optional[str]
stack: Optional[str]
parallel: Optional[int]
dry_run: Optional[bool]
test_mode_enabled: Optional[bool]
legacy_apply_enabled: Optional[bool]
feature_support: dict
"""
A bag of properties for configuring the Pulumi Python language runtime.
"""
def __init__(self,
monitor: Optional[Union[str, Any]] = None,
engine: Optional[Union[str, Any]] = None,
project: Optional[str] = None,
stack: Optional[str] = None,
parallel: Optional[int] = None,
dry_run: Optional[bool] = None,
test_mode_enabled: Optional[bool] = None,
legacy_apply_enabled: Optional[bool] = None):
# Save the metadata information.
self.project = project
self.stack = stack
self.parallel = parallel
self.dry_run = dry_run
self.test_mode_enabled = test_mode_enabled
self.legacy_apply_enabled = legacy_apply_enabled
self.feature_support = {}
if self.test_mode_enabled is None:
self.test_mode_enabled = os.getenv("PULUMI_TEST_MODE", "false") == "true"
if self.legacy_apply_enabled is None:
self.legacy_apply_enabled = os.getenv("PULUMI_ENABLE_LEGACY_APPLY", "false") == "true"
# Actually connect to the monitor/engine over gRPC.
if monitor is not None:
if isinstance(monitor, str):
self.monitor = resource_pb2_grpc.ResourceMonitorStub(
grpc.insecure_channel(monitor, options=_GRPC_CHANNEL_OPTIONS),
)
else:
self.monitor = monitor
else:
self.monitor = None
if engine:
if isinstance(engine, str):
self.engine = engine_pb2_grpc.EngineStub(
grpc.insecure_channel(engine, options=_GRPC_CHANNEL_OPTIONS),
)
else:
self.engine = engine
else:
self.engine = None
# default to "empty" settings.
SETTINGS = Settings()
def configure(settings: Settings):
"""
Configure sets the current ambient settings bag to the one given.
"""
if not settings or not isinstance(settings, Settings):
raise TypeError('Settings is expected to be non-None and of type Settings')
global SETTINGS # pylint: disable=global-statement
SETTINGS = settings
def is_dry_run() -> bool:
"""
Returns whether or not we are currently doing a preview.
"""
return bool(SETTINGS.dry_run)
def is_test_mode_enabled() -> bool:
"""
Returns true if test mode is enabled (PULUMI_TEST_MODE).
"""
return bool(SETTINGS.test_mode_enabled)
def _set_test_mode_enabled(v: Optional[bool]):
"""
Enable or disable testing mode programmatically -- meant for testing only.
"""
SETTINGS.test_mode_enabled = v
def require_test_mode_enabled():
if not is_test_mode_enabled():
raise RunError('Program run without the Pulumi engine available; re-run using the `pulumi` CLI')
def is_legacy_apply_enabled():
return bool(SETTINGS.legacy_apply_enabled)
def get_project() -> str:
"""
Returns the current project name.
"""
project = SETTINGS.project
if not project:
require_test_mode_enabled()
raise RunError('Missing project name; for test mode, please call `pulumi.runtime.set_mocks`')
return project
def _set_project(v: Optional[str]):
"""
Set the project name programmatically -- meant for testing only.
"""
SETTINGS.project = v
def get_stack() -> str:
"""
Returns the current stack name.
"""
stack = SETTINGS.stack
if not stack:
require_test_mode_enabled()
raise RunError('Missing stack name; for test mode, please set PULUMI_NODEJS_STACK')
return stack
def _set_stack(v: Optional[str]):
"""
Set the stack name programmatically -- meant for testing only.
"""
SETTINGS.stack = v
def get_monitor() -> Optional[Union[resource_pb2_grpc.ResourceMonitorStub, Any]]:
"""
Returns the current resource monitoring service client for RPC communications.
"""
monitor = SETTINGS.monitor
if not monitor:
require_test_mode_enabled()
return monitor
def get_engine() -> Optional[Union[engine_pb2_grpc.EngineStub, Any]]:
"""
Returns the current engine service client for RPC communications.
"""
return SETTINGS.engine
ROOT: Optional['Resource'] = None
def get_root_resource() -> Optional['Resource']:
"""
Returns the implicit root stack resource for all resources created in this program.
"""
global ROOT
return ROOT
def set_root_resource(root: 'Resource'):
"""
Sets the current root stack resource for all resources subsequently to be created in this program.
"""
global ROOT
ROOT = root
async def monitor_supports_feature(feature: str) -> bool:
if feature not in SETTINGS.feature_support:
monitor = SETTINGS.monitor
if not monitor:
return False
req = resource_pb2.SupportsFeatureRequest(id=feature)
def do_rpc_call():
try:
resp = monitor.SupportsFeature(req)
return resp.hasSupport
except grpc.RpcError as exn:
if exn.code() != grpc.StatusCode.UNIMPLEMENTED: # pylint: disable=no-member
handle_grpc_error(exn)
return False
result = await asyncio.get_event_loop().run_in_executor(None, do_rpc_call)
SETTINGS.feature_support[feature] = result
return SETTINGS.feature_support[feature]
def grpc_error_to_exception(exn: grpc.RpcError) -> Optional[Exception]:
# gRPC-python gets creative with their exceptions. grpc.RpcError as a type is useless;
# the usefulness comes from the fact that it is polymorphically also a grpc.Call and thus has
# the .code() member. Pylint doesn't know this because it's not known statically.
#
# Neither pylint nor I are the only ones who find this confusing:
# https://github.com/grpc/grpc/issues/10885#issuecomment-302581315
# pylint: disable=no-member
if exn.code() == grpc.StatusCode.UNAVAILABLE:
# If the monitor is unavailable, it is in the process of shutting down or has already
# shut down. Don't emit an error if this is the case.
return None
details = exn.details()
return Exception(details)
def handle_grpc_error(exn: grpc.RpcError):
exc = grpc_error_to_exception(exn)
if exc is not None:
raise exc
async def monitor_supports_secrets() -> bool:
return await monitor_supports_feature("secrets")
async def monitor_supports_resource_references() -> bool:
return await monitor_supports_feature("resourceReferences")
def reset_options(project: Optional[str] = None,
stack: Optional[str] = None,
parallel: Optional[int] = None,
engine_address: Optional[str] = None,
monitor_address: Optional[str] = None,
preview: Optional[bool] = None):
"""Resets globals to the values provided."""
global ROOT
ROOT = None
configure(Settings(
project=project,
monitor=monitor_address,
engine=engine_address,
stack=stack,
parallel=parallel,
dry_run=preview
))
| [] | [] | ["PULUMI_TEST_MODE", "PULUMI_ENABLE_LEGACY_APPLY"] | [] | ["PULUMI_TEST_MODE", "PULUMI_ENABLE_LEGACY_APPLY"] | python | 2 | 0 |
preprocessing.py | # Copyright 2021 ETH Zurich, Media Technology Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import pandas as pd
"""
This module is mainly used to transform the data from the partners into our desired format.
In the end only load_data and get_metadata are used in the algorithms.
"""
def load_data(folder, input_path='user_item', cut=40,high_cut=1000000, seed=None):
"""
loads the training, validation and test sets from the folder, restricts to users with at least "cut" read articles and
returns the sets. The format of each set is a pd.Series with the UserID as index and a list of ArticleIDs as value
:param folder/input_path: {folder}/{input_path} is the path to look for the *_train.pkl files
:param cut: value to cut off users with less than "cut" read articles
:return: three pd.Series. Index of each series is the UserID. The value is a list of ArticleIDs.
(look at create_split to see how the split is defined)
"""
# cut cuts off users that read less than cut articles
user_item_train, user_item_test, user_item_validation = pd.read_pickle(
f'{folder}/{input_path}_train.pkl'), pd.read_pickle(f'{folder}/{input_path}_test.pkl'), pd.read_pickle(
f'{folder}/{input_path}_validation.pkl')
user_item_train = user_item_train[user_item_train.str.len() > cut * 0.7]
user_item_train = user_item_train[user_item_train.str.len() < high_cut * 0.7]
user_item_test = user_item_test.loc[user_item_train.index]
user_item_validation = user_item_validation.loc[user_item_train.index]
return user_item_train, user_item_test, user_item_validation
def load_data_vertical(folder, input_path='user_item_vertical', cut=40):
"""
loads the training, validation and test sets from the folder, restricts to users with at least "cut" read articles and
returns the sets. The format of each set is a pd.Series with the UserID as index and a list of ArticleIDs as value
:param folder/input_path: {folder}/{input_path} is the path to look for the *_train.pkl files
:param cut: value to cut off users with less than "cut" read articles
:return: three pd.Series. Index of each series is the UserID. The value is a list of ArticleIDs.
(look at create_split to see how the split is defined)
"""
# cut cuts off users that read less than cut articles
user_item_train, user_item_test, user_item_validation = pd.read_parquet(
f'{folder}/{input_path}_train.pq'), pd.read_parquet(f'{folder}/{input_path}_test.pq'), pd.read_parquet(
f'{folder}/{input_path}_validation.pq')
user_item_train = user_item_train[user_item_train['count'] >cut]
user_item_test =user_item_test[user_item_test['count'] >cut]
user_item_validation = user_item_validation[user_item_validation['count'] >cut]
user_item_train['resource_id']=user_item_train['article_id']
user_item_test['resource_id']=user_item_test['article_id']
user_item_validation['resource_id']=user_item_validation['article_id']
return user_item_train, user_item_test, user_item_validation
def load_data_cv(folder, input_path='user_item', cut=40, high_cut=100000,seed=1):
"""
Same as load_data but only returns random 80% of the training set
"""
# cut cuts off users that read less than cut articles
user_item_train, user_item_test, user_item_validation = load_data(folder, input_path=input_path, cut=cut,high_cut=high_cut)
user_item_train = user_item_train.sample(frac=0.8,random_state=seed)
user_item_test = user_item_test.sample(frac=1, random_state=seed)
return user_item_train, user_item_test, user_item_validation
def load_data_vertical_cv(folder, input_path='user_item_vertical', cut=40, high_cut=100000,seed=1):
"""
Same as load_data but only returns random 80% of the training set
"""
# cut cuts off users that read less than cut articles
user_item_train, user_item_test, user_item_validation = load_data_vertical(folder, input_path=input_path, cut=cut)
user_item_train = user_item_train.sample(frac=0.8,random_state=seed)
user_item_test = user_item_test.sample(frac=1, random_state=seed)
return user_item_train, user_item_test, user_item_validation
def get_metadata(folder, usecols=[]):
"""
Loads and returns the article metadata.
The algorithms expect the format to be a Dataframe with two columns:
- "resource_id": unique id for the article
- "text": full text of the article (without html tags)
"""
if not usecols:
usecols = ['text', 'resource_id']
metadata = pd.read_csv(f"{folder}/meta.csv", usecols=usecols)
return metadata.dropna(subset=['text'])
def transform_item_matrix_to_horizontal_format(folder, output_path='user_item_matrix.pkl',
input_path='user_item_matrix_vertical.pq', sortby='ts'):
"""
Transforms the vertical User-Item matrix, where each row is one click, into a horizontal User-Item matrix where we have
one row for each user and each row contains a (sorted) list of articles she/he clicked on.
:param folder: Input folder
:param output_path: Filename/path for outputfile
:param input_path: Filename/path for inputfile. This pickled file contains a DataFrame with three columns:
"user_ix": the UserID and "article_id" the ArticleID and "<sortby>" which should be timestamp
to sort by. Each UserID ArticleID pair indicates a click of the user on the article at a time.
:param sortby: Columnname of the timestamp column to sort by
:return: returns a Series where the index is the UserID and values is the by timestamp
sorted list of clicked ArticleIDs
"""
now = datetime.datetime.now()
matrices = pd.read_parquet(f"{folder}/{input_path}")
grouped = matrices.sort_values(sortby).groupby(['user_ix']).apply(lambda x: list(x['article_id']))
grouped.to_pickle(f"{folder}/{output_path}")
print(f"Data transformed {datetime.datetime.now() - now}")
def create_split(folder, input_path='user_item_matrix.pkl', ouput_path='user_item', cut_dump=10):
"""
Loads the horizontal user item data from folder and creates a user-wise 70% train, 20% validation, 10% test split.
This means that for each user the first 70% of read articles go into the train set, the next 20% into the validation
set and the last 10% into the test set. We remove users with less than 10 clicked articles.
This is the data that is loaded to train/test the models in the end.
"""
now = datetime.datetime.now()
user_item = pd.read_pickle(f"{folder}/{input_path}")
user_item = user_item[user_item.str.len() > (cut_dump)]
user_item_train = user_item.apply(lambda x: x[:int(len(x) * 0.7)])
user_item_test = user_item.apply(lambda x: x[int(len(x) * 0.7):int(len(x) * 0.9)])
user_item_validation = user_item.apply(lambda x: x[int(len(x) * 0.9):])
user_item_train.name = 'article_id'
user_item_test.name = 'article_id'
user_item_validation.name = 'article_id'
user_item_train.to_pickle(f'{folder}/{ouput_path}_train.pkl')
user_item_test.to_pickle(f'{folder}/{ouput_path}_test.pkl')
user_item_validation.to_pickle(f'{folder}/{ouput_path}_validation.pkl')
print(f"Split created {datetime.datetime.now() - now}")
def create_split_vertical(folder, input_path='user_item_matrix_vertical.pq', ouput_path='user_item_vertical', cut_dump=10,time_column='ts'):
"""
Loads the vertical user item data from folder and creates a user-wise 70% train, 20% validation, 10% test split.
This means that for each user the first 70% of read articles go into the train set, the next 20% into the validation
set and the last 10% into the test set. We remove users with less than 10 clicked articles.
This is the data that is loaded to train/test the models in the end.
"""
now = datetime.datetime.now()
user_item = pd.read_parquet(f"{folder}/{input_path}").sort_values(time_column)
user_item['count']=user_item.groupby(['user_ix']).article_id.transform('count')
user_item = user_item[user_item['count']>cut_dump]
grouped = user_item.groupby(['user_ix'])
user_item['percentile'] = (grouped.article_id.cumcount() + 1) / grouped.article_id.transform('count')
user_item_train = user_item[user_item['percentile']<=0.7]
user_item_test = user_item[(user_item['percentile']>0.7) & (user_item['percentile']<0.9)]
user_item_validation = user_item[user_item['percentile']>0.9]
user_item_train.to_parquet(f'{folder}/{ouput_path}_train.pq')
user_item_test.to_parquet(f'{folder}/{ouput_path}_test.pq')
user_item_validation.to_parquet(f'{folder}/{ouput_path}_validation.pq')
print(f"Split created {datetime.datetime.now() - now}")
def transform_horizontal_to_vertical(df):
"""
Transforms the horizontal format into vertical format
:param df:
:return:
"""
return df.explode().reset_index()
if __name__ == "__main__":
import pandas as pd
folder = os.getenv('DATA_FOLDER','processed')
# Transforms the user-item-matrix into a user-series. For each user we store the articles read as one sorted list.
# Save the new format.
# This format is more convenient for creating the split and for training some of the algorithms.
transform_item_matrix_to_horizontal_format(folder=folder)
# Create a train,test,validation split. 70%,10%,20% and save it
create_split(folder=folder, cut_dump=10)
create_split_vertical(folder=folder, cut_dump=10)
# loads the saved train,validation,test split
train, test, validation = load_data(folder=folder, cut=40)
# # if you wish to transform into normal user-item-format
# train_vertical = transform_horizontal_to_vertical(train)
| [] | [] | ["DATA_FOLDER"] | [] | ["DATA_FOLDER"] | python | 1 | 0 |
sayhello/settings.py | # -*- coding: utf-8 -*-
"""
:author: Grey Li (李辉)
:url: http://greyli.com
:copyright: © 2018 Grey Li <[email protected]>
:license: MIT, see LICENSE for more details.
"""
import os
import sys
from sayhello import app
# SQLite URI compatible
WIN = sys.platform.startswith('win')
if WIN:
prefix = 'sqlite:///'
else:
prefix = 'sqlite:////'
dev_db = prefix + os.path.join(os.path.dirname(app.root_path), 'data.db')
SECRET_KEY = os.getenv('SECRET_KEY', 'secret string')
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URI', dev_db)
DEBUG_TB_INTERCEPT_REDIRECTS = False
| [] | [] | ["SECRET_KEY", "DATABASE_URI"] | [] | ["SECRET_KEY", "DATABASE_URI"] | python | 2 | 0 |
scripts/release.py | #!/usr/bin/env python
"""Automation script for making a release. Must be run from the root for the
repository"""
# Note: Version scheme according to https://www.python.org/dev/peps/pep-0440
import json
import os
import re
import shutil
import sys
import urllib.error
import urllib.parse
import urllib.request
from os.path import join
from subprocess import DEVNULL, CalledProcessError, run
import click
import git
import pytest
from pkg_resources import parse_version
RX_VERSION = re.compile(
r'^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)'
r'(?P<prepost>\.post\d+|-(dev|a|b|rc)\d+)?'
r'(?P<devsuffix>[+-]dev)?$'
)
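# Examples this pattern is meant to accept (see test_split_version below): '0.1.0',
# '0.1.0-rc1', '0.1.0.post1', '0.1.0-rc1+dev'; strings such as '0.1.0rc1' or '0.1'
# are rejected.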
def make_release(package_name):
"""Interactively create and publish a new release for the package"""
click.confirm("Do you want to make a release?", abort=True)
check_git_clean()
new_version = ask_for_release_version(package_name)
run_tests()
set_version(join('.', 'src', package_name, '__init__.py'), new_version)
edit_history(new_version)
while not check_dist():
click.confirm(
"Fix errors manually! Continue?", default=True, abort=True
)
make_release_commit(new_version)
make_upload(test=True)
push_release_commit()
make_upload(test=False)
make_and_push_tag(new_version)
next_dev_version = new_version + '+dev'
set_version(
join('.', 'src', package_name, '__init__.py'), next_dev_version
)
make_next_dev_version_commit(next_dev_version)
###############################################################################
class ReleaseError(ValueError):
pass
def get_package_name():
"""Find and return the package name from src"""
for name in os.listdir('src'):
if 'egg-info' in name:
continue
if os.path.isdir(os.path.join('src', name)):
return name
raise ReleaseError("Cannot find package name")
def get_pypi_versions(package_name):
"""Return list of versions for the given package on PyPI"""
url = "https://pypi.python.org/pypi/%s/json" % (package_name,)
data = json.load(urllib.request.urlopen(urllib.request.Request(url)))
versions = list(data["releases"].keys())
versions.sort(key=parse_version)
return versions
def get_local_versions():
"""Return list of versions based on local tags
For every version, there must be a tag "v<version>"
"""
repo = git.Repo(os.getcwd())
return [tag.name[1:] for tag in repo.tags if tag.name.startswith('v')]
def get_version(filename):
"""Extract the package version, as a str"""
with open(filename) as in_fh:
for line in in_fh:
if line.startswith('__version__'):
return line.split('=')[1].strip()[1:-1]
raise ReleaseError("Cannot extract version from %s" % filename)
def edit(filename):
"""Open filename in EDITOR"""
editor = os.getenv('EDITOR', 'vi')
if click.confirm("Open %s in %s?" % (filename, editor), default=True):
run([editor, filename])
def check_git_clean():
"""Ensure that a given git.Repo is clean"""
repo = git.Repo(os.getcwd())
if repo.is_dirty():
run(['git', 'status'])
raise ReleaseError("Repository must be in a clean state")
if repo.untracked_files:
click.echo("WARNING: there are untracked files:")
for filename in repo.untracked_files:
click.echo("\t%s" % filename)
click.confirm("Continue?", default=False, abort=True)
def run_tests():
"""Run 'make test'."""
success = False
while not success:
try:
run(['make', 'test'], check=True)
except CalledProcessError as exc_info:
print("Failed tests: %s\n" % exc_info)
print("Fix the tests and ammend the release commit.")
print("Then continue.\n")
click.confirm("Continue?", default=True, abort=True)
if not click.confirm("Retry?", default=True):
break
else:
success = True
def split_version(version, base=True):
"""Split `version` into a tuple
If `base` is True, only return (<major>, <minor>, <patch>) as a tuple of
ints, stripping out pre/post/dev release tags. Otherwise, they are included
as a possible fourth and fifth element in the tuple (as strings)
"""
version = str(version)
if not RX_VERSION.match(version):
raise ValueError("Invalid version: %s" % version)
if base:
return tuple(
[
int(v)
for v in str(parse_version(version).base_version).split(".")
]
)
else:
m = RX_VERSION.match(version)
if m:
res = [
int(m.group('major')),
int(m.group('minor')),
int(m.group('patch')),
]
if m.group('prepost') is not None:
res.append(m.group('prepost'))
if m.group('devsuffix') is not None:
res.append(m.group('devsuffix'))
return tuple(res)
else:
raise ValueError("Invalid version string: %s" % version)
def list_versions(package_name):
"""List previously released versions
This prints each released version on a new line, and returns the list of
all released versions (based on PyPI and local tags)
"""
try:
pypi_versions = get_pypi_versions(package_name)
except OSError:
click.echo("PyPI versions no available")
pypi_versions = []
local_versions = get_local_versions()
versions = sorted(
set(pypi_versions).union(local_versions), key=parse_version
)
for version in versions:
if version in pypi_versions and version in local_versions:
status = 'PyPI/local'
elif version in pypi_versions:
status = 'PyPI only!'
elif version in local_versions:
status = 'local only!'
click.echo("%-20s %s" % (version, status))
return versions
def version_ok(version, dev_version, released_versions=None):
"""Check that `version` is a valid version for an upcoming release
The `version` must be newer than the `dev_version` (from __version__, which
should end in '-dev' or '+dev')
"""
if released_versions is None:
released_versions = []
m = RX_VERSION.match(version)
if m:
if m.group('devsuffix') is not None:
click.echo("Version %s contains a development suffix" % version)
return False
if version in released_versions:
click.echo("Version %s is already released" % version)
return False
if parse_version(version) > parse_version(dev_version):
return True
else:
click.echo("Version %s not newer than %s" % (version, dev_version))
return False
else:
click.echo("Invalid version: %s" % version)
return False
def propose_next_version(dev_version):
"""Return the most likely release version based on the current
__version__"""
dev_version = str(dev_version)
if parse_version(dev_version).is_prerelease:
return parse_version(dev_version).base_version
else:
base_version = parse_version(dev_version).base_version
v = split_version(base_version)
return "%d.%d.%d" % (v[0], v[1], v[2] + 1)
def ask_for_release_version(package_name):
"""Ask for the version number of the release.
The version number is checked to be a valid next release
"""
dev_version = get_version(join('.', 'src', package_name, '__init__.py'))
proposed_version = propose_next_version(dev_version)
released_versions = list_versions(package_name)
new_version = click.prompt(
"What version would you like to release?", default=proposed_version
)
while not version_ok(new_version, dev_version, released_versions):
new_version = click.prompt(
"What version would you like to release?", default=proposed_version
)
click.confirm("Confirm version %s?" % new_version, abort=True)
return str(new_version)
def set_version(filename, version):
"""Set the package version (in main __init__.py)"""
shutil.copyfile(filename, filename + '.bak')
click.echo("Modifying %s to set version %s" % (filename, version))
with open(filename + '.bak') as in_fh, open(filename, 'w') as out_fh:
found_version_line = False
for line in in_fh:
if line.startswith('__version__'):
found_version_line = True
line = line.split('=')[0].rstrip() + " = '" + version + "'\n"
out_fh.write(line)
if get_version(filename) == version:
os.remove(filename + ".bak")
else:
# roll back
shutil.copyfile(filename + ".bak", filename)
msg = "Failed to set version in %s (restored original)" % filename
if not found_version_line:
msg += ". Does not contain a line starting with '__version__'."
raise ReleaseError(msg)
def edit_history(version):
"""Interactively edit HISTORY.rst"""
click.echo(
"Edit HISTORY.rst to add changelog and release date for %s" % version
)
edit('HISTORY.rst')
click.confirm("Is HISTORY.rst up to date?", default=True, abort=True)
def check_dist():
"""Quietly make dist and check it. This is mainly to ensure that the README
and HISTORY metadata are well-formed"""
click.echo("Making and verifying dist and metadata...")
try:
run(['make', 'dist'], check=True, stdout=DEVNULL)
run(['make', 'dist-check'], check=True)
return True
except CalledProcessError as exc_info:
click.echo("ERROR: %s" % str(exc_info))
return False
def make_release_commit(version):
"""Commit 'Bump version to xxx and update HISTORY'"""
click.confirm("Make release commit?", default=True, abort=True)
run(
[
'git',
'commit',
'-a',
'-m',
"Bump version to %s and update HISTORY" % version,
],
check=True,
)
def make_upload(test=True):
"""Upload to PyPI or test.pypi"""
if test:
cmd = ['make', 'test-upload']
url = 'https://test.pypi.org'
else:
url = 'https://pypi.org'
cmd = ['make', 'upload']
click.confirm(
"Ready to upload release to %s?" % url, default=True, abort=True
)
success = False
while not success:
try:
run(cmd, check=True)
except CalledProcessError as exc_info:
click.confirm(
"Failed to upload: %s. Try again?" % str(exc_info),
default=True,
abort=(not test),
)
success = False
else:
success = True
click.confirm(
"Please check release on %s. Continue?" % url,
default=True,
abort=True,
)
def push_release_commit():
"""Push local commits to origin"""
click.confirm("Push release commit to origin?", default=True, abort=True)
run(['git', 'push', 'origin', 'master'], check=True)
click.confirm(
"Please check Continuous Integration success. Continue?",
default=True,
abort=True,
)
def make_and_push_tag(version):
"""Tag the current commit and push that tag to origin"""
click.confirm(
"Push tag '%s' to origin?" % version, default=True, abort=True
)
run(['git', 'tag', "-s", "v%s" % version], check=True)
run(['git', 'push', '--tags', 'origin'], check=True)
def make_next_dev_version_commit(version):
"""Commit 'Bump version to xxx'"""
click.confirm(
"Make commit for bumping to %s?" % version, default=True, abort=True
)
run(
['git', 'commit', '-a', '-m', "Bump version to %s" % version],
check=True,
)
###############################################################################
# run tests with `pytest -s scripts/release.py`
def test_list_versions():
print("")
versions = list_versions(get_package_name())
print(versions)
assert isinstance(versions, list)
def test_split_version():
# fmt: off
assert split_version('0.1.0') == (0, 1, 0)
assert split_version('0.1.0', base=False) == (0, 1, 0)
assert split_version('0.1.0-dev1', base=True) == (0, 1, 0)
assert split_version('0.1.0-dev1', base=False) == (0, 1, 0, '-dev1')
assert split_version('0.1.0.post1', base=True) == (0, 1, 0)
assert split_version('0.1.0.post1', base=False) == (0, 1, 0, '.post1')
assert split_version('0.1.0-rc1', base=True) == (0, 1, 0)
assert split_version('0.1.0-rc1', base=False) == (0, 1, 0, '-rc1')
assert split_version('0.1.0-rc1-dev', base=True) == (0, 1, 0)
assert split_version('0.1.0-rc1-dev', base=False) == (0, 1, 0, '-rc1', '-dev')
assert split_version('0.1.0-rc1+dev', base=True) == (0, 1, 0)
assert split_version('0.1.0-rc1+dev', base=False) == (0, 1, 0, '-rc1', '+dev')
assert split_version('0.1.0-dev', base=True) == (0, 1, 0)
assert split_version('0.1.0-dev', base=False) == (0, 1, 0, '-dev')
assert split_version('0.1.0+dev', base=True) == (0, 1, 0)
assert split_version('0.1.0+dev', base=False) == (0, 1, 0, '+dev')
with pytest.raises(ValueError):
split_version('0.1.0.rc1')
with pytest.raises(ValueError):
split_version('0.1.0rc1')
with pytest.raises(ValueError):
split_version('0.1.0.1')
with pytest.raises(ValueError):
split_version('0.1')
with pytest.raises(ValueError):
split_version('0.1.0+dev1')
# fmt: on
def test_version_ok():
assert version_ok('0.1.0', '0.1.0-dev')
assert version_ok('0.1.0-a1', '0.1.0-dev')
assert version_ok('0.1.0-b1', '0.1.0-dev')
assert version_ok('0.1.0-rc1', '0.1.0-dev')
assert version_ok('0.2.0', '0.1.0+dev')
assert version_ok('0.2.0-a1', '0.1.0+dev')
assert version_ok('0.2.0-b1', '0.1.0+dev')
assert version_ok('0.2.0-rc1', '0.1.0+dev')
assert version_ok('0.2.0-dev1', '0.1.0+dev')
assert version_ok('0.1.0.post1', '0.1.0+dev')
assert version_ok('0.1.0.post1', '0.1.0')
assert version_ok('0.2.0', '0.1.0')
assert version_ok('0.2.0', '0.1.0+dev', ['0.1.0', '0.1.0.post1', '0.1.1'])
print("")
assert not version_ok('0.0.1-dev', '0.1.0-dev')
assert not version_ok('0.1.0', '0.1.0')
assert not version_ok('0.1.0', '0.1.0+dev')
assert not version_ok('0.1.0+dev', '0.1.0')
assert not version_ok('0.2.0-dev', '0.1.0+dev')
assert not version_ok('0.1.0.1', '0.1.0-dev')
assert not version_ok('0.1.0a1', '0.1.0-dev')
assert not version_ok('0.1.0b1', '0.1.0-dev')
assert not version_ok('0.1.0rc1', '0.1.0-dev')
assert not version_ok('0.1.0dev1', '0.1.0-dev')
assert not version_ok('0.1.0-post1', '0.1.0+dev')
assert not version_ok('0.2.0', '0.1.0+dev', ['0.1.0', '0.2.0'])
def test_propose_next_version():
assert propose_next_version('0.1.0') == '0.1.1'
assert propose_next_version('0.1.0-dev') == '0.1.0'
assert propose_next_version('0.1.0-rc1') == '0.1.0'
assert propose_next_version('0.1.0-rc1+dev') == '0.1.0'
assert propose_next_version('0.1.0+dev') == '0.1.1'
assert propose_next_version('0.1.0.post1') == '0.1.1'
assert propose_next_version('0.1.0.post1+dev') == '0.1.1'
###############################################################################
@click.command(help=__doc__)
@click.help_option('--help', '-h')
def main():
try:
make_release(get_package_name())
except Exception as exc_info:
click.echo(str(exc_info))
sys.exit(1)
if __name__ == "__main__":
sys.exit(main())
| [] | [] | ["EDITOR"] | [] | ["EDITOR"] | python | 1 | 0 |
src/main/java/com/yeetor/adb/AdbServer.java | package com.yeetor.adb;
import com.android.ddmlib.*;
import com.android.ddmlib.TimeoutException;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.*;
/**
* Created by harry on 2017/4/15.
*/
public class AdbServer {
private static AdbServer server;
private String adbPath = null;
private String adbPlatformTools = "platform-tools";
AndroidDebugBridge adb = null;
private boolean success = false;
public static AdbServer server() {
if (server == null) {
server = new AdbServer();
}
return server;
}
private AdbServer() {
init();
}
private String getADBPath(){
if (adbPath == null){
adbPath = System.getenv("ANDROID_SDK_ROOT");
if(adbPath != null){
adbPath += File.separator + adbPlatformTools;
}else {
adbPath = "adb";
return adbPath;
}
}
adbPath += File.separator + "adb";
return adbPath;
}
private void init() {
AndroidDebugBridge.init(false);
adb = AndroidDebugBridge.createBridge(getADBPath(), true);
if (adb != null) {
if (waitForDeviceList()) {
success = true;
}
}
}
private boolean waitForDeviceList() {
int maxWaittingTime = 100;
int interval = 10;
while (!adb.hasInitialDeviceList()) {
try {
Thread.sleep(interval);
} catch (InterruptedException e) {
break;
}
maxWaittingTime -= 1;
if (maxWaittingTime == 0) {
disconnectAdb();
return false;
}
}
return true;
}
void disconnectAdb() {
if (adb != null) {
adb = null;
}
success = false;
}
public IDevice[] getDevices() {
return adb.getDevices();
}
public IDevice getDevice(String serialNumber) {
IDevice[] devices = AdbServer.server().getDevices();
IDevice device = null;
for (IDevice d : devices) {
if (serialNumber.equals(d.getSerialNumber())) {
device = d;
break;
}
}
return device;
}
public IDevice getFirstDevice() {
IDevice[] devices = getDevices();
if (devices.length > 0) {
return devices[0];
}
return null;
}
public static String executeShellCommand(IDevice device, String command) {
CollectingOutputReceiver output = new CollectingOutputReceiver();
try {
device.executeShellCommand(command, output, 0);
} catch (TimeoutException e) {
e.printStackTrace();
} catch (AdbCommandRejectedException e) {
e.printStackTrace();
} catch (ShellCommandUnresponsiveException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
return output.getOutput();
}
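// Illustrative usage (the device and the shell command are hypothetical examples):
// String abi = AdbServer.executeShellCommand(device, "getprop ro.product.cpu.abi");
// The receiver collects the command's stdout, which is returned as a single string.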
/**
* TODO: add a custom adb command; Android Wear devices transfer data too slowly, which makes adb push time out
* @param device
* @param command
* @return
*/
public static String executeCommand(IDevice device, String command) {
return "";
}
private ListenableFuture<List<AdbForward>> executeGetForwardList() {
final File adbFile = new File(AdbServer.server().adbPath);
final SettableFuture future = SettableFuture.create();
(new Thread(new Runnable() {
public void run() {
ProcessBuilder pb = new ProcessBuilder(new String[]{adbFile.getPath(), "forward", "--list"});
pb.redirectErrorStream(true);
Process p = null;
try {
p = pb.start();
} catch (IOException e) {
future.setException(e);
return;
}
StringBuilder sb = new StringBuilder();
BufferedReader br = new BufferedReader(new InputStreamReader(p.getInputStream()));
try {
String line;
try {
List<AdbForward> list = new ArrayList<AdbForward>();
while((line = br.readLine()) != null) {
//64b2b4d9 tcp:555 localabstract:shit
AdbForward forward = new AdbForward(line);
if (forward.isForward()) {
list.add(forward);
}
}
future.set(list);
return;
} catch (IOException ex) {
future.setException(ex);
return;
}
} finally {
try {
br.close();
} catch (IOException ex) {
future.setException(ex);
}
}
}
}, "Obtaining adb version")).start();
return future;
}
public AdbForward[] getForwardList() {
ListenableFuture<List<AdbForward>> future = executeGetForwardList();
try {
List<AdbForward> s = future.get(1, TimeUnit.SECONDS);
AdbForward[] ret = new AdbForward[s.size()];
s.toArray(ret);
return ret;
} catch (Exception ex) {
ex.printStackTrace();
}
return new AdbForward[0];
}
// public static String[] getForwardList(IDevice device) {
// CollectingOutputReceiver receiver = new CollectingOutputReceiver();
//
// try {
// device.executeShellCommand("forward --list", receiver, 0);
// } catch (Exception e) {
// e.printStackTrace();
// }
//
// String output = receiver.getOutput();
// System.out.println(output);
// return null;
// }
}
| ["\"ANDROID_SDK_ROOT\""] | [] | ["ANDROID_SDK_ROOT"] | [] | ["ANDROID_SDK_ROOT"] | java | 1 | 0 |
java/client/src/test/java/io/vitess/client/TestEnv.java | /*
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.vitess.client;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.io.FileUtils;
import vttest.Vttest.VTTestTopology;
/**
* Helper class to hold the configurations for VtGate setup used in integration tests
*/
public class TestEnv {
private VTTestTopology topology;
private String keyspace;
private String outputPath;
private Process pythonScriptProcess;
private int port;
public void setTopology(VTTestTopology topology) {
this.topology = topology;
}
public VTTestTopology getTopology() {
return this.topology;
}
public void setKeyspace(String keyspace) {
this.keyspace = keyspace;
}
public String getKeyspace() {
return this.keyspace;
}
public int getPort() {
return this.port;
}
public void setPort(int port) {
this.port = port;
}
public Process getPythonScriptProcess() {
return this.pythonScriptProcess;
}
public void setPythonScriptProcess(Process process) {
this.pythonScriptProcess = process;
}
/**
* Get setup command to launch a cluster.
*/
public List<String> getSetupCommand(int port) {
String vtTop = System.getenv("VTTOP");
if (vtTop == null) {
throw new RuntimeException("cannot find env variable: VTTOP");
}
String schemaDir = getTestDataPath() + "/schema";
List<String> command = new ArrayList<String>();
command.add(vtTop + "/py/vttest/run_local_database.py");
command.add("--port");
command.add(Integer.toString(port));
command.add("--proto_topo");
command.add(getTopology().toString());
command.add("--schema_dir");
command.add(schemaDir);
return command;
}
public String getTestDataPath() {
String vtTop = System.getenv("VTTOP");
if (vtTop == null) {
throw new RuntimeException("cannot find env variable: VTTOP");
}
return vtTop + "/data/test";
}
public String getTestOutputPath() {
if (outputPath == null) {
try {
outputPath = Files.createTempDirectory("vttest").toString();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
return outputPath;
}
public void clearTestOutput() throws IOException {
if (outputPath != null) {
FileUtils.deleteDirectory(new File(outputPath));
}
}
}
| ["\"VTTOP\"", "\"VTTOP\""] | [] | ["VTTOP"] | [] | ["VTTOP"] | java | 1 | 0 |
qa/rpc-tests/test_framework/test_framework.py | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Base class for RPC testing
# Add python-bitcoinrpc to module search path:
import os
import sys
import shutil
import tempfile
import traceback
from .util import (
initialize_chain,
assert_equal,
start_nodes,
connect_nodes_bi,
sync_blocks,
sync_mempools,
stop_nodes,
wait_bitcoinds,
enable_coverage,
check_json_precision,
initialize_chain_clean,
)
from .authproxy import AuthServiceProxy, JSONRPCException
class BitcoinTestFramework(object):
# These may be over-ridden by subclasses:
def run_test(self):
for node in self.nodes:
assert_equal(node.getblockcount(), 200)
assert_equal(node.getbalance(), 25*500)
def add_options(self, parser):
pass
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain(self.options.tmpdir)
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir)
def setup_network(self, split = False):
self.nodes = self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
# If we joined network halves, connect the nodes from the joint
# on outward. This ensures that chains are properly reorganised.
if not split:
connect_nodes_bi(self.nodes, 1, 2)
sync_blocks(self.nodes[1:3])
sync_mempools(self.nodes[1:3])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 2, 3)
self.is_network_split = split
self.sync_all()
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
assert not self.is_network_split
stop_nodes(self.nodes)
wait_bitcoinds()
self.setup_network(True)
def sync_all(self):
if self.is_network_split:
sync_blocks(self.nodes[:2])
sync_blocks(self.nodes[2:])
sync_mempools(self.nodes[:2])
sync_mempools(self.nodes[2:])
else:
sync_blocks(self.nodes)
sync_mempools(self.nodes)
def join_network(self):
"""
Join the (previously split) network halves together.
"""
assert self.is_network_split
stop_nodes(self.nodes)
wait_bitcoinds()
self.setup_network(False)
def main(self):
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave folds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop folds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing fold/fol-cli (default: %default)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
if self.options.trace_rpc:
import logging
logging.basicConfig(level=logging.DEBUG)
if self.options.coveragedir:
enable_coverage(self.options.coveragedir)
os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']
check_json_precision()
success = False
try:
if not os.path.isdir(self.options.tmpdir):
os.makedirs(self.options.tmpdir)
self.setup_chain()
self.setup_network()
self.run_test()
success = True
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
except AssertionError as e:
print("Assertion failed: "+ str(e))
traceback.print_tb(sys.exc_info()[2])
except Exception as e:
print("Unexpected exception caught during testing: " + repr(e))
traceback.print_tb(sys.exc_info()[2])
if not self.options.noshutdown:
print("Stopping nodes")
stop_nodes(self.nodes)
wait_bitcoinds()
else:
print("Note: folds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown:
print("Cleaning up")
shutil.rmtree(self.options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
# Test framework for doing p2p comparison testing, which sets up some bitcoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class ComparisonTestFramework(BitcoinTestFramework):
# Can override the num_nodes variable to indicate how many nodes to run.
def __init__(self):
self.num_nodes = 2
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("FOLD", "fold"),
help="bitcoind binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("FOLD", "fold"),
help="bitcoind binary to use for reference nodes (if any)")
def setup_chain(self):
print "Initializing test directory "+self.options.tmpdir
initialize_chain_clean(self.options.tmpdir, self.num_nodes)
def setup_network(self):
self.nodes = start_nodes(
self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']] * self.num_nodes,
binary=[self.options.testbinary] +
[self.options.refbinary]*(self.num_nodes-1))
| [] | [] | ["FOLD", "PATH"] | [] | ["FOLD", "PATH"] | python | 2 | 0 |
application/cmd/insgocc/builder.go | //
// Copyright 2019 Insolar Technologies GmbH
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package main
import (
"context"
"go/build"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/pkg/errors"
"github.com/insolar/insolar/application"
"github.com/insolar/insolar/application/genesisrefs"
"github.com/insolar/insolar/insolar"
"github.com/insolar/insolar/instrumentation/inslogger"
"github.com/insolar/insolar/log"
"github.com/insolar/insolar/logicrunner/preprocessor"
)
var (
contractSources = insolar.RootModule + "/application/contract"
proxySources = insolar.RootModule + "/application/proxy"
contractNames = []string{
application.GenesisNameRootDomain,
application.GenesisNameNodeDomain,
application.GenesisNameNodeRecord,
application.GenesisNameMember,
application.GenesisNameWallet,
application.GenesisNameDeposit,
application.GenesisNameCostCenter,
application.GenesisNamePKShard,
application.GenesisNameMigrationShard,
}
)
type contractsBuilder struct {
root string
skipProxy bool
sourcesDir string
outDir string
}
func (cb *contractsBuilder) setSourcesDir(dir string) {
cb.sourcesDir = dir
}
func (cb *contractsBuilder) setOutputDir(dir string) {
cb.outDir = dir
}
func (cb *contractsBuilder) outputDir() string {
if cb.outDir != "" {
return cb.outDir
}
return filepath.Join(cb.root, "plugins")
}
func newContractBuilder(tmpDir string, skipProxy bool) *contractsBuilder {
var err error
if tmpDir == "" {
tmpDir, err = ioutil.TempDir("", "insgocc-")
if err != nil {
panic(err)
}
}
cb := &contractsBuilder{
root: tmpDir,
skipProxy: skipProxy,
}
return cb
}
// clean deletes tmp directory used for contracts building
func (cb *contractsBuilder) clean() {
log.Infof("Cleaning build directory %q", cb.root)
err := os.RemoveAll(cb.root)
if err != nil {
panic(err)
}
}
func (cb *contractsBuilder) parseContract(name string) (*preprocessor.ParsedFile, error) {
return preprocessor.ParseFile(cb.getContractPath(name), insolar.MachineTypeGoPlugin)
}
type buildResult struct {
ContractName string
SoFilePath string
}
func (cb *contractsBuilder) build(ctx context.Context, names ...string) ([]buildResult, error) {
if len(names) == 0 {
names = contractNames
}
if err := cb.prepare(ctx, names...); err != nil {
return nil, err
}
result := make([]buildResult, 0, len(contractNames))
for _, name := range names {
log.Infof("building plugin for contract %q in %q", name, cb.root)
soFile, err := cb.plugin(ctx, name)
if err != nil {
return nil, errors.Wrapf(err, "failed to build plugin %v", name)
}
result = append(result, buildResult{
ContractName: name,
SoFilePath: soFile,
})
}
return result, nil
}
func (cb *contractsBuilder) prepare(ctx context.Context, names ...string) error {
inslog := inslogger.FromContext(ctx)
for _, name := range names {
inslog.Info("prepare contract:", name)
code, err := cb.parseContract(name)
if err != nil {
return errors.Wrapf(err, "failed to parse contract %v", name)
}
code.ChangePackageToMain()
ctr, err := createFileInDir(filepath.Join(cb.root, "src/contract", name), "main.go")
if err != nil {
return errors.Wrap(err, "can't create contract file")
}
err = code.Write(ctr)
if err != nil {
return errors.Wrap(err, "can't write to contract file")
}
closeAndCheck(ctr)
if !cb.skipProxy {
proxyPath := filepath.Join(cb.root, "src", proxySources, name)
proxy, err := createFileInDir(proxyPath, "main.go")
if err != nil {
return errors.Wrap(err, "can't open proxy file")
}
protoRef := genesisrefs.GenesisRef(name + genesisrefs.PrototypeSuffix)
err = code.WriteProxy(protoRef.String(), proxy)
closeAndCheck(proxy)
if err != nil {
return errors.Wrap(err, "can't write proxy")
}
}
wrp, err := createFileInDir(filepath.Join(cb.root, "src/contract", name), "main_wrapper.go")
if err != nil {
return errors.Wrap(err, "can't open wrapper file")
}
err = code.WriteWrapper(wrp, "main")
closeAndCheck(wrp)
if err != nil {
return errors.Wrap(err, "can't write wrapper")
}
}
return nil
}
// compile plugin
func (cb *contractsBuilder) plugin(ctx context.Context, name string) (string, error) {
dstDir := cb.outputDir()
err := os.MkdirAll(dstDir, 0700)
if err != nil {
return "", errors.Wrapf(err, "filed to create output directory for plugin %v", dstDir)
}
soFile := filepath.Join(dstDir, name+".so")
buildPath := filepath.Join(cb.root, "src/contract", name)
args := []string{
"build",
"-buildmode=plugin",
"-o", soFile,
buildPath,
}
cmd := exec.Command(
"go",
args...,
)
inslogger.FromContext(ctx).Infof("exec: go %v", strings.Join(args, " "))
env := make([]string, 0, len(os.Environ()))
for _, pair := range os.Environ() {
if strings.HasPrefix(pair, "GOPATH=") {
continue
}
env = append(env, pair)
}
env = append(env, "GOPATH="+prependGoPath(cb.root))
inslogger.FromContext(ctx).Info("GOPATH=" + prependGoPath(cb.root))
cmd.Env = env
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err = cmd.Run()
if err != nil {
return "", errors.Wrapf(err, "can't build plugin: %v", soFile)
}
inslogger.FromContext(ctx).Infof("compiled %v contract to plugin %v", name, soFile)
return soFile, nil
}
func goPATH() string {
gopath := os.Getenv("GOPATH")
if gopath == "" {
gopath = build.Default.GOPATH
}
return gopath
}
func (cb *contractsBuilder) getContractPath(name string) string {
contractDir := filepath.Join(goPATH(), "src", contractSources)
if cb.sourcesDir != "" {
contractDir = cb.sourcesDir
}
contractFile := name + ".go"
return filepath.Join(contractDir, name, contractFile)
}
// prependGoPath prepends `path` to the GOPATH environment variable,
// accounting for a possible default value. Returns the new value.
// NOTE: the environment itself is not changed
func prependGoPath(path string) string {
return path + string(os.PathListSeparator) + goPATH()
}
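// Illustrative example (paths are hypothetical): with GOPATH=/home/user/go,
// prependGoPath("/tmp/insgocc-123") yields "/tmp/insgocc-123:/home/user/go" on
// Linux/macOS (a ';' separator is used on Windows).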
// createFileInDir opens file in provided directory, creates directory if it does not exist.
func createFileInDir(dir string, name string) (*os.File, error) {
err := os.MkdirAll(dir, 0700)
if err != nil {
return nil, err
}
return os.OpenFile(filepath.Join(dir, name), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
}
func closeAndCheck(f *os.File) {
err := f.Close()
if err != nil {
log.Errorf("failed close file %v: %v", f.Name(), err.Error())
}
}
| ["\"GOPATH\""] | [] | ["GOPATH"] | [] | ["GOPATH"] | go | 1 | 0 |
examples/socketmode/socketmode.go | package main
import (
"fmt"
"log"
"os"
"strings"
"github.com/samyakgaur/slack/socketmode"
"github.com/samyakgaur/slack"
"github.com/samyakgaur/slack/slackevents"
)
func main() {
appToken := os.Getenv("SLACK_APP_TOKEN")
if appToken == "" {
}
if !strings.HasPrefix(appToken, "xapp-") {
fmt.Fprintf(os.Stderr, "SLACK_APP_TOKEN must have the prefix \"xapp-\".")
}
botToken := os.Getenv("SLACK_BOT_TOKEN")
if botToken == "" {
fmt.Fprintf(os.Stderr, "SLACK_BOT_TOKEN must be set.\n")
os.Exit(1)
}
if !strings.HasPrefix(botToken, "xoxb-") {
fmt.Fprintf(os.Stderr, "SLACK_BOT_TOKEN must have the prefix \"xoxb-\".")
}
api := slack.New(
botToken,
slack.OptionDebug(true),
slack.OptionLog(log.New(os.Stdout, "api: ", log.Lshortfile|log.LstdFlags)),
slack.OptionAppLevelToken(appToken),
)
client := socketmode.New(
api,
socketmode.OptionDebug(true),
socketmode.OptionLog(log.New(os.Stdout, "socketmode: ", log.Lshortfile|log.LstdFlags)),
)
go func() {
for evt := range client.Events {
switch evt.Type {
case socketmode.EventTypeConnecting:
fmt.Println("Connecting to Slack with Socket Mode...")
case socketmode.EventTypeConnectionError:
fmt.Println("Connection failed. Retrying later...")
case socketmode.EventTypeConnected:
fmt.Println("Connected to Slack with Socket Mode.")
case socketmode.EventTypeEventsAPI:
eventsAPIEvent, ok := evt.Data.(slackevents.EventsAPIEvent)
if !ok {
fmt.Printf("Ignored %+v\n", evt)
continue
}
fmt.Printf("Event received: %+v\n", eventsAPIEvent)
client.Ack(*evt.Request)
switch eventsAPIEvent.Type {
case slackevents.CallbackEvent:
innerEvent := eventsAPIEvent.InnerEvent
switch ev := innerEvent.Data.(type) {
case *slackevents.AppMentionEvent:
_, _, err := api.PostMessage(ev.Channel, slack.MsgOptionText("Yes, hello.", false))
if err != nil {
fmt.Printf("failed posting message: %v", err)
}
case *slackevents.MemberJoinedChannelEvent:
fmt.Printf("user %q joined to channel %q", ev.User, ev.Channel)
}
default:
client.Debugf("unsupported Events API event received")
}
case socketmode.EventTypeInteractive:
callback, ok := evt.Data.(slack.InteractionCallback)
if !ok {
fmt.Printf("Ignored %+v\n", evt)
continue
}
fmt.Printf("Interaction received: %+v\n", callback)
var payload interface{}
switch callback.Type {
case slack.InteractionTypeBlockActions:
// See https://api.slack.com/apis/connections/socket-implement#button
client.Debugf("button clicked!")
case slack.InteractionTypeShortcut:
case slack.InteractionTypeViewSubmission:
// See https://api.slack.com/apis/connections/socket-implement#modal
case slack.InteractionTypeDialogSubmission:
default:
}
client.Ack(*evt.Request, payload)
case socketmode.EventTypeSlashCommand:
cmd, ok := evt.Data.(slack.SlashCommand)
if !ok {
fmt.Printf("Ignored %+v\n", evt)
continue
}
client.Debugf("Slash command received: %+v", cmd)
payload := map[string]interface{}{
"blocks": []slack.Block{
slack.NewSectionBlock(
&slack.TextBlockObject{
Type: slack.MarkdownType,
Text: "foo",
},
nil,
slack.NewAccessory(
slack.NewButtonBlockElement(
"",
"somevalue",
&slack.TextBlockObject{
Type: slack.PlainTextType,
Text: "bar",
},
),
),
),
}}
client.Ack(*evt.Request, payload)
default:
fmt.Fprintf(os.Stderr, "Unexpected event type received: %s\n", evt.Type)
}
}
}()
client.Run()
}
| [
"\"SLACK_APP_TOKEN\"",
"\"SLACK_BOT_TOKEN\""
]
| []
| [
"SLACK_APP_TOKEN",
"SLACK_BOT_TOKEN"
]
| [] | ["SLACK_APP_TOKEN", "SLACK_BOT_TOKEN"] | go | 2 | 0 | |
scheduled_bots/wikipathways/bot.py | import json
import argparse
import copy
import traceback
from datetime import datetime
import os
from rdflib import Graph
import zipfile
import io
from contextlib import closing
from bs4 import BeautifulSoup, SoupStrainer
from SPARQLWrapper import SPARQLWrapper, JSON
import re
import tqdm
import requests
import sys
# from scheduled
from scheduled_bots import PROPS, ITEMS, get_default_core_props
from wikidataintegrator import wdi_core, wdi_login, wdi_helpers
from wikidataintegrator.ref_handlers import update_retrieved_if_new_multiple_refs
from wikidataintegrator.wdi_helpers import try_write
CACHE_SIZE = 10000
CACHE_TIMEOUT_SEC = 300 # 5 min
print("Logging in...")
if "WDUSER" in os.environ and "WDPASS" in os.environ:
WDUSER = os.environ['WDUSER']
WDPASS = os.environ['WDPASS']
else:
raise ValueError("WDUSER and WDPASS must be specified in local.py or as environment variables")
PROPS = {
'Wikipathways ID': 'P2410',
'instance of': 'P31',
'stated in': 'P248',
'reference URL': 'P854',
    'Entrez Gene ID': 'P351',
'found in taxon': 'P703',
'PubMed ID': 'P698',
'curator': 'P1640',
'retrieved': 'P813',
'medical condition': 'P1050',
'author': 'P50',
'author name string': 'P2093',
'main subject': 'P921',
'anatomical location': 'P927'
}
core_props = get_default_core_props()
core_props.update({PROPS['Wikipathways ID']})
ITEMS = {
'Wikipathways': 'Q7999828',
'Homo sapiens': 'Q15978631'
}
#core_props = get_default_core_props()
#core_props.update({PROPS['WikiPathways ID']})
__metadata__ = {
'name': 'PathwayBot',
'maintainer': 'Andra',
'tags': ['pathways'],
'properties': list(PROPS.values())
}
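# Fast-run: wikidataintegrator compares new statements against a local copy of all items
# that already carry a WikiPathways ID (P2410), so unchanged items are skipped on write.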
fast_run_base_filter = {'P2410': ''}
fast_run = True
# populate ontology dictionaries
# Disease Ontology
doid_qid_query = wdi_core.WDItemEngine.execute_sparql_query("SELECT * WHERE {?qid wdt:P699 ?doid . }")
doid_qid = dict()
for result in doid_qid_query["results"]["bindings"]:
doid_qid[result["doid"]["value"]] = result["qid"]["value"].replace("http://www.wikidata.org/entity/", "")
# Pathway Ontology
poid_qid_query = wdi_core.WDItemEngine.execute_sparql_query("SELECT * WHERE {?qid wdt:P7333 ?poid . }")
poid_qid = dict()
for result in poid_qid_query["results"]["bindings"]:
poid_qid[result["poid"]["value"]] = result["qid"]["value"].replace("http://www.wikidata.org/entity/", "")
def create_reference(pathway_id, retrieved):
refStatedIn = wdi_core.WDItemID(value=ITEMS['Wikipathways'], prop_nr=PROPS['stated in'], is_reference=True)
timeStringNow = retrieved.strftime("+%Y-%m-%dT00:00:00Z")
refRetrieved = wdi_core.WDTime(timeStringNow, prop_nr=PROPS['retrieved'], is_reference=True)
refWikiPathwaysID = wdi_core.WDString(value=pathway_id, prop_nr=PROPS['Wikipathways ID'], is_reference=True)
pathway_reference = [refStatedIn, refRetrieved, refWikiPathwaysID]
return pathway_reference
def panic(pathway_id, msg='', msg_type=''):
s = wdi_helpers.format_msg(pathway_id, PROPS['Wikipathways ID'], None, msg, msg_type)
wdi_core.WDItemEngine.log("ERROR", s)
print(s)
return None
def main(retrieved, fast_run, write):
login = wdi_login.WDLogin(WDUSER, WDPASS)
temp = Graph()
    # TODO add try/except to catch API errors while downloading. Currently, when there is an API glitch, a UTF error is reported, only because the response content is empty.
# TODO add progress indicator of the download
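    # A possible (untested) approach for both TODOs: call requests.get() with a timeout,
    # call resp.raise_for_status() to surface HTTP errors instead of the misleading UTF
    # error, and stream the zip downloads below through tqdm to report progress.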
url = 'http://data.wikipathways.org/current/rdf'
page = requests.get(url).text
files = []
for link in BeautifulSoup(page, "lxml", parse_only=SoupStrainer('a')):
address = str(link).split("\"")
if len(address) > 1:
filename = address[1].replace("./", "/")
if len(filename) > 1:
if filename not in files:
if filename != "./":
files.append(url + filename)
for file in set(files):
if "rdf-authors" in file: # get the most accurate file
print(file)
u = requests.get(file)
with closing(u), zipfile.ZipFile(io.BytesIO(u.content)) as archive:
for member in archive.infolist():
if "_" in str(member.filename):
continue
print("parsing: " + member.filename)
nt_content = archive.read(member)
temp.parse(data=nt_content, format="turtle")
print("size: "+str(len(temp)))
if "rdf-wp" in file: # get the most accurate file
print(file)
u = requests.get(file)
with closing(u), zipfile.ZipFile(io.BytesIO(u.content)) as archive:
for member in archive.infolist():
nt_content = archive.read(member)
temp.parse(data=nt_content.decode(), format="turtle")
print("size: "+str(len(temp)))
wp_query = """prefix dcterm: <http://purl.org/dc/terms/>
prefix wp: <http://vocabularies.wikipathways.org/wp#>
SELECT DISTINCT ?wpid WHERE {
?s rdf:type <http://vocabularies.wikipathways.org/wp#Pathway> ;
dcterm:identifier ?wpid ;
?p <http://vocabularies.wikipathways.org/wp#Curation:AnalysisCollection> ;
wp:organism <http://purl.obolibrary.org/obo/NCBITaxon_9606> .
}"""
wpids = []
qres = temp.query(wp_query)
for row in qres:
wpids.append(str(row[0]))
for pathway_id in wpids:
try:
run_one(pathway_id, retrieved, fast_run, write, login, temp)
except Exception as e:
traceback.print_exc()
wdi_core.WDItemEngine.log("ERROR", wdi_helpers.format_msg(
pathway_id, PROPS['Wikipathways ID'], None, str(e), type(e)))
def run_one(pathway_id, retrieved, fast_run, write, login, temp):
print(pathway_id)
pathway_reference = create_reference(pathway_id, retrieved)
prep = dict()
prep = get_PathwayElements(pathway=pathway_id,datatype="Metabolite", temp=temp, prep=prep)
prep = get_PathwayElements(pathway=pathway_id,datatype="Protein", temp=temp, prep=prep)
prep = get_PathwayElements(pathway=pathway_id, datatype="GeneProduct",temp=temp, prep=prep)
prep = get_PathwayElements(pathway=pathway_id, datatype="Complex",temp=temp, prep=prep)
# P703 = found in taxon, Q15978631 = "Homo sapiens"
prep["P703"] = [
wdi_core.WDItemID(value="Q15978631", prop_nr='P703', references=[copy.deepcopy(pathway_reference)])]
query = """
PREFIX wp: <http://vocabularies.wikipathways.org/wp#>
PREFIX gpml: <http://vocabularies.wikipathways.org/gpml#>
PREFIX dcterms: <http://purl.org/dc/terms/>
SELECT DISTINCT ?pathway ?pwId ?pwLabel ?description
WHERE {
VALUES ?pwId {"""
query += "\"" + pathway_id + "\"^^xsd:string}"
query += """
?pathway a wp:Pathway ;
dc:title ?pwLabel ;
dcterms:description ?description ;
dcterms:identifier ?pwId ;
<http://vocabularies.wikipathways.org/wp#isAbout> ?details ;
wp:organismName "Homo sapiens"^^xsd:string .
}"""
qres3 = temp.query(query)
for row in qres3:
#pathway_iri = str(row[0])
pw_id = str(row[1])
pw_label = str(row[2])
description = str(row[3])
## clean up descriptions
description = re.sub(r'https?:\/\/.*[\s\r\n]','',description)
description = description.replace('\n',' ').replace('\r',' ').replace('\'\'\'','').replace('\'\'','').replace('[','').replace(']','')
description = description.replace('Proteins on this pathway have targeted assays available via the Portal','')
description = (description[:246]+'...') if len(description) > 246 else description
description = 'biological pathway in human' if len(description) < 20 else description
# P31 = instance of
prep["P31"] = [
wdi_core.WDItemID(value="Q4915012", prop_nr="P31", references=[copy.deepcopy(pathway_reference)])]
prep["P1476"] = [wdi_core.WDMonolingualText(value=pw_label, prop_nr="P1476", references=[copy.deepcopy(pathway_reference)])]
# P2410 = WikiPathways ID
prep["P2410"] = [wdi_core.WDString(pathway_id, prop_nr='P2410', references=[copy.deepcopy(pathway_reference)])]
# P2888 = exact match
prep["P2888"] = [wdi_core.WDUrl("http://identifiers.org/wikipathways/" + pw_id, prop_nr='P2888',
references=[copy.deepcopy(pathway_reference)])]
query = """
PREFIX wp: <http://vocabularies.wikipathways.org/wp#>
PREFIX dcterms: <http://purl.org/dc/terms/>
select ?pubmed
WHERE {
?pubmed a wp:PublicationReference ;
dcterms:isPartOf <"""
query += str(row[0])
query += """> .}
"""
qres4 = temp.query(query)
p = re.compile('^[0-9]+$')
for pubmed_result in qres4:
pmid = str(pubmed_result[0]).replace("http://identifiers.org/pubmed/", "")
print(pmid)
m = p.match(pmid)
if not m:
pmid_qid, _, _ = wdi_helpers.PublicationHelper(pmid, id_type="doi",
source="crossref").get_or_create(login if write else None)
else:
pmid_qid, _, _ = wdi_helpers.PublicationHelper(pmid.replace("PMID:", ""), id_type="pmid",
source="europepmc").get_or_create(login if write else None)
if pmid_qid is None:
return panic(pathway_id, "not found: {}".format(pmid), "pmid")
else:
if 'P2860' not in prep.keys():
prep["P2860"] = []
print(pmid_qid)
prep['P2860'].append(wdi_core.WDItemID(value=str(pmid_qid), prop_nr='P2860', references=[copy.deepcopy(pathway_reference)]))
author_query = """
PREFIX wp: <http://vocabularies.wikipathways.org/wp#>
SELECT ?author ?authorName ?authorHomepage ?authorQIRI
WHERE {
<http://identifiers.org/wikipathways/""" + pathway_id + """> dc:creator ?author .
?author a foaf:Person ;
foaf:name ?authorName ;
foaf:homepage ?authorHomepage .
OPTIONAL { ?author owl:sameAs ?authorQIRI . }
}
"""
author_query_res = temp.query(author_query)
prep["P2093"] = []
prep["P50"] = []
for row in author_query_res:
author_name = str(row[1])
print("author_name")
print(author_name)
author_homepage = str(row[2])
print("author_homepage")
print(author_homepage)
# P2093 = author name string
author_url_qualifier = wdi_core.WDString(value=author_homepage, prop_nr="P2699", is_qualifier=True)
prep["P2093"].append(wdi_core.WDString(author_name, prop_nr='P2093', qualifiers=[copy.deepcopy(author_url_qualifier)], references=[copy.deepcopy(pathway_reference)]))
if row[3] != None: # only if row[3] exists (authorQIRI)
author_iri = str(row[0])
author_name = str(row[1])
print("author_name")
print(author_name)
author_qiri = str(row[3])
author_qid = author_qiri.replace("http://www.wikidata.org/entity/", "")
print("author_qid")
print(author_qid)
# P50 = author
prep["P50"].append(wdi_core.WDItemID(author_qid, prop_nr='P50', references=[copy.deepcopy(pathway_reference)]))
disease_ontology_query = """
PREFIX wp: <http://vocabularies.wikipathways.org/wp#>
PREFIX dcterms: <http://purl.org/dc/terms/>
SELECT ?diseaseOntologyTerm
WHERE {
?pathwayRDF wp:diseaseOntologyTag ?diseaseOntologyTerm ;
foaf:page ?pathway ;
dcterms:identifier \"""" + pathway_id + """\"^^xsd:string .
}
"""
disease_ontology_query_res = temp.query(disease_ontology_query)
prep["P1050"] = []
for row in disease_ontology_query_res:
disease_ontology_iri = str(row[0])
doid = disease_ontology_iri.replace("http://purl.obolibrary.org/obo/DOID_", "DOID:")
print("doid")
print(doid)
# P1050 = medical condition
if doid_qid.get(doid) != None: #skip if qid is missing
prep["P1050"].append(wdi_core.WDItemID(doid_qid[doid], prop_nr='P1050', references=[copy.deepcopy(pathway_reference)]))
pw_ontology_query = """
PREFIX wp: <http://vocabularies.wikipathways.org/wp#>
PREFIX dcterms: <http://purl.org/dc/terms/>
SELECT ?pwOntologyTerm
WHERE {
?pathwayRDF wp:pathwayOntologyTag ?pwOntologyTerm ;
foaf:page ?pathway ;
dcterms:identifier \"""" + pathway_id + """\"^^xsd:string .
}
"""
pw_ontology_query_res = temp.query(pw_ontology_query)
prep["P921"] = []
for row in pw_ontology_query_res:
pw_ontology_iri = str(row[0])
poid = pw_ontology_iri.replace("http://purl.obolibrary.org/obo/PW_", "PW:")
print("poid")
print(poid)
# P921 = main subject
if poid_qid.get(poid) != None: #skip if qid is missing
prep["P921"].append(wdi_core.WDItemID(poid_qid[poid], prop_nr='P921', references=[copy.deepcopy(pathway_reference)]))
#TODO: Propose Cell Type Ontology ID as new property, add release item, associate terms with WD items.
#cell_type_ontology_query = """
# PREFIX wp: <http://vocabularies.wikipathways.org/wp#>
# PREFIX dcterms: <http://purl.org/dc/terms/>
# SELECT ?cellTypeOntologyTerm
# WHERE {
# ?pathwayRDF wp:cellTypeOntologyTag ?cellTypeOntologyTerm ;
# foaf:page ?pathway ;
# dcterms:identifier \"""" + pathway_id + """\"^^xsd:string .
# }
# """
#cell_type_ontology_query_res = temp.query(cell_type_ontology_query)
#prep["P927"] = []
#for row in cell_type_ontology_query_res:
# cell_type_ontology_iri = str(row[0])
# ctoid = cell_type_ontology_iri.replace("http://purl.obolibrary.org/obo/CL_", "CL:")
# print("ctoid")
# print(ctoid)
# P927 = anatomical location
# prep["P927"].append(wdi_core.WDItem(qid[ctoid], prop_nr='P927', references=[copy.deepcopy(pathway_reference)]))
data2add = []
for key in prep.keys():
for statement in prep[key]:
data2add.append(statement)
print(statement.prop_nr, statement.value)
wdPage = wdi_core.WDItemEngine(data=data2add,
fast_run=fast_run,
fast_run_base_filter=fast_run_base_filter,
fast_run_use_refs=True,
ref_handler=update_retrieved_if_new_multiple_refs,
core_props=core_props)
wdPage.set_label(pw_label, lang="en")
wdPage.set_description(description, lang="en")
try_write(wdPage, record_id=pathway_id, record_prop=PROPS['Wikipathways ID'],
edit_summary="Updated a Wikipathways pathway", login=login, write=write)
def get_PathwayElements(pathway, datatype, temp, prep):
query = """PREFIX wp: <http://vocabularies.wikipathways.org/wp#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX dcterms: <http://purl.org/dc/terms/>
select distinct ?pathway (str(?label) as ?geneProduct) ?id where {
?metabolite a wp:"""
query += datatype
query += """ ;
rdfs:label ?label ;"""
if datatype == "Metabolite":
query += " wp:bdbPubChem ?id ;"
if datatype == "Protein":
query += " wp:bdbWikidata ?id ;"
if datatype == "GeneProduct":
query += " wp:bdbEntrezGene ?id ;"
if datatype == "Complex":
query += " wp:bdbWikidata ?id ;"
query += """
dcterms:isPartOf ?pathway .
?pathway a wp:Pathway ;
dcterms:identifier
"""
query += "\"" + pathway + "\"^^xsd:string .}"
qres2 = temp.query(query)
ids = []
if datatype == "Protein":
for row in qres2:
ids.append("wd:" + str(row[2]).replace(
"http://www.wikidata.org/entity/", "")
)
elif datatype == "Complex":
for row in qres2:
ids.append("wd:" + str(row[2]).replace(
"http://www.wikidata.org/entity/", "")
)
else:
for row in qres2:
ids.append("\"" + str(row[2]).replace("http://rdf.ncbi.nlm.nih.gov/pubchem/compound/CID", "").replace(
"http://identifiers.org/ncbigene/", "") + "\"")
# Check for existence of the ids in wikidata
wd_query = "SELECT DISTINCT * WHERE {VALUES ?id {"
wd_query += " ".join(list(set(ids)))
if datatype == "Metabolite":
wd_query += "} ?item wdt:P662 ?id . }"
if datatype == "GeneProduct":
wd_query += "} ?item wdt:P351 ?id . }"
if datatype == "Protein":
wd_query = "SELECT DISTINCT * WHERE { VALUES ?item { "
wd_query += " ".join(list(set(ids)))
wd_query += " } ?item wdt:P31 | wdt:P279 wd:Q8054 }"
if datatype == "Complex":
wd_query = "SELECT DISTINCT * WHERE { VALUES ?item { "
wd_query += " ".join(list(set(ids)))
wd_query += " } ?item wdt:P7718 | wdt:P3937 ?id }"
if datatype == "Complex":
print(wd_query)
results = wdi_core.WDItemEngine.execute_sparql_query(wd_query,)
if datatype == "Complex":
print(results)
for result in results["results"]["bindings"]:
if "P527" not in prep.keys():
prep["P527"] = []
pathway_reference = create_reference(pathway, retrieved)
prep["P527"].append(
wdi_core.WDItemID(result["item"]["value"].replace("http://www.wikidata.org/entity/", ""),
prop_nr='P527', references=[copy.deepcopy(pathway_reference)]))
return prep
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='run wikipathways bot')
parser.add_argument('--dummy', help='do not actually do write', action='store_true')
parser.add_argument('--no-fastrun', action='store_true')
args = parser.parse_args()
log_dir = "./logs"
run_id = datetime.now().strftime('%Y%m%d_%H:%M')
__metadata__['run_id'] = run_id
fast_run = False if args.no_fastrun else True
retrieved = datetime.now()
log_name = '{}-{}.log'.format(__metadata__['name'], run_id)
if wdi_core.WDItemEngine.logger is not None:
        wdi_core.WDItemEngine.logger.handlers = []
wdi_core.WDItemEngine.setup_logging(log_dir=log_dir, log_name=log_name, header=json.dumps(__metadata__),
logger_name='wikipathways')
main(retrieved, fast_run=fast_run, write=not args.dummy)
| []
| []
| [
"WDPASS",
"WDUSER"
]
| [] | ["WDPASS", "WDUSER"] | python | 2 | 0 | |
github/crash_report.go | package github
import (
"bytes"
"errors"
"fmt"
"os"
"reflect"
"runtime"
"strings"
"github.com/github/hub/git"
"github.com/github/hub/ui"
"github.com/github/hub/utils"
"github.com/github/hub/version"
)
const (
hubReportCrashConfig = "hub.reportCrash"
hubProjectOwner = "github"
hubProjectName = "hub"
)
func CaptureCrash() {
if rec := recover(); rec != nil {
if err, ok := rec.(error); ok {
reportCrash(err)
} else if err, ok := rec.(string); ok {
reportCrash(errors.New(err))
}
}
}
func reportCrash(err error) {
if err == nil {
return
}
buf := make([]byte, 10000)
runtime.Stack(buf, false)
stack := formatStack(buf)
switch reportCrashConfig() {
case "always":
report(err, stack)
case "never":
printError(err, stack)
default:
printError(err, stack)
fmt.Print("Would you like to open an issue? ([Y]es/[N]o/[A]lways/N[e]ver): ")
var confirm string
fmt.Scan(&confirm)
always := utils.IsOption(confirm, "a", "always")
if always || utils.IsOption(confirm, "y", "yes") {
report(err, stack)
}
saveReportConfiguration(confirm, always)
}
os.Exit(1)
}
func report(reportedError error, stack string) {
title, body, err := reportTitleAndBody(reportedError, stack)
utils.Check(err)
project := NewProject(hubProjectOwner, hubProjectName, GitHubHost)
gh := NewClient(project.Host)
issue, err := gh.CreateIssue(project, title, body, []string{"Crash Report"})
utils.Check(err)
ui.Println(issue.HTMLURL)
}
const crashReportTmpl = "Crash report - %v\n\n" +
"Error (%s): `%v`\n\n" +
"Stack:\n\n```\n%s\n```\n\n" +
"Runtime:\n\n```\n%s\n```\n\n" +
"Version:\n\n```\n%s\n```\n" +
`
# Creating crash report:
#
# This information will be posted as a new issue under github/hub.
# We're NOT including any information about the command that you were executing,
# but knowing a little bit more about it would really help us to solve this problem.
# Feel free to modify the title and the description for this issue.
`
func reportTitleAndBody(reportedError error, stack string) (title, body string, err error) {
errType := reflect.TypeOf(reportedError).String()
message := fmt.Sprintf(
crashReportTmpl,
reportedError,
errType,
reportedError,
stack,
runtimeInfo(),
version.FullVersion(),
)
editor, err := NewEditor("CRASH_REPORT", "crash report", message)
if err != nil {
return "", "", err
}
defer editor.DeleteFile()
return editor.EditTitleAndBody()
}
func runtimeInfo() string {
return fmt.Sprintf("GOOS: %s\nGOARCH: %s", runtime.GOOS, runtime.GOARCH)
}
func formatStack(buf []byte) string {
buf = bytes.Trim(buf, "\x00")
stack := strings.Split(string(buf), "\n")
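	// Keep the goroutine header line but skip the frames that belong to the
	// crash-reporting code itself.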
stack = append(stack[0:1], stack[5:]...)
return strings.Join(stack, "\n")
}
func printError(err error, stack string) {
ui.Printf("%v\n\n", err)
ui.Println(stack)
}
func saveReportConfiguration(confirm string, always bool) {
if always {
git.SetGlobalConfig(hubReportCrashConfig, "always")
} else if utils.IsOption(confirm, "e", "never") {
git.SetGlobalConfig(hubReportCrashConfig, "never")
}
}
func reportCrashConfig() (opt string) {
opt = os.Getenv("HUB_REPORT_CRASH")
if opt == "" {
opt, _ = git.GlobalConfig(hubReportCrashConfig)
}
return
}
| [
"\"HUB_REPORT_CRASH\""
]
| []
| [
"HUB_REPORT_CRASH"
]
| [] | ["HUB_REPORT_CRASH"] | go | 1 | 0 | |
tests/unit/gapic/talent_v4/test_job_service.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.talent_v4.services.job_service import JobServiceAsyncClient
from google.cloud.talent_v4.services.job_service import JobServiceClient
from google.cloud.talent_v4.services.job_service import pagers
from google.cloud.talent_v4.services.job_service import transports
from google.cloud.talent_v4.types import common
from google.cloud.talent_v4.types import filters
from google.cloud.talent_v4.types import histogram
from google.cloud.talent_v4.types import job
from google.cloud.talent_v4.types import job as gct_job
from google.cloud.talent_v4.types import job_service
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.protobuf import wrappers_pb2 # type: ignore
from google.type import latlng_pb2 # type: ignore
from google.type import money_pb2 # type: ignore
from google.type import postal_address_pb2 # type: ignore
from google.type import timeofday_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert JobServiceClient._get_default_mtls_endpoint(None) is None
assert (
JobServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
)
assert (
JobServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
JobServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
JobServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert JobServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [JobServiceClient, JobServiceAsyncClient,])
def test_job_service_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "jobs.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.JobServiceGrpcTransport, "grpc"),
(transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_job_service_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [JobServiceClient, JobServiceAsyncClient,])
def test_job_service_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "jobs.googleapis.com:443"
def test_job_service_client_get_transport_class():
transport = JobServiceClient.get_transport_class()
available_transports = [
transports.JobServiceGrpcTransport,
]
assert transport in available_transports
transport = JobServiceClient.get_transport_class("grpc")
assert transport == transports.JobServiceGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(JobServiceClient, transports.JobServiceGrpcTransport, "grpc"),
(
JobServiceAsyncClient,
transports.JobServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient)
)
@mock.patch.object(
JobServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(JobServiceAsyncClient),
)
def test_job_service_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(JobServiceClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(JobServiceClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(JobServiceClient, transports.JobServiceGrpcTransport, "grpc", "true"),
(
JobServiceAsyncClient,
transports.JobServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(JobServiceClient, transports.JobServiceGrpcTransport, "grpc", "false"),
(
JobServiceAsyncClient,
transports.JobServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient)
)
@mock.patch.object(
JobServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(JobServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_job_service_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(JobServiceClient, transports.JobServiceGrpcTransport, "grpc"),
(
JobServiceAsyncClient,
transports.JobServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_job_service_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(JobServiceClient, transports.JobServiceGrpcTransport, "grpc"),
(
JobServiceAsyncClient,
transports.JobServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_job_service_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_job_service_client_client_options_from_dict():
with mock.patch(
"google.cloud.talent_v4.services.job_service.transports.JobServiceGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = JobServiceClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_create_job(transport: str = "grpc", request_type=job_service.CreateJobRequest):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gct_job.Job(
name="name_value",
company="company_value",
requisition_id="requisition_id_value",
title="title_value",
description="description_value",
addresses=["addresses_value"],
job_benefits=[common.JobBenefit.CHILD_CARE],
degree_types=[common.DegreeType.PRIMARY_EDUCATION],
department="department_value",
employment_types=[common.EmploymentType.FULL_TIME],
incentives="incentives_value",
language_code="language_code_value",
job_level=common.JobLevel.ENTRY_LEVEL,
promotion_value=1635,
qualifications="qualifications_value",
responsibilities="responsibilities_value",
posting_region=common.PostingRegion.ADMINISTRATIVE_AREA,
visibility=common.Visibility.ACCOUNT_ONLY,
company_display_name="company_display_name_value",
)
response = client.create_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gct_job.Job)
assert response.name == "name_value"
assert response.company == "company_value"
assert response.requisition_id == "requisition_id_value"
assert response.title == "title_value"
assert response.description == "description_value"
assert response.addresses == ["addresses_value"]
assert response.job_benefits == [common.JobBenefit.CHILD_CARE]
assert response.degree_types == [common.DegreeType.PRIMARY_EDUCATION]
assert response.department == "department_value"
assert response.employment_types == [common.EmploymentType.FULL_TIME]
assert response.incentives == "incentives_value"
assert response.language_code == "language_code_value"
assert response.job_level == common.JobLevel.ENTRY_LEVEL
assert response.promotion_value == 1635
assert response.qualifications == "qualifications_value"
assert response.responsibilities == "responsibilities_value"
assert response.posting_region == common.PostingRegion.ADMINISTRATIVE_AREA
assert response.visibility == common.Visibility.ACCOUNT_ONLY
assert response.company_display_name == "company_display_name_value"
def test_create_job_from_dict():
test_create_job(request_type=dict)
def test_create_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_job), "__call__") as call:
client.create_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateJobRequest()
@pytest.mark.asyncio
async def test_create_job_async(
transport: str = "grpc_asyncio", request_type=job_service.CreateJobRequest
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gct_job.Job(
name="name_value",
company="company_value",
requisition_id="requisition_id_value",
title="title_value",
description="description_value",
addresses=["addresses_value"],
job_benefits=[common.JobBenefit.CHILD_CARE],
degree_types=[common.DegreeType.PRIMARY_EDUCATION],
department="department_value",
employment_types=[common.EmploymentType.FULL_TIME],
incentives="incentives_value",
language_code="language_code_value",
job_level=common.JobLevel.ENTRY_LEVEL,
promotion_value=1635,
qualifications="qualifications_value",
responsibilities="responsibilities_value",
posting_region=common.PostingRegion.ADMINISTRATIVE_AREA,
visibility=common.Visibility.ACCOUNT_ONLY,
company_display_name="company_display_name_value",
)
)
response = await client.create_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gct_job.Job)
assert response.name == "name_value"
assert response.company == "company_value"
assert response.requisition_id == "requisition_id_value"
assert response.title == "title_value"
assert response.description == "description_value"
assert response.addresses == ["addresses_value"]
assert response.job_benefits == [common.JobBenefit.CHILD_CARE]
assert response.degree_types == [common.DegreeType.PRIMARY_EDUCATION]
assert response.department == "department_value"
assert response.employment_types == [common.EmploymentType.FULL_TIME]
assert response.incentives == "incentives_value"
assert response.language_code == "language_code_value"
assert response.job_level == common.JobLevel.ENTRY_LEVEL
assert response.promotion_value == 1635
assert response.qualifications == "qualifications_value"
assert response.responsibilities == "responsibilities_value"
assert response.posting_region == common.PostingRegion.ADMINISTRATIVE_AREA
assert response.visibility == common.Visibility.ACCOUNT_ONLY
assert response.company_display_name == "company_display_name_value"
@pytest.mark.asyncio
async def test_create_job_async_from_dict():
await test_create_job_async(request_type=dict)
def test_create_job_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CreateJobRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_job), "__call__") as call:
call.return_value = gct_job.Job()
client.create_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_job_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CreateJobRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_job), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gct_job.Job())
await client.create_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_job_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gct_job.Job()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_job(
parent="parent_value", job=gct_job.Job(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].job
mock_val = gct_job.Job(name="name_value")
assert arg == mock_val
def test_create_job_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_job(
job_service.CreateJobRequest(),
parent="parent_value",
job=gct_job.Job(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_job_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gct_job.Job()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gct_job.Job())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_job(
parent="parent_value", job=gct_job.Job(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].job
mock_val = gct_job.Job(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_job_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_job(
job_service.CreateJobRequest(),
parent="parent_value",
job=gct_job.Job(name="name_value"),
)
def test_batch_create_jobs(
transport: str = "grpc", request_type=job_service.BatchCreateJobsRequest
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.batch_create_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.BatchCreateJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_batch_create_jobs_from_dict():
test_batch_create_jobs(request_type=dict)
def test_batch_create_jobs_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_jobs), "__call__"
) as call:
client.batch_create_jobs()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.BatchCreateJobsRequest()
@pytest.mark.asyncio
async def test_batch_create_jobs_async(
transport: str = "grpc_asyncio", request_type=job_service.BatchCreateJobsRequest
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.batch_create_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.BatchCreateJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_batch_create_jobs_async_from_dict():
await test_batch_create_jobs_async(request_type=dict)
def test_batch_create_jobs_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.BatchCreateJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_jobs), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.batch_create_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_batch_create_jobs_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.BatchCreateJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_jobs), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.batch_create_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_batch_create_jobs_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.batch_create_jobs(
parent="parent_value", jobs=[job.Job(name="name_value")],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].jobs
mock_val = [job.Job(name="name_value")]
assert arg == mock_val
def test_batch_create_jobs_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.batch_create_jobs(
job_service.BatchCreateJobsRequest(),
parent="parent_value",
jobs=[job.Job(name="name_value")],
)
@pytest.mark.asyncio
async def test_batch_create_jobs_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.batch_create_jobs(
parent="parent_value", jobs=[job.Job(name="name_value")],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].jobs
mock_val = [job.Job(name="name_value")]
assert arg == mock_val
@pytest.mark.asyncio
async def test_batch_create_jobs_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.batch_create_jobs(
job_service.BatchCreateJobsRequest(),
parent="parent_value",
jobs=[job.Job(name="name_value")],
)
def test_get_job(transport: str = "grpc", request_type=job_service.GetJobRequest):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = job.Job(
name="name_value",
company="company_value",
requisition_id="requisition_id_value",
title="title_value",
description="description_value",
addresses=["addresses_value"],
job_benefits=[common.JobBenefit.CHILD_CARE],
degree_types=[common.DegreeType.PRIMARY_EDUCATION],
department="department_value",
employment_types=[common.EmploymentType.FULL_TIME],
incentives="incentives_value",
language_code="language_code_value",
job_level=common.JobLevel.ENTRY_LEVEL,
promotion_value=1635,
qualifications="qualifications_value",
responsibilities="responsibilities_value",
posting_region=common.PostingRegion.ADMINISTRATIVE_AREA,
visibility=common.Visibility.ACCOUNT_ONLY,
company_display_name="company_display_name_value",
)
response = client.get_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, job.Job)
assert response.name == "name_value"
assert response.company == "company_value"
assert response.requisition_id == "requisition_id_value"
assert response.title == "title_value"
assert response.description == "description_value"
assert response.addresses == ["addresses_value"]
assert response.job_benefits == [common.JobBenefit.CHILD_CARE]
assert response.degree_types == [common.DegreeType.PRIMARY_EDUCATION]
assert response.department == "department_value"
assert response.employment_types == [common.EmploymentType.FULL_TIME]
assert response.incentives == "incentives_value"
assert response.language_code == "language_code_value"
assert response.job_level == common.JobLevel.ENTRY_LEVEL
assert response.promotion_value == 1635
assert response.qualifications == "qualifications_value"
assert response.responsibilities == "responsibilities_value"
assert response.posting_region == common.PostingRegion.ADMINISTRATIVE_AREA
assert response.visibility == common.Visibility.ACCOUNT_ONLY
assert response.company_display_name == "company_display_name_value"
def test_get_job_from_dict():
test_get_job(request_type=dict)
def test_get_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_job), "__call__") as call:
client.get_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetJobRequest()
@pytest.mark.asyncio
async def test_get_job_async(
transport: str = "grpc_asyncio", request_type=job_service.GetJobRequest
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_job), "__call__") as call:
# Designate an appropriate return value for the call.
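        # FakeUnaryUnaryCall wraps the response in an awaitable fake gRPC call,
        # which is what the async client expects to ``await``.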
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job.Job(
name="name_value",
company="company_value",
requisition_id="requisition_id_value",
title="title_value",
description="description_value",
addresses=["addresses_value"],
job_benefits=[common.JobBenefit.CHILD_CARE],
degree_types=[common.DegreeType.PRIMARY_EDUCATION],
department="department_value",
employment_types=[common.EmploymentType.FULL_TIME],
incentives="incentives_value",
language_code="language_code_value",
job_level=common.JobLevel.ENTRY_LEVEL,
promotion_value=1635,
qualifications="qualifications_value",
responsibilities="responsibilities_value",
posting_region=common.PostingRegion.ADMINISTRATIVE_AREA,
visibility=common.Visibility.ACCOUNT_ONLY,
company_display_name="company_display_name_value",
)
)
response = await client.get_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, job.Job)
assert response.name == "name_value"
assert response.company == "company_value"
assert response.requisition_id == "requisition_id_value"
assert response.title == "title_value"
assert response.description == "description_value"
assert response.addresses == ["addresses_value"]
assert response.job_benefits == [common.JobBenefit.CHILD_CARE]
assert response.degree_types == [common.DegreeType.PRIMARY_EDUCATION]
assert response.department == "department_value"
assert response.employment_types == [common.EmploymentType.FULL_TIME]
assert response.incentives == "incentives_value"
assert response.language_code == "language_code_value"
assert response.job_level == common.JobLevel.ENTRY_LEVEL
assert response.promotion_value == 1635
assert response.qualifications == "qualifications_value"
assert response.responsibilities == "responsibilities_value"
assert response.posting_region == common.PostingRegion.ADMINISTRATIVE_AREA
assert response.visibility == common.Visibility.ACCOUNT_ONLY
assert response.company_display_name == "company_display_name_value"
@pytest.mark.asyncio
async def test_get_job_async_from_dict():
await test_get_job_async(request_type=dict)
def test_get_job_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.GetJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_job), "__call__") as call:
call.return_value = job.Job()
client.get_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_job_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.GetJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_job), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job.Job())
await client.get_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_job_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = job.Job()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
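        # The client copies each flattened keyword argument onto a
        # GetJobRequest before invoking the stub (verified below).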
client.get_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_job_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_job(
job_service.GetJobRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_job_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_job), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job.Job())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_job_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_job(
job_service.GetJobRequest(), name="name_value",
)
def test_update_job(transport: str = "grpc", request_type=job_service.UpdateJobRequest):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gct_job.Job(
name="name_value",
company="company_value",
requisition_id="requisition_id_value",
title="title_value",
description="description_value",
addresses=["addresses_value"],
job_benefits=[common.JobBenefit.CHILD_CARE],
degree_types=[common.DegreeType.PRIMARY_EDUCATION],
department="department_value",
employment_types=[common.EmploymentType.FULL_TIME],
incentives="incentives_value",
language_code="language_code_value",
job_level=common.JobLevel.ENTRY_LEVEL,
promotion_value=1635,
qualifications="qualifications_value",
responsibilities="responsibilities_value",
posting_region=common.PostingRegion.ADMINISTRATIVE_AREA,
visibility=common.Visibility.ACCOUNT_ONLY,
company_display_name="company_display_name_value",
)
response = client.update_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.UpdateJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gct_job.Job)
assert response.name == "name_value"
assert response.company == "company_value"
assert response.requisition_id == "requisition_id_value"
assert response.title == "title_value"
assert response.description == "description_value"
assert response.addresses == ["addresses_value"]
assert response.job_benefits == [common.JobBenefit.CHILD_CARE]
assert response.degree_types == [common.DegreeType.PRIMARY_EDUCATION]
assert response.department == "department_value"
assert response.employment_types == [common.EmploymentType.FULL_TIME]
assert response.incentives == "incentives_value"
assert response.language_code == "language_code_value"
assert response.job_level == common.JobLevel.ENTRY_LEVEL
assert response.promotion_value == 1635
assert response.qualifications == "qualifications_value"
assert response.responsibilities == "responsibilities_value"
assert response.posting_region == common.PostingRegion.ADMINISTRATIVE_AREA
assert response.visibility == common.Visibility.ACCOUNT_ONLY
assert response.company_display_name == "company_display_name_value"
def test_update_job_from_dict():
test_update_job(request_type=dict)
def test_update_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_job), "__call__") as call:
client.update_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.UpdateJobRequest()
@pytest.mark.asyncio
async def test_update_job_async(
transport: str = "grpc_asyncio", request_type=job_service.UpdateJobRequest
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gct_job.Job(
name="name_value",
company="company_value",
requisition_id="requisition_id_value",
title="title_value",
description="description_value",
addresses=["addresses_value"],
job_benefits=[common.JobBenefit.CHILD_CARE],
degree_types=[common.DegreeType.PRIMARY_EDUCATION],
department="department_value",
employment_types=[common.EmploymentType.FULL_TIME],
incentives="incentives_value",
language_code="language_code_value",
job_level=common.JobLevel.ENTRY_LEVEL,
promotion_value=1635,
qualifications="qualifications_value",
responsibilities="responsibilities_value",
posting_region=common.PostingRegion.ADMINISTRATIVE_AREA,
visibility=common.Visibility.ACCOUNT_ONLY,
company_display_name="company_display_name_value",
)
)
response = await client.update_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.UpdateJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gct_job.Job)
assert response.name == "name_value"
assert response.company == "company_value"
assert response.requisition_id == "requisition_id_value"
assert response.title == "title_value"
assert response.description == "description_value"
assert response.addresses == ["addresses_value"]
assert response.job_benefits == [common.JobBenefit.CHILD_CARE]
assert response.degree_types == [common.DegreeType.PRIMARY_EDUCATION]
assert response.department == "department_value"
assert response.employment_types == [common.EmploymentType.FULL_TIME]
assert response.incentives == "incentives_value"
assert response.language_code == "language_code_value"
assert response.job_level == common.JobLevel.ENTRY_LEVEL
assert response.promotion_value == 1635
assert response.qualifications == "qualifications_value"
assert response.responsibilities == "responsibilities_value"
assert response.posting_region == common.PostingRegion.ADMINISTRATIVE_AREA
assert response.visibility == common.Visibility.ACCOUNT_ONLY
assert response.company_display_name == "company_display_name_value"
@pytest.mark.asyncio
async def test_update_job_async_from_dict():
await test_update_job_async(request_type=dict)
def test_update_job_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.UpdateJobRequest()
request.job.name = "job.name/value"
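    # UpdateJob routes on a nested field, so the expected header key below is
    # "job.name" rather than a top-level field name.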
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_job), "__call__") as call:
call.return_value = gct_job.Job()
client.update_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "job.name=job.name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_job_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.UpdateJobRequest()
request.job.name = "job.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_job), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gct_job.Job())
await client.update_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "job.name=job.name/value",) in kw["metadata"]
def test_update_job_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gct_job.Job()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_job(
job=gct_job.Job(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].job
mock_val = gct_job.Job(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_job_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_job(
job_service.UpdateJobRequest(),
job=gct_job.Job(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_job_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_job), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gct_job.Job())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_job(
job=gct_job.Job(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].job
mock_val = gct_job.Job(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_job_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_job(
job_service.UpdateJobRequest(),
job=gct_job.Job(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
def test_batch_update_jobs(
transport: str = "grpc", request_type=job_service.BatchUpdateJobsRequest
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_update_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
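        # BatchUpdateJobs is a long-running operation: the stub returns an
        # Operation proto, which the client wraps in an operation future.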
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.batch_update_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.BatchUpdateJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_batch_update_jobs_from_dict():
test_batch_update_jobs(request_type=dict)
def test_batch_update_jobs_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_update_jobs), "__call__"
) as call:
client.batch_update_jobs()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.BatchUpdateJobsRequest()
@pytest.mark.asyncio
async def test_batch_update_jobs_async(
transport: str = "grpc_asyncio", request_type=job_service.BatchUpdateJobsRequest
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_update_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.batch_update_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.BatchUpdateJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_batch_update_jobs_async_from_dict():
await test_batch_update_jobs_async(request_type=dict)
def test_batch_update_jobs_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.BatchUpdateJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_update_jobs), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.batch_update_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_batch_update_jobs_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.BatchUpdateJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_update_jobs), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.batch_update_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_batch_update_jobs_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_update_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.batch_update_jobs(
parent="parent_value", jobs=[job.Job(name="name_value")],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].jobs
mock_val = [job.Job(name="name_value")]
assert arg == mock_val
def test_batch_update_jobs_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.batch_update_jobs(
job_service.BatchUpdateJobsRequest(),
parent="parent_value",
jobs=[job.Job(name="name_value")],
)
@pytest.mark.asyncio
async def test_batch_update_jobs_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_update_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.batch_update_jobs(
parent="parent_value", jobs=[job.Job(name="name_value")],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].jobs
mock_val = [job.Job(name="name_value")]
assert arg == mock_val
@pytest.mark.asyncio
async def test_batch_update_jobs_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.batch_update_jobs(
job_service.BatchUpdateJobsRequest(),
parent="parent_value",
jobs=[job.Job(name="name_value")],
)
def test_delete_job(transport: str = "grpc", request_type=job_service.DeleteJobRequest):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_job), "__call__") as call:
# Designate an appropriate return value for the call.
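        # DeleteJob returns google.protobuf.Empty, which the client surfaces
        # as ``None`` (asserted below).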
call.return_value = None
response = client.delete_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteJobRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_job_from_dict():
test_delete_job(request_type=dict)
def test_delete_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_job), "__call__") as call:
client.delete_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteJobRequest()
@pytest.mark.asyncio
async def test_delete_job_async(
transport: str = "grpc_asyncio", request_type=job_service.DeleteJobRequest
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteJobRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_job_async_from_dict():
await test_delete_job_async(request_type=dict)
def test_delete_job_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.DeleteJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_job), "__call__") as call:
call.return_value = None
client.delete_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_job_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.DeleteJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_job), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_job_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_job_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_job(
job_service.DeleteJobRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_job_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_job), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_job_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_job(
job_service.DeleteJobRequest(), name="name_value",
)
def test_batch_delete_jobs(
transport: str = "grpc", request_type=job_service.BatchDeleteJobsRequest
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_delete_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.batch_delete_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.BatchDeleteJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_batch_delete_jobs_from_dict():
test_batch_delete_jobs(request_type=dict)
def test_batch_delete_jobs_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_delete_jobs), "__call__"
) as call:
client.batch_delete_jobs()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.BatchDeleteJobsRequest()
@pytest.mark.asyncio
async def test_batch_delete_jobs_async(
transport: str = "grpc_asyncio", request_type=job_service.BatchDeleteJobsRequest
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_delete_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.batch_delete_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.BatchDeleteJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_batch_delete_jobs_async_from_dict():
await test_batch_delete_jobs_async(request_type=dict)
def test_batch_delete_jobs_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.BatchDeleteJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_delete_jobs), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.batch_delete_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_batch_delete_jobs_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.BatchDeleteJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_delete_jobs), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.batch_delete_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_batch_delete_jobs_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_delete_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.batch_delete_jobs(
parent="parent_value", names=["names_value"],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].names
mock_val = ["names_value"]
assert arg == mock_val
def test_batch_delete_jobs_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.batch_delete_jobs(
job_service.BatchDeleteJobsRequest(),
parent="parent_value",
names=["names_value"],
)
@pytest.mark.asyncio
async def test_batch_delete_jobs_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_delete_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.batch_delete_jobs(
parent="parent_value", names=["names_value"],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].names
mock_val = ["names_value"]
assert arg == mock_val
@pytest.mark.asyncio
async def test_batch_delete_jobs_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.batch_delete_jobs(
job_service.BatchDeleteJobsRequest(),
parent="parent_value",
names=["names_value"],
)
def test_list_jobs(transport: str = "grpc", request_type=job_service.ListJobsRequest):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = job_service.ListJobsResponse(
next_page_token="next_page_token_value",
)
response = client.list_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListJobsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_jobs_from_dict():
test_list_jobs(request_type=dict)
def test_list_jobs_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
client.list_jobs()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListJobsRequest()
@pytest.mark.asyncio
async def test_list_jobs_async(
transport: str = "grpc_asyncio", request_type=job_service.ListJobsRequest
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListJobsResponse(next_page_token="next_page_token_value",)
)
response = await client.list_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListJobsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_jobs_async_from_dict():
await test_list_jobs_async(request_type=dict)
def test_list_jobs_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.ListJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
call.return_value = job_service.ListJobsResponse()
client.list_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_jobs_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.ListJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListJobsResponse()
)
await client.list_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_jobs_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = job_service.ListJobsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_jobs(
parent="parent_value", filter="filter_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].filter
mock_val = "filter_value"
assert arg == mock_val
def test_list_jobs_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_jobs(
job_service.ListJobsRequest(), parent="parent_value", filter="filter_value",
)
@pytest.mark.asyncio
async def test_list_jobs_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            job_service.ListJobsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_jobs(parent="parent_value", filter="filter_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].filter
mock_val = "filter_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_jobs_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_jobs(
job_service.ListJobsRequest(), parent="parent_value", filter="filter_value",
)
def test_list_jobs_pager():
    client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
# Set the response to a series of pages.
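        # The trailing RuntimeError guards against the pager requesting more
        # pages than the mocked sequence provides.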
call.side_effect = (
job_service.ListJobsResponse(
jobs=[job.Job(), job.Job(), job.Job(),], next_page_token="abc",
),
job_service.ListJobsResponse(jobs=[], next_page_token="def",),
job_service.ListJobsResponse(jobs=[job.Job(),], next_page_token="ghi",),
job_service.ListJobsResponse(jobs=[job.Job(), job.Job(),],),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_jobs(request={})
assert pager._metadata == metadata
        results = list(pager)
assert len(results) == 6
assert all(isinstance(i, job.Job) for i in results)
def test_list_jobs_pages():
    client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_jobs), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListJobsResponse(
jobs=[job.Job(), job.Job(), job.Job(),], next_page_token="abc",
),
job_service.ListJobsResponse(jobs=[], next_page_token="def",),
job_service.ListJobsResponse(jobs=[job.Job(),], next_page_token="ghi",),
job_service.ListJobsResponse(jobs=[job.Job(), job.Job(),],),
RuntimeError,
)
pages = list(client.list_jobs(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
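# Illustrative only (not executed as a test): application code typically
# consumes the pager by iterating it directly, e.g.
#
#     for job_ in client.list_jobs(request={"parent": "projects/p/tenants/t"}):
#         print(job_.name)
#
# The request shape above is an assumption for the sketch, not something
# asserted by these tests.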
@pytest.mark.asyncio
async def test_list_jobs_async_pager():
    client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_jobs), "__call__", new_callable=mock.AsyncMock
) as call:
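        # ``AsyncMock`` is required so each mocked page fetch is awaitable by
        # the async pager.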
# Set the response to a series of pages.
call.side_effect = (
job_service.ListJobsResponse(
jobs=[job.Job(), job.Job(), job.Job(),], next_page_token="abc",
),
job_service.ListJobsResponse(jobs=[], next_page_token="def",),
job_service.ListJobsResponse(jobs=[job.Job(),], next_page_token="ghi",),
job_service.ListJobsResponse(jobs=[job.Job(), job.Job(),],),
RuntimeError,
)
async_pager = await client.list_jobs(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, job.Job) for i in responses)
@pytest.mark.asyncio
async def test_list_jobs_async_pages():
    client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_jobs), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListJobsResponse(
jobs=[job.Job(), job.Job(), job.Job(),], next_page_token="abc",
),
job_service.ListJobsResponse(jobs=[], next_page_token="def",),
job_service.ListJobsResponse(jobs=[job.Job(),], next_page_token="ghi",),
job_service.ListJobsResponse(jobs=[job.Job(), job.Job(),],),
RuntimeError,
)
pages = []
async for page_ in (await client.list_jobs(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_search_jobs(
transport: str = "grpc", request_type=job_service.SearchJobsRequest
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_jobs), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = job_service.SearchJobsResponse(
next_page_token="next_page_token_value",
total_size=1086,
broadened_query_jobs_count=2766,
)
response = client.search_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.SearchJobsRequest()
# Establish that the response is the type that we expect.
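    # search_jobs returns the raw SearchJobsResponse, so the response is its
    # own raw page.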
assert response.raw_page is response
assert isinstance(response, job_service.SearchJobsResponse)
assert response.next_page_token == "next_page_token_value"
assert response.total_size == 1086
assert response.broadened_query_jobs_count == 2766
def test_search_jobs_from_dict():
test_search_jobs(request_type=dict)
def test_search_jobs_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_jobs), "__call__") as call:
client.search_jobs()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.SearchJobsRequest()
@pytest.mark.asyncio
async def test_search_jobs_async(
transport: str = "grpc_asyncio", request_type=job_service.SearchJobsRequest
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_jobs), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.SearchJobsResponse(
next_page_token="next_page_token_value",
total_size=1086,
broadened_query_jobs_count=2766,
)
)
response = await client.search_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.SearchJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, job_service.SearchJobsResponse)
assert response.next_page_token == "next_page_token_value"
assert response.total_size == 1086
assert response.broadened_query_jobs_count == 2766
@pytest.mark.asyncio
async def test_search_jobs_async_from_dict():
await test_search_jobs_async(request_type=dict)
def test_search_jobs_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.SearchJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_jobs), "__call__") as call:
call.return_value = job_service.SearchJobsResponse()
client.search_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_search_jobs_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.SearchJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_jobs), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.SearchJobsResponse()
)
await client.search_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_search_jobs_for_alert(
transport: str = "grpc", request_type=job_service.SearchJobsRequest
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_jobs_for_alert), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = job_service.SearchJobsResponse(
next_page_token="next_page_token_value",
total_size=1086,
broadened_query_jobs_count=2766,
)
response = client.search_jobs_for_alert(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.SearchJobsRequest()
# Establish that the response is the type that we expect.
assert response.raw_page is response
assert isinstance(response, job_service.SearchJobsResponse)
assert response.next_page_token == "next_page_token_value"
assert response.total_size == 1086
assert response.broadened_query_jobs_count == 2766
def test_search_jobs_for_alert_from_dict():
test_search_jobs_for_alert(request_type=dict)
def test_search_jobs_for_alert_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_jobs_for_alert), "__call__"
) as call:
client.search_jobs_for_alert()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.SearchJobsRequest()
@pytest.mark.asyncio
async def test_search_jobs_for_alert_async(
transport: str = "grpc_asyncio", request_type=job_service.SearchJobsRequest
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_jobs_for_alert), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.SearchJobsResponse(
next_page_token="next_page_token_value",
total_size=1086,
broadened_query_jobs_count=2766,
)
)
response = await client.search_jobs_for_alert(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.SearchJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, job_service.SearchJobsResponse)
assert response.next_page_token == "next_page_token_value"
assert response.total_size == 1086
assert response.broadened_query_jobs_count == 2766
@pytest.mark.asyncio
async def test_search_jobs_for_alert_async_from_dict():
await test_search_jobs_for_alert_async(request_type=dict)
def test_search_jobs_for_alert_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.SearchJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_jobs_for_alert), "__call__"
) as call:
call.return_value = job_service.SearchJobsResponse()
client.search_jobs_for_alert(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_search_jobs_for_alert_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.SearchJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_jobs_for_alert), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.SearchJobsResponse()
)
await client.search_jobs_for_alert(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.JobServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.JobServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = JobServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.JobServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = JobServiceClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.JobServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = JobServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.JobServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.JobServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport,],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.JobServiceGrpcTransport,)
def test_job_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.JobServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_job_service_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.talent_v4.services.job_service.transports.JobServiceTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.JobServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"create_job",
"batch_create_jobs",
"get_job",
"update_job",
"batch_update_jobs",
"delete_job",
"batch_delete_jobs",
"list_jobs",
"search_jobs",
"search_jobs_for_alert",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
def test_job_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.talent_v4.services.job_service.transports.JobServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.JobServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/jobs",
),
quota_project_id="octopus",
)
def test_job_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.talent_v4.services.job_service.transports.JobServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.JobServiceTransport()
adc.assert_called_once()
def test_job_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
JobServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/jobs",
),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport,],
)
def test_job_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/jobs",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.JobServiceGrpcTransport, grpc_helpers),
(transports.JobServiceGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_job_service_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"jobs.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/jobs",
),
scopes=["1", "2"],
default_host="jobs.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport],
)
def test_job_service_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_job_service_host_no_port():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint="jobs.googleapis.com"),
)
assert client.transport._host == "jobs.googleapis.com:443"
def test_job_service_host_with_port():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="jobs.googleapis.com:8000"
),
)
assert client.transport._host == "jobs.googleapis.com:8000"
def test_job_service_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.JobServiceGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials is None
def test_job_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.JobServiceGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport],
)
def test_job_service_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport],
)
def test_job_service_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_job_service_grpc_lro_client():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
# Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_job_service_grpc_lro_async_client():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
# Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_company_path():
project = "squid"
tenant = "clam"
company = "whelk"
expected = "projects/{project}/tenants/{tenant}/companies/{company}".format(
project=project, tenant=tenant, company=company,
)
actual = JobServiceClient.company_path(project, tenant, company)
assert expected == actual
def test_parse_company_path():
expected = {
"project": "octopus",
"tenant": "oyster",
"company": "nudibranch",
}
path = JobServiceClient.company_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_company_path(path)
assert expected == actual
def test_job_path():
project = "cuttlefish"
tenant = "mussel"
job = "winkle"
expected = "projects/{project}/tenants/{tenant}/jobs/{job}".format(
project=project, tenant=tenant, job=job,
)
actual = JobServiceClient.job_path(project, tenant, job)
assert expected == actual
def test_parse_job_path():
expected = {
"project": "nautilus",
"tenant": "scallop",
"job": "abalone",
}
path = JobServiceClient.job_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_job_path(path)
assert expected == actual
def test_tenant_path():
project = "squid"
tenant = "clam"
expected = "projects/{project}/tenants/{tenant}".format(
project=project, tenant=tenant,
)
actual = JobServiceClient.tenant_path(project, tenant)
assert expected == actual
def test_parse_tenant_path():
expected = {
"project": "whelk",
"tenant": "octopus",
}
path = JobServiceClient.tenant_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_tenant_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "oyster"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = JobServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nudibranch",
}
path = JobServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "cuttlefish"
expected = "folders/{folder}".format(folder=folder,)
actual = JobServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "mussel",
}
path = JobServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "winkle"
expected = "organizations/{organization}".format(organization=organization,)
actual = JobServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nautilus",
}
path = JobServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "scallop"
expected = "projects/{project}".format(project=project,)
actual = JobServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "abalone",
}
path = JobServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "squid"
location = "clam"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = JobServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "whelk",
"location": "octopus",
}
path = JobServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.JobServiceTransport, "_prep_wrapped_messages"
) as prep:
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.JobServiceTransport, "_prep_wrapped_messages"
) as prep:
transport_class = JobServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
recipes/msys2/all/conanfile.py | from conans import ConanFile, tools
from conans.errors import ConanInvalidConfiguration, ConanException
import errno
import fnmatch
import os
import shutil
import subprocess
try:
import ctypes
from ctypes import wintypes
except ImportError:
pass
except ValueError:
pass
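# Process-wide lock backed by a named Windows mutex ("Global\\ConanMSYS2") so that
# concurrent Conan builds do not run pacman against the same MSYS2 install at once.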
class lock:
def __init__(self):
self.handle = ctypes.windll.kernel32.CreateMutexA(None, 0, "Global\\ConanMSYS2".encode())
if not self.handle:
raise ctypes.WinError()
def __enter__(self):
status = ctypes.windll.kernel32.WaitForSingleObject(self.handle, 0xFFFFFFFF)
if status not in [0, 0x80]:
raise ctypes.WinError()
def __exit__(self, exc_type, exc_val, exc_tb):
status = ctypes.windll.kernel32.ReleaseMutex(self.handle)
if not status:
raise ctypes.WinError()
def close(self):
ctypes.windll.kernel32.CloseHandle(self.handle)
__del__ = close
class MSYS2Conan(ConanFile):
name = "msys2"
description = "MSYS2 is a software distro and building platform for Windows"
url = "https://github.com/conan-io/conan-center-index"
homepage = "http://www.msys2.org"
license = "MSYS license"
topics = ("conan", "msys", "unix", "subsystem")
short_paths = True
# "exclude_files" "packages" "additional_packages" values are a comma separated list
options = {
"exclude_files": "ANY",
"packages": "ANY",
"additional_packages": "ANY"
}
default_options = {
"exclude_files": "*/link.exe",
"packages": "base-devel,binutils,gcc",
"additional_packages": None
}
settings = "os", "arch"
def validate(self):
if self.settings.os != "Windows":
raise ConanInvalidConfiguration("Only Windows supported")
if self.settings.arch != "x86_64":
raise ConanInvalidConfiguration("Only Windows x64 supported")
def _update_pacman(self):
with tools.chdir(os.path.join(self._msys_dir, "usr", "bin")):
try:
self._kill_pacman()
# https://www.msys2.org/docs/ci/
self.run('bash -l -c "pacman --debug --noconfirm --ask 20 -Syuu"') # Core update (in case any core packages are outdated)
self._kill_pacman()
self.run('bash -l -c "pacman --debug --noconfirm --ask 20 -Syuu"') # Normal update
self._kill_pacman()
self.run('bash -l -c "pacman --debug -Rc dash --noconfirm"')
except ConanException:
self.run('bash -l -c "cat /var/log/pacman.log || echo nolog"')
self._kill_pacman()
raise
# https://github.com/msys2/MSYS2-packages/issues/1966
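# pacman updates can leave pacman.exe, gpg-agent.exe and dirmngr.exe running with
# msys-2.0.dll loaded, which blocks follow-up operations, so they are force-killed here.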
def _kill_pacman(self):
if self.settings.os == "Windows":
taskkill_exe = os.path.join(os.environ.get('SystemRoot'), 'system32', 'taskkill.exe')
log_out = True
if log_out:
out = subprocess.PIPE
err = subprocess.STDOUT
else:
out = open(os.devnull, 'w')
err = subprocess.PIPE
if os.path.exists(taskkill_exe):
taskkill_cmds = [taskkill_exe + " /f /t /im pacman.exe",
taskkill_exe + " /f /im gpg-agent.exe",
taskkill_exe + " /f /im dirmngr.exe",
taskkill_exe + ' /fi "MODULES eq msys-2.0.dll"']
for taskkill_cmd in taskkill_cmds:
try:
proc = subprocess.Popen(taskkill_cmd, stdout=out, stderr=err, bufsize=1)
proc.wait()
except OSError as e:
if e.errno == errno.ENOENT:
raise ConanException("Cannot kill pacman")
@property
def _msys_dir(self):
subdir = "msys64"
return os.path.join(self.package_folder, "bin", subdir)
def source(self):
# sources differ per configuration - the download happens in build()
pass
def build(self):
tools.get(**self.conan_data["sources"][self.version],
destination=os.path.join(self.package_folder, "bin"))
with lock():
self._do_build()
def _do_build(self):
packages = []
if self.options.packages:
packages.extend(str(self.options.packages).split(","))
if self.options.additional_packages:
packages.extend(str(self.options.additional_packages).split(","))
self._update_pacman()
with tools.chdir(os.path.join(self._msys_dir, "usr", "bin")):
for package in packages:
self.run('bash -l -c "pacman -S %s --noconfirm"' % package)
for package in ['pkgconf']:
self.run('bash -l -c "pacman -Rs -d -d $(pacman -Qsq %s) --noconfirm"' % package)
self._kill_pacman()
# create /tmp dir in order to avoid
# bash.exe: warning: could not find /tmp, please create!
tmp_dir = os.path.join(self._msys_dir, 'tmp')
if not os.path.isdir(tmp_dir):
os.makedirs(tmp_dir)
tmp_name = os.path.join(tmp_dir, 'dummy')
with open(tmp_name, 'a'):
os.utime(tmp_name, None)
# Prepend any PKG_CONFIG_PATH already set in the environment to MSYS2's default PKG_CONFIG_PATH
tools.replace_in_file(os.path.join(self._msys_dir, "etc", "profile"),
'PKG_CONFIG_PATH="', 'PKG_CONFIG_PATH="$PKG_CONFIG_PATH:')
def package(self):
excludes = ()
if self.options.exclude_files:
excludes = tuple(str(self.options.exclude_files).split(","))
#self.copy("*", dst="bin", src=self._msys_dir, excludes=excludes)
for exclude in excludes:
for root, _, filenames in os.walk(self._msys_dir):
for filename in filenames:
fullname = os.path.join(root, filename)
if fnmatch.fnmatch(fullname, exclude):
os.unlink(fullname)
shutil.copytree(os.path.join(self._msys_dir, "usr", "share", "licenses"),
os.path.join(self.package_folder, "licenses"))
def package_info(self):
self.cpp_info.libdirs = []
msys_root = self._msys_dir
msys_bin = os.path.join(msys_root, "usr", "bin")
self.output.info("Creating MSYS_ROOT env var : %s" % msys_root)
self.env_info.MSYS_ROOT = msys_root
self.output.info("Creating MSYS_BIN env var : %s" % msys_bin)
self.env_info.MSYS_BIN = msys_bin
self.output.info("Appending PATH env var with : " + msys_bin)
self.env_info.path.append(msys_bin)
self.conf_info["tools.microsoft.bash:subsystem"] = "msys2"
self.conf_info["tools.microsoft.bash:path"] = os.path.join(msys_bin, "bash.exe")
| []
| []
| [
"SystemRoot"
]
| [] | ["SystemRoot"] | python | 1 | 0 | |
fluent_buddy/asgi.py | """
ASGI config for fluent_buddy project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fluent_buddy.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
cfs/post_process_batch_predictions/main.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
"""Google Cloud function code to extract periodic transactions from data source."""
import base64
import json
import logging
import os
import sys
from typing import Any, Dict, Optional
from google.cloud.functions_v1.context import Context
from google.cloud import bigquery
import google.cloud.logging
from custom_functions import hook_get_load_predictions_query
from custom_functions import hook_get_bq_schema
from custom_functions import hook_apply_formulas
from custom_functions import hook_on_completion
# Set-up logging
logger = logging.getLogger('predict_transactions_batch')
logger.setLevel(logging.DEBUG)
handler = None
if os.getenv('LOCAL_LOGGING'):
handler = logging.StreamHandler(sys.stderr)
else:
client = google.cloud.logging.Client()
handler = google.cloud.logging.handlers.CloudLoggingHandler(client)
logger.addHandler(handler)
BQ_LTV_GCP_PROJECT = str(os.getenv("BQ_LTV_GCP_PROJECT", ""))
BQ_LTV_DATASET = str(os.getenv("BQ_LTV_DATASET", ""))
BQ_LTV_PREDICTIONS_TABLE = str(
os.getenv("BQ_LTV_PREDICTIONS_TABLE", ""))
DATAFRAME_PROCESSING_ENABLED = os.getenv('DATAFRAME_PROCESSING_ENABLED', 'Y')
def _load_data_from_bq(query):
"""Loads all the transactions from the table.
Args:
query: A string with the query to run on the table
Returns:
A dataframe with all the table data
"""
job_config = bigquery.job.QueryJobConfig()
return bigquery.Client(project=BQ_LTV_GCP_PROJECT).query(query, job_config=job_config).to_dataframe()
def _write_to_bigquery(df, table_name):
"""Writes the given dataframe into the BQ table.
Args:
df: A pandas dataframe representing the data to be written
table_name: A string representing the full path of the destination BQ table
"""
dataframe = df
client = bigquery.Client(project=BQ_LTV_GCP_PROJECT)
job_config = bigquery.LoadJobConfig()
job_config.write_disposition = "WRITE_TRUNCATE"
job_config.schema = hook_get_bq_schema()
job = client.load_table_from_dataframe(
dataframe, table_name, job_config=job_config)
job.result()
table = client.get_table(table_name)
print("Loaded {} rows and {} columns to {}".format(table.num_rows,
len(table.schema),
table_name))
def _load_direct_to_bigquery(query, output_table):
"""Runs the load query and outputs the data directly to the next table in the pipeline.
Args:
query: string Prepared SQL query to load from the prediction output table
output_table: string Fully qualified name of the output BQ table where query
result is written.
Returns:
int Number of rows in the target table after job completion.
"""
# Set query to output directly to output table
job_config = bigquery.QueryJobConfig(
destination=output_table,
write_disposition='WRITE_TRUNCATE'
)
client = bigquery.Client()
job = client.query(query, job_config=job_config) # Make an API request.
job.result() # Wait for the job to complete.
table = client.get_table(output_table) # Make an API request.
print('Loaded {} rows and {} columns to {}'.format(table.num_rows,
len(table.schema),
output_table))
return table.num_rows
def _delete_dataset(dataset):
"""Deletes the dataset specified by the dataset parameter.
Args:
dataset: The name of the dataset to be deleted.
"""
client = bigquery.Client()
client.delete_dataset(
dataset, delete_contents=True, not_found_ok=True
)
def main(event: Dict[str, Any], context: Optional[Context] = None):
"""Checks if the data source table is available & no extract table generated.
Depending on the existence it will trigger the data transfer.
Args:
event (dict): The dictionary with data specific to this type of event.
The `data` field contains the PubsubMessage message. The `attributes`
field will contain custom attributes if there are any.
context (google.cloud.functions.Context): The Cloud Functions event
metadata. The `event_id` field contains the Pub/Sub message ID. The
`timestamp` field contains the publish time.
"""
del context
data = base64.b64decode(event["data"]).decode("utf-8")
msg = json.loads(data)
input_dataset = (msg['operation']
['outputInfo']['bigqueryOutputDataset']).split("://")[1]
input_table = f"""{input_dataset}.predictions_*"""
output_table = f'{BQ_LTV_GCP_PROJECT}.{BQ_LTV_DATASET}.{BQ_LTV_PREDICTIONS_TABLE}_{msg["date"]}'
dataframe_processing = not (DATAFRAME_PROCESSING_ENABLED == 'N')
query = hook_get_load_predictions_query(input_table)
if dataframe_processing:
_write_to_bigquery(
hook_apply_formulas(_load_data_from_bq(query)), output_table)
else:
_load_direct_to_bigquery(query, output_table)
_delete_dataset(input_dataset)
hook_on_completion(msg['date'])
def _test():
msg_data = base64.b64encode(bytes('{"payload": {"bq_input_to_predict_table": "decent-fulcrum-316414.test.filtered_periodic_transactions", "bq_output_table": "decent-fulcrum-316414.test.predictions", "date": "20210401", "operation": {"name": "projects/988912752389/locations/europe-west4/batchPredictionJobs/7138777155428679680", "displayName": "pablogil_test_pltv_batch_predict - 2021-06-17 13:27:04.054958", "model": "projects/988912752389/locations/europe-west4/models/7662206262901211136", "inputConfig": {"instancesFormat": "bigquery", "bigquerySource": {"inputUri": "bq://decent-fulcrum-316414.test.filtered_periodic_transactions_20210401"}}, "outputConfig": {"predictionsFormat": "bigquery", "bigqueryDestination": {"outputUri": "bq://decent-fulcrum-316414"}}, "dedicatedResources": {"machineSpec": {"machineType": "n1-highmem-8"}, "startingReplicaCount": 20, "maxReplicaCount": 20}, "manualBatchTuningParameters": {"batchSize": 100}, "outputInfo": {"bigqueryOutputDataset": "bq://decent-fulcrum-316414.prediction_automl_training_data_20200605_0608_2021_06_17T06_27_04_428Z"}, "state": "JOB_STATE_SUCCEEDED", "completionStats": {"successfulCount": "280"}, "createTime": "2021-06-17T13:27:04.571081Z", "startTime": "2021-06-17T13:27:05.550439Z", "endTime": "2021-06-17T13:44:29Z", "updateTime": "2021-06-17T13:45:41.481342Z"}}, "status_check_url": "https://europe-west4-aiplatform.googleapis.com/v1/projects/988912752389/locations/europe-west4/batchPredictionJobs/7138777155428679680", "success_topic": "pablogil_test.pltv.post_process_batch_predictions", "concurrent_slot_document": "pablogil_test_pltv_prediction_tracking/concurrent_document", "status_success_values": ["JOB_STATE_SUCCEEDED"], "status_error_values": ["JOB_STATE_FAILED", "JOB_STATE_EXPIRED"], "inserted_timestamp": "0", "error_topic": "pablogil_test.pltv.", "expiration_timestamp": "0", "status_field": "state", "updated_timestamp": "0", "source_topic": "pablogil_test.pltv.predict_transactions_batch"}'.encode("utf-8")))
main(
event={
"data": msg_data,
"attributes": {
"forwarded": "true"
}
},
context=None)
if __name__ == "__main__":
_test()
| []
| []
| [
"BQ_LTV_DATASET",
"LOCAL_LOGGING",
"BQ_LTV_PREDICTIONS_TABLE",
"DATAFRAME_PROCESSING_ENABLED",
"BQ_LTV_GCP_PROJECT"
]
| [] | ["BQ_LTV_DATASET", "LOCAL_LOGGING", "BQ_LTV_PREDICTIONS_TABLE", "DATAFRAME_PROCESSING_ENABLED", "BQ_LTV_GCP_PROJECT"] | python | 5 | 0 | |
vendor/code.cloudfoundry.org/cli/integration/push/docker_test.go | package push
import (
"os"
"code.cloudfoundry.org/cli/integration/helpers"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gbytes"
. "github.com/onsi/gomega/gexec"
)
var _ = Describe("pushing a docker image", func() {
var (
appName string
)
BeforeEach(func() {
appName = helpers.NewAppName()
})
Describe("a public docker image", func() {
Describe("app existence", func() {
Context("when the app does not exist", func() {
It("creates the app", func() {
session := helpers.CF(PushCommandName, appName, "-o", PublicDockerImage)
Eventually(session).Should(Say("Getting app info\\.\\.\\."))
Eventually(session).Should(Say("Creating app with these attributes\\.\\.\\."))
Eventually(session).Should(Say("\\+\\s+name:\\s+%s", appName))
Eventually(session).Should(Say("\\s+docker image:\\s+%s", PublicDockerImage))
Eventually(session).Should(Say("Mapping routes\\.\\.\\."))
helpers.ConfirmStagingLogs(session)
Eventually(session).Should(Say("Waiting for app to start\\.\\.\\."))
Eventually(session).Should(Say("requested state:\\s+started"))
Eventually(session).Should(Exit(0))
session = helpers.CF("app", appName)
Eventually(session).Should(Say("name:\\s+%s", appName))
Eventually(session).Should(Exit(0))
})
})
Context("when the app exists", func() {
BeforeEach(func() {
Eventually(helpers.CF(PushCommandName, appName, "-o", PublicDockerImage)).Should(Exit(0))
})
It("updates the app", func() {
session := helpers.CF(PushCommandName, appName, "-o", PublicDockerImage)
Eventually(session).Should(Say("Getting app info\\.\\.\\."))
Eventually(session).Should(Say("Updating app with these attributes\\.\\.\\."))
Eventually(session).Should(Say("\\s+name:\\s+%s", appName))
Eventually(session).Should(Say("\\s+docker image:\\s+%s", PublicDockerImage))
Eventually(session).Should(Say("Mapping routes\\.\\.\\."))
Eventually(session).Should(Say("Waiting for app to start\\.\\.\\."))
Eventually(session).Should(Say("requested state:\\s+started"))
Eventually(session).Should(Exit(0))
session = helpers.CF("app", appName)
Eventually(session).Should(Say("name:\\s+%s", appName))
Eventually(session).Should(Exit(0))
})
})
})
})
Describe("private docker image", func() {
var (
privateDockerImage string
privateDockerUsername string
privateDockerPassword string
)
BeforeEach(func() {
privateDockerImage = os.Getenv("CF_INT_DOCKER_IMAGE")
privateDockerUsername = os.Getenv("CF_INT_DOCKER_USERNAME")
privateDockerPassword = os.Getenv("CF_INT_DOCKER_PASSWORD")
if privateDockerImage == "" || privateDockerUsername == "" || privateDockerPassword == "" {
Skip("CF_INT_DOCKER_IMAGE, CF_INT_DOCKER_USERNAME, or CF_INT_DOCKER_PASSWORD is not set")
}
})
Context("when CF_DOCKER_PASSWORD is set", func() {
It("push the docker image with those credentials", func() {
session := helpers.CustomCF(
helpers.CFEnv{
EnvVars: map[string]string{"CF_DOCKER_PASSWORD": privateDockerPassword},
},
PushCommandName, "--docker-username", privateDockerUsername, "--docker-image", privateDockerImage, appName,
)
Eventually(session).Should(Say("Getting app info\\.\\.\\."))
Eventually(session).Should(Say("Creating app with these attributes\\.\\.\\."))
Eventually(session).Should(Say("\\+\\s+name:\\s+%s", appName))
Eventually(session).Should(Say("\\s+docker image:\\s+%s", privateDockerImage))
Eventually(session).Should(Say("Mapping routes\\.\\.\\."))
helpers.ConfirmStagingLogs(session)
Eventually(session).Should(Say("Waiting for app to start\\.\\.\\."))
Eventually(session).Should(Say("requested state:\\s+started"))
Eventually(session).Should(Exit(0))
})
})
})
})
| [
"\"CF_INT_DOCKER_IMAGE\"",
"\"CF_INT_DOCKER_USERNAME\"",
"\"CF_INT_DOCKER_PASSWORD\""
]
| []
| [
"CF_INT_DOCKER_USERNAME",
"CF_INT_DOCKER_PASSWORD",
"CF_INT_DOCKER_IMAGE"
]
| [] | ["CF_INT_DOCKER_USERNAME", "CF_INT_DOCKER_PASSWORD", "CF_INT_DOCKER_IMAGE"] | go | 3 | 0 | |
go/src/relparser/relfile/relfile.go | package relfile
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"os/exec"
"gopkg.in/yaml.v2"
)
//
// Utils
//
func LoadRelfile(path string) (*Relfile, error) {
var (
err error
c *exec.Cmd
f *os.File
buffer bytes.Buffer
rel Relfile
)
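// Expand $VAR / ${VAR} references in the Relfile with values from the process
// environment (via the perl filter below) before parsing the YAML.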
c = exec.Command("perl", "-pe", `s/\$(\{)?([a-zA-Z_]\w*)(?(1)\})/$ENV{$2}/g`)
f, err = os.Open(path)
if err != nil {
return nil, err
}
c.Stdin = f
c.Stdout = &buffer
err = c.Run()
if err != nil {
return nil, err
}
err = yaml.Unmarshal(buffer.Bytes(), &rel)
if err != nil {
return nil, err
}
return &rel, nil
}
//
// Relfile
//
type Relfile struct {
Version string `yaml:"version"`
Xcodeproj string `yaml:"xcodeproj"`
Workspace string `yaml:"workspace"`
Uploader map[string]interface{} `yaml:"uploader"`
LogFormatter string `yaml:"log_formatter"`
Distributions map[string]Distribution `yaml:"distributions"`
}
func (r Relfile) List() {
for k := range r.Distributions {
fmt.Println(k)
}
}
func (r Relfile) Check(dist string) {
d := r.Distributions[dist]
d.Check()
}
func (r Relfile) GenOptionsPlist(dist string, infoPlist, out string) {
d := r.Distributions[dist]
of := prepareFile(out)
defer of.Close()
d.writeExportOptions(infoPlist, of)
}
func (r Relfile) GenPlist(dist string, in string, out string) {
d := r.Distributions[dist]
of := prepareFile(out)
defer of.Close()
d.WriteInfoPlist(in, of)
}
func (r Relfile) GenSource(dist string) *os.File {
var (
temp *os.File
err error
)
d := r.Distributions[dist]
out := os.Getenv("REL_TEMP_DIR")
if out == "" {
temp, err = ioutil.TempFile("", "relax/"+dist+"_source")
} else {
temp, err = ioutil.TempFile(out, dist+"_source")
}
if err != nil {
logger.Fatal(err)
}
out = temp.Name()
of := prepareFile(out)
defer of.Close()
r.writeSource(of)
d.writeSource(dist, of)
return of
}
func (r Relfile) writeSource(out *os.File) {
var (
err error
source string
)
source += genSourceline("xcodeproj", r.Xcodeproj)
source += genSourceline("workspace", r.Workspace)
source += genSourceline("log_formatter", r.LogFormatter)
for k, v := range r.Uploader {
up := v.(map[interface{}]interface{})
for name, value := range up {
//fmt.Printf("---\t%v: %v\n", name, value)
source += genSourceLine2("uploader_"+k, name.(string), value.(string))
}
}
_, err = out.WriteString(source)
if err != nil {
logger.Fatal(err)
}
}
func (r Relfile) PrintBuildOptions(dist string) {
d := r.Distributions[dist]
d.BuildOptions.PrintBuildOptions()
}
| [
"\"REL_TEMP_DIR\""
]
| []
| [
"REL_TEMP_DIR"
]
| [] | ["REL_TEMP_DIR"] | go | 1 | 0 | |
iot/api-client/gcs_file_to_device/gcs_send_to_device_test.py | # Copyright 2018 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import tempfile
import time
from google.cloud import pubsub
from google.cloud import storage
# Add manager for bootstrapping device registry / device for testing
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'manager')) # noqa
import manager
import mock
import pytest
import requests
import gcs_send_to_device as gcs_to_device
gcs_bucket = os.environ['CLOUD_STORAGE_BUCKET']
project_id = os.environ['GCLOUD_PROJECT']
service_account_json = os.environ['GOOGLE_APPLICATION_CREDENTIALS']
topic_id = 'test-device-events-{}'.format(int(time.time()))
device_id = 'test-device-{}'.format(int(time.time()))
registry_id = 'test-registry-{}'.format(int(time.time()))
pubsub_topic = 'projects/{}/topics/{}'.format(project_id, topic_id)
cloud_region = 'us-central1'
destination_file_name = 'destination-file.bin'
gcs_file_name = 'my-config'
@pytest.fixture(scope='module')
def test_blob():
"""Provides a pre-existing blob in the test bucket."""
bucket = storage.Client().bucket(gcs_bucket)
# Name of the blob
blob = bucket.blob('iot_core_store_file_gcs')
# Text in the blob
blob.upload_from_string('This file on GCS will go to a device.')
yield blob
# Clean up
blob.delete()
@mock.patch('google.cloud.storage.client.Client.create_bucket')
def test_create_bucket(create_bucket_mock, capsys):
# Unlike other tests for sending a config, this one mocks out the creation
# because buckets are expensive, globally-namespaced objects.
create_bucket_mock.return_value = mock.sentinel.bucket
gcs_to_device.create_bucket(gcs_bucket)
create_bucket_mock.assert_called_with(gcs_bucket)
def test_upload_local_file(capsys):
# Creates a temporary source file that gets uploaded
# to GCS. All other tests use the blob in test_blob().
with tempfile.NamedTemporaryFile() as source_file:
source_file.write(b'This is a source file.')
gcs_to_device.upload_local_file(
gcs_bucket,
gcs_file_name,
source_file.name)
out, _ = capsys.readouterr()
assert 'File {} uploaded as {}.'.format(
source_file.name, gcs_file_name) in out
def test_make_file_public(test_blob):
gcs_to_device.make_file_public(
gcs_bucket,
test_blob.name)
r = requests.get(test_blob.public_url)
# Test for the content of the file to verify that
# it's publicly accessible.
assert r.text == 'This file on GCS will go to a device.'
def test_send_to_device(capsys):
manager.create_iot_topic(project_id, topic_id)
manager.open_registry(
service_account_json,
project_id,
cloud_region,
pubsub_topic,
registry_id)
manager.create_unauth_device(
service_account_json,
project_id,
cloud_region,
registry_id,
device_id)
gcs_to_device.send_to_device(
gcs_bucket,
gcs_file_name,
destination_file_name,
project_id,
cloud_region,
registry_id,
device_id,
service_account_json)
manager.delete_device(
service_account_json, project_id, cloud_region, registry_id,
device_id)
manager.delete_registry(
service_account_json, project_id, cloud_region, registry_id)
pubsub_client = pubsub.PublisherClient()
topic_path = pubsub_client.topic_path(project_id, topic_id)
pubsub_client.delete_topic(topic_path)
out, _ = capsys.readouterr()
assert 'Successfully sent file to device' in out
def test_get_state(capsys):
manager.create_iot_topic(project_id, topic_id)
manager.open_registry(
service_account_json,
project_id,
cloud_region,
pubsub_topic,
registry_id)
manager.create_unauth_device(
service_account_json,
project_id,
cloud_region,
registry_id,
device_id)
gcs_to_device.get_state(
service_account_json,
project_id,
cloud_region,
registry_id,
device_id)
manager.delete_device(
service_account_json, project_id, cloud_region, registry_id,
device_id)
manager.delete_registry(
service_account_json, project_id, cloud_region, registry_id)
pubsub_client = pubsub.PublisherClient()
topic_path = pubsub_client.topic_path(project_id, topic_id)
pubsub_client.delete_topic(topic_path)
out, _ = capsys.readouterr()
assert 'Id' in out
assert 'Config' in out
| []
| []
| [
"CLOUD_STORAGE_BUCKET",
"GCLOUD_PROJECT",
"GOOGLE_APPLICATION_CREDENTIALS"
]
| [] | ["CLOUD_STORAGE_BUCKET", "GCLOUD_PROJECT", "GOOGLE_APPLICATION_CREDENTIALS"] | python | 3 | 0 | |
scrapy/amazon_benchmark/amazon_benchmark/spiders/amazon.py | # -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
# Simple spider that starts at the home page, crawls product (../dp/..) pages
# and extracts a few fields
class AmazonSpider(CrawlSpider):
name = 'amazon'
allowed_domains = ['amazon.com']
start_urls = ['https://www.amazon.com/']
rules = (
Rule(LinkExtractor(allow=r'/dp/'), callback='parse_item', follow=True),
)
def parse_item(self, response):
try:
i = {}
i['name'] = response.xpath('//meta[@name="title"]/@content').extract_first()
i['id'] = response.xpath('//input[@id="ASIN"]/@value').extract_first()
i['url'] = response.xpath('//link[@rel="canonical"]/@href').extract_first()
i['price'] = response.xpath('//span[@id="priceblock_ourprice"]/text()').extract_first()
return i
except AttributeError:
pass
| []
| []
| []
| [] | [] | python | null | null | null |
macgraph/train.py |
import tensorflow as tf
from tensorflow.python import debug as tf_debug
from collections import namedtuple
from .estimator import get_estimator
from .input import gen_input_fn
from .args import *
# Make TF be quiet
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"]="2"
import logging
logger = logging.getLogger(__name__)
def train(args):
# So I don't frigging forget what caused working models
save_args(args)
if args["use_tf_debug"]:
hooks = [tf_debug.LocalCLIDebugHook()]
else:
hooks = []
train_size = sum(1 for _ in tf.python_io.tf_record_iterator(args["train_input_path"]))
tf.logging.info(f"Training on {train_size} records")
# ----------------------------------------------------------------------------------
training_segments = []
TrainingSegment = namedtuple('TrainingSegment', ['args', 'max_steps'])
if args["use_curriculum"]:
assert args["train_max_steps"] is not None, "Curriculum training requires --train-max-steps"
seg_steps = args["train_max_steps"] / float(args["max_decode_iterations"])
for i in range(1, args["max_decode_iterations"]+1):
seg_args = {**args}
seg_args["filter_output_class"] = [str(j) for j in list(range(i+1))]
total_seg_steps = i*seg_steps*1000
training_segments.append(TrainingSegment(seg_args, total_seg_steps))
else:
training_segments.append(TrainingSegment(args, args["train_max_steps"]*1000 if args["train_max_steps"] is not None else None))
for i in training_segments:
tf.logging.info(f"Begin training segment {i.max_steps} {i.args['filter_output_class']}")
estimator = get_estimator(i.args)
train_spec = tf.estimator.TrainSpec(
input_fn=gen_input_fn(i.args, "train"),
max_steps=int(i.max_steps),
hooks=hooks)
eval_spec = tf.estimator.EvalSpec(
input_fn=gen_input_fn(i.args, "eval"),
throttle_secs=i.args["eval_every"])
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
if __name__ == "__main__":
args = get_args()
# DO IT!
train(args)
| []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
tests/test_convlstm.py | import torch
from kurosal.models.sequencer import ConvLSTMCell
if __name__ == '__main__':
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '7'
cell = ConvLSTMCell(3, 3, 3, 0.1).cuda()
x = torch.Tensor(4, 3, 5, 5).cuda()
out, state = cell(x, None)
print(out, out.size())
out, state = cell(x, state)
print(out, out.size())
out, state = cell(x, None)
print(out.size())
out, state = cell(x, state)
print(out.size())
import pdb
pdb.set_trace()
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
sumo/tests/netedit/shapeElements/poly/create/test.sikuli/test.py | #!/usr/bin/env python
"""
@file test.py
@author Pablo Alvarez Lopez
@date 2016-11-25
@version $Id: test.py 25910 2017-09-07 13:49:36Z namdre $
python script used by sikulix for testing netedit
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2009-2017 DLR/TS, Germany
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
# import common functions for netedit tests
import os
import sys
testRoot = os.path.join(os.environ.get('SUMO_HOME', '.'), 'tests')
neteditTestRoot = os.path.join(
os.environ.get('TEXTTEST_HOME', testRoot), 'netedit')
sys.path.append(neteditTestRoot)
import neteditTestFunctions as netedit # noqa
# Open netedit
neteditProcess, match = netedit.setupAndStart(neteditTestRoot)
# go to shape mode
netedit.shapeMode()
# go to shape mode
netedit.changeShape("poly")
# change color using dialog
netedit.changeColorUsingDialog(2, 5)
# create polygon
netedit.createSquaredPoly(match, 100, 50, 100, True)
# change color manually (invalid)
netedit.modifyShapeDefaultValue(3, "Vlue")
# try to create polygon
netedit.createSquaredPoly(match, 200, 50, 100, True)
# change color manually (valid)
netedit.modifyShapeDefaultValue(3, "red")
# create polygon
netedit.createSquaredPoly(match, 200, 50, 100, True)
# change block
netedit.modifyShapeDefaultBoolValue(4)
# create polygon
netedit.createSquaredPoly(match, 300, 50, 100, True)
# change layer (invalid)
netedit.modifyShapeDefaultValue(5, "dummyLayer")
# try to create polygon
netedit.createSquaredPoly(match, 400, 50, 100, True)
# change layer (valid)
netedit.modifyShapeDefaultValue(5, "2")
# create polygon
netedit.createSquaredPoly(match, 400, 50, 100, True)
# change type
netedit.modifyShapeDefaultValue(6, "poly_test")
# create polygon
netedit.createSquaredPoly(match, 500, 50, 100, True)
# change imgfile (invalid)
netedit.modifyShapeDefaultValue(7, "%%$%$&$%$%$")
# try to create polygon
netedit.createSquaredPoly(match, 600, 50, 100, True)
# change imgfile (inexistent)
netedit.modifyShapeDefaultValue(7, "paris.ico")
# try to create polygon
netedit.createSquaredPoly(match, 600, 50, 100, True)
# change imgfile (valid)
netedit.modifyShapeDefaultValue(7, "berlin_icon.ico")
# create polygon
netedit.createSquaredPoly(match, 600, 50, 100, True)
# reset imgfile
netedit.modifyShapeDefaultValue(7, "")
# change angle (invalid)
netedit.modifyShapeDefaultValue(8, "dummyAngle")
# try to create polygon
netedit.createSquaredPoly(match, 100, 150, 100, True)
# change angle (valid, but > 360)
netedit.modifyShapeDefaultValue(8, "365")
# try to create polygon
netedit.createSquaredPoly(match, 100, 150, 100, True)
# change angle (valid, < 0)
netedit.modifyShapeDefaultValue(8, "-5")
# create polygon
netedit.createSquaredPoly(match, 200, 150, 100, True)
# change angle (0 < angle < 360)
netedit.modifyShapeDefaultValue(8, "5")
# create polygon
netedit.createSquaredPoly(match, 300, 150, 100, True)
# reset angle
netedit.modifyShapeDefaultValue(8, "0")
# change block move
netedit.modifyShapeDefaultBoolValue(10)
# create polygon
netedit.createSquaredPoly(match, 400, 150, 100, True)
# change block shape
netedit.modifyShapeDefaultBoolValue(11)
# create polygon
netedit.createSquaredPoly(match, 500, 150, 100, True)
# reset block shape
netedit.modifyShapeDefaultBoolValue(11)
# create open polygon
netedit.createSquaredPoly(match, 600, 150, 100, False)
# force close polygon
netedit.modifyShapeDefaultBoolValue(12)
# create close polygon with force close enabled
netedit.createSquaredPoly(match, 100, 350, 100, True)
# create open polygon with force close enabled (will be closed)
netedit.createSquaredPoly(match, 200, 350, 100, True)
# create polygon clicking in the same points
netedit.createSquaredPoly(match, 300, 350, 0, True)
# create rectangled polygon with area 0
netedit.createRectangledPoly(match, 400, 350, 100, 0, True)
# create open line polygon
netedit.createLinePoly(match, 500, 350, 100, 100, True)
# Check undo redo
netedit.undo(match, 17)
netedit.redo(match, 17)
# save shapes
netedit.saveShapes()
# save network
netedit.saveNetwork()
# quit netedit
netedit.quit(neteditProcess)
| []
| []
| [
"SUMO_HOME",
"TEXTTEST_HOME"
]
| [] | ["SUMO_HOME", "TEXTTEST_HOME"] | python | 2 | 0 | |
ppqm/xtb.py | """
xTB wrapper functions
"""
import copy
import functools
import logging
import multiprocessing
import os
import pathlib
import shutil
import tempfile
from collections import ChainMap
import numpy as np
from tqdm import tqdm
import rmsd
from ppqm import chembridge, constants, env, linesio, misc, shell, units
from ppqm.calculator import BaseCalculator
XTB_CMD = "xtb"
XTB_FILENAME = "_tmp_xtb_input.xyz"
XTB_FILES = ["charges", "wbo", "xtbrestart", "xtbopt.coord", "xtbopt.log"]
COLUMN_ENERGY = "total_energy"
COLUMN_COORD = "coord"
COLUMN_ATOMS = "atoms"
COLUMN_GSOLV = "gsolv"
COLUMN_DIPOLE = "dipole"
COLUMN_CONVERGED = "is_converged"
COLUMN_STEPS = "opt_steps"
_logger = logging.getLogger("xtb")
class XtbCalculator(BaseCalculator):
"""
TODO Add options documentation for XTB calculations
"""
def __init__(
self,
cmd=XTB_CMD,
filename=XTB_FILENAME,
show_progress=False,
n_cores=None,
**kwargs,
):
super().__init__(**kwargs)
self.cmd = cmd
self.filename = filename
self.n_cores = n_cores
self.show_progress = show_progress
self.xtb_options = {
"cmd": self.cmd,
"scr": self.scr,
"filename": self.filename,
}
# Default xtb options
self.options = {}
# Check version and command
self.health_check()
def health_check(self):
assert env.which(self.cmd), f"Cannot find {self.cmd}"
stdout, stderr = shell.execute(f"{self.cmd} --version")
try:
stdout = stdout.split("\n")
stdout = [x for x in stdout if "*" in x]
version = stdout[0].strip()
version = version.split()
version = version[3]
version = version.split(".")
major, minor, patch = version
except Exception:
assert False, "too old xtb version"
assert (int(major), int(minor)) >= (6, 4), "too old xtb version"
def _generate_options(self, optimize=True, hessian=False, gradient=False):
# TODO
options = ...
return options
def calculate(self, molobj, options, **kwargs):
# Merge options
options_prime = ChainMap(options, self.options)
options_prime = dict(options_prime)
if self.n_cores and self.n_cores > 1:
results = self.calculate_parallel(molobj, options_prime, **kwargs)
else:
results = self.calculate_serial(molobj, options_prime, **kwargs)
return results
def calculate_serial(self, molobj, options, **kwargs):
properties_list = []
n_confs = molobj.GetNumConformers()
atoms, _, charge = chembridge.get_axyzc(molobj, atomfmt=str)
if self.show_progress:
pbar = tqdm(
total=n_confs,
desc="xtb(1)",
**constants.TQDM_OPTIONS,
)
for conf_idx in range(n_confs):
coord = chembridge.get_coordinates(molobj, confid=conf_idx)
properties = get_properties_from_axyzc(
atoms, coord, charge, options, **self.xtb_options, **kwargs
)
properties_list.append(properties)
if self.show_progress:
pbar.update(1)
if self.show_progress:
pbar.close()
return properties_list
def calculate_parallel(self, molobj, options, n_cores=None):
_logger.debug("start xtb multiprocessing pool")
if not n_cores:
n_cores = self.n_cores
atoms, _, charge = chembridge.get_axyzc(molobj, atomfmt=str)
n_conformers = molobj.GetNumConformers()
coordinates_list = [
np.asarray(conformer.GetPositions()) for conformer in molobj.GetConformers()
]
n_procs = min(n_cores, n_conformers)
results = []
func = functools.partial(
get_properties_from_acxyz, atoms, charge, options=options, **self.xtb_options
)
results = misc.func_parallel(
func,
coordinates_list,
n_cores=n_procs,
n_jobs=n_conformers,
show_progress=self.show_progress,
title="XTB",
)
return results
def __repr__(self):
return f"XtbCalc(cmd={self.cmd},scr={self.scr},n_cores={self.n_cores})"
def clean_dir(scr="./"):
suffix = "/"
if not scr.endswith(suffix):
scr += suffix
# TODO delete all tmp files made by xtb
return
def health_check(config=None, cmd=XTB_CMD):
"""
INCOMPLETE
"""
path = env.which(cmd)
assert path is not None, f"{cmd} was not found in your environment"
return True
def get_properties_from_molobj(molobj, show_progress=True, **kwargs):
"""
INCOMPLETE
"""
n_conformers = molobj.GetNumConformers()
if n_conformers == 0:
raise ValueError("No conformers found in molecule")
properties_list = []
atoms, _, charge = chembridge.get_axyzc(molobj, confid=-1, atomfmt=str)
if show_progress:
pbar = tqdm(total=n_conformers, desc="XTB", **constants.TQDM_OPTIONS)
# For conformers
for conformer in molobj.GetConformers():
coordinates = conformer.GetPositions()
coordinates = np.array(coordinates)
properties = get_properties_from_axyzc(atoms, coordinates, charge, **kwargs)
properties_list.append(properties)
if show_progress:
pbar.update(1)
if show_progress:
pbar.close()
return properties_list
def get_properties_from_molobj_parallel(
molobj, show_progress=True, n_cores=1, scr=None, options={}
):
worker_kwargs = {"scr": scr, "n_cores": 1, "options": options}
atoms, _, charge = chembridge.get_axyzc(molobj, atomfmt=str)
n_conformers = molobj.GetNumConformers()
coordinates_list = [
np.asarray(conformer.GetPositions()) for conformer in molobj.GetConformers()
]
n_procs = min(n_cores, n_conformers)
results = []
func = functools.partial(get_properties_from_acxyz, atoms, charge, **worker_kwargs)
results = misc.func_parallel(
func,
coordinates_list,
n_cores=n_procs,
show_progress=show_progress,
title="XTB",
)
return results
def get_output_from_axyzc(
atoms_str,
coordinates,
charge,
options=None,
scr=constants.SCR,
use_tempfile=True,
clean_tempfile=True,
cmd=XTB_CMD,
filename="_tmp_xtb_input.xyz",
n_cores=1,
):
""" NOT DONE """
lines = ...
return lines
def get_properties_from_acxyz(atoms, charge, coordinates, **kwargs):
""" get properties from atoms, charge and coordinates """
return get_properties_from_axyzc(atoms, coordinates, charge, **kwargs)
def get_properties_from_axyzc(
atoms_str,
coordinates,
charge,
options=None,
scr=constants.SCR,
clean_files=True,
cmd=XTB_CMD,
filename="_tmp_xtb_input.xyz",
n_cores=1,
n_threads=1,
**kwargs,
):
"""Get XTB properties from atoms, coordinates and charge for a molecule."""
assert health_check(cmd=cmd)
if isinstance(scr, str):
scr = pathlib.Path(scr)
if not filename.endswith(".xyz"):
filename += ".xyz"
temp_scr = tempfile.mkdtemp(dir=scr, prefix="xtb_")
temp_scr = pathlib.Path(temp_scr)
xtb_cmd = cmd
# Write input file
inputstr = rmsd.set_coordinates(atoms_str, coordinates, title="xtb input")
with open(temp_scr / filename, "w") as f:
f.write(inputstr)
# Set charge in file
with open(temp_scr / ".CHRG", "w") as f:
f.write(str(charge))
# Overwrite threads
env.set_threads(n_threads)
# Run subprocess command
cmd = [cmd, f"{filename}"]
if options is not None:
cmd += parse_options(options)
# Merge to string
cmd = " ".join(cmd)
cmd = f"cd {temp_scr}; " + cmd
_logger.debug(cmd)
lines = shell.stream(cmd)
lines = list(lines)
error_pattern = "abnormal termination of xtb"
idx = linesio.get_rev_index(lines, error_pattern, stoppattern="#")
if idx is not None:
_logger.critical(error_pattern)
idx = linesio.get_rev_index(lines, "ERROR")
if idx is None:
_logger.critical("could not read error message")
else:
for line in lines[idx + 1 : -2]:
_logger.critical(line.strip())
_logger.critical(cmd)
_logger.critical("xtbexec " + env.which(xtb_cmd))
_logger.critical("xtbpath " + os.environ.get("XTBPATH", ""))
_logger.critical("xtbhome " + os.environ.get("XTBHOME", ""))
return None
# Parse properties from xtb output
properties = read_properties(lines, options=options, scr=temp_scr)
# clean your room
if clean_files:
shutil.rmtree(temp_scr)
return properties
def calculate(molobj, confid=-1, show_progress=True, return_copy=True, **kwargs):
"""
INCOMPLETE
"""
# TODO Get coordinates
atoms, coordinates, charge = chembridge.get_axyzc(molobj, confid=confid, atomfmt=int)
properties = ...
if return_copy:
molobj = copy.deepcopy(molobj)
n_conf = molobj.GetNumConformers()
assert n_conf > 0, "No conformers to optimize"
energies = np.zeros(n_conf)
if show_progress:
pbar = tqdm(total=n_conf, desc="XTB", **constants.TQDM_OPTIONS)
# Iterate over conformers and optimize with xtb
for i in range(n_conf):
atoms_str, coords, charge = chembridge.get_axyzc(molobj, confid=i, atomfmt=str)
properties = get_properties_from_axyzc(atoms_str, coords, charge=charge, **kwargs)
assert properties is not None, "Properties should never be None"
coord = properties[COLUMN_COORD]
chembridge.set_coordinates(molobj, coord, confid=i)
if COLUMN_ENERGY in properties:
total_energy = properties[COLUMN_ENERGY]
else:
total_energy = float("nan")
energies[i] = total_energy
if show_progress:
pbar.update(1)
if show_progress:
pbar.close()
return properties
def optimize_molobj(molobj, return_copy=True, show_progress=True, **kwargs):
"""
DEPRECATED
TODO Should move into using calculate_
TODO Embed energy into conformer?
"""
if return_copy:
molobj = copy.deepcopy(molobj)
n_conf = molobj.GetNumConformers()
assert n_conf > 0, "No conformers to optimize"
energies = np.zeros(n_conf)
if show_progress:
pbar = tqdm(total=n_conf, desc="XTB", **constants.TQDM_OPTIONS)
# Iterate over conformers and optimize with xtb
for i in range(n_conf):
atoms_str, coords, charge = chembridge.get_axyzc(molobj, confid=i, atomfmt=str)
properties = get_properties_from_axyzc(atoms_str, coords, charge=charge, **kwargs)
assert properties is not None, "Properties should never be None"
coord = properties[COLUMN_COORD]
chembridge.set_coordinates(molobj, coord, confid=i)
if COLUMN_ENERGY in properties:
total_energy = properties[COLUMN_ENERGY]
else:
total_energy = float("nan")
energies[i] = total_energy
if show_progress:
pbar.update(1)
if show_progress:
pbar.close()
return molobj, energies
def _worker_calculate_molobj(job, atoms=None, charge=None, **kwargs):
""" INCOMPLETE """
# Get job info
i, coord = job
# Get process information
current = multiprocessing.current_process()
pid = current.name
pid = current._identity
if len(pid) == 0:
pid = 1
else:
pid = pid[0]
pid = str(pid)
scr = kwargs.get("scr")
scr = os.path.join(scr, pid)
kwargs["scr"] = scr
pathlib.Path(scr).mkdir(parents=True, exist_ok=True)
# Ensure only one thread per procs
kwargs["n_cores"] = 1
# TODO Should be general calculate with kwargs deciding to optimize
properties = get_properties_from_axyzc(atoms, coord, charge=charge, **kwargs)
return (i, properties)
def _worker_calculate_axyzc(job, **kwargs):
""" INCOMPLETE """
atoms, coord, charge = job
# Get process information
current = multiprocessing.current_process()
pid = current.name
pid = current._identity
if len(pid) == 0:
pid = 1
else:
pid = pid[0]
pid = str(pid)
pid = f"_calcxtb_{pid}"
scr = kwargs.get("scr")
scr = os.path.join(scr, pid)
kwargs["scr"] = scr
pathlib.Path(scr).mkdir(parents=True, exist_ok=True)
# Ensure only one thread per procs
kwargs["n_cores"] = 1
properties = get_properties_from_axyzc(atoms, coord, charge, **kwargs)
return properties
def procs_calculate_axyzc(molecules, n_cores=-1, show_progress=True, scr=None, cmd=XTB_CMD):
"""
INCOMPLETE
Start multiple subprocess over n_cores
"""
results = None
return results
def parallel_calculate_axyzc(
molecules,
options=None,
n_cores=-1,
show_progress=True,
scr=None,
cmd=XTB_CMD,
):
"""
INCOMPLETE
From lists of atoms, coords and charges. Return properties(dict) per
molecule.
:param molecules: List[Tuple[List[], array, int]]
"""
if scr is None:
scr = "_tmp_xtb_parallel_"
if n_cores == -1:
n_cores = env.get_available_cores()
# Ensure scratch directories
pathlib.Path(scr).mkdir(parents=True, exist_ok=True)
if show_progress:
pbar = tqdm(
total=len(molecules),
desc=f"XTB Parallel({n_cores})",
**constants.TQDM_OPTIONS,
)
# Pool
xtb_options = {"scr": scr, "cmd": cmd, "options": options}
# TODO Add this worker test to test_xtb
# TEST
# properties = _worker_calculate_axyzc(
# molecules[0],
# debug=True,
# super_debug=True,
# **options
# )
# print(properties)
# assert False
func = functools.partial(_worker_calculate_axyzc, **xtb_options)
p = multiprocessing.Pool(processes=n_cores)
try:
results_iter = p.imap(func, molecules, chunksize=1)
results = []
for result in results_iter:
if COLUMN_ENERGY not in result:
result[COLUMN_ENERGY] = float("nan")
# Update the progress bar
if show_progress:
pbar.update(1)
results.append(result)
except KeyboardInterrupt:
misc.eprint("got ^C while running pool of XTB workers...")
p.terminate()
except Exception as e:
misc.eprint("got exception: %r, terminating the pool" % (e,))
p.terminate()
finally:
p.terminate()
# End the progress
if show_progress:
pbar.close()
# TODO Clean scr dir for parallel folders, is the parallel folders needed
# if we use tempfile?
return results
def parallel_calculate_molobj(
molobj,
return_molobj=True,
return_copy=True,
return_energies=True,
return_properties=False,
update_coordinates=True,
**kwargs,
):
"""
INCOMPLETE
"""
if return_copy:
molobj = copy.deepcopy(molobj)
num_conformers = molobj.GetNumConformers()
assert num_conformers > 0, "No conformers to calculate"
atoms, _, charge = chembridge.get_axyzc(molobj, atomfmt=str)
jobs = []
for i in range(num_conformers):
conformer = molobj.GetConformer(id=i)
coordinates = conformer.GetPositions()
coordinates = np.array(coordinates)
jobs.append((atoms, coordinates, charge))
results = parallel_calculate_axyzc(jobs, **kwargs)
if update_coordinates:
for i, result in enumerate(results):
coordinates = result.get(COLUMN_COORD, None)
if coordinates is None:
continue
chembridge.set_coordinates(molobj, coordinates, confid=i)
ret = tuple()
if return_molobj:
ret += (molobj,)
if return_energies:
energies = [result[COLUMN_ENERGY] for result in results]
energies = np.array(energies)
ret += (energies,)
if return_properties:
ret += (results,)
return ret
# Readers
def read_status(lines):
""" Return True if the xtb run terminated normally, False if a fatal error was reported """
keywords = [
"Program stopped due to fatal error",
"abnormal termination of xtb",
]
stoppattern = "normal termination of xtb"
idxs = linesio.get_rev_indices_patterns(lines, keywords, stoppattern=stoppattern)
for idx in idxs:
if idx is not None:
return False
return True
def parse_sum_table(lines):
""" Parse the ':: SUMMARY ::' energy table from xtb output into a dict of floats """
properties = dict()
for line in lines:
if ":::" in line:
continue
if "..." in line:
continue
# Needs a loop break when the Hessian is computed.
if "Hessian" in line:
break
line = (
line.replace("w/o", "without")
.replace(":", "")
.replace("->", "")
.replace("/", "_")
.strip()
)
line = line.split()
if len(line) < 2:
continue
value = line[-2]
value = float(value)
# unit = line[-1]
name = line[:-2]
name = "_".join(name).lower()
name = name.replace("-", "_").replace(".", "")
properties[name] = float(value)
return properties
def read_properties(lines, options=None, scr=None):
""" Read xtb output, selecting the parser based on the calculation options """
reader = None
read_files = True
if options is None:
reader = read_properties_opt
elif "vfukui" in options:
reader = read_properties_fukui
read_files = False
elif "vomega" in options:
reader = read_properties_omega
read_files = False
elif "opt" in options or "ohess" in options:
reader = read_properties_opt
else:
reader = read_properties_sp
properties = reader(lines)
if scr is not None and read_files:
# Parse file properties
charges = get_mulliken_charges(scr=scr)
bonds, bondorders = get_wbo(scr=scr)
properties["mulliken_charges"] = charges
properties.update(get_cm5_charges(lines)) # Can return {} if not GFN1
properties["bonds"] = bonds
properties["bondorders"] = bondorders
if "vibspectrum" in os.listdir(scr):
properties["frequencies"] = get_frequencies(scr=scr)
return properties
def read_properties_sp(lines):
"""
TODO read dipole moment
TODO Include units in docstring
TODO GEOMETRY OPTIMIZATION CONVERGED AFTER 48 ITERATIONS
electronic_energy is the SCC energy
"""
# TODO Better logging for crashed xtb
if not read_status(lines):
return None
keywords = [
"final structure:",
":: SUMMARY ::",
"Property Printout ",
"ITERATIONS",
]
stoppattern = "CYCLE "
idxs = linesio.get_rev_indices_patterns(lines, keywords, stoppattern=stoppattern)
idxs[0]
idx_summary = idxs[1]
idx_end_summary = idxs[2]
idxs[3]
if idx_summary is None:
# TODO Better fix
assert False, "uncaught xtb exception"
# Get atom count
keyword = "number of atoms"
idx = linesio.get_index(lines, keyword)
line = lines[idx]
n_atoms = line.split()[-1]
n_atoms = int(n_atoms)
# Get energies
idx_summary = idxs[1] + 1
# :: total energy +1
# :: total w/o Gsasa/hb +2
# :: gradient norm +3
# :: HOMO-LUMO gap +4
# ::.....................+4
# :: SCC energy +5
# :: -> isotropic ES +6
# :: -> anisotropic ES +7
# :: -> anisotropic XC +8
# :: -> dispersion +9
# :: -> Gsolv +10
# :: -> Gborn +11
# :: -> Gsasa +12
# :: -> Ghb +13
# :: -> Gshift +14
# :: repulsion energy +15
# :: add. restraining +16
prop_lines = lines[idx_summary : idx_end_summary - 2]
prop_dict = parse_sum_table(prop_lines)
# total_energy = prop_dict.get("total_energy", float("nan"))
# gsolv = prop_dict.get("gsolv", float("nan"))
# electronic_energy = prop_dict.get("scc_energy", float("nan"))
properties = prop_dict
# Get dipole
dipole_str = "molecular dipole:"
idx = linesio.get_rev_index(lines, dipole_str)
if idx is None:
dipole_tot = None
else:
idx += 3
line = lines[idx]
line = line.split()
dipole_tot = line[-1]
dipole_tot = float(dipole_tot)
properties = {
COLUMN_DIPOLE: dipole_tot,
**properties,
}
# Get covalent properties
properties_covalent = read_covalent_coordination(lines)
# Get orbitals
properties_orbitals = read_properties_orbitals(lines)
properties = {**properties, **properties_orbitals, **properties_covalent}
return properties
def read_properties_opt(lines, convert_coords=False, debug=False):
"""
TODO read dipole moment
TODO Include units in docstring
TODO GEOMETRY OPTIMIZATION CONVERGED AFTER 48 ITERATIONS
electronic_energy is the SCC energy
"""
# TODO Better logging for crashed xtb
if not read_status(lines):
return None
keywords = [
"final structure:",
":: SUMMARY ::",
"Property Printout ",
"ITERATIONS",
]
stoppattern = "CYCLE "
idxs = linesio.get_rev_indices_patterns(lines, keywords, stoppattern=stoppattern)
idx_coord = idxs[0]
idx_summary = idxs[1]
idx_end_summary = idxs[2]
idx_optimization = idxs[3]
if idx_summary is None:
assert False, "Uncaught xtb exception. Please submit issue with calculation"
# Get atom count
keyword = "number of atoms"
idx = linesio.get_index(lines, keyword)
line = lines[idx]
n_atoms = line.split()[-1]
n_atoms = int(n_atoms)
# Get coordinates
if idx_coord is None:
coords = None
atoms = None
else:
def parse_coordline(line):
line = line.split()
atom = line[0]
coord = [float(x) for x in line[1:]]
return atom, coord
atoms = []
coords = []
for i in range(idx_coord + 4, idx_coord + 4 + n_atoms):
line = lines[i]
atom, coord = parse_coordline(line)
atoms.append(atom)
coords.append(coord)
atoms = np.array(atoms)
coords = np.array(coords)
if convert_coords:
coords *= units.bohr_to_aangstroem
# Get energies
idx_summary = idxs[1] + 1
# :: total energy +1
# :: total w/o Gsasa/hb +2
# :: gradient norm +3
# :: HOMO-LUMO gap +4
# ::.....................+4
# :: SCC energy +5
# :: -> isotropic ES +6
# :: -> anisotropic ES +7
# :: -> anisotropic XC +8
# :: -> dispersion +9
# :: -> Gsolv +10
# :: -> Gborn +11
# :: -> Gsasa +12
# :: -> Ghb +13
# :: -> Gshift +14
# :: repulsion energy +15
# :: add. restraining +16
prop_lines = lines[idx_summary : idx_end_summary - 2]
prop_dict = parse_sum_table(prop_lines)
# total_energy = prop_dict.get("total_energy", float("nan"))
# gsolv = prop_dict.get("gsolv", float("nan"))
# electronic_energy = prop_dict.get("scc_energy", float("nan"))
properties = prop_dict
# Get dipole
dipole_str = "molecular dipole:"
idx = linesio.get_rev_index(lines, dipole_str)
if idx is None:
dipole_tot = None
else:
idx += 3
line = lines[idx]
line = line.split()
dipole_tot = line[-1]
dipole_tot = float(dipole_tot)
if idx_optimization is None:
is_converged = None
n_cycles = None
else:
line = lines[idx_optimization]
if "FAILED" in line:
is_converged = False
else:
is_converged = True
line = line.split()
n_cycles = line[-3]
n_cycles = int(n_cycles)
# Get covCN and alpha
properties_covalent = read_covalent_coordination(lines)
properties = {
COLUMN_ATOMS: atoms,
COLUMN_COORD: coords,
COLUMN_DIPOLE: dipole_tot,
COLUMN_CONVERGED: is_converged,
COLUMN_STEPS: n_cycles,
**properties_covalent,
**properties,
}
return properties
def read_properties_omega(lines):
"""
Format:
Calculation of global electrophilicity index (IP+EA)²/(8·(IP-EA))
Global electrophilicity index (eV): 0.0058
"""
keywords = ["Global electrophilicity index"]
indices = linesio.get_rev_indices_patterns(lines, keywords)
if indices[0] is None:
return None
line = lines[indices[0]]
line = line.split()
global_index = line[-1]
global_index = float(global_index)
properties = {"global_electrophilicity_index": global_index}
return properties
def read_properties_fukui(lines):
"""
Read the Fukui properties from the XTB log
format:
# f(+) f(-) f(0)
1O -0.086 -0.598 -0.342
2H -0.457 -0.201 -0.329
3H -0.457 -0.201 -0.329
"""
keywords = ["Fukui index Calculation", "f(+)", "Property Printout"]
indices = linesio.get_rev_indices_patterns(lines, keywords)
if indices[0] is None:
return None
start_index = indices[1]
end_index = indices[2]
f_plus_list = []
f_minus_list = []
f_zero_list = []
for i in range(start_index + 1, end_index - 1):
line = lines[i]
line = line.split()
f_plus = float(line[1])
f_minus = float(line[2])
f_zero = float(line[3])
f_plus_list.append(f_plus)
f_minus_list.append(f_minus)
f_zero_list.append(f_zero)
f_plus_list = np.array(f_plus_list)
f_minus_list = np.array(f_minus_list)
f_zero_list = np.array(f_zero_list)
properties = {
"f_plus": f_plus_list,
"f_minus": f_minus_list,
"f_zero": f_zero_list,
}
return properties
def get_mulliken_charges(scr=None):
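""" Read Mulliken charges from the 'charges' file written by xtb in the scratch directory """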
if scr is None:
scr = pathlib.Path(".")
filename = scr / "charges"
if not filename.is_file():
return None
# read charges files from work dir
charges = np.loadtxt(filename)
return charges
def get_cm5_charges(lines):
""" Get CM5 charges from gfn1-xTB calculation """
keywords = ["Mulliken/CM5 charges", "Wiberg/Mayer (AO) data"]
start, stop = linesio.get_rev_indices_patterns(lines, keywords)
if start is None: # No CM5 charges -> not GFN1 calculation
return {}
cm5_charges = []
for line in lines[start + 1 : stop]:
if (line := line.strip()):
cm5_charges.append(float(line.split()[2]))
return {"cm5_charges": cm5_charges}
def get_wbo(scr=None):
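""" Read bonds and Wiberg bond orders from the 'wbo' file written by xtb """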
if scr is None:
scr = pathlib.Path(".")
filename = scr / "wbo"
if not filename.is_file():
return None
# Read WBO file
with open(filename, "r") as f:
lines = f.readlines()
bonds, bondorders = read_wbo(lines)
return bonds, bondorders
def read_wbo(lines):
""" Parse bond pairs and Wiberg bond orders from the lines of an xtb 'wbo' file """
# keyword = "Wiberg bond orders"
bonds = []
bondorders = []
for line in lines:
parts = line.strip().split()
bondorders.append(float(parts[-1]))
parts = parts[:2]
parts = [int(x) - 1 for x in parts]
parts = (min(parts), max(parts))
bonds.append(parts)
return bonds, bondorders
def read_properties_orbitals(lines, n_offset=2):
"""
format:
# Occupation Energy/Eh Energy/eV
-------------------------------------------------------------
... ... ... ...
62 2.0000 -0.3635471 -9.8926
63 2.0000 -0.3540913 -9.6353 (HOMO)
64 -0.2808508 -7.6423 (LUMO)
... ... ... ...
"""
properties = dict()
keywords = ["(HOMO)", "(LUMO)"]
indices = linesio.get_rev_indices_patterns(lines, keywords)
if indices[0] is None:
return None
idx_homo = indices[0]
idx_lumo = indices[1]
# check if this is the right place
if idx_homo - idx_lumo != -1:
return None
# HOMO
line = lines[idx_homo]
line = line.split()
energy_homo = float(line[2])
properties["homo"] = energy_homo
# HOMO Offsets
for i in range(n_offset):
line = lines[idx_homo - (i + 1)]
line = line.strip().split()
if len(line) < 3:
continue
value = line[2]
properties[f"homo-{i+1}"] = float(value)
# LUMO
line = lines[idx_lumo]
line = line.split()
idx_lumo_col = 1
energy_lumo = float(line[idx_lumo_col])
properties["lumo"] = energy_lumo
# Lumo Offsets
for i in range(n_offset):
line = lines[idx_lumo + (i + 1)]
line = line.strip().split()
if len(line) < 3:
continue
value = line[idx_lumo_col]
properties[f"lumo+{i+1}"] = float(value)
return properties
def read_covalent_coordination(lines):
"""
Read computed covalent coordination number.
format:
# Z covCN q C6AA α(0)
1 6 C 3.743 -0.105 22.589 6.780
2 6 C 3.731 0.015 20.411 6.449
3 7 N 2.732 -0.087 22.929 7.112
...
Mol. C6AA /au·bohr
"""
properties = {"covCN": [], "alpha": []}
if (start_line := linesio.get_rev_index(lines, "covCN")) is None:
properties["covCN"] = None
properties["alpha"] = None
else:
for line in lines[start_line + 1 :]:
if set(line).issubset(set(["\n"])):
break
line = line.strip().split()
covCN = float(line[3])
alpha = float(line[-1])
properties["covCN"].append(covCN)
properties["alpha"].append(alpha)
return properties
def get_frequencies(scr=None):
""" Read vibrational frequencies from the 'vibspectrum' file in the scratch directory, if present """
if scr is None:
scr = pathlib.Path(".")
filename = scr / "vibspectrum"
if not filename.is_file():
return None
# Read vibspectrum file
with open(filename, "r") as f:
lines = f.readlines()
frequencies = read_frequencies(lines)
return frequencies
def read_frequencies(lines):
""" Parse vibrational frequencies (skipping non-vibrational modes) from vibspectrum lines """
frequencies = []
for line in lines[3:]:
if "$end" in line:
break
if "-" in line: # non vib modes
continue
frequencies.append(float(line.strip().split()[2]))
return frequencies
def parse_options(options, return_list=True):
""" Parse dictionary/json of options, and return arg list for xtb """
cmd_options = []
for key, value in options.items():
if value is not None:
txt = f"--{key} {value}"
else:
txt = f"--{key}"
cmd_options.append(txt)
if return_list:
return cmd_options
cmd_options = " ".join(cmd_options)
return cmd_options
| []
| []
| [
"XTBPATH",
"XTBHOME"
]
| [] | ["XTBPATH", "XTBHOME"] | python | 2 | 0 | |
kinetick/lib/brokers/zerodha/zerodha.py | import json
import time
from urllib.parse import urljoin
import dateutil.parser
import requests
import logging
import os
from kinetick.enums import SecurityType, PositionType
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=os.getenv('LOGLEVEL') or logging.INFO)
logger = logging.getLogger(__name__)
class Zerodha():
# Constants
# Products
PRODUCT_MIS = "MIS"
PRODUCT_CNC = "CNC"
PRODUCT_NRML = "NRML"
PRODUCT_CO = "CO"
PRODUCT_BO = "BO"
# Order types
ORDER_TYPE_MARKET = "MARKET"
ORDER_TYPE_LIMIT = "LIMIT"
ORDER_TYPE_SLM = "SL-M"
ORDER_TYPE_SL = "SL"
# Varities
VARIETY_REGULAR = "regular"
VARIETY_BO = "bo"
VARIETY_CO = "co"
VARIETY_AMO = "amo"
# Transaction type
TRANSACTION_TYPE_BUY = "BUY"
TRANSACTION_TYPE_SELL = "SELL"
# Validity
VALIDITY_DAY = "DAY"
VALIDITY_IOC = "IOC"
# Exchanges
EXCHANGE_NSE = "NSE"
EXCHANGE_BSE = "BSE"
EXCHANGE_NFO = "NFO"
EXCHANGE_CDS = "CDS"
EXCHANGE_BFO = "BFO"
EXCHANGE_MCX = "MCX"
# Margins segments
MARGIN_EQUITY = "equity"
MARGIN_COMMODITY = "commodity"
# Status constants
STATUS_COMPLETE = "COMPLETE"
STATUS_REJECTED = "REJECTED"
STATUS_CANCELLED = "CANCELLED"
# GTT order type
GTT_TYPE_OCO = "two-leg"
GTT_TYPE_SINGLE = "single"
# GTT order status
GTT_STATUS_ACTIVE = "active"
GTT_STATUS_TRIGGERED = "triggered"
GTT_STATUS_DISABLED = "disabled"
GTT_STATUS_EXPIRED = "expired"
GTT_STATUS_CANCELLED = "cancelled"
GTT_STATUS_REJECTED = "rejected"
GTT_STATUS_DELETED = "deleted"
_routes = {
"login": "/api/login",
"twofa": "/api/twofa",
"api.token": "/session/token",
"api.token.invalidate": "/session/token",
"api.token.renew": "/session/refresh_token",
"user.profile": "/user/profile",
"user.margins": "/user/margins",
"user.margins.segment": "/user/margins/{segment}",
"orders": "oms/orders",
"trades": "oms/trades",
"order.info": "oms/orders/{order_id}",
"order.place": "oms/orders/{variety}",
"order.modify": "oms/orders/{variety}/{order_id}",
"order.cancel": "oms/orders/{variety}/{order_id}",
"order.trades": "oms/orders/{order_id}/trades",
"portfolio.positions": "oms/portfolio/positions",
"portfolio.holdings": "oms/portfolio/holdings",
"portfolio.positions.convert": "oms/portfolio/positions",
# MF api endpoints
"mf.orders": "/mf/orders",
"mf.order.info": "/mf/orders/{order_id}",
"mf.order.place": "/mf/orders",
"mf.order.cancel": "/mf/orders/{order_id}",
"mf.sips": "/mf/sips",
"mf.sip.info": "/mf/sips/{sip_id}",
"mf.sip.place": "/mf/sips",
"mf.sip.modify": "/mf/sips/{sip_id}",
"mf.sip.cancel": "/mf/sips/{sip_id}",
"mf.holdings": "/mf/holdings",
"mf.instruments": "/mf/instruments",
"market.instruments.all": "/instruments",
"market.instruments": "/instruments/{exchange}",
"market.margins": "/margins/{segment}",
"market.historical": "/instruments/historical/{instrument_token}/{interval}",
"market.trigger_range": "/instruments/trigger_range/{transaction_type}",
"market.quote": "/quote",
"market.quote.ohlc": "/quote/ohlc",
"market.quote.ltp": "/quote/ltp",
# GTT endpoints
"gtt": "/gtt/triggers",
"gtt.place": "/gtt/triggers",
"gtt.info": "/gtt/triggers/{trigger_id}",
"gtt.modify": "/gtt/triggers/{trigger_id}",
"gtt.delete": "/gtt/triggers/{trigger_id}"
}
def __init__(self, user_id, password, pin, debug=False):
self.user_id = user_id
self.password = password
self.pin = pin
self.apiKey = 'kitefront'
self.base_url = "https://kite.zerodha.com"
self._session = requests.Session()
self._session.headers.update({'X-Kite-Version': '2.4.0'})
self.debug = debug
self.timeout = 60
self.maxretry = 3
self._session_expiry_hook = self.default_session_expiry_hook
# ==== set default values. =====
self._account = {}
self.orders = {} # TODO initialize with pending orders
self.symbol_orders = {}
# =====
def login(self):
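"""
Log in to Kite web with user_id/password followed by the 2FA pin,
then store the returned enctoken for use on subsequent requests.
"""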
res = self._post("login", {'user_id': self.user_id, 'password': self.password})
time.sleep(1)
res = self._session.post(self.base_url + self._routes["twofa"],
{'user_id': res['user_id'], 'request_id': res['request_id'],
'twofa_value': self.pin})
data = res.json()
if data['status'] == 'success':
logger.info("logged into zerodha")
self._session.headers.update({'Authorization': "enctoken " + res.cookies.get_dict()['enctoken']})
return
def default_session_expiry_hook(self, response, **kwargs):
logger.info("Running session expiry hook")
headers = kwargs['headers'] if 'headers' in kwargs else {}
retryAttempts = headers["x-retry"] if "x-retry" in headers else 1
if int(retryAttempts) <= self.maxretry:
logger.info(f"Retrying request. Attempt: {retryAttempts}")
self.login()
headers["x-retry"] = str(int(retryAttempts) + 1)
kwargs['headers'] = headers
return self._request(**kwargs)
logger.error("Maximum session retry attempts {} exceeded".format(self.maxretry))
raise Exception(f"zerodha: maximum re-login attempts {self.maxretry} failed")
def set_session_expiry_hook(self, method):
"""
Set a callback hook for session (`TokenError` -- timeout, expiry etc.) errors.
A callback method that handles session errors
can be set here and when the client encounters
a token error at any point, it'll be called.
This callback, for instance, can log the user out of the UI,
clear session cookies, or initiate a fresh login.
"""
if not callable(method):
raise TypeError("Invalid input type. Only functions are accepted.")
self._session_expiry_hook = method
def place_order(self, variety, tradingsymbol, transaction_type, quantity, product,
order_type, exchange='NSE', **kwargs):
"""
:param variety:
:param tradingsymbol:
:param transaction_type:
:param quantity:
:param product:
:param order_type:
:param exchange:
:param price:
:param trigger_price:
:param validity:
:param disclosed_quantity:
:param trigger_price:
:param squareoff:
:param stoploss:
:param squareoff:
:param trailing_stoploss:
:param tag:
:return:
"""
params = {'variety': variety,
'tradingsymbol': tradingsymbol,
'transaction_type': transaction_type,
'quantity': quantity,
'product': product,
'order_type': order_type,
'exchange': exchange
}
for param in kwargs:
if kwargs[param] is not None:
params[param] = kwargs[param]
response = self._post("order.place", params)
logger.info("Order placed with parameters %s", params)
return response["order_id"]
def modify_order(self,
variety,
order_id,
parent_order_id=None,
quantity=None,
price=None,
order_type=None,
trigger_price=None,
validity=None,
disclosed_quantity=None):
"""Modify an open order."""
params = locals()
del (params["self"])
for k in list(params.keys()):
if params[k] is None:
del (params[k])
return self._put("order.modify", params)["order_id"]
def cancel_order(self, variety, order_id, parent_order_id=None):
"""Cancel an order."""
return self._delete("order.cancel", {
"order_id": order_id,
"variety": variety,
"parent_order_id": parent_order_id
})["order_id"]
def exit_order(self, order_id, variety=None, parent_order_id=None):
"""Exit order."""
if variety is None:
order = self.order_by_id(order_id)[-1]
variety = order['variety']
parent_order_id = order['parent_order_id']
self.cancel_order(variety, order_id, parent_order_id=parent_order_id)
def _format_response(self, data):
"""Parse and format responses."""
if type(data) == list:
_list = data
elif type(data) == dict:
_list = [data]
for item in _list:
# Convert date time string to datetime object
for field in ["order_timestamp", "exchange_timestamp", "created", "last_instalment", "fill_timestamp",
"timestamp", "last_trade_time"]:
if item.get(field) and len(item[field]) == 19:
item[field] = dateutil.parser.parse(item[field])
return _list[0] if type(data) == dict else _list
def orders(self):
"""Get list of orders."""
return self._format_response(self._get("orders"))
def order_by_id(self, order_id):
"""
Get history of individual order.
"""
return self._format_response(self._get("order.info", {"order_id": order_id}))
def positions(self):
"""Retrieve the list of positions."""
return self._get("portfolio.positions")
def holdings(self):
"""Retrieve the list of equity holdings."""
return self._get("portfolio.holdings")
def _get(self, route, params=None):
"""Alias for sending a GET request."""
return self._request(route, "GET", params)
def _post(self, route, params=None):
"""Alias for sending a POST request."""
return self._request(route, "POST", params)
def _put(self, route, params=None):
"""Alias for sending a PUT request."""
return self._request(route, "PUT", params)
def _delete(self, route, params=None):
"""Alias for sending a DELETE request."""
return self._request(route, "DELETE", params)
def _request(self, route, method, parameters=None, headers=None):
"""Make an HTTP request."""
if headers is None:
headers = {}
params = parameters.copy() if parameters else {}
# Form a restful URL
uri = self._routes[route].format(**params)
url = urljoin(self.base_url, uri)
if self.debug:
logger.debug("Request: {method} {url} {params}".format(method=method, url=url, params=params))
try:
response = self._session.request(method,
url,
data=params if method in ["POST", "PUT"] else None,
params=params if method in ["GET", "DELETE"] else None,
headers=headers,
# verify=not self.disable_ssl,
allow_redirects=True,
timeout=self.timeout)
# Any requests lib related exceptions are raised here -
# http://docs.python-requests.org/en/master/_modules/requests/exceptions/
except Exception as e:
raise e
if self.debug:
logger.debug("Response: {code} {content}".format(code=response.status_code, content=response.content))
# Validate the content type.
if "json" in response.headers["content-type"]:
try:
data = json.loads(response.content.decode("utf8"))
except ValueError:
raise Exception("Couldn't parse the JSON response received from the server: {content}".format(
content=response.content))
# api error
if data.get("error_type"):
# Call session hook if its registered and TokenException is raised
if self._session_expiry_hook and response.status_code == 403 and data["error_type"] == "TokenException":
return self._session_expiry_hook(
response, route=route, method=method, parameters=parameters, headers=headers)
else:
# native Kite errors
# exp = getattr(ex, data["error_type"], ex.GeneralException)
raise Exception(data["message"])
return data["data"]
elif "csv" in response.headers["content-type"]:
return response.content
else:
raise Exception("Unknown Content-Type ({content_type}) with response: ({content})".format(
content_type=response.headers["content-type"],
content=response.content))
@property
def account(self):
return self._account
def get_order_variety(self, sec_type, pos_type):
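""" Map kinetick security/position types to the Zerodha (variety, product) pair """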
if sec_type == SecurityType.OPTION:
return Zerodha.VARIETY_REGULAR, Zerodha.PRODUCT_MIS
if sec_type == SecurityType.STOCK and pos_type == PositionType.CO:
return Zerodha.VARIETY_CO, Zerodha.PRODUCT_MIS
if sec_type == SecurityType.STOCK and pos_type == PositionType.MIS:
return Zerodha.VARIETY_REGULAR, Zerodha.PRODUCT_MIS
return Zerodha.VARIETY_REGULAR, Zerodha.PRODUCT_CNC
if __name__ == "__main__":
# kite.set_access_token()
zerodha = Zerodha("", "", "", debug=True)
zerodha.login()
# print(zerodha.getOrders())
order = zerodha.place_order(variety=Zerodha.VARIETY_CO, tradingsymbol='ACC',
transaction_type=Zerodha.TRANSACTION_TYPE_BUY, quantity=1,
product=Zerodha.PRODUCT_CO, order_type=Zerodha.ORDER_TYPE_LIMIT,
price=89.5)
print(order)
| []
| []
| [
"LOGLEVEL"
]
| [] | ["LOGLEVEL"] | python | 1 | 0 | |
git-tar/function/ops.go | package function
import (
"archive/tar"
"bytes"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
hmac "github.com/alexellis/hmac"
"github.com/openfaas/faas-cli/stack"
"github.com/openfaas/openfaas-cloud/sdk"
)
type tarEntry struct {
fileName string
functionName string
imageName string
}
type cfg struct {
Ref string `json:"ref"`
Frontend *string `json:"frontend,omitempty"`
}
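// parseYAML reads and parses the stack.yml found in the cloned repository at filePath.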
func parseYAML(pushEvent sdk.PushEvent, filePath string) (*stack.Services, error) {
parsed, err := stack.ParseYAMLFile(path.Join(filePath, "stack.yml"), "", "")
return parsed, err
}
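// shrinkwrap runs "faas-cli build --shrinkwrap" in filePath to render the build contexts without building images.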
func shrinkwrap(pushEvent sdk.PushEvent, filePath string) (string, error) {
buildCmd := exec.Command("faas-cli", "build", "-f", "stack.yml", "--shrinkwrap")
buildCmd.Dir = filePath
err := buildCmd.Start()
if err != nil {
return "", fmt.Errorf("Cannot start faas-cli build: %s", err)
}
err = buildCmd.Wait()
return filePath, err
}
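// makeTar writes one tar archive per function in stack.yml, embedding the build context and a config entry holding the target image reference.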
func makeTar(pushEvent sdk.PushEvent, filePath string, services *stack.Services) ([]tarEntry, error) {
tars := []tarEntry{}
fmt.Printf("Tar up %s\n", filePath)
for k, v := range services.Functions {
fmt.Println("Creating tar for: ", v.Handler, k)
tarPath := path.Join(filePath, fmt.Sprintf("%s.tar", k))
contextTar, err := os.Create(tarPath)
if err != nil {
return []tarEntry{}, err
}
tarWriter := tar.NewWriter(contextTar)
defer tarWriter.Close()
base := filepath.Join(filePath, filepath.Join("build", k))
pushRepositoryURL := os.Getenv("push_repository_url")
if len(pushRepositoryURL) == 0 {
fmt.Fprintf(os.Stderr, "push_repository_url env-var not set")
os.Exit(1)
}
imageName := formatImageShaTag(pushRepositoryURL, &v, pushEvent.AfterCommitID,
pushEvent.Repository.Owner.Login, pushEvent.Repository.Name)
config := cfg{
Ref: imageName,
}
configBytes, _ := json.Marshal(config)
configErr := ioutil.WriteFile(path.Join(base, "config"), configBytes, 0600)
if configErr != nil {
return nil, configErr
}
// fmt.Println("Base: ", base, filePath, k)
err = filepath.Walk(base, func(path string, f os.FileInfo, pathErr error) error {
if pathErr != nil {
return pathErr
}
if f.Name() == "context.tar" {
return nil
}
targetFile, err1 := os.Open(path)
log.Println(path)
if err1 != nil {
return err1
}
defer targetFile.Close()
header, headerErr := tar.FileInfoHeader(f, f.Name())
if headerErr != nil {
return headerErr
}
header.Name = strings.TrimPrefix(path, base)
// log.Printf("header.Name '%s'\n", header.Name)
if header.Name != "/config" {
header.Name = filepath.Join("context", header.Name)
}
header.Name = strings.TrimPrefix(header.Name, "/")
// log.Println("tar - header.Name ", header.Name)
if err1 = tarWriter.WriteHeader(header); err1 != nil {
return err1
}
if f.Mode().IsDir() {
return nil
}
_, err1 = io.Copy(tarWriter, targetFile)
return err1
})
if err != nil {
return []tarEntry{}, err
}
tars = append(tars, tarEntry{fileName: tarPath, functionName: strings.TrimSpace(k), imageName: imageName})
}
return tars, nil
}
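// formatImageShaTag builds the image reference for a function from the push registry, repository owner/name, image name, tag and commit SHA.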
func formatImageShaTag(registry string, function *stack.Function, sha string, owner string, repo string) string {
tag := ":latest"
imageName := function.Image
tagIndex := strings.LastIndex(function.Image, ":")
if tagIndex > 0 {
tag = function.Image[tagIndex:]
imageName = function.Image[:tagIndex]
}
repoIndex := strings.LastIndex(imageName, "/")
if repoIndex > -1 {
imageName = imageName[repoIndex+1:]
}
var imageRef string
sharedRepo := strings.HasSuffix(registry, "/")
if sharedRepo {
imageRef = registry[:len(registry)-1] + "/" + owner + "-" + repo + "-" + imageName + tag + "-" + sha
} else {
imageRef = registry + "/" + owner + "/" + repo + "-" + imageName + tag + "-" + sha
}
return imageRef
}
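// clone checks out the pushed commit of the repository from the push event into a per-owner directory under the system temp dir.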
func clone(pushEvent sdk.PushEvent) (string, error) {
workDir := os.TempDir()
destPath := path.Join(workDir, path.Join(pushEvent.Repository.Owner.Login, pushEvent.Repository.Name))
if _, err := os.Stat(destPath); err == nil {
truncateErr := os.RemoveAll(destPath)
if truncateErr != nil {
return "", truncateErr
}
}
userDir := path.Join(workDir, pushEvent.Repository.Owner.Login)
err := os.MkdirAll(userDir, 0777)
if err != nil {
return "", fmt.Errorf("cannot create user-dir: %s", userDir)
}
git := exec.Command("git", "clone", pushEvent.Repository.CloneURL)
git.Dir = path.Join(workDir, pushEvent.Repository.Owner.Login)
log.Println(git.Dir)
err = git.Start()
if err != nil {
return "", fmt.Errorf("Cannot start git: %s", err)
}
err = git.Wait()
git = exec.Command("git", "checkout", pushEvent.AfterCommitID)
git.Dir = destPath
err = git.Start()
if err != nil {
return "", fmt.Errorf("Cannot start git checkout: %s", err)
}
err = git.Wait()
return destPath, err
}
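// deploy posts each function's tar to the buildshiprun function on the gateway, passing repository metadata, env-vars and secret names as headers.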
func deploy(tars []tarEntry, pushEvent sdk.PushEvent, stack *stack.Services) error {
owner := pushEvent.Repository.Owner.Login
repoName := pushEvent.Repository.Name
url := pushEvent.Repository.CloneURL
afterCommitID := pushEvent.AfterCommitID
installationId := pushEvent.Installation.Id
c := http.Client{}
gatewayURL := os.Getenv("gateway_url")
for _, tarEntry := range tars {
fmt.Println("Deploying service - " + tarEntry.functionName)
fileOpen, err := os.Open(tarEntry.fileName)
if err != nil {
return err
}
httpReq, _ := http.NewRequest(http.MethodPost, gatewayURL+"function/buildshiprun", fileOpen)
httpReq.Header.Add("Repo", repoName)
httpReq.Header.Add("Owner", owner)
httpReq.Header.Add("Url", url)
httpReq.Header.Add("Installation_id", fmt.Sprintf("%d", installationId))
httpReq.Header.Add("Service", tarEntry.functionName)
httpReq.Header.Add("Image", tarEntry.imageName)
httpReq.Header.Add("Sha", afterCommitID)
envJSON, marshalErr := json.Marshal(stack.Functions[tarEntry.functionName].Environment)
if marshalErr != nil {
log.Printf("Error marshaling %d env-vars for function %s, %s", len(stack.Functions[tarEntry.functionName].Environment), tarEntry.functionName, marshalErr)
}
httpReq.Header.Add("Env", string(envJSON))
secretsJSON, marshalErr := json.Marshal(stack.Functions[tarEntry.functionName].Secrets)
if marshalErr != nil {
log.Printf("Error marshaling secrets for function %s, %s", tarEntry.functionName, marshalErr)
}
httpReq.Header.Add("Secrets", string(secretsJSON))
res, reqErr := c.Do(httpReq)
if reqErr != nil {
fmt.Fprintf(os.Stderr, "unable to deploy function via buildshiprun: %s\n", reqErr.Error())
continue
}
fmt.Println("Service deployed ", tarEntry.functionName, res.Status, owner)
}
return nil
}
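// importSecrets forwards secrets.yml from the clone (when present) to the import-secrets function, signed with the GitHub webhook secret.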
func importSecrets(pushEvent sdk.PushEvent, stack *stack.Services, clonePath string) error {
gatewayURL := os.Getenv("gateway_url")
secretCount := 0
for _, fn := range stack.Functions {
secretCount += len(fn.Secrets)
}
owner := pushEvent.Repository.Owner.Login
secretPath := path.Join(clonePath, "secrets.yml")
// No secrets supplied.
if fileInfo, err := os.Stat(secretPath); fileInfo == nil || err != nil {
return nil
}
fileBytes, err := ioutil.ReadFile(secretPath)
if err != nil {
return fmt.Errorf("unable to read secret: %s", secretPath)
}
webhookSecretKey := os.Getenv("github_webhook_secret")
hash := hmac.Sign(fileBytes, []byte(webhookSecretKey))
c := http.Client{}
reader := bytes.NewReader(fileBytes)
httpReq, _ := http.NewRequest(http.MethodPost, gatewayURL+"function/import-secrets", reader)
httpReq.Header.Add("Owner", owner)
httpReq.Header.Add("X-Hub-Signature", "sha1="+hex.EncodeToString(hash))
res, reqErr := c.Do(httpReq)
if reqErr != nil {
return fmt.Errorf("error reaching import-secrets function: %s", reqErr.Error())
}
defer res.Body.Close()
if res.StatusCode != http.StatusAccepted && res.StatusCode != http.StatusOK {
resBytes, err := ioutil.ReadAll(res.Body)
if err != nil {
return fmt.Errorf("error reading response from import-secrets: %s", err.Error())
}
return fmt.Errorf("import-secrets returned unexpected status: %s, response: %s", res.Status, string(resBytes))
}
auditEvent := sdk.AuditEvent{
Message: fmt.Sprintf("Parsed sealed secrets for owner: %s. Parsed %d secrets, from %d functions", owner, secretCount, len(stack.Functions)),
Owner: pushEvent.Repository.Owner.Login,
Repo: pushEvent.Repository.Name,
Source: Source,
}
sdk.PostAudit(auditEvent)
fmt.Println("Parsed sealed secrets", res.Status, owner)
return nil
}
| [
"\"push_repository_url\"",
"\"gateway_url\"",
"\"gateway_url\"",
"\"github_webhook_secret\""
]
| []
| [
"push_repository_url",
"github_webhook_secret",
"gateway_url"
]
| [] | ["push_repository_url", "github_webhook_secret", "gateway_url"] | go | 3 | 0 | |
naiveslam/helpers.py | import os
import numpy as np
# https://github.com/MagicLeapResearch/SuperPointPretrainedNetwork/blob/master/demo_superpoint.py
myjet = np.array([[0. , 0. , 0.5 ],
[0. , 0. , 0.99910873],
[0. , 0.37843137, 1. ],
[0. , 0.83333333, 1. ],
[0.30044276, 1. , 0.66729918],
[0.66729918, 1. , 0.30044276],
[1. , 0.90123457, 0. ],
[1. , 0.48002905, 0. ],
[0.99910873, 0.07334786, 0. ],
[0.5 , 0. , 0. ]])
def hamming_distance(a, b):
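# Count the total number of differing bits between two uint8 descriptor arrays.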
r = (1 << np.arange(8))[:,None]
return np.count_nonzero((np.bitwise_xor(a,b) & r) != 0)
def triangulate(pose1, pose2, pts1, pts2):
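# Linear (DLT) triangulation of matched normalized points seen from two camera poses; returns homogeneous 4-vectors.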
ret = np.zeros((pts1.shape[0], 4))
for i, p in enumerate(zip(pts1, pts2)):
A = np.zeros((4,4))
A[0] = p[0][0] * pose1[2] - pose1[0]
A[1] = p[0][1] * pose1[2] - pose1[1]
A[2] = p[1][0] * pose2[2] - pose2[0]
A[3] = p[1][1] * pose2[2] - pose2[1]
_, _, vt = np.linalg.svd(A)
ret[i] = vt[3]
return ret
def add_ones(x):
if len(x.shape) == 1:
return np.concatenate([x,np.array([1.0])], axis=0)
else:
return np.concatenate([x, np.ones((x.shape[0], 1))], axis=1)
def poseRt(R, t):
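# Build a 4x4 homogeneous pose matrix from a rotation matrix R and translation vector t.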
ret = np.eye(4)
ret[:3, :3] = R
ret[:3, 3] = t
return ret
def fundamentalToRt(F):
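# Recover the relative camera pose (as a 4x4 matrix) from a fundamental/essential matrix via SVD.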
W = np.mat([[0,-1,0],[1,0,0],[0,0,1]],dtype=float)
U,d,Vt = np.linalg.svd(F)
if np.linalg.det(U) < 0:
U *= -1.0
if np.linalg.det(Vt) < 0:
Vt *= -1.0
R = np.dot(np.dot(U, W), Vt)
if np.sum(R.diagonal()) < 0:
R = np.dot(np.dot(U, W.T), Vt)
t = U[:, 2]
if t[2] < 0:
t *= -1
if os.getenv("REVERSE") is not None:
t *= -1
return np.linalg.inv(poseRt(R, t))
def normalize(Kinv, pts):
return np.dot(Kinv, add_ones(pts).T).T[:, 0:2]
# from https://github.com/scikit-image/scikit-image/blob/master/skimage/transform/_geometric.py
class EssentialMatrixTransform(object):
def __init__(self):
self.params = np.eye(3)
def __call__(self, coords):
coords_homogeneous = np.column_stack([coords, np.ones(coords.shape[0])])
return coords_homogeneous @ self.params.T
def estimate(self, src, dst):
assert src.shape == dst.shape
assert src.shape[0] >= 8
A = np.ones((src.shape[0], 9))
A[:, :2] = src
A[:, :3] *= dst[:, 0, np.newaxis]
A[:, 3:5] = src
A[:, 3:6] *= dst[:, 1, np.newaxis]
A[:, 6:8] = src
_, _, V = np.linalg.svd(A)
F = V[-1, :].reshape(3, 3)
U, S, V = np.linalg.svd(F)
S[0] = S[1] = (S[0] + S[1]) / 2.0
S[2] = 0
self.params = U @ np.diag(S) @ V
return True
def residuals(self, src, dst):
src_homogeneous = np.column_stack([src, np.ones(src.shape[0])])
dst_homogeneous = np.column_stack([dst, np.ones(dst.shape[0])])
F_src = self.params @ src_homogeneous.T
Ft_dst = self.params.T @ dst_homogeneous.T
dst_F_src = np.sum(dst_homogeneous * F_src.T, axis=1)
return np.abs(dst_F_src) / np.sqrt(F_src[0] ** 2 + F_src[1] ** 2
+ Ft_dst[0] ** 2 + Ft_dst[1] ** 2)
| []
| []
| [
"REVERSE"
]
| [] | ["REVERSE"] | python | 1 | 0 | |
tools/trial_tool/metrics_reader.py | # ============================================================================================================================== #
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ============================================================================================================================== #
import argparse
import errno
import json
import os
import re
import requests
from .constants import BASE_URL, DEFAULT_REST_PORT
from .rest_utils import rest_get, rest_post, rest_put, rest_delete
from .url_utils import gen_update_metrics_url
NNI_SYS_DIR = os.environ['NNI_SYS_DIR']
NNI_TRIAL_JOB_ID = os.environ['NNI_TRIAL_JOB_ID']
NNI_EXP_ID = os.environ['NNI_EXP_ID']
LEN_FIELD_SIZE = 6
MAGIC = 'ME'
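# Each metric record in the metrics file is framed as: the 2-byte magic 'ME', a 6-character length field, then the data payload.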
print('In metrics_reader, NNI_SYS_DIR is {}'.format(NNI_SYS_DIR))
class TrialMetricsReader():
'''
Read metrics data from a trial job
'''
def __init__(self, rest_port = DEFAULT_REST_PORT):
metrics_base_dir = os.path.join(NNI_SYS_DIR, '.nni')
self.offset_filename = os.path.join(metrics_base_dir, 'metrics_offset')
self.metrics_filename = os.path.join(metrics_base_dir, 'metrics')
self.rest_port = rest_port
if not os.path.exists(metrics_base_dir):
os.makedirs(metrics_base_dir)
def _metrics_file_is_empty(self):
if not os.path.isfile(self.metrics_filename):
return True
statinfo = os.stat(self.metrics_filename)
return statinfo.st_size == 0
def _get_offset(self):
offset = 0
if os.path.isfile(self.offset_filename):
with open(self.offset_filename, 'r') as f:
offset = int(f.readline())
return offset
def _write_offset(self, offset):
statinfo = os.stat(self.metrics_filename)
if offset < 0 or offset > statinfo.st_size:
raise ValueError('offset value is invalid: {}'.format(offset))
with open(self.offset_filename, 'w') as f:
f.write(str(offset)+'\n')
def _read_all_available_records(self, offset):
new_offset = offset
metrics = []
with open(self.metrics_filename, 'r') as f:
print('offset is {}'.format(offset))
f.seek(offset)
while True:
magic_string = f.read(len(MAGIC))
# empty data means EOF
if not magic_string:
break
strdatalen = f.read(LEN_FIELD_SIZE)
# empty data means EOF
if not strdatalen:
raise ValueError("metric file {} format error after offset: {}.".format(self.metrics_filename, new_offset))
datalen = int(strdatalen)
data = f.read(datalen)
if datalen > 0 and len(data) == datalen:
print('data is \'{}\''.format(data))
new_offset = f.tell()
metrics.append(data)
else:
raise ValueError("metric file {} format error after offset: {}.".format(self.metrics_filename, new_offset))
self._write_offset(new_offset)
return metrics
def read_trial_metrics(self):
'''
Read available metrics data for a trial
'''
if self._metrics_file_is_empty():
print('metrics is empty')
return []
offset = self._get_offset()
return self._read_all_available_records(offset)
def read_experiment_metrics(nnimanager_ip):
'''
Read metrics data for specified trial jobs
'''
result = {}
try:
reader = TrialMetricsReader()
result['jobId'] = NNI_TRIAL_JOB_ID
result['metrics'] = reader.read_trial_metrics()
print('Result metrics is {}'.format(json.dumps(result)))
if len(result['metrics']) > 0:
response = rest_post(gen_update_metrics_url(BASE_URL.format(nnimanager_ip), DEFAULT_REST_PORT, NNI_EXP_ID, NNI_TRIAL_JOB_ID), json.dumps(result), 10)
print('Response code is {}'.format(response.status_code))
except Exception:
#TODO error logging to file
pass
return json.dumps(result) | []
| []
| [
"NNI_TRIAL_JOB_ID",
"NNI_SYS_DIR",
"NNI_EXP_ID"
]
| [] | ["NNI_TRIAL_JOB_ID", "NNI_SYS_DIR", "NNI_EXP_ID"] | python | 3 | 0 | |
src/cmd/compile/internal/gc/ssa.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"bytes"
"encoding/binary"
"fmt"
"html"
"os"
"sort"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
"cmd/internal/sys"
)
var ssaConfig *ssa.Config
var ssaCaches []ssa.Cache
func initssaconfig() {
types_ := ssa.Types{
Bool: types.Types[TBOOL],
Int8: types.Types[TINT8],
Int16: types.Types[TINT16],
Int32: types.Types[TINT32],
Int64: types.Types[TINT64],
UInt8: types.Types[TUINT8],
UInt16: types.Types[TUINT16],
UInt32: types.Types[TUINT32],
UInt64: types.Types[TUINT64],
Float32: types.Types[TFLOAT32],
Float64: types.Types[TFLOAT64],
Int: types.Types[TINT],
UInt: types.Types[TUINT],
Uintptr: types.Types[TUINTPTR],
String: types.Types[TSTRING],
BytePtr: types.NewPtr(types.Types[TUINT8]),
Int32Ptr: types.NewPtr(types.Types[TINT32]),
UInt32Ptr: types.NewPtr(types.Types[TUINT32]),
IntPtr: types.NewPtr(types.Types[TINT]),
UintptrPtr: types.NewPtr(types.Types[TUINTPTR]),
Float32Ptr: types.NewPtr(types.Types[TFLOAT32]),
Float64Ptr: types.NewPtr(types.Types[TFLOAT64]),
BytePtrPtr: types.NewPtr(types.NewPtr(types.Types[TUINT8])),
}
// Generate a few pointer types that are uncommon in the frontend but common in the backend.
// Caching is disabled in the backend, so generating these here avoids allocations.
_ = types.NewPtr(types.Types[TINTER]) // *interface{}
_ = types.NewPtr(types.NewPtr(types.Types[TSTRING])) // **string
_ = types.NewPtr(types.NewPtr(types.Idealstring)) // **string
_ = types.NewPtr(types.NewSlice(types.Types[TINTER])) // *[]interface{}
_ = types.NewPtr(types.NewPtr(types.Bytetype)) // **byte
_ = types.NewPtr(types.NewSlice(types.Bytetype)) // *[]byte
_ = types.NewPtr(types.NewSlice(types.Types[TSTRING])) // *[]string
_ = types.NewPtr(types.NewSlice(types.Idealstring)) // *[]string
_ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[TUINT8]))) // ***uint8
_ = types.NewPtr(types.Types[TINT16]) // *int16
_ = types.NewPtr(types.Types[TINT64]) // *int64
_ = types.NewPtr(types.Errortype) // *error
types.NewPtrCacheEnabled = false
ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, types_, Ctxt, Debug['N'] == 0)
if thearch.LinkArch.Name == "386" {
ssaConfig.Set387(thearch.Use387)
}
ssaCaches = make([]ssa.Cache, nBackendWorkers)
// Set up some runtime functions we'll need to call.
Newproc = sysfunc("newproc")
Deferproc = sysfunc("deferproc")
Deferreturn = sysfunc("deferreturn")
Duffcopy = sysfunc("duffcopy")
Duffzero = sysfunc("duffzero")
panicindex = sysfunc("panicindex")
panicslice = sysfunc("panicslice")
panicdivide = sysfunc("panicdivide")
growslice = sysfunc("growslice")
panicdottypeE = sysfunc("panicdottypeE")
panicdottypeI = sysfunc("panicdottypeI")
panicnildottype = sysfunc("panicnildottype")
assertE2I = sysfunc("assertE2I")
assertE2I2 = sysfunc("assertE2I2")
assertI2I = sysfunc("assertI2I")
assertI2I2 = sysfunc("assertI2I2")
goschedguarded = sysfunc("goschedguarded")
writeBarrier = sysfunc("writeBarrier")
writebarrierptr = sysfunc("writebarrierptr")
typedmemmove = sysfunc("typedmemmove")
typedmemclr = sysfunc("typedmemclr")
Udiv = sysfunc("udiv")
// GO386=387 runtime functions
ControlWord64trunc = sysfunc("controlWord64trunc")
ControlWord32 = sysfunc("controlWord32")
}
// buildssa builds an SSA function for fn.
// worker indicates which of the backend workers is doing the processing.
func buildssa(fn *Node, worker int) *ssa.Func {
name := fn.funcname()
printssa := name == os.Getenv("GOSSAFUNC")
if printssa {
fmt.Println("generating SSA for", name)
dumplist("buildssa-enter", fn.Func.Enter)
dumplist("buildssa-body", fn.Nbody)
dumplist("buildssa-exit", fn.Func.Exit)
}
var s state
s.pushLine(fn.Pos)
defer s.popLine()
s.hasdefer = fn.Func.HasDefer()
if fn.Func.Pragma&CgoUnsafeArgs != 0 {
s.cgoUnsafeArgs = true
}
fe := ssafn{
curfn: fn,
log: printssa,
}
s.curfn = fn
s.f = ssa.NewFunc(&fe)
s.config = ssaConfig
s.f.Config = ssaConfig
s.f.Cache = &ssaCaches[worker]
s.f.Cache.Reset()
s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH", name)
s.f.Name = name
if fn.Func.Pragma&Nosplit != 0 {
s.f.NoSplit = true
}
defer func() {
if s.f.WBPos.IsKnown() {
fn.Func.WBPos = s.f.WBPos
}
}()
s.exitCode = fn.Func.Exit
s.panics = map[funcLine]*ssa.Block{}
if name == os.Getenv("GOSSAFUNC") {
s.f.HTMLWriter = ssa.NewHTMLWriter("ssa.html", s.f.Frontend(), name)
// TODO: generate and print a mapping from nodes to values and blocks
}
// Allocate starting block
s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
// Allocate starting values
s.labels = map[string]*ssaLabel{}
s.labeledNodes = map[*Node]*ssaLabel{}
s.fwdVars = map[*Node]*ssa.Value{}
s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
s.sp = s.entryNewValue0(ssa.OpSP, types.Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
s.sb = s.entryNewValue0(ssa.OpSB, types.Types[TUINTPTR])
s.startBlock(s.f.Entry)
s.vars[&memVar] = s.startmem
// Generate addresses of local declarations
s.decladdrs = map[*Node]*ssa.Value{}
for _, n := range fn.Func.Dcl {
switch n.Class() {
case PPARAM, PPARAMOUT:
s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), n, s.sp)
if n.Class() == PPARAMOUT && s.canSSA(n) {
// Save ssa-able PPARAMOUT variables so we can
// store them back to the stack at the end of
// the function.
s.returns = append(s.returns, n)
}
case PAUTO:
// processed at each use, to prevent Addr coming
// before the decl.
case PAUTOHEAP:
// moved to heap - already handled by frontend
case PFUNC:
// local function - already handled by frontend
default:
s.Fatalf("local variable with class %s unimplemented", classnames[n.Class()])
}
}
// Populate SSAable arguments.
for _, n := range fn.Func.Dcl {
if n.Class() == PPARAM && s.canSSA(n) {
s.vars[n] = s.newValue0A(ssa.OpArg, n.Type, n)
}
}
// Convert the AST-based IR to the SSA-based IR
s.stmtList(fn.Func.Enter)
s.stmtList(fn.Nbody)
// fallthrough to exit
if s.curBlock != nil {
s.pushLine(fn.Func.Endlineno)
s.exit()
s.popLine()
}
s.insertPhis()
// Don't carry a reference to this around longer than necessary
s.exitCode = Nodes{}
// Main call to ssa package to compile function
ssa.Compile(s.f)
return s.f
}
type state struct {
// configuration (arch) information
config *ssa.Config
// function we're building
f *ssa.Func
// Node for function
curfn *Node
// labels and labeled control flow nodes (OFOR, OFORUNTIL, OSWITCH, OSELECT) in f
labels map[string]*ssaLabel
labeledNodes map[*Node]*ssaLabel
// Code that must precede any return
// (e.g., copying value of heap-escaped paramout back to true paramout)
exitCode Nodes
// unlabeled break and continue statement tracking
breakTo *ssa.Block // current target for plain break statement
continueTo *ssa.Block // current target for plain continue statement
// current location where we're interpreting the AST
curBlock *ssa.Block
// variable assignments in the current block (map from variable symbol to ssa value)
// *Node is the unique identifier (an ONAME Node) for the variable.
// TODO: keep a single varnum map, then make all of these maps slices instead?
vars map[*Node]*ssa.Value
// fwdVars are variables that are used before they are defined in the current block.
// This map exists just to coalesce multiple references into a single FwdRef op.
// *Node is the unique identifier (an ONAME Node) for the variable.
fwdVars map[*Node]*ssa.Value
// all defined variables at the end of each block. Indexed by block ID.
defvars []map[*Node]*ssa.Value
// addresses of PPARAM and PPARAMOUT variables.
decladdrs map[*Node]*ssa.Value
// starting values. Memory, stack pointer, and globals pointer
startmem *ssa.Value
sp *ssa.Value
sb *ssa.Value
// line number stack. The current line number is top of stack
line []src.XPos
// list of panic calls by function name and line number.
// Used to deduplicate panic calls.
panics map[funcLine]*ssa.Block
// list of PPARAMOUT (return) variables.
returns []*Node
cgoUnsafeArgs bool
hasdefer bool // whether the function contains a defer statement
}
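// funcLine identifies a call site by function symbol, file, and line;
// it is the key of the panics map used to deduplicate panic calls.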
type funcLine struct {
f *obj.LSym
file string
line uint
}
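// ssaLabel records the SSA blocks associated with a Go label.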
type ssaLabel struct {
target *ssa.Block // block identified by this label
breakTarget *ssa.Block // block to break to in control flow node identified by this label
continueTarget *ssa.Block // block to continue to in control flow node identified by this label
}
// label returns the label associated with sym, creating it if necessary.
func (s *state) label(sym *types.Sym) *ssaLabel {
lab := s.labels[sym.Name]
if lab == nil {
lab = new(ssaLabel)
s.labels[sym.Name] = lab
}
return lab
}
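// Logging, warning, and fatal-error helpers that delegate to the underlying ssa.Func and its frontend.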
func (s *state) Logf(msg string, args ...interface{}) { s.f.Logf(msg, args...) }
func (s *state) Log() bool { return s.f.Log() }
func (s *state) Fatalf(msg string, args ...interface{}) {
s.f.Frontend().Fatalf(s.peekPos(), msg, args...)
}
func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
func (s *state) Debug_checknil() bool { return s.f.Frontend().Debug_checknil() }
var (
// dummy node for the memory variable
memVar = Node{Op: ONAME, Sym: &types.Sym{Name: "mem"}}
// dummy nodes for temporary variables
ptrVar = Node{Op: ONAME, Sym: &types.Sym{Name: "ptr"}}
lenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "len"}}
newlenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "newlen"}}
capVar = Node{Op: ONAME, Sym: &types.Sym{Name: "cap"}}
typVar = Node{Op: ONAME, Sym: &types.Sym{Name: "typ"}}
okVar = Node{Op: ONAME, Sym: &types.Sym{Name: "ok"}}
)
// startBlock sets the current block we're generating code in to b.
func (s *state) startBlock(b *ssa.Block) {
if s.curBlock != nil {
s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
}
s.curBlock = b
s.vars = map[*Node]*ssa.Value{}
for n := range s.fwdVars {
delete(s.fwdVars, n)
}
}
// endBlock marks the end of generating code for the current block.
// Returns the (former) current block. Returns nil if there is no current
// block, i.e. if no code flows to the current execution point.
func (s *state) endBlock() *ssa.Block {
b := s.curBlock
if b == nil {
return nil
}
for len(s.defvars) <= int(b.ID) {
s.defvars = append(s.defvars, nil)
}
s.defvars[b.ID] = s.vars
s.curBlock = nil
s.vars = nil
b.Pos = s.peekPos()
return b
}
// pushLine pushes a line number on the line number stack.
func (s *state) pushLine(line src.XPos) {
if !line.IsKnown() {
// the frontend may emit a node with a missing line number;
// use the parent line number in this case.
line = s.peekPos()
if Debug['K'] != 0 {
Warn("buildssa: unknown position (line 0)")
}
}
s.line = append(s.line, line)
}
// popLine pops the top of the line number stack.
func (s *state) popLine() {
s.line = s.line[:len(s.line)-1]
}
// peekPos peeks the top of the line number stack.
func (s *state) peekPos() src.XPos {
return s.line[len(s.line)-1]
}
// newValue0 adds a new value with no arguments to the current block.
func (s *state) newValue0(op ssa.Op, t *types.Type) *ssa.Value {
return s.curBlock.NewValue0(s.peekPos(), op, t)
}
// newValue0A adds a new value with no arguments and an aux value to the current block.
func (s *state) newValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
}
// newValue0I adds a new value with no arguments and an auxint value to the current block.
func (s *state) newValue0I(op ssa.Op, t *types.Type, auxint int64) *ssa.Value {
return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
}
// newValue1 adds a new value with one argument to the current block.
func (s *state) newValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
return s.curBlock.NewValue1(s.peekPos(), op, t, arg)
}
// newValue1A adds a new value with one argument and an aux value to the current block.
func (s *state) newValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
}
// newValue1I adds a new value with one argument and an auxint value to the current block.
func (s *state) newValue1I(op ssa.Op, t *types.Type, aux int64, arg *ssa.Value) *ssa.Value {
return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg)
}
// newValue2 adds a new value with two arguments to the current block.
func (s *state) newValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
}
// newValue2I adds a new value with two arguments and an auxint value to the current block.
func (s *state) newValue2I(op ssa.Op, t *types.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1)
}
// newValue3 adds a new value with three arguments to the current block.
func (s *state) newValue3(op ssa.Op, t *types.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2)
}
// newValue3I adds a new value with three arguments and an auxint value to the current block.
func (s *state) newValue3I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}
// newValue3A adds a new value with three arguments and an aux value to the current block.
func (s *state) newValue3A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
}
// newValue4 adds a new value with four arguments to the current block.
func (s *state) newValue4(op ssa.Op, t *types.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
}
// entryNewValue0 adds a new value with no arguments to the entry block.
func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value {
return s.f.Entry.NewValue0(src.NoXPos, op, t)
}
// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
return s.f.Entry.NewValue0A(s.peekPos(), op, t, aux)
}
// entryNewValue1 adds a new value with one argument to the entry block.
func (s *state) entryNewValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
return s.f.Entry.NewValue1(s.peekPos(), op, t, arg)
}
// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa.Value) *ssa.Value {
return s.f.Entry.NewValue1I(s.peekPos(), op, t, auxint, arg)
}
// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
return s.f.Entry.NewValue1A(s.peekPos(), op, t, aux, arg)
}
// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
return s.f.Entry.NewValue2(s.peekPos(), op, t, arg0, arg1)
}
// const* routines add a new const value to the entry block.
func (s *state) constSlice(t *types.Type) *ssa.Value {
return s.f.ConstSlice(s.peekPos(), t)
}
func (s *state) constInterface(t *types.Type) *ssa.Value {
return s.f.ConstInterface(s.peekPos(), t)
}
func (s *state) constNil(t *types.Type) *ssa.Value { return s.f.ConstNil(s.peekPos(), t) }
func (s *state) constEmptyString(t *types.Type) *ssa.Value {
return s.f.ConstEmptyString(s.peekPos(), t)
}
func (s *state) constBool(c bool) *ssa.Value {
return s.f.ConstBool(s.peekPos(), types.Types[TBOOL], c)
}
func (s *state) constInt8(t *types.Type, c int8) *ssa.Value {
return s.f.ConstInt8(s.peekPos(), t, c)
}
func (s *state) constInt16(t *types.Type, c int16) *ssa.Value {
return s.f.ConstInt16(s.peekPos(), t, c)
}
func (s *state) constInt32(t *types.Type, c int32) *ssa.Value {
return s.f.ConstInt32(s.peekPos(), t, c)
}
func (s *state) constInt64(t *types.Type, c int64) *ssa.Value {
return s.f.ConstInt64(s.peekPos(), t, c)
}
func (s *state) constFloat32(t *types.Type, c float64) *ssa.Value {
return s.f.ConstFloat32(s.peekPos(), t, c)
}
func (s *state) constFloat64(t *types.Type, c float64) *ssa.Value {
return s.f.ConstFloat64(s.peekPos(), t, c)
}
func (s *state) constInt(t *types.Type, c int64) *ssa.Value {
if s.config.PtrSize == 8 {
return s.constInt64(t, c)
}
if int64(int32(c)) != c {
s.Fatalf("integer constant too big %d", c)
}
return s.constInt32(t, int32(c))
}
func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value {
return s.f.ConstOffPtrSP(s.peekPos(), t, c, s.sp)
}
// stmtList converts the statement list n to SSA and adds it to s.
func (s *state) stmtList(l Nodes) {
for _, n := range l.Slice() {
s.stmt(n)
}
}
// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n *Node) {
s.pushLine(n.Pos)
defer s.popLine()
// If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
// then this code is dead. Stop here.
if s.curBlock == nil && n.Op != OLABEL {
return
}
s.stmtList(n.Ninit)
switch n.Op {
case OBLOCK:
s.stmtList(n.List)
// No-ops
case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:
// Expression statements
case OCALLFUNC:
if isIntrinsicCall(n) {
s.intrinsicCall(n)
return
}
fallthrough
case OCALLMETH, OCALLINTER:
s.call(n, callNormal)
if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class() == PFUNC {
if fn := n.Left.Sym.Name; compiling_runtime && fn == "throw" ||
n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block") {
m := s.mem()
b := s.endBlock()
b.Kind = ssa.BlockExit
b.SetControl(m)
// TODO: never rewrite OPANIC to OCALLFUNC in the
// first place. Need to wait until all backends
// go through SSA.
}
}
case ODEFER:
s.call(n.Left, callDefer)
case OPROC:
s.call(n.Left, callGo)
case OAS2DOTTYPE:
res, resok := s.dottype(n.Rlist.First(), true)
deref := false
if !canSSAType(n.Rlist.First().Type) {
if res.Op != ssa.OpLoad {
s.Fatalf("dottype of non-load")
}
mem := s.mem()
if mem.Op == ssa.OpVarKill {
mem = mem.Args[0]
}
if res.Args[1] != mem {
s.Fatalf("memory no longer live from 2-result dottype load")
}
deref = true
res = res.Args[0]
}
s.assign(n.List.First(), res, deref, 0)
s.assign(n.List.Second(), resok, false, 0)
return
case OAS2FUNC:
// We come here only when it is an intrinsic call returning two values.
if !isIntrinsicCall(n.Rlist.First()) {
s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Rlist.First())
}
v := s.intrinsicCall(n.Rlist.First())
v1 := s.newValue1(ssa.OpSelect0, n.List.First().Type, v)
v2 := s.newValue1(ssa.OpSelect1, n.List.Second().Type, v)
s.assign(n.List.First(), v1, false, 0)
s.assign(n.List.Second(), v2, false, 0)
return
case ODCL:
if n.Left.Class() == PAUTOHEAP {
Fatalf("DCL %v", n)
}
case OLABEL:
sym := n.Left.Sym
lab := s.label(sym)
// Associate label with its control flow node, if any
if ctl := n.labeledControl(); ctl != nil {
s.labeledNodes[ctl] = lab
}
// The label might already have a target block via a goto.
if lab.target == nil {
lab.target = s.f.NewBlock(ssa.BlockPlain)
}
// Go to that label.
// (We pretend "label:" is preceded by "goto label", unless the predecessor is unreachable.)
if s.curBlock != nil {
b := s.endBlock()
b.AddEdgeTo(lab.target)
}
s.startBlock(lab.target)
case OGOTO:
sym := n.Left.Sym
lab := s.label(sym)
if lab.target == nil {
lab.target = s.f.NewBlock(ssa.BlockPlain)
}
b := s.endBlock()
b.AddEdgeTo(lab.target)
case OAS:
if n.Left == n.Right && n.Left.Op == ONAME {
// An x=x assignment. No point in doing anything
// here. In addition, skipping this assignment
// prevents generating:
// VARDEF x
// COPY x -> x
// which is bad because x is incorrectly considered
// dead before the vardef. See issue #14904.
return
}
// Evaluate RHS.
rhs := n.Right
if rhs != nil {
switch rhs.Op {
case OSTRUCTLIT, OARRAYLIT, OSLICELIT:
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
if !iszero(rhs) {
Fatalf("literal with nonzero value in SSA: %v", rhs)
}
rhs = nil
case OAPPEND:
// Check whether we're writing the result of an append back to the same slice.
// If so, we handle it specially to avoid write barriers on the fast
// (non-growth) path.
if !samesafeexpr(n.Left, rhs.List.First()) {
break
}
// If the slice can be SSA'd, it'll be on the stack,
// so there will be no write barriers,
// so there's no need to attempt to prevent them.
if s.canSSA(n.Left) {
if Debug_append > 0 { // replicating old diagnostic message
Warnl(n.Pos, "append: len-only update (in local slice)")
}
break
}
if Debug_append > 0 {
Warnl(n.Pos, "append: len-only update")
}
s.append(rhs, true)
return
}
}
if isblank(n.Left) {
// _ = rhs
// Just evaluate rhs for side-effects.
if rhs != nil {
s.expr(rhs)
}
return
}
var t *types.Type
if n.Right != nil {
t = n.Right.Type
} else {
t = n.Left.Type
}
var r *ssa.Value
deref := !canSSAType(t)
if deref {
if rhs == nil {
r = nil // Signal assign to use OpZero.
} else {
r = s.addr(rhs, false)
}
} else {
if rhs == nil {
r = s.zeroVal(t)
} else {
r = s.expr(rhs)
}
}
var skip skipMask
if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
// We're assigning a slicing operation back to its source.
// Don't write back fields we aren't changing. See issue #14855.
i, j, k := rhs.SliceBounds()
if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64() == 0) {
// [0:...] is the same as [:...]
i = nil
}
// TODO: detect defaults for len/cap also.
// Currently doesn't really work because (*p)[:len(*p)] appears here as:
// tmp = len(*p)
// (*p)[:tmp]
//if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
// j = nil
//}
//if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
// k = nil
//}
if i == nil {
skip |= skipPtr
if j == nil {
skip |= skipLen
}
if k == nil {
skip |= skipCap
}
}
}
s.assign(n.Left, r, deref, skip)
case OIF:
bThen := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
var bElse *ssa.Block
var likely int8
if n.Likely() {
likely = 1
}
if n.Rlist.Len() != 0 {
bElse = s.f.NewBlock(ssa.BlockPlain)
s.condBranch(n.Left, bThen, bElse, likely)
} else {
s.condBranch(n.Left, bThen, bEnd, likely)
}
s.startBlock(bThen)
s.stmtList(n.Nbody)
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bEnd)
}
if n.Rlist.Len() != 0 {
s.startBlock(bElse)
s.stmtList(n.Rlist)
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bEnd)
}
}
s.startBlock(bEnd)
case ORETURN:
s.stmtList(n.List)
s.exit()
case ORETJMP:
s.stmtList(n.List)
b := s.exit()
b.Kind = ssa.BlockRetJmp // override BlockRet
b.Aux = n.Left.Sym.Linksym()
case OCONTINUE, OBREAK:
var to *ssa.Block
if n.Left == nil {
// plain break/continue
switch n.Op {
case OCONTINUE:
to = s.continueTo
case OBREAK:
to = s.breakTo
}
} else {
// labeled break/continue; look up the target
sym := n.Left.Sym
lab := s.label(sym)
switch n.Op {
case OCONTINUE:
to = lab.continueTarget
case OBREAK:
to = lab.breakTarget
}
}
b := s.endBlock()
b.AddEdgeTo(to)
case OFOR, OFORUNTIL:
// OFOR: for Ninit; Left; Right { Nbody }
// For = cond; body; incr
// Foruntil = body; incr; cond
bCond := s.f.NewBlock(ssa.BlockPlain)
bBody := s.f.NewBlock(ssa.BlockPlain)
bIncr := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
// first, jump to condition test (OFOR) or body (OFORUNTIL)
b := s.endBlock()
if n.Op == OFOR {
b.AddEdgeTo(bCond)
// generate code to test condition
s.startBlock(bCond)
if n.Left != nil {
s.condBranch(n.Left, bBody, bEnd, 1)
} else {
b := s.endBlock()
b.Kind = ssa.BlockPlain
b.AddEdgeTo(bBody)
}
} else {
b.AddEdgeTo(bBody)
}
// set up for continue/break in body
prevContinue := s.continueTo
prevBreak := s.breakTo
s.continueTo = bIncr
s.breakTo = bEnd
lab := s.labeledNodes[n]
if lab != nil {
// labeled for loop
lab.continueTarget = bIncr
lab.breakTarget = bEnd
}
// generate body
s.startBlock(bBody)
s.stmtList(n.Nbody)
// tear down continue/break
s.continueTo = prevContinue
s.breakTo = prevBreak
if lab != nil {
lab.continueTarget = nil
lab.breakTarget = nil
}
// done with body, goto incr
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bIncr)
}
// generate incr
s.startBlock(bIncr)
if n.Right != nil {
s.stmt(n.Right)
}
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bCond)
}
if n.Op == OFORUNTIL {
// generate code to test condition
s.startBlock(bCond)
if n.Left != nil {
s.condBranch(n.Left, bBody, bEnd, 1)
} else {
b := s.endBlock()
b.Kind = ssa.BlockPlain
b.AddEdgeTo(bBody)
}
}
s.startBlock(bEnd)
case OSWITCH, OSELECT:
// These have been mostly rewritten by the front end into their Nbody fields.
// Our main task is to correctly hook up any break statements.
bEnd := s.f.NewBlock(ssa.BlockPlain)
prevBreak := s.breakTo
s.breakTo = bEnd
lab := s.labeledNodes[n]
if lab != nil {
// labeled
lab.breakTarget = bEnd
}
// generate body code
s.stmtList(n.Nbody)
s.breakTo = prevBreak
if lab != nil {
lab.breakTarget = nil
}
// walk adds explicit OBREAK nodes to the end of all reachable code paths.
// If we still have a current block here, then mark it unreachable.
if s.curBlock != nil {
m := s.mem()
b := s.endBlock()
b.Kind = ssa.BlockExit
b.SetControl(m)
}
s.startBlock(bEnd)
case OVARKILL:
// Insert a varkill op to record that a variable is no longer live.
// We only care about liveness info at call sites, so putting the
// varkill in the store chain is enough to keep it correctly ordered
// with respect to call ops.
if !s.canSSA(n.Left) {
s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, n.Left, s.mem())
}
case OVARLIVE:
// Insert a varlive op to record that a variable is still live.
if !n.Left.Addrtaken() {
s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left)
}
switch n.Left.Class() {
case PAUTO, PPARAM, PPARAMOUT:
default:
s.Fatalf("VARLIVE variable %v must be Auto or Arg", n.Left)
}
s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left, s.mem())
case OCHECKNIL:
p := s.expr(n.Left)
s.nilCheck(p)
default:
s.Fatalf("unhandled stmt %v", n.Op)
}
}
// exit processes any code that needs to be generated just before returning.
// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
if s.hasdefer {
s.rtcall(Deferreturn, true, nil)
}
// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
// variables back to the stack.
s.stmtList(s.exitCode)
// Store SSAable PPARAMOUT variables back to stack locations.
for _, n := range s.returns {
addr := s.decladdrs[n]
val := s.variable(n, n.Type)
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, n.Type, addr, val, s.mem())
// TODO: if val is ever spilled, we'd like to use the
// PPARAMOUT slot for spilling it. That won't happen
// currently.
}
// Do actual return.
m := s.mem()
b := s.endBlock()
b.Kind = ssa.BlockRet
b.SetControl(m)
return b
}
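// opAndType is the key type of opToSSA: a Go operator paired with the concrete element type it operates on.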
type opAndType struct {
op Op
etype types.EType
}
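// opToSSA maps a Go operator and concrete operand type to the corresponding typed SSA op.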
var opToSSA = map[opAndType]ssa.Op{
opAndType{OADD, TINT8}: ssa.OpAdd8,
opAndType{OADD, TUINT8}: ssa.OpAdd8,
opAndType{OADD, TINT16}: ssa.OpAdd16,
opAndType{OADD, TUINT16}: ssa.OpAdd16,
opAndType{OADD, TINT32}: ssa.OpAdd32,
opAndType{OADD, TUINT32}: ssa.OpAdd32,
opAndType{OADD, TPTR32}: ssa.OpAdd32,
opAndType{OADD, TINT64}: ssa.OpAdd64,
opAndType{OADD, TUINT64}: ssa.OpAdd64,
opAndType{OADD, TPTR64}: ssa.OpAdd64,
opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,
opAndType{OSUB, TINT8}: ssa.OpSub8,
opAndType{OSUB, TUINT8}: ssa.OpSub8,
opAndType{OSUB, TINT16}: ssa.OpSub16,
opAndType{OSUB, TUINT16}: ssa.OpSub16,
opAndType{OSUB, TINT32}: ssa.OpSub32,
opAndType{OSUB, TUINT32}: ssa.OpSub32,
opAndType{OSUB, TINT64}: ssa.OpSub64,
opAndType{OSUB, TUINT64}: ssa.OpSub64,
opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,
opAndType{ONOT, TBOOL}: ssa.OpNot,
opAndType{OMINUS, TINT8}: ssa.OpNeg8,
opAndType{OMINUS, TUINT8}: ssa.OpNeg8,
opAndType{OMINUS, TINT16}: ssa.OpNeg16,
opAndType{OMINUS, TUINT16}: ssa.OpNeg16,
opAndType{OMINUS, TINT32}: ssa.OpNeg32,
opAndType{OMINUS, TUINT32}: ssa.OpNeg32,
opAndType{OMINUS, TINT64}: ssa.OpNeg64,
opAndType{OMINUS, TUINT64}: ssa.OpNeg64,
opAndType{OMINUS, TFLOAT32}: ssa.OpNeg32F,
opAndType{OMINUS, TFLOAT64}: ssa.OpNeg64F,
opAndType{OCOM, TINT8}: ssa.OpCom8,
opAndType{OCOM, TUINT8}: ssa.OpCom8,
opAndType{OCOM, TINT16}: ssa.OpCom16,
opAndType{OCOM, TUINT16}: ssa.OpCom16,
opAndType{OCOM, TINT32}: ssa.OpCom32,
opAndType{OCOM, TUINT32}: ssa.OpCom32,
opAndType{OCOM, TINT64}: ssa.OpCom64,
opAndType{OCOM, TUINT64}: ssa.OpCom64,
opAndType{OIMAG, TCOMPLEX64}: ssa.OpComplexImag,
opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag,
opAndType{OREAL, TCOMPLEX64}: ssa.OpComplexReal,
opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal,
opAndType{OMUL, TINT8}: ssa.OpMul8,
opAndType{OMUL, TUINT8}: ssa.OpMul8,
opAndType{OMUL, TINT16}: ssa.OpMul16,
opAndType{OMUL, TUINT16}: ssa.OpMul16,
opAndType{OMUL, TINT32}: ssa.OpMul32,
opAndType{OMUL, TUINT32}: ssa.OpMul32,
opAndType{OMUL, TINT64}: ssa.OpMul64,
opAndType{OMUL, TUINT64}: ssa.OpMul64,
opAndType{OMUL, TFLOAT32}: ssa.OpMul32F,
opAndType{OMUL, TFLOAT64}: ssa.OpMul64F,
opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F,
opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F,
opAndType{ODIV, TINT8}: ssa.OpDiv8,
opAndType{ODIV, TUINT8}: ssa.OpDiv8u,
opAndType{ODIV, TINT16}: ssa.OpDiv16,
opAndType{ODIV, TUINT16}: ssa.OpDiv16u,
opAndType{ODIV, TINT32}: ssa.OpDiv32,
opAndType{ODIV, TUINT32}: ssa.OpDiv32u,
opAndType{ODIV, TINT64}: ssa.OpDiv64,
opAndType{ODIV, TUINT64}: ssa.OpDiv64u,
opAndType{OMOD, TINT8}: ssa.OpMod8,
opAndType{OMOD, TUINT8}: ssa.OpMod8u,
opAndType{OMOD, TINT16}: ssa.OpMod16,
opAndType{OMOD, TUINT16}: ssa.OpMod16u,
opAndType{OMOD, TINT32}: ssa.OpMod32,
opAndType{OMOD, TUINT32}: ssa.OpMod32u,
opAndType{OMOD, TINT64}: ssa.OpMod64,
opAndType{OMOD, TUINT64}: ssa.OpMod64u,
opAndType{OAND, TINT8}: ssa.OpAnd8,
opAndType{OAND, TUINT8}: ssa.OpAnd8,
opAndType{OAND, TINT16}: ssa.OpAnd16,
opAndType{OAND, TUINT16}: ssa.OpAnd16,
opAndType{OAND, TINT32}: ssa.OpAnd32,
opAndType{OAND, TUINT32}: ssa.OpAnd32,
opAndType{OAND, TINT64}: ssa.OpAnd64,
opAndType{OAND, TUINT64}: ssa.OpAnd64,
opAndType{OOR, TINT8}: ssa.OpOr8,
opAndType{OOR, TUINT8}: ssa.OpOr8,
opAndType{OOR, TINT16}: ssa.OpOr16,
opAndType{OOR, TUINT16}: ssa.OpOr16,
opAndType{OOR, TINT32}: ssa.OpOr32,
opAndType{OOR, TUINT32}: ssa.OpOr32,
opAndType{OOR, TINT64}: ssa.OpOr64,
opAndType{OOR, TUINT64}: ssa.OpOr64,
opAndType{OXOR, TINT8}: ssa.OpXor8,
opAndType{OXOR, TUINT8}: ssa.OpXor8,
opAndType{OXOR, TINT16}: ssa.OpXor16,
opAndType{OXOR, TUINT16}: ssa.OpXor16,
opAndType{OXOR, TINT32}: ssa.OpXor32,
opAndType{OXOR, TUINT32}: ssa.OpXor32,
opAndType{OXOR, TINT64}: ssa.OpXor64,
opAndType{OXOR, TUINT64}: ssa.OpXor64,
opAndType{OEQ, TBOOL}: ssa.OpEqB,
opAndType{OEQ, TINT8}: ssa.OpEq8,
opAndType{OEQ, TUINT8}: ssa.OpEq8,
opAndType{OEQ, TINT16}: ssa.OpEq16,
opAndType{OEQ, TUINT16}: ssa.OpEq16,
opAndType{OEQ, TINT32}: ssa.OpEq32,
opAndType{OEQ, TUINT32}: ssa.OpEq32,
opAndType{OEQ, TINT64}: ssa.OpEq64,
opAndType{OEQ, TUINT64}: ssa.OpEq64,
opAndType{OEQ, TINTER}: ssa.OpEqInter,
opAndType{OEQ, TSLICE}: ssa.OpEqSlice,
opAndType{OEQ, TFUNC}: ssa.OpEqPtr,
opAndType{OEQ, TMAP}: ssa.OpEqPtr,
opAndType{OEQ, TCHAN}: ssa.OpEqPtr,
opAndType{OEQ, TPTR32}: ssa.OpEqPtr,
opAndType{OEQ, TPTR64}: ssa.OpEqPtr,
opAndType{OEQ, TUINTPTR}: ssa.OpEqPtr,
opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
opAndType{OEQ, TFLOAT64}: ssa.OpEq64F,
opAndType{OEQ, TFLOAT32}: ssa.OpEq32F,
opAndType{ONE, TBOOL}: ssa.OpNeqB,
opAndType{ONE, TINT8}: ssa.OpNeq8,
opAndType{ONE, TUINT8}: ssa.OpNeq8,
opAndType{ONE, TINT16}: ssa.OpNeq16,
opAndType{ONE, TUINT16}: ssa.OpNeq16,
opAndType{ONE, TINT32}: ssa.OpNeq32,
opAndType{ONE, TUINT32}: ssa.OpNeq32,
opAndType{ONE, TINT64}: ssa.OpNeq64,
opAndType{ONE, TUINT64}: ssa.OpNeq64,
opAndType{ONE, TINTER}: ssa.OpNeqInter,
opAndType{ONE, TSLICE}: ssa.OpNeqSlice,
opAndType{ONE, TFUNC}: ssa.OpNeqPtr,
opAndType{ONE, TMAP}: ssa.OpNeqPtr,
opAndType{ONE, TCHAN}: ssa.OpNeqPtr,
opAndType{ONE, TPTR32}: ssa.OpNeqPtr,
opAndType{ONE, TPTR64}: ssa.OpNeqPtr,
opAndType{ONE, TUINTPTR}: ssa.OpNeqPtr,
opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
opAndType{ONE, TFLOAT64}: ssa.OpNeq64F,
opAndType{ONE, TFLOAT32}: ssa.OpNeq32F,
opAndType{OLT, TINT8}: ssa.OpLess8,
opAndType{OLT, TUINT8}: ssa.OpLess8U,
opAndType{OLT, TINT16}: ssa.OpLess16,
opAndType{OLT, TUINT16}: ssa.OpLess16U,
opAndType{OLT, TINT32}: ssa.OpLess32,
opAndType{OLT, TUINT32}: ssa.OpLess32U,
opAndType{OLT, TINT64}: ssa.OpLess64,
opAndType{OLT, TUINT64}: ssa.OpLess64U,
opAndType{OLT, TFLOAT64}: ssa.OpLess64F,
opAndType{OLT, TFLOAT32}: ssa.OpLess32F,
opAndType{OGT, TINT8}: ssa.OpGreater8,
opAndType{OGT, TUINT8}: ssa.OpGreater8U,
opAndType{OGT, TINT16}: ssa.OpGreater16,
opAndType{OGT, TUINT16}: ssa.OpGreater16U,
opAndType{OGT, TINT32}: ssa.OpGreater32,
opAndType{OGT, TUINT32}: ssa.OpGreater32U,
opAndType{OGT, TINT64}: ssa.OpGreater64,
opAndType{OGT, TUINT64}: ssa.OpGreater64U,
opAndType{OGT, TFLOAT64}: ssa.OpGreater64F,
opAndType{OGT, TFLOAT32}: ssa.OpGreater32F,
opAndType{OLE, TINT8}: ssa.OpLeq8,
opAndType{OLE, TUINT8}: ssa.OpLeq8U,
opAndType{OLE, TINT16}: ssa.OpLeq16,
opAndType{OLE, TUINT16}: ssa.OpLeq16U,
opAndType{OLE, TINT32}: ssa.OpLeq32,
opAndType{OLE, TUINT32}: ssa.OpLeq32U,
opAndType{OLE, TINT64}: ssa.OpLeq64,
opAndType{OLE, TUINT64}: ssa.OpLeq64U,
opAndType{OLE, TFLOAT64}: ssa.OpLeq64F,
opAndType{OLE, TFLOAT32}: ssa.OpLeq32F,
opAndType{OGE, TINT8}: ssa.OpGeq8,
opAndType{OGE, TUINT8}: ssa.OpGeq8U,
opAndType{OGE, TINT16}: ssa.OpGeq16,
opAndType{OGE, TUINT16}: ssa.OpGeq16U,
opAndType{OGE, TINT32}: ssa.OpGeq32,
opAndType{OGE, TUINT32}: ssa.OpGeq32U,
opAndType{OGE, TINT64}: ssa.OpGeq64,
opAndType{OGE, TUINT64}: ssa.OpGeq64U,
opAndType{OGE, TFLOAT64}: ssa.OpGeq64F,
opAndType{OGE, TFLOAT32}: ssa.OpGeq32F,
}
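// concreteEtype returns the type kind of t, resolving the platform-sized kinds
// TINT, TUINT, and TUINTPTR to their fixed-size equivalents according to the
// target's pointer size.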
func (s *state) concreteEtype(t *types.Type) types.EType {
e := t.Etype
switch e {
default:
return e
case TINT:
if s.config.PtrSize == 8 {
return TINT64
}
return TINT32
case TUINT:
if s.config.PtrSize == 8 {
return TUINT64
}
return TUINT32
case TUINTPTR:
if s.config.PtrSize == 8 {
return TUINT64
}
return TUINT32
}
}
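// ssaOp returns the SSA op implementing the Go operator op on operands of type t.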
func (s *state) ssaOp(op Op, t *types.Type) ssa.Op {
etype := s.concreteEtype(t)
x, ok := opToSSA[opAndType{op, etype}]
if !ok {
s.Fatalf("unhandled binary op %v %s", op, etype)
}
return x
}
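// floatForComplex returns the float type of the real and imaginary components of the complex type t.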
func floatForComplex(t *types.Type) *types.Type {
if t.Size() == 8 {
return types.Types[TFLOAT32]
} else {
return types.Types[TFLOAT64]
}
}
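// opAndTwoTypes is the key type of shiftOpToSSA: a shift operator together with
// the operand type and the shift-count type.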
type opAndTwoTypes struct {
op Op
etype1 types.EType
etype2 types.EType
}
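// twoTypes is a (source, destination) type pair; it is the key type of the float-conversion tables below.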
type twoTypes struct {
etype1 types.EType
etype2 types.EType
}
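// twoOpsAndType describes a conversion performed as two SSA ops in sequence,
// with intermediateType as the type produced between them.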
type twoOpsAndType struct {
op1 ssa.Op
op2 ssa.Op
intermediateType types.EType
}
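// fpConvOpToSSA maps integer<->float and float<->float conversions to the pair of SSA ops that implement them.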
var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
twoTypes{TINT8, TFLOAT32}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32},
twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32},
twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32},
twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, TINT64},
twoTypes{TINT8, TFLOAT64}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32},
twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32},
twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32},
twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64},
twoTypes{TFLOAT32, TINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32},
twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64},
twoTypes{TFLOAT64, TINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32},
twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64},
// unsigned
twoTypes{TUINT8, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32},
twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32},
twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned
twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64}, // Cvt64Uto32F, branchy code expansion instead
twoTypes{TUINT8, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32},
twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32},
twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned
twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64}, // Cvt64Uto64F, branchy code expansion instead
twoTypes{TFLOAT32, TUINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64}, // Cvt32Fto64U, branchy code expansion instead
twoTypes{TFLOAT64, TUINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64}, // Cvt64Fto64U, branchy code expansion instead
// float
twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32},
twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpRound64F, ssa.OpCopy, TFLOAT64},
twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpRound32F, ssa.OpCopy, TFLOAT32},
twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64},
}
// this map is used only for 32-bit archs, and only includes the differences:
// on 32-bit archs, don't use int64<->float conversions for uint32
var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, TUINT32},
twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, TUINT32},
twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, TUINT32},
twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, TUINT32},
}
// uint64<->float conversions, only on machines that have instructions for that
var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, TUINT64},
twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, TUINT64},
twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, TUINT64},
twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, TUINT64},
}
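// shiftOpToSSA maps a shift operator and the operand/shift-count types to the corresponding SSA op.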
var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
opAndTwoTypes{OLSH, TINT8, TUINT8}: ssa.OpLsh8x8,
opAndTwoTypes{OLSH, TUINT8, TUINT8}: ssa.OpLsh8x8,
opAndTwoTypes{OLSH, TINT8, TUINT16}: ssa.OpLsh8x16,
opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16,
opAndTwoTypes{OLSH, TINT8, TUINT32}: ssa.OpLsh8x32,
opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32,
opAndTwoTypes{OLSH, TINT8, TUINT64}: ssa.OpLsh8x64,
opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64,
opAndTwoTypes{OLSH, TINT16, TUINT8}: ssa.OpLsh16x8,
opAndTwoTypes{OLSH, TUINT16, TUINT8}: ssa.OpLsh16x8,
opAndTwoTypes{OLSH, TINT16, TUINT16}: ssa.OpLsh16x16,
opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16,
opAndTwoTypes{OLSH, TINT16, TUINT32}: ssa.OpLsh16x32,
opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32,
opAndTwoTypes{OLSH, TINT16, TUINT64}: ssa.OpLsh16x64,
opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64,
opAndTwoTypes{OLSH, TINT32, TUINT8}: ssa.OpLsh32x8,
opAndTwoTypes{OLSH, TUINT32, TUINT8}: ssa.OpLsh32x8,
opAndTwoTypes{OLSH, TINT32, TUINT16}: ssa.OpLsh32x16,
opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16,
opAndTwoTypes{OLSH, TINT32, TUINT32}: ssa.OpLsh32x32,
opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32,
opAndTwoTypes{OLSH, TINT32, TUINT64}: ssa.OpLsh32x64,
opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64,
opAndTwoTypes{OLSH, TINT64, TUINT8}: ssa.OpLsh64x8,
opAndTwoTypes{OLSH, TUINT64, TUINT8}: ssa.OpLsh64x8,
opAndTwoTypes{OLSH, TINT64, TUINT16}: ssa.OpLsh64x16,
opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16,
opAndTwoTypes{OLSH, TINT64, TUINT32}: ssa.OpLsh64x32,
opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32,
opAndTwoTypes{OLSH, TINT64, TUINT64}: ssa.OpLsh64x64,
opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64,
opAndTwoTypes{ORSH, TINT8, TUINT8}: ssa.OpRsh8x8,
opAndTwoTypes{ORSH, TUINT8, TUINT8}: ssa.OpRsh8Ux8,
opAndTwoTypes{ORSH, TINT8, TUINT16}: ssa.OpRsh8x16,
opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16,
opAndTwoTypes{ORSH, TINT8, TUINT32}: ssa.OpRsh8x32,
opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32,
opAndTwoTypes{ORSH, TINT8, TUINT64}: ssa.OpRsh8x64,
opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64,
opAndTwoTypes{ORSH, TINT16, TUINT8}: ssa.OpRsh16x8,
opAndTwoTypes{ORSH, TUINT16, TUINT8}: ssa.OpRsh16Ux8,
opAndTwoTypes{ORSH, TINT16, TUINT16}: ssa.OpRsh16x16,
opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16,
opAndTwoTypes{ORSH, TINT16, TUINT32}: ssa.OpRsh16x32,
opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32,
opAndTwoTypes{ORSH, TINT16, TUINT64}: ssa.OpRsh16x64,
opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64,
opAndTwoTypes{ORSH, TINT32, TUINT8}: ssa.OpRsh32x8,
opAndTwoTypes{ORSH, TUINT32, TUINT8}: ssa.OpRsh32Ux8,
opAndTwoTypes{ORSH, TINT32, TUINT16}: ssa.OpRsh32x16,
opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16,
opAndTwoTypes{ORSH, TINT32, TUINT32}: ssa.OpRsh32x32,
opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32,
opAndTwoTypes{ORSH, TINT32, TUINT64}: ssa.OpRsh32x64,
opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64,
opAndTwoTypes{ORSH, TINT64, TUINT8}: ssa.OpRsh64x8,
opAndTwoTypes{ORSH, TUINT64, TUINT8}: ssa.OpRsh64Ux8,
opAndTwoTypes{ORSH, TINT64, TUINT16}: ssa.OpRsh64x16,
opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16,
opAndTwoTypes{ORSH, TINT64, TUINT32}: ssa.OpRsh64x32,
opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32,
opAndTwoTypes{ORSH, TINT64, TUINT64}: ssa.OpRsh64x64,
opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
}
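// ssaShiftOp returns the SSA op for shifting a value of type t by a count of type u.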
func (s *state) ssaShiftOp(op Op, t *types.Type, u *types.Type) ssa.Op {
etype1 := s.concreteEtype(t)
etype2 := s.concreteEtype(u)
x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
if !ok {
s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
}
return x
}
// expr converts the expression n to ssa, adds it to s and returns the ssa result.
func (s *state) expr(n *Node) *ssa.Value {
if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) {
// ONAMEs and named OLITERALs have the line number
// of the decl, not the use. See issue 14742.
s.pushLine(n.Pos)
defer s.popLine()
}
s.stmtList(n.Ninit)
switch n.Op {
case OARRAYBYTESTRTMP:
slice := s.expr(n.Left)
ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
case OSTRARRAYBYTETMP:
str := s.expr(n.Left)
ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
len := s.newValue1(ssa.OpStringLen, types.Types[TINT], str)
return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len)
case OCFUNC:
aux := n.Left.Sym.Linksym()
return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
case ONAME:
if n.Class() == PFUNC {
// "value" of a function is the address of the function's closure
sym := funcsym(n.Sym).Linksym()
return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), sym, s.sb)
}
if s.canSSA(n) {
return s.variable(n, n.Type)
}
addr := s.addr(n, false)
return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
case OCLOSUREVAR:
addr := s.addr(n, false)
return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
case OLITERAL:
switch u := n.Val().U.(type) {
case *Mpint:
i := u.Int64()
switch n.Type.Size() {
case 1:
return s.constInt8(n.Type, int8(i))
case 2:
return s.constInt16(n.Type, int16(i))
case 4:
return s.constInt32(n.Type, int32(i))
case 8:
return s.constInt64(n.Type, i)
default:
s.Fatalf("bad integer size %d", n.Type.Size())
return nil
}
case string:
if u == "" {
return s.constEmptyString(n.Type)
}
return s.entryNewValue0A(ssa.OpConstString, n.Type, u)
case bool:
return s.constBool(u)
case *NilVal:
t := n.Type
switch {
case t.IsSlice():
return s.constSlice(t)
case t.IsInterface():
return s.constInterface(t)
default:
return s.constNil(t)
}
case *Mpflt:
switch n.Type.Size() {
case 4:
return s.constFloat32(n.Type, u.Float32())
case 8:
return s.constFloat64(n.Type, u.Float64())
default:
s.Fatalf("bad float size %d", n.Type.Size())
return nil
}
case *Mpcplx:
r := &u.Real
i := &u.Imag
switch n.Type.Size() {
case 8:
pt := types.Types[TFLOAT32]
return s.newValue2(ssa.OpComplexMake, n.Type,
s.constFloat32(pt, r.Float32()),
s.constFloat32(pt, i.Float32()))
case 16:
pt := types.Types[TFLOAT64]
return s.newValue2(ssa.OpComplexMake, n.Type,
s.constFloat64(pt, r.Float64()),
s.constFloat64(pt, i.Float64()))
default:
s.Fatalf("bad float size %d", n.Type.Size())
return nil
}
default:
s.Fatalf("unhandled OLITERAL %v", n.Val().Ctype())
return nil
}
case OCONVNOP:
to := n.Type
from := n.Left.Type
// Assume everything will work out, so set up our return value.
// Anything that goes wrong from here on is a fatal error.
x := s.expr(n.Left)
// Special case for not confusing GC and liveness.
// We don't want pointers accidentally classified
// as not-pointers or vice-versa because of copy
// elision.
if to.IsPtrShaped() != from.IsPtrShaped() {
return s.newValue2(ssa.OpConvert, to, x, s.mem())
}
v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type
// CONVNOP closure
if to.Etype == TFUNC && from.IsPtrShaped() {
return v
}
// named <--> unnamed type or typed <--> untyped const
if from.Etype == to.Etype {
return v
}
// unsafe.Pointer <--> *T
if to.Etype == TUNSAFEPTR && from.IsPtr() || from.Etype == TUNSAFEPTR && to.IsPtr() {
return v
}
dowidth(from)
dowidth(to)
if from.Width != to.Width {
s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
return nil
}
if etypesign(from.Etype) != etypesign(to.Etype) {
s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype)
return nil
}
if instrumenting {
// These appear to be fine, but they fail the
// integer constraint below, so okay them here.
// Sample non-integer conversion: map[string]string -> *uint8
return v
}
if etypesign(from.Etype) == 0 {
s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
return nil
}
// integer, same width, same sign
return v
case OCONV:
x := s.expr(n.Left)
ft := n.Left.Type // from type
tt := n.Type // to type
if ft.IsBoolean() && tt.IsKind(TUINT8) {
// Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
return s.newValue1(ssa.OpCopy, n.Type, x)
}
if ft.IsInteger() && tt.IsInteger() {
var op ssa.Op
if tt.Size() == ft.Size() {
op = ssa.OpCopy
} else if tt.Size() < ft.Size() {
// truncation
switch 10*ft.Size() + tt.Size() {
case 21:
op = ssa.OpTrunc16to8
case 41:
op = ssa.OpTrunc32to8
case 42:
op = ssa.OpTrunc32to16
case 81:
op = ssa.OpTrunc64to8
case 82:
op = ssa.OpTrunc64to16
case 84:
op = ssa.OpTrunc64to32
default:
s.Fatalf("weird integer truncation %v -> %v", ft, tt)
}
} else if ft.IsSigned() {
// sign extension
switch 10*ft.Size() + tt.Size() {
case 12:
op = ssa.OpSignExt8to16
case 14:
op = ssa.OpSignExt8to32
case 18:
op = ssa.OpSignExt8to64
case 24:
op = ssa.OpSignExt16to32
case 28:
op = ssa.OpSignExt16to64
case 48:
op = ssa.OpSignExt32to64
default:
s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
}
} else {
// zero extension
switch 10*ft.Size() + tt.Size() {
case 12:
op = ssa.OpZeroExt8to16
case 14:
op = ssa.OpZeroExt8to32
case 18:
op = ssa.OpZeroExt8to64
case 24:
op = ssa.OpZeroExt16to32
case 28:
op = ssa.OpZeroExt16to64
case 48:
op = ssa.OpZeroExt32to64
default:
s.Fatalf("weird integer sign extension %v -> %v", ft, tt)
}
}
return s.newValue1(op, n.Type, x)
}
if ft.IsFloat() || tt.IsFloat() {
conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
if s.config.RegSize == 4 && thearch.LinkArch.Family != sys.MIPS {
if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
conv = conv1
}
}
if thearch.LinkArch.Family == sys.ARM64 {
if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
conv = conv1
}
}
if thearch.LinkArch.Family == sys.MIPS {
if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
// ft is a 32-bit unsigned integer; tt is float32 or float64
if tt.Size() == 4 {
return s.uint32Tofloat32(n, x, ft, tt)
}
if tt.Size() == 8 {
return s.uint32Tofloat64(n, x, ft, tt)
}
} else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
// ft is float32 or float64, and tt is unsigned integer
if ft.Size() == 4 {
return s.float32ToUint32(n, x, ft, tt)
}
if ft.Size() == 8 {
return s.float64ToUint32(n, x, ft, tt)
}
}
}
if !ok {
s.Fatalf("weird float conversion %v -> %v", ft, tt)
}
op1, op2, it := conv.op1, conv.op2, conv.intermediateType
if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
// normal case, not tripping over unsigned 64
if op1 == ssa.OpCopy {
if op2 == ssa.OpCopy {
return x
}
return s.newValue1(op2, n.Type, x)
}
if op2 == ssa.OpCopy {
return s.newValue1(op1, n.Type, x)
}
return s.newValue1(op2, n.Type, s.newValue1(op1, types.Types[it], x))
}
// Tricky 64-bit unsigned cases.
if ft.IsInteger() {
// ft is a 64-bit unsigned integer; tt is float32 or float64
if tt.Size() == 4 {
return s.uint64Tofloat32(n, x, ft, tt)
}
if tt.Size() == 8 {
return s.uint64Tofloat64(n, x, ft, tt)
}
s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt)
}
// ft is float32 or float64, and tt is unsigned integer
if ft.Size() == 4 {
return s.float32ToUint64(n, x, ft, tt)
}
if ft.Size() == 8 {
return s.float64ToUint64(n, x, ft, tt)
}
s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt)
return nil
}
if ft.IsComplex() && tt.IsComplex() {
var op ssa.Op
if ft.Size() == tt.Size() {
switch ft.Size() {
case 8:
op = ssa.OpRound32F
case 16:
op = ssa.OpRound64F
default:
s.Fatalf("weird complex conversion %v -> %v", ft, tt)
}
} else if ft.Size() == 8 && tt.Size() == 16 {
op = ssa.OpCvt32Fto64F
} else if ft.Size() == 16 && tt.Size() == 8 {
op = ssa.OpCvt64Fto32F
} else {
s.Fatalf("weird complex conversion %v -> %v", ft, tt)
}
ftp := floatForComplex(ft)
ttp := floatForComplex(tt)
return s.newValue2(ssa.OpComplexMake, tt,
s.newValue1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)),
s.newValue1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
}
s.Fatalf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype)
return nil
case ODOTTYPE:
res, _ := s.dottype(n, false)
return res
// binary ops
case OLT, OEQ, ONE, OLE, OGE, OGT:
a := s.expr(n.Left)
b := s.expr(n.Right)
if n.Left.Type.IsComplex() {
pt := floatForComplex(n.Left.Type)
op := s.ssaOp(OEQ, pt)
r := s.newValue2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
i := s.newValue2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
c := s.newValue2(ssa.OpAndB, types.Types[TBOOL], r, i)
switch n.Op {
case OEQ:
return c
case ONE:
return s.newValue1(ssa.OpNot, types.Types[TBOOL], c)
default:
s.Fatalf("ordered complex compare %v", n.Op)
}
}
return s.newValue2(s.ssaOp(n.Op, n.Left.Type), types.Types[TBOOL], a, b)
case OMUL:
a := s.expr(n.Left)
b := s.expr(n.Right)
if n.Type.IsComplex() {
mulop := ssa.OpMul64F
addop := ssa.OpAdd64F
subop := ssa.OpSub64F
pt := floatForComplex(n.Type) // Could be Float32 or Float64
wt := types.Types[TFLOAT64] // Compute in Float64 to minimize cancelation error
areal := s.newValue1(ssa.OpComplexReal, pt, a)
breal := s.newValue1(ssa.OpComplexReal, pt, b)
aimag := s.newValue1(ssa.OpComplexImag, pt, a)
bimag := s.newValue1(ssa.OpComplexImag, pt, b)
if pt != wt { // Widen for calculation
areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal)
breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal)
aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag)
bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag)
}
xreal := s.newValue2(subop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag))
ximag := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, bimag), s.newValue2(mulop, wt, aimag, breal))
if pt != wt { // Narrow to store back
xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal)
ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag)
}
return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
}
return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
case ODIV:
a := s.expr(n.Left)
b := s.expr(n.Right)
if n.Type.IsComplex() {
// TODO this is not executed because the front-end substitutes a runtime call.
// That probably ought to change; with modest optimization the widen/narrow
// conversions could all be elided in larger expression trees.
mulop := ssa.OpMul64F
addop := ssa.OpAdd64F
subop := ssa.OpSub64F
divop := ssa.OpDiv64F
pt := floatForComplex(n.Type) // Could be Float32 or Float64
wt := types.Types[TFLOAT64] // Compute in Float64 to minimize cancelation error
areal := s.newValue1(ssa.OpComplexReal, pt, a)
breal := s.newValue1(ssa.OpComplexReal, pt, b)
aimag := s.newValue1(ssa.OpComplexImag, pt, a)
bimag := s.newValue1(ssa.OpComplexImag, pt, b)
if pt != wt { // Widen for calculation
areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal)
breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal)
aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag)
bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag)
}
denom := s.newValue2(addop, wt, s.newValue2(mulop, wt, breal, breal), s.newValue2(mulop, wt, bimag, bimag))
xreal := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag))
ximag := s.newValue2(subop, wt, s.newValue2(mulop, wt, aimag, breal), s.newValue2(mulop, wt, areal, bimag))
// TODO not sure if this is best done in wide precision or narrow
// Double-rounding might be an issue.
// Note that the pre-SSA implementation does the entire calculation
// in wide format, so wide is compatible.
xreal = s.newValue2(divop, wt, xreal, denom)
ximag = s.newValue2(divop, wt, ximag, denom)
if pt != wt { // Narrow to store back
xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal)
ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag)
}
return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
}
if n.Type.IsFloat() {
return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
}
return s.intDivide(n, a, b)
case OMOD:
a := s.expr(n.Left)
b := s.expr(n.Right)
return s.intDivide(n, a, b)
case OADD, OSUB:
a := s.expr(n.Left)
b := s.expr(n.Right)
if n.Type.IsComplex() {
pt := floatForComplex(n.Type)
op := s.ssaOp(n.Op, pt)
return s.newValue2(ssa.OpComplexMake, n.Type,
s.newValue2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
s.newValue2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
}
return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
case OAND, OOR, OXOR:
a := s.expr(n.Left)
b := s.expr(n.Right)
return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
case OLSH, ORSH:
a := s.expr(n.Left)
b := s.expr(n.Right)
return s.newValue2(s.ssaShiftOp(n.Op, n.Type, n.Right.Type), a.Type, a, b)
case OANDAND, OOROR:
// To implement OANDAND (and OOROR), we introduce a
// new temporary variable to hold the result. The
// variable is associated with the OANDAND node in the
// s.vars table (normally variables are only
// associated with ONAME nodes). We convert
// A && B
// to
// var = A
// if var {
// var = B
// }
// Using var in the subsequent block introduces the
// necessary phi variable.
el := s.expr(n.Left)
s.vars[n] = el
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(el)
// In theory, we should set b.Likely here based on context.
// However, gc only gives us likeliness hints
// in a single place, for plain OIF statements,
// and passing around context is finicky, so don't bother for now.
bRight := s.f.NewBlock(ssa.BlockPlain)
bResult := s.f.NewBlock(ssa.BlockPlain)
if n.Op == OANDAND {
b.AddEdgeTo(bRight)
b.AddEdgeTo(bResult)
} else if n.Op == OOROR {
b.AddEdgeTo(bResult)
b.AddEdgeTo(bRight)
}
s.startBlock(bRight)
er := s.expr(n.Right)
s.vars[n] = er
b = s.endBlock()
b.AddEdgeTo(bResult)
s.startBlock(bResult)
return s.variable(n, types.Types[TBOOL])
case OCOMPLEX:
r := s.expr(n.Left)
i := s.expr(n.Right)
return s.newValue2(ssa.OpComplexMake, n.Type, r, i)
// unary ops
case OMINUS:
a := s.expr(n.Left)
if n.Type.IsComplex() {
tp := floatForComplex(n.Type)
negop := s.ssaOp(n.Op, tp)
return s.newValue2(ssa.OpComplexMake, n.Type,
s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
}
return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
case ONOT, OCOM:
a := s.expr(n.Left)
return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
case OIMAG, OREAL:
a := s.expr(n.Left)
return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a)
case OPLUS:
return s.expr(n.Left)
case OADDR:
return s.addr(n.Left, n.Bounded())
case OINDREGSP:
addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset)
return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
case OIND:
p := s.exprPtr(n.Left, false, n.Pos)
return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
case ODOT:
t := n.Left.Type
if canSSAType(t) {
v := s.expr(n.Left)
return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v)
}
if n.Left.Op == OSTRUCTLIT {
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
if !iszero(n.Left) {
Fatalf("literal with nonzero value in SSA: %v", n.Left)
}
return s.zeroVal(n.Type)
}
p := s.addr(n, false)
return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
case ODOTPTR:
p := s.exprPtr(n.Left, false, n.Pos)
p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type), n.Xoffset, p)
return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
case OINDEX:
switch {
case n.Left.Type.IsString():
if n.Bounded() && Isconst(n.Left, CTSTR) && Isconst(n.Right, CTINT) {
// Replace "abc"[1] with 'b'.
// Delayed until now because "abc"[1] is not an ideal constant.
// See test/fixedbugs/issue11370.go.
return s.newValue0I(ssa.OpConst8, types.Types[TUINT8], int64(int8(n.Left.Val().U.(string)[n.Right.Int64()])))
}
a := s.expr(n.Left)
i := s.expr(n.Right)
i = s.extendIndex(i, panicindex)
if !n.Bounded() {
len := s.newValue1(ssa.OpStringLen, types.Types[TINT], a)
s.boundsCheck(i, len)
}
ptrtyp := s.f.Config.Types.BytePtr
ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
if Isconst(n.Right, CTINT) {
ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64(), ptr)
} else {
ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
}
return s.newValue2(ssa.OpLoad, types.Types[TUINT8], ptr, s.mem())
case n.Left.Type.IsSlice():
p := s.addr(n, false)
return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem())
case n.Left.Type.IsArray():
if bound := n.Left.Type.NumElem(); bound <= 1 {
// SSA can handle arrays of length at most 1.
a := s.expr(n.Left)
i := s.expr(n.Right)
if bound == 0 {
// Bounds check will never succeed. Might as well
// use constants for the bounds check.
z := s.constInt(types.Types[TINT], 0)
s.boundsCheck(z, z)
// The return value won't be live, return junk.
return s.newValue0(ssa.OpUnknown, n.Type)
}
i = s.extendIndex(i, panicindex)
if !n.Bounded() {
s.boundsCheck(i, s.constInt(types.Types[TINT], bound))
}
return s.newValue1I(ssa.OpArraySelect, n.Type, 0, a)
}
p := s.addr(n, false)
return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem())
default:
s.Fatalf("bad type for index %v", n.Left.Type)
return nil
}
case OLEN, OCAP:
switch {
case n.Left.Type.IsSlice():
op := ssa.OpSliceLen
if n.Op == OCAP {
op = ssa.OpSliceCap
}
return s.newValue1(op, types.Types[TINT], s.expr(n.Left))
case n.Left.Type.IsString(): // string; not reachable for OCAP
return s.newValue1(ssa.OpStringLen, types.Types[TINT], s.expr(n.Left))
case n.Left.Type.IsMap(), n.Left.Type.IsChan():
return s.referenceTypeBuiltin(n, s.expr(n.Left))
default: // array
return s.constInt(types.Types[TINT], n.Left.Type.NumElem())
}
case OSPTR:
a := s.expr(n.Left)
if n.Left.Type.IsSlice() {
return s.newValue1(ssa.OpSlicePtr, n.Type, a)
} else {
return s.newValue1(ssa.OpStringPtr, n.Type, a)
}
case OITAB:
a := s.expr(n.Left)
return s.newValue1(ssa.OpITab, n.Type, a)
case OIDATA:
a := s.expr(n.Left)
return s.newValue1(ssa.OpIData, n.Type, a)
case OEFACE:
tab := s.expr(n.Left)
data := s.expr(n.Right)
return s.newValue2(ssa.OpIMake, n.Type, tab, data)
case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR:
v := s.expr(n.Left)
var i, j, k *ssa.Value
low, high, max := n.SliceBounds()
if low != nil {
i = s.extendIndex(s.expr(low), panicslice)
}
if high != nil {
j = s.extendIndex(s.expr(high), panicslice)
}
if max != nil {
k = s.extendIndex(s.expr(max), panicslice)
}
p, l, c := s.slice(n.Left.Type, v, i, j, k)
return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)
case OSLICESTR:
v := s.expr(n.Left)
var i, j *ssa.Value
low, high, _ := n.SliceBounds()
if low != nil {
i = s.extendIndex(s.expr(low), panicslice)
}
if high != nil {
j = s.extendIndex(s.expr(high), panicslice)
}
p, l, _ := s.slice(n.Left.Type, v, i, j, nil)
return s.newValue2(ssa.OpStringMake, n.Type, p, l)
case OCALLFUNC:
if isIntrinsicCall(n) {
return s.intrinsicCall(n)
}
fallthrough
case OCALLINTER, OCALLMETH:
a := s.call(n, callNormal)
return s.newValue2(ssa.OpLoad, n.Type, a, s.mem())
case OGETG:
return s.newValue1(ssa.OpGetG, n.Type, s.mem())
case OAPPEND:
return s.append(n, false)
case OSTRUCTLIT, OARRAYLIT:
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
if !iszero(n) {
Fatalf("literal with nonzero value in SSA: %v", n)
}
return s.zeroVal(n.Type)
default:
s.Fatalf("unhandled expr %v", n.Op)
return nil
}
}
// append converts an OAPPEND node to SSA.
// If inplace is false, it converts the OAPPEND expression n to an ssa.Value,
// adds it to s, and returns the Value.
// If inplace is true, it writes the result of the OAPPEND expression n
// back to the slice being appended to, and returns nil.
// inplace MUST be set to false if the slice can be SSA'd.
func (s *state) append(n *Node, inplace bool) *ssa.Value {
// If inplace is false, process as expression "append(s, e1, e2, e3)":
//
// ptr, len, cap := s
// newlen := len + 3
// if newlen > cap {
// ptr, len, cap = growslice(s, newlen)
// newlen = len + 3 // recalculate to avoid a spill
// }
// // with write barriers, if needed:
// *(ptr+len) = e1
// *(ptr+len+1) = e2
// *(ptr+len+2) = e3
// return makeslice(ptr, newlen, cap)
//
//
// If inplace is true, process as statement "s = append(s, e1, e2, e3)":
//
// a := &s
// ptr, len, cap := s
// newlen := len + 3
// if newlen > cap {
// newptr, len, newcap = growslice(ptr, len, cap, newlen)
// vardef(a) // if necessary, advise liveness we are writing a new a
// *a.cap = newcap // write before ptr to avoid a spill
// *a.ptr = newptr // with write barrier
// }
// newlen = len + 3 // recalculate to avoid a spill
// *a.len = newlen
// // with write barriers, if needed:
// *(ptr+len) = e1
// *(ptr+len+1) = e2
// *(ptr+len+2) = e3
et := n.Type.Elem()
pt := types.NewPtr(et)
// Evaluate slice
sn := n.List.First() // the slice node is the first in the list
var slice, addr *ssa.Value
if inplace {
addr = s.addr(sn, false)
slice = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
} else {
slice = s.expr(sn)
}
// Allocate new blocks
grow := s.f.NewBlock(ssa.BlockPlain)
assign := s.f.NewBlock(ssa.BlockPlain)
// Decide if we need to grow
nargs := int64(n.List.Len() - 1)
p := s.newValue1(ssa.OpSlicePtr, pt, slice)
l := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
c := s.newValue1(ssa.OpSliceCap, types.Types[TINT], slice)
nl := s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs))
cmp := s.newValue2(s.ssaOp(OGT, types.Types[TINT]), types.Types[TBOOL], nl, c)
s.vars[&ptrVar] = p
if !inplace {
s.vars[&newlenVar] = nl
s.vars[&capVar] = c
} else {
s.vars[&lenVar] = l
}
b := s.endBlock()
b.Kind = ssa.BlockIf
b.Likely = ssa.BranchUnlikely
b.SetControl(cmp)
b.AddEdgeTo(grow)
b.AddEdgeTo(assign)
// Call growslice
s.startBlock(grow)
taddr := s.expr(n.Left)
r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[TINT], types.Types[TINT]}, taddr, p, l, c, nl)
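// growslice returns the new slice; r[0], r[1], r[2] are its ptr, len, and cap.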
if inplace {
if sn.Op == ONAME && sn.Class() != PEXTERN {
// Tell liveness we're about to build a new slice
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
}
capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_cap), addr)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], capaddr, r[2], s.mem())
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, pt, addr, r[0], s.mem())
// load the value we just stored to avoid having to spill it
s.vars[&ptrVar] = s.newValue2(ssa.OpLoad, pt, addr, s.mem())
s.vars[&lenVar] = r[1] // avoid a spill in the fast path
} else {
s.vars[&ptrVar] = r[0]
s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], r[1], s.constInt(types.Types[TINT], nargs))
s.vars[&capVar] = r[2]
}
b = s.endBlock()
b.AddEdgeTo(assign)
// assign new elements to slots
s.startBlock(assign)
if inplace {
l = s.variable(&lenVar, types.Types[TINT]) // generates phi for len
nl = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs))
lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_nel), addr)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], lenaddr, nl, s.mem())
}
// Evaluate args
type argRec struct {
// if store is true, we're appending the value v. If false, we're appending the
// value at *v.
v *ssa.Value
store bool
}
args := make([]argRec, 0, nargs)
for _, n := range n.List.Slice()[1:] {
if canSSAType(n.Type) {
args = append(args, argRec{v: s.expr(n), store: true})
} else {
v := s.addr(n, false)
args = append(args, argRec{v: v})
}
}
p = s.variable(&ptrVar, pt) // generates phi for ptr
if !inplace {
nl = s.variable(&newlenVar, types.Types[TINT]) // generates phi for nl
c = s.variable(&capVar, types.Types[TINT]) // generates phi for cap
}
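// p2 = &p[l], the address of the first appended element.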
p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
for i, arg := range args {
addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[TINT], int64(i)))
if arg.store {
s.storeType(et, addr, arg.v, 0)
} else {
store := s.newValue3I(ssa.OpMove, types.TypeMem, et.Size(), addr, arg.v, s.mem())
store.Aux = et
s.vars[&memVar] = store
}
}
delete(s.vars, &ptrVar)
if inplace {
delete(s.vars, &lenVar)
return nil
}
delete(s.vars, &newlenVar)
delete(s.vars, &capVar)
// make result
return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c)
}
// condBranch evaluates the boolean expression cond and branches to yes
// if cond is true and no if cond is false.
// This function is intended to handle && and || better than just calling
// s.expr(cond) and branching on the result.
func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
if cond.Op == OANDAND {
mid := s.f.NewBlock(ssa.BlockPlain)
s.stmtList(cond.Ninit)
s.condBranch(cond.Left, mid, no, max8(likely, 0))
s.startBlock(mid)
s.condBranch(cond.Right, yes, no, likely)
return
// Note: if likely==1, then both recursive calls pass 1.
// If likely==-1, then we don't have enough information to decide
// whether the first branch is likely or not. So we pass 0 for
// the likeliness of the first branch.
// TODO: have the frontend give us branch prediction hints for
// OANDAND and OOROR nodes (if it ever has such info).
}
if cond.Op == OOROR {
mid := s.f.NewBlock(ssa.BlockPlain)
s.stmtList(cond.Ninit)
s.condBranch(cond.Left, yes, mid, min8(likely, 0))
s.startBlock(mid)
s.condBranch(cond.Right, yes, no, likely)
return
// Note: if likely==-1, then both recursive calls pass -1.
// If likely==1, then we don't have enough info to decide
// the likelihood of the first branch.
}
if cond.Op == ONOT {
s.stmtList(cond.Ninit)
s.condBranch(cond.Left, no, yes, -likely)
return
}
c := s.expr(cond)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(c)
b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
b.AddEdgeTo(yes)
b.AddEdgeTo(no)
}
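// skipMask is a bit set recording which parts of a compound assignment
// (pointer, length, capacity) can be omitted.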
type skipMask uint8
const (
skipPtr skipMask = 1 << iota
skipLen
skipCap
)
// assign does left = right.
// Right has already been evaluated to ssa, left has not.
// If deref is true, then we do left = *right instead (and right has already been nil-checked).
// If deref is true and right == nil, just do left = 0.
// skip indicates assignments (at the top level) that can be avoided.
func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask) {
if left.Op == ONAME && isblank(left) {
return
}
t := left.Type
dowidth(t)
if s.canSSA(left) {
if deref {
s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
}
if left.Op == ODOT {
// We're assigning to a field of an ssa-able value.
// We need to build a new structure with the new value for the
// field we're assigning and the old values for the other fields.
// For instance:
// type T struct {a, b, c int}
// var x T
// x.b = 5
// For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
// Grab information about the structure type.
t := left.Left.Type
nf := t.NumFields()
idx := fieldIdx(left)
// Grab old value of structure.
old := s.expr(left.Left)
// Make new structure.
new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
// Add fields as args.
for i := 0; i < nf; i++ {
if i == idx {
new.AddArg(right)
} else {
new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
}
}
// Recursively assign the new value we've made to the base of the dot op.
s.assign(left.Left, new, false, 0)
// TODO: do we need to update named values here?
return
}
if left.Op == OINDEX && left.Left.Type.IsArray() {
// We're assigning to an element of an ssa-able array.
// a[i] = v
t := left.Left.Type
n := t.NumElem()
i := s.expr(left.Right) // index
if n == 0 {
// The bounds check must fail. Might as well
// ignore the actual index and just use zeros.
z := s.constInt(types.Types[TINT], 0)
s.boundsCheck(z, z)
return
}
if n != 1 {
s.Fatalf("assigning to non-1-length array")
}
// Rewrite to a = [1]{v}
i = s.extendIndex(i, panicindex)
s.boundsCheck(i, s.constInt(types.Types[TINT], 1))
v := s.newValue1(ssa.OpArrayMake1, t, right)
s.assign(left.Left, v, false, 0)
return
}
// Update variable assignment.
s.vars[left] = right
s.addNamedValue(left, right)
return
}
// Left is not ssa-able. Compute its address.
addr := s.addr(left, false)
if left.Op == ONAME && left.Class() != PEXTERN && skip == 0 {
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, left, s.mem())
}
if isReflectHeaderDataField(left) {
// Package unsafe's documentation says storing pointers into
// reflect.SliceHeader and reflect.StringHeader's Data fields
// is valid, even though they have type uintptr (#19168).
// Mark it pointer type to signal the writebarrier pass to
// insert a write barrier.
t = types.Types[TUNSAFEPTR]
}
if deref {
// Treat as a mem->mem move.
var store *ssa.Value
if right == nil {
store = s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), addr, s.mem())
} else {
store = s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), addr, right, s.mem())
}
store.Aux = t
s.vars[&memVar] = store
return
}
// Treat as a store.
s.storeType(t, addr, right, skip)
}
// zeroVal returns the zero value for type t.
func (s *state) zeroVal(t *types.Type) *ssa.Value {
switch {
case t.IsInteger():
switch t.Size() {
case 1:
return s.constInt8(t, 0)
case 2:
return s.constInt16(t, 0)
case 4:
return s.constInt32(t, 0)
case 8:
return s.constInt64(t, 0)
default:
s.Fatalf("bad sized integer type %v", t)
}
case t.IsFloat():
switch t.Size() {
case 4:
return s.constFloat32(t, 0)
case 8:
return s.constFloat64(t, 0)
default:
s.Fatalf("bad sized float type %v", t)
}
case t.IsComplex():
switch t.Size() {
case 8:
z := s.constFloat32(types.Types[TFLOAT32], 0)
return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
case 16:
z := s.constFloat64(types.Types[TFLOAT64], 0)
return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
default:
s.Fatalf("bad sized complex type %v", t)
}
case t.IsString():
return s.constEmptyString(t)
case t.IsPtrShaped():
return s.constNil(t)
case t.IsBoolean():
return s.constBool(false)
case t.IsInterface():
return s.constInterface(t)
case t.IsSlice():
return s.constSlice(t)
case t.IsStruct():
n := t.NumFields()
v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
for i := 0; i < n; i++ {
v.AddArg(s.zeroVal(t.FieldType(i)))
}
return v
case t.IsArray():
switch t.NumElem() {
case 0:
return s.entryNewValue0(ssa.OpArrayMake0, t)
case 1:
return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem()))
}
}
s.Fatalf("zero for type %v not implemented", t)
return nil
}
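// callKind distinguishes ordinary calls from calls made on behalf of
// defer and go statements.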
type callKind int8
const (
callNormal callKind = iota
callDefer
callGo
)
var intrinsics map[intrinsicKey]intrinsicBuilder
// An intrinsicBuilder converts a call node n into an ssa value that
// implements that call as an intrinsic. args is a list of arguments to the func.
type intrinsicBuilder func(s *state, n *Node, args []*ssa.Value) *ssa.Value
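// An intrinsicKey identifies an intrinsic by target architecture, package path, and function name.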
type intrinsicKey struct {
arch *sys.Arch
pkg string
fn string
}
func init() {
intrinsics = map[intrinsicKey]intrinsicBuilder{}
var all []*sys.Arch
var p4 []*sys.Arch
var p8 []*sys.Arch
for _, a := range sys.Archs {
all = append(all, a)
if a.PtrSize == 4 {
p4 = append(p4, a)
} else {
p8 = append(p8, a)
}
}
// add adds the intrinsic b for pkg.fn for the given list of architectures.
add := func(pkg, fn string, b intrinsicBuilder, archs ...*sys.Arch) {
for _, a := range archs {
intrinsics[intrinsicKey{a, pkg, fn}] = b
}
}
// addF does the same as add but operates on architecture families.
addF := func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily) {
m := 0
for _, f := range archFamilies {
if f >= 32 {
panic("too many architecture families")
}
m |= 1 << uint(f)
}
for _, a := range all {
if m>>uint(a.Family)&1 != 0 {
intrinsics[intrinsicKey{a, pkg, fn}] = b
}
}
}
// alias defines pkg.fn = pkg2.fn2 for all architectures in archs for which pkg2.fn2 exists.
alias := func(pkg, fn, pkg2, fn2 string, archs ...*sys.Arch) {
for _, a := range archs {
if b, ok := intrinsics[intrinsicKey{a, pkg2, fn2}]; ok {
intrinsics[intrinsicKey{a, pkg, fn}] = b
}
}
}
/******** runtime ********/
if !instrumenting {
add("runtime", "slicebytetostringtmp",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
// Compiler frontend optimizations emit OARRAYBYTESTRTMP nodes
// for the backend instead of slicebytetostringtmp calls
// when not instrumenting.
slice := args[0]
ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
},
all...)
}
add("runtime", "KeepAlive",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
return nil
},
all...)
add("runtime", "getclosureptr",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr)
},
all...)
addF("runtime", "getcallerpc",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr)
}, sys.AMD64, sys.I386)
/******** runtime/internal/sys ********/
addF("runtime/internal/sys", "Ctz32",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS)
addF("runtime/internal/sys", "Ctz64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS)
addF("runtime/internal/sys", "Bswap32",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBswap32, types.Types[TUINT32], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
addF("runtime/internal/sys", "Bswap64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBswap64, types.Types[TUINT64], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
/******** runtime/internal/atomic ********/
addF("runtime/internal/atomic", "Load",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/atomic", "Load64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
addF("runtime/internal/atomic", "Loadp",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/atomic", "Store",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/atomic", "Store64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
addF("runtime/internal/atomic", "StorepNoWB",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS)
addF("runtime/internal/atomic", "Xchg",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/atomic", "Xchg64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
addF("runtime/internal/atomic", "Xadd",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/atomic", "Xadd64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
addF("runtime/internal/atomic", "Cas",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/atomic", "Cas64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
addF("runtime/internal/atomic", "And8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64)
addF("runtime/internal/atomic", "Or8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64)
alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...)
alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...)
alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", p4...)
alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...)
alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...)
alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...)
alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...)
alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...)
alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...)
alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...)
alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...)
alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd64", p8...)
alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas", p4...)
alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...)
alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...)
alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...)
/******** math ********/
addF("math", "Sqrt",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpSqrt, types.Types[TFLOAT64], args[0])
},
sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
addF("math", "Trunc",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpTrunc, types.Types[TFLOAT64], args[0])
},
sys.PPC64, sys.S390X)
addF("math", "Ceil",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCeil, types.Types[TFLOAT64], args[0])
},
sys.PPC64, sys.S390X)
addF("math", "Floor",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpFloor, types.Types[TFLOAT64], args[0])
},
sys.PPC64, sys.S390X)
addF("math", "Round",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpRound, types.Types[TFLOAT64], args[0])
},
sys.S390X)
/******** math/bits ********/
addF("math/bits", "TrailingZeros64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("math/bits", "TrailingZeros32",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("math/bits", "TrailingZeros16",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0])
c := s.constInt32(types.Types[TUINT32], 1<<16)
y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c)
return s.newValue1(ssa.OpCtz32, types.Types[TINT], y)
},
sys.ARM, sys.MIPS)
addF("math/bits", "TrailingZeros16",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0])
c := s.constInt64(types.Types[TUINT64], 1<<16)
y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c)
return s.newValue1(ssa.OpCtz64, types.Types[TINT], y)
},
sys.AMD64, sys.ARM64, sys.S390X)
addF("math/bits", "TrailingZeros8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0])
c := s.constInt32(types.Types[TUINT32], 1<<8)
y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c)
return s.newValue1(ssa.OpCtz32, types.Types[TINT], y)
},
sys.ARM, sys.MIPS)
addF("math/bits", "TrailingZeros8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0])
c := s.constInt64(types.Types[TUINT64], 1<<8)
y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c)
return s.newValue1(ssa.OpCtz64, types.Types[TINT], y)
},
sys.AMD64, sys.ARM64, sys.S390X)
alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...)
alias("math/bits", "ReverseBytes32", "runtime/internal/sys", "Bswap32", all...)
// ReverseBytes inlines correctly, no need to intrinsify it.
// ReverseBytes16 lowers to a rotate, no need for anything special here.
addF("math/bits", "Len64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("math/bits", "Len32",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
}
x := s.newValue1(ssa.OpZeroExt32to64, types.Types[TUINT64], args[0])
return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("math/bits", "Len16",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0])
return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x)
}
x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0])
return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
// Note: disabled on AMD64 because the Go code is faster!
addF("math/bits", "Len8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0])
return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x)
}
x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0])
return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
},
sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("math/bits", "Len",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
}
return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
// LeadingZeros is handled because it trivially calls Len.
addF("math/bits", "Reverse64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse32",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse16",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev16, types.Types[TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse8",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev8, types.Types[TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0])
}
return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0])
},
sys.ARM64)
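// makeOnesCountAMD64 returns an intrinsic builder that tests the runtime's
// support_popcnt flag: if the POPCNT instruction is available it emits op64
// (or op32 when the pointer size is 4), otherwise it falls back to a normal
// call to the pure Go implementation.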
makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
aux := syslook("support_popcnt").Sym.Linksym()
addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), aux, s.sb)
v := s.newValue2(ssa.OpLoad, types.Types[TBOOL], addr, s.mem())
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(v)
bTrue := s.f.NewBlock(ssa.BlockPlain)
bFalse := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bTrue)
b.AddEdgeTo(bFalse)
b.Likely = ssa.BranchLikely // most machines have popcnt nowadays
// We have the intrinsic - use it directly.
s.startBlock(bTrue)
op := op64
if s.config.PtrSize == 4 {
op = op32
}
s.vars[n] = s.newValue1(op, types.Types[TINT], args[0])
s.endBlock().AddEdgeTo(bEnd)
// Call the pure Go version.
s.startBlock(bFalse)
a := s.call(n, callNormal)
s.vars[n] = s.newValue2(ssa.OpLoad, types.Types[TINT], a, s.mem())
s.endBlock().AddEdgeTo(bEnd)
// Merge results.
s.startBlock(bEnd)
return s.variable(n, types.Types[TINT])
}
}
addF("math/bits", "OnesCount64",
makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount64),
sys.AMD64)
addF("math/bits", "OnesCount64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpPopCount64, types.Types[TINT], args[0])
},
sys.PPC64)
addF("math/bits", "OnesCount32",
makeOnesCountAMD64(ssa.OpPopCount32, ssa.OpPopCount32),
sys.AMD64)
addF("math/bits", "OnesCount32",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpPopCount32, types.Types[TINT], args[0])
},
sys.PPC64)
addF("math/bits", "OnesCount16",
makeOnesCountAMD64(ssa.OpPopCount16, ssa.OpPopCount16),
sys.AMD64)
// Note: no OnesCount8, the Go implementation is faster - just a table load.
addF("math/bits", "OnesCount",
makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32),
sys.AMD64)
/******** sync/atomic ********/
// Note: these are disabled by flag_race in findIntrinsic below.
alias("sync/atomic", "LoadInt32", "runtime/internal/atomic", "Load", all...)
alias("sync/atomic", "LoadInt64", "runtime/internal/atomic", "Load64", all...)
alias("sync/atomic", "LoadPointer", "runtime/internal/atomic", "Loadp", all...)
alias("sync/atomic", "LoadUint32", "runtime/internal/atomic", "Load", all...)
alias("sync/atomic", "LoadUint64", "runtime/internal/atomic", "Load64", all...)
alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load", p4...)
alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load64", p8...)
alias("sync/atomic", "StoreInt32", "runtime/internal/atomic", "Store", all...)
alias("sync/atomic", "StoreInt64", "runtime/internal/atomic", "Store64", all...)
// Note: not StorePointer, that needs a write barrier. Same below for {CompareAnd}Swap.
alias("sync/atomic", "StoreUint32", "runtime/internal/atomic", "Store", all...)
alias("sync/atomic", "StoreUint64", "runtime/internal/atomic", "Store64", all...)
alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store", p4...)
alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store64", p8...)
alias("sync/atomic", "SwapInt32", "runtime/internal/atomic", "Xchg", all...)
alias("sync/atomic", "SwapInt64", "runtime/internal/atomic", "Xchg64", all...)
alias("sync/atomic", "SwapUint32", "runtime/internal/atomic", "Xchg", all...)
alias("sync/atomic", "SwapUint64", "runtime/internal/atomic", "Xchg64", all...)
alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg", p4...)
alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg64", p8...)
alias("sync/atomic", "CompareAndSwapInt32", "runtime/internal/atomic", "Cas", all...)
alias("sync/atomic", "CompareAndSwapInt64", "runtime/internal/atomic", "Cas64", all...)
alias("sync/atomic", "CompareAndSwapUint32", "runtime/internal/atomic", "Cas", all...)
alias("sync/atomic", "CompareAndSwapUint64", "runtime/internal/atomic", "Cas64", all...)
alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas", p4...)
alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas64", p8...)
alias("sync/atomic", "AddInt32", "runtime/internal/atomic", "Xadd", all...)
alias("sync/atomic", "AddInt64", "runtime/internal/atomic", "Xadd64", all...)
alias("sync/atomic", "AddUint32", "runtime/internal/atomic", "Xadd", all...)
alias("sync/atomic", "AddUint64", "runtime/internal/atomic", "Xadd64", all...)
alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd", p4...)
alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd64", p8...)
/******** math/big ********/
add("math/big", "mulWW",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1])
},
sys.ArchAMD64)
add("math/big", "divWW",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
},
sys.ArchAMD64)
}
// findIntrinsic returns a function which builds the SSA equivalent of the
// function identified by the symbol sym. If sym is not an intrinsic call, returns nil.
func findIntrinsic(sym *types.Sym) intrinsicBuilder {
if ssa.IntrinsicsDisable {
return nil
}
if sym == nil || sym.Pkg == nil {
return nil
}
pkg := sym.Pkg.Path
if sym.Pkg == localpkg {
pkg = myimportpath
}
if flag_race && pkg == "sync/atomic" {
// The race detector needs to be able to intercept these calls.
// We can't intrinsify them.
return nil
}
fn := sym.Name
return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}]
}
func isIntrinsicCall(n *Node) bool {
if n == nil || n.Left == nil {
return false
}
return findIntrinsic(n.Left.Sym) != nil
}
// intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
func (s *state) intrinsicCall(n *Node) *ssa.Value {
v := findIntrinsic(n.Left.Sym)(s, n, s.intrinsicArgs(n))
if ssa.IntrinsicsDebug > 0 {
x := v
if x == nil {
x = s.mem()
}
if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 {
x = x.Args[0]
}
Warnl(n.Pos, "intrinsic substitution for %v with %s", n.Left.Sym.Name, x.LongString())
}
return v
}
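// A callArg pairs an evaluated argument value with its stack offset.
// byOffset sorts callArgs by offset so intrinsicArgs can return the
// arguments in parameter order.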
type callArg struct {
offset int64
v *ssa.Value
}
type byOffset []callArg
func (x byOffset) Len() int { return len(x) }
func (x byOffset) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byOffset) Less(i, j int) bool {
return x[i].offset < x[j].offset
}
// intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
func (s *state) intrinsicArgs(n *Node) []*ssa.Value {
// This code is complicated because of how walk transforms calls. For a call node,
// each entry in n.List is either an assignment to OINDREGSP which actually
// stores an arg, or an assignment to a temporary which computes an arg
// which is later assigned.
// The args can also be out of order.
// TODO: when walk goes away someday, this code can go away also.
var args []callArg
temps := map[*Node]*ssa.Value{}
for _, a := range n.List.Slice() {
if a.Op != OAS {
s.Fatalf("non-assignment as a function argument %s", opnames[a.Op])
}
l, r := a.Left, a.Right
switch l.Op {
case ONAME:
// Evaluate and store to "temporary".
// Walk ensures these temporaries are dead outside of n.
temps[l] = s.expr(r)
case OINDREGSP:
// Store a value to an argument slot.
var v *ssa.Value
if x, ok := temps[r]; ok {
// This is a previously computed temporary.
v = x
} else {
// This is an explicit value; evaluate it.
v = s.expr(r)
}
args = append(args, callArg{l.Xoffset, v})
default:
s.Fatalf("function argument assignment target not allowed: %s", opnames[l.Op])
}
}
sort.Sort(byOffset(args))
res := make([]*ssa.Value, len(args))
for i, a := range args {
res[i] = a.v
}
return res
}
// Calls the function n using the specified call type.
// Returns the address of the return value (or nil if none).
func (s *state) call(n *Node, k callKind) *ssa.Value {
var sym *types.Sym // target symbol (if static)
var closure *ssa.Value // ptr to closure to run (if dynamic)
var codeptr *ssa.Value // ptr to target code (if dynamic)
var rcvr *ssa.Value // receiver to set
fn := n.Left
switch n.Op {
case OCALLFUNC:
if k == callNormal && fn.Op == ONAME && fn.Class() == PFUNC {
sym = fn.Sym
break
}
closure = s.expr(fn)
case OCALLMETH:
if fn.Op != ODOTMETH {
Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
}
if k == callNormal {
sym = fn.Sym
break
}
// Make a name n2 for the function.
// fn.Sym might be sync.(*Mutex).Unlock.
// Make a PFUNC node out of that, then evaluate it.
// We get back an SSA value representing &sync.(*Mutex).Unlock·f.
// We can then pass that to defer or go.
n2 := newnamel(fn.Pos, fn.Sym)
n2.Name.Curfn = s.curfn
n2.SetClass(PFUNC)
n2.Pos = fn.Pos
n2.Type = types.Types[TUINT8] // dummy type for a static closure. Could use runtime.funcval if we had it.
closure = s.expr(n2)
// Note: receiver is already assigned in n.List, so we don't
// want to set it here.
case OCALLINTER:
if fn.Op != ODOTINTER {
Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op)
}
i := s.expr(fn.Left)
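// The itab is the first word of the interface value; the data word
// (the receiver) is extracted below with OpIData.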
itab := s.newValue1(ssa.OpITab, types.Types[TUINTPTR], i)
if k != callNormal {
s.nilCheck(itab)
}
itabidx := fn.Xoffset + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab
itab = s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
if k == callNormal {
codeptr = s.newValue2(ssa.OpLoad, types.Types[TUINTPTR], itab, s.mem())
} else {
closure = itab
}
rcvr = s.newValue1(ssa.OpIData, types.Types[TUINTPTR], i)
}
dowidth(fn.Type)
stksize := fn.Type.ArgWidth() // includes receiver
// Run all argument assignments. The arg slots have already
// been offset by the appropriate amount (+2*widthptr for go/defer,
// +widthptr for interface calls).
// For OCALLMETH, the receiver is set in these statements.
s.stmtList(n.List)
// Set receiver (for interface calls)
if rcvr != nil {
argStart := Ctxt.FixedFrameSize()
if k != callNormal {
argStart += int64(2 * Widthptr)
}
addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINTPTR], addr, rcvr, s.mem())
}
// Defer/go args
if k != callNormal {
// Write argsize and closure (args to Newproc/Deferproc).
argStart := Ctxt.FixedFrameSize()
argsize := s.constInt32(types.Types[TUINT32], int32(stksize))
addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINT32], addr, argsize, s.mem())
addr = s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr))
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINTPTR], addr, closure, s.mem())
stksize += 2 * int64(Widthptr)
}
// call target
var call *ssa.Value
switch {
case k == callDefer:
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, Deferproc, s.mem())
case k == callGo:
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, Newproc, s.mem())
case closure != nil:
codeptr = s.newValue2(ssa.OpLoad, types.Types[TUINTPTR], closure, s.mem())
call = s.newValue3(ssa.OpClosureCall, types.TypeMem, codeptr, closure, s.mem())
case codeptr != nil:
call = s.newValue2(ssa.OpInterCall, types.TypeMem, codeptr, s.mem())
case sym != nil:
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, sym.Linksym(), s.mem())
default:
Fatalf("bad call type %v %v", n.Op, n)
}
call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
s.vars[&memVar] = call
// Finish block for defers
if k == callDefer {
b := s.endBlock()
b.Kind = ssa.BlockDefer
b.SetControl(call)
bNext := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bNext)
// Add recover edge to exit code.
r := s.f.NewBlock(ssa.BlockPlain)
s.startBlock(r)
s.exit()
b.AddEdgeTo(r)
b.Likely = ssa.BranchLikely
s.startBlock(bNext)
}
res := n.Left.Type.Results()
if res.NumFields() == 0 || k != callNormal {
// call has no return value. Continue with the next statement.
return nil
}
fp := res.Field(0)
return s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+Ctxt.FixedFrameSize())
}
// etypesign returns the signed-ness of e, for integer/pointer etypes.
// -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
func etypesign(e types.EType) int8 {
switch e {
case TINT8, TINT16, TINT32, TINT64, TINT:
return -1
case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR:
return +1
}
return 0
}
// addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
// The value that the returned Value represents is guaranteed to be non-nil.
// If bounded is true then this address does not require a nil check for its operand
// even if that would otherwise be implied.
func (s *state) addr(n *Node, bounded bool) *ssa.Value {
t := types.NewPtr(n.Type)
switch n.Op {
case ONAME:
switch n.Class() {
case PEXTERN:
// global variable
v := s.entryNewValue1A(ssa.OpAddr, t, n.Sym.Linksym(), s.sb)
// TODO: Make OpAddr use AuxInt as well as Aux.
if n.Xoffset != 0 {
v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v)
}
return v
case PPARAM:
// parameter slot
v := s.decladdrs[n]
if v != nil {
return v
}
if n == nodfp {
// Special arg that points to the frame pointer (Used by ORECOVER).
return s.entryNewValue1A(ssa.OpAddr, t, n, s.sp)
}
s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
return nil
case PAUTO:
return s.newValue1A(ssa.OpAddr, t, n, s.sp)
case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
// ensure that we reuse symbols for out parameters so
// that cse works on their addresses
return s.newValue1A(ssa.OpAddr, t, n, s.sp)
default:
s.Fatalf("variable address class %v not implemented", classnames[n.Class()])
return nil
}
case OINDREGSP:
// indirect off REGSP
// used for storing/loading arguments/returns to/from callees
return s.constOffPtrSP(t, n.Xoffset)
case OINDEX:
if n.Left.Type.IsSlice() {
a := s.expr(n.Left)
i := s.expr(n.Right)
i = s.extendIndex(i, panicindex)
len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], a)
if !n.Bounded() {
s.boundsCheck(i, len)
}
p := s.newValue1(ssa.OpSlicePtr, t, a)
return s.newValue2(ssa.OpPtrIndex, t, p, i)
} else { // array
a := s.addr(n.Left, bounded)
i := s.expr(n.Right)
i = s.extendIndex(i, panicindex)
len := s.constInt(types.Types[TINT], n.Left.Type.NumElem())
if !n.Bounded() {
s.boundsCheck(i, len)
}
return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.Left.Type.Elem()), a, i)
}
case OIND:
return s.exprPtr(n.Left, bounded, n.Pos)
case ODOT:
p := s.addr(n.Left, bounded)
return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
case ODOTPTR:
p := s.exprPtr(n.Left, bounded, n.Pos)
return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
case OCLOSUREVAR:
return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset,
s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr))
case OCONVNOP:
addr := s.addr(n.Left, bounded)
return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
case OCALLFUNC, OCALLINTER, OCALLMETH:
return s.call(n, callNormal)
case ODOTTYPE:
v, _ := s.dottype(n, false)
if v.Op != ssa.OpLoad {
s.Fatalf("dottype of non-load")
}
if v.Args[1] != s.mem() {
s.Fatalf("memory no longer live from dottype load")
}
return v.Args[0]
default:
s.Fatalf("unhandled addr %v", n.Op)
return nil
}
}
// canSSA reports whether n is SSA-able.
// n must be an ONAME (or an ODOT sequence with an ONAME base).
func (s *state) canSSA(n *Node) bool {
if Debug['N'] != 0 {
return false
}
for n.Op == ODOT || (n.Op == OINDEX && n.Left.Type.IsArray()) {
n = n.Left
}
if n.Op != ONAME {
return false
}
if n.Addrtaken() {
return false
}
if n.isParamHeapCopy() {
return false
}
if n.Class() == PAUTOHEAP {
Fatalf("canSSA of PAUTOHEAP %v", n)
}
switch n.Class() {
case PEXTERN:
return false
case PPARAMOUT:
if s.hasdefer {
// TODO: handle this case? Named return values must be
// in memory so that the deferred function can see them.
// Maybe do: if !strings.HasPrefix(n.String(), "~") { return false }
// Or maybe not, see issue 18860. Even unnamed return values
// must be written back so if a defer recovers, the caller can see them.
return false
}
if s.cgoUnsafeArgs {
// Cgo effectively takes the address of all result args,
// but the compiler can't see that.
return false
}
}
if n.Class() == PPARAM && n.Sym != nil && n.Sym.Name == ".this" {
// wrappers generated by genwrapper need to update
// the .this pointer in place.
// TODO: treat as a PPARAMOUT?
return false
}
return canSSAType(n.Type)
// TODO: try to make more variables SSAable?
}
// canSSAType reports whether variables of type t are SSA-able.
func canSSAType(t *types.Type) bool {
dowidth(t)
if t.Width > int64(4*Widthptr) {
// 4*Widthptr is an arbitrary constant. We want it
// to be at least 3*Widthptr so slices can be registerized.
// Too big and we'll introduce too much register pressure.
return false
}
switch t.Etype {
case TARRAY:
// We can't do larger arrays because dynamic indexing is
// not supported on SSA variables.
// TODO: allow if all indexes are constant.
if t.NumElem() <= 1 {
return canSSAType(t.Elem())
}
return false
case TSTRUCT:
if t.NumFields() > ssa.MaxStruct {
return false
}
for _, t1 := range t.Fields().Slice() {
if !canSSAType(t1.Type) {
return false
}
}
return true
default:
return true
}
}
// exprPtr evaluates n to a pointer and nil-checks it.
func (s *state) exprPtr(n *Node, bounded bool, lineno src.XPos) *ssa.Value {
p := s.expr(n)
if bounded || n.NonNil() {
if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 {
s.f.Warnl(lineno, "removed nil check")
}
return p
}
s.nilCheck(p)
return p
}
// nilCheck generates nil pointer checking code.
// Used only for automatically inserted nil checks,
// not for user code like 'x != nil'.
func (s *state) nilCheck(ptr *ssa.Value) {
if disable_checknil != 0 || s.curfn.Func.NilCheckDisabled() {
return
}
s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem())
}
// boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not.
// Starts a new block on return.
// idx is already converted to full int width.
func (s *state) boundsCheck(idx, len *ssa.Value) {
if Debug['B'] != 0 {
return
}
// bounds check
cmp := s.newValue2(ssa.OpIsInBounds, types.Types[TBOOL], idx, len)
s.check(cmp, panicindex)
}
// sliceBoundsCheck generates slice bounds checking code. Checks if 0 <= idx <= len, branches to exit if not.
// Starts a new block on return.
// idx and len are already converted to full int width.
func (s *state) sliceBoundsCheck(idx, len *ssa.Value) {
if Debug['B'] != 0 {
return
}
// bounds check
cmp := s.newValue2(ssa.OpIsSliceInBounds, types.Types[TBOOL], idx, len)
s.check(cmp, panicslice)
}
// If cmp (a bool) is false, panic using the given function.
func (s *state) check(cmp *ssa.Value, fn *obj.LSym) {
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
bNext := s.f.NewBlock(ssa.BlockPlain)
line := s.peekPos()
pos := Ctxt.PosTable.Pos(line)
fl := funcLine{f: fn, file: pos.Filename(), line: pos.Line()}
bPanic := s.panics[fl]
if bPanic == nil {
bPanic = s.f.NewBlock(ssa.BlockPlain)
s.panics[fl] = bPanic
s.startBlock(bPanic)
// The panic call takes/returns memory to ensure that the right
// memory state is observed if the panic happens.
s.rtcall(fn, false, nil)
}
b.AddEdgeTo(bNext)
b.AddEdgeTo(bPanic)
s.startBlock(bNext)
}
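// intDivide generates code for the integer division or modulus n (a op b),
// inserting a check for division by zero unless b is a known nonzero constant.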
func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value {
needcheck := true
switch b.Op {
case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64:
if b.AuxInt != 0 {
needcheck = false
}
}
if needcheck {
// do a size-appropriate check for zero
cmp := s.newValue2(s.ssaOp(ONE, n.Type), types.Types[TBOOL], b, s.zeroVal(n.Type))
s.check(cmp, panicdivide)
}
return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
}
// rtcall issues a call to the given runtime function fn with the listed args.
// Returns a slice of results of the given result types.
// The call is added to the end of the current block.
// If returns is false, the block is marked as an exit block.
func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value {
// Write args to the stack
off := Ctxt.FixedFrameSize()
for _, arg := range args {
t := arg.Type
off = Rnd(off, t.Alignment())
ptr := s.constOffPtrSP(t.PtrTo(), off)
size := t.Size()
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, ptr, arg, s.mem())
off += size
}
off = Rnd(off, int64(Widthreg))
// Issue call
call := s.newValue1A(ssa.OpStaticCall, types.TypeMem, fn, s.mem())
s.vars[&memVar] = call
if !returns {
// Finish block
b := s.endBlock()
b.Kind = ssa.BlockExit
b.SetControl(call)
call.AuxInt = off - Ctxt.FixedFrameSize()
if len(results) > 0 {
Fatalf("panic call can't have results")
}
return nil
}
// Load results
res := make([]*ssa.Value, len(results))
for i, t := range results {
off = Rnd(off, t.Alignment())
ptr := s.constOffPtrSP(types.NewPtr(t), off)
res[i] = s.newValue2(ssa.OpLoad, t, ptr, s.mem())
off += t.Size()
}
off = Rnd(off, int64(Widthptr))
// Remember how much callee stack space we needed.
call.AuxInt = off
return res
}
// do *left = right for type t.
func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask) {
if skip == 0 && (!types.Haspointers(t) || ssa.IsStackAddr(left)) {
// Known to not have write barrier. Store the whole type.
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, left, right, s.mem())
return
}
// store scalar fields first, so write barrier stores for
// pointer fields can be grouped together, and scalar values
// don't need to be live across the write barrier call.
// TODO: if the writebarrier pass knows how to reorder stores,
// we can do a single store here as long as skip==0.
s.storeTypeScalars(t, left, right, skip)
if skip&skipPtr == 0 && types.Haspointers(t) {
s.storeTypePtrs(t, left, right)
}
}
// do *left = right for all scalar (non-pointer) parts of t.
func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) {
switch {
case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, left, right, s.mem())
case t.IsPtrShaped():
// no scalar fields.
case t.IsString():
if skip&skipLen != 0 {
return
}
len := s.newValue1(ssa.OpStringLen, types.Types[TINT], right)
lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], lenAddr, len, s.mem())
case t.IsSlice():
if skip&skipLen == 0 {
len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], right)
lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], lenAddr, len, s.mem())
}
if skip&skipCap == 0 {
cap := s.newValue1(ssa.OpSliceCap, types.Types[TINT], right)
capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], capAddr, cap, s.mem())
}
case t.IsInterface():
// itab field doesn't need a write barrier (even though it is a pointer).
itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINTPTR], left, itab, s.mem())
case t.IsStruct():
n := t.NumFields()
for i := 0; i < n; i++ {
ft := t.FieldType(i)
addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
s.storeTypeScalars(ft, addr, val, 0)
}
case t.IsArray() && t.NumElem() == 0:
// nothing
case t.IsArray() && t.NumElem() == 1:
s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0)
default:
s.Fatalf("bad write barrier type %v", t)
}
}
// do *left = right for all pointer parts of t.
func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
switch {
case t.IsPtrShaped():
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, left, right, s.mem())
case t.IsString():
ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, s.f.Config.Types.BytePtr, left, ptr, s.mem())
case t.IsSlice():
ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, right)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, s.f.Config.Types.BytePtr, left, ptr, s.mem())
case t.IsInterface():
// itab field is treated as a scalar.
idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right)
idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left)
s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, s.f.Config.Types.BytePtr, idataAddr, idata, s.mem())
case t.IsStruct():
n := t.NumFields()
for i := 0; i < n; i++ {
ft := t.FieldType(i)
if !types.Haspointers(ft) {
continue
}
addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
s.storeTypePtrs(ft, addr, val)
}
case t.IsArray() && t.NumElem() == 0:
// nothing
case t.IsArray() && t.NumElem() == 1:
s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right))
default:
s.Fatalf("bad write barrier type %v", t)
}
}
// slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
// i,j,k may be nil, in which case they are set to their default value.
// t is a slice, ptr to array, or string type.
func (s *state) slice(t *types.Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) {
var elemtype *types.Type
var ptrtype *types.Type
var ptr *ssa.Value
var len *ssa.Value
var cap *ssa.Value
zero := s.constInt(types.Types[TINT], 0)
switch {
case t.IsSlice():
elemtype = t.Elem()
ptrtype = types.NewPtr(elemtype)
ptr = s.newValue1(ssa.OpSlicePtr, ptrtype, v)
len = s.newValue1(ssa.OpSliceLen, types.Types[TINT], v)
cap = s.newValue1(ssa.OpSliceCap, types.Types[TINT], v)
case t.IsString():
elemtype = types.Types[TUINT8]
ptrtype = types.NewPtr(elemtype)
ptr = s.newValue1(ssa.OpStringPtr, ptrtype, v)
len = s.newValue1(ssa.OpStringLen, types.Types[TINT], v)
cap = len
case t.IsPtr():
if !t.Elem().IsArray() {
s.Fatalf("bad ptr to array in slice %v\n", t)
}
elemtype = t.Elem().Elem()
ptrtype = types.NewPtr(elemtype)
s.nilCheck(v)
ptr = v
len = s.constInt(types.Types[TINT], t.Elem().NumElem())
cap = len
default:
s.Fatalf("bad type in slice %v\n", t)
}
// Set default values
if i == nil {
i = zero
}
if j == nil {
j = len
}
if k == nil {
k = cap
}
// Panic if slice indices are not in bounds.
s.sliceBoundsCheck(i, j)
if j != k {
s.sliceBoundsCheck(j, k)
}
if k != cap {
s.sliceBoundsCheck(k, cap)
}
// Generate the following code assuming that indexes are in bounds.
// The masking is to make sure that we don't generate a slice
// that points to the next object in memory.
// rlen = j - i
// rcap = k - i
// delta = i * elemsize
// rptr = p + delta&mask(rcap)
// result = (SliceMake rptr rlen rcap)
// where mask(x) is 0 if x==0 and -1 if x>0.
subOp := s.ssaOp(OSUB, types.Types[TINT])
mulOp := s.ssaOp(OMUL, types.Types[TINT])
andOp := s.ssaOp(OAND, types.Types[TINT])
rlen := s.newValue2(subOp, types.Types[TINT], j, i)
var rcap *ssa.Value
switch {
case t.IsString():
// Capacity of the result is unimportant. However, we use
// rcap to test if we've generated a zero-length slice.
// Use length of strings for that.
rcap = rlen
case j == k:
rcap = rlen
default:
rcap = s.newValue2(subOp, types.Types[TINT], k, i)
}
var rptr *ssa.Value
if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 {
// No pointer arithmetic necessary.
rptr = ptr
} else {
// delta = # of bytes to offset pointer by.
delta := s.newValue2(mulOp, types.Types[TINT], i, s.constInt(types.Types[TINT], elemtype.Width))
// If we're slicing to the point where the capacity is zero,
// zero out the delta.
mask := s.newValue1(ssa.OpSlicemask, types.Types[TINT], rcap)
delta = s.newValue2(andOp, types.Types[TINT], delta, mask)
// Compute rptr = ptr + delta
rptr = s.newValue2(ssa.OpAddPtr, ptrtype, ptr, delta)
}
return rptr, rlen, rcap
}
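// u642fcvtTab lists the SSA opcodes needed to convert a uint64 to a float of a given width.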
type u642fcvtTab struct {
geq, cvt2F, and, rsh, or, add ssa.Op
one func(*state, *types.Type, int64) *ssa.Value
}
var u64_f64 = u642fcvtTab{
geq: ssa.OpGeq64,
cvt2F: ssa.OpCvt64to64F,
and: ssa.OpAnd64,
rsh: ssa.OpRsh64Ux64,
or: ssa.OpOr64,
add: ssa.OpAdd64F,
one: (*state).constInt64,
}
var u64_f32 = u642fcvtTab{
geq: ssa.OpGeq64,
cvt2F: ssa.OpCvt64to32F,
and: ssa.OpAnd64,
rsh: ssa.OpRsh64Ux64,
or: ssa.OpOr64,
add: ssa.OpAdd32F,
one: (*state).constInt64,
}
func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
}
func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
}
func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// if x >= 0 {
// result = (floatY) x
// } else {
// y = uintX(x) ; y = x & 1
// z = uintX(x) ; z = z >> 1
// z = z | y
// result = floatY(z)
// result = result + result
// }
//
// Code borrowed from old code generator.
// What's going on: large 64-bit "unsigned" looks like
// negative number to hardware's integer-to-float
// conversion. However, because the mantissa is only
// 63 bits, we don't need the LSB, so instead we do an
// unsigned right shift (divide by two), convert, and
// double. However, before we do that, we need to be
// sure that we do not lose a "1" if that made the
// difference in the resulting rounding. Therefore, we
// preserve it, and OR (not ADD) it back in. The case
// that matters is when the eleven discarded bits are
// equal to 10000000001; that rounds up, and the 1 cannot
// be lost else it would round down if the LSB of the
// candidate mantissa is 0.
cmp := s.newValue2(cvttab.geq, types.Types[TBOOL], x, s.zeroVal(ft))
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
bThen := s.f.NewBlock(ssa.BlockPlain)
bElse := s.f.NewBlock(ssa.BlockPlain)
bAfter := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bThen)
s.startBlock(bThen)
a0 := s.newValue1(cvttab.cvt2F, tt, x)
s.vars[n] = a0
s.endBlock()
bThen.AddEdgeTo(bAfter)
b.AddEdgeTo(bElse)
s.startBlock(bElse)
one := cvttab.one(s, ft, 1)
y := s.newValue2(cvttab.and, ft, x, one)
z := s.newValue2(cvttab.rsh, ft, x, one)
z = s.newValue2(cvttab.or, ft, z, y)
a := s.newValue1(cvttab.cvt2F, tt, z)
a1 := s.newValue2(cvttab.add, tt, a, a)
s.vars[n] = a1
s.endBlock()
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
return s.variable(n, n.Type)
}
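// u322fcvtTab lists the SSA opcodes needed to convert a uint32 to a float of a given width.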
type u322fcvtTab struct {
cvtI2F, cvtF2F ssa.Op
}
var u32_f64 = u322fcvtTab{
cvtI2F: ssa.OpCvt32to64F,
cvtF2F: ssa.OpCopy,
}
var u32_f32 = u322fcvtTab{
cvtI2F: ssa.OpCvt32to32F,
cvtF2F: ssa.OpCvt64Fto32F,
}
func (s *state) uint32Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint32Tofloat(&u32_f64, n, x, ft, tt)
}
func (s *state) uint32Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint32Tofloat(&u32_f32, n, x, ft, tt)
}
func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// if x >= 0 {
// result = floatY(x)
// } else {
// result = floatY(float64(x) + (1<<32))
// }
cmp := s.newValue2(ssa.OpGeq32, types.Types[TBOOL], x, s.zeroVal(ft))
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
bThen := s.f.NewBlock(ssa.BlockPlain)
bElse := s.f.NewBlock(ssa.BlockPlain)
bAfter := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bThen)
s.startBlock(bThen)
a0 := s.newValue1(cvttab.cvtI2F, tt, x)
s.vars[n] = a0
s.endBlock()
bThen.AddEdgeTo(bAfter)
b.AddEdgeTo(bElse)
s.startBlock(bElse)
a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[TFLOAT64], x)
twoToThe32 := s.constFloat64(types.Types[TFLOAT64], float64(1<<32))
a2 := s.newValue2(ssa.OpAdd64F, types.Types[TFLOAT64], a1, twoToThe32)
a3 := s.newValue1(cvttab.cvtF2F, tt, a2)
s.vars[n] = a3
s.endBlock()
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
return s.variable(n, n.Type)
}
// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() {
s.Fatalf("node must be a map or a channel")
}
// if n == nil {
// return 0
// } else {
// // len
// return *((*int)n)
// // cap
// return *(((*int)n)+1)
// }
lenType := n.Type
nilValue := s.constNil(types.Types[TUINTPTR])
cmp := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], x, nilValue)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchUnlikely
bThen := s.f.NewBlock(ssa.BlockPlain)
bElse := s.f.NewBlock(ssa.BlockPlain)
bAfter := s.f.NewBlock(ssa.BlockPlain)
// length/capacity of a nil map/chan is zero
b.AddEdgeTo(bThen)
s.startBlock(bThen)
s.vars[n] = s.zeroVal(lenType)
s.endBlock()
bThen.AddEdgeTo(bAfter)
b.AddEdgeTo(bElse)
s.startBlock(bElse)
if n.Op == OLEN {
// length is stored in the first word for map/chan
s.vars[n] = s.newValue2(ssa.OpLoad, lenType, x, s.mem())
} else if n.Op == OCAP {
// capacity is stored in the second word for chan
sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x)
s.vars[n] = s.newValue2(ssa.OpLoad, lenType, sw, s.mem())
} else {
s.Fatalf("op must be OLEN or OCAP")
}
s.endBlock()
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
return s.variable(n, lenType)
}
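// For illustration only: the generated code behaves like this sketch (hypothetical
// helper), assuming the header layout described above, with the length in the first
// word and, for channels, the capacity in the second:
//
//	func lenOrCap(h *[2]int, wantCap bool) int {
//		if h == nil {
//			return 0 // len/cap of a nil map or channel is zero
//		}
//		if wantCap {
//			return h[1]
//		}
//		return h[0]
//	}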
type f2uCvtTab struct {
ltf, cvt2U, subf, or ssa.Op
floatValue func(*state, *types.Type, float64) *ssa.Value
intValue func(*state, *types.Type, int64) *ssa.Value
cutoff uint64
}
var f32_u64 = f2uCvtTab{
ltf: ssa.OpLess32F,
cvt2U: ssa.OpCvt32Fto64,
subf: ssa.OpSub32F,
or: ssa.OpOr64,
floatValue: (*state).constFloat32,
intValue: (*state).constInt64,
cutoff: 9223372036854775808,
}
var f64_u64 = f2uCvtTab{
ltf: ssa.OpLess64F,
cvt2U: ssa.OpCvt64Fto64,
subf: ssa.OpSub64F,
or: ssa.OpOr64,
floatValue: (*state).constFloat64,
intValue: (*state).constInt64,
cutoff: 9223372036854775808,
}
var f32_u32 = f2uCvtTab{
ltf: ssa.OpLess32F,
cvt2U: ssa.OpCvt32Fto32,
subf: ssa.OpSub32F,
or: ssa.OpOr32,
floatValue: (*state).constFloat32,
intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
cutoff: 2147483648,
}
var f64_u32 = f2uCvtTab{
ltf: ssa.OpLess64F,
cvt2U: ssa.OpCvt64Fto32,
subf: ssa.OpSub64F,
or: ssa.OpOr32,
floatValue: (*state).constFloat64,
intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
cutoff: 2147483648,
}
func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f32_u64, n, x, ft, tt)
}
func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f64_u64, n, x, ft, tt)
}
func (s *state) float32ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f32_u32, n, x, ft, tt)
}
func (s *state) float64ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f64_u32, n, x, ft, tt)
}
func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// cutoff:=1<<(intY_Size-1)
// if x < floatX(cutoff) {
// result = uintY(x)
// } else {
// y = x - floatX(cutoff)
// z = uintY(y)
// result = z | -(cutoff)
// }
cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff))
cmp := s.newValue2(cvttab.ltf, types.Types[TBOOL], x, cutoff)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
bThen := s.f.NewBlock(ssa.BlockPlain)
bElse := s.f.NewBlock(ssa.BlockPlain)
bAfter := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bThen)
s.startBlock(bThen)
a0 := s.newValue1(cvttab.cvt2U, tt, x)
s.vars[n] = a0
s.endBlock()
bThen.AddEdgeTo(bAfter)
b.AddEdgeTo(bElse)
s.startBlock(bElse)
y := s.newValue2(cvttab.subf, ft, x, cutoff)
y = s.newValue1(cvttab.cvt2U, tt, y)
z := cvttab.intValue(s, tt, int64(-cvttab.cutoff))
a1 := s.newValue2(cvttab.or, tt, y, z)
s.vars[n] = a1
s.endBlock()
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
return s.variable(n, n.Type)
}
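// For illustration only: a plain-Go sketch of the cutoff trick above (hypothetical
// helper, not part of the compiler). Inputs outside the target range are not handled
// specially, matching the generated code:
//
//	func f64ToU64(x float64) uint64 {
//		const cutoff = float64(1 << 63)
//		if x < cutoff {
//			return uint64(int64(x))
//		}
//		y := x - cutoff
//		return uint64(int64(y)) | (1 << 63) // OR the high bit back in
//	}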
// dottype generates SSA for a type assertion node.
// commaok indicates whether to panic or return a bool.
// If commaok is false, resok will be nil.
func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
iface := s.expr(n.Left) // input interface
target := s.expr(n.Right) // target type
byteptr := s.f.Config.Types.BytePtr
if n.Type.IsInterface() {
if n.Type.IsEmptyInterface() {
// Converting to an empty interface.
// Input could be an empty or nonempty interface.
if Debug_typeassert > 0 {
Warnl(n.Pos, "type assertion inlined")
}
// Get itab/type field from input.
itab := s.newValue1(ssa.OpITab, byteptr, iface)
// Conversion succeeds iff that field is not nil.
cond := s.newValue2(ssa.OpNeqPtr, types.Types[TBOOL], itab, s.constNil(byteptr))
if n.Left.Type.IsEmptyInterface() && commaok {
// Converting empty interface to empty interface with ,ok is just a nil check.
return iface, cond
}
// Branch on nilness.
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cond)
b.Likely = ssa.BranchLikely
bOk := s.f.NewBlock(ssa.BlockPlain)
bFail := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bOk)
b.AddEdgeTo(bFail)
if !commaok {
// On failure, panic by calling panicnildottype.
s.startBlock(bFail)
s.rtcall(panicnildottype, false, nil, target)
// On success, return (perhaps modified) input interface.
s.startBlock(bOk)
if n.Left.Type.IsEmptyInterface() {
res = iface // Use input interface unchanged.
return
}
// Load type out of itab, build interface with existing idata.
off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
typ := s.newValue2(ssa.OpLoad, byteptr, off, s.mem())
idata := s.newValue1(ssa.OpIData, n.Type, iface)
res = s.newValue2(ssa.OpIMake, n.Type, typ, idata)
return
}
s.startBlock(bOk)
// nonempty -> empty
// Need to load type from itab
off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
s.vars[&typVar] = s.newValue2(ssa.OpLoad, byteptr, off, s.mem())
s.endBlock()
// itab is nil, might as well use that as the nil result.
s.startBlock(bFail)
s.vars[&typVar] = itab
s.endBlock()
// Merge point.
bEnd := s.f.NewBlock(ssa.BlockPlain)
bOk.AddEdgeTo(bEnd)
bFail.AddEdgeTo(bEnd)
s.startBlock(bEnd)
idata := s.newValue1(ssa.OpIData, n.Type, iface)
res = s.newValue2(ssa.OpIMake, n.Type, s.variable(&typVar, byteptr), idata)
resok = cond
delete(s.vars, &typVar)
return
}
// converting to a nonempty interface needs a runtime call.
if Debug_typeassert > 0 {
Warnl(n.Pos, "type assertion not inlined")
}
if n.Left.Type.IsEmptyInterface() {
if commaok {
call := s.rtcall(assertE2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface)
return call[0], call[1]
}
return s.rtcall(assertE2I, true, []*types.Type{n.Type}, target, iface)[0], nil
}
if commaok {
call := s.rtcall(assertI2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface)
return call[0], call[1]
}
return s.rtcall(assertI2I, true, []*types.Type{n.Type}, target, iface)[0], nil
}
if Debug_typeassert > 0 {
Warnl(n.Pos, "type assertion inlined")
}
// Converting to a concrete type.
direct := isdirectiface(n.Type)
itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
if Debug_typeassert > 0 {
Warnl(n.Pos, "type assertion inlined")
}
var targetITab *ssa.Value
if n.Left.Type.IsEmptyInterface() {
// Looking for pointer to target type.
targetITab = target
} else {
// Looking for pointer to itab for target type and source interface.
targetITab = s.expr(n.List.First())
}
var tmp *Node // temporary for use with large types
var addr *ssa.Value // address of tmp
if commaok && !canSSAType(n.Type) {
// unSSAable type, use temporary.
// TODO: get rid of some of these temporaries.
tmp = tempAt(n.Pos, s.curfn, n.Type)
addr = s.addr(tmp, false)
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem())
}
cond := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], itab, targetITab)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cond)
b.Likely = ssa.BranchLikely
bOk := s.f.NewBlock(ssa.BlockPlain)
bFail := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bOk)
b.AddEdgeTo(bFail)
if !commaok {
// on failure, panic by calling panicdottype
s.startBlock(bFail)
taddr := s.expr(n.Right.Right)
if n.Left.Type.IsEmptyInterface() {
s.rtcall(panicdottypeE, false, nil, itab, target, taddr)
} else {
s.rtcall(panicdottypeI, false, nil, itab, target, taddr)
}
// on success, return data from interface
s.startBlock(bOk)
if direct {
return s.newValue1(ssa.OpIData, n.Type, iface), nil
}
p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()), nil
}
// commaok is the more complicated case because we have
// a control flow merge point.
bEnd := s.f.NewBlock(ssa.BlockPlain)
// Note that we need a new valVar each time (unlike okVar where we can
// reuse the variable) because it might have a different type every time.
valVar := &Node{Op: ONAME, Sym: &types.Sym{Name: "val"}}
// type assertion succeeded
s.startBlock(bOk)
if tmp == nil {
if direct {
s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type, iface)
} else {
p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
s.vars[valVar] = s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
}
} else {
p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
store := s.newValue3I(ssa.OpMove, types.TypeMem, n.Type.Size(), addr, p, s.mem())
store.Aux = n.Type
s.vars[&memVar] = store
}
s.vars[&okVar] = s.constBool(true)
s.endBlock()
bOk.AddEdgeTo(bEnd)
// type assertion failed
s.startBlock(bFail)
if tmp == nil {
s.vars[valVar] = s.zeroVal(n.Type)
} else {
store := s.newValue2I(ssa.OpZero, types.TypeMem, n.Type.Size(), addr, s.mem())
store.Aux = n.Type
s.vars[&memVar] = store
}
s.vars[&okVar] = s.constBool(false)
s.endBlock()
bFail.AddEdgeTo(bEnd)
// merge point
s.startBlock(bEnd)
if tmp == nil {
res = s.variable(valVar, n.Type)
delete(s.vars, valVar)
} else {
res = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp, s.mem())
}
resok = s.variable(&okVar, types.Types[TBOOL])
delete(s.vars, &okVar)
return res, resok
}
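// For illustration only: at the Go source level, the two shapes handled by dottype are
//
//	v := x.(T)      // commaok == false: panics (panicdottype/panicnildottype) on failure
//	v, ok := x.(T)  // commaok == true: ok reports success; v is the zero value on failure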
// variable returns the value of a variable at the current location.
func (s *state) variable(name *Node, t *types.Type) *ssa.Value {
v := s.vars[name]
if v != nil {
return v
}
v = s.fwdVars[name]
if v != nil {
return v
}
if s.curBlock == s.f.Entry {
// No variable should be live at entry.
s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, name, v)
}
// Make a FwdRef, which records a value that's live on block input.
// We'll find the matching definition as part of insertPhis.
v = s.newValue0A(ssa.OpFwdRef, t, name)
s.fwdVars[name] = v
s.addNamedValue(name, v)
return v
}
func (s *state) mem() *ssa.Value {
return s.variable(&memVar, types.TypeMem)
}
func (s *state) addNamedValue(n *Node, v *ssa.Value) {
if n.Class() == Pxxx {
// Don't track our dummy nodes (&memVar etc.).
return
}
if n.IsAutoTmp() {
// Don't track temporary variables.
return
}
if n.Class() == PPARAMOUT {
// Don't track named output values. This prevents return values
// from being assigned too early. See #14591 and #14762. TODO: allow this.
return
}
if n.Class() == PAUTO && n.Xoffset != 0 {
s.Fatalf("AUTO var with offset %v %d", n, n.Xoffset)
}
loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0}
values, ok := s.f.NamedValues[loc]
if !ok {
s.f.Names = append(s.f.Names, loc)
}
s.f.NamedValues[loc] = append(values, v)
}
// Branch is an unresolved branch.
type Branch struct {
P *obj.Prog // branch instruction
B *ssa.Block // target
}
// SSAGenState contains state needed during Prog generation.
type SSAGenState struct {
pp *Progs
// Branches remembers all the branch instructions we've seen
// and where they would like to go.
Branches []Branch
// bstart remembers where each block starts (indexed by block ID)
bstart []*obj.Prog
// 387 port: maps from SSE registers (REG_X?) to 387 registers (REG_F?)
SSEto387 map[int16]int16
// Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include x86-387, PPC, and Sparc V8.
ScratchFpMem *Node
maxarg int64 // largest frame size for arguments to calls made by the function
// Map from GC safe points to stack map index, generated by
// liveness analysis.
stackMapIndex map[*ssa.Value]int
}
// Prog appends a new Prog.
func (s *SSAGenState) Prog(as obj.As) *obj.Prog {
return s.pp.Prog(as)
}
// Pc returns the current Prog.
func (s *SSAGenState) Pc() *obj.Prog {
return s.pp.next
}
// SetPos sets the current source position.
func (s *SSAGenState) SetPos(pos src.XPos) {
s.pp.pos = pos
}
// DebugFriendlySetPosFrom sets the position subject to heuristics
// that reduce "jumpy" line number churn when debugging.
// Spill/fill/copy instructions from the register allocator,
// phi functions, and instructions with a no-pos position
// are examples of instructions that can cause churn.
func (s *SSAGenState) DebugFriendlySetPosFrom(v *ssa.Value) {
// The two choices here are either to leave lineno unchanged,
// or to explicitly set it to src.NoXPos. Leaving it unchanged
// (reusing the preceding line number) produces slightly better-
// looking assembly language output from the compiler, and is
// expected by some already-existing tests.
// The debug information appears to be the same in either case.
switch v.Op {
case ssa.OpPhi, ssa.OpCopy, ssa.OpLoadReg, ssa.OpStoreReg:
// leave the position unchanged from beginning of block
// or previous line number.
default:
if v.Pos != src.NoXPos {
s.SetPos(v.Pos)
}
}
}
// genssa appends entries to pp for each instruction in f.
func genssa(f *ssa.Func, pp *Progs) {
var s SSAGenState
e := f.Frontend().(*ssafn)
// Generate GC bitmaps, except if the stack is too large,
// in which case compilation will fail later anyway (issue 20529).
if e.stksize < maxStackSize {
s.stackMapIndex = liveness(e, f)
}
// Remember where each block starts.
s.bstart = make([]*obj.Prog, f.NumBlocks())
s.pp = pp
var progToValue map[*obj.Prog]*ssa.Value
var progToBlock map[*obj.Prog]*ssa.Block
var valueToProg []*obj.Prog
var logProgs = e.log
if logProgs {
progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues())
progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
f.Logf("genssa %s\n", f.Name)
progToBlock[s.pp.next] = f.Blocks[0]
}
if thearch.Use387 {
s.SSEto387 = map[int16]int16{}
}
s.ScratchFpMem = e.scratchFpMem
logLocationLists := Debug_locationlist != 0
if Ctxt.Flag_locationlists {
e.curfn.Func.DebugInfo = ssa.BuildFuncDebug(f, logLocationLists)
valueToProg = make([]*obj.Prog, f.NumValues())
}
// Emit basic blocks
for i, b := range f.Blocks {
s.bstart[b.ID] = s.pp.next
// Emit values in block
thearch.SSAMarkMoves(&s, b)
for _, v := range b.Values {
x := s.pp.next
s.DebugFriendlySetPosFrom(v)
switch v.Op {
case ssa.OpInitMem:
// memory arg needs no code
case ssa.OpArg:
// input args need no code
case ssa.OpSP, ssa.OpSB:
// nothing to do
case ssa.OpSelect0, ssa.OpSelect1:
// nothing to do
case ssa.OpGetG:
// nothing to do when there's a g register,
// and checkLower complains if there's not
case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive:
// nothing to do; already used by liveness
case ssa.OpVarKill:
// Zero variable if it is ambiguously live.
// After the VARKILL anything this variable references
// might be collected. If it were to become live again later,
// the GC will see references to already-collected objects.
// See issue 20029.
n := v.Aux.(*Node)
if n.Name.Needzero() {
if n.Class() != PAUTO {
v.Fatalf("zero of variable which isn't PAUTO %v", n)
}
if n.Type.Size()%int64(Widthptr) != 0 {
v.Fatalf("zero of variable not a multiple of ptr size %v", n)
}
thearch.ZeroAuto(s.pp, n)
}
case ssa.OpPhi:
CheckLoweredPhi(v)
case ssa.OpRegKill:
// nothing to do
default:
// let the backend handle it
thearch.SSAGenValue(&s, v)
}
if Ctxt.Flag_locationlists {
valueToProg[v.ID] = x
}
if logProgs {
for ; x != s.pp.next; x = x.Link {
progToValue[x] = v
}
}
}
// Emit control flow instructions for block
var next *ssa.Block
if i < len(f.Blocks)-1 && Debug['N'] == 0 {
// If -N, leave next==nil so every block with successors
// ends in a JMP (except call blocks - plive doesn't like
// select{send,recv} followed by a JMP call). Helps keep
// line numbers for otherwise empty blocks.
next = f.Blocks[i+1]
}
x := s.pp.next
s.SetPos(b.Pos)
thearch.SSAGenBlock(&s, b, next)
if logProgs {
for ; x != s.pp.next; x = x.Link {
progToBlock[x] = b
}
}
}
if Ctxt.Flag_locationlists {
for _, locList := range e.curfn.Func.DebugInfo.Variables {
for _, loc := range locList.Locations {
loc.StartProg = valueToProg[loc.Start.ID]
if loc.End == nil {
Fatalf("empty loc %v compiling %v", loc, f.Name)
}
loc.EndProg = valueToProg[loc.End.ID]
if !logLocationLists {
loc.Start = nil
loc.End = nil
}
}
}
}
// Resolve branches
for _, br := range s.Branches {
br.P.To.Val = s.bstart[br.B.ID]
}
if logProgs {
for p := pp.Text; p != nil; p = p.Link {
var s string
if v, ok := progToValue[p]; ok {
s = v.String()
} else if b, ok := progToBlock[p]; ok {
s = b.String()
} else {
s = " " // most value and branch strings are 2-3 characters long
}
f.Logf("%s\t%s\n", s, p)
}
if f.HTMLWriter != nil {
// LineHist is defunct now - this code won't do
// anything.
// TODO: fix this (ideally without a global variable)
// saved := pp.Text.Ctxt.LineHist.PrintFilenameOnly
// pp.Text.Ctxt.LineHist.PrintFilenameOnly = true
var buf bytes.Buffer
buf.WriteString("<code>")
buf.WriteString("<dl class=\"ssa-gen\">")
for p := pp.Text; p != nil; p = p.Link {
buf.WriteString("<dt class=\"ssa-prog-src\">")
if v, ok := progToValue[p]; ok {
buf.WriteString(v.HTML())
} else if b, ok := progToBlock[p]; ok {
buf.WriteString(b.HTML())
}
buf.WriteString("</dt>")
buf.WriteString("<dd class=\"ssa-prog\">")
buf.WriteString(html.EscapeString(p.String()))
buf.WriteString("</dd>")
buf.WriteString("</li>")
}
buf.WriteString("</dl>")
buf.WriteString("</code>")
f.HTMLWriter.WriteColumn("genssa", buf.String())
// pp.Text.Ctxt.LineHist.PrintFilenameOnly = saved
}
}
defframe(&s, e)
if Debug['f'] != 0 {
frame(0)
}
f.HTMLWriter.Close()
f.HTMLWriter = nil
}
func defframe(s *SSAGenState, e *ssafn) {
pp := s.pp
frame := Rnd(s.maxarg+e.stksize, int64(Widthreg))
if thearch.PadFrame != nil {
frame = thearch.PadFrame(frame)
}
// Fill in argument and frame size.
pp.Text.To.Type = obj.TYPE_TEXTSIZE
pp.Text.To.Val = int32(Rnd(e.curfn.Type.ArgWidth(), int64(Widthreg)))
pp.Text.To.Offset = frame
// Insert code to zero ambiguously live variables so that the
// garbage collector only sees initialized values when it
// looks for pointers.
p := pp.Text
var lo, hi int64
// Opaque state for backend to use. Current backends use it to
// keep track of which helper registers have been zeroed.
var state uint32
// Iterate through declarations. They are sorted in decreasing Xoffset order.
for _, n := range e.curfn.Func.Dcl {
if !n.Name.Needzero() {
continue
}
if n.Class() != PAUTO {
Fatalf("needzero class %d", n.Class())
}
if n.Type.Size()%int64(Widthptr) != 0 || n.Xoffset%int64(Widthptr) != 0 || n.Type.Size() == 0 {
Fatalf("var %L has size %d offset %d", n, n.Type.Size(), n.Xoffset)
}
if lo != hi && n.Xoffset+n.Type.Size() >= lo-int64(2*Widthreg) {
// Merge with range we already have.
lo = n.Xoffset
continue
}
// Zero old range
p = thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
// Set new range.
lo = n.Xoffset
hi = lo + n.Type.Size()
}
// Zero final range.
thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
}
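// For illustration only (hypothetical offsets): on a 64-bit target with Widthreg == 8,
// two needzero autos at Xoffset -8 and Xoffset -24 (size 8 each) are visited in
// decreasing Xoffset order. The first sets lo=-8, hi=0; the second satisfies
// -24+8 >= -8-16 and is merged by lowering lo to -24, so the final ZeroRange call
// zeroes the whole 24-byte span in one go.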
type FloatingEQNEJump struct {
Jump obj.As
Index int
}
func (s *SSAGenState) oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump) {
p := s.Prog(jumps.Jump)
p.To.Type = obj.TYPE_BRANCH
to := jumps.Index
s.Branches = append(s.Branches, Branch{p, b.Succs[to].Block()})
}
func (s *SSAGenState) FPJump(b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) {
switch next {
case b.Succs[0].Block():
s.oneFPJump(b, &jumps[0][0])
s.oneFPJump(b, &jumps[0][1])
case b.Succs[1].Block():
s.oneFPJump(b, &jumps[1][0])
s.oneFPJump(b, &jumps[1][1])
default:
s.oneFPJump(b, &jumps[1][0])
s.oneFPJump(b, &jumps[1][1])
q := s.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, Branch{q, b.Succs[1].Block()})
}
}
func AuxOffset(v *ssa.Value) (offset int64) {
if v.Aux == nil {
return 0
}
n, ok := v.Aux.(*Node)
if !ok {
v.Fatalf("bad aux type in %s\n", v.LongString())
}
if n.Class() == PAUTO {
return n.Xoffset
}
return 0
}
// AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
func AddAux(a *obj.Addr, v *ssa.Value) {
AddAux2(a, v, v.AuxInt)
}
func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR {
v.Fatalf("bad AddAux addr %v", a)
}
// add integer offset
a.Offset += offset
// If no additional symbol offset, we're done.
if v.Aux == nil {
return
}
// Add symbol's offset from its base register.
switch n := v.Aux.(type) {
case *obj.LSym:
a.Name = obj.NAME_EXTERN
a.Sym = n
case *Node:
if n.Class() == PPARAM || n.Class() == PPARAMOUT {
a.Name = obj.NAME_PARAM
a.Sym = n.Orig.Sym.Linksym()
a.Offset += n.Xoffset
break
}
a.Name = obj.NAME_AUTO
a.Sym = n.Sym.Linksym()
a.Offset += n.Xoffset
default:
v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
}
}
// extendIndex extends v to a full int width.
// It panics using the given function if v does not fit in an int (only on 32-bit archs).
func (s *state) extendIndex(v *ssa.Value, panicfn *obj.LSym) *ssa.Value {
size := v.Type.Size()
if size == s.config.PtrSize {
return v
}
if size > s.config.PtrSize {
// truncate 64-bit indexes on 32-bit pointer archs. Test the
// high word and branch to out-of-bounds failure if it is not 0.
if Debug['B'] == 0 {
hi := s.newValue1(ssa.OpInt64Hi, types.Types[TUINT32], v)
cmp := s.newValue2(ssa.OpEq32, types.Types[TBOOL], hi, s.constInt32(types.Types[TUINT32], 0))
s.check(cmp, panicfn)
}
return s.newValue1(ssa.OpTrunc64to32, types.Types[TINT], v)
}
// Extend value to the required size
var op ssa.Op
if v.Type.IsSigned() {
switch 10*size + s.config.PtrSize {
case 14:
op = ssa.OpSignExt8to32
case 18:
op = ssa.OpSignExt8to64
case 24:
op = ssa.OpSignExt16to32
case 28:
op = ssa.OpSignExt16to64
case 48:
op = ssa.OpSignExt32to64
default:
s.Fatalf("bad signed index extension %s", v.Type)
}
} else {
switch 10*size + s.config.PtrSize {
case 14:
op = ssa.OpZeroExt8to32
case 18:
op = ssa.OpZeroExt8to64
case 24:
op = ssa.OpZeroExt16to32
case 28:
op = ssa.OpZeroExt16to64
case 48:
op = ssa.OpZeroExt32to64
default:
s.Fatalf("bad unsigned index extension %s", v.Type)
}
}
return s.newValue1(op, types.Types[TINT], v)
}
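// For illustration only: the switch key above packs (index size, pointer size) into
// one integer. For example, an int8 index on a 32-bit target yields 10*1+4 = 14
// (ssa.OpSignExt8to32), and an int32 index on a 64-bit target yields 10*4+8 = 48
// (ssa.OpSignExt32to64).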
// CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
// Called during ssaGenValue.
func CheckLoweredPhi(v *ssa.Value) {
if v.Op != ssa.OpPhi {
v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString())
}
if v.Type.IsMemory() {
return
}
f := v.Block.Func
loc := f.RegAlloc[v.ID]
for _, a := range v.Args {
if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
v.Fatalf("phi arg at different location than phi: %v @ %s, but arg %v @ %s\n%s\n", v, loc, a, aloc, v.Block.Func)
}
}
}
// CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block.
// The output of LoweredGetClosurePtr is generally hardwired to the correct register.
// That register contains the closure pointer on closure entry.
func CheckLoweredGetClosurePtr(v *ssa.Value) {
entry := v.Block.Func.Entry
if entry != v.Block || entry.Values[0] != v {
Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
}
}
// AutoVar returns a *Node and int64 representing the auto variable and offset within it
// where v should be spilled.
func AutoVar(v *ssa.Value) (*Node, int64) {
loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot)
if v.Type.Size() > loc.Type.Size() {
v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
}
return loc.N.(*Node), loc.Off
}
func AddrAuto(a *obj.Addr, v *ssa.Value) {
n, off := AutoVar(v)
a.Type = obj.TYPE_MEM
a.Sym = n.Sym.Linksym()
a.Reg = int16(thearch.REGSP)
a.Offset = n.Xoffset + off
if n.Class() == PPARAM || n.Class() == PPARAMOUT {
a.Name = obj.NAME_PARAM
} else {
a.Name = obj.NAME_AUTO
}
}
func (s *SSAGenState) AddrScratch(a *obj.Addr) {
if s.ScratchFpMem == nil {
panic("no scratch memory available; forgot to declare usesScratch for Op?")
}
a.Type = obj.TYPE_MEM
a.Name = obj.NAME_AUTO
a.Sym = s.ScratchFpMem.Sym.Linksym()
a.Reg = int16(thearch.REGSP)
a.Offset = s.ScratchFpMem.Xoffset
}
func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
idx, ok := s.stackMapIndex[v]
if !ok {
Fatalf("missing stack map index for %v", v.LongString())
}
p := s.Prog(obj.APCDATA)
Addrconst(&p.From, objabi.PCDATA_StackMapIndex)
Addrconst(&p.To, int64(idx))
if sym, _ := v.Aux.(*obj.LSym); sym == Deferreturn {
// Deferred calls will appear to be returning to
// the CALL deferreturn(SB) that we are about to emit.
// However, the stack trace code will show the line
// of the instruction byte before the return PC.
// To avoid that being an unrelated instruction,
// insert an actual hardware NOP that will have the right line number.
// This is different from obj.ANOP, which is a virtual no-op
// that doesn't make it into the instruction stream.
thearch.Ginsnop(s.pp)
}
p = s.Prog(obj.ACALL)
if sym, ok := v.Aux.(*obj.LSym); ok {
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = sym
} else {
// TODO(mdempsky): Can these differences be eliminated?
switch thearch.LinkArch.Family {
case sys.AMD64, sys.I386, sys.PPC64, sys.S390X:
p.To.Type = obj.TYPE_REG
case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
p.To.Type = obj.TYPE_MEM
default:
Fatalf("unknown indirect call family")
}
p.To.Reg = v.Args[0].Reg()
}
if s.maxarg < v.AuxInt {
s.maxarg = v.AuxInt
}
return p
}
// fieldIdx finds the index of the field referred to by the ODOT node n.
func fieldIdx(n *Node) int {
t := n.Left.Type
f := n.Sym
if !t.IsStruct() {
panic("ODOT's LHS is not a struct")
}
var i int
for _, t1 := range t.Fields().Slice() {
if t1.Sym != f {
i++
continue
}
if t1.Offset != n.Xoffset {
panic("field offset doesn't match")
}
return i
}
panic(fmt.Sprintf("can't find field in expr %v\n", n))
// TODO: keep the result of this function somewhere in the ODOT Node
// so we don't have to recompute it each time we need it.
}
// ssafn holds frontend information about a function that the backend is processing.
// It also exports a bunch of compiler services for the ssa backend.
type ssafn struct {
curfn *Node
strings map[string]interface{} // map from constant string to data symbols
scratchFpMem *Node // temp for floating point register / memory moves on some architectures
stksize int64 // stack size for current frame
stkptrsize int64 // prefix of stack containing pointers
log bool
}
// StringData returns a symbol (a *types.Sym wrapped in an interface) which
// is the data component of a global string constant containing s.
func (e *ssafn) StringData(s string) interface{} {
if aux, ok := e.strings[s]; ok {
return aux
}
if e.strings == nil {
e.strings = make(map[string]interface{})
}
data := stringsym(s)
e.strings[s] = data
return data
}
func (e *ssafn) Auto(pos src.XPos, t *types.Type) ssa.GCNode {
n := tempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
return n
}
func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
ptrType := types.NewPtr(types.Types[TUINT8])
lenType := types.Types[TINT]
if n.Class() == PAUTO && !n.Addrtaken() {
// Split this string up into two separate variables.
p := e.splitSlot(&name, ".ptr", 0, ptrType)
l := e.splitSlot(&name, ".len", ptrType.Size(), lenType)
return p, l
}
// Return the two parts of the larger variable.
return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)}
}
func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
t := types.NewPtr(types.Types[TUINT8])
if n.Class() == PAUTO && !n.Addrtaken() {
// Split this interface up into two separate variables.
f := ".itab"
if n.Type.IsEmptyInterface() {
f = ".type"
}
c := e.splitSlot(&name, f, 0, t)
d := e.splitSlot(&name, ".data", t.Size(), t)
return c, d
}
// Return the two parts of the larger variable.
return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)}
}
func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
ptrType := types.NewPtr(name.Type.ElemType())
lenType := types.Types[TINT]
if n.Class() == PAUTO && !n.Addrtaken() {
// Split this slice up into three separate variables.
p := e.splitSlot(&name, ".ptr", 0, ptrType)
l := e.splitSlot(&name, ".len", ptrType.Size(), lenType)
c := e.splitSlot(&name, ".cap", ptrType.Size()+lenType.Size(), lenType)
return p, l, c
}
// Return the three parts of the larger variable.
return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off},
ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)},
ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(2*Widthptr)}
}
func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
s := name.Type.Size() / 2
var t *types.Type
if s == 8 {
t = types.Types[TFLOAT64]
} else {
t = types.Types[TFLOAT32]
}
if n.Class() == PAUTO && !n.Addrtaken() {
// Split this complex up into two separate variables.
r := e.splitSlot(&name, ".real", 0, t)
i := e.splitSlot(&name, ".imag", t.Size(), t)
return r, i
}
// Return the two parts of the larger variable.
return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s}
}
func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
var t *types.Type
if name.Type.IsSigned() {
t = types.Types[TINT32]
} else {
t = types.Types[TUINT32]
}
if n.Class() == PAUTO && !n.Addrtaken() {
// Split this int64 up into two separate variables.
if thearch.LinkArch.ByteOrder == binary.BigEndian {
return e.splitSlot(&name, ".hi", 0, t), e.splitSlot(&name, ".lo", t.Size(), types.Types[TUINT32])
}
return e.splitSlot(&name, ".hi", t.Size(), t), e.splitSlot(&name, ".lo", 0, types.Types[TUINT32])
}
// Return the two parts of the larger variable.
if thearch.LinkArch.ByteOrder == binary.BigEndian {
return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off + 4}
}
return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off}
}
func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
n := name.N.(*Node)
st := name.Type
ft := st.FieldType(i)
var offset int64
for f := 0; f < i; f++ {
offset += st.FieldType(f).Size()
}
if n.Class() == PAUTO && !n.Addrtaken() {
// Note: the _ field may appear several times. But
// have no fear, identically-named but distinct Autos are
// ok, albeit maybe confusing for a debugger.
return e.splitSlot(&name, "."+st.FieldName(i), offset, ft)
}
return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)}
}
func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
n := name.N.(*Node)
at := name.Type
if at.NumElem() != 1 {
Fatalf("bad array size")
}
et := at.ElemType()
if n.Class() == PAUTO && !n.Addrtaken() {
return e.splitSlot(&name, "[0]", 0, et)
}
return ssa.LocalSlot{N: n, Type: et, Off: name.Off}
}
func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
return itabsym(it, offset)
}
// splitSlot returns a slot representing the data of parent starting at offset.
func (e *ssafn) splitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
s := &types.Sym{Name: parent.N.(*Node).Sym.Name + suffix, Pkg: localpkg}
n := new(Node)
n.Name = new(Name)
n.Op = ONAME
n.Pos = parent.N.(*Node).Pos
n.Orig = n
s.Def = asTypesNode(n)
asNode(s.Def).Name.SetUsed(true)
n.Sym = s
n.Type = t
n.SetClass(PAUTO)
n.SetAddable(true)
n.Esc = EscNever
n.Name.Curfn = e.curfn
e.curfn.Func.Dcl = append(e.curfn.Func.Dcl, n)
dowidth(t)
return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset}
}
func (e *ssafn) CanSSA(t *types.Type) bool {
return canSSAType(t)
}
func (e *ssafn) Line(pos src.XPos) string {
return linestr(pos)
}
// Logf logs a message from the compiler.
func (e *ssafn) Logf(msg string, args ...interface{}) {
if e.log {
fmt.Printf(msg, args...)
}
}
func (e *ssafn) Log() bool {
return e.log
}
// Fatalf reports a compiler error and exits.
func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) {
lineno = pos
Fatalf(msg, args...)
}
// Warnl reports a "warning", which is usually flag-triggered
// logging output for the benefit of tests.
func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) {
Warnl(pos, fmt_, args...)
}
func (e *ssafn) Debug_checknil() bool {
return Debug_checknil != 0
}
func (e *ssafn) Debug_wb() bool {
return Debug_wb != 0
}
func (e *ssafn) UseWriteBarrier() bool {
return use_writebarrier
}
func (e *ssafn) Syslook(name string) *obj.LSym {
switch name {
case "goschedguarded":
return goschedguarded
case "writeBarrier":
return writeBarrier
case "writebarrierptr":
return writebarrierptr
case "typedmemmove":
return typedmemmove
case "typedmemclr":
return typedmemclr
}
Fatalf("unknown Syslook func %v", name)
return nil
}
func (n *Node) Typ() *types.Type {
return n.Type
}
func (n *Node) StorageClass() ssa.StorageClass {
switch n.Class() {
case PPARAM:
return ssa.ClassParam
case PPARAMOUT:
return ssa.ClassParamOut
case PAUTO:
return ssa.ClassAuto
default:
Fatalf("untranslateable storage class for %v: %s", n, n.Class())
return 0
}
}
| [
"\"GOSSAFUNC\"",
"\"GOSSAFUNC\""
]
| []
| [
"GOSSAFUNC"
]
| [] | ["GOSSAFUNC"] | go | 1 | 0 | |
sdh/metrics/qualitative/__init__.py | """
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
This file is part of the Smart Developer Hub Project:
http://www.smartdeveloperhub.org
Center for Open Middleware
http://www.centeropenmiddleware.com/
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2015 Center for Open Middleware.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
__author__ = 'Fernando Serena'
import calendar
from sdh.metrics.server import MetricsApp
from sdh.metrics.qualitative.store import QStore
from sdh.metrics.store.metrics import store_calc
import os
import urlparse
config = os.environ.get('CONFIG', 'sdh.metrics.qualitative.config.DevelopmentConfig')
app = MetricsApp(__name__, config)
st = QStore(**app.config['REDIS'])
app.store = st
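# For illustration only (hypothetical config class and entry point): the configuration
# class is selected through the CONFIG environment variable and falls back to
# DevelopmentConfig when it is unset, e.g.
#   CONFIG=sdh.metrics.qualitative.config.ProductionConfig python run.py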
| []
| []
| [
"CONFIG"
]
| [] | ["CONFIG"] | python | 1 | 0 | |
vendor/github.com/hashicorp/terraform/builtin/providers/google/resource_compute_instance_template_test.go | package google
import (
"fmt"
"os"
"strings"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"google.golang.org/api/compute/v1"
)
func TestAccComputeInstanceTemplate_basic(t *testing.T) {
var instanceTemplate compute.InstanceTemplate
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccComputeInstanceTemplate_basic,
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceTemplateExists(
"google_compute_instance_template.foobar", &instanceTemplate),
testAccCheckComputeInstanceTemplateTag(&instanceTemplate, "foo"),
testAccCheckComputeInstanceTemplateMetadata(&instanceTemplate, "foo", "bar"),
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "projects/debian-cloud/global/images/debian-8-jessie-v20160803", true, true),
),
},
},
})
}
func TestAccComputeInstanceTemplate_IP(t *testing.T) {
var instanceTemplate compute.InstanceTemplate
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccComputeInstanceTemplate_ip,
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceTemplateExists(
"google_compute_instance_template.foobar", &instanceTemplate),
testAccCheckComputeInstanceTemplateNetwork(&instanceTemplate),
),
},
},
})
}
func TestAccComputeInstanceTemplate_networkIP(t *testing.T) {
var instanceTemplate compute.InstanceTemplate
networkIP := "10.128.0.2"
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccComputeInstanceTemplate_networkIP(networkIP),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceTemplateExists(
"google_compute_instance_template.foobar", &instanceTemplate),
testAccCheckComputeInstanceTemplateNetwork(&instanceTemplate),
testAccCheckComputeInstanceTemplateNetworkIP(
"google_compute_instance_template.foobar", networkIP, &instanceTemplate),
),
},
},
})
}
func TestAccComputeInstanceTemplate_disks(t *testing.T) {
var instanceTemplate compute.InstanceTemplate
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccComputeInstanceTemplate_disks,
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceTemplateExists(
"google_compute_instance_template.foobar", &instanceTemplate),
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "projects/debian-cloud/global/images/debian-8-jessie-v20160803", true, true),
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "terraform-test-foobar", false, false),
),
},
},
})
}
func TestAccComputeInstanceTemplate_subnet_auto(t *testing.T) {
var instanceTemplate compute.InstanceTemplate
network := "network-" + acctest.RandString(10)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccComputeInstanceTemplate_subnet_auto(network),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceTemplateExists(
"google_compute_instance_template.foobar", &instanceTemplate),
testAccCheckComputeInstanceTemplateNetworkName(&instanceTemplate, network),
),
},
},
})
}
func TestAccComputeInstanceTemplate_subnet_custom(t *testing.T) {
var instanceTemplate compute.InstanceTemplate
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccComputeInstanceTemplate_subnet_custom,
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceTemplateExists(
"google_compute_instance_template.foobar", &instanceTemplate),
testAccCheckComputeInstanceTemplateSubnetwork(&instanceTemplate),
),
},
},
})
}
func TestAccComputeInstanceTemplate_subnet_xpn(t *testing.T) {
var instanceTemplate compute.InstanceTemplate
var xpn_host = os.Getenv("GOOGLE_XPN_HOST_PROJECT")
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccComputeInstanceTemplate_subnet_xpn(xpn_host),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceTemplateExists(
"google_compute_instance_template.foobar", &instanceTemplate),
testAccCheckComputeInstanceTemplateSubnetwork(&instanceTemplate),
),
},
},
})
}
func TestAccComputeInstanceTemplate_metadata_startup_script(t *testing.T) {
var instanceTemplate compute.InstanceTemplate
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccComputeInstanceTemplate_startup_script,
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceTemplateExists(
"google_compute_instance_template.foobar", &instanceTemplate),
testAccCheckComputeInstanceTemplateStartupScript(&instanceTemplate, "echo 'Hello'"),
),
},
},
})
}
func testAccCheckComputeInstanceTemplateDestroy(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
for _, rs := range s.RootModule().Resources {
if rs.Type != "google_compute_instance_template" {
continue
}
_, err := config.clientCompute.InstanceTemplates.Get(
config.Project, rs.Primary.ID).Do()
if err == nil {
return fmt.Errorf("Instance template still exists")
}
}
return nil
}
func testAccCheckComputeInstanceTemplateExists(n string, instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No ID is set")
}
config := testAccProvider.Meta().(*Config)
found, err := config.clientCompute.InstanceTemplates.Get(
config.Project, rs.Primary.ID).Do()
if err != nil {
return err
}
if found.Name != rs.Primary.ID {
return fmt.Errorf("Instance template not found")
}
*instanceTemplate = *found
return nil
}
}
func testAccCheckComputeInstanceTemplateMetadata(
instanceTemplate *compute.InstanceTemplate,
k string, v string) resource.TestCheckFunc {
return func(s *terraform.State) error {
if instanceTemplate.Properties.Metadata == nil {
return fmt.Errorf("no metadata")
}
for _, item := range instanceTemplate.Properties.Metadata.Items {
if k != item.Key {
continue
}
if item.Value != nil && v == *item.Value {
return nil
}
return fmt.Errorf("bad value for %s: %s", k, *item.Value)
}
return fmt.Errorf("metadata not found: %s", k)
}
}
func testAccCheckComputeInstanceTemplateNetwork(instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc {
return func(s *terraform.State) error {
for _, i := range instanceTemplate.Properties.NetworkInterfaces {
for _, c := range i.AccessConfigs {
if c.NatIP == "" {
return fmt.Errorf("no NAT IP")
}
}
}
return nil
}
}
func testAccCheckComputeInstanceTemplateNetworkName(instanceTemplate *compute.InstanceTemplate, network string) resource.TestCheckFunc {
return func(s *terraform.State) error {
for _, i := range instanceTemplate.Properties.NetworkInterfaces {
if !strings.Contains(i.Network, network) {
return fmt.Errorf("Network doesn't match expected value, Expected: %s Actual: %s", network, i.Network[strings.LastIndex("/", i.Network)+1:])
}
}
return nil
}
}
func testAccCheckComputeInstanceTemplateDisk(instanceTemplate *compute.InstanceTemplate, source string, delete bool, boot bool) resource.TestCheckFunc {
return func(s *terraform.State) error {
if instanceTemplate.Properties.Disks == nil {
return fmt.Errorf("no disks")
}
for _, disk := range instanceTemplate.Properties.Disks {
if disk.InitializeParams == nil {
// Check disk source
if disk.Source == source {
if disk.AutoDelete == delete && disk.Boot == boot {
return nil
}
}
} else {
// Check source image
if disk.InitializeParams.SourceImage == source {
if disk.AutoDelete == delete && disk.Boot == boot {
return nil
}
}
}
}
return fmt.Errorf("Disk not found: %s", source)
}
}
func testAccCheckComputeInstanceTemplateSubnetwork(instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc {
return func(s *terraform.State) error {
for _, i := range instanceTemplate.Properties.NetworkInterfaces {
if i.Subnetwork == "" {
return fmt.Errorf("no subnet")
}
}
return nil
}
}
func testAccCheckComputeInstanceTemplateTag(instanceTemplate *compute.InstanceTemplate, n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
if instanceTemplate.Properties.Tags == nil {
return fmt.Errorf("no tags")
}
for _, k := range instanceTemplate.Properties.Tags.Items {
if k == n {
return nil
}
}
return fmt.Errorf("tag not found: %s", n)
}
}
func testAccCheckComputeInstanceTemplateStartupScript(instanceTemplate *compute.InstanceTemplate, n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
if instanceTemplate.Properties.Metadata == nil && n == "" {
return nil
} else if instanceTemplate.Properties.Metadata == nil && n != "" {
return fmt.Errorf("Expected metadata.startup-script to be '%s', metadata wasn't set at all", n)
}
for _, item := range instanceTemplate.Properties.Metadata.Items {
if item.Key != "startup-script" {
continue
}
if item.Value != nil && *item.Value == n {
return nil
} else if item.Value == nil && n == "" {
return nil
} else if item.Value == nil && n != "" {
return fmt.Errorf("Expected metadata.startup-script to be '%s', wasn't set", n)
} else if *item.Value != n {
return fmt.Errorf("Expected metadata.startup-script to be '%s', got '%s'", n, *item.Value)
}
}
return fmt.Errorf("This should never be reached.")
}
}
func testAccCheckComputeInstanceTemplateNetworkIP(n, networkIP string, instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc {
return func(s *terraform.State) error {
ip := instanceTemplate.Properties.NetworkInterfaces[0].NetworkIP
err := resource.TestCheckResourceAttr(n, "network_interface.0.network_ip", ip)(s)
if err != nil {
return err
}
return resource.TestCheckResourceAttr(n, "network_interface.0.network_ip", networkIP)(s)
}
}
var testAccComputeInstanceTemplate_basic = fmt.Sprintf(`
resource "google_compute_instance_template" "foobar" {
name = "instancet-test-%s"
machine_type = "n1-standard-1"
can_ip_forward = false
tags = ["foo", "bar"]
disk {
source_image = "debian-8-jessie-v20160803"
auto_delete = true
boot = true
}
network_interface {
network = "default"
}
scheduling {
preemptible = false
automatic_restart = true
}
metadata {
foo = "bar"
}
service_account {
scopes = ["userinfo-email", "compute-ro", "storage-ro"]
}
}`, acctest.RandString(10))
var testAccComputeInstanceTemplate_ip = fmt.Sprintf(`
resource "google_compute_address" "foo" {
name = "instancet-test-%s"
}
resource "google_compute_instance_template" "foobar" {
name = "instancet-test-%s"
machine_type = "n1-standard-1"
tags = ["foo", "bar"]
disk {
source_image = "debian-8-jessie-v20160803"
}
network_interface {
network = "default"
access_config {
nat_ip = "${google_compute_address.foo.address}"
}
}
metadata {
foo = "bar"
}
}`, acctest.RandString(10), acctest.RandString(10))
func testAccComputeInstanceTemplate_networkIP(networkIP string) string {
return fmt.Sprintf(`
resource "google_compute_instance_template" "foobar" {
name = "instancet-test-%s"
machine_type = "n1-standard-1"
tags = ["foo", "bar"]
disk {
source_image = "debian-8-jessie-v20160803"
}
network_interface {
network = "default"
network_ip = "%s"
}
metadata {
foo = "bar"
}
}`, acctest.RandString(10), networkIP)
}
var testAccComputeInstanceTemplate_disks = fmt.Sprintf(`
resource "google_compute_disk" "foobar" {
name = "instancet-test-%s"
image = "debian-8-jessie-v20160803"
size = 10
type = "pd-ssd"
zone = "us-central1-a"
}
resource "google_compute_instance_template" "foobar" {
name = "instancet-test-%s"
machine_type = "n1-standard-1"
disk {
source_image = "debian-8-jessie-v20160803"
auto_delete = true
disk_size_gb = 100
boot = true
}
disk {
source = "terraform-test-foobar"
auto_delete = false
boot = false
}
network_interface {
network = "default"
}
metadata {
foo = "bar"
}
}`, acctest.RandString(10), acctest.RandString(10))
func testAccComputeInstanceTemplate_subnet_auto(network string) string {
return fmt.Sprintf(`
resource "google_compute_network" "auto-network" {
name = "%s"
auto_create_subnetworks = true
}
resource "google_compute_instance_template" "foobar" {
name = "instance-tpl-%s"
machine_type = "n1-standard-1"
disk {
source_image = "debian-8-jessie-v20160803"
auto_delete = true
disk_size_gb = 10
boot = true
}
network_interface {
network = "${google_compute_network.auto-network.name}"
}
metadata {
foo = "bar"
}
}`, network, acctest.RandString(10))
}
var testAccComputeInstanceTemplate_subnet_custom = fmt.Sprintf(`
resource "google_compute_network" "network" {
name = "network-%s"
auto_create_subnetworks = false
}
resource "google_compute_subnetwork" "subnetwork" {
name = "subnetwork-%s"
ip_cidr_range = "10.0.0.0/24"
region = "us-central1"
network = "${google_compute_network.network.self_link}"
}
resource "google_compute_instance_template" "foobar" {
name = "instance-test-%s"
machine_type = "n1-standard-1"
region = "us-central1"
disk {
source_image = "debian-8-jessie-v20160803"
auto_delete = true
disk_size_gb = 10
boot = true
}
network_interface {
subnetwork = "${google_compute_subnetwork.subnetwork.name}"
}
metadata {
foo = "bar"
}
}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
func testAccComputeInstanceTemplate_subnet_xpn(xpn_host string) string {
return fmt.Sprintf(`
resource "google_compute_network" "network" {
name = "network-%s"
auto_create_subnetworks = false
project = "%s"
}
resource "google_compute_subnetwork" "subnetwork" {
name = "subnetwork-%s"
ip_cidr_range = "10.0.0.0/24"
region = "us-central1"
network = "${google_compute_network.network.self_link}"
project = "%s"
}
resource "google_compute_instance_template" "foobar" {
name = "instance-test-%s"
machine_type = "n1-standard-1"
region = "us-central1"
disk {
source_image = "debian-8-jessie-v20160803"
auto_delete = true
disk_size_gb = 10
boot = true
}
network_interface {
subnetwork = "${google_compute_subnetwork.subnetwork.name}"
subnetwork_project = "${google_compute_subnetwork.subnetwork.project}"
}
metadata {
foo = "bar"
}
}`, acctest.RandString(10), xpn_host, acctest.RandString(10), xpn_host, acctest.RandString(10))
}
var testAccComputeInstanceTemplate_startup_script = fmt.Sprintf(`
resource "google_compute_instance_template" "foobar" {
name = "instance-test-%s"
machine_type = "n1-standard-1"
disk {
source_image = "debian-8-jessie-v20160803"
auto_delete = true
disk_size_gb = 10
boot = true
}
metadata {
foo = "bar"
}
network_interface{
network = "default"
}
metadata_startup_script = "echo 'Hello'"
}`, acctest.RandString(10))
| [
"\"GOOGLE_XPN_HOST_PROJECT\""
]
| []
| [
"GOOGLE_XPN_HOST_PROJECT"
]
| [] | ["GOOGLE_XPN_HOST_PROJECT"] | go | 1 | 0 |