filename (string, 4–198 chars) | content (string, 25–939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, 2–3.9k chars) | lang (string, 3 classes) | constargcount (float64, 0–129 or null) | variableargcount (float64, always 0 or null) | sentence (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
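A minimal sketch of reading these rows, assuming the table is exported as JSON Lines with one object per row and the field names above (the file name rows.jsonl is hypothetical):

import json

with open("rows.jsonl") as f:
    for row in map(json.loads, f):
        # constargjson holds the constant env-var names as a JSON-encoded list
        env_vars = json.loads(row["constargjson"]) if row["constargjson"] else []
        print(row["filename"], row["lang"], env_vars)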
cmd/giouiorg/main.go
|
// SPDX-License-Identifier: Unlicense OR MIT
package main
import (
"fmt"
"log"
"net/http"
"os"
"strings"
_ "gioui.org/website/internal/playground"
"gioui.org/website/page"
)
func main() {
subHandler("/scripts.js", http.HandlerFunc(page.ScriptsHandler))
subHandler("/files/", http.FileServer(http.Dir("files")))
subHandler("/issue/", http.HandlerFunc(issueHandler))
subHandler("/commit/", http.HandlerFunc(commitHandler))
subHandler("/patch/", http.HandlerFunc(patchesHandler))
subHandler("/include/files/", http.FileServer(http.Dir("include/files")))
site, err := page.NewSite("Gio - immediate mode GUI in Go")
if err != nil {
log.Fatal(err)
}
http.Handle("/", vanityHandler(
site.Handler(http.HandlerFunc(godocHandler)),
))
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
log.Fatal(http.ListenAndServe(fmt.Sprintf(":%s", port), nil))
}
func subHandler(root string, handler http.Handler) {
http.Handle(root, http.StripPrefix(root, handler))
}
func patchesHandler(w http.ResponseWriter, r *http.Request) {
url := "https://lists.sr.ht/~eliasnaur/gio-patches/patches/" + r.URL.Path
http.Redirect(w, r, url, http.StatusFound)
}
func issueHandler(w http.ResponseWriter, r *http.Request) {
url := "https://todo.sr.ht/~eliasnaur/gio/" + r.URL.Path
http.Redirect(w, r, url, http.StatusFound)
}
func commitHandler(w http.ResponseWriter, r *http.Request) {
commit := r.URL.Path
var url string
if commit == "/" {
url = "https://git.sr.ht/~eliasnaur/gio/log"
} else {
url = "https://git.sr.ht/~eliasnaur/gio/commit/" + commit
}
http.Redirect(w, r, url, http.StatusFound)
}
func godocHandler(w http.ResponseWriter, r *http.Request) {
godocURL := "https://pkg.go.dev/gioui.org" + r.URL.Path
resp, err := http.Head(godocURL)
switch {
case err != nil:
http.Error(w, "failed to HEAD godoc.org", http.StatusInternalServerError)
case resp.StatusCode == http.StatusOK:
http.Redirect(w, r, godocURL, http.StatusFound)
case resp.StatusCode == http.StatusMethodNotAllowed:
// Because of https://github.com/golang/go/issues/43739, we can't HEAD
// the pkg.go.dev site. Redirect blindly.
http.Redirect(w, r, godocURL, http.StatusFound)
default:
http.NotFound(w, r)
}
}
// vanityHandler serves git location meta headers for the go tool.
// If the go-get=1 query is not present it falls back to handler.
func vanityHandler(fallback http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Query().Get("go-get") == "1" {
var repo, root string
p := r.URL.Path
switch {
case p == "/example":
root = "gioui.org/example"
repo = "https://git.sr.ht/~eliasnaur/gio-example"
case p == "/website":
root = "gioui.org/website"
repo = "https://git.sr.ht/~eliasnaur/giouiorg"
case strings.HasPrefix(p, "/x"):
root = "gioui.org/x"
repo = "https://git.sr.ht/~whereswaldon/gio-x"
default:
root = "gioui.org"
repo = "https://git.sr.ht/~eliasnaur/gio"
}
fmt.Fprintf(w, `<html><head>
<meta name="go-import" content="%[1]s git %[2]s">
<meta name="go-source" content="%[1]s _ %[2]s/tree/main{/dir} %[2]s/tree/main{/dir}/{file}#L{line}">
</head></html>`, root, repo)
return
}
fallback.ServeHTTP(w, r)
})
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
pkg/client/cli/telepresence_test.go
|
package cli_test
import (
"bufio"
"context"
"encoding/base64"
"fmt"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/stretchr/testify/suite"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/clientcmd/api"
"github.com/datawire/ambassador/pkg/dtest"
"github.com/datawire/dlib/dcontext"
"github.com/datawire/dlib/dexec"
"github.com/datawire/dlib/dgroup"
"github.com/datawire/dlib/dhttp"
"github.com/datawire/dlib/dlog"
"github.com/telepresenceio/telepresence/v2/pkg/client"
_ "github.com/telepresenceio/telepresence/v2/pkg/client/cli"
"github.com/telepresenceio/telepresence/v2/pkg/filelocation"
"github.com/telepresenceio/telepresence/v2/pkg/version"
)
// serviceCount is the number of interceptable services that get installed
// in the cluster and are later intercepted
const serviceCount = 3
func TestTelepresence(t *testing.T) {
ctx := dlog.NewTestContext(t, false)
dtest.WithMachineLock(ctx, func(ctx context.Context) {
suite.Run(t, new(telepresenceSuite))
})
}
type telepresenceSuite struct {
suite.Suite
testVersion string
namespace string
managerTestNamespace string
}
func (ts *telepresenceSuite) SetupSuite() {
// Check that the "ko" program exists, and adjust PATH as necessary.
if info, err := os.Stat("../../../tools/bin/ko"); err != nil || !info.Mode().IsRegular() || (info.Mode().Perm()&0100) == 0 {
ts.FailNow("it looks like the ./tools/bin/ko executable wasn't built; be sure to build it with `make` before running `go test`!")
}
require := ts.Require()
toolbindir, err := filepath.Abs("../../../tools/bin")
require.NoError(err)
_ = os.Chdir("../../..")
os.Setenv("PATH", toolbindir+":"+os.Getenv("PATH"))
// Remove very verbose output from DTEST initialization
log.SetOutput(ioutil.Discard)
ts.testVersion = fmt.Sprintf("v2.0.0-gotest.%d", os.Getpid())
ts.namespace = fmt.Sprintf("telepresence-%d", os.Getpid())
ts.managerTestNamespace = fmt.Sprintf("ambassador-%d", os.Getpid())
version.Version = ts.testVersion
ctx := dlog.NewTestContext(ts.T(), false)
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
defer wg.Done()
executable, err := ts.buildExecutable(ctx)
ts.NoError(err)
client.SetExe(executable)
}()
_ = os.Remove(client.ConnectorSocketName)
err = run(ctx, "sudo", "true")
require.NoError(err, "acquire privileges")
registry := dtest.DockerRegistry(ctx)
os.Setenv("KO_DOCKER_REPO", registry)
os.Setenv("TELEPRESENCE_REGISTRY", registry)
os.Setenv("TELEPRESENCE_MANAGER_NAMESPACE", ts.managerTestNamespace)
wg.Add(1)
go func() {
defer wg.Done()
err := ts.publishManager()
ts.NoError(err)
}()
wg.Wait()
// We do this after the above goroutines are finished, instead of in parallel with them,
// because there seems to be a bug where buildExecutable sometimes modifies the kubeconfig and
// removes the telepresence-test-user that is created in this function.
ts.setupKubeConfig(ctx)
wg.Add(serviceCount)
for i := 0; i < serviceCount; i++ {
i := i
go func() {
defer wg.Done()
err = ts.applyEchoService(ctx, fmt.Sprintf("hello-%d", i))
ts.NoError(err)
}()
}
wg.Add(1)
go func() {
defer wg.Done()
err = ts.applyApp(ctx, "with-probes", "with-probes", 80)
ts.NoError(err)
}()
wg.Add(1)
go func() {
defer wg.Done()
err = ts.applyApp(ctx, "rs-echo", "rs-echo", 80)
ts.NoError(err)
}()
wg.Add(1)
go func() {
defer wg.Done()
err = ts.applyApp(ctx, "ss-echo", "ss-echo", 80)
ts.NoError(err)
}()
wg.Add(1)
go func() {
defer wg.Done()
err = ts.applyApp(ctx, "echo-w-sidecars", "echo-w-sidecars", 80)
ts.NoError(err)
}()
wg.Wait()
// Ensure that telepresence is not logged in
_, _ = telepresence(ts.T(), "logout")
// Ensure that no telepresence is running when the tests start
_, _ = telepresence(ts.T(), "quit")
}
func (ts *telepresenceSuite) TearDownSuite() {
ctx := dlog.NewTestContext(ts.T(), false)
_ = run(ctx, "kubectl", "config", "use-context", "default")
_ = run(ctx, "kubectl", "delete", "namespace", ts.namespace)
_ = run(ctx, "kubectl", "delete", "mutatingwebhookconfiguration", "agent-injector-webhook-"+ts.managerTestNamespace)
_ = run(ctx, "kubectl", "delete", "namespace", ts.managerTestNamespace)
// Undo RBAC things
_ = run(ctx, "kubectl", "delete", "-f", "k8s/client_rbac.yaml")
_ = run(ctx, "kubectl", "config", "delete-context", "telepresence-test-developer")
_ = run(ctx, "kubectl", "config", "delete-user", "telepresence-test-developer")
}
func (ts *telepresenceSuite) TestA_WithNoDaemonRunning() {
ts.Run("Version", func() {
stdout, stderr := telepresence(ts.T(), "version")
ts.Empty(stderr)
ts.Contains(stdout, fmt.Sprintf("Client: %s", client.DisplayVersion()))
})
ts.Run("Status", func() {
out, _ := telepresence(ts.T(), "status")
ts.Contains(out, "Root Daemon: Not running")
ts.Contains(out, "User Daemon: Not running")
})
ts.Run("Connect using invalid KUBECONFIG", func() {
ts.Run("Reports config error and exits", func() {
kubeConfig := os.Getenv("KUBECONFIG")
defer os.Setenv("KUBECONFIG", kubeConfig)
os.Setenv("KUBECONFIG", "/dev/null")
stdout, stderr := telepresence(ts.T(), "connect")
ts.Contains(stderr, "kubeconfig has no context definition")
ts.Contains(stdout, "Launching Telepresence Daemon")
ts.Contains(stdout, "Daemon quitting")
})
})
ts.Run("Connect with non existing context", func() {
ts.Run("Reports connect error and exits", func() {
stdout, stderr := telepresence(ts.T(), "connect", "--context", "not-likely-to-exist")
ts.Contains(stderr, `"not-likely-to-exist" does not exist`)
ts.Contains(stdout, "Launching Telepresence Daemon")
ts.Contains(stdout, "Daemon quitting")
})
})
ts.Run("Connect with a command", func() {
ts.Run("Connects, executes the command, and then exits", func() {
stdout, stderr := telepresence(ts.T(), "connect", "--", client.GetExe(), "status")
require := ts.Require()
require.Empty(stderr)
require.Contains(stdout, "Launching Telepresence Daemon")
require.Contains(stdout, "Connected to context")
require.Contains(stdout, "Kubernetes context:")
require.Regexp(`Telepresence proxy:\s+ON`, stdout)
require.Contains(stdout, "Daemon quitting")
})
})
ts.Run("Root Daemon Log Level", func() {
t := ts.T()
require := ts.Require()
configDir := t.TempDir()
config, err := os.Create(filepath.Join(configDir, "config.yml"))
require.NoError(err)
_, err = config.WriteString("logLevels:\n rootDaemon: debug\n")
require.NoError(err)
config.Close()
logDir := t.TempDir()
ctx := dlog.NewTestContext(t, false)
ctx = filelocation.WithAppUserConfigDir(ctx, configDir)
ctx = filelocation.WithAppUserLogDir(ctx, logDir)
_, stderr := telepresenceContext(ctx, "connect")
require.Empty(stderr)
_, stderr = telepresenceContext(ctx, "quit")
require.Empty(stderr)
rootLog, err := os.Open(filepath.Join(logDir, "daemon.log"))
require.NoError(err)
defer rootLog.Close()
hasDebug := false
scn := bufio.NewScanner(rootLog)
match := regexp.MustCompile(` debug +daemon/server`)
for scn.Scan() && !hasDebug {
hasDebug = match.MatchString(scn.Text())
}
ts.True(hasDebug, "daemon.log does not contain expected debug statements")
})
ts.Run("DNS includes", func() {
t := ts.T()
require := ts.Require()
tmpDir := t.TempDir()
origKubeconfigFileName := os.Getenv("DTEST_KUBECONFIG")
kubeconfigFileName := filepath.Join(tmpDir, "kubeconfig")
configFileName := filepath.Join(tmpDir, "config.yml")
var cfg *api.Config
cfg, err := clientcmd.LoadFromFile(origKubeconfigFileName)
require.NoError(err, "Unable to read DTEST_KUBECONFIG")
require.NoError(api.MinifyConfig(cfg), "unable to minify config")
var cluster *api.Cluster
for _, c := range cfg.Clusters {
cluster = c
break
}
require.NotNilf(cluster, "unable to get cluster from config")
cluster.Extensions = map[string]runtime.Object{"telepresence.io": &runtime.Unknown{
Raw: []byte(`{"dns":{"include-suffixes": [".org"]}}`),
}}
require.NoError(clientcmd.WriteToFile(*cfg, kubeconfigFileName), "unable to write modified kubeconfig")
configFile, err := os.Create(configFileName)
require.NoError(err)
_, err = configFile.WriteString("logLevels:\n rootDaemon: debug\n")
require.NoError(err)
configFile.Close()
defer os.Setenv("KUBECONFIG", origKubeconfigFileName)
os.Setenv("KUBECONFIG", kubeconfigFileName)
ctx := dlog.NewTestContext(t, false)
ctx = filelocation.WithAppUserConfigDir(ctx, tmpDir)
ctx = filelocation.WithAppUserLogDir(ctx, tmpDir)
_, stderr := telepresenceContext(ctx, "connect")
require.Empty(stderr)
_ = run(ctx, "curl", "--silent", "example.org")
_, stderr = telepresenceContext(ctx, "quit")
require.Empty(stderr)
rootLog, err := os.Open(filepath.Join(tmpDir, "daemon.log"))
require.NoError(err)
defer rootLog.Close()
hasLookup := false
scn := bufio.NewScanner(rootLog)
for scn.Scan() && !hasLookup {
hasLookup = strings.Contains(scn.Text(), `LookupHost "example.org"`)
}
ts.True(hasLookup, "daemon.log does not contain expected LookupHost statement")
})
}
func (ts *telepresenceSuite) TestB_Connected() {
suite.Run(ts.T(), &connectedSuite{tpSuite: ts})
}
func (ts *telepresenceSuite) TestC_Uninstall() {
ts.Run("Uninstalls the traffic manager and quits", func() {
require := ts.Require()
ctx := dlog.NewTestContext(ts.T(), false)
names := func() (string, error) {
return ts.kubectlOut(ctx, "get",
"--namespace", ts.managerTestNamespace,
"svc,deploy", "traffic-manager",
"--ignore-not-found",
"-o", "jsonpath={.items[*].metadata.name}")
}
stdout, err := names()
require.NoError(err)
require.Equal(2, len(strings.Split(stdout, " "))) // The service and the deployment
// The telepresence-test-developer will not be able to uninstall everything
require.NoError(run(ctx, "kubectl", "config", "use-context", "default"))
stdout, stderr := telepresence(ts.T(), "uninstall", "--everything")
require.Empty(stderr)
require.Contains(stdout, "Daemon quitting")
require.Eventually(
func() bool {
stdout, _ := names()
return stdout == ""
},
5*time.Second, // waitFor
500*time.Millisecond, // polling interval
)
})
}
type connectedSuite struct {
suite.Suite
tpSuite *telepresenceSuite
}
func (cs *connectedSuite) ns() string {
return cs.tpSuite.namespace
}
func (cs *connectedSuite) SetupSuite() {
require := cs.Require()
c := dlog.NewTestContext(cs.T(), false)
cs.Eventually(func() bool {
return run(c, "kubectl", "config", "use-context", "telepresence-test-developer") == nil
}, 5*time.Second, time.Second)
stdout, stderr := telepresence(cs.T(), "connect")
require.Empty(stderr)
require.Contains(stdout, "Connected to context")
// Give outbound interceptor 15 seconds to kick in.
require.Eventually(
// condition
func() bool {
stdout, _ := telepresence(cs.T(), "status")
return regexp.MustCompile(`Telepresence proxy:\s+ON`).FindString(stdout) != ""
},
15*time.Second, // waitFor
time.Second, // polling interval
"Timeout waiting for network overrides to establish", // msg
)
}
func (cs *connectedSuite) TearDownSuite() {
stdout, stderr := telepresence(cs.T(), "quit")
cs.Empty(stderr)
cs.Contains(stdout, "quitting")
c := dlog.NewTestContext(cs.T(), false)
cs.NoError(cs.tpSuite.kubectl(c, "config", "use-context", "default"))
time.Sleep(time.Second) // Allow some time for processes to die and sockets to vanish
}
func (cs *connectedSuite) TestA_ReportsVersionFromDaemon() {
stdout, stderr := telepresence(cs.T(), "version")
cs.Empty(stderr)
vs := client.DisplayVersion()
cs.Contains(stdout, fmt.Sprintf("Client: %s", vs))
cs.Contains(stdout, fmt.Sprintf("Root Daemon: %s", vs))
cs.Contains(stdout, fmt.Sprintf("User Daemon: %s", vs))
}
func (cs *connectedSuite) TestB_ReportsStatusAsConnected() {
stdout, stderr := telepresence(cs.T(), "status")
cs.Empty(stderr)
cs.Contains(stdout, "Kubernetes context:")
}
func (cs *connectedSuite) TestC_ProxiesOutboundTraffic() {
ctx := dlog.NewTestContext(cs.T(), false)
for i := 0; i < serviceCount; i++ {
svc := fmt.Sprintf("hello-%d.%s", i, cs.ns())
expectedOutput := fmt.Sprintf("Request served by hello-%d", i)
cs.Require().Eventually(
// condition
func() bool {
dlog.Infof(ctx, "trying %q...", "http://"+svc)
hc := http.Client{Timeout: time.Second}
resp, err := hc.Get("http://" + svc)
if err != nil {
dlog.Error(ctx, err)
return false
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
dlog.Error(ctx, err)
return false
}
dlog.Infof(ctx, "body: %q", body)
return strings.Contains(string(body), expectedOutput)
},
15*time.Second, // waitfor
3*time.Second, // polling interval
`body of %q contains %q`, "http://"+svc, expectedOutput,
)
}
}
func (cs *connectedSuite) TestD_Intercepted() {
suite.Run(cs.T(), &interceptedSuite{tpSuite: cs.tpSuite})
}
func (cs *connectedSuite) TestE_PodWithSubdomain() {
require := cs.Require()
c := dlog.NewTestContext(cs.T(), false)
require.NoError(cs.tpSuite.applyApp(c, "echo-w-subdomain", "echo.subsonic", 8080))
defer func() {
cs.NoError(cs.tpSuite.kubectl(c, "delete", "svc", "subsonic", "--context", "default"))
cs.NoError(cs.tpSuite.kubectl(c, "delete", "deploy", "echo-subsonic", "--context", "default"))
}()
cc, cancel := context.WithTimeout(c, 3*time.Second)
defer cancel()
ip, err := net.DefaultResolver.LookupHost(cc, "echo.subsonic."+cs.ns())
cs.NoError(err)
cs.Equal(1, len(ip))
ip, err = net.DefaultResolver.LookupHost(cc, "echo.subsonic."+cs.ns()+".svc.cluster.local")
cs.NoError(err)
cs.Equal(1, len(ip))
}
func (cs *connectedSuite) TestF_SuccessfullyInterceptsDeploymentWithProbes() {
defer telepresence(cs.T(), "leave", "with-probes-"+cs.ns())
require := cs.Require()
stdout, stderr := telepresence(cs.T(), "intercept", "--namespace", cs.ns(), "--mount", "false", "with-probes", "--port", "9090")
require.Empty(stderr)
require.Contains(stdout, "Using Deployment with-probes")
stdout, stderr = telepresence(cs.T(), "list", "--namespace", cs.ns(), "--intercepts")
require.Empty(stderr)
require.Contains(stdout, "with-probes: intercepted")
}
func (cs *connectedSuite) TestG_SuccessfullyInterceptsReplicaSet() {
defer telepresence(cs.T(), "leave", "rs-echo-"+cs.ns())
require := cs.Require()
stdout, stderr := telepresence(cs.T(), "intercept", "--namespace", cs.ns(), "--mount", "false", "rs-echo", "--port", "9091")
require.Empty(stderr)
require.Contains(stdout, "Using ReplicaSet rs-echo")
stdout, stderr = telepresence(cs.T(), "list", "--namespace", cs.ns(), "--intercepts")
require.Empty(stderr)
require.Contains(stdout, "rs-echo: intercepted")
}
func (cs *connectedSuite) TestH_SuccessfullyInterceptsStatefulSet() {
defer telepresence(cs.T(), "leave", "ss-echo-"+cs.ns())
require := cs.Require()
stdout, stderr := telepresence(cs.T(), "intercept", "--namespace", cs.ns(), "--mount", "false", "ss-echo", "--port", "9091")
require.Empty(stderr)
require.Contains(stdout, "Using StatefulSet ss-echo")
stdout, stderr = telepresence(cs.T(), "list", "--namespace", cs.ns(), "--intercepts")
require.Empty(stderr)
require.Contains(stdout, "ss-echo: intercepted")
}
func (cs *connectedSuite) TestI_LocalOnlyIntercept() {
cs.Run("intercept can be established", func() {
stdout, stderr := telepresence(cs.T(), "intercept", "--namespace", cs.ns(), "--local-only", "mylocal")
cs.Empty(stdout)
cs.Empty(stderr)
})
cs.Run("is included in list output", func() {
// list includes local intercept
stdout, stderr := telepresence(cs.T(), "list", "--namespace", cs.ns(), "--intercepts")
cs.Empty(stderr)
cs.Contains(stdout, "mylocal: local-only intercept")
})
cs.Run("makes services reachable using unqualified name", func() {
ctx := dlog.NewTestContext(cs.T(), false)
// service can be resolved with unqualified name
cs.Eventually(func() bool {
return run(ctx, "curl", "--silent", "ss-echo") == nil
}, 3*time.Second, 1*time.Second)
})
cs.Run("leaving renders services unavailable using unqualified name", func() {
stdout, stderr := telepresence(cs.T(), "leave", "mylocal")
cs.Empty(stdout)
cs.Empty(stderr)
ctx := dlog.NewTestContext(cs.T(), false)
cs.Eventually(func() bool {
ctx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
defer cancel()
return run(ctx, "curl", "--silent", "ss-echo") != nil
}, 3*time.Second, time.Second)
})
}
func (cs *connectedSuite) TestJ_ListOnlyMapped() {
require := cs.Require()
stdout, stderr := telepresence(cs.T(), "connect", "--mapped-namespaces", "default")
require.Empty(stderr)
require.Empty(stdout)
stdout, stderr = telepresence(cs.T(), "list", "--namespace", cs.ns())
require.Empty(stderr)
require.Contains(stdout, "No Workloads (Deployments, StatefulSets, or ReplicaSets)")
stdout, stderr = telepresence(cs.T(), "connect", "--mapped-namespaces", "all")
require.Empty(stderr)
require.Empty(stdout)
stdout, stderr = telepresence(cs.T(), "list", "--namespace", cs.ns())
require.Empty(stderr)
require.NotContains(stdout, "No Workloads (Deployments, StatefulSets, or ReplicaSets)")
}
func (cs *connectedSuite) TestK_DockerRun() {
require := cs.Require()
ctx := dlog.NewTestContext(cs.T(), false)
svc := "hello-0"
tag := "telepresence/hello-test"
testDir := "pkg/client/cli/testdata/hello"
_, err := output(ctx, "docker", "build", "-t", tag, testDir)
require.NoError(err)
abs, err := filepath.Abs(testDir)
require.NoError(err)
grp := dgroup.NewGroup(ctx, dgroup.GroupConfig{
EnableWithSoftness: true,
ShutdownOnNonError: true,
})
grp.Go("server", func(ctx context.Context) error {
stdout, _ := telepresenceContext(ctx, "intercept", "--namespace", cs.ns(), svc,
"--docker-run", "--port", "8000", "--", "--rm", "-v", abs+":/usr/src/app", tag)
cs.Contains(stdout, "Using Deployment "+svc)
return nil
})
grp.Go("client", func(ctx context.Context) error {
expectedOutput := "Hello from intercepted echo-server!"
cs.Eventually(
// condition
func() bool {
ctx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
defer cancel()
out, err := output(ctx, "curl", "--silent", svc)
if err != nil {
dlog.Error(ctx, err)
return false
}
dlog.Info(ctx, out)
return strings.Contains(out, expectedOutput)
},
30*time.Second, // waitFor
1*time.Second, // polling interval
`body of %q equals %q`, "http://"+svc, expectedOutput,
)
return nil
})
cs.NoError(grp.Wait())
}
func (cs *connectedSuite) TestL_LegacySwapDeploymentDoesIntercept() {
require := cs.Require()
// We don't need to defer leaving the intercept because the
// intercept is automatically left once the command is finished
_, stderr := telepresence(cs.T(), "--swap-deployment", "with-probes", "--expose", "9090", "--namespace", cs.ns(), "--mount", "false", "--run", "sleep", "1")
require.Contains(stderr, "Legacy Telepresence command used")
require.Contains(stderr, "Using Deployment with-probes")
// Legacy Telepresence commands are detected and translated in the
// RunSubcommands function, so we ensure that the help text is *not* being
// printed out in this case.
require.NotContains(stderr, "Telepresence can connect to a cluster and route all outbound traffic")
// Verify that the intercept no longer exists
stdout, stderr := telepresence(cs.T(), "list", "--namespace", cs.ns(), "--intercepts")
require.Empty(stderr)
require.Contains(stdout, "No Workloads (Deployments, StatefulSets, or ReplicaSets)")
}
func (cs *connectedSuite) TestM_AutoInjectedAgent() {
ctx := dlog.NewTestContext(cs.T(), false)
cs.NoError(cs.tpSuite.applyApp(ctx, "echo-auto-inject", "echo-auto-inject", 80))
defer func() {
cs.NoError(cs.tpSuite.kubectl(ctx, "delete", "svc,deploy", "echo-auto-inject", "--context", "default"))
}()
cs.Run("shows up with agent installed in list output", func() {
cs.Eventually(func() bool {
stdout, stderr := telepresence(cs.T(), "list", "--namespace", cs.ns(), "--agents")
cs.Empty(stderr)
return strings.Contains(stdout, "echo-auto-inject: ready to intercept (traffic-agent already installed)")
},
10*time.Second, // waitFor
2*time.Second, // polling interval
)
})
cs.Run("can be intercepted", func() {
defer telepresence(cs.T(), "leave", "echo-auto-inject-"+cs.ns())
require := cs.Require()
stdout, stderr := telepresence(cs.T(), "intercept", "--namespace", cs.ns(), "--mount", "false", "echo-auto-inject", "--port", "9091")
require.Empty(stderr)
require.Contains(stdout, "Using Deployment echo-auto-inject")
stdout, stderr = telepresence(cs.T(), "list", "--namespace", cs.ns(), "--intercepts")
require.Empty(stderr)
require.Contains(stdout, "echo-auto-inject: intercepted")
})
}
func (cs *connectedSuite) TestN_ToPodPortForwarding() {
defer telepresence(cs.T(), "leave", "echo-w-sidecars-"+cs.ns())
require := cs.Require()
stdout, stderr := telepresence(cs.T(), "intercept", "--namespace", cs.ns(), "--mount", "false", "echo-w-sidecars", "--port", "8080", "--to-pod", "8081", "--to-pod", "8082")
require.Empty(stderr)
require.Contains(stdout, "Using Deployment echo-w-sidecars")
stdout, stderr = telepresence(cs.T(), "list", "--namespace", cs.ns(), "--intercepts")
require.Empty(stderr)
require.Contains(stdout, "echo-w-sidecars: intercepted")
cs.Run("Forwarded port is reachable as localhost:PORT", func() {
ctx := dlog.NewTestContext(cs.T(), false)
cs.Eventually(func() bool {
return run(ctx, "curl", "--silent", "localhost:8081") == nil
}, 3*time.Second, 1*time.Second)
cs.Eventually(func() bool {
return run(ctx, "curl", "--silent", "localhost:8082") == nil
}, 3*time.Second, 1*time.Second)
})
cs.Run("Non-forwarded port is not reachable", func() {
ctx := dlog.NewTestContext(cs.T(), false)
cs.Eventually(func() bool {
ctx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
defer cancel()
return run(ctx, "curl", "--silent", "localhost:8083") != nil
}, 3*time.Second, 1*time.Second)
})
}
func (cs *connectedSuite) TestZ_Uninstall() {
cs.Run("Uninstalls agent on given deployment", func() {
require := cs.Require()
stdout, stderr := telepresence(cs.T(), "list", "--namespace", cs.ns(), "--agents")
require.Empty(stderr)
require.Contains(stdout, "with-probes")
_, stderr = telepresence(cs.T(), "uninstall", "--namespace", cs.ns(), "--agent", "with-probes")
require.Empty(stderr)
require.Eventually(
// condition
func() bool {
stdout, _ := telepresence(cs.T(), "list", "--namespace", cs.ns(), "--agents")
return !strings.Contains(stdout, "with-probes")
},
30*time.Second, // waitFor
2*time.Second, // polling interval
)
})
cs.Run("Uninstalls agent on given replicaset", func() {
require := cs.Require()
stdout, stderr := telepresence(cs.T(), "list", "--namespace", cs.ns(), "--agents")
require.Empty(stderr)
require.Contains(stdout, "rs-echo")
_, stderr = telepresence(cs.T(), "uninstall", "--namespace", cs.ns(), "--agent", "rs-echo")
require.Empty(stderr)
require.Eventually(
// condition
func() bool {
stdout, _ := telepresence(cs.T(), "list", "--namespace", cs.ns(), "--agents")
return !strings.Contains(stdout, "rs-echo")
},
30*time.Second, // waitFor
2*time.Second, // polling interval
)
})
cs.Run("Uninstalls agent on given statefulset", func() {
require := cs.Require()
stdout, stderr := telepresence(cs.T(), "list", "--namespace", cs.ns(), "--agents")
require.Empty(stderr)
require.Contains(stdout, "ss-echo")
_, stderr = telepresence(cs.T(), "uninstall", "--namespace", cs.ns(), "--agent", "ss-echo")
require.Empty(stderr)
require.Eventually(
// condition
func() bool {
stdout, _ := telepresence(cs.T(), "list", "--namespace", cs.ns(), "--agents")
return !strings.Contains(stdout, "ss-echo")
},
30*time.Second, // waitFor
2*time.Second, // polling interval
)
})
cs.Run("Uninstalls all agents", func() {
require := cs.Require()
stdout, stderr := telepresence(cs.T(), "list", "--namespace", cs.ns(), "--agents")
require.Empty(stderr)
require.GreaterOrEqual(len(strings.Split(stdout, "\n")), serviceCount)
_, stderr = telepresence(cs.T(), "uninstall", "--namespace", cs.ns(), "--all-agents")
require.Empty(stderr)
require.Eventually(
func() bool {
stdout, _ := telepresence(cs.T(), "list", "--namespace", cs.ns(), "--agents")
return stdout == "No Workloads (Deployments, StatefulSets, or ReplicaSets)"
},
30*time.Second, // waitFor
2*time.Second, // polling interval
)
})
}
type interceptedSuite struct {
suite.Suite
tpSuite *telepresenceSuite
intercepts []string
mountPoint string // mount point for service 0.
services *dgroup.Group
cancelServices context.CancelFunc
}
func (is *interceptedSuite) ns() string {
return is.tpSuite.namespace
}
func (is *interceptedSuite) SetupSuite() {
is.intercepts = make([]string, 0, serviceCount)
ctx, cancel := context.WithCancel(dcontext.WithSoftness(dlog.NewTestContext(is.T(), true)))
is.services = dgroup.NewGroup(ctx, dgroup.GroupConfig{})
is.cancelServices = cancel
is.Run("all intercepts ready", func() {
rxs := make([]*regexp.Regexp, serviceCount)
for i := 0; i < serviceCount; i++ {
rxs[i] = regexp.MustCompile(fmt.Sprintf("hello-%d\\s*:\\s+ready to intercept", i))
}
is.Require().Eventually(
// condition
func() bool {
stdout, _ := telepresence(is.T(), "list", "--namespace", is.ns())
is.T().Log(stdout)
for i := 0; i < serviceCount; i++ {
if !rxs[i].MatchString(stdout) {
return false
}
}
return true
},
15*time.Second, // waitFor
3*time.Second, // polling interval
`telepresence list reports all agents`,
)
})
is.mountPoint = is.T().TempDir()
is.Run("adding intercepts", func() {
// Add all `hello-N` intercepts. Let `hello-0` have a mounted volume.
addIntercept := func(i int, extraArgs ...string) {
svc := fmt.Sprintf("hello-%d", i)
port := strconv.Itoa(9000 + i)
args := []string{"intercept", "--namespace", is.ns(), svc, "--port", port}
stdout, stderr := telepresence(is.T(), append(args, extraArgs...)...)
is.Require().Empty(stderr)
is.intercepts = append(is.intercepts, svc)
is.Contains(stdout, "Using Deployment "+svc)
}
addIntercept(0, "--mount", is.mountPoint)
for i := 1; i < serviceCount; i++ {
addIntercept(i, "--mount", "false")
}
})
is.Run("starting http servers", func() {
for i := 0; i < serviceCount; i++ {
svc := fmt.Sprintf("hello-%d", i)
port := strconv.Itoa(9000 + i)
is.services.Go(svc, func(ctx context.Context) error {
sc := &dhttp.ServerConfig{
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "%s from intercept at %s", svc, r.URL.Path)
}),
}
return sc.ListenAndServe(ctx, ":"+port)
})
}
})
}
func (is *interceptedSuite) TearDownSuite() {
for _, svc := range is.intercepts {
stdout, stderr := telepresence(is.T(), "leave", svc+"-"+is.ns())
is.Empty(stderr)
is.Empty(stdout)
}
is.cancelServices()
is.NoError(is.services.Wait())
time.Sleep(time.Second) // Allow some time for processes to die and intercepts to vanish
}
func (is *interceptedSuite) TestA_VerifyingResponsesFromInterceptor() {
for i := 0; i < serviceCount; i++ {
svc := fmt.Sprintf("hello-%d", i)
expectedOutput := fmt.Sprintf("%s from intercept at /", svc)
is.Require().Eventually(
// condition
func() bool {
is.T().Logf("trying %q...", "http://"+svc)
hc := http.Client{Timeout: time.Second}
resp, err := hc.Get("http://" + svc)
if err != nil {
is.T().Log(err)
return false
}
defer resp.Body.Close()
is.T().Logf("status code: %v", resp.StatusCode)
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
is.T().Log(err)
return false
}
is.T().Logf("body: %q", body)
return string(body) == expectedOutput
},
15*time.Second, // waitFor
3*time.Second, // polling interval
`body of %q equals %q`, "http://"+svc, expectedOutput,
)
}
}
func (is *interceptedSuite) TestB_ListingActiveIntercepts() {
require := is.Require()
stdout, stderr := telepresence(is.T(), "--namespace", is.ns(), "list", "--intercepts")
require.Empty(stderr)
for i := 0; i < serviceCount; i++ {
require.Contains(stdout, fmt.Sprintf("hello-%d: intercepted", i))
}
}
func (is *interceptedSuite) TestC_MountedFilesystem() {
require := is.Require()
st, err := os.Stat(is.mountPoint)
require.NoError(err, "Stat on <mount point> failed")
require.True(st.IsDir(), "Mount point is not a directory")
st, err = os.Stat(filepath.Join(is.mountPoint, "var"))
require.NoError(err, "Stat on <mount point>/var failed")
require.True(st.IsDir(), "<mount point>/var is not a directory")
}
func (is *interceptedSuite) TestD_RestartInterceptedPod() {
ts := is.tpSuite
assert := is.Assert()
require := is.Require()
c := dlog.NewTestContext(is.T(), false)
rx := regexp.MustCompile(`Intercept name\s*: hello-0-` + is.ns() + `\s+State\s*: ([^\n]+)\n`)
// Scale down to zero pods
require.NoError(ts.kubectl(c, "--context", "default", "scale", "deploy", "hello-0", "--replicas", "0"))
// Verify that intercept remains but that no agent is found
assert.Eventually(func() bool {
stdout, _ := telepresence(is.T(), "--namespace", is.ns(), "list")
if match := rx.FindStringSubmatch(stdout); match != nil {
dlog.Infof(c, "Got match '%s'", match[1])
return match[1] == "WAITING" || strings.Contains(match[1], `No agent found for "hello-0"`)
}
return false
}, 5*time.Second, time.Second)
// Verify that volume mount is broken
_, err := os.Stat(filepath.Join(is.mountPoint, "var"))
assert.Error(err, "Stat on <mount point>/var succeeded although no agent was found")
// Scale up again (start intercepted pod)
assert.NoError(ts.kubectl(c, "--context", "default", "scale", "deploy", "hello-0", "--replicas", "1"))
// Verify that intercept becomes active
assert.Eventually(func() bool {
stdout, _ := telepresence(is.T(), "--namespace", is.ns(), "list")
if match := rx.FindStringSubmatch(stdout); match != nil {
return match[1] == "ACTIVE"
}
return false
}, 5*time.Second, time.Second)
// Verify that volume mount is restored
assert.Eventually(func() bool {
st, err := os.Stat(filepath.Join(is.mountPoint, "var"))
return err == nil && st.IsDir()
}, 5*time.Second, time.Second)
}
func (is *interceptedSuite) TestE_StopInterceptedPodOfMany() {
ts := is.tpSuite
assert := is.Assert()
require := is.Require()
c := dlog.NewTestContext(is.T(), false)
rx := regexp.MustCompile(`Intercept name\s*: hello-0-` + is.ns() + `\s+State\s*: ([^\n]+)\n`)
helloZeroPods := func() []string {
pods, err := ts.kubectlOut(c, "get", "pods", "--field-selector", "status.phase==Running", "-l", "app=hello-0", "-o", "jsonpath={.items[*].metadata.name}")
assert.NoError(err)
pods = strings.TrimSpace(pods)
dlog.Infof(c, "Pods = '%s'", pods)
return strings.Split(pods, " ")
}
// Wait for exactly one active pod
var currentPod string
require.Eventually(func() bool {
currentPods := helloZeroPods()
if len(currentPods) == 1 {
currentPod = currentPods[0]
return true
}
return false
}, 5*time.Second, time.Second)
// Scale up to two pods
require.NoError(ts.kubectl(c, "--context", "default", "scale", "deploy", "hello-0", "--replicas", "2"))
defer func() {
_ = ts.kubectl(c, "--context", "default", "scale", "deploy", "hello-0", "--replicas", "1")
}()
// Wait for second pod to arrive
assert.Eventually(func() bool { return len(helloZeroPods()) == 2 }, 5*time.Second, time.Second)
// Delete the currently intercepted pod
require.NoError(ts.kubectl(c, "--context", "default", "delete", "pod", currentPod))
// Wait for that pod to disappear
assert.Eventually(
func() bool {
for _, zp := range helloZeroPods() {
if zp == currentPod {
return false
}
}
return true
}, 5*time.Second, time.Second)
// Verify that intercept is still active
assert.Eventually(func() bool {
stdout, _ := telepresence(is.T(), "--namespace", is.ns(), "list", "--intercepts")
if match := rx.FindStringSubmatch(stdout); match != nil {
return match[1] == "ACTIVE"
}
return false
}, 5*time.Second, time.Second)
// Verify response from intercepting client
assert.Eventually(func() bool {
hc := http.Client{Timeout: time.Second}
resp, err := hc.Get("http://hello-0")
if err != nil {
return false
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return false
}
return "hello-0 from intercept at /" == string(body)
}, 5*time.Second, time.Second)
// Verify that volume mount is restored
st, err := os.Stat(filepath.Join(is.mountPoint, "var"))
require.NoError(err, "Stat on <mount point>/var failed")
require.True(st.IsDir(), "<mount point>/var is not a directory")
}
func (ts *telepresenceSuite) applyApp(c context.Context, name, svcName string, port int) error {
err := ts.kubectl(c, "apply", "-f", fmt.Sprintf("k8s/%s.yaml", name), "--context", "default")
if err != nil {
return fmt.Errorf("failed to deploy %s: %w", name, err)
}
return ts.waitForService(c, svcName, port)
}
func (ts *telepresenceSuite) applyEchoService(c context.Context, name string) error {
err := ts.kubectl(c, "create", "deploy", name, "--image", "jmalloc/echo-server:0.1.0")
if err != nil {
return fmt.Errorf("failed to create deployment %s: %w", name, err)
}
err = ts.kubectl(c, "expose", "deploy", name, "--port", "80", "--target-port", "8080")
if err != nil {
return fmt.Errorf("failed to expose deployment %s: %w", name, err)
}
return ts.waitForService(c, name, 80)
}
func (ts *telepresenceSuite) waitForService(c context.Context, name string, port int) error {
c, cancel := context.WithTimeout(c, 120*time.Second)
defer cancel()
// Since this function can be called multiple times in parallel
// we add the name of the service to the name of the pod so they
// can run at the same time. We strip out any characters that we
// can't use in a k8s name.
reg := regexp.MustCompile("[^a-zA-Z0-9-]+")
k8sSafeName := reg.ReplaceAllString(name, "")
containerName := fmt.Sprintf("curl-%s-from-cluster", k8sSafeName)
for c.Err() == nil {
time.Sleep(time.Second)
err := ts.kubectl(c, "run", containerName, "--context", "default", "--rm", "-it",
"--image=docker.io/pstauffer/curl", "--restart=Never", "--",
"curl", "--silent", "--output", "/dev/null",
fmt.Sprintf("http://%s.%s:%d", name, ts.namespace, port),
)
if err == nil {
return nil
}
}
return fmt.Errorf("timed out waiting for %s service", name)
}
func (ts *telepresenceSuite) kubectl(c context.Context, args ...string) error {
return run(c, append([]string{"kubectl", "--namespace", ts.namespace}, args...)...)
}
func (ts *telepresenceSuite) kubectlOut(ctx context.Context, args ...string) (string, error) {
return output(ctx, append([]string{"kubectl", "--namespace", ts.namespace}, args...)...)
}
func (ts *telepresenceSuite) publishManager() error {
ctx := dlog.NewTestContext(ts.T(), true)
cmd := dexec.CommandContext(ctx, "make", "push-image")
// Go sets a lot of variables that we don't want to pass on to the ko executable. If we do,
// then it builds for the platform indicated by those variables.
cmd.Env = []string{
"TELEPRESENCE_VERSION=" + ts.testVersion,
"TELEPRESENCE_REGISTRY=" + dtest.DockerRegistry(ctx),
}
includeEnv := []string{"KO_DOCKER_REPO=", "HOME=", "PATH=", "LOGNAME=", "TMPDIR=", "MAKELEVEL="}
for _, env := range os.Environ() {
for _, incl := range includeEnv {
if strings.HasPrefix(env, incl) {
cmd.Env = append(cmd.Env, env)
break
}
}
}
if err := cmd.Run(); err != nil {
return client.RunError(err)
}
return nil
}
func (ts *telepresenceSuite) buildExecutable(c context.Context) (string, error) {
executable := filepath.Join("build-output", "bin", "telepresence")
return executable, run(c, "go", "build", "-ldflags",
fmt.Sprintf("-X=github.com/telepresenceio/telepresence/v2/pkg/version.Version=%s", ts.testVersion),
"-o", executable, "./cmd/telepresence")
}
func (ts *telepresenceSuite) setupKubeConfig(ctx context.Context) {
kubeconfig := dtest.Kubeconfig(ctx)
os.Setenv("DTEST_KUBECONFIG", kubeconfig)
os.Setenv("KUBECONFIG", kubeconfig)
err := run(ctx, "kubectl", "create", "namespace", ts.namespace)
ts.NoError(err)
err = run(ctx, "kubectl", "apply", "-f", "k8s/client_rbac.yaml")
ts.NoError(err)
// This is how we create a user that has their rbac restricted to what we have in
// k8s/client_rbac.yaml. We do this by creating a service account and then getting
// the token from said service account and storing it in our kubeconfig.
secret, err := output(ctx, "kubectl", "get", "sa", "telepresence-test-developer", "-o", "jsonpath={.secrets[0].name}")
ts.NoError(err)
encSecret, err := output(ctx, "kubectl", "get", "secret", secret, "-o", "jsonpath={.data.token}")
ts.NoError(err)
token, err := base64.StdEncoding.DecodeString(encSecret)
ts.NoError(err)
err = run(ctx, "kubectl", "config", "set-credentials", "telepresence-test-developer", "--token", string(token))
ts.NoError(err)
err = run(ctx, "kubectl", "config", "set-context", "telepresence-test-developer", "--user", "telepresence-test-developer", "--cluster", "default")
ts.NoError(err)
// We start with the default context, and will switch to the
// telepresence-test-developer user later in the tests
err = run(ctx, "kubectl", "config", "use-context", "default")
ts.NoError(err)
}
func run(c context.Context, args ...string) error {
return client.RunError(dexec.CommandContext(c, args[0], args[1:]...).Run())
}
func output(ctx context.Context, args ...string) (string, error) {
cmd := dexec.CommandContext(ctx, args[0], args[1:]...)
cmd.DisableLogging = true
out, err := cmd.Output()
return string(out), client.RunError(err)
}
// telepresence executes the CLI command and returns its trimmed stdout and stderr
func telepresence(t testing.TB, args ...string) (string, string) {
return telepresenceContext(dlog.NewTestContext(t, false), args...)
}
// telepresenceContext executes the CLI command with the given context and returns its trimmed stdout and stderr
func telepresenceContext(ctx context.Context, args ...string) (string, string) {
var stdout, stderr strings.Builder
configDir, _ := filelocation.AppUserConfigDir(ctx)
logDir, _ := filelocation.AppUserLogDir(ctx)
cmd := dexec.CommandContext(ctx, client.GetExe(), args...)
cmd.Stdout = &stdout
cmd.Stderr = &stderr
cmd.Env = append(os.Environ(),
"DEV_TELEPRESENCE_CONFIG_DIR="+configDir,
"DEV_TELEPRESENCE_LOG_DIR="+logDir)
_ = cmd.Run()
return strings.TrimSpace(stdout.String()), strings.TrimSpace(stderr.String())
}
|
[
"\"PATH\"",
"\"KUBECONFIG\"",
"\"DTEST_KUBECONFIG\""
] |
[] |
[
"DTEST_KUBECONFIG",
"PATH",
"KUBECONFIG"
] |
[]
|
["DTEST_KUBECONFIG", "PATH", "KUBECONFIG"]
|
go
| 3 | 0 | |
utils/prediction.py
|
#!/home/catskills/anaconda3/envs/xview2/bin/python
import glob, os
from shutil import copyfile
from tqdm import tqdm
from subprocess import call
from IPython.utils.path import ensure_dir_exists
# os.environ["CUDA_VISIBLE_DEVICES"]="1" # second gpu
VERSION=os.getenv('VERSION')
PROJECT='xview2-catskills'
USERDIR='/home/catskills/Desktop'
CODEDIR=f'{USERDIR}/{PROJECT}'
DATADIR=f'{USERDIR}/dataxv2'
TESTDIR=f'{DATADIR}/test/images/'
SUBMIT_DIR=f'{DATADIR}/{VERSION}_submit_001'
MODEL_DIR=f'/home/catskills/Desktop/dataxv2/release/{VERSION}'
LOCALIZATION_MODEL=f'{MODEL_DIR}/localization.hdf5'
DAMAGE_MODEL=f'{MODEL_DIR}/classification.hdf5'
ensure_dir_exists(SUBMIT_DIR)
files = glob.glob(f'{TESTDIR}test_pre_*.png')
for pre_png in tqdm(files):
post_png = pre_png.replace('_pre_','_post_')
image_id = pre_png.split('.')[0].split('/')[-1].split('_')[-1]
out_damage_png = f'{SUBMIT_DIR}/test_damage_{image_id}_prediction.png'
if os.path.isfile(out_damage_png):
continue
out_local_json = f'{SUBMIT_DIR}/test_localization_{image_id}_prediction.json'
out_local_png = f'{SUBMIT_DIR}/test_localization_{image_id}_prediction.png'
C=f'./inference.sh -x {CODEDIR} -i {pre_png} -p {post_png} -l {LOCALIZATION_MODEL} -c {DAMAGE_MODEL} -o {out_damage_png} -y'
call(C, shell=True)
if os.path.isfile(out_damage_png):
copyfile(out_damage_png, out_local_png)
else:
print("PROCESS FAILED", image_id)
|
[] |
[] |
[
"VERSION",
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["VERSION", "CUDA_VISIBLE_DEVICES"]
|
python
| 2 | 0 | |
bucket/service_acc_test.go
|
// Copyright 2022 The sacloud/object-storage-service-go Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build acctest
// +build acctest
package bucket
import (
"os"
"testing"
v1 "github.com/sacloud/object-storage-api-go/apis/v1"
service "github.com/sacloud/object-storage-service-go"
"github.com/sacloud/packages-go/testutil"
"github.com/stretchr/testify/require"
)
func TestAccBucket_CRUD_plus_L(t *testing.T) {
testutil.PreCheckEnvsFunc(
"SAKURACLOUD_ACCESS_TOKEN",
"SAKURACLOUD_ACCESS_TOKEN_SECRET",
"SACLOUD_OJS_ACCESS_KEY_ID",
"SACLOUD_OJS_SECRET_ACCESS_KEY",
)(t)
key := os.Getenv("SACLOUD_OJS_ACCESS_KEY_ID")
secret := os.Getenv("SACLOUD_OJS_SECRET_ACCESS_KEY")
svc := New(service.NewClient())
siteId := "isk01"
bucketName := testutil.RandomName("object-storage-service-go-", 16, testutil.CharSetAlpha)
notExistName := testutil.RandomName("object-storage-service-go-", 16, testutil.CharSetAlpha)
t.Run("create", func(t *testing.T) {
bucket, err := svc.Create(&CreateRequest{
AccessKey: key,
SecretKey: secret,
SiteId: siteId,
Id: bucketName,
})
require.NoError(t, err)
require.NotNil(t, bucket)
t.Logf("created: name: %s, creation-date: %s", bucket.Name, bucket.CreationDate)
})
t.Run("list and read", func(t *testing.T) {
// Note: Read in this service calls List internally, so only Read is exercised here
bucket, err := svc.Read(&ReadRequest{
AccessKey: key,
SecretKey: secret,
SiteId: siteId,
Id: bucketName,
})
require.NoError(t, err)
require.NotNil(t, bucket)
t.Logf("read: name: %s, creation-date: %s", bucket.Name, bucket.CreationDate)
})
t.Run("read return NotFoundError when bucket is not found", func(t *testing.T) {
_, err := svc.Read(&ReadRequest{
AccessKey: key,
SecretKey: secret,
SiteId: siteId,
Id: notExistName,
})
require.Error(t, err)
require.True(t, v1.IsError404(err))
})
t.Run("delete", func(t *testing.T) {
err := svc.Delete(&DeleteRequest{
AccessKey: key,
SecretKey: secret,
SiteId: siteId,
Id: bucketName,
})
require.NoError(t, err)
})
}
|
[
"\"SACLOUD_OJS_ACCESS_KEY_ID\"",
"\"SACLOUD_OJS_SECRET_ACCESS_KEY\""
] |
[] |
[
"SACLOUD_OJS_SECRET_ACCESS_KEY",
"SACLOUD_OJS_ACCESS_KEY_ID"
] |
[]
|
["SACLOUD_OJS_SECRET_ACCESS_KEY", "SACLOUD_OJS_ACCESS_KEY_ID"]
|
go
| 2 | 0 | |
BatchBox/makeJSONfromJobLog.py
|
"""
In crab logs we print the PSet. For extracte the LS the following part is relevant
== CMSSW: lumisToProcess = cms.untracked.VLuminosityBlockRange(
== CMSSW: "299478:140-299478:146", "299478:19-299478:19", "299478:148-299478:150", "299478:23-299478:26", "299478:61-299478:63",
== CMSSW: "299478:147-299478:147", "299478:69-299478:75", "299478:20-299478:20", "299478:18-299478:18", "299478:114-299478:125"
== CMSSW: ),
"""
import re
import analyzeJSON
import json
def makeJSONFromJobLog(inputFile, outputFileName):
### First find the relevant part of the log
lumiLines = []
with open(inputFile, "r") as f:
lines = f.readlines()
parseLumis = False
for line in lines:
if "lumisToProcess" in line:
parseLumis = True
continue
if ")," in line and parseLumis:
break
if parseLumis:
lumiLines.append(line)
lumiLineElements = []
for line in lumiLines:
for elem in line.split(" "):
lumiLineElements.append(elem)
# Find the lumi blocks in the relevant printout
lumiBlocks = []
for line in lumiLineElements:
result = re.findall('\".+-.+\"', line)
lumiBlocks += result
# Format the printout and fill in dict
lumis = {}
for block in lumiBlocks:
block_ = block.replace('"','')
start, end = block_.split("-")
run, startLS = start.split(":")
run_, endLS = end.split(":")
if run != run_:
raise RuntimeError("Can be different runs in the begin and end ls? Got %"%block)
if run not in lumis.keys():
lumis[run] = []
for i in range(int(startLS), int(endLS)+1):
lumis[run].append(i)
# Check if LS are present multiple times
for run in lumis:
origLen = len(lumis[run])
lumis[run] = sorted(list(set(lumis[run])))
if origLen != len(lumis[run]):
raise RuntimeError("There where some double LS in run %s. Please check"%run)
# Count for feedback in output
nRuns = 0
nLS = 0
for run in lumis:
nRuns += 1
LSinRun = lumis[run]
for LS in LSinRun:
nLS += 1
print "Extracted %s LS in %s runs"%(nLS, nRuns)
# Use the formatting function from the analyzeJSON module to format the JSON for output
outJSON = analyzeJSON.fromatJSON(lumis)
# Write Output file
outputName = outputFileName+".json"
print "Writing:",outputName
with open(outputName, "w") as f:
json.dump(outJSON, f)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Converts lumirange part of the PSet printout to json for further processing')
parser.add_argument('--input', action="store", required=True, help="Snippet of the JobLog", type=str)
parser.add_argument("--output", action="store", required=True, help="Name of the output file", type=str)
args = parser.parse_args()
makeJSONFromJobLog(args.input, args.output)
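# Example invocation (hypothetical file names):
#   python makeJSONfromJobLog.py --input crab_job.log --output processedLumis
# This parses the lumisToProcess block shown in the module docstring and
# writes processedLumis.json.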
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
routers/admin/admin.go
|
// Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package admin
import (
"fmt"
"os"
"runtime"
"strings"
"time"
"github.com/Unknwon/com"
"gopkg.in/macaron.v1"
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/cron"
"code.gitea.io/gitea/modules/process"
"code.gitea.io/gitea/modules/setting"
)
const (
tplDashboard base.TplName = "admin/dashboard"
tplConfig base.TplName = "admin/config"
tplMonitor base.TplName = "admin/monitor"
)
var (
startTime = time.Now()
)
var sysStatus struct {
Uptime string
NumGoroutine int
// General statistics.
MemAllocated string // bytes allocated and still in use
MemTotal string // bytes allocated (even if freed)
MemSys string // bytes obtained from system (sum of XxxSys below)
Lookups uint64 // number of pointer lookups
MemMallocs uint64 // number of mallocs
MemFrees uint64 // number of frees
// Main allocation heap statistics.
HeapAlloc string // bytes allocated and still in use
HeapSys string // bytes obtained from system
HeapIdle string // bytes in idle spans
HeapInuse string // bytes in non-idle span
HeapReleased string // bytes released to the OS
HeapObjects uint64 // total number of allocated objects
// Low-level fixed-size structure allocator statistics.
// Inuse is bytes used now.
// Sys is bytes obtained from system.
StackInuse string // bootstrap stacks
StackSys string
MSpanInuse string // mspan structures
MSpanSys string
MCacheInuse string // mcache structures
MCacheSys string
BuckHashSys string // profiling bucket hash table
GCSys string // GC metadata
OtherSys string // other system allocations
// Garbage collector statistics.
NextGC string // next run in HeapAlloc time (bytes)
LastGC string // last run in absolute time (ns)
PauseTotalNs string
PauseNs string // circular buffer of recent GC pause times, most recent at [(NumGC+255)%256]
NumGC uint32
}
func updateSystemStatus() {
sysStatus.Uptime = base.TimeSincePro(startTime, "en")
m := new(runtime.MemStats)
runtime.ReadMemStats(m)
sysStatus.NumGoroutine = runtime.NumGoroutine()
sysStatus.MemAllocated = base.FileSize(int64(m.Alloc))
sysStatus.MemTotal = base.FileSize(int64(m.TotalAlloc))
sysStatus.MemSys = base.FileSize(int64(m.Sys))
sysStatus.Lookups = m.Lookups
sysStatus.MemMallocs = m.Mallocs
sysStatus.MemFrees = m.Frees
sysStatus.HeapAlloc = base.FileSize(int64(m.HeapAlloc))
sysStatus.HeapSys = base.FileSize(int64(m.HeapSys))
sysStatus.HeapIdle = base.FileSize(int64(m.HeapIdle))
sysStatus.HeapInuse = base.FileSize(int64(m.HeapInuse))
sysStatus.HeapReleased = base.FileSize(int64(m.HeapReleased))
sysStatus.HeapObjects = m.HeapObjects
sysStatus.StackInuse = base.FileSize(int64(m.StackInuse))
sysStatus.StackSys = base.FileSize(int64(m.StackSys))
sysStatus.MSpanInuse = base.FileSize(int64(m.MSpanInuse))
sysStatus.MSpanSys = base.FileSize(int64(m.MSpanSys))
sysStatus.MCacheInuse = base.FileSize(int64(m.MCacheInuse))
sysStatus.MCacheSys = base.FileSize(int64(m.MCacheSys))
sysStatus.BuckHashSys = base.FileSize(int64(m.BuckHashSys))
sysStatus.GCSys = base.FileSize(int64(m.GCSys))
sysStatus.OtherSys = base.FileSize(int64(m.OtherSys))
sysStatus.NextGC = base.FileSize(int64(m.NextGC))
sysStatus.LastGC = fmt.Sprintf("%.1fs", float64(time.Now().UnixNano()-int64(m.LastGC))/1000/1000/1000)
sysStatus.PauseTotalNs = fmt.Sprintf("%.1fs", float64(m.PauseTotalNs)/1000/1000/1000)
sysStatus.PauseNs = fmt.Sprintf("%.3fs", float64(m.PauseNs[(m.NumGC+255)%256])/1000/1000/1000)
sysStatus.NumGC = m.NumGC
}
// Operation represents an admin operation type.
type Operation int
const (
cleanInactivateUser Operation = iota + 1
cleanRepoArchives
cleanMissingRepos
gitGCRepos
syncSSHAuthorizedKey
syncRepositoryUpdateHook
reinitMissingRepository
syncExternalUsers
gitFsck
)
// Dashboard show admin panel dashboard
func Dashboard(ctx *context.Context) {
ctx.Data["Title"] = ctx.Tr("admin.dashboard")
ctx.Data["PageIsAdmin"] = true
ctx.Data["PageIsAdminDashboard"] = true
// Run operation.
op, _ := com.StrTo(ctx.Query("op")).Int()
if op > 0 {
var err error
var success string
switch Operation(op) {
case cleanInactivateUser:
success = ctx.Tr("admin.dashboard.delete_inactivate_accounts_success")
err = models.DeleteInactivateUsers()
case cleanRepoArchives:
success = ctx.Tr("admin.dashboard.delete_repo_archives_success")
err = models.DeleteRepositoryArchives()
case cleanMissingRepos:
success = ctx.Tr("admin.dashboard.delete_missing_repos_success")
err = models.DeleteMissingRepositories(ctx.User)
case gitGCRepos:
success = ctx.Tr("admin.dashboard.git_gc_repos_success")
err = models.GitGcRepos()
case syncSSHAuthorizedKey:
success = ctx.Tr("admin.dashboard.resync_all_sshkeys_success")
err = models.RewriteAllPublicKeys()
case syncRepositoryUpdateHook:
success = ctx.Tr("admin.dashboard.resync_all_hooks_success")
err = models.SyncRepositoryHooks()
case reinitMissingRepository:
success = ctx.Tr("admin.dashboard.reinit_missing_repos_success")
err = models.ReinitMissingRepositories()
case syncExternalUsers:
success = ctx.Tr("admin.dashboard.sync_external_users_started")
go models.SyncExternalUsers()
case gitFsck:
success = ctx.Tr("admin.dashboard.git_fsck_started")
go models.GitFsck()
}
if err != nil {
ctx.Flash.Error(err.Error())
} else {
ctx.Flash.Success(success)
}
ctx.Redirect(setting.AppSubURL + "/admin")
return
}
ctx.Data["Stats"] = models.GetStatistic()
// FIXME: update periodically
updateSystemStatus()
ctx.Data["SysStatus"] = sysStatus
ctx.HTML(200, tplDashboard)
}
// SendTestMail send test mail to confirm mail service is OK
func SendTestMail(ctx *context.Context) {
email := ctx.Query("email")
// Send a test email to the user's email address and redirect back to Config
if err := models.SendTestMail(email); err != nil {
ctx.Flash.Error(ctx.Tr("admin.config.test_mail_failed", email, err))
} else {
ctx.Flash.Info(ctx.Tr("admin.config.test_mail_sent", email))
}
ctx.Redirect(setting.AppSubURL + "/admin/config")
}
// Config show admin config page
func Config(ctx *context.Context) {
ctx.Data["Title"] = ctx.Tr("admin.config")
ctx.Data["PageIsAdmin"] = true
ctx.Data["PageIsAdminConfig"] = true
ctx.Data["CustomConf"] = setting.CustomConf
ctx.Data["AppUrl"] = setting.AppURL
ctx.Data["Domain"] = setting.Domain
ctx.Data["OfflineMode"] = setting.OfflineMode
ctx.Data["DisableRouterLog"] = setting.DisableRouterLog
ctx.Data["RunUser"] = setting.RunUser
ctx.Data["RunMode"] = strings.Title(macaron.Env)
ctx.Data["GitVersion"] = setting.Git.Version
ctx.Data["RepoRootPath"] = setting.RepoRootPath
ctx.Data["CustomRootPath"] = setting.CustomPath
ctx.Data["StaticRootPath"] = setting.StaticRootPath
ctx.Data["LogRootPath"] = setting.LogRootPath
ctx.Data["ScriptType"] = setting.ScriptType
ctx.Data["ReverseProxyAuthUser"] = setting.ReverseProxyAuthUser
ctx.Data["ReverseProxyAuthEmail"] = setting.ReverseProxyAuthEmail
ctx.Data["SSH"] = setting.SSH
ctx.Data["Service"] = setting.Service
ctx.Data["DbCfg"] = models.DbCfg
ctx.Data["Webhook"] = setting.Webhook
ctx.Data["MailerEnabled"] = false
if setting.MailService != nil {
ctx.Data["MailerEnabled"] = true
ctx.Data["Mailer"] = setting.MailService
}
ctx.Data["CacheAdapter"] = setting.CacheService.Adapter
ctx.Data["CacheInterval"] = setting.CacheService.Interval
ctx.Data["CacheConn"] = setting.CacheService.Conn
ctx.Data["SessionConfig"] = setting.SessionConfig
ctx.Data["DisableGravatar"] = setting.DisableGravatar
ctx.Data["EnableFederatedAvatar"] = setting.EnableFederatedAvatar
ctx.Data["Git"] = setting.Git
type envVar struct {
Name, Value string
}
envVars := map[string]*envVar{}
if len(os.Getenv("GITEA_WORK_DIR")) > 0 {
envVars["GITEA_WORK_DIR"] = &envVar{"GITEA_WORK_DIR", os.Getenv("GITEA_WORK_DIR")}
}
if len(os.Getenv("GITEA_CUSTOM")) > 0 {
envVars["GITEA_CUSTOM"] = &envVar{"GITEA_CUSTOM", os.Getenv("GITEA_CUSTOM")}
}
ctx.Data["EnvVars"] = envVars
type logger struct {
Mode, Config string
}
loggers := make([]*logger, len(setting.LogModes))
for i := range setting.LogModes {
loggers[i] = &logger{setting.LogModes[i], setting.LogConfigs[i]}
}
ctx.Data["Loggers"] = loggers
ctx.HTML(200, tplConfig)
}
// Monitor show admin monitor page
func Monitor(ctx *context.Context) {
ctx.Data["Title"] = ctx.Tr("admin.monitor")
ctx.Data["PageIsAdmin"] = true
ctx.Data["PageIsAdminMonitor"] = true
ctx.Data["Processes"] = process.GetManager().Processes
ctx.Data["Entries"] = cron.ListTasks()
ctx.HTML(200, tplMonitor)
}
|
[
"\"GITEA_WORK_DIR\"",
"\"GITEA_WORK_DIR\"",
"\"GITEA_CUSTOM\"",
"\"GITEA_CUSTOM\""
] |
[] |
[
"GITEA_CUSTOM",
"GITEA_WORK_DIR"
] |
[]
|
["GITEA_CUSTOM", "GITEA_WORK_DIR"]
|
go
| 2 | 0 | |
config/parameters.conf.go
|
package config
import "os"
func GetParameters() map[string]interface{} {
return map[string]interface{}{
"github": map[string]string{
"github_account": "",
"github_secret": "",
},
"youtrack": map[string]string{
"url": os.Getenv("YOUTRACK_URL"),
"token": os.Getenv("YOUTRACK_TOKEN"),
},
}
}
|
[
"\"YOUTRACK_URL\"",
"\"YOUTRACK_TOKEN\""
] |
[] |
[
"YOUTRACK_TOKEN",
"YOUTRACK_URL"
] |
[]
|
["YOUTRACK_TOKEN", "YOUTRACK_URL"]
|
go
| 2 | 0 | |
AutoWorkup/logismosb/utils/fs_thickness_measurements.py
|
"""
fs_thickness_measurements.py
==============================
Description:
Author:
Usage:
"""
import vtk
import SimpleITK as sitk
import numpy as np
from scipy.spatial import distance
from nipype.interfaces.freesurfer import MRIsConvert
import os
import sys
def read_poly_data(filename):
"""
This function..
:param filename:
:return:
"""
# Check which PolyData reader should be used
if ".vtk" in filename:
reader = vtk.vtkPolyDataReader()
reader.SetFileName(filename)
reader.Update()
return reader.GetOutput()
elif ".vtp" in filename:
reader = vtk.vtkXMLPolyDataReader()
reader.SetFileName(filename)
reader.Update()
return reader.GetOutput()
else:
print("ERROR: Failed to read in polydata")
        sys.exit(os.EX_IOERR)
def ras_to_lps(point):
"""
This function..
:param point:
:return:
"""
surf_x, surf_y, surf_z = point
    point = (-surf_x, -surf_y, surf_z)  # flip the x and y axes to convert from RAS (VTK) to LPS (ITK)
return point
# Find the label of a given vtk point from a label map
def vtk_point_to_label(point, labelmap):
"""
This function..
:param point:
:param labelmap:
:return:
"""
point = ras_to_lps(point)
index = labelmap.TransformPhysicalPointToIndex(point)
x = int(index[0])
y = int(index[1])
z = int(index[2])
return labelmap.GetPixel(x, y, z)
def build_kd_tree(mesh):
"""
This function..
:param mesh:
:return:
"""
kd_tree = vtk.vtkKdTreePointLocator()
kd_tree.SetDataSet(mesh)
kd_tree.BuildLocator()
return kd_tree
def convert_fs_surface(in_surf, out_surf, to_scanner=True):
"""
This function..
:param in_surf:
:param out_surf:
:param to_scanner:
:return:
"""
if os.path.isfile(os.path.abspath(out_surf)):
return os.path.abspath(out_surf)
mris_convert = MRIsConvert()
mris_convert.inputs.in_file = in_surf
mris_convert.inputs.out_file = os.path.abspath(out_surf)
mris_convert.inputs.to_scanner = to_scanner
result = mris_convert.run()
return result.outputs.converted
def get_vtk_file_name(fs_file_name):
"""
This function..
:param fs_file_name:
:return:
"""
fs_dir, fs_basename = os.path.split(fs_file_name)
return os.path.join(fs_dir, fs_basename.replace(".", "_") + ".vtk")
def fs_to_vtk(fs_surface):
"""
This function..
:param fs_surface:
:return:
"""
output_file = get_vtk_file_name(fs_surface)
return convert_fs_surface(fs_surface, output_file)
def get_surf(surf_dir, hemisphere, surf):
"""
This function..
:param surf_dir:
:param hemisphere:
:param surf:
:return:
"""
return os.path.join(surf_dir, "{0}.{1}".format(hemisphere, surf))
def get_white(surf_dir, hemisphere):
"""
This function..
:param surf_dir:
:param hemisphere:
:return:
"""
return get_surf(surf_dir, hemisphere, "white")
def get_pial(surf_dir, hemisphere):
"""
This function..
:param surf_dir:
:param hemisphere:
:return:
"""
return get_surf(surf_dir, hemisphere, "pial")
def get_white_and_pial_fs_files(surf_dir, hemisphere):
"""
This function..
:param surf_dir:
:param hemisphere:
:return:
"""
fs_white = get_white(surf_dir, hemisphere)
fs_pial = get_pial(surf_dir, hemisphere)
return fs_white, fs_pial
def get_white_and_pial_vtk_files(surf_dir, hemisphere):
"""
This function..
:param surf_dir:
:param hemisphere:
:return:
"""
fs_white, fs_pial = get_white_and_pial_fs_files(surf_dir, hemisphere)
return fs_to_vtk(fs_white), fs_to_vtk(fs_pial)
def get_white_and_pial(surf_dir, hemisphere):
"""
This function..
:param surf_dir:
:param hemisphere:
:return:
"""
vtk_white, vtk_pial = get_white_and_pial_vtk_files(surf_dir, hemisphere)
white = read_poly_data(vtk_white)
pial = read_poly_data(vtk_pial)
return white, pial
def compute_thickness(wmP, kdTreegm, kdTreewm):
"""
This function..
:param wmP:
:param kdTreegm:
:param kdTreewm:
:return:
"""
# Find the closest point to the gray matter surface point
gmIndex = kdTreegm.FindClosestPoint(wmP)
gmP = kdTreegm.GetDataSet().GetPoint(gmIndex)
# compute the distance
# distance from wm point to gm point
dst1 = distance.euclidean(wmP, gmP)
wmIndex = kdTreewm.FindClosestPoint(gmP)
    wmP2 = kdTreewm.GetDataSet().GetPoint(wmIndex)
    # distance from the gm point back to the closest wm point
dst2 = distance.euclidean(gmP, wmP2)
# average the two distances
thickness = (dst1 + dst2) / float(2)
return thickness
def create_thickness_array():
"""
This function..
:return:
"""
thicknesses = vtk.vtkFloatArray()
thicknesses.SetName("thickness")
return thicknesses
def calculate_distance(white, pial):
"""
This function..
:param white:
:param pial:
:return:
"""
# setup KdTrees for each surface
# this will help in finding the closest points
kd_tree_white = build_kd_tree(white)
kd_tree_pial = build_kd_tree(pial)
white_points = white.GetPoints()
white_count = white.GetNumberOfPoints()
white_point_data = white.GetPointData()
thicknesses = create_thickness_array()
for i in range(0, white_count):
white_matter_point = white_points.GetPoint(i)
# compute the thickness
thickness = compute_thickness(white_matter_point, kd_tree_pial, kd_tree_white)
thicknesses.InsertNextValue(thickness)
white_point_data.AddArray(thicknesses)
return white
def get_surf_dir(subjects_dir, subject_id):
"""
This function..
:param subjects_dir:
:param subject_id:
:return:
"""
return os.path.join(subjects_dir, subject_id, "surf")
def write_vtk_file(polydata, file_name):
"""
This function..
:param polydata:
:param file_name:
:return:
"""
writer = vtk.vtkPolyDataWriter()
writer.SetFileName(file_name)
writer.SetInputData(polydata)
writer.Update()
return os.path.abspath(writer.GetFileName())
def get_thickness_file(subjects_dir, subject_id, hemisphere):
"""
This function..
:param subjects_dir:
:param subjects_id:
:param hemisphere:
:return:
"""
surf_dir = get_surf_dir(subjects_dir, subject_id)
white, pial = get_white_and_pial(surf_dir, hemisphere)
thickness = calculate_distance(white, pial)
return write_vtk_file(
thickness, os.path.join(surf_dir, "{0}_thickness.vtk".format(hemisphere))
)
def get_thickness_files_for_both_hemispheres(subjects_dir, subject_id):
"""
This function..
:param subjects_dir:
:param subjects_id:
:return:
"""
lh_thickness = get_thickness_file(subjects_dir, subject_id, "lh")
rh_thickness = get_thickness_file(subjects_dir, subject_id, "rh")
return lh_thickness, rh_thickness
def masked_thickness_values(thickness_file, mask_image_file, array_index=None):
"""
This function..
:param thickness_file:
:param mask_file:
:param array_index:
:return:
"""
thickness = read_poly_data(thickness_file)
mask = sitk.ReadImage(mask_image_file)
inside_mask_values = list()
outside_mask_values = list()
thickness_point_data = thickness.GetPointData()
if not array_index:
# set the array index to the last array added to the poly data
array_index = thickness_point_data.GetNumberOfArrays() - 1
thickness_values = thickness.GetPointData().GetArray(array_index)
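    # Partition per-vertex thickness values by whether each point falls inside the mask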
for point_index in range(thickness.GetNumberOfPoints()):
point = thickness.GetPoint(point_index)
mask_value = vtk_point_to_label(point, mask)
thickness_value = thickness_values.GetValue(point_index)
if mask_value == 1:
inside_mask_values.append(thickness_value)
else:
outside_mask_values.append(thickness_value)
return inside_mask_values, outside_mask_values
def calculate_stats(values):
"""
This function..
:param values:
:return:
"""
if values:
values_array = np.array(values)
return dict(
mean=values_array.mean(),
std=values_array.std(),
min=values_array.min(),
max=values_array.max(),
)
else:
return dict(mean=None, std=None, min=None, max=None)
def masked_thickness_stats(thickness_file, mask_image_file):
"""
This function..
:param thickness_file:
:param mask_image_file:
:return:
"""
inside_mask_values, outside_mask_values = masked_thickness_values(
thickness_file, mask_image_file
)
stats = dict()
stats["inside"] = calculate_stats(inside_mask_values)
stats["outside"] = calculate_stats(outside_mask_values)
return stats
def get_thickness_stats_for_both_hemispheres(subjects_dir, subject_id, mask_file):
"""
This function..
:param subject_id:
:param subjects_dir:
:param mask_file:
:return:
"""
stats = dict()
lh_thickness, rh_thickness = get_thickness_files_for_both_hemispheres(
subjects_dir, subject_id
)
stats["lh"] = masked_thickness_stats(lh_thickness, mask_file)
stats["rh"] = masked_thickness_stats(rh_thickness, mask_file)
return stats
def main():
"""
This function..
"""
os.environ[
"PATH"
] += ":/Shared/sinapse/sharedopt/apps/freesurfer/Darwin/x86_64/6.0-beta/20150915/bin/"
mask_file = "/Shared/sinapse/CACHE/20160712_AtrophySimulation_Results/2559/58661/simulation_1/atrophy_regions.nii.gz"
subj_dir = "/Shared/sinapse/CACHE/20160713_AtrophySimulation_BAW_base_Results/PHD_024/2559_58661/79/"
print(get_thickness_stats_for_both_hemispheres(subj_dir, "FreeSurfer", mask_file))
print("done")
if __name__ == "__main__":
main()
|
[] |
[] |
[
"PATH\"\n "
] |
[]
|
["PATH\"\n "]
|
python
| 1 | 0 | |
main.go
|
package main
import (
"fmt"
"github.com/hashicorp/go-plugin"
"github.com/wulie/go-plugin-bidirectional/shared"
"io/ioutil"
"log"
"os"
"os/exec"
"strconv"
)
type Adder struct {
}
func (a *Adder) Sum(x, y int64) (int64, error) {
return x + y, nil
}
func main() {
// We don't want to see the plugin logs.
log.SetOutput(ioutil.Discard)
// We're a host. Start by launching the plugin process.
client := plugin.NewClient(&plugin.ClientConfig{
HandshakeConfig: shared.Handshake,
Plugins: shared.PluginMap,
Cmd: exec.Command("sh", "-c", os.Getenv("COUNTER_PLUGIN")),
AllowedProtocols: []plugin.Protocol{
plugin.ProtocolNetRPC, plugin.ProtocolGRPC},
})
defer client.Kill()
// Connect via RPC
rpcClient, err := client.Client()
if err != nil {
fmt.Println("Error:", err.Error())
os.Exit(1)
}
// Request the plugin
raw, err := rpcClient.Dispense("counter")
if err != nil {
fmt.Println("Error:", err.Error())
os.Exit(1)
}
// We should have a Counter store now! This feels like a normal interface
// implementation but is in fact over an RPC connection.
counter := raw.(shared.Counter)
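	// CLI handling: the first argument selects the operation, the rest are its operands.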
	os.Args = os.Args[1:]
	if len(os.Args) == 0 {
		fmt.Println("Please only use 'get' or 'put'")
		os.Exit(1)
	}
	switch os.Args[0] {
case "get":
result, err := counter.Get(os.Args[1])
if err != nil {
fmt.Println("Error:", err.Error())
os.Exit(1)
}
fmt.Println(result)
case "put":
i, err := strconv.Atoi(os.Args[2])
if err != nil {
fmt.Println("Error:", err.Error())
os.Exit(1)
}
err = counter.Put(os.Args[1], int64(i), &Adder{})
if err != nil {
fmt.Println("Error:", err.Error())
os.Exit(1)
}
default:
fmt.Println("Please only use 'get' or 'put'")
os.Exit(1)
}
}
|
[
"\"COUNTER_PLUGIN\"",
"\"COUNTER_PLUGIN\""
] |
[] |
[
"COUNTER_PLUGIN"
] |
[]
|
["COUNTER_PLUGIN"]
|
go
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "icat_site.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
batch/batch-put-profiles/main.go
|
package main
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/guregu/dynamo"
)
var region = "ap-northeast-1"
var sourceTableName = "profiles"
var endpoint = ""
func main() {
db := dynamo.New(session.New(), &aws.Config{
Region: aws.String(region),
})
sourceTable := db.Table(sourceTableName)
tableIter := sourceTable.Scan().Iter()
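	// Stream every item from the source table and PUT each one to the profiles endpoint.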
client := &http.Client{}
var item map[string]interface{}
counter := 0
for tableIter.Next(&item) {
counter++
if counter%100 == 0 {
fmt.Printf("%+v items...\n", counter)
}
jsonRep, err := json.Marshal(item)
if err != nil {
fmt.Printf("skipping %+v\n", item)
continue
}
body := bytes.NewBuffer(jsonRep)
req, err := http.NewRequest("PUT", endpoint+item["userId"].(string), body)
if err != nil {
fmt.Printf("skipping %+v\n", item)
continue
}
req.Header.Add("Authorization", "Bearer "+os.Getenv("auth_token"))
		res, err := client.Do(req)
		if err != nil {
			fmt.Printf("skipping %+v\n", item)
			continue
		}
		res.Body.Close() // close the body each iteration to avoid leaking connections
		if res.StatusCode != 200 {
			fmt.Printf("skipping %+v\n", item)
			continue
		}
}
fmt.Printf("%+v items processed successfully.\n", counter)
}
|
[
"\"auth_token\""
] |
[] |
[
"auth_token"
] |
[]
|
["auth_token"]
|
go
| 1 | 0 | |
04_text_classification/sentiment_classification_avg.py
|
# -*- coding: utf-8 -*-
"""
@author:XuMing([email protected])
@description: sentiment classification
"""
# This notebook draws on https://github.com/bentrevett/pytorch-sentiment-analysis
#
# Here we use a PyTorch model and TorchText for sentiment analysis (detecting whether a piece of
# text is positive or negative), using the [IMDb dataset](http://ai.stanford.edu/~amaas/data/sentiment/)
# of movie reviews.
#
# From simple to complex, we will build in turn:
# - Word Averaging model (this file)
# - RNN/LSTM model
# - CNN model
import random
import os
import torch
import torchtext
from torchtext.legacy import datasets
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
SEED = 1
torch.manual_seed(SEED)
random_state = random.seed(SEED)
TEXT = torchtext.legacy.data.Field()
LABEL = torchtext.legacy.data.LabelField(dtype=torch.float)
train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)
print(f'Number of training examples: {len(train_data)}')
print(f'Number of testing examples: {len(test_data)}')
print(vars(train_data.examples[0]))
train_data, valid_data = train_data.split(random_state=random_state)
print(f'Number of training examples: {len(train_data)}')
print(f'Number of validation examples: {len(valid_data)}')
print(f'Number of testing examples: {len(test_data)}')
TEXT.build_vocab(train_data, max_size=25000)
LABEL.build_vocab(train_data)
print(f'Unique tokens in TEXT vocabulary: {len(TEXT.vocab)}')
print(f'Unique tokens in LABEL vocabulary: {len(LABEL.vocab)}')
print('most common vocab: ', TEXT.vocab.freqs.most_common(10))
print(TEXT.vocab.itos[:10])
print(LABEL.vocab.stoi)
BATCH_SIZE = 32
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('device:', device)
train_iter, valid_iter, test_iter = torchtext.legacy.data.BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size=BATCH_SIZE,
device=device,
shuffle=True
)
# Word Average model
import torch.nn as nn
import torch.nn.functional as F
class WordAVGModel(nn.Module):
def __init__(self, vocab_size, embedding_dim, output_dim, pad_idx):
super(WordAVGModel, self).__init__()
self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=pad_idx)
self.fc = nn.Linear(embedding_dim, output_dim)
def forward(self, x):
x = self.embedding(x) # sent_len, batch_size, emb_dim
x = x.permute(1, 0, 2) # batch_size, sent_len, emb_dim
x = F.avg_pool2d(x, (x.shape[1], 1)).squeeze(1) # batch_size, emb_dim
x = self.fc(x)
return x
VOCAB_SIZE = len(TEXT.vocab)
EMBEDDING_DIM = 50
OUTPUT_DIM = 1
PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token]
model = WordAVGModel(VOCAB_SIZE, EMBEDDING_DIM, OUTPUT_DIM, PAD_IDX).to(device)
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'Model has {count_parameters(model):,} trainable parameters')
# pretrained_embeddings = TEXT.vocab.vectors
# model.embedding.weight.data.copy_(pretrained_embeddings)
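# Zero the <unk> and <pad> embedding rows so they contribute nothing to the word average.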
UNK_IDX = TEXT.vocab.stoi[TEXT.unk_token]
model.embedding.weight.data[UNK_IDX] = torch.zeros(EMBEDDING_DIM)
model.embedding.weight.data[PAD_IDX] = torch.zeros(EMBEDDING_DIM)
optimizer = torch.optim.Adam(model.parameters())
loss_fn = nn.BCEWithLogitsLoss().to(device)
def binary_accuracy(preds, y):
rounded_preds = torch.round(torch.sigmoid(preds))
correct = (rounded_preds == y).float()
acc = correct.sum() / len(correct)
return acc
def train_one_epoch(model, data, optimizer, loss_fn):  # one full pass over the training iterator
epoch_loss = 0.
epoch_acc = 0.
model.train()
for batch in data:
text, label = batch.text.to(device), batch.label.to(device)
optimizer.zero_grad()
preds = model(text).squeeze(1)
loss = loss_fn(preds, label)
acc = binary_accuracy(preds, label)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
epoch_acc += acc.item()
return epoch_loss / len(data), epoch_acc / len(data)
def evaluate(model, data, loss_fn):
epoch_loss = 0.
epoch_acc = 0.
model.eval()
with torch.no_grad():
for batch in data:
text = batch.text.to(device)
label = batch.label.to(device)
preds = model(text).squeeze(1)
loss = loss_fn(preds, label)
acc = binary_accuracy(preds, label)
epoch_loss += loss.item()
epoch_acc += acc.item()
return epoch_loss / len(data), epoch_acc / len(data)
import time
def epoch_time(start_time, end_time):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
NUM_EPOCHS = 10
MODEL_PATH = 'wordavg_model.pth'
def train():
best_val_loss = 1e3
for epoch in range(NUM_EPOCHS):
start_time = time.time()
        train_loss, train_acc = train_one_epoch(model, train_iter, optimizer, loss_fn)
valid_loss, valid_acc = evaluate(model, valid_iter, loss_fn)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
if valid_loss < best_val_loss:
best_val_loss = valid_loss
torch.save(model.state_dict(), MODEL_PATH)
print(f"Epoch: {epoch + 1}/{NUM_EPOCHS} | Epoch Time: {epoch_mins}m {epoch_secs}s")
print(f"\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc * 100:.4f}%")
print(f"\tValid Loss: {valid_loss:.3f} | Valid Acc: {valid_acc * 100:.4f}%")
train()
model.load_state_dict(torch.load(MODEL_PATH))
test_loss, test_acc = evaluate(model, test_iter, loss_fn)
print(f"\tTest Loss: {test_loss:.3f} | Test Acc: {test_acc * 100:.4f}%")
def predict_sentiment(sentence):
tokens = [token for token in sentence.split()]
indexed = [TEXT.vocab.stoi[t] for t in tokens]
tensor = torch.LongTensor(indexed).to(device)
tensor = tensor.unsqueeze(1)
pred = torch.sigmoid(model(tensor))
return pred.item()
print(predict_sentiment('This film is terrible'))
print(predict_sentiment('This film is good'))
|
[] |
[] |
[
"KMP_DUPLICATE_LIB_OK"
] |
[]
|
["KMP_DUPLICATE_LIB_OK"]
|
python
| 1 | 0 | |
src/client/client.go
|
package client
import (
"crypto/x509"
"encoding/base64"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
types "github.com/gogo/protobuf/types"
log "github.com/sirupsen/logrus"
"github.com/pachyderm/pachyderm/src/client/admin"
"github.com/pachyderm/pachyderm/src/client/auth"
"github.com/pachyderm/pachyderm/src/client/debug"
"github.com/pachyderm/pachyderm/src/client/deploy"
"github.com/pachyderm/pachyderm/src/client/enterprise"
"github.com/pachyderm/pachyderm/src/client/health"
"github.com/pachyderm/pachyderm/src/client/limit"
"github.com/pachyderm/pachyderm/src/client/pfs"
"github.com/pachyderm/pachyderm/src/client/pkg/config"
"github.com/pachyderm/pachyderm/src/client/pkg/grpcutil"
"github.com/pachyderm/pachyderm/src/client/pps"
"github.com/pachyderm/pachyderm/src/client/version/versionpb"
)
const (
// MaxListItemsLog specifies the maximum number of items we log in response to a List* API
MaxListItemsLog = 10
// StorageSecretName is the name of the Kubernetes secret in which
// storage credentials are stored.
StorageSecretName = "pachyderm-storage-secret"
)
// PfsAPIClient is an alias for pfs.APIClient.
type PfsAPIClient pfs.APIClient
// PpsAPIClient is an alias for pps.APIClient.
type PpsAPIClient pps.APIClient
// ObjectAPIClient is an alias for pfs.ObjectAPIClient
type ObjectAPIClient pfs.ObjectAPIClient
// AuthAPIClient is an alias of auth.APIClient
type AuthAPIClient auth.APIClient
// DeployAPIClient is an alias of auth.APIClient
type DeployAPIClient deploy.APIClient
// VersionAPIClient is an alias of versionpb.APIClient
type VersionAPIClient versionpb.APIClient
// AdminAPIClient is an alias of admin.APIClient
type AdminAPIClient admin.APIClient
// DebugClient is an alias of debug.DebugClient
type DebugClient debug.DebugClient
// An APIClient is a wrapper around pfs, pps and block APIClients.
type APIClient struct {
PfsAPIClient
PpsAPIClient
ObjectAPIClient
AuthAPIClient
DeployAPIClient
VersionAPIClient
AdminAPIClient
DebugClient
Enterprise enterprise.APIClient // not embedded--method name conflicts with AuthAPIClient
// addr is a "host:port" string pointing at a pachd endpoint
addr string
// The trusted CAs, for authenticating a pachd server over TLS
caCerts *x509.CertPool
// clientConn is a cached grpc connection to 'addr'
clientConn *grpc.ClientConn
// healthClient is a cached healthcheck client connected to 'addr'
healthClient health.HealthClient
// streamSemaphore limits the number of concurrent message streams between
// this client and pachd
limiter limit.ConcurrencyLimiter
// metricsUserID is an identifier that is included in usage metrics sent to
// Pachyderm Inc. and is used to count the number of unique Pachyderm users.
// If unset, no usage metrics are sent back to Pachyderm Inc.
metricsUserID string
// metricsPrefix is used to send information from this client to Pachyderm Inc
// for usage metrics
metricsPrefix string
// authenticationToken is an identifier that authenticates the caller in case
// they want to access privileged data
authenticationToken string
// The context used in requests, can be set with WithCtx
ctx context.Context
}
// GetAddress returns the pachd host:port with which 'c' is communicating. If
// 'c' was created with NewInCluster or NewOnUserMachine, this is the address
// that was read from the environment or config.
func (c *APIClient) GetAddress() string {
return c.addr
}
// DefaultMaxConcurrentStreams defines the max number of Putfiles or Getfiles happening simultaneously
const DefaultMaxConcurrentStreams = 100
type clientSettings struct {
maxConcurrentStreams int
caCerts *x509.CertPool
}
// NewFromAddress constructs a new APIClient for the server at addr.
func NewFromAddress(addr string, options ...Option) (*APIClient, error) {
// Apply creation options
settings := clientSettings{
maxConcurrentStreams: DefaultMaxConcurrentStreams,
}
for _, option := range options {
if err := option(&settings); err != nil {
return nil, err
}
}
c := &APIClient{
addr: addr,
caCerts: settings.caCerts,
limiter: limit.New(settings.maxConcurrentStreams),
}
if err := c.connect(); err != nil {
return nil, err
}
return c, nil
}
// Option is a client creation option that may be passed to NewOnUserMachine(), or NewInCluster()
type Option func(*clientSettings) error
// WithMaxConcurrentStreams instructs the New* functions to create a client
// that can have at most 'streams' concurrent streams open with pachd at a time
func WithMaxConcurrentStreams(streams int) Option {
return func(settings *clientSettings) error {
settings.maxConcurrentStreams = streams
return nil
}
}
func addCertFromFile(pool *x509.CertPool, path string) error {
bytes, err := ioutil.ReadFile(path)
if err != nil {
return fmt.Errorf("could not read x509 cert from \"%s\": %v", path, err)
}
if ok := pool.AppendCertsFromPEM(bytes); !ok {
return fmt.Errorf("could not add %s to cert pool as PEM", path)
}
return nil
}
// WithRootCAs instructs the New* functions to create a client that uses the
// given signed x509 certificates as the trusted root certificates (instead of
// the system certs). Introduced to pass certs provided via command-line flags
func WithRootCAs(path string) Option {
return func(settings *clientSettings) error {
settings.caCerts = x509.NewCertPool()
return addCertFromFile(settings.caCerts, path)
}
}
// WithAdditionalRootCAs instructs the New* functions to additionally trust the
// given base64-encoded, signed x509 certificates as root certificates.
// Introduced to pass certs in the Pachyderm config
func WithAdditionalRootCAs(pemBytes []byte) Option {
return func(settings *clientSettings) error {
// append certs from config
if settings.caCerts == nil {
settings.caCerts = x509.NewCertPool()
}
if ok := settings.caCerts.AppendCertsFromPEM(pemBytes); !ok {
return fmt.Errorf("server CA certs are present in Pachyderm config, but could not be added to client")
}
return nil
}
}
// WithAdditionalPachdCert instructs the New* functions to additionally trust
// the signed cert mounted in Pachd's cert volume. This is used by Pachd
// when connecting to itself (if no cert is present, the client's cert pool
// will not be modified, so that if no other options have been passed, pachd
// will connect to itself over an insecure connection)
func WithAdditionalPachdCert() Option {
return func(settings *clientSettings) error {
if _, err := os.Stat(grpcutil.TLSVolumePath); err == nil {
if settings.caCerts == nil {
settings.caCerts = x509.NewCertPool()
}
return addCertFromFile(settings.caCerts, path.Join(grpcutil.TLSVolumePath, grpcutil.TLSCertFile))
}
return nil
}
}
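// getCertOptionsFromEnv reads PACH_CA_CERTS (a comma-separated list of paths)
// and returns one WithAdditionalRootCAs option per readable PEM cert found.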
func getCertOptionsFromEnv() ([]Option, error) {
var options []Option
if certPaths, ok := os.LookupEnv("PACH_CA_CERTS"); ok {
paths := strings.Split(certPaths, ",")
for _, p := range paths {
// Try to read all certs under 'p'--skip any that we can't read/stat
if err := filepath.Walk(p, func(p string, info os.FileInfo, err error) error {
if err != nil {
log.Warnf("skipping \"%s\", could not stat path: %v", p, err)
return nil // Don't try and fix any errors encountered by Walk() itself
}
if info.IsDir() {
return nil // We'll just read the children of any directories when we traverse them
}
pemBytes, err := ioutil.ReadFile(p)
if err != nil {
log.Warnf("could not read server CA certs at %s: %v", p, err)
return nil
}
options = append(options, WithAdditionalRootCAs(pemBytes))
return nil
}); err != nil {
return nil, err
}
}
}
return options, nil
}
func getAddrAndExtraOptionsOnUserMachine(cfg *config.Config) (string, []Option, error) {
// 1) ADDRESS environment variable (shell-local) overrides global config
if envAddr, ok := os.LookupEnv("ADDRESS"); ok {
options, err := getCertOptionsFromEnv()
if err != nil {
return "", nil, err
}
return envAddr, options, nil
}
// 2) Get target address from global config if possible
if cfg != nil && cfg.V1 != nil && cfg.V1.PachdAddress != "" {
// Also get cert info from config (if set)
if cfg.V1.ServerCAs != "" {
pemBytes, err := base64.StdEncoding.DecodeString(cfg.V1.ServerCAs)
if err != nil {
return "", nil, fmt.Errorf("could not decode server CA certs in config: %v", err)
}
return cfg.V1.PachdAddress, []Option{WithAdditionalRootCAs(pemBytes)}, nil
}
return cfg.V1.PachdAddress, nil, nil
}
	// 3) Fall back to the default address if nothing else is configured
options, err := getCertOptionsFromEnv()
if err != nil {
return "", nil, err
}
return "0.0.0.0:30650", options, nil
}
// NewOnUserMachine constructs a new APIClient using env vars that may be set
// on a user's machine (i.e. ADDRESS), as well as $HOME/.pachyderm/config if it
// exists. This is primarily intended to be used with the pachctl binary, but
// may also be useful in tests.
//
// TODO(msteffen) this logic is fairly linux/unix specific, and makes the
// pachyderm client library incompatible with Windows. We may want to move this
// (and similar) logic into src/server and have it call a NewFromOptions()
// constructor.
func NewOnUserMachine(reportMetrics bool, prefix string, options ...Option) (*APIClient, error) {
cfg, err := config.Read()
if err != nil {
		// errors reading the config are non-fatal
		log.Warningf("error loading user config from ~/.pachyderm/config: %v", err)
}
// create new pachctl client
addr, cfgOptions, err := getAddrAndExtraOptionsOnUserMachine(cfg)
if err != nil {
return nil, err
}
client, err := NewFromAddress(addr, append(options, cfgOptions...)...)
if err != nil {
return nil, err
}
// Add metrics info & authentication token
client.metricsPrefix = prefix
if cfg.UserID != "" && reportMetrics {
client.metricsUserID = cfg.UserID
}
if cfg.V1 != nil && cfg.V1.SessionToken != "" {
client.authenticationToken = cfg.V1.SessionToken
}
return client, nil
}
// NewInCluster constructs a new APIClient using env vars that Kubernetes creates.
// This should be used to access Pachyderm from within a Kubernetes cluster
// with Pachyderm running on it.
func NewInCluster(options ...Option) (*APIClient, error) {
addr := os.Getenv("PACHD_PORT_650_TCP_ADDR")
if addr == "" {
return nil, fmt.Errorf("PACHD_PORT_650_TCP_ADDR not set")
}
// create new pachctl client
return NewFromAddress(addr, options...)
}
// Close the connection to gRPC
func (c *APIClient) Close() error {
return c.clientConn.Close()
}
// DeleteAll deletes everything in the cluster.
// Use with caution, there is no undo.
func (c APIClient) DeleteAll() error {
if _, err := c.AuthAPIClient.Deactivate(
c.Ctx(),
&auth.DeactivateRequest{},
); err != nil && !auth.IsErrNotActivated(err) {
return grpcutil.ScrubGRPC(err)
}
if _, err := c.PpsAPIClient.DeleteAll(
c.Ctx(),
&types.Empty{},
); err != nil {
return grpcutil.ScrubGRPC(err)
}
if _, err := c.PfsAPIClient.DeleteAll(
c.Ctx(),
&types.Empty{},
); err != nil {
return grpcutil.ScrubGRPC(err)
}
return nil
}
// SetMaxConcurrentStreams sets the maximum number of concurrent streams the
// client can have. It is not safe to call this while operations are
// outstanding. (Note the pointer receiver: with a value receiver the new
// limiter would be set on a copy and silently discarded.)
func (c *APIClient) SetMaxConcurrentStreams(n int) {
	c.limiter = limit.New(n)
}
// DefaultDialOptions is a helper returning a slice of grpc.Dial options
// such that grpc.Dial() is synchronous: the call doesn't return until
// the connection has been established and it's safe to send RPCs
func DefaultDialOptions() []grpc.DialOption {
return []grpc.DialOption{
// Don't return from Dial() until the connection has been established
grpc.WithBlock(),
// If no connection is established in 30s, fail the call
grpc.WithTimeout(30 * time.Second),
grpc.WithDefaultCallOptions(
grpc.MaxCallRecvMsgSize(grpcutil.MaxMsgSize),
grpc.MaxCallSendMsgSize(grpcutil.MaxMsgSize),
),
}
}
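// connect dials pachd (with TLS when CA certs are configured, plaintext
// otherwise) and wires all per-service API clients onto the shared connection.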
func (c *APIClient) connect() error {
keepaliveOpt := grpc.WithKeepaliveParams(keepalive.ClientParameters{
Time: 20 * time.Second, // if 20s since last msg (any kind), ping
Timeout: 20 * time.Second, // if no response to ping for 20s, reset
PermitWithoutStream: true, // send ping even if no active RPCs
})
var dialOptions []grpc.DialOption
if c.caCerts == nil {
dialOptions = append(DefaultDialOptions(), grpc.WithInsecure(), keepaliveOpt)
} else {
tlsCreds := credentials.NewClientTLSFromCert(c.caCerts, "")
dialOptions = append(DefaultDialOptions(),
grpc.WithTransportCredentials(tlsCreds),
keepaliveOpt)
}
clientConn, err := grpc.Dial(c.addr, dialOptions...)
if err != nil {
return err
}
c.PfsAPIClient = pfs.NewAPIClient(clientConn)
c.PpsAPIClient = pps.NewAPIClient(clientConn)
c.ObjectAPIClient = pfs.NewObjectAPIClient(clientConn)
c.AuthAPIClient = auth.NewAPIClient(clientConn)
c.Enterprise = enterprise.NewAPIClient(clientConn)
c.DeployAPIClient = deploy.NewAPIClient(clientConn)
c.VersionAPIClient = versionpb.NewAPIClient(clientConn)
c.AdminAPIClient = admin.NewAPIClient(clientConn)
c.DebugClient = debug.NewDebugClient(clientConn)
c.clientConn = clientConn
c.healthClient = health.NewHealthClient(clientConn)
return nil
}
// AddMetadata adds necessary metadata (including authentication credentials)
// to the context 'ctx', preserving any metadata that is present in either the
// incoming or outgoing metadata of 'ctx'.
func (c *APIClient) AddMetadata(ctx context.Context) context.Context {
// TODO(msteffen): There are several places in this client where it's possible
// to set per-request metadata (specifically auth tokens): client.WithCtx(),
// client.SetAuthToken(), etc. These should be consolidated, as this API
// doesn't make it obvious how these settings are resolved when they conflict.
clientData := make(map[string]string)
if c.authenticationToken != "" {
clientData[auth.ContextTokenKey] = c.authenticationToken
}
// metadata API downcases all the key names
if c.metricsUserID != "" {
clientData["userid"] = c.metricsUserID
clientData["prefix"] = c.metricsPrefix
}
// Rescue any metadata pairs already in 'ctx' (otherwise
// metadata.NewOutgoingContext() would drop them). Note that this is similar
// to metadata.Join(), but distinct because it discards conflicting k/v pairs
// instead of merging them)
incomingMD, _ := metadata.FromIncomingContext(ctx)
outgoingMD, _ := metadata.FromOutgoingContext(ctx)
clientMD := metadata.New(clientData)
finalMD := make(metadata.MD) // Collect k/v pairs
for _, md := range []metadata.MD{incomingMD, outgoingMD, clientMD} {
for k, v := range md {
finalMD[k] = v
}
}
return metadata.NewOutgoingContext(ctx, finalMD)
}
// Ctx is a convenience function that adds Pachyderm authn metadata to the
// client's context, or to context.Background() if no context has been set.
func (c *APIClient) Ctx() context.Context {
if c.ctx == nil {
return c.AddMetadata(context.Background())
}
return c.AddMetadata(c.ctx)
}
// WithCtx returns a new APIClient that uses ctx for requests it sends. Note
// that the new APIClient will still use the authentication token and metrics
// metadata of this client, so this is only useful for propagating other
// context-associated metadata.
func (c *APIClient) WithCtx(ctx context.Context) *APIClient {
result := *c // copy c
result.ctx = ctx
return &result
}
// SetAuthToken sets the authentication token that will be used for all
// API calls for this client.
func (c *APIClient) SetAuthToken(token string) {
c.authenticationToken = token
}
|
[
"\"PACHD_PORT_650_TCP_ADDR\""
] |
[] |
[
"PACHD_PORT_650_TCP_ADDR"
] |
[]
|
["PACHD_PORT_650_TCP_ADDR"]
|
go
| 1 | 0 | |
Chapter 3/prototypes.py
|
import sys
import os
from django.conf import settings
DEBUG = os.environ.get('DEBUG', 'on') == 'on'
SECRET_KEY = os.environ.get('SECRET_KEY', 'a^hi#2sv)yy%v(6fhlv(j@-5e%+7h*d%#g%+ru(hv-7rj08r7n')
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', 'localhost').split(',')
BASE_DIR = os.path.dirname(__file__)
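# Configure Django entirely in code so this single file runs without a project scaffold.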
settings.configure(
DEBUG=DEBUG,
SECRET_KEY=SECRET_KEY,
ALLOWED_HOSTS=ALLOWED_HOSTS,
ROOT_URLCONF='sitebuilder.urls',
MIDDLEWARE_CLASSES=(),
INSTALLED_APPS=(
'django.contrib.staticfiles',
'sitebuilder'
),
TEMPLATES=(
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True
},
),
STATIC_URL='/static/',
SITE_PAGES_DIRECTORY=os.path.join(BASE_DIR, 'pages'),
SITE_OUTPUT_DIRECTORY=os.path.join(BASE_DIR, '_build'),
STATIC_ROOT=os.path.join(BASE_DIR, '_build', 'static'),
STATICFILES_STORAGE='django.contrib.staticfiles.storage.CachedStaticFilesStorage'
)
if __name__ == '__main__':
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[] |
[] |
[
"SECRET_KEY",
"ALLOWED_HOSTS",
"DEBUG"
] |
[]
|
["SECRET_KEY", "ALLOWED_HOSTS", "DEBUG"]
|
python
| 3 | 0 | |
neptune/new/internal/hardware/hardware_metric_reporting_job.py
|
#
# Copyright (c) 2020, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import time
from itertools import groupby
from typing import TYPE_CHECKING, Optional, Dict
from neptune.new.types.series import FloatSeries
from neptune.internal.hardware.metrics.reports.metric_reporter import MetricReporter
from neptune.internal.hardware.metrics.reports.metric_reporter_factory import (
MetricReporterFactory,
)
from neptune.internal.hardware.metrics.metrics_factory import MetricsFactory
from neptune.internal.hardware.gauges.gauge_factory import GaugeFactory
from neptune.internal.hardware.resources.system_resource_info_factory import (
SystemResourceInfoFactory,
)
from neptune.internal.hardware.gauges.gauge_mode import GaugeMode
from neptune.internal.hardware.system.system_monitor import SystemMonitor
from neptune.utils import in_docker
from neptune.new.internal.hardware.gpu.gpu_monitor import GPUMonitor
from neptune.new.internal.background_job import BackgroundJob
from neptune.new.internal.threading.daemon import Daemon
if TYPE_CHECKING:
from neptune.new.metadata_containers import MetadataContainer
_logger = logging.getLogger(__name__)
class HardwareMetricReportingJob(BackgroundJob):
def __init__(self, period: float = 10, attribute_namespace: str = "monitoring"):
self._period = period
self._thread = None
self._started = False
self._gauges_in_resource: Dict[str, int] = dict()
self._attribute_namespace = attribute_namespace
def start(self, container: "MetadataContainer"):
gauge_mode = GaugeMode.CGROUP if in_docker() else GaugeMode.SYSTEM
system_resource_info = SystemResourceInfoFactory(
system_monitor=SystemMonitor(),
gpu_monitor=GPUMonitor(),
os_environ=os.environ,
).create(gauge_mode=gauge_mode)
gauge_factory = GaugeFactory(gauge_mode=gauge_mode)
metrics_factory = MetricsFactory(
gauge_factory=gauge_factory, system_resource_info=system_resource_info
)
metrics_container = metrics_factory.create_metrics_container()
metric_reporter = MetricReporterFactory(time.time()).create(
metrics=metrics_container.metrics()
)
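        # Remember how many gauges each resource reports so attribute names can be
        # flattened when a resource exposes exactly one gauge.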
for metric in metrics_container.metrics():
self._gauges_in_resource[metric.resource_type] = len(metric.gauges)
for metric in metrics_container.metrics():
for gauge in metric.gauges:
path = self.get_attribute_name(metric.resource_type, gauge.name())
if not container.get_attribute(path):
container[path] = FloatSeries(
[], min=metric.min_value, max=metric.max_value, unit=metric.unit
)
self._thread = self.ReportingThread(
self, self._period, container, metric_reporter
)
self._thread.start()
self._started = True
def stop(self):
if not self._started:
return
self._thread.interrupt()
def join(self, seconds: Optional[float] = None):
if not self._started:
return
self._thread.join(seconds)
def get_attribute_name(self, resource_type, gauge_name) -> str:
gauges_count = self._gauges_in_resource.get(resource_type, None)
if gauges_count is None or gauges_count != 1:
return "{}/{}_{}".format(
self._attribute_namespace, resource_type, gauge_name
).lower()
return "{}/{}".format(self._attribute_namespace, resource_type).lower()
class ReportingThread(Daemon):
def __init__(
self,
outer: "HardwareMetricReportingJob",
period: float,
container: "MetadataContainer",
metric_reporter: MetricReporter,
):
super().__init__(sleep_time=period, name="NeptuneReporting")
self._outer = outer
self._container = container
self._metric_reporter = metric_reporter
def work(self) -> None:
metric_reports = self._metric_reporter.report(time.time())
for report in metric_reports:
for gauge_name, metric_values in groupby(
report.values, lambda value: value.gauge_name
):
attr = self._container[
self._outer.get_attribute_name(
report.metric.resource_type, gauge_name
)
]
# TODO: Avoid loop
for metric_value in metric_values:
attr.log(
value=metric_value.value, timestamp=metric_value.timestamp
)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
emscripten.py
|
# Copyright 2010 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""A small wrapper script around the core JS compiler. This calls that
compiler with the settings given to it. It can also read data from C/C++
header files (so that the JS compiler can see the constants in those
headers, for the libc implementation in JS).
"""
from __future__ import print_function
import difflib
import os
import json
import subprocess
import re
import time
import logging
import pprint
from collections import OrderedDict
from tools import shared
from tools import gen_struct_info
from tools import jsrun
from tools.response_file import substitute_response_files
from tools.shared import WINDOWS, asstr, path_from_root, exit_with_error, asmjs_mangle, treat_as_user_function
from tools.toolchain_profiler import ToolchainProfiler
from tools.minified_js_name_generator import MinifiedJsNameGenerator
logger = logging.getLogger('emscripten')
STDERR_FILE = os.environ.get('EMCC_STDERR_FILE')
if STDERR_FILE:
STDERR_FILE = os.path.abspath(STDERR_FILE)
logger.info('logging stderr in js compiler phase into %s' % STDERR_FILE)
STDERR_FILE = open(STDERR_FILE, 'w')
def get_configuration():
if hasattr(get_configuration, 'configuration'):
return get_configuration.configuration
configuration = shared.Configuration(environ=os.environ)
get_configuration.configuration = configuration
return configuration
def quote(prop):
if shared.Settings.USE_CLOSURE_COMPILER == 2:
return ''.join(["'" + p + "'" for p in prop.split('.')])
else:
return prop
def access_quote(prop):
if shared.Settings.USE_CLOSURE_COMPILER == 2:
return ''.join(["['" + p + "']" for p in prop.split('.')])
else:
return '.' + prop
def emscript_fastcomp(infile, outfile, memfile, compiler_engine,
temp_files, DEBUG):
"""Runs the emscripten LLVM-to-JS compiler.
Args:
infile: The path to the input LLVM assembly file.
outfile: An open file object where the output is written.
"""
assert shared.Settings.ASM_JS, 'fastcomp is asm.js-only (mode 1 or 2)'
success = False
try:
# Overview:
# * Run LLVM backend to emit JS. JS includes function bodies, memory initializer,
# and various metadata
# * Run compiler.js on the metadata to emit the shell js code, pre/post-ambles,
# JS library dependencies, etc.
# metadata is modified by reference in some of the below
# these functions are split up to force variables to go out of scope and allow
# memory to be reclaimed
with ToolchainProfiler.profile_block('get_and_parse_backend'):
backend_output = compile_js(infile, temp_files, DEBUG)
funcs, metadata, mem_init = parse_fastcomp_output(backend_output, DEBUG)
fixup_metadata_tables(metadata)
funcs = fixup_functions(funcs, metadata)
with ToolchainProfiler.profile_block('compiler_glue'):
glue, forwarded_data = compiler_glue(metadata, compiler_engine, temp_files, DEBUG)
with ToolchainProfiler.profile_block('function_tables_and_exports'):
(post, function_table_data, bundled_args) = (
function_tables_and_exports(funcs, metadata, mem_init, glue, forwarded_data, outfile, DEBUG))
with ToolchainProfiler.profile_block('write_output_file'):
finalize_output(outfile, post, function_table_data, bundled_args, metadata, DEBUG)
success = True
finally:
outfile.close()
if not success:
shared.try_delete(outfile.name) # remove partial output
def compile_js(infile, temp_files, DEBUG):
"""Compile infile with asm.js backend, return the contents of the compiled js"""
with temp_files.get_file('.4.js') as temp_js:
backend_cmd = create_backend_cmd(infile, temp_js)
if DEBUG:
logger.debug('emscript: llvm backend: ' + ' '.join(backend_cmd))
t = time.time()
shared.print_compiler_stage(backend_cmd)
with ToolchainProfiler.profile_block('emscript_llvm_backend'):
shared.check_call(backend_cmd)
if DEBUG:
logger.debug(' emscript: llvm backend took %s seconds' % (time.time() - t))
# Split up output
backend_output = open(temp_js).read()
return backend_output
def parse_fastcomp_output(backend_output, DEBUG):
start_funcs_marker = '// EMSCRIPTEN_START_FUNCTIONS'
end_funcs_marker = '// EMSCRIPTEN_END_FUNCTIONS'
metadata_split_marker = '// EMSCRIPTEN_METADATA'
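  # The backend emits one blob: function bodies between the START/END markers,
  # then the memory initializer, then JSON metadata after the metadata marker.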
start_funcs = backend_output.index(start_funcs_marker)
end_funcs = backend_output.rindex(end_funcs_marker)
metadata_split = backend_output.rindex(metadata_split_marker)
funcs = backend_output[start_funcs + len(start_funcs_marker):end_funcs]
metadata_raw = backend_output[metadata_split + len(metadata_split_marker):]
mem_init = backend_output[end_funcs + len(end_funcs_marker):metadata_split]
  # we no longer use the "Runtime" object. TODO: stop emitting it in the backend
mem_init = mem_init.replace('Runtime.', '')
try:
metadata = json.loads(metadata_raw, object_pairs_hook=OrderedDict)
except ValueError:
logger.error('emscript: failure to parse metadata output from compiler backend. raw output is: \n' + metadata_raw)
raise
# This key is being added to fastcomp but doesn't exist in the current
# version.
metadata.setdefault('externFunctions', [])
if 'externUses' not in metadata:
exit_with_error('Your fastcomp compiler is out of date, please update! (need >= 1.38.26)')
# JS optimizer turns some heap accesses to others as an optimization, so make HEAP8 imply HEAPU8, HEAP16->HEAPU16, and HEAPF64->HEAPF32.
if 'Int8Array' in metadata['externUses']:
metadata['externUses'] += ['Uint8Array']
if 'Int16Array' in metadata['externUses']:
metadata['externUses'] += ['Uint16Array']
if 'Float64Array' in metadata['externUses']:
metadata['externUses'] += ['Float32Array']
# If we are generating references to Math.fround() from here in emscripten.py, declare it used as well.
if provide_fround() or metadata['simd']:
metadata['externUses'] += ['Math.fround']
# functions marked llvm.used in the code are exports requested by the user
shared.Building.user_requested_exports += metadata['exports']
# In MINIMAL_RUNTIME stackSave() and stackRestore are JS library functions. If LLVM backend generated
# calls to invoke_*() functions that save and restore the stack, we must include the stack functions
# explicitly into the build. (In traditional runtime the stack functions are always present, so this
# tracking is not needed)
if shared.Settings.MINIMAL_RUNTIME and (len(metadata['invokeFuncs']) > 0 or shared.Settings.LINKABLE):
shared.Settings.EXPORTED_FUNCTIONS += ['stackSave', 'stackRestore']
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$stackSave', '$stackRestore']
return funcs, metadata, mem_init
def fixup_metadata_tables(metadata):
# if emulating pointer casts, force all tables to the size of the largest
# (for wasm, we use binaryen's fpcast-emu pass, we don't need to do anything
# here)
if shared.Settings.EMULATE_FUNCTION_POINTER_CASTS and not shared.Settings.WASM:
max_size = 0
for k, v in metadata['tables'].items():
max_size = max(max_size, v.count(',') + 1)
for k, v in metadata['tables'].items():
curr = v.count(',') + 1
if curr < max_size:
if v.count('[]') == 1:
metadata['tables'][k] = v.replace(']', (','.join(['0'] * (max_size - curr)) + ']'))
else:
metadata['tables'][k] = v.replace(']', (',0' * (max_size - curr)) + ']')
if shared.Settings.SIDE_MODULE:
for k in metadata['tables'].keys():
metadata['tables'][k] = metadata['tables'][k].replace('var FUNCTION_TABLE_', 'var SIDE_FUNCTION_TABLE_')
def fixup_functions(funcs, metadata):
# function table masks
table_sizes = {}
for k, v in metadata['tables'].items():
# undercounts by one, but that is what we want
table_sizes[k] = str(v.count(','))
# if shared.Settings.ASSERTIONS >= 2 and table_sizes[k] == 0:
# shared.warning('no function pointers with signature ' + k + ', but there is a call, which will abort if it occurs (this can result from undefined behavior, check for compiler warnings on your source files and consider -Werror)'
funcs = re.sub(r"#FM_(\w+)#", lambda m: table_sizes[m.groups(0)[0]], funcs)
# fix +float into float.0, if not running js opts
if not shared.Settings.RUNNING_JS_OPTS:
def fix_dot_zero(m):
num = m.group(3)
# TODO: handle 0x floats?
if num.find('.') < 0:
e = num.find('e')
if e < 0:
num += '.0'
else:
num = num[:e] + '.0' + num[e:]
return m.group(1) + m.group(2) + num
funcs = re.sub(r'([(=,+\-*/%<>:?] *)\+(-?)((0x)?[0-9a-f]*\.?[0-9]+([eE][-+]?[0-9]+)?)', fix_dot_zero, funcs)
return funcs
def compiler_glue(metadata, compiler_engine, temp_files, DEBUG):
if DEBUG:
logger.debug('emscript: js compiler glue')
t = time.time()
# FIXME: do these one by one as normal js lib funcs
metadata['declares'] = [i64_func for i64_func in metadata['declares'] if i64_func not in ['getHigh32', 'setHigh32']]
update_settings_glue(metadata, DEBUG)
assert not (metadata['simd'] and shared.Settings.WASM), 'SIMD is used, but not supported in WASM mode yet'
assert not (shared.Settings.SIMD and shared.Settings.WASM), 'SIMD is requested, but not supported in WASM mode yet'
glue, forwarded_data = compile_settings(compiler_engine, temp_files)
if DEBUG:
logger.debug(' emscript: glue took %s seconds' % (time.time() - t))
return glue, forwarded_data
def analyze_table(function_table_data):
def table_size(table):
table_contents = table[table.index('[') + 1: table.index(']')]
if len(table_contents) == 0: # empty table
return 0
return table_contents.count(',') + 1
# note that this is a minimal estimate, as when asm2wasm lays out tables it adds padding
table_total_size = sum(table_size(s) for s in function_table_data.values())
shared.Settings.WASM_TABLE_SIZE = table_total_size
# Extracts from JS library code dependencies to runtime primitives.
def get_asm_extern_primitives(pre):
primitives = re.search(r'\/\/ ASM_LIBRARY EXTERN PRIMITIVES: ([^\n]*)', pre)
if primitives:
return [x.strip().replace('Math_', 'Math.') for x in primitives.group(1).split(',')]
else:
return []
def compute_minimal_runtime_initializer_and_exports(post, initializers, exports, receiving):
# Generate invocations for all global initializers directly off the asm export object, e.g. asm['__GLOBAL__INIT']();
post = post.replace('/*** RUN_GLOBAL_INITIALIZERS(); ***/', '\n'.join(["asm['" + x + "']();" for x in global_initializer_funcs(initializers)]))
if shared.Settings.WASM:
# Declare all exports out to global JS scope so that JS library functions can access them in a way that minifies well with Closure
# e.g. var a,b,c,d,e,f;
exports_that_are_not_initializers = [x for x in exports if x not in initializers]
if shared.Settings.WASM_BACKEND:
# In Wasm backend the exports are still unmangled at this point, so mangle the names here
exports_that_are_not_initializers = [asmjs_mangle(x) for x in exports_that_are_not_initializers]
post = post.replace('/*** ASM_MODULE_EXPORTS_DECLARES ***/', 'var ' + ','.join(exports_that_are_not_initializers) + ';')
# Generate assignments from all asm.js/wasm exports out to the JS variables above: e.g. a = asm['a']; b = asm['b'];
post = post.replace('/*** ASM_MODULE_EXPORTS ***/', receiving)
receiving = ''
return post, receiving
def function_tables_and_exports(funcs, metadata, mem_init, glue, forwarded_data, outfile, DEBUG):
if DEBUG:
logger.debug('emscript: python processing: function tables and exports')
t = time.time()
forwarded_json = json.loads(forwarded_data)
# merge in information from llvm backend
function_table_data = metadata['tables']
if shared.Settings.WASM:
analyze_table(function_table_data)
# merge forwarded data
shared.Settings.EXPORTED_FUNCTIONS = forwarded_json['EXPORTED_FUNCTIONS']
pre, post = glue.split('// EMSCRIPTEN_END_FUNCS')
pre = apply_script_source(pre)
asm_extern_primitives = get_asm_extern_primitives(pre)
metadata['externUses'] += asm_extern_primitives
pre = memory_and_global_initializers(pre, metadata, mem_init)
pre, funcs_js = get_js_funcs(pre, funcs)
all_exported_functions = get_all_exported_functions(function_table_data)
all_implemented = get_all_implemented(forwarded_json, metadata)
report_missing_symbols(all_implemented, pre)
implemented_functions = get_implemented_functions(metadata)
pre = include_asm_consts(pre, forwarded_json, metadata)
pre = apply_table(pre)
outfile.write(pre)
pre = None
# Move preAsms to their right place
def move_preasm(m):
contents = m.groups(0)[0]
outfile.write(contents + '\n')
return ''
if not shared.Settings.BOOTSTRAPPING_STRUCT_INFO and len(funcs_js) > 1:
funcs_js[1] = re.sub(r'/\* PRE_ASM \*/(.*)\n', move_preasm, funcs_js[1])
if 'pre' in function_table_data:
pre_tables = function_table_data['pre']
del function_table_data['pre']
else:
pre_tables = ''
function_table_sigs = list(function_table_data.keys())
in_table, debug_tables, function_tables_defs = make_function_tables_defs(
implemented_functions, all_implemented, function_table_data, metadata)
exported_implemented_functions = get_exported_implemented_functions(
all_exported_functions, all_implemented, metadata)
# List of function signatures of used 'invoke_xxx()' functions in the application
# For backwards compatibility if one might be using a mismatching Emscripten compiler version, if 'invokeFuncs' is not present in metadata,
# use the full list of signatures in function table and generate invoke_() functions for all signatures in the program (producing excessive code size)
# we must also emit the full list if we are emitting code that can be linked later
if 'invokeFuncs' in metadata and not shared.Settings.LINKABLE:
invoke_function_names = metadata['invokeFuncs']
else:
invoke_function_names = ['invoke_' + x for x in function_table_sigs]
asm_setup = create_asm_setup(debug_tables, function_table_data, invoke_function_names, metadata)
basic_funcs = create_basic_funcs(function_table_sigs, invoke_function_names)
basic_vars = create_basic_vars(exported_implemented_functions, forwarded_json, metadata)
funcs_js += create_mftCall_funcs(function_table_data)
exports = create_exports(exported_implemented_functions, in_table, function_table_data, metadata)
# calculate globals
try:
del forwarded_json['Variables']['globals']['_llvm_global_ctors'] # not a true variable
except KeyError:
pass
if not shared.Settings.RELOCATABLE:
global_vars = metadata['externs']
else:
global_vars = [] # linkable code accesses globals through function calls
global_funcs = set(key for key, value in forwarded_json['Functions']['libraryFunctions'].items() if value != 2)
global_funcs = sorted(global_funcs.difference(set(global_vars)).difference(implemented_functions))
if shared.Settings.RELOCATABLE:
global_funcs += ['g$' + extern for extern in metadata['externs']]
global_funcs += ['fp$' + extern for extern in metadata['externFunctions']]
# Tracks the set of used (minified) function names in
# JS symbols imported to asm.js module.
minified_js_names = MinifiedJsNameGenerator()
# Converts list of imports ['foo', 'bar', ...] to a dictionary of
# name mappings in form { 'minified': 'unminified', ... }
def define_asmjs_import_names(imports):
if shared.Settings.MINIFY_ASMJS_IMPORT_NAMES:
return [(minified_js_names.generate(), i) for i in imports]
else:
return [(i, i) for i in imports]
basic_funcs = define_asmjs_import_names(basic_funcs)
global_funcs = define_asmjs_import_names(global_funcs)
basic_vars = define_asmjs_import_names(basic_vars)
global_vars = define_asmjs_import_names(global_vars)
bg_funcs = basic_funcs + global_funcs
bg_vars = basic_vars + global_vars
asm_global_funcs = create_asm_global_funcs(bg_funcs, metadata)
asm_global_vars = create_asm_global_vars(bg_vars)
the_global = create_the_global(metadata)
sending_vars = bg_funcs + bg_vars
sending = OrderedDict([(math_fix(minified), unminified) for (minified, unminified) in sending_vars])
if shared.Settings.WASM:
add_standard_wasm_imports(sending)
sorted_sending_keys = sorted(sending.keys())
sending = '{ ' + ', '.join('"' + k + '": ' + sending[k] for k in sorted_sending_keys) + ' }'
receiving = create_receiving(function_table_data, function_tables_defs,
exported_implemented_functions, metadata['initializers'])
post = apply_table(post)
post = apply_static_code_hooks(post)
if shared.Settings.MINIMAL_RUNTIME:
post, receiving = compute_minimal_runtime_initializer_and_exports(post, metadata['initializers'], [mangled for mangled, unmangled in shared.Settings.MODULE_EXPORTS], receiving)
function_tables_impls = make_function_tables_impls(function_table_data)
final_function_tables = '\n'.join(function_tables_impls) + '\n' + function_tables_defs
if shared.Settings.EMULATED_FUNCTION_POINTERS:
final_function_tables = (
final_function_tables
.replace("asm['", '')
.replace("']", '')
.replace('var SIDE_FUNCTION_TABLE_', 'var FUNCTION_TABLE_')
.replace('var dynCall_', '//')
)
if DEBUG:
logger.debug('asm text sizes' + str([
[len(s) for s in funcs_js], len(asm_setup), len(asm_global_vars), len(asm_global_funcs), len(pre_tables),
len('\n'.join(function_tables_impls)), len(function_tables_defs) + (function_tables_defs.count('\n') * len(' ')),
len(exports), len(the_global), len(sending), len(receiving)]))
logger.debug(' emscript: python processing: function tables and exports took %s seconds' % (time.time() - t))
bundled_args = (funcs_js, asm_setup, the_global, sending, receiving, asm_global_vars,
asm_global_funcs, pre_tables, final_function_tables, exports)
return (post, function_table_data, bundled_args)
def finalize_output(outfile, post, function_table_data, bundled_args, metadata, DEBUG):
function_table_sigs = function_table_data.keys()
module = create_module_asmjs(function_table_sigs, metadata, *bundled_args)
if DEBUG:
logger.debug('emscript: python processing: finalize')
t = time.time()
write_output_file(outfile, post, module)
module = None
if DEBUG:
logger.debug(' emscript: python processing: finalize took %s seconds' % (time.time() - t))
write_cyberdwarf_data(outfile, metadata)
# Given JS code that consists of exactly a series of "var a = ...;\n var b = ...;" statements,
# this function collapses the redundant 'var ' prefixes at the beginning of each line into a
# single 'var a = ..., b = ..., c = ...;' statement.
def collapse_redundant_vars(code):
if shared.Settings.WASM:
return code # Skip if targeting Wasm, this does not matter there
old_code = ''
while code != old_code: # Repeated vars overlap, so this can't be done in one regex pass. Each pass merges adjacent pairs, so it finishes in O(log(N)) passes
old_code = code
code = re.sub(r'(var [^;]*);\s*var ', r'\1,\n ', code)
return code
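# For illustration (not executed):
#   collapse_redundant_vars('var a = 1;\n var b = 2;\n var c = 3;')
# yields
#   'var a = 1,\n b = 2,\n c = 3;'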
def global_initializer_funcs(initializers):
# If we have at most one global ctor, no need to group global initializers.
# Also in EVAL_CTORS mode, we want to try to evaluate the individual ctor functions, so in that mode,
# do not group ctors into one.
return ['globalCtors'] if (len(initializers) > 1 and not shared.Settings.EVAL_CTORS) else initializers
# Each .cpp file with global constructors generates a __GLOBAL__init() function that needs to be
# called to construct the global objects in that compilation unit. This function groups all these
# global initializer functions together into a single globalCtors() function that lives inside the
# asm.js/wasm module, and gets exported out to JS scope to be called at the startup of the application.
def create_global_initializer(initializers):
# If we have no global ctors, don't even generate a dummy empty function to save code space
# Also in EVAL_CTORS mode, we want to try to evaluate the individual ctor functions, so in that mode,
# we do not group ctors into one.
if 'globalCtors' not in global_initializer_funcs(initializers):
return ''
global_initializer = ''' function globalCtors() {
%s
}''' % '\n '.join(i + '();' for i in initializers)
return global_initializer
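# For illustration: with initializers ['__GLOBAL__sub_I_a', '__GLOBAL__sub_I_b']
# (hypothetical names) this returns roughly:
#   function globalCtors() {
#     __GLOBAL__sub_I_a();
#     __GLOBAL__sub_I_b();
#   }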
def create_module_asmjs(function_table_sigs, metadata,
funcs_js, asm_setup, the_global, sending, receiving, asm_global_vars,
asm_global_funcs, pre_tables, final_function_tables, exports):
receiving += create_named_globals(metadata)
runtime_funcs = create_runtime_funcs_asmjs(exports, metadata)
asm_start_pre = create_asm_start_pre(asm_setup, the_global, sending, metadata)
memory_views = create_memory_views(metadata)
asm_temp_vars = create_asm_temp_vars(metadata)
asm_runtime_thread_local_vars = create_asm_runtime_thread_local_vars()
stack = ''
if not shared.Settings.RELOCATABLE and not (shared.Settings.WASM and shared.Settings.SIDE_MODULE):
if 'STACKTOP' in shared.Settings.ASM_PRIMITIVE_VARS:
stack += apply_memory(' var STACKTOP = {{{ STACK_BASE }}};\n')
if 'STACK_MAX' in shared.Settings.ASM_PRIMITIVE_VARS:
stack += apply_memory(' var STACK_MAX = {{{ STACK_MAX }}};\n')
if 'tempFloat' in shared.Settings.ASM_PRIMITIVE_VARS:
temp_float = ' var tempFloat = %s;\n' % ('Math_fround(0)' if provide_fround() else '0.0')
else:
temp_float = ''
async_state = ' var asyncState = 0;\n' if shared.Settings.EMTERPRETIFY_ASYNC else ''
f0_fround = ' const f0 = Math_fround(0);\n' if provide_fround() else ''
replace_memory = create_replace_memory(metadata)
start_funcs_marker = '\n// EMSCRIPTEN_START_FUNCS\n'
asm_end = create_asm_end(exports)
asm_variables = collapse_redundant_vars(memory_views + asm_global_vars + asm_temp_vars + asm_runtime_thread_local_vars + '\n' + asm_global_funcs + stack + temp_float + async_state + f0_fround)
asm_global_initializer = create_global_initializer(metadata['initializers'])
module = [
asm_start_pre,
asm_variables,
replace_memory,
start_funcs_marker,
asm_global_initializer
] + runtime_funcs + funcs_js + [
'\n ',
pre_tables, final_function_tables, asm_end,
'\n', receiving, ';\n'
]
if shared.Settings.SIDE_MODULE:
module.append('''
parentModule['registerFunctions'](%s, Module);
''' % str([str(f) for f in function_table_sigs]))
return module
def write_output_file(outfile, post, module):
for i in range(len(module)): # do this loop carefully to save memory
module[i] = normalize_line_endings(module[i])
outfile.write(module[i])
post = normalize_line_endings(post)
outfile.write(post)
def write_cyberdwarf_data(outfile, metadata):
if not shared.Settings.CYBERDWARF:
return
assert 'cyberdwarf_data' in metadata
cd_file_name = outfile.name + ".cd"
with open(cd_file_name, 'w') as f:
json.dump({'cyberdwarf': metadata['cyberdwarf_data']}, f)
def create_backend_cmd(infile, temp_js):
"""Create asm.js backend command from settings dict"""
args = [
shared.LLVM_COMPILER, infile, '-march=js', '-filetype=asm', '-o', temp_js,
'-emscripten-stack-size=%d' % shared.Settings.TOTAL_STACK,
'-O%s' % shared.Settings.OPT_LEVEL,
]
if shared.Settings.PRECISE_F32:
args += ['-emscripten-precise-f32']
if shared.Settings.USE_PTHREADS:
args += ['-emscripten-enable-pthreads']
if shared.Settings.WARN_UNALIGNED:
args += ['-emscripten-warn-unaligned']
if shared.Settings.RESERVED_FUNCTION_POINTERS > 0:
args += ['-emscripten-reserved-function-pointers=%d' % shared.Settings.RESERVED_FUNCTION_POINTERS]
if shared.Settings.ASSERTIONS > 0:
args += ['-emscripten-assertions=%d' % shared.Settings.ASSERTIONS]
if shared.Settings.ALIASING_FUNCTION_POINTERS == 0:
args += ['-emscripten-no-aliasing-function-pointers']
if shared.Settings.EMULATED_FUNCTION_POINTERS:
args += ['-emscripten-emulated-function-pointers']
if shared.Settings.EMULATE_FUNCTION_POINTER_CASTS:
args += ['-emscripten-emulate-function-pointer-casts']
if shared.Settings.RELOCATABLE:
args += ['-emscripten-relocatable']
args += ['-emscripten-global-base=0']
elif shared.Settings.GLOBAL_BASE >= 0:
args += ['-emscripten-global-base=%d' % shared.Settings.GLOBAL_BASE]
if shared.Settings.SIDE_MODULE:
args += ['-emscripten-side-module']
if shared.Settings.LEGALIZE_JS_FFI != 1:
args += ['-emscripten-legalize-javascript-ffi=0']
if shared.Settings.DISABLE_EXCEPTION_CATCHING != 1:
args += ['-enable-emscripten-cpp-exceptions']
if shared.Settings.DISABLE_EXCEPTION_CATCHING == 2:
args += ['-emscripten-cpp-exceptions-whitelist=' + ','.join(shared.Settings.EXCEPTION_CATCHING_WHITELIST or ['fake'])]
if not shared.Settings.EXIT_RUNTIME:
args += ['-emscripten-no-exit-runtime']
if shared.Settings.WORKAROUND_IOS_9_RIGHT_SHIFT_BUG:
args += ['-emscripten-asmjs-work-around-ios-9-right-shift-bug']
if shared.Settings.WASM:
args += ['-emscripten-wasm']
if shared.Building.is_wasm_only():
args += ['-emscripten-only-wasm']
if shared.Settings.CYBERDWARF:
args += ['-enable-cyberdwarf']
return args
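# For illustration (hypothetical paths): a default -O2 asm.js build yields a
# command roughly like
#   [LLVM_COMPILER, 'input.bc', '-march=js', '-filetype=asm', '-o', 'tmp.js',
#    '-emscripten-stack-size=5242880', '-O2']
# with the optional flags above appended according to the active settings.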
def optimize_syscalls(declares, DEBUG):
"""Disables filesystem if only a limited subset of syscalls is used.
Our syscalls are static, and so if we see a very limited set of them - in particular,
no open() syscall and just simple writing - then we don't need full filesystem support.
If FORCE_FILESYSTEM is set, we can't do this. We also don't do it if INCLUDE_FULL_LIBRARY, since
not including the filesystem would mean not including the full JS libraries, and the same for
MAIN_MODULE since a side module might need the filesystem.
"""
relevant_settings = ['FORCE_FILESYSTEM', 'INCLUDE_FULL_LIBRARY', 'MAIN_MODULE']
if any(shared.Settings[s] for s in relevant_settings):
return
if shared.Settings.FILESYSTEM == 0:
# without filesystem support, it doesn't matter which syscalls are used
shared.Settings.SYSCALLS_REQUIRE_FILESYSTEM = 0
else:
syscall_prefixes = ('__syscall', 'fd_', '__wasi_fd_')
syscalls = [d for d in declares if d.startswith(syscall_prefixes)]
# check if the only filesystem syscalls are in: close, ioctl, llseek, write
# (without open, etc.. nothing substantial can be done, so we can disable
# extra filesystem support in that case)
if set(syscalls).issubset(set([
'__syscall6', '__syscall54', '__syscall140',
'fd_seek', '__wasi_fd_seek',
'fd_write', '__wasi_fd_write',
'fd_close', '__wasi_fd_close',
])):
if DEBUG:
logger.debug('very limited syscalls (%s) so disabling full filesystem support', ', '.join(str(s) for s in syscalls))
shared.Settings.SYSCALLS_REQUIRE_FILESYSTEM = 0
def is_int(x):
try:
int(x)
return True
except ValueError:
return False
def align_memory(addr):
return (addr + 15) & -16
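# For illustration: align_memory(16) == 16 and align_memory(17) == 32, i.e.
# addr is rounded up to the next multiple of 16 (& -16 masks off the low four bits).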
def align_static_bump(metadata):
metadata['staticBump'] = align_memory(metadata['staticBump'])
return metadata['staticBump']
def update_settings_glue(metadata, DEBUG):
optimize_syscalls(metadata['declares'], DEBUG)
if shared.Settings.CYBERDWARF:
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE.append("cyberdwarf_Debugger")
shared.Settings.EXPORTED_FUNCTIONS.append("cyberdwarf_Debugger")
# Integrate info from backend
if shared.Settings.SIDE_MODULE:
# we don't need any JS library contents in side modules
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE = []
if metadata.get('cantValidate') and shared.Settings.ASM_JS != 2:
shared.WarningManager.warn('ALMOST_ASM', 'disabling asm.js validation due to use of non-supported features: ' + metadata['cantValidate'])
shared.Settings.ASM_JS = 2
all_funcs = shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE + [shared.JS.to_nice_ident(d) for d in metadata['declares']]
implemented_funcs = [x[1:] for x in metadata['implementedFunctions']]
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE = sorted(set(all_funcs).difference(implemented_funcs))
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += [x[1:] for x in metadata['externs']]
if metadata['simd']:
shared.Settings.SIMD = 1
if shared.Settings.ASM_JS != 2:
shared.WarningManager.warn('ALMOST_ASM', 'disabling asm.js validation due to use of SIMD')
shared.Settings.ASM_JS = 2
shared.Settings.MAX_GLOBAL_ALIGN = metadata['maxGlobalAlign']
shared.Settings.IMPLEMENTED_FUNCTIONS = metadata['implementedFunctions']
# Extract the list of function signatures that MAIN_THREAD_EM_ASM blocks in
# the compiled code have; each signature will need a proxy function invoker
# generated for it.
def read_proxied_function_signatures(asmConsts):
proxied_function_signatures = set()
for _, sigs, proxying_types in asmConsts.values():
for sig, proxying_type in zip(sigs, proxying_types):
if proxying_type == 'sync_on_main_thread_':
proxied_function_signatures.add(sig + '_sync')
elif proxying_type == 'async_on_main_thread_':
proxied_function_signatures.add(sig + '_async')
return list(proxied_function_signatures)
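# For illustration: a MAIN_THREAD_EM_ASM block of signature 'iii' that is
# proxied synchronously contributes 'iii_sync'; an asynchronous one
# contributes 'iii_async'.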
shared.Settings.PROXIED_FUNCTION_SIGNATURES = read_proxied_function_signatures(metadata['asmConsts'])
shared.Settings.STATIC_BUMP = align_static_bump(metadata)
if shared.Settings.WASM_BACKEND:
shared.Settings.BINARYEN_FEATURES = metadata['features']
shared.Settings.WASM_TABLE_SIZE = metadata['tableSize']
if shared.Settings.RELOCATABLE:
# When building relocatable output (e.g. MAIN_MODULE) the reported table
# size does not include the reserved slot at zero for the null pointer.
# Instead we use __table_base to offset the elements by 1.
shared.Settings.WASM_TABLE_SIZE += 1
shared.Settings.MAIN_READS_PARAMS = metadata['mainReadsParams']
# static code hooks
class StaticCodeHooks:
atinits = []
atmains = []
atexits = []
def apply_static_code_hooks(code):
code = code.replace('{{{ ATINITS }}}', StaticCodeHooks.atinits)
code = code.replace('{{{ ATMAINS }}}', StaticCodeHooks.atmains)
code = code.replace('{{{ ATEXITS }}}', StaticCodeHooks.atexits)
return code
def apply_forwarded_data(forwarded_data):
forwarded_json = json.loads(forwarded_data)
# Be aware of JS static allocations
shared.Settings.STATIC_BUMP = forwarded_json['STATIC_BUMP']
shared.Settings.DYNAMICTOP_PTR = forwarded_json['DYNAMICTOP_PTR']
# Be aware of JS static code hooks
StaticCodeHooks.atinits = str(forwarded_json['ATINITS'])
StaticCodeHooks.atmains = str(forwarded_json['ATMAINS'])
StaticCodeHooks.atexits = str(forwarded_json['ATEXITS'])
def compile_settings(compiler_engine, temp_files):
# Save settings to a file to work around v8 issue 1579
with temp_files.get_file('.txt') as settings_file:
with open(settings_file, 'w') as s:
json.dump(shared.Settings.to_dict(), s, sort_keys=True)
# Call js compiler
env = os.environ.copy()
env['EMCC_BUILD_DIR'] = os.getcwd()
out = jsrun.run_js_tool(path_from_root('src', 'compiler.js'), compiler_engine,
[settings_file], stdout=subprocess.PIPE, stderr=STDERR_FILE,
cwd=path_from_root('src'), env=env)
assert '//FORWARDED_DATA:' in out, 'Did not receive forwarded data in pre output - process failed?'
glue, forwarded_data = out.split('//FORWARDED_DATA:')
apply_forwarded_data(forwarded_data)
return glue, forwarded_data
class Memory():
def __init__(self):
# Note: if RELOCATABLE, then only relative sizes can be computed, and we don't
# actually write out any absolute memory locations ({{{ STACK_BASE }}}
# does not exist, etc.)
# Memory layout:
# * first the static globals
self.global_base = shared.Settings.GLOBAL_BASE
self.static_bump = shared.Settings.STATIC_BUMP
# * then the stack (up on fastcomp, down on upstream)
self.stack_low = align_memory(self.global_base + self.static_bump)
self.stack_high = align_memory(self.stack_low + shared.Settings.TOTAL_STACK)
if shared.Settings.WASM_BACKEND:
self.stack_base = self.stack_high
self.stack_max = self.stack_low
else:
self.stack_base = self.stack_low
self.stack_max = self.stack_high
# * then dynamic memory begins
self.dynamic_base = align_memory(self.stack_high)
if self.dynamic_base >= shared.Settings.TOTAL_MEMORY:
exit_with_error('Memory is not large enough for static data (%d) plus the stack (%d), please increase TOTAL_MEMORY (%d) to at least %d' % (self.static_bump, shared.Settings.TOTAL_STACK, shared.Settings.TOTAL_MEMORY, self.dynamic_base))
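# For illustration (hypothetical numbers): with GLOBAL_BASE=1024,
# STATIC_BUMP=2048 and TOTAL_STACK=5242880, the layout is stack_low=3072,
# stack_high=5245952 and dynamic_base=5245952; on fastcomp the stack grows up
# (stack_base=3072), on the wasm backend it grows down (stack_base=5245952).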
def apply_memory(js):
# Apply the statically-at-compile-time computed memory locations.
memory = Memory()
# Write it all out
js = js.replace('{{{ STATIC_BUMP }}}', str(memory.static_bump))
js = js.replace('{{{ STACK_BASE }}}', str(memory.stack_base))
js = js.replace('{{{ STACK_MAX }}}', str(memory.stack_max))
js = js.replace('{{{ DYNAMIC_BASE }}}', str(memory.dynamic_base))
logger.debug('global_base: %d, stack_base: %d, stack_max: %d, dynamic_base: %d, static bump: %d', memory.global_base, memory.stack_base, memory.stack_max, memory.dynamic_base, memory.static_bump)
shared.Settings.DYNAMIC_BASE = memory.dynamic_base
return js
def apply_table(js):
js = js.replace('{{{ WASM_TABLE_SIZE }}}', str(shared.Settings.WASM_TABLE_SIZE))
return js
def apply_script_source(js):
js = js.replace('{{{ TARGET_BASENAME }}}', shared.Settings.TARGET_BASENAME)
return js
def memory_and_global_initializers(pre, metadata, mem_init):
if shared.Settings.SIMD == 1:
pre = open(path_from_root(os.path.join('src', 'ecmascript_simd.js'))).read() + '\n\n' + pre
staticbump = shared.Settings.STATIC_BUMP
pthread = ''
if shared.Settings.USE_PTHREADS:
pthread = 'if (!ENVIRONMENT_IS_PTHREAD)'
global_initializers = ''
if not shared.Settings.MINIMAL_RUNTIME:
# In the traditional runtime, global initializers are pushed onto the __ATINIT__ array to be processed when the runtime is loaded
# In MINIMAL_RUNTIME, global initializers are invoked directly off of the asm[''] export object, so this does not apply.
global_initializers = global_initializer_funcs(metadata['initializers'])
if len(global_initializers) > 0:
global_initializers = ', '.join('{ func: function() { %s() } }' % i for i in global_initializers)
global_initializers = '/* global initializers */ {pthread} __ATINIT__.push({global_initializers});'.format(pthread=pthread, global_initializers=global_initializers)
else:
global_initializers = '/* global initializers */ /*__ATINIT__.push();*/'
pre = pre.replace('STATICTOP = STATIC_BASE + 0;', '''\
STATICTOP = STATIC_BASE + {staticbump};
{global_initializers}
{mem_init}'''.format(staticbump=staticbump,
global_initializers=global_initializers,
mem_init=mem_init))
if shared.Settings.SIDE_MODULE:
pre = pre.replace('GLOBAL_BASE', 'gb')
pre = apply_memory(pre)
pre = apply_static_code_hooks(pre)
return pre
def get_js_funcs(pre, funcs):
funcs_js = [funcs]
parts = pre.split('// ASM_LIBRARY FUNCTIONS\n')
if len(parts) > 1:
pre = parts[0]
funcs_js.append(parts[1])
return pre, funcs_js
def get_all_exported_functions(function_table_data):
# both asm.js and otherwise
all_exported_functions = set(shared.Settings.EXPORTED_FUNCTIONS)
# additional functions to export from asm, if they are implemented
for additional_export in shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE:
all_exported_functions.add('_' + additional_export)
if shared.Settings.EXPORT_FUNCTION_TABLES:
for table in function_table_data.values():
for func in table.split('[')[1].split(']')[0].split(','):
if func[0] == '_':
all_exported_functions.add(func)
return all_exported_functions
def get_all_implemented(forwarded_json, metadata):
return set(metadata['implementedFunctions']).union(forwarded_json['Functions']['implementedFunctions'])
def report_missing_symbols(all_implemented, pre):
# we are not checking anyway, so just skip this
if not shared.Settings.ERROR_ON_UNDEFINED_SYMBOLS and not shared.Settings.WARN_ON_UNDEFINED_SYMBOLS:
return
# the initial list of missing functions is those the user explicitly exported
# but which were not implemented in compiled code
missing = list(set(shared.Settings.USER_EXPORTED_FUNCTIONS) - all_implemented)
for requested in missing:
if ('function ' + asstr(requested)) in pre:
continue
# special-case malloc, EXPORTED by default for internal use, but we bake in a
# trivial allocator and warn at runtime if used in ASSERTIONS
if requested == '_malloc':
continue
if shared.Settings.ERROR_ON_UNDEFINED_SYMBOLS:
exit_with_error('undefined exported function: "%s"', requested)
elif shared.Settings.WARN_ON_UNDEFINED_SYMBOLS:
shared.warning('undefined exported function: "%s"', requested)
def get_exported_implemented_functions(all_exported_functions, all_implemented, metadata):
funcs = set(metadata['exports'])
export_bindings = shared.Settings.EXPORT_BINDINGS
export_all = shared.Settings.EXPORT_ALL
for key in all_implemented:
if key in all_exported_functions or export_all or (export_bindings and key.startswith('_emscripten_bind')):
funcs.add(key)
if not export_all:
for name, alias in metadata['aliases'].items():
# here we export the aliases; if we don't, the side module
# (which imports the alias) will not be able to reach
# the actual implementation
if alias in all_implemented and name in all_exported_functions:
funcs.add(alias)
funcs = list(funcs) + global_initializer_funcs(metadata['initializers'])
if shared.Settings.ALLOW_MEMORY_GROWTH:
funcs.append('_emscripten_replace_memory')
if not shared.Settings.SIDE_MODULE and not shared.Settings.MINIMAL_RUNTIME:
funcs += ['stackAlloc', 'stackSave', 'stackRestore', 'establishStackSpace']
if shared.Settings.EMTERPRETIFY:
funcs += ['emterpret']
if shared.Settings.EMTERPRETIFY_ASYNC:
funcs += ['setAsyncState', 'emtStackSave', 'emtStackRestore', 'getEmtStackMax', 'setEmtStackMax']
return sorted(set(funcs))
def get_implemented_functions(metadata):
return set(metadata['implementedFunctions'])
def proxy_debug_print(sync):
if shared.Settings.PTHREADS_DEBUG:
if sync:
return 'warnOnce("sync proxying function " + code);'
else:
return 'warnOnce("async proxying function " + code);'
return ''
def include_asm_consts(pre, forwarded_json, metadata):
if shared.Settings.WASM and shared.Settings.SIDE_MODULE:
if metadata['asmConsts']:
exit_with_error('EM_ASM is not yet supported in shared wasm module (it cannot be stored in the wasm itself, need some solution)')
asm_consts, all_sigs = all_asm_consts(metadata)
asm_const_funcs = []
for sig, call_type in all_sigs:
if 'j' in sig:
exit_with_error('emscript: EM_ASM should not receive i64s as inputs, they are not valid in JS')
if '_emscripten_asm_const_' + call_type + sig in forwarded_json['Functions']['libraryFunctions']:
continue # Only one invoker needs to be emitted for each ASM_CONST (signature x call_type) item
forwarded_json['Functions']['libraryFunctions']['_emscripten_asm_const_' + call_type + sig] = 1
args = ['a%d' % i for i in range(len(sig) - 1)]
all_args = ['code'] + args
pre_asm_const = ''
if shared.Settings.USE_PTHREADS:
sync_proxy = call_type == 'sync_on_main_thread_'
async_proxy = call_type == 'async_on_main_thread_'
proxied = sync_proxy or async_proxy
if proxied:
# In proxied function calls, positive integers 1, 2, 3, ... denote pointers
# to regular C compiled functions. Negative integers -1, -2, -3, ... denote
# indices to EM_ASM() blocks, so remap the EM_ASM() indices from 0, 1, 2,
# ... over to the negative integers starting at -1.
proxy_args = ['-1 - code', str(int(sync_proxy))] + args
pre_asm_const += ' if (ENVIRONMENT_IS_PTHREAD) { ' + proxy_debug_print(sync_proxy) + 'return _emscripten_proxy_to_main_thread_js(' + ', '.join(proxy_args) + '); }\n'
if shared.Settings.EMTERPRETIFY_ASYNC and shared.Settings.ASSERTIONS:
# we cannot have an EM_ASM on the stack when saving/loading
pre_asm_const += " assert(typeof EmterpreterAsync !== 'object' || EmterpreterAsync.state !== 2, 'cannot have an EM_ASM on the stack when emterpreter pauses/resumes - the JS is not emterpreted, so we would end up running it again from the start');\n"
asm_const_funcs.append(r'''
function _emscripten_asm_const_%s(%s) {
%s return ASM_CONSTS[code](%s);
}''' % (call_type + asstr(sig), ', '.join(all_args), pre_asm_const, ', '.join(args)))
asm_consts_text = '\nvar ASM_CONSTS = [' + ',\n '.join(asm_consts) + '];\n'
asm_funcs_text = '\n'.join(asm_const_funcs) + '\n'
em_js_funcs = create_em_js(forwarded_json, metadata)
em_js_text = '\n'.join(em_js_funcs) + '\n'
body_marker = '// === Body ==='
return pre.replace(body_marker, body_marker + '\n' + asm_consts_text + asstr(asm_funcs_text) + em_js_text)
# Test if the parentheses at body[openIdx] and body[closeIdx] are a match to
# each other.
def parentheses_match(body, openIdx, closeIdx):
if closeIdx < 0:
closeIdx += len(body)
count = 1
for i in range(openIdx + 1, closeIdx + 1):
if body[i] == body[openIdx]:
count += 1
elif body[i] == body[closeIdx]:
count -= 1
if count <= 0:
return i == closeIdx
return False
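# For illustration: parentheses_match('{ a { b } c }', 0, -1) is True, while
# parentheses_match('{ a } { b }', 0, -1) is False (the first '{' closes
# before the final '}').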
def trim_asm_const_body(body):
body = body.strip()
orig = None
while orig != body:
orig = body
if len(body) > 1 and body[0] == '"' and body[-1] == '"':
body = body[1:-1].replace('\\"', '"').strip()
if len(body) > 1 and body[0] == '{' and body[-1] == '}' and parentheses_match(body, 0, -1):
body = body[1:-1].strip()
if len(body) > 1 and body[0] == '(' and body[-1] == ')' and parentheses_match(body, 0, -1):
body = body[1:-1].strip()
return body
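# For illustration: trim_asm_const_body('"{ return $0 + 1; }"') peels the
# quotes and the balanced outer braces, yielding 'return $0 + 1;'.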
def all_asm_consts(metadata):
asm_consts = [0] * len(metadata['asmConsts'])
all_sigs = []
for k, v in metadata['asmConsts'].items():
const, sigs, call_types = v
const = asstr(const)
const = trim_asm_const_body(const)
const = '{ ' + const + ' }'
args = []
arity = max(len(s) for s in sigs) - 1
for i in range(arity):
args.append('$' + str(i))
const = 'function(' + ', '.join(args) + ') ' + const
asm_consts[int(k)] = const
assert(len(sigs) == len(call_types))
for sig, call_type in zip(sigs, call_types):
all_sigs.append((sig, call_type))
return asm_consts, all_sigs
def unfloat(s):
"""lower float to double for ffis"""
return 'd' if s == 'f' else s
def make_function_tables_defs(implemented_functions, all_implemented, function_table_data, metadata):
class Counter(object):
next_bad_item = 0
next_item = 0
pre = []
in_table = set()
debug_tables = {}
def make_params(sig):
return ','.join('p%d' % p for p in range(len(sig) - 1))
def make_coerced_params(sig):
return ','.join(shared.JS.make_coercion('p%d', unfloat(sig[p + 1])) % p for p in range(len(sig) - 1))
def make_coercions(sig):
return ';'.join('p%d = %s' % (p, shared.JS.make_coercion('p%d' % p, sig[p + 1])) for p in range(len(sig) - 1)) + ';'
# when emulating function pointer casts, we need to know the target of each pointer
if shared.Settings.EMULATE_FUNCTION_POINTER_CASTS and not shared.Settings.WASM:
function_pointer_targets = {}
for sig, table in function_table_data.items():
start = table.index('[')
end = table.rindex(']')
body = table[start + 1:end].split(',')
for i, parsed in enumerate(x.strip() for x in body):
if parsed != '0':
assert i not in function_pointer_targets
function_pointer_targets[i] = [sig, str(parsed)]
def make_table(sig, raw):
if '[]' in raw:
return ('', '') # empty table
params = make_params(sig)
coerced_params = make_coerced_params(sig)
coercions = make_coercions(sig)
def make_bad(target=None):
i = Counter.next_bad_item
Counter.next_bad_item += 1
if target is None:
target = i
name = 'b' + str(i)
if not shared.Settings.ASSERTIONS:
if 'abort' in shared.Settings.RUNTIME_FUNCS_TO_IMPORT:
code = 'abort(%s);' % target
else:
# Advanced use: the developer is generating code that does not include the function 'abort()'. Generate invalid
# function pointers to be no-op passthroughs that silently continue execution.
code = '\n/*execution is supposed to abort here, but you did not include "abort" in RUNTIME_FUNCS_TO_IMPORT (to save code size?). Silently trucking through, enjoy :)*/\n'
else:
code = 'nullFunc_' + sig + '(%d);' % target
if sig[0] != 'v':
code += 'return %s' % shared.JS.make_initializer(sig[0]) + ';'
return name, make_func(name, code, params, coercions)
bad, bad_func = make_bad() # the default bad func
if shared.Settings.ASSERTIONS <= 1:
Counter.pre = [bad_func]
else:
Counter.pre = []
start = raw.index('[')
end = raw.rindex(']')
body = raw[start + 1:end].split(',')
if shared.Settings.EMULATED_FUNCTION_POINTERS:
def receive(item):
if item == '0':
return item
if item not in all_implemented:
# this is not implemented; it would normally be wrapped, but with emulation, we just use it directly outside
return item
in_table.add(item)
return "asm['" + item + "']"
body = [receive(b) for b in body]
for j in range(shared.Settings.RESERVED_FUNCTION_POINTERS):
curr = 'jsCall_%s_%s' % (sig, j)
body[1 + j] = curr
implemented_functions.add(curr)
Counter.next_item = 0
def fix_item(item):
j = Counter.next_item
Counter.next_item += 1
newline = Counter.next_item % 30 == 29
if item == '0':
# emulate all non-null pointer calls, if asked to
if j > 0 and shared.Settings.EMULATE_FUNCTION_POINTER_CASTS and not shared.Settings.WASM and j in function_pointer_targets:
proper_sig, proper_target = function_pointer_targets[j]
if shared.Settings.EMULATED_FUNCTION_POINTERS:
if proper_target in all_implemented:
proper_target = "asm['" + proper_target + "']"
def make_emulated_param(i):
if i >= len(sig):
return shared.JS.make_initializer(proper_sig[i]) # extra param, just send a zero
return shared.JS.make_coercion('p%d' % (i - 1), proper_sig[i], convert_from=sig[i])
proper_code = proper_target + '(' + ','.join([make_emulated_param(i + 1) for i in range(len(proper_sig) - 1)]) + ')'
if proper_sig[0] != 'v':
# proper sig has a return, which the wrapper may or may not use
proper_code = shared.JS.make_coercion(proper_code, proper_sig[0])
if proper_sig[0] != sig[0]:
# first coercion ensured we call the target ok; this one ensures we return the right type in the wrapper
proper_code = shared.JS.make_coercion(proper_code, sig[0], convert_from=proper_sig[0])
if sig[0] != 'v':
proper_code = 'return ' + proper_code
else:
# proper sig has no return, we may need a fake return
if sig[0] != 'v':
proper_code = 'return ' + shared.JS.make_initializer(sig[0])
name = 'fpemu_%s_%d' % (sig, j)
wrapper = make_func(name, proper_code, params, coercions)
Counter.pre.append(wrapper)
return name if not newline else (name + '\n')
if shared.Settings.ASSERTIONS <= 1:
return bad if not newline else (bad + '\n')
specific_bad, specific_bad_func = make_bad(j)
Counter.pre.append(specific_bad_func)
return specific_bad if not newline else (specific_bad + '\n')
clean_item = item.replace("asm['", '').replace("']", '')
# when emulating function pointers we don't need wrappers, unless also
# relocating (then we have the in-module copies and do need them);
# in wasm we never need wrappers
if clean_item not in implemented_functions and not (shared.Settings.EMULATED_FUNCTION_POINTERS and not shared.Settings.RELOCATABLE) and not shared.Settings.WASM:
# this is imported into asm, we must wrap it
call_ident = clean_item
if call_ident in metadata['redirects']:
call_ident = metadata['redirects'][call_ident]
if not call_ident.startswith('_') and not call_ident.startswith('Math_'):
call_ident = '_' + call_ident
code = call_ident + '(' + coerced_params + ')'
if sig[0] != 'v':
# ffis cannot return float
if sig[0] == 'f':
code = '+' + code
code = 'return ' + shared.JS.make_coercion(code, sig[0])
code += ';'
Counter.pre.append(make_func(clean_item + '__wrapper', code, params, coercions))
assert sig != 'X', 'must know the signature in order to create a wrapper for "%s" (TODO for shared wasm modules)' % item
return clean_item + '__wrapper'
return item if not newline else (item + '\n')
if shared.Settings.ASSERTIONS >= 2:
debug_tables[sig] = body
body = ','.join(fix_item(b) for b in body)
return ('\n'.join(Counter.pre), ''.join([raw[:start + 1], body, raw[end:]]))
infos = [make_table(sig, raw) for sig, raw in function_table_data.items()]
Counter.pre = []
function_tables_defs = '\n'.join([info[0] for info in infos]) + '\n'
function_tables_defs += '\n// EMSCRIPTEN_END_FUNCS\n'
function_tables_defs += '\n'.join([info[1] for info in infos])
return in_table, debug_tables, function_tables_defs
def make_func(name, code, params, coercions):
return 'function %s(%s) {\n %s %s\n}' % (name, params, coercions, code)
def math_fix(g):
return g if not g.startswith('Math_') else g.split('_')[1]
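# For illustration: math_fix('Math_floor') == 'floor', while math_fix('abort')
# is returned unchanged.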
# asm.js function tables have one table in each linked asm.js module, so we
# can't just dynCall into them - ftCall exists for that purpose. In wasm,
# even linked modules share the table, so it's all fine.
def asm_js_emulated_function_pointers():
return shared.Settings.EMULATED_FUNCTION_POINTERS and not shared.Settings.WASM
def make_function_tables_impls(function_table_data):
function_tables_impls = []
for sig, table in function_table_data.items():
args = ','.join(['a' + str(i) for i in range(1, len(sig))])
arg_coercions = ' '.join(['a' + str(i) + '=' + shared.JS.make_coercion('a' + str(i), sig[i]) + ';' for i in range(1, len(sig))])
coerced_args = ','.join([shared.JS.make_coercion('a' + str(i), sig[i]) for i in range(1, len(sig))])
sig_mask = str(table.count(','))
if not (shared.Settings.WASM and shared.Settings.EMULATED_FUNCTION_POINTERS):
ret = 'FUNCTION_TABLE_%s[index&%s](%s)' % (sig, sig_mask, coerced_args)
else:
# for wasm with emulated function pointers, emit an mftCall_<sig>(..) call; we avoid asm.js function tables there.
ret = 'mftCall_%s(index%s%s)' % (sig, ',' if len(sig) > 1 else '', coerced_args)
ret = ('return ' if sig[0] != 'v' else '') + shared.JS.make_coercion(ret, sig[0])
if not asm_js_emulated_function_pointers():
function_tables_impls.append('''
function dynCall_%s(index%s%s) {
index = index|0;
%s
%s;
}
''' % (sig, ',' if len(sig) > 1 else '', args, arg_coercions, ret))
else:
function_tables_impls.append('''
var dynCall_%s = ftCall_%s;
''' % (sig, sig))
ffi_args = ','.join([shared.JS.make_coercion('a' + str(i), sig[i], ffi_arg=True) for i in range(1, len(sig))])
for i in range(shared.Settings.RESERVED_FUNCTION_POINTERS):
jsret = ('return ' if sig[0] != 'v' else '') + shared.JS.make_coercion('jsCall_%s(%d%s%s)' % (sig, i, ',' if ffi_args else '', ffi_args), sig[0], ffi_result=True)
function_tables_impls.append('''
function jsCall_%s_%s(%s) {
%s
%s;
}
''' % (sig, i, args, arg_coercions, jsret))
return function_tables_impls
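# For illustration: for signature 'ii' without emulated function pointers the
# loop above emits roughly
#   function dynCall_ii(index,a1) {
#     index = index|0;
#     a1=a1|0;
#     return FUNCTION_TABLE_ii[index&MASK](a1|0)|0;
#   }
# where MASK is the table size minus one.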
def create_mftCall_funcs(function_table_data):
if not asm_js_emulated_function_pointers():
return []
if shared.Settings.WASM or not shared.Settings.RELOCATABLE:
return []
mftCall_funcs = []
# (in wasm, emulated function pointers are just simple table calls, so this path only applies to relocatable asm.js)
for sig, table in function_table_data.items():
return_type, sig_args = sig[0], sig[1:]
num_args = len(sig_args)
params = ','.join(['ptr'] + ['p%d' % i for i in range(num_args)])
coerced_params = ','.join([shared.JS.make_coercion('ptr', 'i')] + [shared.JS.make_coercion('p%d' % i, unfloat(sig_args[i])) for i in range(num_args)])
coercions = ';'.join(['ptr = ptr | 0'] + ['p%d = %s' % (i, shared.JS.make_coercion('p%d' % i, unfloat(sig_args[i]))) for i in range(num_args)]) + ';'
mini_coerced_params = ','.join([shared.JS.make_coercion('p%d' % i, sig_args[i]) for i in range(num_args)])
maybe_return = '' if return_type == 'v' else 'return'
final_return = maybe_return + ' ' + shared.JS.make_coercion('ftCall_' + sig + '(' + coerced_params + ')', unfloat(return_type)) + ';'
if shared.Settings.EMULATED_FUNCTION_POINTERS == 1:
body = final_return
else:
sig_mask = str(table.count(','))
body = ('if (((ptr|0) >= (fb|0)) & ((ptr|0) < (fb + ' + sig_mask + ' | 0))) { ' + maybe_return + ' ' +
shared.JS.make_coercion(
'FUNCTION_TABLE_' + sig + '[(ptr-fb)&' + sig_mask + '](' +
mini_coerced_params + ')', return_type, ffi_arg=True
) + '; ' + ('return;' if return_type == 'v' else '') + ' }' + final_return)
mftCall_funcs.append(make_func('mftCall_' + sig, body, params, coercions) + '\n')
return mftCall_funcs
def get_function_pointer_error(sig, function_table_sigs):
if shared.Settings.ASSERTIONS == 0:
# Release build: do the most minimal sized abort possible
return "abort();"
else:
# ASSERTIONS-enabled build, identify the pointer and the failing signature.
return "abortFnPtrError(x, '" + sig + "');"
def signature_sort_key(sig):
def closure(other):
ret = 0
minlen = min(len(other), len(sig))
maxlen = max(len(other), len(sig))
if other.startswith(sig) or sig.startswith(other):
ret -= 1000 # prioritize prefixes, could be dropped params
ret -= 133 * difflib.SequenceMatcher(a=other, b=sig).ratio() # prioritize on diff similarity
ret += 15 * abs(len(other) - len(sig)) / float(maxlen) # deprioritize the bigger the length difference is
for i in range(minlen):
if other[i] == sig[i]:
ret -= 5 / float(maxlen) # prioritize on identically-placed params
ret += 20 * len(other) # deprioritize on length
return ret
return closure
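# For illustration: sorting candidates with key=signature_sort_key('vii')
# ranks prefix-related signatures such as 'vi' ahead of unrelated ones such
# as 'ii', since prefixes get the large -1000 bonus.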
def asm_backend_uses(metadata, symbol):
# If doing dynamic linking, we should generate the full set of runtime primitives, since we cannot know ahead
# of time what the dynamically linked in modules will need. Also with SAFE_HEAP and Emterpretify, generate the full set of views.
if shared.Settings.MAIN_MODULE or shared.Settings.SIDE_MODULE or shared.Settings.SAFE_HEAP or shared.Settings.EMTERPRETIFY:
return True
# Allow querying asm_backend_uses(metadata, 'Math.') to find if any of the Math objects are used
if symbol.endswith('.'):
return any(e.startswith(symbol) for e in metadata['externUses'])
else:
# Querying a single symbol
return symbol in metadata['externUses']
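# For illustration: asm_backend_uses(metadata, 'Math.') asks whether any
# Math.* symbol appears in metadata['externUses'], while
# asm_backend_uses(metadata, 'Math.fround') is an exact membership test.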
def create_asm_global_funcs(bg_funcs, metadata):
maths = ['Math.' + func for func in ['floor', 'abs', 'sqrt', 'pow', 'cos', 'sin', 'tan', 'acos', 'asin', 'atan', 'atan2', 'exp', 'log', 'ceil', 'imul', 'min', 'max', 'clz32']]
if provide_fround():
maths += ['Math.fround']
asm_global_funcs = ''
for math in maths:
if asm_backend_uses(metadata, math):
asm_global_funcs += ' var ' + math.replace('.', '_') + '=global' + access_quote(math) + ';\n'
asm_global_funcs += ''.join([' var ' + unminified + '=env' + access_quote(math_fix(minified)) + ';\n' for (minified, unminified) in bg_funcs])
asm_global_funcs += global_simd_funcs(access_quote, metadata)
if shared.Settings.USE_PTHREADS:
asm_global_funcs += ''.join([' var Atomics_' + ty + '=global' + access_quote('Atomics') + access_quote(ty) + ';\n' for ty in ['load', 'store', 'exchange', 'compareExchange', 'add', 'sub', 'and', 'or', 'xor']])
return asm_global_funcs
def create_asm_global_vars(bg_vars):
asm_global_vars = ''.join([' var ' + unminified + '=env' + access_quote(minified) + '|0;\n' for (minified, unminified) in bg_vars])
if shared.Settings.WASM and shared.Settings.SIDE_MODULE:
# wasm side modules internally define their stack, these are set at module startup time
asm_global_vars += '\n var STACKTOP = 0, STACK_MAX = 0;\n'
return asm_global_vars
def global_simd_funcs(access_quote, metadata):
# Always import SIMD when building with -s SIMD=1, since in that mode memcpy is SIMD optimized.
if not (metadata['simd'] or shared.Settings.SIMD):
return ''
def string_contains_any(s, str_list):
return any(sub in s for sub in str_list)
nonexisting_simd_symbols = ['Int8x16_fromInt8x16', 'Uint8x16_fromUint8x16', 'Int16x8_fromInt16x8', 'Uint16x8_fromUint16x8', 'Int32x4_fromInt32x4', 'Uint32x4_fromUint32x4', 'Float32x4_fromFloat32x4', 'Float64x2_fromFloat64x2']
nonexisting_simd_symbols += ['Int32x4_addSaturate', 'Int32x4_subSaturate', 'Uint32x4_addSaturate', 'Uint32x4_subSaturate']
nonexisting_simd_symbols += [(x + '_' + y) for x in ['Int8x16', 'Uint8x16', 'Int16x8', 'Uint16x8', 'Float64x2'] for y in ['load2', 'store2']]
nonexisting_simd_symbols += [(x + '_' + y) for x in ['Int8x16', 'Uint8x16', 'Int16x8', 'Uint16x8'] for y in ['load1', 'store1']]
simd = make_simd_types(metadata)
simd_func_text = ''
simd_func_text += ''.join([' var SIMD_' + ty + '=global' + access_quote('SIMD') + access_quote(ty) + ';\n' for ty in simd['types']])
def generate_symbols(types, funcs):
symbols = [' var SIMD_' + ty + '_' + g + '=SIMD_' + ty + access_quote(g) + ';\n' for ty in types for g in funcs]
symbols = [x for x in symbols if not string_contains_any(x, nonexisting_simd_symbols)]
return ''.join(symbols)
simd_func_text += generate_symbols(simd['int_types'], simd['int_funcs'])
simd_func_text += generate_symbols(simd['float_types'], simd['float_funcs'])
simd_func_text += generate_symbols(simd['bool_types'], simd['bool_funcs'])
# SIMD conversions (not bitcasts) between same lane sizes:
def add_simd_cast(dst, src):
return ' var SIMD_' + dst + '_from' + src + '=SIMD_' + dst + '.from' + src + ';\n'
def add_simd_casts(t1, t2):
return add_simd_cast(t1, t2) + add_simd_cast(t2, t1)
# Bug: Skip importing conversions for int<->uint for now, they don't validate
# as asm.js. https://bugzilla.mozilla.org/show_bug.cgi?id=1313512
# This is not an issue when building SSEx code, because it doesn't use these.
# (but it will be an issue if using SIMD.js intrinsics from vector.h to
# explicitly call these)
# if metadata['simdInt8x16'] and metadata['simdUint8x16']:
# simd_func_text += add_simd_casts('Int8x16', 'Uint8x16')
# if metadata['simdInt16x8'] and metadata['simdUint16x8']:
# simd_func_text += add_simd_casts('Int16x8', 'Uint16x8')
# if metadata['simdInt32x4'] and metadata['simdUint32x4']:
# simd_func_text += add_simd_casts('Int32x4', 'Uint32x4')
if metadata['simdInt32x4'] and metadata['simdFloat32x4']:
simd_func_text += add_simd_casts('Int32x4', 'Float32x4')
if metadata['simdUint32x4'] and metadata['simdFloat32x4']:
simd_func_text += add_simd_casts('Uint32x4', 'Float32x4')
if metadata['simdInt32x4'] and metadata['simdFloat64x2']:
simd_func_text += add_simd_cast('Int32x4', 'Float64x2') # Unofficial, needed for emscripten_int32x4_fromFloat64x2
if metadata['simdUint32x4'] and metadata['simdFloat64x2']:
simd_func_text += add_simd_cast('Uint32x4', 'Float64x2') # Unofficial, needed for emscripten_uint32x4_fromFloat64x2
# Unofficial, Bool64x2 does not yet exist, but needed for Float64x2 comparisons.
if metadata['simdFloat64x2']:
simd_func_text += ' var SIMD_Int32x4_fromBool64x2Bits = global.SIMD.Int32x4.fromBool64x2Bits;\n'
return simd_func_text
def make_simd_types(metadata):
simd_float_types = []
simd_int_types = []
simd_bool_types = []
simd_funcs = ['splat', 'check', 'extractLane', 'replaceLane']
simd_intfloat_funcs = ['add', 'sub', 'neg', 'mul',
'equal', 'lessThan', 'greaterThan',
'notEqual', 'lessThanOrEqual', 'greaterThanOrEqual',
'select', 'swizzle', 'shuffle',
'load', 'store', 'load1', 'store1', 'load2', 'store2']
simd_intbool_funcs = ['and', 'xor', 'or', 'not']
if metadata['simdUint8x16']:
simd_int_types += ['Uint8x16']
simd_intfloat_funcs += ['fromUint8x16Bits']
if metadata['simdInt8x16']:
simd_int_types += ['Int8x16']
simd_intfloat_funcs += ['fromInt8x16Bits']
if metadata['simdUint16x8']:
simd_int_types += ['Uint16x8']
simd_intfloat_funcs += ['fromUint16x8Bits']
if metadata['simdInt16x8']:
simd_int_types += ['Int16x8']
simd_intfloat_funcs += ['fromInt16x8Bits']
if metadata['simdUint32x4']:
simd_int_types += ['Uint32x4']
simd_intfloat_funcs += ['fromUint32x4Bits']
if metadata['simdInt32x4'] or shared.Settings.SIMD:
# Always import Int32x4 when building with -s SIMD=1, since memcpy is SIMD optimized.
simd_int_types += ['Int32x4']
simd_intfloat_funcs += ['fromInt32x4Bits']
if metadata['simdFloat32x4']:
simd_float_types += ['Float32x4']
simd_intfloat_funcs += ['fromFloat32x4Bits']
if metadata['simdFloat64x2']:
simd_float_types += ['Float64x2']
simd_intfloat_funcs += ['fromFloat64x2Bits']
if metadata['simdBool8x16']:
simd_bool_types += ['Bool8x16']
if metadata['simdBool16x8']:
simd_bool_types += ['Bool16x8']
if metadata['simdBool32x4']:
simd_bool_types += ['Bool32x4']
if metadata['simdBool64x2']:
simd_bool_types += ['Bool64x2']
simd_float_funcs = simd_funcs + simd_intfloat_funcs + ['div', 'min', 'max', 'minNum', 'maxNum', 'sqrt',
'abs', 'reciprocalApproximation', 'reciprocalSqrtApproximation']
simd_int_funcs = simd_funcs + simd_intfloat_funcs + simd_intbool_funcs + ['shiftLeftByScalar', 'shiftRightByScalar', 'addSaturate', 'subSaturate']
simd_bool_funcs = simd_funcs + simd_intbool_funcs + ['anyTrue', 'allTrue']
simd_types = simd_float_types + simd_int_types + simd_bool_types
return {
'types': simd_types,
'float_types': simd_float_types,
'int_types': simd_int_types,
'bool_types': simd_bool_types,
'funcs': simd_funcs,
'float_funcs': simd_float_funcs,
'int_funcs': simd_int_funcs,
'bool_funcs': simd_bool_funcs,
'intfloat_funcs': simd_intfloat_funcs,
'intbool_funcs': simd_intbool_funcs,
}
def asm_safe_heap():
"""optimized safe heap in asm, when we can"""
return shared.Settings.SAFE_HEAP and not shared.Settings.SAFE_HEAP_LOG and not shared.Settings.RELOCATABLE
def provide_fround():
return shared.Settings.PRECISE_F32 or shared.Settings.SIMD
def create_asm_setup(debug_tables, function_table_data, invoke_function_names, metadata):
function_table_sigs = function_table_data.keys()
asm_setup = ''
if shared.Settings.ASSERTIONS >= 2:
debug_tables_map = 'var debug_tables = {\n'
for sig in function_table_data:
# if the table is empty, debug_tables will not contain it
body = debug_tables.get(sig, [])
asm_setup += 'var debug_table_' + sig + ' = [' + ','.join(['0' if x == '0' else "'" + x.replace("'", '"') + "'" for x in body]) + '];\n'
debug_tables_map += " '" + sig + "': debug_table_" + sig + ',\n'
asm_setup += debug_tables_map + '};\n'
if shared.Settings.ASSERTIONS:
for sig in function_table_sigs:
asm_setup += 'function nullFunc_' + sig + '(x) { ' + get_function_pointer_error(sig, function_table_sigs) + ' }\n'
if shared.Settings.RELOCATABLE:
if not shared.Settings.SIDE_MODULE:
asm_setup += 'var gb = GLOBAL_BASE, fb = 0;\n'
side = 'parent' if shared.Settings.SIDE_MODULE else ''
def check(extern):
if shared.Settings.ASSERTIONS:
return ('\n assert(%sModule["%s"] || %s, "external symbol `%s` is missing. ' % (side, extern, extern, extern) +
'perhaps a side module was not linked in? if this symbol was expected to arrive '
'from a system library, try to build the MAIN_MODULE with '
'EMCC_FORCE_STDLIBS=1 in the environment");')
return ''
for extern in metadata['externs']:
asm_setup += 'var g$' + extern + ' = function() {' + check(extern) + '\n return ' + side + 'Module["' + extern + '"];\n}\n'
for extern in metadata['externFunctions']:
barename, sig = extern.split('$')
fullname = "fp$" + extern
key = '%sModule["%s"]' % (side, fullname)
asm_setup += '''\
var %s = function() {
if (!%s) { %s
var fid = addFunction(%sModule["%s"] || %s, "%s");
%s = fid;
}
return %s;
}
''' % (fullname, key, check(barename), side, barename, barename, sig, key, key)
asm_setup += create_invoke_wrappers(invoke_function_names)
asm_setup += setup_function_pointers(function_table_sigs)
if shared.Settings.EMULATED_FUNCTION_POINTERS:
function_tables_impls = make_function_tables_impls(function_table_data)
asm_setup += '\n' + '\n'.join(function_tables_impls) + '\n'
return asm_setup
def setup_function_pointers(function_table_sigs):
asm_setup = ''
for sig in function_table_sigs:
if shared.Settings.RESERVED_FUNCTION_POINTERS:
asm_setup += '\n' + shared.JS.make_jscall(sig) + '\n'
# nothing special to do here for wasm, we just use dynCalls
if not shared.Settings.WASM:
if shared.Settings.EMULATED_FUNCTION_POINTERS:
args = ['a%d' % i for i in range(len(sig) - 1)]
full_args = ['x'] + args
table_access = 'FUNCTION_TABLE_' + sig
if shared.Settings.SIDE_MODULE:
table_access = 'parentModule["' + table_access + '"]' # side module tables were merged into the parent, we need to access the global one
table_read = table_access + '[x]'
prelude = ''
if shared.Settings.ASSERTIONS:
prelude = '''
if (x < 0 || x >= %s.length) { err("Function table mask error (out of range)"); %s ; abort(x) }''' % (table_access, get_function_pointer_error(sig, function_table_sigs))
asm_setup += '''
function ftCall_%s(%s) {%s
return %s(%s);
}
''' % (sig, ', '.join(full_args), prelude, table_read, ', '.join(args))
return asm_setup
def create_basic_funcs(function_table_sigs, invoke_function_names):
basic_funcs = shared.Settings.RUNTIME_FUNCS_TO_IMPORT
if shared.Settings.STACK_OVERFLOW_CHECK and not shared.Settings.MINIMAL_RUNTIME:
basic_funcs += ['abortStackOverflow']
if shared.Settings.EMTERPRETIFY:
basic_funcs += ['abortStackOverflowEmterpreter']
if shared.Settings.SAFE_HEAP:
if asm_safe_heap():
basic_funcs += ['segfault', 'alignfault', 'ftfault']
else:
# Binaryen generates calls to these two so they are always needed with wasm
if shared.Settings.WASM:
basic_funcs += ['segfault', 'alignfault']
basic_funcs += ['SAFE_HEAP_LOAD', 'SAFE_HEAP_LOAD_D', 'SAFE_HEAP_STORE', 'SAFE_HEAP_STORE_D', 'SAFE_FT_MASK']
if shared.Settings.ASSERTIONS:
for sig in function_table_sigs:
basic_funcs += ['nullFunc_' + sig]
basic_funcs += invoke_function_names
for sig in function_table_sigs:
if shared.Settings.RESERVED_FUNCTION_POINTERS:
basic_funcs.append('jsCall_%s' % sig)
if asm_js_emulated_function_pointers():
basic_funcs.append('ftCall_%s' % sig)
return basic_funcs
def create_basic_vars(exported_implemented_functions, forwarded_json, metadata):
basic_vars = []
if 'tempDoublePtr' in shared.Settings.ASM_PRIMITIVE_VARS:
basic_vars += ['tempDoublePtr']
if shared.Settings.RELOCATABLE:
if not (shared.Settings.WASM and shared.Settings.SIDE_MODULE):
basic_vars += ['gb', 'fb', 'STACKTOP', 'STACK_MAX']
else:
# wasm side modules have a specific convention for these
basic_vars += ['__memory_base', '__table_base']
if shared.Settings.EMTERPRETIFY:
basic_vars += ['EMTSTACKTOP', 'EMT_STACK_MAX', 'eb']
return basic_vars
def create_exports(exported_implemented_functions, in_table, function_table_data, metadata):
asm_runtime_funcs = create_asm_runtime_funcs()
all_exported = exported_implemented_functions + asm_runtime_funcs + function_tables(function_table_data)
# In asm.js + emulated function pointers, export all the table because we use
# JS to add the asm.js module's functions to the table (which is external
# in this mode). In wasm, we don't need that since wasm modules can
# directly add functions to the imported Table.
if not shared.Settings.WASM and shared.Settings.EMULATED_FUNCTION_POINTERS:
all_exported += in_table
exports = []
for export in sorted(set(all_exported)):
exports.append(quote(export) + ": " + export)
if shared.Settings.WASM and shared.Settings.SIDE_MODULE:
# named globals in side wasm modules are exported globals from asm/wasm
for k, v in metadata['namedGlobals'].items():
exports.append(quote('_' + str(k)) + ': ' + str(v))
# aliases become additional exports
for k, v in metadata['aliases'].items():
exports.append(quote(str(k)) + ': ' + str(v))
# shared wasm emulated function pointer mode requires us to know the function pointer for
# each function. export fp$func => function pointer for func
if shared.Settings.WASM and shared.Settings.RELOCATABLE and shared.Settings.EMULATE_FUNCTION_POINTER_CASTS:
for k, v in metadata['functionPointers'].items():
exports.append(quote('fp$' + str(k)) + ': ' + str(v))
return '{ ' + ', '.join(exports) + ' }'
def create_asm_runtime_funcs():
funcs = []
if not (shared.Settings.WASM and shared.Settings.SIDE_MODULE) and not shared.Settings.MINIMAL_RUNTIME:
funcs += ['stackAlloc', 'stackSave', 'stackRestore', 'establishStackSpace']
return funcs
def function_tables(function_table_data):
if not asm_js_emulated_function_pointers():
return ['dynCall_' + table for table in function_table_data]
else:
return []
def create_the_global(metadata):
# the global is only needed for asm.js
if shared.Settings.WASM:
return '{}'
fundamentals = []
if asm_backend_uses(metadata, 'Math.'):
fundamentals += ['Math']
for f in ['Int8Array', 'Int16Array', 'Int32Array', 'Uint8Array', 'Uint16Array', 'Uint32Array', 'Float32Array', 'Float64Array', 'NaN', 'Infinity']:
if asm_backend_uses(metadata, f):
fundamentals += [f]
if metadata['simd'] or shared.Settings.SIMD:
# Always import SIMD when building with -s SIMD=1, since in that mode memcpy is SIMD optimized.
fundamentals += ['SIMD']
return '{ ' + ', '.join(['"' + math_fix(s) + '": ' + s for s in fundamentals]) + ' }'
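# For illustration: a typical asm.js build returns text roughly like
#   { "Math": Math, "Int8Array": Int8Array, "Float64Array": Float64Array, "NaN": NaN, "Infinity": Infinity }
# which becomes the 'global' argument passed to the asm.js module.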
RUNTIME_ASSERTIONS = '''
assert(runtimeInitialized, 'you need to wait for the runtime to be ready (e.g. wait for main() to be called)');
assert(!runtimeExited, 'the runtime was exited (use NO_EXIT_RUNTIME to keep it alive after main() exits)');'''
def create_receiving(function_table_data, function_tables_defs, exported_implemented_functions, initializers):
receiving = ''
if not shared.Settings.ASSERTIONS or shared.Settings.MINIMAL_RUNTIME:
runtime_assertions = ''
else:
runtime_assertions = RUNTIME_ASSERTIONS
# assert on the runtime being in a valid state when calling into compiled code. The only exceptions are some support code.
# WASM=1 already inserts runtime assertions, so no need to do it again here (see create_receiving_wasm)
if not shared.Settings.WASM:
receiving_functions = [f for f in exported_implemented_functions if f not in ('_memcpy', '_memset', '_emscripten_replace_memory', '__start_module')]
wrappers = []
for name in receiving_functions:
wrappers.append('''\
var real_%(name)s = asm["%(name)s"];
asm["%(name)s"] = function() {%(runtime_assertions)s
return real_%(name)s.apply(null, arguments);
};
''' % {'name': name, 'runtime_assertions': runtime_assertions})
receiving = '\n'.join(wrappers)
module_exports = exported_implemented_functions + function_tables(function_table_data)
shared.Settings.MODULE_EXPORTS = [(f, f) for f in module_exports]
if not shared.Settings.SWAPPABLE_ASM_MODULE:
if shared.Settings.DECLARE_ASM_MODULE_EXPORTS:
imported_exports = [s for s in module_exports if s not in initializers]
if shared.Settings.WASM and shared.Settings.MINIMAL_RUNTIME:
# In Wasm exports are assigned inside a function to variables existing in top level JS scope, i.e.
# var _main;
# WebAssembly.instantiate(Module["wasm"], imports).then((function(output) {
# var asm = output.instance.exports;
# _main = asm["_main"];
receiving += '\n'.join([s + ' = asm["' + s + '"];' for s in imported_exports]) + '\n'
else:
if shared.Settings.MINIMAL_RUNTIME:
# In asm.js exports can be directly processed at top level, i.e.
# var asm = Module["asm"](asmGlobalArg, asmLibraryArg, buffer);
# var _main = asm["_main"];
receiving += '\n'.join(['var ' + s + ' = asm["' + s + '"];' for s in imported_exports]) + '\n'
else:
receiving += '\n'.join(['var ' + s + ' = Module["' + s + '"] = asm["' + s + '"];' for s in module_exports]) + '\n'
else:
if shared.Settings.target_environment_may_be('node') and shared.Settings.target_environment_may_be('web'):
global_object = '(typeof process !== "undefined" ? global : this)'
elif shared.Settings.target_environment_may_be('node'):
global_object = 'global'
else:
global_object = 'this'
if shared.Settings.MINIMAL_RUNTIME:
module_assign = ''
else:
module_assign = 'Module[__exportedFunc] = '
receiving += 'for(var __exportedFunc in asm) ' + global_object + '[__exportedFunc] = ' + module_assign + 'asm[__exportedFunc];\n'
else:
receiving += 'Module["asm"] = asm;\n'
wrappers = []
for name in module_exports:
wrappers.append('''\
var %(name)s = Module["%(name)s"] = function() {%(runtime_assertions)s
return Module["asm"]["%(name)s"].apply(null, arguments)
};
''' % {'name': name, 'runtime_assertions': runtime_assertions})
receiving += '\n'.join(wrappers)
if shared.Settings.EXPORT_FUNCTION_TABLES and not shared.Settings.WASM:
for table in function_table_data.values():
tableName = table.split()[1]
table = table.replace('var ' + tableName, 'var ' + tableName + ' = Module["' + tableName + '"]')
receiving += table + '\n'
if shared.Settings.EMULATED_FUNCTION_POINTERS:
# in asm.js emulated function tables, emit the table on the outside, where
# JS can manage it (for wasm, a native wasm Table is used directly, and we
# don't need this)
if not shared.Settings.WASM:
receiving += '\n' + function_tables_defs.replace('// EMSCRIPTEN_END_FUNCS\n', '')
# wasm still needs definitions for dyncalls on the outside, for JS
receiving += '\n' + ''.join(['Module["dynCall_%s"] = dynCall_%s\n' % (sig, sig) for sig in function_table_data])
if not shared.Settings.WASM:
for sig in function_table_data.keys():
name = 'FUNCTION_TABLE_' + sig
fullname = name if not shared.Settings.SIDE_MODULE else ('SIDE_' + name)
receiving += 'Module["' + name + '"] = ' + fullname + ';\n'
return receiving
def create_fp_accessors(metadata):
if not shared.Settings.RELOCATABLE:
return ''
# Create `fp$XXX` handlers for determining function pointers (table addresses)
# at runtime.
# For SIDE_MODULEs these are generated by the proxyHandler at runtime.
accessors = []
for fullname in metadata['declares']:
if not fullname.startswith('fp$'):
continue
_, name, sig = fullname.split('$')
mangled = asmjs_mangle(name)
side = 'parent' if shared.Settings.SIDE_MODULE else ''
assertion = ('\n assert(%sModule["%s"] || typeof %s !== "undefined", "external function `%s` is missing. ' % (side, mangled, mangled, name) +
'perhaps a side module was not linked in? if this symbol was expected to arrive '
'from a system library, try to build the MAIN_MODULE with '
'EMCC_FORCE_STDLIBS=XX in the environment");')
# the name of the original function is generally the normal function
# name, unless it is legalized, in which case the export is the legalized
# version and the original is provided by orig$X
if shared.Settings.LEGALIZE_JS_FFI and not shared.JS.is_legal_sig(sig):
name = 'orig$' + name
accessors.append('''
Module['%(full)s'] = function() {
%(assert)s
// Use the wasm function itself, for the table.
var func = Module['asm']['%(original)s'];
// If there is no wasm function, this may be a JS library function or
// something from another module.
if (!func) func = Module['%(mangled)s'];
if (!func) func = %(mangled)s;
var fp = addFunction(func, '%(sig)s');
Module['%(full)s'] = function() { return fp };
return fp;
}
''' % {'full': asmjs_mangle(fullname), 'mangled': mangled, 'original': name, 'assert': assertion, 'sig': sig})
return '\n'.join(accessors)
def create_named_globals(metadata):
if not shared.Settings.RELOCATABLE:
return ''
named_globals = '''
var NAMED_GLOBALS = {
%s
};
for (var named in NAMED_GLOBALS) {
Module['_' + named] = gb + NAMED_GLOBALS[named];
}
Module['NAMED_GLOBALS'] = NAMED_GLOBALS;
''' % ',\n '.join('"' + k + '": ' + str(v) for k, v in metadata['namedGlobals'].items())
if shared.Settings.WASM:
# wasm side modules are pure wasm, and cannot create their g$..() methods, so we help them out
# TODO: this works if we are the main module, but if the supplying module is later, it won't, so
# we'll need another solution for that. one option is to scan the module imports, if/when
# wasm supports that, then the loader can do this.
named_globals += '''
for (var named in NAMED_GLOBALS) {
(function(named) {
var addr = Module['_' + named];
Module['g$_' + named] = function() { return addr };
})(named);
}
'''
named_globals += ''.join(["Module['%s'] = Module['%s']\n" % (k, v) for k, v in metadata['aliases'].items()])
return named_globals
def create_runtime_funcs_asmjs(exports, metadata):
if shared.Settings.ASSERTIONS or shared.Settings.STACK_OVERFLOW_CHECK >= 2:
stack_check = ' if ((STACKTOP|0) >= (STACK_MAX|0)) abortStackOverflow(size|0);\n'
else:
stack_check = ''
funcs = ['''
function stackAlloc(size) {
size = size|0;
var ret = 0;
ret = STACKTOP;
STACKTOP = (STACKTOP + size)|0;
STACKTOP = (STACKTOP + 15)&-16;
%s
return ret|0;
}
function stackSave() {
return STACKTOP|0;
}
function stackRestore(top) {
top = top|0;
STACKTOP = top;
}
function establishStackSpace(stackBase, stackMax) {
stackBase = stackBase|0;
stackMax = stackMax|0;
STACKTOP = stackBase;
STACK_MAX = stackMax;
}
''' % stack_check]
if shared.Settings.MINIMAL_RUNTIME:
# MINIMAL_RUNTIME moves the stack functions to the JS library.
funcs = []
if shared.Settings.EMTERPRETIFY:
funcs.append('''
function emterpret(pc) { // this will be replaced when the emterpreter code is generated; adding it here allows validation until then
pc = pc | 0;
assert(0);
}''')
if shared.Settings.EMTERPRETIFY_ASYNC:
funcs.append('''
function setAsyncState(x) {
x = x | 0;
asyncState = x;
}
function emtStackSave() {
return EMTSTACKTOP|0;
}
function emtStackRestore(x) {
x = x | 0;
EMTSTACKTOP = x;
}
function getEmtStackMax() {
return EMT_STACK_MAX | 0;
}
function setEmtStackMax(x) {
x = x | 0;
EMT_STACK_MAX = x;
}
''')
if asm_safe_heap():
if '_sbrk' in metadata['implementedFunctions']:
brk_check = 'if ((dest + bytes|0) > (HEAP32[(_emscripten_get_sbrk_ptr()|0)>>2]|0)) segfault();'
else:
# sbrk and malloc were not linked in, but SAFE_HEAP is used - so safe heap
# can ignore the sbrk location.
brk_check = ''
funcs.append('''
function SAFE_HEAP_STORE(dest, value, bytes) {
dest = dest | 0;
value = value | 0;
bytes = bytes | 0;
if ((dest|0) <= 0) segfault();
%(brk_check)s
if ((bytes|0) == 4) {
if ((dest&3)) alignfault();
HEAP32[dest>>2] = value;
} else if ((bytes|0) == 1) {
HEAP8[dest>>0] = value;
} else {
if ((dest&1)) alignfault();
HEAP16[dest>>1] = value;
}
}
function SAFE_HEAP_STORE_D(dest, value, bytes) {
dest = dest | 0;
value = +value;
bytes = bytes | 0;
if ((dest|0) <= 0) segfault();
%(brk_check)s
if ((bytes|0) == 8) {
if ((dest&7)) alignfault();
HEAPF64[dest>>3] = value;
} else {
if ((dest&3)) alignfault();
HEAPF32[dest>>2] = value;
}
}
function SAFE_HEAP_LOAD(dest, bytes, unsigned) {
dest = dest | 0;
bytes = bytes | 0;
unsigned = unsigned | 0;
if ((dest|0) <= 0) segfault();
%(brk_check)s
if ((bytes|0) == 4) {
if ((dest&3)) alignfault();
return HEAP32[dest>>2] | 0;
} else if ((bytes|0) == 1) {
if (unsigned) {
return HEAPU8[dest>>0] | 0;
} else {
return HEAP8[dest>>0] | 0;
}
}
if ((dest&1)) alignfault();
if (unsigned) return HEAPU16[dest>>1] | 0;
return HEAP16[dest>>1] | 0;
}
function SAFE_HEAP_LOAD_D(dest, bytes) {
dest = dest | 0;
bytes = bytes | 0;
if ((dest|0) <= 0) segfault();
%(brk_check)s
if ((bytes|0) == 8) {
if ((dest&7)) alignfault();
return +HEAPF64[dest>>3];
}
if ((dest&3)) alignfault();
return +HEAPF32[dest>>2];
}
function SAFE_FT_MASK(value, mask) {
value = value | 0;
mask = mask | 0;
var ret = 0;
ret = value & mask;
if ((ret|0) != (value|0)) ftfault();
return ret | 0;
}
''' % {'brk_check': brk_check})
return funcs
def create_asm_start_pre(asm_setup, the_global, sending, metadata):
shared_array_buffer = ''
if shared.Settings.USE_PTHREADS and not shared.Settings.WASM:
shared_array_buffer = "asmGlobalArg['Atomics'] = Atomics;"
module_global = 'var asmGlobalArg = ' + the_global + ';'
module_library = 'var asmLibraryArg = ' + sending + ';'
asm_function_top = ('// EMSCRIPTEN_START_ASM\n'
'var asm = (/** @suppress {uselessCode} */ function(global, env, buffer) {')
use_asm = "'almost asm';"
if shared.Settings.ASM_JS == 1:
use_asm = "'use asm';"
lines = [
asm_setup,
module_global,
shared_array_buffer,
module_library,
asm_function_top,
use_asm,
create_first_in_asm(),
]
return '\n'.join(lines)
def create_asm_temp_vars(metadata):
temp_ints = ['__THREW__', 'threwValue', 'setjmpId', 'tempInt', 'tempBigInt', 'tempBigIntS', 'tempValue']
temp_doubles = ['tempDouble']
rtn = ''
for i in temp_ints:
if i in shared.Settings.ASM_PRIMITIVE_VARS:
rtn += 'var ' + i + ' = 0;\n'
for i in temp_doubles:
if i in shared.Settings.ASM_PRIMITIVE_VARS:
rtn += 'var ' + i + ' = 0.0;\n'
if asm_backend_uses(metadata, 'NaN'):
rtn += 'var nan = global%s;\n' % (access_quote('NaN'))
if asm_backend_uses(metadata, 'Infinity'):
rtn += 'var inf = global%s;\n' % (access_quote('Infinity'))
return rtn
def create_asm_runtime_thread_local_vars():
if not shared.Settings.USE_PTHREADS:
return ''
return '''
var __pthread_ptr = 0;
var __pthread_is_main_runtime_thread = 0;
var __pthread_is_main_browser_thread = 0;
'''
def create_replace_memory(metadata):
if not shared.Settings.ALLOW_MEMORY_GROWTH:
return ''
emscripten_replace_memory = '''
function _emscripten_replace_memory(newBuffer) {
'''
for heap, view in [
('HEAP8', 'Int8Array'),
('HEAPU8', 'Uint8Array'),
('HEAP16', 'Int16Array'),
('HEAPU16', 'Uint16Array'),
('HEAP32', 'Int32Array'),
('HEAPU32', 'Uint32Array'),
('HEAPF32', 'Float32Array'),
('HEAPF64', 'Float64Array')]:
if asm_backend_uses(metadata, view):
emscripten_replace_memory += ' %s = new %s(newBuffer);\n' % (heap, view)
emscripten_replace_memory += '''
buffer = newBuffer;
return true;
}
'''
return emscripten_replace_memory
def create_asm_end(exports):
if shared.Settings.MINIMAL_RUNTIME and shared.Settings.WASM:
return '''
return %s;
})
// EMSCRIPTEN_END_ASM
''' % (exports)
return '''
return %s;
})
// EMSCRIPTEN_END_ASM
(asmGlobalArg, asmLibraryArg, buffer);
''' % (exports)
def create_first_in_asm():
return ''
def create_memory_views(metadata):
"""Generates memory views for the different heap types.
Generated symbols:
Int8View Int16View Int32View
Uint8View Uint16View Uint32View
Float32View Float64View
"""
ret = '\n'
for info in HEAP_TYPE_INFOS:
heap_name = '{}Array'.format(info.long_name)
access = access_quote(heap_name)
if asm_backend_uses(metadata, heap_name):
format_args = {
'heap': info.heap_name,
'long': info.long_name,
'access': access,
}
ret += ' var {heap} = new global{access}(buffer);\n'.format(**format_args)
return ret
class HeapTypeInfo(object):
"""Struct that holds data for a type of HEAP* views."""
def __init__(self, heap_name, long_name, shift_amount):
assert heap_name.startswith('HEAP')
self.heap_name = heap_name
self.long_name = long_name
self.shift_amount = shift_amount
def short_name(self):
"""The unique part of the heap name for this type.
Derive this from heap_name instead of the other way around so that searching,
e.g. for HEAP8, from the generated JS code leads back here.
"""
return self.heap_name[len('HEAP'):]
def is_int(self):
"""Whether this heap type is an integer type or not."""
return self.short_name()[0] != 'F'
def coerce(self, expression):
"""Adds asm.js type coercion to a string expression."""
if self.is_int():
return expression + '| 0'
else:
return '+' + expression
HEAP_TYPE_INFOS = [
HeapTypeInfo(heap_name='HEAP8', long_name='Int8', shift_amount=0),
HeapTypeInfo(heap_name='HEAP16', long_name='Int16', shift_amount=1),
HeapTypeInfo(heap_name='HEAP32', long_name='Int32', shift_amount=2),
HeapTypeInfo(heap_name='HEAPU8', long_name='Uint8', shift_amount=0),
HeapTypeInfo(heap_name='HEAPU16', long_name='Uint16', shift_amount=1),
HeapTypeInfo(heap_name='HEAPU32', long_name='Uint32', shift_amount=2),
HeapTypeInfo(heap_name='HEAPF32', long_name='Float32', shift_amount=2),
HeapTypeInfo(heap_name='HEAPF64', long_name='Float64', shift_amount=3),
]
def emscript_wasm_backend(infile, outfile, memfile, compiler_engine,
temp_files, DEBUG):
# Overview:
# * Run wasm-emscripten-finalize to extract metadata and modify the binary
# to use emscripten's wasm<->JS ABI
# * Use the metadata to generate the JS glue that goes with the wasm
metadata = finalize_wasm(temp_files, infile, outfile, memfile, DEBUG)
update_settings_glue(metadata, DEBUG)
if shared.Settings.SIDE_MODULE:
return
if DEBUG:
logger.debug('emscript: js compiler glue')
if DEBUG:
t = time.time()
glue, forwarded_data = compile_settings(compiler_engine, temp_files)
if DEBUG:
logger.debug(' emscript: glue took %s seconds' % (time.time() - t))
t = time.time()
forwarded_json = json.loads(forwarded_data)
  # For the wasm backend the implementedFunctions from compiler.js should
  # always be empty. This only gets populated for __asm functions when using
  # the JS backend.
assert not forwarded_json['Functions']['implementedFunctions']
pre, post = glue.split('// EMSCRIPTEN_END_FUNCS')
# memory and global initializers
global_initializers = ', '.join('{ func: function() { %s() } }' % i for i in metadata['initializers'])
staticbump = shared.Settings.STATIC_BUMP
if shared.Settings.MINIMAL_RUNTIME:
# In minimal runtime, global initializers are run after the Wasm Module instantiation has finished.
global_initializers = ''
else:
# In regular runtime, global initializers are recorded in an __ATINIT__ array.
global_initializers = '''/* global initializers */ %s __ATINIT__.push(%s);
''' % ('if (!ENVIRONMENT_IS_PTHREAD)' if shared.Settings.USE_PTHREADS else '',
global_initializers)
pre = pre.replace('STATICTOP = STATIC_BASE + 0;', '''STATICTOP = STATIC_BASE + %d;
%s
''' % (staticbump, global_initializers))
pre = apply_memory(pre)
pre = apply_static_code_hooks(pre) # In regular runtime, atinits etc. exist in the preamble part
post = apply_static_code_hooks(post) # In MINIMAL_RUNTIME, atinit exists in the postamble part
if shared.Settings.RELOCATABLE and not shared.Settings.SIDE_MODULE:
pre += 'var gb = GLOBAL_BASE, fb = 0;\n'
# merge forwarded data
shared.Settings.EXPORTED_FUNCTIONS = forwarded_json['EXPORTED_FUNCTIONS']
exports = metadata['exports']
# Store exports for Closure compiler to be able to track these as globals in
# -s DECLARE_ASM_MODULE_EXPORTS=0 builds.
shared.Settings.MODULE_EXPORTS = [(asmjs_mangle(f), f) for f in exports]
if shared.Settings.ASYNCIFY:
exports += ['asyncify_start_unwind', 'asyncify_stop_unwind', 'asyncify_start_rewind', 'asyncify_stop_rewind']
report_missing_symbols(set([asmjs_mangle(f) for f in exports]), pre)
asm_consts, asm_const_funcs = create_asm_consts_wasm(forwarded_json, metadata)
em_js_funcs = create_em_js(forwarded_json, metadata)
asm_const_pairs = ['%s: %s' % (key, value) for key, value in asm_consts]
asm_const_map = 'var ASM_CONSTS = {\n ' + ', \n '.join(asm_const_pairs) + '\n};\n'
pre = pre.replace(
'// === Body ===',
('// === Body ===\n\n' + asm_const_map +
asstr('\n'.join(asm_const_funcs)) +
'\n'.join(em_js_funcs) + '\n'))
pre = apply_table(pre)
outfile.write(pre)
pre = None
invoke_funcs = metadata['invokeFuncs']
if shared.Settings.RELOCATABLE:
invoke_funcs.append('invoke_X')
try:
del forwarded_json['Variables']['globals']['_llvm_global_ctors'] # not a true variable
except KeyError:
pass
sending = create_sending_wasm(invoke_funcs, forwarded_json, metadata)
receiving = create_receiving_wasm(exports, metadata['initializers'])
if shared.Settings.MINIMAL_RUNTIME:
post, receiving = compute_minimal_runtime_initializer_and_exports(post, metadata['initializers'], exports, receiving)
module = create_module_wasm(sending, receiving, invoke_funcs, metadata)
write_output_file(outfile, post, module)
module = None
outfile.close()
def remove_trailing_zeros(memfile):
with open(memfile, 'rb') as f:
mem_data = f.read()
end = len(mem_data)
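  # Note: the double comparison below handles both Python 2 (where indexing
  # bytes yields a one-byte string, b'\0') and Python 3 (where it yields the
  # int 0).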
while end > 0 and (mem_data[end - 1] == b'\0' or mem_data[end - 1] == 0):
end -= 1
with open(memfile, 'wb') as f:
f.write(mem_data[:end])
def finalize_wasm(temp_files, infile, outfile, memfile, DEBUG):
basename = shared.unsuffixed(outfile.name)
wasm = basename + '.wasm'
base_wasm = infile
shared.Building.save_intermediate(infile, 'base.wasm')
args = ['--detect-features']
write_source_map = shared.Settings.DEBUG_LEVEL >= 4
if write_source_map:
shared.Building.emit_wasm_source_map(base_wasm, base_wasm + '.map')
shared.Building.save_intermediate(base_wasm + '.map', 'base_wasm.map')
args += ['--output-source-map-url=' + shared.Settings.SOURCE_MAP_BASE + os.path.basename(shared.Settings.WASM_BINARY_FILE) + '.map']
# tell binaryen to look at the features section, and if there isn't one, to use MVP
# (which matches what llvm+lld has given us)
if shared.Settings.DEBUG_LEVEL >= 2 or shared.Settings.PROFILING_FUNCS or shared.Settings.EMIT_SYMBOL_MAP or shared.Settings.ASYNCIFY_WHITELIST or shared.Settings.ASYNCIFY_BLACKLIST:
args.append('-g')
if shared.Settings.LEGALIZE_JS_FFI != 1:
args.append('--no-legalize-javascript-ffi')
if not shared.Settings.MEM_INIT_IN_WASM:
args.append('--separate-data-segments=' + memfile)
if shared.Settings.SIDE_MODULE:
args.append('--side-module')
else:
# --global-base is used by wasm-emscripten-finalize to calculate the size
# of the static data used. The argument we supply here needs to match the
    # global base used by lld (see Building.link_lld). For relocatable output
    # this is zero, although at runtime __memory_base is used.
    # For non-relocatable output we use shared.Settings.GLOBAL_BASE.
    # TODO(sbc): Can we remove this argument and infer it from the segment
# initializer?
if shared.Settings.RELOCATABLE:
args.append('--global-base=0')
else:
args.append('--global-base=%s' % shared.Settings.GLOBAL_BASE)
if shared.Settings.WASM_BACKEND and shared.Settings.STACK_OVERFLOW_CHECK >= 2:
args.append('--check-stack-overflow')
if shared.Settings.STANDALONE_WASM:
args.append('--standalone-wasm')
# When we dynamically link our JS loader adds functions from wasm modules to
# the table. It must add the original versions of them, not legalized ones,
# so that indirect calls have the right type, so export those.
if shared.Settings.RELOCATABLE:
args.append('--pass-arg=legalize-js-interface-export-originals')
stdout = shared.Building.run_binaryen_command('wasm-emscripten-finalize',
infile=base_wasm,
outfile=wasm,
args=args,
stdout=subprocess.PIPE)
if write_source_map:
shared.Building.save_intermediate(wasm + '.map', 'post_finalize.map')
shared.Building.save_intermediate(wasm, 'post_finalize.wasm')
if not shared.Settings.MEM_INIT_IN_WASM:
# we have a separate .mem file. binaryen did not strip any trailing zeros,
# because it's an ABI question as to whether it is valid to do so or not.
# we can do so here, since we make sure to zero out that memory (even in
# the dynamic linking case, our loader zeros it out)
remove_trailing_zeros(memfile)
return load_metadata_wasm(stdout, DEBUG)
def create_asm_consts_wasm(forwarded_json, metadata):
asm_consts = {}
all_sigs = []
for k, v in metadata['asmConsts'].items():
const, sigs, call_types = v
const = asstr(const)
const = trim_asm_const_body(const)
args = []
max_arity = 16
arity = 0
for i in range(max_arity):
if ('$' + str(i)) in const:
arity = i + 1
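    # e.g. a const body that references '$0' and '$2' yields arity == 3, so
    # the generated wrapper receives arguments $0, $1 and $2.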
for i in range(arity):
args.append('$' + str(i))
const = 'function(' + ', '.join(args) + ') {' + const + '}'
asm_consts[int(k)] = const
for sig, call_type in zip(sigs, call_types):
all_sigs.append((sig, call_type))
asm_const_funcs = []
if all_sigs:
# emit the signature-reading helper function only if we have any EM_ASM
# functions in the module
check = ''
if shared.Settings.ASSERTIONS:
check = ' else abort("unexpected char in asm const signature " + ch);'
asm_const_funcs.append(r'''
// Avoid creating a new array
var _readAsmConstArgsArray = [];
function readAsmConstArgs(sigPtr, buf) {
var args = _readAsmConstArgsArray;
args.length = 0;
while (1) {
var ch = HEAPU8[sigPtr++];
if (!ch) return args;
if (ch === 'd'.charCodeAt(0) || ch === 'f'.charCodeAt(0)) {
buf = alignMemory(buf, 8);
args.push(HEAPF64[(buf >> 3)]);
buf += 8;
} else if (ch === 'i'.charCodeAt(0)) {
buf = alignMemory(buf, 4);
args.push(HEAP32[(buf >> 2)]);
buf += 4;
}%s
}
}
''' % check)
for sig, call_type in set(all_sigs):
const_name = '_emscripten_asm_const_' + call_type + sig
forwarded_json['Functions']['libraryFunctions'][const_name] = 1
preamble = ''
if shared.Settings.USE_PTHREADS:
sync_proxy = call_type == 'sync_on_main_thread_'
async_proxy = call_type == 'async_on_main_thread_'
proxied = sync_proxy or async_proxy
if proxied:
# In proxied function calls, positive integers 1, 2, 3, ... denote pointers
# to regular C compiled functions. Negative integers -1, -2, -3, ... denote
# indices to EM_ASM() blocks, so remap the EM_ASM() indices from 0, 1, 2,
# ... over to the negative integers starting at -1.
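        # e.g. EM_ASM block 0 is proxied as -1 and block 1 as -2, which is
        # exactly what the '-1 - code' expression below computes.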
preamble += ('\n if (ENVIRONMENT_IS_PTHREAD) { ' +
proxy_debug_print(sync_proxy) +
'return _emscripten_proxy_to_main_thread_js(-1 - code, ' +
str(int(sync_proxy)) +
', code, sigPtr, argbuf); }')
if shared.Settings.RELOCATABLE:
preamble += '\n code -= %s;\n' % shared.Settings.GLOBAL_BASE
asm_const_funcs.append(r'''
function %s(code, sigPtr, argbuf) {%s
var args = readAsmConstArgs(sigPtr, argbuf);
return ASM_CONSTS[code].apply(null, args);
}''' % (const_name, preamble))
asm_consts = [(key, value) for key, value in asm_consts.items()]
asm_consts.sort()
return asm_consts, asm_const_funcs
def create_em_js(forwarded_json, metadata):
em_js_funcs = []
separator = '<::>'
for name, raw in metadata.get('emJsFuncs', {}).items():
assert separator in raw
args, body = raw.split(separator, 1)
args = args[1:-1]
if args == 'void':
args = []
else:
args = args.split(',')
arg_names = [arg.split()[-1].replace("*", "") for arg in args if arg]
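    # e.g. raw == '(int x, char *name)<::>{ return x; }' yields
    # arg_names == ['x', 'name'] and body == '{ return x; }'.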
func = 'function {}({}){}'.format(name, ','.join(arg_names), asstr(body))
em_js_funcs.append(func)
forwarded_json['Functions']['libraryFunctions'][name] = 1
return em_js_funcs
def add_standard_wasm_imports(send_items_map):
# Normally we import these into the wasm (so that JS could use them even
# before the wasm loads), while in standalone mode we do not depend
# on JS to create them, but create them in the wasm and export them.
if not shared.Settings.STANDALONE_WASM:
memory_import = 'wasmMemory'
if shared.Settings.MODULARIZE and shared.Settings.USE_PTHREADS:
# Pthreads assign wasmMemory in their worker startup. In MODULARIZE mode, they cannot assign inside the
# Module scope, so lookup via Module as well.
memory_import += " || Module['wasmMemory']"
send_items_map['memory'] = memory_import
send_items_map['table'] = 'wasmTable'
    # With the wasm backend __memory_base and __table_base are only needed for
    # relocatable output.
if shared.Settings.RELOCATABLE or not shared.Settings.WASM_BACKEND: # FIXME
send_items_map['__memory_base'] = str(shared.Settings.GLOBAL_BASE) # tell the memory segments where to place themselves
# the wasm backend reserves slot 0 for the NULL function pointer
table_base = '1' if shared.Settings.WASM_BACKEND else '0'
send_items_map['__table_base'] = table_base
if shared.Settings.RELOCATABLE and shared.Settings.WASM_BACKEND: # FIXME
send_items_map['__stack_pointer'] = 'STACK_BASE'
if shared.Settings.MAYBE_WASM2JS or shared.Settings.AUTODEBUG or shared.Settings.LINKABLE:
# legalization of i64 support code may require these in some modes
send_items_map['setTempRet0'] = 'setTempRet0'
send_items_map['getTempRet0'] = 'getTempRet0'
if shared.Settings.AUTODEBUG:
send_items_map['log_execution'] = '''function(loc) {
console.log('log_execution ' + loc);
}'''
send_items_map['get_i32'] = '''function(loc, index, value) {
console.log('get_i32 ' + [loc, index, value]);
return value;
}'''
send_items_map['get_i64'] = '''function(loc, index, low, high) {
console.log('get_i64 ' + [loc, index, low, high]);
setTempRet0(high);
return low;
}'''
send_items_map['get_f32'] = '''function(loc, index, value) {
console.log('get_f32 ' + [loc, index, value]);
return value;
}'''
send_items_map['get_f64'] = '''function(loc, index, value) {
console.log('get_f64 ' + [loc, index, value]);
return value;
}'''
send_items_map['get_anyref'] = '''function(loc, index, value) {
console.log('get_anyref ' + [loc, index, value]);
return value;
}'''
send_items_map['get_exnref'] = '''function(loc, index, value) {
console.log('get_exnref ' + [loc, index, value]);
return value;
}'''
send_items_map['set_i32'] = '''function(loc, index, value) {
console.log('set_i32 ' + [loc, index, value]);
return value;
}'''
send_items_map['set_i64'] = '''function(loc, index, low, high) {
console.log('set_i64 ' + [loc, index, low, high]);
setTempRet0(high);
return low;
}'''
send_items_map['set_f32'] = '''function(loc, index, value) {
console.log('set_f32 ' + [loc, index, value]);
return value;
}'''
send_items_map['set_f64'] = '''function(loc, index, value) {
console.log('set_f64 ' + [loc, index, value]);
return value;
}'''
send_items_map['set_anyref'] = '''function(loc, index, value) {
console.log('set_anyref ' + [loc, index, value]);
return value;
}'''
send_items_map['set_exnref'] = '''function(loc, index, value) {
console.log('set_exnref ' + [loc, index, value]);
return value;
}'''
send_items_map['load_ptr'] = '''function(loc, bytes, offset, ptr) {
console.log('load_ptr ' + [loc, bytes, offset, ptr]);
return ptr;
}'''
send_items_map['load_val_i32'] = '''function(loc, value) {
console.log('load_val_i32 ' + [loc, value]);
return value;
}'''
send_items_map['load_val_i64'] = '''function(loc, low, high) {
console.log('load_val_i64 ' + [loc, low, high]);
setTempRet0(high);
return low;
}'''
    send_items_map['load_val_f32'] = '''function(loc, value) {
      console.log('load_val_f32 ' + [loc, value]);
      return value;
    }'''
send_items_map['load_val_f64'] = '''function(loc, value) {
console.log('load_val_f64 ' + [loc, value]);
return value;
}'''
send_items_map['store_ptr'] = '''function(loc, bytes, offset, ptr) {
console.log('store_ptr ' + [loc, bytes, offset, ptr]);
return ptr;
}'''
send_items_map['store_val_i32'] = '''function(loc, value) {
console.log('store_val_i32 ' + [loc, value]);
return value;
}'''
send_items_map['store_val_i64'] = '''function(loc, low, high) {
console.log('store_val_i64 ' + [loc, low, high]);
setTempRet0(high);
return low;
}'''
    send_items_map['store_val_f32'] = '''function(loc, value) {
      console.log('store_val_f32 ' + [loc, value]);
      return value;
    }'''
send_items_map['store_val_f64'] = '''function(loc, value) {
console.log('store_val_f64 ' + [loc, value]);
return value;
}'''
def create_sending_wasm(invoke_funcs, forwarded_json, metadata):
basic_funcs = []
if shared.Settings.SAFE_HEAP:
basic_funcs += ['segfault', 'alignfault']
em_asm_sigs = [zip(sigs, call_types) for _, sigs, call_types in metadata['asmConsts'].values()]
# flatten em_asm_sigs
em_asm_sigs = [sig for sigs in em_asm_sigs for sig in sigs]
em_asm_funcs = ['_emscripten_asm_const_' + call_type + sig for sig, call_type in em_asm_sigs]
em_js_funcs = list(metadata['emJsFuncs'].keys())
declared_items = ['_' + item for item in metadata['declares']]
send_items = set(basic_funcs + invoke_funcs + em_asm_funcs + em_js_funcs + declared_items)
def fix_import_name(g):
if g.startswith('Math_'):
return g.split('_')[1]
# Unlike fastcomp the wasm backend doesn't use the '_' prefix for native
# symbols. Emscripten currently expects symbols to start with '_' so we
    # artificially add them to the output of wasm-emscripten-finalize and then
    # strip them again here.
# note that we don't do this for EM_JS functions (which, rarely, may have
# a '_' prefix)
if g.startswith('_') and g not in metadata['emJsFuncs']:
return g[1:]
return g
send_items_map = OrderedDict()
for name in send_items:
internal_name = fix_import_name(name)
if internal_name in send_items_map:
      exit_with_error('duplicate symbol in imports to wasm: %s', name)
send_items_map[internal_name] = name
add_standard_wasm_imports(send_items_map)
sorted_keys = sorted(send_items_map.keys())
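  # The result is a JS object literal mapping import names to JS values,
  # e.g. '{ "memory": wasmMemory, "table": wasmTable, ... }'.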
return '{ ' + ', '.join('"' + k + '": ' + send_items_map[k] for k in sorted_keys) + ' }'
def create_receiving_wasm(exports, initializers):
exports_that_are_not_initializers = [x for x in exports if x not in initializers]
receiving = []
if shared.Settings.MINIMAL_RUNTIME or not shared.Settings.ASSERTIONS:
runtime_assertions = ''
else:
runtime_assertions = RUNTIME_ASSERTIONS
# assert on the runtime being in a valid state when calling into compiled code. The only exceptions are
# some support code
for e in exports:
receiving.append('''\
var real_%(mangled)s = asm["%(e)s"];
asm["%(e)s"] = function() {%(assertions)s
return real_%(mangled)s.apply(null, arguments);
};
''' % {'mangled': asmjs_mangle(e), 'e': e, 'assertions': runtime_assertions})
if not shared.Settings.SWAPPABLE_ASM_MODULE:
if shared.Settings.DECLARE_ASM_MODULE_EXPORTS:
if shared.Settings.WASM and shared.Settings.MINIMAL_RUNTIME:
# In Wasm exports are assigned inside a function to variables existing in top level JS scope, i.e.
# var _main;
# WebAssembly.instantiate(Module["wasm"], imports).then((function(output) {
# var asm = output.instance.exports;
# _main = asm["_main"];
receiving += [asmjs_mangle(s) + ' = asm["' + s + '"];' for s in exports_that_are_not_initializers]
else:
if shared.Settings.MINIMAL_RUNTIME:
# In wasm2js exports can be directly processed at top level, i.e.
# var asm = Module["asm"](asmGlobalArg, asmLibraryArg, buffer);
# var _main = asm["_main"];
receiving += ['var ' + asmjs_mangle(s) + ' = asm["' + asmjs_mangle(s) + '"];' for s in exports_that_are_not_initializers]
else:
receiving += ['var ' + asmjs_mangle(s) + ' = Module["' + asmjs_mangle(s) + '"] = asm["' + s + '"];' for s in exports]
else:
if shared.Settings.target_environment_may_be('node') and shared.Settings.target_environment_may_be('web'):
global_object = '(typeof process !== "undefined" ? global : this)'
elif shared.Settings.target_environment_may_be('node'):
global_object = 'global'
else:
global_object = 'this'
if shared.Settings.MINIMAL_RUNTIME:
module_assign = ''
else:
module_assign = 'Module[asmjs_mangle(__exportedFunc)] = '
receiving.append('''
function asmjs_mangle(x) {
var unmangledSymbols = %s;
return x.indexOf('dynCall_') == 0 || unmangledSymbols.indexOf(x) != -1 ? x : '_' + x;
}
''' % shared.Settings.WASM_FUNCTIONS_THAT_ARE_NOT_NAME_MANGLED)
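      # e.g. asmjs_mangle('main') yields '_main', while names starting with
      # 'dynCall_' (and the unmangled allowlist) are kept as-is.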
receiving.append('for(var __exportedFunc in asm) ' + global_object + '[asmjs_mangle(__exportedFunc)] = ' + module_assign + 'asm[__exportedFunc];')
else:
receiving.append('Module["asm"] = asm;')
for e in exports:
receiving.append('''\
var %(mangled)s = Module["%(mangled)s"] = function() {%(assertions)s
return Module["asm"]["%(e)s"].apply(null, arguments)
};
''' % {'mangled': asmjs_mangle(e), 'e': e, 'assertions': runtime_assertions})
return '\n'.join(receiving) + '\n'
def create_module_wasm(sending, receiving, invoke_funcs, metadata):
invoke_wrappers = create_invoke_wrappers(invoke_funcs)
receiving += create_named_globals(metadata)
receiving += create_fp_accessors(metadata)
module = []
module.append('var asmGlobalArg = {};\n')
if shared.Settings.USE_PTHREADS and not shared.Settings.WASM:
module.append("if (typeof SharedArrayBuffer !== 'undefined') asmGlobalArg['Atomics'] = Atomics;\n")
module.append('var asmLibraryArg = %s;\n' % (sending))
if shared.Settings.ASYNCIFY and shared.Settings.ASSERTIONS:
module.append('Asyncify.instrumentWasmImports(asmLibraryArg);\n')
if not shared.Settings.MINIMAL_RUNTIME:
module.append("var asm = createWasm();\n")
module.append(receiving)
module.append(invoke_wrappers)
return module
def load_metadata_wasm(metadata_raw, DEBUG):
try:
metadata_json = json.loads(metadata_raw)
except Exception:
logger.error('emscript: failure to parse metadata output from wasm-emscripten-finalize. raw output is: \n' + metadata_raw)
raise
metadata = {
'aliases': {},
'declares': [],
'implementedFunctions': [],
'externs': [],
'simd': False,
'maxGlobalAlign': 0,
'staticBump': 0,
'tableSize': 0,
'initializers': [],
'exports': [],
'namedGlobals': {},
'emJsFuncs': {},
'asmConsts': {},
'invokeFuncs': [],
'features': [],
'mainReadsParams': 1,
}
assert 'tableSize' in metadata_json.keys()
for key, value in metadata_json.items():
# json.loads returns `unicode` for strings but other code in this file
    # generally works with utf8 encoded `str` objects, and they don't always
    # mix well. e.g. s.replace(x, y) will blow up if `s` is a utf8 str containing
    # non-ascii and either x or y are unicode objects.
# TODO(sbc): Remove this encoding if we switch to unicode elsewhere
# (specifically the glue returned from compile_settings)
if type(value) == list:
value = [asstr(v) for v in value]
if key not in metadata:
exit_with_error('unexpected metadata key received from wasm-emscripten-finalize: %s', key)
metadata[key] = value
if not shared.Settings.MINIMAL_RUNTIME:
# In regular runtime initializers call the global var version of the export, so they get the mangled name.
# In MINIMAL_RUNTIME, the initializers are called directly off the export object for minimal code size.
metadata['initializers'] = [asmjs_mangle(i) for i in metadata['initializers']]
if DEBUG:
logger.debug("Metadata parsed: " + pprint.pformat(metadata))
# Calculate the subset of exports that were explicitly marked with llvm.used.
# These are any exports that were not requested on the command line and are
# not known auto-generated system functions.
unexpected_exports = [e for e in metadata['exports'] if treat_as_user_function(e)]
unexpected_exports = [asmjs_mangle(e) for e in unexpected_exports]
unexpected_exports = [e for e in unexpected_exports if e not in shared.Settings.EXPORTED_FUNCTIONS]
shared.Building.user_requested_exports += unexpected_exports
return metadata
def create_invoke_wrappers(invoke_funcs):
"""Asm.js-style exception handling: invoke wrapper generation."""
invoke_wrappers = ''
for invoke in invoke_funcs:
sig = invoke[len('invoke_'):]
invoke_wrappers += '\n' + shared.JS.make_invoke(sig) + '\n'
return invoke_wrappers
def normalize_line_endings(text):
"""Normalize to UNIX line endings.
  On Windows, writing to a text file will duplicate \r\n to \r\r\n otherwise.
"""
if WINDOWS:
return text.replace('\r\n', '\n')
return text
def run(infile, outfile, memfile):
temp_files = get_configuration().get_temp_files()
infile, outfile = substitute_response_files([infile, outfile])
if not shared.Settings.BOOTSTRAPPING_STRUCT_INFO:
generated_struct_info_name = 'generated_struct_info.json'
def generate_struct_info():
with ToolchainProfiler.profile_block('gen_struct_info'):
out = shared.Cache.get_path(generated_struct_info_name)
gen_struct_info.main(['-q', '-c', '-o', out])
return out
shared.Settings.STRUCT_INFO = shared.Cache.get(generated_struct_info_name, generate_struct_info)
# do we need an else, to define it for the bootstrap case?
outfile_obj = open(outfile, 'w')
emscripter = emscript_wasm_backend if shared.Settings.WASM_BACKEND else emscript_fastcomp
return temp_files.run_and_clean(lambda: emscripter(
infile, outfile_obj, memfile, shared.NODE_JS, temp_files, shared.DEBUG)
)
|
[] |
[] |
[
"EMCC_STDERR_FILE"
] |
[]
|
["EMCC_STDERR_FILE"]
|
python
| 1 | 0 | |
util/testutil/integration/containerd.go
|
package integration
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/pkg/errors"
)
func InitContainerdWorker() {
Register(&containerd{
name: "containerd",
containerd: "containerd",
containerdShim: "containerd-shim",
})
// defined in hack/dockerfiles/test.buildkit.Dockerfile.
// e.g. `containerd-1.1=/opt/containerd-1.1/bin,containerd-42.0=/opt/containerd-42.0/bin`
if s := os.Getenv("BUILDKIT_INTEGRATION_CONTAINERD_EXTRA"); s != "" {
entries := strings.Split(s, ",")
for _, entry := range entries {
pair := strings.Split(strings.TrimSpace(entry), "=")
if len(pair) != 2 {
panic(errors.Errorf("unexpected BUILDKIT_INTEGRATION_CONTAINERD_EXTRA: %q", s))
}
name, bin := pair[0], pair[1]
Register(&containerd{
name: name,
containerd: filepath.Join(bin, "containerd"),
containerdShim: filepath.Join(bin, "containerd-shim"),
})
}
}
}
type containerd struct {
name string
containerd string
containerdShim string
}
func (c *containerd) Name() string {
return c.name
}
func (c *containerd) New(cfg *BackendConfig) (b Backend, cl func() error, err error) {
if err := lookupBinary(c.containerd); err != nil {
return nil, nil, err
}
if err := lookupBinary(c.containerdShim); err != nil {
return nil, nil, err
}
if err := lookupBinary("buildkitd"); err != nil {
return nil, nil, err
}
if err := requireRoot(); err != nil {
return nil, nil, err
}
deferF := &multiCloser{}
cl = deferF.F()
defer func() {
if err != nil {
deferF.F()()
cl = nil
}
}()
tmpdir, err := ioutil.TempDir("", "bktest_containerd")
if err != nil {
return nil, nil, err
}
deferF.append(func() error { return os.RemoveAll(tmpdir) })
address := filepath.Join(tmpdir, "containerd.sock")
config := fmt.Sprintf(`root = %q
state = %q
# The CRI plugin listens on 10010/tcp for its stream server.
# We disable the CRI plugin so that multiple instances can run simultaneously.
disabled_plugins = ["cri"]
[grpc]
address = %q
[debug]
level = "debug"
address = %q
[plugins]
[plugins.linux]
shim = %q
`, filepath.Join(tmpdir, "root"), filepath.Join(tmpdir, "state"), address, filepath.Join(tmpdir, "debug.sock"), c.containerdShim)
configFile := filepath.Join(tmpdir, "config.toml")
if err := ioutil.WriteFile(configFile, []byte(config), 0644); err != nil {
return nil, nil, err
}
cmd := exec.Command(c.containerd, "--config", configFile)
ctdStop, err := startCmd(cmd, cfg.Logs)
if err != nil {
return nil, nil, err
}
if err := waitUnix(address, 5*time.Second); err != nil {
ctdStop()
return nil, nil, errors.Wrapf(err, "containerd did not start up: %s", formatLogs(cfg.Logs))
}
deferF.append(ctdStop)
buildkitdArgs := []string{"buildkitd",
"--oci-worker=false",
"--containerd-worker-gc=false",
"--containerd-worker=true",
"--containerd-worker-addr", address,
"--containerd-worker-labels=org.mobyproject.buildkit.worker.sandbox=true", // Include use of --containerd-worker-labels to trigger https://github.com/moby/buildkit/pull/603
}
buildkitdSock, stop, err := runBuildkitd(cfg, buildkitdArgs, cfg.Logs, 0, 0)
if err != nil {
printLogs(cfg.Logs, log.Println)
return nil, nil, err
}
deferF.append(stop)
return cdbackend{
containerdAddress: address,
backend: backend{
address: buildkitdSock,
rootless: false,
}}, cl, nil
}
func formatLogs(m map[string]*bytes.Buffer) string {
var ss []string
for k, b := range m {
if b != nil {
ss = append(ss, fmt.Sprintf("%q:%q", k, b.String()))
}
}
return strings.Join(ss, ",")
}
type cdbackend struct {
backend
containerdAddress string
}
func (s cdbackend) ContainerdAddress() string {
return s.containerdAddress
}
|
[
"\"BUILDKIT_INTEGRATION_CONTAINERD_EXTRA\""
] |
[] |
[
"BUILDKIT_INTEGRATION_CONTAINERD_EXTRA"
] |
[]
|
["BUILDKIT_INTEGRATION_CONTAINERD_EXTRA"]
|
go
| 1 | 0 | |
db/redis.go
|
package db
import (
"github.com/go-redis/redis/v8"
"os"
)
var RedisClient *redis.Client
func getRedisUrl() string {
url := os.Getenv("REDIS_URL")
if url == "" {
url = "redis://127.0.0.1:6379/1"
}
return url
}
func init() {
opt, err := redis.ParseURL(getRedisUrl())
if err != nil {
panic(err)
}
RedisClient = redis.NewClient(opt)
}
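// Example usage (illustrative): with REDIS_URL unset, the client targets
// redis://127.0.0.1:6379/1; callers then use the shared client directly,
// e.g. db.RedisClient.Set(ctx, "key", "value", 0).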
|
[
"\"REDIS_URL\""
] |
[] |
[
"REDIS_URL"
] |
[]
|
["REDIS_URL"]
|
go
| 1 | 0 | |
pkg/util/log/clog.go
|
// Copyright 2013 Google Inc. All Rights Reserved.
// Copyright 2017 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This code originated in the github.com/golang/glog package.
package log
import (
"bufio"
"bytes"
"compress/gzip"
"context"
"errors"
"fmt"
"io"
stdLog "log"
"math"
"os"
"path/filepath"
"regexp"
"runtime"
"runtime/debug"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/petermattis/goid"
"github.com/znbasedb/ttycolor"
"github.com/znbasedb/znbase/pkg/build"
"github.com/znbasedb/znbase/pkg/util/caller"
"github.com/znbasedb/znbase/pkg/util/envutil"
"github.com/znbasedb/znbase/pkg/util/syncutil"
"github.com/znbasedb/znbase/pkg/util/sysutil"
"github.com/znbasedb/znbase/pkg/util/timeutil"
)
// maxSyncDuration is set to a conservative value since this is a new mechanism.
// In practice, even a fraction of that would indicate a problem.
var maxSyncDuration = envutil.EnvOrDefaultDuration("ZNBASE_LOG_MAX_SYNC_DURATION", 30*time.Second)
const fatalErrorPostamble = `
****************************************************************************
This node experienced a fatal error (printed above), and as a result the
process is terminating.
Fatal errors can occur due to faulty hardware (disks, memory, clocks) or a
problem in ZNBaseDB. With your help, the support team at ZNBase Labs
will try to determine the root cause, recommend next steps, and we can
improve ZNBaseDB based on your report.
Please contact ZNBaseDB technical support.
The ZNBase Labs team appreciates your feedback.
`
// FatalChan is closed when Fatal is called. This can be used to make
// the process stop handling requests while the final log messages and
// crash report are being written.
func FatalChan() <-chan struct{} {
return logging.fatalCh
}
const severityChar = "IWEF"
const (
tracebackNone = iota
tracebackSingle
tracebackAll
)
// Obey the GOTRACEBACK environment variable for determining which stacks to
// output during a log.Fatal.
var traceback = func() int {
switch os.Getenv("GOTRACEBACK") {
case "none":
return tracebackNone
case "single", "":
return tracebackSingle
default: // "all", "system", "crash"
return tracebackAll
}
}()
// DisableTracebacks turns off tracebacks for log.Fatals. Returns a function
// that sets the traceback settings back to where they were.
// Only intended for use by tests.
func DisableTracebacks() func() {
oldVal := traceback
traceback = tracebackNone
return func() { traceback = oldVal }
}
// get returns the value of the Severity.
func (s *Severity) get() Severity {
return Severity(atomic.LoadInt32((*int32)(s)))
}
// set sets the value of the Severity.
func (s *Severity) set(val Severity) {
atomic.StoreInt32((*int32)(s), int32(val))
}
// Set is part of the flag.Value interface.
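// For example, Set("WARNING") and Set("2") are equivalent, assuming the
// generated Severity_value mapping assigns WARNING the value 2 (consistent
// with severityChar above).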
func (s *Severity) Set(value string) error {
var threshold Severity
// Is it a known name?
if v, ok := SeverityByName(value); ok {
threshold = v
} else {
v, err := strconv.Atoi(value)
if err != nil {
return err
}
threshold = Severity(v)
}
s.set(threshold)
return nil
}
// Name returns the string representation of the severity (i.e. ERROR, INFO).
func (s *Severity) Name() string {
return s.String()
}
// SeverityByName attempts to parse the passed in string into a severity. (i.e.
// ERROR, INFO). If it succeeds, the returned bool is set to true.
func SeverityByName(s string) (Severity, bool) {
s = strings.ToUpper(s)
if i, ok := Severity_value[s]; ok {
return Severity(i), true
}
switch s {
case "TRUE":
return Severity_INFO, true
case "FALSE":
return Severity_NONE, true
}
return 0, false
}
// Level is exported because it appears in the arguments to V and is
// the type of the v flag, which can be set programmatically.
// It's a distinct type because we want to discriminate it from logType.
// Variables of type level are only changed under logging.mu.
// The --verbosity flag is read only with atomic ops, so the state of the logging
// module is consistent.
// Level is treated as a sync/atomic int32.
// Level specifies a level of verbosity for V logs. *Level implements
// flag.Value; the --verbosity flag is of type Level and should be modified
// only through the flag.Value interface.
type level int32
// get returns the value of the Level.
func (l *level) get() level {
return level(atomic.LoadInt32((*int32)(l)))
}
// set sets the value of the Level.
func (l *level) set(val level) {
atomic.StoreInt32((*int32)(l), int32(val))
}
// String is part of the flag.Value interface.
func (l *level) String() string {
return strconv.FormatInt(int64(*l), 10)
}
// Set is part of the flag.Value interface.
func (l *level) Set(value string) error {
v, err := strconv.Atoi(value)
if err != nil {
return err
}
logging.mu.Lock()
defer logging.mu.Unlock()
logging.setVState(level(v), logging.vmodule.filter, false)
return nil
}
// moduleSpec represents the setting of the --vmodule flag.
type moduleSpec struct {
filter []modulePat
}
// modulePat contains a filter for the --vmodule flag.
// It holds a verbosity level and a file pattern to match.
type modulePat struct {
pattern string
literal bool // The pattern is a literal string
level level
}
// match reports whether the file matches the pattern. It uses a string
// comparison if the pattern contains no metacharacters.
func (m *modulePat) match(file string) bool {
if m.literal {
return file == m.pattern
}
match, _ := filepath.Match(m.pattern, file)
return match
}
func (m *moduleSpec) String() string {
// Lock because the type is not atomic. TODO: clean this up.
logging.mu.Lock()
defer logging.mu.Unlock()
var b bytes.Buffer
for i, f := range m.filter {
if i > 0 {
b.WriteRune(',')
}
fmt.Fprintf(&b, "%s=%d", f.pattern, f.level)
}
return b.String()
}
var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")
// Syntax: --vmodule=recordio=2,file=1,gfs*=3
func (m *moduleSpec) Set(value string) error {
var filter []modulePat
for _, pat := range strings.Split(value, ",") {
if len(pat) == 0 {
// Empty strings such as from a trailing comma can be ignored.
continue
}
patLev := strings.Split(pat, "=")
if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
return errVmoduleSyntax
}
pattern := patLev[0]
v, err := strconv.Atoi(patLev[1])
if err != nil {
return errors.New("syntax error: expect comma-separated list of filename=N")
}
if v < 0 {
return errors.New("negative value for vmodule level")
}
if v == 0 {
continue // Ignore. It's harmless but no point in paying the overhead.
}
// TODO: check syntax of filter?
filter = append(filter, modulePat{pattern, isLiteral(pattern), level(v)})
}
logging.mu.Lock()
defer logging.mu.Unlock()
logging.setVState(logging.verbosity, filter, true)
return nil
}
// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters
// that require filepath.Match to be called to match the pattern.
func isLiteral(pattern string) bool {
return !strings.ContainsAny(pattern, `\*?[]`)
}
// traceLocation represents the setting of the -log_backtrace_at flag.
type traceLocation struct {
file string
line int
}
// isSet reports whether the trace location has been specified.
// logging.mu is held.
func (t *traceLocation) isSet() bool {
return t.line > 0
}
// match reports whether the specified file and line matches the trace location.
// The argument file name is the full path, not the basename specified in the flag.
// logging.mu is held.
func (t *traceLocation) match(file string, line int) bool {
if t.line != line {
return false
}
if i := strings.LastIndexByte(file, '/'); i >= 0 {
file = file[i+1:]
}
return t.file == file
}
func (t *traceLocation) String() string {
// Lock because the type is not atomic. TODO: clean this up.
logging.mu.Lock()
defer logging.mu.Unlock()
return fmt.Sprintf("%s:%d", t.file, t.line)
}
var errTraceSyntax = errors.New("syntax error: expect file.go:234")
// Syntax: -log_backtrace_at=gopherflakes.go:234
// Note that unlike vmodule the file extension is included here.
func (t *traceLocation) Set(value string) error {
if value == "" {
// Unset.
logging.mu.Lock()
defer logging.mu.Unlock()
t.line = 0
t.file = ""
return nil
}
fields := strings.Split(value, ":")
if len(fields) != 2 {
return errTraceSyntax
}
file, line := fields[0], fields[1]
if !strings.Contains(file, ".") {
return errTraceSyntax
}
v, err := strconv.Atoi(line)
if err != nil {
return errTraceSyntax
}
if v <= 0 {
return errors.New("negative or zero value for level")
}
logging.mu.Lock()
defer logging.mu.Unlock()
t.line = v
t.file = file
return nil
}
// We don't include a capture group for the log message here, just for the
// preamble, because a capture group that handles multiline messages is very
// slow when running on the large buffers passed to EntryDecoder.split.
var entryRE = regexp.MustCompile(
`(?m)^([IWEF])(\d{6} \d{2}:\d{2}:\d{2}.\d{6}) (?:(\d+) )?([^:]+):(\d+)`)
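// For example, the pattern matches headers such as
// "I180419 10:03:21.123456 42 clog.go:1234" (the goroutine ID is optional).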
// EntryDecoder reads successive encoded log entries from the input
// buffer. Each entry is preceded by a single big-endian uint32
// describing the next entry's length.
type EntryDecoder struct {
re *regexp.Regexp
scanner *bufio.Scanner
truncatedLastEntry bool
}
// NewEntryDecoder creates a new instance of EntryDecoder.
func NewEntryDecoder(in io.Reader) *EntryDecoder {
d := &EntryDecoder{scanner: bufio.NewScanner(in), re: entryRE.Copy()}
d.scanner.Split(d.split)
return d
}
// MessageTimeFormat is the format of the timestamp in log message headers as
// used in time.Parse and time.Format.
const MessageTimeFormat = "060102 15:04:05.999999"
// Decode decodes the next log entry into the provided protobuf message.
func (d *EntryDecoder) Decode(entry *Entry) error {
for {
if !d.scanner.Scan() {
if err := d.scanner.Err(); err != nil {
return err
}
return io.EOF
}
b := d.scanner.Bytes()
m := d.re.FindSubmatch(b)
if m == nil {
continue
}
entry.Severity = Severity(strings.IndexByte(severityChar, m[1][0]) + 1)
t, err := time.Parse(MessageTimeFormat, string(m[2]))
if err != nil {
return err
}
entry.Time = t.UnixNano()
if len(m[3]) > 0 {
goroutine, err := strconv.Atoi(string(m[3]))
if err != nil {
return err
}
entry.Goroutine = int64(goroutine)
}
entry.File = string(m[4])
line, err := strconv.Atoi(string(m[5]))
if err != nil {
return err
}
entry.Line = int64(line)
entry.Message = strings.TrimSpace(string(b[len(m[0]):]))
return nil
}
}
func (d *EntryDecoder) split(data []byte, atEOF bool) (advance int, token []byte, err error) {
if atEOF && len(data) == 0 {
return 0, nil, nil
}
if d.truncatedLastEntry {
i := d.re.FindIndex(data)
if i == nil {
// If there's no entry that starts in this chunk, advance past it, since
// we've truncated the entry it was originally part of.
return len(data), nil, nil
}
d.truncatedLastEntry = false
if i[0] > 0 {
// If an entry starts anywhere other than the first index, advance to it
// to maintain the invariant that entries start at the beginning of data.
// This isn't necessary, but simplifies the code below.
return i[0], nil, nil
}
// If i[0] == 0, then a new entry starts at the beginning of data, so fall
// through to the normal logic.
}
// From this point on, we assume we're currently positioned at a log entry.
// We want to find the next one so we start our search at data[1].
i := d.re.FindIndex(data[1:])
if i == nil {
if atEOF {
return len(data), data, nil
}
if len(data) >= bufio.MaxScanTokenSize {
// If there's no room left in the buffer, return the current truncated
// entry.
d.truncatedLastEntry = true
return len(data), data, nil
}
// If there is still room to read more, ask for more before deciding whether
// to truncate the entry.
return 0, nil, nil
}
// i[0] is the start of the next log entry, but we need to adjust the value
// to account for using data[1:] above.
i[0]++
return i[0], data[:i[0]], nil
}
// flushSyncWriter is the interface satisfied by logging destinations.
type flushSyncWriter interface {
Flush() error
Sync() error
io.Writer
}
// the --no-color flag.
var noColor bool
// formatHeader formats a log header using the provided file name and
// line number. Log lines are colorized depending on severity.
//
// Log lines have this form:
// Lyymmdd hh:mm:ss.uuuuuu goid file:line msg...
// where the fields are defined as follows:
// L A single character, representing the log level (eg 'I' for INFO)
// yy The year (zero padded; ie 2016 is '16')
// mm The month (zero padded; ie May is '05')
// dd The day (zero padded)
// hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds
// goid The goroutine id (omitted if zero for use by tests)
// file The file name
// line The line number
// msg The user-supplied message
func formatHeader(
s Severity, now time.Time, gid int, file string, line int, cp ttycolor.Profile,
) *buffer {
if noColor {
cp = nil
}
buf := logging.getBuffer()
if line < 0 {
line = 0 // not a real line number, but acceptable to someDigits
}
if s > Severity_FATAL || s <= Severity_UNKNOWN {
s = Severity_INFO // for safety.
}
tmp := buf.tmp[:len(buf.tmp)]
var n int
var prefix []byte
switch s {
case Severity_INFO:
prefix = cp[ttycolor.Cyan]
case Severity_WARNING:
prefix = cp[ttycolor.Yellow]
case Severity_ERROR, Severity_FATAL:
prefix = cp[ttycolor.Red]
}
n += copy(tmp, prefix)
// Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
// It's worth about 3X. Fprintf is hard.
year, month, day := now.Date()
hour, minute, second := now.Clock()
// Lyymmdd hh:mm:ss.uuuuuu file:line
tmp[n] = severityChar[s-1]
n++
if year < 2000 {
year = 2000
}
n += buf.twoDigits(n, year-2000)
n += buf.twoDigits(n, int(month))
n += buf.twoDigits(n, day)
n += copy(tmp[n:], cp[ttycolor.Gray]) // gray for time, file & line
tmp[n] = ' '
n++
n += buf.twoDigits(n, hour)
tmp[n] = ':'
n++
n += buf.twoDigits(n, minute)
tmp[n] = ':'
n++
n += buf.twoDigits(n, second)
tmp[n] = '.'
n++
n += buf.nDigits(6, n, now.Nanosecond()/1000, '0')
tmp[n] = ' '
n++
if gid > 0 {
n += buf.someDigits(n, gid)
tmp[n] = ' '
n++
}
buf.Write(tmp[:n])
buf.WriteString(file)
tmp[0] = ':'
n = buf.someDigits(1, line)
n++
// Extra space between the header and the actual message for scannability.
tmp[n] = ' '
n++
n += copy(tmp[n:], cp[ttycolor.Reset])
tmp[n] = ' '
n++
buf.Write(tmp[:n])
return buf
}
// Some custom tiny helper functions to print the log header efficiently.
const digits = "0123456789"
// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i].
// Returns two.
func (buf *buffer) twoDigits(i, d int) int {
buf.tmp[i+1] = digits[d%10]
d /= 10
buf.tmp[i] = digits[d%10]
return 2
}
// nDigits formats an n-digit integer at buf.tmp[i],
// padding with pad on the left.
// It assumes d >= 0. Returns n.
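// For example, nDigits(6, i, 1234, '0') writes "001234" at buf.tmp[i].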
func (buf *buffer) nDigits(n, i, d int, pad byte) int {
j := n - 1
for ; j >= 0 && d > 0; j-- {
buf.tmp[i+j] = digits[d%10]
d /= 10
}
for ; j >= 0; j-- {
buf.tmp[i+j] = pad
}
return n
}
// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i].
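// For example, someDigits(i, 4096) writes "4096" at buf.tmp[i:] and returns 4.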
func (buf *buffer) someDigits(i, d int) int {
// Print into the top, then copy down. We know there's space for at least
// a 10-digit number.
j := len(buf.tmp)
for {
j--
buf.tmp[j] = digits[d%10]
d /= 10
if d == 0 {
break
}
}
return copy(buf.tmp[i:], buf.tmp[j:])
}
func formatLogEntry(entry Entry, stacks []byte, cp ttycolor.Profile) *buffer {
buf := formatHeader(entry.Severity, timeutil.Unix(0, entry.Time),
int(entry.Goroutine), entry.File, int(entry.Line), cp)
_, _ = buf.WriteString(entry.Message)
if buf.Bytes()[buf.Len()-1] != '\n' {
_ = buf.WriteByte('\n')
}
if len(stacks) > 0 {
buf.Write(stacks)
}
return buf
}
func init() {
// Default stderrThreshold and fileThreshold to log everything.
// This will be the default in tests unless overridden; the CLI
// commands set their default separately in cli/flags.go
logging.stderrThreshold = Severity_INFO
logging.fileThreshold = Severity_INFO
logging.pcsPool = sync.Pool{
New: func() interface{} {
return [1]uintptr{}
},
}
logging.prefix = program
logging.setVState(0, nil, false)
logging.gcNotify = make(chan struct{}, 1)
logging.fatalCh = make(chan struct{})
go flushDaemon()
go signalFlusher()
}
// signalFlusher flushes the log(s) every time SIGHUP is received.
func signalFlusher() {
ch := sysutil.RefreshSignaledChan()
for sig := range ch {
Infof(context.Background(), "%s received, flushing logs", sig)
Flush()
}
}
// LoggingToStderr returns true if log messages of the given severity
// are visible on stderr.
func LoggingToStderr(s Severity) bool {
return s >= logging.stderrThreshold.get()
}
// StartGCDaemon starts the log file GC -- this must be called after
// command-line parsing has completed so that no data is lost when the
// user configures larger max sizes than the defaults.
//
// The logger's GC daemon stops when the provided context is canceled.
func StartGCDaemon(ctx context.Context) {
go logging.gcDaemon(ctx)
}
// Flush flushes all pending log I/O.
func Flush() {
logging.lockAndFlushAll()
secondaryLogRegistry.mu.Lock()
defer secondaryLogRegistry.mu.Unlock()
for _, l := range secondaryLogRegistry.mu.loggers {
// Some loggers (e.g. the audit log) want to keep all the files.
l.logger.lockAndFlushAll()
}
}
// SetSync configures whether logging synchronizes all writes.
func SetSync(sync bool) {
logging.lockAndSetSync(sync)
func() {
secondaryLogRegistry.mu.Lock()
defer secondaryLogRegistry.mu.Unlock()
for _, l := range secondaryLogRegistry.mu.loggers {
if !sync && l.forceSyncWrites {
// We're not changing this.
continue
}
l.logger.lockAndSetSync(sync)
}
}()
if sync {
// There may be something in the buffers already; flush it.
Flush()
}
}
// loggingT collects all the global state of the logging setup.
type loggingT struct {
noStderrRedirect bool
// Directory prefix where to store this logger's files.
logDir DirName
// Name prefix for log files.
prefix string
// Level flag for output to stderr. Handled atomically.
stderrThreshold Severity
// Level flag for output to files.
fileThreshold Severity
// freeList is a list of byte buffers, maintained under freeListMu.
freeList *buffer
// freeListMu maintains the free list. It is separate from the main mutex
// so buffers can be grabbed and printed to without holding the main lock,
// for better parallelization.
freeListMu syncutil.Mutex
// mu protects the remaining elements of this structure and is
// used to synchronize logging.
mu syncutil.Mutex
// file holds the log file writer.
file flushSyncWriter
// syncWrites if true calls file.Flush and file.Sync on every log write.
syncWrites bool
// pcsPool maintains a set of [1]uintptr buffers to be used in V to avoid
// allocating every time we compute the caller's PC.
pcsPool sync.Pool
// vmap is a cache of the V Level for each V() call site, identified by PC.
// It is wiped whenever the vmodule flag changes state.
vmap map[uintptr]level
// filterLength stores the length of the vmodule filter chain. If greater
// than zero, it means vmodule is enabled. It may be read safely
// using sync.LoadInt32, but is only modified under mu.
filterLength int32
// traceLocation is the state of the -log_backtrace_at flag.
traceLocation traceLocation
	// disableDaemons can be used to turn off both the GC and flush daemons.
disableDaemons bool
// These flags are modified only under lock, although verbosity may be fetched
// safely using atomic.LoadInt32.
vmodule moduleSpec // The state of the --vmodule flag.
	verbosity level // V logging level, the value of the --verbosity flag.
exitOverride struct {
f func(int) // overrides os.Exit when non-nil; testing only
hideStack bool // hides stack trace; only in effect when f is not nil
}
gcNotify chan struct{} // notify GC daemon that a new log file was created
fatalCh chan struct{} // closed on fatal error
interceptor atomic.Value // InterceptorFn
// The Cluster ID is reported on every new log file so as to ease the correlation
// of panic reports with self-reported log files.
clusterID string
}
// buffer holds a byte Buffer for reuse. The zero value is ready for use.
type buffer struct {
bytes.Buffer
tmp [64]byte // temporary byte array for creating headers.
next *buffer
}
var logging loggingT
// SetClusterID stores the Cluster ID for further reference.
func SetClusterID(clusterID string) {
// Ensure that the clusterID is logged with the same format as for
// new log files, even on the first log file. This ensures that grep
// will always find it.
file, line, _ := caller.Lookup(1)
logging.outputLogEntry(Severity_INFO, file, line,
fmt.Sprintf("[config] clusterID: %s", clusterID))
// Perform the change proper.
logging.mu.Lock()
defer logging.mu.Unlock()
if logging.clusterID != "" {
panic("clusterID already set")
}
logging.clusterID = clusterID
}
// setVState sets a consistent state for V logging.
// l.mu is held.
func (l *loggingT) setVState(verbosity level, filter []modulePat, setFilter bool) {
// Turn verbosity off so V will not fire while we are in transition.
logging.verbosity.set(0)
// Ditto for filter length.
atomic.StoreInt32(&logging.filterLength, 0)
// Set the new filters and wipe the pc->Level map if the filter has changed.
if setFilter {
logging.vmodule.filter = filter
logging.vmap = make(map[uintptr]level)
}
// Things are consistent now, so enable filtering and verbosity.
// They are enabled in order opposite to that in V.
atomic.StoreInt32(&logging.filterLength, int32(len(filter)))
logging.verbosity.set(verbosity)
}
// getBuffer returns a new, ready-to-use buffer.
func (l *loggingT) getBuffer() *buffer {
l.freeListMu.Lock()
b := l.freeList
if b != nil {
l.freeList = b.next
}
l.freeListMu.Unlock()
if b == nil {
b = new(buffer)
} else {
b.next = nil
b.Reset()
}
return b
}
// putBuffer returns a buffer to the free list.
func (l *loggingT) putBuffer(b *buffer) {
if b.Len() >= 256 {
// Let big buffers die a natural death.
return
}
l.freeListMu.Lock()
b.next = l.freeList
l.freeList = b
l.freeListMu.Unlock()
}
// ensureFile ensures that l.file is set and valid.
func (l *loggingT) ensureFile() error {
if l.file == nil {
return l.createFile()
}
return nil
}
// writeToFile writes to the file and applies the synchronization policy.
func (l *loggingT) writeToFile(data []byte) error {
if _, err := l.file.Write(data); err != nil {
return err
}
if l.syncWrites {
_ = l.file.Flush()
_ = l.file.Sync()
}
return nil
}
// outputLogEntry marshals a log entry proto into bytes, and writes
// the data to the log files. If a trace location is set, stack traces
// are added to the entry before marshaling.
func (l *loggingT) outputLogEntry(s Severity, file string, line int, msg string) {
// Set additional details in log entry.
now := timeutil.Now()
entry := MakeEntry(s, now.UnixNano(), file, line, msg)
if f, ok := l.interceptor.Load().(InterceptorFn); ok && f != nil {
f(entry)
return
}
// TODO(tschottdorf): this is a pretty horrible critical section.
l.mu.Lock()
var stacks []byte
var fatalTrigger chan struct{}
if s == Severity_FATAL {
// Close l.fatalCh if it is not already closed (note that we're
// holding l.mu to guard against concurrent closes).
select {
case <-l.fatalCh:
default:
close(l.fatalCh)
}
switch traceback {
case tracebackSingle:
stacks = getStacks(false)
case tracebackAll:
stacks = getStacks(true)
}
stacks = append(stacks, []byte(fatalErrorPostamble)...)
logExitFunc = func(error) {} // If we get a write error, we'll still exit.
// We don't want to hang forever writing our final log message. If
// things are broken (for example, if the disk fills up and there
// are cascading errors and our process manager has stopped
// reading from its side of a stderr pipe), it's more important to
// let the process exit than limp along.
//
// Note that we do not use os.File.SetWriteDeadline because not
// all files support this (for example, plain files on a network
// file system do not support deadlines but can block
// indefinitely).
//
// https://github.com/znbasedb/znbase/issues/23119
fatalTrigger = make(chan struct{})
exitFunc := os.Exit
if l.exitOverride.f != nil {
if l.exitOverride.hideStack {
stacks = []byte("stack trace omitted via SetExitFunc)\n")
}
exitFunc = l.exitOverride.f
}
exitCalled := make(chan struct{})
// This defer prevents outputLogEntry() from returning until the
// exit function has been called.
defer func() {
<-exitCalled
}()
go func() {
select {
case <-time.After(10 * time.Second):
case <-fatalTrigger:
}
exitFunc(255) // C++ uses -1, which is silly because it's anded with 255 anyway.
close(exitCalled)
}()
} else if l.traceLocation.isSet() {
if l.traceLocation.match(file, line) {
stacks = getStacks(false)
}
}
if s >= l.stderrThreshold.get() || (s == Severity_FATAL && l.stderrRedirected()) {
// We force-copy FATAL messages to stderr, because the process is bound
// to terminate and the user will want to know why.
l.outputToStderr(entry, stacks)
}
if l.logDir.IsSet() && s >= l.fileThreshold.get() {
if err := l.ensureFile(); err != nil {
// Make sure the message appears somewhere.
l.outputToStderr(entry, stacks)
l.exitLocked(err)
l.mu.Unlock()
return
}
buf := l.processForFile(entry, stacks)
data := buf.Bytes()
if err := l.writeToFile(data); err != nil {
l.exitLocked(err)
l.mu.Unlock()
return
}
l.putBuffer(buf)
}
// Flush and exit on fatal logging.
if s == Severity_FATAL {
l.flushAndSync(true /*doSync*/)
close(fatalTrigger)
// Note: although it seems like the function is allowed to return
// below when s == Severity_FATAL, this is not so, because the
// anonymous function func() { <-exitCalled } is deferred
// above. That function ensures that outputLogEntry() will wait
// until the exit function has been called. If the exit function
// is os.Exit, it will never return, outputLogEntry()'s defer will
// never complete and all is well. If the exit function was
// overridden, then the client that has overridden the exit
// function is expecting log.Fatal to return and all is well too.
}
l.mu.Unlock()
}
// printPanicToFile copies the panic details to the log file. This is
// useful when the standard error is not redirected to the log file
// (!stderrRedirected), as the go runtime will only print panics to
// stderr.
func (l *loggingT) printPanicToFile(r interface{}) {
if !l.logDir.IsSet() {
// There's no log file. Nothing to do.
return
}
l.mu.Lock()
defer l.mu.Unlock()
if err := l.ensureFile(); err != nil {
fmt.Fprintf(OrigStderr, "log: %v", err)
return
}
panicBytes := []byte(fmt.Sprintf("%v\n\n%s\n", r, debug.Stack()))
if err := l.writeToFile(panicBytes); err != nil {
fmt.Fprintf(OrigStderr, "log: %v", err)
return
}
}
func (l *loggingT) outputToStderr(entry Entry, stacks []byte) {
buf := l.processForStderr(entry, stacks)
if _, err := OrigStderr.Write(buf.Bytes()); err != nil {
l.exitLocked(err)
}
l.putBuffer(buf)
}
// processForStderr formats a log entry for output to standard error.
func (l *loggingT) processForStderr(entry Entry, stacks []byte) *buffer {
return formatLogEntry(entry, stacks, ttycolor.StderrProfile)
}
// processForFile formats a log entry for output to a file.
func (l *loggingT) processForFile(entry Entry, stacks []byte) *buffer {
return formatLogEntry(entry, stacks, nil)
}
// getStacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines.
func getStacks(all bool) []byte {
// We don't know how big the traces are, so grow a few times if they don't fit. Start large, though.
n := 10000
if all {
n = 100000
}
var trace []byte
for i := 0; i < 5; i++ {
trace = make([]byte, n)
nbytes := runtime.Stack(trace, all)
if nbytes < len(trace) {
return trace[:nbytes]
}
n *= 2
}
return trace
}
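// Hedged usage sketch: callers grab a best-effort dump and append it to a
// log entry, e.g.
//
//	stacks := getStacks(true) // all goroutines
//	fmt.Fprintf(OrigStderr, "%s", stacks)
//
// If five doublings (10k up to 160k, or 100k up to 1.6M for all
// goroutines) still don't fit, the truncated buffer is returned as-is.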
// logExitFunc provides a simple mechanism to override the default behavior
// of exiting on error. Used in testing and to guarantee we reach a required exit
// for fatal logs. Instead, exit could be a function rather than a method but that
// would make its use clumsier.
var logExitFunc func(error)
// exitLocked is called if there is trouble creating or writing log files, or
// writing to stderr. It flushes the logs and exits the program; there's no
// point in hanging around. l.mu is held.
func (l *loggingT) exitLocked(err error) {
l.mu.AssertHeld()
// Either stderr or our log file is broken. Try writing the error to both
// streams in the hope that one still works or else the user will have no idea
// why we crashed.
outputs := make([]io.Writer, 2)
outputs[0] = OrigStderr
if f, ok := l.file.(*syncBuffer); ok {
// Don't call syncBuffer's Write method, because it can call back into
// exitLocked. Go directly to syncBuffer's underlying writer.
outputs[1] = f.Writer
} else {
outputs[1] = l.file
}
for _, w := range outputs {
if w == nil {
continue
}
fmt.Fprintf(w, "log: exiting because of error: %s\n", err)
}
// If logExitFunc is set, we do that instead of exiting.
if logExitFunc != nil {
logExitFunc(err)
return
}
l.flushAndSync(true /*doSync*/)
if l.exitOverride.f != nil {
l.exitOverride.f(2)
} else {
os.Exit(2)
}
}
// syncBuffer joins a bufio.Writer to its underlying file, providing access to the
// file's Sync method and providing a wrapper for the Write method that provides log
// file rotation. There are conflicting methods, so the file cannot be embedded.
// l.mu is held for all its methods.
type syncBuffer struct {
logger *loggingT
*bufio.Writer
file *os.File
lastRotation int64
nbytes int64 // The number of bytes written to this file
}
func (sb *syncBuffer) Sync() error {
return sb.file.Sync()
}
func (sb *syncBuffer) Write(p []byte) (n int, err error) {
if sb.nbytes+int64(len(p)) >= atomic.LoadInt64(&LogFileMaxSize) {
if err := sb.rotateFile(timeutil.Now()); err != nil {
sb.logger.exitLocked(err)
}
}
n, err = sb.Writer.Write(p)
sb.nbytes += int64(n)
if err != nil {
sb.logger.exitLocked(err)
}
return
}
// rotateFile closes the syncBuffer's file and starts a new one.
func (sb *syncBuffer) rotateFile(now time.Time) error {
if sb.file != nil {
if err := sb.Flush(); err != nil {
return err
}
if err := sb.file.Close(); err != nil {
return err
}
}
var err error
sb.file, sb.lastRotation, _, err = create(&sb.logger.logDir, sb.logger.prefix, now, sb.lastRotation)
sb.nbytes = 0
if err != nil {
return err
}
// Redirect stderr to the current INFO log file in order to capture panic
// stack traces that are written by the Go runtime to stderr. Note that if
// --logtostderr is true we'll never enter this code path and panic stack
// traces will go to the original stderr as you would expect.
if sb.logger.stderrRedirected() {
// NB: any concurrent output to stderr may straddle the old and new
// files. This doesn't apply to log messages as we won't reach this code
// unless we're not logging to stderr.
if err := hijackStderr(sb.file); err != nil {
return err
}
}
sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)
messages := make([]string, 0, 6)
messages = append(messages,
fmt.Sprintf("[config] file created at: %s\n", now.Format("2006/01/02 15:04:05")),
fmt.Sprintf("[config] running on machine: %s\n", host),
fmt.Sprintf("[config] binary: %s\n", build.GetInfo().Short()),
fmt.Sprintf("[config] arguments: %s\n", os.Args),
)
if sb.logger.clusterID != "" {
messages = append(messages, fmt.Sprintf("[config] clusterID: %s\n", sb.logger.clusterID))
}
// Including a non-ascii character in the first 1024 bytes of the log helps
// viewers that attempt to guess the character encoding.
messages = append(messages, fmt.Sprintf("line format: [IWEF]yymmdd hh:mm:ss.uuuuuu goid file:line msg utf8=\u2713\n"))
f, l, _ := caller.Lookup(1)
for _, msg := range messages {
buf := formatLogEntry(Entry{
Severity: Severity_INFO,
Time: now.UnixNano(),
Goroutine: goid.Get(),
File: f,
Line: int64(l),
Message: msg,
}, nil, nil)
var n int
n, err = sb.file.Write(buf.Bytes())
sb.nbytes += int64(n)
if err != nil {
return err
}
logging.putBuffer(buf)
}
select {
case sb.logger.gcNotify <- struct{}{}:
default:
}
return nil
}
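// A freshly rotated file therefore begins with a header block along these
// lines, each message wrapped in the standard entry header (values below
// are illustrative, not taken from a real run):
//
//	[config] file created at: 2021/01/02 15:04:05
//	[config] running on machine: host1
//	[config] binary: znbase v1.0.0
//	[config] arguments: [./znbase start]
//	line format: [IWEF]yymmdd hh:mm:ss.uuuuuu goid file:line msg utf8=✓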
// bufferSize sizes the buffer associated with each log file. It's large
// so that log records can accumulate without the logging thread blocking
// on disk I/O. The flushDaemon will block instead.
const bufferSize = 256 * 1024
func (l *loggingT) closeFileLocked() error {
if l.file != nil {
if sb, ok := l.file.(*syncBuffer); ok {
if err := sb.file.Close(); err != nil {
return err
}
}
l.file = nil
}
return restoreStderr()
}
// createFile creates the log file.
// l.mu is held.
func (l *loggingT) createFile() error {
now := timeutil.Now()
if l.file == nil {
sb := &syncBuffer{
logger: l,
}
if err := sb.rotateFile(now); err != nil {
return err
}
l.file = sb
}
return nil
}
// flushInterval is the delay between periodic flushes of the buffered log data.
const flushInterval = time.Second
// syncInterval is the multiple of flushInterval where the log is also synced to disk.
const syncInterval = 30
// flushDaemon periodically flushes and syncs the log file buffers.
//
// Flush propagates the in-memory buffer inside ZNBaseDB to the
// in-memory buffer(s) of the OS. The flush is relatively frequent so
// that a human operator can see "up to date" logging data in the log
// file.
//
// Syncs ensure that the OS commits the data to disk. Syncs are less
// frequent because they can incur more significant I/O costs.
func flushDaemon() {
syncCounter := 1
// This doesn't need to be Stop()'d as the loop never escapes.
for range time.Tick(flushInterval) {
doSync := syncCounter == syncInterval
syncCounter = (syncCounter + 1) % syncInterval
// Flush the main log.
logging.mu.Lock()
if !logging.disableDaemons {
logging.flushAndSync(doSync)
}
logging.mu.Unlock()
// Flush the secondary logs.
secondaryLogRegistry.mu.Lock()
for _, l := range secondaryLogRegistry.mu.loggers {
l.logger.mu.Lock()
if !l.logger.disableDaemons {
l.logger.flushAndSync(doSync)
}
l.logger.mu.Unlock()
}
secondaryLogRegistry.mu.Unlock()
}
}
// lockAndFlushAll is like flushAll but locks l.mu first.
func (l *loggingT) lockAndFlushAll() {
l.mu.Lock()
l.flushAndSync(true /*doSync*/)
l.mu.Unlock()
}
// lockAndSetSync configures syncWrites
func (l *loggingT) lockAndSetSync(sync bool) {
l.mu.Lock()
l.syncWrites = sync
l.mu.Unlock()
}
// flushAndSync flushes the current log and, if doSync is set,
// attempts to sync its data to disk.
// l.mu is held.
func (l *loggingT) flushAndSync(doSync bool) {
if l.file == nil {
return
}
// If we can't sync within this duration, exit the process.
t := time.AfterFunc(maxSyncDuration, func() {
// NB: the disk-stall-detected roachtest matches on this message.
Shout(context.Background(), Severity_FATAL, fmt.Sprintf(
"disk stall detected: unable to sync log files within %s", maxSyncDuration,
))
})
defer t.Stop()
_ = l.file.Flush() // ignore error
if doSync {
_ = l.file.Sync() // ignore error
}
}
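// The AfterFunc above is a general watchdog pattern: arm a timer before a
// potentially blocking syscall and disarm it once the call returns. A
// minimal standalone sketch (names hypothetical, not part of this
// package):
//
//	watchdog := time.AfterFunc(maxSyncDuration, func() {
//		panic("sync stalled") // fires only if Sync blocks too long
//	})
//	defer watchdog.Stop()
//	_ = f.Sync()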
func (l *loggingT) gcDaemon(ctx context.Context) {
l.gcOldFiles()
for {
select {
case <-ctx.Done():
return
case <-l.gcNotify:
}
l.mu.Lock()
if !l.disableDaemons {
l.gcOldFiles()
}
l.mu.Unlock()
}
}
func (l *loggingT) gcOldFiles() {
dir, err := l.logDir.get()
if err != nil {
// No log directory configured. Nothing to do.
return
}
// This only lists the log files for the current logger (sharing the
// prefix).
allFiles, err := l.listLogFiles()
if err != nil {
fmt.Fprintf(OrigStderr, "unable to GC log files: %s\n", err)
return
}
logFilesCombinedMaxSize := atomic.LoadInt64(&LogFilesCombinedMaxSize)
files := selectFiles(allFiles, math.MaxInt64)
if len(files) == 0 {
return
}
// files is sorted with the newest log files first (which we want
// to keep). Note that we always keep the most recent log file.
sum := files[0].SizeBytes
for _, f := range files[1:] {
sum += f.SizeBytes
if sum < logFilesCombinedMaxSize {
continue
}
path := filepath.Join(dir, f.Name)
// archive log file
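// NB: the deferred Close calls below run when gcOldFiles returns, not at
// the end of each loop iteration, so open handles accumulate for the
// duration of the GC pass.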
if LogFilesArchiveOn {
// create gzip file used by gzip writer
tf, err := os.Create(path + ".gz")
if err != nil {
fmt.Fprintln(OrigStderr, err)
return
}
defer func() {
if err := tf.Close(); err != nil {
fmt.Fprintln(OrigStderr, err)
}
}()
// writer for gzip with default compression
gz, err := gzip.NewWriterLevel(tf, gzip.DefaultCompression)
if err != nil {
fmt.Fprintln(OrigStderr, err)
return
}
defer func() {
if err := gz.Close(); err != nil {
fmt.Fprintln(OrigStderr, err)
}
}()
// open file and write to Writer
fs, err := os.Open(path)
if err != nil {
fmt.Fprintln(OrigStderr, err)
return
}
defer func() {
if err := fs.Close(); err != nil {
fmt.Fprintln(OrigStderr, err)
}
}()
// compress log file to gzip file
if _, err := io.Copy(gz, fs); err != nil {
fmt.Fprintln(OrigStderr, err)
return
}
}
// delete log file
if err := os.Remove(path); err != nil {
fmt.Fprintln(OrigStderr, err)
}
}
}
// copyStandardLogTo arranges for messages written to the Go "log"
// package's default logs to also appear in the ZNBaseDB logs with
// the specified severity. Subsequent changes to the standard log's
// default output location or format may break this behavior.
//
// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not
// recognized, copyStandardLogTo panics.
func copyStandardLogTo(severityName string) {
sev, ok := SeverityByName(severityName)
if !ok {
panic(fmt.Sprintf("copyStandardLogTo(%q): unrecognized Severity name", severityName))
}
// Set a log format that captures the user's file and line:
// d.go:23: message
stdLog.SetFlags(stdLog.Lshortfile)
stdLog.SetOutput(logBridge(sev))
}
// logBridge provides the Write method that enables copyStandardLogTo to connect
// Go's standard logs to the logs provided by this package.
type logBridge Severity
// Write parses the standard logging line and passes its components to the
// logger for Severity(lb).
func (lb logBridge) Write(b []byte) (n int, err error) {
var (
file = "???"
line = 1
text string
)
// Split "d.go:23: message" into "d.go", "23", and "message".
if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 {
text = fmt.Sprintf("bad log format: %s", b)
} else {
file = string(parts[0])
text = string(parts[2][1 : len(parts[2])-1]) // skip leading space and trailing newline
line, err = strconv.Atoi(string(parts[1]))
if err != nil {
text = fmt.Sprintf("bad line number: %s", b)
line = 1
}
}
logging.outputLogEntry(Severity(lb), file, line, text)
return len(b), nil
}
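// Hedged example: after copyStandardLogTo("INFO"), a standard-library call
// such as stdLog.Print("hello") is rendered by the "log" package as
// "d.go:23: hello" (because of the Lshortfile flag), parsed above into
// (file, line, text), and re-emitted through outputLogEntry at
// Severity_INFO.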
// NewStdLogger creates a *stdLog.Logger that forwards messages to the
// ZNBaseDB logs with the specified severity.
func NewStdLogger(severity Severity) *stdLog.Logger {
return stdLog.New(logBridge(severity), "", stdLog.Lshortfile)
}
// setV computes and remembers the V level for a given PC
// when vmodule is enabled.
// File pattern matching takes the basename of the file, stripped
// of its .go suffix, and uses filepath.Match, which is a little more
// general than the *? matching used in C++.
// l.mu is held.
func (l *loggingT) setV(pc [1]uintptr) level {
frame, _ := runtime.CallersFrames(pc[:]).Next()
file := frame.File
// The file is something like /a/b/c/d.go. We want just the d.
if strings.HasSuffix(file, ".go") {
file = file[:len(file)-3]
}
if slash := strings.LastIndexByte(file, '/'); slash >= 0 {
file = file[slash+1:]
}
for _, filter := range l.vmodule.filter {
if filter.match(file) {
l.vmap[pc[0]] = filter.level
return filter.level
}
}
l.vmap[pc[0]] = 0
return 0
}
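// Hedged example of the matching rules above: with --vmodule=gossip=3, a
// call site in .../gossip/gossip.go is stripped to the basename "gossip",
// matches the pattern via filepath.Match, and is cached in l.vmap at
// level 3, so V(2) and V(3) return true there while staying off elsewhere.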
func v(level level) bool {
return VDepth(int32(level), 1)
}
// InterceptorFn is the type of function accepted by Intercept().
type InterceptorFn func(entry Entry)
// Intercept diverts log traffic to the given function `f`. When `f` is not nil,
// the logging package begins operating at full verbosity (i.e. `V(n) == true`
// for all `n`) but nothing will be printed to the logs. Instead, `f` is invoked
// for each log entry.
//
// To end log interception, invoke `Intercept()` with `f == nil`. Note that
// interception does not terminate atomically, that is, the originally supplied
// callback may still be invoked after a call to `Intercept` with `f == nil`.
func Intercept(ctx context.Context, f InterceptorFn) {
logging.Intercept(ctx, f)
}
func (l *loggingT) Intercept(ctx context.Context, f InterceptorFn) {
// TODO(tschottdorf): restore sanity so that all methods have a *loggingT
// receiver.
if f != nil {
logDepth(ctx, 0, Severity_WARNING, "log traffic is now intercepted; log files will be incomplete", nil)
}
l.interceptor.Store(f) // intentionally also when f == nil
if f == nil {
logDepth(ctx, 0, Severity_INFO, "log interception is now stopped; normal logging resumes", nil)
}
}
// VDepth reports whether verbosity at the call site is at least the requested
// level.
func VDepth(l int32, depth int) bool {
// This function tries hard to be cheap unless there's work to do.
// The fast path is three atomic loads and compares.
// Here is a cheap but safe test to see if V logging is enabled globally.
if logging.verbosity.get() >= level(l) {
return true
}
if f, ok := logging.interceptor.Load().(InterceptorFn); ok && f != nil {
return true
}
// It's off globally but vmodule may still be set.
// Here is another cheap but safe test to see if vmodule is enabled.
if atomic.LoadInt32(&logging.filterLength) > 0 {
// Grab a buffer to use for reading the program counter. Keeping the
// interface{} version around to Put back into the pool rather than
// Put-ting the array saves an interface allocation.
poolObj := logging.pcsPool.Get()
pcs := poolObj.([1]uintptr)
// We prefer not to use a defer in this function, which can be used in hot
// paths, because a defer anywhere in the body of a function causes a call
// to runtime.deferreturn at the end of that function, which has a
// measurable performance penalty when in a very hot path.
// defer logging.pcsPool.Put(pcs)
if runtime.Callers(2+depth, pcs[:]) == 0 {
logging.pcsPool.Put(poolObj)
return false
}
logging.mu.Lock()
v, ok := logging.vmap[pcs[0]]
if !ok {
v = logging.setV(pcs)
}
logging.mu.Unlock()
logging.pcsPool.Put(poolObj)
return v >= level(l)
}
return false
}
|
[
"\"GOTRACEBACK\""
] |
[] |
[
"GOTRACEBACK"
] |
[]
|
["GOTRACEBACK"]
|
go
| 1 | 0 | |
amlpp/__init__.py
|
from amlpp import transformers
from amlpp import fit_model
from amlpp import conveyor
from amlpp import architect
from amlpp import additional
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
vendor/github.com/mongodb/grip/vendor/github.com/shirou/gopsutil/process/process_test.go
|
package process
import (
"fmt"
"net"
"os"
"os/exec"
"os/user"
"reflect"
"runtime"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/shirou/gopsutil/internal/common"
"github.com/stretchr/testify/assert"
)
var mu sync.Mutex
func skipIfNotImplementedErr(t *testing.T, err error) {
if err == common.ErrNotImplementedError {
t.Skip("not implemented")
}
}
func testGetProcess() Process {
checkPid := os.Getpid() // process.test
ret, _ := NewProcess(int32(checkPid))
return *ret
}
func Test_Pids(t *testing.T) {
ret, err := Pids()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("error %v", err)
}
if len(ret) == 0 {
t.Errorf("could not get pids %v", ret)
}
}
func Test_Pids_Fail(t *testing.T) {
if runtime.GOOS != "darwin" {
t.Skip("darwin only")
}
mu.Lock()
defer mu.Unlock()
invoke = common.FakeInvoke{Suffix: "fail"}
ret, err := Pids()
skipIfNotImplementedErr(t, err)
invoke = common.Invoke{}
if err != nil {
t.Errorf("error %v", err)
}
if len(ret) != 9 {
t.Errorf("wrong getted pid nums: %v/%d", ret, len(ret))
}
}
func Test_Pid_exists(t *testing.T) {
checkPid := os.Getpid()
ret, err := PidExists(int32(checkPid))
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("error %v", err)
}
if ret == false {
t.Errorf("could not get process exists: %v", ret)
}
}
func Test_NewProcess(t *testing.T) {
checkPid := os.Getpid()
ret, err := NewProcess(int32(checkPid))
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("error %v", err)
}
empty := &Process{}
if runtime.GOOS != "windows" { // Windows pid is 0
if empty == ret {
t.Errorf("error %v", ret)
}
}
}
func Test_Process_memory_maps(t *testing.T) {
checkPid := os.Getpid()
ret, err := NewProcess(int32(checkPid))
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("error %v", err)
}
// ungrouped memory maps
mmaps, err := ret.MemoryMaps(false)
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("memory map get error %v", err)
}
empty := MemoryMapsStat{}
for _, m := range *mmaps {
if m == empty {
t.Errorf("memory map get error %v", m)
}
}
// grouped memory maps
mmaps, err = ret.MemoryMaps(true)
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("memory map get error %v", err)
}
if len(*mmaps) != 1 {
t.Errorf("grouped memory maps length (%v) is not equal to 1", len(*mmaps))
}
if (*mmaps)[0] == empty {
t.Errorf("memory map is empty")
}
}
func Test_Process_MemoryInfo(t *testing.T) {
p := testGetProcess()
v, err := p.MemoryInfo()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("getting memory info error %v", err)
}
empty := MemoryInfoStat{}
if v == nil || *v == empty {
t.Errorf("could not get memory info %v", v)
}
}
func Test_Process_CmdLine(t *testing.T) {
p := testGetProcess()
v, err := p.Cmdline()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("getting cmdline error %v", err)
}
if !strings.Contains(v, "process.test") {
t.Errorf("invalid cmd line %v", v)
}
}
func Test_Process_CmdLineSlice(t *testing.T) {
p := testGetProcess()
v, err := p.CmdlineSlice()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Fatalf("getting cmdline slice error %v", err)
}
if !reflect.DeepEqual(v, os.Args) {
t.Errorf("returned cmdline slice not as expected:\nexp: %v\ngot: %v", os.Args, v)
}
}
func Test_Process_Ppid(t *testing.T) {
p := testGetProcess()
v, err := p.Ppid()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("getting ppid error %v", err)
}
if v == 0 {
t.Errorf("return value is 0 %v", v)
}
}
func Test_Process_Status(t *testing.T) {
p := testGetProcess()
v, err := p.Status()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("getting status error %v", err)
}
if v != "R" && v != "S" {
t.Errorf("could not get state %v", v)
}
}
func Test_Process_Terminal(t *testing.T) {
p := testGetProcess()
_, err := p.Terminal()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("getting terminal error %v", err)
}
}
func Test_Process_IOCounters(t *testing.T) {
p := testGetProcess()
v, err := p.IOCounters()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("getting iocounter error %v", err)
return
}
empty := &IOCountersStat{}
if v == empty {
t.Errorf("error %v", v)
}
}
func Test_Process_NumCtx(t *testing.T) {
p := testGetProcess()
_, err := p.NumCtxSwitches()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("getting numctx error %v", err)
return
}
}
func Test_Process_Nice(t *testing.T) {
p := testGetProcess()
n, err := p.Nice()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("getting nice error %v", err)
}
if n != 0 && n != 20 && n != 8 {
t.Errorf("invalid nice: %d", n)
}
}
func Test_Process_NumThread(t *testing.T) {
p := testGetProcess()
n, err := p.NumThreads()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("getting NumThread error %v", err)
}
if n < 0 {
t.Errorf("invalid NumThread: %d", n)
}
}
func Test_Process_Threads(t *testing.T) {
p := testGetProcess()
n, err := p.NumThreads()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("getting NumThread error %v", err)
}
if n < 0 {
t.Errorf("invalid NumThread: %d", n)
}
ts, err := p.Threads()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("getting Threads error %v", err)
}
if len(ts) != int(n) {
t.Errorf("unexpected number of threads: %v vs %v", len(ts), n)
}
}
func Test_Process_Name(t *testing.T) {
p := testGetProcess()
n, err := p.Name()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("getting name error %v", err)
}
if !strings.Contains(n, "process.test") {
t.Errorf("invalid Exe %s", n)
}
}
func Test_Process_Exe(t *testing.T) {
p := testGetProcess()
n, err := p.Exe()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("getting Exe error %v", err)
}
if !strings.Contains(n, "process.test") {
t.Errorf("invalid Exe %s", n)
}
}
func Test_Process_CpuPercent(t *testing.T) {
p := testGetProcess()
percent, err := p.Percent(0)
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("error %v", err)
}
duration := time.Duration(1000) * time.Microsecond
time.Sleep(duration)
percent, err = p.Percent(0)
if err != nil {
t.Errorf("error %v", err)
}
numcpu := runtime.NumCPU()
// if percent < 0.0 || percent > 100.0*float64(numcpu) { // TODO
if percent < 0.0 {
t.Fatalf("CPUPercent value is invalid: %f, %d", percent, numcpu)
}
}
func Test_Process_CpuPercentLoop(t *testing.T) {
p := testGetProcess()
numcpu := runtime.NumCPU()
for i := 0; i < 2; i++ {
duration := time.Duration(100) * time.Microsecond
percent, err := p.Percent(duration)
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("error %v", err)
}
// if percent < 0.0 || percent > 100.0*float64(numcpu) { // TODO
if percent < 0.0 {
t.Fatalf("CPUPercent value is invalid: %f, %d", percent, numcpu)
}
}
}
func Test_Process_CreateTime(t *testing.T) {
if os.Getenv("CIRCLECI") == "true" {
t.Skip("Skip CI")
}
p := testGetProcess()
c, err := p.CreateTime()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("error %v", err)
}
if c < 1420000000 {
t.Errorf("process created time is wrong.")
}
gotElapsed := time.Since(time.Unix(int64(c/1000), 0))
maxElapsed := 5 * time.Second
if gotElapsed >= maxElapsed {
t.Errorf("this process has not been running for %v", gotElapsed)
}
}
func Test_Parent(t *testing.T) {
p := testGetProcess()
c, err := p.Parent()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Fatalf("error %v", err)
}
if c == nil {
t.Fatalf("could not get parent")
}
if c.Pid == 0 {
t.Fatalf("wrong parent pid")
}
}
func Test_Connections(t *testing.T) {
p := testGetProcess()
ch0 := make(chan string)
ch1 := make(chan string)
go func() { // TCP listening goroutine
addr, err := net.ResolveTCPAddr("tcp", "localhost:0") // dynamically get a random open port from OS
if err != nil {
t.Errorf("unable to resolve localhost: %v", err) // was t.Skip, which must not be called from a non-test goroutine
return
}
l, err := net.ListenTCP(addr.Network(), addr)
if err != nil {
t.Errorf("unable to listen on %v: %v", addr, err)
return
}
defer l.Close()
ch0 <- l.Addr().String()
for {
conn, err := l.Accept()
if err != nil {
t.Errorf("unable to accept connection: %v", err)
return
}
ch1 <- l.Addr().String()
defer conn.Close()
}
}()
go func() { // TCP client goroutine
tcpServerAddr := <-ch0
net.Dial("tcp", tcpServerAddr)
}()
tcpServerAddr := <-ch1
tcpServerAddrIP := strings.Split(tcpServerAddr, ":")[0]
tcpServerAddrPort, err := strconv.ParseUint(strings.Split(tcpServerAddr, ":")[1], 10, 32)
if err != nil {
t.Errorf("unable to parse tcpServerAddr port: %v", err)
}
c, err := p.Connections()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("error %v", err)
}
if len(c) == 0 {
t.Errorf("no connections found")
}
found := 0
for _, connection := range c {
if connection.Status == "ESTABLISHED" && (connection.Laddr.IP == tcpServerAddrIP && connection.Laddr.Port == uint32(tcpServerAddrPort)) || (connection.Raddr.IP == tcpServerAddrIP && connection.Raddr.Port == uint32(tcpServerAddrPort)) {
found++
}
}
if found != 2 { // two established connections, one for the server, the other for the client
t.Errorf("wrong connections: %+v", c)
}
}
func Test_Children(t *testing.T) {
p := testGetProcess()
var cmd *exec.Cmd
if runtime.GOOS == "windows" {
cmd = exec.Command("ping", "localhost", "-n", "4")
} else {
cmd = exec.Command("sleep", "3")
}
assert.Nil(t, cmd.Start())
time.Sleep(100 * time.Millisecond)
c, err := p.Children()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Fatalf("error %v", err)
}
if len(c) == 0 {
t.Fatalf("children is empty")
}
if c[0].Pid != int32(cmd.Process.Pid) {
t.Errorf("could not find child %d", cmd.Process.Pid)
}
}
func Test_Username(t *testing.T) {
myPid := os.Getpid()
currentUser, _ := user.Current()
myUsername := currentUser.Username
process, _ := NewProcess(int32(myPid))
pidUsername, err := process.Username()
skipIfNotImplementedErr(t, err)
assert.Equal(t, myUsername, pidUsername)
t.Log(pidUsername)
}
func Test_CPUTimes(t *testing.T) {
pid := os.Getpid()
process, err := NewProcess(int32(pid))
skipIfNotImplementedErr(t, err)
assert.Nil(t, err)
spinSeconds := 0.2
cpuTimes0, err := process.Times()
skipIfNotImplementedErr(t, err)
assert.Nil(t, err)
// Spin for a duration of spinSeconds
t0 := time.Now()
tGoal := t0.Add(time.Duration(spinSeconds*1000) * time.Millisecond)
assert.Nil(t, err)
for time.Now().Before(tGoal) {
// This block intentionally left blank
}
cpuTimes1, err := process.Times()
assert.Nil(t, err)
if cpuTimes0 == nil || cpuTimes1 == nil {
t.FailNow()
}
measuredElapsed := cpuTimes1.Total() - cpuTimes0.Total()
message := fmt.Sprintf("Measured %fs != spun time of %fs\ncpuTimes0=%v\ncpuTimes1=%v",
measuredElapsed, spinSeconds, cpuTimes0, cpuTimes1)
assert.True(t, measuredElapsed > float64(spinSeconds)/5, message)
assert.True(t, measuredElapsed < float64(spinSeconds)*5, message)
}
func Test_OpenFiles(t *testing.T) {
pid := os.Getpid()
p, err := NewProcess(int32(pid))
skipIfNotImplementedErr(t, err)
assert.Nil(t, err)
v, err := p.OpenFiles()
skipIfNotImplementedErr(t, err)
assert.Nil(t, err)
assert.NotEmpty(t, v) // test always open files.
for _, vv := range v {
assert.NotEqual(t, "", vv.Path)
}
}
func Test_Kill(t *testing.T) {
var cmd *exec.Cmd
if runtime.GOOS == "windows" {
cmd = exec.Command("ping", "localhost", "-n", "4")
} else {
cmd = exec.Command("sleep", "3")
}
assert.Nil(t, cmd.Start())
time.Sleep(100 * time.Millisecond)
p, err := NewProcess(int32(cmd.Process.Pid))
skipIfNotImplementedErr(t, err)
assert.Nil(t, err)
err = p.Kill()
skipIfNotImplementedErr(t, err)
assert.Nil(t, err)
cmd.Wait()
}
func Test_IsRunning(t *testing.T) {
var cmd *exec.Cmd
if runtime.GOOS == "windows" {
cmd = exec.Command("ping", "localhost", "-n", "2")
} else {
cmd = exec.Command("sleep", "1")
}
cmd.Start()
p, err := NewProcess(int32(cmd.Process.Pid))
assert.Nil(t, err)
running, err := p.IsRunning()
if err != nil {
t.Fatalf("IsRunning error: %v", err)
}
if !running {
t.Fatalf("process should be found running")
}
cmd.Wait()
running, err = p.IsRunning()
if err != nil {
t.Fatalf("IsRunning error: %v", err)
}
if running {
t.Fatalf("process should NOT be found running")
}
}
|
[
"\"CIRCLECI\""
] |
[] |
[
"CIRCLECI"
] |
[]
|
["CIRCLECI"]
|
go
| 1 | 0 | |
providers/fastly/fastly_provider.go
|
// Copyright 2019 The Terraformer Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fastly
import (
"errors"
"os"
"github.com/GoogleCloudPlatform/terraformer/terraformutils"
)
type FastlyProvider struct { //nolint
terraformutils.Provider
customerID string
apiKey string
}
func (p *FastlyProvider) Init(args []string) error {
if os.Getenv("FASTLY_API_KEY") == "" {
return errors.New("set FASTLY_API_KEY env var")
}
p.apiKey = os.Getenv("FASTLY_API_KEY")
if os.Getenv("FASTLY_CUSTOMER_ID") == "" {
return errors.New("set FASTLY_CUSTOMER_ID env var")
}
p.customerID = os.Getenv("FASTLY_CUSTOMER_ID")
return nil
}
func (p *FastlyProvider) GetName() string {
return "fastly"
}
func (p *FastlyProvider) GetProviderData(arg ...string) map[string]interface{} {
return map[string]interface{}{
"provider": map[string]interface{}{
"fastly": map[string]interface{}{
"customer_id": p.customerID,
},
},
}
}
func (FastlyProvider) GetResourceConnections() map[string]map[string][]string {
return map[string]map[string][]string{}
}
func (p *FastlyProvider) GetSupportedService() map[string]terraformutils.ServiceGenerator {
return map[string]terraformutils.ServiceGenerator{
"service_v1": &ServiceV1Generator{},
"tls_subscription": &TLSSubscriptionGenerator{},
"user": &UserGenerator{},
}
}
func (p *FastlyProvider) InitService(serviceName string, verbose bool) error {
var isSupported bool
if _, isSupported = p.GetSupportedService()[serviceName]; !isSupported {
return errors.New("fastly: " + serviceName + " not supported service")
}
p.Service = p.GetSupportedService()[serviceName]
p.Service.SetName(serviceName)
p.Service.SetVerbose(verbose)
p.Service.SetProviderName(p.GetName())
p.Service.SetArgs(map[string]interface{}{
"customer_id": p.customerID,
"api_key": p.apiKey,
})
return nil
}
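// Hedged usage sketch (values are placeholders, error handling elided):
//
//	os.Setenv("FASTLY_API_KEY", "xxxx")
//	os.Setenv("FASTLY_CUSTOMER_ID", "yyyy")
//	p := &FastlyProvider{}
//	if err := p.Init(nil); err == nil {
//		_ = p.InitService("service_v1", false)
//	}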
|
[
"\"FASTLY_API_KEY\"",
"\"FASTLY_API_KEY\"",
"\"FASTLY_CUSTOMER_ID\"",
"\"FASTLY_CUSTOMER_ID\""
] |
[] |
[
"FASTLY_API_KEY",
"FASTLY_CUSTOMER_ID"
] |
[]
|
["FASTLY_API_KEY", "FASTLY_CUSTOMER_ID"]
|
go
| 2 | 0 | |
examples/pwr_run/checkpointing/final_3level/final4_3level_obsolete/job67.py
|
"""
Trains a VGG network on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.001
args_model = 'vgg16'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_final4_3level/' + job_name + '*'
total_epochs = 52
starting_epoch = 0
# first step is to update the PID
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
if '16' in args_model:
base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '19' in args_model:
base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
model.add(layers.BatchNormalization())
model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
if epoch_waste_time > 0:
send_signal.send(args.node, 10002, message)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_final4_3level/' + job_name + '_' + str(current_epoch) + '.h5')
print('(SIGTERM) terminating the process')
message = job_name + ' checkpoint'
send_signal.send(args.node, 10002, message)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
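# Hedged note: the scheduler is assumed to checkpoint this job by sending
# SIGTERM (kill -15 <pid>), which runs terminateProcess above to save
# '<job_name>_<epoch>.h5' and exit; a later invocation with --resume picks
# the checkpoint back up via the glob at the top of the script.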
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
first_epoch_start = 0
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch, first_epoch_start
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
if epoch == starting_epoch and args.resume:
first_epoch_start = time.time()
message = job_name + ' d_end'
send_signal.send(args.node, 10002, message)
elif epoch == starting_epoch:
first_epoch_start = time.time()
if epoch == starting_epoch:
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
def on_epoch_end(self, epoch, logs=None):
if epoch == starting_epoch:
first_epoch_time = int(time.time() - first_epoch_start)
message = job_name + ' 1st_epoch ' + str(first_epoch_time)
send_signal.send(args.node, 10002, message)
progress = round((epoch+1) / round(total_epochs/2), 2)
message = job_name + ' completion ' + str(progress)
send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
|
[] |
[] |
[
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"]
|
python
| 2 | 0 | |
tests/cli_tests/test_command_input.py
|
import os
import pytest
def test_wrong_select_db_index(cli):
cli.sendline("select 1")
cli.expect(["OK", "127.0.0.1"])
cli.sendline("select 128")
cli.expect(["DB index is out of range", "127.0.0.1:6379[1]>"])
if int(os.environ["REDIS_VERSION"]) > 5:
text = "value is not an integer or out of range"
else:
text = "invalid DB index"
cli.sendline("select abc")
cli.expect([text, "127.0.0.1:6379[1]>"])
cli.sendline("select 15")
cli.expect("OK")
def test_set_command_with_shash(clean_redis, cli):
cli.sendline("set a \\hello\\") # legal redis command
cli.expect("OK")
cli.sendline("get a")
cli.expect(r"hello")
def test_enter_key_binding(clean_redis, cli):
cli.send("set")
cli.expect("set")
cli.send("\033[B") # down
cli.sendline() # enter
cli.sendline(" a 'hello'")
cli.expect("OK")
cli.sendline("get a")
cli.expect(r"hello")
@pytest.mark.skipif("int(os.environ['REDIS_VERSION']) < 6")
def test_auth_hidden_password_with_username(clean_redis, cli):
cli.send("auth default hello-world")
cli.expect("default")
cli.expect(r"\*{11}")
@pytest.mark.skipif("int(os.environ['REDIS_VERSION']) > 5")
def test_auth_hidden_password(clean_redis, cli):
cli.send("auth hello-world")
cli.expect("auth")
cli.expect(r"\*{11}")
def test_hello_command_is_not_supported(cli):
cli.sendline("hello 3")
cli.expect("IRedis currently not support RESP3")
def test_abort_reading_connection(cli):
cli.sendline("blpop mylist 30")
cli.send(chr(3))
cli.expect(
r"KeyboardInterrupt received! User canceled reading response!", timeout=10
)
cli.sendline("set foo bar")
cli.expect("OK")
cli.sendline("get foo")
cli.expect("bar")
|
[] |
[] |
[
"REDIS_VERSION"
] |
[]
|
["REDIS_VERSION"]
|
python
| 1 | 0 | |
train.py
|
# -*- coding: UTF-8 -*-
import torch
import torch.nn as nn
from torch.autograd import Variable
import datasets
from models import *
# import torch_util
import os, shutil
import argparse
import test
import torchvision
import settings
# os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
# Hyper Parameters
num_epochs = 300
batch_size = 20
learning_rate = 0.001
# device = torch_util.select_device()
device = torch.device("cpu")
def main(args):
cnn = CNN().to(device)
cnn.train()
criterion = nn.MultiLabelSoftMarginLoss()
optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)
if args.resume:
cnn.load_state_dict(torch.load(args.model_path, map_location=device))
max_acc = 0
# Train the Model
train_dataloader = datasets.get_train_data_loader()
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_dataloader):
images = Variable(images)
labels = Variable(labels.float())
predict_labels = cnn(images)
loss = criterion(predict_labels, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i+1) % 2 == 0:
print("epoch: %03g \t step: %03g \t loss: %.5f \t\r" % (epoch, i+1, loss.item()))
torch.save(cnn.state_dict(), "./weights/cnn_%03g.pt" % epoch)
print("epoch: %03g \t step: %03g \t loss: %.5f \t" % (epoch, i, loss.item()))
torch.save(cnn.state_dict(), "./weights/cnn_%03g.pt" % epoch)
acc = test.test_data("./weights/cnn_%03g.pt" % epoch)
if max_acc < acc:
print("update accuracy %.5f." % acc)
max_acc = acc
shutil.copy("./weights/cnn_%03g.pt" % epoch, "./weights/cnn_best.pt")
else:
print("do not update %.5f." % acc)
torch.save(cnn.state_dict(), "./weights/cnn_last.pt")
print("save last model")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="load path")
parser.add_argument('--model-path', type=str, default="./weights/cnn_0.pt")
parser.add_argument('--resume',action='store_true')
args = parser.parse_args()
main(args)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
util/unzip_test.go
|
// Copyright 2015 Keybase, Inc. All rights reserved. Use of
// this source code is governed by the included BSD license.
package util
import (
"fmt"
"os"
"path/filepath"
"runtime"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// testZipPath is a valid zip file
var testZipPath = filepath.Join(os.Getenv("GOPATH"), "src/github.com/keybase/go-updater/test/test.zip")
// testSymZipPath is a valid zip file with a symbolic link
var testSymZipPath = filepath.Join(os.Getenv("GOPATH"), "src/github.com/keybase/go-updater/test/test-with-sym.zip")
// testCorruptedZipPath is a corrupted zip file (flipped a bit)
var testCorruptedZipPath = filepath.Join(os.Getenv("GOPATH"), "src/github.com/keybase/go-updater/test/test-corrupted2.zip")
// testInvalidZipPath is not a valid zip file
var testInvalidZipPath = filepath.Join(os.Getenv("GOPATH"), "src/github.com/keybase/go-updater/test/test-invalid.zip")
func assertFileExists(t *testing.T, path string) {
t.Logf("Checking %s", path)
fileExists, err := FileExists(path)
assert.NoError(t, err)
assert.True(t, fileExists)
}
func testUnzipOverValid(t *testing.T, path string) string {
destinationPath := TempPath("", "TestUnzipOver.")
noCheck := func(sourcePath, destinationPath string) error { return nil }
err := UnzipOver(path, "test", destinationPath, noCheck, "", testLog)
require.NoError(t, err)
dirExists, err := FileExists(destinationPath)
assert.NoError(t, err)
assert.True(t, dirExists)
assertFileExists(t, filepath.Join(destinationPath, "testfile"))
assertFileExists(t, filepath.Join(destinationPath, "testfolder"))
assertFileExists(t, filepath.Join(destinationPath, "testfolder", "testsubfolder"))
assertFileExists(t, filepath.Join(destinationPath, "testfolder", "testsubfolder", "testfile2"))
// Unzip again over existing path
err = UnzipOver(path, "test", destinationPath, noCheck, "", testLog)
require.NoError(t, err)
dirExists2, err := FileExists(destinationPath)
require.NoError(t, err)
require.True(t, dirExists2)
fileExists2, err := FileExists(filepath.Join(destinationPath, "testfile"))
require.NoError(t, err)
require.True(t, fileExists2)
// Unzip again over existing path, fail check
failCheck := func(sourcePath, destinationPath string) error { return fmt.Errorf("Failed check") }
err = UnzipOver(testZipPath, "test", destinationPath, failCheck, "", testLog)
assert.Error(t, err)
return destinationPath
}
func TestUnzipOverValid(t *testing.T) {
destinationPath := testUnzipOverValid(t, testZipPath)
defer RemoveFileAtPath(destinationPath)
}
func TestUnzipOverSymlink(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("Symlink in zip unsupported on Windows")
}
destinationPath := testUnzipOverValid(t, testSymZipPath)
defer RemoveFileAtPath(destinationPath)
assertFileExists(t, filepath.Join(destinationPath, "testfolder", "testlink"))
}
func TestUnzipOverInvalidPath(t *testing.T) {
noCheck := func(sourcePath, destinationPath string) error { return nil }
err := UnzipOver(testZipPath, "test", "", noCheck, "", testLog)
assert.Error(t, err)
destinationPath := TempPath("", "TestUnzipOverInvalidPath.")
defer RemoveFileAtPath(destinationPath)
err = UnzipOver("/badfile.zip", "test", destinationPath, noCheck, "", testLog)
assert.Error(t, err)
err = UnzipOver("", "test", destinationPath, noCheck, "", testLog)
assert.Error(t, err)
err = unzipOver("", "", testLog)
assert.Error(t, err)
}
func TestUnzipOverInvalidZip(t *testing.T) {
noCheck := func(sourcePath, destinationPath string) error { return nil }
destinationPath := TempPath("", "TestUnzipOverInvalidZip.")
defer RemoveFileAtPath(destinationPath)
err := UnzipOver(testInvalidZipPath, "test", destinationPath, noCheck, "", testLog)
t.Logf("Error: %s", err)
assert.Error(t, err)
}
func TestUnzipOverInvalidContents(t *testing.T) {
noCheck := func(sourcePath, destinationPath string) error { return nil }
destinationPath := TempPath("", "TestUnzipOverInvalidContents.")
defer RemoveFileAtPath(destinationPath)
err := UnzipOver(testInvalidZipPath, "invalid", destinationPath, noCheck, "", testLog)
t.Logf("Error: %s", err)
assert.Error(t, err)
}
func TestUnzipOverCorrupted(t *testing.T) {
noCheck := func(sourcePath, destinationPath string) error { return nil }
destinationPath := TempPath("", "TestUnzipOverCorrupted.")
defer RemoveFileAtPath(destinationPath)
err := UnzipOver(testCorruptedZipPath, "test", destinationPath, noCheck, "", testLog)
t.Logf("Error: %s", err)
assert.Error(t, err)
}
func tempDir(t *testing.T) string {
tmpDir := TempPath("", "TestUnzipOver")
err := MakeDirs(tmpDir, 0700, testLog)
require.NoError(t, err)
return tmpDir
}
func TestUnzipOverMoveExisting(t *testing.T) {
noCheck := func(sourcePath, destinationPath string) error { return nil }
destinationPath := TempPath("", "TestUnzipOverMoveExisting.")
defer RemoveFileAtPath(destinationPath)
tmpDir := tempDir(t)
defer RemoveFileAtPath(tmpDir)
err := UnzipOver(testZipPath, "test", destinationPath, noCheck, tmpDir, testLog)
assert.NoError(t, err)
err = UnzipOver(testZipPath, "test", destinationPath, noCheck, tmpDir, testLog)
assert.NoError(t, err)
assertFileExists(t, filepath.Join(tmpDir, filepath.Base(destinationPath)))
}
|
[
"\"GOPATH\"",
"\"GOPATH\"",
"\"GOPATH\"",
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
attack_range.py
|
import os
import sys
import argparse
from modules import logger
from pathlib import Path
from modules.CustomConfigParser import CustomConfigParser
from modules.TerraformController import TerraformController
# need to set this ENV var due to a macOS High Sierra forking bug
# see this discussion for more details: https://github.com/ansible/ansible/issues/34056#issuecomment-352862252
os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
VERSION = 1
def init(args):
config = args.config
print("""
starting program loaded for B1 battle droid
||/__'`.
|//()'-.:
|-.||
|o(o)
|||\\\ .==._
|||(o)==::'
`|T ""
()
|\\
||\\
()()
||//
|//
.'=`=.
""")
# parse config
attack_range_config = Path(config)
if attack_range_config.is_file():
print("attack_range is using config at path {0}".format(
attack_range_config))
configpath = str(attack_range_config)
else:
print("ERROR: attack_range failed to find a config file")
sys.exit(1)
# Parse config
parser = CustomConfigParser()
config = parser.load_conf(configpath)
log = logger.setup_logging(config['log_path'], config['log_level'])
log.info("INIT - attack_range v" + str(VERSION))
if config['cloud_provider'] == 'azure':
os.environ["AZURE_SUBSCRIPTION_ID"] = config['azure_subscription_id']
if config['attack_range_password'] == 'Pl3ase-k1Ll-me:p':
log.error('ERROR: please change attack_range_password in attack_range.conf')
sys.exit(1)
if config['cloud_provider'] == 'azure' and config['zeek_sensor'] == '1':
log.error('ERROR: zeek sensor is only available for aws at the moment. Please change zeek_sensor to 0 and try again.')
sys.exit(1)
return TerraformController(config, log), config, log
def show(args):
controller, _, _ = init(args)
if args.machines:
controller.list_machines()
def simulate(args):
controller, config, _ = init(args)
target = args.target
simulation_techniques = args.simulation_technique
simulation_atomics = args.simulation_atomics
# lets give CLI priority over config file for pre-configured techniques
if simulation_techniques:
pass
else:
simulation_techniques = config['art_run_techniques']
if not simulation_atomics:
simulation_atomics = 'no'
return controller.simulate(target, simulation_techniques, simulation_atomics)
def dump(args):
controller, _, _ = init(args)
controller.dump_attack_data(args.dump_name, args.last_sim)
def replay(args):
controller, _, _ = init(args)
controller.replay_attack_data(args.dump_name, args.dump)
def build(args):
controller, _, _ = init(args)
controller.build()
def destroy(args):
controller, _, _ = init(args)
controller.destroy()
def stop(args):
controller, _, _ = init(args)
controller.stop()
def resume(args):
controller, _, _ = init(args)
controller.resume()
def test(args):
controller, _, _ = init(args)
return controller.test(args.test_file)
def main(args):
# grab arguments
parser = argparse.ArgumentParser(
description="Use `attack_range.py action -h` to get help with any Attack Range action")
parser.add_argument("-c", "--config", required=False, default="attack_range.conf",
help="path to the configuration file of the attack range")
parser.add_argument("-v", "--version", default=False, action="version", version="version: {0}".format(VERSION),
help="shows current attack_range version")
parser.set_defaults(func=lambda _: parser.print_help())
actions_parser = parser.add_subparsers(title="Attack Range actions", dest="action")
build_parser = actions_parser.add_parser("build", help="Builds attack range instances")
simulate_parser = actions_parser.add_parser("simulate", help="Simulates attack techniques")
destroy_parser = actions_parser.add_parser("destroy", help="destroy attack range instances")
stop_parser = actions_parser.add_parser("stop", help="stops attack range instances")
resume_parser = actions_parser.add_parser("resume", help="resumes previously stopped attack range instances")
show_parser = actions_parser.add_parser("show", help="list machines")
test_parser = actions_parser.add_parser("test")
dump_parser = actions_parser.add_parser("dump", help="dump locally logs from attack range instances")
replay_parser = actions_parser.add_parser("replay", help="replay dumps into the Splunk Enterprise server")
# Build arguments
build_parser.set_defaults(func=build)
# Destroy arguments
destroy_parser.set_defaults(func=destroy)
# Stop arguments
stop_parser.set_defaults(func=stop)
# Resume arguments
resume_parser.set_defaults(func=resume)
# Simulation arguments
simulate_parser.add_argument("-t", "--target", required=True,
help="target for attack simulation. Use the name of the aws EC2 name")
simulate_parser.add_argument("-st", "--simulation_technique", required=False, type=str, default="",
help="comma delimited list of MITRE ATT&CK technique ID to simulate in the "
"attack_range, example: T1117, T1118, requires --simulation flag")
simulate_parser.add_argument("-sa", "--simulation_atomics", required=False, type=str, default="",
help="specify dedicated Atomic Red Team atomics to simulate in the attack_range, "
"example: Regsvr32 remote COM scriptlet execution for T1117")
simulate_parser.set_defaults(func=simulate)
# Dump Arguments
dump_parser.add_argument("-dn", "--dump_name", required=True,
help="name for the dumped attack data")
dump_parser.add_argument("--last-sim", required=False, action='store_true',
help="overrides dumps.yml time and dumps from the start of previous simulation")
dump_parser.set_defaults(func=dump)
# Replay Arguments
replay_parser.add_argument("-dn", "--dump_name", required=True,
help="name for the dumped attack data")
replay_parser.add_argument("--dump", required=False,
help="name of the dump as defined in attack_data/dumps.yml")
replay_parser.set_defaults(func=replay)
# Test Arguments
test_parser.add_argument("-tf", "--test_file", required=True,
type=str, default="", help='test file for test command')
test_parser.set_defaults(func=test)
# Show arguments
show_parser.add_argument("-m", "--machines", required=False, default=False,
action="store_true", help="prints out all available machines")
show_parser.set_defaults(func=show, machines=True)
# # parse them
args = parser.parse_args()
return args.func(args)
if __name__ == "__main__":
main(sys.argv[1:])
# rnfgre rtt ol C4G12VPX
|
[] |
[] |
[
"AZURE_SUBSCRIPTION_ID",
"OBJC_DISABLE_INITIALIZE_FORK_SAFETY"
] |
[]
|
["AZURE_SUBSCRIPTION_ID", "OBJC_DISABLE_INITIALIZE_FORK_SAFETY"]
|
python
| 2 | 0 | |
src/saml2/sigver.py
|
""" Functions connected to signing and verifying.
Based on the use of xmlsec1 binaries and not the python xmlsec module.
"""
import base64
import datetime
import dateutil
import hashlib
import itertools
import logging
import os
import re
import six
import sys
from uuid import uuid4 as gen_random_key
from time import mktime
from tempfile import NamedTemporaryFile
from subprocess import Popen
from subprocess import PIPE
# importlib.resources was introduced in python 3.7
# files API from importlib.resources introduced in python 3.9
if sys.version_info[:2] >= (3, 9):
from importlib.resources import files as _resource_files
else:
from importlib_resources import files as _resource_files
from OpenSSL import crypto
import pytz
from six.moves.urllib import parse
import saml2.cryptography.asymmetric
import saml2.cryptography.pki
import saml2.xmldsig as ds
import saml2.data.templates as _data_template
from saml2 import samlp
from saml2 import SamlBase
from saml2 import SAMLError
from saml2 import extension_elements_to_elements
from saml2 import class_name
from saml2 import saml
from saml2 import ExtensionElement
from saml2.cert import OpenSSLWrapper
from saml2.cert import read_cert_from_file
from saml2.cert import CertificateError
from saml2.extension import pefim
from saml2.extension.pefim import SPCertEnc
from saml2.saml import EncryptedAssertion
from saml2.s_utils import Unsupported
from saml2.time_util import str_to_time
from saml2.xmldsig import ALLOWED_CANONICALIZATIONS
from saml2.xmldsig import ALLOWED_TRANSFORMS
from saml2.xmldsig import TRANSFORM_C14N
from saml2.xmldsig import TRANSFORM_ENVELOPED
from saml2.xmldsig import SIG_RSA_SHA1
from saml2.xmldsig import SIG_RSA_SHA224
from saml2.xmldsig import SIG_RSA_SHA256
from saml2.xmldsig import SIG_RSA_SHA384
from saml2.xmldsig import SIG_RSA_SHA512
from saml2.xmlenc import EncryptionMethod
from saml2.xmlenc import EncryptedKey
from saml2.xmlenc import CipherData
from saml2.xmlenc import CipherValue
from saml2.xmlenc import EncryptedData
from saml2.xml.schema import node_to_schema
from saml2.xml.schema import XMLSchemaError
logger = logging.getLogger(__name__)
SIG = '{{{ns}#}}{attribute}'.format(ns=ds.NAMESPACE, attribute='Signature')
# RSA_1_5 is considered deprecated
RSA_1_5 = 'http://www.w3.org/2001/04/xmlenc#rsa-1_5'
TRIPLE_DES_CBC = 'http://www.w3.org/2001/04/xmlenc#tripledes-cbc'
RSA_OAEP_MGF1P = "http://www.w3.org/2001/04/xmlenc#rsa-oaep-mgf1p"
class SigverError(SAMLError):
pass
class CertificateTooOld(SigverError):
pass
class XmlsecError(SigverError):
pass
class MissingKey(SigverError):
pass
class DecryptError(XmlsecError):
pass
class EncryptError(XmlsecError):
pass
class SignatureError(XmlsecError):
pass
class BadSignature(SigverError):
"""The signature is invalid."""
pass
def get_pem_wrapped_unwrapped(cert):
begin_cert = "-----BEGIN CERTIFICATE-----\n"
end_cert = "\n-----END CERTIFICATE-----\n"
unwrapped_cert = re.sub(f'{begin_cert}|{end_cert}', '', cert)
wrapped_cert = f'{begin_cert}{unwrapped_cert}{end_cert}'
return wrapped_cert, unwrapped_cert
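# A minimal usage sketch for the helper above (the base64 payload is a
# placeholder):
#   wrapped, unwrapped = get_pem_wrapped_unwrapped('MIIC...')
#   # wrapped   -> '-----BEGIN CERTIFICATE-----\nMIIC...\n-----END CERTIFICATE-----\n'
#   # unwrapped -> 'MIIC...' (the bare base64 body)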
def rm_xmltag(statement):
XMLTAG = "<?xml version='1.0'?>"
PREFIX1 = "<?xml version='1.0' encoding='UTF-8'?>"
PREFIX2 = '<?xml version="1.0" encoding="UTF-8"?>'
try:
_t = statement.startswith(XMLTAG)
except TypeError:
statement = statement.decode()
_t = statement.startswith(XMLTAG)
if _t:
statement = statement[len(XMLTAG):]
if statement[0] == '\n':
statement = statement[1:]
elif statement.startswith(PREFIX1):
statement = statement[len(PREFIX1):]
if statement[0] == '\n':
statement = statement[1:]
elif statement.startswith(PREFIX2):
statement = statement[len(PREFIX2):]
if statement[0] == '\n':
statement = statement[1:]
return statement
def signed(item):
"""
    Is any part of the document signed?
:param item: A Samlbase instance
:return: True if some part of it is signed
"""
if SIG in item.c_children.keys() and item.signature:
return True
else:
for prop in item.c_child_order:
child = getattr(item, prop, None)
if isinstance(child, list):
for chi in child:
if signed(chi):
return True
elif child and signed(child):
return True
return False
def get_xmlsec_binary(paths=None):
"""
Tries to find the xmlsec1 binary.
    :param paths: Non-system paths which should be searched when
looking for xmlsec1
:return: full name of the xmlsec1 binary found. If no binaries are
found then an exception is raised.
"""
if os.name == 'posix':
bin_name = ['xmlsec1']
elif os.name == 'nt':
bin_name = ['xmlsec.exe', 'xmlsec1.exe']
else: # Default !?
bin_name = ['xmlsec1']
if paths:
for bname in bin_name:
for path in paths:
fil = os.path.join(path, bname)
try:
if os.lstat(fil):
return fil
except OSError:
pass
for path in os.environ['PATH'].split(os.pathsep):
for bname in bin_name:
fil = os.path.join(path, bname)
try:
if os.lstat(fil):
return fil
except OSError:
pass
raise SigverError('Cannot find {binary}'.format(binary=bin_name))
def _get_xmlsec_cryptobackend(path=None, search_paths=None, delete_tmpfiles=True):
"""
Initialize a CryptoBackendXmlSec1 crypto backend.
This function is now internal to this module.
"""
if path is None:
path = get_xmlsec_binary(paths=search_paths)
return CryptoBackendXmlSec1(path, delete_tmpfiles=delete_tmpfiles)
NODE_NAME = 'urn:oasis:names:tc:SAML:2.0:assertion:Assertion'
ENC_NODE_NAME = 'urn:oasis:names:tc:SAML:2.0:assertion:EncryptedAssertion'
ENC_KEY_CLASS = 'EncryptedKey'
def _make_vals(val, klass, seccont, klass_inst=None, prop=None, part=False,
base64encode=False, elements_to_sign=None):
"""
    Creates a class instance with a specified value; the instance may be
    set as the value of a property on another class instance.
:param val: The value
:param klass: The value class
:param klass_inst: The class instance which has a property on which
what this function returns is a value.
:param prop: The property which the value should be assigned to.
    :param part: If the value is one of a possible list of values it should
        be handled slightly differently than if it isn't.
:return: Value class instance
"""
cinst = None
if isinstance(val, dict):
cinst = _instance(klass, val, seccont, base64encode=base64encode,
elements_to_sign=elements_to_sign)
else:
try:
cinst = klass().set_text(val)
except ValueError:
if not part:
cis = [
_make_vals(
sval,
klass,
seccont,
klass_inst,
prop,
True,
base64encode,
elements_to_sign)
for sval in val
]
setattr(klass_inst, prop, cis)
else:
raise
if part:
return cinst
else:
if cinst:
cis = [cinst]
setattr(klass_inst, prop, cis)
def _instance(klass, ava, seccont, base64encode=False, elements_to_sign=None):
instance = klass()
for prop in instance.c_attributes.values():
if prop in ava:
if isinstance(ava[prop], bool):
setattr(instance, prop, str(ava[prop]).encode())
elif isinstance(ava[prop], int):
setattr(instance, prop, str(ava[prop]))
else:
setattr(instance, prop, ava[prop])
if 'text' in ava:
instance.set_text(ava['text'], base64encode)
for prop, klassdef in instance.c_children.values():
if prop in ava:
if isinstance(klassdef, list):
# means there can be a list of values
_make_vals(ava[prop], klassdef[0], seccont, instance, prop,
base64encode=base64encode,
elements_to_sign=elements_to_sign)
else:
cis = _make_vals(ava[prop], klassdef, seccont, instance, prop,
True, base64encode, elements_to_sign)
setattr(instance, prop, cis)
if 'extension_elements' in ava:
for item in ava['extension_elements']:
instance.extension_elements.append(
ExtensionElement(item['tag']).loadd(item))
if 'extension_attributes' in ava:
for key, val in ava['extension_attributes'].items():
instance.extension_attributes[key] = val
if 'signature' in ava:
elements_to_sign.append((class_name(instance), instance.id))
return instance
# XXX will actually sign the nodes
# XXX assumes pre_signature_part has already been called
# XXX calls sign without specifying sign_alg/digest_alg
# XXX this is fine as the algs are embedded in the document
# XXX as setup by pre_signature_part
# XXX !!expects instance string!!
def signed_instance_factory(instance, seccont, elements_to_sign=None):
"""
:param instance: The instance to be signed or not
:param seccont: The security context
    :param elements_to_sign: Which parts, if any, should be signed
:return: A class instance if not signed otherwise a string
"""
if not elements_to_sign:
return instance
signed_xml = instance
if not isinstance(instance, six.string_types):
signed_xml = instance.to_string()
for (node_name, nodeid) in elements_to_sign:
signed_xml = seccont.sign_statement(
signed_xml, node_name=node_name, node_id=nodeid
)
return signed_xml
def make_temp(content, suffix="", decode=True, delete_tmpfiles=True):
"""
Create a temporary file with the given content.
This is needed by xmlsec in some cases where only strings exist when files
are expected.
:param content: The information to be placed in the file
:param suffix: The temporary file might have to have a specific
suffix in certain circumstances.
:param decode: The input content might be base64 coded. If so it
must, in some cases, be decoded before being placed in the file.
:param delete_tmpfiles: Whether to keep the tmp files or delete them when they are
no longer in use
:return: 2-tuple with file pointer ( so the calling function can
close the file) and filename (which is for instance needed by the
xmlsec function).
"""
content_encoded = (
content.encode("utf-8") if not isinstance(content, six.binary_type) else content
)
content_raw = base64.b64decode(content_encoded) if decode else content_encoded
ntf = NamedTemporaryFile(suffix=suffix, delete=delete_tmpfiles)
ntf.write(content_raw)
ntf.seek(0)
return ntf
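# Usage sketch: hand string data to xmlsec1 as a file and keep the returned
# object alive for as long as the file is needed (with delete_tmpfiles=True
# the file disappears once the object is closed or garbage collected):
#   tmp = make_temp(cert_pem, suffix='.pem', decode=False)
#   some_xmlsec_call(tmp.name)  # hypothetical consumer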
def split_len(seq, length):
return [seq[i:i + length] for i in range(0, len(seq), length)]
M2_TIME_FORMAT = '%b %d %H:%M:%S %Y'
def to_time(_time):
if not _time.endswith(' GMT'):
raise ValueError('Time does not end with GMT')
_time = _time[:-4]
return mktime(str_to_time(_time, M2_TIME_FORMAT))
def active_cert(key):
"""
    Verifies that a key is active, that is, the present time is after
    not_before and before not_after.
:param key: The Key
:return: True if the key is active else False
"""
try:
cert_str = pem_format(key)
cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str)
except AttributeError:
return False
now = pytz.UTC.localize(datetime.datetime.utcnow())
valid_from = dateutil.parser.parse(cert.get_notBefore())
valid_to = dateutil.parser.parse(cert.get_notAfter())
active = not cert.has_expired() and valid_from <= now < valid_to
return active
def cert_from_key_info(key_info, ignore_age=False):
""" Get all X509 certs from a KeyInfo instance. Care is taken to make sure
    that the certs are continuous sequences of bytes.
All certificates appearing in an X509Data element MUST relate to the
validation key by either containing it or being part of a certification
chain that terminates in a certificate containing the validation key.
:param key_info: The KeyInfo instance
:return: A possibly empty list of certs
"""
res = []
for x509_data in key_info.x509_data:
x509_certificate = x509_data.x509_certificate
cert = x509_certificate.text.strip()
cert = '\n'.join(split_len(''.join([s.strip() for s in
cert.split()]), 64))
if ignore_age or active_cert(cert):
res.append(cert)
else:
logger.info('Inactive cert')
return res
def cert_from_key_info_dict(key_info, ignore_age=False):
""" Get all X509 certs from a KeyInfo dictionary. Care is taken to make sure
    that the certs are continuous sequences of bytes.
All certificates appearing in an X509Data element MUST relate to the
validation key by either containing it or being part of a certification
chain that terminates in a certificate containing the validation key.
:param key_info: The KeyInfo dictionary
:return: A possibly empty list of certs in their text representation
"""
res = []
if 'x509_data' not in key_info:
return res
for x509_data in key_info['x509_data']:
x509_certificate = x509_data['x509_certificate']
cert = x509_certificate['text'].strip()
cert = '\n'.join(split_len(''.join(
[s.strip() for s in cert.split()]), 64))
if ignore_age or active_cert(cert):
res.append(cert)
else:
logger.info('Inactive cert')
return res
def cert_from_instance(instance):
""" Find certificates that are part of an instance
:param instance: An instance
    :return: possibly empty list of certificates
"""
if instance.signature:
if instance.signature.key_info:
return cert_from_key_info(instance.signature.key_info,
ignore_age=True)
return []
def extract_rsa_key_from_x509_cert(pem):
cert = saml2.cryptography.pki.load_pem_x509_certificate(pem)
return cert.public_key()
def pem_format(key):
return os.linesep.join([
'-----BEGIN CERTIFICATE-----',
key,
'-----END CERTIFICATE-----'
]).encode('ascii')
def import_rsa_key_from_file(filename):
with open(filename, "rb") as fd:
data = fd.read()
key = saml2.cryptography.asymmetric.load_pem_private_key(data)
return key
def parse_xmlsec_output(output):
""" Parse the output from xmlsec to try to find out if the
    command was successful or not.
:param output: The output from Popen
:return: A boolean; True if the command was a success otherwise False
"""
for line in output.splitlines():
if line == 'OK':
return True
elif line == 'FAIL':
raise XmlsecError(output)
raise XmlsecError(output)
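# xmlsec1 prints its verdict on stderr; this helper scans those lines.
# Sketch of the expected shapes (exact wording varies with xmlsec1 version):
#   parse_xmlsec_output('OK\nSignedInfo References (ok/all): 1/1\n')  # True
#   parse_xmlsec_output('FAIL\n...')  # raises XmlsecError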
def sha1_digest(msg):
return hashlib.sha1(msg).digest()
class Signer(object):
"""Abstract base class for signing algorithms."""
def __init__(self, key):
self.key = key
def sign(self, msg, key):
"""Sign ``msg`` with ``key`` and return the signature."""
raise NotImplementedError
def verify(self, msg, sig, key):
"""Return True if ``sig`` is a valid signature for ``msg``."""
raise NotImplementedError
class RSASigner(Signer):
def __init__(self, digest, key=None):
Signer.__init__(self, key)
self.digest = digest
def sign(self, msg, key=None):
return saml2.cryptography.asymmetric.key_sign(
key or self.key, msg, self.digest)
def verify(self, msg, sig, key=None):
return saml2.cryptography.asymmetric.key_verify(
key or self.key, sig, msg, self.digest)
SIGNER_ALGS = {
SIG_RSA_SHA1: RSASigner(saml2.cryptography.asymmetric.hashes.SHA1()),
SIG_RSA_SHA224: RSASigner(saml2.cryptography.asymmetric.hashes.SHA224()),
SIG_RSA_SHA256: RSASigner(saml2.cryptography.asymmetric.hashes.SHA256()),
SIG_RSA_SHA384: RSASigner(saml2.cryptography.asymmetric.hashes.SHA384()),
SIG_RSA_SHA512: RSASigner(saml2.cryptography.asymmetric.hashes.SHA512()),
}
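# Usage sketch for the signers above; the key objects would come from e.g.
# import_rsa_key_from_file (names below are placeholders):
#   signer = SIGNER_ALGS[SIG_RSA_SHA256]
#   signature = signer.sign(b'payload', private_key)
#   assert signer.verify(b'payload', signature, public_key)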
REQ_ORDER = [
'SAMLRequest',
'RelayState',
'SigAlg',
]
RESP_ORDER = [
'SAMLResponse',
'RelayState',
'SigAlg',
]
class RSACrypto(object):
def __init__(self, key):
self.key = key
def get_signer(self, sigalg, sigkey=None):
try:
signer = SIGNER_ALGS[sigalg]
except KeyError:
return None
else:
if sigkey:
signer.key = sigkey
else:
signer.key = self.key
return signer
def verify_redirect_signature(saml_msg, crypto, cert=None, sigkey=None):
"""
:param saml_msg: A dictionary with strings as values, *NOT* lists as
produced by parse_qs.
:param cert: A certificate to use when verifying the signature
:return: True, if signature verified
"""
try:
signer = crypto.get_signer(saml_msg['SigAlg'], sigkey)
except KeyError:
raise Unsupported('Signature algorithm: {alg}'.format(alg=saml_msg['SigAlg']))
else:
if saml_msg['SigAlg'] in SIGNER_ALGS:
if 'SAMLRequest' in saml_msg:
_order = REQ_ORDER
elif 'SAMLResponse' in saml_msg:
_order = RESP_ORDER
else:
raise Unsupported(
'Verifying signature on something that should not be signed'
)
_args = saml_msg.copy()
del _args['Signature'] # everything but the signature
string = '&'.join(
[
parse.urlencode({k: _args[k]})
for k in _order
if k in _args
]
).encode('ascii')
if cert:
_key = extract_rsa_key_from_x509_cert(pem_format(cert))
else:
_key = sigkey
_sign = base64.b64decode(saml_msg['Signature'])
return bool(signer.verify(string, _sign, _key))
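# Sketch of the input for verify_redirect_signature: per the HTTP-Redirect
# binding the signature covers the urlencoded parameters in REQ_ORDER or
# RESP_ORDER, so saml_msg must be a plain dict such as:
#   {'SAMLResponse': '...', 'RelayState': '...',
#    'SigAlg': SIG_RSA_SHA256, 'Signature': '<base64 signature>'}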
class CryptoBackend(object):
def version(self):
raise NotImplementedError()
def encrypt(self, text, recv_key, template, key_type):
raise NotImplementedError()
def encrypt_assertion(self, statement, enc_key, template, key_type, node_xpath):
raise NotImplementedError()
def decrypt(self, enctext, key_file):
raise NotImplementedError()
def sign_statement(self, statement, node_name, key_file, node_id):
raise NotImplementedError()
def validate_signature(self, enctext, cert_file, cert_type, node_name, node_id):
raise NotImplementedError()
ASSERT_XPATH = ''.join([
'/*[local-name()=\'{name}\']'.format(name=n)
for n in ['Response', 'EncryptedAssertion', 'Assertion']
])
class CryptoBackendXmlSec1(CryptoBackend):
"""
    CryptoBackend implementation using the external xmlsec1 binary to sign
and verify XML documents.
"""
__DEBUG = 0
def __init__(self, xmlsec_binary, delete_tmpfiles=True, **kwargs):
CryptoBackend.__init__(self, **kwargs)
if not isinstance(xmlsec_binary, six.string_types):
raise ValueError("xmlsec_binary should be of type string")
self.xmlsec = xmlsec_binary
self.delete_tmpfiles = delete_tmpfiles
try:
self.non_xml_crypto = RSACrypto(kwargs['rsa_key'])
except KeyError:
pass
def version(self):
com_list = [self.xmlsec, '--version']
pof = Popen(com_list, stderr=PIPE, stdout=PIPE)
content, _ = pof.communicate()
content = content.decode('ascii')
try:
return content.split(' ')[1]
except IndexError:
return ''
def encrypt(self, text, recv_key, template, session_key_type, xpath=''):
"""
:param text: The text to be compiled
:param recv_key: Filename of a file where the key resides
:param template: Filename of a file with the pre-encryption part
:param session_key_type: Type and size of a new session key
'des-192' generates a new 192 bits DES key for DES3 encryption
:param xpath: What should be encrypted
:return:
"""
logger.debug('Encryption input len: %d', len(text))
tmp = make_temp(text, decode=False, delete_tmpfiles=self.delete_tmpfiles)
com_list = [
self.xmlsec,
'--encrypt',
'--pubkey-cert-pem', recv_key,
'--session-key', session_key_type,
'--xml-data', tmp.name,
]
if xpath:
com_list.extend(['--node-xpath', xpath])
try:
(_stdout, _stderr, output) = self._run_xmlsec(com_list, [template])
except XmlsecError as e:
six.raise_from(EncryptError(com_list), e)
return output
def encrypt_assertion(self, statement, enc_key, template, key_type='des-192', node_xpath=None, node_id=None):
"""
Will encrypt an assertion
        :param statement: An XML document that contains the assertion to encrypt
:param enc_key: File name of a file containing the encryption key
:param template: A template for the encryption part to be added.
:param key_type: The type of session key to use.
:return: The encrypted text
"""
if isinstance(statement, SamlBase):
statement = pre_encrypt_assertion(statement)
tmp = make_temp(str(statement),
decode=False,
delete_tmpfiles=self.delete_tmpfiles)
tmp2 = make_temp(str(template),
decode=False,
delete_tmpfiles=self.delete_tmpfiles)
if not node_xpath:
node_xpath = ASSERT_XPATH
com_list = [
self.xmlsec,
'--encrypt',
'--pubkey-cert-pem', enc_key,
'--session-key', key_type,
'--xml-data', tmp.name,
'--node-xpath', node_xpath,
]
if node_id:
com_list.extend(['--node-id', node_id])
try:
(_stdout, _stderr, output) = self._run_xmlsec(com_list, [tmp2.name])
except XmlsecError as e:
six.raise_from(EncryptError(com_list), e)
return output.decode('utf-8')
def decrypt(self, enctext, key_file):
"""
:param enctext: XML document containing an encrypted part
:param key_file: The key to use for the decryption
:return: The decrypted document
"""
logger.debug('Decrypt input len: %d', len(enctext))
tmp = make_temp(enctext, decode=False, delete_tmpfiles=self.delete_tmpfiles)
com_list = [
self.xmlsec,
'--decrypt',
'--privkey-pem', key_file,
'--id-attr:Id', ENC_KEY_CLASS,
]
try:
(_stdout, _stderr, output) = self._run_xmlsec(com_list, [tmp.name])
except XmlsecError as e:
six.raise_from(DecryptError(com_list), e)
return output.decode('utf-8')
def sign_statement(self, statement, node_name, key_file, node_id):
"""
Sign an XML statement.
:param statement: The statement to be signed
:param node_name: string like 'urn:oasis:names:...:Assertion'
:param key_file: The file where the key can be found
:param node_id:
:return: The signed statement
"""
if isinstance(statement, SamlBase):
statement = str(statement)
tmp = make_temp(statement,
suffix=".xml",
decode=False,
delete_tmpfiles=self.delete_tmpfiles)
com_list = [
self.xmlsec,
'--sign',
'--privkey-pem', key_file,
'--id-attr:ID', node_name,
]
if node_id:
com_list.extend(['--node-id', node_id])
try:
(stdout, stderr, output) = self._run_xmlsec(com_list, [tmp.name])
        except XmlsecError as e:
            six.raise_from(SignatureError(com_list), e)
# this does not work if --store-signatures is used
if output:
return output.decode("utf-8")
if stdout:
return stdout.decode("utf-8")
raise SignatureError(stderr)
def validate_signature(self, signedtext, cert_file, cert_type, node_name, node_id):
"""
Validate signature on XML document.
:param signedtext: The XML document as a string
:param cert_file: The public key that was used to sign the document
:param cert_type: The file type of the certificate
:param node_name: The name of the class that is signed
:param node_id: The identifier of the node
:return: Boolean True if the signature was correct otherwise False.
"""
if not isinstance(signedtext, six.binary_type):
signedtext = signedtext.encode('utf-8')
tmp = make_temp(signedtext,
suffix=".xml",
decode=False,
delete_tmpfiles=self.delete_tmpfiles)
com_list = [
self.xmlsec,
'--verify',
'--enabled-reference-uris', 'empty,same-doc',
'--enabled-key-data', 'raw-x509-cert',
'--pubkey-cert-{type}'.format(type=cert_type), cert_file,
'--id-attr:ID', node_name,
]
if node_id:
com_list.extend(['--node-id', node_id])
try:
(_stdout, stderr, _output) = self._run_xmlsec(com_list, [tmp.name])
except XmlsecError as e:
six.raise_from(SignatureError(com_list), e)
return parse_xmlsec_output(stderr)
def _run_xmlsec(self, com_list, extra_args):
"""
Common code to invoke xmlsec and parse the output.
:param com_list: Key-value parameter list for xmlsec
:param extra_args: Positional parameters to be appended after all
key-value parameters
:result: Whatever xmlsec wrote to an --output temporary file
"""
with NamedTemporaryFile(suffix='.xml') as ntf:
com_list.extend(['--output', ntf.name])
com_list += extra_args
logger.debug('xmlsec command: %s', ' '.join(com_list))
pof = Popen(com_list, stderr=PIPE, stdout=PIPE)
p_out, p_err = pof.communicate()
p_out = p_out.decode()
p_err = p_err.decode()
if pof.returncode != 0:
errmsg = "returncode={code}\nerror={err}\noutput={out}".format(
code=pof.returncode, err=p_err, out=p_out
)
logger.error(errmsg)
raise XmlsecError(errmsg)
ntf.seek(0)
return p_out, p_err, ntf.read()
class CryptoBackendXMLSecurity(CryptoBackend):
"""
CryptoBackend implementation using pyXMLSecurity to sign and verify
XML documents.
    Encrypt and decrypt are currently unsupported by pyXMLSecurity.
    pyXMLSecurity uses lxml (libxml2) to parse XML data, but otherwise
    tries to get by with native Python code. It does native Python RSA
    signatures, or alternatively uses PyKCS11 to offload cryptographic work
    to an external PKCS#11 module.
"""
def __init__(self):
CryptoBackend.__init__(self)
def version(self):
# XXX if XMLSecurity.__init__ included a __version__, that would be
# better than static 0.0 here.
return 'XMLSecurity 0.0'
def sign_statement(self, statement, node_name, key_file, node_id):
"""
Sign an XML statement.
The parameters actually used in this CryptoBackend
        implementation are:
:param statement: XML as string
:param node_name: Name of the node to sign
:param key_file: xmlsec key_spec string(), filename,
'pkcs11://' URI or PEM data
:returns: Signed XML as string
"""
import xmlsec
import lxml.etree
xml = xmlsec.parse_xml(statement)
signed = xmlsec.sign(xml, key_file)
signed_str = lxml.etree.tostring(signed, xml_declaration=False, encoding="UTF-8")
if not isinstance(signed_str, six.string_types):
signed_str = signed_str.decode("utf-8")
return signed_str
def validate_signature(self, signedtext, cert_file, cert_type, node_name, node_id):
"""
Validate signature on XML document.
The parameters actually used in this CryptoBackend
        implementation are:
:param signedtext: The signed XML data as string
:param cert_file: xmlsec key_spec string(), filename,
'pkcs11://' URI or PEM data
:param cert_type: string, must be 'pem' for now
:returns: True on successful validation, False otherwise
"""
if cert_type != 'pem':
raise Unsupported('Only PEM certs supported here')
import xmlsec
xml = xmlsec.parse_xml(signedtext)
try:
return xmlsec.verify(xml, cert_file)
except xmlsec.XMLSigException:
return False
def security_context(conf):
""" Creates a security context based on the configuration
:param conf: The configuration, this is a Config instance
:return: A SecurityContext instance
"""
if not conf:
return None
try:
metadata = conf.metadata
except AttributeError:
metadata = None
sec_backend = None
if conf.crypto_backend == 'xmlsec1':
xmlsec_binary = conf.xmlsec_binary
if not xmlsec_binary:
try:
_path = conf.xmlsec_path
except AttributeError:
_path = []
xmlsec_binary = get_xmlsec_binary(_path)
# verify that xmlsec is where it's supposed to be
if not os.path.exists(xmlsec_binary):
err_msg = 'xmlsec binary not found: {binary}'
err_msg = err_msg.format(binary=xmlsec_binary)
raise SigverError(err_msg)
crypto = _get_xmlsec_cryptobackend(xmlsec_binary,
delete_tmpfiles=conf.delete_tmpfiles)
_file_name = conf.getattr('key_file', '')
if _file_name:
try:
rsa_key = import_rsa_key_from_file(_file_name)
except Exception as err:
logger.error('Cannot import key from {file}: {err_msg}'.format(
file=_file_name, err_msg=err))
raise
else:
sec_backend = RSACrypto(rsa_key)
elif conf.crypto_backend == 'XMLSecurity':
# new and somewhat untested pyXMLSecurity crypto backend.
crypto = CryptoBackendXMLSecurity()
else:
err_msg = 'Unknown crypto_backend {backend}'
err_msg = err_msg.format(backend=conf.crypto_backend)
raise SigverError(err_msg)
enc_key_files = []
if conf.encryption_keypairs is not None:
for _encryption_keypair in conf.encryption_keypairs:
if 'key_file' in _encryption_keypair:
enc_key_files.append(_encryption_keypair['key_file'])
return SecurityContext(
crypto,
conf.key_file,
cert_file=conf.cert_file,
metadata=metadata,
only_use_keys_in_metadata=conf.only_use_keys_in_metadata,
cert_handler_extra_class=conf.cert_handler_extra_class,
generate_cert_info=conf.generate_cert_info,
tmp_cert_file=conf.tmp_cert_file,
tmp_key_file=conf.tmp_key_file,
validate_certificate=conf.validate_certificate,
enc_key_files=enc_key_files,
encryption_keypairs=conf.encryption_keypairs,
sec_backend=sec_backend,
delete_tmpfiles=conf.delete_tmpfiles)
def encrypt_cert_from_item(item):
_encrypt_cert = None
try:
try:
_elem = extension_elements_to_elements(
item.extensions.extension_elements, [pefim, ds])
        except Exception:
_elem = extension_elements_to_elements(
item.extension_elements[0].children,
[pefim, ds])
for _tmp_elem in _elem:
if isinstance(_tmp_elem, SPCertEnc):
for _tmp_key_info in _tmp_elem.key_info:
if _tmp_key_info.x509_data is not None and len(
_tmp_key_info.x509_data) > 0:
_encrypt_cert = _tmp_key_info.x509_data[
0].x509_certificate.text
break
except Exception as _exception:
pass
if _encrypt_cert is not None:
wrapped_cert, unwrapped_cert = get_pem_wrapped_unwrapped(_encrypt_cert)
_encrypt_cert = wrapped_cert
return _encrypt_cert
class CertHandlerExtra(object):
def __init__(self):
pass
def use_generate_cert_func(self):
raise Exception('use_generate_cert_func function must be implemented')
def generate_cert(self, generate_cert_info, root_cert_string,
root_key_string):
raise Exception('generate_cert function must be implemented')
        # Expected to return (cert_string, key_string)
def use_validate_cert_func(self):
raise Exception('use_validate_cert_func function must be implemented')
def validate_cert(self, cert_str, root_cert_string, root_key_string):
raise Exception('validate_cert function must be implemented')
        # Expected to return True/False
class CertHandler(object):
def __init__(
self,
security_context,
cert_file=None, cert_type='pem',
key_file=None, key_type='pem',
generate_cert_info=None,
cert_handler_extra_class=None,
tmp_cert_file=None,
tmp_key_file=None,
verify_cert=False):
"""
        Initiates the class for handling certificates. The certificate can
        either be a single static certificate (base functionality), or a new
        certificate can be generated for each call to the function.
:param security_context:
:param cert_file:
:param cert_type:
:param key_file:
:param key_type:
:param generate_cert_info:
:param cert_handler_extra_class:
:param tmp_cert_file:
:param tmp_key_file:
:param verify_cert:
"""
self._verify_cert = False
self._generate_cert = False
        # This cert does not have to be valid; it is just the last cert to
        # be validated.
self._last_cert_verified = None
self._last_validated_cert = None
if cert_type == 'pem' and key_type == 'pem':
self._verify_cert = verify_cert is True
self._security_context = security_context
self._osw = OpenSSLWrapper()
if key_file and os.path.isfile(key_file):
self._key_str = self._osw.read_str_from_file(key_file, key_type)
else:
self._key_str = ''
if cert_file and os.path.isfile(cert_file):
self._cert_str = self._osw.read_str_from_file(cert_file,
cert_type)
else:
self._cert_str = ''
self._tmp_cert_str = self._cert_str
self._tmp_key_str = self._key_str
self._tmp_cert_file = tmp_cert_file
self._tmp_key_file = tmp_key_file
self._cert_info = None
self._generate_cert_func_active = False
if generate_cert_info is not None \
and len(self._cert_str) > 0 \
and len(self._key_str) > 0 \
and tmp_key_file is not None \
and tmp_cert_file is not None:
self._generate_cert = True
self._cert_info = generate_cert_info
self._cert_handler_extra_class = cert_handler_extra_class
def verify_cert(self, cert_file):
if self._verify_cert:
if cert_file and os.path.isfile(cert_file):
cert_str = self._osw.read_str_from_file(cert_file, 'pem')
else:
return False
self._last_validated_cert = cert_str
if self._cert_handler_extra_class is not None and \
self._cert_handler_extra_class.use_validate_cert_func():
self._cert_handler_extra_class.validate_cert(
cert_str, self._cert_str, self._key_str)
else:
valid, mess = self._osw.verify(self._cert_str, cert_str)
logger.info('CertHandler.verify_cert: %s', mess)
return valid
return True
def generate_cert(self):
return self._generate_cert
def update_cert(self, active=False, client_crt=None):
if (self._generate_cert and active) or client_crt is not None:
if client_crt is not None:
self._tmp_cert_str = client_crt
# No private key for signing
self._tmp_key_str = ''
elif self._cert_handler_extra_class is not None and \
self._cert_handler_extra_class.use_generate_cert_func():
(self._tmp_cert_str, self._tmp_key_str) = \
self._cert_handler_extra_class.generate_cert(
self._cert_info, self._cert_str, self._key_str)
else:
self._tmp_cert_str, self._tmp_key_str = self._osw \
.create_certificate(self._cert_info, request=True)
self._tmp_cert_str = self._osw.create_cert_signed_certificate(
self._cert_str, self._key_str, self._tmp_cert_str)
valid, mess = self._osw.verify(self._cert_str,
self._tmp_cert_str)
self._osw.write_str_to_file(self._tmp_cert_file, self._tmp_cert_str)
self._osw.write_str_to_file(self._tmp_key_file, self._tmp_key_str)
self._security_context.key_file = self._tmp_key_file
self._security_context.cert_file = self._tmp_cert_file
self._security_context.key_type = 'pem'
self._security_context.cert_type = 'pem'
self._security_context.my_cert = read_cert_from_file(
self._security_context.cert_file,
self._security_context.cert_type)
# How to get an RSA pub key fingerprint from a certificate
# openssl x509 -inform pem -noout -in server.crt -pubkey > publickey.pem
# openssl rsa -inform pem -noout -in publickey.pem -pubin -modulus
class SecurityContext(object):
my_cert = None
def __init__(
self,
crypto,
key_file='', key_type='pem',
cert_file='', cert_type='pem',
metadata=None,
template='',
encrypt_key_type='des-192',
only_use_keys_in_metadata=False,
cert_handler_extra_class=None,
generate_cert_info=None,
tmp_cert_file=None, tmp_key_file=None,
validate_certificate=None,
enc_key_files=None, enc_key_type='pem',
encryption_keypairs=None,
enc_cert_type='pem',
sec_backend=None,
delete_tmpfiles=True):
if not isinstance(crypto, CryptoBackend):
raise ValueError("crypto should be of type CryptoBackend")
self.crypto = crypto
if sec_backend and not isinstance(sec_backend, RSACrypto):
raise ValueError("sec_backend should be of type RSACrypto")
self.sec_backend = sec_backend
# Your private key for signing
self.key_file = key_file
self.key_type = key_type
# Your public key for signing
self.cert_file = cert_file
self.cert_type = cert_type
# Your private key for encryption
self.enc_key_files = enc_key_files
self.enc_key_type = enc_key_type
# Your public key for encryption
self.encryption_keypairs = encryption_keypairs
self.enc_cert_type = enc_cert_type
self.my_cert = read_cert_from_file(cert_file, cert_type)
self.cert_handler = CertHandler(
self,
cert_file, cert_type,
key_file, key_type,
generate_cert_info,
cert_handler_extra_class,
tmp_cert_file,
tmp_key_file,
validate_certificate)
self.cert_handler.update_cert(True)
self.metadata = metadata
self.only_use_keys_in_metadata = only_use_keys_in_metadata
if not template:
            self.template = str(_resource_files(_data_template).joinpath("template_enc.xml"))
else:
self.template = template
self.encrypt_key_type = encrypt_key_type
self.delete_tmpfiles = delete_tmpfiles
def correctly_signed(self, xml, must=False):
logger.debug('verify correct signature')
return self.correctly_signed_response(xml, must)
def encrypt(self, text, recv_key='', template='', key_type=''):
"""
xmlsec encrypt --pubkey-pem pub-userkey.pem
--session-key aes128-cbc --xml-data doc-plain.xml
--output doc-encrypted.xml session-key-template.xml
:param text: Text to encrypt
:param recv_key: A file containing the receivers public key
:param template: A file containing the XMLSEC template
:param key_type: The type of session key to use
:result: An encrypted XML text
"""
if not key_type:
key_type = self.encrypt_key_type
if not template:
template = self.template
return self.crypto.encrypt(text, recv_key, template, key_type)
def encrypt_assertion(self, statement, enc_key, template, key_type='des-192', node_xpath=None):
"""
Will encrypt an assertion
        :param statement: An XML document that contains the assertion to encrypt
:param enc_key: File name of a file containing the encryption key
:param template: A template for the encryption part to be added.
:param key_type: The type of session key to use.
:return: The encrypted text
"""
return self.crypto.encrypt_assertion(
statement, enc_key, template, key_type, node_xpath)
def decrypt_keys(self, enctext, keys=None):
""" Decrypting an encrypted text by the use of a private key.
:param enctext: The encrypted text as a string
:param keys: Keys to try to decrypt enctext with
:return: The decrypted text
"""
if not isinstance(keys, list):
keys = [keys]
keys_filtered = (key for key in keys if key)
keys_encoded = (
key.encode("ascii") if not isinstance(key, six.binary_type) else key
for key in keys_filtered
)
key_files = list(
make_temp(key, decode=False, delete_tmpfiles=self.delete_tmpfiles)
for key in keys_encoded
)
key_file_names = list(tmp.name for tmp in key_files)
        return self.decrypt(enctext, key_file=key_file_names)
def decrypt(self, enctext, key_file=None):
""" Decrypting an encrypted text by the use of a private key.
:param enctext: The encrypted text as a string
:return: The decrypted text
"""
if not isinstance(key_file, list):
key_file = [key_file]
key_files = [
key for key in itertools.chain(key_file, self.enc_key_files) if key
]
for key_file in key_files:
try:
dectext = self.crypto.decrypt(enctext, key_file)
            except XmlsecError:
continue
else:
if dectext:
return dectext
errmsg = "No key was able to decrypt the ciphertext. Keys tried: {keys}"
errmsg = errmsg.format(keys=key_files)
raise DecryptError(errmsg)
def verify_signature(self, signedtext, cert_file=None, cert_type='pem', node_name=NODE_NAME, node_id=None):
""" Verifies the signature of a XML document.
:param signedtext: The XML document as a string
:param cert_file: The public key that was used to sign the document
:param cert_type: The file type of the certificate
:param node_name: The name of the class that is signed
:param node_id: The identifier of the node
:return: Boolean True if the signature was correct otherwise False.
"""
# This is only for testing purposes, otherwise when would you receive
# stuff that is signed with your key !?
if not cert_file:
cert_file = self.cert_file
cert_type = self.cert_type
return self.crypto.validate_signature(
signedtext,
cert_file=cert_file,
cert_type=cert_type,
node_name=node_name,
node_id=node_id,
)
def _check_signature(self, decoded_xml, item, node_name=NODE_NAME, origdoc=None, must=False, only_valid_cert=False, issuer=None):
try:
_issuer = item.issuer.text.strip()
except AttributeError:
_issuer = None
if _issuer is None:
try:
_issuer = issuer.text.strip()
except AttributeError:
_issuer = None
        # More trust in certs from metadata than certs in the XML document
if self.metadata:
try:
_certs = self.metadata.certs(_issuer, 'any', 'signing')
except KeyError:
_certs = []
certs = []
for cert_name, cert in _certs:
if isinstance(cert, six.string_types):
content = pem_format(cert)
tmp = make_temp(content,
suffix=".pem",
decode=False,
delete_tmpfiles=self.delete_tmpfiles)
certs.append(tmp)
else:
certs.append(cert)
else:
certs = []
if not certs and not self.only_use_keys_in_metadata:
logger.debug('==== Certs from instance ====')
certs = [
make_temp(content=pem_format(cert),
suffix=".pem",
decode=False,
delete_tmpfiles=self.delete_tmpfiles)
for cert in cert_from_instance(item)
]
else:
logger.debug('==== Certs from metadata ==== %s: %s ====', _issuer, certs)
if not certs:
raise MissingKey(_issuer)
# validate XML with the appropriate schema
try:
_schema = node_to_schema[node_name]
except KeyError as e:
error_context = {
"message": "Signature verification failed. Unknown node type.",
"issuer": _issuer,
"type": node_name,
"document": decoded_xml,
}
raise SignatureError(error_context) from e
try:
_schema.validate(str(item))
except XMLSchemaError as e:
error_context = {
"message": "Signature verification failed. Invalid document format.",
"reason": str(e),
"ID": item.id,
"issuer": _issuer,
"type": node_name,
"document": decoded_xml,
}
raise SignatureError(error_context) from e
        # saml-core section "5.4 XML Signature Profile" defines constraints on the
# xmldsig-core facilities. It explicitly dictates that enveloped signatures
# are the only signatures allowed. This means that:
# * Assertion/RequestType/ResponseType elements must have an ID attribute
# * signatures must have a single Reference element
# * the Reference element must have a URI attribute
# * the URI attribute contains an anchor
# * the anchor points to the enclosing element's ID attribute
signed_info = item.signature.signed_info
references = signed_info.reference
signatures_must_have_a_single_reference_element = len(references) == 1
the_Reference_element_must_have_a_URI_attribute = (
signatures_must_have_a_single_reference_element
and hasattr(references[0], "uri")
)
the_URI_attribute_contains_an_anchor = (
the_Reference_element_must_have_a_URI_attribute
and references[0].uri.startswith("#")
and len(references[0].uri) > 1
)
the_anchor_points_to_the_enclosing_element_ID_attribute = (
the_URI_attribute_contains_an_anchor
and references[0].uri == "#{id}".format(id=item.id)
)
# SAML implementations SHOULD use Exclusive Canonicalization,
# with or without comments
canonicalization_method_is_c14n = (
signed_info.canonicalization_method.algorithm in ALLOWED_CANONICALIZATIONS
)
# Signatures in SAML messages SHOULD NOT contain transforms other than the
# - enveloped signature transform
# (with the identifier http://www.w3.org/2000/09/xmldsig#enveloped-signature)
# - or the exclusive canonicalization transforms
# (with the identifier http://www.w3.org/2001/10/xml-exc-c14n#
# or http://www.w3.org/2001/10/xml-exc-c14n#WithComments).
transform_algos = [
transform.algorithm
for transform in references[0].transforms.transform
]
        transform_algos_valid = ALLOWED_TRANSFORMS.intersection(transform_algos)
        transform_algos_n = len(transform_algos)
        transform_algos_valid_n = len(transform_algos_valid)
the_number_of_transforms_is_one_or_two = (
signatures_must_have_a_single_reference_element
and 1 <= transform_algos_n <= 2
)
all_transform_algs_are_allowed = (
the_number_of_transforms_is_one_or_two
            and transform_algos_n == transform_algos_valid_n
)
the_enveloped_signature_transform_is_defined = (
the_number_of_transforms_is_one_or_two
and TRANSFORM_ENVELOPED in transform_algos
)
# The <ds:Object> element is not defined for use with SAML signatures,
# and SHOULD NOT be present.
# Since it can be used in service of an attacker by carrying unsigned data,
# verifiers SHOULD reject signatures that contain a <ds:Object> element.
object_element_is_not_present = not item.signature.object
validators = {
"signatures must have a single reference element": (
signatures_must_have_a_single_reference_element
),
"the Reference element must have a URI attribute": (
the_Reference_element_must_have_a_URI_attribute
),
"the URI attribute contains an anchor": (
the_URI_attribute_contains_an_anchor
),
"the anchor points to the enclosing element ID attribute": (
the_anchor_points_to_the_enclosing_element_ID_attribute
),
"canonicalization method is c14n": canonicalization_method_is_c14n,
"the number of transforms is one or two": (
the_number_of_transforms_is_one_or_two
),
"all transform algs are allowed": all_transform_algs_are_allowed,
"the enveloped signature transform is defined": (
the_enveloped_signature_transform_is_defined
),
"object element is not present": object_element_is_not_present,
}
if not all(validators.values()):
error_context = {
"message": "Signature failed to meet constraints on xmldsig",
"validators": validators,
"item ID": item.id,
"reference URI": item.signature.signed_info.reference[0].uri,
"issuer": _issuer,
"node name": node_name,
"xml document": decoded_xml,
}
raise SignatureError(error_context)
verified = False
last_pem_file = None
for pem_fd in certs:
try:
last_pem_file = pem_fd.name
if self.verify_signature(
decoded_xml,
pem_fd.name,
node_name=node_name,
node_id=item.id,
):
verified = True
break
except XmlsecError as exc:
logger.error('check_sig: %s', exc)
pass
except Exception as exc:
logger.error('check_sig: %s', exc)
raise
if verified or only_valid_cert:
if not self.cert_handler.verify_cert(last_pem_file):
raise CertificateError('Invalid certificate!')
else:
raise SignatureError('Failed to verify signature')
return item
def check_signature(self, item, node_name=NODE_NAME, origdoc=None, must=False, issuer=None):
"""
:param item: Parsed entity
:param node_name: The name of the node/class/element that is signed
:param origdoc: The original XML string
:param must:
:return:
"""
return self._check_signature(
origdoc,
item,
node_name,
origdoc,
must=must,
issuer=issuer,
)
def correctly_signed_message(self, decoded_xml, msgtype, must=False, origdoc=None, only_valid_cert=False):
"""Check if a request is correctly signed, if we have metadata for
the entity that sent the info use that, if not use the key that are in
the message if any.
:param decoded_xml: The SAML message as an XML infoset (a string)
:param msgtype: SAML protocol message type
:param must: Whether there must be a signature
:param origdoc:
:return:
"""
attr = '{type}_from_string'.format(type=msgtype)
_func = getattr(saml, attr, None)
_func = getattr(samlp, attr, _func)
msg = _func(decoded_xml)
if not msg:
raise TypeError('Not a {type}'.format(type=msgtype))
if not msg.signature:
if must:
err_msg = 'Required signature missing on {type}'
err_msg = err_msg.format(type=msgtype)
raise SignatureError(err_msg)
else:
return msg
return self._check_signature(
decoded_xml,
msg,
class_name(msg),
origdoc,
must=must,
only_valid_cert=only_valid_cert)
def correctly_signed_authn_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
return self.correctly_signed_message(decoded_xml, 'authn_request', must, origdoc, only_valid_cert=only_valid_cert)
def correctly_signed_authn_query(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
return self.correctly_signed_message(decoded_xml, 'authn_query', must, origdoc, only_valid_cert)
def correctly_signed_logout_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
return self.correctly_signed_message(decoded_xml, 'logout_request', must, origdoc, only_valid_cert)
def correctly_signed_logout_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
return self.correctly_signed_message(decoded_xml, 'logout_response', must, origdoc, only_valid_cert)
def correctly_signed_attribute_query(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
return self.correctly_signed_message(decoded_xml, 'attribute_query', must, origdoc, only_valid_cert)
def correctly_signed_authz_decision_query(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
return self.correctly_signed_message(decoded_xml, 'authz_decision_query', must, origdoc, only_valid_cert)
def correctly_signed_authz_decision_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
return self.correctly_signed_message(decoded_xml, 'authz_decision_response', must, origdoc, only_valid_cert)
def correctly_signed_name_id_mapping_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
return self.correctly_signed_message(decoded_xml, 'name_id_mapping_request', must, origdoc, only_valid_cert)
def correctly_signed_name_id_mapping_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
return self.correctly_signed_message(decoded_xml, 'name_id_mapping_response', must, origdoc, only_valid_cert)
def correctly_signed_artifact_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
return self.correctly_signed_message(decoded_xml, 'artifact_request', must, origdoc, only_valid_cert)
def correctly_signed_artifact_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
return self.correctly_signed_message(decoded_xml, 'artifact_response', must, origdoc, only_valid_cert)
def correctly_signed_manage_name_id_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
return self.correctly_signed_message(decoded_xml, 'manage_name_id_request', must, origdoc, only_valid_cert)
def correctly_signed_manage_name_id_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
return self.correctly_signed_message(decoded_xml, 'manage_name_id_response', must, origdoc, only_valid_cert)
def correctly_signed_assertion_id_request(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
return self.correctly_signed_message(decoded_xml, 'assertion_id_request', must, origdoc, only_valid_cert)
def correctly_signed_assertion_id_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, **kwargs):
return self.correctly_signed_message(decoded_xml, 'assertion', must, origdoc, only_valid_cert)
def correctly_signed_response(self, decoded_xml, must=False, origdoc=None, only_valid_cert=False, require_response_signature=False, **kwargs):
""" Check if a instance is correctly signed, if we have metadata for
the IdP that sent the info use that, if not use the key that are in
the message if any.
:param decoded_xml: The SAML message as a XML string
:param must: Whether there must be a signature
:param origdoc:
:param only_valid_cert:
:param require_response_signature:
        :return: None if the signature cannot be verified, otherwise an instance
"""
response = samlp.any_response_from_string(decoded_xml)
if not response:
raise TypeError('Not a Response')
if response.signature:
if 'do_not_verify' in kwargs:
pass
else:
self._check_signature(decoded_xml, response,
class_name(response), origdoc)
elif require_response_signature:
raise SignatureError('Signature missing for response')
return response
def sign_statement_using_xmlsec(self, statement, **kwargs):
""" Deprecated function. See sign_statement(). """
return self.sign_statement(statement, **kwargs)
def sign_statement(self, statement, node_name, key=None, key_file=None, node_id=None):
"""Sign a SAML statement.
:param statement: The statement to be signed
:param node_name: string like 'urn:oasis:names:...:Assertion'
:param key: The key to be used for the signing, either this or
:param key_file: The file where the key can be found
:param node_id:
:return: The signed statement
"""
if not key_file and key:
content = str(key).encode()
tmp = make_temp(content, suffix=".pem", delete_tmpfiles=self.delete_tmpfiles)
key_file = tmp.name
if not key and not key_file:
key_file = self.key_file
return self.crypto.sign_statement(
statement,
node_name,
key_file,
node_id,
)
def sign_assertion(self, statement, **kwargs):
"""Sign a SAML assertion.
See sign_statement() for the kwargs.
:param statement: The statement to be signed
:return: The signed statement
"""
return self.sign_statement(
statement, class_name(saml.Assertion()), **kwargs)
def sign_attribute_query_using_xmlsec(self, statement, **kwargs):
""" Deprecated function. See sign_attribute_query(). """
return self.sign_attribute_query(statement, **kwargs)
def sign_attribute_query(self, statement, **kwargs):
"""Sign a SAML attribute query.
See sign_statement() for the kwargs.
:param statement: The statement to be signed
:return: The signed statement
"""
return self.sign_statement(
statement, class_name(samlp.AttributeQuery()), **kwargs)
def multiple_signatures(self, statement, to_sign, key=None, key_file=None, sign_alg=None, digest_alg=None):
"""
Sign multiple parts of a statement
        :param statement: The statement that should be signed, this is XML text
:param to_sign: A list of (items, id) tuples that specifies what to sign
:param key: A key that should be used for doing the signing
:param key_file: A file that contains the key to be used
:return: A possibly multiple signed statement
"""
        for (item, sid) in to_sign:
            if not sid:
                if not item.id:
                    # the loop variable shadows any sid() helper here; generate
                    # a fresh identifier instead of calling the falsy value
                    sid = item.id = 'id-{n}'.format(n=gen_random_key())
                else:
                    sid = item.id
if not item.signature:
item.signature = pre_signature_part(
ident=sid,
public_key=self.cert_file,
sign_alg=sign_alg,
digest_alg=digest_alg,
)
statement = self.sign_statement(
statement,
class_name(item),
key=key,
key_file=key_file,
node_id=sid,
)
return statement
# XXX FIXME calls DefaultSignature - remove to unveil chain of calls without proper args
def pre_signature_part(
ident,
public_key=None,
identifier=None,
digest_alg=None,
sign_alg=None,
):
"""
    If an assertion is to be signed, the signature part has to be preset
    with the algorithms to be used; this function returns such a preset
    part.
:param ident: The identifier of the assertion, so you know which assertion
was signed
:param public_key: The base64 part of a PEM file
:param identifier:
:return: A preset signature part
"""
# XXX
if not digest_alg:
digest_alg = ds.DefaultSignature().get_digest_alg()
if not sign_alg:
sign_alg = ds.DefaultSignature().get_sign_alg()
signature_method = ds.SignatureMethod(algorithm=sign_alg)
canonicalization_method = ds.CanonicalizationMethod(algorithm=TRANSFORM_C14N)
trans0 = ds.Transform(algorithm=TRANSFORM_ENVELOPED)
trans1 = ds.Transform(algorithm=TRANSFORM_C14N)
transforms = ds.Transforms(transform=[trans0, trans1])
digest_method = ds.DigestMethod(algorithm=digest_alg)
reference = ds.Reference(
uri='#{id}'.format(id=ident),
digest_value=ds.DigestValue(),
transforms=transforms,
digest_method=digest_method)
signed_info = ds.SignedInfo(
signature_method=signature_method,
canonicalization_method=canonicalization_method,
reference=reference)
signature = ds.Signature(
signed_info=signed_info,
signature_value=ds.SignatureValue())
if identifier:
signature.id = 'Signature{n}'.format(n=identifier)
# XXX remove - do not embed the cert
if public_key:
x509_data = ds.X509Data(
x509_certificate=[ds.X509Certificate(text=public_key)])
key_info = ds.KeyInfo(x509_data=x509_data)
signature.key_info = key_info
return signature
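# Usage sketch: preset the signature on an assertion before signing it
# (ds.DIGEST_SHA256 is assumed to be defined in saml2.xmldsig):
#   assertion.signature = pre_signature_part(
#       ident=assertion.id,
#       sign_alg=SIG_RSA_SHA256,
#       digest_alg=ds.DIGEST_SHA256,
#   )
#   signed = sec_context.sign_statement(str(assertion),
#                                       class_name(assertion),
#                                       node_id=assertion.id)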
# <?xml version="1.0" encoding="UTF-8"?>
# <EncryptedData Id="ED" Type="http://www.w3.org/2001/04/xmlenc#Element"
# xmlns="http://www.w3.org/2001/04/xmlenc#">
# <EncryptionMethod Algorithm="http://www.w3
# .org/2001/04/xmlenc#tripledes-cbc"/>
# <ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
# <EncryptedKey Id="EK" xmlns="http://www.w3.org/2001/04/xmlenc#">
# <EncryptionMethod Algorithm="http://www.w3
# .org/2001/04/xmlenc#rsa-1_5"/>
# <ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
# <ds:KeyName>my-rsa-key</ds:KeyName>
# </ds:KeyInfo>
# <CipherData>
# <CipherValue>
# </CipherValue>
# </CipherData>
# <ReferenceList>
# <DataReference URI="#ED"/>
# </ReferenceList>
# </EncryptedKey>
# </ds:KeyInfo>
# <CipherData>
# <CipherValue>
# </CipherValue>
# </CipherData>
# </EncryptedData>
def pre_encryption_part(
*,
msg_enc=TRIPLE_DES_CBC,
key_enc=RSA_OAEP_MGF1P,
key_name=None,
encrypted_key_id=None,
encrypted_data_id=None,
encrypt_cert=None,
):
ek_id = encrypted_key_id or "EK_{id}".format(id=gen_random_key())
ed_id = encrypted_data_id or "ED_{id}".format(id=gen_random_key())
msg_encryption_method = EncryptionMethod(algorithm=msg_enc)
key_encryption_method = EncryptionMethod(algorithm=key_enc)
x509_data = (
ds.X509Data(x509_certificate=ds.X509Certificate(text=encrypt_cert))
if encrypt_cert
else None
)
key_name = ds.KeyName(text=key_name) if key_name else None
key_info = (
ds.KeyInfo(key_name=key_name, x509_data=x509_data)
if key_name or x509_data
else None
)
encrypted_key = EncryptedKey(
id=ek_id,
encryption_method=key_encryption_method,
key_info=key_info,
cipher_data=CipherData(cipher_value=CipherValue(text='')),
)
key_info = ds.KeyInfo(encrypted_key=encrypted_key)
encrypted_data = EncryptedData(
id=ed_id,
type='http://www.w3.org/2001/04/xmlenc#Element',
encryption_method=msg_encryption_method,
key_info=key_info,
cipher_data=CipherData(cipher_value=CipherValue(text='')),
)
return encrypted_data
def pre_encrypt_assertion(response):
"""
    Move the assertion to within an encrypted_assertion
:param response: The response with one assertion
:return: The response but now with the assertion within an
encrypted_assertion.
"""
assertion = response.assertion
response.assertion = None
response.encrypted_assertion = EncryptedAssertion()
if assertion is not None:
if isinstance(assertion, list):
response.encrypted_assertion.add_extension_elements(assertion)
else:
response.encrypted_assertion.add_extension_element(assertion)
return response
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--list-sigalgs', dest='listsigalgs',
action='store_true',
help='List implemented signature algorithms')
args = parser.parse_args()
if args.listsigalgs:
print('\n'.join([key for key, value in SIGNER_ALGS.items()]))
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
sts/client.go
|
package sts
import (
"os"
"github.com/bjlhlin/aliyungo/common"
)
const (
// STSDefaultEndpoint is the default API endpoint of STS services
STSDefaultEndpoint = "https://sts.aliyuncs.com"
STSAPIVersion = "2015-04-01"
)
type STSClient struct {
common.Client
}
func NewClient(accessKeyId string, accessKeySecret string) *STSClient {
return NewClientWithSecurityToken(accessKeyId, accessKeySecret, "")
}
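// NewClientWithSecurityToken reads the STS_ENDPOINT environment variable to
// allow overriding the default endpoint; a usage sketch (credentials are
// placeholders):
//
//	os.Setenv("STS_ENDPOINT", "https://sts.aliyuncs.com")
//	client := NewClientWithSecurityToken("accessKeyId", "accessKeySecret", "token")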
func NewClientWithSecurityToken(accessKeyId string, accessKeySecret string, securityToken string) *STSClient {
endpoint := os.Getenv("STS_ENDPOINT")
if endpoint == "" {
endpoint = STSDefaultEndpoint
}
return NewClientWithEndpointAndSecurityToken(endpoint, accessKeyId, accessKeySecret, securityToken)
}
func NewClientWithEndpoint(endpoint string, accessKeyId string, accessKeySecret string) *STSClient {
return NewClientWithEndpointAndSecurityToken(endpoint, accessKeyId, accessKeySecret, "")
}
func NewClientWithEndpointAndSecurityToken(endpoint string, accessKeyId string, accessKeySecret string, securityToken string) *STSClient {
client := &STSClient{}
client.WithEndpoint(endpoint).
WithVersion(STSAPIVersion).
WithAccessKeyId(accessKeyId).
WithAccessKeySecret(accessKeySecret).
WithSecurityToken(securityToken).
InitClient()
return client
}
|
[
"\"STS_ENDPOINT\""
] |
[] |
[
"STS_ENDPOINT"
] |
[]
|
["STS_ENDPOINT"]
|
go
| 1 | 0 | |
internal/commands/install.go
|
package commands
import (
"archive/tar"
"archive/zip"
"compress/gzip"
"crypto/sha256"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"time"
"github.com/manifoldco/promptui"
"github.com/spf13/cobra"
)
const (
goHost = "golang.org"
goDownloadBaseURL = "https://dl.google.com/go"
goSourceGitURL = "https://github.com/golang/go"
goSourceUpsteamGitURL = "https://go.googlesource.com/go"
)
var (
installCmdGoHostFlag string
)
func installCmd() *cobra.Command {
installCmd := &cobra.Command{
Use: "install [version]",
Short: `Install Go with a version`,
Long: `Install Go by providing a version. If no version is provided, install
the latest Go. If the version is 'tip', an optional change list (CL)
number can be provided.`,
Example: `
goup install
goup install 1.15.2
goup install go1.15.2
goup install tip # Compile Go tip
goup install tip 1234 # 1234 is the CL number
`,
PersistentPreRunE: preRunInstall,
RunE: runInstall,
}
gh := os.Getenv("GOUP_GO_HOST")
if gh == "" {
gh = goHost
}
installCmd.PersistentFlags().StringVar(&installCmdGoHostFlag, "host", gh, "host that is used to download Go. The GOUP_GO_HOST environment variable overrides this flag.")
return installCmd
}
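// For example (the mirror host is illustrative), running
//
//	GOUP_GO_HOST=golang.google.cn goup install
//
// makes latestGoVersion below resolve the current release against the
// mirror instead of golang.org; an explicit --host flag still wins.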
func preRunInstall(cmd *cobra.Command, args []string) error {
http.DefaultTransport = &userAgentTransport{http.DefaultTransport}
return nil
}
func runInstall(cmd *cobra.Command, args []string) error {
var (
ver string
err error
)
if len(args) == 0 {
ver, err = latestGoVersion()
if err != nil {
return err
}
} else {
ver = args[0]
}
// Add go prefix, e.g., go1.15.2
if !strings.HasPrefix(ver, "go") {
ver = "go" + ver
}
if ver == "gotip" {
var cl string
if len(args) > 1 {
cl = args[1]
}
err = installTip(cl)
} else {
err = install(ver)
}
if err != nil {
return err
}
if err := symlink(ver); err != nil {
return err
}
logger.Printf("Default Go is set to '%s'", ver)
return nil
}
func latestGoVersion() (string, error) {
h, err := url.Parse(installCmdGoHostFlag)
if err != nil {
return "", err
}
if h.Scheme == "" {
h.Scheme = "https"
}
resp, err := http.Get(fmt.Sprintf("%s/VERSION?m=text", h.String()))
if err != nil {
return "", fmt.Errorf("Getting current Go version failed: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode > 299 {
b, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 1024))
return "", fmt.Errorf("Could not get current Go version: HTTP %d: %q", resp.StatusCode, b)
}
version, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
return strings.TrimSpace(string(version)), nil
}
func symlink(ver string) error {
current := GoupCurrentDir()
version := goupVersionDir(ver)
// ignore error, similar to rm -f
os.Remove(current)
return os.Symlink(version, current)
}
func install(version string) error {
targetDir := goupVersionDir(version)
if _, err := os.Stat(filepath.Join(targetDir, unpackedOkay)); err == nil {
logger.Printf("%s: already downloaded in %v", version, targetDir)
return nil
}
if err := os.MkdirAll(targetDir, 0755); err != nil {
return err
}
goURL := versionArchiveURL(version)
res, err := http.Head(goURL)
if err != nil {
return err
}
if res.StatusCode == http.StatusNotFound {
return fmt.Errorf("no binary release of %v for %v/%v at %v", version, getOS(), runtime.GOARCH, goURL)
}
if res.StatusCode != http.StatusOK {
return fmt.Errorf("server returned %v checking size of %v", http.StatusText(res.StatusCode), goURL)
}
base := path.Base(goURL)
archiveFile := filepath.Join(targetDir, base)
if fi, err := os.Stat(archiveFile); err != nil || fi.Size() != res.ContentLength {
if err != nil && !os.IsNotExist(err) {
// Something weird. Don't try to download.
return err
}
if err := copyFromURL(archiveFile, goURL); err != nil {
return fmt.Errorf("error downloading %v: %v", goURL, err)
}
fi, err = os.Stat(archiveFile)
if err != nil {
return err
}
if fi.Size() != res.ContentLength {
return fmt.Errorf("downloaded file %s size %v doesn't match server size %v", archiveFile, fi.Size(), res.ContentLength)
}
}
wantSHA, err := slurpURLToString(goURL + ".sha256")
if err != nil {
return err
}
if err := verifySHA256(archiveFile, strings.TrimSpace(wantSHA)); err != nil {
return fmt.Errorf("error verifying SHA256 of %v: %v", archiveFile, err)
}
logger.Printf("Unpacking %v ...", archiveFile)
if err := unpackArchive(targetDir, archiveFile); err != nil {
return fmt.Errorf("extracting archive %v: %v", archiveFile, err)
}
if err := ioutil.WriteFile(filepath.Join(targetDir, unpackedOkay), nil, 0644); err != nil {
return err
}
logger.Printf("Success: %s downloaded in %v", version, targetDir)
return nil
}
func installTip(clNumber string) error {
root := goupVersionDir("gotip")
git := func(args ...string) error {
cmd := exec.Command("git", args...)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Dir = root
return cmd.Run()
}
gitOutput := func(args ...string) ([]byte, error) {
cmd := exec.Command("git", args...)
cmd.Dir = root
return cmd.Output()
}
if _, err := os.Stat(filepath.Join(root, ".git")); err != nil {
if err := os.MkdirAll(root, 0755); err != nil {
return fmt.Errorf("failed to create repository: %v", err)
}
if err := git("clone", "--depth=1", goSourceGitURL, root); err != nil {
return fmt.Errorf("failed to clone git repository: %v", err)
}
if err := git("remote", "add", "upstream", goSourceUpsteamGitURL); err != nil {
return fmt.Errorf("failed to add upstream git repository: %v", err)
}
}
if clNumber != "" {
prompt := promptui.Prompt{
Label: fmt.Sprintf("This will download and execute code from golang.org/cl/%s, continue", clNumber),
IsConfirm: true,
}
if _, err := prompt.Run(); err != nil {
return fmt.Errorf("interrupted")
}
// CL is for googlesource, ls-remote against upstream
// ls-remote outputs a number of lines like:
// 2621ba2c60d05ec0b9ef37cd71e45047b004cead refs/changes/37/227037/1
// 51f2af2be0878e1541d2769bd9d977a7e99db9ab refs/changes/37/227037/2
// af1f3b008281c61c54a5d203ffb69334b7af007c refs/changes/37/227037/3
// 6a10ebae05ce4b01cb93b73c47bef67c0f5c5f2a refs/changes/37/227037/meta
refs, err := gitOutput("ls-remote", "upstream")
if err != nil {
return fmt.Errorf("failed to list remotes: %v", err)
}
r := regexp.MustCompile(`refs/changes/\d\d/` + clNumber + `/(\d+)`)
match := r.FindAllStringSubmatch(string(refs), -1)
if match == nil {
return fmt.Errorf("CL %v not found", clNumber)
}
var ref string
var patchSet int
for _, m := range match {
ps, _ := strconv.Atoi(m[1])
if ps > patchSet {
patchSet = ps
ref = m[0]
}
}
logger.Printf("Fetching CL %v, Patch Set %v...", clNumber, patchSet)
if err := git("fetch", "upstream", ref); err != nil {
return fmt.Errorf("failed to fetch %s: %v", ref, err)
}
} else {
logger.Printf("Updating the go development tree...")
if err := git("fetch", "origin", "master"); err != nil {
return fmt.Errorf("failed to fetch git repository updates: %v", err)
}
}
// Use checkout and a detached HEAD, because it will refuse to overwrite
// local changes, and warn if commits are being left behind, but will not
// mind if master is force-pushed upstream.
if err := git("-c", "advice.detachedHead=false", "checkout", "FETCH_HEAD"); err != nil {
return fmt.Errorf("failed to checkout git repository: %v", err)
}
// It shouldn't be the case, but in practice sometimes binary artifacts
// generated by earlier Go versions interfere with the build.
//
// Ask the user what to do about them if they are not gitignored. They might
// be artifacts that used to be ignored in previous versions, or precious
// uncommitted source files.
if err := git("clean", "-i", "-d"); err != nil {
return fmt.Errorf("failed to cleanup git repository: %v", err)
}
// Wipe away probably boring ignored files without bothering the user.
if err := git("clean", "-q", "-f", "-d", "-X"); err != nil {
return fmt.Errorf("failed to cleanup git repository: %v", err)
}
cmd := exec.Command(filepath.Join(root, "src", makeScript()))
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Dir = filepath.Join(root, "src")
if runtime.GOOS == "windows" {
// Workaround make.bat not autodetecting GOROOT_BOOTSTRAP. Issue 28641.
goroot, err := exec.Command("go", "env", "GOROOT").Output()
if err != nil {
return fmt.Errorf("failed to detect an existing go installation for bootstrap: %v", err)
}
cmd.Env = append(os.Environ(), "GOROOT_BOOTSTRAP="+strings.TrimSpace(string(goroot)))
}
if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to build go: %v", err)
}
return nil
}
// unpackArchive unpacks the provided archive zip or tar.gz file to targetDir,
// removing the "go/" prefix from file entries.
func unpackArchive(targetDir, archiveFile string) error {
switch {
case strings.HasSuffix(archiveFile, ".zip"):
return unpackZip(targetDir, archiveFile)
case strings.HasSuffix(archiveFile, ".tar.gz"):
return unpackTarGz(targetDir, archiveFile)
default:
return errors.New("unsupported archive file")
}
}
// unpackTarGz is the tar.gz implementation of unpackArchive.
func unpackTarGz(targetDir, archiveFile string) error {
r, err := os.Open(archiveFile)
if err != nil {
return err
}
defer r.Close()
madeDir := map[string]bool{}
zr, err := gzip.NewReader(r)
if err != nil {
return err
}
tr := tar.NewReader(zr)
for {
f, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
if !validRelPath(f.Name) {
return fmt.Errorf("tar file contained invalid name %q", f.Name)
}
rel := filepath.FromSlash(strings.TrimPrefix(f.Name, "go/"))
abs := filepath.Join(targetDir, rel)
fi := f.FileInfo()
mode := fi.Mode()
switch {
case mode.IsRegular():
// Make the parent directory. This is usually redundant
// because a directory entry in the tar normally precedes
// its files, but not every archive guarantees that
// ordering, so create it here (caching the result) and
// propagate any error.
dir := filepath.Dir(abs)
if !madeDir[dir] {
if err := os.MkdirAll(dir, 0755); err != nil {
return err
}
madeDir[dir] = true
}
wf, err := os.OpenFile(abs, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode.Perm())
if err != nil {
return err
}
n, err := io.Copy(wf, tr)
if closeErr := wf.Close(); closeErr != nil && err == nil {
err = closeErr
}
if err != nil {
return fmt.Errorf("error writing to %s: %v", abs, err)
}
if n != f.Size {
return fmt.Errorf("only wrote %d bytes to %s; expected %d", n, abs, f.Size)
}
if !f.ModTime.IsZero() {
if err := os.Chtimes(abs, f.ModTime, f.ModTime); err != nil {
// benign error. Gerrit doesn't even set the
// modtime in these, and we don't end up relying
// on it anywhere (the gomote push command relies
// on digests only), so this is a little pointless
// for now.
logger.Printf("error changing modtime: %v", err)
}
}
case mode.IsDir():
if err := os.MkdirAll(abs, 0755); err != nil {
return err
}
madeDir[abs] = true
default:
return fmt.Errorf("tar file entry %s contained unsupported file type %v", f.Name, mode)
}
}
return nil
}
// unpackZip is the zip implementation of unpackArchive.
func unpackZip(targetDir, archiveFile string) error {
zr, err := zip.OpenReader(archiveFile)
if err != nil {
return err
}
defer zr.Close()
for _, f := range zr.File {
name := strings.TrimPrefix(f.Name, "go/")
outpath := filepath.Join(targetDir, name)
if f.FileInfo().IsDir() {
if err := os.MkdirAll(outpath, 0755); err != nil {
return err
}
continue
}
rc, err := f.Open()
if err != nil {
return err
}
// File
if err := os.MkdirAll(filepath.Dir(outpath), 0755); err != nil {
return err
}
out, err := os.OpenFile(outpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
if err != nil {
return err
}
_, err = io.Copy(out, rc)
rc.Close()
if err != nil {
out.Close()
return err
}
if err := out.Close(); err != nil {
return err
}
}
return nil
}
// verifySHA256 reports whether the named file has contents with
// SHA-256 of the given wantHex value.
func verifySHA256(file, wantHex string) error {
f, err := os.Open(file)
if err != nil {
return err
}
defer f.Close()
hash := sha256.New()
if _, err := io.Copy(hash, f); err != nil {
return err
}
if fmt.Sprintf("%x", hash.Sum(nil)) != wantHex {
return fmt.Errorf("%s corrupt? does not have expected SHA-256 of %v", file, wantHex)
}
return nil
}
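// install() above pairs this with slurpURLToString: the expected digest is
// published next to each archive as <archive URL> + ".sha256".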
// slurpURLToString downloads the given URL and returns it as a string.
func slurpURLToString(url_ string) (string, error) {
res, err := http.Get(url_)
if err != nil {
return "", err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return "", fmt.Errorf("%s: %v", url_, res.Status)
}
slurp, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", fmt.Errorf("reading %s: %v", url_, err)
}
return string(slurp), nil
}
// copyFromURL downloads srcURL to dstFile.
func copyFromURL(dstFile, srcURL string) (err error) {
f, err := os.Create(dstFile)
if err != nil {
return err
}
defer func() {
if err != nil {
f.Close()
os.Remove(dstFile)
}
}()
c := &http.Client{
Transport: &userAgentTransport{&http.Transport{
// It's already compressed. Prefer accurate ContentLength.
// (Not that GCS would try to compress it, though)
DisableCompression: true,
DisableKeepAlives: true,
Proxy: http.ProxyFromEnvironment,
}},
}
res, err := c.Get(srcURL)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return errors.New(res.Status)
}
pw := &progressWriter{w: f, total: res.ContentLength}
n, err := io.Copy(pw, res.Body)
if err != nil {
return err
}
if res.ContentLength != -1 && res.ContentLength != n {
return fmt.Errorf("copied %v bytes; expected %v", n, res.ContentLength)
}
pw.update() // 100%
return f.Close()
}
func makeScript() string {
switch runtime.GOOS {
case "plan9":
return "make.rc"
case "windows":
return "make.bat"
default:
return "make.bash"
}
}
type progressWriter struct {
w io.Writer
n int64
total int64
last time.Time
}
func (p *progressWriter) update() {
end := " ..."
if p.n == p.total {
end = ""
}
fmt.Fprintf(os.Stderr, "Downloaded %5.1f%% (%*d / %d bytes)%s\n",
(100.0*float64(p.n))/float64(p.total),
ndigits(p.total), p.n, p.total, end)
}
func ndigits(i int64) int {
var n int
for ; i != 0; i /= 10 {
n++
}
return n
}
func (p *progressWriter) Write(buf []byte) (n int, err error) {
n, err = p.w.Write(buf)
p.n += int64(n)
if now := time.Now(); now.Unix() != p.last.Unix() {
p.update()
p.last = now
}
return
}
// getOS returns runtime.GOOS. It exists as a function just for lazy
// testing of the Windows zip path when running on Linux/Darwin.
func getOS() string {
return runtime.GOOS
}
// versionArchiveURL returns the zip or tar.gz URL of the given Go version.
func versionArchiveURL(version string) string {
goos := getOS()
ext := "tar.gz"
if goos == "windows" {
ext = "zip"
}
arch := runtime.GOARCH
if goos == "linux" && runtime.GOARCH == "arm" {
arch = "armv6l"
}
return fmt.Sprintf("%s/%s.%s-%s.%s", goDownloadBaseURL, version, goos, arch, ext)
}
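// For example, "go1.15.2" resolves to
// https://dl.google.com/go/go1.15.2.linux-amd64.tar.gz on linux/amd64 and
// .../go1.15.2.windows-amd64.zip on windows/amd64; linux/arm uses the
// armv6l suffix instead of arm.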
// unpackedOkay is a sentinel zero-byte file to indicate that the Go
// version was downloaded and unpacked successfully.
const unpackedOkay = ".unpacked-success"
func validRelPath(p string) bool {
if p == "" || strings.Contains(p, `\`) || strings.HasPrefix(p, "/") || strings.Contains(p, "../") {
return false
}
return true
}
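// For example, validRelPath accepts "go/bin/gofmt" but rejects "",
// "/etc/passwd", `..\x`, and "a/../b".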
type userAgentTransport struct {
rt http.RoundTripper
}
func (uat userAgentTransport) RoundTrip(r *http.Request) (*http.Response, error) {
version := runtime.Version()
if strings.Contains(version, "devel") {
// Strip the SHA hash and date. We don't want spaces or other tokens (see RFC2616 14.43)
version = "devel"
}
r.Header.Set("User-Agent", "goup/"+version)
return uat.rt.RoundTrip(r)
}
|
[
"\"GOUP_GO_HOST\""
] |
[] |
[
"GOUP_GO_HOST"
] |
[]
|
["GOUP_GO_HOST"]
|
go
| 1 | 0 | |
Savethemblobs/PythonistaKit.framework/pylib/getopt.py
|
"""Parser for command line options.
This module helps scripts to parse the command line arguments in
sys.argv. It supports the same conventions as the Unix getopt()
function (including the special meanings of arguments of the form `-'
and `--'). Long options similar to those supported by GNU software
may be used as well via an optional third argument. This module
provides two functions and an exception:
getopt() -- Parse command line options
gnu_getopt() -- Like getopt(), but allow option and non-option arguments
to be intermixed.
GetoptError -- exception (class) raised with 'opt' attribute, which is the
option involved with the exception.
"""
# Long option support added by Lars Wirzenius <[email protected]>.
#
# Gerrit Holl <[email protected]> moved the string-based exceptions
# to class-based exceptions.
#
# Peter Astrand <[email protected]> added gnu_getopt().
#
# TODO for gnu_getopt():
#
# - GNU getopt_long_only mechanism
# - allow the caller to specify ordering
# - RETURN_IN_ORDER option
# - GNU extension with '-' as first character of option string
# - optional arguments, specified by double colons
# - an option string with a W followed by semicolon should
# treat "-W foo" as "--foo"
__all__ = ["GetoptError","error","getopt","gnu_getopt"]
import os
class GetoptError(Exception):
opt = ''
msg = ''
def __init__(self, msg, opt=''):
self.msg = msg
self.opt = opt
Exception.__init__(self, msg, opt)
def __str__(self):
return self.msg
error = GetoptError # backward compatibility
def getopt(args, shortopts, longopts = []):
"""getopt(args, options[, long_options]) -> opts, args
Parses command line options and parameter list. args is the
argument list to be parsed, without the leading reference to the
running program. Typically, this means "sys.argv[1:]". shortopts
is the string of option letters that the script wants to
recognize, with options that require an argument followed by a
colon (i.e., the same format that Unix getopt() uses). If
specified, longopts is a list of strings with the names of the
long options which should be supported. The leading '--'
characters should not be included in the option name. Options
which require an argument should be followed by an equal sign
('=').
The return value consists of two elements: the first is a list of
(option, value) pairs; the second is the list of program arguments
left after the option list was stripped (this is a trailing slice
of the first argument). Each option-and-value pair returned has
the option as its first element, prefixed with a hyphen (e.g.,
'-x'), and the option argument as its second element, or an empty
string if the option has no argument. The options occur in the
list in the same order in which they were found, thus allowing
multiple occurrences. Long and short options may be mixed.
"""
opts = []
if type(longopts) == type(""):
longopts = [longopts]
else:
longopts = list(longopts)
while args and args[0].startswith('-') and args[0] != '-':
if args[0] == '--':
args = args[1:]
break
if args[0].startswith('--'):
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
else:
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
return opts, args
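# Worked example of the behavior documented above:
#
#   opts, args = getopt(['-a', 'foo', '--alpha=1', 'rest'], 'a:b', ['alpha='])
#   # opts == [('-a', 'foo'), ('--alpha', '1')], args == ['rest']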
def gnu_getopt(args, shortopts, longopts = []):
"""getopt(args, options[, long_options]) -> opts, args
This function works like getopt(), except that GNU style scanning
mode is used by default. This means that option and non-option
arguments may be intermixed. The getopt() function stops
processing options as soon as a non-option argument is
encountered.
If the first character of the option string is `+', or if the
environment variable POSIXLY_CORRECT is set, then option
processing stops as soon as a non-option argument is encountered.
"""
opts = []
prog_args = []
if isinstance(longopts, str):
longopts = [longopts]
else:
longopts = list(longopts)
# Allow options after non-option arguments?
if shortopts.startswith('+'):
shortopts = shortopts[1:]
all_options_first = True
elif os.environ.get("POSIXLY_CORRECT"):
all_options_first = True
else:
all_options_first = False
while args:
if args[0] == '--':
prog_args += args[1:]
break
if args[0][:2] == '--':
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
elif args[0][:1] == '-' and args[0] != '-':
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
else:
if all_options_first:
prog_args += args
break
else:
prog_args.append(args[0])
args = args[1:]
return opts, prog_args
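# Worked example: unlike getopt(), intermixed non-option arguments are
# collected rather than terminating the scan:
#
#   opts, args = gnu_getopt(['x', '-a', '1'], 'a:')
#   # opts == [('-a', '1')], args == ['x']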
def do_longs(opts, opt, longopts, args):
try:
i = opt.index('=')
except ValueError:
optarg = None
else:
opt, optarg = opt[:i], opt[i+1:]
has_arg, opt = long_has_args(opt, longopts)
if has_arg:
if optarg is None:
if not args:
raise GetoptError('option --%s requires argument' % opt, opt)
optarg, args = args[0], args[1:]
elif optarg is not None:
raise GetoptError('option --%s must not have an argument' % opt, opt)
opts.append(('--' + opt, optarg or ''))
return opts, args
# Return:
# has_arg?
# full option name
def long_has_args(opt, longopts):
possibilities = [o for o in longopts if o.startswith(opt)]
if not possibilities:
raise GetoptError('option --%s not recognized' % opt, opt)
# Is there an exact match?
if opt in possibilities:
return False, opt
elif opt + '=' in possibilities:
return True, opt
# No exact match, so better be unique.
if len(possibilities) > 1:
# XXX since possibilities contains all valid continuations, might be
# nice to work them into the error msg
raise GetoptError('option --%s not a unique prefix' % opt, opt)
assert len(possibilities) == 1
unique_match = possibilities[0]
has_arg = unique_match.endswith('=')
if has_arg:
unique_match = unique_match[:-1]
return has_arg, unique_match
def do_shorts(opts, optstring, shortopts, args):
while optstring != '':
opt, optstring = optstring[0], optstring[1:]
if short_has_arg(opt, shortopts):
if optstring == '':
if not args:
raise GetoptError('option -%s requires argument' % opt,
opt)
optstring, args = args[0], args[1:]
optarg, optstring = optstring, ''
else:
optarg = ''
opts.append(('-' + opt, optarg))
return opts, args
def short_has_arg(opt, shortopts):
for i in range(len(shortopts)):
if opt == shortopts[i] != ':':
return shortopts.startswith(':', i+1)
raise GetoptError('option -%s not recognized' % opt, opt)
if __name__ == '__main__':
import sys
print getopt(sys.argv[1:], "a:b", ["alpha=", "beta"])
|
[] |
[] |
[
"POSIXLY_CORRECT"
] |
[]
|
["POSIXLY_CORRECT"]
|
python
| 1 | 0 | |
tests/lambda_functions/dispatcher/main_test.py
|
"""Unit tests for batcher main.py. Mocks out boto3 clients."""
import os
import unittest
from unittest import mock
import boto3
import moto
from tests import common
@moto.mock_sqs()
class MainTest(unittest.TestCase):
"""Test lambda_functions/dispatcher"""
# pylint: disable=protected-access
def setUp(self):
"""Set environment variables and setup the mocks."""
url1 = boto3.client('sqs').create_queue(QueueName='q1')['QueueUrl']
url2 = boto3.client('sqs').create_queue(QueueName='q2')['QueueUrl']
mock_environ = {
'LAMBDA_TARGETS': 'analyzer:production,downloader:staging',
'SQS_QUEUE_URLS': '{},{}'.format(url1, url2)
}
with mock.patch.dict(os.environ, values=mock_environ):
from lambda_functions.dispatcher import main
self.main = main
self.config1 = self.main.DISPATCH_CONFIGS[0]
self.config2 = self.main.DISPATCH_CONFIGS[1]
def test_dispatch_configs(self):
"""Environment variables were parsed correctly into 2 DispatchConfig tuples."""
self.assertTrue(self.config1.queue.url.endswith('q1'))
self.assertEqual('analyzer', self.config1.lambda_name)
self.assertEqual('production', self.config1.lambda_qualifier)
self.assertTrue(self.config2.queue.url.endswith('q2'))
self.assertNotEqual(self.config1.queue, self.config2.queue)
self.assertEqual('downloader', self.config2.lambda_name)
self.assertEqual('staging', self.config2.lambda_qualifier)
def test_sqs_poll(self):
"""Dispatcher invokes each of the Lambda targets with data from its respective queue."""
self.config1.queue.send_message(MessageBody='queue1-message1')
self.config1.queue.send_message(MessageBody='queue1-message2')
with mock.patch.object(self.main, 'LOGGER') as mock_logger, \
mock.patch.object(self.main, 'LAMBDA') as mock_lambda, \
mock.patch.object(self.main, 'WAIT_TIME_SECONDS', 0):
self.main._sqs_poll(self.config1, common.MockLambdaContext())
mock_logger.assert_has_calls([
mock.call.info(
'Polling process started: %s => lambda:%s:%s',
self.config1.queue.url,
self.config1.lambda_name, self.config1.lambda_qualifier),
mock.call.info('Sending %d messages to %s:%s', 2, 'analyzer', 'production')
])
mock_lambda.invoke.assert_called_once_with(
FunctionName='analyzer',
InvocationType='Event',
Payload=mock.ANY,
Qualifier='production'
)
def test_dispatch_handler(self):
"""Dispatch handler creates and starts processes."""
with mock.patch.object(self.main, 'Process') as mock_process:
self.main.dispatch_lambda_handler(None, common.MockLambdaContext())
mock_process.assert_has_calls([
mock.call(target=self.main._sqs_poll, args=(self.config1, mock.ANY)),
mock.call(target=self.main._sqs_poll, args=(self.config2, mock.ANY)),
mock.call().start(),
mock.call().start(),
mock.call().join(),
mock.call().join()
])
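# The environment wiring exercised above is positional: given
# LAMBDA_TARGETS='analyzer:production,downloader:staging' and
# SQS_QUEUE_URLS='<q1>,<q2>', q1 feeds analyzer:production and q2 feeds
# downloader:staging.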
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
src/test/java/picocli/CommandLineHelpTest.java
|
/*
Copyright 2017 Remko Popma
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package picocli;
import org.junit.After;
import org.junit.Rule;
import org.junit.Test;
import org.junit.contrib.java.lang.system.ProvideSystemProperty;
import picocli.CommandLine.*;
import picocli.CommandLine.Model.*;
import picocli.CommandLine.Help.Ansi.IStyle;
import picocli.CommandLine.Help.Ansi.Style;
import picocli.CommandLine.Help.Ansi.Text;
import picocli.CommandLine.Help.ColorScheme;
import picocli.CommandLine.Help.TextTable;
import java.io.*;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.net.InetAddress;
import java.net.URI;
import java.net.URL;
import java.util.*;
import java.util.concurrent.TimeUnit;
import static java.lang.String.format;
import org.fusesource.jansi.AnsiConsole;
import static org.junit.Assert.*;
import static picocli.CommandLine.Help.Ansi.ISATTY;
import static picocli.CommandLine.Help.Visibility.*;
import static picocli.HelpTestUtil.textArray;
import static picocli.HelpTestUtil.usageString;
import static picocli.ModelTestUtil.options;
/**
* Tests for picocli's "Usage" help functionality.
*/
public class CommandLineHelpTest {
private static final String LINESEP = System.getProperty("line.separator");
@Rule
public final ProvideSystemProperty ansiOFF = new ProvideSystemProperty("picocli.ansi", "false");
@After
public void after() {
System.getProperties().remove("picocli.color.commands");
System.getProperties().remove("picocli.color.options");
System.getProperties().remove("picocli.color.parameters");
System.getProperties().remove("picocli.color.optionParams");
}
@Test
public void testShowDefaultValuesDemo() {
@Command(showDefaultValues = true)
class FineGrainedDefaults {
@Option(names = "-a", description = "ALWAYS shown even if null", showDefaultValue = ALWAYS)
String optionA;
@Option(names = "-b", description = "NEVER shown", showDefaultValue = NEVER)
String optionB = "xyz";
@Option(names = "-c", description = "ON_DEMAND hides null", showDefaultValue = ON_DEMAND)
String optionC;
@Option(names = "-d", description = "ON_DEMAND shows non-null", showDefaultValue = ON_DEMAND)
String optionD = "abc";
}
String result = usageString(new FineGrainedDefaults(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> [-a=<optionA>] [-b=<optionB>] [-c=<optionC>] [-d=<optionD>]%n" +
" -a= <optionA> ALWAYS shown even if null%n" +
" Default: null%n" +
" -b= <optionB> NEVER shown%n" +
" -c= <optionC> ON_DEMAND hides null%n" +
" -d= <optionD> ON_DEMAND shows non-null%n" +
" Default: abc%n"), result);
}
@Test
public void testCommandWithoutShowDefaultValuesOptionOnDemandNullValue_hidesDefault() {
@Command()
class Params {
@Option(names = {"-f", "--file"}, required = true, description = "the file to use") File file;
@Option(names = {"-o", "--opt"}, required = true, description = "another option", showDefaultValue = Help.Visibility.ON_DEMAND) File other;
}
String result = usageString(new Params(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> -f=<file> -o=<other>%n" +
" -f, --file=<file> the file to use%n" +
" -o, --opt=<other> another option%n"), result);
}
@Test
public void testCommandWithoutShowDefaultValuesOptionOnDemandNonNullValue_hidesDefault() {
@Command()
class Params {
@Option(names = {"-f", "--file"}, required = true, description = "the file to use") File file = new File("/tmp/file");
@Option(names = {"-o", "--opt"}, required = true, description = "another option", showDefaultValue = Help.Visibility.ON_DEMAND) File other = new File("/tmp/other");
}
String result = usageString(new Params(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> -f=<file> -o=<other>%n" +
" -f, --file=<file> the file to use%n" +
" -o, --opt=<other> another option%n"), result);
}
@Test
public void testCommandWithoutShowDefaultValuesOptionAlwaysNullValue_showsNullDefault() {
@Command()
class Params {
@Option(names = {"-f", "--file"}, required = true, description = "the file to use", showDefaultValue = Help.Visibility.ALWAYS)
File file;
}
String result = usageString(new Params(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> -f=<file>%n" +
" -f, --file=<file> the file to use%n" +
" Default: null%n"), result);
}
@Test
public void testCommandWithoutShowDefaultValuesOptionAlwaysNonNullValue_showsDefault() {
@Command()
class Params {
@Option(names = {"-f", "--file"}, required = true, description = "the file to use", showDefaultValue = Help.Visibility.ALWAYS)
File file = new File("/tmp/file");
}
String result = usageString(new Params(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> -f=<file>%n" +
" -f, --file=<file> the file to use%n" +
" Default: %s%n", new File("/tmp/file")), result);
}
@Test
public void testCommandShowDefaultValuesOptionOnDemandNullValue_hidesDefault() {
@Command(showDefaultValues = true)
class Params {
@Option(names = {"-f", "--file"}, required = true, description = "the file to use") File file;
@Option(names = {"-o", "--opt"}, required = true, description = "another option", showDefaultValue = Help.Visibility.ON_DEMAND) File other;
}
String result = usageString(new Params(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> -f=<file> -o=<other>%n" +
" -f, --file=<file> the file to use%n" +
" -o, --opt=<other> another option%n"), result);
}
@Test
public void testCommandShowDefaultValuesOptionNeverNullValue_hidesDefault() {
@Command(showDefaultValues = true)
class Params {
@Option(names = {"-f", "--file"}, required = true, description = "the file to use") File file;
@Option(names = {"-o", "--opt"}, required = true, description = "another option", showDefaultValue = Help.Visibility.NEVER) File other;
}
String result = usageString(new Params(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> -f=<file> -o=<other>%n" +
" -f, --file=<file> the file to use%n" +
" -o, --opt=<other> another option%n"), result);
}
@Test
public void testCommandShowDefaultValuesOptionAlwaysNullValue_showsNullDefault() {
@Command(showDefaultValues = true)
class Params {
@Option(names = {"-f", "--file"}, required = true, description = "the file to use") File file;
@Option(names = {"-o", "--opt"}, required = true, description = "another option", showDefaultValue = Help.Visibility.ALWAYS) File other;
}
String result = usageString(new Params(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> -f=<file> -o=<other>%n" +
" -f, --file=<file> the file to use%n" +
" -o, --opt=<other> another option%n" +
" Default: null%n"), result);
}
@Test
public void testCommandShowDefaultValuesOptionOnDemandNonNullValue_showsDefault() {
@Command(showDefaultValues = true)
class Params {
@Option(names = {"-f", "--file"}, required = true, description = "the file to use")
File file = new File("theDefault.txt");
@Option(names = {"-o", "--opt"}, required = true, description = "another option", showDefaultValue = Help.Visibility.ON_DEMAND)
File other = new File("other.txt");
}
String result = usageString(new Params(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> -f=<file> -o=<other>%n" +
" -f, --file=<file> the file to use%n" +
" Default: theDefault.txt%n" +
" -o, --opt=<other> another option%n" +
" Default: other.txt%n"), result);
}
@Test
public void testCommandShowDefaultValuesOptionNeverNonNullValue_hidesDefault() {
@Command(showDefaultValues = true)
class Params {
@Option(names = {"-f", "--file"}, required = true, description = "the file to use")
File file = new File("theDefault.txt");
@Option(names = {"-o", "--opt"}, required = true, description = "another option", showDefaultValue = Help.Visibility.NEVER)
File other = new File("other.txt");
}
String result = usageString(new Params(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> -f=<file> -o=<other>%n" +
" -f, --file=<file> the file to use%n" +
" Default: theDefault.txt%n" +
" -o, --opt=<other> another option%n"), result);
}
@Test
public void testCommandShowDefaultValuesOptionAlwaysNonNullValue_showsDefault() {
@Command(showDefaultValues = true)
class Params {
@Option(names = {"-f", "--file"}, required = true, description = "the file to use")
File file = new File("theDefault.txt");
@Option(names = {"-o", "--opt"}, required = true, description = "another option", showDefaultValue = Help.Visibility.ALWAYS)
File other = new File("other.txt");
}
String result = usageString(new Params(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> -f=<file> -o=<other>%n" +
" -f, --file=<file> the file to use%n" +
" Default: theDefault.txt%n" +
" -o, --opt=<other> another option%n" +
" Default: other.txt%n"), result);
}
@Test
public void testCommandShowDefaultValuesOptionOnDemandArrayField() {
@Command(showDefaultValues = true)
class Params {
@Option(names = {"-x", "--array"}, required = true, description = "the array")
int[] array = {1, 5, 11, 23};
@Option(names = {"-y", "--other"}, required = true, description = "the other", showDefaultValue = Help.Visibility.ON_DEMAND)
int[] other = {1, 5, 11, 23};
}
String result = usageString(new Params(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> -x=<array> [-x=<array>]... -y=<other> [-y=<other>]...%n" +
" -x, --array=<array> the array%n" +
" Default: [1, 5, 11, 23]%n" +
" -y, --other=<other> the other%n" +
" Default: [1, 5, 11, 23]%n"), result);
}
@Test
public void testCommandShowDefaultValuesOptionNeverArrayField_hidesDefault() {
@Command(showDefaultValues = true)
class Params {
@Option(names = {"-x", "--array"}, required = true, description = "the array")
int[] array = {1, 5, 11, 23};
@Option(names = {"-y", "--other"}, required = true, description = "the other", showDefaultValue = Help.Visibility.NEVER)
int[] other = {1, 5, 11, 23};
}
String result = usageString(new Params(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> -x=<array> [-x=<array>]... -y=<other> [-y=<other>]...%n" +
" -x, --array=<array> the array%n" +
" Default: [1, 5, 11, 23]%n" +
" -y, --other=<other> the other%n"), result);
}
@Test
public void testCommandShowDefaultValuesOptionAlwaysNullArrayField_showsNull() {
@Command(showDefaultValues = true)
class Params {
@Option(names = {"-x", "--array"}, required = true, description = "the array")
int[] array;
@Option(names = {"-y", "--other"}, required = true, description = "the other", showDefaultValue = Help.Visibility.ALWAYS)
int[] other;
}
String result = usageString(new Params(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> -x=<array> [-x=<array>]... -y=<other> [-y=<other>]...%n" +
" -x, --array=<array> the array%n" +
" -y, --other=<other> the other%n" +
" Default: null%n"), result);
}
@Test
public void testCommandShowDefaultValuesVariableForArrayField() {
@Command
class Params {
@Option(names = {"-x", "--array"}, required = true, description = "null array: Default: ${DEFAULT-VALUE}")
int[] nil;
@Option(names = {"-y", "--other"}, required = true, description = "non-null: Default: ${DEFAULT-VALUE}")
int[] other = {1, 5, 11, 23};
}
String result = usageString(new Params(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> -x=<nil> [-x=<nil>]... -y=<other> [-y=<other>]...%n" +
" -x, --array=<nil> null array: Default: null%n" +
" -y, --other=<other> non-null: Default: [1, 5, 11, 23]%n"), result);
}
@Test
public void testOptionSpec_defaultValue_overwritesInitialValue() {
@Command(showDefaultValues = true)
class Params {
@Option(names = {"-x", "--array"}, required = true, paramLabel = "INT", description = "the array")
int[] array = {1, 5, 11, 23};
}
CommandLine cmd = new CommandLine(new Params());
OptionSpec x = cmd.getCommandSpec().posixOptionsMap().get('x').toBuilder().defaultValue("5,4,3,2,1").splitRegex(",").build();
cmd = new CommandLine(CommandSpec.create().addOption(x));
cmd.getCommandSpec().usageMessage().showDefaultValues(true);
String result = usageString(cmd, Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> [-x=INT[,INT...]]...%n" +
" -x, --array=INT[,INT...] the array%n" +
" Default: 5,4,3,2,1%n"), result);
}
@Test
public void testCommandWithoutShowDefaultValuesPositionalOnDemandNullValue_hidesDefault() {
@Command()
class Params {
@Parameters(index = "0", description = "the file to use") File file;
@Parameters(index = "1", description = "another option", showDefaultValue = Help.Visibility.ON_DEMAND) File other;
}
String result = usageString(new Params(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> <file> <other>%n" +
" <file> the file to use%n" +
" <other> another option%n"), result);
}
@Test
public void testCommandWithoutShowDefaultValuesPositionalOnDemandNonNullValue_hidesDefault() {
@Command()
class Params {
@Parameters(index = "0", description = "the file to use") File file = new File("/tmp/file");
@Parameters(index = "1", description = "another option", showDefaultValue = Help.Visibility.ON_DEMAND) File other = new File("/tmp/other");
}
String result = usageString(new Params(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> <file> <other>%n" +
" <file> the file to use%n" +
" <other> another option%n"), result);
}
@Test
public void testCommandWithoutShowDefaultValuesPositionalAlwaysNullValue_showsNullDefault() {
@Command()
class Params {
@Parameters(index = "0", description = "the file to use", showDefaultValue = Help.Visibility.ALWAYS)
File file;
}
String result = usageString(new Params(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> <file>%n" +
" <file> the file to use%n" +
" Default: null%n"), result);
}
@Test
public void testCommandWithoutShowDefaultValuesPositionalAlwaysNonNullValue_showsDefault() {
@Command()
class Params {
@Parameters(index = "0", description = "the file to use", showDefaultValue = Help.Visibility.ALWAYS)
File file = new File("/tmp/file");
}
String result = usageString(new Params(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> <file>%n" +
" <file> the file to use%n" +
" Default: %s%n", new File("/tmp/file")), result);
}
@Test
public void testCommandShowDefaultValuesPositionalOnDemandNullValue_hidesDefault() {
@Command(showDefaultValues = true)
class Params {
@Parameters(index = "0", description = "the file to use") File file;
@Parameters(index = "1", description = "another option", showDefaultValue = Help.Visibility.ON_DEMAND) File other;
}
String result = usageString(new Params(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> <file> <other>%n" +
" <file> the file to use%n" +
" <other> another option%n"), result);
}
@Test
public void testCommandShowDefaultValuesPositionalNeverNullValue_hidesDefault() {
@Command(showDefaultValues = true)
class Params {
@Parameters(index = "0", description = "the file to use") File file;
@Parameters(index = "1", description = "another option", showDefaultValue = Help.Visibility.NEVER) File other;
}
String result = usageString(new Params(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> <file> <other>%n" +
" <file> the file to use%n" +
" <other> another option%n"), result);
}
@Test
public void testCommandShowDefaultValuesPositionalAlwaysNullValue_showsNullDefault() {
@Command(showDefaultValues = true)
class Params {
@Parameters(index = "0", description = "the file to use") File file;
@Parameters(index = "1", description = "another option", showDefaultValue = Help.Visibility.ALWAYS) File other;
}
String result = usageString(new Params(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> <file> <other>%n" +
" <file> the file to use%n" +
" <other> another option%n" +
" Default: null%n"), result);
}
@Test
public void testCommandShowDefaultValuesPositionalOnDemandNonNullValue_showsDefault() {
@Command(showDefaultValues = true)
class Params {
@Parameters(index = "0", description = "the file to use")
File file = new File("theDefault.txt");
@Parameters(index = "1", description = "another option", showDefaultValue = Help.Visibility.ON_DEMAND)
File other = new File("other.txt");
}
String result = usageString(new Params(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> <file> <other>%n" +
" <file> the file to use%n" +
" Default: theDefault.txt%n" +
" <other> another option%n" +
" Default: other.txt%n"), result);
}
@Test
public void testCommandShowDefaultValuesPositionalNeverNonNullValue_hidesDefault() {
@Command(showDefaultValues = true)
class Params {
@Parameters(index = "0", description = "the file to use")
File file = new File("theDefault.txt");
@Parameters(index = "1", description = "another option", showDefaultValue = Help.Visibility.NEVER)
File other = new File("other.txt");
}
String result = usageString(new Params(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> <file> <other>%n" +
" <file> the file to use%n" +
" Default: theDefault.txt%n" +
" <other> another option%n"), result);
}
@Test
public void testCommandShowDefaultValuesPositionalAlwaysNonNullValue_showsDefault() {
@Command(showDefaultValues = true)
class Params {
@Parameters(index = "0", description = "the file to use")
File file = new File("theDefault.txt");
@Parameters(index = "1", description = "another option", showDefaultValue = Help.Visibility.ALWAYS)
File other = new File("other.txt");
}
String result = usageString(new Params(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> <file> <other>%n" +
" <file> the file to use%n" +
" Default: theDefault.txt%n" +
" <other> another option%n" +
" Default: other.txt%n"), result);
}
@Test
public void testPositionalParamSpec_defaultValue_overwritesInitialValue() {
@Command(showDefaultValues = true)
class Params {
@Parameters(paramLabel = "INT", description = "the array")
int[] value = {1, 5, 11, 23};
}
CommandLine cmd = new CommandLine(new Params());
PositionalParamSpec x = cmd.getCommandSpec().positionalParameters().get(0).toBuilder().defaultValue("5,4,3,2,1").splitRegex(",").build();
cmd = new CommandLine(CommandSpec.create().add(x));
cmd.getCommandSpec().usageMessage().showDefaultValues(true);
String result = usageString(cmd, Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> [INT[,INT...]...]%n" +
" [INT[,INT...]...] the array%n" +
" Default: 5,4,3,2,1%n"), result);
}
@Test
public void testUsageSeparatorWithoutDefault() {
@Command()
class Params {
@Option(names = {"-f", "--file"}, required = true, description = "the file to use") File file = new File("def.txt");
}
String result = usageString(new Params(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> -f=<file>%n" +
" -f, --file=<file> the file to use%n"), result);
}
@Test
public void testUsageSeparator() {
@Command(showDefaultValues = true)
class Params {
@Option(names = {"-f", "--file"}, required = true, description = "the file to use") File file = new File("def.txt");
}
String result = usageString(new Params(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> -f=<file>%n" +
" -f, --file=<file> the file to use%n" +
" Default: def.txt%n"), result);
}
@Test
public void testUsageParamLabels() {
@Command()
class ParamLabels {
@Option(names = "-P", paramLabel = "KEY=VALUE", type = {String.class, String.class},
description = "Project properties (key-value pairs)") Map<String, String> props;
@Option(names = "-f", paramLabel = "FILE", description = "files") File[] f;
@Option(names = "-n", description = "a number option") int number;
@Parameters(index = "0", paramLabel = "NUM", description = "number param") int n;
@Parameters(index = "1", description = "the host parameter") InetAddress host;
}
String result = usageString(new ParamLabels(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> [-n=<number>] [-f=FILE]... [-P=KEY=VALUE]... NUM <host>%n" +
" NUM number param%n" +
" <host> the host parameter%n" +
" -f= FILE files%n" +
" -n= <number> a number option%n" +
" -P= KEY=VALUE Project properties (key-value pairs)%n"), result);
}
@Test
public void testUsageParamLabelsWithLongMapOptionName() {
@Command()
class ParamLabels {
@Option(names = {"-P", "--properties"},
paramLabel = "KEY=VALUE", type = {String.class, String.class},
description = "Project properties (key-value pairs)") Map<String, String> props;
@Option(names = "-f", paramLabel = "FILE", description = "a file") File f;
@Option(names = "-n", description = "a number option") int number;
@Parameters(index = "0", paramLabel = "NUM", description = "number param") int n;
@Parameters(index = "1", description = "the host parameter") InetAddress host;
}
String result = usageString(new ParamLabels(), Help.Ansi.OFF);
assertEquals(format("" +
"Usage: <main class> [-f=FILE] [-n=<number>] [-P=KEY=VALUE]... NUM <host>%n" +
" NUM number param%n" +
" <host> the host parameter%n" +
" -f= FILE a file%n" +
" -n= <number> a number option%n" +
" -P, --properties=KEY=VALUE%n" +
" Project properties (key-value pairs)%n"), result);
}
// ---------------
@Test
public void testUsageVariableArityRequiredShortOptionArray() throws UnsupportedEncodingException {
// if option is required at least once and can be specified multiple times:
// -f=ARG [-f=ARG]...
class Args {
@Option(names = "-a", required = true, paramLabel = "ARG") // default
String[] a;
@Option(names = "-b", required = true, paramLabel = "ARG", arity = "0..*")
List<String> b;
@Option(names = "-c", required = true, paramLabel = "ARG", arity = "1..*")
String[] c;
@Option(names = "-d", required = true, paramLabel = "ARG", arity = "2..*")
List<String> d;
}
String expected = String.format("" +
"Usage: <main class> -a=ARG [-a=ARG]... -b[=ARG...] [-b[=ARG...]]... -c=ARG...%n" +
" [-c=ARG...]... -d=ARG ARG... [-d=ARG ARG...]...%n" +
" -a= ARG%n" +
" -b= [ARG...]%n" +
" -c= ARG...%n" +
" -d= ARG ARG...%n");
//CommandLine.usage(new Args(), System.out);
assertEquals(expected, usageString(new Args(), Help.Ansi.OFF));
}
@Test
public void testUsageVariableArityShortOptionArray() throws UnsupportedEncodingException {
class Args {
@Option(names = "-a", paramLabel = "ARG") // default
List<String> a;
@Option(names = "-b", paramLabel = "ARG", arity = "0..*")
String[] b;
@Option(names = "-c", paramLabel = "ARG", arity = "1..*")
List<String> c;
@Option(names = "-d", paramLabel = "ARG", arity = "2..*")
String[] d;
}
String expected = String.format("" +
"Usage: <main class> [-a=ARG]... [-b[=ARG...]]... [-c=ARG...]... [-d=ARG%n" +
" ARG...]...%n" +
" -a= ARG%n" +
" -b= [ARG...]%n" +
" -c= ARG...%n" +
" -d= ARG ARG...%n");
//CommandLine.usage(new Args(), System.out);
assertEquals(expected, usageString(new Args(), Help.Ansi.OFF));
}
@Test
public void testUsageRangeArityRequiredShortOptionArray() throws UnsupportedEncodingException {
// if option is required at least once and can be specified multiple times:
// -f=ARG [-f=ARG]...
class Args {
@Option(names = "-a", required = true, paramLabel = "ARG", arity = "0..1")
List<String> a;
@Option(names = "-b", required = true, paramLabel = "ARG", arity = "1..2")
String[] b;
@Option(names = "-c", required = true, paramLabel = "ARG", arity = "1..3")
String[] c;
@Option(names = "-d", required = true, paramLabel = "ARG", arity = "2..4")
String[] d;
}
String expected = String.format("" +
"Usage: <main class> -a[=ARG] [-a[=ARG]]... -b=ARG [ARG] [-b=ARG [ARG]]...%n" +
" -c=ARG [ARG [ARG]] [-c=ARG [ARG [ARG]]]... -d=ARG ARG [ARG%n" +
" [ARG]] [-d=ARG ARG [ARG [ARG]]]...%n" +
" -a= [ARG]%n" +
" -b= ARG [ARG]%n" +
" -c= ARG [ARG [ARG]]%n" +
" -d= ARG ARG [ARG [ARG]]%n");
//CommandLine.usage(new Args(), System.out);
assertEquals(expected, usageString(new Args(), Help.Ansi.OFF));
}
@Test
public void testUsageRangeArityShortOptionArray() throws UnsupportedEncodingException {
class Args {
@Option(names = "-a", paramLabel = "ARG", arity = "0..1")
List<String> a;
@Option(names = "-b", paramLabel = "ARG", arity = "1..2")
String[] b;
@Option(names = "-c", paramLabel = "ARG", arity = "1..3")
String[] c;
@Option(names = "-d", paramLabel = "ARG", arity = "2..4")
String[] d;
}
String expected = String.format("" +
"Usage: <main class> [-a[=ARG]]... [-b=ARG [ARG]]... [-c=ARG [ARG [ARG]]]...%n" +
" [-d=ARG ARG [ARG [ARG]]]...%n" +
" -a= [ARG]%n" +
" -b= ARG [ARG]%n" +
" -c= ARG [ARG [ARG]]%n" +
" -d= ARG ARG [ARG [ARG]]%n");
//CommandLine.usage(new Args(), System.out);
assertEquals(expected, usageString(new Args(), Help.Ansi.OFF));
}
@Test
public void testUsageFixedArityRequiredShortOptionArray() throws UnsupportedEncodingException {
// if option is required at least once and can be specified multiple times:
// -f=ARG [-f=ARG]...
class Args {
@Option(names = "-a", required = true, paramLabel = "ARG") // default
String[] a;
@Option(names = "-b", required = true, paramLabel = "ARG", arity = "0")
String[] b;
@Option(names = "-c", required = true, paramLabel = "ARG", arity = "1")
String[] c;
@Option(names = "-d", required = true, paramLabel = "ARG", arity = "2")
String[] d;
}
String expected = String.format("" +
"Usage: <main class> -b [-b]... -a=ARG [-a=ARG]... -c=ARG [-c=ARG]... -d=ARG ARG%n" +
" [-d=ARG ARG]...%n" +
" -a= ARG%n" +
" -b%n" +
" -c= ARG%n" +
" -d= ARG ARG%n");
//CommandLine.usage(new Args(), System.out);
assertEquals(expected, usageString(new Args(), Help.Ansi.OFF));
}
@Test
public void testUsageFixedArityShortOptionArray() throws UnsupportedEncodingException {
class Args {
@Option(names = "-a", paramLabel = "ARG") // default
String[] a;
@Option(names = "-b", paramLabel = "ARG", arity = "0")
String[] b;
@Option(names = "-c", paramLabel = "ARG", arity = "1")
String[] c;
@Option(names = "-d", paramLabel = "ARG", arity = "2")
String[] d;
}
String expected = String.format("" +
"Usage: <main class> [-b]... [-a=ARG]... [-c=ARG]... [-d=ARG ARG]...%n" +
" -a= ARG%n" +
" -b%n" +
" -c= ARG%n" +
" -d= ARG ARG%n");
//CommandLine.usage(new Args(), System.out);
assertEquals(expected, usageString(new Args(), Help.Ansi.OFF));
}
//--------------
@Test
public void testUsageVariableArityRequiredLongOptionArray() throws UnsupportedEncodingException {
// if option is required at least once and can be specified multiple times:
// -f=ARG [-f=ARG]...
class Args {
@Option(names = "--aa", required = true, paramLabel = "ARG") // default
String[] a;
@Option(names = "--bb", required = true, paramLabel = "ARG", arity = "0..*")
List<String> b;
@Option(names = "--cc", required = true, paramLabel = "ARG", arity = "1..*")
String[] c;
@Option(names = "--dd", required = true, paramLabel = "ARG", arity = "2..*")
List<String> d;
}
String expected = String.format("" +
"Usage: <main class> --aa=ARG [--aa=ARG]... --bb[=ARG...] [--bb[=ARG...]]...%n" +
" --cc=ARG... [--cc=ARG...]... --dd=ARG ARG... [--dd=ARG%n" +
" ARG...]...%n" +
" --aa=ARG%n" +
" --bb[=ARG...]%n" +
" --cc=ARG...%n" +
" --dd=ARG ARG...%n");
//CommandLine.usage(new Args(), System.out);
assertEquals(expected, usageString(new Args(), Help.Ansi.OFF));
}
@Test
public void testUsageVariableArityLongOptionArray() throws UnsupportedEncodingException {
class Args {
@Option(names = "--aa", paramLabel = "ARG") // default
List<String> a;
@Option(names = "--bb", paramLabel = "ARG", arity = "0..*")
String[] b;
@Option(names = "--cc", paramLabel = "ARG", arity = "1..*")
List<String> c;
@Option(names = "--dd", paramLabel = "ARG", arity = "2..*")
String[] d;
}
String expected = String.format("" +
"Usage: <main class> [--aa=ARG]... [--bb[=ARG...]]... [--cc=ARG...]... [--dd=ARG%n" +
" ARG...]...%n" +
" --aa=ARG%n" +
" --bb[=ARG...]%n" +
" --cc=ARG...%n" +
" --dd=ARG ARG...%n");
//CommandLine.usage(new Args(), System.out);
assertEquals(expected, usageString(new Args(), Help.Ansi.OFF));
}
@Test
public void testUsageRangeArityRequiredLongOptionArray() throws UnsupportedEncodingException {
// if option is required at least once and can be specified multiple times:
// -f=ARG [-f=ARG]...
class Args {
@Option(names = "--aa", required = true, paramLabel = "ARG", arity = "0..1")
List<String> a;
@Option(names = "--bb", required = true, paramLabel = "ARG", arity = "1..2")
String[] b;
@Option(names = "--cc", required = true, paramLabel = "ARG", arity = "1..3")
String[] c;
@Option(names = "--dd", required = true, paramLabel = "ARG", arity = "2..4", description = "foobar")
String[] d;
}
String expected = String.format("" +
"Usage: <main class> --aa[=ARG] [--aa[=ARG]]... --bb=ARG [ARG] [--bb=ARG%n" +
" [ARG]]... --cc=ARG [ARG [ARG]] [--cc=ARG [ARG [ARG]]]...%n" +
" --dd=ARG ARG [ARG [ARG]] [--dd=ARG ARG [ARG [ARG]]]...%n" +
" --aa[=ARG]%n" +
" --bb=ARG [ARG]%n" +
" --cc=ARG [ARG [ARG]]%n" +
" --dd=ARG ARG [ARG [ARG]]%n" +
" foobar%n");
//CommandLine.usage(new Args(), System.out);
assertEquals(expected, usageString(new Args(), Help.Ansi.OFF));
}
@Test
public void testUsageRangeArityLongOptionArray() throws UnsupportedEncodingException {
class Args {
@Option(names = "--aa", paramLabel = "ARG", arity = "0..1")
List<String> a;
@Option(names = "--bb", paramLabel = "ARG", arity = "1..2")
String[] b;
@Option(names = "--cc", paramLabel = "ARG", arity = "1..3")
String[] c;
@Option(names = "--dd", paramLabel = "ARG", arity = "2..4", description = "foobar")
String[] d;
}
String expected = String.format("" +
"Usage: <main class> [--aa[=ARG]]... [--bb=ARG [ARG]]... [--cc=ARG [ARG%n" +
" [ARG]]]... [--dd=ARG ARG [ARG [ARG]]]...%n" +
" --aa[=ARG]%n" +
" --bb=ARG [ARG]%n" +
" --cc=ARG [ARG [ARG]]%n" +
" --dd=ARG ARG [ARG [ARG]]%n" +
" foobar%n");
//CommandLine.usage(new Args(), System.out);
assertEquals(expected, usageString(new Args(), Help.Ansi.OFF));
}
@Test
public void testUsageFixedArityRequiredLongOptionArray() throws UnsupportedEncodingException {
// if option is required at least once and can be specified multiple times:
// -f=ARG [-f=ARG]...
class Args {
@Option(names = "--aa", required = true, paramLabel = "ARG") // default
String[] a;
@Option(names = "--bb", required = true, paramLabel = "ARG", arity = "0")
String[] b;
@Option(names = "--cc", required = true, paramLabel = "ARG", arity = "1")
String[] c;
@Option(names = "--dd", required = true, paramLabel = "ARG", arity = "2")
String[] d;
}
String expected = String.format("" +
"Usage: <main class> --bb [--bb]... --aa=ARG [--aa=ARG]... --cc=ARG%n" +
" [--cc=ARG]... --dd=ARG ARG [--dd=ARG ARG]...%n" +
" --aa=ARG%n" +
" --bb%n" +
" --cc=ARG%n" +
" --dd=ARG ARG%n");
//CommandLine.usage(new Args(), System.out);
assertEquals(expected, usageString(new Args(), Help.Ansi.OFF));
}
@Test
public void testUsageFixedArityLongOptionArray() throws UnsupportedEncodingException {
class Args {
@Option(names = "--aa", paramLabel = "ARG") // default
String[] a;
@Option(names = "--bb", paramLabel = "ARG", arity = "0")
String[] b;
@Option(names = "--cc", paramLabel = "ARG", arity = "1")
String[] c;
@Option(names = "--dd", paramLabel = "ARG", arity = "2")
String[] d;
}
String expected = String.format("" +
"Usage: <main class> [--bb]... [--aa=ARG]... [--cc=ARG]... [--dd=ARG ARG]...%n" +
" --aa=ARG%n" +
" --bb%n" +
" --cc=ARG%n" +
" --dd=ARG ARG%n");
//CommandLine.usage(new Args(), System.out);
assertEquals(expected, usageString(new Args(), Help.Ansi.OFF));
}
//------------------
@Test
public void testUsageVariableArityRequiredShortOptionMap() throws UnsupportedEncodingException {
// if option is required at least once and can be specified multiple times:
// -f=ARG [-f=ARG]...
class Args {
@Option(names = "-a", required = true, paramLabel = "KEY=VAL") // default
Map<String, String> a;
@Option(names = "-b", required = true, arity = "0..*")
@SuppressWarnings("unchecked")
Map b;
@Option(names = "-c", required = true, arity = "1..*", type = {String.class, TimeUnit.class})
Map<String, TimeUnit> c;
@Option(names = "-d", required = true, arity = "2..*", type = {Integer.class, URL.class}, description = "description")
Map<Integer, URL> d;
}
String expected = String.format("" +
"Usage: <main class> -a=KEY=VAL [-a=KEY=VAL]... -b[=<String=String>...] [-b%n" +
" [=<String=String>...]]... -c=<String=TimeUnit>...%n" +
" [-c=<String=TimeUnit>...]... -d=<Integer=URL>%n" +
" <Integer=URL>... [-d=<Integer=URL> <Integer=URL>...]...%n" +
" -a= KEY=VAL%n" +
" -b= [<String=String>...]%n" +
" -c= <String=TimeUnit>...%n" +
" -d= <Integer=URL> <Integer=URL>...%n" +
" description%n");
//CommandLine.usage(new Args(), System.out);
assertEquals(expected, usageString(new Args(), Help.Ansi.OFF));
}
@Test
public void testUsageVariableArityOptionMap() throws UnsupportedEncodingException {
class Args {
@Option(names = "-a") // default
Map<String, String> a;
@Option(names = "-b", arity = "0..*", type = {Integer.class, Integer.class})
Map<Integer, Integer> b;
@Option(names = "-c", paramLabel = "KEY=VALUE", arity = "1..*", type = {String.class, TimeUnit.class})
Map<String, TimeUnit> c;
@Option(names = "-d", arity = "2..*", type = {String.class, URL.class}, description = "description")
Map<String, URL> d;
}
String expected = String.format("" +
"Usage: <main class> [-a=<String=String>]... [-b[=<Integer=Integer>...]]...%n" +
" [-c=KEY=VALUE...]... [-d=<String=URL> <String=URL>...]...%n" +
" -a= <String=String>%n" +
" -b= [<Integer=Integer>...]%n" +
"%n" + // TODO
" -c= KEY=VALUE...%n" +
" -d= <String=URL> <String=URL>...%n" +
" description%n");
//CommandLine.usage(new Args(), System.out);
assertEquals(expected, usageString(new Args(), Help.Ansi.OFF));
}
@Test
public void testUsageRangeArityRequiredOptionMap() throws UnsupportedEncodingException {
// if option is required at least once and can be specified multiple times:
// -f=ARG [-f=ARG]...
class Args {
@Option(names = "-a", required = true, arity = "0..1", description = "a description")
Map<String, String> a;
@Option(names = "-b", required = true, arity = "1..2", type = {Integer.class, Integer.class}, description = "b description")
Map<Integer, Integer> b;
@Option(names = "-c", required = true, arity = "1..3", type = {String.class, URL.class}, description = "c description")
Map<String, URL> c;
@Option(names = "-d", required = true, paramLabel = "K=URL", arity = "2..4", description = "d description")
Map<String, URL> d;
}
String expected = String.format("" +
"Usage: <main class> -a[=<String=String>] [-a[=<String=String>]]...%n" +
" -b=<Integer=Integer> [<Integer=Integer>]%n" +
" [-b=<Integer=Integer> [<Integer=Integer>]]...%n" +
" -c=<String=URL> [<String=URL> [<String=URL>]]%n" +
" [-c=<String=URL> [<String=URL> [<String=URL>]]]... -d=K=URL%n" +
" K=URL [K=URL [K=URL]] [-d=K=URL K=URL [K=URL [K=URL]]]...%n" +
" -a= [<String=String>] a description%n" +
" -b= <Integer=Integer> [<Integer=Integer>]%n" +
" b description%n" +
" -c= <String=URL> [<String=URL> [<String=URL>]]%n" +
" c description%n" +
" -d= K=URL K=URL [K=URL [K=URL]]%n" +
" d description%n");
//CommandLine.usage(new Args(), System.out);
assertEquals(expected, usageString(new Args(), Help.Ansi.OFF));
}
@Test
public void testUsageRangeArityOptionMap() throws UnsupportedEncodingException {
class Args {
@Option(names = "-a", arity = "0..1"/*, type = {UUID.class, URL.class}*/, description = "a description")
Map<UUID, URL> a;
@Option(names = "-b", arity = "1..2", type = {Long.class, UUID.class}, description = "b description")
Map<?, ?> b;
@Option(names = "-c", arity = "1..3", type = {Long.class}, description = "c description")
Map<?, ?> c;
@Option(names = "-d", paramLabel = "K=V", arity = "2..4", description = "d description")
Map<?, ?> d;
}
String expected = String.format("" +
"Usage: <main class> [-a[=<UUID=URL>]]... [-b=<Long=UUID> [<Long=UUID>]]...%n" +
" [-c=<String=String> [<String=String> [<String=String>]]]...%n" +
" [-d=K=V K=V [K=V [K=V]]]...%n" +
" -a= [<UUID=URL>] a description%n" +
" -b= <Long=UUID> [<Long=UUID>]%n" +
" b description%n" +
" -c= <String=String> [<String=String> [<String=String>]]%n" +
" c description%n" +
" -d= K=V K=V [K=V [K=V]] d description%n");
//CommandLine.usage(new Args(), System.out);
assertEquals(expected, usageString(new Args(), Help.Ansi.OFF));
}
@Test
public void testUsageFixedArityRequiredOptionMap() throws UnsupportedEncodingException {
// if option is required at least once and can be specified multiple times:
// -f=ARG [-f=ARG]...
class Args {
@Option(names = "-a", required = true, description = "a description")
Map<Short, Field> a;
@Option(names = "-b", required = true, paramLabel = "KEY=VAL", arity = "0", description = "b description")
@SuppressWarnings("unchecked")
Map<?, ?> b;
@Option(names = "-c", required = true, arity = "1", type = {Long.class, File.class}, description = "c description")
Map<Long, File> c;
@Option(names = "-d", required = true, arity = "2", type = {URI.class, URL.class}, description = "d description")
Map<URI, URL> d;
}
String expected = String.format("" +
"Usage: <main class> -b [-b]... -a=<Short=Field> [-a=<Short=Field>]...%n" +
" -c=<Long=File> [-c=<Long=File>]... -d=<URI=URL> <URI=URL>%n" +
" [-d=<URI=URL> <URI=URL>]...%n" +
" -a= <Short=Field> a description%n" +
" -b b description%n" +
" -c= <Long=File> c description%n" +
" -d= <URI=URL> <URI=URL> d description%n");
//CommandLine.usage(new Args(), System.out);
assertEquals(expected, usageString(new Args(), Help.Ansi.OFF));
}
@Test
public void testUsageFixedArityOptionMap() throws UnsupportedEncodingException {
class Args {
@Option(names = "-a", type = {Short.class, Field.class}, description = "a description")
Map<Short, Field> a;
@Option(names = "-b", arity = "0", type = {UUID.class, Long.class}, description = "b description")
@SuppressWarnings("unchecked")
Map<?, ?> b;
@Option(names = "-c", arity = "1", description = "c description")
Map<Long, File> c;
@Option(names = "-d", arity = "2", type = {URI.class, URL.class}, description = "d description")
Map<URI, URL> d;
}
String expected = String.format("" +
"Usage: <main class> [-b]... [-a=<Short=Field>]... [-c=<Long=File>]...%n" +
" [-d=<URI=URL> <URI=URL>]...%n" +
" -a= <Short=Field> a description%n" +
" -b b description%n" +
" -c= <Long=File> c description%n" +
" -d= <URI=URL> <URI=URL> d description%n");
//CommandLine.usage(new Args(), System.out);
assertEquals(expected, usageString(new Args(), Help.Ansi.OFF));
}
//--------------
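    // The tests in this section cover positional parameters. The same arity
    // rules apply as for options: the label is repeated once per required
    // value, optional values are bracketed, and "..." marks unbounded arity.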
@Test
public void testUsageVariableArityParametersArray() throws UnsupportedEncodingException {
// if option is required at least once and can be specified multiple times:
// -f=ARG [-f=ARG]...
class Args {
@Parameters(paramLabel = "APARAM", description = "APARAM description")
String[] a;
@Parameters(arity = "0..*", description = "b description")
List<String> b;
@Parameters(arity = "1..*", description = "c description")
String[] c;
@Parameters(arity = "2..*", description = "d description")
List<String> d;
}
String expected = String.format("" +
"Usage: <main class> [APARAM...] [<b>...] <c>... <d> <d>...%n" +
" [APARAM...] APARAM description%n" +
" [<b>...] b description%n" +
" <c>... c description%n" +
" <d> <d>... d description%n");
//CommandLine.usage(new Args(), System.out);
assertEquals(expected, usageString(new Args(), Help.Ansi.OFF));
}
@Test
public void testUsageRangeArityParameterArray() throws UnsupportedEncodingException {
class Args {
@Parameters(index = "0", paramLabel = "PARAMA", arity = "0..1", description = "PARAMA description")
List<String> a;
@Parameters(index = "0", paramLabel = "PARAMB", arity = "1..2", description = "PARAMB description")
String[] b;
@Parameters(index = "0", paramLabel = "PARAMC", arity = "1..3", description = "PARAMC description")
String[] c;
@Parameters(index = "0", paramLabel = "PARAMD", arity = "2..4", description = "PARAMD description")
String[] d;
}
String expected = String.format("" +
"Usage: <main class> [PARAMA] PARAMB [PARAMB] PARAMC [PARAMC [PARAMC]] PARAMD%n" +
" PARAMD [PARAMD [PARAMD]]%n" +
" [PARAMA] PARAMA description%n" +
" PARAMB [PARAMB] PARAMB description%n" +
" PARAMC [PARAMC [PARAMC]]%n" +
" PARAMC description%n" +
" PARAMD PARAMD [PARAMD [PARAMD]]%n" +
" PARAMD description%n");
//CommandLine.usage(new Args(), System.out);
assertEquals(expected, usageString(new Args(), Help.Ansi.OFF));
}
@Test
public void testUsageFixedArityParametersArray() throws UnsupportedEncodingException {
class Args {
@Parameters(description = "a description (default arity)")
String[] a;
@Parameters(index = "0", arity = "0", description = "b description (arity=0)")
String[] b;
@Parameters(index = "1", arity = "1", description = "b description (arity=1)")
String[] c;
@Parameters(index = "2", arity = "2", description = "b description (arity=2)")
String[] d;
}
String expected = String.format("" +
"Usage: <main class> [<a>...] <c> <d> <d>%n" +
" b description (arity=0)%n" +
" [<a>...] a description (default arity)%n" +
" <c> b description (arity=1)%n" +
" <d> <d> b description (arity=2)%n");
//CommandLine.usage(new Args(), System.out);
assertEquals(expected, usageString(new Args(), Help.Ansi.OFF));
}
@Test
public void testUsageVariableArityParametersMap() throws UnsupportedEncodingException {
class Args {
@Parameters()
Map<String, String> a;
@Parameters(arity = "0..*", description = "a description (arity=0..*)")
Map<Integer, Integer> b;
@Parameters(paramLabel = "KEY=VALUE", arity = "1..*", type = {String.class, TimeUnit.class})
Map<String, TimeUnit> c;
@Parameters(arity = "2..*", type = {String.class, URL.class}, description = "description")
Map<String, URL> d;
}
String expected = String.format("" +
"Usage: <main class> [<String=String>...] [<Integer=Integer>...] KEY=VALUE...%n" +
" <String=URL> <String=URL>...%n" +
" [<String=String>...]%n" +
" [<Integer=Integer>...] a description (arity=0..*)%n" +
" KEY=VALUE...%n" +
" <String=URL> <String=URL>...%n" +
" description%n");
//CommandLine.usage(new Args(), System.out);
assertEquals(expected, usageString(new Args(), Help.Ansi.OFF));
}
@Test
public void testUsageRangeArityParametersMap() throws UnsupportedEncodingException {
class Args {
@Parameters(index = "0", arity = "0..1"/*, type = {UUID.class, URL.class}*/, description = "a description")
Map<UUID, URL> a;
@Parameters(index = "1", arity = "1..2", type = {Long.class, UUID.class}, description = "b description")
Map<?, ?> b;
@Parameters(index = "2", arity = "1..3", type = {Long.class}, description = "c description")
Map<?, ?> c;
@Parameters(index = "3", paramLabel = "K=V", arity = "2..4", description = "d description")
Map<?, ?> d;
}
String expected = String.format("" +
"Usage: <main class> [<UUID=URL>] <Long=UUID> [<Long=UUID>] <String=String>%n" +
" [<String=String> [<String=String>]] K=V K=V [K=V [K=V]]%n" +
" [<UUID=URL>] a description%n" +
" <Long=UUID> [<Long=UUID>]%n" +
" b description%n" +
" <String=String> [<String=String> [<String=String>]]%n" +
" c description%n" +
" K=V K=V [K=V [K=V]] d description%n");
//CommandLine.usage(new Args(), System.out);
assertEquals(expected, usageString(new Args(), Help.Ansi.OFF));
}
@Test
public void testUsageFixedArityParametersMap() throws UnsupportedEncodingException {
class Args {
@Parameters(type = {Short.class, Field.class}, description = "a description")
Map<Short, Field> a;
@Parameters(index = "0", arity = "0", type = {UUID.class, Long.class}, description = "b description (arity=0)")
@SuppressWarnings("unchecked")
Map<?, ?> b;
@Parameters(index = "1", arity = "1", description = "c description")
Map<Long, File> c;
@Parameters(index = "2", arity = "2", type = {URI.class, URL.class}, description = "d description")
Map<URI, URL> d;
}
String expected = String.format("" +
"Usage: <main class> [<Short=Field>...] <Long=File> <URI=URL> <URI=URL>%n" +
" b description (arity=0)%n" +
" [<Short=Field>...] a description%n" +
" <Long=File> c description%n" +
" <URI=URL> <URI=URL> d description%n");
//CommandLine.usage(new Args(), System.out);
assertEquals(expected, usageString(new Args(), Help.Ansi.OFF));
}
@Test
public void testUsageWithCustomColorScheme() throws UnsupportedEncodingException {
Help.ColorScheme scheme = new Help.ColorScheme(Help.Ansi.ON)
.options(Style.bg_magenta).parameters(Style.bg_cyan).optionParams(Style.bg_yellow).commands(Style.reverse);
class Args {
@Parameters(description = "param desc") String[] params;
@Option(names = "-x", description = "option desc") String[] options;
}
ByteArrayOutputStream baos = new ByteArrayOutputStream();
CommandLine.usage(new Args(), new PrintStream(baos, true, "UTF8"), scheme);
String actual = baos.toString("UTF8");
String expected = String.format("" +
"Usage: @|reverse <main class>|@ [@|bg_magenta -x|@=@|bg_yellow <options>|@]... [@|bg_cyan <params>|@...]%n" +
" [@|bg_cyan <params>|@...] param desc%n" +
" @|bg_magenta -x|@= @|bg_yellow <|@@|bg_yellow options>|@ option desc%n");
assertEquals(Help.Ansi.ON.new Text(expected).toString(), actual);
}
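    // Note on the expected string above: Help.Ansi.Text parses the markup
    // "@|style1[,style2...] text|@", and converting such a Text to a String
    // replaces each styled section with the corresponding ANSI escape codes.
    // The assertion therefore compares fully rendered escape sequences rather
    // than the markup itself.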
//----------
@Test
public void testShortestFirstComparator_sortsShortestFirst() {
String[] values = {"12345", "12", "123", "123456", "1", "", "1234"};
Arrays.sort(values, new Help.ShortestFirst());
String[] expected = {"", "1", "12", "123", "1234", "12345", "123456"};
assertArrayEquals(expected, values);
}
@Test
public void testShortestFirstComparator_sortsDeclarationOrderIfEqualLength() {
String[] values = {"-d", "-", "-a", "--alpha", "--b", "--a", "--beta"};
Arrays.sort(values, new Help.ShortestFirst());
String[] expected = {"-", "-d", "-a", "--b", "--a", "--beta", "--alpha"};
assertArrayEquals(expected, values);
}
@Test
public void testSortByShortestOptionNameComparator() throws Exception {
class App {
@Option(names = {"-t", "--aaaa"}) boolean aaaa;
@Option(names = {"--bbbb", "-k"}) boolean bbbb;
@Option(names = {"-c", "--cccc"}) boolean cccc;
}
OptionSpec[] fields = options(new App(), "aaaa", "bbbb", "cccc"); // -tkc
Arrays.sort(fields, new Help.SortByShortestOptionNameAlphabetically());
OptionSpec[] expected = options(new App(), "cccc", "bbbb", "aaaa"); // -ckt
assertEquals(expected[0], fields[0]);
assertEquals(expected[1], fields[1]);
assertEquals(expected[2], fields[2]);
assertArrayEquals(expected, fields);
}
@Test
public void testSortByOptionArityAndNameComparator_sortsByMaxThenMinThenName() throws Exception {
class App {
@Option(names = {"-t", "--aaaa"} ) boolean tImplicitArity0;
@Option(names = {"-e", "--EEE"}, arity = "1" ) boolean explicitArity1;
@Option(names = {"--bbbb", "-k"} ) boolean kImplicitArity0;
@Option(names = {"--AAAA", "-a"} ) int aImplicitArity1;
@Option(names = {"--BBBB", "-z"} ) String[] zImplicitArity1;
@Option(names = {"--ZZZZ", "-b"}, arity = "1..3") String[] bExplicitArity1_3;
@Option(names = {"-f", "--ffff"} ) boolean fImplicitArity0;
}
OptionSpec[] fields = options(new App(), "tImplicitArity0", "explicitArity1", "kImplicitArity0",
"aImplicitArity1", "zImplicitArity1", "bExplicitArity1_3", "fImplicitArity0");
Arrays.sort(fields, new Help.SortByOptionArityAndNameAlphabetically());
OptionSpec[] expected = options(new App(),
"fImplicitArity0",
"kImplicitArity0",
"tImplicitArity0",
"aImplicitArity1",
"explicitArity1",
"zImplicitArity1",
"bExplicitArity1_3");
assertArrayEquals(expected, fields);
}
@Test
public void testCreateMinimalOptionRenderer_ReturnsMinimalOptionRenderer() {
assertEquals(Help.MinimalOptionRenderer.class, Help.createMinimalOptionRenderer().getClass());
}
@Test
public void testMinimalOptionRenderer_rendersFirstDeclaredOptionNameAndDescription() {
class Example {
@Option(names = {"---long", "-L"}, description = "long description") String longField;
@Option(names = {"-b", "-a", "--alpha"}, description = "other") String otherField;
}
Help.IOptionRenderer renderer = Help.createMinimalOptionRenderer();
Help help = new Help(new Example(), Help.Ansi.ON);
Help.IParamLabelRenderer parameterRenderer = help.createDefaultParamLabelRenderer();
OptionSpec option = help.options().get(0);
Text[][] row1 = renderer.render(option, parameterRenderer, Help.defaultColorScheme(
help.ansi()));
assertEquals(1, row1.length);
//assertArrayEquals(new String[]{"---long=<longField>", "long description"}, row1[0]);
assertArrayEquals(new Text[]{
help.ansi().new Text(format("%s---long%s=%s<longField>%s", "@|fg(yellow) ", "|@", "@|italic ", "|@")),
help.ansi().new Text("long description")}, row1[0]);
OptionSpec option2 = help.options().get(1);
Text[][] row2 = renderer.render(option2, parameterRenderer, Help.defaultColorScheme(
help.ansi()));
assertEquals(1, row2.length);
//assertArrayEquals(new String[]{"-b=<otherField>", "other"}, row2[0]);
assertArrayEquals(new Text[]{
help.ansi().new Text(format("%s-b%s=%s<otherField>%s", "@|fg(yellow) ", "|@", "@|italic ", "|@")),
help.ansi().new Text("other")}, row2[0]);
}
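    // The default option renderer (tested below) produces rows of five
    // columns: [0] required-option marker, [1] shortest option name,
    // [2] comma separator, [3] remaining option names plus parameter label,
    // and [4] description. The textArray(...) calls encode exactly that
    // layout.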
@Test
public void testCreateDefaultOptionRenderer_ReturnsDefaultOptionRenderer() {
assertEquals(Help.DefaultOptionRenderer.class, new Help(new UsageDemo()).createDefaultOptionRenderer().getClass());
}
@Test
public void testDefaultOptionRenderer_rendersShortestOptionNameThenOtherOptionNamesAndDescription() {
@Command(showDefaultValues = true)
class Example {
@Option(names = {"---long", "-L"}, description = "long description") String longField;
@Option(names = {"-b", "-a", "--alpha"}, description = "other") String otherField = "abc";
}
Help help = new Help(new Example());
Help.IOptionRenderer renderer = help.createDefaultOptionRenderer();
Help.IParamLabelRenderer parameterRenderer = help.createDefaultParamLabelRenderer();
OptionSpec option = help.options().get(0);
Text[][] row1 = renderer.render(option, parameterRenderer, help.colorScheme());
assertEquals(1, row1.length);
assertArrayEquals(Arrays.toString(row1[0]), textArray(help, "", "-L", ",", "---long=<longField>", "long description"), row1[0]);
//assertArrayEquals(Arrays.toString(row1[1]), textArray(help, "", "", "", "", " Default: null"), row1[1]); // #201 don't show null defaults
option = help.options().get(1);
Text[][] row2 = renderer.render(option, parameterRenderer, help.colorScheme());
assertEquals(2, row2.length);
assertArrayEquals(Arrays.toString(row2[0]), textArray(help, "", "-b", ",", "-a, --alpha=<otherField>", "other"), row2[0]);
assertArrayEquals(Arrays.toString(row2[1]), textArray(help, "", "", "", "", " Default: abc"), row2[1]);
}
@Test
public void testDefaultOptionRenderer_rendersSpecifiedMarkerForRequiredOptionsWithDefault() {
@Command(requiredOptionMarker = '*', showDefaultValues = true)
class Example {
@Option(names = {"-b", "-a", "--alpha"}, required = true, description = "other") String otherField ="abc";
}
Help help = new Help(new Example());
Help.IOptionRenderer renderer = help.createDefaultOptionRenderer();
Help.IParamLabelRenderer parameterRenderer = help.createDefaultParamLabelRenderer();
OptionSpec option = help.options().get(0);
Text[][] row = renderer.render(option, parameterRenderer, help.colorScheme());
assertEquals(2, row.length);
assertArrayEquals(Arrays.toString(row[0]), textArray(help, "*", "-b", ",", "-a, --alpha=<otherField>", "other"), row[0]);
assertArrayEquals(Arrays.toString(row[1]), textArray(help, "", "", "", "", " Default: abc"), row[1]);
}
@Test
public void testDefaultOptionRenderer_rendersSpecifiedMarkerForRequiredOptionsWithoutDefault() {
@Command(requiredOptionMarker = '*')
class Example {
@Option(names = {"-b", "-a", "--alpha"}, required = true, description = "other") String otherField ="abc";
}
Help help = new Help(new Example());
Help.IOptionRenderer renderer = help.createDefaultOptionRenderer();
Help.IParamLabelRenderer parameterRenderer = help.createDefaultParamLabelRenderer();
OptionSpec option = help.options().get(0);
Text[][] row = renderer.render(option, parameterRenderer, help.colorScheme());
assertEquals(1, row.length);
assertArrayEquals(Arrays.toString(row[0]), textArray(help, "*", "-b", ",", "-a, --alpha=<otherField>", "other"), row[0]);
}
@Test
public void testDefaultOptionRenderer_rendersSpacePrefixByDefaultForRequiredOptionsWithoutDefaultValue() {
class Example {
@Option(names = {"-b", "-a", "--alpha"}, required = true, description = "other") String otherField;
}
Help help = new Help(new Example());
Help.IOptionRenderer renderer = help.createDefaultOptionRenderer();
Help.IParamLabelRenderer parameterRenderer = help.createDefaultParamLabelRenderer();
OptionSpec option = help.options().get(0);
Text[][] row = renderer.render(option, parameterRenderer, help.colorScheme());
assertEquals(1, row.length);
assertArrayEquals(Arrays.toString(row[0]), textArray(help, " ", "-b", ",", "-a, --alpha=<otherField>", "other"), row[0]);
}
@Test
public void testDefaultOptionRenderer_rendersSpacePrefixByDefaultForRequiredOptionsWithDefaultValue() {
//@Command(showDefaultValues = true) // set programmatically
class Example {
@Option(names = {"-b", "-a", "--alpha"}, required = true, description = "other") String otherField;
}
Help help = new Help(new Example());
help.commandSpec().usageMessage().showDefaultValues(true);
Help.IOptionRenderer renderer = help.createDefaultOptionRenderer();
Help.IParamLabelRenderer parameterRenderer = help.createDefaultParamLabelRenderer();
OptionSpec option = help.options().get(0);
Text[][] row = renderer.render(option, parameterRenderer, help.colorScheme());
assertEquals(1, row.length);
assertArrayEquals(Arrays.toString(row[0]), textArray(help, " ", "-b", ",", "-a, --alpha=<otherField>", "other"), row[0]);
// assertArrayEquals(Arrays.toString(row[1]), textArray(help, "", "", "", "", " Default: null"), row[1]); // #201 don't show null defaults
}
@Test
public void testDefaultParameterRenderer_rendersSpacePrefixByDefaultForParametersWithPositiveArity() {
class Required {
@Parameters(description = "required") String required;
}
Help help = new Help(new Required());
Help.IParameterRenderer renderer = help.createDefaultParameterRenderer();
Help.IParamLabelRenderer parameterRenderer = Help.createMinimalParamLabelRenderer();
PositionalParamSpec param = help.positionalParameters().get(0);
Text[][] row1 = renderer.render(param, parameterRenderer, help.colorScheme());
assertEquals(1, row1.length);
assertArrayEquals(Arrays.toString(row1[0]), textArray(help, " ", "", "", "<required>", "required"), row1[0]);
}
@Test
public void testDefaultParameterRenderer_rendersSpecifiedMarkerForParametersWithPositiveArity() {
@Command(requiredOptionMarker = '*')
class Required {
@Parameters(description = "required") String required;
}
Help help = new Help(new Required());
Help.IParameterRenderer renderer = help.createDefaultParameterRenderer();
Help.IParamLabelRenderer parameterRenderer = Help.createMinimalParamLabelRenderer();
PositionalParamSpec param = help.positionalParameters().get(0);
Text[][] row1 = renderer.render(param, parameterRenderer, help.colorScheme());
assertEquals(1, row1.length);
assertArrayEquals(Arrays.toString(row1[0]), textArray(help, "*", "", "", "<required>", "required"), row1[0]);
}
@Test
    public void testDefaultParameterRenderer_rendersNoMarkerForParametersWithZeroArity() {
@Command(requiredOptionMarker = '*')
class Optional {
@Parameters(arity = "0..1", description = "optional") String optional;
}
Help help = new Help(new Optional());
Help.IParameterRenderer renderer = help.createDefaultParameterRenderer();
Help.IParamLabelRenderer parameterRenderer = Help.createMinimalParamLabelRenderer();
PositionalParamSpec param = help.positionalParameters().get(0);
Text[][] row1 = renderer.render(param, parameterRenderer, help.colorScheme());
assertEquals(1, row1.length);
assertArrayEquals(Arrays.toString(row1[0]), textArray(help, "", "", "", "<optional>", "optional"), row1[0]);
}
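    // Taken together, the three tests above show how the marker column works
    // for positional parameters: parameters with positive arity get a space
    // (or the configured requiredOptionMarker), while parameters whose arity
    // starts at zero get no marker at all.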
@Test
public void testDefaultOptionRenderer_rendersCommaOnlyIfBothShortAndLongOptionNamesExist() {
class Example {
@Option(names = {"-v"}, description = "shortBool") boolean shortBoolean;
@Option(names = {"--verbose"}, description = "longBool") boolean longBoolean;
@Option(names = {"-x", "--xeno"}, description = "combiBool") boolean combiBoolean;
@Option(names = {"-s"}, description = "shortOnly") String shortOnlyField;
@Option(names = {"--long"}, description = "longOnly") String longOnlyField;
@Option(names = {"-b", "--beta"}, description = "combi") String combiField;
}
Help help = new Help(new Example());
help.commandSpec().usageMessage().showDefaultValues(false); // omit default values from description column
Help.IOptionRenderer renderer = help.createDefaultOptionRenderer();
Help.IParamLabelRenderer parameterRenderer = help.createDefaultParamLabelRenderer();
String[][] expected = new String[][] {
{"", "-v", "", "", "shortBool"},
{"", "", "", "--verbose", "longBool"},
{"", "-x", ",", "--xeno", "combiBool"},
{"", "-s", "=", "<shortOnlyField>", "shortOnly"},
{"", "", "", "--long=<longOnlyField>", "longOnly"},
{"", "-b", ",", "--beta=<combiField>", "combi"},
};
int i = -1;
for (OptionSpec option : help.options()) {
Text[][] row = renderer.render(option, parameterRenderer, help.colorScheme());
assertEquals(1, row.length);
assertArrayEquals(Arrays.toString(row[0]), textArray(help, expected[++i]), row[0]);
}
}
@Test
public void testDefaultOptionRenderer_omitsDefaultValuesForBooleanFields() {
@Command(showDefaultValues = true)
class Example {
@Option(names = {"-v"}, description = "shortBool") boolean shortBoolean;
@Option(names = {"--verbose"}, description = "longBool") Boolean longBoolean;
@Option(names = {"-s"}, description = "shortOnly") String shortOnlyField = "short";
@Option(names = {"--long"}, description = "longOnly") String longOnlyField = "long";
@Option(names = {"-b", "--beta"}, description = "combi") int combiField = 123;
}
Help help = new Help(new Example());
Help.IOptionRenderer renderer = help.createDefaultOptionRenderer();
Help.IParamLabelRenderer parameterRenderer = help.createDefaultParamLabelRenderer();
String[][] expected = new String[][] {
{"", "-v", "", "", "shortBool"},
{"", "", "", "--verbose", "longBool"},
{"", "-s", "=", "<shortOnlyField>", "shortOnly"},
{"", "", "", "", "Default: short"},
{"", "", "", "--long=<longOnlyField>", "longOnly"},
{"", "", "", "", "Default: long"},
{"", "-b", ",", "--beta=<combiField>", "combi"},
{"", "", "", "", "Default: 123"},
};
int[] rowCount = {1, 1, 2, 2, 2};
int i = -1;
int rowIndex = 0;
for (OptionSpec option : help.options()) {
Text[][] row = renderer.render(option, parameterRenderer, help.colorScheme());
assertEquals(rowCount[++i], row.length);
assertArrayEquals(Arrays.toString(row[0]), textArray(help, expected[rowIndex]), row[0]);
rowIndex += rowCount[i];
}
}
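    // Sketch added for illustration (the class and field names below are
    // invented for this example): the "Default: ..." row can also be enabled
    // programmatically through the UsageMessageSpec instead of the
    // @Command(showDefaultValues = true) annotation used above.
    @Test
    public void testShowDefaultValuesProgrammaticallySketch() {
        class SketchDefaults {
            @Option(names = "-n", description = "a number") int number = 42;
        }
        Help help = new Help(new SketchDefaults(), Help.Ansi.OFF);
        help.commandSpec().usageMessage().showDefaultValues(true);
        Help.IOptionRenderer renderer = help.createDefaultOptionRenderer();
        Help.IParamLabelRenderer labelRenderer = help.createDefaultParamLabelRenderer();
        Text[][] rows = renderer.render(help.options().get(0), labelRenderer, help.colorScheme());
        assertEquals(2, rows.length); // second row carries the default value
        assertEquals("Default: 42", rows[1][4].toString().trim());
    }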
@Test
public void testCreateDefaultParameterRenderer_ReturnsDefaultParameterRenderer() {
assertEquals(Help.DefaultParamLabelRenderer.class, new Help(new UsageDemo()).createDefaultParamLabelRenderer().getClass());
}
@Test
public void testDefaultParameterRenderer_showsParamLabelIfPresentOrFieldNameOtherwise() {
class Example {
@Option(names = "--without" ) String longField;
@Option(names = "--with", paramLabel = "LABEL") String otherField;
}
Help help = new Help(new Example());
Help.IParamLabelRenderer equalSeparatedParameterRenderer = help.createDefaultParamLabelRenderer();
Help help2 = new Help(new Example());
help2.commandSpec().parser().separator(" ");
Help.IParamLabelRenderer spaceSeparatedParameterRenderer = help2.createDefaultParamLabelRenderer();
String[] expected = new String[] {
"<longField>",
"LABEL",
};
int i = -1;
for (OptionSpec option : help.options()) {
i++;
Text withSpace = spaceSeparatedParameterRenderer.renderParameterLabel(option, help.ansi(), Collections.<IStyle>emptyList());
assertEquals(withSpace.toString(), " " + expected[i], withSpace.toString());
Text withEquals = equalSeparatedParameterRenderer.renderParameterLabel(option, help.ansi(), Collections.<IStyle>emptyList());
assertEquals(withEquals.toString(), "=" + expected[i], withEquals.toString());
}
}
@Test
public void testDefaultParameterRenderer_appliesToPositionalArgumentsIgnoresSeparator() {
class WithLabel { @Parameters(paramLabel = "POSITIONAL_ARGS") String positional; }
class WithoutLabel { @Parameters() String positional; }
Help withLabel = new Help(new WithLabel());
Help.IParamLabelRenderer equals = withLabel.createDefaultParamLabelRenderer();
withLabel.commandSpec().parser().separator("=");
Help.IParamLabelRenderer spaced = withLabel.createDefaultParamLabelRenderer();
Text withSpace = spaced.renderParameterLabel(withLabel.positionalParameters().get(0), withLabel.ansi(), Collections.<IStyle>emptyList());
assertEquals(withSpace.toString(), "POSITIONAL_ARGS", withSpace.toString());
Text withEquals = equals.renderParameterLabel(withLabel.positionalParameters().get(0), withLabel.ansi(), Collections.<IStyle>emptyList());
assertEquals(withEquals.toString(), "POSITIONAL_ARGS", withEquals.toString());
Help withoutLabel = new Help(new WithoutLabel());
withSpace = spaced.renderParameterLabel(withoutLabel.positionalParameters().get(0), withoutLabel.ansi(), Collections.<IStyle>emptyList());
assertEquals(withSpace.toString(), "<positional>", withSpace.toString());
withEquals = equals.renderParameterLabel(withoutLabel.positionalParameters().get(0), withoutLabel.ansi(), Collections.<IStyle>emptyList());
assertEquals(withEquals.toString(), "<positional>", withEquals.toString());
}
@Test
public void testUsageOptions_hideParamSyntax_on() {
class App {
@Option(names = "-x1") String single;
@Option(names = "-s1", arity = "2") String[] multi;
@Option(names = "-x2", hideParamSyntax = true) String singleHide;
@Option(names = "-s2", hideParamSyntax = true, arity = "2") String[] multiHide;
@Option(names = "-o3", hideParamSyntax = false, split = ",") String[] multiSplit;
@Option(names = "-s3", hideParamSyntax = true, split = ",") String[] multiHideSplit;
}
String actual = new CommandLine(new App()).getUsageMessage(Help.Ansi.OFF);
String expected = String.format("" +
"Usage: <main class> [-x1=<single>] [-x2=<singleHide>] [-o3=<multiSplit>[,%n" +
" <multiSplit>...]]... [-s3=<multiHideSplit>]... [-s1=<multi>%n" +
" <multi>]... [-s2=<multiHide>]...%n" +
" -o3=<multiSplit>[,<multiSplit>...]%n" +
"%n" +
" -s1=<multi> <multi>%n" +
" -s2=<multiHide>%n" +
" -s3=<multiHideSplit>%n" +
" -x1=<single>%n" +
" -x2=<singleHide>%n");
assertEquals(expected, actual);
}
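    // hideParamSyntax = true suppresses the surrounding "[...]", the
    // repetition "..." and the split-regex decorations, leaving only the bare
    // parameter label: compare "-s2=<multiHide>" with "-s1=<multi> <multi>"
    // in the expected output above.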
@Test
public void testUsageParameters_hideParamSyntax_on() {
class App {
@Parameters() String single;
@Parameters(arity = "2") String[] multi;
@Parameters(hideParamSyntax = true) String singleHide;
@Parameters(hideParamSyntax = true, arity = "2") String[] multiHide;
@Parameters(hideParamSyntax = false, split = ",") String[] multiSplit;
@Parameters(hideParamSyntax = true, split = ",") String[] multiHideSplit;
}
String actual = new CommandLine(new App()).getUsageMessage(Help.Ansi.OFF);
String expected = String.format("" +
"Usage: <main class> [<multiSplit>[,<multiSplit>...]...] <multiHideSplit>%n" +
" <single> <singleHide> (<multi> <multi>)... <multiHide>%n" +
" [<multiSplit>[,<multiSplit>...]...]%n" +
"%n" +
" <multiHideSplit>%n" +
" <single>%n" +
" <singleHide>%n" +
" (<multi> <multi>)...%n" +
" <multiHide>%n");
assertEquals(expected, actual);
}
@Test
public void testDefaultParameterRenderer_hideParamSyntax_on() {
class App {
@Parameters(index = "0") String single;
@Parameters(index = "1", arity = "2") String[] multi;
@Parameters(index = "2", hideParamSyntax = true) String singleHide;
@Parameters(index = "3", hideParamSyntax = true, arity = "2") String[] multiHide;
@Parameters(index = "4", hideParamSyntax = false, arity = "*", split = ",") String[] multiSplit;
@Parameters(index = "5", hideParamSyntax = true, arity = "*", split = ",") String[] multiHideSplit;
}
Help withLabel = new Help(new App(), Help.Ansi.OFF);
withLabel.commandSpec().parser().separator("=");
Help.IParamLabelRenderer equals = withLabel.createDefaultParamLabelRenderer();
withLabel.commandSpec().parser().separator(" ");
Help.IParamLabelRenderer spaced = withLabel.createDefaultParamLabelRenderer();
String[] expected = new String[] {
"<single>", //
"<multi> <multi>", //
"<singleHide>", //
"<multiHide>", //
"[<multiSplit>[,<multiSplit>...]...]", //
"<multiHideSplit>", //
};
for (int i = 0; i < expected.length; i++) {
Text withEquals = equals.renderParameterLabel(withLabel.positionalParameters().get(i), withLabel.ansi(), Collections.<IStyle>emptyList());
Text withSpace = spaced.renderParameterLabel(withLabel.positionalParameters().get(i), withLabel.ansi(), Collections.<IStyle>emptyList());
assertEquals(withEquals.toString(), expected[i], withEquals.toString());
assertEquals(withSpace.toString(), expected[i], withSpace.toString());
}
}
@Test
public void testDefaultLayout_addsEachRowToTable() {
final Text[][] values = {
textArray(Help.Ansi.OFF, "a", "b", "c", "d"),
textArray(Help.Ansi.OFF, "1", "2", "3", "4")
};
final int[] count = {0};
TextTable tt = TextTable.forDefaultColumns(Help.Ansi.OFF, UsageMessageSpec.DEFAULT_USAGE_WIDTH);
tt = new TextTable(Help.Ansi.OFF, tt.columns()) {
@Override public void addRowValues(Text[] columnValues) {
assertArrayEquals(values[count[0]], columnValues);
count[0]++;
}
};
Help.Layout layout = new Help.Layout(Help.defaultColorScheme(Help.Ansi.OFF), tt);
layout.layout(null, values);
assertEquals(2, count[0]);
}
@Test
    public void testAbbreviatedSynopsis_withoutParameters() {
@Command(abbreviateSynopsis = true)
class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--count", "-c"}) int count;
@Option(names = {"--help", "-h"}, hidden = true) boolean helpRequested;
}
Help help = new Help(new App(), Help.Ansi.OFF);
assertEquals("<main class> [OPTIONS]" + LINESEP, help.synopsis(0));
}
@Test
    public void testAbbreviatedSynopsis_withoutParameters_ANSI() {
@Command(abbreviateSynopsis = true)
class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--count", "-c"}) int count;
@Option(names = {"--help", "-h"}, hidden = true) boolean helpRequested;
}
Help help = new Help(new App(), Help.Ansi.ON);
assertEquals(Help.Ansi.ON.new Text("@|bold <main class>|@ [OPTIONS]" + LINESEP).toString(), help.synopsis(0));
}
@Test
    public void testAbbreviatedSynopsis_withParameters() {
@Command(abbreviateSynopsis = true)
class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--count", "-c"}) int count;
@Option(names = {"--help", "-h"}, hidden = true) boolean helpRequested;
@Parameters File[] files;
}
Help help = new Help(new App(), Help.Ansi.OFF);
assertEquals("<main class> [OPTIONS] [<files>...]" + LINESEP, help.synopsis(0));
}
@Test
    public void testAbbreviatedSynopsis_withParameters_ANSI() {
@Command(abbreviateSynopsis = true)
class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--count", "-c"}) int count;
@Option(names = {"--help", "-h"}, hidden = true) boolean helpRequested;
@Parameters File[] files;
}
Help help = new Help(new App(), Help.Ansi.ON);
assertEquals(Help.Ansi.ON.new Text("@|bold <main class>|@ [OPTIONS] [@|yellow <files>|@...]" + LINESEP).toString(), help.synopsis(0));
}
@Test
    public void testAbbreviatedSynopsis_commandNameCustomizableDeclaratively() throws UnsupportedEncodingException {
@Command(abbreviateSynopsis = true, name = "aprogram")
class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--count", "-c"}) int count;
@Option(names = {"--help", "-h"}, hidden = true) boolean helpRequested;
@Parameters File[] files;
}
String expected = "" +
"Usage: aprogram [OPTIONS] [<files>...]%n" +
" [<files>...]%n" +
" -c, --count=<count>%n" +
" -v, --verbose%n";
String actual = usageString(new CommandLine(new App()), Help.Ansi.OFF);
assertEquals(String.format(expected), actual);
}
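    // Sketch added for illustration (the command name "sketch" and the fields
    // are invented): abbreviateSynopsis collapses all options into a single
    // "[OPTIONS]" token while positional parameters keep their labels.
    @Test
    public void testAbbreviatedSynopsisSketch() {
        @Command(abbreviateSynopsis = true, name = "sketch")
        class SketchApp {
            @Option(names = "-q") boolean quiet;
            @Parameters(paramLabel = "FILE") File[] files;
        }
        Help help = new Help(new SketchApp(), Help.Ansi.OFF);
        assertEquals("sketch [OPTIONS] [FILE...]" + LINESEP, help.synopsis(0));
    }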
@Test
    public void testAbbreviatedSynopsis_commandNameCustomizableProgrammatically() throws UnsupportedEncodingException {
@Command(abbreviateSynopsis = true)
class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--count", "-c"}) int count;
@Option(names = {"--help", "-h"}, hidden = true) boolean helpRequested;
@Parameters File[] files;
}
String expected = "" +
"Usage: anotherProgram [OPTIONS] [<files>...]%n" +
" [<files>...]%n" +
" -c, --count=<count>%n" +
" -v, --verbose%n";
String actual = usageString(new CommandLine(new App()).setCommandName("anotherProgram"), Help.Ansi.OFF);
assertEquals(String.format(expected), actual);
}
@Test
public void testSynopsis_commandNameCustomizableDeclaratively() throws UnsupportedEncodingException {
@Command(name = "aprogram")
class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--count", "-c"}) int count;
@Option(names = {"--help", "-h"}, hidden = true) boolean helpRequested;
@Parameters File[] files;
}
String expected = "" +
"Usage: aprogram [-v] [-c=<count>] [<files>...]%n" +
" [<files>...]%n" +
" -c, --count=<count>%n" +
" -v, --verbose%n";
String actual = usageString(new CommandLine(new App()), Help.Ansi.OFF);
assertEquals(String.format(expected), actual);
}
@Test
public void testSynopsis_commandNameCustomizableProgrammatically() throws UnsupportedEncodingException {
class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--count", "-c"}) int count;
@Option(names = {"--help", "-h"}, hidden = true) boolean helpRequested;
@Parameters File[] files;
}
String expected = "" +
"Usage: anotherProgram [-v] [-c=<count>] [<files>...]%n" +
" [<files>...]%n" +
" -c, --count=<count>%n" +
" -v, --verbose%n";
String actual = usageString(new CommandLine(new App()).setCommandName("anotherProgram"), Help.Ansi.OFF);
assertEquals(String.format(expected), actual);
}
@Test
public void testSynopsis_optionalOptionArity1_n_withDefaultSeparator() {
@Command() class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--count", "-c"}, arity = "1..*") int count;
@Option(names = {"--help", "-h"}, hidden = true) boolean helpRequested;
}
Help help = new Help(new App(), Help.Ansi.OFF);
assertEquals("<main class> [-v] [-c=<count>...]" + LINESEP, help.synopsis(0));
}
@Test
public void testSynopsis_optionalOptionArity1_n_withDefaultSeparator_ANSI() {
@Command() class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--count", "-c"}, arity = "1..*") int count;
@Option(names = {"--help", "-h"}, hidden = true) boolean helpRequested;
}
Help help = new Help(new App(), Help.Ansi.ON);
assertEquals(Help.Ansi.ON.new Text("@|bold <main class>|@ [@|yellow -v|@] [@|yellow -c|@=@|italic <count>|@...]" + LINESEP),
help.synopsis(0));
}
@Test
public void testSynopsis_optionalOptionArity0_1_withSpaceSeparator() {
@Command(separator = " ") class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--count", "-c"}, arity = "0..1") int count;
@Option(names = {"--help", "-h"}, hidden = true) boolean helpRequested;
}
Help help = new Help(new App(), Help.Ansi.OFF);
assertEquals("<main class> [-v] [-c [<count>]]" + LINESEP, help.synopsis(0));
}
@Test
public void testSynopsis_optionalOptionArity0_1_withSpaceSeparator_ANSI() {
@Command(separator = " ") class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--count", "-c"}, arity = "0..1") int count;
@Option(names = {"--help", "-h"}, hidden = true) boolean helpRequested;
}
Help help = new Help(new App(), Help.Ansi.ON);
assertEquals(Help.Ansi.ON.new Text("@|bold <main class>|@ [@|yellow -v|@] [@|yellow -c|@ [@|italic <count>|@]]" + LINESEP), help.synopsis(0));
}
@Test
public void testSynopsis_requiredOptionWithSeparator() {
@Command() class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--count", "-c"}, required = true) int count;
@Option(names = {"--help", "-h"}, hidden = true) boolean helpRequested;
}
Help help = new Help(new App(), Help.Ansi.OFF);
assertEquals("<main class> [-v] -c=<count>" + LINESEP, help.synopsis(0));
}
@Test
public void testSynopsis_requiredOptionWithSeparator_ANSI() {
@Command() class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--count", "-c"}, required = true) int count;
@Option(names = {"--help", "-h"}, hidden = true) boolean helpRequested;
}
Help help = new Help(new App(), Help.Ansi.ON);
assertEquals(Help.Ansi.ON.new Text("@|bold <main class>|@ [@|yellow -v|@] @|yellow -c|@=@|italic <count>|@" + LINESEP), help.synopsis(0));
}
@Test
public void testSynopsis_optionalOption_withSpaceSeparator() {
@Command(separator = " ") class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--count", "-c"}) int count;
@Option(names = {"--help", "-h"}, hidden = true) boolean helpRequested;
}
Help help = new Help(new App(), Help.Ansi.OFF);
assertEquals("<main class> [-v] [-c <count>]" + LINESEP, help.synopsis(0));
}
@Test
public void testSynopsis_optionalOptionArity0_1__withSeparator() {
class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--count", "-c"}, arity = "0..1") int count;
@Option(names = {"--help", "-h"}, hidden = true) boolean helpRequested;
}
Help help = new Help(new App(), Help.Ansi.OFF);
assertEquals("<main class> [-v] [-c[=<count>]]" + LINESEP, help.synopsis(0));
}
@Test
public void testSynopsis_optionalOptionArity0_n__withSeparator() {
@Command(separator = "=") class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--count", "-c"}, arity = "0..*") int count;
@Option(names = {"--help", "-h"}, hidden = true) boolean helpRequested;
}
Help help = new Help(new App(), Help.Ansi.OFF);
        // NOTE: "<main class> [-v] [-c[=<count>]...]" might be expected here, but arity=0 on an int field is an odd configuration to begin with...
assertEquals("<main class> [-v] [-c[=<count>...]]" + LINESEP, help.synopsis(0));
}
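    // Summary of the synopsis rules exercised above: with the default "="
    // separator, arity 0..1 renders [-c[=<count>]], arity 0..* renders
    // [-c[=<count>...]], and arity 1..* renders [-c=<count>...]; with a space
    // separator the "=" is replaced by a space and an optional parameter is
    // bracketed separately, as in [-c [<count>]].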
@Test
public void testSynopsis_optionalOptionArity1_n__withSeparator() {
@Command(separator = "=") class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--count", "-c"}, arity = "1..*") int count;
@Option(names = {"--help", "-h"}, hidden = true) boolean helpRequested;
}
Help help = new Help(new App(), Help.Ansi.OFF);
assertEquals("<main class> [-v] [-c=<count>...]" + LINESEP, help.synopsis(0));
}
@Test
public void testSynopsis_withProgrammaticallySetSeparator_withParameters() throws UnsupportedEncodingException {
class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--count", "-c"}) int count;
@Option(names = {"--help", "-h"}, hidden = true) boolean helpRequested;
@Parameters File[] files;
}
CommandLine commandLine = new CommandLine(new App()).setSeparator(":");
String actual = usageString(commandLine, Help.Ansi.OFF);
String expected = "" +
"Usage: <main class> [-v] [-c:<count>] [<files>...]%n" +
" [<files>...]%n" +
" -c, --count:<count>%n" +
" -v, --verbose%n";
assertEquals(String.format(expected), actual);
}
@Test
public void testSynopsis_withSeparator_withParameters() {
@Command(separator = ":") class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--count", "-c"}) int count;
@Option(names = {"--help", "-h"}, hidden = true) boolean helpRequested;
@Parameters File[] files;
}
Help help = new Help(new App(), Help.Ansi.OFF);
assertEquals("<main class> [-v] [-c:<count>] [<files>...]" + LINESEP, help.synopsis(0));
}
@Test
public void testSynopsis_withSeparator_withParameters_ANSI() {
@Command(separator = ":") class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--count", "-c"}) int count;
@Option(names = {"--help", "-h"}, hidden = true) boolean helpRequested;
@Parameters File[] files;
}
Help help = new Help(new App(), Help.Ansi.ON);
assertEquals(Help.Ansi.ON.new Text("@|bold <main class>|@ [@|yellow -v|@] [@|yellow -c|@:@|italic <count>|@] [@|yellow <files>|@...]" + LINESEP),
help.synopsis(0));
}
@Test
public void testSynopsis_withSeparator_withLabeledParameters() {
@Command(separator = "=") class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--count", "-c"}) int count;
@Option(names = {"--help", "-h"}, hidden = true) boolean helpRequested;
@Parameters(paramLabel = "FILE") File[] files;
}
Help help = new Help(new App(), Help.Ansi.OFF);
assertEquals("<main class> [-v] [-c=<count>] [FILE...]" + LINESEP, help.synopsis(0));
}
@Test
public void testSynopsis_withSeparator_withLabeledParameters_ANSI() {
@Command(separator = "=") class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--count", "-c"}) int count;
@Option(names = {"--help", "-h"}, hidden = true) boolean helpRequested;
@Parameters(paramLabel = "FILE") File[] files;
}
Help help = new Help(new App(), Help.Ansi.ON);
assertEquals(Help.Ansi.ON.new Text("@|bold <main class>|@ [@|yellow -v|@] [@|yellow -c|@=@|italic <count>|@] [@|yellow FILE|@...]" + LINESEP),
help.synopsis(0));
}
@Test
public void testSynopsis_withSeparator_withLabeledRequiredParameters() {
@Command(separator = "=") class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--count", "-c"}) int count;
@Option(names = {"--help", "-h"}, hidden = true) boolean helpRequested;
@Parameters(paramLabel = "FILE", arity = "1..*") File[] files;
}
Help help = new Help(new App(), Help.Ansi.OFF);
assertEquals("<main class> [-v] [-c=<count>] FILE..." + LINESEP, help.synopsis(0));
}
@Test
public void testSynopsis_withSeparator_withLabeledRequiredParameters_ANSI() {
@Command(separator = "=") class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--count", "-c"}) int count;
@Option(names = {"--help", "-h"}, hidden = true) boolean helpRequested;
@Parameters(paramLabel = "FILE", arity = "1..*") File[] files;
}
Help help = new Help(new App(), Help.Ansi.ON);
assertEquals(Help.Ansi.ON.new Text("@|bold <main class>|@ [@|yellow -v|@] [@|yellow -c|@=@|italic <count>|@] @|yellow FILE|@..." + LINESEP),
help.synopsis(0));
}
@Test
public void testSynopsis_clustersBooleanOptions() {
@Command(separator = "=") class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--aaaa", "-a"}) boolean aBoolean;
@Option(names = {"--xxxx", "-x"}) Boolean xBoolean;
@Option(names = {"--count", "-c"}, paramLabel = "COUNT") int count;
}
Help help = new Help(new App(), Help.Ansi.OFF);
assertEquals("<main class> [-avx] [-c=COUNT]" + LINESEP, help.synopsis(0));
}
@Test
public void testSynopsis_clustersRequiredBooleanOptions() {
@Command(separator = "=") class App {
@Option(names = {"--verbose", "-v"}, required = true) boolean verbose;
@Option(names = {"--aaaa", "-a"}, required = true) boolean aBoolean;
@Option(names = {"--xxxx", "-x"}, required = true) Boolean xBoolean;
@Option(names = {"--count", "-c"}, paramLabel = "COUNT") int count;
}
Help help = new Help(new App(), Help.Ansi.OFF);
assertEquals("<main class> -avx [-c=COUNT]" + LINESEP, help.synopsis(0));
}
@Test
public void testSynopsis_clustersRequiredBooleanOptionsSeparately() {
@Command(separator = "=") class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--aaaa", "-a"}) boolean aBoolean;
@Option(names = {"--xxxx", "-x"}) Boolean xBoolean;
@Option(names = {"--Verbose", "-V"}, required = true) boolean requiredVerbose;
@Option(names = {"--Aaaa", "-A"}, required = true) boolean requiredABoolean;
@Option(names = {"--Xxxx", "-X"}, required = true) Boolean requiredXBoolean;
@Option(names = {"--count", "-c"}, paramLabel = "COUNT") int count;
}
Help help = new Help(new App(), Help.Ansi.OFF);
assertEquals("<main class> -AVX [-avx] [-c=COUNT]" + LINESEP, help.synopsis(0));
}
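    // Boolean short options are clustered POSIX-style in the synopsis:
    // required flags form one unbracketed group (-AVX), optional flags a
    // separate bracketed group ([-avx]), and options that take a parameter
    // are never clustered.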
@Test
public void testSynopsis_clustersRequiredBooleanOptionsSeparately_ANSI() {
@Command(separator = "=") class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--aaaa", "-a"}) boolean aBoolean;
@Option(names = {"--xxxx", "-x"}) Boolean xBoolean;
@Option(names = {"--Verbose", "-V"}, required = true) boolean requiredVerbose;
@Option(names = {"--Aaaa", "-A"}, required = true) boolean requiredABoolean;
@Option(names = {"--Xxxx", "-X"}, required = true) Boolean requiredXBoolean;
@Option(names = {"--count", "-c"}, paramLabel = "COUNT") int count;
}
Help help = new Help(new App(), Help.Ansi.ON);
assertEquals(Help.Ansi.ON.new Text("@|bold <main class>|@ @|yellow -AVX|@ [@|yellow -avx|@] [@|yellow -c|@=@|italic COUNT|@]" + LINESEP),
help.synopsis(0));
}
@Test
public void testSynopsis_firstLineLengthAdjustedForSynopsisHeading() {
//Usage: small-test-program [-acorv!?] [--version] [-h <number>] [-p <file>|<folder>] [-d
// <folder> [<folder>]] [-i <includePattern>
// [<includePattern>...]]
@Command(name="small-test-program", sortOptions = false, separator = " ")
class App {
@Option(names = "-a") boolean a;
@Option(names = "-c") boolean c;
@Option(names = "-o") boolean o;
@Option(names = "-r") boolean r;
@Option(names = "-v") boolean v;
@Option(names = "-!") boolean exclamation;
@Option(names = "-?") boolean question;
@Option(names = {"--version"}) boolean version;
@Option(names = {"--handle", "-h"}) int number;
@Option(names = {"--ppp", "-p"}, paramLabel = "<file>|<folder>") File f;
@Option(names = {"--ddd", "-d"}, paramLabel = "<folder>", arity="1..2") File[] d;
@Option(names = {"--include", "-i"}, paramLabel = "<includePattern>") String pattern;
}
Help help = new Help(new App(), Help.Ansi.OFF);
String expected = "" +
"Usage: small-test-program [-!?acorv] [--version] [-h <number>] [-i" + LINESEP +
" <includePattern>] [-p <file>|<folder>] [-d <folder>" + LINESEP +
" [<folder>]]..." + LINESEP;
assertEquals(expected, help.synopsisHeading() + help.synopsis(help.synopsisHeadingLength()));
help.commandSpec().usageMessage().synopsisHeading("Usage:%n");
expected = "" +
"Usage:" + LINESEP +
"small-test-program [-!?acorv] [--version] [-h <number>] [-i <includePattern>]" + LINESEP +
" [-p <file>|<folder>] [-d <folder> [<folder>]]..." + LINESEP;
assertEquals(expected, help.synopsisHeading() + help.synopsis(help.synopsisHeadingLength()));
}
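    // synopsisHeadingLength() measures the last line of the synopsis heading,
    // so a single-line "Usage: " heading makes the wrapped synopsis align
    // after it, while a heading that ends in %n resets the indent to column
    // zero, as the two assertions above demonstrate.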
@Test
public void testLongMultiLineSynopsisIndented() {
@Command(name = "<best-app-ever>")
class App {
@Option(names = "--long-option-name", paramLabel = "<long-option-value>") int a;
@Option(names = "--another-long-option-name", paramLabel = "<another-long-option-value>") int b;
@Option(names = "--third-long-option-name", paramLabel = "<third-long-option-value>") int c;
@Option(names = "--fourth-long-option-name", paramLabel = "<fourth-long-option-value>") int d;
}
Help help = new Help(new App(), Help.Ansi.OFF);
assertEquals(String.format(
"<best-app-ever> [--another-long-option-name=<another-long-option-value>]%n" +
" [--fourth-long-option-name=<fourth-long-option-value>]%n" +
" [--long-option-name=<long-option-value>]%n" +
" [--third-long-option-name=<third-long-option-value>]%n"),
help.synopsis(0));
}
@Test
public void testLongMultiLineSynopsisWithAtMarkIndented() {
@Command(name = "<best-app-ever>")
class App {
@Option(names = "--long-option@-name", paramLabel = "<long-option-valu@@e>") int a;
@Option(names = "--another-long-option-name", paramLabel = "^[<another-long-option-value>]") int b;
@Option(names = "--third-long-option-name", paramLabel = "<third-long-option-value>") int c;
@Option(names = "--fourth-long-option-name", paramLabel = "<fourth-long-option-value>") int d;
}
Help help = new Help(new App(), Help.Ansi.OFF);
assertEquals(String.format(
"<best-app-ever> [--another-long-option-name=^[<another-long-option-value>]]%n" +
" [--fourth-long-option-name=<fourth-long-option-value>]%n" +
" [--long-option@-name=<long-option-valu@@e>]%n" +
" [--third-long-option-name=<third-long-option-value>]%n"),
help.synopsis(0));
}
@Test
public void testLongMultiLineSynopsisWithAtMarkIndented_ANSI() {
@Command(name = "<best-app-ever>")
class App {
@Option(names = "--long-option@-name", paramLabel = "<long-option-valu@@e>") int a;
@Option(names = "--another-long-option-name", paramLabel = "^[<another-long-option-value>]") int b;
@Option(names = "--third-long-option-name", paramLabel = "<third-long-option-value>") int c;
@Option(names = "--fourth-long-option-name", paramLabel = "<fourth-long-option-value>") int d;
}
Help help = new Help(new App(), Help.Ansi.ON);
assertEquals(Help.Ansi.ON.new Text(String.format(
"@|bold <best-app-ever>|@ [@|yellow --another-long-option-name|@=@|italic ^[<another-long-option-value>]|@]%n" +
" [@|yellow --fourth-long-option-name|@=@|italic <fourth-long-option-value>|@]%n" +
" [@|yellow --long-option@-name|@=@|italic <long-option-valu@@e>|@]%n" +
" [@|yellow --third-long-option-name|@=@|italic <third-long-option-value>|@]%n")),
help.synopsis(0));
}
@Test
public void testCustomSynopsis() {
@Command(customSynopsis = {
"<the-app> --number=NUMBER --other-option=<aargh>",
" --more=OTHER --and-other-option=<aargh>",
"<the-app> --number=NUMBER --and-other-option=<aargh>",
})
class App {@Option(names = "--ignored") boolean ignored;}
Help help = new Help(new App(), Help.Ansi.OFF);
assertEquals(String.format(
"<the-app> --number=NUMBER --other-option=<aargh>%n" +
" --more=OTHER --and-other-option=<aargh>%n" +
"<the-app> --number=NUMBER --and-other-option=<aargh>%n"),
help.synopsis(0));
}
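    // Lines supplied via customSynopsis replace the generated synopsis
    // verbatim (each followed by a line separator); other annotated options
    // still appear in the option list of the full usage message.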
@Test
public void testTextTable() {
TextTable table = TextTable.forDefaultColumns(Help.Ansi.OFF, UsageMessageSpec.DEFAULT_USAGE_WIDTH);
table.addRowValues(textArray(Help.Ansi.OFF, "", "-v", ",", "--verbose", "show what you're doing while you are doing it"));
table.addRowValues(textArray(Help.Ansi.OFF, "", "-p", null, null, "the quick brown fox jumped over the lazy dog. The quick brown fox jumped over the lazy dog."));
assertEquals(String.format(
" -v, --verbose show what you're doing while you are doing it%n" +
" -p the quick brown fox jumped over the lazy dog. The%n" +
" quick brown fox jumped over the lazy dog.%n"
,""), table.toString(new StringBuilder()).toString());
}
@Test(expected = IllegalArgumentException.class)
    public void testTextTableRejectsRowWithTooManyValues() {
TextTable table = TextTable.forDefaultColumns(Help.Ansi.OFF, UsageMessageSpec.DEFAULT_USAGE_WIDTH);
table.addRowValues(textArray(Help.Ansi.OFF, "", "-c", ",", "--create", "description", "INVALID", "Row 3"));
// assertEquals(String.format("" +
// " -c, --create description %n" +
// " INVALID %n" +
// " Row 3 %n"
// ,""), table.toString(new StringBuilder()).toString());
}
@Test
public void testTextTableAddsNewRowWhenAnyColumnTooLong() {
TextTable table = TextTable.forDefaultColumns(Help.Ansi.OFF, UsageMessageSpec.DEFAULT_USAGE_WIDTH);
table.addRowValues("*", "-c", ",",
"--create, --create2, --create3, --create4, --create5, --create6, --create7, --create8",
"description");
assertEquals(String.format("" +
"* -c, --create, --create2, --create3, --create4, --create5, --create6, --create7,%n" +
" --create8%n" +
" description%n"
,""), table.toString(new StringBuilder()).toString());
table = TextTable.forDefaultColumns(Help.Ansi.OFF, UsageMessageSpec.DEFAULT_USAGE_WIDTH);
table.addRowValues("", "-c", ",",
"--create, --create2, --create3, --create4, --create5, --create6, --createAA7, --create8",
"description");
assertEquals(String.format("" +
" -c, --create, --create2, --create3, --create4, --create5, --create6, --createAA7,%n" +
" --create8%n" +
" description%n"
,""), table.toString(new StringBuilder()).toString());
}
@Test
public void testCatUsageFormat() {
@Command(name = "cat",
customSynopsis = "cat [OPTIONS] [FILE...]",
description = "Concatenate FILE(s), or standard input, to standard output.",
footer = "Copyright(c) 2017")
class Cat {
@Parameters(paramLabel = "FILE", hidden = true, description = "Files whose contents to display") List<File> files;
@Option(names = "--help", help = true, description = "display this help and exit") boolean help;
@Option(names = "--version", help = true, description = "output version information and exit") boolean version;
@Option(names = "-u", description = "(ignored)") boolean u;
@Option(names = "-t", description = "equivalent to -vT") boolean t;
@Option(names = "-e", description = "equivalent to -vET") boolean e;
@Option(names = {"-A", "--show-all"}, description = "equivalent to -vET") boolean showAll;
@Option(names = {"-s", "--squeeze-blank"}, description = "suppress repeated empty output lines") boolean squeeze;
@Option(names = {"-v", "--show-nonprinting"}, description = "use ^ and M- notation, except for LDF and TAB") boolean v;
@Option(names = {"-b", "--number-nonblank"}, description = "number nonempty output lines, overrides -n") boolean b;
@Option(names = {"-T", "--show-tabs"}, description = "display TAB characters as ^I") boolean T;
@Option(names = {"-E", "--show-ends"}, description = "display $ at end of each line") boolean E;
@Option(names = {"-n", "--number"}, description = "number all output lines") boolean n;
}
ByteArrayOutputStream baos = new ByteArrayOutputStream();
CommandLine.usage(new Cat(), new PrintStream(baos), Help.Ansi.OFF);
String expected = String.format(
"Usage: cat [OPTIONS] [FILE...]%n" +
"Concatenate FILE(s), or standard input, to standard output.%n" +
" -A, --show-all equivalent to -vET%n" +
" -b, --number-nonblank number nonempty output lines, overrides -n%n" +
" -e equivalent to -vET%n" +
" -E, --show-ends display $ at end of each line%n" +
" -n, --number number all output lines%n" +
" -s, --squeeze-blank suppress repeated empty output lines%n" +
" -t equivalent to -vT%n" +
" -T, --show-tabs display TAB characters as ^I%n" +
" -u (ignored)%n" +
" -v, --show-nonprinting use ^ and M- notation, except for LDF and TAB%n" +
" --help display this help and exit%n" +
" --version output version information and exit%n" +
"Copyright(c) 2017%n", "");
assertEquals(expected, baos.toString());
}
@Test
public void testZipUsageFormat() {
String expected = String.format("" +
"Copyright (c) 1990-2008 Info-ZIP - Type 'zip \"-L\"' for software license.%n" +
"Zip 3.0 (July 5th 2008). Command:%n" +
"zip [-options] [-b path] [-t mmddyyyy] [-n suffixes] [zipfile list] [-xi list]%n" +
" The default action is to add or replace zipfile entries from list, which%n" +
" can include the special name - to compress standard input.%n" +
" If zipfile and list are omitted, zip compresses stdin to stdout.%n" +
" -f freshen: only changed files -u update: only changed or new files%n" +
" -d delete entries in zipfile -m move into zipfile (delete OS files)%n" +
" -r recurse into directories -j junk (don't record) directory names%n" +
" -0 store only -l convert LF to CR LF (-ll CR LF to LF)%n" +
" -1 compress faster -9 compress better%n" +
" -q quiet operation -v verbose operation/print version info%n" +
" -c add one-line comments -z add zipfile comment%n" +
" -@ read names from stdin -o make zipfile as old as latest entry%n" +
" -x exclude the following names -i include only the following names%n" +
" -F fix zipfile (-FF try harder) -D do not add directory entries%n" +
" -A adjust self-extracting exe -J junk zipfile prefix (unzipsfx)%n" +
" -T test zipfile integrity -X eXclude eXtra file attributes%n" +
" -y store symbolic links as the link instead of the referenced file%n" +
" -e encrypt -n don't compress these suffixes%n" +
" -h2 show more help%n");
assertEquals(expected, CustomLayoutDemo.createZipUsageFormat(Help.Ansi.OFF));
}
@Test
public void testNetstatUsageFormat() {
String expected = String.format("" +
"Displays protocol statistics and current TCP/IP network connections.%n" +
"%n" +
"NETSTAT [-a] [-b] [-e] [-f] [-n] [-o] [-p proto] [-q] [-r] [-s] [-t] [-x] [-y]%n" +
" [interval]%n" +
"%n" +
" -a Displays all connections and listening ports.%n" +
" -b Displays the executable involved in creating each connection or%n" +
" listening port. In some cases well-known executables host%n" +
" multiple independent components, and in these cases the%n" +
" sequence of components involved in creating the connection or%n" +
" listening port is displayed. In this case the executable name%n" +
" is in [] at the bottom, on top is the component it called, and%n" +
" so forth until TCP/IP was reached. Note that this option can be%n" +
" time-consuming and will fail unless you have sufficient%n" +
" permissions.%n" +
" -e Displays Ethernet statistics. This may be combined with the -s%n" +
" option.%n" +
" -f Displays Fully Qualified Domain Names (FQDN) for foreign%n" +
" addresses.%n" +
" -n Displays addresses and port numbers in numerical form.%n" +
" -o Displays the owning process ID associated with each connection.%n" +
" -p proto Shows connections for the protocol specified by proto; proto%n" +
" may be any of: TCP, UDP, TCPv6, or UDPv6. If used with the -s%n" +
" option to display per-protocol statistics, proto may be any of:%n" +
" IP, IPv6, ICMP, ICMPv6, TCP, TCPv6, UDP, or UDPv6.%n" +
" -q Displays all connections, listening ports, and bound%n" +
" nonlistening TCP ports. Bound nonlistening ports may or may not%n" +
" be associated with an active connection.%n" +
" -r Displays the routing table.%n" +
" -s Displays per-protocol statistics. By default, statistics are%n" +
" shown for IP, IPv6, ICMP, ICMPv6, TCP, TCPv6, UDP, and UDPv6;%n" +
" the -p option may be used to specify a subset of the default.%n" +
" -t Displays the current connection offload state.%n" +
" -x Displays NetworkDirect connections, listeners, and shared%n" +
" endpoints.%n" +
" -y Displays the TCP connection template for all connections.%n" +
" Cannot be combined with the other options.%n" +
" interval Redisplays selected statistics, pausing interval seconds%n" +
" between each display. Press CTRL+C to stop redisplaying%n" +
" statistics. If omitted, netstat will print the current%n" +
" configuration information once.%n"
, "");
assertEquals(expected, CustomLayoutDemo.createNetstatUsageFormat(Help.Ansi.OFF));
}
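    // The next test exercises the @Parameters index syntax: a plain number pins a
    // parameter to a single position, index = "3" combined with arity = "1..2"
    // consumes one or two arguments at that position, and "4..*" is an open-ended
    // range. The last field omits its index so it collects all positions, and
    // hidden = true keeps it out of the usage text.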
@Test
public void testUsageIndexedPositionalParameters() throws UnsupportedEncodingException {
        @Command
class App {
@Parameters(index = "0", description = "source host") InetAddress host1;
@Parameters(index = "1", description = "source port") int port1;
@Parameters(index = "2", description = "destination host") InetAddress host2;
@Parameters(index = "3", arity = "1..2", description = "destination port range") int[] port2range;
@Parameters(index = "4..*", description = "files to transfer") String[] files;
@Parameters(hidden = true) String[] all;
}
String actual = usageString(new App(), Help.Ansi.OFF);
String expected = String.format(
"Usage: <main class> <host1> <port1> <host2> <port2range> [<port2range>]%n" +
" [<files>...]%n" +
" <host1> source host%n" +
" <port1> source port%n" +
" <host2> destination host%n" +
" <port2range> [<port2range>]%n" +
" destination port range%n" +
" [<files>...] files to transfer%n"
);
assertEquals(expected, actual);
}
@Command(name = "base", abbreviateSynopsis = true, commandListHeading = "c o m m a n d s",
customSynopsis = "cust", description = "base description", descriptionHeading = "base descr heading",
footer = "base footer", footerHeading = "base footer heading",
header = "base header", headerHeading = "base header heading",
optionListHeading = "base option heading", parameterListHeading = "base param heading",
requiredOptionMarker = '&', separator = ";", showDefaultValues = true,
sortOptions = false, synopsisHeading = "abcd")
class Base { }
@Test
public void testAttributesInheritedWhenSubclassingForReuse() throws UnsupportedEncodingException {
@Command
class EmptySub extends Base {}
Help help = new Help(new EmptySub());
assertEquals("base", help.commandName());
assertEquals(String.format("cust%n"), help.synopsis(0));
assertEquals(String.format("cust%n"), help.customSynopsis());
assertEquals(String.format("base%n"), help.abbreviatedSynopsis());
assertEquals(String.format("base%n"), help.detailedSynopsis(0,null, true));
assertEquals("abcd", help.synopsisHeading());
assertEquals("", help.commandList());
assertEquals("", help.commandListHeading());
assertEquals("c o m m a n d s", help.commandSpec().usageMessage().commandListHeading());
assertEquals(String.format("base description%n"), help.description());
assertEquals("base descr heading", help.descriptionHeading());
assertEquals(String.format("base footer%n"), help.footer());
assertEquals("base footer heading", help.footerHeading());
assertEquals(String.format("base header%n"), help.header());
assertEquals("base header heading", help.headerHeading());
assertEquals("", help.optionList());
assertEquals("", help.optionListHeading());
assertEquals("base option heading", help.commandSpec().usageMessage().optionListHeading());
assertEquals("", help.parameterList());
assertEquals("", help.parameterListHeading());
assertEquals("base param heading", help.commandSpec().usageMessage().parameterListHeading());
assertEquals(";", help.commandSpec().parser().separator());
assertEquals('&', help.commandSpec().usageMessage().requiredOptionMarker());
assertTrue(help.commandSpec().usageMessage().abbreviateSynopsis());
assertTrue(help.commandSpec().usageMessage().showDefaultValues());
assertFalse(help.commandSpec().usageMessage().sortOptions());
}
@Test
public void testSubclassAttributesOverrideEmptySuper() {
@Command
class EmptyBase {}
@Command(name = "base", abbreviateSynopsis = true, commandListHeading = "c o m m a n d s",
customSynopsis = "cust", description = "base description", descriptionHeading = "base descr heading",
footer = "base footer", footerHeading = "base footer heading",
header = "base header", headerHeading = "base header heading",
optionListHeading = "base option heading", parameterListHeading = "base param heading",
requiredOptionMarker = '&', separator = ";", showDefaultValues = true,
sortOptions = false, synopsisHeading = "abcd", subcommands = Sub.class)
class FullBase extends EmptyBase{ }
Help help = new Help(new FullBase());
assertEquals("base", help.commandName());
assertEquals(String.format("cust%n"), help.synopsis(0));
assertEquals(String.format("cust%n"), help.customSynopsis());
assertEquals(String.format("base [COMMAND]%n"), help.abbreviatedSynopsis());
assertEquals(String.format("base [COMMAND]%n"), help.detailedSynopsis(0, null, true));
assertEquals("abcd", help.synopsisHeading());
assertEquals(String.format(" sub This is a subcommand%n"), help.commandList());
assertEquals("c o m m a n d s", help.commandListHeading());
assertEquals(String.format("base description%n"), help.description());
assertEquals("base descr heading", help.descriptionHeading());
assertEquals(String.format("base footer%n"), help.footer());
assertEquals("base footer heading", help.footerHeading());
assertEquals(String.format("base header%n"), help.header());
assertEquals("base header heading", help.headerHeading());
assertEquals("", help.optionList());
assertEquals("base option heading", help.commandSpec().usageMessage().optionListHeading());
assertEquals("", help.optionListHeading()); // because no options
assertEquals("", help.parameterList());
assertEquals("base param heading", help.commandSpec().usageMessage().parameterListHeading());
assertEquals("", help.parameterListHeading()); // because no parameters
assertTrue(help.commandSpec().usageMessage().abbreviateSynopsis());
assertTrue(help.commandSpec().usageMessage().showDefaultValues());
assertFalse(help.commandSpec().usageMessage().sortOptions());
assertEquals(";", help.commandSpec().parser().separator());
assertEquals('&', help.commandSpec().usageMessage().requiredOptionMarker());
}
@Test
public void testSubclassAttributesOverrideSuperValues() {
@Command(name = "sub", abbreviateSynopsis = false, commandListHeading = "subc o m m a n d s",
customSynopsis = "subcust", description = "sub description", descriptionHeading = "sub descr heading",
footer = "sub footer", footerHeading = "sub footer heading",
header = "sub header", headerHeading = "sub header heading",
optionListHeading = "sub option heading", parameterListHeading = "sub param heading",
requiredOptionMarker = '%', separator = ":", showDefaultValues = false,
sortOptions = true, synopsisHeading = "xyz")
class FullSub extends Base{ }
Help help = new Help(new FullSub());
assertEquals("sub", help.commandName());
assertEquals(String.format("subcust%n"), help.synopsis(0));
assertEquals(String.format("subcust%n"), help.customSynopsis());
assertEquals(String.format("sub%n"), help.abbreviatedSynopsis());
assertEquals(String.format("sub%n"), help.detailedSynopsis(0,null, true));
assertEquals("xyz", help.synopsisHeading());
assertEquals("", help.commandList());
assertEquals("", help.commandListHeading()); // empty: no commands
assertEquals("subc o m m a n d s", help.commandSpec().usageMessage().commandListHeading());
assertEquals(String.format("sub description%n"), help.description());
assertEquals("sub descr heading", help.descriptionHeading());
assertEquals(String.format("sub footer%n"), help.footer());
assertEquals("sub footer heading", help.footerHeading());
assertEquals(String.format("sub header%n"), help.header());
assertEquals("sub header heading", help.headerHeading());
assertEquals("", help.optionList());
assertEquals("", help.optionListHeading());
assertEquals("sub option heading", help.commandSpec().usageMessage().optionListHeading());
assertEquals("", help.parameterList());
assertEquals("", help.parameterListHeading());
assertEquals("sub param heading", help.commandSpec().usageMessage().parameterListHeading());
assertTrue(help.commandSpec().usageMessage().abbreviateSynopsis());
assertTrue(help.commandSpec().usageMessage().showDefaultValues());
assertFalse(help.commandSpec().usageMessage().sortOptions());
assertEquals(":", help.commandSpec().parser().separator());
assertEquals('%', help.commandSpec().usageMessage().requiredOptionMarker());
}
static class UsageDemo {
@Option(names = "-a", description = "boolean option with short name only")
boolean a;
@Option(names = "-b", paramLabel = "INT", description = "short option with a parameter")
int b;
@Option(names = {"-c", "--c-option"}, description = "boolean option with short and long name")
boolean c;
@Option(names = {"-d", "--d-option"}, paramLabel = "FILE", description = "option with parameter and short and long name")
File d;
@Option(names = "--e-option", description = "boolean option with only a long name")
boolean e;
@Option(names = "--f-option", paramLabel = "STRING", description = "option with parameter and only a long name")
String f;
@Option(names = {"-g", "--g-option-with-a-name-so-long-that-it-runs-into-the-descriptions-column"}, description = "boolean option with short and long name")
boolean g;
@Parameters(index = "0", paramLabel = "0BLAH", description = "first parameter")
String param0;
@Parameters(index = "1", paramLabel = "1PARAMETER-with-a-name-so-long-that-it-runs-into-the-descriptions-column", description = "2nd parameter")
String param1;
@Parameters(index = "2..*", paramLabel = "remaining", description = "remaining parameters")
String param2_n;
@Parameters(index = "*", paramLabel = "all", description = "all parameters")
String param_n;
}
@Test
public void testSubclassedCommandHelp() {
@Command(name = "parent", description = "parent description")
class ParentOption {
}
@Command(name = "child", description = "child description")
class ChildOption extends ParentOption {
}
String actual = usageString(new ChildOption(), Help.Ansi.OFF);
assertEquals(String.format(
"Usage: child%n" +
"child description%n"), actual);
}
@Test
public void testSynopsisOrderCorrectWhenParametersDeclaredOutOfOrder() {
class WithParams {
@Parameters(index = "1") String param1;
@Parameters(index = "0") String param0;
}
Help help = new Help(new WithParams());
assertEquals(format("<main class> <param0> <param1>%n"), help.synopsis(0));
}
@Test
public void testSynopsisOrderCorrectWhenSubClassAddsParameters() {
class BaseWithParams {
@Parameters(index = "1") String param1;
@Parameters(index = "0") String param0;
}
class SubWithParams extends BaseWithParams {
@Parameters(index = "3") String param3;
@Parameters(index = "2") String param2;
}
Help help = new Help(new SubWithParams());
assertEquals(format("<main class> <param0> <param1> <param2> <param3>%n"), help.synopsis(0));
}
@Test
public void testUsageMainCommand_NoAnsi() {
String actual = usageString(Demo.mainCommand(), Help.Ansi.OFF);
assertEquals(String.format(Demo.EXPECTED_USAGE_MAIN), actual);
}
@Test
public void testUsageMainCommand_ANSI() {
String actual = usageString(Demo.mainCommand(), Help.Ansi.ON);
assertEquals(Help.Ansi.ON.new Text(String.format(Demo.EXPECTED_USAGE_MAIN_ANSI)), actual);
}
@Test
public void testUsageSubcommandGitStatus_NoAnsi() {
String actual = usageString(new Demo.GitStatus(), Help.Ansi.OFF);
assertEquals(String.format(Demo.EXPECTED_USAGE_GITSTATUS), actual);
}
@Test
public void testUsageSubcommandGitStatus_ANSI() {
String actual = usageString(new Demo.GitStatus(), Help.Ansi.ON);
assertEquals(Help.Ansi.ON.new Text(String.format(Demo.EXPECTED_USAGE_GITSTATUS_ANSI)), actual);
}
@Test
public void testUsageSubcommandGitCommit_NoAnsi() {
String actual = usageString(new Demo.GitCommit(), Help.Ansi.OFF);
assertEquals(String.format(Demo.EXPECTED_USAGE_GITCOMMIT), actual);
}
@Test
public void testUsageSubcommandGitCommit_ANSI() {
String actual = usageString(new Demo.GitCommit(), Help.Ansi.ON);
assertEquals(Help.Ansi.ON.new Text(String.format(Demo.EXPECTED_USAGE_GITCOMMIT_ANSI)), actual);
}
@Test
public void testUsageNestedSubcommand() throws IOException {
@Command(name = "main") class MainCommand { @Option(names = "-a") boolean a; @Option(names = "-h", help = true) boolean h;}
@Command(name = "cmd1") class ChildCommand1 { @Option(names = "-b") boolean b; }
@Command(name = "cmd2") class ChildCommand2 { @Option(names = "-c") boolean c; @Option(names = "-h", help = true) boolean h;}
@Command(name = "sub11") class GrandChild1Command1 { @Option(names = "-d") boolean d; }
@Command(name = "sub12") class GrandChild1Command2 { @Option(names = "-e") int e; }
@Command(name = "sub21") class GrandChild2Command1 { @Option(names = "-h", help = true) boolean h; }
@Command(name = "sub22") class GrandChild2Command2 { @Option(names = "-g") boolean g; }
@Command(name = "sub22sub1") class GreatGrandChild2Command2_1 {
@Option(names = "-h", help = true) boolean h;
@Option(names = {"-t", "--type"}) String customType;
}
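        // addSubcommand accepts either a pre-built CommandLine (used below where
        // grandchild commands must be attached first) or a bare annotated object,
        // which picocli wraps in a CommandLine internally (sub11, sub12, sub21).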
CommandLine commandLine = new CommandLine(new MainCommand());
commandLine
.addSubcommand("cmd1", new CommandLine(new ChildCommand1())
.addSubcommand("sub11", new GrandChild1Command1())
.addSubcommand("sub12", new GrandChild1Command2())
)
.addSubcommand("cmd2", new CommandLine(new ChildCommand2())
.addSubcommand("sub21", new GrandChild2Command1())
.addSubcommand("sub22", new CommandLine(new GrandChild2Command2())
.addSubcommand("sub22sub1", new GreatGrandChild2Command2_1())
)
);
String main = usageString(commandLine, Help.Ansi.OFF);
assertEquals(String.format("" +
"Usage: main [-ah] [COMMAND]%n" +
" -a%n" +
" -h%n" +
"Commands:%n" +
" cmd1%n" +
" cmd2%n"), main);
String cmd2 = usageString(commandLine.getSubcommands().get("cmd2"), Help.Ansi.OFF);
assertEquals(String.format("" +
"Usage: main cmd2 [-ch] [COMMAND]%n" +
" -c%n" +
" -h%n" +
"Commands:%n" +
" sub21%n" +
" sub22%n"), cmd2);
String sub22 = usageString(commandLine.getSubcommands().get("cmd2").getSubcommands().get("sub22"), Help.Ansi.OFF);
assertEquals(String.format("" +
"Usage: main cmd2 sub22 [-g] [COMMAND]%n" +
" -g%n" +
"Commands:%n" +
" sub22sub1%n"), sub22);
}
@Test
public void testTextConstructorPlain() {
assertEquals("--NoAnsiFormat", Help.Ansi.ON.new Text("--NoAnsiFormat").toString());
}
@Test
public void testTextConstructorWithStyle() {
assertEquals("\u001B[1m--NoAnsiFormat\u001B[21m\u001B[0m", Help.Ansi.ON.new Text("@|bold --NoAnsiFormat|@").toString());
}
@Test
public void testTextApply() {
Text txt = Help.Ansi.ON.apply("--p", Arrays.<IStyle>asList(Style.fg_red, Style.bold));
assertEquals(Help.Ansi.ON.new Text("@|fg(red),bold --p|@"), txt);
}
@Test
public void testTextDefaultColorScheme() {
Help.Ansi ansi = Help.Ansi.ON;
ColorScheme scheme = Help.defaultColorScheme(ansi);
assertEquals(scheme.ansi().new Text("@|yellow -p|@"), scheme.optionText("-p"));
assertEquals(scheme.ansi().new Text("@|bold command|@"), scheme.commandText("command"));
assertEquals(scheme.ansi().new Text("@|yellow FILE|@"), scheme.parameterText("FILE"));
assertEquals(scheme.ansi().new Text("@|italic NUMBER|@"), scheme.optionParamText("NUMBER"));
}
@Test
public void testTextSubString() {
Help.Ansi ansi = Help.Ansi.ON;
Text txt = ansi.new Text("@|bold 01234|@").concat("56").concat("@|underline 7890|@");
assertEquals(ansi.new Text("@|bold 01234|@56@|underline 7890|@"), txt.substring(0));
assertEquals(ansi.new Text("@|bold 1234|@56@|underline 7890|@"), txt.substring(1));
assertEquals(ansi.new Text("@|bold 234|@56@|underline 7890|@"), txt.substring(2));
assertEquals(ansi.new Text("@|bold 34|@56@|underline 7890|@"), txt.substring(3));
assertEquals(ansi.new Text("@|bold 4|@56@|underline 7890|@"), txt.substring(4));
assertEquals(ansi.new Text("56@|underline 7890|@"), txt.substring(5));
assertEquals(ansi.new Text("6@|underline 7890|@"), txt.substring(6));
assertEquals(ansi.new Text("@|underline 7890|@"), txt.substring(7));
assertEquals(ansi.new Text("@|underline 890|@"), txt.substring(8));
assertEquals(ansi.new Text("@|underline 90|@"), txt.substring(9));
assertEquals(ansi.new Text("@|underline 0|@"), txt.substring(10));
assertEquals(ansi.new Text(""), txt.substring(11));
assertEquals(ansi.new Text("@|bold 01234|@56@|underline 7890|@"), txt.substring(0, 11));
assertEquals(ansi.new Text("@|bold 01234|@56@|underline 789|@"), txt.substring(0, 10));
assertEquals(ansi.new Text("@|bold 01234|@56@|underline 78|@"), txt.substring(0, 9));
assertEquals(ansi.new Text("@|bold 01234|@56@|underline 7|@"), txt.substring(0, 8));
assertEquals(ansi.new Text("@|bold 01234|@56"), txt.substring(0, 7));
assertEquals(ansi.new Text("@|bold 01234|@5"), txt.substring(0, 6));
assertEquals(ansi.new Text("@|bold 01234|@"), txt.substring(0, 5));
assertEquals(ansi.new Text("@|bold 0123|@"), txt.substring(0, 4));
assertEquals(ansi.new Text("@|bold 012|@"), txt.substring(0, 3));
assertEquals(ansi.new Text("@|bold 01|@"), txt.substring(0, 2));
assertEquals(ansi.new Text("@|bold 0|@"), txt.substring(0, 1));
assertEquals(ansi.new Text(""), txt.substring(0, 0));
assertEquals(ansi.new Text("@|bold 1234|@56@|underline 789|@"), txt.substring(1, 10));
assertEquals(ansi.new Text("@|bold 234|@56@|underline 78|@"), txt.substring(2, 9));
assertEquals(ansi.new Text("@|bold 34|@56@|underline 7|@"), txt.substring(3, 8));
assertEquals(ansi.new Text("@|bold 4|@56"), txt.substring(4, 7));
assertEquals(ansi.new Text("5"), txt.substring(5, 6));
assertEquals(ansi.new Text("@|bold 2|@"), txt.substring(2, 3));
assertEquals(ansi.new Text("@|underline 8|@"), txt.substring(8, 9));
Text txt2 = ansi.new Text("@|bold abc|@@|underline DEF|@");
assertEquals(ansi.new Text("@|bold abc|@@|underline DEF|@"), txt2.substring(0));
assertEquals(ansi.new Text("@|bold bc|@@|underline DEF|@"), txt2.substring(1));
assertEquals(ansi.new Text("@|bold abc|@@|underline DE|@"), txt2.substring(0,5));
assertEquals(ansi.new Text("@|bold bc|@@|underline DE|@"), txt2.substring(1,5));
}
@Test
public void testTextSplitLines() {
Help.Ansi ansi = Help.Ansi.ON;
Text[] all = {
ansi.new Text("@|bold 012\n34|@").concat("5\nAA\n6").concat("@|underline 78\n90|@"),
ansi.new Text("@|bold 012\r34|@").concat("5\rAA\r6").concat("@|underline 78\r90|@"),
ansi.new Text("@|bold 012\r\n34|@").concat("5\r\nAA\r\n6").concat("@|underline 78\r\n90|@"),
};
for (Text text : all) {
Text[] lines = text.splitLines();
int i = 0;
assertEquals(ansi.new Text("@|bold 012|@"), lines[i++]);
assertEquals(ansi.new Text("@|bold 34|@5"), lines[i++]);
assertEquals(ansi.new Text("AA"), lines[i++]);
assertEquals(ansi.new Text("6@|underline 78|@"), lines[i++]);
assertEquals(ansi.new Text("@|underline 90|@"), lines[i++]);
}
}
@Test
public void testTextSplitLinesStartEnd() {
Help.Ansi ansi = Help.Ansi.ON;
Text[] all = {
ansi.new Text("\n@|bold 012\n34|@").concat("5\nAA\n6").concat("@|underline 78\n90|@\n"),
ansi.new Text("\r@|bold 012\r34|@").concat("5\rAA\r6").concat("@|underline 78\r90|@\r"),
ansi.new Text("\r\n@|bold 012\r\n34|@").concat("5\r\nAA\r\n6").concat("@|underline 78\r\n90|@\r\n"),
};
for (Text text : all) {
Text[] lines = text.splitLines();
int i = 0;
assertEquals(ansi.new Text(""), lines[i++]);
assertEquals(ansi.new Text("@|bold 012|@"), lines[i++]);
assertEquals(ansi.new Text("@|bold 34|@5"), lines[i++]);
assertEquals(ansi.new Text("AA"), lines[i++]);
assertEquals(ansi.new Text("6@|underline 78|@"), lines[i++]);
assertEquals(ansi.new Text("@|underline 90|@"), lines[i++]);
assertEquals(ansi.new Text(""), lines[i++]);
}
}
@Test
public void testTextSplitLinesStartEndIntermediate() {
Help.Ansi ansi = Help.Ansi.ON;
Text[] all = {
ansi.new Text("\n@|bold 012\n\n\n34|@").concat("5\n\n\nAA\n\n\n6").concat("@|underline 78\n90|@\n"),
ansi.new Text("\r@|bold 012\r\r\r34|@").concat("5\r\r\rAA\r\r\r6").concat("@|underline 78\r90|@\r"),
ansi.new Text("\r\n@|bold 012\r\n\r\n\r\n34|@").concat("5\r\n\r\n\r\nAA\r\n\r\n\r\n6").concat("@|underline 78\r\n90|@\r\n"),
};
for (Text text : all) {
Text[] lines = text.splitLines();
int i = 0;
assertEquals(ansi.new Text(""), lines[i++]);
assertEquals(ansi.new Text("@|bold 012|@"), lines[i++]);
assertEquals(ansi.new Text(""), lines[i++]);
assertEquals(ansi.new Text(""), lines[i++]);
assertEquals(ansi.new Text("@|bold 34|@5"), lines[i++]);
assertEquals(ansi.new Text(""), lines[i++]);
assertEquals(ansi.new Text(""), lines[i++]);
assertEquals(ansi.new Text("AA"), lines[i++]);
assertEquals(ansi.new Text(""), lines[i++]);
assertEquals(ansi.new Text(""), lines[i++]);
assertEquals(ansi.new Text("6@|underline 78|@"), lines[i++]);
assertEquals(ansi.new Text("@|underline 90|@"), lines[i++]);
assertEquals(ansi.new Text(""), lines[i++]);
}
}
@Test
public void testEmbeddedNewLinesInUsageSections() throws UnsupportedEncodingException {
@Command(description = "first line\nsecond line\nthird line", headerHeading = "headerHeading1\nheaderHeading2",
header = "header1\nheader2", descriptionHeading = "descriptionHeading1\ndescriptionHeading2",
footerHeading = "footerHeading1\nfooterHeading2", footer = "footer1\nfooter2")
class App {
@Option(names = {"-v", "--verbose"}, description = "optionDescription1\noptionDescription2") boolean v;
@Parameters(description = "paramDescription1\nparamDescription2") String file;
}
String actual = usageString(new App(), Help.Ansi.OFF);
String expected = String.format("" +
"headerHeading1%n" +
"headerHeading2header1%n" +
"header2%n" +
"Usage: <main class> [-v] <file>%n" +
"descriptionHeading1%n" +
"descriptionHeading2first line%n" +
"second line%n" +
"third line%n" +
" <file> paramDescription1%n" +
" paramDescription2%n" +
" -v, --verbose optionDescription1%n" +
" optionDescription2%n" +
"footerHeading1%n" +
"footerHeading2footer1%n" +
"footer2%n");
assertEquals(expected, actual);
}
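    // Note in the expected output above that headings are not automatically
    // newline-terminated: "headerHeading2" runs straight into "header1", and
    // "descriptionHeading2" into "first line". Embedded \n characters are honored
    // as line breaks, but a heading that should end its own line must include the
    // trailing newline itself.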
@Test
public void testTextWithMultipleStyledSections() {
assertEquals("\u001B[1m<main class>\u001B[21m\u001B[0m [\u001B[33m-v\u001B[39m\u001B[0m] [\u001B[33m-c\u001B[39m\u001B[0m [\u001B[3m<count>\u001B[23m\u001B[0m]]",
Help.Ansi.ON.new Text("@|bold <main class>|@ [@|yellow -v|@] [@|yellow -c|@ [@|italic <count>|@]]").toString());
}
@Test
public void testTextAdjacentStyles() {
assertEquals("\u001B[3m<commit\u001B[23m\u001B[0m\u001B[3m>\u001B[23m\u001B[0m%n",
Help.Ansi.ON.new Text("@|italic <commit|@@|italic >|@%n").toString());
}
@Test
public void testTextNoConversionWithoutClosingTag() {
assertEquals("\u001B[3mabc\u001B[23m\u001B[0m", Help.Ansi.ON.new Text("@|italic abc|@").toString());
assertEquals("@|italic abc", Help.Ansi.ON.new Text("@|italic abc").toString());
}
@Test
public void testTextNoConversionWithoutSpaceSeparator() {
assertEquals("\u001B[3ma\u001B[23m\u001B[0m", Help.Ansi.ON.new Text("@|italic a|@").toString());
assertEquals("@|italic|@", Help.Ansi.ON.new Text("@|italic|@").toString());
assertEquals("", Help.Ansi.ON.new Text("@|italic |@").toString());
}
@Test
    public void testPalette256ColorForegroundIndex() {
assertEquals("\u001B[38;5;45mabc\u001B[39m\u001B[0m", Help.Ansi.ON.new Text("@|fg(45) abc|@").toString());
}
@Test
    public void testPalette256ColorForegroundRgb() {
int num = 16 + 36 * 5 + 6 * 5 + 5;
assertEquals("\u001B[38;5;" + num + "mabc\u001B[39m\u001B[0m", Help.Ansi.ON.new Text("@|fg(5;5;5) abc|@").toString());
}
@Test
    public void testPalette256ColorBackgroundIndex() {
assertEquals("\u001B[48;5;77mabc\u001B[49m\u001B[0m", Help.Ansi.ON.new Text("@|bg(77) abc|@").toString());
}
@Test
    public void testPalette256ColorBackgroundRgb() {
int num = 16 + 36 * 3 + 6 * 3 + 3;
assertEquals("\u001B[48;5;" + num + "mabc\u001B[49m\u001B[0m", Help.Ansi.ON.new Text("@|bg(3;3;3) abc|@").toString());
}
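    // The fg(r;g;b) and bg(r;g;b) markup in the tests above maps RGB components
    // (each 0-5) onto the 6x6x6 color cube of the 256-color palette using
    // index = 16 + 36*r + 6*g + b. A minimal sketch (not part of the original
    // suite) checking the cube's corner indices 16 and 231 directly:
    @Test
    public void testPalette256ColorCubeCornerIndices() {
        assertEquals("\u001B[38;5;16mabc\u001B[39m\u001B[0m", Help.Ansi.ON.new Text("@|fg(0;0;0) abc|@").toString());
        assertEquals("\u001B[38;5;231mabc\u001B[39m\u001B[0m", Help.Ansi.ON.new Text("@|fg(5;5;5) abc|@").toString());
    }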
@Test
public void testAnsiEnabled() {
assertTrue(Help.Ansi.ON.enabled());
assertFalse(Help.Ansi.OFF.enabled());
System.setProperty("picocli.ansi", "true");
        assertTrue(Help.Ansi.AUTO.enabled());
System.setProperty("picocli.ansi", "false");
        assertFalse(Help.Ansi.AUTO.enabled());
System.clearProperty("picocli.ansi");
boolean isWindows = System.getProperty("os.name").startsWith("Windows");
boolean isXterm = System.getenv("TERM") != null && System.getenv("TERM").startsWith("xterm");
boolean hasOsType = System.getenv("OSTYPE") != null; // null on Windows unless on Cygwin or MSYS
boolean isAtty = (isWindows && (isXterm || hasOsType)) // cygwin pseudo-tty
|| hasConsole();
assertEquals((isAtty && (!isWindows || isXterm || hasOsType)) || isJansiConsoleInstalled(), Help.Ansi.AUTO.enabled());
if (isWindows && !Help.Ansi.AUTO.enabled()) {
AnsiConsole.systemInstall();
assertTrue(Help.Ansi.AUTO.enabled());
AnsiConsole.systemUninstall();
}
}
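    // Probes System.console() reflectively rather than calling it directly,
    // presumably so this source keeps compiling on pre-Java 6 compilers (the
    // method was added in Java 6). Note the catch clause optimistically reports
    // a console when reflection itself fails.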
private boolean hasConsole() {
try { return System.class.getDeclaredMethod("console").invoke(null) != null; }
catch (Throwable reflectionFailed) { return true; }
}
private static boolean isJansiConsoleInstalled() {
try {
Class<?> ansiConsole = Class.forName("org.fusesource.jansi.AnsiConsole");
Field out = ansiConsole.getField("out");
return out.get(null) == System.out;
} catch (Exception reflectionFailed) {
return false;
}
}
@Test
public void testSystemPropertiesOverrideDefaultColorScheme() {
@Command(separator = "=") class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--count", "-c"}) int count;
@Option(names = {"--help", "-h"}, hidden = true) boolean helpRequested;
@Parameters(paramLabel = "FILE", arity = "1..*") File[] files;
}
Help.Ansi ansi = Help.Ansi.ON;
// default color scheme
assertEquals(ansi.new Text("@|bold <main class>|@ [@|yellow -v|@] [@|yellow -c|@=@|italic <count>|@] @|yellow FILE|@..." + LINESEP),
new Help(new App(), ansi).synopsis(0));
System.setProperty("picocli.color.commands", "blue");
System.setProperty("picocli.color.options", "green");
System.setProperty("picocli.color.parameters", "cyan");
System.setProperty("picocli.color.optionParams", "magenta");
assertEquals(ansi.new Text("@|blue <main class>|@ [@|green -v|@] [@|green -c|@=@|magenta <count>|@] @|cyan FILE|@..." + LINESEP),
new Help(new App(), ansi).synopsis(0));
}
@Test
public void testSystemPropertiesOverrideExplicitColorScheme() {
@Command(separator = "=") class App {
@Option(names = {"--verbose", "-v"}) boolean verbose;
@Option(names = {"--count", "-c"}) int count;
@Option(names = {"--help", "-h"}, hidden = true) boolean helpRequested;
@Parameters(paramLabel = "FILE", arity = "1..*") File[] files;
}
Help.Ansi ansi = Help.Ansi.ON;
ColorScheme explicit = new ColorScheme(ansi)
.commands(Style.faint, Style.bg_magenta)
.options(Style.bg_red)
.parameters(Style.reverse)
.optionParams(Style.bg_green);
// default color scheme
assertEquals(ansi.new Text("@|faint,bg(magenta) <main class>|@ [@|bg(red) -v|@] [@|bg(red) -c|@=@|bg(green) <count>|@] @|reverse FILE|@..." + LINESEP),
new Help(CommandSpec.forAnnotatedObject(new App(), CommandLine.defaultFactory()), explicit).synopsis(0));
System.setProperty("picocli.color.commands", "blue");
System.setProperty("picocli.color.options", "blink");
System.setProperty("picocli.color.parameters", "red");
System.setProperty("picocli.color.optionParams", "magenta");
assertEquals(ansi.new Text("@|blue <main class>|@ [@|blink -v|@] [@|blink -c|@=@|magenta <count>|@] @|red FILE|@..." + LINESEP),
new Help(CommandSpec.forAnnotatedObject(new App(), CommandLine.defaultFactory()), explicit).synopsis(0));
}
@Test
public void testCommandLine_printVersionInfo_printsSinglePlainTextString() {
@Command(version = "1.0") class Versioned {}
ByteArrayOutputStream baos = new ByteArrayOutputStream();
new CommandLine(new Versioned()).printVersionHelp(new PrintStream(baos, true), Help.Ansi.OFF);
String result = baos.toString();
assertEquals(String.format("1.0%n"), result);
}
@Test
public void testCommandLine_printVersionInfo_printsArrayOfPlainTextStrings() {
@Command(version = {"Versioned Command 1.0", "512-bit superdeluxe", "(c) 2017"}) class Versioned {}
ByteArrayOutputStream baos = new ByteArrayOutputStream();
new CommandLine(new Versioned()).printVersionHelp(new PrintStream(baos, true), Help.Ansi.OFF);
String result = baos.toString();
assertEquals(String.format("Versioned Command 1.0%n512-bit superdeluxe%n(c) 2017%n"), result);
}
@Test
public void testCommandLine_printVersionInfo_printsSingleStringWithMarkup() {
@Command(version = "@|red 1.0|@") class Versioned {}
ByteArrayOutputStream baos = new ByteArrayOutputStream();
new CommandLine(new Versioned()).printVersionHelp(new PrintStream(baos, true), Help.Ansi.ON);
String result = baos.toString();
assertEquals(String.format("\u001B[31m1.0\u001B[39m\u001B[0m%n"), result);
}
@Test
public void testCommandLine_printVersionInfo_printsArrayOfStringsWithMarkup() {
@Command(version = {
"@|yellow Versioned Command 1.0|@",
"@|blue Build 12345|@",
"@|red,bg(white) (c) 2017|@" })
class Versioned {}
ByteArrayOutputStream baos = new ByteArrayOutputStream();
new CommandLine(new Versioned()).printVersionHelp(new PrintStream(baos, true), Help.Ansi.ON);
String result = baos.toString();
assertEquals(String.format("" +
"\u001B[33mVersioned Command 1.0\u001B[39m\u001B[0m%n" +
"\u001B[34mBuild 12345\u001B[39m\u001B[0m%n" +
"\u001B[31m\u001B[47m(c) 2017\u001B[49m\u001B[39m\u001B[0m%n"), result);
}
@Test
public void testCommandLine_printVersionInfo_formatsArguments() {
@Command(version = {"First line %1$s", "Second line %2$s", "Third line %s %s"}) class Versioned {}
ByteArrayOutputStream baos = new ByteArrayOutputStream();
PrintStream ps = new PrintStream(baos, true);
new CommandLine(new Versioned()).printVersionHelp(ps, Help.Ansi.OFF, "VALUE1", "VALUE2", "VALUE3");
String result = baos.toString();
assertEquals(String.format("First line VALUE1%nSecond line VALUE2%nThird line VALUE1 VALUE2%n"), result);
}
@Test
public void testCommandLine_printVersionInfo_withMarkupAndParameterContainingMarkup() {
@Command(version = {
"@|yellow Versioned Command 1.0|@",
"@|blue Build 12345|@%1$s",
"@|red,bg(white) (c) 2017|@%2$s" })
class Versioned {}
CommandLine commandLine = new CommandLine(new Versioned());
verifyVersionWithMarkup(commandLine);
}
static class MarkupVersionProvider implements IVersionProvider {
public String[] getVersion() {
return new String[] {
"@|yellow Versioned Command 1.0|@",
"@|blue Build 12345|@%1$s",
"@|red,bg(white) (c) 2017|@%2$s" };
}
}
@Test
public void testCommandLine_printVersionInfo_fromAnnotation_withMarkupAndParameterContainingMarkup() {
@Command(versionProvider = MarkupVersionProvider.class)
class Versioned {}
CommandLine commandLine = new CommandLine(new Versioned());
verifyVersionWithMarkup(commandLine);
}
@Test
public void testCommandLine_printVersionInfo_usesProviderIfBothProviderAndStaticVersionInfoExist() {
@Command(versionProvider = MarkupVersionProvider.class, version = "static version is ignored")
class Versioned {}
CommandLine commandLine = new CommandLine(new Versioned());
verifyVersionWithMarkup(commandLine);
}
private void verifyVersionWithMarkup(CommandLine commandLine) {
String[] args = {"@|bold VALUE1|@", "@|underline VALUE2|@", "VALUE3"};
ByteArrayOutputStream baos = new ByteArrayOutputStream();
PrintStream ps = new PrintStream(baos, true);
commandLine.printVersionHelp(ps, Help.Ansi.ON, (Object[]) args);
String result = baos.toString();
assertEquals(String.format("" +
"\u001B[33mVersioned Command 1.0\u001B[39m\u001B[0m%n" +
"\u001B[34mBuild 12345\u001B[39m\u001B[0m\u001B[1mVALUE1\u001B[21m\u001B[0m%n" +
"\u001B[31m\u001B[47m(c) 2017\u001B[49m\u001B[39m\u001B[0m\u001B[4mVALUE2\u001B[24m\u001B[0m%n"), result);
}
static class FailingVersionProvider implements IVersionProvider {
public String[] getVersion() {
throw new IllegalStateException("sorry can't give you a version");
}
}
@Test
public void testFailingVersionProvider() {
@Command(versionProvider = FailingVersionProvider.class)
class App {}
CommandLine cmd = new CommandLine(new App());
try {
cmd.printVersionHelp(System.out);
fail("Expected exception");
} catch (ExecutionException ex) {
assertEquals("Could not get version info from " + cmd.getCommandSpec().versionProvider() + ": java.lang.IllegalStateException: sorry can't give you a version", ex.getMessage());
}
}
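    // IVersionProvider makes it possible to compute version info at runtime
    // instead of hard-coding it in the annotation. A minimal sketch that reads it
    // from a classpath resource; the resource name "version.properties" and its
    // "version" key are hypothetical, not part of picocli:
    static class PropertiesVersionProvider implements IVersionProvider {
        public String[] getVersion() {
            java.util.Properties props = new java.util.Properties();
            java.io.InputStream in = getClass().getResourceAsStream("/version.properties");
            if (in == null) { return new String[] { "unknown" }; }
            try {
                props.load(in);
            } catch (java.io.IOException e) {
                return new String[] { "unknown (" + e + ")" };
            } finally {
                try { in.close(); } catch (java.io.IOException ignored) { }
            }
            return new String[] { props.getProperty("version", "unknown") };
        }
    }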
@Test
    public void testNoVersionProvider_errorWhenInvoked() throws Exception {
        Class<?> cls = Class.forName("picocli.CommandLine$NoVersionProvider");
        Constructor<?> constructor = cls.getDeclaredConstructor();
        constructor.setAccessible(true);
        IVersionProvider provider = (IVersionProvider) constructor.newInstance();
        try {
            provider.getVersion();
            fail("expected an exception to be thrown here");
        } catch (UnsupportedOperationException ex) {
            // expected
        }
    }
@Test
public void testRepeatingGroup() {
class App {
@Parameters(arity = "2", description = "description") String[] twoArgs;
}
String actual = usageString(new App(), Help.Ansi.OFF);
String expected = String.format("" +
"Usage: <main class> (<twoArgs> <twoArgs>)...%n" +
" (<twoArgs> <twoArgs>)...%n" +
" description%n");
assertEquals(expected, actual);
}
@Test
public void testMapFieldHelp_with_unlimitedSplit() {
class App {
@Parameters(arity = "2", split = "\\|",
paramLabel = "FIXTAG=VALUE",
description = "Repeating group of two lists of vertical bar '|'-separated FIXTAG=VALUE pairs.")
Map<Integer,String> message;
@Option(names = {"-P", "-map"}, split = ",",
paramLabel = "TIMEUNIT=VALUE",
description = "Any number of TIMEUNIT=VALUE pairs. These may be specified separately (-PTIMEUNIT=VALUE) or as a comma-separated list.")
Map<TimeUnit, String> map;
}
String actual = usageString(new App(), Help.Ansi.OFF);
String expected = String.format("" +
"Usage: <main class> [-P=TIMEUNIT=VALUE[,TIMEUNIT=VALUE...]]... (FIXTAG=VALUE%n" +
" [\\|FIXTAG=VALUE...] FIXTAG=VALUE[\\|FIXTAG=VALUE...])...%n" +
" (FIXTAG=VALUE[\\|FIXTAG=VALUE...] FIXTAG=VALUE[\\|FIXTAG=VALUE...])...%n" +
" Repeating group of two lists of vertical bar '|'-separated FIXTAG=VALUE%n" +
" pairs.%n" +
" -P, -map=TIMEUNIT=VALUE[,TIMEUNIT=VALUE...]%n" +
" Any number of TIMEUNIT=VALUE pairs. These may be specified separately%n" +
" (-PTIMEUNIT=VALUE) or as a comma-separated list.%n");
assertEquals(expected, actual);
}
@Test
public void testMapFieldHelpSplit_with_limitSplit() {
class App {
@Parameters(arity = "2", split = "\\|",
paramLabel = "FIXTAG=VALUE",
description = "Exactly two lists of vertical bar '|'-separated FIXTAG=VALUE pairs.")
Map<Integer,String> message;
@Option(names = {"-P", "-map"}, split = ",",
paramLabel = "TIMEUNIT=VALUE",
description = "Any number of TIMEUNIT=VALUE pairs. These may be specified separately (-PTIMEUNIT=VALUE) or as a comma-separated list.")
Map<TimeUnit, String> map;
}
CommandSpec spec = CommandSpec.forAnnotatedObject(new App());
spec.parser().limitSplit(true);
String actual = usageString(new CommandLine(spec), Help.Ansi.OFF);
String expected = String.format("" +
"Usage: <main class> [-P=TIMEUNIT=VALUE[,TIMEUNIT=VALUE]...]...%n" +
" (FIXTAG=VALUE\\|FIXTAG=VALUE)...%n" +
" (FIXTAG=VALUE\\|FIXTAG=VALUE)...%n" +
" Exactly two lists of vertical bar '|'-separated FIXTAG=VALUE pairs.%n" +
" -P, -map=TIMEUNIT=VALUE[,TIMEUNIT=VALUE]...%n" +
" Any number of TIMEUNIT=VALUE pairs. These may be specified separately%n" +
" (-PTIMEUNIT=VALUE) or as a comma-separated list.%n");
assertEquals(expected, actual);
}
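    // The only difference from testMapFieldHelp_with_unlimitedSplit above is
    // parser().limitSplit(true). Compare the rendered synopses: the unlimited
    // variant shows TIMEUNIT=VALUE[,TIMEUNIT=VALUE...] and open-ended positional
    // groups, while with limitSplit the repetition moves outside the brackets,
    // TIMEUNIT=VALUE[,TIMEUNIT=VALUE]..., and the arity-2 positional group is
    // rendered as exactly two values per repetition: (FIXTAG=VALUE\|FIXTAG=VALUE)...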
// def cli = new CliBuilder(name:'ant',
// header:'Options:')
// cli.help('print this message')
// cli.logfile(type:File, argName:'file', 'use given file for log')
// cli.D(type:Map, argName:'property=value', args: '+', 'use value for given property')
// cli.lib(argName:'path', valueSeparator:',', args: '3',
// 'comma-separated list of up to 3 paths to search for jars and classes')
@Test
public void testMultiValueCliBuilderCompatibility() {
class App {
@Option(names = "--help", description = "print this message")
boolean help;
@Option(names = "--logfile", description = "use given file for log")
File file;
@Option(names = "-P", arity = "0..*", paramLabel = "<key=ppp>", description = "use value for project key")
Map projectMap;
@Option(names = "-D", arity = "1..*", paramLabel = "<key=ddd>", description = "use value for given property")
Map map;
@Option(names = "-S", arity = "0..*", split = ",", paramLabel = "<key=sss>", description = "use value for project key")
Map sss;
@Option(names = "-T", arity = "1..*", split = ",", paramLabel = "<key=ttt>", description = "use value for given property")
Map ttt;
@Option(names = "--x", arity = "0..2", split = ",", description = "comma-separated list of up to 2 xxx's")
String[] x;
@Option(names = "--y", arity = "3", split = ",", description = "exactly 3 y's")
String[] y;
@Option(names = "--lib", arity = "1..3", split = ",", description = "comma-separated list of up to 3 paths to search for jars and classes")
String[] path;
}
CommandSpec spec = CommandSpec.forAnnotatedObject(new App());
spec.parser().limitSplit(true);
String actual = usageString(new CommandLine(spec), Help.Ansi.OFF);
String expected = String.format("" +
"Usage: <main class> [--help] [--logfile=<file>] [--x[=<x>[,<x>]]]...%n" +
" [--lib=<path>[,<path>[,<path>]]]... [--y=<y>,<y>,<y>]... [-P%n" +
" [=<key=ppp>...]]... [-S[=<key=sss>[,<key=sss>]...]]...%n" +
" [-D=<key=ddd>...]... [-T=<key=ttt>[,<key=ttt>]...]...%n" +
" --help print this message%n" +
" --lib=<path>[,<path>[,<path>]]%n" +
" comma-separated list of up to 3 paths to search for jars%n" +
" and classes%n" +
" --logfile=<file> use given file for log%n" +
" --x[=<x>[,<x>]] comma-separated list of up to 2 xxx's%n" +
" --y=<y>,<y>,<y> exactly 3 y's%n" +
" -D= <key=ddd>... use value for given property%n" +
" -P= [<key=ppp>...] use value for project key%n" +
" -S= [<key=sss>[,<key=sss>]...]%n" +
" use value for project key%n" +
" -T= <key=ttt>[,<key=ttt>]...%n" +
" use value for given property%n");
assertEquals(expected, actual);
}
@Test
public void testMapFieldTypeInference() throws UnsupportedEncodingException {
class App {
@Option(names = "-a") Map<Integer, URI> a;
@Option(names = "-b") Map<TimeUnit, StringBuilder> b;
@SuppressWarnings("unchecked")
@Option(names = "-c") Map c;
@Option(names = "-d") List<File> d;
@Option(names = "-e") Map<? extends Integer, ? super Long> e;
@Option(names = "-f", type = {Long.class, Float.class}) Map<? extends Number, ? super Number> f;
@SuppressWarnings("unchecked")
@Option(names = "-g", type = {TimeUnit.class, Float.class}) Map<?, ?> g;
}
String actual = usageString(new App(), Help.Ansi.OFF);
String expected = String.format("" +
"Usage: <main class> [-a=<Integer=URI>]... [-b=<TimeUnit=StringBuilder>]...%n" +
" [-c=<String=String>]... [-d=<d>]... [-e=<Integer=Long>]...%n" +
" [-f=<Long=Float>]... [-g=<TimeUnit=Float>]...%n" +
" -a= <Integer=URI>%n" +
" -b= <TimeUnit=StringBuilder>%n" +
"%n" +
" -c= <String=String>%n" +
" -d= <d>%n" +
" -e= <Integer=Long>%n" +
" -f= <Long=Float>%n" +
" -g= <TimeUnit=Float>%n");
assertEquals(expected, actual);
}
@Test
public void test200NPEWithEmptyCommandName() throws UnsupportedEncodingException {
@Command(name = "") class Args {}
String actual = usageString(new Args(), Help.Ansi.OFF);
String expected = String.format("" +
"Usage: %n" +
"");
assertEquals(expected, actual);
}
@Test
public void testPrintHelpIfRequestedReturnsTrueForUsageHelp() throws IOException {
class App {
@Option(names = "-h", usageHelp = true) boolean usageRequested;
}
List<CommandLine> list = new CommandLine(new App()).parse("-h");
ByteArrayOutputStream baos = new ByteArrayOutputStream();
final PrintStream out = new PrintStream(baos);
assertTrue(CommandLine.printHelpIfRequested(list, out, out, Help.Ansi.OFF));
String expected = String.format("" +
"Usage: <main class> [-h]%n" +
" -h%n");
assertEquals(expected, baos.toString());
}
@Test
public void testPrintHelpIfRequestedWithCustomColorScheme() {
ColorScheme customColorScheme = new Help.ColorScheme(Help.Ansi.ON)
.optionParams(Style.fg_magenta)
.commands(Style.bg_cyan)
.options(Style.fg_green)
.parameters(Style.bg_white);
@Command(mixinStandardHelpOptions = true)
class App {
@Option(names = { "-f" }, paramLabel = "ARCHIVE", description = "the archive file") File archive;
@Parameters(paramLabel = "POSITIONAL", description = "positional arg") String arg;
}
List<CommandLine> list = new CommandLine(new App()).parse("--help");
ByteArrayOutputStream baos = new ByteArrayOutputStream();
final PrintStream out = new PrintStream(baos);
assertTrue(CommandLine.printHelpIfRequested(list, out, out, customColorScheme));
String expected = Help.Ansi.ON.string(String.format("" +
"Usage: @|bg_cyan <main class>|@ [@|green -hV|@] [@|green -f|@=@|magenta ARCHIVE|@] @|bg_white POSITIONAL|@%n" +
"@|bg_white |@ @|bg_white POSITIONAL|@ positional arg%n" +
" @|green -f|@= @|magenta A|@@|magenta RCHIVE|@ the archive file%n" +
" @|green -h|@, @|green --help|@ Show this help message and exit.%n" +
" @|green -V|@, @|green --version|@ Print version information and exit.%n"));
assertEquals(expected, baos.toString());
}
@Test
public void testPrintHelpIfRequestedReturnsTrueForVersionHelp() throws IOException {
@Command(version = "abc 1.2.3 myversion")
class App {
@Option(names = "-V", versionHelp = true) boolean versionRequested;
}
List<CommandLine> list = new CommandLine(new App()).parse("-V");
ByteArrayOutputStream baos = new ByteArrayOutputStream();
final PrintStream out = new PrintStream(baos);
assertTrue(CommandLine.printHelpIfRequested(list, out, out, Help.Ansi.OFF));
String expected = String.format("abc 1.2.3 myversion%n");
assertEquals(expected, baos.toString());
}
@Test
public void testPrintHelpIfRequestedReturnsFalseForNoHelp() throws IOException {
class App {
@Option(names = "-v") boolean verbose;
}
List<CommandLine> list = new CommandLine(new App()).parse("-v");
ByteArrayOutputStream baos = new ByteArrayOutputStream();
final PrintStream out = new PrintStream(baos);
assertFalse(CommandLine.printHelpIfRequested(list, out, out, Help.Ansi.OFF));
String expected = "";
assertEquals(expected, baos.toString());
}
@Command(name = "top", subcommands = {Sub.class})
static class Top {
@Option(names = "-o", required = true) String mandatory;
@Option(names = "-h", usageHelp = true) boolean isUsageHelpRequested;
}
@Command(name = "sub", description = "This is a subcommand") static class Sub {}
@Test
public void test244SubcommandsNotParsed() {
List<CommandLine> list = new CommandLine(new Top()).parse("-h", "sub");
assertEquals(2, list.size());
assertTrue(list.get(0).getCommand() instanceof Top);
assertTrue(list.get(1).getCommand() instanceof Sub);
assertTrue(((Top) list.get(0).getCommand()).isUsageHelpRequested);
}
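    // Regression test, presumably for issue 244: the "sub" argument after "-h" is
    // still parsed into the result list, and because the matched -h option is
    // flagged usageHelp = true, picocli skips validation of the required -o
    // option, so parse() succeeds even though -o is missing.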
@Test
public void testDemoUsage() {
String expected = String.format("" +
" .__ .__ .__%n" +
"______ |__| ____ ____ ____ | | |__|%n" +
"\\____ \\| |/ ___\\/ _ \\_/ ___\\| | | |%n" +
"| |_> > \\ \\__( <_> ) \\___| |_| |%n" +
"| __/|__|\\___ >____/ \\___ >____/__|%n" +
"|__| \\/ \\/%n" +
"%n" +
"Usage: picocli.Demo [-123airtV] [--simple]%n" +
"%n" +
"Demonstrates picocli subcommands parsing and usage help.%n" +
"%n" +
"Options:%n" +
" -a, --autocomplete Generate sample autocomplete script for git%n" +
" -1, --showUsageForSubcommandGitCommit%n" +
" Shows usage help for the git-commit subcommand%n" +
" -2, --showUsageForMainCommand%n" +
" Shows usage help for a command with subcommands%n" +
" -3, --showUsageForSubcommandGitStatus%n" +
" Shows usage help for the git-status subcommand%n" +
" --simple Show help for the first simple Example in the manual%n" +
" -i, --index Show 256 color palette index values%n" +
" -r, --rgb Show 256 color palette RGB component values%n" +
" -t, --tests Runs all tests in this class%n" +
" -V, --version Show version information and exit%n" +
"%n" +
"VM Options:%n" +
"Run with -ea to enable assertions used in the tests.%n" +
"Run with -Dpicocli.ansi=true to force picocli to use ansi codes,%n" +
" or with -Dpicocli.ansi=false to force picocli to NOT use ansi codes.%n" +
"(By default picocli will use ansi codes if the platform supports it.)%n" +
"%n" +
"If you would like to contribute or report an issue%n" +
"go to github: https://github.com/remkop/picocli%n" +
"%n" +
"If you like the project star it on github and follow me on twitter!%n" +
"This project is created and maintained by Remko Popma (@remkopopma)%n" +
"%n");
assertEquals(expected, usageString(new Demo(), Help.Ansi.OFF));
}
@Test
public void testHelpCannotBeAddedAsSubcommand() {
@Command(subcommands = Help.class) class App{}
try {
            new CommandLine(new App(), new InnerClassFactory(this));
            fail("Expected InitializationException");
} catch (InitializationException ex) {
assertEquals("picocli.CommandLine$Help is not a valid subcommand. Did you mean picocli.CommandLine$HelpCommand?", ex.getMessage());
}
}
@Test
public void testAutoHelpMixinUsageHelpOption() {
@Command(mixinStandardHelpOptions = true) class App {}
String[] helpOptions = {"-h", "--help"};
for (String option : helpOptions) {
List<CommandLine> list = new CommandLine(new App()).parse(option);
assertTrue(list.get(0).isUsageHelpRequested());
ByteArrayOutputStream baos = new ByteArrayOutputStream();
final PrintStream out = new PrintStream(baos);
assertTrue(CommandLine.printHelpIfRequested(list, out, out, Help.Ansi.OFF));
String expected = String.format("" +
"Usage: <main class> [-hV]%n" +
" -h, --help Show this help message and exit.%n" +
" -V, --version Print version information and exit.%n");
assertEquals(expected, baos.toString());
}
}
@Test
public void testAutoHelpMixinVersionHelpOption() {
@Command(mixinStandardHelpOptions = true, version = "1.2.3") class App {}
String[] versionOptions = {"-V", "--version"};
for (String option : versionOptions) {
List<CommandLine> list = new CommandLine(new App()).parse(option);
assertTrue(list.get(0).isVersionHelpRequested());
ByteArrayOutputStream baos = new ByteArrayOutputStream();
final PrintStream out = new PrintStream(baos);
assertTrue(CommandLine.printHelpIfRequested(list, out, out, Help.Ansi.OFF));
String expected = String.format("1.2.3%n");
assertEquals(expected, baos.toString());
}
}
@Test
public void testAutoHelpMixinUsageHelpSubcommandOnAppWithoutSubcommands() {
@Command(mixinStandardHelpOptions = true, subcommands = HelpCommand.class) class App {}
List<CommandLine> list = new CommandLine(new App()).parse("help");
ByteArrayOutputStream baos = new ByteArrayOutputStream();
final PrintStream out = new PrintStream(baos);
assertTrue(CommandLine.printHelpIfRequested(list, out, out, Help.Ansi.OFF));
String expected = String.format("" +
"Usage: <main class> [-hV] [COMMAND]%n" +
" -h, --help Show this help message and exit.%n" +
" -V, --version Print version information and exit.%n" +
"Commands:%n" +
" help Displays help information about the specified command%n");
assertEquals(expected, baos.toString());
}
@Test
public void testAutoHelpMixinRunHelpSubcommandOnAppWithoutSubcommands() {
@Command(mixinStandardHelpOptions = true, subcommands = HelpCommand.class)
class App implements Runnable{ public void run(){}}
ByteArrayOutputStream baos = new ByteArrayOutputStream();
CommandLine.run(new App(), new PrintStream(baos), Help.Ansi.OFF, "help");
String expected = String.format("" +
"Usage: <main class> [-hV] [COMMAND]%n" +
" -h, --help Show this help message and exit.%n" +
" -V, --version Print version information and exit.%n" +
"Commands:%n" +
" help Displays help information about the specified command%n");
assertEquals(expected, baos.toString());
}
@Test
public void testHelpSubcommandWithValidCommand() {
@Command(subcommands = {Sub.class, HelpCommand.class}) class App implements Runnable{ public void run(){}}
ByteArrayOutputStream baos = new ByteArrayOutputStream();
CommandLine.run(new App(), new PrintStream(baos), Help.Ansi.OFF, "help", "sub");
String expected = String.format("" +
"Usage: <main class> sub%n" +
"This is a subcommand%n");
assertEquals(expected, baos.toString());
}
@Test
public void testHelpSubcommandWithInvalidCommand() {
@Command(mixinStandardHelpOptions = true, subcommands = {Sub.class, HelpCommand.class})
class App implements Runnable{ public void run(){}}
ByteArrayOutputStream baos = new ByteArrayOutputStream();
CommandLine.run(new App(), System.out, new PrintStream(baos), Help.Ansi.OFF, "help", "abcd");
String expected = String.format("" +
"Unknown subcommand 'abcd'.%n" +
"Usage: <main class> [-hV] [COMMAND]%n" +
" -h, --help Show this help message and exit.%n" +
" -V, --version Print version information and exit.%n" +
"Commands:%n" +
" sub This is a subcommand%n" +
" help Displays help information about the specified command%n");
assertEquals(expected, baos.toString());
}
@Test
public void testHelpSubcommandWithHelpOption() {
@Command(subcommands = {Sub.class, HelpCommand.class})
class App implements Runnable{ public void run(){}}
ByteArrayOutputStream baos = new ByteArrayOutputStream();
CommandLine.run(new App(), new PrintStream(baos), Help.Ansi.OFF, "help", "-h");
String expected = String.format("" +
"Displays help information about the specified command%n" +
"%n" +
"Usage: <main class> help [-h] [COMMAND...]%n" +
"%n" +
"When no COMMAND is given, the usage help for the main command is displayed.%n" +
"If a COMMAND is specified, the help for that command is shown.%n" +
"%n" +
" [COMMAND...] The COMMAND to display the usage help message for.%n" +
" -h, --help Show usage help for the help command and exit.%n");
assertEquals(expected, baos.toString());
StringWriter sw = new StringWriter();
new CommandLine(new App()).getSubcommands().get("help").usage(new PrintWriter(sw));
assertEquals(expected, sw.toString());
}
@Test
public void testHelpSubcommandWithoutCommand() {
@Command(mixinStandardHelpOptions = true, subcommands = {Sub.class, HelpCommand.class})
class App implements Runnable{ public void run(){}}
ByteArrayOutputStream baos = new ByteArrayOutputStream();
CommandLine.run(new App(), new PrintStream(baos), Help.Ansi.OFF, "help");
String expected = String.format("" +
"Usage: <main class> [-hV] [COMMAND]%n" +
" -h, --help Show this help message and exit.%n" +
" -V, --version Print version information and exit.%n" +
"Commands:%n" +
" sub This is a subcommand%n" +
" help Displays help information about the specified command%n");
assertEquals(expected, baos.toString());
StringWriter sw = new StringWriter();
new CommandLine(new App()).usage(new PrintWriter(sw));
assertEquals(expected, sw.toString());
}
@Test
public void testUsageHelpForNestedSubcommands() {
@Command(name = "subsub", mixinStandardHelpOptions = true) class SubSub { }
@Command(name = "sub", subcommands = {SubSub.class}) class Sub { }
@Command(name = "main", subcommands = {Sub.class}) class App { }
CommandLine app = new CommandLine(new App(), new InnerClassFactory(this));
        // Alternative API: ParseResult result = app.parseArgs("sub", "subsub", "--help");
        //                  CommandLine.printHelpIfRequested(result);
CommandLine subsub = app.getSubcommands().get("sub").getSubcommands().get("subsub");
ByteArrayOutputStream baos = new ByteArrayOutputStream();
subsub.usage(new PrintStream(baos), Help.Ansi.OFF);
String expected = String.format("" +
"Usage: main sub subsub [-hV]%n" +
" -h, --help Show this help message and exit.%n" +
" -V, --version Print version information and exit.%n");
assertEquals(expected, baos.toString());
}
@Test
public void testUsageTextWithHiddenSubcommand() {
@Command(name = "foo", description = "This is a visible subcommand") class Foo { }
@Command(name = "bar", description = "This is a hidden subcommand", hidden = true) class Bar { }
@Command(name = "app", subcommands = {Foo.class, Bar.class}) class App { }
CommandLine app = new CommandLine(new App(), new InnerClassFactory(this));
ByteArrayOutputStream baos = new ByteArrayOutputStream();
app.usage(new PrintStream(baos));
String expected = format("" +
"Usage: app [COMMAND]%n" +
"Commands:%n" +
" foo This is a visible subcommand%n");
assertEquals(expected, baos.toString());
}
@Test
public void testUsage_NoHeaderIfAllSubcommandHidden() {
@Command(name = "foo", description = "This is a foo sub-command", hidden = true) class Foo { }
@Command(name = "bar", description = "This is a foo sub-command", hidden = true) class Bar { }
@Command(name = "app", abbreviateSynopsis = true) class App { }
CommandLine app = new CommandLine(new App())
.addSubcommand("foo", new Foo())
.addSubcommand("bar", new Bar());
ByteArrayOutputStream baos = new ByteArrayOutputStream();
app.usage(new PrintStream(baos));
String expected = format("" +
"Usage: app [COMMAND]%n");
assertEquals(expected, baos.toString());
}
@Test
public void test282BrokenValidationWhenNoOptionsToCompareWith() {
class App implements Runnable {
@Parameters(paramLabel = "FILES", arity = "1..*", description = "List of files")
private List<File> files = new ArrayList<File>();
public void run() { }
}
String[] args = new String[] {"-unknown"};
ByteArrayOutputStream baos = new ByteArrayOutputStream();
CommandLine.run(new App(), System.out, new PrintStream(baos), Help.Ansi.OFF, args);
String expected = format("" +
"Missing required parameter: FILES%n" +
"Usage: <main class> FILES...%n" +
" FILES... List of files%n");
assertEquals(expected, baos.toString());
}
@Test
public void test282ValidationWorksWhenOptionToCompareWithExists() {
class App implements Runnable {
@Parameters(paramLabel = "FILES", arity = "1..*", description = "List of files")
private List<File> files = new ArrayList<File>();
@CommandLine.Option(names = {"-v"}, description = "Print output")
private boolean verbose;
public void run() { }
}
String[] args = new String[] {"-unknown"};
ByteArrayOutputStream baos = new ByteArrayOutputStream();
CommandLine.run(new App(), System.out, new PrintStream(baos), Help.Ansi.OFF, args);
String expected = format("" +
"Missing required parameter: FILES%n" +
"Usage: <main class> [-v] FILES...%n" +
" FILES... List of files%n" +
" -v Print output%n");
assertEquals(expected, baos.toString());
}
@Test
public void testShouldGetUsageWidthFromSystemProperties() {
int defaultWidth = new UsageMessageSpec().width();
assertEquals(80, defaultWidth);
try {
System.setProperty("picocli.usage.width", "123");
int width = new UsageMessageSpec().width();
assertEquals(123, width);
} finally {
System.setProperty("picocli.usage.width", String.valueOf(defaultWidth));
}
}
@Test
public void testInvalidUsageWidthPropertyValue() throws UnsupportedEncodingException {
PrintStream originalErr = System.err;
ByteArrayOutputStream baos = new ByteArrayOutputStream(2500);
System.setErr(new PrintStream(baos));
System.clearProperty("picocli.trace");
System.setProperty("picocli.usage.width", "INVALID");
int actual = new UsageMessageSpec().width();
System.setErr(originalErr);
System.clearProperty("picocli.usage.width");
assertEquals(80, actual);
assertEquals(format("[picocli WARN] Invalid picocli.usage.width value 'INVALID'. Using usage width 80.%n"), baos.toString("UTF-8"));
}
@Test
public void testTooSmallUsageWidthPropertyValue() throws UnsupportedEncodingException {
PrintStream originalErr = System.err;
ByteArrayOutputStream baos = new ByteArrayOutputStream(2500);
System.setErr(new PrintStream(baos));
System.clearProperty("picocli.trace");
System.setProperty("picocli.usage.width", "54");
int actual = new UsageMessageSpec().width();
System.setErr(originalErr);
System.clearProperty("picocli.usage.width");
assertEquals(55, actual);
assertEquals(format("[picocli WARN] Invalid picocli.usage.width value 54. Using minimum usage width 55.%n"), baos.toString("UTF-8"));
}
@Test
public void testTextTableWithLargeWidth() {
TextTable table = TextTable.forDefaultColumns(Help.Ansi.OFF, 200);
table.addRowValues(textArray(Help.Ansi.OFF, "", "-v", ",", "--verbose", "show what you're doing while you are doing it"));
table.addRowValues(textArray(Help.Ansi.OFF, "", "-p", null, null, "the quick brown fox jumped over the lazy dog. The quick brown fox jumped over the lazy doooooooooooooooog."));
assertEquals(String.format(
" -v, --verbose show what you're doing while you are doing it%n" +
" -p the quick brown fox jumped over the lazy dog. The quick brown fox jumped over the lazy doooooooooooooooog.%n"
), table.toString(new StringBuilder()).toString());
}
@Test
public void testLongMultiLineSynopsisIndentedWithLargeWidth() {
System.setProperty("picocli.usage.width", "200");
try {
@Command(name = "<best-app-ever>")
class App {
@Option(names = "--long-option-name", paramLabel = "<long-option-value>") int a;
@Option(names = "--another-long-option-name", paramLabel = "<another-long-option-value>") int b;
@Option(names = "--third-long-option-name", paramLabel = "<third-long-option-value>") int c;
@Option(names = "--fourth-long-option-name", paramLabel = "<fourth-long-option-value>") int d;
}
Help help = new Help(new App(), Help.Ansi.OFF);
assertEquals(String.format(
"<best-app-ever> [--another-long-option-name=<another-long-option-value>] [--fourth-long-option-name=<fourth-long-option-value>] [--long-option-name=<long-option-value>]%n" +
" [--third-long-option-name=<third-long-option-value>]%n"),
help.synopsis(0));
} finally {
System.setProperty("picocli.usage.width", String.valueOf(UsageMessageSpec.DEFAULT_USAGE_WIDTH));
}
}
@Command(description = "The quick brown fox jumped over the lazy dog. The quick brown fox jumped over the lazy dog. The quick brown fox jumped over the lazy dog. The quick brown fox jumped over the lazy dog. The quick brown fox jumped over the lazy dog. The quick brown fox jumped over the lazy dog.")
static class WideDescriptionApp {
@Option(names = "-s", description = "The quick brown fox jumped over the lazy dog. The quick brown fox jumped over the lazy dog. The quick brown fox jumped over the lazy dog. The quick brown fox jumped over the lazy dog.")
String shortOption;
@Option(names = "--very-very-very-looooooooooooooooong-option-name", description = "The quick brown fox jumped over the lazy dog. The quick brown fox jumped over the lazy dog. The quick brown fox jumped over the lazy dog. The quick brown fox jumped over the lazy dog.")
String lengthyOption;
static final String expected = format("Usage: <main class> [--very-very-very-looooooooooooooooong-option-name=<lengthyOption>] [-s=<shortOption>]%n" +
"The quick brown fox jumped over the lazy dog. The quick brown fox jumped over the lazy dog. The quick brown fox jumped%n" +
"over the lazy dog. The quick brown fox jumped over the lazy dog. The quick brown fox jumped over the lazy dog. The%n" +
"quick brown fox jumped over the lazy dog.%n" +
" --very-very-very-looooooooooooooooong-option-name=<lengthyOption>%n" +
" The quick brown fox jumped over the lazy dog. The quick brown fox jumped over the lazy dog. The quick%n" +
" brown fox jumped over the lazy dog. The quick brown fox jumped over the lazy dog.%n" +
" -s= <shortOption> The quick brown fox jumped over the lazy dog. The quick brown fox jumped over the lazy dog. The quick%n" +
" brown fox jumped over the lazy dog. The quick brown fox jumped over the lazy dog.%n");
}
@Test
public void testWideUsageViaSystemProperty() {
System.setProperty("picocli.usage.width", String.valueOf(120));
try {
String actual = usageString(new WideDescriptionApp(), Help.Ansi.OFF);
assertEquals(WideDescriptionApp.expected, actual);
} finally {
System.setProperty("picocli.usage.width", String.valueOf(80));
}
}
@Test
public void testWideUsage() {
CommandLine cmd = new CommandLine(new WideDescriptionApp());
cmd.setUsageHelpWidth(120);
String actual = usageString(cmd, Help.Ansi.OFF);
assertEquals(WideDescriptionApp.expected, actual);
}
@Test
public void testCliBuilderLsExample() {
@Command(name="ls")
class App {
@Option(names = "-a", description = "display all files") boolean a;
@Option(names = "-l", description = "use a long listing format") boolean l;
@Option(names = "-t", description = "sort by modification time") boolean t;
}
String actual = usageString(new App(), Help.Ansi.OFF);
assertEquals(String.format("" +
"Usage: ls [-alt]%n" +
" -a display all files%n" +
" -l use a long listing format%n" +
" -t sort by modification time%n"), actual);
}
@Test
public void testAnsiText() {
String markup = "@|bg(red),white,underline some text|@";
Help.Ansi.Text txt = Help.Ansi.ON.text(markup);
Help.Ansi.Text txt2 = Help.Ansi.ON.new Text(markup);
assertEquals(txt, txt2);
}
@Test
public void testAnsiString() {
String msg = "some text";
String markup = "@|bg(red),white,underline " + msg + "|@";
String ansiTxt = Help.Ansi.ON.string(markup);
String ansiTxt2 = Help.Ansi.ON.new Text(markup).toString();
assertEquals(ansiTxt, ansiTxt2);
}
@Test
public void testAnsiValueOf() {
assertEquals("true=ON", Help.Ansi.ON, Help.Ansi.valueOf(true));
assertEquals("false=OFF", Help.Ansi.OFF, Help.Ansi.valueOf(false));
}
@Test
public void testIssue430NewlineInSubcommandDescriptionList() { // courtesy [Benny Bottema](https://github.com/bbottema)
CommandSpec rootCmd = createCmd("newlines", "Displays subcommands, one of which contains description newlines");
rootCmd.addSubcommand("subA", createCmd("subA", "regular description for subA"));
rootCmd.addSubcommand("subB", createCmd("subB", "very,\nspecial,\nChristopher Walken style,\ndescription."));
rootCmd.addSubcommand("subC", createCmd("subC", "regular description for subC"));
assertEquals(String.format("" +
"Usage: newlines [-hV] [COMMAND]%n" +
"Displays subcommands, one of which contains description newlines%n" +
" -h, --help Show this help message and exit.%n" +
" -V, --version Print version information and exit.%n" +
"Commands:%n" +
" subA regular description for subA%n" +
" subB very,%n" +
" special,%n" +
" Christopher Walken style,%n" +
" description.%n" +
" subC regular description for subC%n"), new CommandLine(rootCmd).getUsageMessage());
}
private static CommandSpec createCmd(String name, String description) {
CommandSpec cmd = CommandSpec.create().name(name).mixinStandardHelpOptions(true);
cmd.usageMessage().description(description);
return cmd;
}
}
|
[
"\"TERM\"",
"\"TERM\"",
"\"OSTYPE\""
] |
[] |
[
"OSTYPE",
"TERM"
] |
[]
|
["OSTYPE", "TERM"]
|
java
| 2 | 0 | |
app.py
|
#!/usr/bin/env python
from importlib import import_module
import os
from flask import Flask, render_template, Response, request
import io
import time
import picamera
from base_camera import BaseCamera
from picamera.array import PiRGBArray
from picamera import PiCamera
import pygame
class Camera(BaseCamera):
    @staticmethod
    def frames():
        # `camera` and `camera_flag` are module-level globals assigned in
        # the __main__ block below.
        # let camera warm up
        while camera_flag:
            time.sleep(2)
            stream = io.BytesIO()
            for _ in camera.capture_continuous(stream, 'jpeg',
                                               use_video_port=True):
                if camera_flag < 1:
                    break
                # return current frame
                stream.seek(0)
                yield stream.read()
                # reset stream for next frame
                stream.seek(0)
                stream.truncate()
# import camera driver
#if os.environ.get('CAMERA'):
# Camera = import_module('camera_' + os.environ['CAMERA']).Camera
#else:
# from camera import Camera
# Raspberry Pi camera module (requires picamera package)
#from camera_pi import Camera
#from picamera.array import PiRGBArray
#from picamera import PiCamera
app = Flask(__name__)
#@app.route('/')
#def index():
# """Video streaming home page."""
# return render_template('index.html')
@app.route("/", methods=['GET', 'POST'])
def index():
global camera_flag
"""Video streaming home page."""
print(request.method)
if request.method == 'POST':
if request.form.get('Encrypt') == 'Encrypt':
# pass
print("Encrypted")
camera_flag = 0
func = request.environ.get('werkzeug.server.shutdown')
func()
#shutdown_server()
return
elif request.form.get('Decrypt') == 'Decrypt':
# pass # do something else
print("Decrypted")
else:
# pass # unknown
return render_template("index.html")
elif request.method == 'GET':
# return render_template("index.html")
print("No Post Back Call")
return render_template("index.html")
def gen(camera):
"""Video streaming generator function."""
while True:
frame = camera.get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
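# The generator above produces an MJPEG stream: each frame becomes one part of
# a multipart/x-mixed-replace response, delimited by the "--frame" boundary
# declared in the /video_feed mimetype below, so the browser replaces the
# displayed JPEG as each new part arrives.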
@app.route('/video_feed')
def video_feed():
"""Video streaming route. Put this in the src attribute of an img tag."""
return Response(gen(Camera()),
mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/contact', methods=['GET', 'POST'])
def contact():
    if request.method == 'POST':
        if request.form['submit_button'] == 'Do Something':
            print("a")  # do something
        elif request.form['submit_button'] == 'Do Something Else':
            print("b")  # do something else
        # unknown buttons fall through to re-rendering the page
    # Note: the original passed an undefined `form` variable to the template;
    # render without it so the view cannot raise a NameError.
    return render_template('contact.html')
if __name__ == '__main__':
    camera = PiCamera()
    #camera = pygame.camera.Camera()
    camera.resolution = (480, 320)
    camera.framerate = 64
    camera_flag = 1
    app.run(host="0.0.0.0", threaded=True)
    # This loop is only reached after the Flask server shuts down, because
    # app.run() blocks until then.
    while True:
        BLACK = 0, 0, 0
        x = 0
        y = 0
        screen = pygame.display.set_mode((320, 240))
        screen.fill(BLACK)
        pygame.display.flip()
        WHITE = 255, 255, 255
        x = 0
        y = 0
        screen = pygame.display.set_mode((320, 240))
        screen.fill(WHITE)
        pygame.display.flip()
|
[] |
[] |
[
"CAMERA"
] |
[]
|
["CAMERA"]
|
python
| 1 | 0 | |
my_project/userauth_proj/wsgi.py
|
"""
WSGI config for userauth_proj project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'userauth_proj.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
cmd/kubejob/main.go
|
package main
import (
"context"
"fmt"
"os"
"os/signal"
"path/filepath"
"syscall"
"github.com/goccy/kubejob"
"github.com/jessevdk/go-flags"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/homedir"
)
type option struct {
Namespace string `description:"specify namespace" short:"n" long:"namespace" default:"default"`
File string `description:"specify yaml or json file for written job definition" short:"f" long:"file"`
Image string `description:"specify container image" short:"i" long:"image"`
}
func getKubeConfig() string {
if v := os.Getenv("KUBECONFIG"); v != "" {
return v
}
home := homedir.HomeDir()
config := filepath.Join(home, ".kube", "config")
if _, err := os.Stat(config); err == nil {
return config
}
return ""
}
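// Lookup order used above: an explicit KUBECONFIG environment variable wins,
// then $HOME/.kube/config if it exists; with an empty path,
// clientcmd.BuildConfigFromFlags falls back to in-cluster configuration.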
func loadConfig() (*rest.Config, error) {
cfg, err := clientcmd.BuildConfigFromFlags("", getKubeConfig())
if err != nil {
return nil, fmt.Errorf("failed to create config: %w", err)
}
return cfg, nil
}
func namespace(opt option) (string, error) {
if opt.Namespace != "" {
return opt.Namespace, nil
}
rules := &clientcmd.ClientConfigLoadingRules{ExplicitPath: getKubeConfig()}
c, err := rules.Load()
if err != nil {
return "", fmt.Errorf("failed to load default namespace: %w", err)
}
return c.Contexts[c.CurrentContext].Namespace, nil
}
func _main(args []string, opt option) error {
if opt.Image == "" && opt.File == "" {
return fmt.Errorf("image or file option must be specified")
}
cfg, err := loadConfig()
if err != nil {
return fmt.Errorf("failed to load config: %w", err)
}
ns, err := namespace(opt)
if err != nil {
return fmt.Errorf("failed to get namespace: %w", err)
}
var job *kubejob.Job
if opt.File != "" {
		file, err := os.Open(opt.File)
		if err != nil {
			return fmt.Errorf("failed to open file %s: %w", opt.File, err)
		}
		defer file.Close()
j, err := kubejob.NewJobBuilder(cfg, ns).BuildWithReader(file)
if err != nil {
return err
}
job = j
} else {
if len(args) == 0 {
return fmt.Errorf("command is required. please speficy after '--' section")
}
j, err := kubejob.NewJobBuilder(cfg, ns).
SetImage(opt.Image).
SetCommand(args).
Build()
if err != nil {
return err
}
job = j
}
ctx, cancel := context.WithCancel(context.Background())
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt, syscall.SIGTERM)
	go func() {
		// A select with a single case is redundant; receive directly.
		s := <-interrupt
		fmt.Printf("received %s. trying to stop gracefully\n", s)
		cancel()
	}()
if err := job.Run(ctx); err != nil {
return err
}
return nil
}
func main() {
var opt option
parser := flags.NewParser(&opt, flags.Default)
args, err := parser.Parse()
if err != nil {
return
}
if err := _main(args, opt); err != nil {
fmt.Printf("%+v", err)
os.Exit(1)
}
}
|
[
"\"KUBECONFIG\""
] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
go
| 1 | 0 | |
examples/network.go
|
package main
import (
"bufio"
"context"
"os"
"github.com/gridscale/gsclient-go"
log "github.com/sirupsen/logrus"
)
var emptyCtx = context.Background()
func main() {
uuid := os.Getenv("GRIDSCALE_UUID")
token := os.Getenv("GRIDSCALE_TOKEN")
config := gsclient.DefaultConfiguration(uuid, token)
client := gsclient.NewClient(config)
log.Info("gridscale client configured")
log.Info("Create network: Press 'Enter' to continue...")
bufio.NewReader(os.Stdin).ReadBytes('\n')
networkRequest := gsclient.NetworkCreateRequest{
Name: "go-client-network",
}
cnetwork, err := client.CreateNetwork(emptyCtx, networkRequest)
if err != nil {
log.Error("Create network has failed with error", err)
return
}
log.WithFields(log.Fields{
"network_uuid": cnetwork.ObjectUUID,
}).Info("Network successfully created")
defer func() {
//delete network
err := client.DeleteNetwork(emptyCtx, cnetwork.ObjectUUID)
if err != nil {
log.Error("Delete network has failed with error", err)
return
}
log.Info("Network successfully deleted")
log.Info("Get deleted networks: Press 'Enter' to continue...")
bufio.NewReader(os.Stdin).ReadBytes('\n')
networks, err := client.GetDeletedNetworks(emptyCtx)
if err != nil {
log.Error("Get deleted networks has failed with error", err)
return
}
log.WithFields(log.Fields{
"networks": networks,
}).Info("Retrieved deleted networks successfully")
}()
//Get network to update
net, err := client.GetNetwork(emptyCtx, cnetwork.ObjectUUID)
if err != nil {
log.Error("Create network has failed ")
return
}
log.Info("Update network: Press 'Enter' to continue...")
bufio.NewReader(os.Stdin).ReadBytes('\n')
netUpdateRequest := gsclient.NetworkUpdateRequest{
Name: "Updated network",
}
err = client.UpdateNetwork(emptyCtx, net.Properties.ObjectUUID, netUpdateRequest)
if err != nil {
log.Error("Update network has failed with error", err)
return
}
log.WithFields(log.Fields{
"network_uuid": net.Properties.ObjectUUID,
}).Info("Network successfully updated")
log.Info("Retrieve network's events: Press 'Enter' to continue...")
bufio.NewReader(os.Stdin).ReadBytes('\n')
//get network's events
events, err := client.GetNetworkEventList(emptyCtx, net.Properties.ObjectUUID)
if err != nil {
log.Error("Get network's events has failed with error", err)
return
}
log.WithFields(log.Fields{
"network_uuid": net.Properties.ObjectUUID,
"events": events,
}).Info("Events successfully retrieved")
log.Info("Delete network: Press 'Enter' to continue...")
bufio.NewReader(os.Stdin).ReadBytes('\n')
}
|
[
"\"GRIDSCALE_UUID\"",
"\"GRIDSCALE_TOKEN\""
] |
[] |
[
"GRIDSCALE_UUID",
"GRIDSCALE_TOKEN"
] |
[]
|
["GRIDSCALE_UUID", "GRIDSCALE_TOKEN"]
|
go
| 2 | 0 | |
app/elephant_queries.py
|
# app/elephant_queries.py
import os
import json
from dotenv import load_dotenv
import psycopg2
load_dotenv()
DB_NAME = os.getenv("DB_NAME")
DB_USER = os.getenv("DB_USER")
DB_PASSWORD = os.getenv("DB_PASSWORD")
DB_HOST = os.getenv("DB_HOST")
### Connect to ElephantSQL-hosted PostgreSQL
### Connect to ElephantSQL-hosted PostgreSQL
connection = psycopg2.connect(dbname=DB_NAME, user=DB_USER,
                              password=DB_PASSWORD, host=DB_HOST)
### A "cursor", a structure to iterate over db records to perform queries
cursor = connection.cursor()
### An example query
cursor.execute('SELECT * from test_table;')
### Note - nothing happened yet! We need to actually *fetch* from the cursor
results = cursor.fetchone()
print(results)
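### Hedged addition, not in the original script: release the cursor and the
### pooled ElephantSQL connection once the query results have been consumed.
cursor.close()
connection.close()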
|
[] |
[] |
[
"DB_PASSWORD",
"DB_USER",
"DB_NAME",
"DB_HOST"
] |
[]
|
["DB_PASSWORD", "DB_USER", "DB_NAME", "DB_HOST"]
|
python
| 4 | 0 | |
groups/item/drives/item/items/item/children/count/count_request_builder.go
|
package count
import (
ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9 "github.com/microsoft/kiota/abstractions/go"
i7df4e557a1198b9abe14a17b40c7ac7db49b0d3050c749c3169541cb6f012b8b "github.com/microsoftgraph/msgraph-sdk-go/models/microsoft/graph/odataerrors"
)
// CountRequestBuilder provides operations to count the resources in the collection.
type CountRequestBuilder struct {
// Path parameters for the request
pathParameters map[string]string;
// The request adapter to use to execute the requests.
requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter;
// Url template to use to build the URL for the current request builder
urlTemplate string;
}
// CountRequestBuilderGetOptions options for Get
type CountRequestBuilderGetOptions struct {
// Request headers
H map[string]string;
// Request options
O []ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestOption;
// Response handler to use in place of the default response handling provided by the core service
ResponseHandler ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ResponseHandler;
}
// NewCountRequestBuilderInternal instantiates a new CountRequestBuilder and sets the default values.
func NewCountRequestBuilderInternal(pathParameters map[string]string, requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter)(*CountRequestBuilder) {
m := &CountRequestBuilder{
}
m.urlTemplate = "{+baseurl}/groups/{group_id}/drives/{drive_id}/items/{driveItem_id}/children/$count";
urlTplParams := make(map[string]string)
for idx, item := range pathParameters {
urlTplParams[idx] = item
}
m.pathParameters = urlTplParams;
m.requestAdapter = requestAdapter;
return m
}
// NewCountRequestBuilder instantiates a new CountRequestBuilder and sets the default values.
func NewCountRequestBuilder(rawUrl string, requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter)(*CountRequestBuilder) {
urlParams := make(map[string]string)
urlParams["request-raw-url"] = rawUrl
return NewCountRequestBuilderInternal(urlParams, requestAdapter)
}
// CreateGetRequestInformation get the number of the resource
func (m *CountRequestBuilder) CreateGetRequestInformation(options *CountRequestBuilderGetOptions)(*ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestInformation, error) {
requestInfo := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.NewRequestInformation()
requestInfo.UrlTemplate = m.urlTemplate
requestInfo.PathParameters = m.pathParameters
requestInfo.Method = ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.GET
if options != nil && options.H != nil {
requestInfo.Headers = options.H
}
if options != nil && len(options.O) != 0 {
err := requestInfo.AddRequestOptions(options.O...)
if err != nil {
return nil, err
}
}
return requestInfo, nil
}
// Get get the number of the resource
func (m *CountRequestBuilder) Get(options *CountRequestBuilderGetOptions)(*string, error) {
requestInfo, err := m.CreateGetRequestInformation(options);
if err != nil {
return nil, err
}
errorMapping := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ErrorMappings {
"4XX": i7df4e557a1198b9abe14a17b40c7ac7db49b0d3050c749c3169541cb6f012b8b.CreateODataErrorFromDiscriminatorValue,
"5XX": i7df4e557a1198b9abe14a17b40c7ac7db49b0d3050c749c3169541cb6f012b8b.CreateODataErrorFromDiscriminatorValue,
}
res, err := m.requestAdapter.SendPrimitiveAsync(requestInfo, "string", nil, errorMapping)
if err != nil {
return nil, err
}
return res.(*string), nil
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
src/test/java/com/kasisoft/libs/common/utils/PropertyResolverTest.java
|
package com.kasisoft.libs.common.utils;
import static org.hamcrest.MatcherAssert.*;
import static org.hamcrest.Matchers.*;
import com.kasisoft.libs.common.io.*;
import com.kasisoft.libs.common.*;
import org.testng.annotations.*;
import java.util.*;
/**
* @author [email protected]
*/
public class PropertyResolverTest extends AbstractTestCase {
@Test
public void apply__env() {
var text = getResource("text_01.txt");
var loaded = IoFunctions.readText(text);
var resolver = new PropertyResolver()
.withEnvironment()
;
var substituted = resolver.apply(loaded);
assertThat(substituted, is(String.format("My username is: %s", System.getenv().get("USER"))));
}
@Test
public void apply__sys() {
var text = getResource("text_02.txt");
var loaded = IoFunctions.readText(text);
var resolver = new PropertyResolver()
.withSysProperties()
;
var substituted = resolver.apply(loaded).trim();
assertThat(substituted, is(String.format("My encoding is: %s", System.getProperty("file.encoding"))));
}
@Test
public void apply__envAndPrefix() {
var text = getResource("text_03.txt");
var loaded = IoFunctions.readText(text);
var resolver = new PropertyResolver()
.withEnvironment("bibo")
;
var substituted = resolver.apply(loaded);
assertThat(substituted, is(String.format("My username is: %s", System.getenv().get("USER"))));
}
@Test
public void apply__sysAndPrefix() {
var text = getResource("text_04.txt");
var loaded = IoFunctions.readText(text);
var resolver = new PropertyResolver()
.withSysProperties("frog")
;
var substituted = resolver.apply(loaded).trim();
assertThat(substituted, is(String.format("My encoding is: %s", System.getProperty("file.encoding"))));
}
@Test
public void apply__custom() {
var text = getResource("text_05.txt");
var loaded = IoFunctions.readText(text);
var map = new HashMap<String, String>();
map.put("val", "dodo");
var resolver = new PropertyResolver()
.withMap(map)
;
var substituted = resolver.apply(loaded).trim();
assertThat(substituted, is("Here is my value dodo. What ${fluffy}?"));
}
@Test
public void apply__envAndCustomVarFormat() {
var text = getResource("text_06.txt");
var loaded = IoFunctions.readText(text);
var resolver = new PropertyResolver()
.withVarFormat("#{%s}")
.withEnvironment()
;
var substituted = resolver.apply(loaded);
assertThat(substituted, is(String.format("My username is: %s", System.getenv().get("USER"))));
}
@Test
public void apply__sysAndCustomVarFormat() {
var text = getResource("text_07.txt");
var loaded = IoFunctions.readText(text);
var resolver = new PropertyResolver()
.withVarFormat("#{%s}")
.withSysProperties()
;
var substituted = resolver.apply(loaded).trim();
assertThat(substituted, is(String.format("My encoding is: %s", System.getProperty("file.encoding"))));
}
@Test
public void apply__envAndPrefixAndCustomVarFormat() {
var text = getResource("text_08.txt");
var loaded = IoFunctions.readText(text);
var resolver = new PropertyResolver()
.withVarFormat("#{%s}")
.withEnvironment("bibo")
;
var substituted = resolver.apply(loaded);
assertThat(substituted, is(String.format("My username is: %s", System.getenv().get("USER"))));
}
@Test
public void apply__sysAndPrefixAndCustomVarFormat() {
var text = getResource("text_09.txt");
var loaded = IoFunctions.readText(text);
var resolver = new PropertyResolver()
.withVarFormat("#{%s}")
.withSysProperties("frog")
;
var substituted = resolver.apply(loaded).trim();
assertThat(substituted, is(String.format("My encoding is: %s", System.getProperty("file.encoding"))));
}
@Test
public void apply__customAndCustomVarFormat() {
var text = getResource("text_10.txt");
var loaded = IoFunctions.readText(text);
var map = new HashMap<String, String>();
map.put("val", "dodo");
var resolver = new PropertyResolver()
.withVarFormat("#{%s}")
.withMap(map)
;
var substituted = resolver.apply(loaded).trim();
assertThat(substituted, is("Here is my value dodo. What #{fluffy}?"));
}
@Test
public void apply__all() {
var text = getResource("text_11.txt");
var loaded = IoFunctions.readText(text);
var map = new HashMap<String, String>();
map.put("val", "dodo");
var resolver = new PropertyResolver()
.withEnvironment()
.withSysProperties()
.withMap(map)
;
var substituted = resolver.apply(loaded).trim();
assertThat(substituted, is(String.format("My username is: %s, encoding=%s, value=%s, unreplaced=${dodo}", System.getenv().get("USER"), System.getProperty("file.encoding"), map.get("val"))));
}
@Test
public void apply__allAndCustomVarFormat() {
var text = getResource("text_12.txt");
var loaded = IoFunctions.readText(text);
var map = new HashMap<String, String>();
map.put("val", "dodo");
var resolver = new PropertyResolver()
.withVarFormat("#{%s}")
.withEnvironment()
.withSysProperties()
.withMap(map)
;
var substituted = resolver.apply(loaded).trim();
assertThat(substituted, is(String.format("My username is: %s, encoding=%s, value=%s, unreplaced=#{dodo}", System.getenv().get("USER"), System.getProperty("file.encoding"), map.get("val"))));
}
} /* ENDCLASS */
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
scripts/gentabcontent.go
|
package main
import (
"flag"
"fmt"
"os"
"text/template"
)
type BenchEnv struct {
TabID string
TabHeadline string
}
func main() {
env := BenchEnv{os.Getenv("TabID"),
os.Getenv("TabHeadline")}
var content = flag.String("content", "", "Specify the content template file")
var tabcontent = flag.String("tabcontentlist", "", "Specify the tabcontent template file")
flag.Usage = func() {
fmt.Println("-content <content tmpl> : specify the content tmpl")
fmt.Println("-tabcontentlist <tabcontent tmpl> : specify the tabcontent tmpl")
}
flag.Parse()
if content == nil || *content == "" {
fmt.Println("No content tmpl")
flag.Usage()
return
}
if tabcontent == nil || *tabcontent == "" {
fmt.Println("No tabcontent tmpl")
flag.Usage()
return
}
	s1, err := template.ParseFiles(*content, *tabcontent)
	if err != nil {
		fmt.Println("Failed to parse templates:", err)
		return
	}
	s1.ExecuteTemplate(os.Stdout, "content", env)
	fmt.Println()
	s1.Execute(os.Stdout, nil)
}
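// Example invocation (hypothetical template file names); TabID and
// TabHeadline are read from the environment:
//
//	TabID=tab1 TabHeadline="Benchmarks" go run gentabcontent.go \
//	    -content content.tmpl -tabcontentlist tabcontent.tmpl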
|
[
"\"TabID\"",
"\"TabHeadline\""
] |
[] |
[
"TabID",
"TabHeadline"
] |
[]
|
["TabID", "TabHeadline"]
|
go
| 2 | 0 | |
acc_test/resource_cluster_test.go
|
package test
import (
"fmt"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"github.com/instaclustr/terraform-provider-instaclustr/instaclustr"
"io/ioutil"
"os"
"regexp"
"strconv"
"strings"
"testing"
)
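// getOptionalEnv is defined elsewhere in this test package; a minimal sketch
// of the assumed behavior (environment value if set, fallback otherwise):
//
//	func getOptionalEnv(key, fallback string) string {
//		if v := os.Getenv(key); v != "" {
//			return v
//		}
//		return fallback
//	}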
func TestAccCluster(t *testing.T) {
testAccProvider := instaclustr.Provider()
testAccProviders := map[string]terraform.ResourceProvider{
"instaclustr": testAccProvider,
}
validConfig, _ := ioutil.ReadFile("data/valid.tf")
username := os.Getenv("IC_USERNAME")
apiKey := os.Getenv("IC_API_KEY")
hostname := getOptionalEnv("IC_API_URL", instaclustr.DefaultApiHostname)
oriConfig := fmt.Sprintf(string(validConfig), username, apiKey, hostname)
updatedConfig := strings.Replace(oriConfig, "testcluster", "newcluster", 1)
resource.Test(t, resource.TestCase{
Providers: testAccProviders,
CheckDestroy: testCheckResourceDeleted("valid", hostname, username, apiKey),
Steps: []resource.TestStep{
{
Config: oriConfig,
Check: resource.ComposeTestCheckFunc(
testCheckResourceValid("valid"),
testCheckResourceCreated("valid", hostname, username, apiKey),
),
},
{
Config: updatedConfig,
Check: resource.ComposeTestCheckFunc(
testCheckResourceValid("valid"),
testCheckResourceCreated("valid", hostname, username, apiKey),
),
},
},
})
}
func TestKafkaConnectClusterCreateInstaclustrAWS(t *testing.T) {
if v := os.Getenv("IC_TEST_KAFKA_CONNECT"); v == "" {
t.Skip("Skipping TestKafkaConnectClusterCreateInstaclustrAWS because IC_TEST_KAFKA_CONNECT is not set")
}
testAccProvider := instaclustr.Provider()
testAccProviders := map[string]terraform.ResourceProvider{
"instaclustr": testAccProvider,
}
validKCConfig, _ := ioutil.ReadFile("data/valid_kafka_connect_instaclustr_aws.tf")
username := os.Getenv("IC_USERNAME")
apiKey := os.Getenv("IC_API_KEY")
kafkaClusterId := os.Getenv("IC_TARGET_KAFKA_CLUSTER_ID")
awsAccessKey := os.Getenv("IC_AWS_ACCESS_KEY")
awsSecretKey := os.Getenv("IC_AWS_SECRET_KEY")
S3BucketName := os.Getenv("IC_S3_BUCKET_NAME")
hostname := getOptionalEnv("IC_API_URL", instaclustr.DefaultApiHostname)
oriKCConfig := fmt.Sprintf(string(validKCConfig), username, apiKey, hostname, kafkaClusterId, awsAccessKey, awsSecretKey, S3BucketName)
resource.Test(t, resource.TestCase{
Providers: testAccProviders,
CheckDestroy: testCheckResourceDeleted("validKC", hostname, username, apiKey),
Steps: []resource.TestStep{
{
Config: oriKCConfig,
Check: resource.ComposeTestCheckFunc(
testCheckResourceValid("validKC"),
testCheckResourceCreated("validKC", hostname, username, apiKey),
),
},
},
})
}
func TestKafkaConnectClusterCreateNonInstaclustrAZURE(t *testing.T) {
if v := os.Getenv("IC_TEST_KAFKA_CONNECT"); v == "" {
t.Skip("Skipping TestKafkaConnectClusterCreateNonInstaclustrAZURE because IC_TEST_KAFKA_CONNECT is not set")
}
testAccProvider := instaclustr.Provider()
testAccProviders := map[string]terraform.ResourceProvider{
"instaclustr": testAccProvider,
}
validKCConfig, _ := ioutil.ReadFile("data/valid_kafka_connect_non_instaclustr_azure.tf")
username := os.Getenv("IC_USERNAME")
apiKey := os.Getenv("IC_API_KEY")
azureStorageAccountName := os.Getenv("IC_AZURE_STORAGE_ACCOUNT_NAME")
azureStorageAccountKey := os.Getenv("IC_AZURE_STORAGE_ACCOUNT_KEY")
azureStorageContainerName := os.Getenv("IC_AZURE_STORAGE_CONTAINER_NAME")
sslEnabledProtocols := os.Getenv("IC_SSL_ENABLED_PROTOCOLS")
sslTruststorePassword := os.Getenv("IC_SSL_TRUSTSTORE_PASSWORD")
sslProtocol := os.Getenv("IC_SSL_PROTOCOL")
securityProtocol := os.Getenv("IC_SECURITY_PROTOCOL")
saslMechanism := os.Getenv("IC_SASL_MECHANISM")
saslJaasConfig := os.Getenv("IC_SASL_JAAS_CONFIG")
bootstrapServers := os.Getenv("IC_BOOTSTRAP_SERVER")
truststore := os.Getenv("IC_TRUSTSTORE")
hostname := getOptionalEnv("IC_API_URL", instaclustr.DefaultApiHostname)
oriKCConfig := fmt.Sprintf(string(validKCConfig), username, apiKey, hostname, azureStorageAccountName,
azureStorageAccountKey, azureStorageContainerName, sslEnabledProtocols, sslTruststorePassword,
sslProtocol, securityProtocol, saslMechanism, saslJaasConfig, bootstrapServers, truststore)
resource.Test(t, resource.TestCase{
Providers: testAccProviders,
CheckDestroy: testCheckResourceDeleted("validKC", hostname, username, apiKey),
Steps: []resource.TestStep{
{
Config: oriKCConfig,
Check: resource.ComposeTestCheckFunc(
testCheckResourceValid("validKC"),
testCheckResourceCreated("validKC", hostname, username, apiKey),
),
},
},
})
}
func TestAccClusterResize(t *testing.T) {
testAccProviders := map[string]terraform.ResourceProvider{
"instaclustr": instaclustr.Provider(),
}
validConfig, _ := ioutil.ReadFile("data/valid_with_resizable_cluster.tf")
username := os.Getenv("IC_USERNAME")
apiKey := os.Getenv("IC_API_KEY")
hostname := getOptionalEnv("IC_API_URL", instaclustr.DefaultApiHostname)
resourceName := "resizable_cluster"
oriConfig := fmt.Sprintf(string(validConfig), username, apiKey, hostname)
validResizeConfig := strings.Replace(oriConfig, "resizeable-small(r5-l)-v2", "resizeable-small(r5-xl)-v2", 1)
invalidResizeClassConfig := strings.Replace(oriConfig, "resizeable-small(r5-l)-v2", "resizeable-large(r5-xl)-v2", 1)
invalidResizeConfig := strings.Replace(oriConfig, "resizeable-small(r5-l)-v2", "t3.medium", 1)
resource.Test(t, resource.TestCase{
Providers: testAccProviders,
CheckDestroy: testCheckResourceDeleted(resourceName, hostname, username, apiKey),
Steps: []resource.TestStep{
{
Config: oriConfig,
Check: resource.ComposeTestCheckFunc(
testCheckResourceValid(resourceName),
testCheckResourceCreated(resourceName, hostname, username, apiKey),
checkClusterRunning(resourceName, hostname, username, apiKey),
testCheckContactIPCorrect(resourceName, hostname, username, apiKey, 3,3),
),
},
{
Config: validResizeConfig,
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("instaclustr_cluster.resizable_cluster", "cluster_name", "tf-resizable-test"),
resource.TestCheckResourceAttr("instaclustr_cluster.resizable_cluster", "node_size", "resizeable-small(r5-xl)-v2"),
testCheckClusterResize("resizable_cluster", hostname, username, apiKey, "resizeable-small(r5-xl)-v2"),
),
},
{
Config: invalidResizeClassConfig,
ExpectError: regexp.MustCompile("Cannot resize nodes"),
},
{
Config: invalidResizeConfig,
ExpectError: regexp.MustCompile("Cannot resize nodes"),
},
},
})
}
func TestAccClusterInvalid(t *testing.T) {
testAccProvider := instaclustr.Provider()
testAccProviders := map[string]terraform.ResourceProvider{
"instaclustr": testAccProvider,
}
validConfig, _ := ioutil.ReadFile("data/invalid.tf")
username := os.Getenv("IC_USERNAME")
apiKey := os.Getenv("IC_API_KEY")
hostname := getOptionalEnv("IC_API_URL", instaclustr.DefaultApiHostname)
resource.Test(t, resource.TestCase{
Providers: testAccProviders,
Steps: []resource.TestStep{
{
Config: fmt.Sprintf(string(validConfig), username, apiKey, hostname),
ExpectError: regexp.MustCompile("Error creating cluster"),
},
},
})
}
func TestAccClusterInvalidBundleOptionCombo(t *testing.T) {
testAccProvider := instaclustr.Provider()
testAccProviders := map[string]terraform.ResourceProvider{
"instaclustr": testAccProvider,
}
validConfig, _ := ioutil.ReadFile("data/invalid_with_wrong_bundle_option.tf")
username := os.Getenv("IC_USERNAME")
apiKey := os.Getenv("IC_API_KEY")
hostname := getOptionalEnv("IC_API_URL", instaclustr.DefaultApiHostname)
resource.Test(t, resource.TestCase{
Providers: testAccProviders,
Steps: []resource.TestStep{
{
Config: fmt.Sprintf(string(validConfig), username, apiKey, hostname),
ExpectError: regexp.MustCompile("Error creating cluster"),
},
},
})
}
func TestAccClusterCustomVPC(t *testing.T) {
testAccProvider := instaclustr.Provider()
testAccProviders := map[string]terraform.ResourceProvider{
"instaclustr": testAccProvider,
}
validConfig, _ := ioutil.ReadFile("data/valid_with_custom_vpc.tf")
username := os.Getenv("IC_USERNAME")
apiKey := os.Getenv("IC_API_KEY")
hostname := getOptionalEnv("IC_API_URL", instaclustr.DefaultApiHostname)
providerAccountName := os.Getenv("IC_PROV_ACC_NAME")
providerVpcId := os.Getenv("IC_PROV_VPC_ID")
oriConfig := fmt.Sprintf(string(validConfig), username, apiKey, hostname, providerAccountName, providerVpcId)
resource.Test(t, resource.TestCase{
Providers: testAccProviders,
CheckDestroy: testCheckResourceDeleted("vpc_cluster", hostname, username, apiKey),
Steps: []resource.TestStep{
{
Config: oriConfig,
Check: resource.ComposeTestCheckFunc(
testCheckResourceValid("vpc_cluster"),
testCheckResourceCreated("vpc_cluster", hostname, username, apiKey),
),
},
},
})
}
func TestAccClusterCustomVPCInvalid(t *testing.T) {
testAccProvider := instaclustr.Provider()
testAccProviders := map[string]terraform.ResourceProvider{
"instaclustr": testAccProvider,
}
validConfig, _ := ioutil.ReadFile("data/invalid_with_custom_vpc.tf")
username := os.Getenv("IC_USERNAME")
apiKey := os.Getenv("IC_API_KEY")
hostname := getOptionalEnv("IC_API_URL", instaclustr.DefaultApiHostname)
providerAccountName := os.Getenv("IC_PROV_ACC_NAME")
resource.Test(t, resource.TestCase{
Providers: testAccProviders,
Steps: []resource.TestStep{
{
Config: fmt.Sprintf(string(validConfig), username, hostname, apiKey, providerAccountName),
ExpectError: regexp.MustCompile("Error creating cluster"),
},
},
})
}
func testCheckResourceValid(resourceName string) resource.TestCheckFunc {
return func(s *terraform.State) error {
resourceState := s.Modules[0].Resources["instaclustr_cluster."+resourceName]
if resourceState == nil {
return fmt.Errorf("%s: resource not found in state", resourceName)
}
instanceState := resourceState.Primary
if instanceState == nil {
return fmt.Errorf("resource has no primary instance")
}
return nil
}
}
func testCheckResourceCreated(resourceName, hostname, username, apiKey string) resource.TestCheckFunc {
return func(s *terraform.State) error {
resourceState := s.Modules[0].Resources["instaclustr_cluster."+resourceName]
id := resourceState.Primary.Attributes["cluster_id"]
client := new(instaclustr.APIClient)
client.InitClient(hostname, username, apiKey)
cluster, err := client.ReadCluster(id)
if err != nil {
return fmt.Errorf("Failed to read cluster %s: %s", id, err)
}
if cluster.ID != id {
return fmt.Errorf("Cluster expected %s but got %s", id, cluster.ID)
}
return nil
}
}
func testCheckContactIPCorrect(resourceName, hostname, username, apiKey string, expectedPrivateContactPointLength int,expectedPublicContactPointLength int) resource.TestCheckFunc {
return func(s *terraform.State) error {
resourceState := s.Modules[0].Resources["instaclustr_cluster."+resourceName]
privateContactPoints, _ := strconv.Atoi(resourceState.Primary.Attributes["private_contact_point.#"])
publicContactPoints, _ := strconv.Atoi(resourceState.Primary.Attributes["public_contact_point.#"])
if privateContactPoints != expectedPrivateContactPointLength {
return fmt.Errorf("[Error] Expected %v private contact points but found %v", expectedPrivateContactPointLength, privateContactPoints)
}
if publicContactPoints != expectedPublicContactPointLength {
return fmt.Errorf("[Error] Expected %v public contact points but found %v", expectedPublicContactPointLength, publicContactPoints)
}
return nil
}
}
func testCheckResourceDeleted(resourceName, hostname, username, apiKey string) resource.TestCheckFunc {
return func(s *terraform.State) error {
resourceState := s.Modules[0].Resources["instaclustr_cluster."+resourceName]
id := resourceState.Primary.Attributes["cluster_id"]
client := new(instaclustr.APIClient)
client.InitClient(hostname, username, apiKey)
err := client.DeleteCluster(id)
if err == nil {
return fmt.Errorf("Cluster %s still exists", id)
}
return nil
}
}
func testCheckClusterResize(resourceName, hostname, username, apiKey, expectedNodeSize string) resource.TestCheckFunc {
return func(s *terraform.State) error {
resourceState := s.Modules[0].Resources["instaclustr_cluster." + resourceName]
id := resourceState.Primary.Attributes["cluster_id"]
client := new(instaclustr.APIClient)
client.InitClient(hostname, username, apiKey)
cluster, err := client.ReadCluster(id)
if err != nil {
return fmt.Errorf("Failed to read cluster %s: %s", id, err)
}
targetNodeSize := cluster.DataCentres[0].ResizeTargetNodeSize
if targetNodeSize != expectedNodeSize {
return fmt.Errorf("Expected cluster to be resized to %s", expectedNodeSize)
}
return nil
}
}
func TestValidRedisClusterCreate(t *testing.T) {
testAccProvider := instaclustr.Provider()
testAccProviders := map[string]terraform.ResourceProvider{
"instaclustr": testAccProvider,
}
validConfig, _ := ioutil.ReadFile("data/valid_redis_cluster_create.tf")
username := os.Getenv("IC_USERNAME")
apiKey := os.Getenv("IC_API_KEY")
hostname := getOptionalEnv("IC_API_URL", instaclustr.DefaultApiHostname)
oriConfig := fmt.Sprintf(string(validConfig), username, apiKey, hostname)
resource.Test(t, resource.TestCase{
Providers: testAccProviders,
CheckDestroy: testCheckResourceDeleted("validRedis", hostname, username, apiKey),
Steps: []resource.TestStep{
{
Config: oriConfig,
Check: resource.ComposeTestCheckFunc(
testCheckResourceValid("validRedis"),
testCheckResourceCreated("validRedis", hostname, username, apiKey),
),
},
},
})
}
func TestValidApacheZookeeperClusterCreate(t *testing.T) {
testAccProvider := instaclustr.Provider()
testAccProviders := map[string]terraform.ResourceProvider{
"instaclustr": testAccProvider,
}
validConfig, _ := ioutil.ReadFile("data/valid_apache_zookeeper.tf")
username := os.Getenv("IC_USERNAME")
apiKey := os.Getenv("IC_API_KEY")
hostname := getOptionalEnv("IC_API_URL", instaclustr.DefaultApiHostname)
oriConfig := fmt.Sprintf(string(validConfig), username, apiKey, hostname)
resource.Test(t, resource.TestCase{
Providers: testAccProviders,
CheckDestroy: testCheckResourceDeleted("validApacheZookeeper", hostname, username, apiKey),
Steps: []resource.TestStep{
{
Config: oriConfig,
Check: resource.ComposeTestCheckFunc(
testCheckResourceValid("validApacheZookeeper"),
testCheckResourceCreated("validApacheZookeeper", hostname, username, apiKey),
),
},
},
})
}
func TestAccClusterCredentials(t *testing.T) {
testAccProviders := map[string]terraform.ResourceProvider{
"instaclustr": instaclustr.Provider(),
}
validConfig, _ := ioutil.ReadFile("data/valid_with_password_and_client_encryption.tf")
username := os.Getenv("IC_USERNAME")
apiKey := os.Getenv("IC_API_KEY")
hostname := getOptionalEnv("IC_API_URL", instaclustr.DefaultApiHostname)
oriConfig := fmt.Sprintf(string(validConfig), username, apiKey, hostname)
resource.Test(t, resource.TestCase{
Providers: testAccProviders,
CheckDestroy: testCheckResourceDeleted("valid_with_password_and_client_encryption", hostname, username, apiKey),
Steps: []resource.TestStep{
{
Config: oriConfig,
Check: resource.ComposeTestCheckFunc(
testCheckClusterCredentials(hostname, username, apiKey),
),
},
},
})
}
func testCheckClusterCredentials(hostname, username, apiKey string) resource.TestCheckFunc {
return func(s *terraform.State) error {
resourceState := s.Modules[0].Resources["data.instaclustr_cluster_credentials.cluster_credentials"]
client := new(instaclustr.APIClient)
client.InitClient(hostname, username, apiKey)
clusterId := resourceState.Primary.Attributes["cluster_id"]
clusterCredentials, err := client.ReadCluster(clusterId)
if err != nil {
return fmt.Errorf("Failed to read Cluster Credentials from %s: %s", clusterId, err)
}
if clusterCredentials.InstaclustrUserPassword != resourceState.Primary.Attributes["cluster_password"] {
return fmt.Errorf("Password of the cluster and resource are different")
}
if clusterCredentials.ClusterCertificateDownload != resourceState.Primary.Attributes["certificate_download"] {
return fmt.Errorf("Certificate download link of the cluster and resource are different")
}
if clusterCredentials.ClusterCertificateDownload == "disabled" {
return fmt.Errorf("Client encryption is disabled")
}
return nil
}
}
|
[
"\"IC_USERNAME\"",
"\"IC_API_KEY\"",
"\"IC_TEST_KAFKA_CONNECT\"",
"\"IC_USERNAME\"",
"\"IC_API_KEY\"",
"\"IC_TARGET_KAFKA_CLUSTER_ID\"",
"\"IC_AWS_ACCESS_KEY\"",
"\"IC_AWS_SECRET_KEY\"",
"\"IC_S3_BUCKET_NAME\"",
"\"IC_TEST_KAFKA_CONNECT\"",
"\"IC_USERNAME\"",
"\"IC_API_KEY\"",
"\"IC_AZURE_STORAGE_ACCOUNT_NAME\"",
"\"IC_AZURE_STORAGE_ACCOUNT_KEY\"",
"\"IC_AZURE_STORAGE_CONTAINER_NAME\"",
"\"IC_SSL_ENABLED_PROTOCOLS\"",
"\"IC_SSL_TRUSTSTORE_PASSWORD\"",
"\"IC_SSL_PROTOCOL\"",
"\"IC_SECURITY_PROTOCOL\"",
"\"IC_SASL_MECHANISM\"",
"\"IC_SASL_JAAS_CONFIG\"",
"\"IC_BOOTSTRAP_SERVER\"",
"\"IC_TRUSTSTORE\"",
"\"IC_USERNAME\"",
"\"IC_API_KEY\"",
"\"IC_USERNAME\"",
"\"IC_API_KEY\"",
"\"IC_USERNAME\"",
"\"IC_API_KEY\"",
"\"IC_USERNAME\"",
"\"IC_API_KEY\"",
"\"IC_PROV_ACC_NAME\"",
"\"IC_PROV_VPC_ID\"",
"\"IC_USERNAME\"",
"\"IC_API_KEY\"",
"\"IC_PROV_ACC_NAME\"",
"\"IC_USERNAME\"",
"\"IC_API_KEY\"",
"\"IC_USERNAME\"",
"\"IC_API_KEY\"",
"\"IC_USERNAME\"",
"\"IC_API_KEY\""
] |
[] |
[
"IC_TEST_KAFKA_CONNECT",
"IC_PROV_VPC_ID",
"IC_AZURE_STORAGE_ACCOUNT_NAME",
"IC_TRUSTSTORE",
"IC_AZURE_STORAGE_ACCOUNT_KEY",
"IC_SSL_PROTOCOL",
"IC_SSL_TRUSTSTORE_PASSWORD",
"IC_BOOTSTRAP_SERVER",
"IC_AZURE_STORAGE_CONTAINER_NAME",
"IC_AWS_SECRET_KEY",
"IC_SASL_JAAS_CONFIG",
"IC_SASL_MECHANISM",
"IC_TARGET_KAFKA_CLUSTER_ID",
"IC_AWS_ACCESS_KEY",
"IC_USERNAME",
"IC_API_KEY",
"IC_S3_BUCKET_NAME",
"IC_SSL_ENABLED_PROTOCOLS",
"IC_PROV_ACC_NAME",
"IC_SECURITY_PROTOCOL"
] |
[]
|
["IC_TEST_KAFKA_CONNECT", "IC_PROV_VPC_ID", "IC_AZURE_STORAGE_ACCOUNT_NAME", "IC_TRUSTSTORE", "IC_AZURE_STORAGE_ACCOUNT_KEY", "IC_SSL_PROTOCOL", "IC_SSL_TRUSTSTORE_PASSWORD", "IC_BOOTSTRAP_SERVER", "IC_AZURE_STORAGE_CONTAINER_NAME", "IC_AWS_SECRET_KEY", "IC_SASL_JAAS_CONFIG", "IC_SASL_MECHANISM", "IC_TARGET_KAFKA_CLUSTER_ID", "IC_AWS_ACCESS_KEY", "IC_USERNAME", "IC_API_KEY", "IC_S3_BUCKET_NAME", "IC_SSL_ENABLED_PROTOCOLS", "IC_PROV_ACC_NAME", "IC_SECURITY_PROTOCOL"]
|
go
| 20 | 0 | |
src/_build/website.py
|
import docs
import hashlib
import json
import os
import sys
import util
FLAG_NAME_MAP={"check_output":"Check Output","optimizable":"Optimizable"}
IGNORE_FLAG_NAME=["func","var","var_arg"]
def _add_data(nm,dt):
nm=nm.replace("\\","/")[:255].encode("ascii","ignore")
dt=dt[:16777215]
if (nm[-5:]==b".html"):
dt=dt.replace(b"{{ROOT}}",(b"" if os.getenv("GITHUB_ACTIONS",None) is not None else bytes(os.path.abspath(os.getcwd()+"/build/web"),"utf-8")))
return bytearray([len(nm),len(dt)&0xff,(len(dt)>>8)&0xff,len(dt)>>16])+nm+dt
def _generate_data(dt,pg_src):
m={}
for k in dt["groups"]:
m[k]={"":[]}
for k in dt["data"]:
if (k["subgroup"] is None):
m[k["group"]][""].append(k)
elif (k["subgroup"] not in m[k["group"]]):
m[k["group"]][k["subgroup"]]=[k]
else:
m[k["group"]][k["subgroup"]].append(k)
toc=""
pg_dt=b""
for k,v in sorted(m.items(),key=lambda e:dt["groups"][e[0]]["name"]):
toc+=f"<div class=\"group\" id=\"{k}\"><a href=\"{{{{ROOT}}}}/{k}.html\"><h2 class=\"title\">{dt['groups'][k]['name']}</h2></a><div class=\"group-box\">"
pg=f"<h1>{dt['groups'][k]['name']}</h1><h3>{dt['groups'][k]['desc']}</h3>"
for sk,sv in sorted(v.items(),key=lambda e:("" if e[0]=="" else dt["subgroups"][e[0]]["name"])):
if (len(sv)==0):
continue
toc+="<div class=\"subgroup\">"
if (len(sk)!=0):
toc+=f"<a href=\"{{{{ROOT}}}}/{k}.html#{sk}\"><h3 class=\"sg-title\">{dt['subgroups'][sk]['name']}</h3></a>"
pg+=f"<a id=\"{sk}\" href=\"#{sk}\" style=\"text-decoration: none;color: #3010ff\"><h2>{dt['subgroups'][sk]['name']}</h2></a><h4>{dt['subgroups'][sk]['desc']}</h4>"
toc+="<ul>"
for e in sorted(sv,key=lambda se:se["name"]):
toc+=f"<li><a href=\"{{{{ROOT}}}}/{e['group']}.html#{e['name']}\">{e['name']+('()' if 'func' in e['flag'] else '')}</a></li>"
pg+=f"<div><a id=\"{e['name']}\" href=\"#{e['name']}\" style=\"text-decoration: none;color: #ff0000\"><pre>"
if ("func" in e["flag"]):
if ("check_output" in e["flag"]):
pg+="<span style=\"color: #cf89a2\">(check_output)</span> "
if (e["ret"] is not None):
pg+=e["ret"]["type"]
else:
pg+="void"
pg+=" "+e["name"]+"("
if (len(e["args"])==0):
pg+="void"
else:
st=True
for a in e["args"]:
if (st):
st=False
else:
pg+=","
pg+=a["type"]+" "+a["name"]
if ("var_arg" in e["flag"]):
pg+=",..."
pg+=")"
else:
pg+=e["name"]
pg+=f"</pre></a><pre>Description: {e['desc']}"
if (len(e["args"])!=0):
pg+="\nArguments:"
for a in e["args"]:
pg+=f"\n {a['name']} -> {a['desc']}"
if (e["ret"] is not None):
pg+=f"\nReturn Value: {e['ret']['desc']}"
pg+="</pre></div>"
toc+="</ul></div>"
toc+="</div></div>"
util.log(f" Generating '/{k}.html'...")
pg_dt+=_add_data(f"/{k}.html",pg_src.replace(b"{{DATA}}",bytes(pg,"utf-8")).replace(b"{{NAME}}",bytes(dt["groups"][k]["name"],"utf-8")))
return bytes(toc,"utf-8"),pg_dt
def generate():
cf_a_dt=b"-->"+bytes(os.getenv("ANALYTICS",""),"utf-8")+b"<!--"
o=bytearray()
util.log("Reading CSS Files...")
for k in os.listdir("src/web/client/css"):
util.log(f" Found file 'src/web/client/css/{k}'")
with open("src/web/client/css/"+k,"rb") as rf:
o+=_add_data("/css/"+k,rf.read())
util.log("Reading JS Files...")
for k in os.listdir("src/web/client/js"):
util.log(f" Found file 'src/web/client/js/{k}'")
with open("src/web/client/js/"+k,"rb") as rf:
o+=_add_data("/js/"+k,rf.read())
util.log("Collecting Documentation Files...")
d_fl=util.get_docs_files()
util.log(f" Found {len(d_fl)} Files\nGenerating Documentation...")
d_dt=docs.create_docs(d_fl)[0]
util.log("Reading 'src/web/client/page.html'...")
with open("src/web/client/page.html","rb") as rf:
pg_src=rf.read()
util.log(f"Generating Table of Content & Pages for {len(d_dt['data'])} Symbols...")
toc,pg_dt=_generate_data(d_dt,pg_src.replace(b"{{ANALYTICS}}",cf_a_dt))
o+=pg_dt
util.log("Reading 'src/web/client/index.html'...")
with open("src/web/client/index.html","rb") as rf:
o+=_add_data("/index.html",rf.read().replace(b"{{DATA}}",toc).replace(b"{{ANALYTICS}}",cf_a_dt))
util.log("Reading 'src/web/client/not_found.html'...")
with open("src/web/client/not_found.html","rb") as rf:
o+=_add_data("not_found.html",rf.read().replace(b"{{ANALYTICS}}",cf_a_dt))
util.log("Reading 'src/web/client/shell_install.sh'...")
with open("src/web/client/shell_install.sh","rb") as rf:
o+=_add_data("shell_install.sh",rf.read())
if (os.getenv("GITHUB_ACTIONS",None) is not None):
with open("web-bundle.dt","wb") as f:
f.write(o)
else:
util.log("Generating Local Website...")
i=0
while (i<len(o)):
l=o[i]
sz=o[i+1]|(o[i+2]<<8)|(o[i+3]<<16)
i+=4
fp="build/web/"+o[i:i+l].decode("ascii","ignore")
os.makedirs(fp[:fp.rindex("/")],exist_ok=True)
i+=l
util.log(f" Writing '{fp}' ({sz} bytes)...")
with open(fp,"wb") as f:
f.write(o[i:i+sz])
i+=sz
if (__name__=="__main__"):
import requests
if ("--server" in sys.argv):
with open("src/web/server/main.js","rb") as f:
util.log("Uploading Server Code...")
requests.put(f"https://api.cloudflare.com/client/v4/accounts/{sys.argv[-3]}/workers/scripts/{sys.argv[-2]}",data=f.read(),headers={"Authorization":"Bearer "+sys.argv[-1],"Content-Type":"application/javascript"})
else:
url=f"https://api.cloudflare.com/client/v4/accounts/{sys.argv[-3]}/storage/kv/namespaces/{sys.argv[-2]}/"
h={"Authorization":"Bearer "+sys.argv[-1],"Content-Type":"application/json"}
util.log("Listing Current KV Keys...")
tb_r=requests.get(url+"values/__table",headers=h,stream=True).raw
tb_r.decode_content=True
l=[]
n_tb=[]
for k in tb_r.read().split(b"\x00"):
if (k[:5]==b"/apt/" or k[:5]==b"/bin/"):
n_tb.append(k)
else:
util.log(f" Found Key '{k.decode('ascii','ignore')}' ")
l.append(hashlib.sha256(k).hexdigest())
util.log("Clearing KV Storage...")
requests.delete(url+"bulk",headers=h,data="["+",".join([f"\"{e}\"" for e in l])+"]")
util.log("Generating Request...")
with open("web-bundle.dt","rb") as f:
dt=f.read()
i=0
o=[]
while (i<len(dt)):
l=dt[i]
sz=dt[i+1]|(dt[i+2]<<8)|(dt[i+3]<<16)
i+=4
fp=dt[i:i+l]
i+=l
fp_h=hashlib.sha256(fp).hexdigest()
util.log(f" Encoding File '{fp.decode('ascii','ignore')}' ({sz} bytes) -> '{fp_h}'...")
n_tb.append(fp)
o.append({"key":fp_h,"value":util.encode(dt[i:i+sz]),"base64":True})
i+=sz
o.append({"key":"__table","value":util.encode(b"\x00".join(n_tb)),"base64":True})
util.log("Uploading Data...")
requests.put(url+"bulk",headers=h,data=json.dumps(o))
|
[] |
[] |
[
"GITHUB_ACTIONS",
"ANALYTICS"
] |
[]
|
["GITHUB_ACTIONS", "ANALYTICS"]
|
python
| 2 | 0 | |
tests/forms/widget_test.go
|
package forms
import (
"bytes"
"fmt"
"github.com/sergeyglazyrindev/go-monolith"
"github.com/sergeyglazyrindev/go-monolith/core"
"github.com/stretchr/testify/assert"
"io"
"mime/multipart"
"os"
"path/filepath"
"strconv"
"testing"
"time"
)
type WidgetTestSuite struct {
gomonolith.TestSuite
}
func NewTestForm() *multipart.Form {
form1 := multipart.Form{
Value: make(map[string][]string),
}
return &form1
}
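// NewTestForm returns an empty multipart.Form; each test fills form1.Value
// (field name -> submitted string values) before calling a widget's
// ProceedForm to simulate an HTML form submission.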
func (w *WidgetTestSuite) TestTextWidget() {
textWidget := &core.TextWidget{
Widget: core.Widget{
Attrs: map[string]string{"test": "test1"},
BaseFuncMap: core.FuncMap,
},
}
textWidget.SetName("dsadas")
textWidget.SetValue("dsadas")
textWidget.SetRequired()
renderedWidget := textWidget.Render(core.NewFormRenderContext(), nil)
assert.Contains(w.T(), renderedWidget, "value=\"dsadas\"")
form1 := NewTestForm()
err := textWidget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err != nil)
form1.Value["dsadas"] = []string{"test"}
err = textWidget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err == nil)
}
func (w *WidgetTestSuite) TestNumberWidget() {
widget := &core.NumberWidget{
Widget: core.Widget{
Attrs: map[string]string{"test": "test1"},
BaseFuncMap: core.FuncMap,
},
}
widget.SetName("dsadas")
widget.SetValue("dsadas")
renderedWidget := widget.Render(core.NewFormRenderContext(), nil)
assert.Contains(w.T(), renderedWidget, "value=\"dsadas\"")
form1 := NewTestForm()
form1.Value["dsadas"] = []string{"test"}
err := widget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err != nil)
form1.Value["dsadas"] = []string{"121"}
err = widget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err == nil)
}
func (w *WidgetTestSuite) TestEmailWidget() {
widget := &core.EmailWidget{
Widget: core.Widget{
Attrs: map[string]string{"test": "test1"},
BaseFuncMap: core.FuncMap,
},
}
widget.SetName("dsadas")
widget.SetValue("dsadas")
renderedWidget := widget.Render(core.NewFormRenderContext(), nil)
assert.Contains(w.T(), renderedWidget, "value=\"dsadas\"")
form1 := NewTestForm()
form1.Value["dsadas"] = []string{"[email protected]"}
err := widget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err == nil)
}
func (w *WidgetTestSuite) TestURLWidget() {
widget := &core.URLWidget{
Widget: core.Widget{
Attrs: map[string]string{"test": "test1"},
BaseFuncMap: core.FuncMap,
},
}
widget.SetName("dsadas")
widget.SetValue("dsadas")
renderedWidget := widget.Render(core.NewFormRenderContext(), nil)
assert.Contains(w.T(), renderedWidget, "value=\"dsadas\"")
form1 := NewTestForm()
form1.Value["dsadas"] = []string{"example.com"}
err := widget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err == nil)
}
func (w *WidgetTestSuite) TestPasswordWidget() {
widget := &core.PasswordWidget{
Widget: core.Widget{
Attrs: map[string]string{"test": "test1"},
BaseFuncMap: core.FuncMap,
},
}
widget.SetName("dsadas")
renderedWidget := widget.Render(core.NewFormRenderContext(), nil)
assert.Contains(w.T(), renderedWidget, "type=\"password\"")
widget.SetRequired()
form1 := NewTestForm()
form1.Value["dsadas"] = []string{"12345678901234567890"}
err := widget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err == nil)
}
func (w *WidgetTestSuite) TestHiddenWidget() {
widget := &core.HiddenWidget{
Widget: core.Widget{
Attrs: map[string]string{"test": "test1"},
BaseFuncMap: core.FuncMap,
},
}
widget.SetName("dsadas")
widget.SetValue("dsadas<>")
renderedWidget := widget.Render(core.NewFormRenderContext(), nil)
assert.Contains(w.T(), renderedWidget, "value=\"dsadas<>\"")
form1 := NewTestForm()
form1.Value["dsadas"] = []string{"dsadasas"}
err := widget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err == nil)
}
func (w *WidgetTestSuite) TestDateWidget() {
widget := &core.DateWidget{
Widget: core.Widget{
Attrs: map[string]string{"test": "test1"},
BaseFuncMap: core.FuncMap,
},
}
widget.SetName("dsadas")
widget.SetValue("11/01/2021")
renderedWidget := widget.Render(core.NewFormRenderContext(), nil)
assert.Contains(w.T(), renderedWidget, "datetimepicker_dsadas")
form1 := NewTestForm()
form1.Value["dsadas"] = []string{"11/02/2021"}
err := widget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err == nil)
}
func (w *WidgetTestSuite) TestDateTimeWidget() {
widget := &core.DateTimeWidget{
Widget: core.Widget{
Attrs: map[string]string{"test": "test1"},
BaseFuncMap: core.FuncMap,
},
}
widget.SetName("dsadas")
widget.SetValue("11/02/2021 10:04")
renderedWidget := widget.Render(core.NewFormRenderContext(), nil)
assert.Contains(w.T(), renderedWidget, "value=\"11/02/2021 10:04\"")
form1 := NewTestForm()
form1.Value["dsadas"] = []string{"11/02/2021 10:04"}
err := widget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err == nil)
}
func (w *WidgetTestSuite) TestTimeWidget() {
widget := &core.TimeWidget{
Widget: core.Widget{
Attrs: map[string]string{"test": "test1"},
BaseFuncMap: core.FuncMap,
},
}
widget.SetName("dsadas")
widget.SetValue("15:05")
renderedWidget := widget.Render(core.NewFormRenderContext(), nil)
assert.Contains(w.T(), renderedWidget, "value=\"15:05\"")
form1 := NewTestForm()
form1.Value["dsadas"] = []string{"10:04"}
err := widget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err == nil)
}
func (w *WidgetTestSuite) TestTextareaWidget() {
widget := &core.TextareaWidget{
Widget: core.Widget{
Attrs: map[string]string{"test": "test1"},
BaseFuncMap: core.FuncMap,
},
}
widget.SetName("dsadas")
widget.SetValue("dsadas")
renderedWidget := widget.Render(core.NewFormRenderContext(), nil)
assert.Contains(w.T(), renderedWidget, "<textarea name=\"dsadas\"")
form1 := NewTestForm()
form1.Value["dsadas"] = []string{"10:04"}
err := widget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err == nil)
}
func (w *WidgetTestSuite) TestCheckboxWidget() {
widget := &core.CheckboxWidget{
Widget: core.Widget{
Attrs: map[string]string{"test": "test1"},
BaseFuncMap: core.FuncMap,
},
}
widget.SetName("dsadas")
widget.SetValue("dsadas")
renderedWidget := widget.Render(core.NewFormRenderContext(), nil)
assert.Contains(w.T(), renderedWidget, "checked=\"checked\"")
form1 := NewTestForm()
form1.Value["dsadas"] = []string{"10:04"}
widget.ProceedForm(form1, nil, nil)
assert.True(w.T(), widget.GetOutputValue() == true)
}
func (w *WidgetTestSuite) TestSelectWidget() {
widget := &core.SelectWidget{
Widget: core.Widget{
Attrs: map[string]string{"test": "test1"},
BaseFuncMap: core.FuncMap,
},
}
widget.SetName("dsadas")
widget.SetValue("dsadas")
widget.OptGroups = make(map[string][]*core.SelectOptGroup)
widget.OptGroups["test"] = make([]*core.SelectOptGroup, 0)
widget.OptGroups["test"] = append(widget.OptGroups["test"], &core.SelectOptGroup{
OptLabel: "test1",
Value: "test1",
})
widget.OptGroups["test"] = append(widget.OptGroups["test"], &core.SelectOptGroup{
OptLabel: "test2",
Value: "dsadas",
})
renderedWidget := widget.Render(core.NewFormRenderContext(), nil)
assert.Contains(w.T(), renderedWidget, "name=\"dsadas\"")
form1 := NewTestForm()
form1.Value["dsadas"] = []string{"10:04"}
err := widget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err != nil)
form1.Value["dsadas"] = []string{"dsadas"}
err = widget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err == nil)
}
func (w *WidgetTestSuite) TestNullBooleanWidget() {
widget := &core.NullBooleanWidget{
Widget: core.Widget{
Attrs: map[string]string{"test": "test1"},
BaseFuncMap: core.FuncMap,
},
}
widget.OptGroups = make(map[string][]*core.SelectOptGroup)
widget.OptGroups["test"] = make([]*core.SelectOptGroup, 0)
widget.OptGroups["test"] = append(widget.OptGroups["test"], &core.SelectOptGroup{
OptLabel: "test1",
Value: "yes",
})
widget.OptGroups["test"] = append(widget.OptGroups["test"], &core.SelectOptGroup{
OptLabel: "test2",
Value: "no",
})
widget.SetName("dsadas")
widget.SetValue("yes")
renderedWidget := widget.Render(core.NewFormRenderContext(), nil)
assert.Contains(w.T(), renderedWidget, "<select name=\"dsadas\" data-placeholder=\"Select\"")
form1 := NewTestForm()
form1.Value["dsadas"] = []string{"dsadasdasdas"}
err := widget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err != nil)
form1.Value["dsadas"] = []string{"no"}
err = widget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err == nil)
}
func (w *WidgetTestSuite) TestSelectMultipleWidget() {
widget := &core.SelectMultipleWidget{
Widget: core.Widget{
Attrs: map[string]string{"test": "test1"},
BaseFuncMap: core.FuncMap,
},
}
widget.SetName("dsadas")
widget.SetValue([]string{"dsadas"})
widget.OptGroups = make(map[string][]*core.SelectOptGroup)
widget.OptGroups["test"] = make([]*core.SelectOptGroup, 0)
widget.OptGroups["test"] = append(widget.OptGroups["test"], &core.SelectOptGroup{
OptLabel: "test1",
Value: "test1",
})
widget.OptGroups["test"] = append(widget.OptGroups["test"], &core.SelectOptGroup{
OptLabel: "test2",
Value: "dsadas",
})
renderedWidget := widget.Render(core.NewFormRenderContext(), nil)
assert.Contains(w.T(), renderedWidget, "name=\"dsadas\"")
form1 := NewTestForm()
form1.Value["dsadas"] = []string{"dsadasdasdas"}
err := widget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err != nil)
form1.Value["dsadas"] = []string{"test1"}
err = widget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err == nil)
}
func (w *WidgetTestSuite) TestRadioSelectWidget() {
widget := &core.RadioSelectWidget{
Widget: core.Widget{
Attrs: map[string]string{"test": "test1"},
BaseFuncMap: core.FuncMap,
},
ID: "test",
WrapLabel: true,
}
widget.SetName("dsadas")
widget.SetValue("dsadas")
widget.OptGroups = make(map[string][]*core.RadioOptGroup)
widget.OptGroups["test"] = make([]*core.RadioOptGroup, 0)
widget.OptGroups["test"] = append(widget.OptGroups["test"], &core.RadioOptGroup{
OptLabel: "test1",
Value: "test1",
})
widget.OptGroups["test"] = append(widget.OptGroups["test"], &core.RadioOptGroup{
OptLabel: "test2",
Value: "dsadas",
})
renderedWidget := widget.Render(core.NewFormRenderContext(), nil)
assert.Contains(w.T(), renderedWidget, "<li>test<ul id=\"test_0\">")
form1 := NewTestForm()
form1.Value["dsadas"] = []string{"dsadasdasdas"}
err := widget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err != nil)
form1.Value["dsadas"] = []string{"test1"}
err = widget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err == nil)
}
func (w *WidgetTestSuite) TestCheckboxSelectMultipleWidget() {
widget := &core.CheckboxSelectMultipleWidget{
Widget: core.Widget{
Attrs: map[string]string{"test": "test1"},
BaseFuncMap: core.FuncMap,
},
ID: "test",
WrapLabel: true,
}
widget.SetName("dsadas")
widget.SetValue([]string{"dsadas"})
widget.OptGroups = make(map[string][]*core.RadioOptGroup)
widget.OptGroups["test"] = make([]*core.RadioOptGroup, 0)
widget.OptGroups["test"] = append(widget.OptGroups["test"], &core.RadioOptGroup{
OptLabel: "test1",
Value: "test1",
})
widget.OptGroups["test"] = append(widget.OptGroups["test"], &core.RadioOptGroup{
OptLabel: "test2",
Value: "dsadas",
})
renderedWidget := widget.Render(core.NewFormRenderContext(), nil)
assert.Contains(w.T(), renderedWidget, "<ul id=\"test\">\n \n \n \n <li>test<ul id=\"test_0\">")
form1 := NewTestForm()
form1.Value["dsadas"] = []string{"dsadasdasdas"}
err := widget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err != nil)
form1.Value["dsadas"] = []string{"test1"}
err = widget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err == nil)
}
func (w *WidgetTestSuite) TestFileWidget() {
widget := &core.FileWidget{
Widget: core.Widget{
Attrs: map[string]string{"test": "test1"},
BaseFuncMap: core.FuncMap,
},
}
widget.SetName("dsadas")
renderedWidget := widget.Render(core.NewFormRenderContext(), nil)
assert.Contains(w.T(), renderedWidget, "type=\"file\"")
body := new(bytes.Buffer)
writer := multipart.NewWriter(body)
err := writer.SetBoundary("foo")
if err != nil {
assert.True(w.T(), false)
return
}
path := os.Getenv("GOMONOLITH_PATH") + "/tests/file_for_uploading.txt"
file, err := os.Open(path)
if err != nil {
assert.True(w.T(), false)
return
}
err = os.Mkdir(fmt.Sprintf("%s/%s", os.Getenv("GOMONOLITH_PATH"), "upload-for-tests"), 0755)
if err != nil {
assert.True(w.T(), false, "Couldnt create directory for file uploading", err)
return
}
defer file.Close()
defer os.RemoveAll(fmt.Sprintf("%s/%s", os.Getenv("GOMONOLITH_PATH"), "upload-for-tests"))
part, err := writer.CreateFormFile("dsadas", filepath.Base(path))
if err != nil {
assert.True(w.T(), false)
return
}
_, err = io.Copy(part, file)
if err != nil {
assert.True(w.T(), false)
return
}
err = writer.Close()
if err != nil {
assert.True(w.T(), false)
return
}
form1, _ := multipart.NewReader(bytes.NewReader(body.Bytes()), "foo").ReadForm(1000000)
err = widget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err == nil)
}
func (w *WidgetTestSuite) TestClearableFileWidget() {
widget := &core.ClearableFileWidget{
Widget: core.Widget{
Attrs: map[string]string{"test": "test1"},
BaseFuncMap: core.FuncMap,
},
InitialText: "test",
Required: true,
ID: "test",
ClearCheckboxLabel: "clear file",
InputText: "upload your image",
}
widget.SetName("dsadas")
renderedWidget := widget.Render(core.NewFormRenderContext(), nil)
assert.Contains(w.T(), renderedWidget, "<p class=\"file-upload\">test:")
assert.Contains(w.T(), renderedWidget, "upload your image:")
// <input type="file" name="dsadas" test="test1"></p>
widget = &core.ClearableFileWidget{
Widget: core.Widget{
Attrs: map[string]string{"test": "test1"},
BaseFuncMap: core.FuncMap,
},
InitialText: "test",
Required: true,
ID: "test",
ClearCheckboxLabel: "clear file",
InputText: "upload your image",
CurrentValue: &core.URLValue{URL: "https://microsoft.com"},
}
widget.SetName("dsadas")
renderedWidget = widget.Render(core.NewFormRenderContext(), nil)
assert.Contains(w.T(), renderedWidget, "<input type=\"file\" ")
body := new(bytes.Buffer)
writer := multipart.NewWriter(body)
err := writer.SetBoundary("foo")
if err != nil {
assert.True(w.T(), false)
return
}
path := os.Getenv("GOMONOLITH_PATH") + "/tests/file_for_uploading.txt"
file, err := os.Open(path)
if err != nil {
assert.True(w.T(), false)
return
}
err = os.Mkdir(fmt.Sprintf("%s/%s", os.Getenv("GOMONOLITH_PATH"), "upload-for-tests"), 0755)
if err != nil {
assert.True(w.T(), false, "Couldnt create directory for file uploading", err)
return
}
defer file.Close()
defer os.RemoveAll(fmt.Sprintf("%s/%s", os.Getenv("GOMONOLITH_PATH"), "upload-for-tests"))
part, err := writer.CreateFormFile("dsadas", filepath.Base(path))
if err != nil {
assert.True(w.T(), false)
return
}
_, err = io.Copy(part, file)
if err != nil {
assert.True(w.T(), false)
return
}
err = writer.Close()
if err != nil {
assert.True(w.T(), false)
return
}
form1, _ := multipart.NewReader(bytes.NewReader(body.Bytes()), "foo").ReadForm(1000000)
err = widget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err == nil)
}
func (w *WidgetTestSuite) TestMultipleHiddenInputWidget() {
widget := &core.MultipleInputHiddenWidget{
Widget: core.Widget{
Attrs: map[string]string{"test": "test1"},
BaseFuncMap: core.FuncMap,
},
}
widget.SetName("dsadas")
widget.SetValue([]string{"dsadas", "test1"})
renderedWidget := widget.Render(core.NewFormRenderContext(), nil)
assert.Contains(w.T(), renderedWidget, "value=\"dsadas\"")
form1 := NewTestForm()
form1.Value["dsadas"] = []string{"dsadas", "test1"}
err := widget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err == nil)
}
func (w *WidgetTestSuite) TestSplitDateTimeWidget() {
widget := &core.SplitDateTimeWidget{
Widget: core.Widget{
Attrs: map[string]string{"test": "test1"},
BaseFuncMap: core.FuncMap,
},
DateAttrs: map[string]string{"test": "test1"},
TimeAttrs: map[string]string{"test": "test1"},
TimeFormat: "15:04",
DateFormat: "Mon Jan _2",
}
widget.SetName("dsadas")
nowTime := time.Now()
widget.SetValue(&nowTime)
renderedWidget := widget.Render(core.NewFormRenderContext(), nil)
assert.Contains(w.T(), renderedWidget, "name=\"dsadas_date\"")
assert.Contains(w.T(), renderedWidget, "name=\"dsadas_time\"")
form1 := NewTestForm()
form1.Value["dsadas_date"] = []string{"Mon Jan 12"}
form1.Value["dsadas_time"] = []string{"10:20"}
err := widget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err == nil)
}
func (w *WidgetTestSuite) TestSplitHiddenDateTimeWidget() {
widget := &core.SplitHiddenDateTimeWidget{
Widget: core.Widget{
Attrs: map[string]string{"test": "test1"},
BaseFuncMap: core.FuncMap,
},
DateAttrs: map[string]string{"test": "test1"},
TimeAttrs: map[string]string{"test": "test1"},
TimeFormat: "15:04",
DateFormat: "Mon Jan _2",
}
widget.SetName("dsadas")
nowTime := time.Now()
widget.SetValue(&nowTime)
renderedWidget := widget.Render(core.NewFormRenderContext(), nil)
assert.Contains(w.T(), renderedWidget, "name=\"dsadas_date\"")
assert.Contains(w.T(), renderedWidget, "name=\"dsadas_time\"")
form1 := NewTestForm()
form1.Value["dsadas_date"] = []string{"Mon Jan 12"}
form1.Value["dsadas_time"] = []string{"10:20"}
err := widget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err == nil)
}
func (w *WidgetTestSuite) TestSelectDateWidget() {
widget := &core.SelectDateWidget{
Widget: core.Widget{
Attrs: map[string]string{"test": "test1"},
BaseFuncMap: core.FuncMap,
},
EmptyLabelString: "choose any",
}
widget.SetName("dsadas")
nowTime := time.Now()
widget.SetValue(&nowTime)
renderedWidget := widget.Render(core.NewFormRenderContext(), nil)
assert.Contains(w.T(), renderedWidget, "<select name=\"dsadas_month\"")
assert.Contains(w.T(), renderedWidget, "<select name=\"dsadas_day\"")
assert.Contains(w.T(), renderedWidget, "<select name=\"dsadas_year\"")
form1 := NewTestForm()
form1.Value["dsadas_month"] = []string{"1"}
form1.Value["dsadas_day"] = []string{"1"}
form1.Value["dsadas_year"] = []string{strconv.Itoa(time.Now().Year())}
err := widget.ProceedForm(form1, nil, nil)
assert.True(w.T(), err == nil)
}
// In order for 'go test' to run this suite, we need to create
// a normal test function and pass our suite to suite.Run
func TestWidget(t *testing.T) {
gomonolith.RunTests(t, new(WidgetTestSuite))
}
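// For reference, a minimal sketch of the testify pattern that the
// gomonolith.RunTests helper presumably wraps (hypothetical, self-contained
// example; any database or environment setup done by the real helper is
// omitted here):
//
//	package tests
//
//	import (
//		"testing"
//
//		"github.com/stretchr/testify/assert"
//		"github.com/stretchr/testify/suite"
//	)
//
//	type MiniSuite struct {
//		suite.Suite
//	}
//
//	func (s *MiniSuite) TestAddition() {
//		assert.Equal(s.T(), 4, 2+2)
//	}
//
//	// 'go test' discovers this plain test function, which hands control
//	// to the suite runner.
//	func TestMiniSuite(t *testing.T) {
//		suite.Run(t, new(MiniSuite))
//	}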
|
[
"\"GOMONOLITH_PATH\"",
"\"GOMONOLITH_PATH\"",
"\"GOMONOLITH_PATH\"",
"\"GOMONOLITH_PATH\"",
"\"GOMONOLITH_PATH\"",
"\"GOMONOLITH_PATH\""
] |
[] |
[
"GOMONOLITH_PATH"
] |
[]
|
["GOMONOLITH_PATH"]
|
go
| 1 | 0 | |
clients/google-api-services-artifactregistry/v1/1.31.0/com/google/api/services/artifactregistry/v1/ArtifactRegistry.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.artifactregistry.v1;
/**
* Service definition for ArtifactRegistry (v1).
*
* <p>
* Store and manage build artifacts in a scalable and integrated service built on Google infrastructure.
* </p>
*
* <p>
* For more information about this service, see the
* <a href="https://cloud.google.com/artifacts/docs/" target="_blank">API Documentation</a>
* </p>
*
* <p>
* This service uses {@link ArtifactRegistryRequestInitializer} to initialize global parameters via its
* {@link Builder}.
* </p>
*
* @since 1.3
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public class ArtifactRegistry extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient {
// Note: Leave this static initializer at the top of the file.
static {
com.google.api.client.util.Preconditions.checkState(
com.google.api.client.googleapis.GoogleUtils.MAJOR_VERSION == 1 &&
(com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION >= 32 ||
(com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION == 31 &&
com.google.api.client.googleapis.GoogleUtils.BUGFIX_VERSION >= 1)),
"You are currently running with version %s of google-api-client. " +
"You need at least version 1.31.1 of google-api-client to run version " +
"1.32.1 of the Artifact Registry API library.", com.google.api.client.googleapis.GoogleUtils.VERSION);
}
/**
* The default encoded root URL of the service. This is determined when the library is generated
* and normally should not be changed.
*
* @since 1.7
*/
public static final String DEFAULT_ROOT_URL = "https://artifactregistry.googleapis.com/";
/**
* The default encoded mTLS root URL of the service. This is determined when the library is generated
* and normally should not be changed.
*
* @since 1.31
*/
public static final String DEFAULT_MTLS_ROOT_URL = "https://artifactregistry.mtls.googleapis.com/";
/**
* The default encoded service path of the service. This is determined when the library is
* generated and normally should not be changed.
*
* @since 1.7
*/
public static final String DEFAULT_SERVICE_PATH = "";
/**
* The default encoded batch path of the service. This is determined when the library is
* generated and normally should not be changed.
*
* @since 1.23
*/
public static final String DEFAULT_BATCH_PATH = "batch";
/**
* The default encoded base URL of the service. This is determined when the library is generated
* and normally should not be changed.
*/
public static final String DEFAULT_BASE_URL = DEFAULT_ROOT_URL + DEFAULT_SERVICE_PATH;
/**
* Constructor.
*
* <p>
* Use {@link Builder} if you need to specify any of the optional parameters.
* </p>
*
* @param transport HTTP transport, which should normally be:
* <ul>
* <li>Google App Engine:
* {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
* <li>Android: {@code newCompatibleTransport} from
* {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
* <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
* </li>
* </ul>
* @param jsonFactory JSON factory, which may be:
* <ul>
* <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
* <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
* <li>Android Honeycomb or higher:
* {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
* </ul>
* @param httpRequestInitializer HTTP request initializer or {@code null} for none
* @since 1.7
*/
public ArtifactRegistry(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
this(new Builder(transport, jsonFactory, httpRequestInitializer));
}
/**
* @param builder builder
*/
ArtifactRegistry(Builder builder) {
super(builder);
}
@Override
protected void initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest<?> httpClientRequest) throws java.io.IOException {
super.initialize(httpClientRequest);
}
/**
* An accessor for creating requests from the Operations collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code ArtifactRegistry artifactregistry = new ArtifactRegistry(...);}
* {@code ArtifactRegistry.Operations.List request = artifactregistry.operations().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Operations operations() {
return new Operations();
}
/**
* The "operations" collection of methods.
*/
public class Operations {
/**
* Starts asynchronous cancellation on a long-running operation. The server makes a best effort to
* cancel the operation, but success is not guaranteed. If the server doesn't support this method,
* it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other
* methods to check whether the cancellation succeeded or whether the operation completed despite
* cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an
* operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to
* `Code.CANCELLED`.
*
* Create a request for the method "operations.cancel".
*
* This request holds the parameters needed by the artifactregistry server. After setting any
* optional parameters, call the {@link Cancel#execute()} method to invoke the remote operation.
*
* @param name The name of the operation resource to be cancelled.
* @param content the {@link com.google.api.services.artifactregistry.v1.model.CancelOperationRequest}
* @return the request
*/
public Cancel cancel(java.lang.String name, com.google.api.services.artifactregistry.v1.model.CancelOperationRequest content) throws java.io.IOException {
Cancel result = new Cancel(name, content);
initialize(result);
return result;
}
public class Cancel extends ArtifactRegistryRequest<com.google.api.services.artifactregistry.v1.model.Empty> {
private static final String REST_PATH = "v1/{+name}:cancel";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^operations/.*$");
/**
* Starts asynchronous cancellation on a long-running operation. The server makes a best effort to
* cancel the operation, but success is not guaranteed. If the server doesn't support this method,
* it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other
* methods to check whether the cancellation succeeded or whether the operation completed despite
* cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an
* operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to
* `Code.CANCELLED`.
*
* Create a request for the method "operations.cancel".
*
     * This request holds the parameters needed by the artifactregistry server.  After setting any
* optional parameters, call the {@link Cancel#execute()} method to invoke the remote operation.
* <p> {@link
* Cancel#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name The name of the operation resource to be cancelled.
* @param content the {@link com.google.api.services.artifactregistry.v1.model.CancelOperationRequest}
* @since 1.13
*/
protected Cancel(java.lang.String name, com.google.api.services.artifactregistry.v1.model.CancelOperationRequest content) {
super(ArtifactRegistry.this, "POST", REST_PATH, content, com.google.api.services.artifactregistry.v1.model.Empty.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^operations/.*$");
}
}
@Override
public Cancel set$Xgafv(java.lang.String $Xgafv) {
return (Cancel) super.set$Xgafv($Xgafv);
}
@Override
public Cancel setAccessToken(java.lang.String accessToken) {
return (Cancel) super.setAccessToken(accessToken);
}
@Override
public Cancel setAlt(java.lang.String alt) {
return (Cancel) super.setAlt(alt);
}
@Override
public Cancel setCallback(java.lang.String callback) {
return (Cancel) super.setCallback(callback);
}
@Override
public Cancel setFields(java.lang.String fields) {
return (Cancel) super.setFields(fields);
}
@Override
public Cancel setKey(java.lang.String key) {
return (Cancel) super.setKey(key);
}
@Override
public Cancel setOauthToken(java.lang.String oauthToken) {
return (Cancel) super.setOauthToken(oauthToken);
}
@Override
public Cancel setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Cancel) super.setPrettyPrint(prettyPrint);
}
@Override
public Cancel setQuotaUser(java.lang.String quotaUser) {
return (Cancel) super.setQuotaUser(quotaUser);
}
@Override
public Cancel setUploadType(java.lang.String uploadType) {
return (Cancel) super.setUploadType(uploadType);
}
@Override
public Cancel setUploadProtocol(java.lang.String uploadProtocol) {
return (Cancel) super.setUploadProtocol(uploadProtocol);
}
/** The name of the operation resource to be cancelled. */
@com.google.api.client.util.Key
private java.lang.String name;
/** The name of the operation resource to be cancelled.
*/
public java.lang.String getName() {
return name;
}
/** The name of the operation resource to be cancelled. */
public Cancel setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^operations/.*$");
}
this.name = name;
return this;
}
@Override
public Cancel set(String parameterName, Object value) {
return (Cancel) super.set(parameterName, value);
}
}
/**
* Deletes a long-running operation. This method indicates that the client is no longer interested
* in the operation result. It does not cancel the operation. If the server doesn't support this
* method, it returns `google.rpc.Code.UNIMPLEMENTED`.
*
* Create a request for the method "operations.delete".
*
* This request holds the parameters needed by the artifactregistry server. After setting any
* optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param name The name of the operation resource to be deleted.
* @return the request
*/
public Delete delete(java.lang.String name) throws java.io.IOException {
Delete result = new Delete(name);
initialize(result);
return result;
}
public class Delete extends ArtifactRegistryRequest<com.google.api.services.artifactregistry.v1.model.Empty> {
private static final String REST_PATH = "v1/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^operations/.*$");
/**
* Deletes a long-running operation. This method indicates that the client is no longer interested
* in the operation result. It does not cancel the operation. If the server doesn't support this
* method, it returns `google.rpc.Code.UNIMPLEMENTED`.
*
* Create a request for the method "operations.delete".
*
     * This request holds the parameters needed by the artifactregistry server.  After setting any
* optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
* <p> {@link
* Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name The name of the operation resource to be deleted.
* @since 1.13
*/
protected Delete(java.lang.String name) {
super(ArtifactRegistry.this, "DELETE", REST_PATH, null, com.google.api.services.artifactregistry.v1.model.Empty.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^operations/.*$");
}
}
@Override
public Delete set$Xgafv(java.lang.String $Xgafv) {
return (Delete) super.set$Xgafv($Xgafv);
}
@Override
public Delete setAccessToken(java.lang.String accessToken) {
return (Delete) super.setAccessToken(accessToken);
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setCallback(java.lang.String callback) {
return (Delete) super.setCallback(callback);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUploadType(java.lang.String uploadType) {
return (Delete) super.setUploadType(uploadType);
}
@Override
public Delete setUploadProtocol(java.lang.String uploadProtocol) {
return (Delete) super.setUploadProtocol(uploadProtocol);
}
/** The name of the operation resource to be deleted. */
@com.google.api.client.util.Key
private java.lang.String name;
/** The name of the operation resource to be deleted.
*/
public java.lang.String getName() {
return name;
}
/** The name of the operation resource to be deleted. */
public Delete setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^operations/.*$");
}
this.name = name;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
/**
* Gets the latest state of a long-running operation. Clients can use this method to poll the
* operation result at intervals as recommended by the API service.
*
* Create a request for the method "operations.get".
*
* This request holds the parameters needed by the artifactregistry server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param name The name of the operation resource.
* @return the request
*/
public Get get(java.lang.String name) throws java.io.IOException {
Get result = new Get(name);
initialize(result);
return result;
}
public class Get extends ArtifactRegistryRequest<com.google.api.services.artifactregistry.v1.model.Operation> {
private static final String REST_PATH = "v1/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^operations/[^/]+$");
/**
* Gets the latest state of a long-running operation. Clients can use this method to poll the
* operation result at intervals as recommended by the API service.
*
* Create a request for the method "operations.get".
*
     * This request holds the parameters needed by the artifactregistry server.  After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name The name of the operation resource.
* @since 1.13
*/
protected Get(java.lang.String name) {
super(ArtifactRegistry.this, "GET", REST_PATH, null, com.google.api.services.artifactregistry.v1.model.Operation.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^operations/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** The name of the operation resource. */
@com.google.api.client.util.Key
private java.lang.String name;
/** The name of the operation resource.
*/
public java.lang.String getName() {
return name;
}
/** The name of the operation resource. */
public Get setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^operations/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Lists operations that match the specified filter in the request. If the server doesn't support
* this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override
* the binding to use different resource name schemes, such as `users/operations`. To override the
* binding, API services can add a binding such as `"/v1/{name=users}/operations"` to their service
* configuration. For backwards compatibility, the default name includes the operations collection
* id, however overriding users must ensure the name binding is the parent resource, without the
* operations collection id.
*
* Create a request for the method "operations.list".
*
* This request holds the parameters needed by the artifactregistry server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @return the request
*/
public List list() throws java.io.IOException {
List result = new List();
initialize(result);
return result;
}
public class List extends ArtifactRegistryRequest<com.google.api.services.artifactregistry.v1.model.ListOperationsResponse> {
private static final String REST_PATH = "v1/operations";
/**
* Lists operations that match the specified filter in the request. If the server doesn't support
* this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to
* override the binding to use different resource name schemes, such as `users/operations`. To
* override the binding, API services can add a binding such as `"/v1/{name=users}/operations"` to
* their service configuration. For backwards compatibility, the default name includes the
* operations collection id, however overriding users must ensure the name binding is the parent
* resource, without the operations collection id.
*
* Create a request for the method "operations.list".
*
     * This request holds the parameters needed by the artifactregistry server.  After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @since 1.13
*/
protected List() {
super(ArtifactRegistry.this, "GET", REST_PATH, null, com.google.api.services.artifactregistry.v1.model.ListOperationsResponse.class);
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** The standard list filter. */
@com.google.api.client.util.Key
private java.lang.String filter;
/** The standard list filter.
*/
public java.lang.String getFilter() {
return filter;
}
/** The standard list filter. */
public List setFilter(java.lang.String filter) {
this.filter = filter;
return this;
}
/** The name of the operation's parent resource. */
@com.google.api.client.util.Key
private java.lang.String name;
/** The name of the operation's parent resource.
*/
public java.lang.String getName() {
return name;
}
/** The name of the operation's parent resource. */
public List setName(java.lang.String name) {
this.name = name;
return this;
}
/** The standard list page size. */
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** The standard list page size.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/** The standard list page size. */
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/** The standard list page token. */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** The standard list page token.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/** The standard list page token. */
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
}
/**
* An accessor for creating requests from the Projects collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code ArtifactRegistry artifactregistry = new ArtifactRegistry(...);}
* {@code ArtifactRegistry.Projects.List request = artifactregistry.projects().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Projects projects() {
return new Projects();
}
/**
* The "projects" collection of methods.
*/
public class Projects {
/**
* An accessor for creating requests from the Locations collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code ArtifactRegistry artifactregistry = new ArtifactRegistry(...);}
* {@code ArtifactRegistry.Locations.List request = artifactregistry.locations().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Locations locations() {
return new Locations();
}
/**
* The "locations" collection of methods.
*/
public class Locations {
/**
* An accessor for creating requests from the Repositories collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code ArtifactRegistry artifactregistry = new ArtifactRegistry(...);}
* {@code ArtifactRegistry.Repositories.List request = artifactregistry.repositories().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Repositories repositories() {
return new Repositories();
}
/**
* The "repositories" collection of methods.
*/
public class Repositories {
/**
* Gets a repository.
*
* Create a request for the method "repositories.get".
*
* This request holds the parameters needed by the artifactregistry server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param name Required. The name of the repository to retrieve.
* @return the request
*/
public Get get(java.lang.String name) throws java.io.IOException {
Get result = new Get(name);
initialize(result);
return result;
}
public class Get extends ArtifactRegistryRequest<com.google.api.services.artifactregistry.v1.model.Repository> {
private static final String REST_PATH = "v1/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+/repositories/[^/]+$");
/**
* Gets a repository.
*
* Create a request for the method "repositories.get".
*
         * This request holds the parameters needed by the artifactregistry server.  After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Required. The name of the repository to retrieve.
* @since 1.13
*/
protected Get(java.lang.String name) {
super(ArtifactRegistry.this, "GET", REST_PATH, null, com.google.api.services.artifactregistry.v1.model.Repository.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/repositories/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** Required. The name of the repository to retrieve. */
@com.google.api.client.util.Key
private java.lang.String name;
/** Required. The name of the repository to retrieve.
*/
public java.lang.String getName() {
return name;
}
/** Required. The name of the repository to retrieve. */
public Get setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/repositories/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Lists repositories.
*
* Create a request for the method "repositories.list".
*
* This request holds the parameters needed by the artifactregistry server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param parent Required. The name of the parent resource whose repositories will be listed.
* @return the request
*/
public List list(java.lang.String parent) throws java.io.IOException {
List result = new List(parent);
initialize(result);
return result;
}
public class List extends ArtifactRegistryRequest<com.google.api.services.artifactregistry.v1.model.ListRepositoriesResponse> {
private static final String REST_PATH = "v1/{+parent}/repositories";
private final java.util.regex.Pattern PARENT_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+$");
/**
* Lists repositories.
*
* Create a request for the method "repositories.list".
*
         * This request holds the parameters needed by the artifactregistry server.  After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param parent Required. The name of the parent resource whose repositories will be listed.
* @since 1.13
*/
protected List(java.lang.String parent) {
super(ArtifactRegistry.this, "GET", REST_PATH, null, com.google.api.services.artifactregistry.v1.model.ListRepositoriesResponse.class);
this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** Required. The name of the parent resource whose repositories will be listed. */
@com.google.api.client.util.Key
private java.lang.String parent;
/** Required. The name of the parent resource whose repositories will be listed.
*/
public java.lang.String getParent() {
return parent;
}
/** Required. The name of the parent resource whose repositories will be listed. */
public List setParent(java.lang.String parent) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+$");
}
this.parent = parent;
return this;
}
/** The maximum number of repositories to return. */
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** The maximum number of repositories to return.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/** The maximum number of repositories to return. */
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/** The next_page_token value returned from a previous list request, if any. */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** The next_page_token value returned from a previous list request, if any.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/** The next_page_token value returned from a previous list request, if any. */
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
/**
* An accessor for creating requests from the DockerImages collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code ArtifactRegistry artifactregistry = new ArtifactRegistry(...);}
* {@code ArtifactRegistry.DockerImages.List request = artifactregistry.dockerImages().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public DockerImages dockerImages() {
return new DockerImages();
}
/**
* The "dockerImages" collection of methods.
*/
public class DockerImages {
/**
* Lists docker images.
*
* Create a request for the method "dockerImages.list".
*
* This request holds the parameters needed by the artifactregistry server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param parent Required. The name of the parent resource whose docker images will be listed.
* @return the request
*/
public List list(java.lang.String parent) throws java.io.IOException {
List result = new List(parent);
initialize(result);
return result;
}
public class List extends ArtifactRegistryRequest<com.google.api.services.artifactregistry.v1.model.ListDockerImagesResponse> {
private static final String REST_PATH = "v1/{+parent}/dockerImages";
private final java.util.regex.Pattern PARENT_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+/repositories/[^/]+$");
/**
* Lists docker images.
*
* Create a request for the method "dockerImages.list".
*
           * This request holds the parameters needed by the artifactregistry server.  After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param parent Required. The name of the parent resource whose docker images will be listed.
* @since 1.13
*/
protected List(java.lang.String parent) {
super(ArtifactRegistry.this, "GET", REST_PATH, null, com.google.api.services.artifactregistry.v1.model.ListDockerImagesResponse.class);
this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/repositories/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** Required. The name of the parent resource whose docker images will be listed. */
@com.google.api.client.util.Key
private java.lang.String parent;
/** Required. The name of the parent resource whose docker images will be listed.
*/
public java.lang.String getParent() {
return parent;
}
/** Required. The name of the parent resource whose docker images will be listed. */
public List setParent(java.lang.String parent) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/repositories/[^/]+$");
}
this.parent = parent;
return this;
}
/** The maximum number of artifacts to return. */
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** The maximum number of artifacts to return.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/** The maximum number of artifacts to return. */
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/** The next_page_token value returned from a previous list request, if any. */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** The next_page_token value returned from a previous list request, if any.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/** The next_page_token value returned from a previous list request, if any. */
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
}
}
}
}
/**
* Builder for {@link ArtifactRegistry}.
*
* <p>
* Implementation is not thread-safe.
* </p>
*
* @since 1.3.0
*/
public static final class Builder extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient.Builder {
private static String chooseEndpoint(com.google.api.client.http.HttpTransport transport) {
// If the GOOGLE_API_USE_MTLS_ENDPOINT environment variable value is "always", use mTLS endpoint.
// If the env variable is "auto", use mTLS endpoint if and only if the transport is mTLS.
// Use the regular endpoint for all other cases.
String useMtlsEndpoint = System.getenv("GOOGLE_API_USE_MTLS_ENDPOINT");
useMtlsEndpoint = useMtlsEndpoint == null ? "auto" : useMtlsEndpoint;
if ("always".equals(useMtlsEndpoint) || ("auto".equals(useMtlsEndpoint) && transport != null && transport.isMtls())) {
return DEFAULT_MTLS_ROOT_URL;
}
return DEFAULT_ROOT_URL;
}
/**
* Returns an instance of a new builder.
*
* @param transport HTTP transport, which should normally be:
* <ul>
* <li>Google App Engine:
* {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
* <li>Android: {@code newCompatibleTransport} from
* {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
* <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
* </li>
* </ul>
* @param jsonFactory JSON factory, which may be:
* <ul>
* <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
* <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
* <li>Android Honeycomb or higher:
* {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
* </ul>
* @param httpRequestInitializer HTTP request initializer or {@code null} for none
* @since 1.7
*/
public Builder(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
super(
transport,
jsonFactory,
Builder.chooseEndpoint(transport),
DEFAULT_SERVICE_PATH,
httpRequestInitializer,
false);
setBatchPath(DEFAULT_BATCH_PATH);
}
/** Builds a new instance of {@link ArtifactRegistry}. */
@Override
public ArtifactRegistry build() {
return new ArtifactRegistry(this);
}
@Override
public Builder setRootUrl(String rootUrl) {
return (Builder) super.setRootUrl(rootUrl);
}
@Override
public Builder setServicePath(String servicePath) {
return (Builder) super.setServicePath(servicePath);
}
@Override
public Builder setBatchPath(String batchPath) {
return (Builder) super.setBatchPath(batchPath);
}
@Override
public Builder setHttpRequestInitializer(com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
return (Builder) super.setHttpRequestInitializer(httpRequestInitializer);
}
@Override
public Builder setApplicationName(String applicationName) {
return (Builder) super.setApplicationName(applicationName);
}
@Override
public Builder setSuppressPatternChecks(boolean suppressPatternChecks) {
return (Builder) super.setSuppressPatternChecks(suppressPatternChecks);
}
@Override
public Builder setSuppressRequiredParameterChecks(boolean suppressRequiredParameterChecks) {
return (Builder) super.setSuppressRequiredParameterChecks(suppressRequiredParameterChecks);
}
@Override
public Builder setSuppressAllChecks(boolean suppressAllChecks) {
return (Builder) super.setSuppressAllChecks(suppressAllChecks);
}
/**
* Set the {@link ArtifactRegistryRequestInitializer}.
*
* @since 1.12
*/
public Builder setArtifactRegistryRequestInitializer(
ArtifactRegistryRequestInitializer artifactregistryRequestInitializer) {
return (Builder) super.setGoogleClientRequestInitializer(artifactregistryRequestInitializer);
}
@Override
public Builder setGoogleClientRequestInitializer(
com.google.api.client.googleapis.services.GoogleClientRequestInitializer googleClientRequestInitializer) {
return (Builder) super.setGoogleClientRequestInitializer(googleClientRequestInitializer);
}
}
}
|
[
"\"GOOGLE_API_USE_MTLS_ENDPOINT\""
] |
[] |
[
"GOOGLE_API_USE_MTLS_ENDPOINT"
] |
[]
|
["GOOGLE_API_USE_MTLS_ENDPOINT"]
|
java
| 1 | 0 | |
pkg/toxic/perspective/perspective.go
|
package perspective
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"os"
"github.com/mitchellh/mapstructure"
"github.com/wongma7/perspectiveapi-irc-bot/pkg/toxic"
)
const (
modelToxicity = "TOXICITY"
baseURL = "https://commentanalyzer.googleapis.com/v1alpha1/comments:analyze"
)
type Request struct {
Comment Comment `json:"comment"`
RequestedAttributes map[string]Score `json:"requestedAttributes"`
	Languages           []string         `json:"languages,omitempty"`
DoNotStore bool `json:"doNotStore,omitempty"`
}
type Comment struct {
Text string `json:"text"`
Ctype string `json:"type,omitempty"`
}
type Score struct {
ScoreType string `json:"scoreType,omitempty"`
ScoreThreshold float64 `json:"scoreThreshold,omitempty"`
}
type Response struct {
AttributeScores map[string]Scores
}
type Scores struct {
SummaryScore SummaryScore `json:"summaryScore"`
}
type SummaryScore struct {
Value float64 `json:"value"`
Stype string `json:"type"`
}
type Perspective struct {
}
var _ toxic.Analyzer = &Perspective{}
func (p *Perspective) ScoreComment(comment string) (float64, error) {
request := &Request{
Comment: Comment{
Text: comment,
},
RequestedAttributes: map[string]Score{
modelToxicity: {},
},
Languages: []string{"en"},
DoNotStore: true,
}
requ, err := json.Marshal(request)
if err != nil {
return 0, err
}
apiKey := os.Getenv("PERSPECTIVE_API_KEY")
url := baseURL + "?key=" + apiKey
	resp, err := http.Post(url, "application/json", bytes.NewBuffer(requ))
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()
	var f interface{}
	if err := json.NewDecoder(resp.Body).Decode(&f); err != nil {
		return 0, err
	}
	m, ok := f.(map[string]interface{})
	if !ok {
		return 0, fmt.Errorf("unexpected response type %T", f)
	}
for k, v := range m {
switch k {
case "error":
return 0, fmt.Errorf("%v", v)
case "attributeScores":
var response Response
err = mapstructure.Decode(m, &response)
if err != nil {
return 0, err
}
return response.AttributeScores[modelToxicity].SummaryScore.Value, nil
}
}
return 0, nil
}
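// A minimal usage sketch (assumes PERSPECTIVE_API_KEY is exported; the sample
// comment text and the logging calls are illustrative):
//
//	p := &Perspective{}
//	score, err := p.ScoreComment("what a lovely day")
//	if err != nil {
//	    log.Fatal(err)
//	}
//	fmt.Printf("TOXICITY score: %v\n", score)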
|
[
"\"PERSPECTIVE_API_KEY\""
] |
[] |
[
"PERSPECTIVE_API_KEY"
] |
[]
|
["PERSPECTIVE_API_KEY"]
|
go
| 1 | 0 | |
gee/v4_group/gee.go
|
package gee
import (
"net/http"
"os"
)
// references: https://geektutu.com/post/gee-day2.html
/*
The goal is to support the following usage:
func main() {
	r := gee.New()
	r.GET("/", func(c *gee.Context) {
		c.HTML(http.StatusOK, "<h1>Hello Gee</h1>")
	})
	r.GET("/hello", func(c *gee.Context) {
		// expect /hello?name=geektutu
		c.String(http.StatusOK, "hello %s, you're at %s\n", c.Query("name"), c.Path)
	})
	r.POST("/login", func(c *gee.Context) {
		c.JSON(http.StatusOK, gee.H{
			"username": c.PostForm("username"),
			"password": c.PostForm("password"),
		})
	})
	r.Run(":9999")
}

The handler's parameter becomes gee.Context, which exposes the Query/PostForm lookup helpers.
gee.Context also wraps HTML/String/JSON functions so HTTP responses can be built quickly.

A web service boils down to constructing an http.ResponseWriter response from an *http.Request. The
interfaces those two objects expose are too fine-grained, though: building a complete response means
handling the header (status code, content type, ...) as well as the body, details that almost every
request has to set. Without an effective wrapper, users of the framework end up writing a lot of
repetitive, verbose, and error-prone code. Constructing HTTP responses efficiently for the common
cases is something a good framework must take care of.

Compare returning JSON data before and after wrapping to see the difference.
Before:
	obj = map[string]interface{}{
		"name": "geektutu",
		"password": "1234",
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	encoder := json.NewEncoder(w)
	if err := encoder.Encode(obj); err != nil {
		http.Error(w, err.Error(), 500)
	}
After:
	c.JSON(http.StatusOK, gee.H{
		"username": c.PostForm("username"),
		"password": c.PostForm("password"),
	})
*/
type HandleFunc func(c *Context)
// 2. Engine implements the http.Handler interface, giving us a single entry point that intercepts every HTTP request. Here we can freely define routing rules and add shared logic such as logging and error handling.
type Engine struct {
router *router
}
// 3. Construct an Engine
func New() *Engine {
return &Engine{router: newRouter()}
}
func (e *Engine) addRouter(method, pattern string, handler HandleFunc) {
e.router.addRoute(method, pattern, handler)
}
func (e *Engine) Get(pattern string, handler HandleFunc) {
e.addRouter("GET", pattern, handler)
}
func (e *Engine) Post(pattern string, handler HandleFunc) {
e.addRouter("POST", pattern, handler)
}
// Run starts the HTTP server. The address defaults to ":9999", or ":$PORT" when the PORT environment variable is set.
func (e *Engine) Run(addr ...string) error {
l := len(addr)
address := ":9999"
switch l {
case 1:
address = addr[0]
case 0:
if port := os.Getenv("PORT"); port != "" {
address = ":" + port
}
}
return http.ListenAndServe(address, e)
}
// Parse the request path, look it up in the routing table, and execute the matching handler.
func (e *Engine) ServeHTTP(w http.ResponseWriter, r *http.Request) {
c := newContext(w, r)
e.router.handle(c)
}
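// A minimal usage sketch against this file's actual API (note the methods are
// named Get/Post here, not GET/POST as in the tutorial target above; the route
// and response text are hypothetical, and Context is assumed to provide the
// String helper described in the comment block):
//
//	r := New()
//	r.Get("/", func(c *Context) {
//	    c.String(http.StatusOK, "hello gee")
//	})
//	r.Run()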
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
cmd/helm/helm_test.go
|
/*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bytes"
"io/ioutil"
"os"
"os/exec"
"runtime"
"strings"
"testing"
shellwords "github.com/mattn/go-shellwords"
"github.com/spf13/cobra"
"github.com/povils/helm/v3/internal/test"
"github.com/povils/helm/v3/pkg/action"
"github.com/povils/helm/v3/pkg/chartutil"
kubefake "github.com/povils/helm/v3/pkg/kube/fake"
"github.com/povils/helm/v3/pkg/release"
"github.com/povils/helm/v3/pkg/storage"
"github.com/povils/helm/v3/pkg/storage/driver"
"github.com/povils/helm/v3/pkg/time"
)
func testTimestamper() time.Time { return time.Unix(242085845, 0).UTC() }
func init() {
action.Timestamper = testTimestamper
}
func runTestCmd(t *testing.T, tests []cmdTestCase) {
t.Helper()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
defer resetEnv()()
storage := storageFixture()
for _, rel := range tt.rels {
if err := storage.Create(rel); err != nil {
t.Fatal(err)
}
}
t.Log("running cmd: ", tt.cmd)
_, out, err := executeActionCommandC(storage, tt.cmd)
if (err != nil) != tt.wantError {
t.Errorf("expected error, got '%v'", err)
}
if tt.golden != "" {
test.AssertGoldenString(t, out, tt.golden)
}
})
}
}
func runTestActionCmd(t *testing.T, tests []cmdTestCase) {
t.Helper()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
defer resetEnv()()
store := storageFixture()
for _, rel := range tt.rels {
store.Create(rel)
}
_, out, err := executeActionCommandC(store, tt.cmd)
if (err != nil) != tt.wantError {
t.Errorf("expected error, got '%v'", err)
}
if tt.golden != "" {
test.AssertGoldenString(t, out, tt.golden)
}
})
}
}
func storageFixture() *storage.Storage {
return storage.Init(driver.NewMemory())
}
func executeActionCommandC(store *storage.Storage, cmd string) (*cobra.Command, string, error) {
args, err := shellwords.Parse(cmd)
if err != nil {
return nil, "", err
}
buf := new(bytes.Buffer)
actionConfig := &action.Configuration{
Releases: store,
KubeClient: &kubefake.PrintingKubeClient{Out: ioutil.Discard},
Capabilities: chartutil.DefaultCapabilities,
Log: func(format string, v ...interface{}) {},
}
root := newRootCmd(actionConfig, buf, args)
root.SetOutput(buf)
root.SetArgs(args)
c, err := root.ExecuteC()
return c, buf.String(), err
}
// cmdTestCase describes a test case that works with releases.
type cmdTestCase struct {
name string
cmd string
golden string
wantError bool
// Rels are the available releases at the start of the test.
rels []*release.Release
}
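// A minimal example case (a sketch; the command and golden file are hypothetical):
//
//	runTestCmd(t, []cmdTestCase{{
//	    name:   "list releases",
//	    cmd:    "list",
//	    golden: "output/list.txt",
//	}})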
func executeActionCommand(cmd string) (*cobra.Command, string, error) {
return executeActionCommandC(storageFixture(), cmd)
}
func resetEnv() func() {
origSettings, origEnv := settings, os.Environ()
return func() {
os.Clearenv()
settings = origSettings
for _, pair := range origEnv {
kv := strings.SplitN(pair, "=", 2)
os.Setenv(kv[0], kv[1])
}
}
}
func testChdir(t *testing.T, dir string) func() {
t.Helper()
old, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
if err := os.Chdir(dir); err != nil {
t.Fatal(err)
}
return func() { os.Chdir(old) }
}
func TestPluginExitCode(t *testing.T) {
if os.Getenv("RUN_MAIN_FOR_TESTING") == "1" {
os.Args = []string{"helm", "exitwith", "2"}
// We DO call helm's main() here. So this looks like a normal `helm` process.
main()
// As main calls os.Exit, we never reach this line.
		// But the test run that invoked this block catches and verifies the exit code.
return
}
// Currently, plugins assume a Linux subsystem. Skip the execution
// tests until this is fixed
if runtime.GOOS != "windows" {
		// Do a second run of this specific test (TestPluginExitCode) with RUN_MAIN_FOR_TESTING=1 set,
		// so that the second run is able to run main() and this first run can verify the exit status it returns.
//
// This technique originates from https://talks.golang.org/2014/testing.slide#23.
cmd := exec.Command(os.Args[0], "-test.run=TestPluginExitCode")
cmd.Env = append(
os.Environ(),
"RUN_MAIN_FOR_TESTING=1",
// See pkg/cli/environment.go for which envvars can be used for configuring these passes
// and also see plugin_test.go for how a plugin env can be set up.
			// We just do the same setup as plugin_test.go via envvars
"HELM_PLUGINS=testdata/helmhome/helm/plugins",
"HELM_REPOSITORY_CONFIG=testdata/helmhome/helm/repositories.yaml",
"HELM_REPOSITORY_CACHE=testdata/helmhome/helm/repository",
)
stdout := &bytes.Buffer{}
stderr := &bytes.Buffer{}
cmd.Stdout = stdout
cmd.Stderr = stderr
err := cmd.Run()
exiterr, ok := err.(*exec.ExitError)
if !ok {
t.Fatalf("Unexpected error returned by os.Exit: %T", err)
}
if stdout.String() != "" {
t.Errorf("Expected no write to stdout: Got %q", stdout.String())
}
expectedStderr := "Error: plugin \"exitwith\" exited with error\n"
if stderr.String() != expectedStderr {
t.Errorf("Expected %q written to stderr: Got %q", expectedStderr, stderr.String())
}
if exiterr.ExitCode() != 2 {
t.Errorf("Expected exit code 2: Got %d", exiterr.ExitCode())
}
}
}
|
[
"\"RUN_MAIN_FOR_TESTING\""
] |
[] |
[
"RUN_MAIN_FOR_TESTING"
] |
[]
|
["RUN_MAIN_FOR_TESTING"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
"context"
"encoding/json"
"errors"
"io/ioutil"
"log"
"net/http"
"os"
"strconv"
"time"
"github.com/cloudflare/cloudflare-go"
_ "github.com/joho/godotenv/autoload"
)
type recordsJSON struct {
Record []record `json:"records"`
}
type record struct {
Name string `json:"name"`
Zone string `json:"zone"`
}
func main() {
useIPv6, _ := strconv.ParseBool(os.Getenv("IPV6"))
currentIP := ""
for {
newIP, err := getExternalIP(useIPv6)
if err != nil {
log.Fatal(err)
		} else if newIP == currentIP {
			// No change; wait before polling again so we don't hammer the IP service.
			time.Sleep(time.Minute)
			continue
		}
currentIP = newIP
records, err := readRecords()
if err != nil {
log.Fatal(err)
}
api, err := cloudflare.NewWithAPIToken(os.Getenv("API_TOKEN"))
if err != nil {
log.Fatal(err)
}
log.Printf("updating dns records to '%v'\n", newIP)
for i := range records.Record {
recordType := "A"
if useIPv6 {
recordType = "AAAA"
}
err = updateDNS(api, newIP, recordType, records.Record[i].Zone, records.Record[i].Name)
if err != nil {
log.Fatal(err)
}
}
log.Print("dns records updated!\n\n")
time.Sleep(time.Minute)
}
}
func updateDNS(api *cloudflare.API, newIP string, recordType string, zone string, name string) error {
zoneID, err := api.ZoneIDByName(zone)
if err != nil {
return err
}
records, err := api.DNSRecords(context.Background(), zoneID, cloudflare.DNSRecord{Name: name, Type: recordType})
if err != nil {
return err
} else if len(records) != 1 {
return errors.New("invalid number of DNS records retrieved")
}
records[0].Content = newIP
err = api.UpdateDNSRecord(context.Background(), zoneID, records[0].ID, records[0])
if err != nil {
return err
}
log.Printf("updated %v\n", name)
return nil
}
func readRecords() (recordsJSON, error) {
var records recordsJSON
data, err := ioutil.ReadFile("data/records.json")
if err != nil {
return records, err
}
err = json.Unmarshal(data, &records)
if err != nil {
return records, err
}
return records, nil
}
func getExternalIP(useIPv6 bool) (string, error) {
service := "https://v4.ident.me"
if useIPv6 {
service = "https://v6.ident.me"
}
response, err := http.Get(service)
if err != nil {
return "", err
}
defer response.Body.Close()
currentIP, err := ioutil.ReadAll(response.Body)
if err != nil {
return "", err
}
return string(currentIP), nil
}
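// Example data/records.json consumed by readRecords (a sketch derived from the
// struct tags above; the names and zones are hypothetical):
//
//	{
//	    "records": [
//	        {"name": "home.example.com", "zone": "example.com"}
//	    ]
//	}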
|
[
"\"IPV6\"",
"\"API_TOKEN\""
] |
[] |
[
"IPV6",
"API_TOKEN"
] |
[]
|
["IPV6", "API_TOKEN"]
|
go
| 2 | 0 | |
DjangoCrudMongoDB/DjangoCrudMongoDB/wsgi.py
|
"""
WSGI config for DjangoCrudMongoDB project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "DjangoCrudMongoDB.settings")
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
subprojects/internal-integ-testing/src/main/groovy/org/gradle/integtests/fixtures/executer/InProcessGradleExecuter.java
|
/*
* Copyright 2010 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.integtests.fixtures.executer;
import org.gradle.BuildResult;
import org.gradle.StartParameter;
import org.gradle.api.GradleException;
import org.gradle.api.Task;
import org.gradle.api.execution.TaskExecutionGraph;
import org.gradle.api.execution.TaskExecutionGraphListener;
import org.gradle.api.execution.TaskExecutionListener;
import org.gradle.api.internal.changedetection.state.InMemoryTaskArtifactCache;
import org.gradle.api.internal.classpath.ModuleRegistry;
import org.gradle.api.internal.file.TestFiles;
import org.gradle.api.logging.StandardOutputListener;
import org.gradle.api.logging.configuration.ShowStacktrace;
import org.gradle.api.tasks.TaskState;
import org.gradle.cli.CommandLineParser;
import org.gradle.configuration.GradleLauncherMetaData;
import org.gradle.execution.MultipleBuildFailures;
import org.gradle.initialization.BuildRequestContext;
import org.gradle.initialization.DefaultBuildCancellationToken;
import org.gradle.initialization.DefaultBuildRequestContext;
import org.gradle.initialization.DefaultBuildRequestMetaData;
import org.gradle.initialization.NoOpBuildEventConsumer;
import org.gradle.initialization.ReportedException;
import org.gradle.internal.Factory;
import org.gradle.internal.SystemProperties;
import org.gradle.internal.classpath.ClassPath;
import org.gradle.internal.event.ListenerManager;
import org.gradle.internal.exceptions.LocationAwareException;
import org.gradle.internal.invocation.BuildAction;
import org.gradle.internal.jvm.Jvm;
import org.gradle.internal.logging.LoggingManagerInternal;
import org.gradle.internal.nativeintegration.ProcessEnvironment;
import org.gradle.launcher.Main;
import org.gradle.launcher.cli.ExecuteBuildAction;
import org.gradle.launcher.cli.Parameters;
import org.gradle.launcher.cli.ParametersConverter;
import org.gradle.launcher.exec.BuildActionExecuter;
import org.gradle.launcher.exec.BuildActionParameters;
import org.gradle.launcher.exec.DefaultBuildActionParameters;
import org.gradle.process.internal.JavaExecHandleBuilder;
import org.gradle.test.fixtures.file.TestDirectoryProvider;
import org.gradle.test.fixtures.file.TestFile;
import org.gradle.util.DeprecationLogger;
import org.gradle.util.GUtil;
import org.hamcrest.Matcher;
import org.hamcrest.Matchers;
import java.io.File;
import java.io.InputStream;
import java.io.StringWriter;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CopyOnWriteArraySet;
import java.util.regex.Pattern;
import static org.gradle.util.Matchers.*;
import static org.hamcrest.Matchers.*;
import static org.junit.Assert.*;
public class InProcessGradleExecuter extends AbstractGradleExecuter {
private final ProcessEnvironment processEnvironment = GLOBAL_SERVICES.get(ProcessEnvironment.class);
public static final TestFile COMMON_TMP = new TestFile(new File("build/tmp"));
static {
LoggingManagerInternal loggingManager = GLOBAL_SERVICES.getFactory(LoggingManagerInternal.class).create();
loggingManager.start();
}
public InProcessGradleExecuter(GradleDistribution distribution, TestDirectoryProvider testDirectoryProvider) {
super(distribution, testDirectoryProvider);
}
@Override
public GradleExecuter reset() {
DeprecationLogger.reset();
return super.reset();
}
@Override
protected ExecutionResult doRun() {
if (isForkRequired()) {
return doStart().waitForFinish();
}
StandardOutputListener outputListener = new OutputListenerImpl();
StandardOutputListener errorListener = new OutputListenerImpl();
BuildListenerImpl buildListener = new BuildListenerImpl();
BuildResult result = doRun(outputListener, errorListener, buildListener);
try {
result.rethrowFailure();
} catch (Exception e) {
throw new UnexpectedBuildFailure(e);
}
return assertResult(new InProcessExecutionResult(buildListener.executedTasks, buildListener.skippedTasks,
new OutputScrapingExecutionResult(outputListener.toString(), errorListener.toString())));
}
@Override
protected ExecutionFailure doRunWithFailure() {
if (isForkRequired()) {
return doStart().waitForFailure();
}
StandardOutputListener outputListener = new OutputListenerImpl();
StandardOutputListener errorListener = new OutputListenerImpl();
BuildListenerImpl buildListener = new BuildListenerImpl();
try {
doRun(outputListener, errorListener, buildListener).rethrowFailure();
throw new AssertionError("expected build to fail but it did not.");
} catch (GradleException e) {
return assertResult(new InProcessExecutionFailure(buildListener.executedTasks, buildListener.skippedTasks,
new OutputScrapingExecutionFailure(outputListener.toString(), errorListener.toString()), e));
}
}
private boolean isForkRequired() {
if (isUseDaemon() || !getJavaHome().equals(Jvm.current().getJavaHome())) {
return true;
}
File gradleProperties = new File(getWorkingDir(), "gradle.properties");
if (gradleProperties.isFile()) {
Properties properties = GUtil.loadProperties(gradleProperties);
if (properties.getProperty("org.gradle.java.home") != null || properties.getProperty("org.gradle.jvmargs") != null) {
return true;
}
}
return false;
}
private <T extends ExecutionResult> T assertResult(T result) {
getResultAssertion().execute(result);
return result;
}
@Override
protected GradleHandle doStart() {
return new ForkingGradleHandle(getStdinPipe(), isUseDaemon(), getResultAssertion(), getDefaultCharacterEncoding(), getJavaExecBuilder()).start();
}
private Factory<JavaExecHandleBuilder> getJavaExecBuilder() {
return new Factory<JavaExecHandleBuilder>() {
public JavaExecHandleBuilder create() {
GradleInvocation invocation = buildInvocation();
JavaExecHandleBuilder builder = new JavaExecHandleBuilder(TestFiles.resolver());
builder.workingDir(getWorkingDir());
builder.setExecutable(new File(getJavaHome(), "bin/java"));
Collection<File> classpath = cleanup(GLOBAL_SERVICES.get(ModuleRegistry.class).getAdditionalClassPath().getAsFiles());
builder.classpath(classpath);
builder.jvmArgs(invocation.launcherJvmArgs);
builder.setMain(Main.class.getName());
builder.args(invocation.args);
builder.setStandardInput(connectStdIn());
return builder;
}
};
}
private Collection<File> cleanup(List<File> files) {
List<File> result = new LinkedList<File>();
String prefix = Jvm.current().getJavaHome().getPath() + File.separator;
for (File file : files) {
if (file.getPath().startsWith(prefix)) {
                    // IDEA adds the JDK's bootstrap classpath to the classpath it uses to run tests; remove it here
continue;
}
result.add(file);
}
return result;
}
private BuildResult doRun(StandardOutputListener outputListener, StandardOutputListener errorListener, BuildListenerImpl listener) {
// Capture the current state of things that we will change during execution
InputStream originalStdIn = System.in;
Properties originalSysProperties = new Properties();
originalSysProperties.putAll(System.getProperties());
File originalUserDir = new File(originalSysProperties.getProperty("user.dir")).getAbsoluteFile();
Map<String, String> originalEnv = new HashMap<String, String>(System.getenv());
GradleInvocation invocation = buildInvocation();
Set<String> changedEnvVars = new HashSet<String>(invocation.environmentVars.keySet());
try {
return executeBuild(invocation, outputListener, errorListener, listener);
} finally {
// Restore the environment
System.setProperties(originalSysProperties);
processEnvironment.maybeSetProcessDir(originalUserDir);
for (String envVar : changedEnvVars) {
String oldValue = originalEnv.get(envVar);
if (oldValue != null) {
processEnvironment.maybeSetEnvironmentVariable(envVar, oldValue);
} else {
processEnvironment.maybeRemoveEnvironmentVariable(envVar);
}
}
System.setProperty("user.dir", originalSysProperties.getProperty("user.dir"));
System.setIn(originalStdIn);
}
}
private BuildResult executeBuild(GradleInvocation invocation, StandardOutputListener outputListener, StandardOutputListener errorListener, BuildListenerImpl listener) {
// Augment the environment for the execution
System.setIn(connectStdIn());
processEnvironment.maybeSetProcessDir(getWorkingDir());
for (Map.Entry<String, String> entry : invocation.environmentVars.entrySet()) {
processEnvironment.maybeSetEnvironmentVariable(entry.getKey(), entry.getValue());
}
Map<String, String> implicitJvmSystemProperties = getImplicitJvmSystemProperties();
System.getProperties().putAll(implicitJvmSystemProperties);
// TODO: Fix tests that rely on this being set before we process arguments like this...
StartParameter startParameter = new StartParameter();
startParameter.setCurrentDir(getWorkingDir());
startParameter.setShowStacktrace(ShowStacktrace.ALWAYS);
CommandLineParser parser = new CommandLineParser();
ParametersConverter parametersConverter = new ParametersConverter();
parametersConverter.configure(parser);
final Parameters parameters = new Parameters(startParameter);
parametersConverter.convert(parser.parse(getAllArgs()), parameters);
if (parameters.getDaemonParameters().isStop()) {
// --stop should simulate stopping the daemon
cleanupCachedClassLoaders();
GLOBAL_SERVICES.get(InMemoryTaskArtifactCache.class).invalidateAll();
}
BuildActionExecuter<BuildActionParameters> actionExecuter = GLOBAL_SERVICES.get(BuildActionExecuter.class);
ListenerManager listenerManager = GLOBAL_SERVICES.get(ListenerManager.class);
listenerManager.addListener(listener);
try {
// TODO: Reuse more of BuildActionsFactory
BuildAction action = new ExecuteBuildAction(startParameter);
BuildActionParameters buildActionParameters = createBuildActionParameters(startParameter);
BuildRequestContext buildRequestContext = createBuildRequestContext(outputListener, errorListener);
actionExecuter.execute(action, buildRequestContext, buildActionParameters, GLOBAL_SERVICES);
return new BuildResult(null, null);
} catch (ReportedException e) {
return new BuildResult(null, e.getCause());
} finally {
listenerManager.removeListener(listener);
}
}
private BuildActionParameters createBuildActionParameters(StartParameter startParameter) {
return new DefaultBuildActionParameters(
System.getProperties(),
System.getenv(),
SystemProperties.getInstance().getCurrentDir(),
startParameter.getLogLevel(),
false,
startParameter.isContinuous(),
interactive,
ClassPath.EMPTY
);
}
private BuildRequestContext createBuildRequestContext(StandardOutputListener outputListener, StandardOutputListener errorListener) {
return new DefaultBuildRequestContext(
new DefaultBuildRequestMetaData(new GradleLauncherMetaData()),
new DefaultBuildCancellationToken(),
new NoOpBuildEventConsumer(),
outputListener, errorListener);
}
public void assertCanExecute() {
assertNull(getExecutable());
String defaultEncoding = getImplicitJvmSystemProperties().get("file.encoding");
if (defaultEncoding != null) {
assertEquals(Charset.forName(defaultEncoding), Charset.defaultCharset());
}
Locale defaultLocale = getDefaultLocale();
if (defaultLocale != null) {
assertEquals(defaultLocale, Locale.getDefault());
}
assertFalse(isRequiresGradleDistribution());
}
@Override
protected TestFile getDefaultTmpDir() {
// File.createTempFile sets the location of the temp directory to a static variable on the first call. This prevents future
// changes to java.io.tmpdir from having any effect in the same process. We set this to use a common tmp directory for all
// tests running in the same process so that we don't have a situation where one process initializes with a tmp directory
// that it then removes, causing an IOException for any future tests that run in the same process and call File.createTempFile.
return COMMON_TMP;
}
private static class BuildListenerImpl implements TaskExecutionGraphListener {
private final List<String> executedTasks = new CopyOnWriteArrayList<String>();
private final Set<String> skippedTasks = new CopyOnWriteArraySet<String>();
public void graphPopulated(TaskExecutionGraph graph) {
List<Task> planned = new ArrayList<Task>(graph.getAllTasks());
graph.addTaskExecutionListener(new TaskListenerImpl(planned, executedTasks, skippedTasks));
}
}
private static class OutputListenerImpl implements StandardOutputListener {
private StringWriter writer = new StringWriter();
@Override
public String toString() {
return writer.toString();
}
public void onOutput(CharSequence output) {
writer.append(output);
}
}
private static class TaskListenerImpl implements TaskExecutionListener {
private final List<Task> planned;
private final List<String> executedTasks;
private final Set<String> skippedTasks;
public TaskListenerImpl(List<Task> planned, List<String> executedTasks, Set<String> skippedTasks) {
this.planned = planned;
this.executedTasks = executedTasks;
this.skippedTasks = skippedTasks;
}
public void beforeExecute(Task task) {
assertTrue(planned.contains(task));
String taskPath = path(task);
if (taskPath.startsWith(":buildSrc:")) {
return;
}
executedTasks.add(taskPath);
}
public void afterExecute(Task task, TaskState state) {
String taskPath = path(task);
if (taskPath.startsWith(":buildSrc:")) {
return;
}
if (state.getSkipped()) {
skippedTasks.add(taskPath);
}
}
private String path(Task task) {
return task.getProject().getGradle().getParent() == null ? task.getPath() : ":" + task.getProject().getRootProject().getName() + task.getPath();
}
}
public static class InProcessExecutionResult implements ExecutionResult {
private final List<String> plannedTasks;
private final Set<String> skippedTasks;
private final OutputScrapingExecutionResult outputResult;
public InProcessExecutionResult(List<String> plannedTasks, Set<String> skippedTasks, OutputScrapingExecutionResult outputResult) {
this.plannedTasks = plannedTasks;
this.skippedTasks = skippedTasks;
this.outputResult = outputResult;
}
public String getOutput() {
return outputResult.getOutput();
}
@Override
public String getNormalizedOutput() {
return outputResult.getNormalizedOutput();
}
public ExecutionResult assertOutputEquals(String expectedOutput, boolean ignoreExtraLines, boolean ignoreLineOrder) {
outputResult.assertOutputEquals(expectedOutput, ignoreExtraLines, ignoreLineOrder);
return this;
}
@Override
public ExecutionResult assertOutputContains(String expectedOutput) {
outputResult.assertOutputContains(expectedOutput);
return this;
}
public String getError() {
return outputResult.getError();
}
public List<String> getExecutedTasks() {
return new ArrayList<String>(plannedTasks);
}
public ExecutionResult assertTasksExecuted(String... taskPaths) {
List<String> expected = Arrays.asList(taskPaths);
assertThat(plannedTasks, equalTo(expected));
outputResult.assertTasksExecuted(taskPaths);
return this;
}
public Set<String> getSkippedTasks() {
return new HashSet<String>(skippedTasks);
}
public ExecutionResult assertTasksSkipped(String... taskPaths) {
if (GradleContextualExecuter.isParallel()) {
return this;
}
Set<String> expected = new HashSet<String>(Arrays.asList(taskPaths));
assertThat(skippedTasks, equalTo(expected));
outputResult.assertTasksSkipped(taskPaths);
return this;
}
public ExecutionResult assertTaskSkipped(String taskPath) {
if (GradleContextualExecuter.isParallel()) {
return this;
}
assertThat(skippedTasks, hasItem(taskPath));
outputResult.assertTaskSkipped(taskPath);
return this;
}
public ExecutionResult assertTasksNotSkipped(String... taskPaths) {
if (GradleContextualExecuter.isParallel()) {
return this;
}
Set<String> expected = new HashSet<String>(Arrays.asList(taskPaths));
Set<String> notSkipped = getNotSkippedTasks();
assertThat(notSkipped, equalTo(expected));
outputResult.assertTasksNotSkipped(taskPaths);
return this;
}
public ExecutionResult assertTaskNotSkipped(String taskPath) {
if (GradleContextualExecuter.isParallel()) {
return this;
}
assertThat(getNotSkippedTasks(), hasItem(taskPath));
outputResult.assertTaskNotSkipped(taskPath);
return this;
}
private Set<String> getNotSkippedTasks() {
Set<String> notSkipped = new HashSet<String>(plannedTasks);
notSkipped.removeAll(skippedTasks);
return notSkipped;
}
}
private static class InProcessExecutionFailure extends InProcessExecutionResult implements ExecutionFailure {
private static final Pattern LOCATION_PATTERN = Pattern.compile("(?m)^((\\w+ )+'.+') line: (\\d+)$");
private final OutputScrapingExecutionFailure outputFailure;
private final GradleException failure;
private final String fileName;
private final String lineNumber;
private final String description;
public InProcessExecutionFailure(List<String> tasks, Set<String> skippedTasks, OutputScrapingExecutionFailure outputFailure,
GradleException failure) {
super(tasks, skippedTasks, outputFailure);
this.outputFailure = outputFailure;
this.failure = failure;
// Chop up the exception message into its expected parts
java.util.regex.Matcher matcher = LOCATION_PATTERN.matcher(failure.getMessage());
if (matcher.find()) {
fileName = matcher.group(1);
lineNumber = matcher.group(3);
description = failure.getMessage().substring(matcher.end()).trim();
} else {
fileName = "";
lineNumber = "";
description = failure.getMessage().trim();
}
}
public ExecutionFailure assertHasLineNumber(int lineNumber) {
assertThat(this.lineNumber, equalTo(String.valueOf(lineNumber)));
outputFailure.assertHasLineNumber(lineNumber);
return this;
}
public ExecutionFailure assertHasFileName(String filename) {
assertThat(this.fileName, equalTo(filename));
outputFailure.assertHasFileName(filename);
return this;
}
public ExecutionFailure assertHasResolution(String resolution) {
outputFailure.assertHasResolution(resolution);
return this;
}
public ExecutionFailure assertHasCause(String description) {
assertThatCause(startsWith(description));
return this;
}
public ExecutionFailure assertThatCause(Matcher<String> matcher) {
List<Throwable> causes = new ArrayList<Throwable>();
extractCauses(failure, causes);
assertThat(causes, Matchers.<Throwable>hasItem(hasMessage(normalizedLineSeparators(matcher))));
outputFailure.assertThatCause(matcher);
return this;
}
private void extractCauses(Throwable failure, List<Throwable> causes) {
if (failure instanceof MultipleBuildFailures) {
MultipleBuildFailures exception = (MultipleBuildFailures) failure;
for (Throwable componentFailure : exception.getCauses()) {
extractCauses(componentFailure, causes);
}
} else if (failure instanceof LocationAwareException) {
causes.addAll(((LocationAwareException) failure).getReportableCauses());
} else {
causes.add(failure);
}
}
public ExecutionFailure assertHasNoCause() {
if (failure instanceof LocationAwareException) {
LocationAwareException exception = (LocationAwareException) failure;
assertThat(exception.getReportableCauses(), isEmpty());
} else {
assertThat(failure.getCause(), nullValue());
}
outputFailure.assertHasNoCause();
return this;
}
public ExecutionFailure assertHasDescription(String context) {
assertThatDescription(startsWith(context));
return this;
}
public ExecutionFailure assertThatDescription(Matcher<String> matcher) {
assertThat(description, normalizedLineSeparators(matcher));
outputFailure.assertThatDescription(matcher);
return this;
}
public ExecutionFailure assertTestsFailed() {
new DetailedExecutionFailure(this).assertTestsFailed();
return this;
}
public DependencyResolutionFailure assertResolutionFailure(String configurationPath) {
return new DependencyResolutionFailure(this, configurationPath);
}
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
contrib/spendfrom/spendfrom.py
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend skyruss received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a skyrusd or Skyrus-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting SKX values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the skyrus data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Skyrus/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Skyrus")
return os.path.expanduser("~/.skyrus")
def read_skyrus_config(dbdir):
"""Read the skyrus.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "skyrus.conf"))))
return dict(config_parser.items("all"))
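# Example skyrus.conf parsed above (a sketch; the values are hypothetical, the
# keys are the ones read by connect_JSON below):
#
#     rpcuser=alice
#     rpcpassword=secret
#     rpcport=8332
#     testnet=0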
def connect_JSON(config):
"""Connect to a skyrus JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 15555 if testnet else 8332
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the skyrusd we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(skyrusd):
info = skyrusd.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
skyrusd.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = skyrusd.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(skyrusd):
address_summary = dict()
address_to_account = dict()
for info in skyrusd.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = skyrusd.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = skyrusd.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-skyrus-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(skyrusd, fromaddresses, toaddress, amount, fee):
all_coins = list_available(skyrusd)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f SKX available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to skyrusd.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = skyrusd.createrawtransaction(inputs, outputs)
signed_rawtx = skyrusd.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(skyrusd, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = skyrusd.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(skyrusd, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = skyrusd.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(skyrusd, txinfo)
total_out = compute_amount_out(txinfo)
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000 # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get skyruss from")
parser.add_option("--to", dest="to", default=None,
help="address to get send skyruss to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of skyrus.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_skyrus_config(options.datadir)
if options.testnet: config['testnet'] = True
skyrusd = connect_JSON(config)
if options.amount is None:
address_summary = list_available(skyrusd)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(skyrusd) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(skyrusd, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(skyrusd, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = skyrusd.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"APPDATA"
] |
[]
|
["APPDATA"]
|
python
| 1 | 0 | |
benchmark/cloud/aws/kylin.py
|
# Copyright 2021 Raven Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import threading
import time
from typing import List
from benchmark.cloud.aws.aws import Ec2Instance, AmazonWebService
from benchmark.tools import get_random_id
logger = logging.getLogger()
class KylinMode:
ALL = 'all'
JOB = 'job'
QUERY = 'query'
class KylinMaster(Ec2Instance):
def __init__(self, *, aws: AmazonWebService = None, region: str = '', ec2_key_name: str = '',
ec2_instance_type: str):
path = os.path.join(os.environ['RAVEN_HOME'], 'config', 'cloud', 'aws', 'kylin',
'kylin-master-cloudformation-template.yaml')
with open(path, encoding='utf-8') as file:
template = file.read()
super().__init__(
name='KylinMaster',
aws=aws,
region=region,
stack_name='Raven-Kylin-Master-Stack',
template=template,
ec2_key_name=ec2_key_name,
ec2_instance_type=ec2_instance_type
)
@property
def spark_master_url(self):
return self.aws.get_stack_output_by_key(stack_name=self.stack_name, output_key='SparkMasterUrl')
def __str__(self):
return f'{self.name}(PublicIp={self.public_ip}, PrivateIp={self.private_ip})'
def launch(self):
logger.info('Kylin master is launching...')
super().launch()
logger.info('Kylin master has launched.')
def terminate(self):
logger.info('Kylin master is terminating...')
super().terminate()
logger.info('Kylin master has terminated.')
class KylinWorker(Ec2Instance):
def __init__(self, *, aws: AmazonWebService = None, region: str = '', ec2_key_name: str = '',
ec2_instance_type: str, worker_id: int = 1):
path = os.path.join(os.environ['RAVEN_HOME'], 'config', 'cloud', 'aws', 'kylin',
'kylin-worker-cloudformation-template.yaml')
with open(path, encoding='utf-8') as file:
template = file.read()
super().__init__(
name='KylinWorker',
aws=aws,
region=region,
stack_name=f'Raven-Kylin-Worker{worker_id}-Stack',
template=template,
ec2_key_name=ec2_key_name,
ec2_instance_type=ec2_instance_type,
KylinWorkerId=worker_id,
)
self._worker_id = worker_id
self._spark_master_private_ip = ''
@property
def worker_id(self):
return self._worker_id
@property
def spark_master_private_ip(self):
return self._spark_master_private_ip
@spark_master_private_ip.setter
def spark_master_private_ip(self, private_ip: str):
self._spark_master_private_ip = private_ip
self.kwargs['SparkMasterPrivateIp'] = private_ip
def __str__(self):
return f'{self.name}(PublicIp={self.public_ip}, PrivateIp={self.private_ip})'
def launch(self):
logger.info(f'Kylin worker {self._worker_id} is launching...')
super().launch()
logger.info(f'Kylin worker {self._worker_id} has launched.')
def terminate(self):
logger.info(f'Kylin worker {self._worker_id} is terminating...')
super().terminate()
logger.info(f'Kylin worker {self._worker_id} has terminated.')
class KylinCluster:
def __init__(self, *, aws: AmazonWebService, master_instance_type: str = 't2.small', worker_num: int = 0,
worker_instance_type: str = 't2.small'):
self._aws = aws
self._master_instance_type = master_instance_type
self._worker_instance_type = worker_instance_type
self._master = KylinMaster(aws=aws, ec2_instance_type=self._master_instance_type)
self._workers: List[KylinWorker] = [
KylinWorker(aws=aws, ec2_instance_type=self._worker_instance_type, worker_id=worker_id) for worker_id in
range(0, worker_num)]
self._cluster_id = get_random_id(16)
@property
def master(self):
return self._master
@property
def workers(self):
return self._workers
def __str__(self):
return f'KylinCluster(Master={self.master}, #Worker={len(self.workers)})'
def launch(self):
logger.info('Kylin cluster is launching...')
self.master.launch()
threads: List[threading.Thread] = []
for worker in self.workers:
worker.spark_master_private_ip = self.master.private_ip
thread = threading.Thread(target=worker.launch)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
logger.info('Kylin cluster has launched.')
def terminate(self):
logger.info('Kylin cluster is terminating...')
threads: List[threading.Thread] = []
for worker in self.workers:
thread = threading.Thread(target=worker.terminate)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
self.master.terminate()
logger.info('Kylin cluster has terminated.')
def install_cloud_watch_agent(self):
        logger.debug('Kylin cluster is installing the CloudWatch agent...')
threads: List[threading.Thread] = [threading.Thread(target=self.master.install_cloudwatch_agent)]
for worker in self.workers:
threads.append(threading.Thread(target=worker.install_cloudwatch_agent))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
        logger.debug('Kylin cluster has finished installing the CloudWatch agent.')
def collect_cluster_info(self, output_dir: str = None):
"""Collect kylin cluster information.
:param output_dir:
:return:
"""
if not output_dir:
output_dir = os.path.join(os.environ['RAVEN_HOME'], 'out', 'cluster', f'kylin-{self._cluster_id}')
os.makedirs(output_dir, exist_ok=True)
info = {
'Master': self.master.to_dict(),
'Workers': [worker.to_dict() for worker in self.workers]
}
with open(os.path.join(output_dir, f'cluster-info_{time.strftime("%Y-%m-%d_%H-%M-%S")}.json'), mode='w',
encoding='utf-8') as file:
json.dump(info, file, indent=2)
def collect_metrics(self, output_dir: str = None):
        logger.debug('Kylin cluster is collecting metrics from the CloudWatch agent...')
if not output_dir:
output_dir = os.path.join(os.environ['RAVEN_HOME'], 'out', 'cluster', f'kylin-{self._cluster_id}')
os.makedirs(output_dir, exist_ok=True)
threads: List[threading.Thread] = [
threading.Thread(target=self.master.collect_metrics, kwargs={'output_dir': output_dir})]
for worker in self.workers:
threads.append(threading.Thread(target=worker.collect_metrics, kwargs={'output_dir': output_dir}))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
        logger.debug('Kylin cluster has finished collecting metrics from the CloudWatch agent.')
def scale(self, worker_num: int):
logger.info('Kylin cluster is scaling...')
n = len(self.workers)
threads: List[threading.Thread] = []
if worker_num < n:
for worker_id in range(worker_num, n):
thread = threading.Thread(target=self.workers[worker_id].terminate)
thread.start()
threads.append(thread)
elif worker_num > n:
for worker_id in range(n, worker_num):
worker = KylinWorker(aws=self._aws, ec2_instance_type=self._worker_instance_type, worker_id=worker_id)
worker.spark_master_private_ip = self.master.private_ip
self.workers.append(worker)
thread = threading.Thread(target=worker.launch)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
logger.info('Kylin cluster has finished scaling.')
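# A minimal usage sketch (assumes RAVEN_HOME is set and `aws` is a configured
# AmazonWebService instance; the worker counts are hypothetical):
#
#     cluster = KylinCluster(aws=aws, worker_num=2)
#     cluster.launch()
#     cluster.scale(worker_num=4)
#     cluster.collect_cluster_info()
#     cluster.collect_metrics()
#     cluster.terminate()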
|
[] |
[] |
[
"RAVEN_HOME"
] |
[]
|
["RAVEN_HOME"]
|
python
| 1 | 0 | |
chain/sync.go
|
package chain
import (
"bytes"
"context"
"errors"
"fmt"
"os"
"sort"
"strconv"
"strings"
"time"
"github.com/filecoin-project/specs-actors/actors/runtime/proof"
"github.com/Gurpartap/async"
"github.com/hashicorp/go-multierror"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log/v2"
"github.com/libp2p/go-libp2p-core/connmgr"
"github.com/libp2p/go-libp2p-core/peer"
cbg "github.com/whyrusleeping/cbor-gen"
"github.com/whyrusleeping/pubsub"
"go.opencensus.io/stats"
"go.opencensus.io/trace"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/specs-actors/actors/builtin/power"
"github.com/filecoin-project/specs-actors/actors/util/adt"
blst "github.com/supranational/blst/bindings/go"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/beacon"
"github.com/filecoin-project/lotus/chain/blocksync"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
bstore "github.com/filecoin-project/lotus/lib/blockstore"
"github.com/filecoin-project/lotus/lib/sigs"
"github.com/filecoin-project/lotus/lib/sigs/bls"
"github.com/filecoin-project/lotus/metrics"
)
// Blocks that are more than MaxHeightDrift epochs above
// the theoretical max height based on systime are quickly rejected.
const MaxHeightDrift = 5
var defaultMessageFetchWindowSize = 200
func init() {
if s := os.Getenv("LOTUS_BSYNC_MSG_WINDOW"); s != "" {
val, err := strconv.Atoi(s)
if err != nil {
log.Errorf("failed to parse LOTUS_BSYNC_MSG_WINDOW: %s", err)
return
}
defaultMessageFetchWindowSize = val
}
}
var log = logging.Logger("chain")
var LocalIncoming = "incoming"
// Syncer is in charge of running the chain synchronization logic. As such, it
// is tasked with these functions, amongst others:
//
// * Fast-forwards the chain as it learns of new TipSets from the network via
// the SyncManager.
// * Applies the fork choice rule to select the correct side when confronted
// with a fork in the network.
// * Requests block headers and messages from other peers when not available
// in our BlockStore.
// * Tracks blocks marked as bad in a cache.
// * Keeps the BlockStore and ChainStore consistent with our view of the world,
// the latter of which in turn informs other components when a reorg has been
// committed.
//
// The Syncer does not run workers itself. It's mainly concerned with
// ensuring a consistent state of chain consensus. The reactive and network-
// interfacing processes are part of other components, such as the SyncManager
// (which owns the sync scheduler and sync workers), BlockSync, the HELLO
// protocol, and the gossipsub block propagation layer.
//
// {hint/concept} The fork-choice rule as it currently stands is: "pick the
// chain with the heaviest weight, so long as it hasn’t deviated one finality
// threshold from our head (900 epochs, parameter determined by spec-actors)".
type Syncer struct {
// The interface for accessing and putting tipsets into local storage
store *store.ChainStore
// handle to the random beacon for verification
beacon beacon.RandomBeacon
// the state manager handles making state queries
sm *stmgr.StateManager
// The known Genesis tipset
Genesis *types.TipSet
// TipSets known to be invalid
bad *BadBlockCache
// handle to the block sync service
Bsync *blocksync.BlockSync
self peer.ID
syncmgr *SyncManager
connmgr connmgr.ConnManager
incoming *pubsub.PubSub
receiptTracker *blockReceiptTracker
verifier ffiwrapper.Verifier
windowSize int
tickerCtxCancel context.CancelFunc
}
// NewSyncer creates a new Syncer object.
func NewSyncer(sm *stmgr.StateManager, bsync *blocksync.BlockSync, connmgr connmgr.ConnManager, self peer.ID, beacon beacon.RandomBeacon, verifier ffiwrapper.Verifier) (*Syncer, error) {
gen, err := sm.ChainStore().GetGenesis()
if err != nil {
return nil, xerrors.Errorf("getting genesis block: %w", err)
}
gent, err := types.NewTipSet([]*types.BlockHeader{gen})
if err != nil {
return nil, err
}
s := &Syncer{
beacon: beacon,
bad: NewBadBlockCache(),
Genesis: gent,
Bsync: bsync,
store: sm.ChainStore(),
sm: sm,
self: self,
receiptTracker: newBlockReceiptTracker(),
connmgr: connmgr,
verifier: verifier,
windowSize: defaultMessageFetchWindowSize,
incoming: pubsub.New(50),
}
if build.InsecurePoStValidation {
log.Warn("*********************************************************************************************")
log.Warn(" [INSECURE-POST-VALIDATION] Insecure test validation is enabled. If you see this outside of a test, it is a severe bug! ")
log.Warn("*********************************************************************************************")
}
s.syncmgr = NewSyncManager(s.Sync)
return s, nil
}
func (syncer *Syncer) Start() {
tickerCtx, tickerCtxCancel := context.WithCancel(context.Background())
syncer.syncmgr.Start()
syncer.tickerCtxCancel = tickerCtxCancel
	go syncer.runMetricsTicker(tickerCtx)
}
func (syncer *Syncer) runMetricsTicker(tickerCtx context.Context) {
genesisTime := time.Unix(int64(syncer.Genesis.MinTimestamp()), 0)
ticker := build.Clock.Ticker(time.Duration(build.BlockDelaySecs) * time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C:
sinceGenesis := build.Clock.Now().Sub(genesisTime)
expectedHeight := int64(sinceGenesis.Seconds()) / int64(build.BlockDelaySecs)
stats.Record(tickerCtx, metrics.ChainNodeHeightExpected.M(expectedHeight))
case <-tickerCtx.Done():
return
}
}
}
func (syncer *Syncer) Stop() {
syncer.syncmgr.Stop()
syncer.tickerCtxCancel()
}
// InformNewHead informs the syncer about a new potential tipset
// This should be called when connecting to new peers, and additionally
// when receiving new blocks from the network
func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool {
ctx := context.Background()
if fts == nil {
log.Errorf("got nil tipset in InformNewHead")
return false
}
if syncer.IsEpochBeyondCurrMax(fts.TipSet().Height()) {
log.Errorf("Received block with impossibly large height %d", fts.TipSet().Height())
return false
}
for _, b := range fts.Blocks {
if reason, ok := syncer.bad.Has(b.Cid()); ok {
log.Warnf("InformNewHead called on block marked as bad: %s (reason: %s)", b.Cid(), reason)
return false
}
if err := syncer.ValidateMsgMeta(b); err != nil {
log.Warnf("invalid block received: %s", err)
return false
}
}
syncer.incoming.Pub(fts.TipSet().Blocks(), LocalIncoming)
if from == syncer.self {
		// TODO: this is kind of a hack...
log.Debug("got block from ourselves")
if err := syncer.Sync(ctx, fts.TipSet()); err != nil {
log.Errorf("failed to sync our own block %s: %+v", fts.TipSet().Cids(), err)
return false
}
return true
}
// TODO: IMPORTANT(GARBAGE) this needs to be put in the 'temporary' side of
// the blockstore
if err := syncer.store.PersistBlockHeaders(fts.TipSet().Blocks()...); err != nil {
log.Warn("failed to persist incoming block header: ", err)
return false
}
syncer.Bsync.AddPeer(from)
bestPweight := syncer.store.GetHeaviestTipSet().ParentWeight()
targetWeight := fts.TipSet().ParentWeight()
if targetWeight.LessThan(bestPweight) {
var miners []string
for _, blk := range fts.TipSet().Blocks() {
miners = append(miners, blk.Miner.String())
}
log.Infof("incoming tipset from %s does not appear to be better than our best chain, ignoring for now", miners)
return false
}
syncer.syncmgr.SetPeerHead(ctx, from, fts.TipSet())
return true
}
// IncomingBlocks spawns a goroutine that subscribes to the local eventbus to
// receive new block headers as they arrive from the network, and sends them to
// the returned channel.
//
// These blocks have not necessarily been incorporated to our view of the chain.
func (syncer *Syncer) IncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) {
sub := syncer.incoming.Sub(LocalIncoming)
out := make(chan *types.BlockHeader, 10)
go func() {
defer syncer.incoming.Unsub(sub, LocalIncoming)
for {
select {
case r := <-sub:
hs := r.([]*types.BlockHeader)
for _, h := range hs {
select {
case out <- h:
case <-ctx.Done():
return
}
}
case <-ctx.Done():
return
}
}
}()
return out, nil
}
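// A minimal consumption sketch (assumes the caller has a context and a Syncer
// in scope; the log line is illustrative):
//
//	ch, err := syncer.IncomingBlocks(ctx)
//	if err != nil {
//	    return err
//	}
//	for h := range ch {
//	    log.Infof("incoming block: %s", h.Cid())
//	}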
// ValidateMsgMeta performs structural and content hash validation of the
// messages within this block. If validation passes, it stores the messages in
// the underlying IPLD block store.
func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error {
if msgc := len(fblk.BlsMessages) + len(fblk.SecpkMessages); msgc > build.BlockMessageLimit {
return xerrors.Errorf("block %s has too many messages (%d)", fblk.Header.Cid(), msgc)
}
// Collect the CIDs of both types of messages separately: BLS and Secpk.
var bcids, scids []cid.Cid
for _, m := range fblk.BlsMessages {
bcids = append(bcids, m.Cid())
}
for _, m := range fblk.SecpkMessages {
scids = append(scids, m.Cid())
}
// TODO: IMPORTANT(GARBAGE). These message puts and the msgmeta
// computation need to go into the 'temporary' side of the blockstore when
// we implement that
blockstore := syncer.store.Blockstore()
bs := cbor.NewCborStore(blockstore)
// Compute the root CID of the combined message trie.
smroot, err := computeMsgMeta(bs, bcids, scids)
if err != nil {
return xerrors.Errorf("validating msgmeta, compute failed: %w", err)
}
// Check that the message trie root matches with what's in the block.
if fblk.Header.Messages != smroot {
return xerrors.Errorf("messages in full block did not match msgmeta root in header (%s != %s)", fblk.Header.Messages, smroot)
}
for _, m := range fblk.BlsMessages {
_, err := store.PutMessage(blockstore, m)
if err != nil {
return xerrors.Errorf("putting bls message to blockstore after msgmeta computation: %w", err)
}
}
for _, m := range fblk.SecpkMessages {
_, err := store.PutMessage(blockstore, m)
if err != nil {
return xerrors.Errorf("putting bls message to blockstore after msgmeta computation: %w", err)
}
}
return nil
}
func (syncer *Syncer) LocalPeer() peer.ID {
return syncer.self
}
func (syncer *Syncer) ChainStore() *store.ChainStore {
return syncer.store
}
func (syncer *Syncer) InformNewBlock(from peer.ID, blk *types.FullBlock) bool {
// TODO: search for other blocks that could form a tipset with this block
// and then send that tipset to InformNewHead
fts := &store.FullTipSet{Blocks: []*types.FullBlock{blk}}
return syncer.InformNewHead(from, fts)
}
func copyBlockstore(from, to bstore.Blockstore) error {
cids, err := from.AllKeysChan(context.TODO())
if err != nil {
return err
}
for c := range cids {
b, err := from.Get(c)
if err != nil {
return err
}
if err := to.Put(b); err != nil {
return err
}
}
return nil
}
// TODO: this function effectively accepts unchecked input from the network,
// either validate it here, or ensure that its validated elsewhere (maybe make
// sure the blocksync code checks it?)
// maybe this code should actually live in blocksync??
func zipTipSetAndMessages(bs cbor.IpldStore, ts *types.TipSet, allbmsgs []*types.Message, allsmsgs []*types.SignedMessage, bmi, smi [][]uint64) (*store.FullTipSet, error) {
if len(ts.Blocks()) != len(smi) || len(ts.Blocks()) != len(bmi) {
return nil, fmt.Errorf("msgincl length didnt match tipset size")
}
fts := &store.FullTipSet{}
for bi, b := range ts.Blocks() {
if msgc := len(bmi[bi]) + len(smi[bi]); msgc > build.BlockMessageLimit {
return nil, fmt.Errorf("block %q has too many messages (%d)", b.Cid(), msgc)
}
var smsgs []*types.SignedMessage
var smsgCids []cid.Cid
for _, m := range smi[bi] {
smsgs = append(smsgs, allsmsgs[m])
smsgCids = append(smsgCids, allsmsgs[m].Cid())
}
var bmsgs []*types.Message
var bmsgCids []cid.Cid
for _, m := range bmi[bi] {
bmsgs = append(bmsgs, allbmsgs[m])
bmsgCids = append(bmsgCids, allbmsgs[m].Cid())
}
mrcid, err := computeMsgMeta(bs, bmsgCids, smsgCids)
if err != nil {
return nil, err
}
if b.Messages != mrcid {
return nil, fmt.Errorf("messages didnt match message root in header for ts %s", ts.Key())
}
fb := &types.FullBlock{
Header: b,
BlsMessages: bmsgs,
SecpkMessages: smsgs,
}
fts.Blocks = append(fts.Blocks, fb)
}
return fts, nil
}
// computeMsgMeta computes the root CID of the combined arrays of message CIDs
// of both types (BLS and Secpk).
func computeMsgMeta(bs cbor.IpldStore, bmsgCids, smsgCids []cid.Cid) (cid.Cid, error) {
store := adt.WrapStore(context.TODO(), bs)
bmArr := adt.MakeEmptyArray(store)
smArr := adt.MakeEmptyArray(store)
for i, m := range bmsgCids {
c := cbg.CborCid(m)
if err := bmArr.Set(uint64(i), &c); err != nil {
return cid.Undef, err
}
}
for i, m := range smsgCids {
c := cbg.CborCid(m)
if err := smArr.Set(uint64(i), &c); err != nil {
return cid.Undef, err
}
}
bmroot, err := bmArr.Root()
if err != nil {
return cid.Undef, err
}
smroot, err := smArr.Root()
if err != nil {
return cid.Undef, err
}
mrcid, err := store.Put(store.Context(), &types.MsgMeta{
BlsMessages: bmroot,
SecpkMessages: smroot,
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed to put msgmeta: %w", err)
}
return mrcid, nil
}
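// computeMsgMetaExample is a hypothetical sketch (not part of the original
// code, and not called anywhere) showing how a MsgMeta root could be
// computed over a throwaway in-memory store, so the intermediate AMT nodes
// never touch the persistent chain blockstore.
func computeMsgMetaExample(bcids, scids []cid.Cid) (cid.Cid, error) {
	// A temporary blockstore keeps the intermediate nodes out of the
	// persistent store; only the resulting root CID matters here.
	bs := cbor.NewCborStore(bstore.NewTemporary())
	return computeMsgMeta(bs, bcids, scids)
}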
// FetchTipSet tries to load the provided tipset from the store, and falls back
// to the network (BlockSync) by querying the supplied peer if not found
// locally.
//
// {hint/usage} This is used from the HELLO protocol, to fetch the greeting
// peer's heaviest tipset if we don't have it.
func (syncer *Syncer) FetchTipSet(ctx context.Context, p peer.ID, tsk types.TipSetKey) (*store.FullTipSet, error) {
if fts, err := syncer.tryLoadFullTipSet(tsk); err == nil {
return fts, nil
}
// fall back to the network.
return syncer.Bsync.GetFullTipSet(ctx, p, tsk)
}
// tryLoadFullTipSet queries the tipset in the ChainStore, and returns a full
// representation of it containing FullBlocks. Unless every block and its
// messages are found locally, it errors (typically with
// blockstore.ErrNotFound when the data is missing).
func (syncer *Syncer) tryLoadFullTipSet(tsk types.TipSetKey) (*store.FullTipSet, error) {
ts, err := syncer.store.LoadTipSet(tsk)
if err != nil {
return nil, err
}
fts := &store.FullTipSet{}
for _, b := range ts.Blocks() {
bmsgs, smsgs, err := syncer.store.MessagesForBlock(b)
if err != nil {
return nil, err
}
fb := &types.FullBlock{
Header: b,
BlsMessages: bmsgs,
SecpkMessages: smsgs,
}
fts.Blocks = append(fts.Blocks, fb)
}
return fts, nil
}
// Sync tries to advance our view of the chain to `maybeHead`. It does nothing
// if our current head is heavier than the requested tipset, or if we're already
// at the requested head, or if the head is the genesis.
//
// Most of the heavy-lifting logic happens in syncer#collectChain. Refer to the
// godocs on that method for a more detailed view.
func (syncer *Syncer) Sync(ctx context.Context, maybeHead *types.TipSet) error {
ctx, span := trace.StartSpan(ctx, "chain.Sync")
defer span.End()
if span.IsRecordingEvents() {
span.AddAttributes(
trace.StringAttribute("tipset", fmt.Sprint(maybeHead.Cids())),
trace.Int64Attribute("height", int64(maybeHead.Height())),
)
}
if syncer.store.GetHeaviestTipSet().ParentWeight().GreaterThan(maybeHead.ParentWeight()) {
return nil
}
if syncer.Genesis.Equals(maybeHead) || syncer.store.GetHeaviestTipSet().Equals(maybeHead) {
return nil
}
if err := syncer.collectChain(ctx, maybeHead); err != nil {
span.AddAttributes(trace.StringAttribute("col_error", err.Error()))
span.SetStatus(trace.Status{
Code: 13,
Message: err.Error(),
})
return xerrors.Errorf("collectChain failed: %w", err)
}
// At this point we have accepted and synced to the new `maybeHead`
// (`StageSyncComplete`).
if err := syncer.store.PutTipSet(ctx, maybeHead); err != nil {
span.AddAttributes(trace.StringAttribute("put_error", err.Error()))
span.SetStatus(trace.Status{
Code: 13,
Message: err.Error(),
})
return xerrors.Errorf("failed to put synced tipset to chainstore: %w", err)
}
peers := syncer.receiptTracker.GetPeers(maybeHead)
if len(peers) > 0 {
syncer.connmgr.TagPeer(peers[0], "new-block", 40)
for _, p := range peers[1:] {
syncer.connmgr.TagPeer(p, "new-block", 25)
}
}
return nil
}
func isPermanent(err error) bool {
return !errors.Is(err, ErrTemporal)
}
func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet) error {
ctx, span := trace.StartSpan(ctx, "validateTipSet")
defer span.End()
span.AddAttributes(trace.Int64Attribute("height", int64(fts.TipSet().Height())))
ts := fts.TipSet()
if ts.Equals(syncer.Genesis) {
return nil
}
var futures []async.ErrorFuture
for _, b := range fts.Blocks {
b := b // rebind to a scoped variable
futures = append(futures, async.Err(func() error {
if err := syncer.ValidateBlock(ctx, b); err != nil {
if isPermanent(err) {
syncer.bad.Add(b.Cid(), NewBadBlockReason([]cid.Cid{b.Cid()}, err.Error()))
}
return xerrors.Errorf("validating block %s: %w", b.Cid(), err)
}
if err := syncer.sm.ChainStore().AddToTipSetTracker(b.Header); err != nil {
return xerrors.Errorf("failed to add validated header to tipset tracker: %w", err)
}
return nil
}))
}
for _, f := range futures {
if err := f.AwaitContext(ctx); err != nil {
return err
}
}
return nil
}
func (syncer *Syncer) minerIsValid(ctx context.Context, maddr address.Address, baseTs *types.TipSet) error {
var spast power.State
_, err := syncer.sm.LoadActorState(ctx, builtin.StoragePowerActorAddr, &spast, baseTs)
if err != nil {
return err
}
cm, err := adt.AsMap(syncer.store.Store(ctx), spast.Claims)
if err != nil {
return err
}
var claim power.Claim
exist, err := cm.Get(adt.AddrKey(maddr), &claim)
if err != nil {
return err
}
if !exist {
return xerrors.New("miner isn't valid")
}
return nil
}
var ErrTemporal = errors.New("temporal error")
func blockSanityChecks(h *types.BlockHeader) error {
if h.ElectionProof == nil {
return xerrors.Errorf("block cannot have nil election proof")
}
if h.Ticket == nil {
return xerrors.Errorf("block cannot have nil ticket")
}
if h.BlockSig == nil {
return xerrors.Errorf("block had nil signature")
}
if h.BLSAggregate == nil {
return xerrors.Errorf("block had nil bls aggregate signature")
}
return nil
}
// ValidateBlock should match up with 'Semantical Validation' in validation.md in the spec
func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (err error) {
defer func() {
// b.Cid() could panic for empty blocks that are used in tests.
if rerr := recover(); rerr != nil {
err = xerrors.Errorf("validate block panic: %w", rerr)
return
}
}()
isValidated, err := syncer.store.IsBlockValidated(ctx, b.Cid())
if err != nil {
return xerrors.Errorf("check block validation cache %s: %w", b.Cid(), err)
}
if isValidated {
return nil
}
validationStart := build.Clock.Now()
defer func() {
stats.Record(ctx, metrics.BlockValidationDurationMilliseconds.M(metrics.SinceInMilliseconds(validationStart)))
log.Infow("block validation", "took", time.Since(validationStart), "height", b.Header.Height, "age", time.Since(time.Unix(int64(b.Header.Timestamp), 0)))
}()
ctx, span := trace.StartSpan(ctx, "validateBlock")
defer span.End()
if err := blockSanityChecks(b.Header); err != nil {
return xerrors.Errorf("incoming header failed basic sanity checks: %w", err)
}
h := b.Header
baseTs, err := syncer.store.LoadTipSet(types.NewTipSetKey(h.Parents...))
if err != nil {
return xerrors.Errorf("load parent tipset failed (%s): %w", h.Parents, err)
}
lbts, err := stmgr.GetLookbackTipSetForRound(ctx, syncer.sm, baseTs, h.Height)
if err != nil {
return xerrors.Errorf("failed to get lookback tipset for block: %w", err)
}
lbst, _, err := syncer.sm.TipSetState(ctx, lbts)
if err != nil {
return xerrors.Errorf("failed to compute lookback tipset state: %w", err)
}
prevBeacon, err := syncer.store.GetLatestBeaconEntry(baseTs)
if err != nil {
return xerrors.Errorf("failed to get latest beacon entry: %w", err)
}
// fast checks first
nulls := h.Height - (baseTs.Height() + 1)
if tgtTs := baseTs.MinTimestamp() + build.BlockDelaySecs*uint64(nulls+1); h.Timestamp != tgtTs {
return xerrors.Errorf("block has wrong timestamp: %d != %d", h.Timestamp, tgtTs)
}
now := uint64(build.Clock.Now().Unix())
if h.Timestamp > now+build.AllowableClockDriftSecs {
return xerrors.Errorf("block was from the future (now=%d, blk=%d): %w", now, h.Timestamp, ErrTemporal)
}
if h.Timestamp > now {
log.Warn("Got block from the future, but within threshold", h.Timestamp, build.Clock.Now().Unix())
}
msgsCheck := async.Err(func() error {
if err := syncer.checkBlockMessages(ctx, b, baseTs); err != nil {
return xerrors.Errorf("block had invalid messages: %w", err)
}
return nil
})
minerCheck := async.Err(func() error {
if err := syncer.minerIsValid(ctx, h.Miner, baseTs); err != nil {
return xerrors.Errorf("minerIsValid failed: %w", err)
}
return nil
})
baseFeeCheck := async.Err(func() error {
baseFee, err := syncer.store.ComputeBaseFee(ctx, baseTs)
if err != nil {
return xerrors.Errorf("computing base fee: %w", err)
}
if types.BigCmp(baseFee, b.Header.ParentBaseFee) != 0 {
return xerrors.Errorf("base fee doesn't match: %s (header) != %s (computed)",
b.Header.ParentBaseFee, baseFee)
}
return nil
})
pweight, err := syncer.store.Weight(ctx, baseTs)
if err != nil {
return xerrors.Errorf("getting parent weight: %w", err)
}
if types.BigCmp(pweight, b.Header.ParentWeight) != 0 {
return xerrors.Errorf("parrent weight different: %s (header) != %s (computed)",
b.Header.ParentWeight, pweight)
}
// Stuff that needs stateroot / worker address
stateroot, precp, err := syncer.sm.TipSetState(ctx, baseTs)
if err != nil {
return xerrors.Errorf("get tipsetstate(%d, %s) failed: %w", h.Height, h.Parents, err)
}
if stateroot != h.ParentStateRoot {
msgs, err := syncer.store.MessagesForTipset(baseTs)
if err != nil {
log.Error("failed to load messages for tipset during tipset state mismatch error: ", err)
} else {
log.Warn("Messages for tipset with mismatching state:")
for i, m := range msgs {
mm := m.VMMessage()
log.Warnf("Message[%d]: from=%s to=%s method=%d params=%x", i, mm.From, mm.To, mm.Method, mm.Params)
}
}
return xerrors.Errorf("parent state root did not match computed state (%s != %s)", stateroot, h.ParentStateRoot)
}
if precp != h.ParentMessageReceipts {
return xerrors.Errorf("parent receipts root did not match computed value (%s != %s)", precp, h.ParentMessageReceipts)
}
waddr, err := stmgr.GetMinerWorkerRaw(ctx, syncer.sm, lbst, h.Miner)
if err != nil {
return xerrors.Errorf("GetMinerWorkerRaw failed: %w", err)
}
winnerCheck := async.Err(func() error {
if h.ElectionProof.WinCount < 1 {
return xerrors.Errorf("block is not claiming to be a winner")
}
hp, err := stmgr.MinerHasMinPower(ctx, syncer.sm, h.Miner, lbts)
if err != nil {
return xerrors.Errorf("determining if miner has min power failed: %w", err)
}
if !hp {
return xerrors.New("block's miner does not meet minimum power threshold")
}
rBeacon := *prevBeacon
if len(h.BeaconEntries) != 0 {
rBeacon = h.BeaconEntries[len(h.BeaconEntries)-1]
}
buf := new(bytes.Buffer)
if err := h.Miner.MarshalCBOR(buf); err != nil {
return xerrors.Errorf("failed to marshal miner address to cbor: %w", err)
}
vrfBase, err := store.DrawRandomness(rBeacon.Data, crypto.DomainSeparationTag_ElectionProofProduction, h.Height, buf.Bytes())
if err != nil {
return xerrors.Errorf("could not draw randomness: %w", err)
}
if err := VerifyElectionPoStVRF(ctx, waddr, vrfBase, h.ElectionProof.VRFProof); err != nil {
return xerrors.Errorf("validating block election proof failed: %w", err)
}
slashed, err := stmgr.GetMinerSlashed(ctx, syncer.sm, baseTs, h.Miner)
if err != nil {
return xerrors.Errorf("failed to check if block miner was slashed: %w", err)
}
if slashed {
return xerrors.Errorf("received block was from slashed or invalid miner")
}
mpow, tpow, err := stmgr.GetPowerRaw(ctx, syncer.sm, lbst, h.Miner)
if err != nil {
return xerrors.Errorf("failed getting power: %w", err)
}
j := h.ElectionProof.ComputeWinCount(mpow.QualityAdjPower, tpow.QualityAdjPower)
if h.ElectionProof.WinCount != j {
return xerrors.Errorf("miner claims wrong number of wins: miner: %d, computed: %d", h.ElectionProof.WinCount, j)
}
return nil
})
blockSigCheck := async.Err(func() error {
if err := sigs.CheckBlockSignature(ctx, h, waddr); err != nil {
return xerrors.Errorf("check block signature failed: %w", err)
}
return nil
})
beaconValuesCheck := async.Err(func() error {
if os.Getenv("LOTUS_IGNORE_DRAND") == "_yes_" {
return nil
}
if err := beacon.ValidateBlockValues(syncer.beacon, h, *prevBeacon); err != nil {
return xerrors.Errorf("failed to validate blocks random beacon values: %w", err)
}
return nil
})
tktsCheck := async.Err(func() error {
buf := new(bytes.Buffer)
if err := h.Miner.MarshalCBOR(buf); err != nil {
return xerrors.Errorf("failed to marshal miner address to cbor: %w", err)
}
beaconBase := *prevBeacon
if len(h.BeaconEntries) == 0 {
buf.Write(baseTs.MinTicket().VRFProof)
} else {
beaconBase = h.BeaconEntries[len(h.BeaconEntries)-1]
}
vrfBase, err := store.DrawRandomness(beaconBase.Data, crypto.DomainSeparationTag_TicketProduction, h.Height-build.TicketRandomnessLookback, buf.Bytes())
if err != nil {
return xerrors.Errorf("failed to compute vrf base for ticket: %w", err)
}
err = VerifyElectionPoStVRF(ctx, waddr, vrfBase, h.Ticket.VRFProof)
if err != nil {
return xerrors.Errorf("validating block tickets failed: %w", err)
}
return nil
})
wproofCheck := async.Err(func() error {
if err := syncer.VerifyWinningPoStProof(ctx, h, *prevBeacon, lbst, waddr); err != nil {
return xerrors.Errorf("invalid election post: %w", err)
}
return nil
})
await := []async.ErrorFuture{
minerCheck,
tktsCheck,
blockSigCheck,
beaconValuesCheck,
wproofCheck,
winnerCheck,
msgsCheck,
baseFeeCheck,
}
var merr error
for _, fut := range await {
if err := fut.AwaitContext(ctx); err != nil {
merr = multierror.Append(merr, err)
}
}
if merr != nil {
mulErr := merr.(*multierror.Error)
mulErr.ErrorFormat = func(es []error) string {
if len(es) == 1 {
return fmt.Sprintf("1 error occurred:\n\t* %+v\n\n", es[0])
}
points := make([]string, len(es))
for i, err := range es {
points[i] = fmt.Sprintf("* %+v", err)
}
return fmt.Sprintf(
"%d errors occurred:\n\t%s\n\n",
len(es), strings.Join(points, "\n\t"))
}
return mulErr
}
if err := syncer.store.MarkBlockAsValidated(ctx, b.Cid()); err != nil {
return xerrors.Errorf("caching block validation %s: %w", b.Cid(), err)
}
return nil
}
func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, h *types.BlockHeader, prevBeacon types.BeaconEntry, lbst cid.Cid, waddr address.Address) error {
if build.InsecurePoStValidation {
if len(h.WinPoStProof) == 0 {
return xerrors.Errorf("[INSECURE-POST-VALIDATION] No winning post proof given")
}
if string(h.WinPoStProof[0].ProofBytes) == "valid proof" {
return nil
}
return xerrors.Errorf("[INSECURE-POST-VALIDATION] winning post was invalid")
}
buf := new(bytes.Buffer)
if err := h.Miner.MarshalCBOR(buf); err != nil {
return xerrors.Errorf("failed to marshal miner address: %w", err)
}
rbase := prevBeacon
if len(h.BeaconEntries) > 0 {
rbase = h.BeaconEntries[len(h.BeaconEntries)-1]
}
rand, err := store.DrawRandomness(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, h.Height, buf.Bytes())
if err != nil {
return xerrors.Errorf("failed to get randomness for verifying winningPost proof: %w", err)
}
mid, err := address.IDFromAddress(h.Miner)
if err != nil {
return xerrors.Errorf("failed to get ID from miner address %s: %w", h.Miner, err)
}
sectors, err := stmgr.GetSectorsForWinningPoSt(ctx, syncer.verifier, syncer.sm, lbst, h.Miner, rand)
if err != nil {
return xerrors.Errorf("getting winning post sector set: %w", err)
}
ok, err := ffiwrapper.ProofVerifier.VerifyWinningPoSt(ctx, proof.WinningPoStVerifyInfo{
Randomness: rand,
Proofs: h.WinPoStProof,
ChallengedSectors: sectors,
Prover: abi.ActorID(mid),
})
if err != nil {
return xerrors.Errorf("failed to verify election post: %w", err)
}
if !ok {
log.Errorf("invalid winning post (block: %s, %x; %v)", h.Cid(), rand, sectors)
return xerrors.Errorf("winning post was invalid")
}
return nil
}
// TODO: We should extract this somewhere else and make the message pool and miner use the same logic
func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock, baseTs *types.TipSet) error {
{
var sigCids []cid.Cid // this is what we get for people not wanting the marshalcbor method on the cid type
var pubks [][]byte
for _, m := range b.BlsMessages {
sigCids = append(sigCids, m.Cid())
pubk, err := syncer.sm.GetBlsPublicKey(ctx, m.From, baseTs)
if err != nil {
return xerrors.Errorf("failed to load bls public to validate block: %w", err)
}
pubks = append(pubks, pubk)
}
if err := syncer.verifyBlsAggregate(ctx, b.Header.BLSAggregate, sigCids, pubks); err != nil {
return xerrors.Errorf("bls aggregate signature was invalid: %w", err)
}
}
nonces := make(map[address.Address]uint64)
stateroot, _, err := syncer.sm.TipSetState(ctx, baseTs)
if err != nil {
return err
}
cst := cbor.NewCborStore(syncer.store.Blockstore())
st, err := state.LoadStateTree(cst, stateroot)
if err != nil {
return xerrors.Errorf("failed to load base state tree: %w", err)
}
pl := vm.PricelistByEpoch(baseTs.Height())
var sumGasLimit int64
checkMsg := func(msg types.ChainMsg) error {
m := msg.VMMessage()
// Phase 1: syntactic validation, as defined in the spec
minGas := pl.OnChainMessage(msg.ChainLength())
if err := m.ValidForBlockInclusion(minGas.Total()); err != nil {
return err
}
// ValidForBlockInclusion checks if any single message does not exceed BlockGasLimit
// So below is overflow safe
sumGasLimit += m.GasLimit
if sumGasLimit > build.BlockGasLimit {
return xerrors.Errorf("block gas limit exceeded")
}
// Phase 2: (Partial) semantic validation:
// the sender exists and is an account actor, and the nonces make sense
if _, ok := nonces[m.From]; !ok {
// `GetActor` does not validate that this is an account actor.
act, err := st.GetActor(m.From)
if err != nil {
return xerrors.Errorf("failed to get actor: %w", err)
}
if !act.IsAccountActor() {
return xerrors.New("Sender must be an account actor")
}
nonces[m.From] = act.Nonce
}
if nonces[m.From] != m.Nonce {
return xerrors.Errorf("wrong nonce (exp: %d, got: %d)", nonces[m.From], m.Nonce)
}
nonces[m.From]++
return nil
}
store := adt.WrapStore(ctx, cst)
bmArr := adt.MakeEmptyArray(store)
for i, m := range b.BlsMessages {
if err := checkMsg(m); err != nil {
return xerrors.Errorf("block had invalid bls message at index %d: %w", i, err)
}
c := cbg.CborCid(m.Cid())
if err := bmArr.Set(uint64(i), &c); err != nil {
return xerrors.Errorf("failed to put bls message at index %d: %w", i, err)
}
}
smArr := adt.MakeEmptyArray(store)
for i, m := range b.SecpkMessages {
if err := checkMsg(m); err != nil {
return xerrors.Errorf("block had invalid secpk message at index %d: %w", i, err)
}
// `From` being an account actor is only validated inside the `vm.ResolveToKeyAddr` call
// in `StateManager.ResolveToKeyAddress` here (and not in `checkMsg`).
kaddr, err := syncer.sm.ResolveToKeyAddress(ctx, m.Message.From, baseTs)
if err != nil {
return xerrors.Errorf("failed to resolve key addr: %w", err)
}
if err := sigs.Verify(&m.Signature, kaddr, m.Message.Cid().Bytes()); err != nil {
return xerrors.Errorf("secpk message %s has invalid signature: %w", m.Cid(), err)
}
c := cbg.CborCid(m.Cid())
if err := smArr.Set(uint64(i), &c); err != nil {
return xerrors.Errorf("failed to put secpk message at index %d: %w", i, err)
}
}
bmroot, err := bmArr.Root()
if err != nil {
return err
}
smroot, err := smArr.Root()
if err != nil {
return err
}
mrcid, err := cst.Put(ctx, &types.MsgMeta{
BlsMessages: bmroot,
SecpkMessages: smroot,
})
if err != nil {
return err
}
if b.Header.Messages != mrcid {
return fmt.Errorf("messages didnt match message root in header")
}
return nil
}
func (syncer *Syncer) verifyBlsAggregate(ctx context.Context, sig *crypto.Signature, msgs []cid.Cid, pubks [][]byte) error {
_, span := trace.StartSpan(ctx, "syncer.verifyBlsAggregate")
defer span.End()
span.AddAttributes(
trace.Int64Attribute("msgCount", int64(len(msgs))),
)
if len(msgs) == 0 {
return nil
}
msgsS := make([]blst.Message, len(msgs))
for i := 0; i < len(msgs); i++ {
msgsS[i] = msgs[i].Bytes()
}
valid := new(bls.Signature).AggregateVerifyCompressed(sig.Data, pubks,
msgsS, []byte(bls.DST))
if !valid {
return xerrors.New("bls aggregate signature failed to verify")
}
return nil
}
type syncStateKey struct{}
func extractSyncState(ctx context.Context) *SyncerState {
v := ctx.Value(syncStateKey{})
if v != nil {
return v.(*SyncerState)
}
return nil
}
// collectHeaders collects the headers from the blocks between any two tipsets.
//
// `incoming` is the heaviest/projected/target tipset we have learned about, and
// `known` is usually an anchor tipset we already have in our view of the chain
// (which could be the genesis).
//
// collectHeaders checks which portions of the chain are in our ChainStore; it
// falls back to the network to retrieve the missing parts. If, during the
// process, any portion we receive is in our denylist (bad list), we short-circuit.
//
// {hint/usage}: This is used by collectChain, which is in turn called from the
// main Sync method (Syncer#Sync), so it's a pretty central method.
//
// {hint/logic}: The logic of this method is as follows:
//
// 1. Check that the from tipset is not linked to a parent block known to be
// bad.
// 2. Check the consistency of beacon entries in the from tipset. We check
// total equality of the BeaconEntries in each block.
// 3. Traverse the chain backwards, for each tipset:
// 3a. Load it from the chainstore; if found, move on to its parent.
// 3b. Query our peers via BlockSync in batches, requesting up to a
// maximum of 500 tipsets every time.
//
// Once we've concluded, if we find a mismatching tipset at the height where the
// anchor tipset should be, we are facing a fork, and we invoke Syncer#syncFork
// to resolve it. Refer to the godocs there.
//
// All throughout the process, we keep checking if the received blocks are in
// the deny list, and short-circuit the process if so.
func (syncer *Syncer) collectHeaders(ctx context.Context, incoming *types.TipSet, known *types.TipSet) ([]*types.TipSet, error) {
ctx, span := trace.StartSpan(ctx, "collectHeaders")
defer span.End()
ss := extractSyncState(ctx)
span.AddAttributes(
trace.Int64Attribute("incomingHeight", int64(incoming.Height())),
trace.Int64Attribute("knownHeight", int64(known.Height())),
)
// Check if the parents of the from block are in the denylist.
// i.e. if a fork of the chain has been requested that we know to be bad.
for _, pcid := range incoming.Parents().Cids() {
if reason, ok := syncer.bad.Has(pcid); ok {
newReason := reason.Linked("linked to %s", pcid)
for _, b := range incoming.Cids() {
syncer.bad.Add(b, newReason)
}
return nil, xerrors.Errorf("chain linked to block marked previously as bad (%s, %s) (reason: %s)", incoming.Cids(), pcid, reason)
}
}
{
// ensure consistency of beacon entries
targetBE := incoming.Blocks()[0].BeaconEntries
sorted := sort.SliceIsSorted(targetBE, func(i, j int) bool {
return targetBE[i].Round < targetBE[j].Round
})
if !sorted {
syncer.bad.Add(incoming.Cids()[0], NewBadBlockReason(incoming.Cids(), "wrong order of beacon entries"))
return nil, xerrors.Errorf("wrong order of beacon entries")
}
for _, bh := range incoming.Blocks()[1:] {
if len(targetBE) != len(bh.BeaconEntries) {
// cannot mark bad, I think @Kubuxu
return nil, xerrors.Errorf("tipset contained different number for beacon entires")
}
for i, be := range bh.BeaconEntries {
if targetBE[i].Round != be.Round || !bytes.Equal(targetBE[i].Data, be.Data) {
// cannot mark bad, I think @Kubuxu
return nil, xerrors.Errorf("tipset contained different beacon entires")
}
}
}
}
blockSet := []*types.TipSet{incoming}
at := incoming.Parents()
// we want to sync all the blocks until the height above the block we have
untilHeight := known.Height() + 1
ss.SetHeight(blockSet[len(blockSet)-1].Height())
var acceptedBlocks []cid.Cid
loop:
for blockSet[len(blockSet)-1].Height() > untilHeight {
for _, bc := range at.Cids() {
if reason, ok := syncer.bad.Has(bc); ok {
newReason := reason.Linked("change contained %s", bc)
for _, b := range acceptedBlocks {
syncer.bad.Add(b, newReason)
}
return nil, xerrors.Errorf("chain contained block marked previously as bad (%s, %s) (reason: %s)", incoming.Cids(), bc, reason)
}
}
// If, for some reason, we have a suffix of the chain locally, handle that here
ts, err := syncer.store.LoadTipSet(at)
if err == nil {
acceptedBlocks = append(acceptedBlocks, at.Cids()...)
blockSet = append(blockSet, ts)
at = ts.Parents()
continue
}
if !xerrors.Is(err, bstore.ErrNotFound) {
log.Warn("loading local tipset: %s", err)
}
// NB: GetBlocks validates that the blocks are in fact the ones we
// requested, and that they are correctly linked to one another. It does
// not validate any state transitions.
window := 500
if gap := int(blockSet[len(blockSet)-1].Height() - untilHeight); gap < window {
window = gap
}
blks, err := syncer.Bsync.GetBlocks(ctx, at, window)
if err != nil {
// Most likely our peers aren't fully synced yet, but forwarded
// new block message (ideally we'd find better peers)
log.Errorf("failed to get blocks: %+v", err)
span.AddAttributes(trace.StringAttribute("error", err.Error()))
// The error is logged above and also returned so the caller can handle it.
return nil, xerrors.Errorf("failed to get blocks: %w", err)
}
log.Info("Got blocks: ", blks[0].Height(), len(blks))
// Check that the fetched segment of the chain matches what we already
// have. Since we fetch from the head backwards our reassembled chain
// is sorted in reverse here: we have a child -> parent order, our last
// tipset then should be child of the first tipset retrieved.
// FIXME: The reassembly logic should be part of the `BlockSync`
// service, the consumer should not be concerned with the
// `MaxRequestLength` limitation, it should just be able to request
// a segment of arbitrary length. The same burden is put on
// `syncFork()`, which needs to be aware of this as well.
// A successful `GetBlocks()` call is guaranteed to fetch at least
// one tipset, so the access `blks[0]` is safe.
if !blockSet[len(blockSet)-1].IsChildOf(blks[0]) {
return nil, xerrors.Errorf("retrieved segments of the chain are not connected at heights %d/%d",
blockSet[len(blockSet)-1].Height(), blks[0].Height())
}
for _, b := range blks {
if b.Height() < untilHeight {
break loop
}
for _, bc := range b.Cids() {
if reason, ok := syncer.bad.Has(bc); ok {
newReason := reason.Linked("change contained %s", bc)
for _, b := range acceptedBlocks {
syncer.bad.Add(b, newReason)
}
return nil, xerrors.Errorf("chain contained block marked previously as bad (%s, %s) (reason: %s)", incoming.Cids(), bc, reason)
}
}
blockSet = append(blockSet, b)
}
acceptedBlocks = append(acceptedBlocks, at.Cids()...)
ss.SetHeight(blks[len(blks)-1].Height())
at = blks[len(blks)-1].Parents()
}
base := blockSet[len(blockSet)-1]
if base.Parents() == known.Parents() {
// common case: receiving a block that's potentially part of the same tipset as our best block
return blockSet, nil
}
if types.CidArrsEqual(base.Parents().Cids(), known.Cids()) {
// common case: receiving blocks that are building on top of our best tipset
return blockSet, nil
}
// We have now ascertained that this is *not* a 'fast forward'
log.Warnf("(fork detected) synced header chain (%s - %d) does not link to our best block (%s - %d)", incoming.Cids(), incoming.Height(), known.Cids(), known.Height())
fork, err := syncer.syncFork(ctx, base, known)
if err != nil {
if xerrors.Is(err, ErrForkTooLong) {
// TODO: we're marking this block bad in the same way that we mark invalid blocks bad. Maybe distinguish?
log.Warn("adding forked chain to our bad tipset cache")
for _, b := range incoming.Blocks() {
syncer.bad.Add(b.Cid(), NewBadBlockReason(incoming.Cids(), "fork past finality"))
}
}
return nil, xerrors.Errorf("failed to sync fork: %w", err)
}
blockSet = append(blockSet, fork...)
return blockSet, nil
}
var ErrForkTooLong = fmt.Errorf("fork longer than threshold")
// syncFork tries to obtain the chain fragment that links a fork into a common
// ancestor in our view of the chain.
//
// If the fork is too long (build.ForkLengthThreshold), we add the entire subchain to the
// denylist. Else, we find the common ancestor, and add the missing chain
// fragment until the fork point to the returned []TipSet.
func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, known *types.TipSet) ([]*types.TipSet, error) {
tips, err := syncer.Bsync.GetBlocks(ctx, incoming.Parents(), int(build.ForkLengthThreshold))
if err != nil {
return nil, err
}
nts, err := syncer.store.LoadTipSet(known.Parents())
if err != nil {
return nil, xerrors.Errorf("failed to load next local tipset: %w", err)
}
for cur := 0; cur < len(tips); {
if nts.Height() == 0 {
if !syncer.Genesis.Equals(nts) {
return nil, xerrors.Errorf("somehow synced chain that linked back to a different genesis (bad genesis: %s)", nts.Key())
}
return nil, xerrors.Errorf("synced chain forked at genesis, refusing to sync; incoming: %s", incoming.Cids())
}
if nts.Equals(tips[cur]) {
return tips[:cur+1], nil
}
if nts.Height() < tips[cur].Height() {
cur++
} else {
nts, err = syncer.store.LoadTipSet(nts.Parents())
if err != nil {
return nil, xerrors.Errorf("loading next local tipset: %w", err)
}
}
}
return nil, ErrForkTooLong
}
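// Worked example (illustrative only, names are made up): if our local chain
// ends ... <- C <- K1 <- known and the incoming chain ends
// ... <- C <- F2 <- F1 <- incoming, syncFork walks the fetched tipsets
// (F1, F2, ...) and our local ancestors (K1, C, ...) backwards by height
// until they meet at C, and returns [F1, F2, C]: the fork fragment including
// the common ancestor, so the caller can connect the two chains.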
func (syncer *Syncer) syncMessagesAndCheckState(ctx context.Context, headers []*types.TipSet) error {
ss := extractSyncState(ctx)
ss.SetHeight(0)
return syncer.iterFullTipsets(ctx, headers, func(ctx context.Context, fts *store.FullTipSet) error {
log.Debugw("validating tipset", "height", fts.TipSet().Height(), "size", len(fts.TipSet().Cids()))
if err := syncer.ValidateTipSet(ctx, fts); err != nil {
log.Errorf("failed to validate tipset: %+v", err)
return xerrors.Errorf("message processing failed: %w", err)
}
stats.Record(ctx, metrics.ChainNodeWorkerHeight.M(int64(fts.TipSet().Height())))
ss.SetHeight(fts.TipSet().Height())
return nil
})
}
// fills out each of the given tipsets with messages and calls the callback with it
func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipSet, cb func(context.Context, *store.FullTipSet) error) error {
ctx, span := trace.StartSpan(ctx, "iterFullTipsets")
defer span.End()
span.AddAttributes(trace.Int64Attribute("num_headers", int64(len(headers))))
windowSize := syncer.windowSize
mainLoop:
for i := len(headers) - 1; i >= 0; {
fts, err := syncer.store.TryFillTipSet(headers[i])
if err != nil {
return err
}
if fts != nil {
if err := cb(ctx, fts); err != nil {
return err
}
i--
continue
}
batchSize := windowSize
if i < batchSize {
batchSize = i
}
nextI := (i + 1) - batchSize // we want to fetch batchSize values; 'i' points to the last one we want to fetch, so it's 'inclusive' of our request, thus we need to add one to our request start index
var bstout []*blocksync.CompactedMessages
for len(bstout) < batchSize {
next := headers[nextI]
nreq := batchSize - len(bstout)
bstips, err := syncer.Bsync.GetChainMessages(ctx, next, uint64(nreq))
if err != nil {
// TODO check errors for temporary nature
if windowSize > 1 {
windowSize /= 2
log.Infof("error fetching messages: %s; reducing window size to %d and trying again", err, windowSize)
continue mainLoop
}
return xerrors.Errorf("message processing failed: %w", err)
}
bstout = append(bstout, bstips...)
nextI += len(bstips)
}
for bsi := 0; bsi < len(bstout); bsi++ {
// temp storage so we don't persist data we don't want to
bs := bstore.NewTemporary()
blks := cbor.NewCborStore(bs)
this := headers[i-bsi]
bstip := bstout[len(bstout)-(bsi+1)]
fts, err := zipTipSetAndMessages(blks, this, bstip.Bls, bstip.Secpk, bstip.BlsIncludes, bstip.SecpkIncludes)
if err != nil {
log.Warnw("zipping failed", "error", err, "bsi", bsi, "i", i,
"height", this.Height(),
"next-height", i+batchSize)
return xerrors.Errorf("message processing failed: %w", err)
}
if err := cb(ctx, fts); err != nil {
return err
}
if err := persistMessages(bs, bstip); err != nil {
return err
}
if err := copyBlockstore(bs, syncer.store.Blockstore()); err != nil {
return xerrors.Errorf("message processing failed: %w", err)
}
}
if i >= windowSize {
newWindowSize := windowSize + 10
if newWindowSize > int(blocksync.MaxRequestLength) {
newWindowSize = int(blocksync.MaxRequestLength)
}
if newWindowSize > windowSize {
windowSize = newWindowSize
log.Infof("successfully fetched %d messages; increasing window size to %d", len(bstout), windowSize)
}
}
i -= batchSize
}
// remember our window size
syncer.windowSize = windowSize
return nil
}
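// Illustrative numbers (not from the original code): with windowSize = 200,
// a failed GetChainMessages call halves the window to 100, then 50, and so
// on down to 1; each batch that completes afterwards grows it by 10, capped
// at blocksync.MaxRequestLength, and the final size is remembered for the
// next sync.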
func persistMessages(bs bstore.Blockstore, bst *blocksync.CompactedMessages) error {
for _, m := range bst.Bls {
//log.Infof("putting BLS message: %s", m.Cid())
if _, err := store.PutMessage(bs, m); err != nil {
log.Errorf("failed to persist messages: %+v", err)
return xerrors.Errorf("BLS message processing failed: %w", err)
}
}
for _, m := range bst.Secpk {
if m.Signature.Type != crypto.SigTypeSecp256k1 {
return xerrors.Errorf("unknown signature type on message %s: %q", m.Cid(), m.Signature.Type)
}
//log.Infof("putting secp256k1 message: %s", m.Cid())
if _, err := store.PutMessage(bs, m); err != nil {
log.Errorf("failed to persist messages: %+v", err)
return xerrors.Errorf("secp256k1 message processing failed: %w", err)
}
}
return nil
}
// collectChain tries to advance our view of the chain to the purported head.
//
// It goes through various stages:
//
// 1. StageHeaders: we proceed in the sync process by requesting block headers
// from our peers, moving back from their heads, until we reach a tipset
// that we have in common (such a common tipset must exist, though it may
// simply be the genesis block).
//
// If the common tipset is our head, we treat the sync as a "fast-forward",
// else we must drop part of our chain to connect to the peer's head
// (referred to as "forking").
//
// 2. StagePersistHeaders: now that we've collected the missing headers,
// augmented by those on the other side of a fork, we persist them to the
// BlockStore.
//
// 3. StageMessages: having acquired the headers and found a common tipset,
// we then move forward, requesting the full blocks, including the messages.
func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet) error {
ctx, span := trace.StartSpan(ctx, "collectChain")
defer span.End()
ss := extractSyncState(ctx)
ss.Init(syncer.store.GetHeaviestTipSet(), ts)
headers, err := syncer.collectHeaders(ctx, ts, syncer.store.GetHeaviestTipSet())
if err != nil {
ss.Error(err)
return err
}
span.AddAttributes(trace.Int64Attribute("syncChainLength", int64(len(headers))))
if !headers[0].Equals(ts) {
log.Errorf("collectChain headers[0] should be equal to sync target. Its not: %s != %s", headers[0].Cids(), ts.Cids())
}
ss.SetStage(api.StagePersistHeaders)
toPersist := make([]*types.BlockHeader, 0, len(headers)*int(build.BlocksPerEpoch))
for _, ts := range headers {
toPersist = append(toPersist, ts.Blocks()...)
}
if err := syncer.store.PersistBlockHeaders(toPersist...); err != nil {
err = xerrors.Errorf("failed to persist synced blocks to the chainstore: %w", err)
ss.Error(err)
return err
}
toPersist = nil
ss.SetStage(api.StageMessages)
if err := syncer.syncMessagesAndCheckState(ctx, headers); err != nil {
err = xerrors.Errorf("collectChain syncMessages: %w", err)
ss.Error(err)
return err
}
ss.SetStage(api.StageSyncComplete)
log.Debugw("new tipset", "height", ts.Height(), "tipset", types.LogCids(ts.Cids()))
return nil
}
func VerifyElectionPoStVRF(ctx context.Context, worker address.Address, rand []byte, evrf []byte) error {
if build.InsecurePoStValidation {
return nil
}
return gen.VerifyVRF(ctx, worker, rand, evrf)
}
func (syncer *Syncer) State() []SyncerState {
var out []SyncerState
for _, ss := range syncer.syncmgr.syncStates {
out = append(out, ss.Snapshot())
}
return out
}
// MarkBad manually adds a block to the "bad blocks" cache.
func (syncer *Syncer) MarkBad(blk cid.Cid) {
syncer.bad.Add(blk, NewBadBlockReason([]cid.Cid{blk}, "manually marked bad"))
}
func (syncer *Syncer) CheckBadBlockCache(blk cid.Cid) (string, bool) {
bbr, ok := syncer.bad.Has(blk)
return bbr.String(), ok
}
func (syncer *Syncer) getLatestBeaconEntry(_ context.Context, ts *types.TipSet) (*types.BeaconEntry, error) {
cur := ts
for i := 0; i < 20; i++ {
cbe := cur.Blocks()[0].BeaconEntries
if len(cbe) > 0 {
return &cbe[len(cbe)-1], nil
}
if cur.Height() == 0 {
return nil, xerrors.Errorf("made it back to genesis block without finding beacon entry")
}
next, err := syncer.store.LoadTipSet(cur.Parents())
if err != nil {
return nil, xerrors.Errorf("failed to load parents when searching back for latest beacon entry: %w", err)
}
cur = next
}
return nil, xerrors.Errorf("found NO beacon entries in the 20 blocks prior to given tipset")
}
func (syncer *Syncer) IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool {
g, err := syncer.store.GetGenesis()
if err != nil {
return false
}
now := uint64(build.Clock.Now().Unix())
return epoch > (abi.ChainEpoch((now-g.Timestamp)/build.BlockDelaySecs) + MaxHeightDrift)
}
|
[
"\"LOTUS_BSYNC_MSG_WINDOW\"",
"\"LOTUS_IGNORE_DRAND\""
] |
[] |
[
"LOTUS_IGNORE_DRAND",
"LOTUS_BSYNC_MSG_WINDOW"
] |
[]
|
["LOTUS_IGNORE_DRAND", "LOTUS_BSYNC_MSG_WINDOW"]
|
go
| 2 | 0 | |
tests/e2e/kubetest2-kops/builder/build.go
|
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package builder
import (
"fmt"
"os"
"sigs.k8s.io/kubetest2/pkg/exec"
)
type BuildOptions struct {
KopsRoot string `flag:"-"`
StageLocation string `flag:"-"`
}
// Build will build the kops artifacts and publish them to the stage location
func (b *BuildOptions) Build() error {
cmd := exec.Command("make", "gcs-publish-ci")
cmd.SetEnv(
fmt.Sprintf("HOME=%v", os.Getenv("HOME")),
fmt.Sprintf("PATH=%v", os.Getenv("PATH")),
fmt.Sprintf("GCS_LOCATION=%v", b.StageLocation),
fmt.Sprintf("GOPATH=%v", os.Getenv("GOPATH")),
)
cmd.SetDir(b.KopsRoot)
exec.InheritOutput(cmd)
return cmd.Run()
}
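// Hypothetical usage sketch (the paths and bucket name are assumptions, not
// part of this file):
//
//	b := &BuildOptions{
//		KopsRoot:      "/home/user/go/src/k8s.io/kops",
//		StageLocation: "gs://example-bucket/kops/ci",
//	}
//	if err := b.Build(); err != nil {
//		log.Fatalf("build failed: %v", err)
//	}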
|
[
"\"HOME\"",
"\"PATH\"",
"\"GOPATH\""
] |
[] |
[
"GOPATH",
"HOME",
"PATH"
] |
[]
|
["GOPATH", "HOME", "PATH"]
|
go
| 3 | 0 | |
doc/conf.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Pachyderm documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 7 10:45:21 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx_markdown_tables',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
source_suffix = ['.rst', '.md']
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Pachyderm'
copyright = '2019, Pachyderm Inc.'
author = 'Pachyderm'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.9.7'
# The full version, including alpha/beta/rc tags.
release = '1.9.7'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_archive', 'images/screencast_getting_started_pachub.gif', 'tutorials']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'custom_theme'
html_theme_path = ['.']
# on_rtd is whether we are on readthedocs.org; this line of code was grabbed from docs.readthedocs.org
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
html_context = {
'css_files': [
'https://media.readthedocs.org/css/sphinx_rtd_theme.css',
'https://media.readthedocs.org/css/readthedocs-doc-embed.css',
],
}
html_theme_options = {
'collapse_navigation': True,
'display_version': False,
'navigation_depth': -1,
}
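# Illustrative note (not part of the generated config): Read the Docs exports
# READTHEDOCS=True in its build environment, so the branch above applies
# there; for a local build such as
#
#   sphinx-build -b html . _build/html
#
# the variable is unset and on_rtd is False.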
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'Pachyderm v1.7.2'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['custom_theme/static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
html_sidebars = { '**': ['searchbox.html', 'globaltoc.html', 'relations.html', 'sourcelink.html'], }
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Pachydermdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Pachyderm.tex', 'Pachyderm Documentation',
'Joe Doliner', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pachyderm', 'Pachyderm Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Pachyderm', 'Pachyderm Documentation',
author, 'Pachyderm', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
SampleDjango/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "SampleDjango.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
Mathematics/Fundamentals/IsFibo.java
|
import java.io.*;
import java.math.*;
import java.security.*;
import java.text.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.regex.*;
class Result {
public static String isFibo(long n) {
long a = 1, b = 1;
while(b<=n) {
if(a == n || b == n) return "IsFibo";
long c = a+b;
a = b;
b = c;
}
return "IsNotFibo";
}
}
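// Illustrative check (not part of the original solution): the loop above
// generates 1, 1, 2, 3, 5, 8, ..., so Result.isFibo(5) returns "IsFibo"
// while Result.isFibo(4) returns "IsNotFibo".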
public class Solution {
public static void main(String[] args) throws IOException {
BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(System.in));
BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));
int t = Integer.parseInt(bufferedReader.readLine().trim());
for (int tItr = 0; tItr < t; tItr++) {
long n = Long.parseLong(bufferedReader.readLine().trim());
String result = Result.isFibo(n);
bufferedWriter.write(result);
bufferedWriter.newLine();
}
bufferedReader.close();
bufferedWriter.close();
}
}
|
[
"\"OUTPUT_PATH\""
] |
[] |
[
"OUTPUT_PATH"
] |
[]
|
["OUTPUT_PATH"]
|
java
| 1 | 0 | |
uniflex_module_wifi_gnuradio/wifi_gnuradio.py
|
import os
import sh
import time
import logging
import pyric.utils.channels as channels
import uniflex_module_gnuradio
from uniflex.core import modules
__author__ = "Anatolij Zubow, Piotr Gawlowicz"
__copyright__ = "Copyright (c) 2015, Technische Universität Berlin"
__version__ = "0.1.0"
__email__ = "{zubow, gawlowicz}@tkn.tu-berlin.de"
class WiFiGnuRadioModule(uniflex_module_gnuradio.GnuRadioModule):
"""
WiFi GNURadio connector module.
IEEE 802.11 WiFi implemented in GnuRadio.
Implementation is based on https://github.com/bastibl/gr-ieee802-11
Supported functionality:
- all functions from generic GnuRadio module
- freq
- samp_rate
- rx_gain
- tx_gain
- encoding *
- chan_est *
- lo_offset *
- * (not yet implemented)
Howto:
1) activate the radio program using activate_radio_program
(gr_scripts/uniflex_wifi_transceiver.grc)
2) read/write parameters
"""
def __init__(self, usrp_addr="addr=192.168.30.2",
ctrl_socket_host="localhost",
ctrl_socket_port=8080,
src_mac="12:34:56:78:90:ab",
dst_mac="30:14:4a:e6:46:e4",
bss_mac="66:66:66:66:66:66",
src_ipv4_address="192.168.123.1",
dst_ipv4_address="192.168.123.2",
gnu_rp_name="uniflex_wifi_transceiver"):
super(WiFiGnuRadioModule, self).__init__(usrp_addr, ctrl_socket_host,
ctrl_socket_port)
self.log = logging.getLogger('WiFiGnuRadioModule')
self.uniflex_path = os.environ['UNIFLEX_PATH']
self.grc_radio_program_name = gnu_rp_name
self.fid = open(os.path.join(self.uniflex_path, "modules", "wifi_gnuradio", "gr_scripts", gnu_rp_name + ".grc"))
self.grc_xml = self.fid.read()
# WiFi Configuration
self.src_mac = src_mac
self.dst_mac = dst_mac
self.bss_mac = bss_mac
self.src_ipv4_address = src_ipv4_address
self.dst_ipv4_address = dst_ipv4_address
sh_logger = logging.getLogger('sh.command')
sh_logger.setLevel(logging.CRITICAL)
@modules.on_start()
def _activate_rp(self):
self.log.info('Activate GR80211 radio program')
self.activate_radio_program(self.grc_radio_program_name, self.grc_xml)
tapIface = "tap0"
while True:
try:
time.sleep(1)
sh.ifconfig(tapIface)
break
except sh.ErrorReturnCode_1:
self.log.debug("Waiting for device: {}".format(tapIface))
self.set_src_mac(self.src_mac)
self.set_dst_mac(self.dst_mac)
self.set_bss_mac(self.bss_mac)
# configure interface
sh.ifconfig(tapIface, "down")
sh.ifconfig(tapIface, "hw", "ether", self.src_mac)
sh.ifconfig(tapIface, "mtu", 440)
sh.ifconfig(tapIface, self.src_ipv4_address, "netmask", "255.255.255.0", "up")
# configure routing
sh.route("del", "-net", "192.168.123.0/24")
sh.route("add", "-net", "192.168.123.0/24", "mss", "400", "dev", tapIface)
# configure arp
sh.arp("-s", self.dst_ipv4_address, self.dst_mac)
def deactivate_radio_program(self, grc_radio_program_name=None, do_pause=False):
# override
super(WiFiGnuRadioModule, self).deactivate_radio_program(self.grc_radio_program_name, False)
def set_channel(self, channel, ifaceName):
# convert channel to freq
freq = channels.ch2rf(channel)
self.log.info('Setting channel for {}:{} to {}/{}'
.format(ifaceName, self.device, channel, freq))
inval = {}
inval['freq'] = freq * 1e6
# delegate to generic function
self.set_parameters(inval)
def get_channel(self, ifaceName):
self.log.info('Getting channel for {}:{}'
.format(ifaceName, self.device))
gvals = ['freq']
# delegate to generic function
freq = self.get_parameters(gvals)
if freq is None:
return None
freq = freq['freq']
freq = float(freq) * 1e-6
# convert freq to channel
ch = channels.rf2ch(int(freq))
return ch
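# Example: WiFi channel 6 corresponds to 2437 MHz; ch2rf/rf2ch convert in MHz,
# hence the 1e6 scaling to/from the Hz value used by the GNU Radio flow graph.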
def set_tx_power(self, power_dBm, ifaceName):
# TODO convert power_dBm to tx power of USRP
power_usrp = power_dBm
self.log.info('Setting power on iface {}:{} to {}'
.format(ifaceName, self.device, str(power_usrp)))
inval = {}
inval['tx_gain'] = power_usrp
# delegate to generic function
self.set_parameters(inval)
def get_tx_power(self, ifaceName):
self.log.debug("getting power of interface: {}".format(ifaceName))
gvals = ['tx_gain']
# delegate to generic function
tx_gain = self.get_parameters(gvals)
# TODO convert to dBm
tx_gain_dBm = tx_gain
return tx_gain_dBm
def set_bandwidth(self, bw, ifaceName):
self.log.info('Setting bandwidth on iface {}:{} to {}'
.format(ifaceName, self.device, str(bw)))
inval = {}
inval['samp_rate'] = bw
# delegate to generic function
self.set_parameters(inval)
def get_bandwidth(self, ifaceName):
self.log.debug("getting bandwidth of interface: {}".format(ifaceName))
gvals = ['samp_rate']
# delegate to generic function
samp_rate = self.get_parameters(gvals)
return samp_rate
def set_rx_gain(self, rx_gain_dBm, ifaceName):
# TODO convert rx_gain_dBm to rx gain of USRP
rx_gain = rx_gain_dBm
self.log.info('Setting rx gain on iface {}:{} to {}'
.format(ifaceName, self.device, str(rx_gain)))
inval = {}
inval['rx_gain'] = rx_gain
# delegate to generic function
self.set_parameters(inval)
def get_rx_gain(self, ifaceName):
self.log.debug("getting rx gain of interface: {}".format(ifaceName))
gvals = ['rx_gain']
# delegate to generic function
rx_gain = self.get_parameters(gvals)
# TODO convert to dBm
rx_gain_dBm = rx_gain
return rx_gain_dBm
def _convert_mac(self, mac):
return str(list(map(lambda x: hex(int(x, 16)), mac.split(":"))))
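# Example: _convert_mac("12:34:56:78:90:ab") returns
# "['0x12', '0x34', '0x56', '0x78', '0x90', '0xab']", i.e. the string form of a
# list of per-byte hex literals, presumably the format the radio program expects.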
def set_src_mac(self, mac_addr, ifaceName=None):
self.log.info('Set SRC MAC address to {}'.format(mac_addr))
mac_addr = self._convert_mac(mac_addr)
inval = {}
inval['src_mac'] = mac_addr
self.set_parameters(inval)
def get_src_mac(self, ifaceName=None):
self.log.info('Get SRC MAC address')
gvals = ['src_mac']
src_mac = self.get_parameters(gvals)
return src_mac
def set_dst_mac(self, mac_addr, ifaceName=None):
self.log.info('Set DST MAC address to {}'.format(mac_addr))
mac_addr = self._convert_mac(mac_addr)
inval = {}
inval['dst_mac'] = mac_addr
self.set_parameters(inval)
def get_dst_mac(self, ifaceName=None):
self.log.info('Get DST MAC address')
gvals = ['dst_mac']
dst_mac = self.get_parameters(gvals)
return dst_mac
def set_bss_mac(self, mac_addr, ifaceName=None):
self.log.info('Set BSS MAC address to {}'.format(mac_addr))
mac_addr = self._convert_mac(mac_addr)
inval = {}
inval['bss_mac'] = mac_addr
self.set_parameters(inval)
def get_bss_mac(self, ifaceName=None):
self.log.info('Get BSS MAC address')
gvals = ['bss_mac']
bss_mac = self.get_parameters(gvals)
return bss_mac
|
[] |
[] |
[
"UNIFLEX_PATH"
] |
[]
|
["UNIFLEX_PATH"]
|
python
| 1 | 0 | |
cmd/kubeapps-apis/plugins/fluxv2/packages/v1alpha1/cache/chart_cache.go
|
/*
Copyright © 2021 VMware
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"bytes"
"encoding/gob"
"fmt"
"io/ioutil"
"os"
"reflect"
"strings"
"sync"
"time"
"github.com/go-redis/redis/v8"
"github.com/kubeapps/kubeapps/cmd/kubeapps-apis/plugins/fluxv2/packages/v1alpha1/common"
"github.com/kubeapps/kubeapps/pkg/chart/models"
httpclient "github.com/kubeapps/kubeapps/pkg/http-client"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
k8scache "k8s.io/client-go/tools/cache"
log "k8s.io/klog/v2"
)
const (
// max number of retries due to transient errors
maxChartCacheRetries = 5
// number of background workers to process work queue items
maxChartCacheWorkers = 2
)
var (
// pretty much a constant, init pattern similar to that of asset-syncer
verboseChartCacheQueue = os.Getenv("DEBUG_CHART_CACHE_QUEUE") == "true"
)
type ChartCache struct {
// the redis client
redisCli *redis.Client
// queue is a rate limited work queue. This is used to queue work to be
// processed instead of performing it as soon as a change happens. This
// means we can ensure we only process a fixed amount of resources at a
// time and makes it easy to ensure we are never processing the same item
// simultaneously in different workers.
queue RateLimitingInterface
// this is a transient (temporary) store only used to keep track of
// state (chart url, etc) during the time window between AddRateLimited()
// is called by the producer and runWorker consumer picks up
// the corresponding item from the queue. Upon successful processing
// of the item, the corresponding store entry is deleted
processing k8scache.Store
// I am using a Read/Write Mutex to gate access to cache's resync() operation, which is
// significant in that it flushes the whole redis cache and re-populates the state from k8s.
// When that happens we don't really want any concurrent access to the cache until the resync()
// operation is complete. In other words, we want to:
// - be able to have multiple concurrent readers (goroutines doing GetForOne()/GetForMultiple())
// - only a single writer (goroutine doing a resync()) is allowed, and while its doing its job
// no readers are allowed
resyncCond *sync.Cond
// bi-directional channel used exclusively by unit tests
resyncCh chan int
}
// chartCacheStoreEntry is what we'll be storing in the processing store
// note that the url and deleted fields are mutually exclusive; you must either:
// - set url to a non-empty string, or
// - set the deleted flag to true.
// setting both does not make sense
type chartCacheStoreEntry struct {
namespace string
id string
version string
url string
clientOptions *common.ClientOptions
deleted bool
}
func NewChartCache(name string, redisCli *redis.Client, stopCh <-chan struct{}) (*ChartCache, error) {
log.Infof("+NewChartCache(%s, %v)", name, redisCli)
if redisCli == nil {
return nil, fmt.Errorf("server not configured with redis client")
}
c := ChartCache{
redisCli: redisCli,
queue: NewRateLimitingQueue(name, verboseChartCacheQueue),
processing: k8scache.NewStore(chartCacheKeyFunc),
resyncCond: sync.NewCond(&sync.RWMutex{}),
}
// each loop iteration will launch a single worker that processes items on the work
// queue as they come in. runWorker will loop until "something bad" happens.
// The wait.Until call will then restart the worker after one second
for i := 0; i < maxChartCacheWorkers; i++ {
// let's give each worker a unique name - easier to debug
name := fmt.Sprintf("%s-worker-%d", c.queue.Name(), i)
fn := func() {
c.runWorker(name)
}
go wait.Until(fn, time.Second, stopCh)
}
return &c, nil
}
// this func will enqueue work items into chart work queue and return.
// the charts will be synced by a worker thread running in the background
func (c *ChartCache) SyncCharts(charts []models.Chart, clientOptions *common.ClientOptions) error {
log.Infof("+SyncCharts()")
totalToSync := 0
defer func() {
log.Infof("-SyncCharts(): [%d] total charts to sync", totalToSync)
}()
// let's just cache the latest one for now. The chart versions array would
// have already been sorted and the latest chart version will be at array index 0
for _, chart := range charts {
// add chart to temp store. It will be removed when processed by background
// runWorker/syncHandler
if len(chart.ChartVersions) == 0 {
log.Warningf("Skipping chart [%s] due to empty version array", chart.ID)
continue
} else if len(chart.ChartVersions[0].URLs) == 0 {
log.Warningf("Chart: [%s], version: [%s] has no URLs", chart.ID, chart.ChartVersions[0].Version)
continue
}
// The tarball URL will always be the first URL in the repo.chartVersions.
// So says the helm plugin :-)
entry := chartCacheStoreEntry{
namespace: chart.Repo.Namespace,
id: chart.ID,
version: chart.ChartVersions[0].Version,
url: chart.ChartVersions[0].URLs[0],
clientOptions: clientOptions,
deleted: false,
}
if key, err := chartCacheKeyFunc(entry); err != nil {
log.Errorf("Failed to get key for chart due to %+v", err)
} else {
c.processing.Add(entry)
c.queue.AddRateLimited(key)
totalToSync++
}
}
return nil
}
// runWorker is a long-running function that will continually call the
// processNextWorkItem function in order to read and process a message on the
// workqueue.
func (c *ChartCache) runWorker(workerName string) {
log.Infof("+runWorker(%s)", workerName)
defer log.Infof("-runWorker(%s)", workerName)
for c.processNextWorkItem(workerName) {
}
}
// processNextWorkItem will read a single work item off the work queue and
// attempt to process it, by calling the syncHandler.
// ref: https://engineering.bitnami.com/articles/kubewatch-an-example-of-kubernetes-custom-controller.html
// ref: https://github.com/bitnami-labs/kubewatch/blob/master/pkg/controller/controller.go
func (c *ChartCache) processNextWorkItem(workerName string) bool {
log.Infof("+processNextWorkItem(%s)", workerName)
defer log.Infof("-processNextWorkItem(%s)", workerName)
obj, shutdown := c.queue.Get()
if shutdown {
log.Infof("[%s] shutting down...", workerName)
return false
}
c.resyncCond.L.(*sync.RWMutex).RLock()
defer c.resyncCond.L.(*sync.RWMutex).RUnlock()
// We must remember to call Done so the queue knows we have finished
// processing this item. We also must remember to call Forget if we
// do not want this work item being re-queued. For example, we do
// not call Forget if a transient error occurs, instead the item is
// put back on the queue and attempted again after a back-off
// period.
key, ok := obj.(string)
if !ok {
c.queue.Done(obj)
// As the item in the work queue is actually invalid, we call
// Forget() here else we'd go into a loop of attempting to
// process a work item that is invalid.
c.queue.Forget(obj)
runtime.HandleError(fmt.Errorf("expected string in work queue but got %#v", obj))
return true
}
if !c.queue.IsProcessing(key) {
// This is the scenario where between the call to .Get() and
// here there was a resync event, so we can discard this item
return true
}
defer c.queue.Done(obj)
if err := c.syncHandler(workerName, key); err == nil {
// No error, reset the ratelimit counters
c.queue.Forget(key)
c.processing.Delete(key)
} else if c.queue.NumRequeues(key) < maxChartCacheRetries {
log.Errorf("Error processing [%s] (will retry [%d] times): %v",
key, maxChartCacheRetries-c.queue.NumRequeues(key), err)
c.queue.AddRateLimited(key)
} else {
// err != nil and too many retries
log.Errorf("Error processing %s (giving up): %v", key, err)
c.queue.Forget(key)
c.processing.Delete(key)
runtime.HandleError(fmt.Errorf("error syncing key [%s] due to: %v", key, err))
}
return true
}
func (c *ChartCache) DeleteChartsForRepo(repo *types.NamespacedName) error {
log.Infof("+DeleteChartsFor(%s)", repo)
defer log.Infof("-DeleteChartsFor(%s)", repo)
// need to get a list of all charts/versions for this repo that are either:
// a. already in the cache OR
// b. being processed
// this loop should take care of (a)
// glob-style pattern, you can use https://www.digitalocean.com/community/tools/glob to test
// also ref. https://stackoverflow.com/questions/4006324/how-to-atomically-delete-keys-matching-a-pattern-using-redis
match := fmt.Sprintf("helmcharts%s%s%s%s/*%s*",
keySegmentsSeparator,
repo.Namespace,
keySegmentsSeparator,
repo.Name,
keySegmentsSeparator)
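// e.g. for a repo {Namespace: "default", Name: "bitnami"} and assuming
// keySegmentsSeparator is ":", this yields the pattern "helmcharts:default:bitnami/*:*"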
redisKeysToDelete := sets.String{}
// https://redis.io/commands/scan An iteration starts when the cursor is set to 0,
// and terminates when the cursor returned by the server is 0
cursor := uint64(0)
for {
var keys []string
var err error
keys, cursor, err = c.redisCli.Scan(c.redisCli.Context(), cursor, match, 0).Result()
if err != nil {
return err
}
for _, k := range keys {
redisKeysToDelete.Insert(k)
}
if cursor == 0 {
break
}
}
// we still need to take care of (b)
for _, k := range c.processing.ListKeys() {
if namespace, chartID, _, err := c.fromKey(k); err != nil {
log.Errorf("%+v", err)
} else {
parts := strings.Split(chartID, "/")
if repo.Namespace == namespace && repo.Name == parts[0] {
redisKeysToDelete.Insert(k)
}
}
}
for k := range redisKeysToDelete {
if namespace, chartID, chartVersion, err := c.fromKey(k); err != nil {
log.Errorf("%+v", err)
} else {
entry := chartCacheStoreEntry{
namespace: namespace,
id: chartID,
version: chartVersion,
deleted: true,
}
c.processing.Add(entry)
log.V(4).Infof("Marked key [%s] to be deleted", k)
c.queue.Add(k)
}
}
return nil
}
func (c *ChartCache) OnResync() error {
log.Infof("+OnResync(), queue: [%s], size: [%d]", c.queue.Name(), c.queue.Len())
c.resyncCond.L.Lock()
defer func() {
if c.resyncCh != nil {
close(c.resyncCh)
c.resyncCh = nil
}
c.resyncCond.L.Unlock()
c.resyncCond.Broadcast()
log.Info("-OnResync()")
}()
if c.resyncCh != nil {
c.resyncCh <- c.queue.Len()
// now let's wait for the client (unit test code) to signal that it's ok to proceed
// to re-build the whole cache. Presumably the client will have set up the
// right expectations for redis mock. Don't care what the client sends,
// just need an indication its ok to proceed
<-c.resyncCh
}
log.Infof("Resetting work queue [%s] and store...", c.queue.Name())
c.queue.Reset()
c.processing = k8scache.NewStore(chartCacheKeyFunc)
return nil
}
// this is what we store in the cache for each cached repo
// all struct fields are capitalized so they're exported by gob encoding
type chartCacheEntryValue struct {
ChartTarball []byte
}
// syncs the current state of the given resource in k8s with that in the cache
func (c *ChartCache) syncHandler(workerName, key string) error {
log.Infof("+syncHandler(%s, %s)", workerName, key)
defer log.Infof("-syncHandler(%s, %s)", workerName, key)
entry, exists, err := c.processing.GetByKey(key)
if err != nil {
return err
} else if !exists {
return fmt.Errorf("no object exists in cache store for key: [%s]", key)
}
chart, ok := entry.(chartCacheStoreEntry)
if !ok {
return fmt.Errorf("unexpected object in cache store: [%s]", reflect.TypeOf(entry))
}
if chart.deleted {
// TODO: (gfichtenholt) DEL has the capability to delete multiple keys in one
// atomic operation. It would be nice to come up with a way to utilize that here
// the problem is the queue is designed to work on one item at a time. I think to
// be able to do this, we need to add a .GetAll() method to RateLimitingInterface,
// which will be a little tricky: the logic needs to be atomic and also handle
// the case where *SOME* of the items fail and some succeed
keysRemoved, _ := c.redisCli.Del(c.redisCli.Context(), key).Result()
log.Infof("Redis [DEL %s]: %d", key, keysRemoved)
} else {
// unlike helm repositories, specific version chart tarball contents never changes
// so before embarking on expensive operation such as getting chart tarball
// via HTTP/S, first check whether the cache already has this entry
if keysExist, err := c.redisCli.Exists(c.redisCli.Context(), key).Result(); err != nil {
return fmt.Errorf("error checking whether key [%s] exists in redis: %+v", key, err)
} else {
log.Infof("Redis [EXISTS %s]: %d", key, keysExist)
if keysExist == 1 {
// nothing to do
return nil
}
}
byteArray, err := ChartCacheComputeValue(chart.id, chart.url, chart.version, chart.clientOptions)
if err != nil {
return err
}
startTime := time.Now()
result, err := c.redisCli.Set(c.redisCli.Context(), key, byteArray, 0).Result()
if err != nil {
return fmt.Errorf("failed to set value for object with key [%s] in cache due to: %v", key, err)
} else {
duration := time.Since(startTime)
usedMemory, totalMemory := common.RedisMemoryStats(c.redisCli)
log.Infof("Redis [SET %s]: %s in [%d] ms. Redis [INFO memory]: [%s/%s]",
key, result, duration.Milliseconds(), usedMemory, totalMemory)
}
}
return err
}
// this is effectively a cache GET operation
func (c *ChartCache) FetchForOne(key string) ([]byte, error) {
log.Infof("+FetchForOne(%s)", key)
// read back from cache: should be either:
// - what we previously wrote OR
// - redis.Nil if the key does not exist or has been evicted due to memory pressure/TTL expiry
//
byteArray, err := c.redisCli.Get(c.redisCli.Context(), key).Bytes()
// debugging an intermittent issue
if err == redis.Nil {
log.Infof("Redis [GET %s]: Nil", key)
return nil, nil
} else if err != nil {
return nil, fmt.Errorf("fetchForOne() failed to get value for key [%s] from cache due to: %v", key, err)
}
log.Infof("Redis [GET %s]: %d bytes read", key, len(byteArray))
dec := gob.NewDecoder(bytes.NewReader(byteArray))
var entryValue chartCacheEntryValue
if err := dec.Decode(&entryValue); err != nil {
return nil, err
}
return entryValue.ChartTarball, nil
}
/*
GetForOne() is like FetchForOne() but if there is a cache miss, it will then get chart data based on
the corresponding repo object, process it and then add it to the cache and return the
result.
This func should:
• return an error if the entry could not be computed due to not being able to read
repos secretRef.
• return nil for any invalid chart name.
• otherwise return the bytes stored in the
chart cache for the given entry
*/
func (c *ChartCache) GetForOne(key string, chart *models.Chart, clientOptions *common.ClientOptions) ([]byte, error) {
// TODO (gfichtenholt) it'd be nice to get rid of all arguments except for the key, similar to that of
// NamespacedResourceWatcherCache.GetForOne()
log.Infof("+GetForOne(%s)", key)
var value []byte
var err error
if value, err = c.FetchForOne(key); err != nil {
return nil, err
} else if value == nil {
// cache miss
namespace, chartID, version, err := c.fromKey(key)
if err != nil {
return nil, err
}
if namespace != chart.Repo.Namespace || chartID != chart.ID {
return nil, fmt.Errorf("unexpected state for chart with key [%s]", key)
}
var entry *chartCacheStoreEntry
for _, v := range chart.ChartVersions {
if v.Version == version {
if len(v.URLs) == 0 {
log.Warningf("chart: [%s], version: [%s] has no URLs", chart.ID, v.Version)
} else {
entry = &chartCacheStoreEntry{
namespace: namespace,
id: chartID,
version: v.Version,
url: v.URLs[0],
clientOptions: clientOptions,
}
}
break
}
}
if entry != nil {
c.processing.Add(*entry)
c.queue.Add(key)
// now need to wait until this item has been processed by runWorker().
c.queue.WaitUntilForgotten(key)
return c.FetchForOne(key)
}
}
return value, nil
}
func (c *ChartCache) KeyFor(namespace, chartID, chartVersion string) (string, error) {
return chartCacheKeyFor(namespace, chartID, chartVersion)
}
func (c *ChartCache) String() string {
return fmt.Sprintf("ChartCache[queue size: [%d]]", c.queue.Len())
}
// the opposite of keyFor
// the goal is to keep the details of what exactly the key looks like localized to one piece of code
func (c *ChartCache) fromKey(key string) (namespace, chartID, chartVersion string, err error) {
parts := strings.Split(key, keySegmentsSeparator)
if len(parts) != 4 || parts[0] != "helmcharts" || len(parts[1]) == 0 || len(parts[2]) == 0 || len(parts[3]) == 0 {
return "", "", "", status.Errorf(codes.Internal, "invalid key [%s]", key)
}
return parts[1], parts[2], parts[3], nil
}
// this func is used by unit tests only
func (c *ChartCache) ExpectAdd(key string) {
c.queue.ExpectAdd(key)
}
// this func is used by unit tests only
func (c *ChartCache) WaitUntilForgotten(key string) {
c.queue.WaitUntilForgotten(key)
}
func (c *ChartCache) Shutdown() {
c.queue.ShutDown()
}
// this func is used by unit tests only
// returns a bidirectional channel where the number of items in the work queue will be sent
// at the time of the resync() call and guarantees no more work items will be processed
// until resync() finishes
func (c *ChartCache) ExpectResync() (chan int, error) {
log.Infof("+ExpectResync()")
c.resyncCond.L.Lock()
defer func() {
c.resyncCond.L.Unlock()
log.Infof("-ExpectResync()")
}()
if c.resyncCh != nil {
return nil, status.Errorf(codes.Internal, "ExpectSync() already called")
} else {
c.resyncCh = make(chan int, 1)
return c.resyncCh, nil
}
}
// this func is used by unit tests only
// By the end of the call the work queue should be empty
func (c *ChartCache) WaitUntilResyncComplete() {
log.Infof("+WaitUntilResyncComplete()")
c.resyncCond.L.Lock()
defer func() {
c.resyncCond.L.Unlock()
log.Infof("-WaitUntilResyncComplete()")
}()
for c.resyncCh != nil {
c.resyncCond.Wait()
}
}
func chartCacheKeyFunc(obj interface{}) (string, error) {
if entry, ok := obj.(chartCacheStoreEntry); !ok {
return "", fmt.Errorf("unexpected object in chartCacheKeyFunc: [%s]", reflect.TypeOf(obj))
} else {
return chartCacheKeyFor(entry.namespace, entry.id, entry.version)
}
}
func chartCacheKeyFor(namespace, chartID, chartVersion string) (string, error) {
if namespace == "" || chartID == "" || chartVersion == "" {
return "", fmt.Errorf("invalid chart in chartCacheKeyFor: [%s,%s,%s]", namespace, chartID, chartVersion)
}
var err error
if chartID, err = common.GetUnescapedChartID(chartID); err != nil {
return "", fmt.Errorf("invalid chart ID in chartCacheKeyFor: [%s]: %v", chartID, err)
}
// redis convention on key format
// https://redis.io/topics/data-types-intro
// Try to stick with a schema. For instance "object-type:id" is a good idea, as in "user:1000".
// We will use "helmcharts:ns:chartID:chartVersion"
// notice that chartID is of the form "repoName/id", so it includes the repo name
return fmt.Sprintf("helmcharts%s%s%s%s%s%s",
keySegmentsSeparator,
namespace,
keySegmentsSeparator,
chartID,
keySegmentsSeparator,
chartVersion), nil
}
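// Example: chartCacheKeyFor("default", "bitnami/apache", "8.5.2") returns
// "helmcharts:default:bitnami/apache:8.5.2", assuming keySegmentsSeparator is ":".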
// FYI: The work queue is able to retry transient HTTP errors
func ChartCacheComputeValue(chartID, chartUrl, chartVersion string, clientOptions *common.ClientOptions) ([]byte, error) {
client, headers, err := common.NewHttpClientAndHeaders(clientOptions)
if err != nil {
return nil, err
}
reader, _, err := httpclient.GetStream(chartUrl, client, headers)
if reader != nil {
defer reader.Close()
}
if err != nil {
return nil, err
}
chartTgz, err := ioutil.ReadAll(reader)
if err != nil {
return nil, err
}
log.Infof("Successfully fetched details for chart: [%s], version: [%s], url: [%s], details: [%d] bytes",
chartID, chartVersion, chartUrl, len(chartTgz))
cacheEntryValue := chartCacheEntryValue{
ChartTarball: chartTgz,
}
// use gob encoding instead of json, it performs much better
var gobBuf bytes.Buffer
enc := gob.NewEncoder(&gobBuf)
if err := enc.Encode(cacheEntryValue); err != nil {
return nil, err
} else {
return gobBuf.Bytes(), nil
}
}
|
[
"\"DEBUG_CHART_CACHE_QUEUE\""
] |
[] |
[
"DEBUG_CHART_CACHE_QUEUE"
] |
[]
|
["DEBUG_CHART_CACHE_QUEUE"]
|
go
| 1 | 0 | |
examples/pwr_run/checkpointing/nonpc_short/timed_feedback/job11.py
|
"""
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 64
args_lr = 0.008
args_model = 'vgg16'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_feedback/' + job_name + '*'
total_epochs = 8
starting_epoch = 0
# first step is to update the PID
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
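# Writing to pid_lock.json and then renaming it to pid.json publishes the
# updated PID table in a single step; the scheduler is assumed to recreate
# pid_lock.json before the next update.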
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
if '16' in args_model:
base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '19' in args_model:
base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
model.add(layers.BatchNormalization())
model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
epoch_waste_dict = {}
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
epoch_waste_dict[job_name] += epoch_waste_time
json_file3 = json.dumps(epoch_waste_dict)
with open('epoch_waste.json', 'w') as fp:
fp.write(json_file3)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_feedback/' + job_name + '_' + str(current_epoch) + '.h5')
print('(SIGTERM) terminating the process')
checkpoint_dict = {}
with open('checkpoint.json', 'r') as fp:
checkpoint_dict = json.load(fp)
checkpoint_dict[job_name] = 1
json_file3 = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp:
fp.write(json_file3)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
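# The scheduler can now checkpoint this job externally, e.g. with
# "kill -15 <pid from pid.json>"; terminateProcess saves the model and records
# the wasted (partial) epoch time before exiting.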
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
def on_epoch_end(self, epoch, logs=None):
# send message of epoch end
message = job_name + ' epoch_end'
send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
|
[] |
[] |
[
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"]
|
python
| 2 | 0 | |
helm-service/pkg/mesh/ingress_config.go
|
package mesh
import (
keptnv2 "github.com/keptn/go-utils/pkg/lib/v0_2_0"
"os"
"strings"
)
// GetIngressHostnameSuffix returns the ingress hostname suffix
func GetIngressHostnameSuffix() string {
if os.Getenv("INGRESS_HOSTNAME_SUFFIX") != "" {
return os.Getenv("INGRESS_HOSTNAME_SUFFIX")
}
return "svc.cluster.local"
}
// GetIngressProtocol returns the ingress protocol
func GetIngressProtocol() string {
if os.Getenv("INGRESS_PROTOCOL") != "" {
return strings.ToLower(os.Getenv("INGRESS_PROTOCOL"))
}
return "http"
}
// GetIngressPort returns the ingress port
func GetIngressPort() string {
if os.Getenv("INGRESS_PORT") != "" {
return os.Getenv("INGRESS_PORT")
}
return "80"
}
// GetIngressGateway returns the ingress gateway
func GetIngressGateway() string {
if os.Getenv("ISTIO_GATEWAY") != "" {
return os.Getenv("ISTIO_GATEWAY")
}
return "public-gateway.istio-system"
}
// GetLocalDeploymentURI returns URIs where a service is accessible from within the cluster
func GetLocalDeploymentURI(event keptnv2.EventData, port string) []string {
return []string{"http://" + event.Service + "." + event.Project + "-" + event.Stage + ":" + port}
}
// GetPublicDeploymentURI returns URIs where a service is exposed
func GetPublicDeploymentURI(event keptnv2.EventData) []string {
publicHostName := GetPublicDeploymentHostNameTemplate()
publicHostName = strings.ReplaceAll(publicHostName, "${INGRESS_PROTOCOL}", GetIngressProtocol())
publicHostName = strings.ReplaceAll(publicHostName, "${SERVICE}", event.Service)
publicHostName = strings.ReplaceAll(publicHostName, "${PROJECT}", event.Project)
publicHostName = strings.ReplaceAll(publicHostName, "${STAGE}", event.Stage)
publicHostName = strings.ReplaceAll(publicHostName, "${INGRESS_HOSTNAME_SUFFIX}", GetIngressHostnameSuffix())
publicHostName = strings.ReplaceAll(publicHostName, "${INGRESS_PORT}", GetIngressPort())
return []string{publicHostName}
}
// GetPublicDeploymentHostNameTemplate returns the HostName of the service
func GetPublicDeploymentHostNameTemplate() string {
hostNameTemplate := os.Getenv("HOSTNAME_TEMPLATE")
if hostNameTemplate == "" {
return "${INGRESS_PROTOCOL}://${SERVICE}.${PROJECT}-${STAGE}.${INGRESS_HOSTNAME_SUFFIX}:${INGRESS_PORT}"
}
return strings.ToUpper(hostNameTemplate)
}
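// Example: with none of the INGRESS_*/HOSTNAME_TEMPLATE variables set, an event
// with (hypothetical) Service "carts", Project "sockshop" and Stage "dev"
// resolves to "http://carts.sockshop-dev.svc.cluster.local:80".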
|
[
"\"INGRESS_HOSTNAME_SUFFIX\"",
"\"INGRESS_HOSTNAME_SUFFIX\"",
"\"INGRESS_PROTOCOL\"",
"\"INGRESS_PROTOCOL\"",
"\"INGRESS_PORT\"",
"\"INGRESS_PORT\"",
"\"ISTIO_GATEWAY\"",
"\"ISTIO_GATEWAY\"",
"\"HOSTNAME_TEMPLATE\""
] |
[] |
[
"HOSTNAME_TEMPLATE",
"INGRESS_HOSTNAME_SUFFIX",
"INGRESS_PROTOCOL",
"INGRESS_PORT",
"ISTIO_GATEWAY"
] |
[]
|
["HOSTNAME_TEMPLATE", "INGRESS_HOSTNAME_SUFFIX", "INGRESS_PROTOCOL", "INGRESS_PORT", "ISTIO_GATEWAY"]
|
go
| 5 | 0 | |
migration_test.go
|
package gorm_test
import (
"database/sql"
"database/sql/driver"
"errors"
"fmt"
"os"
"reflect"
"strconv"
"testing"
"time"
"github.com/remohammadi/gorm"
)
type User struct {
Id int64
Age int64
UserNum Num
Name string `sql:"size:255"`
Email string
Birthday *time.Time // Time
CreatedAt time.Time // CreatedAt: Time of record is created, will be insert automatically
UpdatedAt time.Time // UpdatedAt: Time of record is updated, will be updated automatically
Emails []Email // Embedded structs
BillingAddress Address // Embedded struct
BillingAddressID sql.NullInt64 // Embedded struct's foreign key
ShippingAddress Address // Embedded struct
ShippingAddressId int64 // Embedded struct's foreign key
CreditCard CreditCard
Latitude float64
Languages []Language `gorm:"many2many:user_languages;"`
CompanyID *int
Company Company
Role Role
Password EncryptedData
PasswordHash []byte
IgnoreMe int64 `sql:"-"`
IgnoreStringSlice []string `sql:"-"`
Ignored struct{ Name string } `sql:"-"`
IgnoredPointer *User `sql:"-"`
}
type NotSoLongTableName struct {
Id int64
ReallyLongThingID int64
ReallyLongThing ReallyLongTableNameToTestMySQLNameLengthLimit
}
type ReallyLongTableNameToTestMySQLNameLengthLimit struct {
Id int64
}
type ReallyLongThingThatReferencesShort struct {
Id int64
ShortID int64
Short Short
}
type Short struct {
Id int64
}
type CreditCard struct {
ID int8
Number string
UserId sql.NullInt64
CreatedAt time.Time `sql:"not null"`
UpdatedAt time.Time
DeletedAt *time.Time `sql:"column:deleted_time"`
}
type Email struct {
Id int16
UserId int
Email string `sql:"type:varchar(100);"`
CreatedAt time.Time
UpdatedAt time.Time
}
type Address struct {
ID int
Address1 string
Address2 string
Post string
CreatedAt time.Time
UpdatedAt time.Time
DeletedAt *time.Time
}
type Language struct {
gorm.Model
Name string
Users []User `gorm:"many2many:user_languages;"`
}
type Product struct {
Id int64
Code string
Price int64
CreatedAt time.Time
UpdatedAt time.Time
AfterFindCallTimes int64
BeforeCreateCallTimes int64
AfterCreateCallTimes int64
BeforeUpdateCallTimes int64
AfterUpdateCallTimes int64
BeforeSaveCallTimes int64
AfterSaveCallTimes int64
BeforeDeleteCallTimes int64
AfterDeleteCallTimes int64
}
type Company struct {
Id int64
Name string
Owner *User `sql:"-"`
}
type EncryptedData []byte
func (data *EncryptedData) Scan(value interface{}) error {
if b, ok := value.([]byte); ok {
if len(b) < 3 || b[0] != '*' || b[1] != '*' || b[2] != '*' {
return errors.New("Too short")
}
*data = b[3:]
return nil
}
return errors.New("Bytes expected")
}
func (data EncryptedData) Value() (driver.Value, error) {
if len(data) > 0 && data[0] == 'x' {
//needed to test failures
return nil, errors.New("Should not start with 'x'")
}
//prepend asterisks
return append([]byte("***"), data...), nil
}
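// Example round-trip: EncryptedData("secret").Value() yields []byte("***secret"),
// and Scan([]byte("***secret")) restores EncryptedData("secret"); inputs lacking
// the three-asterisk prefix fail with "Too short".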
type Role struct {
Name string `gorm:"size:256"`
}
func (role *Role) Scan(value interface{}) error {
if b, ok := value.([]uint8); ok {
role.Name = string(b)
} else {
role.Name = value.(string)
}
return nil
}
func (role Role) Value() (driver.Value, error) {
return role.Name, nil
}
func (role Role) IsAdmin() bool {
return role.Name == "admin"
}
type Num int64
func (i *Num) Scan(src interface{}) error {
switch s := src.(type) {
case []byte:
n, _ := strconv.Atoi(string(s))
*i = Num(n)
case int64:
*i = Num(s)
default:
return errors.New("Cannot scan NamedInt from " + reflect.ValueOf(src).String())
}
return nil
}
type Animal struct {
Counter uint64 `gorm:"primary_key:yes"`
Name string `sql:"DEFAULT:'galeone'"`
From string //test reserved sql keyword as field name
Age time.Time `sql:"DEFAULT:current_timestamp"`
unexported string // unexported value
CreatedAt time.Time
UpdatedAt time.Time
}
type JoinTable struct {
From uint64
To uint64
Time time.Time `sql:"default: null"`
}
type Post struct {
Id int64
CategoryId sql.NullInt64
MainCategoryId int64
Title string
Body string
Comments []*Comment
Category Category
MainCategory Category
}
type Category struct {
gorm.Model
Name string
Categories []Category
CategoryID *uint
}
type Comment struct {
gorm.Model
PostId int64
Content string
Post Post
}
// Scanner
type NullValue struct {
Id int64
Name sql.NullString `sql:"not null"`
Gender *sql.NullString `sql:"not null"`
Age sql.NullInt64
Male sql.NullBool
Height sql.NullFloat64
AddedAt NullTime
}
type NullTime struct {
Time time.Time
Valid bool
}
func (nt *NullTime) Scan(value interface{}) error {
if value == nil {
nt.Valid = false
return nil
}
nt.Time, nt.Valid = value.(time.Time), true
return nil
}
func (nt NullTime) Value() (driver.Value, error) {
if !nt.Valid {
return nil, nil
}
return nt.Time, nil
}
func getPreparedUser(name string, role string) *User {
var company Company
DB.Where(Company{Name: role}).FirstOrCreate(&company)
return &User{
Name: name,
Age: 20,
Role: Role{role},
BillingAddress: Address{Address1: fmt.Sprintf("Billing Address %v", name)},
ShippingAddress: Address{Address1: fmt.Sprintf("Shipping Address %v", name)},
CreditCard: CreditCard{Number: fmt.Sprintf("123456%v", name)},
Emails: []Email{
{Email: fmt.Sprintf("user_%[email protected]", name)}, {Email: fmt.Sprintf("user_%[email protected]", name)},
},
Company: company,
Languages: []Language{
{Name: fmt.Sprintf("lang_1_%v", name)},
{Name: fmt.Sprintf("lang_2_%v", name)},
},
}
}
func runMigration() {
if err := DB.DropTableIfExists(&User{}).Error; err != nil {
fmt.Printf("Got error when try to delete table users, %+v\n", err)
}
for _, table := range []string{"animals", "user_languages"} {
DB.Exec(fmt.Sprintf("drop table %v;", table))
}
values := []interface{}{&Short{}, &ReallyLongThingThatReferencesShort{}, &ReallyLongTableNameToTestMySQLNameLengthLimit{}, &NotSoLongTableName{}, &Product{}, &Email{}, &Address{}, &CreditCard{}, &Company{}, &Role{}, &Language{}, &HNPost{}, &EngadgetPost{}, &Animal{}, &User{}, &JoinTable{}, &Post{}, &Category{}, &Comment{}, &Cat{}, &Dog{}, &Hamster{}, &Toy{}, &ElementWithIgnoredField{}}
for _, value := range values {
DB.DropTable(value)
}
if err := DB.AutoMigrate(values...).Error; err != nil {
panic(fmt.Sprintf("No error should happen when create table, but got %+v", err))
}
}
func TestIndexes(t *testing.T) {
if err := DB.Model(&Email{}).AddIndex("idx_email_email", "email").Error; err != nil {
t.Errorf("Got error when tried to create index: %+v", err)
}
scope := DB.NewScope(&Email{})
if !scope.Dialect().HasIndex(scope.TableName(), "idx_email_email") {
t.Errorf("Email should have index idx_email_email")
}
if err := DB.Model(&Email{}).RemoveIndex("idx_email_email").Error; err != nil {
t.Errorf("Got error when tried to remove index: %+v", err)
}
if scope.Dialect().HasIndex(scope.TableName(), "idx_email_email") {
t.Errorf("Email's index idx_email_email should be deleted")
}
if err := DB.Model(&Email{}).AddIndex("idx_email_email_and_user_id", "user_id", "email").Error; err != nil {
t.Errorf("Got error when tried to create index: %+v", err)
}
if !scope.Dialect().HasIndex(scope.TableName(), "idx_email_email_and_user_id") {
t.Errorf("Email should have index idx_email_email_and_user_id")
}
if err := DB.Model(&Email{}).RemoveIndex("idx_email_email_and_user_id").Error; err != nil {
t.Errorf("Got error when tried to remove index: %+v", err)
}
if scope.Dialect().HasIndex(scope.TableName(), "idx_email_email_and_user_id") {
t.Errorf("Email's index idx_email_email_and_user_id should be deleted")
}
if err := DB.Model(&Email{}).AddUniqueIndex("idx_email_email_and_user_id", "user_id", "email").Error; err != nil {
t.Errorf("Got error when tried to create index: %+v", err)
}
if !scope.Dialect().HasIndex(scope.TableName(), "idx_email_email_and_user_id") {
t.Errorf("Email should have index idx_email_email_and_user_id")
}
if DB.Save(&User{Name: "unique_indexes", Emails: []Email{{Email: "[email protected]"}, {Email: "[email protected]"}, {Email: "[email protected]"}}}).Error == nil {
t.Errorf("Should get to create duplicate record when having unique index")
}
var user = User{Name: "sample_user"}
DB.Save(&user)
if DB.Model(&user).Association("Emails").Append(Email{Email: "[email protected]"}, Email{Email: "[email protected]"}).Error != nil {
t.Errorf("Should get no error when append two emails for user")
}
if DB.Model(&user).Association("Emails").Append(Email{Email: "[email protected]"}, Email{Email: "[email protected]"}).Error == nil {
t.Errorf("Should get no duplicated email error when insert duplicated emails for a user")
}
if err := DB.Model(&Email{}).RemoveIndex("idx_email_email_and_user_id").Error; err != nil {
t.Errorf("Got error when tried to remove index: %+v", err)
}
if scope.Dialect().HasIndex(scope.TableName(), "idx_email_email_and_user_id") {
t.Errorf("Email's index idx_email_email_and_user_id should be deleted")
}
if DB.Save(&User{Name: "unique_indexes", Emails: []Email{{Email: "[email protected]"}, {Email: "[email protected]"}}}).Error != nil {
t.Errorf("Should be able to create duplicated emails after remove unique index")
}
}
type EmailWithIdx struct {
Id int64
UserId int64
Email string `sql:"index:idx_email_agent"`
UserAgent string `sql:"index:idx_email_agent"`
RegisteredAt *time.Time `sql:"unique_index"`
CreatedAt time.Time
UpdatedAt time.Time
}
func TestAutoMigration(t *testing.T) {
DB.AutoMigrate(&Address{})
DB.DropTable(&EmailWithIdx{})
if err := DB.AutoMigrate(&EmailWithIdx{}).Error; err != nil {
t.Errorf("Auto Migrate should not raise any error")
}
now := time.Now()
DB.Save(&EmailWithIdx{Email: "[email protected]", UserAgent: "pc", RegisteredAt: &now})
scope := DB.NewScope(&EmailWithIdx{})
if !scope.Dialect().HasIndex(scope.TableName(), "idx_email_agent") {
t.Errorf("Failed to create index")
}
if !scope.Dialect().HasIndex(scope.TableName(), "uix_email_with_idxes_registered_at") {
t.Errorf("Failed to create index")
}
var bigemail EmailWithIdx
DB.First(&bigemail, "user_agent = ?", "pc")
if bigemail.Email != "[email protected]" || bigemail.UserAgent != "pc" || bigemail.RegisteredAt.IsZero() {
t.Error("Big Emails should be saved and fetched correctly")
}
}
type MultipleIndexes struct {
ID int64
UserID int64 `sql:"unique_index:uix_multipleindexes_user_name,uix_multipleindexes_user_email;index:idx_multipleindexes_user_other"`
Name string `sql:"unique_index:uix_multipleindexes_user_name"`
Email string `sql:"unique_index:,uix_multipleindexes_user_email"`
Other string `sql:"index:,idx_multipleindexes_user_other"`
}
func TestMultipleIndexes(t *testing.T) {
if err := DB.DropTableIfExists(&MultipleIndexes{}).Error; err != nil {
fmt.Printf("Got error when try to delete table multiple_indexes, %+v\n", err)
}
DB.AutoMigrate(&MultipleIndexes{})
if err := DB.AutoMigrate(&EmailWithIdx{}).Error; err != nil {
t.Errorf("Auto Migrate should not raise any error")
}
DB.Save(&MultipleIndexes{UserID: 1, Name: "jinzhu", Email: "[email protected]", Other: "foo"})
scope := DB.NewScope(&MultipleIndexes{})
if !scope.Dialect().HasIndex(scope.TableName(), "uix_multipleindexes_user_name") {
t.Errorf("Failed to create index")
}
if !scope.Dialect().HasIndex(scope.TableName(), "uix_multipleindexes_user_email") {
t.Errorf("Failed to create index")
}
if !scope.Dialect().HasIndex(scope.TableName(), "uix_multiple_indexes_email") {
t.Errorf("Failed to create index")
}
if !scope.Dialect().HasIndex(scope.TableName(), "idx_multipleindexes_user_other") {
t.Errorf("Failed to create index")
}
if !scope.Dialect().HasIndex(scope.TableName(), "idx_multiple_indexes_other") {
t.Errorf("Failed to create index")
}
var multipleIndexes MultipleIndexes
DB.First(&multipleIndexes, "name = ?", "jinzhu")
if multipleIndexes.Email != "[email protected]" || multipleIndexes.Name != "jinzhu" {
t.Error("MultipleIndexes should be saved and fetched correctly")
}
// Check unique constraints
if err := DB.Save(&MultipleIndexes{UserID: 1, Name: "name1", Email: "[email protected]", Other: "foo"}).Error; err == nil {
t.Error("MultipleIndexes unique index failed")
}
if err := DB.Save(&MultipleIndexes{UserID: 1, Name: "name1", Email: "[email protected]", Other: "foo"}).Error; err != nil {
t.Error("MultipleIndexes unique index failed")
}
if err := DB.Save(&MultipleIndexes{UserID: 2, Name: "name1", Email: "[email protected]", Other: "foo"}).Error; err == nil {
t.Error("MultipleIndexes unique index failed")
}
if err := DB.Save(&MultipleIndexes{UserID: 2, Name: "name1", Email: "[email protected]", Other: "foo"}).Error; err != nil {
t.Error("MultipleIndexes unique index failed")
}
}
func TestModifyColumnType(t *testing.T) {
if dialect := os.Getenv("GORM_DIALECT"); dialect != "postgres" && dialect != "mysql" && dialect != "mssql" {
t.Skip("Skipping this because only postgres, mysql and mssql support altering a column type")
}
type ModifyColumnType struct {
gorm.Model
Name1 string `gorm:"length:100"`
Name2 string `gorm:"length:200"`
}
DB.DropTable(&ModifyColumnType{})
DB.CreateTable(&ModifyColumnType{})
name2Field, _ := DB.NewScope(&ModifyColumnType{}).FieldByName("Name2")
name2Type := DB.Dialect().DataTypeOf(name2Field.StructField)
if err := DB.Model(&ModifyColumnType{}).ModifyColumn("name1", name2Type).Error; err != nil {
t.Errorf("No error should happen when ModifyColumn, but got %v", err)
}
}
|
[
"\"GORM_DIALECT\""
] |
[] |
[
"GORM_DIALECT"
] |
[]
|
["GORM_DIALECT"]
|
go
| 1 | 0 | |
src/main/java/no/uio/ifi/localega/doa/LocalEGADOAApplication.java
|
package no.uio.ifi.localega.doa;
import io.minio.MinioClient;
import io.minio.errors.*;
import lombok.extern.slf4j.Slf4j;
import okhttp3.OkHttpClient;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.annotation.Bean;
import javax.net.ssl.*;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.GeneralSecurityException;
import java.security.KeyStore;
import java.security.cert.Certificate;
import java.security.cert.CertificateException;
import java.security.cert.CertificateFactory;
import java.util.*;
/**
* Spring Boot main file containing the application entry-point and all necessary Spring beans configuration.
*/
@Slf4j
@SpringBootApplication
public class LocalEGADOAApplication {
/**
* Spring boot entry-point.
*
* @param args Command-line arguments.
*/
public static void main(String[] args) {
SpringApplication application = new SpringApplication(LocalEGADOAApplication.class);
Properties properties = new Properties();
String rootCertPath = System.getenv("ROOT_CERT_PATH");
String rootCertPass = System.getenv("ROOT_CERT_PASSWORD");
if (StringUtils.isNotEmpty(rootCertPath) && StringUtils.isNotEmpty(rootCertPass)) {
properties.put("spring.rabbitmq.ssl.trust-store", "file:" + rootCertPath);
properties.put("spring.rabbitmq.ssl.trust-store-password", rootCertPass);
}
String clientCertPath = System.getenv("CLIENT_CERT_PATH");
String clientCertPass = System.getenv("CLIENT_CERT_PASSWORD");
if (StringUtils.isNotEmpty(clientCertPath) && StringUtils.isNotEmpty(clientCertPass)) {
properties.put("spring.rabbitmq.ssl.key-store", "file:" + clientCertPath);
properties.put("spring.rabbitmq.ssl.key-store-password", clientCertPass);
}
application.setDefaultProperties(properties);
application.run(args);
}
/**
* Archive Minio Client Spring bean.
*
* @return <code>MinioClient</code>
* @throws GeneralSecurityException In case of SSL/TLS related errors.
*/
@Bean
public MinioClient archiveClient(@Value("${s3.endpoint}") String s3Endpoint,
@Value("${s3.port}") int s3Port,
@Value("${s3.access-key}") String s3AccessKey,
@Value("${s3.secret-key}") String s3SecretKey,
@Value("${s3.region}") String s3Region,
@Value("${s3.secure}") boolean s3Secure,
@Value("${s3.root-ca}") String s3RootCA) throws GeneralSecurityException, ServerException, InsufficientDataException, InternalException, IOException, InvalidResponseException, InvalidBucketNameException, XmlParserException, ErrorResponseException, RegionConflictException {
MinioClient.Builder builder = MinioClient.builder().endpoint(s3Endpoint, s3Port, s3Secure).region(s3Region).credentials(s3AccessKey, s3SecretKey);
Optional<OkHttpClient> optionalOkHttpClient = buildOkHttpClient(s3RootCA);
optionalOkHttpClient.ifPresent(builder::httpClient);
return builder.build();
}
/**
* Outbox Minio Client Spring bean.
*
* @return <code>MinioClient</code>
* @throws GeneralSecurityException In case of SSL/TLS related errors.
*/
@Bean
public MinioClient outboxClient(@Value("${s3.out.endpoint}") String s3Endpoint,
@Value("${s3.out.port}") int s3Port,
@Value("${s3.out.access-key}") String s3AccessKey,
@Value("${s3.out.secret-key}") String s3SecretKey,
@Value("${s3.out.region}") String s3Region,
@Value("${s3.out.secure}") boolean s3Secure,
@Value("${s3.out.root-ca}") String s3RootCA) throws GeneralSecurityException, ServerException, InsufficientDataException, InternalException, IOException, InvalidResponseException, InvalidBucketNameException, XmlParserException, ErrorResponseException, RegionConflictException {
MinioClient.Builder builder = MinioClient.builder().endpoint(s3Endpoint, s3Port, s3Secure).region(s3Region).credentials(s3AccessKey, s3SecretKey);
Optional<OkHttpClient> optionalOkHttpClient = buildOkHttpClient(s3RootCA);
optionalOkHttpClient.ifPresent(builder::httpClient);
return builder.build();
}
private Optional<OkHttpClient> buildOkHttpClient(String s3RootCA) throws GeneralSecurityException {
try {
X509TrustManager trustManager = trustManagerForCertificates(Files.newInputStream(Path.of(s3RootCA)));
SSLContext sslContext = SSLContext.getInstance("TLS");
sslContext.init(null, new TrustManager[]{trustManager}, null);
return Optional.of(new OkHttpClient.Builder().sslSocketFactory(sslContext.getSocketFactory(), trustManager).build());
} catch (CertificateException | IOException e) {
log.warn("S3 Root CA file {} does not exist or can't be opened, skipping...", s3RootCA);
return Optional.empty();
}
}
private X509TrustManager trustManagerForCertificates(InputStream in) throws GeneralSecurityException {
CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509");
Collection<? extends Certificate> certificates = certificateFactory.generateCertificates(in);
if (certificates.isEmpty()) {
throw new IllegalArgumentException("Expected non-empty set of trusted certificates");
}
// put the certificates into a key store
char[] password = UUID.randomUUID().toString().toCharArray(); // any password will do
KeyStore keyStore = newEmptyKeyStore(password);
for (Certificate certificate : certificates) {
keyStore.setCertificateEntry(UUID.randomUUID().toString(), certificate);
}
// use it to build an X509 trust manager
KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
keyManagerFactory.init(keyStore, password);
TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
trustManagerFactory.init(keyStore);
TrustManager[] trustManagers = trustManagerFactory.getTrustManagers();
if (trustManagers.length != 1 || !(trustManagers[0] instanceof X509TrustManager)) {
throw new IllegalStateException("Unexpected default trust managers: " + Arrays.toString(trustManagers));
}
return (X509TrustManager) trustManagers[0];
}
private KeyStore newEmptyKeyStore(char[] password) throws GeneralSecurityException {
try {
KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType());
keyStore.load(null, password);
return keyStore;
} catch (IOException e) {
throw new AssertionError(e);
}
}
}
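// Example (hypothetical paths): with ROOT_CERT_PATH=/etc/ssl/rootCA.p12 and a
// matching ROOT_CERT_PASSWORD, main() sets
// spring.rabbitmq.ssl.trust-store=file:/etc/ssl/rootCA.p12 and its password
// before the Spring context starts; CLIENT_CERT_* maps to the key-store analogously.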
|
[
"\"ROOT_CERT_PATH\"",
"\"ROOT_CERT_PASSWORD\"",
"\"CLIENT_CERT_PATH\"",
"\"CLIENT_CERT_PASSWORD\""
] |
[] |
[
"ROOT_CERT_PATH",
"CLIENT_CERT_PATH",
"CLIENT_CERT_PASSWORD",
"ROOT_CERT_PASSWORD"
] |
[]
|
["ROOT_CERT_PATH", "CLIENT_CERT_PATH", "CLIENT_CERT_PASSWORD", "ROOT_CERT_PASSWORD"]
|
java
| 4 | 0 | |
nuclio/ingest/ingest_example_test.go
|
package ingest
import (
"fmt"
"github.com/nuclio/nuclio-test-go"
"os"
"testing"
"time"
)
func TestIngest(t *testing.T) {
data := nutest.DataBind{Name: "db0", Url: os.Getenv("V3IO_URL"), Container: "1"}
tc, err := nutest.NewTestContext(Handler, true, &data)
if err != nil {
t.Fatal(err)
}
err = tc.InitContext(InitContext)
if err != nil {
t.Fatal(err)
}
testEvent := nutest.TestEvent{
Body: []byte(pushEvent),
}
resp, err := tc.Invoke(&testEvent)
tc.Logger.InfoWith("Run complete", "resp", resp, "err", err)
resp, err = tc.Invoke(&testEvent)
time.Sleep(time.Second * 1)
tc.Logger.InfoWith("Run complete", "resp", resp, "err", err)
fmt.Println(resp)
time.Sleep(time.Second * 10)
}
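// To run this test against a real endpoint (hypothetical address):
// V3IO_URL=web-api.default-tenant.app.example.com:8081 go test ./nuclio/ingest/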
|
[
"\"V3IO_URL\""
] |
[] |
[
"V3IO_URL"
] |
[]
|
["V3IO_URL"]
|
go
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pylibrary.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
python/ray/tune/tests/test_trial_runner_callbacks.py
|
import os
import shutil
import sys
import tempfile
import time
import unittest
import ray
from ray import tune
from ray.rllib import _register_all
from ray.tune.checkpoint_manager import Checkpoint
from ray.tune.logger import DEFAULT_LOGGERS, ExperimentLogger, \
LegacyExperimentLogger
from ray.tune.ray_trial_executor import RayTrialExecutor
from ray.tune.result import TRAINING_ITERATION
from ray.tune.syncer import SyncConfig, SyncerCallback
from ray.tune.trial import Trial
from ray.tune.trial_runner import TrialRunner
from ray.tune import Callback
from ray.tune.utils.callback import create_default_callbacks
class TestCallback(Callback):
def __init__(self):
self.state = {}
def on_step_begin(self, **info):
self.state["step_begin"] = info
def on_step_end(self, **info):
self.state["step_end"] = info
def on_trial_start(self, **info):
self.state["trial_start"] = info
def on_trial_restore(self, **info):
self.state["trial_restore"] = info
def on_trial_save(self, **info):
self.state["trial_save"] = info
def on_trial_result(self, **info):
self.state["trial_result"] = info
result = info["result"]
trial = info["trial"]
assert result.get(TRAINING_ITERATION, None) != trial.last_result.get(
TRAINING_ITERATION, None)
def on_trial_complete(self, **info):
self.state["trial_complete"] = info
def on_trial_error(self, **info):
self.state["trial_fail"] = info
class _MockTrialExecutor(RayTrialExecutor):
def __init__(self):
super().__init__()
self.results = {}
self.next_trial = None
self.failed_trial = None
def fetch_result(self, trial):
return self.results.get(trial, {})
def get_next_available_trial(self):
return self.next_trial or super().get_next_available_trial()
def get_next_failed_trial(self):
return self.failed_trial or super().get_next_failed_trial()
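# _MockTrialExecutor lets the test deterministically choose which trial the
# runner processes next and what result it fetches, instead of launching real trainables.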
class TrialRunnerCallbacks(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.callback = TestCallback()
self.executor = _MockTrialExecutor()
self.trial_runner = TrialRunner(
trial_executor=self.executor, callbacks=[self.callback])
def tearDown(self):
ray.shutdown()
_register_all() # re-register the evicted objects
if "CUDA_VISIBLE_DEVICES" in os.environ:
del os.environ["CUDA_VISIBLE_DEVICES"]
shutil.rmtree(self.tmpdir)
def testCallbackSteps(self):
trials = [
Trial("__fake", trial_id="one"),
Trial("__fake", trial_id="two")
]
for t in trials:
self.trial_runner.add_trial(t)
self.executor.next_trial = trials[0]
self.trial_runner.step()
# Trial 1 has been started
self.assertEqual(self.callback.state["trial_start"]["iteration"], 0)
self.assertEqual(self.callback.state["trial_start"]["trial"].trial_id,
"one")
# All these events haven't happened, yet
self.assertTrue(
all(k not in self.callback.state for k in [
"trial_restore", "trial_save", "trial_result",
"trial_complete", "trial_fail"
]))
self.executor.next_trial = trials[1]
self.trial_runner.step()
# Iteration not increased yet
self.assertEqual(self.callback.state["step_begin"]["iteration"], 1)
# Iteration increased
self.assertEqual(self.callback.state["step_end"]["iteration"], 2)
# Second trial has been just started
self.assertEqual(self.callback.state["trial_start"]["iteration"], 1)
self.assertEqual(self.callback.state["trial_start"]["trial"].trial_id,
"two")
cp = Checkpoint(Checkpoint.PERSISTENT, "__checkpoint",
{TRAINING_ITERATION: 0})
# Let the first trial save a checkpoint
self.executor.next_trial = trials[0]
trials[0].saving_to = cp
self.trial_runner.step()
self.assertEqual(self.callback.state["trial_save"]["iteration"], 2)
self.assertEqual(self.callback.state["trial_save"]["trial"].trial_id,
"one")
# Let the second trial send a result
result = {TRAINING_ITERATION: 1, "metric": 800, "done": False}
self.executor.results[trials[1]] = result
self.executor.next_trial = trials[1]
self.assertEqual(trials[1].last_result, {})
self.trial_runner.step()
self.assertEqual(self.callback.state["trial_result"]["iteration"], 3)
self.assertEqual(self.callback.state["trial_result"]["trial"].trial_id,
"two")
self.assertEqual(
self.callback.state["trial_result"]["result"]["metric"], 800)
self.assertEqual(trials[1].last_result["metric"], 800)
# Let the second trial restore from a checkpoint
trials[1].restoring_from = cp
self.executor.results[trials[1]] = trials[1].last_result
self.trial_runner.step()
self.assertEqual(self.callback.state["trial_restore"]["iteration"], 4)
self.assertEqual(
self.callback.state["trial_restore"]["trial"].trial_id, "two")
# Let the second trial finish
trials[1].restoring_from = None
self.executor.results[trials[1]] = {
TRAINING_ITERATION: 2,
"metric": 900,
"done": True
}
self.trial_runner.step()
self.assertEqual(self.callback.state["trial_complete"]["iteration"], 5)
self.assertEqual(
self.callback.state["trial_complete"]["trial"].trial_id, "two")
# Let the first trial error
self.executor.failed_trial = trials[0]
self.trial_runner.step()
self.assertEqual(self.callback.state["trial_fail"]["iteration"], 6)
self.assertEqual(self.callback.state["trial_fail"]["trial"].trial_id,
"one")
def testCallbacksEndToEnd(self):
def train(config):
if config["do"] == "save":
with tune.checkpoint_dir(0):
pass
tune.report(metric=1)
elif config["do"] == "fail":
raise RuntimeError("I am failing on purpose.")
elif config["do"] == "delay":
time.sleep(2)
tune.report(metric=20)
config = {"do": tune.grid_search(["save", "fail", "delay"])}
tune.run(
train,
config=config,
raise_on_failed_trial=False,
callbacks=[self.callback])
self.assertEqual(
self.callback.state["trial_fail"]["trial"].config["do"], "fail")
self.assertEqual(
self.callback.state["trial_save"]["trial"].config["do"], "save")
self.assertEqual(
self.callback.state["trial_result"]["trial"].config["do"], "delay")
self.assertEqual(
self.callback.state["trial_complete"]["trial"].config["do"],
"delay")
def testCallbackReordering(self):
"""SyncerCallback should come after ExperimentLogger callbacks"""
def get_positions(callbacks):
first_logger_pos = None
last_logger_pos = None
syncer_pos = None
for i, callback in enumerate(callbacks):
if isinstance(callback, ExperimentLogger):
if first_logger_pos is None:
first_logger_pos = i
last_logger_pos = i
elif isinstance(callback, SyncerCallback):
syncer_pos = i
return first_logger_pos, last_logger_pos, syncer_pos
# Auto creation of loggers, no callbacks, no syncer
callbacks = create_default_callbacks(None, SyncConfig(), None)
first_logger_pos, last_logger_pos, syncer_pos = get_positions(
callbacks)
self.assertLess(last_logger_pos, syncer_pos)
# Auto creation of loggers with callbacks
callbacks = create_default_callbacks([Callback()], SyncConfig(), None)
first_logger_pos, last_logger_pos, syncer_pos = get_positions(
callbacks)
self.assertLess(last_logger_pos, syncer_pos)
# Auto creation of loggers with existing logger (but no CSV/JSON)
callbacks = create_default_callbacks([ExperimentLogger()],
SyncConfig(), None)
first_logger_pos, last_logger_pos, syncer_pos = get_positions(
callbacks)
self.assertLess(last_logger_pos, syncer_pos)
# This should throw an error as the syncer comes before the logger
with self.assertRaises(ValueError):
callbacks = create_default_callbacks(
[SyncerCallback(None),
ExperimentLogger()], SyncConfig(), None)
# This should be reordered but preserve the regular callback order
[mc1, mc2, mc3] = [Callback(), Callback(), Callback()]
# Has to be legacy logger to avoid logger callback creation
lc = LegacyExperimentLogger(logger_classes=DEFAULT_LOGGERS)
callbacks = create_default_callbacks([mc1, mc2, lc, mc3], SyncConfig(),
None)
print(callbacks)
first_logger_pos, last_logger_pos, syncer_pos = get_positions(
callbacks)
self.assertLess(last_logger_pos, syncer_pos)
self.assertLess(callbacks.index(mc1), callbacks.index(mc2))
self.assertLess(callbacks.index(mc2), callbacks.index(mc3))
self.assertLess(callbacks.index(lc), callbacks.index(mc3))
# Syncer callback is appended
self.assertLess(callbacks.index(mc3), syncer_pos)
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
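The tearDown above deletes CUDA_VISIBLE_DEVICES so one test cannot leak GPU masking into the next. A sketch of the same hygiene in a Go test; since Go 1.17, testing.T.Setenv restores the previous value automatically, so no manual teardown is needed:

package example

import (
	"os"
	"testing"
)

func TestWithMaskedGPUs(t *testing.T) {
	// t.Setenv restores the prior value (or unsets the variable)
	// when the test finishes.
	t.Setenv("CUDA_VISIBLE_DEVICES", "")

	if got := os.Getenv("CUDA_VISIBLE_DEVICES"); got != "" {
		t.Fatalf("expected GPUs to be masked, got %q", got)
	}
}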
main.go
|
package main
import (
"bytes"
"errors"
"flag"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"golang.org/x/tools/imports"
)
const moduleName = "typedjson"
const (
ExitCodeError = 1
)
type GeneratorArgs struct {
OutputPath string
Interface string
Typed string
Package string
Imports []string
Structs []string
AllArgs []string
}
func main() {
args, err := parseArguments()
if err != nil {
exitf(ExitCodeError, "error while parsing arguments: %v\n", err)
}
buff := bytes.NewBuffer([]byte{})
if err := generateCode(args, buff); err != nil {
exitf(ExitCodeError, "error while generating code: %v\n", err)
}
code, err := imports.Process(filepath.Dir(args.OutputPath), buff.Bytes(), nil)
if err != nil {
exitf(ExitCodeError, "error while processing imports: %v\n", err)
}
if args.OutputPath == "stdout" {
_, err = os.Stdout.Write(code)
} else {
err = ioutil.WriteFile(args.OutputPath, code, 0644)
}
if err != nil {
exitf(ExitCodeError, "error while writing code to %s: %v\n", args.OutputPath, err)
}
}
func parseArguments() (*GeneratorArgs, error) {
ga := GeneratorArgs{}
flag.StringVar(&ga.Package, "package", os.Getenv("GOPACKAGE"), "package name in generated file (default to GOPACKAGE)")
flag.StringVar(&ga.Interface, "interface", "", "name of the interface that encompass all types")
flag.StringVar(&ga.Typed, "typed", "", "name of struct that will used for typed interface (default to %%interface%%Typed")
flag.StringVar(&ga.OutputPath, "output", "", "output path where generated code should be saved")
flag.Parse()
ga.Structs = flag.Args()
if ga.Typed == "" {
ga.Typed = ga.Interface + "Typed"
}
if ga.OutputPath == "" {
ga.OutputPath = strings.ToLower(fmt.Sprintf("%s_%s.go", ga.Interface, moduleName))
}
ga.AllArgs = os.Args
ga.AllArgs[0] = moduleName
if err := checkArgs(&ga); err != nil {
return nil, err
}
return &ga, nil
}
func checkArgs(args *GeneratorArgs) error {
if args.Package == "" {
return errors.New("package name should not be empty")
}
if args.OutputPath == "" {
return errors.New("output path should not be empty")
}
return nil
}
func exitf(code int, format string, args ...interface{}) {
fmt.Fprintf(os.Stderr, format, args...)
os.Exit(code)
}
|
[
"\"GOPACKAGE\""
] |
[] |
[
"GOPACKAGE"
] |
[]
|
["GOPACKAGE"]
|
go
| 1 | 0 | |
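The generator above defaults its -package flag to GOPACKAGE, which the go generate tool exports for the file being processed. A reduced sketch of that env-var-backed flag default, under the same assumption that the tool runs via go generate:

package main

import (
	"flag"
	"fmt"
	"os"
)

func main() {
	// go generate exports GOPACKAGE; an explicit -package flag
	// still overrides it because flag parsing runs afterwards.
	pkg := flag.String("package", os.Getenv("GOPACKAGE"), "package name in generated file")
	flag.Parse()
	if *pkg == "" {
		fmt.Fprintln(os.Stderr, "package name should not be empty")
		os.Exit(1)
	}
	fmt.Println("generating into package", *pkg)
}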
backend/api/controllers/sqs.go
|
package controllers
import (
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sqs"
)
// SendMessage publishes the given match ID to the SQS queue configured
// via the SQS_QUEUE_URL environment variable.
func SendMessage(sess *session.Session, matchID string) error {
	svc := sqs.New(sess)
	queueURL := os.Getenv("SQS_QUEUE_URL")
	sendParams := &sqs.SendMessageInput{
		MessageBody: aws.String(matchID),
		QueueUrl:    aws.String(queueURL),
	}
	_, err := svc.SendMessage(sendParams)
	return err
}
|
[
"\"SQS_QUEUE_URL\""
] |
[] |
[
"SQS_QUEUE_URL"
] |
[]
|
["SQS_QUEUE_URL"]
|
go
| 1 | 0 | |
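SendMessage above reads SQS_QUEUE_URL on every call, so an empty value only surfaces later as an SDK error. A sketch of validating the variable once at startup instead; mustEnv is a hypothetical helper, not part of the project above:

package main

import (
	"fmt"
	"os"
)

// mustEnv returns the value of key or exits with a clear error, so
// misconfiguration is caught at startup rather than per request.
func mustEnv(key string) string {
	v := os.Getenv(key)
	if v == "" {
		fmt.Fprintf(os.Stderr, "missing required environment variable %s\n", key)
		os.Exit(1)
	}
	return v
}

func main() {
	queueURL := mustEnv("SQS_QUEUE_URL")
	fmt.Println("queue:", queueURL)
}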
flink-runtime/src/main/java/org/apache/flink/runtime/util/EnvironmentInformation.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.util;
import java.io.InputStream;
import java.lang.management.ManagementFactory;
import java.lang.management.OperatingSystemMXBean;
import java.lang.management.RuntimeMXBean;
import java.lang.reflect.Method;
import java.util.List;
import java.util.Properties;
import org.apache.hadoop.util.VersionInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.security.UserGroupInformation;
/**
* Utility class that gives access to the execution environment of the JVM, like
* the executing user, startup options, or the JVM version.
*/
public class EnvironmentInformation {
private static final Logger LOG = LoggerFactory.getLogger(EnvironmentInformation.class);
public static final String UNKNOWN = "<unknown>";
/**
* Returns the version of the code as a String. If the version is null, the JobManager is not running from a
* Maven build, for example when the source was checked out, compiled, and run from inside an IDE.
*
* @return The version string.
*/
public static String getVersion() {
String version = EnvironmentInformation.class.getPackage().getImplementationVersion();
return version != null ? version : UNKNOWN;
}
/**
* Returns the code revision (commit and commit date) of Flink, as generated by the Maven builds.
*
* @return The code revision.
*/
public static RevisionInformation getRevisionInformation() {
String revision = UNKNOWN;
String commitDate = UNKNOWN;
try {
InputStream propFile = EnvironmentInformation.class.getClassLoader().getResourceAsStream(".version.properties");
if (propFile != null) {
Properties properties = new Properties();
properties.load(propFile);
String propRevision = properties.getProperty("git.commit.id.abbrev");
String propCommitDate = properties.getProperty("git.commit.time");
revision = propRevision != null ? propRevision : UNKNOWN;
commitDate = propCommitDate != null ? propCommitDate : UNKNOWN;
}
}
catch (Throwable t) {
if (LOG.isDebugEnabled()) {
LOG.debug("Cannot determine code revision: Unable to read version property file.", t);
} else {
LOG.info("Cannot determine code revision: Unable to read version property file.");
}
}
return new RevisionInformation(revision, commitDate);
}
/**
* Gets the name of the user that is running the JVM.
*
* @return The name of the user that is running the JVM.
*/
public static String getUserRunning() {
try {
return UserGroupInformation.getCurrentUser().getShortUserName();
}
catch (LinkageError e) {
// hadoop classes are not in the classpath
LOG.debug("Cannot determine user/group information using Hadoop utils. " +
"Hadoop classes not loaded or compatible", e);
}
catch (Throwable t) {
// some other error occurred that we should log and make known
LOG.warn("Error while accessing user/group information via Hadoop utils.", t);
}
String user = System.getProperty("user.name");
if (user == null) {
user = UNKNOWN;
if (LOG.isDebugEnabled()) {
LOG.debug("Cannot determine user/group information for the current user.");
}
}
return user;
}
/**
* The maximum JVM heap size, in bytes.
*
* @return The maximum JVM heap size, in bytes.
*/
public static long getMaxJvmHeapMemory() {
long maxMemory = Runtime.getRuntime().maxMemory();
if (maxMemory == Long.MAX_VALUE) {
// amount of free memory unknown
try {
// workaround for Oracle JDK
OperatingSystemMXBean operatingSystemMXBean = ManagementFactory.getOperatingSystemMXBean();
Class<?> clazz = Class.forName("com.sun.management.OperatingSystemMXBean");
Method method = clazz.getMethod("getTotalPhysicalMemorySize");
maxMemory = (Long) method.invoke(operatingSystemMXBean) / 4;
}
catch (Throwable e) {
throw new RuntimeException("Could not determine the amount of free memory.\n" +
"Please set the maximum memory for the JVM, e.g. -Xmx512M for 512 megabytes.");
}
}
return maxMemory;
}
/**
* Gets an estimate of the size of the free heap memory.
*
* NOTE: This method is heavy-weight. It triggers a garbage collection to reduce fragmentation and get
* a better estimate at the size of free memory. It is typically more accurate than the plain version
* {@link #getSizeOfFreeHeapMemory()}.
*
* @return An estimate of the size of the free heap memory, in bytes.
*/
public static long getSizeOfFreeHeapMemoryWithDefrag() {
// trigger a garbage collection, to reduce fragmentation
System.gc();
return getSizeOfFreeHeapMemory();
}
/**
* Gets an estimate of the size of the free heap memory. The estimate may vary, depending on the current
* level of memory fragmentation and the number of dead objects. For a better (but more heavy-weight)
* estimate, use {@link #getSizeOfFreeHeapMemoryWithDefrag()}.
*
* @return An estimate of the size of the free heap memory, in bytes.
*/
public static long getSizeOfFreeHeapMemory() {
Runtime r = Runtime.getRuntime();
long maxMemory = r.maxMemory();
if (maxMemory == Long.MAX_VALUE) {
// amount of free memory unknown
try {
// workaround for Oracle JDK
OperatingSystemMXBean operatingSystemMXBean = ManagementFactory.getOperatingSystemMXBean();
Class<?> clazz = Class.forName("com.sun.management.OperatingSystemMXBean");
Method method = clazz.getMethod("getTotalPhysicalMemorySize");
maxMemory = (Long) method.invoke(operatingSystemMXBean) / 4;
} catch (Throwable e) {
throw new RuntimeException("Could not determine the amount of free memory.\n" +
"Please set the maximum memory for the JVM, e.g. -Xmx512M for 512 megabytes.");
}
}
return maxMemory - r.totalMemory() + r.freeMemory();
}
/**
* Gets the version of the JVM in the form "VM_Name - Vendor - Spec/Version".
*
* @return The JVM version.
*/
public static String getJvmVersion() {
try {
final RuntimeMXBean bean = ManagementFactory.getRuntimeMXBean();
return bean.getVmName() + " - " + bean.getVmVendor() + " - " + bean.getSpecVersion() + '/' + bean.getVmVersion();
}
catch (Throwable t) {
return UNKNOWN;
}
}
/**
* Gets the system parameters and environment parameters that were passed to the JVM on startup.
*
* @return The options passed to the JVM on startup.
*/
public static String getJvmStartupOptions() {
try {
final RuntimeMXBean bean = ManagementFactory.getRuntimeMXBean();
final StringBuilder bld = new StringBuilder();
for (String s : bean.getInputArguments()) {
bld.append(s).append(' ');
}
return bld.toString();
}
catch (Throwable t) {
return UNKNOWN;
}
}
/**
* Gets the system parameters and environment parameters that were passed to the JVM on startup.
*
* @return The options passed to the JVM on startup.
*/
public static String[] getJvmStartupOptionsArray() {
try {
RuntimeMXBean bean = ManagementFactory.getRuntimeMXBean();
List<String> options = bean.getInputArguments();
return options.toArray(new String[options.size()]);
}
catch (Throwable t) {
return new String[0];
}
}
/**
* Gets the directory for temporary files, as returned by the JVM system property "java.io.tmpdir".
*
* @return The directory for temporary files.
*/
public static String getTemporaryFileDirectory() {
return System.getProperty("java.io.tmpdir");
}
/**
* Tries to retrieve the maximum number of open file handles. This method will only work on
* UNIX-based operating systems with Sun/Oracle Java versions.
*
* <p>If the number of max open file handles cannot be determined, this method returns {@code -1}.</p>
*
* @return The limit of open file handles, or {@code -1}, if the limit could not be determined.
*/
public static long getOpenFileHandlesLimit() {
Class<?> sunBeanClass;
try {
sunBeanClass = Class.forName("com.sun.management.UnixOperatingSystemMXBean");
}
catch (ClassNotFoundException e) {
return -1L;
}
try {
Method fhLimitMethod = sunBeanClass.getMethod("getMaxFileDescriptorCount");
Object result = fhLimitMethod.invoke(ManagementFactory.getOperatingSystemMXBean());
return (Long) result;
}
catch (Throwable t) {
LOG.warn("Unexpected error when accessing file handle limit", t);
return -1L;
}
}
/**
* Logs information about the environment, like code revision, current user, Java version,
* and JVM parameters.
*
* @param log The logger to log the information to.
* @param componentName The component name to mention in the log.
* @param commandLineArgs The command line arguments used to start the component.
*/
public static void logEnvironmentInfo(Logger log, String componentName, String[] commandLineArgs) {
if (log.isInfoEnabled()) {
RevisionInformation rev = getRevisionInformation();
String version = getVersion();
String user = getUserRunning();
String jvmVersion = getJvmVersion();
String[] options = getJvmStartupOptionsArray();
String javaHome = System.getenv("JAVA_HOME");
long maxHeapMegabytes = getMaxJvmHeapMemory() >>> 20;
log.info("--------------------------------------------------------------------------------");
log.info(" Starting " + componentName + " (Version: " + version + ", "
+ "Rev:" + rev.commitId + ", " + "Date:" + rev.commitDate + ")");
log.info(" Current user: " + user);
log.info(" JVM: " + jvmVersion);
log.info(" Maximum heap size: " + maxHeapMegabytes + " MiBytes");
log.info(" JAVA_HOME: " + (javaHome == null ? "(not set)" : javaHome));
log.info(" Hadoop version: " + VersionInfo.getVersion());
if (options.length == 0) {
log.info(" JVM Options: (none)");
}
else {
log.info(" JVM Options:");
for (String s: options) {
log.info(" " + s);
}
}
if (commandLineArgs == null || commandLineArgs.length == 0) {
log.info(" Program Arguments: (none)");
}
else {
log.info(" Program Arguments:");
for (String s: commandLineArgs) {
log.info(" " + s);
}
}
log.info(" Classpath: " + System.getProperty("java.class.path"));
log.info("--------------------------------------------------------------------------------");
}
}
// --------------------------------------------------------------------------------------------
/** Don't instantiate this class */
private EnvironmentInformation() {}
// --------------------------------------------------------------------------------------------
/**
* Revision information encapsulates information about the source code revision of the Flink
* code.
*/
public static class RevisionInformation {
/** The git commit id (hash) */
public final String commitId;
/** The git commit date */
public final String commitDate;
public RevisionInformation(String commitId, String commitDate) {
this.commitId = commitId;
this.commitDate = commitDate;
}
}
}
|
[
"\"JAVA_HOME\""
] |
[] |
[
"JAVA_HOME"
] |
[]
|
["JAVA_HOME"]
|
java
| 1 | 0 | |
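logEnvironmentInfo above treats JAVA_HOME as optional and prints "(not set)" instead of an empty string. A sketch of the same optional-with-placeholder reporting in Go, using only the standard library:

package main

import (
	"fmt"
	"os"
)

// envOrPlaceholder reports an optional variable's value, or a
// placeholder when it is absent, so log lines stay unambiguous.
func envOrPlaceholder(key string) string {
	if v, ok := os.LookupEnv(key); ok {
		return v
	}
	return "(not set)"
}

func main() {
	fmt.Println(" JAVA_HOME:", envOrPlaceholder("JAVA_HOME"))
}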
cmd/troubleshoot/cli/util.go
|
package cli
import (
"fmt"
"net/url"
"os"
"github.com/pkg/errors"
troubleshootclientv1beta1 "github.com/replicatedhq/troubleshoot/pkg/client/troubleshootclientset/typed/troubleshoot/v1beta1"
"k8s.io/cli-runtime/pkg/genericclioptions"
)
func homeDir() string {
if h := os.Getenv("HOME"); h != "" {
return h
}
return os.Getenv("USERPROFILE") // windows
}
func isURL(str string) bool {
parsed, err := url.ParseRequestURI(str)
if err != nil {
return false
}
return parsed.Scheme != ""
}
func createTroubleshootK8sClient(configFlags *genericclioptions.ConfigFlags) (*troubleshootclientv1beta1.TroubleshootV1beta1Client, error) {
config, err := configFlags.ToRESTConfig()
if err != nil {
return nil, errors.Wrap(err, "failed to convert kube flags to rest config")
}
troubleshootClient, err := troubleshootclientv1beta1.NewForConfig(config)
if err != nil {
return nil, errors.Wrap(err, "failed to create troubleshoot client")
}
return troubleshootClient, nil
}
func findFileName(basename, extension string) (string, error) {
n := 1
name := basename
for {
filename := name + "." + extension
if _, err := os.Stat(filename); os.IsNotExist(err) {
return filename, nil
} else if err != nil {
return "", errors.Wrap(err, "check file exists")
}
name = fmt.Sprintf("%s (%d)", basename, n)
n = n + 1
}
}
|
[
"\"HOME\"",
"\"USERPROFILE\""
] |
[] |
[
"HOME",
"USERPROFILE"
] |
[]
|
["HOME", "USERPROFILE"]
|
go
| 2 | 0 | |
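homeDir above falls back from HOME to USERPROFILE to cover Windows. Since Go 1.12 the standard library wraps the same lookup in os.UserHomeDir; a sketch that prefers it while keeping the manual fallback:

package main

import (
	"fmt"
	"os"
)

func homeDir() string {
	// os.UserHomeDir consults HOME on Unix and USERPROFILE on
	// Windows, matching the manual fallback in the file above.
	if h, err := os.UserHomeDir(); err == nil {
		return h
	}
	if h := os.Getenv("HOME"); h != "" {
		return h
	}
	return os.Getenv("USERPROFILE") // windows
}

func main() {
	fmt.Println(homeDir())
}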
pkg/cmd/post.go
|
package cmd
import (
"errors"
"fmt"
"os"
"strconv"
"strings"
"github.com/suzuki-shunsuke/github-comment/pkg/api"
"github.com/suzuki-shunsuke/github-comment/pkg/comment"
"github.com/suzuki-shunsuke/github-comment/pkg/config"
"github.com/suzuki-shunsuke/github-comment/pkg/option"
"github.com/suzuki-shunsuke/github-comment/pkg/platform"
"github.com/suzuki-shunsuke/github-comment/pkg/template"
"github.com/suzuki-shunsuke/go-httpclient/httpclient"
"github.com/urfave/cli/v2"
"golang.org/x/crypto/ssh/terminal"
)
func (runner Runner) postCommand() cli.Command { //nolint:funlen
return cli.Command{
Name: "post",
Usage: "post a comment",
Action: runner.postAction,
Flags: []cli.Flag{
&cli.StringFlag{
Name: "org",
Usage: "GitHub organization name",
},
&cli.StringFlag{
Name: "repo",
Usage: "GitHub repository name",
},
&cli.StringFlag{
Name: "token",
Usage: "GitHub API token",
EnvVars: []string{"GITHUB_TOKEN", "GITHUB_ACCESS_TOKEN"},
},
&cli.StringFlag{
Name: "sha1",
Usage: "commit sha1",
},
&cli.StringFlag{
Name: "template",
Usage: "comment template",
},
&cli.StringFlag{
Name: "template-key",
Aliases: []string{"k"},
Usage: "comment template key",
Value: "default",
},
&cli.StringFlag{
Name: "config",
Usage: "configuration file path",
},
&cli.IntFlag{
Name: "pr",
Usage: "GitHub pull request number",
},
&cli.StringSliceFlag{
Name: "var",
Usage: "template variable",
},
&cli.BoolFlag{
Name: "dry-run",
Usage: "output a comment to standard error output instead of posting to GitHub",
},
&cli.BoolFlag{
Name: "skip-no-token",
Aliases: []string{"n"},
Usage: "works like dry-run if the GitHub Access Token isn't set",
EnvVars: []string{"GITHUB_COMMENT_SKIP_NO_TOKEN"},
},
&cli.BoolFlag{
Name: "silent",
Aliases: []string{"s"},
Usage: "suppress the output of dry-run and skip-no-token",
},
&cli.BoolFlag{
Name: "stdin-template",
Usage: "read standard input as the template",
},
},
}
}
func parseVarsFlag(varsSlice []string) (map[string]string, error) {
vars := make(map[string]string, len(varsSlice))
for _, v := range varsSlice {
a := strings.SplitN(v, ":", 2)
if len(a) < 2 { //nolint:gomnd
return nil, errors.New("invalid var flag. The format should be '--var <key>:<value>")
}
vars[a[0]] = a[1]
}
return vars, nil
}
// parsePostOptions parses the command line arguments of the subcommand "post".
func parsePostOptions(opts *option.PostOptions, c *cli.Context) error {
opts.Org = c.String("org")
opts.Repo = c.String("repo")
opts.Token = c.String("token")
opts.SHA1 = c.String("sha1")
opts.Template = c.String("template")
opts.TemplateKey = c.String("template-key")
opts.ConfigPath = c.String("config")
opts.PRNumber = c.Int("pr")
opts.DryRun = c.Bool("dry-run")
opts.SkipNoToken = c.Bool("skip-no-token")
opts.Silent = c.Bool("silent")
opts.StdinTemplate = c.Bool("stdin-template")
vars, err := parseVarsFlag(c.StringSlice("var"))
if err != nil {
return err
}
opts.Vars = vars
return nil
}
func getPostCommenter(opts option.PostOptions) api.Commenter {
if opts.DryRun {
return comment.Mock{
Stderr: os.Stderr,
Silent: opts.Silent,
}
}
if opts.SkipNoToken && opts.Token == "" {
return comment.Mock{
Stderr: os.Stderr,
Silent: opts.Silent,
}
}
return comment.Commenter{
Token: opts.Token,
HTTPClient: httpclient.New("https://api.github.com"),
}
}
// postAction is an entrypoint of the subcommand "post".
func (runner Runner) postAction(c *cli.Context) error {
if a := os.Getenv("GITHUB_COMMENT_SKIP"); a != "" {
skipComment, err := strconv.ParseBool(a)
if err != nil {
return fmt.Errorf("parse the environment variable GITHUB_COMMENT_SKIP as a bool: %w", err)
}
if skipComment {
return nil
}
}
opts := option.PostOptions{}
if err := parsePostOptions(&opts, c); err != nil {
return err
}
wd, err := os.Getwd()
if err != nil {
return fmt.Errorf("get a current directory path: %w", err)
}
var pt api.Platform
if p, f := platform.Get(); f {
pt = p
}
cfgReader := config.Reader{
ExistFile: existFile,
}
cfg, err := cfgReader.FindAndRead(opts.ConfigPath, wd)
if err != nil {
return fmt.Errorf("find and read a configuration file: %w", err)
}
opts.SkipNoToken = opts.SkipNoToken || cfg.SkipNoToken
ctrl := api.PostController{
Wd: wd,
Getenv: os.Getenv,
HasStdin: func() bool {
return !terminal.IsTerminal(0)
},
Stdin: runner.Stdin,
Commenter: getPostCommenter(opts),
Renderer: template.Renderer{
Getenv: os.Getenv,
},
Platform: pt,
Config: cfg,
}
return ctrl.Post(c.Context, opts)
}
|
[
"\"GITHUB_COMMENT_SKIP\""
] |
[] |
[
"GITHUB_COMMENT_SKIP"
] |
[]
|
["GITHUB_COMMENT_SKIP"]
|
go
| 1 | 0 | |
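postAction above treats GITHUB_COMMENT_SKIP as a boolean switch parsed with strconv.ParseBool, so values like "true", "1", and "T" all work. A standalone sketch of that parse-and-gate pattern; skipRequested is a hypothetical helper:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// skipRequested reports whether the named variable is set to a truthy
// value; malformed values are surfaced as errors instead of being
// silently treated as false.
func skipRequested(key string) (bool, error) {
	v := os.Getenv(key)
	if v == "" {
		return false, nil
	}
	skip, err := strconv.ParseBool(v)
	if err != nil {
		return false, fmt.Errorf("parse %s as a bool: %w", key, err)
	}
	return skip, nil
}

func main() {
	skip, err := skipRequested("GITHUB_COMMENT_SKIP")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if skip {
		fmt.Println("skipping comment")
		return
	}
	fmt.Println("posting comment")
}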
platform/testFramework/src/com/intellij/testFramework/UsefulTestCase.java
|
// Copyright 2000-2019 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.testFramework;
import com.intellij.codeInsight.CodeInsightSettings;
import com.intellij.concurrency.IdeaForkJoinWorkerThreadFactory;
import com.intellij.diagnostic.PerformanceWatcher;
import com.intellij.openapi.Disposable;
import com.intellij.openapi.application.Application;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.application.PathManager;
import com.intellij.openapi.application.impl.ApplicationInfoImpl;
import com.intellij.openapi.command.impl.StartMarkAction;
import com.intellij.openapi.components.ServiceManager;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.fileTypes.StdFileTypes;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.Comparing;
import com.intellij.openapi.util.Disposer;
import com.intellij.openapi.util.IconLoader;
import com.intellij.openapi.util.JDOMUtil;
import com.intellij.openapi.util.io.FileUtil;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.openapi.vfs.LocalFileSystem;
import com.intellij.openapi.vfs.VfsUtilCore;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.openapi.vfs.VirtualFileVisitor;
import com.intellij.psi.PsiDocumentManager;
import com.intellij.psi.codeStyle.CodeStyleSettings;
import com.intellij.psi.impl.DocumentCommitProcessor;
import com.intellij.psi.impl.DocumentCommitThread;
import com.intellij.psi.impl.source.PostprocessReformattingAspect;
import com.intellij.refactoring.rename.inplace.InplaceRefactoring;
import com.intellij.rt.execution.junit.FileComparisonFailure;
import com.intellij.testFramework.exceptionCases.AbstractExceptionCase;
import com.intellij.testFramework.fixtures.IdeaTestExecutionPolicy;
import com.intellij.util.*;
import com.intellij.util.containers.ContainerUtil;
import com.intellij.util.containers.PeekableIterator;
import com.intellij.util.containers.PeekableIteratorWrapper;
import com.intellij.util.indexing.FileBasedIndex;
import com.intellij.util.indexing.FileBasedIndexImpl;
import com.intellij.util.lang.CompoundRuntimeException;
import com.intellij.util.ui.UIUtil;
import gnu.trove.Equality;
import gnu.trove.THashSet;
import junit.framework.AssertionFailedError;
import junit.framework.TestCase;
import org.jdom.Element;
import org.jetbrains.annotations.Contract;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.junit.Assert;
import org.junit.ComparisonFailure;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.*;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
/**
* @author peter
*/
public abstract class UsefulTestCase extends TestCase {
public static final boolean IS_UNDER_TEAMCITY = System.getenv("TEAMCITY_VERSION") != null;
public static final String TEMP_DIR_MARKER = "unitTest_";
public static final boolean OVERWRITE_TESTDATA = Boolean.getBoolean("idea.tests.overwrite.data");
private static final String ORIGINAL_TEMP_DIR = FileUtil.getTempDirectory();
private static final Map<String, Long> TOTAL_SETUP_COST_MILLIS = new HashMap<>();
private static final Map<String, Long> TOTAL_TEARDOWN_COST_MILLIS = new HashMap<>();
static {
IdeaForkJoinWorkerThreadFactory.setupPoisonFactory();
Logger.setFactory(TestLoggerFactory.class);
}
protected static final Logger LOG = Logger.getInstance(UsefulTestCase.class);
@NotNull
private final Disposable myTestRootDisposable = new TestDisposable();
static Path ourPathToKeep;
private final List<String> myPathsToKeep = new ArrayList<>();
private String myTempDir;
private static final String DEFAULT_SETTINGS_EXTERNALIZED;
private static final CodeInsightSettings defaultSettings = new CodeInsightSettings();
static {
// Radar #5755208: Command line Java applications need a way to launch without a Dock icon.
System.setProperty("apple.awt.UIElement", "true");
try {
Element oldS = new Element("temp");
defaultSettings.writeExternal(oldS);
DEFAULT_SETTINGS_EXTERNALIZED = JDOMUtil.writeElement(oldS);
}
catch (Exception e) {
throw new RuntimeException(e);
}
}
/**
* Pass here the exception you want to be thrown first
* E.g.<pre>
* {@code
* void tearDown() {
* try {
* doTearDowns();
* }
* catch(Exception e) {
* addSuppressedException(e);
* }
* finally {
* super.tearDown();
* }
* }
* }
* </pre>
*
*/
protected void addSuppressedException(@NotNull Throwable e) {
List<Throwable> list = mySuppressedExceptions;
if (list == null) {
mySuppressedExceptions = list = new SmartList<>();
}
list.add(e);
}
private List<Throwable> mySuppressedExceptions;
public UsefulTestCase() {
}
public UsefulTestCase(@NotNull String name) {
super(name);
}
protected boolean shouldContainTempFiles() {
return true;
}
@Override
protected void setUp() throws Exception {
super.setUp();
if (shouldContainTempFiles()) {
IdeaTestExecutionPolicy policy = IdeaTestExecutionPolicy.current();
String testName = null;
if (policy != null) {
testName = policy.getPerTestTempDirName();
}
if (testName == null) {
testName = FileUtil.sanitizeFileName(getTestName(true));
}
testName = new File(testName).getName(); // in case the test name contains file separators
myTempDir = FileUtil.createTempDirectory(TEMP_DIR_MARKER + testName, "", false).getPath();
FileUtil.resetCanonicalTempPathCache(myTempDir);
}
boolean isStressTest = isStressTest();
ApplicationInfoImpl.setInStressTest(isStressTest);
if (isPerformanceTest()) {
Timings.getStatistics();
}
// turn off Disposer debugging for stress tests
Disposer.setDebugMode(!isStressTest);
if (isIconRequired()) {
// ensure that IconLoader will use dummy empty icon
IconLoader.deactivate();
//IconManager.activate();
}
}
protected boolean isIconRequired() {
return false;
}
@Override
protected void tearDown() throws Exception {
// don't use method references here to make stack trace reading easier
//noinspection Convert2MethodRef
new RunAll(
() -> {
if (isIconRequired()) {
//IconManager.deactivate();
}
},
() -> disposeRootDisposable(),
() -> cleanupSwingDataStructures(),
() -> cleanupDeleteOnExitHookList(),
() -> Disposer.setDebugMode(true),
() -> {
if (shouldContainTempFiles()) {
FileUtil.resetCanonicalTempPathCache(ORIGINAL_TEMP_DIR);
if (hasTmpFilesToKeep()) {
File[] files = new File(myTempDir).listFiles();
if (files != null) {
for (File file : files) {
if (!shouldKeepTmpFile(file)) {
FileUtil.delete(file);
}
}
}
}
else {
FileUtil.delete(new File(myTempDir));
}
}
},
() -> UIUtil.removeLeakingAppleListeners(),
() -> waitForAppLeakingThreads(10, TimeUnit.SECONDS)
).run(ObjectUtils.notNull(mySuppressedExceptions, Collections.emptyList()));
}
protected final void disposeRootDisposable() {
Disposer.dispose(getTestRootDisposable());
}
protected void addTmpFileToKeep(@NotNull File file) {
myPathsToKeep.add(file.getPath());
}
private boolean hasTmpFilesToKeep() {
return ourPathToKeep != null && FileUtil.isAncestor(myTempDir, ourPathToKeep.toString(), false) || !myPathsToKeep.isEmpty();
}
private boolean shouldKeepTmpFile(@NotNull File file) {
String path = file.getPath();
if (ourPathToKeep != null && FileUtil.pathsEqual(path, ourPathToKeep.toString())) return true;
for (String pathToKeep : myPathsToKeep) {
if (FileUtil.pathsEqual(path, pathToKeep)) return true;
}
return false;
}
private static final Set<String> DELETE_ON_EXIT_HOOK_DOT_FILES;
private static final Class<?> DELETE_ON_EXIT_HOOK_CLASS;
static {
Class<?> aClass;
try {
aClass = Class.forName("java.io.DeleteOnExitHook");
}
catch (Exception e) {
throw new RuntimeException(e);
}
@SuppressWarnings("unchecked") Set<String> files = ReflectionUtil.getStaticFieldValue(aClass, Set.class, "files");
DELETE_ON_EXIT_HOOK_CLASS = aClass;
DELETE_ON_EXIT_HOOK_DOT_FILES = files;
}
@SuppressWarnings("SynchronizeOnThis")
private static void cleanupDeleteOnExitHookList() {
// try to reduce file set retained by java.io.DeleteOnExitHook
List<String> list;
synchronized (DELETE_ON_EXIT_HOOK_CLASS) {
if (DELETE_ON_EXIT_HOOK_DOT_FILES.isEmpty()) return;
list = new ArrayList<>(DELETE_ON_EXIT_HOOK_DOT_FILES);
}
for (int i = list.size() - 1; i >= 0; i--) {
String path = list.get(i);
File file = new File(path);
if (file.delete() || !file.exists()) {
synchronized (DELETE_ON_EXIT_HOOK_CLASS) {
DELETE_ON_EXIT_HOOK_DOT_FILES.remove(path);
}
}
}
}
@SuppressWarnings("ConstantConditions")
private static void cleanupSwingDataStructures() throws Exception {
Object manager = ReflectionUtil.getDeclaredMethod(Class.forName("javax.swing.KeyboardManager"), "getCurrentManager").invoke(null);
Map componentKeyStrokeMap = ReflectionUtil.getField(manager.getClass(), manager, Hashtable.class, "componentKeyStrokeMap");
componentKeyStrokeMap.clear();
Map containerMap = ReflectionUtil.getField(manager.getClass(), manager, Hashtable.class, "containerMap");
containerMap.clear();
}
static void doCheckForSettingsDamage(@NotNull CodeStyleSettings oldCodeStyleSettings, @NotNull CodeStyleSettings currentCodeStyleSettings) {
final CodeInsightSettings settings = CodeInsightSettings.getInstance();
// don't use method references here to make stack trace reading easier
//noinspection Convert2MethodRef
new RunAll()
.append(() -> {
try {
checkCodeInsightSettingsEqual(defaultSettings, settings);
}
catch (AssertionError error) {
CodeInsightSettings clean = new CodeInsightSettings();
for (Field field : clean.getClass().getFields()) {
try {
ReflectionUtil.copyFieldValue(clean, settings, field);
}
catch (Exception ignored) {
}
}
throw error;
}
})
.append(() -> {
currentCodeStyleSettings.getIndentOptions(StdFileTypes.JAVA);
try {
checkCodeStyleSettingsEqual(oldCodeStyleSettings, currentCodeStyleSettings);
}
finally {
currentCodeStyleSettings.clearCodeStyleSettings();
}
})
.append(() -> InplaceRefactoring.checkCleared())
.append(() -> StartMarkAction.checkCleared())
.run();
}
@NotNull
public Disposable getTestRootDisposable() {
return myTestRootDisposable;
}
@Override
protected void runTest() throws Throwable {
final Throwable[] throwables = new Throwable[1];
Runnable runnable = () -> {
try {
TestLoggerFactory.onTestStarted();
super.runTest();
TestLoggerFactory.onTestFinished(true);
}
catch (InvocationTargetException e) {
TestLoggerFactory.onTestFinished(false);
e.fillInStackTrace();
throwables[0] = e.getTargetException();
}
catch (IllegalAccessException e) {
TestLoggerFactory.onTestFinished(false);
e.fillInStackTrace();
throwables[0] = e;
}
catch (Throwable e) {
TestLoggerFactory.onTestFinished(false);
throwables[0] = e;
}
};
invokeTestRunnable(runnable);
if (throwables[0] != null) {
throw throwables[0];
}
}
protected boolean shouldRunTest() {
IdeaTestExecutionPolicy policy = IdeaTestExecutionPolicy.current();
if (policy != null && !policy.canRun(getClass())) {
return false;
}
return TestFrameworkUtil.canRunTest(getClass());
}
protected void invokeTestRunnable(@NotNull Runnable runnable) throws Exception {
if (runInDispatchThread()) {
EdtTestUtilKt.runInEdtAndWait(() -> {
runnable.run();
return null;
});
}
else {
runnable.run();
}
}
protected void defaultRunBare() throws Throwable {
Throwable exception = null;
try {
long setupStart = System.nanoTime();
setUp();
long setupCost = (System.nanoTime() - setupStart) / 1000000;
logPerClassCost(setupCost, TOTAL_SETUP_COST_MILLIS);
runTest();
}
catch (Throwable running) {
exception = running;
}
finally {
try {
long teardownStart = System.nanoTime();
tearDown();
long teardownCost = (System.nanoTime() - teardownStart) / 1000000;
logPerClassCost(teardownCost, TOTAL_TEARDOWN_COST_MILLIS);
}
catch (Throwable tearingDown) {
if (exception == null) exception = tearingDown;
else exception = new CompoundRuntimeException(Arrays.asList(exception, tearingDown));
}
}
if (exception != null) throw exception;
}
/**
* Logs the setup cost grouped by test fixture class (superclass of the current test class).
*
* @param cost setup cost in milliseconds
*/
private void logPerClassCost(long cost, @NotNull Map<String, Long> costMap) {
Class<?> superclass = getClass().getSuperclass();
Long oldCost = costMap.get(superclass.getName());
long newCost = oldCost == null ? cost : oldCost + cost;
costMap.put(superclass.getName(), newCost);
}
@SuppressWarnings("UseOfSystemOutOrSystemErr")
static void logSetupTeardownCosts() {
System.out.println("Setup costs");
long totalSetup = 0;
for (Map.Entry<String, Long> entry : TOTAL_SETUP_COST_MILLIS.entrySet()) {
System.out.println(String.format(" %s: %d ms", entry.getKey(), entry.getValue()));
totalSetup += entry.getValue();
}
System.out.println("Teardown costs");
long totalTeardown = 0;
for (Map.Entry<String, Long> entry : TOTAL_TEARDOWN_COST_MILLIS.entrySet()) {
System.out.println(String.format(" %s: %d ms", entry.getKey(), entry.getValue()));
totalTeardown += entry.getValue();
}
System.out.println(String.format("Total overhead: setup %d ms, teardown %d ms", totalSetup, totalTeardown));
System.out.println(String.format("##teamcity[buildStatisticValue key='ideaTests.totalSetupMs' value='%d']", totalSetup));
System.out.println(String.format("##teamcity[buildStatisticValue key='ideaTests.totalTeardownMs' value='%d']", totalTeardown));
}
@Override
public void runBare() throws Throwable {
if (!shouldRunTest()) return;
if (runInDispatchThread()) {
TestRunnerUtil.replaceIdeEventQueueSafely();
EdtTestUtil.runInEdtAndWait(this::defaultRunBare);
}
else {
defaultRunBare();
}
}
protected boolean runInDispatchThread() {
IdeaTestExecutionPolicy policy = IdeaTestExecutionPolicy.current();
if (policy != null) {
return policy.runInDispatchThread();
}
return true;
}
/**
* A shorter name for runInEdtAndWait.
*/
protected void edt(@NotNull ThrowableRunnable<Throwable> runnable) {
EdtTestUtil.runInEdtAndWait(runnable);
}
@NotNull
public static String toString(@NotNull Iterable<?> collection) {
if (!collection.iterator().hasNext()) {
return "<empty>";
}
final StringBuilder builder = new StringBuilder();
for (final Object o : collection) {
if (o instanceof THashSet) {
builder.append(new TreeSet<>((THashSet<?>)o));
}
else {
builder.append(o);
}
builder.append('\n');
}
return builder.toString();
}
@SafeVarargs
public static <T> void assertOrderedEquals(@NotNull T[] actual, @NotNull T... expected) {
assertOrderedEquals(Arrays.asList(actual), expected);
}
@SafeVarargs
public static <T> void assertOrderedEquals(@NotNull Iterable<? extends T> actual, @NotNull T... expected) {
assertOrderedEquals("", actual, expected);
}
public static void assertOrderedEquals(@NotNull byte[] actual, @NotNull byte[] expected) {
assertEquals(expected.length, actual.length);
for (int i = 0; i < actual.length; i++) {
byte a = actual[i];
byte e = expected[i];
assertEquals("not equals at index: "+i, e, a);
}
}
public static void assertOrderedEquals(@NotNull int[] actual, @NotNull int[] expected) {
if (actual.length != expected.length) {
fail("Expected size: "+expected.length+"; actual: "+actual.length+"\nexpected: "+Arrays.toString(expected)+"\nactual : "+Arrays.toString(actual));
}
for (int i = 0; i < actual.length; i++) {
int a = actual[i];
int e = expected[i];
assertEquals("not equals at index: "+i, e, a);
}
}
@SafeVarargs
public static <T> void assertOrderedEquals(@NotNull String errorMsg, @NotNull Iterable<? extends T> actual, @NotNull T... expected) {
assertOrderedEquals(errorMsg, actual, Arrays.asList(expected));
}
public static <T> void assertOrderedEquals(@NotNull Iterable<? extends T> actual, @NotNull Iterable<? extends T> expected) {
assertOrderedEquals("", actual, expected);
}
public static <T> void assertOrderedEquals(@NotNull String errorMsg,
@NotNull Iterable<? extends T> actual,
@NotNull Iterable<? extends T> expected) {
//noinspection unchecked
assertOrderedEquals(errorMsg, actual, expected, Equality.CANONICAL);
}
public static <T> void assertOrderedEquals(@NotNull String errorMsg,
@NotNull Iterable<? extends T> actual,
@NotNull Iterable<? extends T> expected,
@NotNull Equality<? super T> comparator) {
if (!equals(actual, expected, comparator)) {
String expectedString = toString(expected);
String actualString = toString(actual);
Assert.assertEquals(errorMsg, expectedString, actualString);
Assert.fail("Warning! 'toString' does not reflect the difference.\nExpected: " + expectedString + "\nActual: " + actualString);
}
}
private static <T> boolean equals(@NotNull Iterable<? extends T> a1,
@NotNull Iterable<? extends T> a2,
@NotNull Equality<? super T> comparator) {
Iterator<? extends T> it1 = a1.iterator();
Iterator<? extends T> it2 = a2.iterator();
while (it1.hasNext() || it2.hasNext()) {
if (!it1.hasNext() || !it2.hasNext()) return false;
if (!comparator.equals(it1.next(), it2.next())) return false;
}
return true;
}
@SafeVarargs
public static <T> void assertOrderedCollection(@NotNull T[] collection, @NotNull Consumer<T>... checkers) {
assertOrderedCollection(Arrays.asList(collection), checkers);
}
/**
* Checks {@code actual} contains same elements (in {@link #equals(Object)} meaning) as {@code expected} irrespective of their order
*/
@SafeVarargs
public static <T> void assertSameElements(@NotNull T[] actual, @NotNull T... expected) {
assertSameElements(Arrays.asList(actual), expected);
}
/**
* Checks {@code actual} contains same elements (in {@link #equals(Object)} meaning) as {@code expected} irrespective of their order
*/
@SafeVarargs
public static <T> void assertSameElements(@NotNull Collection<? extends T> actual, @NotNull T... expected) {
assertSameElements(actual, Arrays.asList(expected));
}
/**
* Checks {@code actual} contains same elements (in {@link #equals(Object)} meaning) as {@code expected} irrespective of their order
*/
public static <T> void assertSameElements(@NotNull Collection<? extends T> actual, @NotNull Collection<? extends T> expected) {
assertSameElements("", actual, expected);
}
/**
* Checks {@code actual} contains same elements (in {@link #equals(Object)} meaning) as {@code expected} irrespective of their order
*/
public static <T> void assertSameElements(@NotNull String message, @NotNull Collection<? extends T> actual, @NotNull Collection<? extends T> expected) {
if (actual.size() != expected.size() || !new HashSet<>(expected).equals(new HashSet<T>(actual))) {
Assert.assertEquals(message, new HashSet<>(expected), new HashSet<T>(actual));
}
}
@SafeVarargs
public static <T> void assertContainsOrdered(@NotNull Collection<? extends T> collection, @NotNull T... expected) {
assertContainsOrdered(collection, Arrays.asList(expected));
}
public static <T> void assertContainsOrdered(@NotNull Collection<? extends T> collection, @NotNull Collection<? extends T> expected) {
PeekableIterator<T> expectedIt = new PeekableIteratorWrapper<>(expected.iterator());
PeekableIterator<T> actualIt = new PeekableIteratorWrapper<>(collection.iterator());
while (actualIt.hasNext() && expectedIt.hasNext()) {
T expectedElem = expectedIt.peek();
T actualElem = actualIt.peek();
if (expectedElem.equals(actualElem)) {
expectedIt.next();
}
actualIt.next();
}
if (expectedIt.hasNext()) {
throw new ComparisonFailure("", toString(expected), toString(collection));
}
}
@SafeVarargs
public static <T> void assertContainsElements(@NotNull Collection<? extends T> collection, @NotNull T... expected) {
assertContainsElements(collection, Arrays.asList(expected));
}
public static <T> void assertContainsElements(@NotNull Collection<? extends T> collection, @NotNull Collection<? extends T> expected) {
ArrayList<T> copy = new ArrayList<>(collection);
copy.retainAll(expected);
assertSameElements(toString(collection), copy, expected);
}
@NotNull
public static String toString(@NotNull Object[] collection, @NotNull String separator) {
return toString(Arrays.asList(collection), separator);
}
@SafeVarargs
public static <T> void assertDoesntContain(@NotNull Collection<? extends T> collection, @NotNull T... notExpected) {
assertDoesntContain(collection, Arrays.asList(notExpected));
}
public static <T> void assertDoesntContain(@NotNull Collection<? extends T> collection, @NotNull Collection<? extends T> notExpected) {
ArrayList<T> expected = new ArrayList<>(collection);
expected.removeAll(notExpected);
assertSameElements(collection, expected);
}
@NotNull
public static String toString(@NotNull Collection<?> collection, @NotNull String separator) {
List<String> list = ContainerUtil.map2List(collection, String::valueOf);
Collections.sort(list);
StringBuilder builder = new StringBuilder();
boolean flag = false;
for (final String o : list) {
if (flag) {
builder.append(separator);
}
builder.append(o);
flag = true;
}
return builder.toString();
}
@SafeVarargs
public static <T> void assertOrderedCollection(@NotNull Collection<? extends T> collection, @NotNull Consumer<T>... checkers) {
if (collection.size() != checkers.length) {
Assert.fail(toString(collection));
}
int i = 0;
for (final T actual : collection) {
try {
checkers[i].consume(actual);
}
catch (AssertionFailedError e) {
//noinspection UseOfSystemOutOrSystemErr
System.out.println(i + ": " + actual);
throw e;
}
i++;
}
}
@SafeVarargs
public static <T> void assertUnorderedCollection(@NotNull T[] collection, @NotNull Consumer<T>... checkers) {
assertUnorderedCollection(Arrays.asList(collection), checkers);
}
@SafeVarargs
public static <T> void assertUnorderedCollection(@NotNull Collection<? extends T> collection, @NotNull Consumer<T>... checkers) {
if (collection.size() != checkers.length) {
Assert.fail(toString(collection));
}
Set<Consumer<T>> checkerSet = new HashSet<>(Arrays.asList(checkers));
int i = 0;
Throwable lastError = null;
for (final T actual : collection) {
boolean flag = true;
for (final Consumer<T> condition : checkerSet) {
Throwable error = accepts(condition, actual);
if (error == null) {
checkerSet.remove(condition);
flag = false;
break;
}
else {
lastError = error;
}
}
if (flag) {
//noinspection ConstantConditions,CallToPrintStackTrace
lastError.printStackTrace();
Assert.fail("Incorrect element(" + i + "): " + actual);
}
i++;
}
}
private static <T> Throwable accepts(@NotNull Consumer<? super T> condition, final T actual) {
try {
condition.consume(actual);
return null;
}
catch (Throwable e) {
return e;
}
}
@Contract("null, _ -> fail")
@NotNull
public static <T> T assertInstanceOf(Object o, @NotNull Class<T> aClass) {
Assert.assertNotNull("Expected instance of: " + aClass.getName() + " actual: " + null, o);
Assert.assertTrue("Expected instance of: " + aClass.getName() + " actual: " + o.getClass().getName(), aClass.isInstance(o));
@SuppressWarnings("unchecked") T t = (T)o;
return t;
}
public static <T> T assertOneElement(@NotNull Collection<? extends T> collection) {
Iterator<? extends T> iterator = collection.iterator();
String toString = toString(collection);
Assert.assertTrue(toString, iterator.hasNext());
T t = iterator.next();
Assert.assertFalse(toString, iterator.hasNext());
return t;
}
public static <T> T assertOneElement(@NotNull T[] ts) {
Assert.assertEquals(Arrays.asList(ts).toString(), 1, ts.length);
return ts[0];
}
@SafeVarargs
public static <T> void assertOneOf(T value, @NotNull T... values) {
for (T v : values) {
if (Objects.equals(value, v)) {
return;
}
}
Assert.fail(value + " should be equal to one of " + Arrays.toString(values));
}
public static void printThreadDump() {
PerformanceWatcher.dumpThreadsToConsole("Thread dump:");
}
public static void assertEmpty(@NotNull Object[] array) {
assertOrderedEquals(array);
}
public static void assertNotEmpty(final Collection<?> collection) {
assertNotNull(collection);
assertFalse(collection.isEmpty());
}
public static void assertEmpty(@NotNull Collection<?> collection) {
assertEmpty(collection.toString(), collection);
}
public static void assertNullOrEmpty(@Nullable Collection<?> collection) {
if (collection == null) return;
assertEmpty("", collection);
}
public static void assertEmpty(final String s) {
assertTrue(s, StringUtil.isEmpty(s));
}
public static <T> void assertEmpty(@NotNull String errorMsg, @NotNull Collection<? extends T> collection) {
assertOrderedEquals(errorMsg, collection, Collections.emptyList());
}
public static void assertSize(int expectedSize, @NotNull Object[] array) {
if (array.length != expectedSize) {
assertEquals(toString(Arrays.asList(array)), expectedSize, array.length);
}
}
public static void assertSize(int expectedSize, @NotNull Collection<?> c) {
if (c.size() != expectedSize) {
assertEquals(toString(c), expectedSize, c.size());
}
}
@NotNull
protected <T extends Disposable> T disposeOnTearDown(@NotNull T disposable) {
Disposer.register(getTestRootDisposable(), disposable);
return disposable;
}
public static void assertSameLines(@NotNull String expected, @NotNull String actual) {
assertSameLines(null, expected, actual);
}
public static void assertSameLines(@Nullable String message, @NotNull String expected, @NotNull String actual) {
String expectedText = StringUtil.convertLineSeparators(expected.trim());
String actualText = StringUtil.convertLineSeparators(actual.trim());
Assert.assertEquals(message, expectedText, actualText);
}
public static void assertExists(@NotNull File file){
assertTrue("File should exist " + file, file.exists());
}
public static void assertDoesntExist(@NotNull File file){
assertFalse("File should not exist " + file, file.exists());
}
@NotNull
protected String getTestName(boolean lowercaseFirstLetter) {
return getTestName(getName(), lowercaseFirstLetter);
}
@NotNull
public static String getTestName(@Nullable String name, boolean lowercaseFirstLetter) {
return name == null ? "" : PlatformTestUtil.getTestName(name, lowercaseFirstLetter);
}
@NotNull
protected String getTestDirectoryName() {
final String testName = getTestName(true);
return testName.replaceAll("_.*", "");
}
public static void assertSameLinesWithFile(@NotNull String filePath, @NotNull String actualText) {
assertSameLinesWithFile(filePath, actualText, true);
}
public static void assertSameLinesWithFile(@NotNull String filePath,
@NotNull String actualText,
@NotNull Supplier<String> messageProducer) {
assertSameLinesWithFile(filePath, actualText, true, messageProducer);
}
public static void assertSameLinesWithFile(@NotNull String filePath, @NotNull String actualText, boolean trimBeforeComparing) {
assertSameLinesWithFile(filePath, actualText, trimBeforeComparing, null);
}
public static void assertSameLinesWithFile(@NotNull String filePath,
@NotNull String actualText,
boolean trimBeforeComparing,
@Nullable Supplier<String> messageProducer) {
String fileText;
try {
if (OVERWRITE_TESTDATA) {
VfsTestUtil.overwriteTestData(filePath, actualText);
//noinspection UseOfSystemOutOrSystemErr
System.out.println("File " + filePath + " created.");
}
fileText = FileUtil.loadFile(new File(filePath), StandardCharsets.UTF_8);
}
catch (FileNotFoundException e) {
VfsTestUtil.overwriteTestData(filePath, actualText);
throw new AssertionFailedError("No output text found. File " + filePath + " created.");
}
catch (IOException e) {
throw new RuntimeException(e);
}
String expected = StringUtil.convertLineSeparators(trimBeforeComparing ? fileText.trim() : fileText);
String actual = StringUtil.convertLineSeparators(trimBeforeComparing ? actualText.trim() : actualText);
if (!Comparing.equal(expected, actual)) {
throw new FileComparisonFailure(messageProducer == null ? null : messageProducer.get(), expected, actual, filePath);
}
}
protected static void clearFields(@NotNull Object test) throws IllegalAccessException {
Class<?> aClass = test.getClass();
while (aClass != null) {
clearDeclaredFields(test, aClass);
aClass = aClass.getSuperclass();
}
}
public static void clearDeclaredFields(@NotNull Object test, @NotNull Class<?> aClass) throws IllegalAccessException {
for (final Field field : aClass.getDeclaredFields()) {
final String name = field.getDeclaringClass().getName();
if (!name.startsWith("junit.framework.") && !name.startsWith("com.intellij.testFramework.")) {
final int modifiers = field.getModifiers();
if ((modifiers & Modifier.FINAL) == 0 && (modifiers & Modifier.STATIC) == 0 && !field.getType().isPrimitive()) {
field.setAccessible(true);
field.set(test, null);
}
}
}
}
private static void checkCodeStyleSettingsEqual(@NotNull CodeStyleSettings expected, @NotNull CodeStyleSettings settings) {
if (!expected.equals(settings)) {
Element oldS = new Element("temp");
expected.writeExternal(oldS);
Element newS = new Element("temp");
settings.writeExternal(newS);
String newString = JDOMUtil.writeElement(newS);
String oldString = JDOMUtil.writeElement(oldS);
Assert.assertEquals("Code style settings damaged", oldString, newString);
}
}
private static void checkCodeInsightSettingsEqual(@NotNull CodeInsightSettings oldSettings, @NotNull CodeInsightSettings settings) {
if (!oldSettings.equals(settings)) {
Element newS = new Element("temp");
settings.writeExternal(newS);
Assert.assertEquals("Code insight settings damaged", DEFAULT_SETTINGS_EXTERNALIZED, JDOMUtil.writeElement(newS));
}
}
public boolean isPerformanceTest() {
String testName = getName();
String className = getClass().getSimpleName();
return TestFrameworkUtil.isPerformanceTest(testName, className);
}
/**
* @return true for a test which performs A LOT of computations.
* Such a test should typically avoid performing expensive checks, e.g. complex data structure consistency validations.
* If you want your test to be treated as "Stress", please mention one of these words in its name: "Stress", "Slow".
* For example: {@code public void testStressPSIFromDifferentThreads()}
*/
public boolean isStressTest() {
return isStressTest(getName(), getClass().getName());
}
private static boolean isStressTest(String testName, String className) {
return TestFrameworkUtil.isPerformanceTest(testName, className) ||
containsStressWords(testName) ||
containsStressWords(className);
}
private static boolean containsStressWords(@Nullable String name) {
return name != null && (name.contains("Stress") || name.contains("Slow"));
}
public static void doPostponedFormatting(@NotNull Project project) {
DocumentUtil.writeInRunUndoTransparentAction(() -> {
PsiDocumentManager.getInstance(project).commitAllDocuments();
PostprocessReformattingAspect.getInstance(project).doPostponedFormatting();
});
}
/**
* Checks that code block throw corresponding exception.
*
* @param exceptionCase Block annotated with some exception type
*/
protected void assertException(@NotNull AbstractExceptionCase<?> exceptionCase) {
assertException(exceptionCase, null);
}
/**
* Checks that the code block throws the corresponding exception with the expected error message.
* If the expected error message is null, it will not be checked.
*
* @param exceptionCase Block annotated with some exception type
* @param expectedErrorMsg expected error message
*/
protected void assertException(@NotNull AbstractExceptionCase exceptionCase, @Nullable String expectedErrorMsg) {
//noinspection unchecked
assertExceptionOccurred(true, exceptionCase, expectedErrorMsg);
}
/**
* Checks that the code block throws an exception of the specified class.
*
* @param exceptionClass Expected exception type
* @param runnable Block annotated with some exception type
*/
public static <T extends Throwable> void assertThrows(@NotNull Class<? extends Throwable> exceptionClass,
@NotNull ThrowableRunnable<T> runnable) {
assertThrows(exceptionClass, null, runnable);
}
/**
   * Checks that the code block throws an exception of the specified class with the expected error message.
   * If the expected error message is null, it is not checked.
*
* @param exceptionClass Expected exception type
   * @param expectedErrorMsg expected error message, if any
* @param runnable Block annotated with some exception type
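   * <p>Illustrative usage (the exception type and lambda body are hypothetical):
   * {@code assertThrows(IOException.class, () -> Files.readAllBytes(missingPath));}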
*/
@SuppressWarnings({"unchecked", "SameParameterValue"})
public static <T extends Throwable> void assertThrows(@NotNull Class<? extends Throwable> exceptionClass,
@Nullable String expectedErrorMsg,
@NotNull ThrowableRunnable<T> runnable) {
assertExceptionOccurred(true, new AbstractExceptionCase() {
@Override
public Class<Throwable> getExpectedExceptionClass() {
return (Class<Throwable>)exceptionClass;
}
@Override
public void tryClosure() throws Throwable {
runnable.run();
}
}, expectedErrorMsg);
}
/**
   * Checks that the code block doesn't throw the corresponding exception.
*
* @param exceptionCase Block annotated with some exception type
*/
protected <T extends Throwable> void assertNoException(@NotNull AbstractExceptionCase<T> exceptionCase) throws T {
assertExceptionOccurred(false, exceptionCase, null);
}
protected void assertNoThrowable(@NotNull Runnable closure) {
String throwableName = null;
try {
closure.run();
}
catch (Throwable thr) {
throwableName = thr.getClass().getName();
}
assertNull(throwableName);
}
private static <T extends Throwable> void assertExceptionOccurred(boolean shouldOccur,
@NotNull AbstractExceptionCase<T> exceptionCase,
String expectedErrorMsg) throws T {
boolean wasThrown = false;
try {
exceptionCase.tryClosure();
}
catch (Throwable e) {
Throwable cause = e;
while (cause instanceof LoggedErrorProcessor.TestLoggerAssertionError && cause.getCause() != null) {
cause = cause.getCause();
}
if (shouldOccur) {
wasThrown = true;
final String errorMessage = exceptionCase.getAssertionErrorMessage();
assertEquals(errorMessage, exceptionCase.getExpectedExceptionClass(), cause.getClass());
if (expectedErrorMsg != null) {
assertEquals("Compare error messages", expectedErrorMsg, cause.getMessage());
}
}
else if (exceptionCase.getExpectedExceptionClass().equals(cause.getClass())) {
wasThrown = true;
//noinspection UseOfSystemOutOrSystemErr
System.out.println();
//noinspection UseOfSystemOutOrSystemErr
e.printStackTrace(System.out);
fail("Exception isn't expected here. Exception message: " + cause.getMessage());
}
else {
throw e;
}
}
finally {
if (shouldOccur && !wasThrown) {
fail(exceptionCase.getAssertionErrorMessage());
}
}
}
protected boolean annotatedWith(@NotNull Class<? extends Annotation> annotationClass) {
Class<?> aClass = getClass();
String methodName = "test" + getTestName(false);
boolean methodChecked = false;
while (aClass != null && aClass != Object.class) {
if (aClass.getAnnotation(annotationClass) != null) return true;
if (!methodChecked) {
Method method = ReflectionUtil.getDeclaredMethod(aClass, methodName);
if (method != null) {
if (method.getAnnotation(annotationClass) != null) return true;
methodChecked = true;
}
}
aClass = aClass.getSuperclass();
}
return false;
}
@NotNull
protected String getHomePath() {
return PathManager.getHomePath().replace(File.separatorChar, '/');
}
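  /**
   * Loads all children of {@code file} into the VFS and then performs a synchronous,
   * recursive refresh of the subtree.
   */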
public static void refreshRecursively(@NotNull VirtualFile file) {
VfsUtilCore.visitChildrenRecursively(file, new VirtualFileVisitor<Void>() {
@Override
public boolean visitFile(@NotNull VirtualFile file) {
file.getChildren();
return true;
}
});
file.refresh(false, true);
}
public static VirtualFile refreshAndFindFile(@NotNull final File file) {
return UIUtil.invokeAndWaitIfNeeded(() -> LocalFileSystem.getInstance().refreshAndFindFileByIoFile(file));
}
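  /**
   * Waits (up to the given timeout) for application-level background activity, i.e. pending VFS
   * events and queued document commits, to finish so it does not leak into the next test.
   */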
public static void waitForAppLeakingThreads(long timeout, @NotNull TimeUnit timeUnit) {
EdtTestUtil.runInEdtAndWait(() -> {
Application application = ApplicationManager.getApplication();
if (application != null && !application.isDisposed()) {
FileBasedIndexImpl index = (FileBasedIndexImpl)FileBasedIndex.getInstance();
if (index != null) index.waitForVfsEventsExecuted(timeout, timeUnit);
DocumentCommitThread commitThread = (DocumentCommitThread)ServiceManager.getService(DocumentCommitProcessor.class);
if (commitThread != null) {
commitThread.waitForAllCommits(timeout, timeUnit);
}
}
});
}
protected class TestDisposable implements Disposable {
private volatile boolean myDisposed;
public TestDisposable() {
}
@Override
public void dispose() {
myDisposed = true;
}
public boolean isDisposed() {
return myDisposed;
}
@Override
public String toString() {
String testName = getTestName(false);
return UsefulTestCase.this.getClass() + (StringUtil.isEmpty(testName) ? "" : ".test" + testName);
}
}
}
|
[
"\"TEAMCITY_VERSION\""
] |
[] |
[
"TEAMCITY_VERSION"
] |
[]
|
["TEAMCITY_VERSION"]
|
java
| 1 | 0 | |
test/e2e/e2e.go
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"os"
"testing"
"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/config"
"github.com/onsi/gomega"
"k8s.io/apiserver/pkg/util/logs"
// required
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/ingress-nginx/test/e2e/framework"
// tests to run
_ "k8s.io/ingress-nginx/test/e2e/annotations"
_ "k8s.io/ingress-nginx/test/e2e/dbg"
_ "k8s.io/ingress-nginx/test/e2e/defaultbackend"
_ "k8s.io/ingress-nginx/test/e2e/gracefulshutdown"
_ "k8s.io/ingress-nginx/test/e2e/loadbalance"
_ "k8s.io/ingress-nginx/test/e2e/lua"
_ "k8s.io/ingress-nginx/test/e2e/servicebackend"
_ "k8s.io/ingress-nginx/test/e2e/settings"
_ "k8s.io/ingress-nginx/test/e2e/ssl"
_ "k8s.io/ingress-nginx/test/e2e/status"
_ "k8s.io/ingress-nginx/test/e2e/tcpudp"
)
// RunE2ETests checks configuration parameters (specified through flags) and then runs
// E2E tests using the Ginkgo runner.
func RunE2ETests(t *testing.T) {
logs.InitLogs()
defer logs.FlushLogs()
gomega.RegisterFailHandler(ginkgo.Fail)
	// Skip [Flaky] and [Feature:...] tests unless a focus or skip pattern is explicitly requested.
if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
config.GinkgoConfig.SkipString = `\[Flaky\]|\[Feature:.+\]`
}
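	// Allow overriding the kubectl binary used by the test framework.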
if os.Getenv("KUBECTL_PATH") != "" {
framework.KubectlPath = os.Getenv("KUBECTL_PATH")
framework.Logf("Using kubectl path '%s'", framework.KubectlPath)
}
framework.Logf("Starting e2e run %q on Ginkgo node %d", framework.RunID, config.GinkgoConfig.ParallelNode)
ginkgo.RunSpecs(t, "nginx-ingress-controller e2e suite")
}
var _ = ginkgo.SynchronizedAfterSuite(func() {
// Run on all Ginkgo nodes
framework.Logf("Running AfterSuite actions on all nodes")
framework.RunCleanupActions()
}, func() {})
|
[
"\"KUBECTL_PATH\"",
"\"KUBECTL_PATH\""
] |
[] |
[
"KUBECTL_PATH"
] |
[]
|
["KUBECTL_PATH"]
|
go
| 1 | 0 | |
common/openecomp-common-configuration-management/openecomp-configuration-management-test/src/test/java/org/openecomp/config/test/CLITest.java
|
package org.openecomp.config.test;
import org.openecomp.config.Constants;
import org.openecomp.config.api.ConfigurationChangeListener;
import org.openecomp.config.api.ConfigurationManager;
import org.openecomp.config.util.ConfigTestConstant;
import org.openecomp.config.util.TestUtil;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import javax.management.JMX;
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.util.HashMap;
import java.util.Map;
import static org.openecomp.config.util.ConfigTestConstant.*;
/**
* Created by sheetalm on 10/18/2016.
* Scenario 17
* Verify Configuration Management System - Command Line Interface for query, update and list operations
*/
public class CLITest {
public final static String NAMESPACE = "CLI";
public final static String TENANT = "OPENECOMP";
private String updatedValue = "";
@Before
public void setUp() throws IOException {
String data = "{name:\"SCM\"}";
TestUtil.writeFile(data);
}
@Test
public void testCLIApi() throws Exception{
//Verify without fallback
Map<String, Object> input = new HashMap<>();
input.put("ImplClass", "org.openecomp.config.type.ConfigurationQuery");
input.put("tenant", TENANT);
input.put("namespace", NAMESPACE);
input.put("key", ConfigTestConstant.ARTIFACT_NAME_MAXLENGTH);
MBeanServerConnection mbsc = ManagementFactory.getPlatformMBeanServer();
ObjectName mbeanName = new ObjectName(Constants.MBEAN_NAME);
ConfigurationManager conf = JMX.newMBeanProxy(mbsc, mbeanName, org.openecomp.config.api.ConfigurationManager.class, true);
String maxLength = conf.getConfigurationValue(input);
Assert.assertEquals("14",maxLength);
conf.addConfigurationChangeListener(TENANT,NAMESPACE, ConfigTestConstant.ARTIFACT_NAME_MAXLENGTH, new CLIListener());
//Update maxlength
input.put("ImplClass", "org.openecomp.config.type.ConfigurationUpdate");
input.put("value", "24");
conf.updateConfigurationValue(input);
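        // Wait for the change notification to reach the listener; the configuration refresh interval is assumed to be about 30 seconds.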
Thread.sleep(35000);
Assert.assertEquals("24",updatedValue);
//Reset value and fetch updated value again
input.put("value", "");
input.put("ImplClass", "org.openecomp.config.type.ConfigurationQuery");
String updatedMaxLength = conf.getConfigurationValue(input);
Assert.assertEquals("24",updatedMaxLength);
Map<String, String> outputMap = conf.listConfiguration(input);
        for (Map.Entry<String, String> entry : outputMap.entrySet()) {
            System.out.println(entry.getKey() + " : " + entry.getValue());
        }
        validateCLIListConfig(outputMap);
}
private class CLIListener implements ConfigurationChangeListener {
@Override
public void notify(String key, Object oldValue, Object newValue) {
System.out.println("received notification::oldValue=="+oldValue+" newValue=="+newValue);
updatedValue = newValue.toString();
}
}
private void validateCLIListConfig(Map<String, String> outputMap ) {
Assert.assertEquals("@"+System.getProperty("user.home")+"/TestResources/GeneratorsList.json" , outputMap.get(ARTIFACT_JSON_SCHEMA));
Assert.assertEquals("appc,catalog", outputMap.get(ARTIFACT_CONSUMER));
Assert.assertEquals("6", outputMap.get(ARTIFACT_NAME_MINLENGTH));
Assert.assertEquals("true", outputMap.get(ARTIFACT_ENCODED));
Assert.assertEquals("24", outputMap.get(ARTIFACT_NAME_MAXLENGTH));
Assert.assertEquals("pdf,zip,xml,pdf,tgz,xls", outputMap.get(ARTIFACT_EXT));
Assert.assertEquals("Base64,MD5", outputMap.get(ARTIFACT_ENC));
Assert.assertEquals("@"+System.getenv("Path")+"/myschema.json", outputMap.get(ARTIFACT_XML_SCHEMA));
Assert.assertEquals("a-zA-Z_0-9", outputMap.get(ARTIFACT_NAME_UPPER));
Assert.assertEquals("/opt/spool,"+System.getProperty("user.home")+"/asdc", outputMap.get(ARTIFACT_LOC));
Assert.assertEquals("deleted,Deleted", outputMap.get(ARTIFACT_STATUS));
}
@After
public void tearDown() throws Exception {
TestUtil.cleanUp();
}
}
|
[
"\"Path\""
] |
[] |
[
"Path"
] |
[]
|
["Path"]
|
java
| 1 | 0 | |
bootstrap/start.go
|
package bootstrap
import (
"context"
"log"
"net"
"os"
proto "github.com/ISTE-SC-MANIT/megatreopuz-user/protos"
"github.com/ISTE-SC-MANIT/megatreopuz-user/user"
"github.com/joho/godotenv"
"google.golang.org/grpc"
)
// Start function starts up the server
func Start() {
// Load environment variables
err := godotenv.Load(".env")
if err != nil {
log.Fatal(err)
}
app, err := ConnectToFirebase()
if err != nil {
log.Fatalf("error initialising firebase app: %v", err)
}
client, err := app.Auth(context.Background())
if err != nil {
log.Fatalf("error connecting to firebase user: %v", err)
}
	mongo, err := ConnectToMongoDB()
	if err != nil {
		log.Fatalf("error connecting to MongoDB: %v", err)
	}
	log.Print(`Pinging MongoDB`)
// Test the mongoDB connection
err = mongo.Ping(context.Background(), nil)
if err != nil {
log.Fatal(err)
}
log.Print(`Pinged MongoDB successfully`)
// Connect to redis
// redisClient := redis.NewClient(&redis.Options{
// Addr: os.Getenv("REDIS_ADDRESS"),
// Password: os.Getenv("REDIS_PASSWORD"),
// })
// log.Print(`Connecting to Redis`)
// // Test the redis connection
// _, err = redisClient.Ping(context.Background()).Result()
// if err != nil {
// log.Fatalf("Ran into an error while connecting to Redis: %v", err.Error())
// }
// log.Print(`Pinged Redis successfully`)
// Start a tcp listener on given port
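	// Note: PORT must be a full listen address such as ":8080"; net.Listen rejects a bare port number without the leading colon.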
port := os.Getenv("PORT")
lis, err := net.Listen("tcp", port)
if err != nil {
log.Fatalf("Cannot listen on tcp port: %v. Error: %v", port, err.Error())
}
// Make a gRPC server
grpcServer := grpc.NewServer()
proto.RegisterUserServiceServer(grpcServer, &user.Server{
// RedisClient: redisClient,
MongoClient: mongo,
AuthClient: client,
})
log.Print("Listening on port ", port)
log.Print("Starting gRPC server")
// Attach gRPC server to the listener
err = grpcServer.Serve(lis)
if err != nil {
log.Fatalf("Could not start gRPC server. Error: %v", err.Error())
}
}
|
[
"\"REDIS_ADDRESS\"",
"\"REDIS_PASSWORD\"",
"\"PORT\""
] |
[] |
[
"PORT",
"REDIS_PASSWORD",
"REDIS_ADDRESS"
] |
[]
|
["PORT", "REDIS_PASSWORD", "REDIS_ADDRESS"]
|
go
| 3 | 0 | |
python/example_code/deeplens/deeplens_view_output.py
|
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# snippet-start:[deeplens.python.deeplens_view_output.lambda_function]
import os
import greengrasssdk
from threading import Timer
import time
import awscam
import cv2
from threading import Thread
# Create an AWS Greengrass core SDK client.
client = greengrasssdk.client('iot-data')
# The information exchanged between AWS IoT and the AWS Cloud has
# a topic and a message body.
# This is the topic that this code uses to send messages to the Cloud.
iotTopic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])
ret, frame = awscam.getLastFrame()
ret,jpeg = cv2.imencode('.jpg', frame)
Write_To_FIFO = True
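# Background thread that streams the latest annotated JPEG frame into a named pipe so the output can be viewed locally (e.g. with mplayer).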
class FIFO_Thread(Thread):
def __init__(self):
''' Constructor. '''
Thread.__init__(self)
def run(self):
fifo_path = "/tmp/results.mjpeg"
if not os.path.exists(fifo_path):
os.mkfifo(fifo_path)
f = open(fifo_path,'w')
client.publish(topic=iotTopic, payload="Opened Pipe")
while Write_To_FIFO:
try:
f.write(jpeg.tobytes())
            except IOError:
                continue
def greengrass_infinite_infer_run():
try:
modelPath = "/opt/awscam/artifacts/mxnet_deploy_ssd_resnet50_300_FP16_FUSED.xml"
modelType = "ssd"
input_width = 300
input_height = 300
max_threshold = 0.25
outMap = ({ 1: 'aeroplane', 2: 'bicycle', 3: 'bird', 4: 'boat',
5: 'bottle', 6: 'bus', 7 : 'car', 8 : 'cat',
                    9 : 'chair', 10 : 'cow', 11 : 'dining table',
12 : 'dog', 13 : 'horse', 14 : 'motorbike',
15 : 'person', 16 : 'pottedplant', 17 : 'sheep',
18 : 'sofa', 19 : 'train', 20 : 'tvmonitor' })
results_thread = FIFO_Thread()
results_thread.start()
# Send a starting message to the AWS IoT console.
client.publish(topic=iotTopic, payload="Object detection starts now")
# Load the model to the GPU (use {"GPU": 0} for CPU).
mcfg = {"GPU": 1}
model = awscam.Model(modelPath, mcfg)
client.publish(topic=iotTopic, payload="Model loaded")
ret, frame = awscam.getLastFrame()
        if not ret:
            raise Exception("Failed to get frame from the stream")
yscale = float(frame.shape[0]/input_height)
xscale = float(frame.shape[1]/input_width)
doInfer = True
while doInfer:
# Get a frame from the video stream.
ret, frame = awscam.getLastFrame()
# If you fail to get a frame, raise an exception.
            if not ret:
                raise Exception("Failed to get frame from the stream")
# Resize the frame to meet the model input requirement.
frameResize = cv2.resize(frame, (input_width, input_height))
# Run model inference on the resized frame.
inferOutput = model.doInference(frameResize)
# Output the result of inference to the fifo file so it can be viewed with mplayer.
parsed_results = model.parseResult(modelType, inferOutput)['ssd']
label = '{'
for obj in parsed_results:
if obj['prob'] > max_threshold:
xmin = int( xscale * obj['xmin'] ) + int((obj['xmin'] - input_width/2) + input_width/2)
ymin = int( yscale * obj['ymin'] )
xmax = int( xscale * obj['xmax'] ) + int((obj['xmax'] - input_width/2) + input_width/2)
ymax = int( yscale * obj['ymax'] )
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (255, 165, 20), 4)
label += '"{}": {:.2f},'.format(outMap[obj['label']], obj['prob'] )
label_show = "{}: {:.2f}%".format(outMap[obj['label']], obj['prob']*100 )
cv2.putText(frame, label_show, (xmin, ymin-15),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 165, 20), 4)
label += '"null": 0.0'
label += '}'
client.publish(topic=iotTopic, payload = label)
global jpeg
ret,jpeg = cv2.imencode('.jpg', frame)
except Exception as e:
msg = "Test failed: " + str(e)
client.publish(topic=iotTopic, payload=msg)
# Asynchronously schedule this function to be run again in 15 seconds.
Timer(15, greengrass_infinite_infer_run).start()
# Execute the function.
greengrass_infinite_infer_run()
# This is a dummy handler and will not be invoked.
# Instead, the code is executed in an infinite loop for our example.
def function_handler(event, context):
return
#snippet-end:[deeplens.python.deeplens_view_output.lambda_function]
#snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
#snippet-sourcedescription:[deeplens_view_output.py demonstrates how to create an inference Lambda function on an AWS DeepLens model.]
#snippet-keyword:[Python]
#snippet-keyword:[AWS GreenGrass SDK]
#snippet-keyword:[Code Sample]
#snippet-keyword:[AWS DeepLens]
#snippet-keyword:[AWS Lambda]
#snippet-service:[deeplens]
#snippet-sourcetype:[full-example]
#snippet-sourcedate:[2019-01-07]
#snippet-sourceauthor:[AWS]
|
[] |
[] |
[
"AWS_IOT_THING_NAME"
] |
[]
|
["AWS_IOT_THING_NAME"]
|
python
| 1 | 0 | |
references_bot.py
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
"""Simple Telegram bot that looks for keywords and return media references to a movie, TV show, etc.
Created using python-telegram-bot wrapper <https://github.com/python-telegram-bot>"""
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import os
import sys
import logging
import importlib
__author__ = "Eliel Parra"
__email__ = "[email protected]"
__credits__ = ["Eliel Parra", "python-telegram-bot"]
__licence__ = "MIT"
__maintainer__ = "Eliel Parra"
# Import configuration
config_file = os.getenv('BOT_CONFIG', 'default_config')
config = importlib.import_module(config_file)
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
# Define a few command handlers
def start(bot, update):
update.message.reply_text(config.start_message)
def help(bot, update):
update.message.reply_text(config.help_response)
def reference(bot, update):
reference = get_reference(update.message.text)
if reference:
update.message.reply_text(format_reference(reference))
def error(bot, update, error):
    logger.warning('Update "%s" caused error: "%s"' % (update, error))
# Data-processing functions
def get_reference(text):
for reference in config.references:
        if text.lower() == reference[0]:
return reference
return False
def format_reference(reference):
return reference[1] + "\n\n" + reference[2]
# Main
def main():
# Create the EventHandler
if config.api_key:
updater = Updater(config.api_key)
else:
logger.error('API key not provided, check your configuration file.')
sys.exit()
# Get the dispatcher to register handlers
dp = updater.dispatcher
# Answers to commands
dp.add_handler(CommandHandler('start', start))
dp.add_handler(CommandHandler(('ayuda','help'), help))
# Answers to non-commands
dp.add_handler(MessageHandler(Filters.text, reference))
# Log all errors
dp.add_error_handler(error)
# Start the bot
updater.start_polling()
# Run the bot until you press Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
updater.idle()
if __name__ == '__main__':
print(config.console_start_message)
main()
|
[] |
[] |
[
"BOT_CONFIG"
] |
[]
|
["BOT_CONFIG"]
|
python
| 1 | 0 | |
greaterwms/__init__.py
|
import mimetypes, os, requests, django
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'greaterwms.settings')
os.environ["DJANGO_ALLOW_ASYNC_UNSAFE"] = "true"
django.setup()
from django.conf import settings
mimetypes.add_type("text/css", ".css", True)
mimetypes.add_type("text/javascript", ".js", True)
try:
path = os.path.join(settings.BASE_DIR, 'utils/authorization.txt')
if os.path.exists(path) is False:
response = requests.post('https://wms.56yhz.com/area_v2/')
        with open(path, 'w') as f:
            f.write(str(eval(response.text).get('check_token')))
except Exception:
    pass
print('Welcome To GreaterWMS')
|
[] |
[] |
[
"DJANGO_ALLOW_ASYNC_UNSAFE"
] |
[]
|
["DJANGO_ALLOW_ASYNC_UNSAFE"]
|
python
| 1 | 0 | |
distsql/request_builder_test.go
|
// Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package distsql
import (
"os"
"testing"
. "github.com/pingcap/check"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/memory"
"github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/ranger"
"github.com/pingcap/tidb/util/stringutil"
"github.com/pingcap/tidb/util/testleak"
"github.com/pingcap/tipb/go-tipb"
)
var _ = Suite(&testSuite{})
func TestT(t *testing.T) {
CustomVerboseFlag = true
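	// The log_level environment variable (e.g. "debug") controls test logging verbosity; when unset, the default level is used.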
logLevel := os.Getenv("log_level")
logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
TestingT(t)
}
type testSuite struct {
sctx sessionctx.Context
}
func (s *testSuite) SetUpSuite(c *C) {
ctx := mock.NewContext()
ctx.GetSessionVars().StmtCtx = &stmtctx.StatementContext{
MemTracker: memory.NewTracker(stringutil.StringerStr("testSuite"), variable.DefTiDBMemQuotaDistSQL),
}
ctx.Store = &mock.Store{
Client: &mock.Client{
MockResponse: &mockResponse{
ctx: ctx,
batch: 1,
total: 2,
},
},
}
s.sctx = ctx
}
func (s *testSuite) TearDownSuite(c *C) {
}
func (s *testSuite) SetUpTest(c *C) {
testleak.BeforeTest()
ctx := s.sctx.(*mock.Context)
store := ctx.Store.(*mock.Store)
store.Client = &mock.Client{
MockResponse: &mockResponse{
ctx: ctx,
batch: 1,
total: 2,
},
}
}
func (s *testSuite) TearDownTest(c *C) {
testleak.AfterTest(c)()
}
type handleRange struct {
start int64
end int64
}
func (s *testSuite) getExpectedRanges(tid int64, hrs []*handleRange) []kv.KeyRange {
krs := make([]kv.KeyRange, 0, len(hrs))
for _, hr := range hrs {
low := codec.EncodeInt(nil, hr.start)
high := codec.EncodeInt(nil, hr.end)
high = []byte(kv.Key(high).PrefixNext())
startKey := tablecodec.EncodeRowKey(tid, low)
endKey := tablecodec.EncodeRowKey(tid, high)
krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
}
return krs
}
func (s *testSuite) TestTableHandlesToKVRanges(c *C) {
handles := []int64{0, 2, 3, 4, 5, 10, 11, 100, 9223372036854775806, 9223372036854775807}
// Build expected key ranges.
hrs := make([]*handleRange, 0, len(handles))
hrs = append(hrs, &handleRange{start: 0, end: 0})
hrs = append(hrs, &handleRange{start: 2, end: 5})
hrs = append(hrs, &handleRange{start: 10, end: 11})
hrs = append(hrs, &handleRange{start: 100, end: 100})
hrs = append(hrs, &handleRange{start: 9223372036854775806, end: 9223372036854775807})
// Build key ranges.
expect := s.getExpectedRanges(1, hrs)
actual := TableHandlesToKVRanges(1, handles)
// Compare key ranges and expected key ranges.
c.Assert(len(actual), Equals, len(expect))
for i := range actual {
c.Assert(actual[i].StartKey, DeepEquals, expect[i].StartKey)
c.Assert(actual[i].EndKey, DeepEquals, expect[i].EndKey)
}
}
func (s *testSuite) TestTableRangesToKVRanges(c *C) {
ranges := []*ranger.Range{
{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(2)},
},
{
LowVal: []types.Datum{types.NewIntDatum(2)},
HighVal: []types.Datum{types.NewIntDatum(4)},
LowExclude: true,
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(4)},
HighVal: []types.Datum{types.NewIntDatum(19)},
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(19)},
HighVal: []types.Datum{types.NewIntDatum(32)},
LowExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(34)},
HighVal: []types.Datum{types.NewIntDatum(34)},
LowExclude: true,
},
}
actual := TableRangesToKVRanges(13, ranges, nil)
expect := []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x13},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x21},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
},
}
for i := 0; i < len(actual); i++ {
c.Assert(actual[i], DeepEquals, expect[i])
}
}
func (s *testSuite) TestIndexRangesToKVRanges(c *C) {
ranges := []*ranger.Range{
{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(2)},
},
{
LowVal: []types.Datum{types.NewIntDatum(2)},
HighVal: []types.Datum{types.NewIntDatum(4)},
LowExclude: true,
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(4)},
HighVal: []types.Datum{types.NewIntDatum(19)},
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(19)},
HighVal: []types.Datum{types.NewIntDatum(32)},
LowExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(34)},
HighVal: []types.Datum{types.NewIntDatum(34)},
LowExclude: true,
},
}
expect := []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x13},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x21},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
},
}
actual, err := IndexRangesToKVRanges(new(stmtctx.StatementContext), 12, 15, ranges, nil)
c.Assert(err, IsNil)
for i := range actual {
c.Assert(actual[i], DeepEquals, expect[i])
}
}
func (s *testSuite) TestRequestBuilder1(c *C) {
ranges := []*ranger.Range{
{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(2)},
},
{
LowVal: []types.Datum{types.NewIntDatum(2)},
HighVal: []types.Datum{types.NewIntDatum(4)},
LowExclude: true,
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(4)},
HighVal: []types.Datum{types.NewIntDatum(19)},
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(19)},
HighVal: []types.Datum{types.NewIntDatum(32)},
LowExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(34)},
HighVal: []types.Datum{types.NewIntDatum(34)},
LowExclude: true,
},
}
actual, err := (&RequestBuilder{}).SetTableRanges(12, ranges, nil).
SetDAGRequest(&tipb.DAGRequest{}).
SetDesc(false).
SetKeepOrder(false).
SetFromSessionVars(variable.NewSessionVars()).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 103,
StartTs: 0x0,
Data: []uint8{0x8, 0x0, 0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0},
KeyRanges: []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x13},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x21},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
},
},
KeepOrder: false,
Desc: false,
Concurrency: 15,
IsolationLevel: 0,
Priority: 0,
NotFillCache: false,
SyncLog: false,
Streaming: false,
ReplicaRead: kv.ReplicaReadLeader,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestRequestBuilder2(c *C) {
ranges := []*ranger.Range{
{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(2)},
},
{
LowVal: []types.Datum{types.NewIntDatum(2)},
HighVal: []types.Datum{types.NewIntDatum(4)},
LowExclude: true,
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(4)},
HighVal: []types.Datum{types.NewIntDatum(19)},
HighExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(19)},
HighVal: []types.Datum{types.NewIntDatum(32)},
LowExclude: true,
},
{
LowVal: []types.Datum{types.NewIntDatum(34)},
HighVal: []types.Datum{types.NewIntDatum(34)},
LowExclude: true,
},
}
actual, err := (&RequestBuilder{}).SetIndexRanges(new(stmtctx.StatementContext), 12, 15, ranges).
SetDAGRequest(&tipb.DAGRequest{}).
SetDesc(false).
SetKeepOrder(false).
SetFromSessionVars(variable.NewSessionVars()).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 103,
StartTs: 0x0,
Data: []uint8{0x8, 0x0, 0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0},
KeyRanges: []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x13},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x21},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
},
},
KeepOrder: false,
Desc: false,
Concurrency: 15,
IsolationLevel: 0,
Priority: 0,
NotFillCache: false,
SyncLog: false,
Streaming: false,
ReplicaRead: kv.ReplicaReadLeader,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestRequestBuilder3(c *C) {
handles := []int64{0, 2, 3, 4, 5, 10, 11, 100}
actual, err := (&RequestBuilder{}).SetTableHandles(15, handles).
SetDAGRequest(&tipb.DAGRequest{}).
SetDesc(false).
SetKeepOrder(false).
SetFromSessionVars(variable.NewSessionVars()).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 103,
StartTs: 0x0,
Data: []uint8{0x8, 0x0, 0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0},
KeyRanges: []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x64},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x65},
},
},
KeepOrder: false,
Desc: false,
Concurrency: 15,
IsolationLevel: 0,
Priority: 0,
NotFillCache: false,
SyncLog: false,
Streaming: false,
ReplicaRead: kv.ReplicaReadLeader,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestRequestBuilder4(c *C) {
keyRanges := []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x64},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x65},
},
}
actual, err := (&RequestBuilder{}).SetKeyRanges(keyRanges).
SetDAGRequest(&tipb.DAGRequest{}).
SetDesc(false).
SetKeepOrder(false).
SetStreaming(true).
SetFromSessionVars(variable.NewSessionVars()).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 103,
StartTs: 0x0,
Data: []uint8{0x8, 0x0, 0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0},
KeyRanges: keyRanges,
KeepOrder: false,
Desc: false,
Concurrency: 15,
IsolationLevel: 0,
Priority: 0,
Streaming: true,
NotFillCache: false,
SyncLog: false,
ReplicaRead: kv.ReplicaReadLeader,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestRequestBuilder5(c *C) {
keyRanges := []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc},
},
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x64},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x65},
},
}
actual, err := (&RequestBuilder{}).SetKeyRanges(keyRanges).
SetAnalyzeRequest(&tipb.AnalyzeReq{}).
SetKeepOrder(true).
SetConcurrency(15).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 104,
StartTs: 0x0,
Data: []uint8{0x8, 0x0, 0x10, 0x0, 0x18, 0x0, 0x20, 0x0},
KeyRanges: keyRanges,
KeepOrder: true,
Desc: false,
Concurrency: 15,
IsolationLevel: kv.RC,
Priority: 1,
NotFillCache: true,
SyncLog: false,
Streaming: false,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestRequestBuilder6(c *C) {
keyRanges := []kv.KeyRange{
{
StartKey: kv.Key{0x00, 0x01},
EndKey: kv.Key{0x02, 0x03},
},
}
concurrency := 10
actual, err := (&RequestBuilder{}).SetKeyRanges(keyRanges).
SetChecksumRequest(&tipb.ChecksumRequest{}).
SetConcurrency(concurrency).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 105,
StartTs: 0x0,
Data: []uint8{0x8, 0x0, 0x10, 0x0, 0x18, 0x0},
KeyRanges: keyRanges,
KeepOrder: false,
Desc: false,
Concurrency: concurrency,
IsolationLevel: 0,
Priority: 0,
NotFillCache: true,
SyncLog: false,
Streaming: false,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestRequestBuilder7(c *C) {
vars := variable.NewSessionVars()
vars.SetReplicaRead(kv.ReplicaReadFollower)
concurrency := 10
actual, err := (&RequestBuilder{}).
SetFromSessionVars(vars).
SetConcurrency(concurrency).
Build()
c.Assert(err, IsNil)
expect := &kv.Request{
Tp: 0,
StartTs: 0x0,
KeepOrder: false,
Desc: false,
Concurrency: concurrency,
IsolationLevel: 0,
Priority: 0,
NotFillCache: false,
SyncLog: false,
Streaming: false,
ReplicaRead: kv.ReplicaReadFollower,
}
c.Assert(actual, DeepEquals, expect)
}
func (s *testSuite) TestTableRangesToKVRangesWithFbs(c *C) {
ranges := []*ranger.Range{
{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(4)},
},
}
hist := statistics.NewHistogram(1, 30, 30, 0, types.NewFieldType(mysql.TypeLonglong), chunk.InitialCapacity, 0)
for i := 0; i < 10; i++ {
hist.Bounds.AppendInt64(0, int64(i))
hist.Bounds.AppendInt64(0, int64(i+2))
hist.Buckets = append(hist.Buckets, statistics.Bucket{Repeat: 10, Count: int64(i + 30)})
}
fb := statistics.NewQueryFeedback(0, hist, 0, false)
lower, upper := types.NewIntDatum(2), types.NewIntDatum(3)
fb.Feedback = []statistics.Feedback{
{Lower: &lower, Upper: &upper, Count: 1, Repeat: 1},
}
actual := TableRangesToKVRanges(0, ranges, fb)
expect := []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5},
},
}
for i := 0; i < len(actual); i++ {
c.Assert(actual[i], DeepEquals, expect[i])
}
}
func (s *testSuite) TestIndexRangesToKVRangesWithFbs(c *C) {
ranges := []*ranger.Range{
{
LowVal: []types.Datum{types.NewIntDatum(1)},
HighVal: []types.Datum{types.NewIntDatum(4)},
},
}
hist := statistics.NewHistogram(1, 30, 30, 0, types.NewFieldType(mysql.TypeLonglong), chunk.InitialCapacity, 0)
for i := 0; i < 10; i++ {
hist.Bounds.AppendInt64(0, int64(i))
hist.Bounds.AppendInt64(0, int64(i+2))
hist.Buckets = append(hist.Buckets, statistics.Bucket{Repeat: 10, Count: int64(i + 30)})
}
fb := statistics.NewQueryFeedback(0, hist, 0, false)
lower, upper := types.NewIntDatum(2), types.NewIntDatum(3)
fb.Feedback = []statistics.Feedback{
{Lower: &lower, Upper: &upper, Count: 1, Repeat: 1},
}
actual, err := IndexRangesToKVRanges(new(stmtctx.StatementContext), 0, 0, ranges, fb)
c.Assert(err, IsNil)
expect := []kv.KeyRange{
{
StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5},
},
}
for i := 0; i < len(actual); i++ {
c.Assert(actual[i], DeepEquals, expect[i])
}
}
|
[
"\"log_level\""
] |
[] |
[
"log_level"
] |
[]
|
["log_level"]
|
go
| 1 | 0 | |
api/billingd/client/client_test.go
|
package client_test
import (
"context"
"crypto/rand"
"fmt"
"os"
"testing"
"time"
"github.com/libp2p/go-libp2p-core/crypto"
"github.com/phayes/freeport"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
stripe "github.com/stripe/stripe-go/v72"
"github.com/textileio/go-threads/core/thread"
"github.com/textileio/textile/v2/api/apitest"
"github.com/textileio/textile/v2/api/billingd/client"
"github.com/textileio/textile/v2/api/billingd/service"
mdb "github.com/textileio/textile/v2/mongodb"
"github.com/textileio/textile/v2/util"
"google.golang.org/grpc"
)
const (
mib = 1024 * 1024
)
func TestClient_CheckHealth(t *testing.T) {
t.Parallel()
c := setup(t)
err := c.CheckHealth(context.Background())
require.NoError(t, err)
}
func TestClient_CreateCustomer(t *testing.T) {
t.Parallel()
c := setup(t)
key := newKey(t)
email := apitest.NewEmail()
_, err := c.CreateCustomer(context.Background(), key, email, mdb.Dev)
require.NoError(t, err)
_, err = c.CreateCustomer(
context.Background(),
newKey(t),
apitest.NewEmail(),
mdb.User,
client.WithParent(key, email, mdb.Dev),
)
require.NoError(t, err)
nonExistentParentKey := newKey(t)
_, err = c.CreateCustomer(
context.Background(),
newKey(t),
apitest.NewEmail(),
mdb.User,
client.WithParent(nonExistentParentKey, apitest.NewEmail(), mdb.Dev),
)
require.NoError(t, err)
newParent, err := c.GetCustomer(context.Background(), nonExistentParentKey)
require.NoError(t, err)
assert.NotEmpty(t, newParent)
assert.Equal(t, int64(1), newParent.Dependents)
}
func TestClient_GetCustomer(t *testing.T) {
t.Parallel()
c := setup(t)
key := newKey(t)
_, err := c.CreateCustomer(context.Background(), key, apitest.NewEmail(), mdb.Dev)
require.NoError(t, err)
cus, err := c.GetCustomer(context.Background(), key)
require.NoError(t, err)
assert.NotEmpty(t, cus.AccountStatus)
assert.NotEmpty(t, cus.SubscriptionStatus)
assert.Equal(t, 0, int(cus.Balance))
assert.False(t, cus.Billable)
assert.False(t, cus.Delinquent)
assert.NotEmpty(t, cus.DailyUsage)
}
func TestClient_GetCustomerSession(t *testing.T) {
t.Parallel()
c := setup(t)
key := newKey(t)
_, err := c.CreateCustomer(context.Background(), key, apitest.NewEmail(), mdb.Dev)
require.NoError(t, err)
session, err := c.GetCustomerSession(context.Background(), key)
require.NoError(t, err)
assert.NotEmpty(t, session.Url)
}
func TestClient_ListDependentCustomers(t *testing.T) {
t.Parallel()
c := setup(t)
key := newKey(t)
email := apitest.NewEmail()
_, err := c.CreateCustomer(context.Background(), key, email, mdb.Org)
require.NoError(t, err)
for i := 0; i < 30; i++ {
_, err = c.CreateCustomer(
context.Background(),
newKey(t),
apitest.NewEmail(),
mdb.User,
client.WithParent(key, email, mdb.Org),
)
require.NoError(t, err)
time.Sleep(time.Second)
}
res, err := c.ListDependentCustomers(context.Background(), key, client.WithLimit(30))
require.NoError(t, err)
assert.Len(t, res.Customers, 30)
res, err = c.ListDependentCustomers(context.Background(), key)
require.NoError(t, err)
assert.Len(t, res.Customers, 25)
res, err = c.ListDependentCustomers(context.Background(), key, client.WithLimit(5))
require.NoError(t, err)
assert.Len(t, res.Customers, 5)
res, err = c.ListDependentCustomers(context.Background(), key, client.WithOffset(res.NextOffset))
require.NoError(t, err)
assert.Len(t, res.Customers, 25)
res, err = c.ListDependentCustomers(context.Background(), key, client.WithOffset(res.NextOffset))
require.NoError(t, err)
assert.Len(t, res.Customers, 0)
}
func TestClient_UpdateCustomer(t *testing.T) {
t.Parallel()
c := setup(t)
key := newKey(t)
id, err := c.CreateCustomer(context.Background(), key, apitest.NewEmail(), mdb.Dev)
require.NoError(t, err)
err = c.UpdateCustomer(context.Background(), id, 100, true, true)
require.NoError(t, err)
cus, err := c.GetCustomer(context.Background(), key)
require.NoError(t, err)
assert.Equal(t, 100, int(cus.Balance))
assert.True(t, cus.Billable)
assert.True(t, cus.Delinquent)
}
func TestClient_UpdateCustomerSubscription(t *testing.T) {
t.Parallel()
c := setup(t)
key := newKey(t)
id, err := c.CreateCustomer(context.Background(), key, apitest.NewEmail(), mdb.Dev)
require.NoError(t, err)
start := time.Now().Add(-time.Hour).Unix()
end := time.Now().Add(time.Hour).Unix()
err = c.UpdateCustomerSubscription(context.Background(), id, stripe.SubscriptionStatusCanceled, start, end)
require.NoError(t, err)
cus, err := c.GetCustomer(context.Background(), key)
require.NoError(t, err)
assert.Equal(t, string(stripe.SubscriptionStatusCanceled), cus.SubscriptionStatus)
}
func TestClient_RecreateCustomerSubscription(t *testing.T) {
t.Parallel()
c := setup(t)
key := newKey(t)
id, err := c.CreateCustomer(context.Background(), key, apitest.NewEmail(), mdb.Dev)
require.NoError(t, err)
err = c.RecreateCustomerSubscription(context.Background(), key)
require.Error(t, err)
start := time.Now().Add(-time.Hour).Unix()
end := time.Now().Add(time.Hour).Unix()
err = c.UpdateCustomerSubscription(context.Background(), id, stripe.SubscriptionStatusCanceled, start, end)
require.NoError(t, err)
err = c.RecreateCustomerSubscription(context.Background(), key)
require.NoError(t, err)
cus, err := c.GetCustomer(context.Background(), key)
require.NoError(t, err)
assert.Equal(t, string(stripe.SubscriptionStatusActive), cus.SubscriptionStatus)
}
func TestClient_DeleteCustomer(t *testing.T) {
t.Parallel()
c := setup(t)
key := newKey(t)
_, err := c.CreateCustomer(context.Background(), key, apitest.NewEmail(), mdb.Dev)
require.NoError(t, err)
err = c.DeleteCustomer(context.Background(), key)
require.NoError(t, err)
}
type usageTest struct {
key string
initialIncSize int64
unitPrice float64
}
func TestClient_GetCustomerUsage(t *testing.T) {
t.Parallel()
tests := []usageTest{
{"stored_data", mib, 0.000007705471},
{"network_egress", mib, 0.000025684903},
{"instance_reads", 1, 0.000099999999},
{"instance_writes", 1, 0.000199999999},
}
for _, test := range tests {
getCustomerUsage(t, test)
}
}
func getCustomerUsage(t *testing.T, test usageTest) {
c := setup(t)
key := newKey(t)
id, err := c.CreateCustomer(context.Background(), key, apitest.NewEmail(), mdb.Dev)
require.NoError(t, err)
product := getProduct(t, test.key)
err = c.UpdateCustomer(context.Background(), id, 0, true, false)
require.NoError(t, err)
_, err = c.IncCustomerUsage(context.Background(), key, map[string]int64{test.key: product.FreeQuotaSize * 2})
require.NoError(t, err)
err = c.ReportCustomerUsage(context.Background(), key)
require.NoError(t, err)
res, err := c.GetCustomerUsage(context.Background(), key)
require.NoError(t, err)
assert.NotEmpty(t, res.Usage)
assert.Equal(t, product.FreeQuotaSize*2, res.Usage[test.key].Total)
assert.Equal(t, float64(product.FreeQuotaSize/product.UnitSize)*test.unitPrice, res.Usage[test.key].Cost)
}
func TestClient_IncCustomerUsage(t *testing.T) {
t.Parallel()
tests := []usageTest{
{"stored_data", mib, 0.000007705471},
{"network_egress", mib, 0.000025684903},
{"instance_reads", 1, 0.000099999999},
{"instance_writes", 1, 0.000199999999},
}
for _, test := range tests {
incCustomerUsage(t, test)
}
}
func incCustomerUsage(t *testing.T, test usageTest) {
c := setup(t)
key := newKey(t)
email := apitest.NewEmail()
id, err := c.CreateCustomer(context.Background(), key, email, mdb.Dev)
require.NoError(t, err)
product := getProduct(t, test.key)
freeUnitsPerInterval := getFreeUnitsPerInterval(product)
// Add some under unit size
res, err := c.IncCustomerUsage(context.Background(), key, map[string]int64{test.key: test.initialIncSize})
require.NoError(t, err)
assert.Equal(t, int64(0), res.DailyUsage[test.key].Units)
assert.Equal(t, test.initialIncSize, res.DailyUsage[test.key].Total)
assert.Equal(t, float64(0), res.DailyUsage[test.key].Cost)
// Add more to reach unit size
res, err = c.IncCustomerUsage(context.Background(), key, map[string]int64{test.key: product.UnitSize - test.initialIncSize})
require.NoError(t, err)
assert.Equal(t, int64(1), res.DailyUsage[test.key].Units)
assert.Equal(t, product.UnitSize, res.DailyUsage[test.key].Total)
assert.Equal(t, float64(0), res.DailyUsage[test.key].Cost)
// Add a bunch of units above free quota
res, err = c.IncCustomerUsage(context.Background(), key, map[string]int64{test.key: product.FreeQuotaSize})
require.Error(t, err)
// Flag as billable to remove the free quota limit
err = c.UpdateCustomer(context.Background(), id, 0, true, false)
require.NoError(t, err)
// Try again
res, err = c.IncCustomerUsage(context.Background(), key, map[string]int64{test.key: product.FreeQuotaSize})
require.NoError(t, err)
assert.Equal(t, freeUnitsPerInterval+1, res.DailyUsage[test.key].Units)
assert.Equal(t, product.FreeQuotaSize+product.UnitSize, res.DailyUsage[test.key].Total)
assert.Equal(t, test.unitPrice, res.DailyUsage[test.key].Cost)
// Try as a child customer
childKey := newKey(t)
_, err = c.CreateCustomer(
context.Background(),
childKey,
apitest.NewEmail(),
mdb.User,
client.WithParent(key, email, mdb.Dev),
)
require.NoError(t, err)
res, err = c.IncCustomerUsage(context.Background(), childKey, map[string]int64{test.key: product.UnitSize})
require.NoError(t, err)
assert.Equal(t, int64(1), res.DailyUsage[test.key].Units)
assert.Equal(t, product.UnitSize, res.DailyUsage[test.key].Total)
assert.Equal(t, float64(0), res.DailyUsage[test.key].Cost)
// Check total usage
cus, err := c.GetCustomer(context.Background(), key)
require.NoError(t, err)
assert.Equal(t, freeUnitsPerInterval+2, cus.DailyUsage[test.key].Units)
assert.Equal(t, product.FreeQuotaSize+(2*product.UnitSize), cus.DailyUsage[test.key].Total)
assert.Equal(t, test.unitPrice*2, cus.DailyUsage[test.key].Cost)
}
func getProduct(t *testing.T, key string) *service.Product {
for _, p := range service.Products {
if p.Key == key {
return &p
}
}
t.Fatalf("could not find product with key %s", key)
return nil
}
func getFreeUnitsPerInterval(product *service.Product) int64 {
return product.FreeQuotaSize / product.UnitSize
}
func setup(t *testing.T) *client.Client {
apiPort, err := freeport.GetFreePort()
require.NoError(t, err)
gwPort, err := freeport.GetFreePort()
require.NoError(t, err)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
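	// The billing service talks to live Stripe and Segment backends; STRIPE_API_KEY (and, if used, SEGMENT_API_KEY) are read from the environment.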
api, err := service.NewService(ctx, service.Config{
ListenAddr: util.MustParseAddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", apiPort)),
StripeAPIURL: "https://api.stripe.com",
StripeAPIKey: os.Getenv("STRIPE_API_KEY"),
StripeSessionReturnURL: "http://127.0.0.1:8006/dashboard",
SegmentAPIKey: os.Getenv("SEGMENT_API_KEY"),
SegmentPrefix: "test_",
DBURI: "mongodb://127.0.0.1:27017",
DBName: util.MakeToken(8),
GatewayHostAddr: util.MustParseAddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", gwPort)),
FreeQuotaGracePeriod: 0,
Debug: true,
})
require.NoError(t, err)
err = api.Start()
require.NoError(t, err)
t.Cleanup(func() {
err := api.Stop(true)
require.NoError(t, err)
})
c, err := client.NewClient(fmt.Sprintf("127.0.0.1:%d", apiPort), grpc.WithInsecure())
require.NoError(t, err)
t.Cleanup(func() {
err := c.Close()
require.NoError(t, err)
})
return c
}
func newKey(t *testing.T) thread.PubKey {
_, key, err := crypto.GenerateEd25519Key(rand.Reader)
require.NoError(t, err)
return thread.NewLibp2pPubKey(key)
}
|
[
"\"STRIPE_API_KEY\"",
"\"SEGMENT_API_KEY\""
] |
[] |
[
"SEGMENT_API_KEY",
"STRIPE_API_KEY"
] |
[]
|
["SEGMENT_API_KEY", "STRIPE_API_KEY"]
|
go
| 2 | 0 | |
test/util/server.go
|
package util
import (
"errors"
"fmt"
"net"
"net/url"
"os"
"path"
"time"
"github.com/golang/glog"
"github.com/openshift/origin/pkg/cmd/server/bootstrappolicy"
kclient "k8s.io/kubernetes/pkg/client"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/wait"
"github.com/openshift/origin/pkg/client"
newproject "github.com/openshift/origin/pkg/cmd/admin/project"
"github.com/openshift/origin/pkg/cmd/server/admin"
configapi "github.com/openshift/origin/pkg/cmd/server/api"
"github.com/openshift/origin/pkg/cmd/server/start"
cmdutil "github.com/openshift/origin/pkg/cmd/util"
"github.com/openshift/origin/pkg/cmd/util/tokencmd"
)
// ServiceAccountWaitTimeout is used to determine how long to wait for the service account
// controllers to start up and populate the service accounts in the test namespace.
const ServiceAccountWaitTimeout = 30 * time.Second
// RequireServer verifies that etcd, Docker, and the OpenShift server are
// available and that you can successfully connect to them.
func RequireServer() {
RequireEtcd()
RequireDocker()
if _, err := GetClusterAdminClient(KubeConfigPath()); err != nil {
os.Exit(1)
}
}
// GetBaseDir returns the base directory used for test.
func GetBaseDir() string {
return cmdutil.Env("BASETMPDIR", path.Join(os.TempDir(), "openshift-"+Namespace()))
}
// FindAvailableBindAddress returns a bind address on 127.0.0.1 with a free port in the low-high range.
// If lowPort is 0, an ephemeral port is allocated.
func FindAvailableBindAddress(lowPort, highPort int) (string, error) {
if highPort < lowPort {
return "", errors.New("lowPort must be <= highPort")
}
for port := lowPort; port <= highPort; port++ {
l, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", port))
if err != nil {
if port == 0 {
// Only get one shot to get an ephemeral port
return "", err
}
continue
}
defer l.Close()
return l.Addr().String(), nil
}
return "", fmt.Errorf("Could not find available port in the range %d-%d", lowPort, highPort)
}
func setupStartOptions() (*start.MasterArgs, *start.NodeArgs, *start.ListenArg, *start.ImageFormatArgs, *start.KubeConnectionArgs) {
masterArgs, nodeArgs, listenArg, imageFormatArgs, kubeConnectionArgs := start.GetAllInOneArgs()
basedir := GetBaseDir()
nodeArgs.VolumeDir = path.Join(basedir, "volume")
masterArgs.EtcdDir = path.Join(basedir, "etcd")
masterArgs.ConfigDir.Default(path.Join(basedir, "openshift.local.config", "master"))
nodeArgs.ConfigDir.Default(path.Join(basedir, "openshift.local.config", nodeArgs.NodeName))
nodeArgs.MasterCertDir = masterArgs.ConfigDir.Value()
// don't wait for nodes to come up
masterAddr := os.Getenv("OS_MASTER_ADDR")
if len(masterAddr) == 0 {
if addr, err := FindAvailableBindAddress(8443, 8999); err != nil {
glog.Fatalf("Couldn't find free address for master: %v", err)
} else {
masterAddr = addr
}
}
fmt.Printf("masterAddr: %#v\n", masterAddr)
masterArgs.MasterAddr.Set(masterAddr)
listenArg.ListenAddr.Set(masterAddr)
masterArgs.EtcdAddr.Set(GetEtcdURL())
dnsAddr := os.Getenv("OS_DNS_ADDR")
if len(dnsAddr) == 0 {
if addr, err := FindAvailableBindAddress(8053, 8100); err != nil {
glog.Fatalf("Couldn't find free address for DNS: %v", err)
} else {
dnsAddr = addr
}
}
fmt.Printf("dnsAddr: %#v\n", dnsAddr)
masterArgs.DNSBindAddr.Set(dnsAddr)
return masterArgs, nodeArgs, listenArg, imageFormatArgs, kubeConnectionArgs
}
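// Illustrative: both addresses can be pinned from the environment instead of
// being auto-discovered, e.g. (POSIX shell; values are examples):
//
//	OS_MASTER_ADDR=127.0.0.1:8443 OS_DNS_ADDR=127.0.0.1:8053 go test ./test/...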
func DefaultMasterOptions() (*configapi.MasterConfig, error) {
startOptions := start.MasterOptions{}
startOptions.MasterArgs, _, _, _, _ = setupStartOptions()
startOptions.Complete()
startOptions.MasterArgs.ConfigDir.Default(path.Join(GetBaseDir(), "openshift.local.config", "master"))
if err := CreateMasterCerts(startOptions.MasterArgs); err != nil {
return nil, err
}
if err := CreateBootstrapPolicy(startOptions.MasterArgs); err != nil {
return nil, err
}
return startOptions.MasterArgs.BuildSerializeableMasterConfig()
}
func CreateBootstrapPolicy(masterArgs *start.MasterArgs) error {
createBootstrapPolicy := &admin.CreateBootstrapPolicyFileOptions{
File: path.Join(masterArgs.ConfigDir.Value(), "policy.json"),
OpenShiftSharedResourcesNamespace: "openshift",
}
if err := createBootstrapPolicy.Validate(nil); err != nil {
return err
}
if err := createBootstrapPolicy.CreateBootstrapPolicyFile(); err != nil {
return err
}
return nil
}
func CreateMasterCerts(masterArgs *start.MasterArgs) error {
hostnames, err := masterArgs.GetServerCertHostnames()
if err != nil {
return err
}
masterURL, err := masterArgs.GetMasterAddress()
if err != nil {
return err
}
publicMasterURL, err := masterArgs.GetMasterPublicAddress()
if err != nil {
return err
}
createMasterCerts := admin.CreateMasterCertsOptions{
CertDir: masterArgs.ConfigDir.Value(),
SignerName: admin.DefaultSignerName(),
Hostnames: hostnames.List(),
APIServerURL: masterURL.String(),
PublicAPIServerURL: publicMasterURL.String(),
Output: os.Stderr,
}
if err := createMasterCerts.Validate(nil); err != nil {
return err
}
if err := createMasterCerts.CreateMasterCerts(); err != nil {
return err
}
return nil
}
func CreateNodeCerts(nodeArgs *start.NodeArgs) error {
getSignerOptions := &admin.SignerCertOptions{
CertFile: admin.DefaultCertFilename(nodeArgs.MasterCertDir, "ca"),
KeyFile: admin.DefaultKeyFilename(nodeArgs.MasterCertDir, "ca"),
SerialFile: admin.DefaultSerialFilename(nodeArgs.MasterCertDir, "ca"),
}
createNodeConfig := admin.NewDefaultCreateNodeConfigOptions()
createNodeConfig.SignerCertOptions = getSignerOptions
createNodeConfig.NodeConfigDir = nodeArgs.ConfigDir.Value()
createNodeConfig.NodeName = nodeArgs.NodeName
createNodeConfig.Hostnames = []string{nodeArgs.NodeName}
createNodeConfig.ListenAddr = nodeArgs.ListenArg.ListenAddr
createNodeConfig.APIServerCAFile = admin.DefaultCertFilename(nodeArgs.MasterCertDir, "ca")
createNodeConfig.NodeClientCAFile = admin.DefaultCertFilename(nodeArgs.MasterCertDir, "ca")
if err := createNodeConfig.Validate(nil); err != nil {
return err
}
if err := createNodeConfig.CreateNodeFolder(); err != nil {
return err
}
return nil
}
func DefaultAllInOneOptions() (*configapi.MasterConfig, *configapi.NodeConfig, error) {
startOptions := start.AllInOneOptions{}
startOptions.MasterOptions.MasterArgs, startOptions.NodeArgs, _, _, _ = setupStartOptions()
startOptions.MasterOptions.MasterArgs.NodeList = nil
startOptions.NodeArgs.AllowDisabledDocker = true
startOptions.Complete()
startOptions.MasterOptions.MasterArgs.ConfigDir.Default(path.Join(GetBaseDir(), "openshift.local.config", "master"))
startOptions.NodeArgs.ConfigDir.Default(path.Join(GetBaseDir(), "openshift.local.config", admin.DefaultNodeDir(startOptions.NodeArgs.NodeName)))
startOptions.NodeArgs.MasterCertDir = startOptions.MasterOptions.MasterArgs.ConfigDir.Value()
if err := CreateMasterCerts(startOptions.MasterOptions.MasterArgs); err != nil {
return nil, nil, err
}
if err := CreateBootstrapPolicy(startOptions.MasterOptions.MasterArgs); err != nil {
return nil, nil, err
}
if err := CreateNodeCerts(startOptions.NodeArgs); err != nil {
return nil, nil, err
}
masterOptions, err := startOptions.MasterOptions.MasterArgs.BuildSerializeableMasterConfig()
if err != nil {
return nil, nil, err
}
nodeOptions, err := startOptions.NodeArgs.BuildSerializeableNodeConfig()
if err != nil {
return nil, nil, err
}
return masterOptions, nodeOptions, nil
}
func StartConfiguredAllInOne(masterConfig *configapi.MasterConfig, nodeConfig *configapi.NodeConfig) (string, error) {
adminKubeConfigFile, err := StartConfiguredMaster(masterConfig)
if err != nil {
return "", err
}
if err := start.StartNode(*nodeConfig); err != nil {
return "", err
}
return adminKubeConfigFile, nil
}
func StartTestAllInOne() (*configapi.MasterConfig, string, error) {
master, node, err := DefaultAllInOneOptions()
if err != nil {
return nil, "", err
}
adminKubeConfigFile, err := StartConfiguredAllInOne(master, node)
return master, adminKubeConfigFile, err
}
type TestOptions struct {
DeleteAllEtcdKeys bool
}
func DefaultTestOptions() TestOptions {
return TestOptions{true}
}
func StartConfiguredMaster(masterConfig *configapi.MasterConfig) (string, error) {
return StartConfiguredMasterWithOptions(masterConfig, DefaultTestOptions())
}
func StartConfiguredMasterWithOptions(masterConfig *configapi.MasterConfig, testOptions TestOptions) (string, error) {
if testOptions.DeleteAllEtcdKeys {
DeleteAllEtcdKeys()
}
if err := start.NewMaster(masterConfig, true, true).Start(); err != nil {
return "", err
}
adminKubeConfigFile := KubeConfigPath()
clientConfig, err := GetClusterAdminClientConfig(adminKubeConfigFile)
if err != nil {
return "", err
}
masterURL, err := url.Parse(clientConfig.Host)
if err != nil {
return "", err
}
// wait for the server to come up: 35 seconds
if err := cmdutil.WaitForSuccessfulDial(true, "tcp", masterURL.Host, 100*time.Millisecond, 1*time.Second, 35); err != nil {
return "", err
}
for {
// confirm that we can actually query from the api server
if client, err := GetClusterAdminClient(adminKubeConfigFile); err == nil {
if _, err := client.ClusterPolicies().List(labels.Everything(), fields.Everything()); err == nil {
break
}
}
time.Sleep(100 * time.Millisecond)
}
return adminKubeConfigFile, nil
}
// StartTestMaster starts up a test master and returns back the startOptions so you can get clients and certs
func StartTestMaster() (*configapi.MasterConfig, string, error) {
master, err := DefaultMasterOptions()
if err != nil {
return nil, "", err
}
adminKubeConfigFile, err := StartConfiguredMaster(master)
return master, adminKubeConfigFile, err
}
func WaitForServiceAccounts(client *kclient.Client, namespace string, accounts []string) error {
// Ensure the service accounts needed by build pods exist in the namespace
// The extra controllers tend to starve the service account controller
serviceAccounts := client.ServiceAccounts(namespace)
return wait.Poll(time.Second, ServiceAccountWaitTimeout, func() (bool, error) {
for _, account := range accounts {
if _, err := serviceAccounts.Get(account); err != nil {
return false, nil
}
}
return true, nil
})
}
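// Usage sketch (illustrative; the namespace and account names are
// hypothetical): block until the accounts needed by build pods exist.
//
//	if err := WaitForServiceAccounts(kubeClient, "test-ns", []string{"default", "builder"}); err != nil {
//		t.Fatalf("service accounts never appeared: %v", err)
//	}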
// CreateNewProject creates a new project using the clusterAdminClient, then gets a token for the adminUser and returns
// back a client for the admin user
func CreateNewProject(clusterAdminClient *client.Client, clientConfig kclient.Config, projectName, adminUser string) (*client.Client, error) {
newProjectOptions := &newproject.NewProjectOptions{
Client: clusterAdminClient,
ProjectName: projectName,
AdminRole: bootstrappolicy.AdminRoleName,
AdminUser: adminUser,
}
if err := newProjectOptions.Run(false); err != nil {
return nil, err
}
client, _, _, err := GetClientForUser(clientConfig, adminUser)
return client, err
}
func GetClientForUser(clientConfig kclient.Config, username string) (*client.Client, *kclient.Client, *kclient.Config, error) {
token, err := tokencmd.RequestToken(&clientConfig, nil, username, "password")
if err != nil {
return nil, nil, nil, err
}
userClientConfig := clientConfig
userClientConfig.BearerToken = token
userClientConfig.Username = ""
userClientConfig.Password = ""
userClientConfig.TLSClientConfig.CertFile = ""
userClientConfig.TLSClientConfig.KeyFile = ""
userClientConfig.TLSClientConfig.CertData = nil
userClientConfig.TLSClientConfig.KeyData = nil
kubeClient, err := kclient.New(&userClientConfig)
if err != nil {
return nil, nil, nil, err
}
osClient, err := client.New(&userClientConfig)
if err != nil {
return nil, nil, nil, err
}
return osClient, kubeClient, &userClientConfig, nil
}
|
[
"\"OS_MASTER_ADDR\"",
"\"OS_DNS_ADDR\""
] |
[] |
[
"OS_DNS_ADDR",
"OS_MASTER_ADDR"
] |
[]
|
["OS_DNS_ADDR", "OS_MASTER_ADDR"]
|
go
| 2 | 0 | |
cmd/docker/docker.go
|
package main
import (
"errors"
"fmt"
"os"
"strconv"
"strings"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/commands"
cliconfig "github.com/docker/cli/cli/config"
"github.com/docker/cli/cli/debug"
cliflags "github.com/docker/cli/cli/flags"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/term"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
func newDockerCommand(dockerCli *command.DockerCli) *cobra.Command {
opts := cliflags.NewClientOptions()
var flags *pflag.FlagSet
cmd := &cobra.Command{
Use: "docker [OPTIONS] COMMAND [ARG...]",
Short: "A self-sufficient runtime for containers",
SilenceUsage: true,
SilenceErrors: true,
TraverseChildren: true,
Args: noArgs,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
// flags must be the top-level command flags, not cmd.Flags()
opts.Common.SetDefaultOptions(flags)
dockerPreRun(opts)
if err := dockerCli.Initialize(opts); err != nil {
return err
}
return isSupported(cmd, dockerCli)
},
Version: fmt.Sprintf("%s, build %s", cli.Version, cli.GitCommit),
DisableFlagsInUseLine: true,
}
cli.SetupRootCommand(cmd)
flags = cmd.Flags()
flags.BoolP("version", "v", false, "Print version information and quit")
flags.StringVar(&opts.ConfigDir, "config", cliconfig.Dir(), "Location of client config files")
opts.Common.InstallFlags(flags)
// Install persistent flags
persistentFlags := cmd.PersistentFlags()
persistentFlags.StringVar(&opts.Common.Orchestrator, "orchestrator", "", "Orchestrator to use (swarm|kubernetes|all)")
persistentFlags.SetAnnotation("orchestrator", "top-level", []string{"version", "stack"})
setFlagErrorFunc(dockerCli, cmd, flags, opts)
setHelpFunc(dockerCli, cmd, flags, opts)
cmd.SetOutput(dockerCli.Out())
commands.AddCommands(cmd, dockerCli)
disableFlagsInUseLine(cmd)
setValidateArgs(dockerCli, cmd, flags, opts)
return cmd
}
func disableFlagsInUseLine(cmd *cobra.Command) {
visitAll(cmd, func(ccmd *cobra.Command) {
// do not add a `[flags]` to the end of the usage line.
ccmd.DisableFlagsInUseLine = true
})
}
func setFlagErrorFunc(dockerCli *command.DockerCli, cmd *cobra.Command, flags *pflag.FlagSet, opts *cliflags.ClientOptions) {
// When invoking `docker stack --nonsense`, we need to make sure FlagErrorFunc returns appropriate
// output if the feature is not supported.
// As cli.SetupRootCommand(cmd) above has already set up the FlagErrorFunc, we add a pre-check before the FlagErrorFunc
// is called.
flagErrorFunc := cmd.FlagErrorFunc()
cmd.SetFlagErrorFunc(func(cmd *cobra.Command, err error) error {
if err := initializeDockerCli(dockerCli, flags, opts); err != nil {
return err
}
if err := isSupported(cmd, dockerCli); err != nil {
return err
}
return flagErrorFunc(cmd, err)
})
}
func setHelpFunc(dockerCli *command.DockerCli, cmd *cobra.Command, flags *pflag.FlagSet, opts *cliflags.ClientOptions) {
defaultHelpFunc := cmd.HelpFunc()
cmd.SetHelpFunc(func(ccmd *cobra.Command, args []string) {
if err := initializeDockerCli(dockerCli, flags, opts); err != nil {
ccmd.Println(err)
return
}
if err := isSupported(ccmd, dockerCli); err != nil {
ccmd.Println(err)
return
}
hideUnsupportedFeatures(ccmd, dockerCli)
defaultHelpFunc(ccmd, args)
})
}
func setValidateArgs(dockerCli *command.DockerCli, cmd *cobra.Command, flags *pflag.FlagSet, opts *cliflags.ClientOptions) {
// Args is handled by ValidateArgs in cobra, which does not allow a pre-hook.
// As a result, here we replace the existing Args validation func to a wrapper,
// where the wrapper will check to see if the feature is supported or not.
// The Args validation error will only be returned if the feature is supported.
visitAll(cmd, func(ccmd *cobra.Command) {
// if there are no tags on a command or any of its parents,
// there is no need to wrap the Args validation.
if !hasTags(ccmd) {
return
}
if ccmd.Args == nil {
return
}
cmdArgs := ccmd.Args
ccmd.Args = func(cmd *cobra.Command, args []string) error {
if err := initializeDockerCli(dockerCli, flags, opts); err != nil {
return err
}
if err := isSupported(cmd, dockerCli); err != nil {
return err
}
return cmdArgs(cmd, args)
}
})
}
func initializeDockerCli(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts *cliflags.ClientOptions) error {
if dockerCli.Client() != nil {
return nil
}
// when using --help, PersistentPreRun is not called, so initialization is needed.
// flags must be the top-level command flags, not cmd.Flags()
opts.Common.SetDefaultOptions(flags)
dockerPreRun(opts)
return dockerCli.Initialize(opts)
}
// visitAll will traverse all commands from the root.
// This is different from the VisitAll of cobra.Command where only parents
// are checked.
func visitAll(root *cobra.Command, fn func(*cobra.Command)) {
for _, cmd := range root.Commands() {
visitAll(cmd, fn)
}
fn(root)
}
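// Illustrative: for a command tree root -> {a -> {a1}, b}, visitAll applies
// fn to a1, a, b, and finally root, i.e. children before their parents.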
func noArgs(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
return nil
}
return fmt.Errorf(
"docker: '%s' is not a docker command.\nSee 'docker --help'", args[0])
}
func main() {
// Set terminal emulation based on platform as required.
stdin, stdout, stderr := term.StdStreams()
logrus.SetOutput(stderr)
dockerCli := command.NewDockerCli(stdin, stdout, stderr, contentTrustEnabled())
cmd := newDockerCommand(dockerCli)
if err := cmd.Execute(); err != nil {
if sterr, ok := err.(cli.StatusError); ok {
if sterr.Status != "" {
fmt.Fprintln(stderr, sterr.Status)
}
// StatusError should only be used for errors, and all errors should
// have a non-zero exit status, so never exit with 0
if sterr.StatusCode == 0 {
os.Exit(1)
}
os.Exit(sterr.StatusCode)
}
fmt.Fprintln(stderr, err)
os.Exit(1)
}
}
func contentTrustEnabled() bool {
if e := os.Getenv("DOCKER_CONTENT_TRUST"); e != "" {
if t, err := strconv.ParseBool(e); t || err != nil {
// treat any other value as true
return true
}
}
return false
}
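// Illustrative shell usage (not from this file): only a value that parses as
// boolean false disables content trust; anything else, including unparsable
// values, enables it.
//
//	DOCKER_CONTENT_TRUST=1 docker pull alpine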
func dockerPreRun(opts *cliflags.ClientOptions) {
cliflags.SetLogLevel(opts.Common.LogLevel)
if opts.ConfigDir != "" {
cliconfig.SetDir(opts.ConfigDir)
}
if opts.Common.Debug {
debug.Enable()
}
}
type versionDetails interface {
Client() client.APIClient
ClientInfo() command.ClientInfo
ServerInfo() command.ServerInfo
}
func hideFeatureFlag(f *pflag.Flag, hasFeature bool, annotation string) {
if hasFeature {
return
}
if _, ok := f.Annotations[annotation]; ok {
f.Hidden = true
}
}
func hideFeatureSubCommand(subcmd *cobra.Command, hasFeature bool, annotation string) {
if hasFeature {
return
}
if _, ok := subcmd.Annotations[annotation]; ok {
subcmd.Hidden = true
}
}
func hideUnsupportedFeatures(cmd *cobra.Command, details versionDetails) {
clientVersion := details.Client().ClientVersion()
osType := details.ServerInfo().OSType
hasExperimental := details.ServerInfo().HasExperimental
hasExperimentalCLI := details.ClientInfo().HasExperimental
hasKubernetes := details.ClientInfo().HasKubernetes()
cmd.Flags().VisitAll(func(f *pflag.Flag) {
hideFeatureFlag(f, hasExperimental, "experimental")
hideFeatureFlag(f, hasExperimentalCLI, "experimentalCLI")
hideFeatureFlag(f, hasKubernetes, "kubernetes")
hideFeatureFlag(f, !hasKubernetes, "swarm")
// hide flags not supported by the server
if !isOSTypeSupported(f, osType) || !isVersionSupported(f, clientVersion) {
f.Hidden = true
}
// root command shows all top-level flags
if cmd.Parent() != nil {
if commands, ok := f.Annotations["top-level"]; ok {
f.Hidden = !findCommand(cmd, commands)
}
}
})
for _, subcmd := range cmd.Commands() {
hideFeatureSubCommand(subcmd, hasExperimental, "experimental")
hideFeatureSubCommand(subcmd, hasExperimentalCLI, "experimentalCLI")
hideFeatureSubCommand(subcmd, hasKubernetes, "kubernetes")
hideFeatureSubCommand(subcmd, !hasKubernetes, "swarm")
// hide subcommands not supported by the server
if subcmdVersion, ok := subcmd.Annotations["version"]; ok && versions.LessThan(clientVersion, subcmdVersion) {
subcmd.Hidden = true
}
if v, ok := subcmd.Annotations["ostype"]; ok && v != osType {
subcmd.Hidden = true
}
}
}
// Checks if a command or one of its ancestors is in the list
func findCommand(cmd *cobra.Command, commands []string) bool {
if cmd == nil {
return false
}
for _, c := range commands {
if c == cmd.Name() {
return true
}
}
return findCommand(cmd.Parent(), commands)
}
func isSupported(cmd *cobra.Command, details versionDetails) error {
if err := areSubcommandsSupported(cmd, details); err != nil {
return err
}
return areFlagsSupported(cmd, details)
}
func areFlagsSupported(cmd *cobra.Command, details versionDetails) error {
clientVersion := details.Client().ClientVersion()
osType := details.ServerInfo().OSType
hasExperimental := details.ServerInfo().HasExperimental
hasKubernetes := details.ClientInfo().HasKubernetes()
hasExperimentalCLI := details.ClientInfo().HasExperimental
errs := []string{}
cmd.Flags().VisitAll(func(f *pflag.Flag) {
if f.Changed {
if !isVersionSupported(f, clientVersion) {
errs = append(errs, fmt.Sprintf("\"--%s\" requires API version %s, but the Docker daemon API version is %s", f.Name, getFlagAnnotation(f, "version"), clientVersion))
return
}
if !isOSTypeSupported(f, osType) {
errs = append(errs, fmt.Sprintf("\"--%s\" is only supported on a Docker daemon running on %s, but the Docker daemon is running on %s", f.Name, getFlagAnnotation(f, "ostype"), osType))
return
}
if _, ok := f.Annotations["experimental"]; ok && !hasExperimental {
errs = append(errs, fmt.Sprintf("\"--%s\" is only supported on a Docker daemon with experimental features enabled", f.Name))
}
if _, ok := f.Annotations["experimentalCLI"]; ok && !hasExperimentalCLI {
errs = append(errs, fmt.Sprintf("\"--%s\" is only supported on a Docker cli with experimental cli features enabled", f.Name))
}
_, isKubernetesAnnotated := f.Annotations["kubernetes"]
_, isSwarmAnnotated := f.Annotations["swarm"]
if isKubernetesAnnotated && !isSwarmAnnotated && !hasKubernetes {
errs = append(errs, fmt.Sprintf("\"--%s\" is only supported on a Docker cli with kubernetes features enabled", f.Name))
}
if isSwarmAnnotated && !isKubernetesAnnotated && hasKubernetes {
errs = append(errs, fmt.Sprintf("\"--%s\" is only supported on a Docker cli with swarm features enabled", f.Name))
}
}
})
if len(errs) > 0 {
return errors.New(strings.Join(errs, "\n"))
}
return nil
}
// Check recursively so that, e.g., `docker stack ls` returns the same output as `docker stack`
func areSubcommandsSupported(cmd *cobra.Command, details versionDetails) error {
clientVersion := details.Client().ClientVersion()
osType := details.ServerInfo().OSType
hasExperimental := details.ServerInfo().HasExperimental
hasExperimentalCLI := details.ClientInfo().HasExperimental
hasKubernetes := details.ClientInfo().HasKubernetes()
// Check recursively so that, e.g., `docker stack ls` returns the same output as `docker stack`
for curr := cmd; curr != nil; curr = curr.Parent() {
if cmdVersion, ok := curr.Annotations["version"]; ok && versions.LessThan(clientVersion, cmdVersion) {
return fmt.Errorf("%s requires API version %s, but the Docker daemon API version is %s", cmd.CommandPath(), cmdVersion, clientVersion)
}
if os, ok := curr.Annotations["ostype"]; ok && os != osType {
return fmt.Errorf("%s is only supported on a Docker daemon running on %s, but the Docker daemon is running on %s", cmd.CommandPath(), os, osType)
}
if _, ok := curr.Annotations["experimental"]; ok && !hasExperimental {
return fmt.Errorf("%s is only supported on a Docker daemon with experimental features enabled", cmd.CommandPath())
}
if _, ok := curr.Annotations["experimentalCLI"]; ok && !hasExperimentalCLI {
return fmt.Errorf("%s is only supported on a Docker cli with experimental cli features enabled", cmd.CommandPath())
}
_, isKubernetesAnnotated := curr.Annotations["kubernetes"]
_, isSwarmAnnotated := curr.Annotations["swarm"]
if isKubernetesAnnotated && !isSwarmAnnotated && !hasKubernetes {
return fmt.Errorf("%s is only supported on a Docker cli with kubernetes features enabled", cmd.CommandPath())
}
if isSwarmAnnotated && !isKubernetesAnnotated && hasKubernetes {
return fmt.Errorf("%s is only supported on a Docker cli with swarm features enabled", cmd.CommandPath())
}
}
return nil
}
func getFlagAnnotation(f *pflag.Flag, annotation string) string {
if value, ok := f.Annotations[annotation]; ok && len(value) == 1 {
return value[0]
}
return ""
}
func isVersionSupported(f *pflag.Flag, clientVersion string) bool {
if v := getFlagAnnotation(f, "version"); v != "" {
return versions.GreaterThanOrEqualTo(clientVersion, v)
}
return true
}
func isOSTypeSupported(f *pflag.Flag, osType string) bool {
if v := getFlagAnnotation(f, "ostype"); v != "" && osType != "" {
return osType == v
}
return true
}
// hasTags returns true if the command or any of its parents has tags
func hasTags(cmd *cobra.Command) bool {
for curr := cmd; curr != nil; curr = curr.Parent() {
if len(curr.Annotations) > 0 {
return true
}
}
return false
}
|
[
"\"DOCKER_CONTENT_TRUST\""
] |
[] |
[
"DOCKER_CONTENT_TRUST"
] |
[]
|
["DOCKER_CONTENT_TRUST"]
|
go
| 1 | 0 | |
rl4rs/server/gymHttpServer.py
|
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
from flask import Flask, request, jsonify
import uuid
import gym
import numpy as np
import six
import argparse
import sys
import json
from rl4rs.env.slate import SlateRecEnv, SlateState
from rl4rs.env.seqslate import SeqSlateRecEnv, SeqSlateState
import http.client
# Force HTTP/1.0 for this process's outgoing HTTP connections
# (HTTP/1.0 closes the connection after each response, i.e. no keep-alive).
http.client.HTTPConnection._http_vsn = 10
http.client.HTTPConnection._http_vsn_str = 'HTTP/1.0'
import logging
logger = logging.getLogger('werkzeug')
logger.setLevel(logging.ERROR)
# modify from https://github.com/openai/gym-http-api
########## Container for environments ##########
class Envs(object):
"""
Container and manager for the environments instantiated
on this server.
When a new environment is created, such as with
envs.create('CartPole-v0'), it is stored under a short
identifier (such as '3c657dbc'). Future API calls make
use of this instance_id to identify which environment
should be manipulated.
"""
def __init__(self):
self.envs = {}
self.id_len = 8
def _lookup_env(self, instance_id):
try:
return self.envs[instance_id]
except KeyError:
raise InvalidUsage('Instance_id {} unknown'.format(instance_id))
def _remove_env(self, instance_id):
try:
del self.envs[instance_id]
except KeyError:
raise InvalidUsage('Instance_id {} unknown'.format(instance_id))
def create(self, env_id, config=None, seed=None):
# Avoid a shared mutable default argument; config is mutated below.
if config is None:
config = {}
try:
if env_id == 'SlateRecEnv-v0':
config['gpu'] = False
sim = SlateRecEnv(config, state_cls=SlateState)
env = gym.make('SlateRecEnv-v0', recsim=sim)
elif env_id == 'SeqSlateRecEnv-v0':
config['gpu'] = False
sim = SeqSlateRecEnv(config, state_cls=SeqSlateState)
env = gym.make('SeqSlateRecEnv-v0', recsim=sim)
else:
env = gym.make(env_id)
if seed:
env.seed(seed)
except gym.error.Error:
raise InvalidUsage("Attempted to look up malformed environment ID '{}'".format(env_id))
instance_id = str(uuid.uuid4().hex)[:self.id_len]
self.envs[instance_id] = env
return instance_id
def list_all(self):
return dict([(instance_id, env.spec.id) for (instance_id, env) in self.envs.items()])
def reset(self, instance_id):
env = self._lookup_env(instance_id)
obs = env.reset()
return env.observation_space.to_jsonable(obs)
def step(self, instance_id, action, render):
env = self._lookup_env(instance_id)
if isinstance(action, six.integer_types):
nice_action = action
else:
nice_action = np.array(action)
if render:
env.render()
[observation, reward, done, info] = env.step(nice_action)
obs_jsonable = env.observation_space.to_jsonable(observation)
return [obs_jsonable, reward, done, info]
def get_action_space_contains(self, instance_id, x):
env = self._lookup_env(instance_id)
return env.action_space.contains(int(x))
def get_action_space_info(self, instance_id):
env = self._lookup_env(instance_id)
return self._get_space_properties(env.action_space)
def get_action_space_sample(self, instance_id):
env = self._lookup_env(instance_id)
action = env.action_space.sample()
if isinstance(action, (list, tuple)) or ('numpy' in str(type(action))):
try:
action = action.tolist()
except TypeError:
print(type(action))
print('TypeError')
return action
def get_observation_space_contains(self, instance_id, j):
env = self._lookup_env(instance_id)
info = self._get_space_properties(env.observation_space)
for key, value in j.items():
# Convert both values to JSON for comparison
if json.dumps(info[key]) != json.dumps(value):
print('Values for "{}" do not match. Passed "{}", Observed "{}".'.format(key, value, info[key]))
return False
return True
def get_observation_space_info(self, instance_id):
env = self._lookup_env(instance_id)
return self._get_space_properties(env.observation_space)
def _get_space_properties(self, space):
info = {}
info['name'] = space.__class__.__name__
if info['name'] == 'Discrete':
info['n'] = int(space.n)
elif info['name'] == 'Box':
# info['json'] = str(space)
info['shape'] = [int(x) for x in space.shape]
# JSON itself does not allow Infinity, -Infinity, or NaN. Many newer
# JSON parsers accept them anyway (notably Python's json module reads
# and writes such floats), but many don't, so we clamp them only in
# this exported representation, and also flatten the arrays.
info['low'] = [(float(x) if x != -np.inf else -1e100) for x in np.array(space.low).flatten()]
info['high'] = [(float(x) if x != +np.inf else +1e100) for x in np.array(space.high).flatten()]
elif info['name'] == 'Dict':
# info['json'] = space.to_jsonable()
space = space.spaces
info['keys'] = [str(x) for x in space.keys()]
for key in info['keys']:
info[key] = {}
info[key]['shape'] = [int(x) for x in space[key].shape]
info[key]['low'] = [(float(x) if x != -np.inf else -1e100) for x in np.array(space[key].low).flatten()]
info[key]['high'] = [(float(x) if x != +np.inf else +1e100) for x in np.array(space[key].high).flatten()]
elif info['name'] == 'HighLow':
info['num_rows'] = space.num_rows
info['matrix'] = [((float(x) if x != -np.inf else -1e100) if x != +np.inf else +1e100) for x in np.array(space.matrix).flatten()]
return info
def monitor_start(self, instance_id, directory, force, resume, video_callable):
env = self._lookup_env(instance_id)
if video_callable == False:
v_c = lambda count: False
else:
v_c = lambda count: count % video_callable == 0
self.envs[instance_id] = gym.wrappers.Monitor(env, directory, force=force, resume=resume, video_callable=v_c)
def monitor_close(self, instance_id):
env = self._lookup_env(instance_id)
env.close()
def env_close(self, instance_id):
env = self._lookup_env(instance_id)
env.close()
self._remove_env(instance_id)
########## App setup ##########
app = Flask(__name__)
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
envs = Envs()
########## Error handling ##########
class InvalidUsage(Exception):
status_code = 400
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
def get_required_param(json, param):
if json is None:
logger.info("Request is not a valid json")
raise InvalidUsage("Request is not a valid json")
value = json.get(param, None)
if (value is None) or (value == '') or (value == []):
logger.info("A required request parameter '{}' had value {}".format(param, value))
raise InvalidUsage("A required request parameter '{}' was not provided".format(param))
return value
def get_optional_param(json, param, default):
if json is None:
logger.info("Request is not a valid json")
raise InvalidUsage("Request is not a valid json")
value = json.get(param, None)
if (value is None) or (value == '') or (value == []):
logger.info("An optional request parameter '{}' had value {} and was replaced with default value {}".format(param, value, default))
value = default
return value
@app.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
########## API route definitions ##########
@app.route('/v1/envs/', methods=['POST'])
def env_create():
"""
Create an instance of the specified environment
Parameters:
- env_id: gym environment ID string, such as 'CartPole-v0'
- config: dict of environment configuration options (required;
may be empty for environments that need no configuration)
- seed: set the seed for this env's random number generator(s).
Returns:
- instance_id: a short identifier (such as '3c657dbc')
for the created environment instance. The instance_id is
used in future API calls to identify the environment to be
manipulated
"""
env_id = get_required_param(request.get_json(), 'env_id')
config = get_required_param(request.get_json(), 'config')
seed = get_optional_param(request.get_json(), 'seed', None)
instance_id = envs.create(env_id, config, seed)
print('env created', instance_id)
return jsonify(instance_id=instance_id)
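# Illustrative client call (assumes the server is listening on 127.0.0.1:5000):
#   curl -s -X POST http://127.0.0.1:5000/v1/envs/ \
#        -H 'Content-Type: application/json' \
#        -d '{"env_id": "CartPole-v0", "config": {}}'
# -> {"instance_id": "3c657dbc"}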
@app.route('/v1/envs/', methods=['GET'])
def env_list_all():
"""
List all environments running on the server
Returns:
- envs: dict mapping instance_id to env_id
(e.g. {'3c657dbc': 'CartPole-v0'}) for every env
on the server
"""
all_envs = envs.list_all()
return jsonify(all_envs=all_envs)
@app.route('/v1/envs/<instance_id>/reset/', methods=['POST'])
def env_reset(instance_id):
"""
Reset the state of the environment and return an initial
observation.
Parameters:
- instance_id: a short identifier (such as '3c657dbc')
for the environment instance
Returns:
- observation: the initial observation of the space
"""
observation = envs.reset(instance_id)
# np.isscalar is also true for plain Python numbers, which have no
# .item(); only unwrap NumPy scalar types.
if isinstance(observation, np.generic):
observation = observation.item()
return jsonify(observation=observation)
@app.route('/v1/envs/<instance_id>/step/', methods=['POST'])
def env_step(instance_id):
"""
Run one timestep of the environment's dynamics.
Parameters:
- instance_id: a short identifier (such as '3c657dbc')
for the environment instance
- action: an action to take in the environment
Returns:
- observation: agent's observation of the current
environment
- reward: amount of reward returned after previous action
- done: whether the episode has ended
- info: a dict containing auxiliary diagnostic information
"""
json = request.get_json()
action = get_required_param(json, 'action')
render = get_optional_param(json, 'render', False)
[obs_jsonable, reward, done, info] = envs.step(instance_id, action, render)
if isinstance(obs_jsonable, np.ndarray):
obs_jsonable = obs_jsonable.tolist()
if isinstance(reward, np.ndarray):
reward = reward.tolist()
return jsonify(observation=obs_jsonable,
reward=reward, done=done, info=info)
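# Illustrative client call (instance_id comes from env_create; the action
# payload must match the environment's action_space):
#   curl -s -X POST http://127.0.0.1:5000/v1/envs/3c657dbc/step/ \
#        -H 'Content-Type: application/json' \
#        -d '{"action": 0, "render": false}'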
@app.route('/v1/envs/<instance_id>/action_space/', methods=['GET'])
def env_action_space_info(instance_id):
"""
Get information (name and dimensions/bounds) of the env's
action_space
Parameters:
- instance_id: a short identifier (such as '3c657dbc')
for the environment instance
Returns:
- info: a dict containing 'name' (such as 'Discrete'), and
additional dimensional info (such as 'n') which varies from
space to space
"""
info = envs.get_action_space_info(instance_id)
return jsonify(info=info)
@app.route('/v1/envs/<instance_id>/action_space/sample', methods=['GET'])
def env_action_space_sample(instance_id):
"""
Get a sample from the env's action_space
Parameters:
- instance_id: a short identifier (such as '3c657dbc')
for the environment instance
Returns:
- action: a randomly sampled element belonging to the action_space
"""
action = envs.get_action_space_sample(instance_id)
return jsonify(action=action)
@app.route('/v1/envs/<instance_id>/action_space/contains/<x>', methods=['GET'])
def env_action_space_contains(instance_id, x):
"""
Assess whether the given value is a member of the env's action_space
Parameters:
- instance_id: a short identifier (such as '3c657dbc')
for the environment instance
- x: the value to be checked as member
Returns:
- member: whether the value passed as parameter belongs to the action_space
"""
member = envs.get_action_space_contains(instance_id, x)
return jsonify(member=member)
@app.route('/v1/envs/<instance_id>/observation_space/contains', methods=['POST'])
def env_observation_space_contains(instance_id):
"""
Check that the posted space description matches the env's observation_space properties
Parameters:
- instance_id: a short identifier (such as '3c657dbc')
for the environment instance
Returns:
- member: whether every posted property matches the observation_space info
"""
j = request.get_json()
member = envs.get_observation_space_contains(instance_id, j)
return jsonify(member=member)
@app.route('/v1/envs/<instance_id>/observation_space/', methods=['GET'])
def env_observation_space_info(instance_id):
"""
Get information (name and dimensions/bounds) of the env's
observation_space
Parameters:
- instance_id: a short identifier (such as '3c657dbc')
for the environment instance
Returns:
- info: a dict containing 'name' (such as 'Discrete'),
and additional dimensional info (such as 'n') which
varies from space to space
"""
info = envs.get_observation_space_info(instance_id)
return jsonify(info=info)
@app.route('/v1/envs/<instance_id>/monitor/start/', methods=['POST'])
def env_monitor_start(instance_id):
"""
Start monitoring.
Parameters:
- instance_id: a short identifier (such as '3c657dbc')
for the environment instance
- force (default=False): Clear out existing training
data from this directory (by deleting every file
prefixed with "openaigym.")
- resume (default=False): Retain the training data
already in this directory, which will be merged with
our new data
"""
j = request.get_json()
directory = get_required_param(j, 'directory')
force = get_optional_param(j, 'force', False)
resume = get_optional_param(j, 'resume', False)
video_callable = get_optional_param(j, 'video_callable', False)
envs.monitor_start(instance_id, directory, force, resume, video_callable)
return ('', 204)
@app.route('/v1/envs/<instance_id>/monitor/close/', methods=['POST'])
def env_monitor_close(instance_id):
"""
Flush all monitor data to disk.
Parameters:
- instance_id: a short identifier (such as '3c657dbc')
for the environment instance
"""
envs.monitor_close(instance_id)
return ('', 204)
@app.route('/v1/envs/<instance_id>/close/', methods=['POST'])
def env_close(instance_id):
"""
Manually close an environment
Parameters:
- instance_id: a short identifier (such as '3c657dbc')
for the environment instance
"""
envs.env_close(instance_id)
return ('', 204)
@app.route('/v1/upload/', methods=['POST'])
def upload():
"""
Upload the results of training (as automatically recorded by
your env's monitor) to OpenAI Gym.
Parameters:
- training_dir: A directory containing the results of a
training run.
- api_key: Your OpenAI API key
- algorithm_id (default=None): An arbitrary string
indicating the paricular version of the algorithm
(including choices of parameters) you are running.
"""
j = request.get_json()
training_dir = get_required_param(j, 'training_dir')
api_key = get_required_param(j, 'api_key')
algorithm_id = get_optional_param(j, 'algorithm_id', None)
try:
gym.upload(training_dir, algorithm_id, writeup=None, api_key=api_key,
ignore_open_monitors=False)
return ('', 204)
except gym.error.AuthenticationError:
raise InvalidUsage('You must provide an OpenAI Gym API key')
@app.route('/v1/shutdown/', methods=['POST'])
def shutdown():
""" Request a server shutdown - currently used by the integration tests to repeatedly create and destroy fresh copies of the server running in a separate thread"""
f = request.environ.get('werkzeug.server.shutdown')
f()
return 'Server shutting down'
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Start a Gym HTTP API server')
parser.add_argument('-l', '--listen', help='interface to listen to', default='0.0.0.0')
parser.add_argument('-p', '--port', default=5000, type=int, help='port to bind to')
args = parser.parse_args()
print('Server starting at: ' + 'http://{}:{}'.format(args.listen, args.port))
app.run(host=args.listen, port=args.port)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
providers/box/box_test.go
|
package box_test
import (
"github.com/a93h/goth"
"github.com/a93h/goth/providers/box"
"github.com/stretchr/testify/assert"
"os"
"testing"
)
func Test_New(t *testing.T) {
t.Parallel()
a := assert.New(t)
p := provider()
a.Equal(p.ClientKey, os.Getenv("BOX_KEY"))
a.Equal(p.Secret, os.Getenv("BOX_SECRET"))
a.Equal(p.CallbackURL, "/foo")
}
func Test_Implements_Provider(t *testing.T) {
t.Parallel()
a := assert.New(t)
a.Implements((*goth.Provider)(nil), provider())
}
func Test_BeginAuth(t *testing.T) {
t.Parallel()
a := assert.New(t)
p := provider()
session, err := p.BeginAuth("test_state")
s := session.(*box.Session)
a.NoError(err)
a.Contains(s.AuthURL, "app.box.com/api/oauth2/authorize")
}
func Test_SessionFromJSON(t *testing.T) {
t.Parallel()
a := assert.New(t)
p := provider()
session, err := p.UnmarshalSession(`{"AuthURL":"https://app.box.com/api/oauth2/authorize","AccessToken":"1234567890"}`)
a.NoError(err)
s := session.(*box.Session)
a.Equal(s.AuthURL, "https://app.box.com/api/oauth2/authorize")
a.Equal(s.AccessToken, "1234567890")
}
func provider() *box.Provider {
return box.New(os.Getenv("BOX_KEY"), os.Getenv("BOX_SECRET"), "/foo")
}
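// To exercise these assertions against real credentials (illustrative; the
// values are placeholders):
//
//	BOX_KEY=xxx BOX_SECRET=yyy go test ./providers/box/...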
|
[
"\"BOX_KEY\"",
"\"BOX_SECRET\"",
"\"BOX_KEY\"",
"\"BOX_SECRET\""
] |
[] |
[
"BOX_KEY",
"BOX_SECRET"
] |
[]
|
["BOX_KEY", "BOX_SECRET"]
|
go
| 2 | 0 | |
runtime/java/treelite4j/create_jni.py
|
#!/usr/bin/env python
# coding: utf-8
import errno
import glob
import os
import shutil
import subprocess
import sys
from contextlib import contextmanager
# Normalize sys.platform: Python 2 reports e.g. 'linux2', while Python 3 reports 'linux'.
if sys.platform.startswith("linux"):
sys.platform = "linux"
@contextmanager
def cd(path):
path = normpath(path)
cwd = os.getcwd()
os.chdir(path)
print("cd " + path)
try:
yield path
finally:
os.chdir(cwd)
def maybe_makedirs(path):
path = normpath(path)
print("mkdir -p " + path)
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def run(command, **kwargs):
print(command)
subprocess.check_call(command, shell=True, **kwargs)
def cp(source, target):
source = normpath(source)
target = normpath(target)
print("cp {0} {1}".format(source, target))
shutil.copy(source, target)
def normpath(path):
"""Normalize UNIX path to a native path."""
normalized = os.path.join(*path.split("/"))
if os.path.isabs(path):
return os.path.abspath("/") + normalized
else:
return normalized
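# Illustrative behavior (paths are examples):
#   normpath("src/main/resources") -> r"src\main\resources" on Windows,
#                                     "src/main/resources" elsewhere
#   normpath("/usr/libexec")       -> the equivalent absolute native path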
if __name__ == "__main__":
if sys.platform == "darwin":
# Resolve JAVA_HOME via /usr/libexec/java_home on macOS.
os.environ["JAVA_HOME"] = subprocess.check_output(
"/usr/libexec/java_home").strip().decode()
print("building Java wrapper")
with cd(".."):
maybe_makedirs("build")
with cd("build"):
if sys.platform == "win32":
# Force x64 build on Windows.
maybe_generator = ' -G"Visual Studio 14 Win64"'
else:
maybe_generator = ""
if 'cpp-coverage' in sys.argv:
maybe_generator += ' -DTEST_COVERAGE=ON'
run("cmake .. " + maybe_generator)
run("cmake --build . --config Release")
print("copying native library")
library_name = {
"win32": "treelite4j.dll",
"darwin": "libtreelite4j.dylib",
"linux": "libtreelite4j.so"
}[sys.platform]
maybe_makedirs("src/main/resources/lib")
cp("../build/lib/" + library_name, "src/main/resources/lib")
print("building mushroom example")
with cd("src/test/resources/mushroom_example"):
run("cmake . " + maybe_generator)
run("cmake --build . --config Release")
|
[] |
[] |
[
"JAVA_HOME"
] |
[]
|
["JAVA_HOME"]
|
python
| 1 | 0 | |
frontend/gateway/grpcclient/client.go
|
package grpcclient
import (
"context"
"encoding/json"
"io"
"net"
"os"
"strings"
"time"
"github.com/gogo/googleapis/google/rpc"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/frontend/gateway/client"
pb "github.com/moby/buildkit/frontend/gateway/pb"
opspb "github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/apicaps"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
fstypes "github.com/tonistiigi/fsutil/types"
"google.golang.org/grpc"
"google.golang.org/grpc/status"
)
const frontendPrefix = "BUILDKIT_FRONTEND_OPT_"
type GrpcClient interface {
Run(context.Context, client.BuildFunc) error
}
func New(ctx context.Context, opts map[string]string, session, product string, c pb.LLBBridgeClient, w []client.WorkerInfo) (GrpcClient, error) {
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
resp, err := c.Ping(ctx, &pb.PingRequest{})
if err != nil {
return nil, err
}
if resp.FrontendAPICaps == nil {
resp.FrontendAPICaps = defaultCaps()
}
if resp.LLBCaps == nil {
resp.LLBCaps = defaultLLBCaps()
}
return &grpcClient{
client: c,
opts: opts,
sessionID: session,
workers: w,
product: product,
caps: pb.Caps.CapSet(resp.FrontendAPICaps),
llbCaps: opspb.Caps.CapSet(resp.LLBCaps),
requests: map[string]*pb.SolveRequest{},
}, nil
}
func current() (GrpcClient, error) {
if ep := product(); ep != "" {
apicaps.ExportedProduct = ep
}
ctx, conn, err := grpcClientConn(context.Background())
if err != nil {
return nil, err
}
return New(ctx, opts(), sessionID(), product(), pb.NewLLBBridgeClient(conn), workers())
}
func convertRef(ref client.Reference) (*pb.Ref, error) {
if ref == nil {
return &pb.Ref{}, nil
}
r, ok := ref.(*reference)
if !ok {
return nil, errors.Errorf("invalid return reference type %T", ref)
}
return &pb.Ref{Ids: []string{r.id}, Defs: []*opspb.Definition{r.def}}, nil
}
func RunFromEnvironment(ctx context.Context, f client.BuildFunc) error {
client, err := current()
if err != nil {
return errors.Wrapf(err, "failed to initialize client from environment")
}
return client.Run(ctx, f)
}
func (c *grpcClient) Run(ctx context.Context, f client.BuildFunc) (retError error) {
export := c.caps.Supports(pb.CapReturnResult) == nil
var (
res *client.Result
err error
)
if export {
defer func() {
req := &pb.ReturnRequest{}
if retError == nil {
if res == nil {
res = &client.Result{}
}
pbRes := &pb.Result{
Metadata: res.Metadata,
}
if res.Refs != nil {
if c.caps.Supports(pb.CapProtoRefArray) == nil {
m := map[string]*pb.Ref{}
for k, r := range res.Refs {
pbRef, err := convertRef(r)
if err != nil {
retError = err
continue
}
m[k] = pbRef
}
pbRes.Result = &pb.Result_Refs{Refs: &pb.RefMap{Refs: m}}
} else {
// Server doesn't support the new wire format for refs, so we construct
// a deprecated result ref map.
m := map[string]string{}
for k, r := range res.Refs {
pbRef, err := convertRef(r)
if err != nil {
retError = err
continue
}
var id string
if len(pbRef.Ids) > 0 {
id = pbRef.Ids[0]
}
m[k] = id
}
pbRes.Result = &pb.Result_RefsDeprecated{RefsDeprecated: &pb.RefMapDeprecated{Refs: m}}
}
} else {
pbRef, err := convertRef(res.Ref)
if err != nil {
retError = err
} else {
if c.caps.Supports(pb.CapProtoRefArray) == nil {
pbRes.Result = &pb.Result_Ref{Ref: pbRef}
} else {
// Server doesn't support the new wire format for refs, so we construct
// a deprecated result ref.
var id string
if len(pbRef.Ids) > 0 {
id = pbRef.Ids[0]
}
pbRes.Result = &pb.Result_RefDeprecated{RefDeprecated: id}
}
}
}
if retError == nil {
req.Result = pbRes
}
}
if retError != nil {
st, _ := status.FromError(errors.Cause(retError))
stp := st.Proto()
req.Error = &rpc.Status{
Code: stp.Code,
Message: stp.Message,
// Details: stp.Details,
}
}
if _, err := c.client.Return(ctx, req); err != nil && retError == nil {
retError = err
}
}()
}
if res, err = f(ctx, c); err != nil {
return err
}
if err := c.caps.Supports(pb.CapReturnMap); len(res.Refs) > 1 && err != nil {
return err
}
if !export {
exportedAttrBytes, err := json.Marshal(res.Metadata)
if err != nil {
return errors.Wrapf(err, "failed to marshal return metadata")
}
req, err := c.requestForRef(res.Ref)
if err != nil {
return errors.Wrapf(err, "failed to find return ref")
}
req.Final = true
req.ExporterAttr = exportedAttrBytes
if _, err := c.client.Solve(ctx, req); err != nil {
return errors.Wrapf(err, "failed to solve")
}
}
return nil
}
// defaultCaps returns the capabilities that were implemented when capabilities
// support was added. This list is frozen and should never be changed.
func defaultCaps() []apicaps.PBCap {
return []apicaps.PBCap{
{ID: string(pb.CapSolveBase), Enabled: true},
{ID: string(pb.CapSolveInlineReturn), Enabled: true},
{ID: string(pb.CapResolveImage), Enabled: true},
{ID: string(pb.CapReadFile), Enabled: true},
}
}
// defaultLLBCaps returns the LLB capabilities that were implemented when capabilities
// support was added. This list is frozen and should never be changed.
func defaultLLBCaps() []apicaps.PBCap {
return []apicaps.PBCap{
{ID: string(opspb.CapSourceImage), Enabled: true},
{ID: string(opspb.CapSourceLocal), Enabled: true},
{ID: string(opspb.CapSourceLocalUnique), Enabled: true},
{ID: string(opspb.CapSourceLocalSessionID), Enabled: true},
{ID: string(opspb.CapSourceLocalIncludePatterns), Enabled: true},
{ID: string(opspb.CapSourceLocalFollowPaths), Enabled: true},
{ID: string(opspb.CapSourceLocalExcludePatterns), Enabled: true},
{ID: string(opspb.CapSourceLocalSharedKeyHint), Enabled: true},
{ID: string(opspb.CapSourceGit), Enabled: true},
{ID: string(opspb.CapSourceGitKeepDir), Enabled: true},
{ID: string(opspb.CapSourceGitFullURL), Enabled: true},
{ID: string(opspb.CapSourceHTTP), Enabled: true},
{ID: string(opspb.CapSourceHTTPChecksum), Enabled: true},
{ID: string(opspb.CapSourceHTTPPerm), Enabled: true},
{ID: string(opspb.CapSourceHTTPUIDGID), Enabled: true},
{ID: string(opspb.CapBuildOpLLBFileName), Enabled: true},
{ID: string(opspb.CapExecMetaBase), Enabled: true},
{ID: string(opspb.CapExecMetaProxy), Enabled: true},
{ID: string(opspb.CapExecMountBind), Enabled: true},
{ID: string(opspb.CapExecMountCache), Enabled: true},
{ID: string(opspb.CapExecMountCacheSharing), Enabled: true},
{ID: string(opspb.CapExecMountSelector), Enabled: true},
{ID: string(opspb.CapExecMountTmpfs), Enabled: true},
{ID: string(opspb.CapExecMountSecret), Enabled: true},
{ID: string(opspb.CapConstraints), Enabled: true},
{ID: string(opspb.CapPlatform), Enabled: true},
{ID: string(opspb.CapMetaIgnoreCache), Enabled: true},
{ID: string(opspb.CapMetaDescription), Enabled: true},
{ID: string(opspb.CapMetaExportCache), Enabled: true},
}
}
type grpcClient struct {
client pb.LLBBridgeClient
opts map[string]string
sessionID string
product string
workers []client.WorkerInfo
caps apicaps.CapSet
llbCaps apicaps.CapSet
requests map[string]*pb.SolveRequest
}
func (c *grpcClient) requestForRef(ref client.Reference) (*pb.SolveRequest, error) {
emptyReq := &pb.SolveRequest{
Definition: &opspb.Definition{},
}
if ref == nil {
return emptyReq, nil
}
r, ok := ref.(*reference)
if !ok {
return nil, errors.Errorf("return reference has invalid type %T", ref)
}
if r.id == "" {
return emptyReq, nil
}
req, ok := c.requests[r.id]
if !ok {
return nil, errors.Errorf("did not find request for return reference %s", r.id)
}
return req, nil
}
func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (*client.Result, error) {
if creq.Definition != nil {
for _, md := range creq.Definition.Metadata {
for cap := range md.Caps {
if err := c.llbCaps.Supports(cap); err != nil {
return nil, err
}
}
}
}
var (
// old API
legacyRegistryCacheImports []string
// new API (CapImportCaches)
cacheImports []*pb.CacheOptionsEntry
)
supportCapImportCaches := c.caps.Supports(pb.CapImportCaches) == nil
for _, im := range creq.CacheImports {
if !supportCapImportCaches && im.Type == "registry" {
legacyRegistryCacheImports = append(legacyRegistryCacheImports, im.Attrs["ref"])
} else {
cacheImports = append(cacheImports, &pb.CacheOptionsEntry{
Type: im.Type,
Attrs: im.Attrs,
})
}
}
req := &pb.SolveRequest{
Definition: creq.Definition,
Frontend: creq.Frontend,
FrontendOpt: creq.FrontendOpt,
AllowResultReturn: true,
AllowResultArrayRef: true,
// old API
ImportCacheRefsDeprecated: legacyRegistryCacheImports,
// new API
CacheImports: cacheImports,
}
// backwards compatibility with inline return
if c.caps.Supports(pb.CapReturnResult) != nil {
req.ExporterAttr = []byte("{}")
}
resp, err := c.client.Solve(ctx, req)
if err != nil {
return nil, err
}
res := &client.Result{}
if resp.Result == nil {
if id := resp.Ref; id != "" {
c.requests[id] = req
}
res.SetRef(&reference{id: resp.Ref, c: c})
} else {
res.Metadata = resp.Result.Metadata
switch pbRes := resp.Result.Result.(type) {
case *pb.Result_RefDeprecated:
if id := pbRes.RefDeprecated; id != "" {
res.SetRef(&reference{id: id, c: c})
}
case *pb.Result_RefsDeprecated:
for k, v := range pbRes.RefsDeprecated.Refs {
ref := &reference{id: v, c: c}
if v == "" {
ref = nil
}
res.AddRef(k, ref)
}
case *pb.Result_Ref:
ids := pbRes.Ref.Ids
if len(ids) > 0 {
ref, err := newReference(c, pbRes.Ref)
if err != nil {
return nil, err
}
res.SetRef(ref)
}
case *pb.Result_Refs:
for k, v := range pbRes.Refs.Refs {
var ref *reference
if len(v.Ids) > 0 {
ref, err = newReference(c, v)
if err != nil {
return nil, err
}
}
res.AddRef(k, ref)
}
}
}
return res, nil
}
func (c *grpcClient) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error) {
var p *opspb.Platform
if platform := opt.Platform; platform != nil {
p = &opspb.Platform{
OS: platform.OS,
Architecture: platform.Architecture,
Variant: platform.Variant,
OSVersion: platform.OSVersion,
OSFeatures: platform.OSFeatures,
}
}
resp, err := c.client.ResolveImageConfig(ctx, &pb.ResolveImageConfigRequest{Ref: ref, Platform: p, ResolveMode: opt.ResolveMode, LogName: opt.LogName})
if err != nil {
return "", nil, err
}
return resp.Digest, resp.Config, nil
}
func (c *grpcClient) BuildOpts() client.BuildOpts {
return client.BuildOpts{
Opts: c.opts,
SessionID: c.sessionID,
Workers: c.workers,
Product: c.product,
LLBCaps: c.llbCaps,
Caps: c.caps,
}
}
type reference struct {
c *grpcClient
id string
def *opspb.Definition
output llb.Output
}
func newReference(c *grpcClient, ref *pb.Ref) (*reference, error) {
if len(ref.Ids) == 0 {
return nil, errors.Errorf("reference has no ids")
}
if len(ref.Ids) > 1 {
return nil, errors.Errorf("cannot create multi-result array reference")
}
if len(ref.Ids) != len(ref.Defs) {
return nil, errors.Errorf("reference ids and definitions mismatch length")
}
return &reference{c: c, id: ref.Ids[0], def: ref.Defs[0]}, nil
}
func (r *reference) ToState() (st llb.State, err error) {
err = r.c.caps.Supports(pb.CapReferenceOutput)
if err != nil {
return st, err
}
if r.def == nil {
return st, errors.Errorf("gateway did not return reference with definition")
}
defop, err := llb.NewDefinitionOp(r.def)
if err != nil {
return st, err
}
return llb.NewState(defop), nil
}
func (r *reference) ReadFile(ctx context.Context, req client.ReadRequest) ([]byte, error) {
rfr := &pb.ReadFileRequest{FilePath: req.Filename, Ref: r.id}
if r := req.Range; r != nil {
rfr.Range = &pb.FileRange{
Offset: int64(r.Offset),
Length: int64(r.Length),
}
}
resp, err := r.c.client.ReadFile(ctx, rfr)
if err != nil {
return nil, err
}
return resp.Data, nil
}
func (r *reference) ReadDir(ctx context.Context, req client.ReadDirRequest) ([]*fstypes.Stat, error) {
if err := r.c.caps.Supports(pb.CapReadDir); err != nil {
return nil, err
}
rdr := &pb.ReadDirRequest{
DirPath: req.Path,
IncludePattern: req.IncludePattern,
Ref: r.id,
}
resp, err := r.c.client.ReadDir(ctx, rdr)
if err != nil {
return nil, err
}
return resp.Entries, nil
}
func (r *reference) StatFile(ctx context.Context, req client.StatRequest) (*fstypes.Stat, error) {
if err := r.c.caps.Supports(pb.CapStatFile); err != nil {
return nil, err
}
rdr := &pb.StatFileRequest{
Path: req.Path,
Ref: r.id,
}
resp, err := r.c.client.StatFile(ctx, rdr)
if err != nil {
return nil, err
}
return resp.Stat, nil
}
func grpcClientConn(ctx context.Context) (context.Context, *grpc.ClientConn, error) {
dialOpt := grpc.WithDialer(func(addr string, d time.Duration) (net.Conn, error) {
return stdioConn(), nil
})
cc, err := grpc.DialContext(ctx, "", dialOpt, grpc.WithInsecure())
if err != nil {
return nil, nil, errors.Wrap(err, "failed to create grpc client")
}
ctx, cancel := context.WithCancel(ctx)
_ = cancel
// go monitorHealth(ctx, cc, cancel)
return ctx, cc, nil
}
func stdioConn() net.Conn {
return &conn{os.Stdin, os.Stdout, os.Stdout}
}
type conn struct {
io.Reader
io.Writer
io.Closer
}
func (s *conn) LocalAddr() net.Addr {
return dummyAddr{}
}
func (s *conn) RemoteAddr() net.Addr {
return dummyAddr{}
}
func (s *conn) SetDeadline(t time.Time) error {
return nil
}
func (s *conn) SetReadDeadline(t time.Time) error {
return nil
}
func (s *conn) SetWriteDeadline(t time.Time) error {
return nil
}
type dummyAddr struct {
}
func (d dummyAddr) Network() string {
return "pipe"
}
func (d dummyAddr) String() string {
return "localhost"
}
func opts() map[string]string {
opts := map[string]string{}
for _, env := range os.Environ() {
parts := strings.SplitN(env, "=", 2)
k := parts[0]
v := ""
if len(parts) == 2 {
v = parts[1]
}
if !strings.HasPrefix(k, frontendPrefix) {
continue
}
parts = strings.SplitN(v, "=", 2)
v = ""
if len(parts) == 2 {
v = parts[1]
}
opts[parts[0]] = v
}
return opts
}
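// Illustrative: an environment entry such as
//
//	BUILDKIT_FRONTEND_OPT_0=filename=Dockerfile
//
// passes the prefix check on its name, then its value is split on the first
// '=', yielding opts["filename"] = "Dockerfile".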
func sessionID() string {
return os.Getenv("BUILDKIT_SESSION_ID")
}
func workers() []client.WorkerInfo {
var c []client.WorkerInfo
if err := json.Unmarshal([]byte(os.Getenv("BUILDKIT_WORKERS")), &c); err != nil {
return nil
}
return c
}
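// Illustrative (the field name is an assumption; schema abbreviated):
// BUILDKIT_WORKERS carries a JSON array of worker descriptions, e.g.
//
//	BUILDKIT_WORKERS='[{"id":"worker0"}]'
//
// Any value that fails to unmarshal yields a nil worker list.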
func product() string {
return os.Getenv("BUILDKIT_EXPORTEDPRODUCT")
}
|
[
"\"BUILDKIT_SESSION_ID\"",
"\"BUILDKIT_WORKERS\"",
"\"BUILDKIT_EXPORTEDPRODUCT\""
] |
[] |
[
"BUILDKIT_WORKERS",
"BUILDKIT_SESSION_ID",
"BUILDKIT_EXPORTEDPRODUCT"
] |
[]
|
["BUILDKIT_WORKERS", "BUILDKIT_SESSION_ID", "BUILDKIT_EXPORTEDPRODUCT"]
|
go
| 3 | 0 | |
executor/executor_test.go
|
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor_test
import (
"context"
"flag"
"fmt"
"math"
"os"
"strconv"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/golang/protobuf/proto"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
pb "github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/log"
"github.com/pingcap/parser"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/domain/infosync"
"github.com/pingcap/tidb/executor"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/planner"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/store/mockstore/mocktikv"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/admin"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/gcutil"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/testkit"
"github.com/pingcap/tidb/util/testleak"
"github.com/pingcap/tidb/util/testutil"
"github.com/pingcap/tidb/util/timeutil"
"github.com/pingcap/tipb/go-tipb"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
func TestT(t *testing.T) {
CustomVerboseFlag = true
*CustomParallelSuiteFlag = true
logLevel := os.Getenv("log_level")
logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
autoid.SetStep(5000)
testleak.BeforeTest()
TestingT(t)
testleak.AfterTestT(t)()
}
var _ = Suite(&testSuite{&baseTestSuite{}})
var _ = Suite(&testSuiteP1{&baseTestSuite{}})
var _ = Suite(&testSuite1{})
var _ = Suite(&testSuite2{})
var _ = Suite(&testSuite3{})
var _ = Suite(&testSuite4{})
var _ = SerialSuites(&testShowStatsSuite{&baseTestSuite{}})
var _ = Suite(&testBypassSuite{})
var _ = Suite(&testUpdateSuite{})
var _ = Suite(&testOOMSuite{})
var _ = Suite(&testPointGetSuite{})
var _ = Suite(&testBatchPointGetSuite{})
var _ = Suite(&testRecoverTable{})
var _ = Suite(&testFlushSuite{})
type testSuite struct{ *baseTestSuite }
type testSuiteP1 struct{ *baseTestSuite }
type baseTestSuite struct {
cluster *mocktikv.Cluster
mvccStore mocktikv.MVCCStore
store kv.Storage
domain *domain.Domain
*parser.Parser
ctx *mock.Context
}
var mockTikv = flag.Bool("mockTikv", true, "use mock tikv store in executor test")
func (s *baseTestSuite) SetUpSuite(c *C) {
s.Parser = parser.New()
flag.Lookup("mockTikv")
useMockTikv := *mockTikv
if useMockTikv {
s.cluster = mocktikv.NewCluster()
mocktikv.BootstrapWithSingleStore(s.cluster)
s.mvccStore = mocktikv.MustNewMVCCStore()
store, err := mockstore.NewMockTikvStore(
mockstore.WithCluster(s.cluster),
mockstore.WithMVCCStore(s.mvccStore),
)
c.Assert(err, IsNil)
s.store = store
session.SetSchemaLease(0)
session.DisableStats4Test()
}
d, err := session.BootstrapSession(s.store)
c.Assert(err, IsNil)
d.SetStatsUpdating(true)
s.domain = d
}
func (s *baseTestSuite) TearDownSuite(c *C) {
s.domain.Close()
s.store.Close()
}
func (s *testSuiteP1) TestPessimisticSelectForUpdate(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id int primary key, a int)")
tk.MustExec("insert into t values(1, 1)")
tk.MustExec("begin PESSIMISTIC")
tk.MustQuery("select a from t where id=1 for update").Check(testkit.Rows("1"))
tk.MustExec("update t set a=a+1 where id=1")
tk.MustExec("commit")
tk.MustQuery("select a from t where id=1").Check(testkit.Rows("2"))
}
func (s *testSuite) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show tables")
for _, tb := range r.Rows() {
tableName := tb[0]
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
func (s *testSuiteP1) TestBind(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists testbind")
tk.MustExec("create table testbind(i int, s varchar(20))")
tk.MustExec("create index index_t on testbind(i,s)")
tk.MustExec("create global binding for select * from testbind using select * from testbind use index for join(index_t)")
c.Assert(len(tk.MustQuery("show global bindings").Rows()), Equals, 1)
tk.MustExec("create session binding for select * from testbind using select * from testbind use index for join(index_t)")
c.Assert(len(tk.MustQuery("show session bindings").Rows()), Equals, 1)
tk.MustExec("drop session binding for select * from testbind")
}
func (s *testSuiteP1) TestChange(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int)")
tk.MustExec("alter table t change a b int")
tk.MustExec("alter table t change b c bigint")
c.Assert(tk.ExecToErr("alter table t change c d varchar(100)"), NotNil)
}
func (s *testSuiteP1) TestChangePumpAndDrainer(c *C) {
tk := testkit.NewTestKit(c, s.store)
// Changing a pump or drainer's state requires a connection to etcd,
// so these statements fail with "URL scheme must be http, https, unix, or unixs: /tmp/tidb".
err := tk.ExecToErr("change pump to node_state ='paused' for node_id 'pump1'")
c.Assert(err, ErrorMatches, "URL scheme must be http, https, unix, or unixs.*")
err = tk.ExecToErr("change drainer to node_state ='paused' for node_id 'drainer1'")
c.Assert(err, ErrorMatches, "URL scheme must be http, https, unix, or unixs.*")
}
func (s *testSuiteP1) TestLoadStats(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
c.Assert(tk.ExecToErr("load stats"), NotNil)
c.Assert(tk.ExecToErr("load stats ./xxx.json"), NotNil)
}
func (s *testSuiteP1) TestShow(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database test_show;")
tk.MustExec("use test_show")
tk.MustQuery("show engines")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key)")
c.Assert(len(tk.MustQuery("show index in t").Rows()), Equals, 1)
c.Assert(len(tk.MustQuery("show index from t").Rows()), Equals, 1)
tk.MustQuery("show charset").Check(testkit.Rows(
"utf8 UTF-8 Unicode utf8_bin 3",
"utf8mb4 UTF-8 Unicode utf8mb4_bin 4",
"ascii US ASCII ascii_bin 1",
"latin1 Latin1 latin1_bin 1",
"binary binary binary 1"))
c.Assert(len(tk.MustQuery("show master status").Rows()), Equals, 1)
tk.MustQuery("show create database test_show").Check(testkit.Rows("test_show CREATE DATABASE `test_show` /*!40100 DEFAULT CHARACTER SET utf8mb4 */"))
tk.MustQuery("show privileges").Check(testkit.Rows("Alter Tables To alter the table",
"Alter Tables To alter the table",
"Alter routine Functions,Procedures To alter or drop stored functions/procedures",
"Create Databases,Tables,Indexes To create new databases and tables",
"Create routine Databases To use CREATE FUNCTION/PROCEDURE",
"Create temporary tables Databases To use CREATE TEMPORARY TABLE",
"Create view Tables To create new views",
"Create user Server Admin To create new users",
"Delete Tables To delete existing rows",
"Drop Databases,Tables To drop databases, tables, and views",
"Event Server Admin To create, alter, drop and execute events",
"Execute Functions,Procedures To execute stored routines",
"File File access on server To read and write files on the server",
"Grant option Databases,Tables,Functions,Procedures To give to other users those privileges you possess",
"Index Tables To create or drop indexes",
"Insert Tables To insert data into tables",
"Lock tables Databases To use LOCK TABLES (together with SELECT privilege)",
"Process Server Admin To view the plain text of currently executing queries",
"Proxy Server Admin To make proxy user possible",
"References Databases,Tables To have references on tables",
"Reload Server Admin To reload or refresh tables, logs and privileges",
"Replication client Server Admin To ask where the slave or master servers are",
"Replication slave Server Admin To read binary log events from the master",
"Select Tables To retrieve rows from table",
"Show databases Server Admin To see all databases with SHOW DATABASES",
"Show view Tables To see views with SHOW CREATE VIEW",
"Shutdown Server Admin To shut down the server",
"Super Server Admin To use KILL thread, SET GLOBAL, CHANGE MASTER, etc.",
"Trigger Tables To use triggers",
"Create tablespace Server Admin To create/alter/drop tablespaces",
"Update Tables To update existing rows",
"Usage Server Admin No privileges - allow connect only"))
c.Assert(len(tk.MustQuery("show table status").Rows()), Equals, 1)
}
func (s *testSuiteP1) TestAdmin(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists admin_test")
tk.MustExec("create table admin_test (c1 int, c2 int, c3 int default 1, index (c1))")
tk.MustExec("insert admin_test (c1) values (1),(2),(NULL)")
ctx := context.Background()
// cancel DDL jobs test
r, err := tk.Exec("admin cancel ddl jobs 1")
c.Assert(err, IsNil, Commentf("err %v", err))
req := r.NewChunk()
err = r.Next(ctx, req)
c.Assert(err, IsNil)
row := req.GetRow(0)
c.Assert(row.Len(), Equals, 2)
c.Assert(row.GetString(0), Equals, "1")
c.Assert(row.GetString(1), Equals, "error: [admin:4]DDL Job:1 not found")
// show ddl test
r, err = tk.Exec("admin show ddl")
c.Assert(err, IsNil)
req = r.NewChunk()
err = r.Next(ctx, req)
c.Assert(err, IsNil)
row = req.GetRow(0)
c.Assert(row.Len(), Equals, 6)
txn, err := s.store.Begin()
c.Assert(err, IsNil)
ddlInfo, err := admin.GetDDLInfo(txn)
c.Assert(err, IsNil)
c.Assert(row.GetInt64(0), Equals, ddlInfo.SchemaVer)
// TODO: Pass this test.
// rowOwnerInfos := strings.Split(row.Data[1].GetString(), ",")
// ownerInfos := strings.Split(ddlInfo.Owner.String(), ",")
// c.Assert(rowOwnerInfos[0], Equals, ownerInfos[0])
serverInfo, err := infosync.GetServerInfoByID(ctx, row.GetString(1))
c.Assert(err, IsNil)
c.Assert(row.GetString(2), Equals, serverInfo.IP+":"+
strconv.FormatUint(uint64(serverInfo.Port), 10))
c.Assert(row.GetString(3), Equals, "")
req = r.NewChunk()
err = r.Next(ctx, req)
c.Assert(err, IsNil)
c.Assert(req.NumRows() == 0, IsTrue)
err = txn.Rollback()
c.Assert(err, IsNil)
// show DDL jobs test
r, err = tk.Exec("admin show ddl jobs")
c.Assert(err, IsNil)
req = r.NewChunk()
err = r.Next(ctx, req)
c.Assert(err, IsNil)
row = req.GetRow(0)
c.Assert(row.Len(), Equals, 11)
txn, err = s.store.Begin()
c.Assert(err, IsNil)
historyJobs, err := admin.GetHistoryDDLJobs(txn, admin.DefNumHistoryJobs)
c.Assert(len(historyJobs), Greater, 1)
c.Assert(len(row.GetString(1)), Greater, 0)
c.Assert(err, IsNil)
c.Assert(row.GetInt64(0), Equals, historyJobs[0].ID)
c.Assert(err, IsNil)
r, err = tk.Exec("admin show ddl jobs 20")
c.Assert(err, IsNil)
req = r.NewChunk()
err = r.Next(ctx, req)
c.Assert(err, IsNil)
row = req.GetRow(0)
c.Assert(row.Len(), Equals, 11)
c.Assert(row.GetInt64(0), Equals, historyJobs[0].ID)
c.Assert(err, IsNil)
// show DDL job queries test
tk.MustExec("use test")
tk.MustExec("drop table if exists admin_test2")
tk.MustExec("create table admin_test2 (c1 int, c2 int, c3 int default 1, index (c1))")
result := tk.MustQuery(`admin show ddl job queries 1, 1, 1`)
result.Check(testkit.Rows())
result = tk.MustQuery(`admin show ddl job queries 1, 2, 3, 4`)
result.Check(testkit.Rows())
historyJobs, err = admin.GetHistoryDDLJobs(txn, admin.DefNumHistoryJobs)
result = tk.MustQuery(fmt.Sprintf("admin show ddl job queries %d", historyJobs[0].ID))
result.Check(testkit.Rows(historyJobs[0].Query))
c.Assert(err, IsNil)
// check table test
tk.MustExec("create table admin_test1 (c1 int, c2 int default 1, index (c1))")
tk.MustExec("insert admin_test1 (c1) values (21),(22)")
r, err = tk.Exec("admin check table admin_test, admin_test1")
c.Assert(err, IsNil)
c.Assert(r, IsNil)
// nonexistent table name
err = tk.ExecToErr("admin check table admin_test_error")
c.Assert(err, NotNil)
// different index values
sctx := tk.Se.(sessionctx.Context)
dom := domain.GetDomain(sctx)
is := dom.InfoSchema()
c.Assert(is, NotNil)
tb, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("admin_test"))
c.Assert(err, IsNil)
c.Assert(tb.Indices(), HasLen, 1)
_, err = tb.Indices()[0].Create(mock.NewContext(), txn, types.MakeDatums(int64(10)), 1, table.WithAssertion(txn))
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
errAdmin := tk.ExecToErr("admin check table admin_test")
c.Assert(errAdmin, NotNil)
if config.CheckTableBeforeDrop {
err = tk.ExecToErr("drop table admin_test")
c.Assert(err.Error(), Equals, errAdmin.Error())
// Drop the inconsistent index.
tk.MustExec("alter table admin_test drop index c1")
tk.MustExec("admin check table admin_test")
}
// checksum table test
tk.MustExec("create table checksum_with_index (id int, count int, PRIMARY KEY(id), KEY(count))")
tk.MustExec("create table checksum_without_index (id int, count int, PRIMARY KEY(id))")
r, err = tk.Exec("admin checksum table checksum_with_index, checksum_without_index")
c.Assert(err, IsNil)
res := tk.ResultSetToResult(r, Commentf("admin checksum table"))
// Mocktikv returns 1 for every table/index scan; the per-table checksum is then the xor of those values.
// For "checksum_with_index", we have two checksums, so the result will be 1^1 = 0.
// For "checksum_without_index", we only have one checksum, so the result will be 1.
res.Sort().Check(testkit.Rows("test checksum_with_index 0 2 2", "test checksum_without_index 1 1 1"))
tk.MustExec("drop table if exists t1;")
tk.MustExec("CREATE TABLE t1 (c2 BOOL, PRIMARY KEY (c2));")
tk.MustExec("INSERT INTO t1 SET c2 = '0';")
tk.MustExec("ALTER TABLE t1 ADD COLUMN c3 DATETIME NULL DEFAULT '2668-02-03 17:19:31';")
tk.MustExec("ALTER TABLE t1 ADD INDEX idx2 (c3);")
tk.MustExec("ALTER TABLE t1 ADD COLUMN c4 bit(10) default 127;")
tk.MustExec("ALTER TABLE t1 ADD INDEX idx3 (c4);")
tk.MustExec("admin check table t1;")
// Test that admin show ddl jobs reports the table name even after the table has been dropped.
tk.MustExec("drop table if exists t1;")
re := tk.MustQuery("admin show ddl jobs 1")
rows := re.Rows()
c.Assert(len(rows), Equals, 1)
c.Assert(rows[0][2], Equals, "t1")
// Test reverse scanning of history DDL jobs when the history job queue spans multiple regions.
txn, err = s.store.Begin()
c.Assert(err, IsNil)
historyJobs, err = admin.GetHistoryDDLJobs(txn, 20)
c.Assert(err, IsNil)
// Split region for history ddl job queues.
m := meta.NewMeta(txn)
startKey := meta.DDLJobHistoryKey(m, 0)
endKey := meta.DDLJobHistoryKey(m, historyJobs[0].ID)
s.cluster.SplitKeys(s.mvccStore, startKey, endKey, int(historyJobs[0].ID/5))
historyJobs2, err := admin.GetHistoryDDLJobs(txn, 20)
c.Assert(err, IsNil)
c.Assert(historyJobs, DeepEquals, historyJobs2)
}
func (s *testSuite) TestAdminShowDDLJobs(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database if not exists test_admin_show_ddl_jobs")
tk.MustExec("use test_admin_show_ddl_jobs")
tk.MustExec("create table t (a int);")
re := tk.MustQuery("admin show ddl jobs 1")
row := re.Rows()[0]
c.Assert(row[1], Equals, "test_admin_show_ddl_jobs")
jobID, err := strconv.Atoi(row[0].(string))
c.Assert(err, IsNil)
c.Assert(tk.Se.NewTxn(context.Background()), IsNil)
txn, err := tk.Se.Txn(true)
c.Assert(err, IsNil)
t := meta.NewMeta(txn)
job, err := t.GetHistoryDDLJob(int64(jobID))
c.Assert(err, IsNil)
c.Assert(job, NotNil)
// Test for compatibility. Old TiDB versions don't have the SchemaName field, and BinlogInfo may be nil.
// See PR: 11561.
job.BinlogInfo = nil
job.SchemaName = ""
err = t.AddHistoryDDLJob(job)
c.Assert(err, IsNil)
err = tk.Se.CommitTxn(context.Background())
c.Assert(err, IsNil)
re = tk.MustQuery("admin show ddl jobs 1")
row = re.Rows()[0]
c.Assert(row[1], Equals, "test_admin_show_ddl_jobs")
re = tk.MustQuery("admin show ddl jobs 1 where job_type='create table'")
row = re.Rows()[0]
c.Assert(row[1], Equals, "test_admin_show_ddl_jobs")
}
func (s *testSuite) TestAdminChecksumOfPartitionedTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("USE test;")
tk.MustExec("DROP TABLE IF EXISTS admin_checksum_partition_test;")
tk.MustExec("CREATE TABLE admin_checksum_partition_test (a INT) PARTITION BY HASH(a) PARTITIONS 4;")
tk.MustExec("INSERT INTO admin_checksum_partition_test VALUES (1), (2);")
r := tk.MustQuery("ADMIN CHECKSUM TABLE admin_checksum_partition_test;")
r.Check(testkit.Rows("test admin_checksum_partition_test 1 5 5"))
}
func (s *baseTestSuite) fillData(tk *testkit.TestKit, table string) {
tk.MustExec("use test")
tk.MustExec(fmt.Sprintf("create table %s(id int not null default 1, name varchar(255), PRIMARY KEY(id));", table))
// insert data
tk.MustExec(fmt.Sprintf("insert INTO %s VALUES (1, \"hello\");", table))
tk.CheckExecResult(1, 0)
tk.MustExec(fmt.Sprintf("insert into %s values (2, \"hello\");", table))
tk.CheckExecResult(1, 0)
}
type testCase struct {
data1 []byte
data2 []byte
expected []string
restData []byte
expectedMsg string
}
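// checkCases drives each testCase through the LOAD DATA pipeline: InsertData
// consumes data1 and data2, any leftover bytes must match restData, the last
// message must match expectedMsg, and the committed rows are verified via
// selectSQL before deleteSQL resets the table.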
func checkCases(tests []testCase, ld *executor.LoadDataInfo,
c *C, tk *testkit.TestKit, ctx sessionctx.Context, selectSQL, deleteSQL string) {
origin := ld.IgnoreLines
for _, tt := range tests {
ld.IgnoreLines = origin
c.Assert(ctx.NewTxn(context.Background()), IsNil)
ctx.GetSessionVars().StmtCtx.DupKeyAsWarning = true
ctx.GetSessionVars().StmtCtx.BadNullAsWarning = true
ctx.GetSessionVars().StmtCtx.InLoadDataStmt = true
ctx.GetSessionVars().StmtCtx.InDeleteStmt = false
data, reachLimit, err1 := ld.InsertData(context.Background(), tt.data1, tt.data2)
c.Assert(err1, IsNil)
c.Assert(reachLimit, IsFalse)
err1 = ld.CheckAndInsertOneBatch(context.Background(), ld.GetRows(), ld.GetCurBatchCnt())
c.Assert(err1, IsNil)
ld.SetMaxRowsInBatch(20000)
if tt.restData == nil {
c.Assert(data, HasLen, 0,
Commentf("data1:%v, data2:%v, data:%v", string(tt.data1), string(tt.data2), string(data)))
} else {
c.Assert(data, DeepEquals, tt.restData,
Commentf("data1:%v, data2:%v, data:%v", string(tt.data1), string(tt.data2), string(data)))
}
ld.SetMessage()
tk.CheckLastMessage(tt.expectedMsg)
err := ctx.StmtCommit()
c.Assert(err, IsNil)
txn, err := ctx.Txn(true)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
r := tk.MustQuery(selectSQL)
r.Check(testutil.RowsWithSep("|", tt.expected...))
tk.MustExec(deleteSQL)
}
}
func (s *testSuiteP1) TestSelectWithoutFrom(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("select 1 + 2*3;")
r.Check(testkit.Rows("7"))
r = tk.MustQuery(`select _utf8"string";`)
r.Check(testkit.Rows("string"))
r = tk.MustQuery("select 1 order by 1;")
r.Check(testkit.Rows("1"))
}
// TestSelectBackslashN Issue #3685.
func (s *testSuiteP1) TestSelectBackslashN(c *C) {
tk := testkit.NewTestKit(c, s.store)
sql := `select \N;`
r := tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err := tk.Exec(sql)
c.Check(err, IsNil)
fields := rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "NULL")
sql = `select "\N";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("N"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `N`)
tk.MustExec("use test;")
tk.MustExec("create table test (`\\N` int);")
tk.MustExec("insert into test values (1);")
tk.CheckExecResult(1, 0)
sql = "select * from test;"
r = tk.MustQuery(sql)
r.Check(testkit.Rows("1"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `\N`)
sql = `select \N from test;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(err, IsNil)
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `NULL`)
sql = `select (\N) from test;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `NULL`)
sql = "select `\\N` from test;"
r = tk.MustQuery(sql)
r.Check(testkit.Rows("1"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `\N`)
sql = "select (`\\N`) from test;"
r = tk.MustQuery(sql)
r.Check(testkit.Rows("1"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `\N`)
sql = `select '\N' from test;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("N"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `N`)
sql = `select ('\N') from test;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("N"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `N`)
}
// TestSelectNull Issue #4053.
func (s *testSuiteP1) TestSelectNull(c *C) {
tk := testkit.NewTestKit(c, s.store)
sql := `select nUll;`
r := tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err := tk.Exec(sql)
c.Check(err, IsNil)
fields := rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `NULL`)
sql = `select (null);`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `NULL`)
sql = `select null+NULL;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(err, IsNil)
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `null+NULL`)
}
// TestSelectStringLiteral Issue #3686.
func (s *testSuiteP1) TestSelectStringLiteral(c *C) {
tk := testkit.NewTestKit(c, s.store)
sql := `select 'abc';`
r := tk.MustQuery(sql)
r.Check(testkit.Rows("abc"))
rs, err := tk.Exec(sql)
c.Check(err, IsNil)
fields := rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `abc`)
sql = `select (('abc'));`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("abc"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `abc`)
sql = `select 'abc'+'def';`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("0"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `'abc'+'def'`)
// Below checks whether leading invalid chars are trimmed.
sql = "select '\n';"
r = tk.MustQuery(sql)
r.Check(testkit.Rows("\n"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "")
sql = "select '\t col';" // Lowercased letter is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "col")
sql = "select '\t Col';" // Uppercased letter is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "Col")
sql = "select '\n\t 中文 col';" // Chinese char is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "中文 col")
sql = "select ' \r\n .col';" // Punctuation is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, ".col")
sql = "select ' 😆col';" // Emoji is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "😆col")
// Below checks whether trailing invalid chars are preserved.
sql = `select 'abc ';`
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "abc ")
sql = `select ' abc 123 ';`
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "abc 123 ")
// Issue #4239.
sql = `select 'a' ' ' 'string';`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("a string"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "a")
sql = `select 'a' " " "string";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("a string"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "a")
sql = `select 'string' 'string';`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("stringstring"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "string")
sql = `select "ss" "a";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("ssa"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "ss")
sql = `select "ss" "a" "b";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("ssab"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "ss")
sql = `select "ss" "a" ' ' "b";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("ssa b"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "ss")
sql = `select "ss" "a" ' ' "b" ' ' "d";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("ssa b d"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "ss")
}
func (s *testSuiteP1) TestSelectLimit(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
s.fillData(tk, "select_limit")
tk.MustExec("insert INTO select_limit VALUES (3, \"hello\");")
tk.CheckExecResult(1, 0)
tk.MustExec("insert INTO select_limit VALUES (4, \"hello\");")
tk.CheckExecResult(1, 0)
r := tk.MustQuery("select * from select_limit limit 1;")
r.Check(testkit.Rows("1 hello"))
r = tk.MustQuery("select id from (select * from select_limit limit 1) k where id != 1;")
r.Check(testkit.Rows())
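// 18446744073709551615 is math.MaxUint64, the largest LIMIT value the
// parser accepts; one past it (tested below) must be rejected.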
r = tk.MustQuery("select * from select_limit limit 18446744073709551615 offset 0;")
r.Check(testkit.Rows("1 hello", "2 hello", "3 hello", "4 hello"))
r = tk.MustQuery("select * from select_limit limit 18446744073709551615 offset 1;")
r.Check(testkit.Rows("2 hello", "3 hello", "4 hello"))
r = tk.MustQuery("select * from select_limit limit 18446744073709551615 offset 3;")
r.Check(testkit.Rows("4 hello"))
err := tk.ExecToErr("select * from select_limit limit 18446744073709551616 offset 3;")
c.Assert(err, NotNil)
}
func (s *testSuiteP1) TestSelectOrderBy(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
s.fillData(tk, "select_order_test")
// Test star field
r := tk.MustQuery("select * from select_order_test where id = 1 order by id limit 1 offset 0;")
r.Check(testkit.Rows("1 hello"))
r = tk.MustQuery("select id from select_order_test order by id desc limit 1 ")
r.Check(testkit.Rows("2"))
r = tk.MustQuery("select id from select_order_test order by id + 1 desc limit 1 ")
r.Check(testkit.Rows("2"))
// Test limit
r = tk.MustQuery("select * from select_order_test order by name, id limit 1 offset 0;")
r.Check(testkit.Rows("1 hello"))
// Test limit
r = tk.MustQuery("select id as c1, name from select_order_test order by 2, id limit 1 offset 0;")
r.Check(testkit.Rows("1 hello"))
// Test limit overflow
r = tk.MustQuery("select * from select_order_test order by name, id limit 100 offset 0;")
r.Check(testkit.Rows("1 hello", "2 hello"))
// Test offset overflow
r = tk.MustQuery("select * from select_order_test order by name, id limit 1 offset 100;")
r.Check(testkit.Rows())
// Test limit exceeds int range.
r = tk.MustQuery("select id from select_order_test order by name, id limit 18446744073709551615;")
r.Check(testkit.Rows("1", "2"))
// Test multiple field
r = tk.MustQuery("select id, name from select_order_test where id = 1 group by id, name limit 1 offset 0;")
r.Check(testkit.Rows("1 hello"))
// Test limit + order by
for i := 3; i <= 10; i++ {
tk.MustExec(fmt.Sprintf("insert INTO select_order_test VALUES (%d, \"zz\");", i))
}
tk.MustExec("insert INTO select_order_test VALUES (10086, \"hi\");")
for i := 11; i <= 20; i++ {
tk.MustExec(fmt.Sprintf("insert INTO select_order_test VALUES (%d, \"hh\");", i))
}
for i := 21; i <= 30; i++ {
tk.MustExec(fmt.Sprintf("insert INTO select_order_test VALUES (%d, \"zz\");", i))
}
tk.MustExec("insert INTO select_order_test VALUES (1501, \"aa\");")
r = tk.MustQuery("select * from select_order_test order by name, id limit 1 offset 3;")
r.Check(testkit.Rows("11 hh"))
tk.MustExec("drop table select_order_test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c int, d int)")
tk.MustExec("insert t values (1, 1)")
tk.MustExec("insert t values (1, 2)")
tk.MustExec("insert t values (1, 3)")
r = tk.MustQuery("select 1-d as d from t order by d;")
r.Check(testkit.Rows("-2", "-1", "0"))
r = tk.MustQuery("select 1-d as d from t order by d + 1;")
r.Check(testkit.Rows("0", "-1", "-2"))
r = tk.MustQuery("select t.d from t order by d;")
r.Check(testkit.Rows("1", "2", "3"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int, b int, c int)")
tk.MustExec("insert t values (1, 2, 3)")
r = tk.MustQuery("select b from (select a,b from t order by a,c) t")
r.Check(testkit.Rows("2"))
r = tk.MustQuery("select b from (select a,b from t order by a,c limit 1) t")
r.Check(testkit.Rows("2"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, index idx(a))")
tk.MustExec("insert into t values(1, 1), (2, 2)")
tk.MustQuery("select * from t where 1 order by b").Check(testkit.Rows("1 1", "2 2"))
tk.MustQuery("select * from t where a between 1 and 2 order by a desc").Check(testkit.Rows("2 2", "1 1"))
// Test double read where topN is pushed down to the first read plan.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key, b int, c int, index idx(b))")
tk.MustExec("insert into t values(1, 3, 1)")
tk.MustExec("insert into t values(2, 2, 2)")
tk.MustExec("insert into t values(3, 1, 3)")
tk.MustQuery("select * from t use index(idx) order by a desc limit 1").Check(testkit.Rows("3 1 3"))
// Test double read which needs to keep order.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, key b (b))")
tk.Se.GetSessionVars().IndexLookupSize = 3
for i := 0; i < 10; i++ {
tk.MustExec(fmt.Sprintf("insert into t values(%d, %d)", i, 10-i))
}
tk.MustQuery("select a from t use index(b) order by b").Check(testkit.Rows("9", "8", "7", "6", "5", "4", "3", "2", "1", "0"))
}
func (s *testSuiteP1) TestOrderBy(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 int, c2 int, c3 varchar(20))")
tk.MustExec("insert into t values (1, 2, 'abc'), (2, 1, 'bcd')")
// Fix issue https://github.com/pingcap/tidb/issues/337
tk.MustQuery("select c1 as a, c1 as b from t order by c1").Check(testkit.Rows("1 1", "2 2"))
tk.MustQuery("select c1 as a, t.c1 as a from t order by a desc").Check(testkit.Rows("2 2", "1 1"))
tk.MustQuery("select c1 as c2 from t order by c2").Check(testkit.Rows("1", "2"))
tk.MustQuery("select sum(c1) from t order by sum(c1)").Check(testkit.Rows("3"))
tk.MustQuery("select c1 as c2 from t order by c2 + 1").Check(testkit.Rows("2", "1"))
// Order by position.
tk.MustQuery("select * from t order by 1").Check(testkit.Rows("1 2 abc", "2 1 bcd"))
tk.MustQuery("select * from t order by 2").Check(testkit.Rows("2 1 bcd", "1 2 abc"))
// Order by binary.
tk.MustQuery("select c1, c3 from t order by binary c1 desc").Check(testkit.Rows("2 bcd", "1 abc"))
tk.MustQuery("select c1, c2 from t order by binary c3").Check(testkit.Rows("1 2", "2 1"))
}
func (s *testSuiteP1) TestSelectErrorRow(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
err := tk.ExecToErr("select row(1, 1) from test")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test group by row(1, 1);")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test order by row(1, 1);")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test having row(1, 1);")
c.Assert(err, NotNil)
err = tk.ExecToErr("select (select 1, 1) from test;")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test group by (select 1, 1);")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test order by (select 1, 1);")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test having (select 1, 1);")
c.Assert(err, NotNil)
}
// TestIssue2612 is related to https://github.com/pingcap/tidb/issues/2612
func (s *testSuiteP1) TestIssue2612(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t (
create_at datetime NOT NULL DEFAULT '1000-01-01 00:00:00',
finish_at datetime NOT NULL DEFAULT '1000-01-01 00:00:00');`)
tk.MustExec(`insert into t values ('2016-02-13 15:32:24', '2016-02-11 17:23:22');`)
rs, err := tk.Exec(`select timediff(finish_at, create_at) from t;`)
c.Assert(err, IsNil)
req := rs.NewChunk()
err = rs.Next(context.Background(), req)
c.Assert(err, IsNil)
c.Assert(req.GetRow(0).GetDuration(0, 0).String(), Equals, "-46:09:02")
rs.Close()
}
// TestIssue345 is related to https://github.com/pingcap/tidb/issues/345
func (s *testSuiteP1) TestIssue345(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists t1, t2`)
tk.MustExec(`create table t1 (c1 int);`)
tk.MustExec(`create table t2 (c2 int);`)
tk.MustExec(`insert into t1 values (1);`)
tk.MustExec(`insert into t2 values (2);`)
tk.MustExec(`update t1, t2 set t1.c1 = 2, t2.c2 = 1;`)
tk.MustExec(`update t1, t2 set c1 = 2, c2 = 1;`)
tk.MustExec(`update t1 as a, t2 as b set a.c1 = 2, b.c2 = 1;`)
// Check t1 content
r := tk.MustQuery("SELECT * FROM t1;")
r.Check(testkit.Rows("2"))
// Check t2 content
r = tk.MustQuery("SELECT * FROM t2;")
r.Check(testkit.Rows("1"))
tk.MustExec(`update t1 as a, t2 as t1 set a.c1 = 1, t1.c2 = 2;`)
// Check t1 content
r = tk.MustQuery("SELECT * FROM t1;")
r.Check(testkit.Rows("1"))
// Check t2 content
r = tk.MustQuery("SELECT * FROM t2;")
r.Check(testkit.Rows("2"))
_, err := tk.Exec(`update t1 as a, t2 set t1.c1 = 10;`)
c.Assert(err, NotNil)
}
func (s *testSuiteP1) TestIssue5055(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists t1, t2`)
tk.MustExec(`create table t1 (a int);`)
tk.MustExec(`create table t2 (a int);`)
tk.MustExec(`insert into t1 values(1);`)
tk.MustExec(`insert into t2 values(1);`)
result := tk.MustQuery("select tbl1.* from (select t1.a, 1 from t1) tbl1 left join t2 tbl2 on tbl1.a = tbl2.a order by tbl1.a desc limit 1;")
result.Check(testkit.Rows("1 1"))
}
func (s *testSuite) TestUnion(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
testSQL := `drop table if exists union_test; create table union_test(id int);`
tk.MustExec(testSQL)
testSQL = `drop table if exists union_test;`
tk.MustExec(testSQL)
testSQL = `create table union_test(id int);`
tk.MustExec(testSQL)
testSQL = `insert union_test values (1),(2)`
tk.MustExec(testSQL)
testSQL = `select * from (select id from union_test union select id from union_test) t order by id;`
r := tk.MustQuery(testSQL)
r.Check(testkit.Rows("1", "2"))
r = tk.MustQuery("select 1 union all select 1")
r.Check(testkit.Rows("1", "1"))
r = tk.MustQuery("select 1 union all select 1 union select 1")
r.Check(testkit.Rows("1"))
r = tk.MustQuery("select 1 as a union (select 2) order by a limit 1")
r.Check(testkit.Rows("1"))
r = tk.MustQuery("select 1 as a union (select 2) order by a limit 1, 1")
r.Check(testkit.Rows("2"))
r = tk.MustQuery("select id from union_test union all (select 1) order by id desc")
r.Check(testkit.Rows("2", "1", "1"))
r = tk.MustQuery("select id as a from union_test union (select 1) order by a desc")
r.Check(testkit.Rows("2", "1"))
r = tk.MustQuery(`select null as a union (select "abc") order by a`)
r.Check(testkit.Rows("<nil>", "abc"))
r = tk.MustQuery(`select "abc" as a union (select 1) order by a`)
r.Check(testkit.Rows("1", "abc"))
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (c int, d int)")
tk.MustExec("insert t1 values (NULL, 1)")
tk.MustExec("insert t1 values (1, 1)")
tk.MustExec("insert t1 values (1, 2)")
tk.MustExec("drop table if exists t2")
tk.MustExec("create table t2 (c int, d int)")
tk.MustExec("insert t2 values (1, 3)")
tk.MustExec("insert t2 values (1, 1)")
tk.MustExec("drop table if exists t3")
tk.MustExec("create table t3 (c int, d int)")
tk.MustExec("insert t3 values (3, 2)")
tk.MustExec("insert t3 values (4, 3)")
r = tk.MustQuery(`select sum(c1), c2 from (select c c1, d c2 from t1 union all select d c1, c c2 from t2 union all select c c1, d c2 from t3) x group by c2 order by c2`)
r.Check(testkit.Rows("5 1", "4 2", "4 3"))
tk.MustExec("drop table if exists t1, t2, t3")
tk.MustExec("create table t1 (a int primary key)")
tk.MustExec("create table t2 (a int primary key)")
tk.MustExec("create table t3 (a int primary key)")
tk.MustExec("insert t1 values (7), (8)")
tk.MustExec("insert t2 values (1), (9)")
tk.MustExec("insert t3 values (2), (3)")
r = tk.MustQuery("select * from t1 union all select * from t2 union all (select * from t3) order by a limit 2")
r.Check(testkit.Rows("1", "2"))
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1 (a int)")
tk.MustExec("create table t2 (a int)")
tk.MustExec("insert t1 values (2), (1)")
tk.MustExec("insert t2 values (3), (4)")
r = tk.MustQuery("select * from t1 union all (select * from t2) order by a limit 1")
r.Check(testkit.Rows("1"))
r = tk.MustQuery("select (select * from t1 where a != t.a union all (select * from t2 where a != t.a) order by a limit 1) from t1 t")
r.Check(testkit.Rows("1", "2"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (id int unsigned primary key auto_increment, c1 int, c2 int, index c1_c2 (c1, c2))")
tk.MustExec("insert into t (c1, c2) values (1, 1)")
tk.MustExec("insert into t (c1, c2) values (1, 2)")
tk.MustExec("insert into t (c1, c2) values (2, 3)")
r = tk.MustQuery("select * from (select * from t where t.c1 = 1 union select * from t where t.id = 1) s order by s.id")
r.Check(testkit.Rows("1 1 1", "2 1 2"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (f1 DATE)")
tk.MustExec("INSERT INTO t VALUES ('1978-11-26')")
r = tk.MustQuery("SELECT f1+0 FROM t UNION SELECT f1+0 FROM t")
r.Check(testkit.Rows("19781126"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (a int, b int)")
tk.MustExec("INSERT INTO t VALUES ('1', '1')")
r = tk.MustQuery("select b from (SELECT * FROM t UNION ALL SELECT a, b FROM t order by a) t")
r.Check(testkit.Rows("1", "1"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (a DECIMAL(4,2))")
tk.MustExec("INSERT INTO t VALUE(12.34)")
r = tk.MustQuery("SELECT 1 AS c UNION select a FROM t")
r.Sort().Check(testkit.Rows("1.00", "12.34"))
// #issue 3771
r = tk.MustQuery("SELECT 'a' UNION SELECT CONCAT('a', -4)")
r.Sort().Check(testkit.Rows("a", "a-4"))
// Test for a potential data race when user variables are evaluated in a union.
tk.MustQuery("SELECT @x:=0 UNION ALL SELECT @x:=0 UNION ALL SELECT @x")
// Test the result field type for union.
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("CREATE TABLE t1 (a date)")
tk.MustExec("CREATE TABLE t2 (a date)")
tk.MustExec("SELECT a from t1 UNION select a FROM t2")
tk.MustQuery("show create table t1").Check(testkit.Rows("t1 CREATE TABLE `t1` (\n" + " `a` date DEFAULT NULL\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
// Moved from the session tests.
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1 (c double);")
tk.MustExec("create table t2 (c double);")
tk.MustExec("insert into t1 value (73);")
tk.MustExec("insert into t2 value (930);")
// If an unspecified column flen is set to 0, it causes a bug in union.
// This test prevents that bug from reappearing.
tk.MustQuery("select c from t1 union (select c from t2) order by c").Check(testkit.Rows("73", "930"))
// issue 5703
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a date)")
tk.MustExec("insert into t value ('2017-01-01'), ('2017-01-02')")
r = tk.MustQuery("(select a from t where a < 0) union (select a from t where a > 0) order by a")
r.Check(testkit.Rows("2017-01-01", "2017-01-02"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t value(0),(0)")
tk.MustQuery("select 1 from (select a from t union all select a from t) tmp").Check(testkit.Rows("1", "1", "1", "1"))
tk.MustQuery("select 10 as a from dual union select a from t order by a desc limit 1 ").Check(testkit.Rows("10"))
tk.MustQuery("select -10 as a from dual union select a from t order by a limit 1 ").Check(testkit.Rows("-10"))
tk.MustQuery("select count(1) from (select a from t union all select a from t) tmp").Check(testkit.Rows("4"))
err := tk.ExecToErr("select 1 from (select a from t limit 1 union all select a from t limit 1) tmp")
c.Assert(err, NotNil)
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrWrongUsage))
err = tk.ExecToErr("select 1 from (select a from t order by a union all select a from t limit 1) tmp")
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrWrongUsage))
_, err = tk.Exec("(select a from t order by a) union all select a from t limit 1 union all select a from t limit 1")
c.Assert(terror.ErrorEqual(err, plannercore.ErrWrongUsage), IsTrue, Commentf("err %v", err))
_, err = tk.Exec("(select a from t limit 1) union all select a from t limit 1")
c.Assert(err, IsNil)
_, err = tk.Exec("(select a from t order by a) union all select a from t order by a")
c.Assert(err, IsNil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t value(1),(2),(3)")
tk.MustQuery("(select a from t order by a limit 2) union all (select a from t order by a desc limit 2) order by a desc limit 1,2").Check(testkit.Rows("2", "2"))
tk.MustQuery("select a from t union all select a from t order by a desc limit 5").Check(testkit.Rows("3", "3", "2", "2", "1"))
tk.MustQuery("(select a from t order by a desc limit 2) union all select a from t group by a order by a").Check(testkit.Rows("1", "2", "2", "3", "3"))
tk.MustQuery("(select a from t order by a desc limit 2) union all select 33 as a order by a desc limit 2").Check(testkit.Rows("33", "3"))
tk.MustQuery("select 1 union select 1 union all select 1").Check(testkit.Rows("1", "1"))
tk.MustQuery("select 1 union all select 1 union select 1").Check(testkit.Rows("1"))
tk.MustExec("drop table if exists t1, t2")
tk.MustExec(`create table t1(a bigint, b bigint);`)
tk.MustExec(`create table t2(a bigint, b bigint);`)
tk.MustExec(`insert into t1 values(1, 1);`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t2 values(1, 1);`)
tk.MustExec(`set @@tidb_init_chunk_size=2;`)
tk.MustExec(`set @@sql_mode="";`)
tk.MustQuery(`select count(*) from (select t1.a, t1.b from t1 left join t2 on t1.a=t2.a union all select t1.a, t1.a from t1 left join t2 on t1.a=t2.a) tmp;`).Check(testkit.Rows("128"))
tk.MustQuery(`select tmp.a, count(*) from (select t1.a, t1.b from t1 left join t2 on t1.a=t2.a union all select t1.a, t1.a from t1 left join t2 on t1.a=t2.a) tmp;`).Check(testkit.Rows("1 128"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int)")
tk.MustExec("insert into t value(1 ,2)")
tk.MustQuery("select a, b from (select a, 0 as d, b from t union all select a, 0 as d, b from t) test;").Check(testkit.Rows("1 2", "1 2"))
// #issue 8141
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1(a int, b int)")
tk.MustExec("insert into t1 value(1,2),(1,1),(2,2),(2,2),(3,2),(3,2)")
tk.MustExec("set @@tidb_init_chunk_size=2;")
tk.MustQuery("select count(*) from (select a as c, a as d from t1 union all select a, b from t1) t;").Check(testkit.Rows("12"))
// #issue 8189 and #issue 8199
tk.MustExec("drop table if exists t1")
tk.MustExec("drop table if exists t2")
tk.MustExec("CREATE TABLE t1 (a int not null, b char (10) not null)")
tk.MustExec("insert into t1 values(1,'a'),(2,'b'),(3,'c'),(3,'c')")
tk.MustExec("CREATE TABLE t2 (a int not null, b char (10) not null)")
tk.MustExec("insert into t2 values(1,'a'),(2,'b'),(3,'c'),(3,'c')")
tk.MustQuery("select a from t1 union select a from t1 order by (select a+1);").Check(testkit.Rows("1", "2", "3"))
// #issue 8201
for i := 0; i < 4; i++ {
tk.MustQuery("SELECT(SELECT 0 AS a FROM dual UNION SELECT 1 AS a FROM dual ORDER BY a ASC LIMIT 1) AS dev").Check(testkit.Rows("0"))
}
// #issue 8231
tk.MustExec("drop table if exists t1")
tk.MustExec("CREATE TABLE t1 (uid int(1))")
tk.MustExec("INSERT INTO t1 SELECT 150")
tk.MustQuery("SELECT 'a' UNION SELECT uid FROM t1 order by 1 desc;").Check(testkit.Rows("a", "150"))
// #issue 8196
tk.MustExec("drop table if exists t1")
tk.MustExec("drop table if exists t2")
tk.MustExec("CREATE TABLE t1 (a int not null, b char (10) not null)")
tk.MustExec("insert into t1 values(1,'a'),(2,'b'),(3,'c'),(3,'c')")
tk.MustExec("CREATE TABLE t2 (a int not null, b char (10) not null)")
tk.MustExec("insert into t2 values(3,'c'),(4,'d'),(5,'f'),(6,'e')")
tk.MustExec("analyze table t1")
tk.MustExec("analyze table t2")
_, err = tk.Exec("(select a,b from t1 limit 2) union all (select a,b from t2 order by a limit 1) order by t1.b")
c.Assert(err.Error(), Equals, "[planner:1250]Table 't1' from one of the SELECTs cannot be used in global ORDER clause")
// #issue 9900
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b decimal(6, 3))")
tk.MustExec("insert into t values(1, 1.000)")
tk.MustQuery("select count(distinct a), sum(distinct a), avg(distinct a) from (select a from t union all select b from t) tmp;").Check(testkit.Rows("1 1.000 1.0000000"))
}
func (s *testSuiteP1) TestNeighbouringProj(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1(a int, b int)")
tk.MustExec("create table t2(a int, b int)")
tk.MustExec("insert into t1 value(1, 1), (2, 2)")
tk.MustExec("insert into t2 value(1, 1), (2, 2)")
tk.MustQuery("select sum(c) from (select t1.a as a, t1.a as c, length(t1.b) from t1 union select a, b, b from t2) t;").Check(testkit.Rows("5"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a bigint, b bigint, c bigint);")
tk.MustExec("insert into t values(1, 1, 1), (2, 2, 2), (3, 3, 3);")
rs := tk.MustQuery("select cast(count(a) as signed), a as another, a from t group by a order by cast(count(a) as signed), a limit 10;")
rs.Check(testkit.Rows("1 1 1", "1 2 2", "1 3 3"))
}
func (s *testSuiteP1) TestIn(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t (c1 int primary key, c2 int, key c (c2));`)
for i := 0; i <= 200; i++ {
tk.MustExec(fmt.Sprintf("insert t values(%d, %d)", i, i))
}
queryStr := `select c2 from t where c1 in ('7', '10', '112', '111', '98', '106', '100', '9', '18', '17') order by c2`
r := tk.MustQuery(queryStr)
r.Check(testkit.Rows("7", "9", "10", "17", "18", "98", "100", "106", "111", "112"))
queryStr = `select c2 from t where c1 in ('7a')`
tk.MustQuery(queryStr).Check(testkit.Rows("7"))
}
func (s *testSuiteP1) TestTablePKisHandleScan(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int PRIMARY KEY AUTO_INCREMENT)")
tk.MustExec("insert t values (),()")
tk.MustExec("insert t values (-100),(0)")
tests := []struct {
sql string
result [][]interface{}
}{
{
"select * from t",
testkit.Rows("-100", "1", "2", "3"),
},
{
"select * from t where a = 1",
testkit.Rows("1"),
},
{
"select * from t where a != 1",
testkit.Rows("-100", "2", "3"),
},
{
"select * from t where a >= '1.1'",
testkit.Rows("2", "3"),
},
{
"select * from t where a < '1.1'",
testkit.Rows("-100", "1"),
},
{
"select * from t where a > '-100.1' and a < 2",
testkit.Rows("-100", "1"),
},
{
"select * from t where a is null",
testkit.Rows(),
}, {
"select * from t where a is true",
testkit.Rows("-100", "1", "2", "3"),
}, {
"select * from t where a is false",
testkit.Rows(),
},
{
"select * from t where a in (1, 2)",
testkit.Rows("1", "2"),
},
{
"select * from t where a between 1 and 2",
testkit.Rows("1", "2"),
},
}
for _, tt := range tests {
result := tk.MustQuery(tt.sql)
result.Check(tt.result)
}
}
func (s *testSuiteP1) TestIndexScan(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unique)")
tk.MustExec("insert t values (-1), (2), (3), (5), (6), (7), (8), (9)")
result := tk.MustQuery("select a from t where a < 0 or (a >= 2.1 and a < 5.1) or ( a > 5.9 and a <= 7.9) or a > '8.1'")
result.Check(testkit.Rows("-1", "3", "5", "6", "7", "9"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unique)")
tk.MustExec("insert t values (0)")
result = tk.MustQuery("select NULL from t ")
result.Check(testkit.Rows("<nil>"))
// test for double read
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unique, b int)")
tk.MustExec("insert t values (5, 0)")
tk.MustExec("insert t values (4, 0)")
tk.MustExec("insert t values (3, 0)")
tk.MustExec("insert t values (2, 0)")
tk.MustExec("insert t values (1, 0)")
tk.MustExec("insert t values (0, 0)")
result = tk.MustQuery("select * from t order by a limit 3")
result.Check(testkit.Rows("0 0", "1 0", "2 0"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unique, b int)")
tk.MustExec("insert t values (0, 1)")
tk.MustExec("insert t values (1, 2)")
tk.MustExec("insert t values (2, 1)")
tk.MustExec("insert t values (3, 2)")
tk.MustExec("insert t values (4, 1)")
tk.MustExec("insert t values (5, 2)")
result = tk.MustQuery("select * from t where a < 5 and b = 1 limit 2")
result.Check(testkit.Rows("0 1", "2 1"))
tk.MustExec("drop table if exists tab1")
tk.MustExec("CREATE TABLE tab1(pk INTEGER PRIMARY KEY, col0 INTEGER, col1 FLOAT, col3 INTEGER, col4 FLOAT)")
tk.MustExec("CREATE INDEX idx_tab1_0 on tab1 (col0)")
tk.MustExec("CREATE INDEX idx_tab1_1 on tab1 (col1)")
tk.MustExec("CREATE INDEX idx_tab1_3 on tab1 (col3)")
tk.MustExec("CREATE INDEX idx_tab1_4 on tab1 (col4)")
tk.MustExec("INSERT INTO tab1 VALUES(1,37,20.85,30,10.69)")
result = tk.MustQuery("SELECT pk FROM tab1 WHERE ((col3 <= 6 OR col3 < 29 AND (col0 < 41)) OR col3 > 42) AND col1 >= 96.1 AND col3 = 30 AND col3 > 17 AND (col0 BETWEEN 36 AND 42)")
result.Check(testkit.Rows())
tk.MustExec("drop table if exists tab1")
tk.MustExec("CREATE TABLE tab1(pk INTEGER PRIMARY KEY, a INTEGER, b INTEGER)")
tk.MustExec("CREATE INDEX idx_tab1_0 on tab1 (a)")
tk.MustExec("INSERT INTO tab1 VALUES(1,1,1)")
tk.MustExec("INSERT INTO tab1 VALUES(2,2,1)")
tk.MustExec("INSERT INTO tab1 VALUES(3,1,2)")
tk.MustExec("INSERT INTO tab1 VALUES(4,2,2)")
result = tk.MustQuery("SELECT * FROM tab1 WHERE pk <= 3 AND a = 1")
result.Check(testkit.Rows("1 1 1", "3 1 2"))
result = tk.MustQuery("SELECT * FROM tab1 WHERE pk <= 4 AND a = 1 AND b = 2")
result.Check(testkit.Rows("3 1 2"))
tk.MustExec("CREATE INDEX idx_tab1_1 on tab1 (b, a)")
result = tk.MustQuery("SELECT pk FROM tab1 WHERE b > 1")
result.Check(testkit.Rows("3", "4"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (a varchar(3), index(a))")
tk.MustExec("insert t values('aaa'), ('aab')")
result = tk.MustQuery("select * from t where a >= 'aaaa' and a < 'aabb'")
result.Check(testkit.Rows("aab"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (a int primary key, b int, c int, index(c))")
tk.MustExec("insert t values(1, 1, 1), (2, 2, 2), (4, 4, 4), (3, 3, 3), (5, 5, 5)")
// Test for double read and top n.
result = tk.MustQuery("select a from t where c >= 2 order by b desc limit 1")
result.Check(testkit.Rows("5"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a varchar(50) primary key, b int, c int, index idx(b))")
tk.MustExec("insert into t values('aa', 1, 1)")
tk.MustQuery("select * from t use index(idx) where a > 'a'").Check(testkit.Rows("aa 1 1"))
// fix issue 9636
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE `t` (a int, KEY (a))")
result = tk.MustQuery(`SELECT * FROM (SELECT * FROM (SELECT a as d FROM t WHERE a IN ('100')) AS x WHERE x.d < "123" ) tmp_count`)
result.Check(testkit.Rows())
}
func (s *testSuiteP1) TestIndexReverseOrder(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key auto_increment, b int, index idx (b))")
tk.MustExec("insert t (b) values (0), (1), (2), (3), (4), (5), (6), (7), (8), (9)")
result := tk.MustQuery("select b from t order by b desc")
result.Check(testkit.Rows("9", "8", "7", "6", "5", "4", "3", "2", "1", "0"))
result = tk.MustQuery("select b from t where b <3 or (b >=6 and b < 8) order by b desc")
result.Check(testkit.Rows("7", "6", "2", "1", "0"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int, b int, index idx (b, a))")
tk.MustExec("insert t values (0, 2), (1, 2), (2, 2), (0, 1), (1, 1), (2, 1), (0, 0), (1, 0), (2, 0)")
result = tk.MustQuery("select b, a from t order by b, a desc")
result.Check(testkit.Rows("0 2", "0 1", "0 0", "1 2", "1 1", "1 0", "2 2", "2 1", "2 0"))
}
func (s *testSuiteP1) TestTableReverseOrder(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key auto_increment, b int)")
tk.MustExec("insert t (b) values (1), (2), (3), (4), (5), (6), (7), (8), (9)")
result := tk.MustQuery("select b from t order by a desc")
result.Check(testkit.Rows("9", "8", "7", "6", "5", "4", "3", "2", "1"))
result = tk.MustQuery("select a from t where a <3 or (a >=6 and a < 8) order by a desc")
result.Check(testkit.Rows("7", "6", "2", "1"))
}
func (s *testSuiteP1) TestDefaultNull(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key auto_increment, b int default 1, c int)")
tk.MustExec("insert t values ()")
tk.MustQuery("select * from t").Check(testkit.Rows("1 1 <nil>"))
tk.MustExec("update t set b = NULL where a = 1")
tk.MustQuery("select * from t").Check(testkit.Rows("1 <nil> <nil>"))
tk.MustExec("update t set c = 1")
tk.MustQuery("select * from t ").Check(testkit.Rows("1 <nil> 1"))
tk.MustExec("delete from t where a = 1")
tk.MustExec("insert t (a) values (1)")
tk.MustQuery("select * from t").Check(testkit.Rows("1 1 <nil>"))
}
func (s *testSuiteP1) TestUnsignedPKColumn(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unsigned primary key, b int, c int, key idx_ba (b, c, a));")
tk.MustExec("insert t values (1, 1, 1)")
result := tk.MustQuery("select * from t;")
result.Check(testkit.Rows("1 1 1"))
tk.MustExec("update t set c=2 where a=1;")
result = tk.MustQuery("select * from t where b=1;")
result.Check(testkit.Rows("1 1 2"))
}
func (s *testSuiteP1) TestJSON(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists test_json")
tk.MustExec("create table test_json (id int, a json)")
tk.MustExec(`insert into test_json (id, a) values (1, '{"a":[1,"2",{"aa":"bb"},4],"b":true}')`)
tk.MustExec(`insert into test_json (id, a) values (2, "null")`)
tk.MustExec(`insert into test_json (id, a) values (3, null)`)
tk.MustExec(`insert into test_json (id, a) values (4, 'true')`)
tk.MustExec(`insert into test_json (id, a) values (5, '3')`)
tk.MustExec(`insert into test_json (id, a) values (5, '4.0')`)
tk.MustExec(`insert into test_json (id, a) values (6, '"string"')`)
result := tk.MustQuery(`select tj.a from test_json tj order by tj.id`)
result.Check(testkit.Rows(`{"a": [1, "2", {"aa": "bb"}, 4], "b": true}`, "null", "<nil>", "true", "3", "4", `"string"`))
// Check json_type function
result = tk.MustQuery(`select json_type(a) from test_json tj order by tj.id`)
result.Check(testkit.Rows("OBJECT", "NULL", "<nil>", "BOOLEAN", "INTEGER", "DOUBLE", "STRING"))
// Check json compare with primitives.
result = tk.MustQuery(`select a from test_json tj where a = 3`)
result.Check(testkit.Rows("3"))
result = tk.MustQuery(`select a from test_json tj where a = 4.0`)
result.Check(testkit.Rows("4"))
result = tk.MustQuery(`select a from test_json tj where a = true`)
result.Check(testkit.Rows("true"))
result = tk.MustQuery(`select a from test_json tj where a = "string"`)
result.Check(testkit.Rows(`"string"`))
// Check cast(true/false as JSON).
result = tk.MustQuery(`select cast(true as JSON)`)
result.Check(testkit.Rows(`true`))
result = tk.MustQuery(`select cast(false as JSON)`)
result.Check(testkit.Rows(`false`))
// Check the two JSON grammar sugars.
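// a->'$.p' is shorthand for JSON_EXTRACT(a, '$.p') and keeps JSON quoting,
// while a->>'$.p' additionally unquotes the result, hence `bb` vs `"bb"`
// in the two queries below.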
result = tk.MustQuery(`select a->>'$.a[2].aa' as x, a->'$.b' as y from test_json having x is not null order by id`)
result.Check(testkit.Rows(`bb true`))
result = tk.MustQuery(`select a->'$.a[2].aa' as x, a->>'$.b' as y from test_json having x is not null order by id`)
result.Check(testkit.Rows(`"bb" true`))
// Check some DDL limits for TEXT/BLOB/JSON column.
var err error
var terr *terror.Error
_, err = tk.Exec(`create table test_bad_json(a json default '{}')`)
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrBlobCantHaveDefault))
_, err = tk.Exec(`create table test_bad_json(a blob default 'hello')`)
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrBlobCantHaveDefault))
_, err = tk.Exec(`create table test_bad_json(a text default 'world')`)
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrBlobCantHaveDefault))
// Check that JSON fields cannot be used as keys.
_, err = tk.Exec(`create table test_bad_json(id int, a json, key (a))`)
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrJSONUsedAsKey))
// Check CAST AS JSON.
result = tk.MustQuery(`select CAST('3' AS JSON), CAST('{}' AS JSON), CAST(null AS JSON)`)
result.Check(testkit.Rows(`3 {} <nil>`))
// Check cast json to decimal.
tk.MustExec("drop table if exists test_json")
tk.MustExec("create table test_json ( a decimal(60,2) as (JSON_EXTRACT(b,'$.c')), b json );")
tk.MustExec(`insert into test_json (b) values
('{"c": "1267.1"}'),
('{"c": "1267.01"}'),
('{"c": "1267.1234"}'),
('{"c": "1267.3456"}'),
('{"c": "1234567890123456789012345678901234567890123456789012345"}'),
('{"c": "1234567890123456789012345678901234567890123456789012345.12345"}');`)
tk.MustQuery("select a from test_json;").Check(testkit.Rows("1267.10", "1267.01", "1267.12",
"1267.35", "1234567890123456789012345678901234567890123456789012345.00",
"1234567890123456789012345678901234567890123456789012345.12"))
}
func (s *testSuiteP1) TestMultiUpdate(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`CREATE TABLE test_mu (a int primary key, b int, c int)`)
tk.MustExec(`INSERT INTO test_mu VALUES (1, 2, 3), (4, 5, 6), (7, 8, 9)`)
// Test INSERT ... ON DUPLICATE KEY UPDATE set lists.
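// The assignments below are applied left to right, so `c = b` reads the
// already-updated b (3), and in the next statement `b = c+5` reads the
// already-updated c (2).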
tk.MustExec(`INSERT INTO test_mu VALUES (1, 2, 3) ON DUPLICATE KEY UPDATE b = 3, c = b`)
result := tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 3 3`, `4 5 6`, `7 8 9`))
tk.MustExec(`INSERT INTO test_mu VALUES (1, 2, 3) ON DUPLICATE KEY UPDATE c = 2, b = c+5`)
result = tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 7 2`, `4 5 6`, `7 8 9`))
// Test UPDATE ... set_lists.
tk.MustExec(`UPDATE test_mu SET b = 0, c = b WHERE a = 4`)
result = tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 7 2`, `4 0 0`, `7 8 9`))
tk.MustExec(`UPDATE test_mu SET c = 8, b = c WHERE a = 4`)
result = tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 7 2`, `4 8 8`, `7 8 9`))
tk.MustExec(`UPDATE test_mu SET c = b, b = c WHERE a = 7`)
result = tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 7 2`, `4 8 8`, `7 8 8`))
}
func (s *testSuiteP1) TestGeneratedColumnWrite(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
_, err := tk.Exec(`CREATE TABLE test_gc_write (a int primary key auto_increment, b int, c int as (a+8) virtual)`)
c.Assert(err.Error(), Equals, ddl.ErrGeneratedColumnRefAutoInc.GenWithStackByArgs("c").Error())
tk.MustExec(`CREATE TABLE test_gc_write (a int primary key auto_increment, b int, c int as (b+8) virtual)`)
tk.MustExec(`CREATE TABLE test_gc_write_1 (a int primary key, b int, c int)`)
tests := []struct {
stmt string
err int
}{
// Can't modify generated column by values.
{`insert into test_gc_write (a, b, c) values (1, 1, 1)`, mysql.ErrBadGeneratedColumn},
{`insert into test_gc_write values (1, 1, 1)`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by select clause.
{`insert into test_gc_write select 1, 1, 1`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by on duplicate clause.
{`insert into test_gc_write (a, b) values (1, 1) on duplicate key update c = 1`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by set.
{`insert into test_gc_write set a = 1, b = 1, c = 1`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by update clause.
{`update test_gc_write set c = 1`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by multi-table update clause.
{`update test_gc_write, test_gc_write_1 set test_gc_write.c = 1`, mysql.ErrBadGeneratedColumn},
// Can insert without generated columns.
{`insert into test_gc_write (a, b) values (1, 1)`, 0},
{`insert into test_gc_write set a = 2, b = 2`, 0},
{`insert into test_gc_write (b) select c from test_gc_write`, 0},
// Can update without generated columns.
{`update test_gc_write set b = 2 where a = 2`, 0},
{`update test_gc_write t1, test_gc_write_1 t2 set t1.b = 3, t2.b = 4`, 0},
// But these fail, the same as in MySQL 5.7:
{`insert into test_gc_write values (1, 1)`, mysql.ErrWrongValueCountOnRow},
{`insert into test_gc_write select 1, 1`, mysql.ErrWrongValueCountOnRow},
{`insert into test_gc_write (c) select a, b from test_gc_write`, mysql.ErrWrongValueCountOnRow},
{`insert into test_gc_write (b, c) select a, b from test_gc_write`, mysql.ErrBadGeneratedColumn},
}
for _, tt := range tests {
_, err := tk.Exec(tt.stmt)
if tt.err != 0 {
c.Assert(err, NotNil, Commentf("sql is `%v`", tt.stmt))
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(tt.err), Commentf("sql is %v", tt.stmt))
} else {
c.Assert(err, IsNil)
}
}
}
// TestGeneratedColumnRead tests selecting generated columns from a table.
// They should be calculated from their generation expressions.
func (s *testSuiteP1) TestGeneratedColumnRead(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`CREATE TABLE test_gc_read(a int primary key, b int, c int as (a+b), d int as (a*b) stored, e int as (c*2))`)
result := tk.MustQuery(`SELECT generation_expression FROM information_schema.columns WHERE table_name = 'test_gc_read' AND column_name = 'd'`)
result.Check(testkit.Rows("`a` * `b`"))
// Insert only columns a and b; the generated columns are calculated from them.
tk.MustExec(`INSERT INTO test_gc_read (a, b) VALUES (0,null),(1,2),(3,4)`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`))
tk.MustExec(`INSERT INTO test_gc_read SET a = 5, b = 10`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`, `5 10 15 50 30`))
tk.MustExec(`REPLACE INTO test_gc_read (a, b) VALUES (5, 6)`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`, `5 6 11 30 22`))
tk.MustExec(`INSERT INTO test_gc_read (a, b) VALUES (5, 8) ON DUPLICATE KEY UPDATE b = 9`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`, `5 9 14 45 28`))
// Test selecting only generated columns, without their dependencies.
result = tk.MustQuery(`SELECT c, d FROM test_gc_read`)
result.Check(testkit.Rows(`<nil> <nil>`, `3 2`, `7 12`, `14 45`))
// Test selecting a virtual generated column that refers to other virtual generated columns.
result = tk.MustQuery(`SELECT e FROM test_gc_read`)
result.Check(testkit.Rows(`<nil>`, `6`, `14`, `28`))
// Test order of on duplicate key update list.
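// The update list is applied in order: after `a = 6`, `b = a` reads the new
// a, while in the following statement `b = 8` is applied before `a = b`.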
tk.MustExec(`INSERT INTO test_gc_read (a, b) VALUES (5, 8) ON DUPLICATE KEY UPDATE a = 6, b = a`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`, `6 6 12 36 24`))
tk.MustExec(`INSERT INTO test_gc_read (a, b) VALUES (6, 8) ON DUPLICATE KEY UPDATE b = 8, a = b`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`, `8 8 16 64 32`))
// Test where-conditions on virtual/stored generated columns.
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE c = 7`)
result.Check(testkit.Rows(`3 4 7 12 14`))
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE d = 64`)
result.Check(testkit.Rows(`8 8 16 64 32`))
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE e = 6`)
result.Check(testkit.Rows(`1 2 3 2 6`))
// Test update where-conditions on virtual/generated columns.
tk.MustExec(`UPDATE test_gc_read SET a = a + 100 WHERE c = 7`)
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE c = 107`)
result.Check(testkit.Rows(`103 4 107 412 214`))
// The same update as above, issued through a table alias.
tk.MustExec(`UPDATE test_gc_read m SET m.a = m.a + 100 WHERE c = 107`)
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE c = 207`)
result.Check(testkit.Rows(`203 4 207 812 414`))
tk.MustExec(`UPDATE test_gc_read SET a = a - 200 WHERE d = 812`)
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE d = 12`)
result.Check(testkit.Rows(`3 4 7 12 14`))
tk.MustExec(`INSERT INTO test_gc_read set a = 4, b = d + 1`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`,
`4 <nil> <nil> <nil> <nil>`, `8 8 16 64 32`))
tk.MustExec(`DELETE FROM test_gc_read where a = 4`)
// Test on-conditions on virtual/stored generated columns.
tk.MustExec(`CREATE TABLE test_gc_help(a int primary key, b int, c int, d int, e int)`)
tk.MustExec(`INSERT INTO test_gc_help(a, b, c, d, e) SELECT * FROM test_gc_read`)
result = tk.MustQuery(`SELECT t1.* FROM test_gc_read t1 JOIN test_gc_help t2 ON t1.c = t2.c ORDER BY t1.a`)
result.Check(testkit.Rows(`1 2 3 2 6`, `3 4 7 12 14`, `8 8 16 64 32`))
result = tk.MustQuery(`SELECT t1.* FROM test_gc_read t1 JOIN test_gc_help t2 ON t1.d = t2.d ORDER BY t1.a`)
result.Check(testkit.Rows(`1 2 3 2 6`, `3 4 7 12 14`, `8 8 16 64 32`))
result = tk.MustQuery(`SELECT t1.* FROM test_gc_read t1 JOIN test_gc_help t2 ON t1.e = t2.e ORDER BY t1.a`)
result.Check(testkit.Rows(`1 2 3 2 6`, `3 4 7 12 14`, `8 8 16 64 32`))
// Test generated column in subqueries.
result = tk.MustQuery(`SELECT * FROM test_gc_read t WHERE t.a not in (SELECT t.a FROM test_gc_read t where t.c > 5)`)
result.Sort().Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`))
result = tk.MustQuery(`SELECT * FROM test_gc_read t WHERE t.c in (SELECT t.c FROM test_gc_read t where t.c > 5)`)
result.Sort().Check(testkit.Rows(`3 4 7 12 14`, `8 8 16 64 32`))
result = tk.MustQuery(`SELECT tt.b FROM test_gc_read tt WHERE tt.a = (SELECT max(t.a) FROM test_gc_read t WHERE t.c = tt.c) ORDER BY b`)
result.Check(testkit.Rows(`2`, `4`, `8`))
// Test aggregation on virtual/stored generated columns.
result = tk.MustQuery(`SELECT c, sum(a) aa, max(d) dd, sum(e) ee FROM test_gc_read GROUP BY c ORDER BY aa`)
result.Check(testkit.Rows(`<nil> 0 <nil> <nil>`, `3 1 2 6`, `7 3 12 14`, `16 8 64 32`))
result = tk.MustQuery(`SELECT a, sum(c), sum(d), sum(e) FROM test_gc_read GROUP BY a ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil>`, `1 3 2 6`, `3 7 12 14`, `8 16 64 32`))
// Test multi-update on generated columns.
tk.MustExec(`UPDATE test_gc_read m, test_gc_read n SET m.a = m.a + 10, n.a = n.a + 10`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`10 <nil> <nil> <nil> <nil>`, `11 2 13 22 26`, `13 4 17 52 34`, `18 8 26 144 52`))
// Test when the generation expression's type differs from the generated column's type.
tk.MustExec(`CREATE TABLE test_gc_read_cast(a VARCHAR(255), b VARCHAR(255), c INT AS (JSON_EXTRACT(a, b)), d INT AS (JSON_EXTRACT(a, b)) STORED)`)
tk.MustExec(`INSERT INTO test_gc_read_cast (a, b) VALUES ('{"a": "3"}', '$.a')`)
result = tk.MustQuery(`SELECT c, d FROM test_gc_read_cast`)
result.Check(testkit.Rows(`3 3`))
tk.MustExec(`CREATE TABLE test_gc_read_cast_1(a VARCHAR(255), b VARCHAR(255), c ENUM("red", "yellow") AS (JSON_UNQUOTE(JSON_EXTRACT(a, b))))`)
tk.MustExec(`INSERT INTO test_gc_read_cast_1 (a, b) VALUES ('{"a": "yellow"}', '$.a')`)
result = tk.MustQuery(`SELECT c FROM test_gc_read_cast_1`)
result.Check(testkit.Rows(`yellow`))
tk.MustExec(`CREATE TABLE test_gc_read_cast_2( a JSON, b JSON AS (a->>'$.a'))`)
tk.MustExec(`INSERT INTO test_gc_read_cast_2(a) VALUES ('{"a": "{ \\\"key\\\": \\\"\\u6d4b\\\" }"}')`)
result = tk.MustQuery(`SELECT b FROM test_gc_read_cast_2`)
result.Check(testkit.Rows(`{"key": "测"}`))
tk.MustExec(`CREATE TABLE test_gc_read_cast_3( a JSON, b JSON AS (a->>'$.a'), c INT AS (b * 3.14) )`)
tk.MustExec(`INSERT INTO test_gc_read_cast_3(a) VALUES ('{"a": "5"}')`)
result = tk.MustQuery(`SELECT c FROM test_gc_read_cast_3`)
result.Check(testkit.Rows(`16`))
_, err := tk.Exec(`INSERT INTO test_gc_read_cast_1 (a, b) VALUES ('{"a": "invalid"}', '$.a')`)
c.Assert(err, NotNil)
// Test reading generated columns after dropping an irrelevant column.
tk.MustExec(`DROP TABLE IF EXISTS test_gc_read_m`)
tk.MustExec(`CREATE TABLE test_gc_read_m (a int primary key, b int, c int as (a+1), d int as (c*2))`)
tk.MustExec(`INSERT INTO test_gc_read_m(a) values (1), (2)`)
tk.MustExec(`ALTER TABLE test_gc_read_m DROP b`)
result = tk.MustQuery(`SELECT * FROM test_gc_read_m`)
result.Check(testkit.Rows(`1 2 4`, `2 3 6`))
// Test not null generated columns.
tk.MustExec(`CREATE TABLE test_gc_read_1(a int primary key, b int, c int as (a+b) not null, d int as (a*b) stored)`)
tk.MustExec(`CREATE TABLE test_gc_read_2(a int primary key, b int, c int as (a+b), d int as (a*b) stored not null)`)
tests := []struct {
stmt string
err int
}{
// These inserts fail because the generated columns are declared NOT NULL.
{`insert into test_gc_read_1(a, b) values (1, null)`, mysql.ErrBadNull},
{`insert into test_gc_read_2(a, b) values (1, null)`, mysql.ErrBadNull},
}
for _, tt := range tests {
_, err := tk.Exec(tt.stmt)
if tt.err != 0 {
c.Assert(err, NotNil)
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(tt.err))
} else {
c.Assert(err, IsNil)
}
}
}
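// TestToPBExpr covers filters (comparisons, NOT, IN, LIKE, bit operations)
// that go through the expression-to-PB conversion when pushed down to the
// storage layer.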
func (s *testSuiteP1) TestToPBExpr(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a decimal(10,6), b decimal, index idx_b (b))")
tk.MustExec("set sql_mode = ''")
tk.MustExec("insert t values (1.1, 1.1)")
tk.MustExec("insert t values (2.4, 2.4)")
tk.MustExec("insert t values (3.3, 2.7)")
result := tk.MustQuery("select * from t where a < 2.399999")
result.Check(testkit.Rows("1.100000 1"))
result = tk.MustQuery("select * from t where a > 1.5")
result.Check(testkit.Rows("2.400000 2", "3.300000 3"))
result = tk.MustQuery("select * from t where a <= 1.1")
result.Check(testkit.Rows("1.100000 1"))
result = tk.MustQuery("select * from t where b >= 3")
result.Check(testkit.Rows("3.300000 3"))
result = tk.MustQuery("select * from t where not (b = 1)")
result.Check(testkit.Rows("2.400000 2", "3.300000 3"))
result = tk.MustQuery("select * from t where b&1 = a|1")
result.Check(testkit.Rows("1.100000 1"))
result = tk.MustQuery("select * from t where b != 2 and b <=> 3")
result.Check(testkit.Rows("3.300000 3"))
result = tk.MustQuery("select * from t where b in (3)")
result.Check(testkit.Rows("3.300000 3"))
result = tk.MustQuery("select * from t where b not in (1, 2)")
result.Check(testkit.Rows("3.300000 3"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a varchar(255), b int)")
tk.MustExec("insert t values ('abc123', 1)")
tk.MustExec("insert t values ('ab123', 2)")
result = tk.MustQuery("select * from t where a like 'ab%'")
result.Check(testkit.Rows("abc123 1", "ab123 2"))
result = tk.MustQuery("select * from t where a like 'ab_12'")
result.Check(nil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key)")
tk.MustExec("insert t values (1)")
tk.MustExec("insert t values (2)")
result = tk.MustQuery("select * from t where not (a = 1)")
result.Check(testkit.Rows("2"))
result = tk.MustQuery("select * from t where not(not (a = 1))")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select * from t where not(a != 1 and a != 2)")
result.Check(testkit.Rows("1", "2"))
}
func (s *testSuiteP1) TestDatumXAPI(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a decimal(10,6), b decimal, index idx_b (b))")
tk.MustExec("set sql_mode = ''")
tk.MustExec("insert t values (1.1, 1.1)")
tk.MustExec("insert t values (2.2, 2.2)")
tk.MustExec("insert t values (3.3, 2.7)")
result := tk.MustQuery("select * from t where a > 1.5")
result.Check(testkit.Rows("2.200000 2", "3.300000 3"))
result = tk.MustQuery("select * from t where b > 1.5")
result.Check(testkit.Rows("2.200000 2", "3.300000 3"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a time(3), b time, index idx_a (a))")
tk.MustExec("insert t values ('11:11:11', '11:11:11')")
tk.MustExec("insert t values ('11:11:12', '11:11:12')")
tk.MustExec("insert t values ('11:11:13', '11:11:13')")
result = tk.MustQuery("select * from t where a > '11:11:11.5'")
result.Check(testkit.Rows("11:11:12.000 11:11:12", "11:11:13.000 11:11:13"))
result = tk.MustQuery("select * from t where b > '11:11:11.5'")
result.Check(testkit.Rows("11:11:12.000 11:11:12", "11:11:13.000 11:11:13"))
}
func (s *testSuiteP1) TestSQLMode(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a tinyint not null)")
tk.MustExec("set sql_mode = 'STRICT_TRANS_TABLES'")
_, err := tk.Exec("insert t values ()")
c.Check(err, NotNil)
_, err = tk.Exec("insert t values ('1000')")
c.Check(err, NotNil)
tk.MustExec("create table if not exists tdouble (a double(3,2))")
_, err = tk.Exec("insert tdouble values (10.23)")
c.Check(err, NotNil)
tk.MustExec("set sql_mode = ''")
tk.MustExec("insert t values ()")
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1364 Field 'a' doesn't have a default value"))
_, err = tk.Exec("insert t values (null)")
c.Check(err, NotNil)
tk.MustExec("insert ignore t values (null)")
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1048 Column 'a' cannot be null"))
tk.MustExec("insert t select null")
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1048 Column 'a' cannot be null"))
tk.MustExec("insert t values (1000)")
tk.MustQuery("select * from t order by a").Check(testkit.Rows("0", "0", "0", "127"))
tk.MustExec("insert tdouble values (10.23)")
tk.MustQuery("select * from tdouble").Check(testkit.Rows("9.99"))
tk.MustExec("set sql_mode = 'STRICT_TRANS_TABLES'")
tk.MustExec("set @@global.sql_mode = ''")
// Disable the global variable cache so that loading global variables takes effect immediately.
s.domain.GetGlobalVarsCache().Disable()
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use test")
tk2.MustExec("drop table if exists t2")
tk2.MustExec("create table t2 (a varchar(3))")
tk2.MustExec("insert t2 values ('abcd')")
tk2.MustQuery("select * from t2").Check(testkit.Rows("abc"))
// session1 is still in strict mode.
_, err = tk.Exec("insert t2 values ('abcd')")
c.Check(err, NotNil)
// Restore original global strict mode.
tk.MustExec("set @@global.sql_mode = 'STRICT_TRANS_TABLES'")
}
func (s *testSuiteP1) TestTableDual(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
result := tk.MustQuery("Select 1")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("Select 1 from dual")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("Select count(*) from dual")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("Select 1 from dual where 1")
result.Check(testkit.Rows("1"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key)")
tk.MustQuery("select t1.* from t t1, t t2 where t1.a=t2.a and 1=0").Check(testkit.Rows())
}
func (s *testSuiteP1) TestTableScan(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use information_schema")
result := tk.MustQuery("select * from schemata")
// There must be these tables: information_schema, mysql, performance_schema and test.
c.Assert(len(result.Rows()), GreaterEqual, 4)
tk.MustExec("use test")
tk.MustExec("create database mytest")
rowStr1 := fmt.Sprintf("%s %s %s %s %v", "def", "mysql", "utf8mb4", "utf8mb4_bin", nil)
rowStr2 := fmt.Sprintf("%s %s %s %s %v", "def", "mytest", "utf8mb4", "utf8mb4_bin", nil)
tk.MustExec("use information_schema")
result = tk.MustQuery("select * from schemata where schema_name = 'mysql'")
result.Check(testkit.Rows(rowStr1))
result = tk.MustQuery("select * from schemata where schema_name like 'my%'")
result.Check(testkit.Rows(rowStr1, rowStr2))
result = tk.MustQuery("select 1 from tables limit 1")
result.Check(testkit.Rows("1"))
}
func (s *testSuiteP1) TestAdapterStatement(c *C) {
se, err := session.CreateSession4Test(s.store)
c.Check(err, IsNil)
se.GetSessionVars().TxnCtx.InfoSchema = domain.GetDomain(se).InfoSchema()
compiler := &executor.Compiler{Ctx: se}
stmtNode, err := s.ParseOneStmt("select 1", "", "")
c.Check(err, IsNil)
stmt, err := compiler.Compile(context.TODO(), stmtNode)
c.Check(err, IsNil)
c.Check(stmt.OriginText(), Equals, "select 1")
stmtNode, err = s.ParseOneStmt("create table test.t (a int)", "", "")
c.Check(err, IsNil)
stmt, err = compiler.Compile(context.TODO(), stmtNode)
c.Check(err, IsNil)
c.Check(stmt.OriginText(), Equals, "create table test.t (a int)")
}
func (s *testSuiteP1) TestIsPointGet(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use mysql")
ctx := tk.Se.(sessionctx.Context)
tests := map[string]bool{
"select * from help_topic where name='aaa'": false,
"select 1 from help_topic where name='aaa'": true,
"select * from help_topic where help_topic_id=1": true,
"select * from help_topic where help_category_id=1": false,
}
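// `select *` through the unique key `name` still needs a table lookup, so it
// does not qualify as a point get, while `select 1` is served by the index
// alone; a lookup on the primary key `help_topic_id` qualifies, and one on
// the non-unique `help_category_id` does not.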
infoSchema := executor.GetInfoSchema(ctx)
for sqlStr, result := range tests {
stmtNode, err := s.ParseOneStmt(sqlStr, "", "")
c.Check(err, IsNil)
err = plannercore.Preprocess(ctx, stmtNode, infoSchema)
c.Check(err, IsNil)
p, err := planner.Optimize(context.TODO(), ctx, stmtNode, infoSchema)
c.Check(err, IsNil)
ret, err := plannercore.IsPointGetWithPKOrUniqueKeyByAutoCommit(ctx, p)
c.Assert(err, IsNil)
c.Assert(ret, Equals, result)
}
}
func (s *testSuiteP1) TestPointGetRepeatableRead(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("use test")
tk1.MustExec(`create table point_get (a int, b int, c int,
primary key k_a(a),
unique key k_b(b))`)
tk1.MustExec("insert into point_get values (1, 1, 1)")
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use test")
var (
step1 = "github.com/pingcap/tidb/executor/pointGetRepeatableReadTest-step1"
step2 = "github.com/pingcap/tidb/executor/pointGetRepeatableReadTest-step2"
)
c.Assert(failpoint.Enable(step1, "return"), IsNil)
c.Assert(failpoint.Enable(step2, "pause"), IsNil)
updateWaitCh := make(chan struct{})
go func() {
ctx := context.WithValue(context.Background(), "pointGetRepeatableReadTest", updateWaitCh)
ctx = failpoint.WithHook(ctx, func(ctx context.Context, fpname string) bool {
return fpname == step1 || fpname == step2
})
rs, err := tk1.Se.Execute(ctx, "select c from point_get where b = 1")
c.Assert(err, IsNil)
result := tk1.ResultSetToResultWithCtx(ctx, rs[0], Commentf("execute sql fail"))
result.Check(testkit.Rows("1"))
}()
<-updateWaitCh // Wait until the point get executor finishes its first read.
c.Assert(failpoint.Disable(step1), IsNil)
tk2.MustExec("update point_get set b = 2, c = 2 where a = 1")
c.Assert(failpoint.Disable(step2), IsNil)
}
func (s *testSuiteP1) TestBatchPointGetRepeatableRead(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("use test")
tk1.MustExec(`create table batch_point_get (a int, b int, c int, unique key k_b(a, b, c))`)
tk1.MustExec("insert into batch_point_get values (1, 1, 1), (2, 3, 4), (3, 4, 5)")
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use test")
var (
step1 = "github.com/pingcap/tidb/executor/batchPointGetRepeatableReadTest-step1"
step2 = "github.com/pingcap/tidb/executor/batchPointGetRepeatableReadTest-step2"
)
c.Assert(failpoint.Enable(step1, "return"), IsNil)
c.Assert(failpoint.Enable(step2, "pause"), IsNil)
updateWaitCh := make(chan struct{})
go func() {
ctx := context.WithValue(context.Background(), "batchPointGetRepeatableReadTest", updateWaitCh)
ctx = failpoint.WithHook(ctx, func(ctx context.Context, fpname string) bool {
return fpname == step1 || fpname == step2
})
rs, err := tk1.Se.Execute(ctx, "select c from batch_point_get where (a, b, c) in ((1, 1, 1))")
c.Assert(err, IsNil)
result := tk1.ResultSetToResultWithCtx(ctx, rs[0], Commentf("execute sql fail"))
result.Check(testkit.Rows("1"))
}()
<-updateWaitCh // Wait until the batch point get executor finishes its first read.
c.Assert(failpoint.Disable(step1), IsNil)
tk2.MustExec("update batch_point_get set b = 2, c = 2 where a = 1")
c.Assert(failpoint.Disable(step2), IsNil)
}
func (s *testSuite4) TestSplitRegionTimeout(c *C) {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/MockSplitRegionTimeout", `return(true)`), IsNil)
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a varchar(100),b int, index idx1(b,a))")
tk.MustExec(`split table t index idx1 by (10000,"abcd"),(10000000);`)
tk.MustExec(`set @@tidb_wait_split_region_timeout=1`)
// The result "0 0" means 0 regions were split and 0 regions finished scattering before the timeout.
tk.MustQuery(`split table t between (0) and (10000) regions 10`).Check(testkit.Rows("0 0"))
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/MockSplitRegionTimeout"), IsNil)
// Test scatter regions timeout.
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/MockScatterRegionTimeout", `return(true)`), IsNil)
tk.MustQuery(`split table t between (0) and (10000) regions 10`).Check(testkit.Rows("10 1"))
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/MockScatterRegionTimeout"), IsNil)
}
func (s *testSuiteP1) TestRow(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c int, d int)")
tk.MustExec("insert t values (1, 1)")
tk.MustExec("insert t values (1, 3)")
tk.MustExec("insert t values (2, 1)")
tk.MustExec("insert t values (2, 3)")
result := tk.MustQuery("select * from t where (c, d) < (2,2)")
result.Check(testkit.Rows("1 1", "1 3", "2 1"))
result = tk.MustQuery("select * from t where (1,2,3) > (3,2,1)")
result.Check(testkit.Rows())
result = tk.MustQuery("select * from t where row(1,2,3) > (3,2,1)")
result.Check(testkit.Rows())
result = tk.MustQuery("select * from t where (c, d) = (select * from t where (c,d) = (1,1))")
result.Check(testkit.Rows("1 1"))
result = tk.MustQuery("select * from t where (c, d) = (select * from t k where (t.c,t.d) = (c,d))")
result.Check(testkit.Rows("1 1", "1 3", "2 1", "2 3"))
result = tk.MustQuery("select (1, 2, 3) < (2, 3, 4)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select (2, 3, 4) <= (2, 3, 3)")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select (2, 3, 4) <= (2, 3, 4)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select (2, 3, 4) <= (2, 1, 4)")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select (2, 3, 4) >= (2, 3, 4)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select (2, 3, 4) = (2, 3, 4)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select (2, 3, 4) != (2, 3, 4)")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select row(1, 1) in (row(1, 1))")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select row(1, 0) in (row(1, 1))")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select row(1, 1) in (select 1, 1)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select row(1, 1) > row(1, 0)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select row(1, 1) > (select 1, 0)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select 1 > (select 1)")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select (select 1)")
result.Check(testkit.Rows("1"))
}
func (s *testSuiteP1) TestColumnName(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c int, d int)")
// Disable ONLY_FULL_GROUP_BY.
tk.MustExec("set sql_mode='STRICT_TRANS_TABLES'")
rs, err := tk.Exec("select 1 + c, count(*) from t")
c.Check(err, IsNil)
fields := rs.Fields()
c.Check(len(fields), Equals, 2)
c.Check(fields[0].Column.Name.L, Equals, "1 + c")
c.Check(fields[0].ColumnAsName.L, Equals, "1 + c")
c.Check(fields[1].Column.Name.L, Equals, "count(*)")
c.Check(fields[1].ColumnAsName.L, Equals, "count(*)")
rs.Close()
rs, err = tk.Exec("select (c) > all (select c from t) from t")
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.L, Equals, "(c) > all (select c from t)")
c.Check(fields[0].ColumnAsName.L, Equals, "(c) > all (select c from t)")
rs.Close()
tk.MustExec("begin")
tk.MustExec("insert t values(1,1)")
rs, err = tk.Exec("select c d, d c from t")
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 2)
c.Check(fields[0].Column.Name.L, Equals, "c")
c.Check(fields[0].ColumnAsName.L, Equals, "d")
c.Check(fields[1].Column.Name.L, Equals, "d")
c.Check(fields[1].ColumnAsName.L, Equals, "c")
rs.Close()
// Test case for querying a column of a table.
// In this case, all attributes have values.
rs, err = tk.Exec("select c as a from t as t2")
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(fields[0].Column.Name.L, Equals, "c")
c.Check(fields[0].ColumnAsName.L, Equals, "a")
c.Check(fields[0].Table.Name.L, Equals, "t")
c.Check(fields[0].TableAsName.L, Equals, "t2")
c.Check(fields[0].DBName.L, Equals, "test")
rs.Close()
// Test case for querying an expression that uses only constant inputs.
// In this case, the table, org_table and database attributes will all be empty.
rs, err = tk.Exec("select hour(1) as a from t as t2")
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(fields[0].Column.Name.L, Equals, "a")
c.Check(fields[0].ColumnAsName.L, Equals, "a")
c.Check(fields[0].Table.Name.L, Equals, "")
c.Check(fields[0].TableAsName.L, Equals, "")
c.Check(fields[0].DBName.L, Equals, "")
rs.Close()
// Test case for querying a column wrapped with parentheses and unary plus.
// In this case, the column name should be its original name.
rs, err = tk.Exec("select (c), (+c), +(c), +(+(c)), ++c from t")
c.Check(err, IsNil)
fields = rs.Fields()
for i := 0; i < 5; i++ {
c.Check(fields[i].Column.Name.L, Equals, "c")
c.Check(fields[i].ColumnAsName.L, Equals, "c")
}
rs.Close()
// Test issue https://github.com/pingcap/tidb/issues/9639.
// Both window function and expression appear in final result field.
tk.MustExec("set @@tidb_enable_window_function = 1")
rs, err = tk.Exec("select 1+1, row_number() over() num from t")
c.Check(err, IsNil)
fields = rs.Fields()
c.Assert(fields[0].Column.Name.L, Equals, "1+1")
c.Assert(fields[0].ColumnAsName.L, Equals, "1+1")
c.Assert(fields[1].Column.Name.L, Equals, "num")
c.Assert(fields[1].ColumnAsName.L, Equals, "num")
tk.MustExec("set @@tidb_enable_window_function = 0")
rs.Close()
rs, err = tk.Exec("select if(1,c,c) from t;")
c.Check(err, IsNil)
fields = rs.Fields()
c.Assert(fields[0].Column.Name.L, Equals, "if(1,c,c)")
// This is a compatibility issue; the column-as-name should be empty instead.
c.Assert(fields[0].ColumnAsName.L, Equals, "if(1,c,c)")
}
func (s *testSuiteP1) TestSelectVar(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (d int)")
tk.MustExec("insert into t values(1), (2), (1)")
// This behavior is different from MySQL.
result := tk.MustQuery("select @a, @a := d+1 from t")
result.Check(testkit.Rows("<nil> 2", "2 3", "3 2"))
// Test for PR #10658.
tk.MustExec("select SQL_BIG_RESULT d from t group by d")
tk.MustExec("select SQL_SMALL_RESULT d from t group by d")
tk.MustExec("select SQL_BUFFER_RESULT d from t group by d")
}
func (s *testSuiteP1) TestHistoryRead(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists history_read")
tk.MustExec("create table history_read (a int)")
tk.MustExec("insert history_read values (1)")
// For mocktikv, the safe point is not initialized, so we insert it manually for the snapshot read to use.
safePointName := "tikv_gc_safe_point"
safePointValue := "20060102-15:04:05 -0700"
safePointComment := "All versions after safe point can be accessed. (DO NOT EDIT)"
updateSafePoint := fmt.Sprintf(`INSERT INTO mysql.tidb VALUES ('%[1]s', '%[2]s', '%[3]s')
ON DUPLICATE KEY
UPDATE variable_value = '%[2]s', comment = '%[3]s'`, safePointName, safePointValue, safePointComment)
tk.MustExec(updateSafePoint)
// Setting the snapshot to a time before the safe point fails.
_, err := tk.Exec("set @@tidb_snapshot = '2006-01-01 15:04:05.999999'")
c.Assert(terror.ErrorEqual(err, variable.ErrSnapshotTooOld), IsTrue, Commentf("err %v", err))
// SnapshotTS is not updated when the check fails.
c.Assert(tk.Se.GetSessionVars().SnapshotTS, Equals, uint64(0))
curVer1, _ := s.store.CurrentVersion()
time.Sleep(time.Millisecond)
snapshotTime := time.Now()
time.Sleep(time.Millisecond)
curVer2, _ := s.store.CurrentVersion()
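// snapshotTime is bracketed between curVer1 and curVer2, so the snapshot TS
// parsed from it must fall strictly between the two versions (asserted below).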
tk.MustExec("insert history_read values (2)")
tk.MustQuery("select * from history_read").Check(testkit.Rows("1", "2"))
tk.MustExec("set @@tidb_snapshot = '" + snapshotTime.Format("2006-01-02 15:04:05.999999") + "'")
ctx := tk.Se.(sessionctx.Context)
snapshotTS := ctx.GetSessionVars().SnapshotTS
c.Assert(snapshotTS, Greater, curVer1.Ver)
c.Assert(snapshotTS, Less, curVer2.Ver)
tk.MustQuery("select * from history_read").Check(testkit.Rows("1"))
_, err = tk.Exec("insert history_read values (2)")
c.Assert(err, NotNil)
_, err = tk.Exec("update history_read set a = 3 where a = 1")
c.Assert(err, NotNil)
_, err = tk.Exec("delete from history_read where a = 1")
c.Assert(err, NotNil)
tk.MustExec("set @@tidb_snapshot = ''")
tk.MustQuery("select * from history_read").Check(testkit.Rows("1", "2"))
tk.MustExec("insert history_read values (3)")
tk.MustExec("update history_read set a = 4 where a = 3")
tk.MustExec("delete from history_read where a = 1")
time.Sleep(time.Millisecond)
snapshotTime = time.Now()
time.Sleep(time.Millisecond)
tk.MustExec("alter table history_read add column b int")
tk.MustExec("insert history_read values (8, 8), (9, 9)")
tk.MustQuery("select * from history_read order by a").Check(testkit.Rows("2 <nil>", "4 <nil>", "8 8", "9 9"))
tk.MustExec("set @@tidb_snapshot = '" + snapshotTime.Format("2006-01-02 15:04:05.999999") + "'")
tk.MustQuery("select * from history_read order by a").Check(testkit.Rows("2", "4"))
tsoStr := strconv.FormatUint(oracle.EncodeTSO(snapshotTime.UnixNano()/int64(time.Millisecond)), 10)
tk.MustExec("set @@tidb_snapshot = '" + tsoStr + "'")
tk.MustQuery("select * from history_read order by a").Check(testkit.Rows("2", "4"))
tk.MustExec("set @@tidb_snapshot = ''")
tk.MustQuery("select * from history_read order by a").Check(testkit.Rows("2 <nil>", "4 <nil>", "8 8", "9 9"))
}
func (s *testSuiteP1) TestLowResolutionTSORead(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("set @@autocommit=1")
tk.MustExec("use test")
tk.MustExec("drop table if exists low_resolution_tso")
tk.MustExec("create table low_resolution_tso(a int)")
tk.MustExec("insert low_resolution_tso values (1)")
// Enable low resolution TSO.
c.Assert(tk.Se.GetSessionVars().LowResolutionTSO, IsFalse)
tk.Exec("set @@tidb_low_resolution_tso = 'on'")
c.Assert(tk.Se.GetSessionVars().LowResolutionTSO, IsTrue)
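// With low resolution TSO the session reads at a cached, slightly stale
// timestamp: the stale read succeeds, but write statements are rejected
// while the option is on.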
time.Sleep(3 * time.Second)
tk.MustQuery("select * from low_resolution_tso").Check(testkit.Rows("1"))
_, err := tk.Exec("update low_resolution_tso set a = 2")
c.Assert(err, NotNil)
tk.MustExec("set @@tidb_low_resolution_tso = 'off'")
tk.MustExec("update low_resolution_tso set a = 2")
tk.MustQuery("select * from low_resolution_tso").Check(testkit.Rows("2"))
}
func (s *testSuite) TestScanControlSelection(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key, b int, c int, index idx_b(b))")
tk.MustExec("insert into t values (1, 1, 1), (2, 1, 1), (3, 1, 2), (4, 2, 3)")
tk.MustQuery("select (select count(1) k from t s where s.b = t1.c) from t t1").Sort().Check(testkit.Rows("0", "1", "3", "3"))
}
func (s *testSuite) TestSimpleDAG(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key, b int, c int)")
tk.MustExec("insert into t values (1, 1, 1), (2, 1, 1), (3, 1, 2), (4, 2, 3)")
tk.MustQuery("select a from t").Check(testkit.Rows("1", "2", "3", "4"))
tk.MustQuery("select * from t where a = 4").Check(testkit.Rows("4 2 3"))
tk.MustQuery("select a from t limit 1").Check(testkit.Rows("1"))
tk.MustQuery("select a from t order by a desc").Check(testkit.Rows("4", "3", "2", "1"))
tk.MustQuery("select a from t order by a desc limit 1").Check(testkit.Rows("4"))
tk.MustQuery("select a from t order by b desc limit 1").Check(testkit.Rows("4"))
tk.MustQuery("select a from t where a < 3").Check(testkit.Rows("1", "2"))
tk.MustQuery("select a from t where b > 1").Check(testkit.Rows("4"))
tk.MustQuery("select a from t where b > 1 and a < 3").Check(testkit.Rows())
tk.MustQuery("select count(*) from t where b > 1 and a < 3").Check(testkit.Rows("0"))
tk.MustQuery("select count(*) from t").Check(testkit.Rows("4"))
tk.MustQuery("select count(*), c from t group by c order by c").Check(testkit.Rows("2 1", "1 2", "1 3"))
tk.MustQuery("select sum(c) as s from t group by b order by s").Check(testkit.Rows("3", "4"))
tk.MustQuery("select avg(a) as s from t group by b order by s").Check(testkit.Rows("2.0000", "4.0000"))
tk.MustQuery("select sum(distinct c) from t group by b").Check(testkit.Rows("3", "3"))
tk.MustExec("create index i on t(c,b)")
tk.MustQuery("select a from t where c = 1").Check(testkit.Rows("1", "2"))
tk.MustQuery("select a from t where c = 1 and a < 2").Check(testkit.Rows("1"))
tk.MustQuery("select a from t where c = 1 order by a limit 1").Check(testkit.Rows("1"))
tk.MustQuery("select count(*) from t where c = 1 ").Check(testkit.Rows("2"))
tk.MustExec("create index i1 on t(b)")
tk.MustQuery("select c from t where b = 2").Check(testkit.Rows("3"))
tk.MustQuery("select * from t where b = 2").Check(testkit.Rows("4 2 3"))
tk.MustQuery("select count(*) from t where b = 1").Check(testkit.Rows("3"))
tk.MustQuery("select * from t where b = 1 and a > 1 limit 1").Check(testkit.Rows("2 1 1"))
// Test time push down.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (id int, c1 datetime);")
tk.MustExec("insert into t values (1, '2015-06-07 12:12:12')")
tk.MustQuery("select id from t where c1 = '2015-06-07 12:12:12'").Check(testkit.Rows("1"))
}
func (s *testSuite) TestTimestampTimeZone(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (ts timestamp)")
tk.MustExec("set time_zone = '+00:00'")
tk.MustExec("insert into t values ('2017-04-27 22:40:42')")
// The timestamp value changes when the time_zone session variable changes.
tests := []struct {
timezone string
expect string
}{
{"+10:00", "2017-04-28 08:40:42"},
{"-6:00", "2017-04-27 16:40:42"},
}
for _, tt := range tests {
tk.MustExec(fmt.Sprintf("set time_zone = '%s'", tt.timezone))
tk.MustQuery("select * from t").Check(testkit.Rows(tt.expect))
}
// For issue https://github.com/pingcap/tidb/issues/3467
tk.MustExec("drop table if exists t1")
tk.MustExec(`CREATE TABLE t1 (
id bigint(20) NOT NULL AUTO_INCREMENT,
uid int(11) DEFAULT NULL,
datetime timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
ip varchar(128) DEFAULT NULL,
PRIMARY KEY (id),
KEY i_datetime (datetime),
KEY i_userid (uid)
);`)
tk.MustExec(`INSERT INTO t1 VALUES (123381351,1734,"2014-03-31 08:57:10","127.0.0.1");`)
r := tk.MustQuery("select datetime from t1;") // Cover TableReaderExec
r.Check(testkit.Rows("2014-03-31 08:57:10"))
r = tk.MustQuery("select datetime from t1 where datetime='2014-03-31 08:57:10';")
r.Check(testkit.Rows("2014-03-31 08:57:10")) // Cover IndexReaderExec
r = tk.MustQuery("select * from t1 where datetime='2014-03-31 08:57:10';")
r.Check(testkit.Rows("123381351 1734 2014-03-31 08:57:10 127.0.0.1")) // Cover IndexLookupExec
// For issue https://github.com/pingcap/tidb/issues/3485
tk.MustExec("set time_zone = 'Asia/Shanghai'")
tk.MustExec("drop table if exists t1")
tk.MustExec(`CREATE TABLE t1 (
id bigint(20) NOT NULL AUTO_INCREMENT,
datetime timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (id)
);`)
tk.MustExec(`INSERT INTO t1 VALUES (123381351,"2014-03-31 08:57:10");`)
r = tk.MustQuery(`select * from t1 where datetime="2014-03-31 08:57:10";`)
r.Check(testkit.Rows("123381351 2014-03-31 08:57:10"))
tk.MustExec(`alter table t1 add key i_datetime (datetime);`)
r = tk.MustQuery(`select * from t1 where datetime="2014-03-31 08:57:10";`)
r.Check(testkit.Rows("123381351 2014-03-31 08:57:10"))
r = tk.MustQuery(`select * from t1;`)
r.Check(testkit.Rows("123381351 2014-03-31 08:57:10"))
r = tk.MustQuery("select datetime from t1 where datetime='2014-03-31 08:57:10';")
r.Check(testkit.Rows("2014-03-31 08:57:10"))
}
func (s *testSuite) TestTimestampDefaultValueTimeZone(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("set time_zone = '+08:00'")
tk.MustExec(`create table t (a int, b timestamp default "2019-01-17 14:46:14")`)
tk.MustExec("insert into t set a=1")
r := tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '2019-01-17 14:46:14'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
tk.MustExec("set time_zone = '+00:00'")
tk.MustExec("insert into t set a=2")
r = tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '2019-01-17 06:46:14'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
r = tk.MustQuery(`select a,b from t order by a`)
r.Check(testkit.Rows("1 2019-01-17 06:46:14", "2 2019-01-17 06:46:14"))
// Test the case where the column's version is greater than ColumnInfoVersion1.
sctx := tk.Se.(sessionctx.Context)
is := domain.GetDomain(sctx).InfoSchema()
c.Assert(is, NotNil)
tb, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
tb.Cols()[1].Version = model.ColumnInfoVersion1 + 1
tk.MustExec("insert into t set a=3")
r = tk.MustQuery(`select a,b from t order by a`)
r.Check(testkit.Rows("1 2019-01-17 06:46:14", "2 2019-01-17 06:46:14", "3 2019-01-17 06:46:14"))
tk.MustExec("delete from t where a=3")
// Change time zone back.
tk.MustExec("set time_zone = '+08:00'")
r = tk.MustQuery(`select a,b from t order by a`)
r.Check(testkit.Rows("1 2019-01-17 14:46:14", "2 2019-01-17 14:46:14"))
tk.MustExec("set time_zone = '-08:00'")
r = tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '2019-01-16 22:46:14'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
// Test the zero default value under multiple time zones.
defer tk.MustExec(fmt.Sprintf("set @@sql_mode='%s'", tk.MustQuery("select @@sql_mode").Rows()[0][0]))
tk.MustExec("set @@sql_mode='STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION';")
tk.MustExec("drop table if exists t")
tk.MustExec("set time_zone = '+08:00'")
tk.MustExec(`create table t (a int, b timestamp default "0000-00-00 00")`)
tk.MustExec("insert into t set a=1")
r = tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '0000-00-00 00:00:00'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
tk.MustExec("set time_zone = '+00:00'")
tk.MustExec("insert into t set a=2")
r = tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '0000-00-00 00:00:00'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
tk.MustExec("set time_zone = '-08:00'")
tk.MustExec("insert into t set a=3")
r = tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '0000-00-00 00:00:00'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
r = tk.MustQuery(`select a,b from t order by a`)
r.Check(testkit.Rows("1 0000-00-00 00:00:00", "2 0000-00-00 00:00:00", "3 0000-00-00 00:00:00"))
// Test adding a timestamp column with DEFAULT CURRENT_TIMESTAMP.
tk.MustExec(`drop table if exists t`)
tk.MustExec(`set time_zone = 'Asia/Shanghai'`)
tk.MustExec(`create table t (a int)`)
tk.MustExec(`insert into t set a=1`)
tk.MustExec(`alter table t add column b timestamp not null default current_timestamp;`)
timeIn8 := tk.MustQuery("select b from t").Rows()[0][0]
tk.MustExec(`set time_zone = '+00:00'`)
timeIn0 := tk.MustQuery("select b from t").Rows()[0][0]
c.Assert(timeIn8 != timeIn0, IsTrue, Commentf("%v == %v", timeIn8, timeIn0))
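// Converting the value read under Asia/Shanghai to UTC must reproduce the
// value read under +00:00, showing the stored timestamp itself is
// time-zone independent.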
datumTimeIn8, err := expression.GetTimeValue(tk.Se, timeIn8, mysql.TypeTimestamp, 0)
c.Assert(err, IsNil)
tIn8To0 := datumTimeIn8.GetMysqlTime()
timeZoneIn8, err := time.LoadLocation("Asia/Shanghai")
c.Assert(err, IsNil)
err = tIn8To0.ConvertTimeZone(timeZoneIn8, time.UTC)
c.Assert(err, IsNil)
c.Assert(timeIn0 == tIn8To0.String(), IsTrue, Commentf("%v != %v", timeIn0, tIn8To0.String()))
// Test adding an index.
tk.MustExec(`alter table t add index(b);`)
tk.MustExec("admin check table t")
tk.MustExec(`set time_zone = '+05:00'`)
tk.MustExec("admin check table t")
}
func (s *testSuite) TestTiDBCurrentTS(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustQuery("select @@tidb_current_ts").Check(testkit.Rows("0"))
tk.MustExec("begin")
rows := tk.MustQuery("select @@tidb_current_ts").Rows()
tsStr := rows[0][0].(string)
txn, err := tk.Se.Txn(true)
c.Assert(err, IsNil)
c.Assert(tsStr, Equals, fmt.Sprintf("%d", txn.StartTS()))
tk.MustExec("begin")
rows = tk.MustQuery("select @@tidb_current_ts").Rows()
newTsStr := rows[0][0].(string)
txn, err = tk.Se.Txn(true)
c.Assert(err, IsNil)
c.Assert(newTsStr, Equals, fmt.Sprintf("%d", txn.StartTS()))
c.Assert(newTsStr, Not(Equals), tsStr)
tk.MustExec("commit")
tk.MustQuery("select @@tidb_current_ts").Check(testkit.Rows("0"))
_, err = tk.Exec("set @@tidb_current_ts = '1'")
c.Assert(terror.ErrorEqual(err, variable.ErrReadOnly), IsTrue, Commentf("err %v", err))
}
func (s *testSuite) TestSelectForUpdate(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("use test")
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use test")
tk.MustExec("drop table if exists t, t1")
txn, err := tk.Se.Txn(true)
c.Assert(err, IsNil)
c.Assert(txn.Valid(), IsFalse)
tk.MustExec("create table t (c1 int, c2 int, c3 int)")
tk.MustExec("insert t values (11, 2, 3)")
tk.MustExec("insert t values (12, 2, 3)")
tk.MustExec("insert t values (13, 2, 3)")
tk.MustExec("create table t1 (c1 int)")
tk.MustExec("insert t1 values (11)")
// conflict
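// tk2 commits an update to the row tk1 read FOR UPDATE, so tk1's commit
// fails with a write conflict.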
tk1.MustExec("begin")
tk1.MustQuery("select * from t where c1=11 for update")
tk2.MustExec("begin")
tk2.MustExec("update t set c2=211 where c1=11")
tk2.MustExec("commit")
_, err = tk1.Exec("commit")
c.Assert(err, NotNil)
// no conflict for subquery.
tk1.MustExec("begin")
tk1.MustQuery("select * from t where exists(select null from t1 where t1.c1=t.c1) for update")
tk2.MustExec("begin")
tk2.MustExec("update t set c2=211 where c1=12")
tk2.MustExec("commit")
tk1.MustExec("commit")
// no conflict
tk1.MustExec("begin")
tk1.MustQuery("select * from t where c1=11 for update")
tk2.MustExec("begin")
tk2.MustExec("update t set c2=22 where c1=12")
tk2.MustExec("commit")
tk1.MustExec("commit")
// no conflict, autocommit
tk1.MustExec("set @@autocommit=1;")
tk1.MustQuery("select * from t where c1=11 for update")
tk2.MustExec("begin")
tk2.MustExec("update t set c2=211 where c1=11")
tk2.MustExec("commit")
tk1.MustExec("commit")
// conflict
tk1.MustExec("begin")
tk1.MustQuery("select * from (select * from t for update) t join t1 for update")
tk2.MustExec("begin")
tk2.MustExec("update t1 set c1 = 13")
tk2.MustExec("commit")
_, err = tk1.Exec("commit")
c.Assert(err, NotNil)
}
func (s *testSuite) TestEmptyEnum(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (e enum('Y', 'N'))")
tk.MustExec("set sql_mode='STRICT_TRANS_TABLES'")
_, err := tk.Exec("insert into t values (0)")
c.Assert(terror.ErrorEqual(err, table.ErrTruncatedWrongValueForField), IsTrue, Commentf("err %v", err))
_, err = tk.Exec("insert into t values ('abc')")
c.Assert(terror.ErrorEqual(err, table.ErrTruncatedWrongValueForField), IsTrue, Commentf("err %v", err))
tk.MustExec("set sql_mode=''")
tk.MustExec("insert into t values (0)")
tk.MustQuery("select * from t").Check(testkit.Rows(""))
tk.MustExec("insert into t values ('abc')")
tk.MustQuery("select * from t").Check(testkit.Rows("", ""))
tk.MustExec("insert into t values (null)")
tk.MustQuery("select * from t").Check(testkit.Rows("", "", "<nil>"))
}
// TestIssue4024 tests https://github.com/pingcap/tidb/issues/4024.
func (s *testSuite) TestIssue4024(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database test2")
tk.MustExec("use test2")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t values(1)")
tk.MustExec("use test")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t values(1)")
tk.MustExec("update t, test2.t set test2.t.a=2")
tk.MustQuery("select * from t").Check(testkit.Rows("1"))
tk.MustQuery("select * from test2.t").Check(testkit.Rows("2"))
tk.MustExec("update test.t, test2.t set test.t.a=3")
tk.MustQuery("select * from t").Check(testkit.Rows("3"))
tk.MustQuery("select * from test2.t").Check(testkit.Rows("2"))
}
const (
checkRequestOff = iota
checkRequestSyncLog
checkDDLAddIndexPriority
)
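// checkRequestClient wraps the TiKV client so tests can inspect outgoing
// RPC requests and verify their command priority or sync-log flag.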
type checkRequestClient struct {
tikv.Client
priority pb.CommandPri
lowPriorityCnt uint32
mu struct {
sync.RWMutex
checkFlags uint32
syncLog bool
}
}
func (c *checkRequestClient) setCheckPriority(priority pb.CommandPri) {
atomic.StoreInt32((*int32)(&c.priority), int32(priority))
}
func (c *checkRequestClient) getCheckPriority() pb.CommandPri {
return (pb.CommandPri)(atomic.LoadInt32((*int32)(&c.priority)))
}
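// SendRequest forwards the request to the wrapped client and, depending on
// the active check flag, verifies the request's sync-log flag or command
// priority.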
func (c *checkRequestClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
resp, err := c.Client.SendRequest(ctx, addr, req, timeout)
c.mu.RLock()
checkFlags := c.mu.checkFlags
c.mu.RUnlock()
if checkFlags == checkRequestSyncLog {
switch req.Type {
case tikvrpc.CmdPrewrite, tikvrpc.CmdCommit:
c.mu.RLock()
syncLog := c.mu.syncLog
c.mu.RUnlock()
if syncLog != req.SyncLog {
return nil, errors.New("fail to set sync log")
}
}
} else if checkFlags == checkDDLAddIndexPriority {
if req.Type == tikvrpc.CmdScan {
if c.getCheckPriority() != req.Priority {
return nil, errors.New("fail to set priority")
}
} else if req.Type == tikvrpc.CmdPrewrite {
if c.getCheckPriority() == pb.CommandPri_Low {
atomic.AddUint32(&c.lowPriorityCnt, 1)
}
}
}
return resp, err
}
type testSuite1 struct {
store kv.Storage
dom *domain.Domain
cli *checkRequestClient
}
func (s *testSuite1) SetUpSuite(c *C) {
cli := &checkRequestClient{}
hijackClient := func(c tikv.Client) tikv.Client {
cli.Client = c
return cli
}
s.cli = cli
var err error
s.store, err = mockstore.NewMockTikvStore(
mockstore.WithHijackClient(hijackClient),
)
c.Assert(err, IsNil)
session.SetStatsLease(0)
s.dom, err = session.BootstrapSession(s.store)
c.Assert(err, IsNil)
s.dom.SetStatsUpdating(true)
}
func (s *testSuite1) TearDownSuite(c *C) {
s.dom.Close()
s.store.Close()
}
func (s *testSuite1) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show tables")
for _, tb := range r.Rows() {
tableName := tb[0]
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
func (s *testSuite1) TestAddIndexPriority(c *C) {
cli := &checkRequestClient{}
hijackClient := func(c tikv.Client) tikv.Client {
cli.Client = c
return cli
}
store, err := mockstore.NewMockTikvStore(
mockstore.WithHijackClient(hijackClient),
)
c.Assert(err, IsNil)
dom, err := session.BootstrapSession(store)
c.Assert(err, IsNil)
defer func() {
dom.Close()
store.Close()
}()
tk := testkit.NewTestKit(c, store)
tk.MustExec("use test")
tk.MustExec("create table t1 (id int, v int)")
// Insert some data to make sure the planner builds an IndexLookup plan for t1.
for i := 0; i < 10; i++ {
tk.MustExec(fmt.Sprintf("insert into t1 values (%d, %d)", i, i))
}
cli.mu.Lock()
cli.mu.checkFlags = checkDDLAddIndexPriority
cli.mu.Unlock()
cli.setCheckPriority(pb.CommandPri_Low)
tk.MustExec("alter table t1 add index t1_index (id);")
c.Assert(atomic.LoadUint32(&cli.lowPriorityCnt) > 0, IsTrue)
cli.mu.Lock()
cli.mu.checkFlags = checkRequestOff
cli.mu.Unlock()
tk.MustExec("alter table t1 drop index t1_index;")
tk.MustExec("SET SESSION tidb_ddl_reorg_priority = 'PRIORITY_NORMAL'")
cli.mu.Lock()
cli.mu.checkFlags = checkDDLAddIndexPriority
cli.mu.Unlock()
cli.setCheckPriority(pb.CommandPri_Normal)
tk.MustExec("alter table t1 add index t1_index (id);")
cli.mu.Lock()
cli.mu.checkFlags = checkRequestOff
cli.mu.Unlock()
tk.MustExec("alter table t1 drop index t1_index;")
tk.MustExec("SET SESSION tidb_ddl_reorg_priority = 'PRIORITY_HIGH'")
cli.mu.Lock()
cli.mu.checkFlags = checkDDLAddIndexPriority
cli.mu.Unlock()
cli.setCheckPriority(pb.CommandPri_High)
tk.MustExec("alter table t1 add index t1_index (id);")
cli.mu.Lock()
cli.mu.checkFlags = checkRequestOff
cli.mu.Unlock()
}
func (s *testSuite1) TestAlterTableComment(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t_1")
tk.MustExec("create table t_1 (c1 int, c2 int, c3 int default 1, index (c1)) comment = 'test table';")
tk.MustExec("alter table `t_1` comment 'this is table comment';")
result := tk.MustQuery("select table_comment from information_schema.tables where table_name = 't_1';")
result.Check(testkit.Rows("this is table comment"))
tk.MustExec("alter table `t_1` comment 'table t comment';")
result = tk.MustQuery("select table_comment from information_schema.tables where table_name = 't_1';")
result.Check(testkit.Rows("table t comment"))
}
func (s *testSuite) TestTimezonePushDown(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (ts timestamp)")
defer tk.MustExec("drop table t")
tk.MustExec(`insert into t values ("2018-09-13 10:02:06")`)
systemTZ := timeutil.SystemLocation()
c.Assert(systemTZ.String(), Not(Equals), "System")
c.Assert(systemTZ.String(), Not(Equals), "Local")
ctx := context.Background()
count := 0
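// The "CheckSelectRequestHook" context value intercepts the coprocessor
// request; the unmarshalled DAG request must carry the session's time zone
// name.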
ctx1 := context.WithValue(ctx, "CheckSelectRequestHook", func(req *kv.Request) {
count += 1
dagReq := new(tipb.DAGRequest)
err := proto.Unmarshal(req.Data, dagReq)
c.Assert(err, IsNil)
c.Assert(dagReq.GetTimeZoneName(), Equals, systemTZ.String())
})
tk.Se.Execute(ctx1, `select * from t where ts = "2018-09-13 10:02:06"`)
tk.MustExec(`set time_zone="System"`)
tk.Se.Execute(ctx1, `select * from t where ts = "2018-09-13 10:02:06"`)
c.Assert(count, Equals, 2) // Make sure the hook function is called.
}
func (s *testSuite) TestNotFillCacheFlag(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (id int primary key)")
defer tk.MustExec("drop table t")
tk.MustExec("insert into t values (1)")
tests := []struct {
sql string
expect bool
}{
{"select SQL_NO_CACHE * from t", true},
{"select SQL_CACHE * from t", false},
{"select * from t", false},
}
count := 0
ctx := context.Background()
for _, test := range tests {
ctx1 := context.WithValue(ctx, "CheckSelectRequestHook", func(req *kv.Request) {
count++
if req.NotFillCache != test.expect {
c.Errorf("sql=%s, expect=%v, get=%v", test.sql, test.expect, req.NotFillCache)
}
})
rs, err := tk.Se.Execute(ctx1, test.sql)
c.Assert(err, IsNil)
tk.ResultSetToResult(rs[0], Commentf("sql: %v", test.sql))
}
c.Assert(count, Equals, len(tests)) // Make sure the hook function is called.
}
func (s *testSuite1) TestSyncLog(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
cli := s.cli
cli.mu.Lock()
cli.mu.checkFlags = checkRequestSyncLog
cli.mu.syncLog = true
cli.mu.Unlock()
tk.MustExec("create table t (id int primary key)")
cli.mu.Lock()
cli.mu.syncLog = false
cli.mu.Unlock()
tk.MustExec("insert into t values (1)")
cli.mu.Lock()
cli.mu.checkFlags = checkRequestOff
cli.mu.Unlock()
}
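// TestHandleTransfer checks reads inside an uncommitted transaction: table
// scans and index reads must merge the transaction's dirty writes with
// stored rows while carrying the correct row handles.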
func (s *testSuite) TestHandleTransfer(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t(a int, index idx(a))")
tk.MustExec("insert into t values(1), (2), (4)")
tk.MustExec("begin")
tk.MustExec("update t set a = 3 where a = 4")
// Test a table scan read whose result needs handles.
tk.MustQuery("select * from t ignore index(idx)").Check(testkit.Rows("1", "2", "3"))
tk.MustExec("insert into t values(4)")
// Test a single read whose result needs handles.
tk.MustQuery("select * from t use index(idx)").Check(testkit.Rows("1", "2", "3", "4"))
tk.MustQuery("select * from t use index(idx) order by a desc").Check(testkit.Rows("4", "3", "2", "1"))
tk.MustExec("update t set a = 5 where a = 3")
tk.MustQuery("select * from t use index(idx)").Check(testkit.Rows("1", "2", "4", "5"))
tk.MustExec("commit")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, index idx(a))")
tk.MustExec("insert into t values(3, 3), (1, 1), (2, 2)")
// Then test a double read.
tk.MustQuery("select * from t use index(idx) order by a").Check(testkit.Rows("1 1", "2 2", "3 3"))
}
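// TestBit checks the insertion bounds of BIT(n) columns: numeric literals
// must fit in n bits, and string literals are converted to binary literals
// whose numeric value must also fit in n bits.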
func (s *testSuite) TestBit(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(2))")
tk.MustExec("insert into t values (0), (1), (2), (3)")
_, err := tk.Exec("insert into t values (4)")
c.Assert(err, NotNil)
_, err = tk.Exec("insert into t values ('a')")
c.Assert(err, NotNil)
r, err := tk.Exec("select * from t where c1 = 2")
c.Assert(err, IsNil)
req := r.NewChunk()
err = r.Next(context.Background(), req)
c.Assert(err, IsNil)
c.Assert(types.BinaryLiteral(req.GetRow(0).GetBytes(0)), DeepEquals, types.NewBinaryLiteralFromUint(2, -1))
r.Close()
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(31))")
tk.MustExec("insert into t values (0x7fffffff)")
_, err = tk.Exec("insert into t values (0x80000000)")
c.Assert(err, NotNil)
_, err = tk.Exec("insert into t values (0xffffffff)")
c.Assert(err, NotNil)
tk.MustExec("insert into t values ('123')")
tk.MustExec("insert into t values ('1234')")
_, err = tk.Exec("insert into t values ('12345)")
c.Assert(err, NotNil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(62))")
tk.MustExec("insert into t values ('12345678')")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(61))")
_, err = tk.Exec("insert into t values ('12345678')")
c.Assert(err, NotNil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(32))")
tk.MustExec("insert into t values (0x7fffffff)")
tk.MustExec("insert into t values (0xffffffff)")
_, err = tk.Exec("insert into t values (0x1ffffffff)")
c.Assert(err, NotNil)
tk.MustExec("insert into t values ('1234')")
_, err = tk.Exec("insert into t values ('12345')")
c.Assert(err, NotNil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(64))")
tk.MustExec("insert into t values (0xffffffffffffffff)")
tk.MustExec("insert into t values ('12345678')")
_, err = tk.Exec("insert into t values ('123456789')")
c.Assert(err, NotNil)
}
func (s *testSuite) TestEnum(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c enum('a', 'b', 'c'))")
tk.MustExec("insert into t values ('a'), (2), ('c')")
tk.MustQuery("select * from t where c = 'a'").Check(testkit.Rows("a"))
tk.MustQuery("select c + 1 from t where c = 2").Check(testkit.Rows("3"))
tk.MustExec("delete from t")
tk.MustExec("insert into t values ()")
tk.MustExec("insert into t values (null), ('1')")
tk.MustQuery("select c + 1 from t where c = 1").Check(testkit.Rows("2"))
}
func (s *testSuite) TestSet(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c set('a', 'b', 'c'))")
tk.MustExec("insert into t values ('a'), (2), ('c'), ('a,b'), ('b,a')")
tk.MustQuery("select * from t where c = 'a'").Check(testkit.Rows("a"))
tk.MustQuery("select * from t where c = 'a,b'").Check(testkit.Rows("a,b", "a,b"))
tk.MustQuery("select c + 1 from t where c = 2").Check(testkit.Rows("3"))
tk.MustExec("delete from t")
tk.MustExec("insert into t values ()")
tk.MustExec("insert into t values (null), ('1')")
tk.MustQuery("select c + 1 from t where c = 1").Check(testkit.Rows("2"))
}
func (s *testSuite) TestSubqueryInValues(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (id int, name varchar(20))")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (gid int)")
tk.MustExec("insert into t1 (gid) value (1)")
tk.MustExec("insert into t (id, name) value ((select gid from t1) ,'asd')")
tk.MustQuery("select * from t").Check(testkit.Rows("1 asd"))
}
func (s *testSuite) TestEnhancedRangeAccess(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key, b int)")
tk.MustExec("insert into t values(1, 2), (2, 1)")
tk.MustQuery("select * from t where (a = 1 and b = 2) or (a = 2 and b = 1)").Check(testkit.Rows("1 2", "2 1"))
tk.MustQuery("select * from t where (a = 1 and b = 1) or (a = 2 and b = 2)").Check(nil)
}
// TestMaxInt64Handle Issue #4810
func (s *testSuite) TestMaxInt64Handle(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id bigint, PRIMARY KEY (id))")
tk.MustExec("insert into t values(9223372036854775807)")
tk.MustExec("select * from t where id = 9223372036854775807")
tk.MustQuery("select * from t where id = 9223372036854775807;").Check(testkit.Rows("9223372036854775807"))
tk.MustQuery("select * from t").Check(testkit.Rows("9223372036854775807"))
_, err := tk.Exec("insert into t values(9223372036854775807)")
c.Assert(err, NotNil)
tk.MustExec("delete from t where id = 9223372036854775807")
tk.MustQuery("select * from t").Check(nil)
}
func (s *testSuite) TestTableScanWithPointRanges(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id int, PRIMARY KEY (id))")
tk.MustExec("insert into t values(1), (5), (10)")
tk.MustQuery("select * from t where id in(1, 2, 10)").Check(testkit.Rows("1", "10"))
}
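// TestUnsignedPk checks that bigint unsigned primary keys above
// math.MaxInt64 are stored, ordered and filtered correctly.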
func (s *testSuite) TestUnsignedPk(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id bigint unsigned primary key)")
var num1, num2 uint64 = math.MaxInt64 + 1, math.MaxInt64 + 2
tk.MustExec(fmt.Sprintf("insert into t values(%v), (%v), (1), (2)", num1, num2))
num1Str := strconv.FormatUint(num1, 10)
num2Str := strconv.FormatUint(num2, 10)
tk.MustQuery("select * from t order by id").Check(testkit.Rows("1", "2", num1Str, num2Str))
tk.MustQuery("select * from t where id not in (2)").Check(testkit.Rows(num1Str, num2Str, "1"))
tk.MustExec("drop table t")
tk.MustExec("create table t(a bigint unsigned primary key, b int, index idx(b))")
tk.MustExec("insert into t values(9223372036854775808, 1), (1, 1)")
tk.MustQuery("select * from t use index(idx) where b = 1 and a < 2").Check(testkit.Rows("1 1"))
tk.MustQuery("select * from t use index(idx) where b = 1 order by b, a").Check(testkit.Rows("1 1", "9223372036854775808 1"))
}
func (s *testSuite) TestIssue5666(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("set @@profiling=1")
tk.MustQuery("SELECT QUERY_ID, SUM(DURATION) AS SUM_DURATION FROM INFORMATION_SCHEMA.PROFILING GROUP BY QUERY_ID;").Check(testkit.Rows("0 0"))
}
func (s *testSuite) TestIssue5341(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("drop table if exists test.t")
tk.MustExec("create table test.t(a char)")
tk.MustExec("insert into test.t value('a')")
tk.MustQuery("select * from test.t where a < 1 order by a limit 0;").Check(testkit.Rows())
}
func (s *testSuite) TestContainDotColumn(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists test.t1")
tk.MustExec("create table test.t1(t1.a char)")
tk.MustExec("drop table if exists t2")
tk.MustExec("create table t2(a char, t2.b int)")
tk.MustExec("drop table if exists t3")
_, err := tk.Exec("create table t3(s.a char);")
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.ErrWrongTableName))
}
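// TestCheckIndex fabricates inconsistencies between index data and record
// data, then checks that `admin check index` reports handle mismatches and
// row-count mismatches.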
func (s *testSuite) TestCheckIndex(c *C) {
s.ctx = mock.NewContext()
s.ctx.Store = s.store
se, err := session.CreateSession4Test(s.store)
c.Assert(err, IsNil)
defer se.Close()
_, err = se.Execute(context.Background(), "create database test_admin")
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "use test_admin")
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "create table t (pk int primary key, c int default 1, c1 int default 1, unique key c(c))")
c.Assert(err, IsNil)
is := s.domain.InfoSchema()
db := model.NewCIStr("test_admin")
dbInfo, ok := is.SchemaByName(db)
c.Assert(ok, IsTrue)
tblName := model.NewCIStr("t")
tbl, err := is.TableByName(db, tblName)
c.Assert(err, IsNil)
tbInfo := tbl.Meta()
alloc := autoid.NewAllocator(s.store, dbInfo.ID, false)
tb, err := tables.TableFromMeta(alloc, tbInfo)
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t c")
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t C")
c.Assert(err, IsNil)
// set data to:
// index data (handle, data): (1, 10), (2, 20)
// table data (handle, data): (1, 10), (2, 20)
recordVal1 := types.MakeDatums(int64(1), int64(10), int64(11))
recordVal2 := types.MakeDatums(int64(2), int64(20), int64(21))
c.Assert(s.ctx.NewTxn(context.Background()), IsNil)
_, err = tb.AddRecord(s.ctx, recordVal1)
c.Assert(err, IsNil)
_, err = tb.AddRecord(s.ctx, recordVal2)
c.Assert(err, IsNil)
txn, err := s.ctx.Txn(true)
c.Assert(err, IsNil)
c.Assert(txn.Commit(context.Background()), IsNil)
mockCtx := mock.NewContext()
idx := tb.Indices()[0]
sc := &stmtctx.StatementContext{TimeZone: time.Local}
_, err = se.Execute(context.Background(), "admin check index t idx_inexistent")
c.Assert(strings.Contains(err.Error(), "not exist"), IsTrue)
// set data to:
// index data (handle, data): (1, 10), (2, 20), (3, 30)
// table data (handle, data): (1, 10), (2, 20), (4, 40)
txn, err = s.store.Begin()
c.Assert(err, IsNil)
_, err = idx.Create(mockCtx, txn, types.MakeDatums(int64(30)), 3, table.WithAssertion(txn))
c.Assert(err, IsNil)
key := tablecodec.EncodeRowKey(tb.Meta().ID, codec.EncodeInt(nil, 4))
setColValue(c, txn, key, types.NewDatum(int64(40)))
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t c")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "handle 3, index:types.Datum{k:0x1, collation:0x0, decimal:0x0, length:0x0, i:30, b:[]uint8(nil), x:interface {}(nil)} != record:<nil>")
// set data to:
// index data (handle, data): (1, 10), (2, 20), (3, 30), (4, 40)
// table data (handle, data): (1, 10), (2, 20), (4, 40)
txn, err = s.store.Begin()
c.Assert(err, IsNil)
_, err = idx.Create(mockCtx, txn, types.MakeDatums(int64(40)), 4, table.WithAssertion(txn))
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t c")
c.Assert(strings.Contains(err.Error(), "table count 3 != index(c) count 4"), IsTrue)
// set data to:
// index data (handle, data): (1, 10), (4, 40)
// table data (handle, data): (1, 10), (2, 20), (4, 40)
txn, err = s.store.Begin()
c.Assert(err, IsNil)
err = idx.Delete(sc, txn, types.MakeDatums(int64(30)), 3, nil)
c.Assert(err, IsNil)
err = idx.Delete(sc, txn, types.MakeDatums(int64(20)), 2, nil)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t c")
c.Assert(strings.Contains(err.Error(), "table count 3 != index(c) count 2"), IsTrue)
// TODO: pass the case below:
// set data to:
// index data (handle, data): (1, 10), (4, 40), (2, 30)
// table data (handle, data): (1, 10), (2, 20), (4, 40)
}
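// setColValue writes an encoded row for column IDs 2 and 3 directly at the
// given row key, bypassing the table layer, so tests can fabricate
// inconsistent data.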
func setColValue(c *C, txn kv.Transaction, key kv.Key, v types.Datum) {
row := []types.Datum{v, {}}
colIDs := []int64{2, 3}
sc := &stmtctx.StatementContext{TimeZone: time.Local}
value, err := tablecodec.EncodeRow(sc, row, colIDs, nil, nil)
c.Assert(err, IsNil)
err = txn.Set(key, value)
c.Assert(err, IsNil)
}
func (s *testSuite) TestCheckTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
// Test 'admin check table' when the table has a unique index with null values.
tk.MustExec("use test")
tk.MustExec("drop table if exists admin_test;")
tk.MustExec("create table admin_test (c1 int, c2 int, c3 int default 1, index (c1), unique key(c2));")
tk.MustExec("insert admin_test (c1, c2) values (1, 1), (2, 2), (NULL, NULL);")
tk.MustExec("admin check table admin_test;")
}
func (s *testSuite) TestCoprocessorStreamingFlag(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (id int, value int, index idx(id))")
// Add some data to make statistics work.
for i := 0; i < 100; i++ {
tk.MustExec(fmt.Sprintf("insert into t values (%d, %d)", i, i))
}
tests := []struct {
sql string
expect bool
}{
{"select * from t", true}, // TableReader
{"select * from t where id = 5", true}, // IndexLookup
{"select * from t where id > 5", true}, // Filter
{"select * from t limit 3", false}, // Limit
{"select avg(id) from t", false}, // Aggregate
{"select * from t order by value limit 3", false}, // TopN
}
ctx := context.Background()
for _, test := range tests {
ctx1 := context.WithValue(ctx, "CheckSelectRequestHook", func(req *kv.Request) {
if req.Streaming != test.expect {
c.Errorf("sql=%s, expect=%v, get=%v", test.sql, test.expect, req.Streaming)
}
})
rs, err := tk.Se.Execute(ctx1, test.sql)
c.Assert(err, IsNil)
tk.ResultSetToResult(rs[0], Commentf("sql: %v", test.sql))
}
}
func (s *testSuite) TestIncorrectLimitArg(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test;`)
tk.MustExec(`drop table if exists t;`)
tk.MustExec(`create table t(a bigint);`)
tk.MustExec(`prepare stmt1 from 'select * from t limit ?';`)
tk.MustExec(`prepare stmt2 from 'select * from t limit ?, ?';`)
tk.MustExec(`set @a = -1;`)
tk.MustExec(`set @b = 1;`)
var err error
_, err = tk.Se.Execute(context.TODO(), `execute stmt1 using @a;`)
c.Assert(err.Error(), Equals, `[planner:1210]Incorrect arguments to LIMIT`)
_, err = tk.Se.Execute(context.TODO(), `execute stmt2 using @b, @a;`)
c.Assert(err.Error(), Equals, `[planner:1210]Incorrect arguments to LIMIT`)
}
func (s *testSuite) TestLimit(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test;`)
tk.MustExec(`drop table if exists t;`)
tk.MustExec(`create table t(a bigint, b bigint);`)
tk.MustExec(`insert into t values(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6);`)
tk.MustQuery(`select * from t order by a limit 1, 1;`).Check(testkit.Rows(
"2 2",
))
tk.MustQuery(`select * from t order by a limit 1, 2;`).Check(testkit.Rows(
"2 2",
"3 3",
))
tk.MustQuery(`select * from t order by a limit 1, 3;`).Check(testkit.Rows(
"2 2",
"3 3",
"4 4",
))
tk.MustQuery(`select * from t order by a limit 1, 4;`).Check(testkit.Rows(
"2 2",
"3 3",
"4 4",
"5 5",
))
tk.MustExec(`set @@tidb_init_chunk_size=2;`)
tk.MustQuery(`select * from t order by a limit 2, 1;`).Check(testkit.Rows(
"3 3",
))
tk.MustQuery(`select * from t order by a limit 2, 2;`).Check(testkit.Rows(
"3 3",
"4 4",
))
tk.MustQuery(`select * from t order by a limit 2, 3;`).Check(testkit.Rows(
"3 3",
"4 4",
"5 5",
))
tk.MustQuery(`select * from t order by a limit 2, 4;`).Check(testkit.Rows(
"3 3",
"4 4",
"5 5",
"6 6",
))
}
func (s *testSuite) TestCoprocessorStreamingWarning(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a double)")
tk.MustExec("insert into t value(1.2)")
tk.MustExec("set @@session.tidb_enable_streaming = 1")
result := tk.MustQuery("select * from t where a/0 > 1")
result.Check(testkit.Rows())
tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Warning|1105|Division by 0"))
}
func (s *testSuite3) TestYearTypeDeleteIndex(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a YEAR, PRIMARY KEY(a));")
tk.MustExec("insert into t set a = '2151';")
tk.MustExec("delete from t;")
tk.MustExec("admin check table t")
}
func (s *testSuite3) TestForSelectScopeInUnion(c *C) {
// For "A union B for update", the "for update" option belongs to the union
// statement, so it should work on both A and B.
tk1 := testkit.NewTestKit(c, s.store)
tk2 := testkit.NewTestKit(c, s.store)
tk1.MustExec("use test")
tk1.MustExec("drop table if exists t")
tk1.MustExec("create table t(a int)")
tk1.MustExec("insert into t values (1)")
tk1.MustExec("begin")
// 'For update' would act on the second select.
tk1.MustQuery("select 1 as a union select a from t for update")
tk2.MustExec("use test")
tk2.MustExec("update t set a = a + 1")
// As tk1 uses select 'for update', it should detect the conflict and fail.
_, err := tk1.Exec("commit")
c.Assert(err, NotNil)
tk1.MustExec("begin")
// 'For update' would be ignored if 'order by' or 'limit' exists.
tk1.MustQuery("select 1 as a union select a from t limit 5 for update")
tk1.MustQuery("select 1 as a union select a from t order by a for update")
tk2.MustExec("update t set a = a + 1")
_, err = tk1.Exec("commit")
c.Assert(err, IsNil)
}
func (s *testSuite3) TestUnsignedDecimalOverflow(c *C) {
tests := []struct {
input interface{}
hasErr bool
err string
}{{
-1,
true,
"Out of range value for column",
}, {
"-1.1e-1",
true,
"Out of range value for column",
}, {
-1.1,
true,
"Out of range value for column",
}, {
-0,
false,
"",
},
}
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a decimal(10,2) unsigned)")
for _, t := range tests {
res, err := tk.Exec("insert into t values (?)", t.input)
if res != nil {
defer res.Close()
}
if t.hasErr {
c.Assert(err, NotNil)
c.Assert(strings.Contains(err.Error(), t.err), IsTrue)
} else {
c.Assert(err, IsNil)
}
if res != nil {
res.Close()
}
}
tk.MustExec("set sql_mode=''")
tk.MustExec("delete from t")
tk.MustExec("insert into t values (?)", -1)
r := tk.MustQuery("select a from t limit 1")
r.Check(testkit.Rows("0.00"))
}
func (s *testSuite3) TestIndexJoinTableDualPanic(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists a")
tk.MustExec("create table a (f1 int, f2 varchar(32), primary key (f1))")
tk.MustExec("insert into a (f1,f2) values (1,'a'), (2,'b'), (3,'c')")
tk.MustQuery("select a.* from a inner join (select 1 as k1,'k2-1' as k2) as k on a.f1=k.k1;").
Check(testkit.Rows("1 a"))
}
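// TestUnionAutoSignedCast checks the signedness chosen for UNION results:
// with these schemas, negative int, bigint and double values from a later
// branch are clipped to 0 when the first branch is unsigned, while decimal
// values keep their sign.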
func (s *testSuite3) TestUnionAutoSignedCast(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1,t2")
tk.MustExec("create table t1 (id int, i int, b bigint, d double, dd decimal)")
tk.MustExec("create table t2 (id int, i int unsigned, b bigint unsigned, d double unsigned, dd decimal unsigned)")
tk.MustExec("insert into t1 values(1, -1, -1, -1.1, -1)")
tk.MustExec("insert into t2 values(2, 1, 1, 1.1, 1)")
tk.MustQuery("select * from t1 union select * from t2 order by id").
Check(testkit.Rows("1 -1 -1 -1.1 -1", "2 1 1 1.1 1"))
tk.MustQuery("select id, i, b, d, dd from t2 union select id, i, b, d, dd from t1 order by id").
Check(testkit.Rows("1 0 0 0 -1", "2 1 1 1.1 1"))
tk.MustQuery("select id, i from t2 union select id, cast(i as unsigned int) from t1 order by id").
Check(testkit.Rows("1 18446744073709551615", "2 1"))
tk.MustQuery("select dd from t2 union all select dd from t2").
Check(testkit.Rows("1", "1"))
tk.MustExec("drop table if exists t3,t4")
tk.MustExec("create table t3 (id int, v int)")
tk.MustExec("create table t4 (id int, v double unsigned)")
tk.MustExec("insert into t3 values (1, -1)")
tk.MustExec("insert into t4 values (2, 1)")
tk.MustQuery("select id, v from t3 union select id, v from t4 order by id").
Check(testkit.Rows("1 -1", "2 1"))
tk.MustQuery("select id, v from t4 union select id, v from t3 order by id").
Check(testkit.Rows("1 0", "2 1"))
tk.MustExec("drop table if exists t5,t6,t7")
tk.MustExec("create table t5 (id int, v bigint unsigned)")
tk.MustExec("create table t6 (id int, v decimal)")
tk.MustExec("create table t7 (id int, v bigint)")
tk.MustExec("insert into t5 values (1, 1)")
tk.MustExec("insert into t6 values (2, -1)")
tk.MustExec("insert into t7 values (3, -1)")
tk.MustQuery("select id, v from t5 union select id, v from t6 order by id").
Check(testkit.Rows("1 1", "2 -1"))
tk.MustQuery("select id, v from t5 union select id, v from t7 union select id, v from t6 order by id").
Check(testkit.Rows("1 1", "2 -1", "3 -1"))
}
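// TestUpdateJoin covers multi-table UPDATE with joins: unmatched outer rows,
// assignments that reference columns of the other side, and repeated joins of
// the same table.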
func (s *testSuite3) TestUpdateJoin(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2, t3, t4, t5, t6, t7")
tk.MustExec("create table t1(k int, v int)")
tk.MustExec("create table t2(k int, v int)")
tk.MustExec("create table t3(id int auto_increment, k int, v int, primary key(id))")
tk.MustExec("create table t4(k int, v int)")
tk.MustExec("create table t5(v int, k int, primary key(k))")
tk.MustExec("insert into t1 values (1, 1)")
tk.MustExec("insert into t4 values (3, 3)")
tk.MustExec("create table t6 (id int, v longtext)")
tk.MustExec("create table t7 (x int, id int, v longtext, primary key(id))")
// Test the normal case: update one row in a single table.
tk.MustExec("update t1 set v = 0 where k = 1")
tk.MustQuery("select k, v from t1 where k = 1").Check(testkit.Rows("1 0"))
// Test the case where a table with auto_increment or non-null columns is the right table of a left join.
tk.MustExec("update t1 left join t3 on t1.k = t3.k set t1.v = 1")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 1"))
tk.MustQuery("select id, k, v from t3").Check(testkit.Rows())
// Test a left join where the right table has no matching record but its columns are still assigned.
tk.MustExec("update t1 left join t2 on t1.k = t2.k set t1.v = t2.v, t2.v = 3")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 <nil>"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows())
// Test the case where the assignment to the left table references right-table columns that are modified in the same statement.
tk.MustExec("update t1 left join t2 on t1.k = t2.k set t2.v = 3, t1.v = t2.v")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 <nil>"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows())
// Test a right join where the left table has no matching record but its columns are still assigned.
tk.MustExec("update t2 right join t1 on t2.k = t1.k set t2.v = 4, t1.v = 0")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 0"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows())
// test the case of right join and left join at the same time.
tk.MustExec("update t1 left join t2 on t1.k = t2.k right join t4 on t4.k = t2.k set t1.v = 4, t2.v = 4, t4.v = 4")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 0"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows())
tk.MustQuery("select k, v from t4").Check(testkit.Rows("3 4"))
// test normal left join and the case that the right table has matching rows.
tk.MustExec("insert t2 values (1, 10)")
tk.MustExec("update t1 left join t2 on t1.k = t2.k set t2.v = 11")
tk.MustQuery("select k, v from t2").Check(testkit.Rows("1 11"))
// Test joining the same table repeatedly and updating the unmatched records.
tk.MustExec("update t1 t11 left join t2 on t11.k = t2.k left join t1 t12 on t2.v = t12.k set t12.v = 233, t11.v = 111")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 111"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows("1 11"))
// Test the left join case where the left table has rows but all their values are null.
tk.MustExec("delete from t1")
tk.MustExec("delete from t2")
tk.MustExec("insert into t1 values (null, null)")
tk.MustExec("update t1 left join t2 on t1.k = t2.k set t1.v = 1")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("<nil> 1"))
// Test the case where the right table of a left join has a primary key.
tk.MustExec("insert t5 values(0, 0)")
tk.MustExec("update t1 left join t5 on t1.k = t5.k set t1.v = 2")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("<nil> 2"))
tk.MustQuery("select k, v from t5").Check(testkit.Rows("0 0"))
tk.MustExec("insert into t6 values (1, NULL)")
tk.MustExec("insert into t7 values (5, 1, 'a')")
tk.MustExec("update t6, t7 set t6.v = t7.v where t6.id = t7.id and t7.x = 5")
tk.MustQuery("select v from t6").Check(testkit.Rows("a"))
}
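// TestMaxOneRow checks that a scalar subquery returning more than one row
// raises an error at execution time rather than at plan time.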
func (s *testSuite3) TestMaxOneRow(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t1`)
tk.MustExec(`drop table if exists t2`)
tk.MustExec(`create table t1(a double, b double);`)
tk.MustExec(`create table t2(a double, b double);`)
tk.MustExec(`insert into t1 values(1, 1), (2, 2), (3, 3);`)
tk.MustExec(`insert into t2 values(0, 0);`)
tk.MustExec(`set @@tidb_init_chunk_size=1;`)
rs, err := tk.Exec(`select (select t1.a from t1 where t1.a > t2.a) as a from t2;`)
c.Assert(err, IsNil)
err = rs.Next(context.TODO(), rs.NewChunk())
c.Assert(err.Error(), Equals, "subquery returns more than 1 row")
err = rs.Close()
c.Assert(err, IsNil)
}
func (s *testSuite3) TestCurrentTimestampValueSelection(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t,t1")
tk.MustExec("create table t (id int, t0 timestamp null default current_timestamp, t1 timestamp(1) null default current_timestamp(1), t2 timestamp(2) null default current_timestamp(2) on update current_timestamp(2))")
tk.MustExec("insert into t (id) values (1)")
rs := tk.MustQuery("select t0, t1, t2 from t where id = 1")
t0 := rs.Rows()[0][0].(string)
t1 := rs.Rows()[0][1].(string)
t2 := rs.Rows()[0][2].(string)
c.Assert(len(strings.Split(t0, ".")), Equals, 1)
c.Assert(len(strings.Split(t1, ".")[1]), Equals, 1)
c.Assert(len(strings.Split(t2, ".")[1]), Equals, 2)
tk.MustQuery("select id from t where t0 = ?", t0).Check(testkit.Rows("1"))
tk.MustQuery("select id from t where t1 = ?", t1).Check(testkit.Rows("1"))
tk.MustQuery("select id from t where t2 = ?", t2).Check(testkit.Rows("1"))
time.Sleep(time.Second)
tk.MustExec("update t set t0 = now() where id = 1")
rs = tk.MustQuery("select t2 from t where id = 1")
newT2 := rs.Rows()[0][0].(string)
c.Assert(newT2 != t2, IsTrue)
tk.MustExec("create table t1 (id int, a timestamp, b timestamp(2), c timestamp(3))")
tk.MustExec("insert into t1 (id, a, b, c) values (1, current_timestamp(2), current_timestamp, current_timestamp(3))")
rs = tk.MustQuery("select a, b, c from t1 where id = 1")
a := rs.Rows()[0][0].(string)
b := rs.Rows()[0][1].(string)
d := rs.Rows()[0][2].(string)
c.Assert(len(strings.Split(a, ".")), Equals, 1)
c.Assert(strings.Split(b, ".")[1], Equals, "00")
c.Assert(len(strings.Split(d, ".")[1]), Equals, 3)
}
func (s *testSuite3) TestRowID(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t(a varchar(10), b varchar(10), c varchar(1), index idx(a, b, c));`)
tk.MustExec(`insert into t values('a', 'b', 'c');`)
tk.MustExec(`insert into t values('a', 'b', 'c');`)
tk.MustQuery(`select b, _tidb_rowid from t use index(idx) where a = 'a';`).Check(testkit.Rows(
`b 1`,
`b 2`,
))
tk.MustExec(`begin;`)
tk.MustExec(`select * from t for update`)
tk.MustQuery(`select distinct b from t use index(idx) where a = 'a';`).Check(testkit.Rows(`b`))
tk.MustExec(`commit;`)
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t(a varchar(5) primary key)`)
tk.MustExec(`insert into t values('a')`)
tk.MustQuery("select *, _tidb_rowid from t use index(`primary`) where _tidb_rowid=1").Check(testkit.Rows("a 1"))
}
func (s *testSuite3) TestDoSubquery(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t(a int)`)
_, err := tk.Exec(`do 1 in (select * from t)`)
c.Assert(err, IsNil, Commentf("err %v", err))
tk.MustExec(`insert into t values(1)`)
r, err := tk.Exec(`do 1 in (select * from t)`)
c.Assert(err, IsNil, Commentf("err %v", err))
c.Assert(r, IsNil, Commentf("result of DO statement should be empty"))
}
func (s *testSuite3) TestTSOFail(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t(a int)`)
c.Assert(failpoint.Enable("github.com/pingcap/tidb/session/mockGetTSFail", "return"), IsNil)
ctx := failpoint.WithHook(context.Background(), func(ctx context.Context, fpname string) bool {
return fpname == "github.com/pingcap/tidb/session/mockGetTSFail"
})
_, err := tk.Se.Execute(ctx, `select * from t`)
c.Assert(err, NotNil)
c.Assert(failpoint.Disable("github.com/pingcap/tidb/session/mockGetTSFail"), IsNil)
}
func (s *testSuite3) TestSelectHashPartitionTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists th`)
tk.MustExec("set @@session.tidb_enable_table_partition = '1';")
tk.MustExec(`create table th (a int, b int) partition by hash(a) partitions 3;`)
defer tk.MustExec(`drop table if exists th`)
tk.MustExec(`insert into th values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8);`)
tk.MustExec("insert into th values (-1,-1),(-2,-2),(-3,-3),(-4,-4),(-5,-5),(-6,-6),(-7,-7),(-8,-8);")
tk.MustQuery("select b from th order by a").Check(testkit.Rows("-8", "-7", "-6", "-5", "-4", "-3", "-2", "-1", "0", "1", "2", "3", "4", "5", "6", "7", "8"))
tk.MustQuery(" select * from th where a=-2;").Check(testkit.Rows("-2 -2"))
tk.MustQuery(" select * from th where a=5;").Check(testkit.Rows("5 5"))
}
func (s *testSuite3) TestSelectPartition(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists th, tr`)
tk.MustExec("set @@session.tidb_enable_table_partition = '1';")
tk.MustExec(`create table th (a int, b int) partition by hash(a) partitions 3;`)
tk.MustExec(`create table tr (a int, b int)
partition by range (a) (
partition r0 values less than (4),
partition r1 values less than (7),
partition r3 values less than maxvalue)`)
defer tk.MustExec(`drop table if exists th, tr`)
tk.MustExec(`insert into th values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8);`)
tk.MustExec("insert into th values (-1,-1),(-2,-2),(-3,-3),(-4,-4),(-5,-5),(-6,-6),(-7,-7),(-8,-8);")
tk.MustExec(`insert into tr values (-3,-3),(3,3),(4,4),(7,7),(8,8);`)
// Select a single partition.
tk.MustQuery("select b from th partition (p0) order by a").Check(testkit.Rows("-6", "-3", "0", "3", "6"))
tk.MustQuery("select b from tr partition (r0) order by a").Check(testkit.Rows("-3", "3"))
tk.MustQuery("select b from th partition (p0,P0) order by a").Check(testkit.Rows("-6", "-3", "0", "3", "6"))
tk.MustQuery("select b from tr partition (r0,R0,r0) order by a").Check(testkit.Rows("-3", "3"))
// Select multiple partitions.
tk.MustQuery("select b from th partition (P2,p0) order by a").Check(testkit.Rows("-8", "-6", "-5", "-3", "-2", "0", "2", "3", "5", "6", "8"))
tk.MustQuery("select b from tr partition (r1,R3) order by a").Check(testkit.Rows("4", "7", "8"))
// Test that selecting an unknown partition returns an error.
err := tk.ExecToErr("select b from th partition (p0,p4)")
c.Assert(err.Error(), Equals, "[table:1735]Unknown partition 'p4' in table 'th'")
err = tk.ExecToErr("select b from tr partition (r1,r4)")
c.Assert(err.Error(), Equals, "[table:1735]Unknown partition 'r4' in table 'tr'")
// Test selecting from a partitioned table inside a transaction.
tk.MustExec("begin")
tk.MustExec("insert into th values (10,10),(11,11)")
tk.MustQuery("select a, b from th where b>10").Check(testkit.Rows("11 11"))
tk.MustExec("commit")
tk.MustQuery("select a, b from th where b>10").Check(testkit.Rows("11 11"))
}
func (s *testSuite) TestSelectView(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table view_t (a int,b int)")
tk.MustExec("insert into view_t values(1,2)")
tk.MustExec("create definer='root'@'localhost' view view1 as select * from view_t")
tk.MustExec("create definer='root'@'localhost' view view2(c,d) as select * from view_t")
tk.MustExec("create definer='root'@'localhost' view view3(c,d) as select a,b from view_t")
tk.MustQuery("select * from view1;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view2;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view3;").Check(testkit.Rows("1 2"))
tk.MustExec("drop table view_t;")
tk.MustExec("create table view_t(c int,d int)")
err := tk.ExecToErr("select * from view1")
c.Assert(err.Error(), Equals, plannercore.ErrViewInvalid.GenWithStackByArgs("test", "view1").Error())
err = tk.ExecToErr("select * from view2")
c.Assert(err.Error(), Equals, plannercore.ErrViewInvalid.GenWithStackByArgs("test", "view2").Error())
err = tk.ExecToErr("select * from view3")
c.Assert(err.Error(), Equals, "[planner:1054]Unknown column 'a' in 'field list'")
tk.MustExec("drop table view_t;")
tk.MustExec("create table view_t(a int,b int,c int)")
tk.MustExec("insert into view_t values(1,2,3)")
tk.MustQuery("select * from view1;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view2;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view3;").Check(testkit.Rows("1 2"))
tk.MustExec("alter table view_t drop column a")
tk.MustExec("alter table view_t add column a int after b")
tk.MustExec("update view_t set a=1;")
tk.MustQuery("select * from view1;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view2;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view3;").Check(testkit.Rows("1 2"))
tk.MustExec("drop table view_t;")
tk.MustExec("drop view view1,view2,view3;")
tk.MustExec("set @@tidb_enable_window_function = 1")
defer func() {
tk.MustExec("set @@tidb_enable_window_function = 0")
}()
tk.MustExec("create table t(a int, b int)")
tk.MustExec("insert into t values (1,1),(1,2),(2,1),(2,2)")
tk.MustExec("create definer='root'@'localhost' view v as select a, first_value(a) over(rows between 1 preceding and 1 following), last_value(a) over(rows between 1 preceding and 1 following) from t")
result := tk.MustQuery("select * from v")
result.Check(testkit.Rows("1 1 1", "1 1 2", "2 1 2", "2 2 2"))
tk.MustExec("drop view v;")
}
type testSuite2 struct {
cluster *mocktikv.Cluster
mvccStore mocktikv.MVCCStore
store kv.Storage
domain *domain.Domain
*parser.Parser
ctx *mock.Context
}
func (s *testSuite2) SetUpSuite(c *C) {
s.Parser = parser.New()
flag.Lookup("mockTikv")
useMockTikv := *mockTikv
if useMockTikv {
s.cluster = mocktikv.NewCluster()
mocktikv.BootstrapWithSingleStore(s.cluster)
s.mvccStore = mocktikv.MustNewMVCCStore()
store, err := mockstore.NewMockTikvStore(
mockstore.WithCluster(s.cluster),
mockstore.WithMVCCStore(s.mvccStore),
)
c.Assert(err, IsNil)
s.store = store
session.SetSchemaLease(0)
session.DisableStats4Test()
}
d, err := session.BootstrapSession(s.store)
c.Assert(err, IsNil)
d.SetStatsUpdating(true)
s.domain = d
}
func (s *testSuite2) TearDownSuite(c *C) {
s.domain.Close()
s.store.Close()
}
func (s *testSuite2) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show full tables")
for _, tb := range r.Rows() {
tableName := tb[0]
if tb[1] == "VIEW" {
tk.MustExec(fmt.Sprintf("drop view %v", tableName))
} else {
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
}
type testSuite3 struct {
cluster *mocktikv.Cluster
mvccStore mocktikv.MVCCStore
store kv.Storage
domain *domain.Domain
*parser.Parser
ctx *mock.Context
}
func (s *testSuite3) SetUpSuite(c *C) {
s.Parser = parser.New()
flag.Lookup("mockTikv")
useMockTikv := *mockTikv
if useMockTikv {
s.cluster = mocktikv.NewCluster()
mocktikv.BootstrapWithSingleStore(s.cluster)
s.mvccStore = mocktikv.MustNewMVCCStore()
store, err := mockstore.NewMockTikvStore(
mockstore.WithCluster(s.cluster),
mockstore.WithMVCCStore(s.mvccStore),
)
c.Assert(err, IsNil)
s.store = store
session.SetSchemaLease(0)
session.DisableStats4Test()
}
d, err := session.BootstrapSession(s.store)
c.Assert(err, IsNil)
d.SetStatsUpdating(true)
s.domain = d
}
func (s *testSuite3) TearDownSuite(c *C) {
s.domain.Close()
s.store.Close()
}
func (s *testSuite3) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show full tables")
for _, tb := range r.Rows() {
tableName := tb[0]
if tb[1] == "VIEW" {
tk.MustExec(fmt.Sprintf("drop view %v", tableName))
} else {
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
}
type testSuite4 struct {
cluster *mocktikv.Cluster
mvccStore mocktikv.MVCCStore
store kv.Storage
domain *domain.Domain
*parser.Parser
ctx *mock.Context
}
func (s *testSuite4) SetUpSuite(c *C) {
s.Parser = parser.New()
flag.Lookup("mockTikv")
useMockTikv := *mockTikv
if useMockTikv {
s.cluster = mocktikv.NewCluster()
mocktikv.BootstrapWithSingleStore(s.cluster)
s.mvccStore = mocktikv.MustNewMVCCStore()
store, err := mockstore.NewMockTikvStore(
mockstore.WithCluster(s.cluster),
mockstore.WithMVCCStore(s.mvccStore),
)
c.Assert(err, IsNil)
s.store = store
session.SetSchemaLease(0)
session.DisableStats4Test()
}
d, err := session.BootstrapSession(s.store)
c.Assert(err, IsNil)
d.SetStatsUpdating(true)
s.domain = d
}
func (s *testSuite4) TearDownSuite(c *C) {
s.domain.Close()
s.store.Close()
}
func (s *testSuite4) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show full tables")
for _, tb := range r.Rows() {
tableName := tb[0]
if tb[1] == "VIEW" {
tk.MustExec(fmt.Sprintf("drop view %v", tableName))
} else {
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
}
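// TestStrToDateBuiltin covers str_to_date year handling: two-digit %y years
// map to 1970-2069, and separator characters in the format string are matched
// literally.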
func (s *testSuiteP1) TestStrToDateBuiltin(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustQuery(`select str_to_date('18/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('a18/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('69/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("2069-10-22"))
tk.MustQuery(`select str_to_date('70/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("1970-10-22"))
tk.MustQuery(`select str_to_date('8/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("2008-10-22"))
tk.MustQuery(`select str_to_date('8/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("2008-10-22"))
tk.MustQuery(`select str_to_date('18/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('a18/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('69/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("2069-10-22"))
tk.MustQuery(`select str_to_date('70/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("1970-10-22"))
tk.MustQuery(`select str_to_date('018/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("0018-10-22"))
tk.MustQuery(`select str_to_date('2018/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('018/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18/10/22','%y0/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18/10/22','%Y0/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18a/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18a/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('20188/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('2018510522','%Y5%m5%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018^10^22','%Y^%m^%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018@10@22','%Y@%m@%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018%10%22','%Y%%m%%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('2018(10(22','%Y(%m(%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018\10\22','%Y\%m\%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('2018=10=22','%Y=%m=%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018+10+22','%Y+%m+%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018_10_22','%Y_%m_%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('69510522','%y5%m5%d') from dual`).Check(testkit.Rows("2069-10-22"))
tk.MustQuery(`select str_to_date('69^10^22','%y^%m^%d') from dual`).Check(testkit.Rows("2069-10-22"))
tk.MustQuery(`select str_to_date('18@10@22','%y@%m@%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('18%10%22','%y%%m%%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18(10(22','%y(%m(%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('18\10\22','%y\%m\%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18+10+22','%y+%m+%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('18=10=22','%y=%m=%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('18_10_22','%y_%m_%d') from dual`).Check(testkit.Rows("2018-10-22"))
}
func (s *testSuiteP1) TestReadPartitionedTable(c *C) {
// Test the three readers (table reader, index reader, index lookup) on a partitioned table.
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists pt")
tk.MustExec("create table pt (a int, b int, index i_b(b)) partition by range (a) (partition p1 values less than (2), partition p2 values less than (4), partition p3 values less than (6))")
for i := 0; i < 6; i++ {
tk.MustExec(fmt.Sprintf("insert into pt values(%d, %d)", i, i))
}
// Table reader
tk.MustQuery("select * from pt order by a").Check(testkit.Rows("0 0", "1 1", "2 2", "3 3", "4 4", "5 5"))
// Index reader
tk.MustQuery("select b from pt where b = 3").Check(testkit.Rows("3"))
// Index lookup
tk.MustQuery("select a from pt where b = 3").Check(testkit.Rows("3"))
}
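// TestSplitRegion exercises the `split table` syntax for both index and
// record regions, covering bad bounds, invalid region counts, value
// truncation, and batch splits.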
func (s *testSuiteP1) TestSplitRegion(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t, t1")
tk.MustExec("create table t(a varchar(100),b int, index idx1(b,a))")
tk.MustExec(`split table t index idx1 by (10000,"abcd"),(10000000);`)
_, err := tk.Exec(`split table t index idx1 by ("abcd");`)
c.Assert(err, NotNil)
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, terror.ErrCode(mysql.WarnDataTruncated))
// Test for split index region.
tk.MustExec(`split table t index idx1 between (0) and (1000000000) regions 10`)
// Check the error when the lower value is greater than the upper value.
_, err = tk.Exec(`split table t index idx1 between (2,'a') and (1,'c') regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split index `idx1` region lower value (2,a) should less than the upper value (1,c)")
// Check min value is invalid.
_, err = tk.Exec(`split table t index idx1 between () and (1) regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split index `idx1` region lower value count should more than 0")
// Check max value is invalid.
_, err = tk.Exec(`split table t index idx1 between (1) and () regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split index `idx1` region upper value count should more than 0")
// Check pre-split region num is too large.
_, err = tk.Exec(`split table t index idx1 between (0) and (1000000000) regions 10000`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split index region num exceeded the limit 1000")
// Check pre-split region num 0 is invalid.
_, err = tk.Exec(`split table t index idx1 between (0) and (1000000000) regions 0`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split index region num should more than 0")
// Test truncate error msg.
_, err = tk.Exec(`split table t index idx1 between ("aa") and (1000000000) regions 0`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "[types:1265]Incorrect value: 'aa' for column 'b'")
// Test for split table region.
tk.MustExec(`split table t between (0) and (1000000000) regions 10`)
// Check the lower value is more than the upper value.
_, err = tk.Exec(`split table t between (2) and (1) regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split table `t` region lower value 2 should less than the upper value 1")
// Check the lower value is invalid.
_, err = tk.Exec(`split table t between () and (1) regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split table region lower value count should be 1")
// Check upper value is invalid.
_, err = tk.Exec(`split table t between (1) and () regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split table region upper value count should be 1")
// Check pre-split region num is too large.
_, err = tk.Exec(`split table t between (0) and (1000000000) regions 10000`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split table region num exceeded the limit 1000")
// Check pre-split region num 0 is invalid.
_, err = tk.Exec(`split table t between (0) and (1000000000) regions 0`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split table region num should more than 0")
// Test truncate error msg.
_, err = tk.Exec(`split table t between ("aa") and (1000000000) regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "[types:1265]Incorrect value: 'aa' for column '_tidb_rowid'")
// Test split table region step is too small.
_, err = tk.Exec(`split table t between (0) and (100) regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split table `t` region step value should more than 1000, step 10 is invalid")
// Test split region by syntax.
tk.MustExec(`split table t by (0),(1000),(1000000)`)
// Split the table twice to exercise multiple batch split region requests.
tk.MustExec("create table t1(a int, b int)")
tk.MustQuery("split table t1 between(0) and (10000) regions 10;").Check(testkit.Rows("9 1"))
tk.MustQuery("split table t1 between(10) and (10010) regions 5;").Check(testkit.Rows("4 1"))
}
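// TestShowTableRegion checks the region start keys reported by
// `show table ... regions` after various splits, including index splits,
// partitioned tables, and pre-split tables.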
func (s *testSuite) TestShowTableRegion(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t_regions")
tk.MustExec("create table t_regions (a int key, b int, c int, index idx(b), index idx2(c))")
// Test show table regions.
tk.MustQuery(`split table t_regions between (-10000) and (10000) regions 4;`).Check(testkit.Rows("4 1"))
re := tk.MustQuery("show table t_regions regions")
rows := re.Rows()
// Table t_regions should have 5 regions now.
// 4 regions to store record data.
// 1 region to store index data.
c.Assert(len(rows), Equals, 5)
c.Assert(len(rows[0]), Equals, 11)
tbl := testGetTableByName(c, tk.Se, "test", "t_regions")
// Check the region start key.
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_r", tbl.Meta().ID))
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_-5000", tbl.Meta().ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_0", tbl.Meta().ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_5000", tbl.Meta().ID))
c.Assert(rows[4][2], Equals, fmt.Sprintf("t_%d_r", tbl.Meta().ID))
// Test show table index regions.
tk.MustQuery(`split table t_regions index idx between (-1000) and (1000) regions 4;`).Check(testkit.Rows("5 1"))
re = tk.MustQuery("show table t_regions index idx regions")
rows = re.Rows()
// The index `idx` of table t_regions should have 4 regions now.
c.Assert(len(rows), Equals, 4)
// Check the region start key.
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_i_1_", tbl.Meta().ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[3][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
re = tk.MustQuery("show table t_regions regions")
rows = re.Rows()
// Table t_regions should have 9 regions now:
// 4 regions to store record data,
// 4 regions to store index idx data,
// 1 region to store index idx2 data.
c.Assert(len(rows), Equals, 9)
// Check the region start key.
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_r", tbl.Meta().ID))
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_-5000", tbl.Meta().ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_0", tbl.Meta().ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_5000", tbl.Meta().ID))
c.Assert(rows[4][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[5][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[6][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[7][2], Equals, fmt.Sprintf("t_%d_i_2_", tbl.Meta().ID))
c.Assert(rows[8][2], Equals, fmt.Sprintf("t_%d_r", tbl.Meta().ID))
// Test an unsigned primary key and wait for region scattering to finish.
tk.MustExec("drop table if exists t_regions")
tk.MustExec("create table t_regions (a int unsigned key, b int, index idx(b))")
// Test show table regions.
tk.MustExec(`set @@session.tidb_wait_split_region_finish=1;`)
tk.MustQuery(`split table t_regions by (2500),(5000),(7500);`).Check(testkit.Rows("3 1"))
re = tk.MustQuery("show table t_regions regions")
rows = re.Rows()
// Table t_regions should have 4 regions now.
c.Assert(len(rows), Equals, 4)
tbl = testGetTableByName(c, tk.Se, "test", "t_regions")
// Check the region start key.
c.Assert(rows[0][1], Matches, "t_.*")
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_2500", tbl.Meta().ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_5000", tbl.Meta().ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_7500", tbl.Meta().ID))
// Test show table index regions.
tk.MustQuery(`split table t_regions index idx by (250),(500),(750);`).Check(testkit.Rows("4 1"))
re = tk.MustQuery("show table t_regions index idx regions")
rows = re.Rows()
// The index `idx` of table t_regions should have 4 regions now.
c.Assert(len(rows), Equals, 4)
// Check the region start key.
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_i_1_", tbl.Meta().ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[3][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
// Test show table regions for a partitioned table when splitting regions at table creation is disabled.
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 0)
tk.MustExec("drop table if exists partition_t;")
tk.MustExec("set @@session.tidb_enable_table_partition = '1';")
tk.MustExec("create table partition_t (a int, b int,index(a)) partition by hash (a) partitions 3")
re = tk.MustQuery("show table partition_t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 1)
c.Assert(rows[0][1], Matches, "t_.*")
// Test show table regions for a partitioned table when splitting regions at table creation is enabled.
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
tk.MustExec("set @@global.tidb_scatter_region=1;")
tk.MustExec("drop table if exists partition_t;")
tk.MustExec("create table partition_t (a int, b int,index(a)) partition by hash (a) partitions 3")
re = tk.MustQuery("show table partition_t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 3)
tbl = testGetTableByName(c, tk.Se, "test", "partition_t")
partitionDef := tbl.Meta().GetPartitionInfo().Definitions
c.Assert(rows[0][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[0].ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[1].ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[2].ID))
// Test pre-splitting table regions at table creation.
tk.MustExec("drop table if exists t_pre")
tk.MustExec("create table t_pre (a int, b int) shard_row_id_bits = 2 pre_split_regions=2;")
re = tk.MustQuery("show table t_pre regions")
rows = re.Rows()
// Table t_pre should have 4 regions now.
c.Assert(len(rows), Equals, 4)
tbl = testGetTableByName(c, tk.Se, "test", "t_pre")
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_2305843009213693952", tbl.Meta().ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_4611686018427387904", tbl.Meta().ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_6917529027641081856", tbl.Meta().ID))
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 0)
}
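// testGetTableByName reloads the schema and returns the current table object
// for the given database and table names.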
func testGetTableByName(c *C, ctx sessionctx.Context, db, table string) table.Table {
dom := domain.GetDomain(ctx)
// Make sure the table schema is the new schema.
err := dom.Reload()
c.Assert(err, IsNil)
tbl, err := dom.InfoSchema().TableByName(model.NewCIStr(db), model.NewCIStr(table))
c.Assert(err, IsNil)
return tbl
}
func (s *testSuiteP1) TestIssue10435(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1(i int, j int, k int)")
tk.MustExec("insert into t1 VALUES (1,1,1),(2,2,2),(3,3,3),(4,4,4)")
tk.MustExec("INSERT INTO t1 SELECT 10*i,j,5*j FROM t1 UNION SELECT 20*i,j,5*j FROM t1 UNION SELECT 30*i,j,5*j FROM t1")
tk.MustExec("set @@session.tidb_enable_window_function=1")
tk.MustQuery("SELECT SUM(i) OVER W FROM t1 WINDOW w AS (PARTITION BY j ORDER BY i) ORDER BY 1+SUM(i) OVER w").Check(
testkit.Rows("1", "2", "3", "4", "11", "22", "31", "33", "44", "61", "62", "93", "122", "124", "183", "244"),
)
}
func (s *testSuiteP1) TestUnsignedFeedback(c *C) {
tk := testkit.NewTestKit(c, s.store)
oriProbability := statistics.FeedbackProbability.Load()
statistics.FeedbackProbability.Store(1.0)
defer func() { statistics.FeedbackProbability.Store(oriProbability) }()
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a bigint unsigned, b int, primary key(a))")
tk.MustExec("insert into t values (1,1),(2,2)")
tk.MustExec("analyze table t")
tk.MustQuery("select count(distinct b) from t").Check(testkit.Rows("2"))
result := tk.MustQuery("explain analyze select count(distinct b) from t")
c.Assert(result.Rows()[2][3], Equals, "table:t, range:[0,+inf], keep order:false")
}
type testOOMSuite struct {
store kv.Storage
do *domain.Domain
oom *oomCapturer
}
func (s *testOOMSuite) SetUpSuite(c *C) {
c.Skip("log.ReplaceGlobals(lg, r) in registerHook() may result in data race")
testleak.BeforeTest()
s.registerHook()
var err error
s.store, err = mockstore.NewMockTikvStore()
c.Assert(err, IsNil)
session.SetSchemaLease(0)
domain.RunAutoAnalyze = false
s.do, err = session.BootstrapSession(s.store)
c.Assert(err, IsNil)
}
func (s *testOOMSuite) registerHook() {
conf := &log.Config{Level: "info", File: log.FileLogConfig{}}
_, r, _ := log.InitLogger(conf)
s.oom = &oomCapturer{r.Core, ""}
lg := zap.New(s.oom)
log.ReplaceGlobals(lg, r)
}
func (s *testOOMSuite) TestDistSQLMemoryControl(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (id int, a int, b int, index idx_a(`a`))")
tk.MustExec("insert into t values (1,1,1), (2,2,2), (3,3,3)")
s.oom.tracker = ""
tk.MustQuery("select * from t")
c.Assert(s.oom.tracker, Equals, "")
tk.Se.GetSessionVars().MemQuotaDistSQL = 1
tk.MustQuery("select * from t")
c.Assert(s.oom.tracker, Equals, "TableReaderDistSQLTracker")
tk.Se.GetSessionVars().MemQuotaDistSQL = -1
s.oom.tracker = ""
tk.MustQuery("select a from t")
c.Assert(s.oom.tracker, Equals, "")
tk.Se.GetSessionVars().MemQuotaDistSQL = 1
tk.MustQuery("select a from t use index(idx_a)")
c.Assert(s.oom.tracker, Equals, "IndexReaderDistSQLTracker")
tk.Se.GetSessionVars().MemQuotaDistSQL = -1
s.oom.tracker = ""
tk.MustQuery("select * from t")
c.Assert(s.oom.tracker, Equals, "")
tk.Se.GetSessionVars().MemQuotaDistSQL = 1
tk.MustQuery("select * from t use index(idx_a)")
c.Assert(s.oom.tracker, Equals, "IndexLookupDistSQLTracker")
tk.Se.GetSessionVars().MemQuotaDistSQL = -1
}
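// setOOMAction installs a fresh global config whose OOMAction is set to the
// given value; callers restore the original action afterwards.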
func setOOMAction(action string) {
newConf := config.NewConfig()
newConf.OOMAction = action
config.StoreGlobalConfig(newConf)
}
func (s *testSuite) TestOOMPanicAction(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key, b double);")
tk.MustExec("insert into t values (1,1)")
sm := &mockSessionManager1{
PS: make([]*util.ProcessInfo, 0),
}
tk.Se.SetSessionManager(sm)
s.domain.ExpensiveQueryHandle().SetSessionManager(sm)
orgAction := config.GetGlobalConfig().OOMAction
setOOMAction(config.OOMActionCancel)
defer func() {
setOOMAction(orgAction)
}()
tk.MustExec("set @@tidb_mem_quota_query=1;")
err := tk.QueryToErr("select sum(b) from t group by a;")
c.Assert(err, NotNil)
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
// Test that insert-from-select also triggers the OOM panic action.
tk.MustExec("drop table if exists t,t1")
tk.MustExec("create table t (a bigint);")
tk.MustExec("create table t1 (a bigint);")
tk.MustExec("insert into t1 values (1),(2),(3),(4),(5);")
tk.MustExec("set @@tidb_mem_quota_query=200;")
_, err = tk.Exec("insert into t select a from t1 order by a desc;")
c.Assert(err, NotNil)
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
}
type oomCapturer struct {
zapcore.Core
tracker string
}
func (h *oomCapturer) Write(entry zapcore.Entry, fields []zapcore.Field) error {
if strings.Contains(entry.Message, "memory exceeds quota") {
err, _ := fields[0].Interface.(error)
str := err.Error()
begin := strings.Index(str, "8001]")
if begin == -1 {
panic("begin not found")
}
end := strings.Index(str, " holds")
if end == -1 {
panic("end not found")
}
h.tracker = str[begin+len("8001]") : end]
}
return nil
}
func (h *oomCapturer) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
if h.Enabled(e.Level) {
return ce.AddCore(e, h)
}
return ce
}
type testRecoverTable struct {
store kv.Storage
dom *domain.Domain
cluster *mocktikv.Cluster
cli *regionProperityClient
}
func (s *testRecoverTable) SetUpSuite(c *C) {
cli := &regionProperityClient{}
hijackClient := func(c tikv.Client) tikv.Client {
cli.Client = c
return cli
}
s.cli = cli
var err error
s.cluster = mocktikv.NewCluster()
mocktikv.BootstrapWithSingleStore(s.cluster)
s.store, err = mockstore.NewMockTikvStore(
mockstore.WithHijackClient(hijackClient),
mockstore.WithCluster(s.cluster),
)
c.Assert(err, IsNil)
s.dom, err = session.BootstrapSession(s.store)
c.Assert(err, IsNil)
}
func (s *testRecoverTable) TestRecoverTable(c *C) {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange", `return(true)`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange"), IsNil)
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database if not exists test_recover")
tk.MustExec("use test_recover")
tk.MustExec("drop table if exists t_recover, t_recover2")
tk.MustExec("create table t_recover (a int);")
defer func(originGC bool) {
if originGC {
ddl.EmulatorGCEnable()
} else {
ddl.EmulatorGCDisable()
}
}(ddl.IsEmulatorGCEnable())
// Disable emulator GC.
// Otherwise emulator GC will delete the table record as soon as possible after executing the drop table DDL.
ddl.EmulatorGCDisable()
gcTimeFormat := "20060102-15:04:05 -0700 MST"
timeBeforeDrop := time.Now().Add(-48 * time.Hour).Format(gcTimeFormat)
timeAfterDrop := time.Now().Add(48 * time.Hour).Format(gcTimeFormat)
safePointSQL := `INSERT HIGH_PRIORITY INTO mysql.tidb VALUES ('tikv_gc_safe_point', '%[1]s', '')
ON DUPLICATE KEY
UPDATE variable_value = '%[1]s'`
// clear GC variables first.
tk.MustExec("delete from mysql.tidb where variable_name in ( 'tikv_gc_safe_point','tikv_gc_enable' )")
tk.MustExec("insert into t_recover values (1),(2),(3)")
tk.MustExec("drop table t_recover")
// If the GC safe point does not exist in mysql.tidb, recovery should fail.
_, err := tk.Exec("recover table t_recover")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "can not get 'tikv_gc_safe_point'")
// set GC safe point
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
// If the GC enable flag does not exist in mysql.tidb, recovery should fail.
_, err = tk.Exec("recover table t_recover")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "[ddl:-1]can not get 'tikv_gc_enable'")
err = gcutil.EnableGC(tk.Se)
c.Assert(err, IsNil)
// If the recover job is before the GC safe point, recovery should fail.
tk.MustExec(fmt.Sprintf(safePointSQL, timeAfterDrop))
_, err = tk.Exec("recover table t_recover")
c.Assert(err, NotNil)
c.Assert(strings.Contains(err.Error(), "snapshot is older than GC safe point"), Equals, true)
// set GC safe point
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
// If a new table with the same name exists, recovery should fail.
tk.MustExec("create table t_recover (a int);")
_, err = tk.Exec("recover table t_recover")
c.Assert(err.Error(), Equals, infoschema.ErrTableExists.GenWithStackByArgs("t_recover").Error())
// drop the new table with the same name, then recover table.
tk.MustExec("rename table t_recover to t_recover2")
// do recover table.
tk.MustExec("recover table t_recover")
// check recover table meta and data record.
tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1", "2", "3"))
// check recover table autoID.
tk.MustExec("insert into t_recover values (4),(5),(6)")
tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1", "2", "3", "4", "5", "6"))
// check rebase auto id.
tk.MustQuery("select a,_tidb_rowid from t_recover;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002", "6 5003"))
// Recovering a table by a non-existent job ID should fail.
_, err = tk.Exec(fmt.Sprintf("recover table by job %d", 10000000))
c.Assert(err, NotNil)
// Manually disable GC first; after recovering the table, the GC enable status should remain disabled.
err = gcutil.DisableGC(tk.Se)
c.Assert(err, IsNil)
tk.MustExec("delete from t_recover where a > 1")
tk.MustExec("drop table t_recover")
tk.MustExec("recover table t_recover")
// check recover table meta and data record.
tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1"))
// check recover table autoID.
tk.MustExec("insert into t_recover values (7),(8),(9)")
tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1", "7", "8", "9"))
gcEnable, err := gcutil.CheckGCEnable(tk.Se)
c.Assert(err, IsNil)
c.Assert(gcEnable, Equals, false)
}
func (s *testSuiteP1) TestPointGetPreparedPlan(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("drop database if exists ps_text")
defer tk1.MustExec("drop database if exists ps_text")
tk1.MustExec("create database ps_text")
tk1.MustExec("use ps_text")
tk1.MustExec(`create table t (a int, b int, c int,
primary key k_a(a),
unique key k_b(b))`)
tk1.MustExec("insert into t values (1, 1, 1)")
tk1.MustExec("insert into t values (2, 2, 2)")
tk1.MustExec("insert into t values (3, 3, 3)")
pspk1Id, _, _, err := tk1.Se.PrepareStmt("select * from t where a = ?")
c.Assert(err, IsNil)
pspk2Id, _, _, err := tk1.Se.PrepareStmt("select * from t where ? = a ")
c.Assert(err, IsNil)
ctx := context.Background()
// first time plan generated
rs, err := tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
// using the generated plan but with different params
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3"))
// unique index
psuk1Id, _, _, err := tk1.Se.PrepareStmt("select * from t where b = ? ")
c.Assert(err, IsNil)
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
// test schema changed, cached plan should be invalidated
tk1.MustExec("alter table t add column col4 int default 10 after c")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10"))
tk1.MustExec("alter table t drop index k_b")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
tk1.MustExec(`insert into t values(4, 3, 3, 11)`)
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10", "4 3 3 11"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
tk1.MustExec("delete from t where a = 4")
tk1.MustExec("alter table t add index k_b(b)")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
// use pk again
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10"))
}
func (s *testSuiteP1) TestPointGetPreparedPlanWithCommitMode(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("drop database if exists ps_text")
defer tk1.MustExec("drop database if exists ps_text")
tk1.MustExec("create database ps_text")
tk1.MustExec("use ps_text")
tk1.MustExec(`create table t (a int, b int, c int,
primary key k_a(a),
unique key k_b(b))`)
tk1.MustExec("insert into t values (1, 1, 1)")
tk1.MustExec("insert into t values (2, 2, 2)")
tk1.MustExec("insert into t values (3, 3, 3)")
pspk1Id, _, _, err := tk1.Se.PrepareStmt("select * from t where a = ?")
c.Assert(err, IsNil)
ctx := context.Background()
// first time plan generated
rs, err := tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
// using the generated plan but with different params
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
// Next, start a non-autocommit txn.
tk1.MustExec("set autocommit = 0")
tk1.MustExec("begin")
// Try to exec using the point get plan (this plan should not take the short path).
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
// update rows
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use ps_text")
tk2.MustExec("update t set c = c + 10 where c = 1")
// try to point get again
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
// try to update in session 1
tk1.MustExec("update t set c = c + 10 where c = 1")
_, err = tk1.Exec("commit")
c.Assert(kv.ErrWriteConflict.Equal(err), IsTrue, Commentf("error: %s", err))
// verify
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 11"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2"))
tk2.MustQuery("select * from t where a = 1").Check(testkit.Rows("1 1 11"))
}
func (s *testSuiteP1) TestPointUpdatePreparedPlan(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("drop database if exists pu_test")
defer tk1.MustExec("drop database if exists pu_test")
tk1.MustExec("create database pu_test")
tk1.MustExec("use pu_test")
tk1.MustExec(`create table t (a int, b int, c int,
primary key k_a(a),
unique key k_b(b))`)
tk1.MustExec("insert into t values (1, 1, 1)")
tk1.MustExec("insert into t values (2, 2, 2)")
tk1.MustExec("insert into t values (3, 3, 3)")
updateID1, pc, _, err := tk1.Se.PrepareStmt(`update t set c = c + 1 where a = ?`)
c.Assert(err, IsNil)
c.Assert(pc, Equals, 1)
updateID2, pc, _, err := tk1.Se.PrepareStmt(`update t set c = c + 2 where ? = a`)
c.Assert(err, IsNil)
c.Assert(pc, Equals, 1)
ctx := context.Background()
// first time plan generated
rs, err := tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 4"))
// using the generated plan but with different params
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 5"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 6"))
// updateID2
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID2, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 8"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID2, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 10"))
// unique index
updUkID1, _, _, err := tk1.Se.PrepareStmt(`update t set c = c + 10 where b = ?`)
c.Assert(err, IsNil)
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 20"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 30"))
// test schema changed, cached plan should be invalidated
tk1.MustExec("alter table t add column col4 int default 10 after c")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 31 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 32 10"))
tk1.MustExec("alter table t drop index k_b")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 42 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 52 10"))
tk1.MustExec("alter table t add unique index k_b(b)")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 62 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 72 10"))
tk1.MustQuery("select * from t where a = 1").Check(testkit.Rows("1 1 1 10"))
tk1.MustQuery("select * from t where a = 2").Check(testkit.Rows("2 2 2 10"))
}
func (s *testSuiteP1) TestPointUpdatePreparedPlanWithCommitMode(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("drop database if exists pu_test2")
defer tk1.MustExec("drop database if exists pu_test2")
tk1.MustExec("create database pu_test2")
tk1.MustExec("use pu_test2")
tk1.MustExec(`create table t (a int, b int, c int,
primary key k_a(a),
unique key k_b(b))`)
tk1.MustExec("insert into t values (1, 1, 1)")
tk1.MustExec("insert into t values (2, 2, 2)")
tk1.MustExec("insert into t values (3, 3, 3)")
ctx := context.Background()
updateID1, _, _, err := tk1.Se.PrepareStmt(`update t set c = c + 1 where a = ?`)
c.Assert(err, IsNil)
// first time plan generated
rs, err := tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 4"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 5"))
// Next, start a non-autocommit txn.
tk1.MustExec("set autocommit = 0")
tk1.MustExec("begin")
// Try to exec using the point get plan (this plan should not take the short path).
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 6"))
// update rows
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use pu_test2")
tk2.MustExec(`prepare pu2 from "update t set c = c + 2 where ? = a "`)
tk2.MustExec("set @p3 = 3")
tk2.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 5"))
tk2.MustExec("execute pu2 using @p3")
tk2.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 7"))
tk2.MustExec("execute pu2 using @p3")
tk2.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 9"))
// try to update in session 1
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 6"))
_, err = tk1.Exec("commit")
c.Assert(kv.ErrWriteConflict.Equal(err), IsTrue, Commentf("error: %s", err))
// verify
tk2.MustQuery("select * from t where a = 1").Check(testkit.Rows("1 1 1"))
tk1.MustQuery("select * from t where a = 2").Check(testkit.Rows("2 2 2"))
tk2.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 9"))
tk1.MustQuery("select * from t where a = 2").Check(testkit.Rows("2 2 2"))
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 9"))
// Again, start a non-autocommit txn.
tk1.MustExec("set autocommit = 0")
tk1.MustExec("begin")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 11"))
tk1.MustExec("commit")
tk2.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 11"))
}
|
[
"\"log_level\""
] |
[] |
[
"log_level"
] |
[]
|
["log_level"]
|
go
| 1 | 0 | |
queries/activity.py
|
import os
from string import Template
from datetime import datetime
from pytz import timezone
from escape_helpers import sparql_escape_uri, sparql_escape_string, sparql_escape_int, sparql_escape_datetime
TIMEZONE = timezone('Europe/Brussels')
APPLICATION_GRAPH = "http://mu.semte.ch/application"
SIGNING_PREP_ACT_TYPE_URI = "http://kanselarij.vo.data.gift/id/concept/activiteit-types/001d38fb-b285-41ef-a252-4e70208e9266"
SIGNING_ACT_TYPE_URI = "http://mu.semte.ch/vocabularies/ext/publicatie/Handtekenactiviteit"
SIGNING_WRAP_TYPE_URI = "http://kanselarij.vo.data.gift/id/concept/activiteit-types/d05978cb-3219-4ed4-9ab5-45b03c58a0ae"
SH_DOC_TYPE_URI = "http://mu.semte.ch/vocabularies/ext/signinghub/Document"
sh_package_base_uri = os.environ.get("SIGNINGHUB_API_URL", "http://kanselarij.vo.data.gift/").strip("/") + "/"
SH_DOC_BASE_URI = "{}package/{{package_id}}/document/{{document_id}}".format(sh_package_base_uri)
def construct_insert_signing_prep_activity(activity,
signing_subcase_uri,
file_uri,
sh_package_id,
sh_document_id,
graph=APPLICATION_GRAPH):
sh_doc_uri = SH_DOC_BASE_URI.format(package_id=sh_package_id, document_id=sh_document_id)
query_template = Template("""
PREFIX prov: <http://www.w3.org/ns/prov#>
PREFIX mu: <http://mu.semte.ch/vocabularies/core/>
PREFIX dct: <http://purl.org/dc/terms/>
PREFIX dossier: <https://data.vlaanderen.be/ns/dossier#>
PREFIX sh: <http://mu.semte.ch/vocabularies/ext/signinghub/>
PREFIX ext: <http://mu.semte.ch/vocabularies/ext/>
INSERT {
GRAPH $graph {
$signing_prep a prov:Activity ;
mu:uuid $uuid ;
dct:type $type .
$signing_prep dossier:vindtPlaatsTijdens $signing_subcase .
$signing_prep ext:gebruiktBestand $file .
$signing_prep sh:document $sh_doc .
$sh_doc a $sh_doc_type ;
sh:packageId $sh_package_id ;
sh:documentId $sh_document_id ;
prov:hadPrimarySource $file .
}
}
WHERE {
GRAPH $graph {
$signing_subcase a dossier:Procedurestap .
}
}
""")
return query_template.substitute(
graph=sparql_escape_uri(graph),
signing_prep=sparql_escape_uri(activity["uri"]),
signing_subcase=sparql_escape_uri(signing_subcase_uri),
uuid=sparql_escape_string(activity["uuid"]),
type=sparql_escape_uri(SIGNING_PREP_ACT_TYPE_URI),
file=sparql_escape_uri(file_uri),
sh_doc=sparql_escape_uri(sh_doc_uri),
sh_doc_type=sparql_escape_uri(SH_DOC_TYPE_URI),
sh_package_id=sparql_escape_string(sh_package_id),
sh_document_id=sparql_escape_string(sh_document_id))
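# Illustrative sketch (not part of the original file; all URIs and IDs below are
# hypothetical): callers pass an activity dict carrying "uri" and "uuid" keys.
#
#     query = construct_insert_signing_prep_activity(
#         {"uri": "http://example.com/id/activiteit/1", "uuid": "1"},
#         "http://example.com/id/procedurestap/2",
#         "http://example.com/id/bestand/3",
#         "pkg-123",
#         "doc-456")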
def construct_get_signing_prep_from_subcase_file(signing_subcase_uri,
file_uri,
graph=APPLICATION_GRAPH):
query_template = Template("""
PREFIX prov: <http://www.w3.org/ns/prov#>
PREFIX mu: <http://mu.semte.ch/vocabularies/core/>
PREFIX dct: <http://purl.org/dc/terms/>
PREFIX dossier: <https://data.vlaanderen.be/ns/dossier#>
PREFIX sh: <http://mu.semte.ch/vocabularies/ext/signinghub/>
PREFIX ext: <http://mu.semte.ch/vocabularies/ext/>
SELECT (?signing_prep AS ?uri) ?sh_package_id ?sh_document_id ?signing
WHERE {
GRAPH $graph {
?signing_prep a prov:Activity ;
dct:type $prep_type .
?signing_prep dossier:vindtPlaatsTijdens $signing_subcase .
?signing_prep sh:document ?sh_doc .
?signing_prep ext:gebruiktBestand $file .
?sh_doc sh:packageId ?sh_package_id ;
sh:documentId ?sh_document_id .
OPTIONAL {
?signing a prov:Activity ;
dct:type $sign_type ;
dossier:vindtPlaatsTijdens $signing_subcase ;
prov:wasInformedBy ?signing_prep .
}
}
}
""")
return query_template.substitute(
graph=sparql_escape_uri(graph),
signing_subcase=sparql_escape_uri(signing_subcase_uri),
prep_type=sparql_escape_uri(SIGNING_PREP_ACT_TYPE_URI),
sign_type=sparql_escape_uri(SIGNING_ACT_TYPE_URI),
file=sparql_escape_uri(file_uri))
def construct_get_signing_prep_from_sh_package_id(sh_package_id,
graph=APPLICATION_GRAPH):
query_template = Template("""
PREFIX prov: <http://www.w3.org/ns/prov#>
PREFIX dct: <http://purl.org/dc/terms/>
PREFIX sh: <http://mu.semte.ch/vocabularies/ext/signinghub/>
PREFIX ext: <http://mu.semte.ch/vocabularies/ext/>
SELECT DISTINCT (?signing_prep AS ?uri) ?sh_document_id ?used_file
WHERE {
GRAPH $graph {
?signing_prep a prov:Activity ;
dct:type $prep_type .
?signing_prep sh:document ?sh_doc ;
ext:gebruiktBestand ?used_file .
?sh_doc sh:packageId $sh_package_id ;
sh:documentId ?sh_document_id .
}
}
""")
return query_template.substitute(
graph=sparql_escape_uri(graph),
prep_type=sparql_escape_uri(SIGNING_PREP_ACT_TYPE_URI),
sh_package_id=sparql_escape_string(sh_package_id))
def construct_get_signing_preps_from_subcase(signing_subcase_uri,
graph=APPLICATION_GRAPH):
query_template = Template("""
PREFIX prov: <http://www.w3.org/ns/prov#>
PREFIX mu: <http://mu.semte.ch/vocabularies/core/>
PREFIX dct: <http://purl.org/dc/terms/>
PREFIX dossier: <https://data.vlaanderen.be/ns/dossier#>
PREFIX ext: <http://mu.semte.ch/vocabularies/ext/>
SELECT DISTINCT (?signing_prep AS ?uri) ?file ?file_id
WHERE {
GRAPH $graph {
?signing_prep a prov:Activity ;
dct:type $prep_type .
?signing_prep dossier:vindtPlaatsTijdens $signing_subcase .
?signing_prep ext:gebruiktBestand ?file .
?file mu:uuid ?file_id .
}
}
""")
return query_template.substitute(
graph=sparql_escape_uri(graph),
signing_subcase=sparql_escape_uri(signing_subcase_uri),
prep_type=sparql_escape_uri(SIGNING_PREP_ACT_TYPE_URI))
def construct_insert_signing_activity(activity,
signing_prep_uri,
mandatee_uri,
graph=APPLICATION_GRAPH):
query_template = Template("""
PREFIX mandaat: <http://data.vlaanderen.be/ns/mandaat#>
PREFIX prov: <http://www.w3.org/ns/prov#>
PREFIX mu: <http://mu.semte.ch/vocabularies/core/>
PREFIX dct: <http://purl.org/dc/terms/>
PREFIX dossier: <https://data.vlaanderen.be/ns/dossier#>
INSERT {
GRAPH $graph {
$signing a prov:Activity ;
mu:uuid $uuid ;
dct:type $type ;
prov:wasInformedBy $signing_prep ;
prov:qualifiedAssociation $mandatee .
$signing dossier:vindtPlaatsTijdens ?signing_subcase .
}
}
WHERE {
GRAPH $graph {
$signing_prep a prov:Activity ;
dct:type $prep_type ;
dossier:vindtPlaatsTijdens ?signing_subcase .
$mandatee a mandaat:Mandataris .
}
}
""")
return query_template.substitute(
graph=sparql_escape_uri(graph),
signing=sparql_escape_uri(activity["uri"]),
uuid=sparql_escape_string(activity["uuid"]),
type=sparql_escape_uri(SIGNING_ACT_TYPE_URI),
prep_type=sparql_escape_uri(SIGNING_PREP_ACT_TYPE_URI),
signing_prep=sparql_escape_uri(signing_prep_uri),
mandatee=sparql_escape_uri(mandatee_uri))
def construct_end_prep_start_signing(signing_prep_uri,
time,
graph=APPLICATION_GRAPH):
query_template = Template("""
PREFIX prov: <http://www.w3.org/ns/prov#>
PREFIX dct: <http://purl.org/dc/terms/>
PREFIX dossier: <https://data.vlaanderen.be/ns/dossier#>
INSERT {
GRAPH $graph {
$signing_prep dossier:Activiteit.einddatum $time .
?signing dossier:Activiteit.startdatum $time .
}
}
WHERE {
GRAPH $graph {
$signing_prep a prov:Activity ;
dct:type $prep_type .
?signing a prov:Activity ;
dct:type $sig_type ;
prov:wasInformedBy $signing_prep .
}
}
""")
return query_template.substitute(
graph=sparql_escape_uri(graph),
signing_prep=sparql_escape_uri(signing_prep_uri),
prep_type=sparql_escape_uri(SIGNING_PREP_ACT_TYPE_URI),
sig_type=sparql_escape_uri(SIGNING_ACT_TYPE_URI),
time=sparql_escape_datetime(time))
def construct_update_signing_activity(sh_package_id,
sh_document_id,
mandatee_uri,
end_time,
graph=APPLICATION_GRAPH):
query_template = Template("""
PREFIX dossier: <https://data.vlaanderen.be/ns/dossier#>
PREFIX prov: <http://www.w3.org/ns/prov#>
PREFIX dct: <http://purl.org/dc/terms/>
PREFIX sh: <http://mu.semte.ch/vocabularies/ext/signinghub/>
PREFIX mandaat: <http://data.vlaanderen.be/ns/mandaat#>
INSERT {
GRAPH $graph {
?signing dossier:Activiteit.einddatum $end_time .
}
}
WHERE {
GRAPH $graph {
?signing_prep a prov:Activity ;
dct:type $prep_type ;
sh:document ?sh_doc .
?sh_doc sh:packageId $sh_package_id ;
sh:documentId $sh_document_id .
?signing a prov:Activity ;
dct:type $sig_type ;
prov:wasInformedBy ?signing_prep ;
prov:qualifiedAssociation $mandatee .
$mandatee a mandaat:Mandataris .
FILTER NOT EXISTS { ?signing dossier:Activiteit.einddatum ?end_time . }
}
}
""")
return query_template.substitute(
graph=sparql_escape_uri(graph),
prep_type=sparql_escape_uri(SIGNING_PREP_ACT_TYPE_URI),
sh_package_id=sparql_escape_string(sh_package_id),
sh_document_id=sparql_escape_string(sh_document_id),
sig_type=sparql_escape_uri(SIGNING_ACT_TYPE_URI),
mandatee=sparql_escape_uri(mandatee_uri),
end_time=sparql_escape_datetime(end_time))
def construct_insert_wrap_up_activity(sh_package_id,
sh_document_id,
signed_doc,
end_time,
graph=APPLICATION_GRAPH):
query_template = Template("""
PREFIX dossier: <https://data.vlaanderen.be/ns/dossier#>
PREFIX prov: <http://www.w3.org/ns/prov#>
PREFIX dct: <http://purl.org/dc/terms/>
PREFIX sh: <http://mu.semte.ch/vocabularies/ext/signinghub/>
INSERT {
GRAPH $graph {
?signing_wrap_up a prov:Activity ;
dct:type $wrap_up_type ;
prov:wasInformedBy ?signing ;
dossier:Activiteit.einddatum $end_time ;
prov:generated $signed_doc .
}
}
WHERE {
GRAPH $graph {
?signing_prep a prov:Activity ;
dct:type $prep_type ;
sh:document ?sh_doc .
?sh_doc sh:packageId $sh_package_id ;
sh:documentId $sh_document_id .
?signing a prov:Activity ;
dct:type $sig_type ;
prov:wasInformedBy ?signing_prep .
}
}
""")
return query_template.substitute(
graph=sparql_escape_uri(graph),
prep_type=sparql_escape_uri(SIGNING_PREP_ACT_TYPE_URI),
sig_type=sparql_escape_uri(SIGNING_ACT_TYPE_URI),
wrap_up_type=sparql_escape_uri(SIGNING_WRAP_TYPE_URI),
sh_package_id=sparql_escape_string(sh_package_id),
sh_document_id=sparql_escape_string(sh_document_id),
signed_doc=sparql_escape_uri(signed_doc),
end_time=sparql_escape_datetime(end_time))
def construct_get_wrap_up_activity(sh_package_id,
graph=APPLICATION_GRAPH):
query_template = Template("""
PREFIX dossier: <https://data.vlaanderen.be/ns/dossier#>
PREFIX prov: <http://www.w3.org/ns/prov#>
PREFIX dct: <http://purl.org/dc/terms/>
PREFIX sh: <http://mu.semte.ch/vocabularies/ext/signinghub/>
SELECT DISTINCT (?signing_wrap_up AS ?uri) ?signed_doc
WHERE {
GRAPH $graph {
?signing_prep a prov:Activity ;
dct:type $prep_type ;
sh:document ?sh_doc .
?sh_doc sh:packageId $sh_package_id .
?signing a prov:Activity ;
dct:type $sig_type ;
prov:wasInformedBy ?signing_prep .
?signing_wrap_up a prov:Activity ;
dct:type $wrap_up_type ;
prov:wasInformedBy ?signing ;
prov:generated ?signed_doc .
?signed_doc a dossier:Stuk .
}
}
""")
return query_template.substitute(
graph=sparql_escape_uri(graph),
prep_type=sparql_escape_uri(SIGNING_PREP_ACT_TYPE_URI),
sig_type=sparql_escape_uri(SIGNING_ACT_TYPE_URI),
wrap_up_type=sparql_escape_uri(SIGNING_WRAP_TYPE_URI),
sh_package_id=sparql_escape_string(sh_package_id))
|
[] |
[] |
[
"SIGNINGHUB_API_URL"
] |
[]
|
["SIGNINGHUB_API_URL"]
|
python
| 1 | 0 | |
migrationtest/wsgi.py
|
"""
WSGI config for migrationtest project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'migrationtest.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
internal/testutil/config.go
|
// Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
package testutil
import (
"context"
"fmt"
"math"
"os"
"reflect"
"strconv"
"strings"
"sync"
"testing"
"github.com/stretchr/testify/require"
"go.mongodb.org/mongo-driver/event"
"go.mongodb.org/mongo-driver/x/bsonx"
"go.mongodb.org/mongo-driver/x/mongo/driver/connstring"
"go.mongodb.org/mongo-driver/x/mongo/driver/description"
"go.mongodb.org/mongo-driver/x/mongo/driver/topology"
"go.mongodb.org/mongo-driver/x/network/command"
)
var connectionString connstring.ConnString
var connectionStringOnce sync.Once
var connectionStringErr error
var liveTopology *topology.Topology
var liveTopologyOnce sync.Once
var liveTopologyErr error
var monitoredTopology *topology.Topology
var monitoredTopologyOnce sync.Once
var monitoredTopologyErr error
// AddOptionsToURI appends connection string options to a URI.
func AddOptionsToURI(uri string, opts ...string) string {
if !strings.ContainsRune(uri, '?') {
if uri[len(uri)-1] != '/' {
uri += "/"
}
uri += "?"
} else {
uri += "&"
}
for _, opt := range opts {
uri += opt
}
return uri
}
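// Illustrative sketch (not part of the original file): options are joined by
// plain concatenation, so callers pass ready-made key=value fragments, e.g.
//
//	AddOptionsToURI("mongodb://localhost:27017", "ssl=true&sslCertificateAuthorityFile=", "/tmp/ca.pem")
//	// -> "mongodb://localhost:27017/?ssl=true&sslCertificateAuthorityFile=/tmp/ca.pem"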
// AddTLSConfigToURI checks for the environment variable indicating that the tests are being run
// on an SSL-enabled server, and if so, returns a new URI with the necessary configuration.
func AddTLSConfigToURI(uri string) string {
caFile := os.Getenv("MONGO_GO_DRIVER_CA_FILE")
if len(caFile) == 0 {
return uri
}
return AddOptionsToURI(uri, "ssl=true&sslCertificateAuthorityFile=", caFile)
}
// AddCompressorToUri checks for the environment variable indicating that the tests are being run with compression
// enabled. If so, it returns a new URI with the necessary configuration.
func AddCompressorToUri(uri string) string {
comp := os.Getenv("MONGO_GO_DRIVER_COMPRESSOR")
if len(comp) == 0 {
return uri
}
return AddOptionsToURI(uri, "compressors=", comp)
}
// MonitoredTopology returns a new topology with the command monitor attached
func MonitoredTopology(t *testing.T, dbName string, monitor *event.CommandMonitor) *topology.Topology {
cs := ConnString(t)
opts := []topology.Option{
topology.WithConnString(func(connstring.ConnString) connstring.ConnString { return cs }),
topology.WithServerOptions(func(opts ...topology.ServerOption) []topology.ServerOption {
return append(
opts,
topology.WithConnectionOptions(func(opts ...topology.ConnectionOption) []topology.ConnectionOption {
return append(
opts,
topology.WithMonitor(func(*event.CommandMonitor) *event.CommandMonitor {
return monitor
}),
)
}),
)
}),
}
monitoredTopology, err := topology.New(opts...)
if err != nil {
t.Fatal(err)
} else {
monitoredTopology.Connect()
s, err := monitoredTopology.SelectServerLegacy(context.Background(), description.WriteSelector())
require.NoError(t, err)
c, err := s.ConnectionLegacy(context.Background())
require.NoError(t, err)
_, err = (&command.Write{
DB: dbName,
Command: bsonx.Doc{{"dropDatabase", bsonx.Int32(1)}},
}).RoundTrip(context.Background(), s.SelectedDescription(), c)
require.NoError(t, err)
}
return monitoredTopology
}
// GlobalMonitoredTopology gets the globally configured topology and attaches a command monitor.
func GlobalMonitoredTopology(t *testing.T, monitor *event.CommandMonitor) *topology.Topology {
cs := ConnString(t)
opts := []topology.Option{
topology.WithConnString(func(connstring.ConnString) connstring.ConnString { return cs }),
topology.WithServerOptions(func(opts ...topology.ServerOption) []topology.ServerOption {
return append(
opts,
topology.WithConnectionOptions(func(opts ...topology.ConnectionOption) []topology.ConnectionOption {
return append(
opts,
topology.WithMonitor(func(*event.CommandMonitor) *event.CommandMonitor {
return monitor
}),
)
}),
)
}),
}
monitoredTopologyOnce.Do(func() {
var err error
monitoredTopology, err = topology.New(opts...)
if err != nil {
monitoredTopologyErr = err
} else {
monitoredTopology.Connect()
s, err := monitoredTopology.SelectServerLegacy(context.Background(), description.WriteSelector())
require.NoError(t, err)
c, err := s.ConnectionLegacy(context.Background())
require.NoError(t, err)
_, err = (&command.Write{
DB: DBName(t),
Command: bsonx.Doc{{"dropDatabase", bsonx.Int32(1)}},
}).RoundTrip(context.Background(), s.SelectedDescription(), c)
require.NoError(t, err)
}
})
if monitoredTopologyErr != nil {
t.Fatal(monitoredTopologyErr)
}
return monitoredTopology
}
// Topology gets the globally configured topology.
func Topology(t *testing.T) *topology.Topology {
cs := ConnString(t)
liveTopologyOnce.Do(func() {
var err error
liveTopology, err = topology.New(topology.WithConnString(func(connstring.ConnString) connstring.ConnString { return cs }))
if err != nil {
liveTopologyErr = err
} else {
liveTopology.Connect()
s, err := liveTopology.SelectServerLegacy(context.Background(), description.WriteSelector())
require.NoError(t, err)
c, err := s.ConnectionLegacy(context.Background())
require.NoError(t, err)
_, err = (&command.Write{
DB: DBName(t),
Command: bsonx.Doc{{"dropDatabase", bsonx.Int32(1)}},
}).RoundTrip(context.Background(), s.SelectedDescription(), c)
require.NoError(t, err)
}
})
if liveTopologyErr != nil {
t.Fatal(liveTopologyErr)
}
return liveTopology
}
// TopologyWithConnString takes a connection string and returns a connected
// topology, or else bails out of testing
func TopologyWithConnString(t *testing.T, cs connstring.ConnString) *topology.Topology {
topology, err := topology.New(topology.WithConnString(func(connstring.ConnString) connstring.ConnString { return cs }))
if err != nil {
t.Fatal("Could not construct topology")
}
err = topology.Connect()
if err != nil {
t.Fatal("Could not start topology connection")
}
return topology
}
// ColName gets a collection name that should be unique
// to the currently executing test.
func ColName(t *testing.T) string {
// Get this indirectly to avoid copying a mutex
v := reflect.Indirect(reflect.ValueOf(t))
name := v.FieldByName("name")
return name.String()
}
// ConnString gets the globally configured connection string.
func ConnString(t *testing.T) connstring.ConnString {
connectionStringOnce.Do(func() {
// Parse the URI inline so that both the TLS and compressor options are applied.
mongodbURI := os.Getenv("MONGODB_URI")
if mongodbURI == "" {
mongodbURI = "mongodb://localhost:27017"
}
mongodbURI = AddTLSConfigToURI(mongodbURI)
mongodbURI = AddCompressorToUri(mongodbURI)
var err error
connectionString, err = connstring.Parse(mongodbURI)
if err != nil {
connectionStringErr = err
}
})
if connectionStringErr != nil {
t.Fatal(connectionStringErr)
}
return connectionString
}
func GetConnString() (connstring.ConnString, error) {
mongodbURI := os.Getenv("MONGODB_URI")
if mongodbURI == "" {
mongodbURI = "mongodb://localhost:27017"
}
mongodbURI = AddTLSConfigToURI(mongodbURI)
cs, err := connstring.Parse(mongodbURI)
if err != nil {
return connstring.ConnString{}, err
}
return cs, nil
}
// DBName gets the globally configured database name.
func DBName(t *testing.T) string {
return GetDBName(ConnString(t))
}
func GetDBName(cs connstring.ConnString) string {
if cs.Database != "" {
return cs.Database
}
return fmt.Sprintf("mongo-go-driver-%d", os.Getpid())
}
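// Illustrative sketch (not part of the original file): GetDBName prefers the
// database component of the URI and otherwise falls back to a per-process name.
//
//	cs, _ := connstring.Parse("mongodb://localhost:27017/mydb")
//	GetDBName(cs) // "mydb"; without a database component: "mongo-go-driver-<pid>"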
// Integration should be called at the beginning of integration
// tests to ensure that they are skipped if integration testing is
// turned off.
func Integration(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test in short mode")
}
}
// CompareVersions compares two version number strings (i.e. positive integers separated by
// periods). Comparisons are done to the lesser precision of the two versions. For example, 3.2 is
// considered equal to 3.2.11, whereas 3.2.0 is considered less than 3.2.11.
//
// Returns a positive int if version1 is greater than version2, a negative int if version1 is less
// than version2, and 0 if version1 is equal to version2.
func CompareVersions(t *testing.T, v1 string, v2 string) int {
n1 := strings.Split(v1, ".")
n2 := strings.Split(v2, ".")
for i := 0; i < int(math.Min(float64(len(n1)), float64(len(n2)))); i++ {
i1, err := strconv.Atoi(n1[i])
require.NoError(t, err)
i2, err := strconv.Atoi(n2[i])
require.NoError(t, err)
difference := i1 - i2
if difference != 0 {
return difference
}
}
return 0
}
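// Illustrative sketch (not part of the original file): comparisons use the
// lesser precision of the two inputs, e.g.
//
//	CompareVersions(t, "3.2", "3.2.11")   // 0 (compared as 3.2 vs 3.2)
//	CompareVersions(t, "3.2.0", "3.2.11") // negative (0 < 11)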
|
[
"\"MONGO_GO_DRIVER_CA_FILE\"",
"\"MONGO_GO_DRIVER_COMPRESSOR\"",
"\"MONGODB_URI\"",
"\"MONGODB_URI\""
] |
[] |
[
"MONGO_GO_DRIVER_CA_FILE",
"MONGO_GO_DRIVER_COMPRESSOR",
"MONGODB_URI"
] |
[]
|
["MONGO_GO_DRIVER_CA_FILE", "MONGO_GO_DRIVER_COMPRESSOR", "MONGODB_URI"]
|
go
| 3 | 0 | |
sumoenv.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import numpy as np
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
import traci
class SumoEnv:
place_len = 7.5
place_offset = 8.50
lane_len = 10
lane_ids = ['-gneE0_0','-gneE0_1','-gneE0_2','-gneE1_0','-gneE1_1','-gneE1_2','-gneE2_0','-gneE2_1','-gneE2_2','-gneE3_0','-gneE3_1','-gneE3_2']
def __init__(self, label='default', gui_f=False):
self.label = label
self.wt_last = 0.
self.ncars = 0
#exe = 'sumo-gui.exe' if gui_f else 'sumo.exe'  # WINDOWS
exe = 'sumo-gui' if gui_f else 'sumo'  # LINUX
sumoBinary = os.path.join(os.environ['SUMO_HOME'], 'bin', exe)
#sumoBinary = checkBinary('sumo')
self.sumoCmd = [sumoBinary, '-c', 'intersection.sumocfg']
return
def get_state_d(self):
state = np.zeros(self.lane_len * 12 + 4, dtype=np.float32)
for ilane in range(0, 12):
lane_id = self.lane_ids[ilane]
ncars = traci.lane.getLastStepVehicleNumber(lane_id)
cars = traci.lane.getLastStepVehicleIDs(lane_id)
for icar in cars:
xcar, ycar = traci.vehicle.getPosition(icar)
if ilane < 3:
pos = (ycar - self.place_offset) / self.place_len
elif ilane < 6:
pos = (xcar - self.place_offset) / self.place_len
elif ilane < 9:
pos = (-ycar - self.place_offset) / self.place_len
else:
pos = (-xcar - self.place_offset) / self.place_len
if pos > self.lane_len - 1.:
continue
pos = np.clip(pos, 0., self.lane_len - 1. - 1e-6)
ipos = int(pos)
state[int(ilane * self.lane_len + ipos)] += 1. - pos + ipos
state[int(ilane * self.lane_len + ipos + 1)] += pos - ipos
state[self.lane_len * 12:self.lane_len * 12+4] = np.eye(4)[traci.trafficlight.getPhase('gneJ00')]
return state
def step_d(self, action):
done = False
# traci.switch(self.label)
action = np.squeeze(action)
traci.trafficlight.setPhase('gneJ00', action)
traci.simulationStep()
traci.simulationStep()
self.ncars += traci.simulation.getDepartedNumber()
state = self.get_state_d()
wt = 0
for ilane in range(0, 12):
lane_id = self.lane_ids[ilane]
wt += traci.lane.getWaitingTime(lane_id)
reward = - (wt - self.wt_last)*0.004
self.wt_last = wt
if self.ncars > 250:
done = True
return state, reward, done, np.array([[reward]])
def reset(self):
self.wt_last = 0.
self.ncars = 0
traci.start(self.sumoCmd, label=self.label)
traci.trafficlight.setProgram('gneJ00', '0')
traci.simulationStep()
return self.get_state_d()
def close(self):
traci.close()
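# Illustrative usage (not part of the original file; assumes SUMO_HOME is set
# and an 'intersection.sumocfg' is present): a minimal rollout loop.
#
#     env = SumoEnv(gui_f=False)
#     state = env.reset()                        # 10*12 lane cells + 4 phase bits
#     state, reward, done, info = env.step_d(0)  # advance two simulation steps
#     env.close()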
|
[] |
[] |
[
"SUMO_HOME"
] |
[]
|
["SUMO_HOME"]
|
python
| 1 | 0 | |
cli/commands/create_lease.go
|
package commands
import (
"bytes"
"fmt"
"io"
"log"
"os"
"strings"
"github.com/codegangsta/cli"
"github.com/tentsk8s/k8s-claimer/client"
)
const (
ipEnvVarName = "IP"
tokenEnvVarName = "TOKEN"
clusterNameEnvVarName = "CLUSTER_NAME"
)
// CreateLease is a cli.Command action for creating a lease
func CreateLease(c *cli.Context) {
// inspect env for auth env var
authToken := os.Getenv("AUTH_TOKEN")
if authToken == "" {
log.Fatal("An authorization token is required in the form of an env var AUTH_TOKEN")
}
server := c.GlobalString("server")
if server == "" {
log.Fatal("Server missing")
}
durationSec := c.Int("duration")
if durationSec <= 0 {
log.Fatalf("Invalid duration %d", durationSec)
}
envPrefix := c.String("env-prefix")
clusterRegex := c.String("cluster-regex")
clusterVersion := c.String("cluster-version")
cloudProvider := c.String("provider")
if cloudProvider == "" {
log.Fatal("Cloud Provider not provided")
}
if cloudProvider == "azure" && clusterVersion != "" {
log.Fatal("Finding clusters by version is currently not supported with Azure!")
}
kcfgFile := c.String("kubeconfig-file")
if len(kcfgFile) < 1 {
log.Fatal("Missing kubeconfig-file")
}
fd, err := os.Create(kcfgFile)
if err != nil {
log.Fatalf("Error opening %s: %s", kcfgFile, err)
}
defer fd.Close()
resp, err := client.CreateLease(server, authToken, cloudProvider, clusterVersion, clusterRegex, durationSec)
if err != nil {
log.Fatalf("Error returned from server when creating lease: %s", err)
}
kcfg, err := resp.KubeConfigBytes()
if err != nil {
log.Fatalf("Error decoding kubeconfig: %s", err)
}
fmt.Println(exportVar(envPrefix, ipEnvVarName, resp.IP))
fmt.Println(exportVar(envPrefix, tokenEnvVarName, resp.Token))
fmt.Println(exportVar(envPrefix, clusterNameEnvVarName, resp.ClusterName))
if _, err := io.Copy(fd, bytes.NewBuffer(kcfg)); err != nil {
log.Fatalf("Error writing new Kubeconfig file to %s: %s", kcfgFile, err)
}
}
func exportVar(prefix, envVarName, val string) string {
if prefix != "" {
envVarName = fmt.Sprintf("%s_%s", prefix, envVarName)
}
val = strings.Replace(val, `"`, `\"`, -1)
return fmt.Sprintf(`export %s="%s"`, envVarName, val)
}
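// Illustrative sketch (not part of the original file): exportVar emits shell
// export statements with embedded double quotes escaped, e.g.
//
//	exportVar("CI", "IP", "10.0.0.1") // `export CI_IP="10.0.0.1"`
//	exportVar("", "TOKEN", `a"b`)     // `export TOKEN="a\"b"`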
|
[
"\"AUTH_TOKEN\""
] |
[] |
[
"AUTH_TOKEN"
] |
[]
|
["AUTH_TOKEN"]
|
go
| 1 | 0 | |
compute/common_instances.go
|
package compute
import (
"context"
"fmt"
"log"
"os"
"sync"
"github.com/databrickslabs/terraform-provider-databricks/common"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
)
var (
oncePool sync.Once
commonInstancePool *InstancePoolAndStats
)
// CommonInstancePoolID returns the ID of a common instance pool that is supposed to be used for internal testing purposes
func CommonInstancePoolID() string {
if commonInstancePool != nil {
return commonInstancePool.InstancePoolID
}
client := common.CommonEnvironmentClient()
oncePool.Do(func() { // atomic
log.Printf("[INFO] Initializing common instance pool")
ctx := context.Background()
instancePools := NewInstancePoolsAPI(ctx, client)
clusters := NewClustersAPI(ctx, client)
currentUserPool := fmt.Sprintf("Terraform Integration Test by %s", os.Getenv("USER"))
pools, err := instancePools.List()
if err != nil {
log.Printf("[ERROR] Cannot list instance pools: %v", err)
panic(err)
}
for _, existingPool := range pools.InstancePools {
if existingPool.InstancePoolName == currentUserPool {
log.Printf(
"[INFO] Using existing instance pool: %s/#setting/clusters/instance-pools/view/%s",
client.Host, existingPool.InstancePoolID)
commonInstancePool = &existingPool
return
}
}
instancePool := InstancePool{
PreloadedSparkVersions: []string{
clusters.LatestSparkVersionOrDefault(SparkVersionRequest{Latest: true, LongTermSupport: true})},
NodeTypeID: clusters.GetSmallestNodeType(NodeTypeRequest{
LocalDisk: true,
}),
InstancePoolName: currentUserPool,
MaxCapacity: 10,
IdleInstanceAutoTerminationMinutes: 15,
}
if !client.IsAzure() {
instancePool.AwsAttributes = &InstancePoolAwsAttributes{
Availability: AwsAvailabilitySpot,
}
}
newPool, err := instancePools.Create(instancePool)
if err != nil {
log.Printf("[ERROR] Cannot create instance pool: %v", err)
panic(err)
}
log.Printf("[INFO] Created common instance pool: %s/#setting/clusters/instance-pools/view/%s",
client.Host, newPool.InstancePoolID)
commonInstancePool = &newPool
})
return commonInstancePool.InstancePoolID
}
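// Illustrative note (not part of the original file): the sync.Once guard means
// the pool is looked up or created at most once per process; concurrent callers
// block until the first call finishes and then share commonInstancePool.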
// CommonEnvironmentClientWithRealCommandExecutor is good for internal tests
func CommonEnvironmentClientWithRealCommandExecutor() *common.DatabricksClient {
client := common.CommonEnvironmentClient()
client.WithCommandExecutor(func(ctx context.Context, _ *common.DatabricksClient) common.CommandExecutor {
return NewCommandsAPI(ctx, client)
})
return client
}
// NewTinyClusterInCommonPool creates new cluster for short-lived purposes
func NewTinyClusterInCommonPool() (c ClusterInfo, err error) {
randomName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
ctx := context.Background()
clusters := NewClustersAPI(ctx, CommonEnvironmentClientWithRealCommandExecutor())
c, err = clusters.Create(Cluster{
NumWorkers: 1,
ClusterName: "Terraform " + randomName,
SparkVersion: clusters.LatestSparkVersionOrDefault(SparkVersionRequest{Latest: true, LongTermSupport: true}),
InstancePoolID: CommonInstancePoolID(),
IdempotencyToken: "tf-" + randomName,
AutoterminationMinutes: 20,
})
return
}
// NewTinyClusterInCommonPoolPossiblyReused is recommended to be used for testing only
func NewTinyClusterInCommonPoolPossiblyReused() (c ClusterInfo) {
randomName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
currentCluster := "TerraformIntegrationTest"
ctx := context.Background()
clusters := NewClustersAPI(ctx, CommonEnvironmentClientWithRealCommandExecutor())
c, err := clusters.GetOrCreateRunningCluster(currentCluster, Cluster{
NumWorkers: 1,
ClusterName: currentCluster,
SparkVersion: clusters.LatestSparkVersionOrDefault(SparkVersionRequest{Latest: true, LongTermSupport: true}),
InstancePoolID: CommonInstancePoolID(),
IdempotencyToken: "tf-" + randomName,
AutoterminationMinutes: 20,
})
if err != nil {
panic(err)
}
return
}
|
[
"\"USER\""
] |
[] |
[
"USER"
] |
[]
|
["USER"]
|
go
| 1 | 0 | |
telegram_upload/video.py
|
import platform
import re
import subprocess
import tempfile
import os
from hachoir.metadata import extractMetadata
from hachoir.parser import createParser
from telegram_upload.exceptions import ThumbVideoError
def video_metadata(file):
return extractMetadata(createParser(file))
def call_ffmpeg(args):
try:
return subprocess.Popen([get_ffmpeg_command()] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except FileNotFoundError:
raise ThumbVideoError('ffmpeg command is not available. Thumbnails for videos are not available!')
def get_ffmpeg_command():
return os.environ.get('FFMPEG_COMMAND',
'ffmpeg.exe' if platform.system() == 'Windows' else 'ffmpeg')
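# Illustrative sketch (not part of the original file): the binary can be
# overridden through the FFMPEG_COMMAND environment variable, e.g.
#
#     os.environ['FFMPEG_COMMAND'] = '/usr/local/bin/ffmpeg'
#     assert get_ffmpeg_command() == '/usr/local/bin/ffmpeg'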
def get_video_size(file):
p = call_ffmpeg([
'-i', file,
])
stdout, stderr = p.communicate()
video_lines = re.findall(': Video: ([^\n]+)', stderr.decode('utf-8'))
if not video_lines:
return
matches = re.findall(r"(\d{2,6})x(\d{2,6})", video_lines[0])
if matches:
return [int(x) for x in matches[0]]
def get_video_thumb(file, output=None, size=200):
output = output or tempfile.NamedTemporaryFile(suffix='.jpg').name
metadata = video_metadata(file)
duration = metadata.get('duration').seconds if metadata.has('duration') else 0
ratio = get_video_size(file)
if ratio is None:
raise ThumbVideoError('Video ratio is not available.')
if ratio[0] / ratio[1] > 1:
width, height = size, -1
else:
width, height = -1, size
p = call_ffmpeg([
'-ss', str(int(duration / 2)),
'-i', file,
'-filter:v',
'scale={}:{}'.format(width, height),
'-vframes:v', '1',
output,
])
p.communicate()
if not p.returncode and os.path.lexists(file):
return output
|
[] |
[] |
[
"FFMPEG_COMMAND"
] |
[]
|
["FFMPEG_COMMAND"]
|
python
| 1 | 0 | |
devscripts/pyinstaller_zopfli.py
|
# coding: utf-8
from __future__ import unicode_literals
from PyInstaller import __main__
import zlib
import zopfli
import os
try:
iterations = int(os.environ['ZOPFLI_ITERATIONS'])
except BaseException:
iterations = 30
def zlib_compress(data, level=-1):
c = zopfli.ZopfliCompressor(zopfli.ZOPFLI_FORMAT_ZLIB, iterations=iterations)
return c.compress(data) + c.flush()
zlib.compress = zlib_compress
__main__.run()
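# Illustrative usage (not part of the original file; the entry-point path is
# hypothetical): compression effort is tuned via ZOPFLI_ITERATIONS, e.g.
#
#     ZOPFLI_ITERATIONS=100 python devscripts/pyinstaller_zopfli.py your_entry_point.py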
|
[] |
[] |
[
"ZOPFLI_ITERATIONS"
] |
[]
|
["ZOPFLI_ITERATIONS"]
|
python
| 1 | 0 | |
node-manager/superviser/backup.go
|
// Copyright 2019 dfuse Platform Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package superviser
import (
"encoding/hex"
"errors"
"github.com/eoscanada/pitreos"
node_manager "github.com/streamingfast/node-manager"
"github.com/streamingfast/node-manager/metrics"
"github.com/streamingfast/node-manager/superviser"
"go.uber.org/zap"
)
type NodeosBackupInfo struct {
ChainIDStr string `yaml:"chainIdStr"`
ServerVersion string `yaml:"serverVersion"`
LastBlockSeen uint32 `yaml:"lastBlockSeen"`
ServerVersionString string `yaml:"serverVersionString"`
}
var _ node_manager.BackupableChainSuperviser = (*NodeosSuperviser)(nil)
func (s *NodeosSuperviser) TakeBackup(backupTag string, backupStoreURL string) error {
if s.options.NoBlocksLog {
return errors.New("unable to take backup: refusing to take backup on an instance with option 'NoBlocksLog'")
}
if s.IsRunning() {
return errors.New("unable to take backup: refusing to take backup while process is running")
}
p, err := superviser.GetPitreos(s.Logger, backupStoreURL, "blocks/blocks.log")
if err != nil {
return err
}
details := make(map[string]interface{})
details["nodeosInfo"] = NodeosBackupInfo{
ChainIDStr: hex.EncodeToString(s.chainID),
ServerVersion: string(s.serverVersion),
ServerVersionString: s.serverVersionString,
LastBlockSeen: s.lastBlockSeen,
}
s.Logger.Info("creating backup", zap.String("store_url", backupStoreURL), zap.String("tag", backupTag))
err = p.GenerateBackup(s.options.DataDir, backupTag, details, pitreos.MustNewIncludeThanExcludeFilter(".*", ""))
if err == nil {
metrics.SuccessfulBackups.Inc()
}
return err
}
func (s *NodeosSuperviser) RestoreBackup(backupName, backupTag string, backupStoreURL string) error {
if s.IsRunning() {
return errors.New("unable to take backup: refusing to restore backup while process is running")
}
var appendonlyFiles []string
var exclusionFilter string
if s.options.NoBlocksLog {
exclusionFilter = "blocks/blocks.(log|index)"
} else {
appendonlyFiles = append(appendonlyFiles, "blocks/blocks.log")
}
p, err := superviser.GetPitreos(s.Logger, backupStoreURL, appendonlyFiles...)
if err != nil {
return err
}
if backupName == "latest" {
// FIXME: This logic should be moved up to the operator, so it's not repeated between each superviser!
backupName, err = p.GetLatestBackup(backupTag)
if err != nil {
return err
}
}
s.Logger.Info("restoring from pitreos", zap.String("backup_name", backupName), zap.Any("appendonly_files", appendonlyFiles), zap.String("exclusion_filter", exclusionFilter))
err = p.RestoreFromBackup(s.options.DataDir, backupName, pitreos.MustNewIncludeThanExcludeFilter(".*", exclusionFilter))
if s.HandlePostRestore != nil {
s.HandlePostRestore()
}
return err
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
tests/keras/test_callbacks.py
|
import os
import multiprocessing
import numpy as np
import pytest
from csv import reader
from csv import Sniffer
import shutil
from keras import optimizers
from keras import initializers
from keras import callbacks
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Dropout, add, dot, Lambda
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D, GlobalAveragePooling1D, GlobalAveragePooling2D
from keras.utils.test_utils import get_test_data
from keras.utils.test_utils import keras_test
from keras import backend as K
from keras.utils import np_utils
try:
from unittest.mock import patch
except ImportError:
from mock import patch
input_dim = 2
num_hidden = 4
num_classes = 2
batch_size = 5
train_samples = 20
test_samples = 20
@keras_test
def test_TerminateOnNaN():
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
cbks = [callbacks.TerminateOnNaN()]
model = Sequential()
initializer = initializers.Constant(value=1e5)
for _ in range(5):
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu',
kernel_initializer=initializer))
model.add(Dense(num_classes, activation='linear'))
model.compile(loss='mean_squared_error',
optimizer='rmsprop')
# case 1 fit
history = model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=20)
loss = history.history['loss']
assert len(loss) == 1
assert loss[0] == np.inf
# case 2 fit_generator
def data_generator():
max_batch_index = len(X_train) // batch_size
i = 0
while 1:
yield (X_train[i * batch_size: (i + 1) * batch_size],
y_train[i * batch_size: (i + 1) * batch_size])
i += 1
i = i % max_batch_index
history = model.fit_generator(data_generator(),
len(X_train),
validation_data=(X_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
assert len(loss) == 1
assert loss[0] == np.inf or np.isnan(loss[0])
@keras_test
def test_stop_training_csv(tmpdir):
np.random.seed(1337)
fp = str(tmpdir / 'test.csv')
(X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
cbks = [callbacks.TerminateOnNaN(), callbacks.CSVLogger(fp)]
model = Sequential()
for _ in range(5):
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='linear'))
model.compile(loss='mean_squared_error',
optimizer='rmsprop')
def data_generator():
i = 0
max_batch_index = len(X_train) // batch_size
tot = 0
while 1:
if tot > 3 * len(X_train):
yield np.ones([batch_size, input_dim]) * np.nan, np.ones([batch_size, num_classes]) * np.nan
else:
yield (X_train[i * batch_size: (i + 1) * batch_size],
y_train[i * batch_size: (i + 1) * batch_size])
i += 1
tot += 1
i = i % max_batch_index
history = model.fit_generator(data_generator(),
len(X_train) // batch_size,
validation_data=(X_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
assert len(loss) > 1
assert loss[-1] == np.inf or np.isnan(loss[-1])
values = []
with open(fp) as f:
for x in reader(f):
values.append(x)
assert 'nan' in values[-1], 'The last epoch was not logged.'
os.remove(fp)
@keras_test
def test_ModelCheckpoint(tmpdir):
np.random.seed(1337)
filepath = str(tmpdir / 'checkpoint.h5')
(X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
# case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
assert os.path.isfile(filepath)
os.remove(filepath)
# case 2
mode = 'min'
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
assert os.path.isfile(filepath)
os.remove(filepath)
# case 3
mode = 'max'
monitor = 'val_acc'
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
assert os.path.isfile(filepath)
os.remove(filepath)
# case 4
save_best_only = True
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
assert os.path.isfile(filepath)
os.remove(filepath)
# case 5
save_best_only = False
period = 2
mode = 'auto'
filepath = 'checkpoint.{epoch:02d}.h5'
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode,
period=period)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=4)
assert os.path.isfile(filepath.format(epoch=2))
assert os.path.isfile(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=3))
os.remove(filepath.format(epoch=2))
os.remove(filepath.format(epoch=4))
assert not tmpdir.listdir()
@keras_test
def test_EarlyStopping():
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
mode = 'max'
monitor = 'val_acc'
patience = 0
cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]
history = model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=20)
mode = 'auto'
monitor = 'val_acc'
patience = 2
cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]
history = model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=20)
@keras_test
def test_EarlyStopping_reuse():
np.random.seed(1337)
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = Sequential((
Dense(1, input_dim=1, activation='relu'),
Dense(1, activation='sigmoid'),
))
model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
stopper = callbacks.EarlyStopping(monitor='acc', patience=patience)
weights = model.get_weights()
hist = model.fit(data, labels, callbacks=[stopper], epochs=20)
assert len(hist.epoch) >= patience
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
hist = model.fit(data, labels, callbacks=[stopper], epochs=20)
assert len(hist.epoch) >= patience
@keras_test
def test_EarlyStopping_patience():
class DummyModel(object):
def __init__(self):
self.stop_training = False
early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=2)
early_stop.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040, 0.1019]
# Should stop after epoch 3, as the loss has not improved after patience=2 epochs.
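# Walkthrough (assuming Keras' standard wait-counter semantics): epoch 0 sets
# best=0.0860; epochs 1 and 2 do not improve, so wait reaches patience=2 at the
# end of epoch index 2 and stop_training is set, hence epochs_trained == 3.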
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
assert epochs_trained == 3
@keras_test
def test_EarlyStopping_baseline():
class DummyModel(object):
def __init__(self):
self.stop_training = False
def baseline_tester(acc_levels):
early_stop = callbacks.EarlyStopping(monitor='val_acc', baseline=0.75, patience=2)
early_stop.model = DummyModel()
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(acc_levels)):
epochs_trained += 1
early_stop.on_epoch_end(epoch, logs={'val_acc': acc_levels[epoch]})
if early_stop.model.stop_training:
break
return epochs_trained
acc_levels = [0.55, 0.76, 0.81, 0.81]
baseline_met = baseline_tester(acc_levels)
acc_levels = [0.55, 0.74, 0.81, 0.81]
baseline_not_met = baseline_tester(acc_levels)
# All epochs should run because baseline was met in second epoch
assert baseline_met == 4
# Baseline was not met by second epoch and should stop
assert baseline_not_met == 2
@keras_test
def test_LearningRateScheduler():
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=5)
assert abs(float(K.get_value(model.optimizer.lr)) - 0.2) < K.epsilon()
@keras_test
def test_ReduceLROnPlateau():
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=optimizers.SGD(lr=0.1),
metrics=['accuracy'])
return model
model = make_model()
# This should reduce the LR after the first epoch (due to high epsilon).
cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, min_delta=10, patience=1, cooldown=5)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2)
assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.01, atol=K.epsilon())
model = make_model()
cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, min_delta=0, patience=1, cooldown=5)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2)
assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.1, atol=K.epsilon())
@keras_test
def test_ReduceLROnPlateau_patience():
class DummyOptimizer(object):
def __init__(self):
self.lr = K.variable(1.0)
class DummyModel(object):
def __init__(self):
self.optimizer = DummyOptimizer()
reduce_on_plateau = callbacks.ReduceLROnPlateau(monitor='val_loss',
patience=2)
reduce_on_plateau.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040]
lrs = []
for epoch in range(len(losses)):
reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
lrs.append(K.get_value(reduce_on_plateau.model.optimizer.lr))
# The learning rates should be 1.0 except the last one
assert all([lr == 1.0 for lr in lrs[:-1]]) and lrs[-1] < 1.0
@keras_test
def test_ReduceLROnPlateau_backwards_compatibility():
import warnings
with warnings.catch_warnings(record=True) as ws:
reduce_on_plateau = callbacks.ReduceLROnPlateau(epsilon=1e-13)
# Check if warnings are disabled
if os.environ.get("PYTHONWARNINGS") != "ignore":
assert "`epsilon` argument is deprecated" in str(ws[0].message)
assert not hasattr(reduce_on_plateau, 'epsilon')
assert hasattr(reduce_on_plateau, 'min_delta')
assert reduce_on_plateau.min_delta == 1e-13
@keras_test
def test_CSVLogger(tmpdir):
np.random.seed(1337)
filepath = str(tmpdir / 'log.tsv')
sep = '\t'
(X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=optimizers.SGD(lr=0.1),
metrics=['accuracy'])
return model
# case 1, create new file with defined separator
model = make_model()
cbks = [callbacks.CSVLogger(filepath, separator=sep)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
assert os.path.isfile(filepath)
with open(filepath) as csvfile:
dialect = Sniffer().sniff(csvfile.read())
assert dialect.delimiter == sep
del model
del cbks
# case 2, append data to existing file, skip header
model = make_model()
cbks = [callbacks.CSVLogger(filepath, separator=sep, append=True)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
# case 3, reuse of CSVLogger object
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
import re
with open(filepath) as csvfile:
output = " ".join(csvfile.readlines())
assert len(re.findall('epoch', output)) == 1
os.remove(filepath)
assert not tmpdir.listdir()
@keras_test
def test_TensorBoard(tmpdir):
np.random.seed(np.random.randint(1, 1e7))
filepath = str(tmpdir / 'logs')
(X_train, y_train), (X_test, y_test) = get_test_data(
num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(X_train) // batch_size
else:
max_batch_index = len(X_test) // batch_size
i = 0
while 1:
if train:
# simulate multi-input/output models
yield (X_train[i * batch_size: (i + 1) * batch_size],
y_train[i * batch_size: (i + 1) * batch_size])
else:
yield (X_test[i * batch_size: (i + 1) * batch_size],
y_test[i * batch_size: (i + 1) * batch_size])
i += 1
i = i % max_batch_index
inp = Input((input_dim,))
hidden = Dense(num_hidden, activation='relu')(inp)
hidden = Dropout(0.1)(hidden)
output = Dense(num_classes, activation='softmax')(hidden)
model = Model(inputs=inp, outputs=output)
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# we must generate new callbacks for each test, as they aren't stateless
def callbacks_factory(histogram_freq):
return [callbacks.TensorBoard(log_dir=filepath,
histogram_freq=histogram_freq,
write_images=True, write_grads=True,
embeddings_freq=1,
embeddings_layer_names=['dense_1'],
batch_size=5)]
# fit without validation data
model.fit(X_train, y_train, batch_size=batch_size,
callbacks=callbacks_factory(histogram_freq=0), epochs=3)
# fit with validation data and accuracy
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test),
callbacks=callbacks_factory(histogram_freq=0), epochs=2)
# fit generator without validation data
model.fit_generator(data_generator(True), len(X_train), epochs=2,
callbacks=callbacks_factory(histogram_freq=0))
# fit generator with validation data and accuracy
model.fit_generator(data_generator(True), len(X_train), epochs=2,
validation_data=(X_test, y_test),
callbacks=callbacks_factory(histogram_freq=1))
assert os.path.isdir(filepath)
shutil.rmtree(filepath)
assert not tmpdir.listdir()
@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
reason='Requires TensorFlow backend')
def test_TensorBoard_histogram_freq_must_have_validation_data(tmpdir):
np.random.seed(np.random.randint(1, 1e7))
filepath = str(tmpdir / 'logs')
(X_train, y_train), (X_test, y_test) = get_test_data(
num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(X_train) // batch_size
else:
max_batch_index = len(X_test) // batch_size
i = 0
while 1:
if train:
# simulate multi-input/output models
yield (X_train[i * batch_size: (i + 1) * batch_size],
y_train[i * batch_size: (i + 1) * batch_size])
else:
yield (X_test[i * batch_size: (i + 1) * batch_size],
y_test[i * batch_size: (i + 1) * batch_size])
i += 1
i = i % max_batch_index
inp = Input((input_dim,))
hidden = Dense(num_hidden, activation='relu')(inp)
hidden = Dropout(0.1)(hidden)
output = Dense(num_classes, activation='softmax')(hidden)
model = Model(inputs=inp, outputs=output)
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# we must generate new callbacks for each test, as they aren't stateless
def callbacks_factory(histogram_freq):
return [callbacks.TensorBoard(log_dir=filepath,
histogram_freq=histogram_freq,
write_images=True, write_grads=True,
embeddings_freq=1,
embeddings_layer_names=['dense_1'],
batch_size=5)]
# fit without validation data should raise ValueError if histogram_freq > 0
with pytest.raises(ValueError) as raised_exception:
model.fit(X_train, y_train, batch_size=batch_size,
callbacks=callbacks_factory(histogram_freq=1), epochs=3)
assert 'validation_data must be provided' in str(raised_exception.value)
# fit generator without validation data should raise ValueError if
# histogram_freq > 0
with pytest.raises(ValueError) as raised_exception:
model.fit_generator(data_generator(True), len(X_train), epochs=2,
callbacks=callbacks_factory(histogram_freq=1))
assert 'validation_data must be provided' in str(raised_exception.value)
# fit generator with validation data generator should raise ValueError if
# histogram_freq > 0
with pytest.raises(ValueError) as raised_exception:
model.fit_generator(data_generator(True), len(X_train), epochs=2,
validation_data=data_generator(False),
validation_steps=1,
callbacks=callbacks_factory(histogram_freq=1))
assert 'validation_data must be provided' in str(raised_exception.value)
@keras_test
def test_TensorBoard_multi_input_output(tmpdir):
np.random.seed(np.random.randint(1, 1e7))
filepath = str(tmpdir / 'logs')
(X_train, y_train), (X_test, y_test) = get_test_data(
num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim, input_dim),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(X_train) // batch_size
else:
max_batch_index = len(X_test) // batch_size
i = 0
while 1:
if train:
# simulate multi-input/output models
yield ([X_train[i * batch_size: (i + 1) * batch_size]] * 2,
[y_train[i * batch_size: (i + 1) * batch_size]] * 2)
else:
yield ([X_test[i * batch_size: (i + 1) * batch_size]] * 2,
[y_test[i * batch_size: (i + 1) * batch_size]] * 2)
i += 1
i = i % max_batch_index
inp1 = Input((input_dim, input_dim))
inp2 = Input((input_dim, input_dim))
inp_3d = add([inp1, inp2])
inp_2d = GlobalAveragePooling1D()(inp_3d)
inp_pair = Lambda(lambda x: x)([inp_3d, inp_2d]) # test a layer with a list of output tensors
hidden = dot(inp_pair, axes=-1)
hidden = Dense(num_hidden, activation='relu')(hidden)
hidden = Dropout(0.1)(hidden)
output1 = Dense(num_classes, activation='softmax')(hidden)
output2 = Dense(num_classes, activation='softmax')(hidden)
model = Model(inputs=[inp1, inp2], outputs=[output1, output2])
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# we must generate new callbacks for each test, as they aren't stateless
def callbacks_factory(histogram_freq):
return [callbacks.TensorBoard(log_dir=filepath,
histogram_freq=histogram_freq,
write_images=True, write_grads=True,
embeddings_freq=1,
embeddings_layer_names=['dense_1'],
batch_size=5)]
# fit without validation data
model.fit([X_train] * 2, [y_train] * 2, batch_size=batch_size,
callbacks=callbacks_factory(histogram_freq=0), epochs=3)
# fit with validation data and accuracy
model.fit([X_train] * 2, [y_train] * 2, batch_size=batch_size,
validation_data=([X_test] * 2, [y_test] * 2),
callbacks=callbacks_factory(histogram_freq=1), epochs=2)
# fit generator without validation data
model.fit_generator(data_generator(True), len(X_train), epochs=2,
callbacks=callbacks_factory(histogram_freq=0))
# fit generator with validation data and accuracy
model.fit_generator(data_generator(True), len(X_train), epochs=2,
validation_data=([X_test] * 2, [y_test] * 2),
callbacks=callbacks_factory(histogram_freq=1))
assert os.path.isdir(filepath)
shutil.rmtree(filepath)
assert not tmpdir.listdir()
@keras_test
def test_TensorBoard_convnet(tmpdir):
np.random.seed(np.random.randint(1, 1e7))
filepath = str(tmpdir / 'logs')
input_shape = (16, 16, 3)
(x_train, y_train), (x_test, y_test) = get_test_data(num_train=500,
num_test=200,
input_shape=input_shape,
classification=True,
num_classes=num_classes)
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
model = Sequential([
Conv2D(filters=8, kernel_size=3,
activation='relu',
input_shape=input_shape),
MaxPooling2D(pool_size=2),
Conv2D(filters=4, kernel_size=(3, 3),
activation='relu', padding='same'),
GlobalAveragePooling2D(),
Dense(num_classes, activation='softmax')
])
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
tsb = callbacks.TensorBoard(log_dir=filepath, histogram_freq=1,
write_images=True, write_grads=True,
batch_size=16)
cbks = [tsb]
model.summary()
history = model.fit(x_train, y_train, epochs=2, batch_size=16,
validation_data=(x_test, y_test),
callbacks=cbks,
verbose=0)
assert os.path.isdir(filepath)
shutil.rmtree(filepath)
assert not tmpdir.listdir()
@keras_test
def test_CallbackValData():
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbk = callbacks.LambdaCallback(on_train_end=lambda x: 1)
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=[cbk], epochs=1)
def data_generator(train):
if train:
max_batch_index = len(X_train) // batch_size
else:
max_batch_index = len(X_test) // batch_size
i = 0
while 1:
if train:
yield (X_train[i * batch_size: (i + 1) * batch_size],
y_train[i * batch_size: (i + 1) * batch_size])
else:
yield (X_test[i * batch_size: (i + 1) * batch_size],
y_test[i * batch_size: (i + 1) * batch_size])
i += 1
i = i % max_batch_index
cbk2 = callbacks.LambdaCallback(on_train_end=lambda x: 1)
model.fit_generator(data_generator(True), len(X_train), epochs=1,
validation_data=(X_test, y_test),
callbacks=[cbk2])
# callback validation data should always have x, y, and sample weights
assert len(cbk.validation_data) == len(cbk2.validation_data) == 3
assert cbk.validation_data[0] is cbk2.validation_data[0]
assert cbk.validation_data[1] is cbk2.validation_data[1]
assert cbk.validation_data[2].shape == cbk2.validation_data[2].shape
@keras_test
def test_LambdaCallback():
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# Start an arbitrary process that should run during model training and be terminated after training has completed.
def f():
while True:
pass
p = multiprocessing.Process(target=f)
p.start()
cleanup_callback = callbacks.LambdaCallback(on_train_end=lambda logs: p.terminate())
cbks = [cleanup_callback]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=5)
p.join()
assert not p.is_alive()
@keras_test
def test_TensorBoard_with_ReduceLROnPlateau(tmpdir):
import shutil
np.random.seed(np.random.randint(1, 1e7))
filepath = str(tmpdir / 'logs')
(X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [
callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.5,
patience=4,
verbose=1),
callbacks.TensorBoard(
log_dir=filepath)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=2)
assert os.path.isdir(filepath)
shutil.rmtree(filepath)
assert not tmpdir.listdir()
@keras_test
def tests_RemoteMonitor():
(X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [callbacks.RemoteMonitor()]
with patch('requests.post'):
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
@keras_test
def tests_RemoteMonitorWithJsonPayload():
(X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
num_test=test_samples,
input_shape=(input_dim,),
classification=True,
num_classes=num_classes)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [callbacks.RemoteMonitor(send_as_json=True)]
with patch('requests.post'):
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
if __name__ == '__main__':
pytest.main([__file__])
|
[] |
[] |
[
"PYTHONWARNINGS"
] |
[]
|
["PYTHONWARNINGS"]
|
python
| 1 | 0 | |
setup.py
|
from setuptools import setup
from setuptools import find_packages
from distutils.command.clean import clean
from distutils.extension import Extension
from distutils.sysconfig import get_config_vars
from Cython.Build import cythonize
import os, platform, sys, fnmatch
import numpy
def setup_package():
# Get Platform/OS
_os = sys.platform
# Get the current directory
_pwd_ = os.path.dirname(os.path.realpath(__file__))
_upwd_ = os.path.dirname(_pwd_)
# Remove the "-Wstrict-prototypes" compiler option, which isn't valid for C++.
cfg_vars = get_config_vars()
for key, value in cfg_vars.items():
if isinstance(value,str):
cfg_vars[key] = value.replace("-Wstrict-prototypes", "")
# Suppress numpy deprecation warnings
no_deprecated = ("NPY_NO_DEPRECATED_API",None)
sourcefiles = [
os.path.join(_pwd_,"bindings","PostMeshPy.pyx"),
os.path.join(_pwd_,"src","PostMeshBase.cpp"),
os.path.join(_pwd_,"src","PostMeshCurve.cpp"),
os.path.join(_pwd_,"src","PostMeshSurface.cpp")
]
# Set the compiler
# Must be called as: "python setup.py build_ext CXX=/usr/bin/g++"
args = sys.argv
_cxx_specified = False
if len(args) > 1:
for counter, arg in enumerate(args):
if "CXX" in arg:
_cxx_specified = True
_cxx_compiler = arg.split("=")[-1]
args.remove(arg)
if _cxx_specified:
os.environ["CC"] = _cxx_compiler
os.environ["CXX"] = _cxx_compiler
else:
_cxx_compiler = get_config_vars()['CXX'].split(' ')[0]
os.environ["CC"] = _cxx_compiler
os.environ["CXX"] = _cxx_compiler
# Compiler arguments
if "clang++" in _cxx_compiler or ("c++" in _cxx_compiler and "darwin" in _os):
compiler_args = ["-O3","-std=c++11","-m64","-march=native","-mtune=native","-ffp-contract=fast",
"-ffast-math","-flto","-DNPY_NO_DEPRECATED_API","-Wno-shorten-64-to-32"]
else:
compiler_args = ["-O3","-std=c++11","-m64","-march=native","-mtune=native","-ffp-contract=fast",
"-mfpmath=sse","-ffast-math","-ftree-vectorize","-finline-functions","-finline-limit=100000",
"-funroll-loops","-Wno-unused-function","-flto","-DNPY_NO_DEPRECATED_API","-Wno-cpp"]
# if "darwin" in _os:
# compiler_args.append("-stdlib=libstdc++")
eigen_include_path = "/usr/local/include/eigen/"
oce_include_path = "/usr/local/include/oce/"
# Link to OpenCascade runtime libraries
# Search for all subdirectories under /usr/local/lib
# Change the directory name if occ is elsewhere
occ_dir = "/usr/local/lib"
all_dir_libs = os.listdir(occ_dir)
occ_libs = []
for i in all_dir_libs:
lib_suffix = i.split(".")[-1]
if i[:4]=="libT" and (lib_suffix != "a" and lib_suffix != "la" and lib_suffix != "0"):
if "darwin" in _os:
occ_libs.append(i[3:-6])
elif "linux" in _os:
occ_libs.append(":"+i)
found_oce = False
for i in occ_libs:
if "TKernel" in i:
found_oce = True
break
if found_oce is False:
if "darwin" in _os:
version = next(os.walk("/usr/local/Cellar/oce/"))[1][0]
occ_dir = os.path.join("/usr/local/Cellar/oce",version,"lib")
oce_include_path = os.path.join("/usr/local/Cellar/oce",version,"include","oce")
elif "linux" in _os:
occ_dir = "/usr/lib/x86_64-linux-gnu"
oce_include_path = "/usr/include/oce/"
all_dir_libs = os.listdir(occ_dir)
for i in all_dir_libs:
lib_suffix = i.split(".")[-1]
if i[:4]=="libT" and (lib_suffix != "a" and lib_suffix != "la" and lib_suffix != "0"):
occ_libs.append(":"+i)
# Create extension module
extensions = [
Extension(
name = "PostMeshPy",
sources = sourcefiles,
language="c++",
include_dirs = [_pwd_,
_pwd_+"/include/",
eigen_include_path,
oce_include_path,
numpy.get_include()],
libraries= ["stdc++"] + occ_libs,
library_dirs = [_pwd_, os.path.join("/usr","local","lib")],
extra_compile_args = compiler_args,
define_macros=[no_deprecated],
),
]
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
ext_modules = cythonize(extensions),
name = "PostMeshPy",
version = "1.6.1",
description = "A Python wrapper for PostMesh - a high order curvilinear mesh generator based on OpenCascade",
long_description=long_description,
long_description_content_type="text/markdown",
author="Roman Poya",
author_email = "[email protected]",
url = "https://github.com/romeric/PostMesh",
license="MIT",
install_requires=[
'numpy>=1.9',
'cython>=0.23'],
packages=find_packages(),
include_package_data=True,
package_data={'': ['bindings/*','src/*','include/*','example/*',
'*.pyx', '*.pxd', '*.h', '*.hpp', '*.c', '*.cpp', 'Makefile']},
extra_files = "LICENSE.md"
)
if __name__ == "__main__":
setup_package()
|
[] |
[] |
[
"CXX",
"CC"
] |
[]
|
["CXX", "CC"]
|
python
| 2 | 0 | |
src/impurity/atomd/atom_orbitalsusc.py
|
#!/usr/bin/env python
pycxx_available=True
symeig_available=False
from scipy import *
import sys, re, os, math
import copy
import getopt
import pickle
import glob
import scipy.weave as weave
from numpy import linalg
ROOT = os.environ.get('WIEN_DMFT_ROOT')
if ROOT is not None:
sys.path.append( ROOT )
else:
print >> sys.stderr, "Environment variable WIEN_DMFT_ROOT must be set!"
print "Environment variable WIEN_DMFT_ROOT must be set!"
sys.exit(1)
import gaunt
if pycxx_available: import gutils
if symeig_available: import symeig
import numpy
if numpy.__version__ == '1.0.1':
loadtxt = io.read_array
savetxt = io.write_array
def union(data1, data2):
" Takes a union of two lists"
res = data1
for d in data2:
if d not in res: res.append(d)
return res
def overlap(data1, data2):
" Checks if there is any overlap between data1 and data2"
for d in data2:
if d in data1:
return True
return False
def compres(groups):
loopsDone = True
while (loopsDone):
loopsDone = False
for i in range(len(groups)):
if loopsDone: break
for j in range(i+1,len(groups)):
if loopsDone: break
if overlap(groups[i],groups[j]):
groups[i] = union(groups[i],groups[j])
del groups[j]
loopsDone = True
for g in groups: g.sort()
groups.sort(cmp=lambda x,y: x[0]-y[0])
return groups
if pycxx_available:
compress = gutils.compres
else:
compress = compres
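# A minimal illustration of the merging above (hypothetical input): overlapping
# index lists are unioned transitively while disjoint ones stay separate, e.g.
# compres([[1, 2], [2, 3], [5]]) -> [[1, 2, 3], [5]]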
def cprint(fh, U):
for i in range(shape(U)[0]):
for j in range(shape(U)[1]):
f = U[i,j]
if abs(f)<1e-10: f = 0j
print >> fh, "%7.4f %7.4f*i " % (f.real, f.imag),
print >> fh
print >> fh
def mprint(fh, U):
for i in range(shape(U)[0]):
for j in range(shape(U)[1]):
f = U[i,j]
if abs(f)<1e-10: f = 0
print >> fh, "%7.4f " % f,
print >> fh
print >> fh
def TS2C_p():
s2 = 1./sqrt(2.)
T2 = [[],[],[]]
## m x y z
T2[-1+1]= [ s2, -1j*s2, 0.0]
T2[ 0+1]= [ 0.0, 0.0, 1.0]
T2[ 1+1]= [ s2, 1j*s2, 0.0]
return array(T2)
def TS2C_d():
""" Generates transformation matrix from complex
spherical harmonic to cubic harmonic for l=2.
A^{cub} = T^+ * A^{spher} * T
Complex spherical harmonics are (after Varshalovich)
order of cubics = [3z^2-r^2, x^2-y^2, yz, zx, xy]
Spheric harmonics are:
Y_{2,-2}(r) = a/sqrt(2)*(x^2-y^2-2ixy)/r^2
Y_{2,-1}(r) = a/sqrt(2)*(2zx-2izy)/r^2
Y_{2, 0}(r) = a/sqrt(6)*(3z^2/r^2-1)
Y_{2, 1}(r) = -a/sqrt(2)*(2zx+2izy)/r^2
Y_{2, 2}(r) = a/sqrt(2)*(x^2-y^2+2ixy)/r^2
Cubic harmonics are compatible with Wien2K code:
Y_{2,1}(r) = a*(3z^2/r^2-1)/sqrt(6)
Y_{2,2}(r) = a*(x^2-y^2)/r^2
Y_{2,3}(r) = 2*a*yz/r^2
Y_{2,4}(r) = 2*a*zx/r^2
Y_{2,5}(r) = a*2xy/r^2
where a=sqrt(3*5/pi)/4 is a normalization constant.
Transformation matrix T(m,i) is given by
Y_{2,m}(r) = sum_{i=xyz} T(m,i) * Y_{2,i}(r)
"""
s2 = 1./sqrt(2.)
T2 = [[],[],[],[],[]]
## m z^2 x^2-y^2 yz zx xy
T2[-2+2]= [ 0.0, s2, 0.0, 0.0, -1j*s2]
T2[-1+2]= [ 0.0, 0.0, -1j*s2, s2, 0.0]
T2[ 0+2]= [ 1.0, 0.0, 0.0, 0.0, 0.0]
T2[ 1+2]= [ 0.0, 0.0, -1j*s2, -s2, 0.0]
T2[ 2+2]= [ 0.0, s2, 0.0, 0.0, 1j*s2]
return array(T2)
def TS2C(l):
if l==0: return array([[1]])
if l==1: return TS2C_p()
if l==2: return TS2C_d()
#if l==3: return Ts2C_f()
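# Sanity-check sketch (not part of the original flow): each TS2C(l) should be
# unitary, so that A^{cub} = T^+ * A^{spher} * T preserves hermiticity:
# T = TS2C(2)
# print allclose(dot(conj(T.transpose()), T), identity(5)) # expected: True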
def CoulUs(T2C, l):
# Precomputes gaunt coefficients for speed
# shape(gck)=(4,7,7,4). It contains gck(l, m4, m1, k)
gck = gaunt.cmp_all_gaunt()
nw = 2*l+1
T2Cp = conj(T2C.transpose())
UC = zeros((l+1, nw, nw, nw, nw), dtype=complex)
shft = 3-l
for i4 in range(nw):
for i3 in range(nw):
for i2 in range(nw):
for i1 in range(nw):
for k in range(l+1):
dsum = 0
for m4 in range(nw):
for m3 in range(nw):
for m2 in range(nw):
for m1 in range(nw):
if (m1+m2!=m3+m4): continue
dsum += T2Cp[i4,m4] * gck[l,shft+m4,shft+m1,k] * T2C[m1,i1] * T2Cp[i3,m3] * gck[l,shft+m2,shft+m3,k] * T2C[m2,i2]
UC[k,i4,i3,i2,i1] = dsum
# 3-l,3+l
# 3-l,
#print
#for i4 in range(nw):
# for i1 in range(nw):
# for i3 in range(nw):
# for i2 in range(nw):
# f = UC[2,i4,i3,i2,i1]
# print "%6.3f " % f.real,
# print
# print
# print
return UC
def CoulUsC1(l, T2C):
# Precomputes gaunt coefficients for speed
# shape(gck)=(4,7,7,4). It contains gck(l, m4, m1, k)
gck = gaunt.cmp_all_gaunt()
#print 'Gaunt coefficients precomputed - shape(gck)', shape(gck)
nw = 2*l+1
T2Cp = conj(T2C.transpose())
UC = zeros((l+1, nw, nw, nw, nw), dtype=complex)
shft = 3-l
source="""
using namespace std;
for (int i4=0; i4<nw; i4++){
for (int i3=0; i3<nw; i3++){
for (int i2=0; i2<nw; i2++){
for (int i1=0; i1<nw; i1++){
for (int k=0; k<l+1; k++){
complex<double> dsum = 0;
for (int m4=0; m4<nw; m4++){
for (int m3=0; m3<nw; m3++){
for (int m2=0; m2<nw; m2++){
for (int m1=0; m1<nw; m1++){
if (m1+m2!=m3+m4) continue;
dsum += T2Cp(i4,m4)*gck(l,shft+m4,shft+m1,k)*T2C(m1,i1) * T2Cp(i3,m3)*gck(l,shft+m2,shft+m3,k)*T2C(m2,i2);
}
}
}
}
UC(k,i4,i3,i2,i1) = dsum;
}
}
}
}
}
"""
weave.inline(source, ['UC', 'gck', 'l', 'T2C', 'T2Cp', 'shft', 'nw'],
type_converters=weave.converters.blitz, compiler = 'gcc')
return UC
def CoulUsC2(l, T2C):
# Precomputes gaunt coefficients for speed
# shape(gck)=(4,7,7,4). It contains gck(l, m4, m1, k)
gck = gaunt.cmp_all_gaunt()
#print 'Gaunt coefficients precomputed - shape(gck)', shape(gck)
mw = 2*l+1
if len(T2C) == mw:
nw = mw
ns = 1
elif len(T2C) == 2*mw:
nw = 2*(2*l+1)
ns = 2
else:
print "ERROR in atom_d.py: T2C has wrong shape"
sys.exit(0)
T2Cp = conj(T2C.transpose())
UC = zeros((l+1, nw, nw, nw, nw), dtype=complex)
shft = 3-l
shft2 = 2*l+1
Sum1 = zeros((nw,nw,shft2*2),dtype=complex)
Sum2 = zeros((nw,nw,shft2*2),dtype=complex)
source="""
using namespace std;
for (int k=0; k<l+1; k++){
Sum1=0;
for (int i4=0; i4<nw; i4++){
for (int i1=0; i1<nw; i1++){
for (int m4=0; m4<mw; m4++){
for (int m1=0; m1<mw; m1++){
for (int s=0; s<ns; s++) Sum1(i4,i1,m1-m4+shft2) += T2Cp(i4,m4+s*mw)*gck(l,shft+m4,shft+m1,k)*T2C(m1+s*mw,i1);
}
}
}
}
Sum2=0;
for (int i3=0; i3<nw; i3++){
for (int i2=0; i2<nw; i2++){
for (int m3=0; m3<mw; m3++){
for (int m2=0; m2<mw; m2++){
for (int s=0; s<ns; s++) Sum2(i3,i2,m3-m2+shft2) += T2Cp(i3,m3+s*mw)*gck(l,shft+m2,shft+m3,k)*T2C(m2+s*mw,i2);
}
}
}
}
for (int i4=0; i4<nw; i4++){
for (int i3=0; i3<nw; i3++){
for (int i2=0; i2<nw; i2++){
for (int i1=0; i1<nw; i1++){
complex<double> csum=0.0;
for (int dm=0; dm<shft2*2; dm++) csum += Sum1(i4,i1,dm)*Sum2(i3,i2,dm);
UC(k,i4,i3,i2,i1) = csum;
}
}
}
}
}
"""
weave.inline(source, ['UC', 'gck', 'l', 'T2C', 'T2Cp', 'shft', 'nw', 'mw', 'ns', 'shft2', 'Sum1', 'Sum2'],
type_converters=weave.converters.blitz, compiler = 'gcc')
return UC
def CoulUsC(l, T2C, op):
# Precomputes gaunt coefficients for speed
# shape(gck)=(4,7,7,4). It contains gck(l, m4, m1, k)
gck = gaunt.cmp_all_gaunt()
#print 'shape(gck)=', shape(gck)
#print 'Gaunt precomputed'
nw = 2*l+1
# creating large T2C base
if ( len(T2C) < 2*nw ):
if (len(T2C)!=nw): print 'ERROR: Something wrong with size of T2C'
T2Cl = zeros((2*nw,2*nw),dtype=complex)
T2Cl[:nw,:nw] = T2C
T2Cl[nw:,nw:] = T2C
else:
T2Cl = T2C
T2Cp = conj(T2Cl.transpose())
UC = zeros((l+1, 2*nw, 2*nw, 2*nw, 2*nw), dtype=complex)
shft = 3-l
bi = array(op.bi)
sz = array(op.sz)
#print 'bi=', bi
#print 'sz=', sz
#print 'nw=', nw
source="""
using namespace std;
for (int i4=0; i4<2*nw; i4++){
for (int i3=0; i3<2*nw; i3++){
for (int i2=0; i2<2*nw; i2++){
for (int i1=0; i1<2*nw; i1++){
for (int k=0; k<l+1; k++){
complex<double> dsum = 0;
for (int ms4=0; ms4<2*nw; ms4++){
int m4 = bi(ms4);
int s4 = sz(ms4);
//cout<<"ms4="<<ms4<<" "<<m4<<" "<<s4<<endl;
for (int ms3=0; ms3<2*nw; ms3++){
int m3 = bi(ms3);
int s3 = sz(ms3);
//cout<<"ms3="<<ms3<<" "<<m3<<" "<<s3<<endl;
for (int ms2=0; ms2<2*nw; ms2++){
int m2 = bi(ms2);
int s2 = sz(ms2);
//cout<<"ms2="<<ms2<<" "<<m2<<" "<<s2<<endl;
for (int ms1=0; ms1<2*nw; ms1++){
int m1 = bi(ms1);
int s1 = sz(ms1);
//cout<<"ms1="<<ms1<<" "<<m1<<" "<<s1<<endl;
if (m1+m2!=m3+m4) continue;
if (s1!=s4 || s2!=s3) continue;
dsum += T2Cp(i4,ms4)*gck(l,shft+m4,shft+m1,k)*T2C(ms1,i1) * T2Cp(i3,ms3)*gck(l,shft+m2,shft+m3,k)*T2C(ms2,i2);
}
}
}
}
UC(k,i4,i3,i2,i1) = dsum;
}
}
}
}
}
"""
weave.inline(source, ['UC', 'gck', 'l', 'T2C', 'T2Cp', 'shft', 'nw', 'bi', 'sz'],
type_converters=weave.converters.blitz, compiler = 'gcc')
return UC
class operateLS(object):
def __init__ (self, baths, T2C, Q3d):
self.baths = baths
self.Nband = self.baths/2
self.N = self.baths
self.mask=[]
for i in range(self.N): self.mask.append(1<<i);
self.T2C = T2C
self.Q3d = Q3d
if not self.Q3d:
#############################################
# Here for 5d's where spin-orbit is kept #
#############################################
self.Q3d=False
M2=[]
l=(self.Nband-1)/2
print 'orbital angular momentum l =', l
for s in [0.5,-0.5]:
for m in range(-l,l+1):
M2.append( (m+2*s)/2.)
#print 'M2=',M2
self.M2a=zeros((len(M2),len(M2)),dtype=float)
for a in range(len(M2)):
for b in range(len(M2)):
for ms in range(len(M2)):
self.M2a[a,b] += real(conj(T2C[ms,a])*T2C[ms,b]*M2[ms])
#print 'M2a=', self.M2a
else:
####################################################
# Here only for 3d's where spin-orbit is neglected #
####################################################
self.Q3d=True
self.bi=[] # band index
self.sz=[] # sz
for i in range(self.Nband):
self.sz.append(1);
self.bi.append(i);
for i in range(self.Nband):
self.bi.append(i)
self.sz.append(-1)
self.mask_u = []
self.mask_d = []
for i in range(self.Nband):
self.mask_u.append(self.mask[i])
for i in range(self.Nband):
self.mask_d.append(self.mask[self.Nband+i])
def Nel(self, state):
n=0
for k in self.mask:
if (k&state): n+=1
return n
def occup(self, state):
""" gives a list of occupancies per band [n_{band1},n_{band2},...]
"""
oc=[]
for i in range(self.N):
if state & self.mask[i]: oc.append(1)
else: oc.append(0)
return oc
def sign(self, state, mask_min, mask_max):
""" Sign when electron hops from mask_min to mask_max
Counts number of electrons between the two spaces
"""
# mask will run between mask_min to mask_max
mask = mask_min<<1
n=0 # number of electrons between mask_min and mask_max
while (mask<mask_max): # loop to mask_max
if (mask&state): n+=1 # found electron between the two places
mask = mask<<1 # increment the mask
return 1-2*(n%2) # (-1)^n
def sign_(self, state, mask_max):
""" Sign when electron is added to the state (from the left)
"""
# mask will run between mask_min to mask_max
mask = 1
n=0 # number of electrons between mask_min and mask_max
while (mask<mask_max): # loop to mask_max
if (mask&state): n+=1 # found electron between the two places
mask = mask<<1 # increment the mask
return 1-2*(n%2) # (-1)^n
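# Example of the fermionic sign convention (hypothetical state): for
# state=0b0101 (electrons at bits 0 and 2), sign_(state, 1<<3) counts the two
# occupied bits below the insertion point and returns (-1)^2 = +1, while
# sign_(state, 1<<1) sees only bit 0 below and returns -1.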
def N_el_before(self, state, i):
n=0
for q in range(i):
if self.mask[q]&state: n+=1
return n
def DM(self, state):
"Density matrix"
DenM = [[[] for i in range(self.N)] for j in range(self.N)]
for j in range(self.N):
if not self.mask[j]&state: continue # c_j operator
jsig = self.sign_(state, self.mask[j])
nst = state^self.mask[j]
for i in range(self.N):
if self.mask[i]&nst: continue # c_i^\dagger operator
nstate = nst^self.mask[i]
isig = self.sign_(nst, self.mask[i])
DenM[i][j].append( (nstate, jsig*isig) )
return DenM
def Fp(self, state, ib):
""" This implements psi^dagger_{ib} operator acting on state
indexes are:
ib - band+spin index
"""
if state&self.mask[ib]: return (0,1) # This state is already occupied
newstate = state^self.mask[ib]
sig = self.sign_(state, self.mask[ib])
return (newstate, sig)
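# For instance (hypothetical call): Fp(0b001, 1) adds an electron at bit 1 of
# a state occupied at bit 0; one electron sits below the insertion point, so
# it returns (0b011, -1).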
def CoulombU(self, state, UC, FkoJ, Ising=False):
sts=[]
ni=-1
maxk=l+1
if (self.Q3d):
### will evaluate again <sts| U(b,a,j,i) psi^+_b psi^+_a psi_j psi_i | state>, but
### this time U is defined in (m4,m3,m2,m1) basis only, and is missing the spin component.
### Need to make sure that s_1==s_4 and s_2==s_3
### hence <sts| U(m4,m3,m2,m1) psi^+_{m4,s} psi^+_{m3,s'} psi_{m2,s'} psi_{m1,s} | state>
for i in range(self.N):
if not(self.mask[i]&state) : continue # (i,m1) does not exists
ni+=1
state1 = state^self.mask[i]
m1 = self.bi[i]
s1 = self.sz[i]
nj=-1
for j in range(self.N):
if not(self.mask[j]&state1) : continue # (j,m2) does not exists
nj+=1
# here we have: mask[i]&state && mask[j]&state
state2 = state1^self.mask[j]
m2 = self.bi[j]
s2 = self.sz[j]
for a in range(self.N): # (a,m3) exists
if self.mask[a]&state2: continue
if self.sz[a]!=s2 : continue # s3 == s2
na = self.N_el_before(state2,a)
state3 = state2^self.mask[a]
m3 = self.bi[a]
s3 = self.sz[a]
for b in range(self.N): # (b,m4) exists
if self.mask[b]&state3 : continue
if self.sz[b]!=s1: continue # s4 == s1
nb = self.N_el_before(state3,b)
state4 = state3^self.mask[b]
m4 = self.bi[b]
s4 = self.sz[b]
if Ising and state4!=state: continue
sign = 1-2*((ni+nj+na+nb)%2)
U0 = sign*UC[0,m4,m3,m2,m1]*FkoJ[0]
dsum=0
for k in range(1,maxk):
dsum += UC[k,m4,m3,m2,m1]*FkoJ[k]
U1 = sign*dsum
if (abs(U0)>1e-6 or abs(U1)>1e-6): sts.append([state4, [U0,U1]])
else : # This is used for 5d, but not for 3d
### will evaluate <sts| U(b,a,j,i) psi^+_b psi^+_a psi_j psi_i | state>
for i in range(self.N):
if not(self.mask[i]&state) : continue # i should exist, otherwise continue
ni+=1
state1 = state^self.mask[i]
nj=-1
for j in range(self.N):
if not(self.mask[j]&state1) : continue # j should exist, otherwise continue
nj+=1
# here we have: mask[i]&state && mask[j]&state
state2 = state1^self.mask[j]
for a in range(self.N):
if self.mask[a]&state2: continue # a should not exist exist
na = self.N_el_before(state2,a)
state3 = state2^self.mask[a]
for b in range(self.N):
if self.mask[b]&state3: continue # b should not exist
nb = self.N_el_before(state3,b)
state4 = state3^self.mask[b]
if Ising and state4!=state: continue
sign = 1-2*((ni+nj+na+nb)%2)
U0 = sign*UC[0,b,a,j,i]*FkoJ[0]
dsum=0
for k in range(1,maxk):
dsum += UC[k,b,a,j,i]*FkoJ[k]
U1 = sign*dsum
if (abs(U0)>1e-6 or abs(U1)>1e-6): sts.append([state4, [U0,U1]])
return sts
def printn(self, state):
sstate=''
if self.Q3d:
for i in range(self.Nband):
if (state & self.mask_u[i]) and (state & self.mask_d[i]) : sstate += '2'
elif (state & self.mask_u[i]) : sstate += 'u'
elif (state & self.mask_d[i]) : sstate += 'd'
else : sstate += '0'
else:
for i in range(self.N):
if state & self.mask[i]: sstate += '1'
else : sstate += '0'
return sstate
def Mz(self,state):
m2=0.0
for i in range(self.N):
if state&self.mask[i]: m2 += self.M2a[i,i]
return m2
def OrbDiff(self, state, orb1=2, orb2=3):
n1 = 0
if (state&self.mask_u[orb1]): n1+=1
if (state&self.mask_d[orb1]): n1+=1
n2 = 0
if (state&self.mask_u[orb2]): n2+=1
if (state&self.mask_d[orb2]): n2+=1
return n1-n2
#########################
def Sz(self, state):
if not self.Q3d: return 0
nu = 0
nd = 0
for i in range(self.Nband):
if state&self.mask_u[i] : nu += 1
if state&self.mask_d[i] : nd += 1
return nu-nd
def S2(self, state):
l2p1 = self.Nband
sts=[]
# diagonal part
dd=0;
for ilz in range(l2p1):
up=0; dn=0
if self.mask_u[ilz] & state: up = 1
if self.mask_d[ilz] & state: dn = 1
# if only up or only down in certain lz
if up+dn==1: dd += 0.5
# Sz^2
fct = (0.5*self.Sz(state))**2 + dd
# store diagonal
sts.append([state,fct])
# off diagonal
for ilz in range(l2p1):
im1 = self.mask_u[ilz]
im2 = self.mask_d[ilz]
ib1 = bool(state & im1)
ib2 = bool(state & im2)
if ib1 and not ib2: # S^-_i gives nonzero
isig = self.sign(state, min(im1,im2), max(im1,im2))
istate = state^im1^im2
for jlz in range(l2p1):
if (ilz==jlz): continue
jm1 = self.mask_d[jlz]
jm2 = self.mask_u[jlz]
jb1 = bool(state & jm1)
jb2 = bool(state & jm2)
if jb1 and not jb2: # S^+_j gives nonzero
jsig = self.sign(istate, min(jm1,jm2), max(jm1,jm2))
jstate = istate^jm1^jm2
sts.append([jstate, isig*jsig])
return sts
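# Quick check (single up electron, assuming the masks above): Sz=1 and dd=0.5,
# so the diagonal matrix element is (0.5*1)**2 + 0.5 = 0.75 = s*(s+1) for
# s=1/2, and the off-diagonal S^+S^- part generates no extra states.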
def PairHop(self, state):
""" Computes the pair-hopping term: D_a^\dagger D_a , where D_a creates or
anhilates a double occupied site. There is no minus sign in this term!
"""
doubles=[]
empty=[]
for i in range(self.Nband):
if state & self.mask_u[i] and state & self.mask_d[i]:
doubles.append(i)
elif not(state & self.mask_u[i]) and not(state & self.mask_d[i]):
empty.append(i)
rst=[]
for id in doubles:
nst1 = state^self.mask_u[id]^self.mask_d[id]
for ie in empty:
nst2 = nst1^self.mask_u[ie]^self.mask_d[ie]
rst.append(nst2)
return rst
def NDouble(self, state):
ne = 0
for i in range(self.Nband):
if state & self.mask_u[i] and state & self.mask_d[i]: ne += 1
return ne
def OneBodyNab(self, state, Sab):
""" computing the term Sab[a,i] f^+_a f_i
returns all matrix elements generated by the above one-body term
when acting on state
"""
sts=[]
ni=-1
for i in range(self.baths):
if not(self.mask[i]&state) : continue
ni+=1
state1 = state^self.mask[i]
m1 = self.bi[i]
s1 = self.sz[i]
# here we have: mask[i]&state
for a in range(self.baths):
if self.mask[a]&state1 : continue # sz_a == sz_j
# here we have: state&mask[i] and not(state1&mask[a])
na = self.N_el_before(state1,a)
state2 = state1^self.mask[a]
m2 = self.bi[a]
s2 = self.sz[a]
sign = 1-2*((ni+na)%2)
nab = sign*Sab[a,i]
if (abs(nab)>1e-6): sts.append([state2, nab])
return sts
#class operateLS(object):
# def __init__ (self, Nband):
# self.Nband = Nband
# self.baths = 2*Nband
#
# self.N = self.baths
#
# self.mask=[]
# for i in range(self.N): self.mask.append(1<<i);
#
# self.bi=[] # band index
# self.sz=[] # sz
# for i in range(self.Nband):
# self.sz.append(1);
# self.bi.append(i);
# for i in range(self.Nband):
# self.bi.append(i)
# self.sz.append(-1)
#
# self.mask_u = []
# self.mask_d = []
# for i in range(self.Nband):
# self.mask_u.append(self.mask[i])
# for i in range(self.Nband):
# self.mask_d.append(self.mask[self.Nband+i])
#
#
# def printn(self, state):
# sstate=''
# for i in range(self.Nband):
# if (state & self.mask_u[i]) and (state & self.mask_d[i]) : sstate += '2'
# elif (state & self.mask_u[i]) : sstate += 'u'
# elif (state & self.mask_d[i]) : sstate += 'd'
# else : sstate += '0'
# #sstate += ' '
# return sstate
#
# def Nel(self, state):
# n=0
# for k in self.mask:
# if (k&state): n+=1
# return n
#
# def Sz(self, state):
# nu = 0
# nd = 0
# for i in range(self.Nband):
# if state&self.mask_u[i] : nu += 1
# if state&self.mask_d[i] : nd += 1
# return nu-nd
#
# def occup(self, state):
# """ gives a list of occupancies per band [n_{band1},n_{band2},...]
# """
# oc=[]
# for i in range(self.Nband):
# ne = 0
# if state & self.mask_u[i] : ne += 1
# oc.append(ne)
# for i in range(self.Nband):
# ne = 0
# if state & self.mask_d[i] : ne += 1
# oc.append(ne)
# return oc
#
# def sign(self, state, mask_min, mask_max):
# """ Sign when electron hops from mask_min to mask_max
# Counts number of electrons between the two spaces
# """
# # mask will run between mask_min to mask_max
# mask = mask_min<<1
# n=0 # number of electrons between mask_min and mask_max
# while (mask<mask_max): # loop to mask_max
# if (mask&state): n+=1 # found electron between the two places
# mask = mask<<1 # increment the mask
# return 1-2*(n%2) # (-1)^n
#
# def sign_(self, state, mask_max):
# """ Sign when electron is added to the state (from the left)
# """
# # mask will run between mask_min to mask_max
# mask = 1
# n=0 # number of electrons between mask_min and mask_max
# while (mask<mask_max): # loop to mask_max
# if (mask&state): n+=1 # found electron between the two places
# mask = mask<<1 # increment the mask
# return 1-2*(n%2) # (-1)^n
#
# def N_el_before(self, state, i):
# n=0
# for q in range(i):
# if self.mask[q]&state: n+=1
# return n
#
# def DM(self, state):
# "Density matrix"
# DenM = [[[] for i in range(self.baths)] for j in range(self.baths)]
# for j in range(self.baths):
# if not self.mask[j]&state: continue # c_j operator
# jsig = self.sign_(state, self.mask[j])
# nst = state^self.mask[j]
# for i in range(self.baths):
# if self.mask[i]&nst: continue # c_i^\dagger operator
# nstate = nst^self.mask[i]
# isig = self.sign_(nst, self.mask[i])
# DenM[i][j].append( (nstate, jsig*isig) )
# return DenM
#
#
# def S2(self, state):
# l2p1 = self.Nband
# sts=[]
# # diagonal part
# dd=0;
# for ilz in range(l2p1):
# up=0; dn=0
# if self.mask_u[ilz] & state: up = 1
# if self.mask_d[ilz] & state: dn = 1
# # if only up or only down in certain lz
# if up+dn==1: dd += 0.5
# # Sz^2
# fct = (0.5*self.Sz(state))**2 + dd
# # store diagonal
# sts.append([state,fct])
# # off diagonal
# for ilz in range(l2p1):
# im1 = self.mask_u[ilz]
# im2 = self.mask_d[ilz]
# ib1 = bool(state & im1)
# ib2 = bool(state & im2)
# if ib1 and not ib2: # S^-_i gives nonzero
# isig = self.sign(state, min(im1,im2), max(im1,im2))
# istate = state^im1^im2
# for jlz in range(l2p1):
# if (ilz==jlz): continue
# jm1 = self.mask_d[jlz]
# jm2 = self.mask_u[jlz]
# jb1 = bool(state & jm1)
# jb2 = bool(state & jm2)
# if jb1 and not jb2: # S^+_j gives nonzero
# jsig = self.sign(istate, min(jm1,jm2), max(jm1,jm2))
# jstate = istate^jm1^jm2
# sts.append([jstate, isig*jsig])
# return sts
#
#
# def Fp(self, state, ib):
# """ This implements psi^dagger_{ib} operator acting on state
# indexes are:
# ib - band+spin index
# """
# if state&self.mask[ib]: return (0,1) # This state is already occupied
# newstate = state^self.mask[ib]
# sig = self.sign_(state, self.mask[ib])
# return (newstate, sig)
#
# def PairHop(self, state):
# """ Computes the pair-hopping term: D_a^\dagger D_a , where D_a creates or
# anhilates a double occupied site. There is no minus sign in this term!
# """
# doubles=[]
# empty=[]
# for i in range(self.Nband):
# if state & self.mask_u[i] and state & self.mask_d[i]:
# doubles.append(i)
# elif not(state & self.mask_u[i]) and not(state & self.mask_d[i]):
# empty.append(i)
#
# rst=[]
# for id in doubles:
# nst1 = state^self.mask_u[id]^self.mask_d[id]
# for ie in empty:
# nst2 = nst1^self.mask_u[ie]^self.mask_d[ie]
# rst.append(nst2)
# return rst
#
# def NDouble(self, state):
# ne = 0
# for i in range(self.Nband):
# if state & self.mask_u[i] and state & self.mask_d[i]: ne += 1
# return ne
#
# def CoulombU(self, state, UC, FkoJ, Ising=False):
# sts=[]
# ni=-1
# maxk=l+1
# #maxk=2
# for i in range(self.baths):
# if not(self.mask[i]&state) : continue # (i,m1) does not exists
# ni+=1
# state1 = state^self.mask[i]
# m1 = self.bi[i]
# s1 = self.sz[i]
# nj=-1
# for j in range(self.baths):
# if not(self.mask[j]&state1) : continue # (j,m2) does not exists
# nj+=1
# # here we have: mask[i]&state && mask[j]&state
# state2 = state1^self.mask[j]
# m2 = self.bi[j]
# s2 = self.sz[j]
# for a in range(self.baths): # (a,m3) exists
# if self.mask[a]&state2 or self.sz[a]!=s2 : continue # s3 == s2
# na = self.N_el_before(state2,a)
# state3 = state2^self.mask[a]
# m3 = self.bi[a]
# s3 = self.sz[a]
# for b in range(self.baths): # (b,m4) exists
# if self.mask[b]&state3 or self.sz[b]!=s1: continue # s4 == s1
# nb = self.N_el_before(state3,b)
# state4 = state3^self.mask[b]
# m4 = self.bi[b]
# s4 = self.sz[b]
#
#
# if Ising and state4!=state: continue
#
# sign = 1-2*((ni+nj+na+nb)%2)
# U0 = sign*UC[0,m4,m3,m2,m1]*FkoJ[0]
#
# dsum=0
# for k in range(1,maxk):
# dsum += UC[k,m4,m3,m2,m1]*FkoJ[k]
# U1 = sign*dsum
#
# if (abs(U0)>1e-6 or abs(U1)>1e-6): sts.append([state4, [U0,U1]])
# return sts
#
# def OneBodyNab(self, state, Sab):
# """ computing the term Sab[a,i] f^+_a f_i
# returns all matrix elements generated by the above one-body term
# when acting on state
# """
# sts=[]
# ni=-1
# for i in range(self.baths):
# if not(self.mask[i]&state) : continue
# ni+=1
# state1 = state^self.mask[i]
# m1 = self.bi[i]
# s1 = self.sz[i]
# # here we have: mask[i]&state
# for a in range(self.baths):
# if self.mask[a]&state1 : continue # sz_a == sz_j
# # here we have: state&mask[i] and not(state1&mask[a])
# na = self.N_el_before(state1,a)
# state2 = state1^self.mask[a]
# m2 = self.bi[a]
# s2 = self.sz[a]
#
# sign = 1-2*((ni+na)%2)
#
# nab = sign*Sab[a,i]
#
# if (abs(nab)>1e-6): sts.append([state2, nab])
# return sts
def baseN(Nband, prop, Q3d):
Ntot = len(prop)
wstates=[]
if Q3d:
for n1 in range(Nband*2+1):
for sz1 in range(-n1,n1+1,2):
states=[]
for i in range(Ntot):
if prop[i][0]==n1 and prop[i][1]==sz1:
states.append(i)
if (len(states)>0): wstates.append([n1, sz1, states])
else:
for n1 in range(Nband*2+1):
states=[]
for i in range(Ntot):
if prop[i][0]==n1:
states.append(i)
if (len(states)>0): wstates.append([n1, 0, states])
return wstates
def list_to_string(x):
return str(array(x).flatten().tolist())
def analizeGroups(A, small = 1e-4):
groups=[]
for i in range(shape(A)[0]):
nonz=[]
for j in range(shape(A)[1]):
if abs(A[i,j])>small : nonz.append(j)
if (len(nonz)>0): groups.append(nonz)
groups0 = compress(groups)
groups=[]
for i in range(shape(A)[1]):
nonz=[]
for j in range(shape(A)[0]):
if abs(A[j,i])>small : nonz.append(j)
if (len(nonz)>0): groups.append(nonz)
groups1 = compress(groups)
return (groups1,groups0)
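# Illustration (hypothetical matrix): for A = identity(2) each row/column only
# couples to itself, so analizeGroups(A) gives ([[0], [1]], [[0], [1]]); a
# nonzero A[0,1] would merge everything into ([[0, 1]], [[0, 1]]).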
def coupled(A, groups0, groups1, small = 1e-4):
#ng0 = len(array(groups0).flatten().tolist())
#ng1 = len(array(groups1).flatten().tolist())
fpair = [-1]*len(groups0)
#pairs=[]
for ii,ig0 in enumerate(groups0):
nonz=[]
for ir0 in ig0:
for q in range(shape(A)[1]):
if abs(A[ir0,q])>small : nonz.append(q)
for jj,jg1 in enumerate(groups1):
if overlap(nonz,jg1):
#pairs.append([ii,jj])
fpair[ii] = jj
return fpair
def comp(x, y):
if x[2]!=y[2]: return int(x[2]-y[2])
else:
if abs(x[3]-y[3])<1e-5: return 0
elif (x[3]<y[3]): return -1
else: return 1
def SpinOrbitM(l,T2C):
# one electron |l,m,s> base
ms_base=[]
for s in [1/2.,-1/2.]:
for m in range(-l,l+1):
ms_base.append([m,s])
#print 'ms_base=', ms_base
# one electron |j,mj> base
pj = [l-1/2.,l+1/2.]
if l==0 : pj = [0.5]
jj_base=[]
for j in pj:
for mj in arange(-j,j+1):
jj_base.append([j, mj])
#print 'jj_base=', jj_base
# transforms between |lms> and |jmj> base
Tjls = zeros((len(ms_base),len(jj_base)))
for ijj,jj in enumerate(jj_base):
for ims,ms in enumerate(ms_base):
Tjls[ijj,ims] = gaunt.clebschg(jj[0], jj[1], l, ms[0], 1/2., ms[1])
# the one-body operator l*s in matrix form
# in the j-j base
jSO = zeros((len(jj_base),len(jj_base)))
for ijj,jj in enumerate(jj_base):
jSO[ijj,ijj] = 0.5*(jj[0]*(jj[0]+1) - l*(l+1) - 3/4.)
#print 'jSO=', jSO
#mprint(jSO)
# changing to lms base
mSO = matrix(Tjls.transpose())*matrix(jSO)*matrix(Tjls)
# creating large T2C base
if ( len(T2C) < len(jj_base) ):
T2Cl = zeros(tuple(array(shape(T2C))*2),dtype=complex)
T2Cl[:len(T2C),:len(T2C)] = T2C
T2Cl[len(T2C):,len(T2C):] = T2C
else:
T2Cl = T2C
# changing to cubic harmonics base
cSO = matrix(conj(T2Cl.transpose())) * mSO * matrix(T2Cl)
print 'spin-orbit='
mprint(sys.stdout,real(cSO))
return cSO
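# Consistency check (l=1, from 0.5*(j*(j+1)-l*(l+1)-3/4)): the j-diagonal
# entries are -1 for the j=1/2 doublet and +1/2 for the j=3/2 quadruplet, so
# trace(jSO) = 2*(-1) + 4*(1/2) = 0, as expected for an l*s operator.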
def Diagonalize(Ham, small=1e-4, fh=sys.stdout):
""" Diagonalization is done in blocks. This is not because of efficiency but because
the resulting eigenvectors must not mix states of direct base if not absolutely necessary.
If brute force diagonalization is used in large scale problems, eigenvectors can be seriously
mix direct states with different symmetry.
"""
diff = sum(Ham-transpose(conj(Ham)))
if abs(diff)>1e-6:
print 'H NOT HERMITIAN!'
# Check block structure of Hamiltonian
# States which are mixed in Ham will have the same blck[i]
ndim = len(Ham)
blck=range(ndim)
for i in range(ndim):
for j in range(i+1,ndim):
if (abs(Ham[i][j])>small):
commonb = min(blck[i],blck[j])
for k in range(ndim):
if blck[k] in [blck[i],blck[j]]: blck[k]=commonb
#print ('%2d'%i), 'current blck=', '%2d,'*len(blck) % tuple(blck)
# Having blck[i] a new array block[:][:] is created, which contains indexes to all blocks
# for example [[1,2,3],[4,5,6]] for Hamiltonian containing two blocks
block=[]
for i in range(ndim):
bb=[]
for j in range(ndim):
if blck[j]==i: bb.append(j)
if len(bb)>0:
block.append(bb)
#print 'block=', block
# Here we go over all blocks and diagonalize each one.
eigv=[] # contains all eigenvalues
eigx=[] # contains full eigenvectors
for ibl,bl in enumerate(block):
hs = zeros((len(bl),len(bl)), dtype=complex)
for i,ib in enumerate(bl):
for j,jb in enumerate(bl):
hs[i,j] = Ham[ib,jb]
if symeig_available:
eigy = symeig.symeig(hs) # diagonalization of small block
else:
eigy = linalg.eigh(hs)
print >> fh, 'Eigenvalues[',bl,']=',eigy[0]
# Checking if eigenvectors are complex!
for l in range(len(eigy[1])):
imax=0
#print 'shape(eigy[1])', shape(eigy[1])
for iu in range(len(eigy[0])):
#print iu, imax
if abs(eigy[1][iu,l])>abs(eigy[1][imax,l]): imax=iu
z=eigy[1][imax,l]
phi=math.atan2(z.imag,z.real)
eigy[1][:,l] *= exp(-phi*1j)
ime = sum([abs(x.imag) for x in eigy[1][:,l]])
if (abs(ime))<1e-10: ime=0
print >> fh, 'im=%2d %2d %f' % (ibl, l, ime)
#ime = sum([abs(eigy[1][u,l].imag) for u in range(len(eigy[1]))])
            # if ime>1e-7: print 'TROUBLES!!! Complex eigenvector! You should improve that!'
# Creating a big eigenvector with all components
for l in range(len(eigy[1])):
large_eig=zeros(ndim, dtype=complex)
small_eig = eigy[1][:,l]
for m,mb in enumerate(bl): large_eig[mb] = small_eig[m]
eigx.append(large_eig)
eigv += eigy[0].tolist()
# Now we need to sort eigenvectors and eigenvalues
# index is created for sorting
indx=range(ndim)
indx.sort(lambda a,b: cmp(eigv[a],eigv[b]))
# and actual sorting is performed
seigv=[]
seigx=[]
for i in range(ndim):
seigv.append(eigv[indx[i]])
seigx.append(eigx[indx[i]])
# Eigenvectors should be in the form Ham*v[:,i] = w[i]*v[:,i]
# which means that we need to transpose the list of eigenvectors
seigx = array(seigx).transpose()
# We also do a brute force diagonalization just to check if something goes wrong with block diagonalization
# Note that the two resulting eigensystems are not necessary the same due to freedom in choosing eigenvectors
eig = linalg.eigh(Ham)
# If eigenvalues from block diagonalization and full diagonalization are different, something is wrong
if sum(map(abs,eig[0]-array(seigv)))>small:
        print '!!!!! TROUBLE: block diagonalization disagrees with the brute-force eigenvalues!'
print 'The right eigenvalues are:', eig[0]
return [seigv,seigx]
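# Minimal usage sketch (assuming Ham is a hermitian complex array):
#   w, v = Diagonalize(Ham)  # w: sorted eigenvalues; columns v[:,i] satisfy Ham*v[:,i] = w[i]*v[:,i]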
def EquivalentBaths(Eimp):
""" Finds which baths are equivalent from impurity levels"""
wE = [(i,Eimp[i]) for i in range(len(Eimp))]
#print 'wE=', wE
kbths=[]
while len(wE)>0:
En = wE[0][1]
j=0
rr=[]
while j < len(wE):
if abs(En-wE[j][1])<1e-10:
rr.append(wE[j][0])
del wE[j]
else: j+=1
#print 'w', j, rr, En, wE[j][1]
kbths.append(rr)
bathis=range(len(Eimp))
for ik,k in enumerate(kbths):
for ij in k: bathis[ij]=ik
#print 'kbths=', kbths
Ed=[]
for ik,k in enumerate(kbths):
Ed.append(Eimp[k[0]])
return (bathis,kbths,Ed)
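# Example sketch: EquivalentBaths([0.0, 0.0, 0.5]) returns
#   bathis=[0,0,1], kbths=[[0,1],[2]], Ed=[0.0,0.5],
# i.e., baths with identical impurity levels share one equivalence index.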
def thesame(mx,my,small=1e-3):
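    """True if the bubble dictionaries mx and my contain the same keys and all
    corresponding values agree within 'small'."""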
    if sorted(mx.keys()) != sorted(my.keys()): return False
for k in mx.keys():
if abs(mx[k]-my[k])>small: return False
return True
def VEquivalentStates(mps,ind):
""" Finds which states have the same bubbles """
wx = [(i,mps[i]) for i in range(len(mps))]
iequiv=[]
while len(wx)>0:
mx = wx[0][1]
j=0
rr=[]
while j < len(wx):
if thesame(mx,wx[j][1]):
rr.append(wx[j][0])
del wx[j]
else: j+=1
iequiv.append(rr)
for ik in range(len(iequiv)):
for ij in range(len(iequiv[ik])):
iequiv[ik][ij] = ind[iequiv[ik][ij]]
return iequiv
def AverageBubbles(tmps):
""" Compute average over 'almost' equivalent states """
trmp=[]
for mps in tmps:
all_keys=[]
for mp in mps:
all_keys = union(all_keys,mp.keys())
rmp={}
for k in all_keys:
sm=0.0
for mp in mps:
if mp.has_key(k):
sm += mp[k]
#sm/=len(mps)
rmp[k]=sm
trmp.append(rmp)
return trmp
def EquivalentStates(ipE, ipN):
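    """Groups states with equal particle number and (numerically) equal energy
    into equivalence classes. Returns (equiv, iequiv): equiv[i] is the class of
    state i, and iequiv[c] lists the states belonging to class c."""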
iequiv=[]
equiv = range(len(ipE))
leq=0
ju=0
Nmax = ipN[-1]
for Ni in range(Nmax+1):
# all states of the same N are in the interval [ju,je]
je=ju
while je<len(ipN) and ipN[je]==Ni: je+=1
ind = range(ju,je)
ind.sort(lambda x,y: cmp(ipE[x],ipE[y]) )
#print Ni
i0=0
while (i0<len(ind)):
Ec = ipE[ind[i0]]
ieq=[]
#print 'Ec=', Ec
while i0<len(ind) and abs(ipE[ind[i0]]-Ec)<1e-10:
#print ind[i0], ipE[ind[i0]], leq
equiv[ind[i0]] = leq
ieq.append(ind[i0])
i0+=1
leq += 1
iequiv.append(ieq)
#print
#print
ju=je
return (equiv, iequiv)
def RenumberStates(pseudostates, Enes, wstates, S2ws):
    # renumbers states such that each of the 1024 states has a unique index
# also remembers energy and N for each state
ij=0
puniq={}
ipuniq=[]
ipE=[]
ipN=[]
ipS=[]
for ii,iwp in enumerate(pseudostates):
wdim = len(Enes[ii])
for j in range(wdim):
puniq[(ii,j)]=ij
ipuniq.append((ii,j))
ipE.append(Enes[ii][j])
ipS.append(S2ws[ii][j])
wN = sum(wstates[iwp[0]][0])
ipN.append(wN)
ij+=1
return (puniq, ipE, ipN, ipS)
def CreateEmpty3D_Dict(n0,n1,n2):
return [[[{} for i2 in range(n2)] for i1 in range(n1)] for i0 in range(n0)]
def CreateEmpty2D_Dict(n0,n1):
return [[{} for i1 in range(n1)] for i0 in range(n0)]
def ReadTrans(filename, fh_info):
"""Read the self-energy index file Sigind and the local transformation matrix CF from a file"""
fh = open(filename, 'r')
data = fh.readlines()
(n1,n2) = map(int, data[0].split()[:2])
Sigind=[]
for i in range(n1):
Sigind.append( map(int, data[i+2].split()[:n2]) )
Sigind = array(Sigind)
print >> fh_info, 'len(data)', len(data)
print >> fh_info, 'n1=', n1
if len(data) >= n1+n1+3:
n2 = n1
CF=[]
for i in range(n2):
cl = array(map(float, data[n1+3+i].split()))
CF.append( cl[0::2]+cl[1::2]*1j )
CF = array(CF)
elif len(data)>=n1+n1/2+3:
n2 = n1/2
CF=[]
for i in range(n2):
cl = array(map(float, data[n1+3+i].split()))
CF.append( cl[0::2]+cl[1::2]*1j )
CF = array(CF)
CFN = zeros((2*n2,2*n2), dtype=complex)
CFN[:n2,:n2] = CF
CFN[n2:,n2:] = CF
CF = CFN
else:
        CF = identity(n1)
print >> fh_info, 'CF=', CF
return (Sigind, CF)
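# Layout of Trans.dat as inferred from the parsing in ReadTrans() above:
#   line 0           : n1 n2
#   line 1           : header/comment
#   lines 2..n1+1    : Sigind rows (n2 integers each)
#   line n1+2        : header/comment
#   lines n1+3..     : rows of CF, written as Re Im pairs; either n1 rows
#                      (full spin space) or n1/2 rows (orbital space only,
#                      then doubled for spin); if absent, CF defaults to identity.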
def SlaterF(U, J, l):
Fk = zeros((4,4), dtype=float)
if l==0:
# F0 for s-electrons
Fk[0,0] = U
elif l==1:
# F2 for p-electrons
Fk[0,1] = U
if type(J) is list:
Fk[1,1] = 5*J[0]
else:
Fk[1,1] = 5*J
elif l==2:
# F2 and F4 for d-electrons
Fk[0,2] = U
if type(J) is list:
Fk[1,2] = 14./1.625 * J[0]
Fk[2,2] = 14.*0.625/1.625 * J[1]
else:
Fk[1,2] = 14./1.625 * J
Fk[2,2] = 14.*0.625/1.625 * J
elif l==3:
# F2, F4 and F6 for f-electrons
Fk[0,3] = U
if type(J) is list:
Fk[1,3] = 6435./(286+195*0.668+250*0.494) * J[0]
Fk[2,3] = 0.668*6435./539.76 * J[1]
Fk[3,3] = 0.494*6435./539.76 * J[2]
else:
Fk[1,3] = 6435./(286+195*0.668+250*0.494) * J
Fk[2,3] = 0.668*6435./539.76 * J
Fk[3,3] = 0.494*6435./539.76 * J
return Fk
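# A short sketch of the convention used above (scalar J, d-shell l=2):
#   Fk = SlaterF(4.0, 0.8, 2)
#   Fk[0,2] -> 4.0                       # F0 = U
#   Fk[1,2] -> 14./1.625 * 0.8           # F2
#   Fk[2,2] -> 14.*0.625/1.625 * 0.8     # F4 = 0.625*F2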
def Check_T2C_Real(T2C, l, fh_info, small):
"""
Here we added a routine which checks that cubic harmonics are real.
Only in this case operators F^+ and F used in ctqmc will be real.
Otherwise these matrix elements might be complex.
The condition for cubic harmonics to be real is:
Imag( T2C[m,i] + (-1)**m * T2C[-m,i] ) == 0
and
Real( T2C[m,i] - (-1)**m * T2C[-m,i] ) ==0
which follows from the requirement: \sum_m T2C[m,i]*exp(i*m*phi) is real for any phi
We are free to add any phase to cubic harmonics, hence T2C[m,i] -> T2C[m,i]*exp(i*phi_i)
with phi_i being arbitrary
This leads to the following 2x2 system of equations:
( Rp[m,i], Qp[m,i] ) ( sin(phi_i) )
( Qm[m,i], Rm[m,i] ) ( cos(phi_i) ) = 0
where
Qp[m,i] = Imag( T2C[m,i] + (-1)**m * T2C[-m,i] )
Rm[m,i] = Real( T2C[m,i] - (-1)**m * T2C[-m,i] )
Rp[m,i] = Real( T2C[m,i] + (-1)**m * T2C[-m,i] )
Qm[m,i] = Imag(-T2C[m,i] + (-1)**m * T2C[-m,i] )
"""
for i in range(2*l+1):
ctg=None
for m in range(0,l+1):
Qp = T2C[m+l,i].imag + (-1)**m * T2C[-m+l,i].imag
Rm = T2C[m+l,i].real - (-1)**m * T2C[-m+l,i].real
Rp = T2C[m+l,i].real + (-1)**m * T2C[-m+l,i].real
Qm =-T2C[m+l,i].imag + (-1)**m * T2C[-m+l,i].imag
if abs(Qp) > small or abs(Rm) > small:
if abs(Qp) > small :
ctg = -Rp/Qp
xb = -Rp
xa = Qp
if abs(Rm) > small :
ctg = -Qm/Rm
xb = -Qm
xa = Rm
if ctg is not None:
for m in range(0,l+1):
Qp = T2C[m+l,i].imag + (-1)**m * T2C[-m+l,i].imag
Rm = T2C[m+l,i].real - (-1)**m * T2C[-m+l,i].real
Rp = T2C[m+l,i].real + (-1)**m * T2C[-m+l,i].real
Qm =-T2C[m+l,i].imag + (-1)**m * T2C[-m+l,i].imag
if abs(Rp + Qp * ctg)>small or abs(Qm + Rm * ctg)>small:
print 'ERROR: Could not find an angle to make all cubic harmonics real'
phi = arctan2(xa, xb)
#print i, ctg, exp(phi*1j)
print >> fh_info, 'Correcting T2C because original cubic harmonics were not real'
print >> fh_info, 'T2C before correction:'
cprint(fh_info, T2C)
T2C[:,i] = T2C[:,i] * exp(phi*1j)
print >> fh_info, 'T2C after correction:'
cprint(fh_info, T2C)
if __name__ == '__main__':
""" Help here"""
n=[1,2,3] # occupanices used for OCA
l=1 # angular momentum
J = 0.3 # Hunds coupling
qOCA=1 # OCA diagrams are computed
Eoca=10. # Energy window for OCA diagrams
mOCA=1e-3 # matrix element for OCA should be greater than that
Ncentral=[5] # OCA diagrams are selected such that central occupancy is in Ncentral
Ewindow = [-1000,1000]
max_M_size=500
add_occupancy=True
    CoulombF = 'Full' # options: 'Full', 'Ising', 'Bulla_Jarrel', 'Oles'
OCA_G=True
PrintReal=True
HB2 = False
Eimp = [0.0]
Nmax = 1024
small = 1e-6
args = sys.argv[1:]
if ('-h' in args) or ('--help' in args):
print """Code for generating impurity cix file for a d-type of material
The output cix-file can be used for OCA or CTQMC solvers
The outputs:
out.cix -- the oca cix file
        actqmc.cix -- the ctqmc cix file
The input is:
filename -- another python script, which contains some definitions of parameters
Eimp -- list of impurity levels (i.e., [0,0,0...] )
Sigind -- The symmetry of the self-energy and impurity levels given as a 2D-list
CF -- The local rotation matrix given as a 2D list or array
        n          -- a list of occupancies used in the OCA output, e.g. [1,2,3]
l -- angular momentum (l=0, l=1, l=2 supported)
J -- Hund's coupling
qOCA -- OCA diagrams included if qOCA=1
        Eoca       -- OCA diagram cutoff: if any atomic state involved lies more than Eoca above the ground state for a particular occupancy, the diagram is dropped
mOCA -- If matrix element is smaller than mOCA, the diagram is dropped
Ewindow -- Energy window for the states kept (used in ctqmc only)
max_M_size -- maximum matrix size kept for ctqmc
Ncentral -- a list of central occupancies for OCA [1]
        OCA_G      -- bool - compute input for OCA as well
"""
sys.exit(0)
Q3d=None
for arg in args:
if os.path.isfile(arg):
execfile(arg)
print 'Executed file', arg
else:
exec(arg)
#if CoulombF=='Ising': HB2 = True
if CoulombF=='Georges': CoulombF='Ising'
fh_info = open('info_atom_d.dat','w')
print >> fh_info, ' '.join(sys.argv)
print >> fh_info, 'Eimp=', '[','%f, '*len(Eimp) % tuple(Eimp),']'
print >> fh_info, 'n=', n
print >> fh_info, 'l=', l
print >> fh_info, 'J=', J
print >> fh_info, 'qOCA=', qOCA
print >> fh_info, 'Eoca=', Eoca
print >> fh_info, 'mOCA=', mOCA
print >> fh_info, 'Ewindow=', Ewindow
print >> fh_info, 'max_M_size=', max_M_size
ftrans='Trans.dat'
if (len(glob.glob(ftrans))!=0):
print >> fh_info, 'Reading file', ftrans
(Sigind, CF) = ReadTrans(ftrans, fh_info)
if len(Sigind)==(2*l+1):
dn = 2*l+1
SigindN = zeros((2*dn, 2*dn), dtype=int)
SigindN[:dn,:dn] = Sigind
SigindN[dn:,dn:] = Sigind
Sigind=SigindN
if (len(Sigind)!= 2*(2*l+1)):
print 'ERROR: Sigind from file', ftrans, 'does not have correct dimension!'
sys.exit(1)
if len(CF)==2*l+1:
if Q3d==None: Q3d=True # this must be 3d orbital
elif len(CF)==2*(2*l+1):
dn=2*l+1
off_diag = CF[dn:,:dn]
off = sum(sum(abs(off_diag)))
            print 'off-diagonal block of CF, weight=', off
if Q3d==None:
if off>1e-5:
Q3d=False
else:
Q3d=True
else:
print 'ERROR: Transformation CF=T2C does not have correct dimension'
sys.exit(1)
# If any diagonal entry in Sigind[] is equal to zero, we want to project it out.
# This is achieved by making impurity levels Eimp very large for this orbital
Simax = max(map(max,Sigind))
for i in range(len(Sigind)):
if Sigind[i,i]==0:
Sigind[i,i]=Simax+1
if len(Eimp)<=Simax : Eimp += [2000.]
# Matrix contained in Trans.dat should be T(i,m):
# - i runs over real harmonics (z^2, x^2-y^2, yz, xz, xy)
# - m runs over complex harmonics (-2, -1, 0, 1, 2)
# The matrix in Trans.dat, read into CF, is the transpose of
# what we want in T2C, hence the T2C = transpose(CF) statement
if Q3d:
##### change 2013
T2C = transpose(CF[:(2*l+1),:(2*l+1)])
Check_T2C_Real(T2C, l, fh_info, small)
else:
#CFN = zeros(shape(CF),dtype=complex)
#CFN[:,:(2*l+1)] = CF[:,(2*l+1):]
#CFN[:,(2*l+1):] = CF[:,:(2*l+1)]
CFN = CF
T2C = transpose(CFN)
##### change 2013, you will need to generalize this
else:
"""
Cubic harmonics have the order compatible with the Wien2K package:
z^2, x^2-y^2, yz, zx, xy
"""
print ftrans, 'file not found; generating Sigind & complex-to-real spherical harmonics transformation inside atom_d.'
T2C = TS2C(l)
if Q3d==None: Q3d=True
Sigind = zeros((2*(2*l+1),2*(2*l+1)), dtype=int)
if Q3d:
for i in range(2*l+1):
Sigind[i,i] = i+1
Sigind[i+(2*l+1),i+(2*l+1)] = i+1
else:
zr = zeros(2*l+1)
CF = transpose(T2C)
CFn=[]
for i in range(2*l+1):
CFn.append( CF[i].tolist()+zr.tolist() )
CFn.append( zr.tolist()+CF[i].tolist() )
Sigind[2*i,2*i] = i+1
Sigind[2*i+1,2*i+1] = i+1
CFn = array(CFn)
T2C = transpose(CFn)
if Q3d:
global_flip = range(2*l+1) + range(2*l+1)
else:
global_flip=[]
for i in range(2*l+1):
global_flip += [i,i]
print 'Sigind=', Sigind
print 'T2C='
for i in range(len(T2C)):
for j in range(len(T2C)):
print "%6.3f %6.3f " % (T2C[i,j].real, T2C[i,j].imag),
print
print 'global_flip=', global_flip
print >> fh_info, 'Impurity level structure Sigind is:'
print >> fh_info, Sigind
print >> fh_info, 'T2C follows:'
print >> fh_info, '\n'.join(' '.join('%10f %10f' % (x.real, x.imag) for x in row) for row in T2C)
print >> fh_info, 'shape(T2C)=', shape(T2C)
print >> fh_info, 'T2C is Unitary=', sum(abs(matrix(T2C) * matrix(T2C).H - identity(len(T2C))))
    Nitt=1 # To figure out the symmetry, we iterate Nitt times
Jc = J
cx = 0. # No spin orbit at the moment
# Ratio between F2,F4,F6 and J! At the end of the day, we want to use U and J only!
#Fk = gaunt.slaterf(1., Jc)
U0=1.
Fk = SlaterF(U0, Jc, l)
print >> fh_info, 'Slater integrals F^k are ', Fk[:,l]
# one electron base
baths=[]
for s in [1,-1]:
for b in range(2*l+1):
baths.append([b,s])
bathi=[Sigind[b,b]-1 for b in range(len(Sigind))]
dkbth={}
for i in range(len(bathi)):
if dkbth.has_key(bathi[i]):
dkbth[bathi[i]].append(i)
else:
dkbth[bathi[i]]=[i]
kbth=[]
for k in sort(dkbth.keys()):
kbth.append(dkbth[k])
kbth0=[]
for i in range(len(baths)): kbth0.append([i])
bkeep=[]
for b in range(len(bathi)):
if Eimp[bathi[b]]<1000: bkeep.append(b)
tEimp = filter(lambda x: x<1000,Eimp)
tkbth=[]
for k in kbth:
if k[0] in bkeep: tkbth.append(k)
print >> fh_info, 'Some other info in ED:'
print >> fh_info, 'bathi=', bathi
print >> fh_info, 'kbth=', kbth
print >> fh_info, 'tkbth=', tkbth
print >> fh_info, 'Eimp=', Eimp
print >> fh_info, 'tEimp=', tEimp
print >> fh_info, 'bkeep=', bkeep
Ntot = 2**(len(baths)) # size of the direct base
op = operateLS(2*(2*l+1), T2C, Q3d) # operators class
if op.Q3d:
print >> fh_info, 'baths bi=', op.bi
print >> fh_info, 'spin Sz=', op.sz
print >> fh_info, 'mask-down=', op.mask_d
print >> fh_info, 'mask-up =', op.mask_u
    # some properties of the integers which serve as the direct base - partial occupancy and Sz
prop=[]
for i in range(Ntot):
#### 2013 ### all these are wrong for 5d
occ = op.occup(i)
prop.append([sum(occ), op.Sz(i),occ])
# creates direct base from integers having correct properties
# wstates contains [N, Sz, [all direct states with this N and Sz]]
wstates = baseN(2*l+1,prop,op.Q3d)
SO = SpinOrbitM(l,T2C) # Spin orbit matrix
UC = CoulUsC2(l,T2C) # Coulomb repulsion matrix
if os.path.isfile('../Uc.dat') and os.path.getsize('../Uc.dat')>0:
Uc = loadtxt('../Uc.dat')
for m1 in range(5):
for m2 in range(5):
UC[0,m1,m2,m2,m1] = Uc[m1,m2]
#if abs(UC[0,m1,m2,m2,m1])>1e-3:
# print "%2d %2d %2d %2d %5.2f " % (m1, m2, m2, m1, UC[0,m1,m2,m2,m1])
else:
UC[0,:,:,:,:]=0.0
# New for self-energy sampling
UHa=zeros((2*l+1,2*l+1,2*l+1))
UFo=zeros((2*l+1,2*l+1,2*l+1))
for m1 in range(2*l+1):
for m2 in range(2*l+1):
for m3 in range(2*l+1):
for k in range(0,l+1):
UHa[m1,m2,m3] += real(UC[k,m1,m2,m3,m1])*Fk[k,l]
UFo[m1,m2,m3] += real(UC[k,m1,m2,m1,m3])*Fk[k,l]
#print 'Together='
for bs1 in baths:
for bs2 in baths:
for bs3 in baths:
m1 = bs1[0]
s1 = bs1[1]
m2 = bs2[0]
s2 = bs2[1]
m3 = bs3[0]
s3 = bs3[1]
Uc = 0.0
if s2==s3:
if s1==s2: Uc = UHa[m1,m2,m3]-UFo[m1,m2,m3] # Equal spins: Hartree and Fock
                    else: Uc = UHa[m1,m2,m3] # Opposite spins: Hartree only
#print "%10.6f" % Uc,
#print
indx={}
for ni,ns in enumerate(wstates):
indx[(ns[0],ns[1])] = ni # index according to N and Sz of the state
print >> fh_info, 'indx='
print >> fh_info, indx
kindx = indx.keys()
print >> fh_info, 'Stage0: Exact diagonalization of the atom'
mxs = max(map(max,Sigind))
if len(Eimp)<mxs:
print 'ERROR: The dimension of the Eimp should be equal to the maximum index of Sigind->',mxs
sys.exit(1)
Eimpc = zeros((2*(2*l+1), 2*(2*l+1)), dtype=complex)
for ic in range(len(Sigind)):
Eimpc[ic,ic] = Eimp[Sigind[ic,ic]-1]
print >> fh_info, 'impurity levels Eimpc0='
mprint(fh_info, real(Eimpc))
#Eimpc = matrix(T2C) * matrix(Eimpc) * matrix(T2C).H
#print 'Eimpc1='
#mprint(Eimpc)
Ene=[] # Energy
Te=[] # eigenvectors
S2w=[] # Spin
for ni,ns in enumerate(wstates):
#print 'Br:', 'n=', ns[0], 'sz=', ns[1]/2.
print >> fh_info, '----------------------------------------------------'
print >> fh_info, 'n=', ns[0], 'sz=', ns[1]/2.
states = ns[2]
# printing all states in this sector
print >> fh_info, 'states=',
for ist,st in enumerate(states): print >> fh_info, ('%d:'%ist),op.printn(st),
print >> fh_info
S2 = zeros((len(states),len(states)),dtype=complex)
if CoulombF != 'Ising' and op.Q3d:
# Computes matrix of S^2
for js,st in enumerate(states):
#print st, op.printn(st), " ",
stn = op.S2(st)
#print stn
for ps in stn:
ii = ps[0]
iu = states.index(ii)
S2[js,iu] += ps[1]
Ham = zeros((len(states),len(states)),dtype=complex)
for js,st in enumerate(states):
# on-site Energies in base of cubic harmonics
# contain crystal-field splittings
DM = op.DM(st)
for i in range(len(Eimpc)):
for j in range(len(Eimpc)):
if abs(Eimpc[i,j])<1e-5:continue
for p in DM[i][j]:
iu = states.index(p[0])
Ham[js,iu] += p[1]*Eimpc[i,j]
if CoulombF=='Full':
## Coulomb interaction including F2 and F4
cst = op.CoulombU(st, UC, Fk[:,l])
for cs in cst:
ii = cs[0]
U0 = cs[1][0]
U1 = cs[1][1]
iu = states.index(ii)
Ham[js,iu] += 0.5*U1 # adding only F2,F4,... but not F0
Ham[js,iu] += 0.5*U0 # adding only F2,F4,... but not F0
elif CoulombF=='Ising':
## Coulomb interaction including F2 and F4
cst = op.CoulombU(st, UC, Fk[:,l],Ising=True)
for cs in cst:
ii = cs[0]
U0 = cs[1][0]
U1 = cs[1][1]
iu = states.index(ii)
Ham[js,iu] += 0.5*U1 # adding only F2,F4,... but not F0
Ham[js,iu] += 0.5*U0 # adding only F2,F4,... but not F0
elif CoulombF in ['Bulla_Jarrel', 'Oles']:
occ = op.occup(st)
# Model Coulomb interaction -J*S^2
cst = op.S2(st)
#print 'S2=', cst
for cs in cst:
ii = cs[0]
iu = states.index(ii)
Ham[js,iu] += -Jc*cs[1]
nd = sum(occ)
#print 'nd=', nd
#print 'h1='
#mprint(Ham)
if CoulombF == 'Bulla_Jarrel':
#### Model Coulomb interaction is -J*S^2 + J*N - 1/4*J*N^2
Ham[js,js] += Jc*nd*(1-0.25*nd)
else :
## Model Coulomb interaction is -J*S^2 - J[5/4*N^2-3/2*N-ND] + J D_a^+ D_b
ND = op.NDouble(st)
# See e.g.: PRB 72, 214431 (2005), Eq.2.5.
Ham[js,js] += Jc*(-5/4.*nd**2 + 2.*nd + ND)
if ND>0:
ph = op.PairHop(st)
for p in ph:
iu = states.index(p)
Ham[js,iu] += Jc
else:
print 'Not yet implemented!'
sys.exit(1)
# Spin-orbit interaction
if cx>1e-5:
cst = op.OneBodyNab(st, SO)
for cs in cst:
iu = states.index(cs[0])
#if (js>=len(states) or iu>=len(states)): print 'Tezave!'
Ham[js,iu] += cs[1]*cx
#if (cx>1e-5): cprint(Ham)
#else:
#print >> fh_info, 'H='
#mprint(fh_info, Ham)
if CoulombF != 'Ising':
eig = Diagonalize(Ham, small, fh_info) # Block diagonalization is better!!
Ex = eig[0]
Tx = eig[1]
Ene.append( Ex )
Te.append( Tx )
else:
Ex = [real(Ham[i,i]) for i in range(len(Ham))]
Tx = eye(len(Ham),len(Ham))
Ene.append( Ex )
Te.append( Tx )
if CoulombF != 'Ising' and op.Q3d:
# Here we compute matrix of S^2 in eigenbase. Should be diagonal if no spin-orbit coupling
S2e = matrix(conj(Tx.transpose())) * S2 * matrix(Tx)
printS=False
trouble=[]
for i0 in range(shape(S2e)[0]):
for i1 in range(shape(S2e)[1]):
if i0!=i1 and abs(S2e[i0,i1])>1e-6 :
print 'WARNING: Troubles->', i0, i1, S2e[i0,i1]
printS=True
trouble.append(i0)
            printS=False # DELETE THIS: temporarily disables the S^2 trouble printout below
if printS:
print >> fh_info, 'S2='
mprint(fh_info, S2e)
print >> fh_info, 'H='
cprint(fh_info, Ham)
for it,t in enumerate(trouble):
print >> fh_info, 'A[%d]=' % t
print >> fh_info, Tx[t]
S2w.append([0.5*int(round(-1+sqrt(1+4*S2e[i,i].real))) for i in range(len(S2e))]) # Spin is computed using formula s(s+1)
else:
S2w.append( [0 for i in range(len(S2))] )
print >> fh_info, 'E=', '%f '*len(Ex) % tuple(Ex)
#print 'kindx=', kindx
#print 'wstates=', wstates
print 'Br:', 'kindx=', kindx
# Here we create index for psi^dagger
iFi = zeros((len(wstates),len(baths)),dtype=int)
for ni,ns in enumerate(wstates):
for ib,be in enumerate(baths):
if op.Q3d:
st = (ns[0]+1, ns[1] + be[1]) # (n+1,sz+s)
if st in kindx:
iFi[ni,ib] = indx[st]
else:
st = (ns[0]+1, 0) # (n+1,sz+s)
if st in kindx:
iFi[ni,ib] = indx[st]
wgr=[]
for iw in range(len(wstates)): wgr.append([])
print 'Br:', 'Stage1: Computing F^ in direct base'
print >> fh_info, 'Stage1: Computing F^ in direct base'
# Below we compute matrix elements of F^ (FKP)
kindx = indx.keys()
FKP = []
for ni,ns in enumerate(wstates):
states = ns[2]
bFp=[]
if CoulombF == 'Ising':
wgr[ni] += [[ist] for ist in range(len(states))]
for ib,wib in enumerate(baths):
inew = iFi[ni,ib]
if inew==0:
bFp.append([])
continue
newstates = wstates[inew][2]
Fp = zeros((len(states), len(newstates)), dtype=complex)
for js,st in enumerate(states):
(newst, sig) = op.Fp(st, ib)
if newst>0:
ii = newstates.index(newst)
Fp[js,ii] += sig
#print 'state=', st, newst, ii
if CoulombF == 'Ising':
bFp.append(Fp)
else:
Fpn = matrix(conj(Te[ni].transpose())) * matrix(Fp) * matrix(Te[inew])
# Set to zero small values
for i0 in range(shape(Fpn)[0]):
for i1 in range(shape(Fpn)[1]):
if abs(Fpn[i0,i1])<small: Fpn[i0,i1]=0.0
gr = analizeGroups(Fpn, small)
# |inew> = F^+ |ni>
wgr[ni] += gr[0] # which states are coupled by F^ in |ni>
wgr[inew] += gr[1] # which states are coupled by F^ in |inew>
bFp.append(Fpn)
FKP.append(bFp)
#FKP created!
print 'Br:', 'Stage2: Compressing F^+ according to its block diagonal form'
print >> fh_info, 'Stage2: Compressing F^+ according to its block diagonal form'
for i in range(len(wstates)):
wgr[i] = compress(wgr[i])
print >> fh_info, i+1, wgr[i]
print >> fh_info, 'Stage3: Renumbers states -- creates superstates for ctqmc'
print 'Br:', 'Stage3: Renumbers states -- creates superstates for ctqmc'
    # Here we store the ground-state energy and N of each superstate, to be used later for sorting
tstates=[]
for iw in range(len(wstates)):
Nt = sum(wstates[iw][0])
for ip in range(len(wgr[iw])):
Eg = Ene[iw][wgr[iw][ip][0]]
tstates.append([iw,ip,Nt,Eg])
tstates.sort(comp)
# tstates contains [index-to-wstates, index-to-state-inside-wstates, N, E]
# superstates == pseudostates are defined
pseudostates=[]
indpseudo={}
jj=0
for st in tstates:
iw = st[0]
ip = st[1]
pseudostates.append([iw,ip])
indpseudo[(iw,ip)] = jj
jj+=1
iFi_inside=[]
for iw in range(len(wstates)):
biFi=[]
for ib in range(len(baths)):
ifi = iFi[iw,ib]
if ifi>0:
fpair = coupled(FKP[iw][ib], wgr[iw], wgr[ifi], small)
biFi.append(fpair)
else:
biFi.append([])
iFi_inside.append(biFi)
# creates arrays containing Energy, occupancy and index table for all superstates
iFinal = zeros((len(pseudostates),len(baths)),dtype=int)
Enes = []
S2ws = []
Occ=[]
for ii,iwp in enumerate(pseudostates):
iw = iwp[0]
ip = iwp[1]
wstate = wstates[iw]
group = wgr[iw][ip]
for ib in range(len(baths)):
ifi = iFi[iw,ib]
ifinal = -1
if (ifi>0):
ifi_ins = iFi_inside[iw][ib][ip]
if ifi_ins>=0:
ifinal = indpseudo[(ifi,ifi_ins)]
iFinal[ii,ib] = ifinal
Ens=[]
occ=[]
S2s=[]
for iq,q in enumerate(group):
Ens.append(Ene[iw][q])
occ.append(wstate[0])
S2s.append(S2w[iw][q])
Enes.append(Ens)
Occ.append(occ)
S2ws.append(S2s)
#print 'pseu=', pseudostates
#print 'Enes=', Enes
#print 'Occ=', Occ
#print 'S2=', S2ws
print >> fh_info, 'Stage4: F^dagger matrices between superstates evaluated'
print 'Br:', 'Stage4: F^dagger matrices between superstates evaluated'
# creates small F^dagger matrices between superstates
maxs = 0
rFKP = []
rNn=[] # This is the matrix F*F^ == 1-N
for ii,iwp in enumerate(pseudostates):
iw = iwp[0]
ip = iwp[1]
wstate = wstates[iw]
group = wgr[iw][ip]
bNn=[]
bFKP=[]
for ib in range(len(baths)):
ifi = iFi[iw,ib]
ifinal = iFinal[ii,ib]
if (ifi>0): ifi_ins = iFi_inside[iw][ib][ip]
if ifinal>=0:
M = zeros((len(group),len(wgr[ifi][ifi_ins])),dtype=complex)
#Nn =zeros(len(group),dtype=float)
for ii0,i0 in enumerate(group):
for jj0,j0 in enumerate(wgr[ifi][ifi_ins]):
M[ii0,jj0] = FKP[iw][ib][i0,j0]
#Nn[ii0] = sum(map(lambda x: x**2, M[ii0]))
if max(shape(M)) > maxs : maxs = max(shape(M))
Nn = zeros((len(group),len(group)))
for ii0,i0 in enumerate(group):
for ii1,i1 in enumerate(group):
Nn[ii0,ii1]=sum(M[ii0]*M[ii1]).real
#print 'ii=', ii, 'ib=', ib, 'ifinal=', ifinal, 'M=', M, 'Nn=', Nn, 'Nt=', Nt
else:
M = array([])
Nn = array([])
bFKP.append(M)
bNn.append(Nn)
rFKP.append(bFKP)
rNn.append(bNn)
############################################################################################
######## The part of the code between this symbols, generates input for OCA solver #######
############################################################################################
if (OCA_G):
print >> fh_info, 'Stage5: Renumber states for oca - each atomic states has unique number'
        # renumbers states such that each of the 1024 states has a unique index
# also remembers energy and N for each state
(puniq, ipE, ipN, ipS) = RenumberStates(pseudostates, Enes, wstates, S2ws)
# bubbles will be accessed by bubbles[ib][ii][ij]
# where ib is integer running over all baths, ii is integer running over all possible states
# and ij is integer which can be accessed by F^{+,ib}|ii>
bubbles = CreateEmpty2D_Dict(len(kbth), len(ipE))
FpF = zeros((len(ipE),len(kbth)))
smallb=1e-4
for ib,bs in enumerate(kbth): # over all different baths
bubl=bubbles[ib]
for ii,iwp in enumerate(pseudostates): # over all pseudostates
for iib in bs: # over equivalent baths
ifinal = iFinal[ii,iib]
if ifinal>=0:
dims = shape(rFKP[ii][iib])
for i0 in range(dims[0]):
istr = puniq[(ii,i0)]
for j0 in range(dims[1]):
iend = puniq[(ifinal,j0)]
FpF[istr][ib] += abs(rFKP[ii][iib][i0,j0])**2
if (abs(rFKP[ii][iib][i0,j0])>smallb):
if bubl[istr].has_key(iend):
bubl[istr][iend] += abs(rFKP[ii][iib][i0,j0])**2
else:
bubl[istr][iend] = abs(rFKP[ii][iib][i0,j0])**2
(equiv, iequiv) = EquivalentStates(ipE, ipN)
# Now we have all the bubbles.
# We need to find which states are equivalent
for tt in range(Nitt):
# We want to merge bubbles that we believe are equivalent
ebubbles=[]
for ib,bs in enumerate(kbth):
bubl = bubbles[ib]
nbubl=[]
for i in range(len(bubl)): nbubl.append({})
for i0 in range(len(bubl)):
for i1 in bubl[i0].keys():
if nbubl[i0].has_key(equiv[i1]):
nbubl[i0][equiv[i1]] += bubl[i0][i1]
else:
nbubl[i0][equiv[i1]] = bubl[i0][i1]
ebubbles.append(nbubl)
            # Here we check if the states, which we identified above as equivalent,
# really are equivalent, i.e., have the same type of bubble
new_iequiv=[]
back_bubs=[]
for ii in iequiv:
cbubs=[]
for ij in ii:
cbub={}
for ib in range(len(kbth)):
cbub.update(ebubbles[ib][ij])
cbubs.append(cbub)
abubs = AverageBubbles([[ebubbles[ib][ij] for ij in ii] for ib in range(len(kbth))])
ieqs = VEquivalentStates(cbubs,ii)
back_bubs.append(abubs)
new_iequiv += ieqs
new_equiv=range(len(equiv))
for i,ii in enumerate(new_iequiv):
for j in ii: new_equiv[j]=i
if len(iequiv)==len(new_iequiv) or tt+1==Nitt: break
equiv = new_equiv
iequiv = new_iequiv
print >> fh_info, 'before qOCA'
if qOCA: # Here we add second order OCA diagrams to NCA bubbles
# Second order diagramms
# OCAdiag will be accessed by OCAdiag[ib1][ib2][ii][ij]
# where ib1, ib2 is integer running over all baths, ii is integer running over all possible states
# and ij is integer which can be accessed by F^{+,ib}|ii>
OCAdiag = CreateEmpty3D_Dict(len(kbth), len(kbth), len(iequiv))
for ib1,bs1 in enumerate(kbth): # over all baths ones
for ib2,bs2 in enumerate(kbth): # over all baths twice
OCAs=OCAdiag[ib1][ib2]
for ii,iwp in enumerate(pseudostates): # over all pseudostates
for iib1 in bs1: # over equivalent baths ones
for iib2 in bs2: # over equivalent baths twice
ifinal_j = iFinal[ii,iib1]
ifinal_l = iFinal[ii,iib2]
if ifinal_j>=0 and ifinal_l>=0:
ifinal_k = iFinal[ifinal_j,iib2]
ifinal_k2 = iFinal[ifinal_l,iib1]
if ifinal_k>=0 and ifinal_k2>=0 and ifinal_k==ifinal_k2:
Fij = rFKP[ii][iib1]
Fil = rFKP[ii][iib2]
Fjk = rFKP[ifinal_j][iib2]
Flk = rFKP[ifinal_l][iib1]
(dims_i, dims_j) = shape(Fij)
(dims_i2,dims_l) = shape(Fil)
(dims_j2, dims_k) = shape(Fjk)
(dims_l2,dims_k2) = shape(Flk)
if dims_i != dims_i2: print 'Troubles i'
if dims_j != dims_j2: print 'Troubles j'
if dims_l != dims_l2: print 'Troubles l'
if dims_k != dims_k2: print 'Troubles k'
for i0 in range(dims_i):
iu = equiv[puniq[(ii,i0)]]
for j0 in range(dims_j):
ju = equiv[puniq[(ifinal_j,j0)]]
if (abs(Fij[i0,j0])<smallb): continue
for k0 in range(dims_k):
ku = equiv[puniq[(ifinal_k,k0)]]
if (abs(Fjk[j0,k0])<smallb): continue
for l0 in range(dims_l):
lu = equiv[puniq[(ifinal_l,l0)]]
if (abs(Fil[i0,l0])<smallb): continue
if (abs(Flk[l0,k0])<smallb): continue
contr = -Fij[i0,j0]*Fjk[j0,k0]*Flk[l0,k0]*Fil[i0,l0]
akey = (ju,ku,lu)
if OCAs[iu].has_key(akey):
OCAs[iu][akey] += contr
else:
OCAs[iu][akey] = contr
# OCA diagramms are renumbered to (i0,i1,i2,i3,ib1,ib2) where
# i0,i1,i2,i3 are atomic states involved in the diagram and ib1,ib2 are the two bath
# propagators in the diagram.
OCAf={}
for i in range(len(iequiv)):
for ib1 in range(len(kbth)):
for ib2 in range(len(kbth)):
for ks in OCAdiag[ib1][ib2][i].keys():
if abs(OCAdiag[ib1][ib2][i][ks])<1e-10: continue
if (ib2<=ib1):
new_key = (i,) + ks + (ib1,ib2)
OCAf[new_key] = OCAdiag[ib1][ib2][i][ks]
else: # Due to time invariance, some diagrams are equivalent. For example
# 0 (b1) 1 (b2) 4 (b1) 2 (b2) 0 and 0 (b2) 2 (b1) 4 (b2) 1 (b1) 0
new_key = (i,) + (ks[2],ks[1],ks[0]) + (ib2,ib1)
OCAf[new_key] = OCAdiag[ib1][ib2][i][ks]
        # Here we regroup N_a = F^+ F in a more convenient way
rFpF = zeros((len(iequiv),len(kbth)))
for i in range(len(equiv)):
df = FpF[0]-FpF[i]
for j in range(len(df)): # if we don't do that, the occupancy can be annoyingly negative
if abs(df[j])<1e-10: df[j]=0
rFpF[equiv[i]] += df
for i,ii in enumerate(iequiv):
rFpF[i]/=len(iequiv[i]) # it has to be average, not the sum
        # Bubbles contain diagrams named b (back). From these we also need to construct
        # the other diagrams, which are called f (forward).
forw_bubs = CreateEmpty2D_Dict(len(iequiv), len(kbth))
for i in range(len(iequiv)):
for b in range(len(kbth)):
for ks in back_bubs[i][b]:
forw_bubs[ks][b][i]= back_bubs[i][b][ks]
# We want to have energy of each OCA-pseudoparticle ready
Eq=zeros(len(iequiv))
Egs=zeros(len(iequiv))
Nq=zeros(len(iequiv),dtype=int)
Nc0=0; Eg0=ipE[0]
for i,ii in enumerate(iequiv):
Nq[i] = ipN[ii[0]]
Eq[i] = ipE[ii[0]]
Egs[i] = Eg0 # ground state in this sector
if (Nq[i]>Nc0):
Nc0=Nq[i]
Eg0=Eq[i]
Egs[i]=Eg0
# last renumbering for printing!
        # Only occupancies in n=[....] need to be kept. The rest of the diagrams are ignored.
pu=-ones(len(iequiv),dtype=int)
pl=0
for i,ii in enumerate(iequiv):
if Nq[i] in n:
pu[i]=pl
pl+=1
# Printing output for OCA
foca=open('out.cix', 'w')
print >> foca, '# Input file for OCA impurity solver.', 'l=', l, 'J=', Jc, 'Eimp=', Eimp, 'c=', cx, 'mOCA=', mOCA, 'Eoca=', Eoca
print >> foca, len(kbth), (("%d "*len(kbth)) % tuple(map(len,kbth))), pl, 0,
        print >> foca, '# Number of baths, their degeneracies, and the number of local valence and local core states'
print >> foca, '%3s' % '#',
print >> foca, ("%6s")*len(kbth) % tuple(map(lambda x: 'N'+str(x), range(len(kbth)))),
print >> foca, "%4s" % 'Mtot', '%4s' % 'deg', '%10s' % 'Eatom',
print >> foca, ("%3s"%'#b')*len(back_bubs[i]),
print >> foca, ("%3s"%'#f')*len(forw_bubs[i])
for i,ii in enumerate(iequiv):
if pu[i] <0 : continue # state not used
print >> foca, "%3d" % pu[i], (("%6.2f")*len(kbth) % tuple(rFpF[i])), "%4d" % Nq[i], #ipN[ii[0]],
print >> foca, "%4d" % len(ii),
Eatom = ipE[ii[0]] # This is the atomic energy
for ib in range(len(kbth)): Eatom -= rFpF[i][ib]*Eimp[ib] # This part will be added back inside the impurity solver, therefore the energy should be subtracted
print >> foca, "%10.4f" % Eatom,
for b in range(len(kbth)): # delete diagrams which include states that were removed
for ks in back_bubs[i][b].keys():
if pu[ks]<0: # this diagram involves state not considered
del back_bubs[i][b][ks]
for ks in forw_bubs[i][b].keys():
if pu[ks]<0: # this diagram involves state not considered
del forw_bubs[i][b][ks]
print >> foca, ("%3d"*len(back_bubs[i])) % tuple(map(len,back_bubs[i])),
print >> foca, ("%3d"*len(forw_bubs[i])) % tuple(map(len,forw_bubs[i])),
print >> foca, ' ',
for b in range(len(kbth)):
for ks in back_bubs[i][b]:
print >> foca, "%6.2f x %-3d " % (back_bubs[i][b][ks], pu[ks]),
for b in range(len(kbth)):
for ks in forw_bubs[i][b]:
print >> foca, "%6.2f x %-3d " % (forw_bubs[i][b][ks], pu[ks]),
print >> foca, '# S ', ipS[ii[0]], ' Eatom=', ipE[ii[0]]
if qOCA:
print >> foca, '# OCA diagrams, information is (pp0,pp1,pp2,pp3) (b1,b2) fact , where pp is pseudoparticle and b is bath'
OCAF = OCAf.items()
OCAF.sort(lambda x,y: cmp(y[1],x[1]))
for i in range(len(OCAF)):
excitedE = [Eq[j]-Egs[j] for j in OCAF[i][0]]
states_involved = [pu[l] for l in OCAF[i][0][:4]]
#print states_involved
if (-1 in states_involved): continue # One of the states is not considered
                if max(excitedE)>Eoca: continue # kept only if all states involved lie within Eoca of the ground-state energy for this occupancy
if abs(OCAF[i][1])<mOCA: continue # Matrix element negligible
if not (Nq[OCAF[i][0][1]] in Ncentral): continue
print >> foca, "%3d %3d %3d %3d " % tuple(states_involved), #tuple([pu[l] for l in OCAF[i][0][:4]]),
print >> foca, "%2d %2d" % tuple(OCAF[i][0][4:]),
print >> foca, real(OCAF[i][1]),
print >> foca, ' #', [Eq[j]-Egs[j] for j in OCAF[i][0]]
############################################################################################
######## End of the part which generates input for OCA solver #######
############################################################################################
# Extract low energy states
lowE=[]
low_maxsize=0
for ii in range(len(Enes)):
size=0
plowE=[]
for iq in range(min(len(Enes[ii]),max_M_size)):
if Enes[ii][iq]>=Ewindow[0] and Enes[ii][iq]<Ewindow[1] and iq<Nmax:
plowE.append(iq)
size += 1
if size>low_maxsize: low_maxsize = size
if len(plowE)>0:
lowE.append((ii,plowE))
# Creates index array between all states and low energy ones
inv_lowE1={-1:-1}
for i in range(len(pseudostates)): inv_lowE1[i]=-1
for i in range(len(lowE)):
ii = lowE[i][0]
inv_lowE1[ii]=i
fcix = open('actqmc.cix', 'w')
# ---------------------------------------------------------------------------------------
# -------------- Below is printing for ctqmc solver ------------------------------------
# ---------------------------------------------------------------------------------------
print >> fcix, '# CIX file for ctqmc! '
print >> fcix, '# cluster_size, number of states, number of baths, maximum_matrix_size'
print >> fcix, 1, len(lowE), len(bkeep), low_maxsize
print >> fcix, '# baths, dimension, symmetry'
for ib in range(len(bkeep)):
print >> fcix, ib, ' ', 1, Sigind[bkeep[ib],bkeep[ib]]-1, ' ', global_flip[bkeep[ib]]
print >> fcix, '# cluster energies for non-equivalent baths, eps[k]'
for E in tEimp: print >> fcix, E,
print >> fcix
print >> fcix, '# N K Sz size'
for i in range(len(lowE)):
ii = lowE[i][0]
iwp = pseudostates[ii]
iw = iwp[0]
ip = iwp[1]
wstate = wstates[iw]
if op.Q3d:
Mz = sum(wstate[1])/2.
gs=wstate[2][ip]
Mz = op.OrbDiff(gs) #### Here we compute N_{xz}-N_{yz}
else:
gs=wstate[2][ip]
Mz = op.Mz(gs)
print >> fcix, "%3d %2d %2d %6.3f %2d " % (i+1, sum(wstate[0]), 0, Mz, len(lowE[i][1])),#len(Enes[ii])),
for ib in bkeep:
ifinal = iFinal[ii,ib]
print >> fcix, "%3d" % (inv_lowE1[ifinal]+1),
print >> fcix, " ",
for iq in lowE[i][1]:
print >> fcix, "%10.6f" % (Enes[ii][iq],),
print >> fcix, " ",
for iq in lowE[i][1]:
print >> fcix, S2ws[ii][iq],
if CoulombF == 'Ising':
print >> fcix, " # ", op.printn(gs),
print >> fcix
print >> fcix, '# matrix elements'
for i in range(len(lowE)):
ii = lowE[i][0]
for ib in bkeep:
ifinal = iFinal[ii,ib]
low_ifinal = inv_lowE1[ifinal]
print >> fcix, "%3d %3d " % (i+1, low_ifinal+1),
if low_ifinal>=0:
ind0 = lowE[i][1]
ind1 = lowE[low_ifinal][1]
print >> fcix, "%2d %2d" % (len(ind0), len(ind1)),
for i0 in ind0:
for j0 in ind1:
x = rFKP[ii][ib][i0,j0]
if abs(x.imag)<1e-4 or PrintReal:
print >> fcix, x.real,
else:
print >> fcix, x,
else:
print >> fcix, "%2d %2d" % (0, 0),
print >> fcix
if HB2 : print >> fcix, 'HB2'
else: print >> fcix, 'HB1'
if (HB2):
print >> fcix, "# Uc = U[m1,m2,m3,m1]-U[m1,m2,m1,m3] ; loops [m1,m2,m3]"
for bs1 in baths:
for bs2 in baths:
for bs3 in baths:
m1 = bs1[0]
s1 = bs1[1]
m2 = bs2[0]
s2 = bs2[1]
m3 = bs3[0]
s3 = bs3[1]
Uc = 0.0
if s2==s3:
if s1==s2: Uc = UHa[m1,m2,m3]-UFo[m1,m2,m3] # Equal spins: Hartree and Fock
                        else: Uc = UHa[m1,m2,m3] # Opposite spins: Hartree only
print >> fcix, "%10.6f" % Uc,
print >> fcix
print >> fcix, '# number of operators needed'
if not add_occupancy:
print >> fcix, '0'
else:
print >> fcix, '1'
print >> fcix, '# Occupancy '
for i in range(len(lowE)):
ii = lowE[i][0]
ind0 = lowE[i][1]
#tkbth = kbth
#if HB2: tkbth=kbth0
for ikb,bt in enumerate(tkbth):
Oub = zeros((len(ind0),len(ind0)),dtype=float)
for ib in bt:
Nm = zeros((len(ind0),len(ind0)),dtype=float)
if len(rNn[ii][ib])>0:
#Nm = rNn[ii][ib]
for j in range(len(ind0)):
for k in range(len(ind0)):
Nm[j,k] = rNn[ii][ib][ind0[j],ind0[k]]
Oub += identity(len(ind0))-Nm
print >> fcix, ("%3d " % (i+1)),
print >> fcix, "%2d %2d" % (len(ind0), len(ind0)),
for iw,i0 in enumerate(ind0):
for iz,j0 in enumerate(ind0):
ff = Oub[iw,iz]
if abs(ff)<small: ff=0.0
print >> fcix, ff,
print >> fcix
print >> fcix, '# Data for HB1'
print >> fcix, 1, len(pseudostates), len(bkeep), maxs
print >> fcix, '# ind N K Jz size'
for ii,iwp in enumerate(pseudostates):
iw = iwp[0]
ip = iwp[1]
wstate = wstates[iw]
print >> fcix, "%3d %3d %2d %2d %4.1f %2d " % (ii+1, inv_lowE1[ii]+1, sum(wstate[0]), 0, sum(wstate[1])/2., len(Enes[ii])),
for ib in bkeep:
print >> fcix, "%3d" % (iFinal[ii,ib]+1),
print >> fcix, " ",
for iq in range(len(Enes[ii])):
print >> fcix, Enes[ii][iq],
print >> fcix, " ",
for iq in range(len(Enes[ii])):
print >> fcix, 0,
print >> fcix, " ",
print >> fcix
print >> fcix, '# matrix elements'
for ii in range(len(pseudostates)):
for ib in bkeep:
print >> fcix, "%3d %3d " % (ii+1, iFinal[ii,ib]+1),
ffp = zeros(len(Enes[ii]),dtype=float)
if iFinal[ii,ib]>=0:
(dim0, dim1) = shape(rFKP[ii][ib])
print >> fcix, "%2d %2d" % (dim0,dim1),
for i0 in range(dim0):
for j0 in range(dim1):
x = rFKP[ii][ib][i0,j0]
if abs(x.imag)<1e-4 or PrintReal:
print >> fcix, x.real,
else:
print >> fcix, x,
#print >> fcix, rFKP[ii][ib][i0,j0],
for i0 in range(dim0):
dsum=0
for j0 in range(dim1):
dsum += abs(rFKP[ii][ib][i0][j0])**2
ffp[i0] += dsum
else:
print >> fcix, "%2d %2d" % (0, 0),
print >> fcix
|
[] |
[] |
[
"WIEN_DMFT_ROOT"
] |
[]
|
["WIEN_DMFT_ROOT"]
|
python
| 1 | 0 |