element_type | project_name | uuid | name | imports | structs | interfaces | file_location | code | global_vars | package | tags |
---|---|---|---|---|---|---|---|---|---|---|---|
file | openshift/openshift-tests-private | a41d2a5f-a9a7-4400-a7a9-6f5e91f7b14c | e2e | import (
"strings"
"time"
"k8s.io/kubectl/pkg/util/templates"
_ "github.com/openshift/openshift-tests-private/test/extended"
"github.com/openshift/openshift-tests-private/pkg/test/ginkgo"
) | github.com/openshift/openshift-tests-private/cmd/extended-platform-tests/e2e.go | package main
import (
"strings"
"time"
"k8s.io/kubectl/pkg/util/templates"
_ "github.com/openshift/openshift-tests-private/test/extended"
"github.com/openshift/openshift-tests-private/pkg/test/ginkgo"
)
// staticSuites are all known test suites this binary should run
var staticSuites = []*ginkgo.TestSuite{
{
Name: "openshift/conformance",
Description: templates.LongDesc(`
Tests that ensure an OpenShift cluster and components are working properly.
`),
Matches: func(name string) bool {
return strings.Contains(name, "[Suite:openshift/conformance/")
},
Parallelism: 30,
},
{
Name: "openshift/conformance/parallel",
Description: templates.LongDesc(`
Only the portion of the openshift/conformance test suite that runs in parallel.
`),
Matches: func(name string) bool {
return strings.Contains(name, "[Suite:openshift/conformance/parallel") && !strings.Contains(name, "[Suite:openshift/isv")
},
Parallelism: 30,
MaximumAllowedFlakes: 15,
},
{
Name: "openshift/conformance/serial",
Description: templates.LongDesc(`
Only the portion of the openshift/conformance test suite that runs serially.
`),
Matches: func(name string) bool {
return strings.Contains(name, "[Suite:openshift/conformance/serial")
},
},
{
Name: "openshift/disruptive",
Description: templates.LongDesc(`
The disruptive test suite.
`),
Matches: func(name string) bool {
return strings.Contains(name, "[Disruptive]") && strings.Contains(name, "[dr-quorum-restore]")
},
TestTimeout: 60 * time.Minute,
},
{
Name: "kubernetes/conformance",
Description: templates.LongDesc(`
The default Kubernetes conformance suite.
`),
Matches: func(name string) bool {
return strings.Contains(name, "[Suite:k8s]") && strings.Contains(name, "[Conformance]")
},
Parallelism: 30,
},
{
Name: "openshift/build",
Description: templates.LongDesc(`
Tests that exercise the OpenShift build functionality.
`),
Matches: func(name string) bool {
return !strings.Contains(name, "[Disabled") && strings.Contains(name, "[Feature:Builds]")
},
Parallelism: 7,
// TODO: Builds are really flaky right now, remove when we land perf updates and fix io on workers
MaximumAllowedFlakes: 3,
// Jenkins tests can take a really long time
TestTimeout: 60 * time.Minute,
},
{
Name: "openshift/image-registry",
Description: templates.LongDesc(`
Tests that exercise the OpenShift image-registry functionality.
`),
Matches: func(name string) bool {
return strings.Contains(name, "[registry]") && !strings.Contains(name, "[Local]")
},
},
{
Name: "openshift/image-ecosystem",
Description: templates.LongDesc(`
Tests that exercise language and tooling images shipped as part of OpenShift.
`),
Matches: func(name string) bool {
return strings.Contains(name, "[image_ecosystem]") && !strings.Contains(name, "[Local]")
},
Parallelism: 7,
TestTimeout: 20 * time.Minute,
},
{
Name: "openshift/jenkins-e2e",
Description: templates.LongDesc(`
Tests that exercise the OpenShift / Jenkins integrations provided by the OpenShift Jenkins image/plugins and the Pipeline Build Strategy.
`),
Matches: func(name string) bool {
return strings.Contains(name, "[Feature:Jenkins]")
},
Parallelism: 4,
TestTimeout: 20 * time.Minute,
},
{
Name: "openshift/scalability",
Description: templates.LongDesc(`
Tests that verify the scalability characteristics of the cluster. Currently this is focused on core performance behaviors and preventing regressions.
`),
Matches: func(name string) bool {
return strings.Contains(name, "[Suite:openshift/scalability]")
},
Parallelism: 1,
TestTimeout: 20 * time.Minute,
},
{
Name: "openshift/conformance-excluded",
Description: templates.LongDesc(`
Run only tests that are excluded from conformance. Makes identifying omitted tests easier.
`),
Matches: func(name string) bool { return !strings.Contains(name, "[Suite:openshift/conformance/") },
},
{
Name: "openshift/test-cmd",
Description: templates.LongDesc(`
Run only tests for test-cmd.
`),
Matches: func(name string) bool { return strings.Contains(name, "[Suite:openshift/test-cmd]") },
},
{
Name: "openshift/csi",
Description: templates.LongDesc(`
Run tests for an installed CSI driver. The TEST_CSI_DRIVER_FILES environment variable must be set to a comma-separated list of CSI driver definition files.
See https://github.com/kubernetes/kubernetes/blob/master/test/e2e/storage/external/README.md for required format of the files.
`),
Matches: func(name string) bool {
return strings.Contains(name, "[Suite:openshift/csi") && !strings.Contains(name, "[Disruptive]")
},
},
{
Name: "openshift/network/stress",
Description: templates.LongDesc(`
This test suite repeatedly verifies the networking function of the cluster in parallel to find flakes.
`),
Matches: func(name string) bool {
return strings.Contains(name, "[Suite:openshift/conformance/") && strings.Contains(name, "[sig-network]")
},
Parallelism: 30,
Count: 15,
TestTimeout: 20 * time.Minute,
},
{
Name: "openshift/isv",
Description: templates.LongDesc(`
This test suite verifies the execution of Certified Operators on OpenShift.
`),
Matches: func(name string) bool {
return strings.Contains(name, "[Suite:openshift/isv]")
},
Parallelism: 3,
TestTimeout: 180 * time.Minute,
},
{
Name: "all",
Description: templates.LongDesc(`
Run all tests.
`),
Matches: func(name string) bool { return true },
},
}
| package main | ||||
file | openshift/openshift-tests-private | b2b19d80-ba8c-4898-bf2d-8eb32fca8596 | openshift-tests | import (
"encoding/json"
"flag"
goflag "flag"
"fmt"
"io"
"math/rand"
"os"
"time"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
utilflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/logs"
"k8s.io/klog"
"k8s.io/kubectl/pkg/util/templates"
e2e "k8s.io/kubernetes/test/e2e/framework"
"github.com/openshift/library-go/pkg/serviceability"
"github.com/openshift/openshift-tests-private/pkg/monitor"
testginkgo "github.com/openshift/openshift-tests-private/pkg/test/ginkgo"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
exutilcloud "github.com/openshift/openshift-tests-private/test/extended/util/cloud"
// these are loading important global flags that we need to get and set
_ "k8s.io/kubernetes/test/e2e"
_ "k8s.io/kubernetes/test/e2e/lifecycle"
) | github.com/openshift/openshift-tests-private/cmd/extended-platform-tests/openshift-tests.go | package main
import (
"encoding/json"
"flag"
goflag "flag"
"fmt"
"io"
"math/rand"
"os"
"time"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
utilflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/logs"
"k8s.io/klog"
"k8s.io/kubectl/pkg/util/templates"
e2e "k8s.io/kubernetes/test/e2e/framework"
"github.com/openshift/library-go/pkg/serviceability"
"github.com/openshift/openshift-tests-private/pkg/monitor"
testginkgo "github.com/openshift/openshift-tests-private/pkg/test/ginkgo"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
exutilcloud "github.com/openshift/openshift-tests-private/test/extended/util/cloud"
// these are loading important global flags that we need to get and set
_ "k8s.io/kubernetes/test/e2e"
_ "k8s.io/kubernetes/test/e2e/lifecycle"
)
func main() {
logs.InitLogs()
defer logs.FlushLogs()
rand.Seed(time.Now().UTC().UnixNano())
pflag.CommandLine.SetNormalizeFunc(utilflag.WordSepNormalizeFunc)
pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
root := &cobra.Command{
Long: templates.LongDesc(`
OpenShift Extended Platform Tests
This command verifies behavior of an OpenShift cluster by running remote tests against
the cluster API that exercise functionality. In general these tests may be disruptive
or require elevated privileges - see the descriptions of each test suite.
`),
}
root.AddCommand(
newRunCommand(),
newRunTestCommand(),
newRunMonitorCommand(),
)
pflag.CommandLine = pflag.NewFlagSet("empty", pflag.ExitOnError)
flag.CommandLine = flag.NewFlagSet("empty", flag.ExitOnError)
exutil.InitStandardFlags()
if err := func() error {
defer serviceability.Profile(os.Getenv("OPENSHIFT_PROFILE")).Stop()
return root.Execute()
}(); err != nil {
if ex, ok := err.(testginkgo.ExitError); ok {
os.Exit(ex.Code)
}
fmt.Fprintf(os.Stderr, "error: %v\n", err)
os.Exit(1)
}
}
func newRunMonitorCommand() *cobra.Command {
monitorOpt := &monitor.Options{
Out: os.Stdout,
ErrOut: os.Stderr,
}
cmd := &cobra.Command{
Use: "run-monitor",
Short: "Continuously verify the cluster is functional",
Long: templates.LongDesc(`
Run a continuous verification process
`),
SilenceUsage: true,
SilenceErrors: true,
RunE: func(cmd *cobra.Command, args []string) error {
return monitorOpt.Run()
},
}
return cmd
}
func newRunCommand() *cobra.Command {
opt := &testginkgo.Options{
Suites: staticSuites,
}
cmd := &cobra.Command{
Use: "run SUITE",
Short: "Run a test suite",
Long: templates.LongDesc(`
Run a test suite against an OpenShift server
This command will run one of the following suites against a cluster identified by the current
KUBECONFIG file. See the suite description for more on what actions the suite will take.
If you specify the --dry-run argument, the names of each individual test that is part of the
suite will be printed, one per line. You may filter this list and pass it back to the run
command with the --file argument. You may also pipe a list of test names, one per line, on
standard input by passing "-f -".
`) + testginkgo.SuitesString(opt.Suites, "\n\nAvailable test suites:\n\n"),
SilenceUsage: true,
SilenceErrors: true,
RunE: func(cmd *cobra.Command, args []string) error {
return mirrorToFile(opt, func() error {
if err := initProvider(opt.Provider, opt.DryRun); err != nil {
return err
}
if !opt.DryRun {
checkClusterTypeAndSetEnvs()
}
e2e.AfterReadingAllFlags(exutil.TestContext)
e2e.TestContext.DumpLogsOnFailure = true
exutil.TestContext.DumpLogsOnFailure = true
return opt.Run(args)
})
},
}
bindOptions(opt, cmd.Flags())
return cmd
}
func newRunTestCommand() *cobra.Command {
testOpt := &testginkgo.TestOptions{
Out: os.Stdout,
ErrOut: os.Stderr,
}
cmd := &cobra.Command{
Use: "run-test NAME",
Short: "Run a single test by name",
Long: templates.LongDesc(`
Execute a single test
This executes a single test by name. It is used by the run command during suite execution but may also
be used to test in isolation while developing new tests.
`),
SilenceUsage: true,
SilenceErrors: true,
RunE: func(cmd *cobra.Command, args []string) error {
if err := initProvider(os.Getenv("TEST_PROVIDER"), testOpt.DryRun); err != nil {
return err
}
if !testOpt.DryRun {
readClusterTypeEnvsAndSetFlags()
}
e2e.AfterReadingAllFlags(exutil.TestContext)
e2e.TestContext.DumpLogsOnFailure = true
exutil.TestContext.DumpLogsOnFailure = true
return testOpt.Run(args)
},
}
cmd.Flags().BoolVar(&testOpt.DryRun, "dry-run", testOpt.DryRun, "Print the test to run without executing it.")
return cmd
}
func checkClusterTypeAndSetEnvs() {
if exutil.PreSetEnvK8s() == "yes" {
_ = os.Setenv(exutil.EnvIsExternalOIDCCluster, "no")
} else {
exutil.PreSetEnvOIDCCluster()
}
}
func readClusterTypeEnvsAndSetFlags() {
isK8sEnv := os.Getenv(exutil.EnvIsKubernetesCluster)
isExtOIDCEnv := os.Getenv(exutil.EnvIsExternalOIDCCluster)
if len(isK8sEnv) == 0 {
isK8sEnv = exutil.PreSetEnvK8s()
if isK8sEnv == "yes" {
isExtOIDCEnv = "no"
_ = os.Setenv(exutil.EnvIsExternalOIDCCluster, "no")
}
}
if len(isExtOIDCEnv) == 0 {
isExtOIDCEnv = exutil.PreSetEnvOIDCCluster()
}
exutil.IsExternalOIDCClusterFlag = isExtOIDCEnv
exutil.IsKubernetesClusterFlag = isK8sEnv
e2e.Logf("Is kubernetes cluster: %s, is external OIDC cluster: %s", exutil.IsKubernetesClusterFlag, exutil.IsExternalOIDCClusterFlag)
}
// mirrorToFile ensures a copy of all output goes to the provided OutFile, including
// any error returned from fn. The function returns fn() or any error encountered while
// attempting to open the file.
func mirrorToFile(opt *testginkgo.Options, fn func() error) error {
if len(opt.OutFile) == 0 {
opt.Out, opt.ErrOut = os.Stdout, os.Stderr
return fn()
}
f, err := os.OpenFile(opt.OutFile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0640)
if err != nil {
return err
}
opt.Out = io.MultiWriter(os.Stdout, f)
opt.ErrOut = io.MultiWriter(os.Stderr, f)
exitErr := fn()
if exitErr != nil {
fmt.Fprintf(f, "error: %s", exitErr)
}
if err := f.Close(); err != nil {
fmt.Fprintf(os.Stderr, "error: Unable to close output file\n")
}
return exitErr
}
func bindOptions(opt *testginkgo.Options, flags *pflag.FlagSet) {
flags.BoolVar(&opt.DryRun, "dry-run", opt.DryRun, "Print the tests to run without executing them.")
flags.BoolVar(&opt.PrintCommands, "print-commands", opt.PrintCommands, "Print the sub-commands that would be executed instead.")
flags.StringVar(&opt.JUnitDir, "junit-dir", opt.JUnitDir, "The directory to write test reports to.")
flags.StringVar(&opt.Provider, "provider", opt.Provider, "The cluster infrastructure provider. Will automatically default to the correct value.")
flags.StringVarP(&opt.TestFile, "file", "f", opt.TestFile, "Create a suite from the newline-delimited test names in this file.")
flags.StringVar(&opt.Regex, "run", opt.Regex, "Regular expression of tests to run.")
flags.StringVarP(&opt.OutFile, "output-file", "o", opt.OutFile, "Write all test output to this file.")
flags.IntVar(&opt.Count, "count", opt.Count, "Run each test a specified number of times. Defaults to 1 or the suite's preferred value.")
flags.DurationVar(&opt.Timeout, "timeout", opt.Timeout, "Set the maximum time a test can run before being aborted. This is read from the suite by default, but will be 10 minutes otherwise.")
flags.BoolVar(&opt.IncludeSuccessOutput, "include-success", opt.IncludeSuccessOutput, "Print output from successful tests.")
flags.IntVar(&opt.Parallelism, "max-parallel-tests", opt.Parallelism, "Maximum number of tests running in parallel. 0 defaults to test suite recommended value, which is different in each suite.")
}
func initProvider(provider string, dryRun bool) error {
// record the exit error to the output file
// if err := decodeProviderTo(provider, exutil.TestContext, dryRun); err != nil {
// e2e.Logf("Fail to decode Provider:%s, but continue to run with skeleton mode", provider)
// }
exutil.TestContext.AllowedNotReadyNodes = 100
exutil.TestContext.MaxNodesToGather = 0
// reale2e.SetViperConfig(os.Getenv("VIPERCONFIG"))
if err := initCSITests(dryRun); err != nil {
return err
}
exutil.AnnotateTestSuite()
err := exutil.InitTest(dryRun)
gomega.RegisterFailHandler(ginkgo.Fail)
// TODO: infer SSH keys from the cluster
return err
}
func decodeProviderTo(provider string, testContext *e2e.TestContextType, dryRun bool) error {
switch provider {
case "":
if _, ok := os.LookupEnv("KUBE_SSH_USER"); ok {
if _, ok := os.LookupEnv("LOCAL_SSH_KEY"); ok {
testContext.Provider = "local"
break
}
}
if dryRun {
break
}
fallthrough
case "azure", "aws", "gce", "vsphere":
provider, cfg, err := exutilcloud.LoadConfig()
if err != nil {
return err
}
if cfg != nil {
testContext.Provider = provider
testContext.CloudConfig = *cfg
}
default:
var providerInfo struct{ Type string }
if err := json.Unmarshal([]byte(provider), &providerInfo); err != nil {
return fmt.Errorf("provider must be a JSON object with the 'type' key at a minimum: %v", err)
}
if len(providerInfo.Type) == 0 {
return fmt.Errorf("provider must be a JSON object with the 'type' key")
}
testContext.Provider = providerInfo.Type
if err := json.Unmarshal([]byte(provider), &testContext.CloudConfig); err != nil {
return fmt.Errorf("provider must decode into the cloud config object: %v", err)
}
}
if len(testContext.Provider) == 0 {
testContext.Provider = "skeleton"
}
klog.V(2).Infof("Provider %s: %#v", testContext.Provider, testContext.CloudConfig)
return nil
}
| package main | ||||
function | openshift/openshift-tests-private | 31669808-c8aa-4546-b11c-f30b55ab5ae8 | main | ['"flag"', 'goflag "flag"', '"fmt"', '"math/rand"', '"os"', '"time"', '"github.com/spf13/cobra"', '"github.com/spf13/pflag"', 'utilflag "k8s.io/component-base/cli/flag"', '"k8s.io/component-base/logs"', '"k8s.io/kubectl/pkg/util/templates"', '"github.com/openshift/library-go/pkg/serviceability"'] | github.com/openshift/openshift-tests-private/cmd/extended-platform-tests/openshift-tests.go | func main() {
logs.InitLogs()
defer logs.FlushLogs()
rand.Seed(time.Now().UTC().UnixNano())
pflag.CommandLine.SetNormalizeFunc(utilflag.WordSepNormalizeFunc)
pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
root := &cobra.Command{
Long: templates.LongDesc(`
OpenShift Extended Platform Tests
This command verifies behavior of an OpenShift cluster by running remote tests against
the cluster API that exercise functionality. In general these tests may be disruptive
or require elevated privileges - see the descriptions of each test suite.
`),
}
root.AddCommand(
newRunCommand(),
newRunTestCommand(),
newRunMonitorCommand(),
)
pflag.CommandLine = pflag.NewFlagSet("empty", pflag.ExitOnError)
flag.CommandLine = flag.NewFlagSet("empty", flag.ExitOnError)
exutil.InitStandardFlags()
if err := func() error {
defer serviceability.Profile(os.Getenv("OPENSHIFT_PROFILE")).Stop()
return root.Execute()
}(); err != nil {
if ex, ok := err.(testginkgo.ExitError); ok {
os.Exit(ex.Code)
}
fmt.Fprintf(os.Stderr, "error: %v\n", err)
os.Exit(1)
}
} | main | ||||
function | openshift/openshift-tests-private | 46eb0a6c-d721-4485-a1d0-676b0dd1a5c4 | newRunMonitorCommand | ['"os"', '"github.com/spf13/cobra"', '"k8s.io/kubectl/pkg/util/templates"', '"github.com/openshift/openshift-tests-private/pkg/monitor"'] | github.com/openshift/openshift-tests-private/cmd/extended-platform-tests/openshift-tests.go | func newRunMonitorCommand() *cobra.Command {
monitorOpt := &monitor.Options{
Out: os.Stdout,
ErrOut: os.Stderr,
}
cmd := &cobra.Command{
Use: "run-monitor",
Short: "Continuously verify the cluster is functional",
Long: templates.LongDesc(`
Run a continuous verification process
`),
SilenceUsage: true,
SilenceErrors: true,
RunE: func(cmd *cobra.Command, args []string) error {
return monitorOpt.Run()
},
}
return cmd
} | main | ||||
function | openshift/openshift-tests-private | bd6e9345-95a1-4823-934e-e4e8adff5a3d | newRunCommand | ['"github.com/spf13/cobra"', '"k8s.io/kubectl/pkg/util/templates"', '_ "k8s.io/kubernetes/test/e2e"'] | github.com/openshift/openshift-tests-private/cmd/extended-platform-tests/openshift-tests.go | func newRunCommand() *cobra.Command {
opt := &testginkgo.Options{
Suites: staticSuites,
}
cmd := &cobra.Command{
Use: "run SUITE",
Short: "Run a test suite",
Long: templates.LongDesc(`
Run a test suite against an OpenShift server
This command will run one of the following suites against a cluster identified by the current
KUBECONFIG file. See the suite description for more on what actions the suite will take.
If you specify the --dry-run argument, the names of each individual test that is part of the
suite will be printed, one per line. You may filter this list and pass it back to the run
command with the --file argument. You may also pipe a list of test names, one per line, on
standard input by passing "-f -".
`) + testginkgo.SuitesString(opt.Suites, "\n\nAvailable test suites:\n\n"),
SilenceUsage: true,
SilenceErrors: true,
RunE: func(cmd *cobra.Command, args []string) error {
return mirrorToFile(opt, func() error {
if err := initProvider(opt.Provider, opt.DryRun); err != nil {
return err
}
if !opt.DryRun {
checkClusterTypeAndSetEnvs()
}
e2e.AfterReadingAllFlags(exutil.TestContext)
e2e.TestContext.DumpLogsOnFailure = true
exutil.TestContext.DumpLogsOnFailure = true
return opt.Run(args)
})
},
}
bindOptions(opt, cmd.Flags())
return cmd
} | main | ||||
function | openshift/openshift-tests-private | cec5dce3-e592-4c87-a917-a1937707c259 | newRunTestCommand | ['"os"', '"github.com/spf13/cobra"', '"k8s.io/kubectl/pkg/util/templates"', '_ "k8s.io/kubernetes/test/e2e"'] | github.com/openshift/openshift-tests-private/cmd/extended-platform-tests/openshift-tests.go | func newRunTestCommand() *cobra.Command {
testOpt := &testginkgo.TestOptions{
Out: os.Stdout,
ErrOut: os.Stderr,
}
cmd := &cobra.Command{
Use: "run-test NAME",
Short: "Run a single test by name",
Long: templates.LongDesc(`
Execute a single test
This executes a single test by name. It is used by the run command during suite execution but may also
be used to test in isolation while developing new tests.
`),
SilenceUsage: true,
SilenceErrors: true,
RunE: func(cmd *cobra.Command, args []string) error {
if err := initProvider(os.Getenv("TEST_PROVIDER"), testOpt.DryRun); err != nil {
return err
}
if !testOpt.DryRun {
readClusterTypeEnvsAndSetFlags()
}
e2e.AfterReadingAllFlags(exutil.TestContext)
e2e.TestContext.DumpLogsOnFailure = true
exutil.TestContext.DumpLogsOnFailure = true
return testOpt.Run(args)
},
}
cmd.Flags().BoolVar(&testOpt.DryRun, "dry-run", testOpt.DryRun, "Print the test to run without executing it.")
return cmd
} | main | ||||
function | openshift/openshift-tests-private | c9850b74-5578-4953-9474-4642bcf06ac2 | checkClusterTypeAndSetEnvs | ['"os"'] | github.com/openshift/openshift-tests-private/cmd/extended-platform-tests/openshift-tests.go | func checkClusterTypeAndSetEnvs() {
if exutil.PreSetEnvK8s() == "yes" {
_ = os.Setenv(exutil.EnvIsExternalOIDCCluster, "no")
} else {
exutil.PreSetEnvOIDCCluster()
}
} | main | ||||
function | openshift/openshift-tests-private | d03e562d-3c83-408f-b2fb-767ec693dd3b | readClusterTypeEnvsAndSetFlags | ['"os"', '_ "k8s.io/kubernetes/test/e2e"'] | github.com/openshift/openshift-tests-private/cmd/extended-platform-tests/openshift-tests.go | func readClusterTypeEnvsAndSetFlags() {
isK8sEnv := os.Getenv(exutil.EnvIsKubernetesCluster)
isExtOIDCEnv := os.Getenv(exutil.EnvIsExternalOIDCCluster)
if len(isK8sEnv) == 0 {
isK8sEnv = exutil.PreSetEnvK8s()
if isK8sEnv == "yes" {
isExtOIDCEnv = "no"
_ = os.Setenv(exutil.EnvIsExternalOIDCCluster, "no")
}
}
if len(isExtOIDCEnv) == 0 {
isExtOIDCEnv = exutil.PreSetEnvOIDCCluster()
}
exutil.IsExternalOIDCClusterFlag = isExtOIDCEnv
exutil.IsKubernetesClusterFlag = isK8sEnv
e2e.Logf("Is kubernetes cluster: %s, is external OIDC cluster: %s", exutil.IsKubernetesClusterFlag, exutil.IsExternalOIDCClusterFlag)
} | main | ||||
function | openshift/openshift-tests-private | f32c4698-3513-442a-ac92-87cdf21b82b8 | mirrorToFile | ['"fmt"', '"io"', '"os"'] | github.com/openshift/openshift-tests-private/cmd/extended-platform-tests/openshift-tests.go | func mirrorToFile(opt *testginkgo.Options, fn func() error) error {
if len(opt.OutFile) == 0 {
opt.Out, opt.ErrOut = os.Stdout, os.Stderr
return fn()
}
f, err := os.OpenFile(opt.OutFile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0640)
if err != nil {
return err
}
opt.Out = io.MultiWriter(os.Stdout, f)
opt.ErrOut = io.MultiWriter(os.Stderr, f)
exitErr := fn()
if exitErr != nil {
fmt.Fprintf(f, "error: %s", exitErr)
}
if err := f.Close(); err != nil {
fmt.Fprintf(os.Stderr, "error: Unable to close output file\n")
}
return exitErr
} | main | ||||
function | openshift/openshift-tests-private | 7be48194-3f9c-4778-a7fe-eacc45208d2f | bindOptions | ['"time"', '"github.com/spf13/pflag"'] | github.com/openshift/openshift-tests-private/cmd/extended-platform-tests/openshift-tests.go | func bindOptions(opt *testginkgo.Options, flags *pflag.FlagSet) {
flags.BoolVar(&opt.DryRun, "dry-run", opt.DryRun, "Print the tests to run without executing them.")
flags.BoolVar(&opt.PrintCommands, "print-commands", opt.PrintCommands, "Print the sub-commands that would be executed instead.")
flags.StringVar(&opt.JUnitDir, "junit-dir", opt.JUnitDir, "The directory to write test reports to.")
flags.StringVar(&opt.Provider, "provider", opt.Provider, "The cluster infrastructure provider. Will automatically default to the correct value.")
flags.StringVarP(&opt.TestFile, "file", "f", opt.TestFile, "Create a suite from the newline-delimited test names in this file.")
flags.StringVar(&opt.Regex, "run", opt.Regex, "Regular expression of tests to run.")
flags.StringVarP(&opt.OutFile, "output-file", "o", opt.OutFile, "Write all test output to this file.")
flags.IntVar(&opt.Count, "count", opt.Count, "Run each test a specified number of times. Defaults to 1 or the suite's preferred value.")
flags.DurationVar(&opt.Timeout, "timeout", opt.Timeout, "Set the maximum time a test can run before being aborted. This is read from the suite by default, but will be 10 minutes otherwise.")
flags.BoolVar(&opt.IncludeSuccessOutput, "include-success", opt.IncludeSuccessOutput, "Print output from successful tests.")
flags.IntVar(&opt.Parallelism, "max-parallel-tests", opt.Parallelism, "Maximum number of tests running in parallel. 0 defaults to test suite recommended value, which is different in each suite.")
} | main | ||||
function | openshift/openshift-tests-private | 399f69f8-3bb0-4bcb-a377-c56b08f02e87 | initProvider | ['"os"', '"github.com/onsi/gomega"', 'testginkgo "github.com/openshift/openshift-tests-private/pkg/test/ginkgo"', '_ "k8s.io/kubernetes/test/e2e"'] | github.com/openshift/openshift-tests-private/cmd/extended-platform-tests/openshift-tests.go | func initProvider(provider string, dryRun bool) error {
// record the exit error to the output file
// if err := decodeProviderTo(provider, exutil.TestContext, dryRun); err != nil {
// e2e.Logf("Fail to decode Provider:%s, but continue to run with skeleton mode", provider)
// }
exutil.TestContext.AllowedNotReadyNodes = 100
exutil.TestContext.MaxNodesToGather = 0
// reale2e.SetViperConfig(os.Getenv("VIPERCONFIG"))
if err := initCSITests(dryRun); err != nil {
return err
}
exutil.AnnotateTestSuite()
err := exutil.InitTest(dryRun)
gomega.RegisterFailHandler(ginkgo.Fail)
// TODO: infer SSH keys from the cluster
return err
} | main | ||||
function | openshift/openshift-tests-private | 85ac90ce-96ff-4407-b256-9237c8dd05b5 | decodeProviderTo | ['"encoding/json"', '"fmt"', '"os"', '"k8s.io/klog"', 'exutilcloud "github.com/openshift/openshift-tests-private/test/extended/util/cloud"', '_ "k8s.io/kubernetes/test/e2e"'] | github.com/openshift/openshift-tests-private/cmd/extended-platform-tests/openshift-tests.go | func decodeProviderTo(provider string, testContext *e2e.TestContextType, dryRun bool) error {
switch provider {
case "":
if _, ok := os.LookupEnv("KUBE_SSH_USER"); ok {
if _, ok := os.LookupEnv("LOCAL_SSH_KEY"); ok {
testContext.Provider = "local"
break
}
}
if dryRun {
break
}
fallthrough
case "azure", "aws", "gce", "vsphere":
provider, cfg, err := exutilcloud.LoadConfig()
if err != nil {
return err
}
if cfg != nil {
testContext.Provider = provider
testContext.CloudConfig = *cfg
}
default:
var providerInfo struct{ Type string }
if err := json.Unmarshal([]byte(provider), &providerInfo); err != nil {
return fmt.Errorf("provider must be a JSON object with the 'type' key at a minimum: %v", err)
}
if len(providerInfo.Type) == 0 {
return fmt.Errorf("provider must be a JSON object with the 'type' key")
}
testContext.Provider = providerInfo.Type
if err := json.Unmarshal([]byte(provider), &testContext.CloudConfig); err != nil {
return fmt.Errorf("provider must decode into the cloud config object: %v", err)
}
}
if len(testContext.Provider) == 0 {
testContext.Provider = "skeleton"
}
klog.V(2).Infof("Provider %s: %#v", testContext.Provider, testContext.CloudConfig)
return nil
} | main | ||||
file | openshift/openshift-tests-private | 361803f0-1a80-46ae-a497-df1e98ef358e | csi | import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/openshift/openshift-tests-private/test/extended/csi"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
"k8s.io/kubernetes/test/e2e/storage/external"
) | github.com/openshift/openshift-tests-private/cmd/extended-platform-tests/csi.go | package main
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/openshift/openshift-tests-private/test/extended/csi"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
"k8s.io/kubernetes/test/e2e/storage/external"
)
const (
manifestEnvVar = "TEST_CSI_DRIVER_FILES"
installDriversEnvVar = "TEST_INSTALL_CSI_DRIVERS"
)
// Initialize openshift/csi suite, i.e. define CSI tests from TEST_CSI_DRIVER_FILES.
func initCSITests(dryRun bool) error {
driverList := os.Getenv(installDriversEnvVar)
if driverList != "" {
drivers := strings.Split(driverList, ",")
for _, driver := range drivers {
manifestFile, err := csi.InstallCSIDriver(driver, dryRun)
if err != nil {
return fmt.Errorf("failed to install CSI driver from %q: %s", driver, err)
}
// Child processes need to see the newly introduced manifest,
// so store it in the TEST_CSI_DRIVER_FILES env. var for them.
manifestList := os.Getenv(manifestEnvVar)
if len(manifestList) > 0 {
manifestList += ","
}
manifestList += manifestFile
os.Setenv(manifestEnvVar, manifestList)
}
}
// Clear TEST_INSTALL_CSI_DRIVERS; we don't want child processes to install the drivers again.
os.Setenv(installDriversEnvVar, "")
manifestList := os.Getenv(manifestEnvVar)
if manifestList != "" {
manifests := strings.Split(manifestList, ",")
for _, manifest := range manifests {
if err := external.AddDriverDefinition(manifest); err != nil {
return fmt.Errorf("failed to load manifest from %q: %s", manifest, err)
}
// Register the base dir of the manifest file as a file source.
// With this we can reference the CSI driver's storageClass
// in the manifest file (FromFile field).
testfiles.AddFileSource(testfiles.RootFileSource{
Root: filepath.Dir(manifest),
})
}
}
return nil
}
| package main | ||||
function | openshift/openshift-tests-private | f6384396-741d-4178-9b31-f12d4f97a223 | initCSITests | ['"fmt"', '"os"', '"path/filepath"', '"strings"', '"github.com/openshift/openshift-tests-private/test/extended/csi"', '"k8s.io/kubernetes/test/e2e/framework/testfiles"', '"k8s.io/kubernetes/test/e2e/storage/external"'] | github.com/openshift/openshift-tests-private/cmd/extended-platform-tests/csi.go | func initCSITests(dryRun bool) error {
driverList := os.Getenv(installDriversEnvVar)
if driverList != "" {
drivers := strings.Split(driverList, ",")
for _, driver := range drivers {
manifestFile, err := csi.InstallCSIDriver(driver, dryRun)
if err != nil {
return fmt.Errorf("failed to install CSI driver from %q: %s", driver, err)
}
// Child processes need to see the newly introduced manifest,
// so store it in the TEST_CSI_DRIVER_FILES env. var for them.
manifestList := os.Getenv(manifestEnvVar)
if len(manifestList) > 0 {
manifestList += ","
}
manifestList += manifestFile
os.Setenv(manifestEnvVar, manifestList)
}
}
// Clear TEST_INSTALL_CSI_DRIVERS; we don't want child processes to install the drivers again.
os.Setenv(installDriversEnvVar, "")
manifestList := os.Getenv(manifestEnvVar)
if manifestList != "" {
manifests := strings.Split(manifestList, ",")
for _, manifest := range manifests {
if err := external.AddDriverDefinition(manifest); err != nil {
return fmt.Errorf("failed to load manifest from %q: %s", manifest, err)
}
// Register the base dir of the manifest file as a file source.
// With this we can reference the CSI driver's storageClass
// in the manifest file (FromFile field).
testfiles.AddFileSource(testfiles.RootFileSource{
Root: filepath.Dir(manifest),
})
}
}
return nil
} | main | ||||
test | openshift/openshift-tests-private | 32469bb5-bb75-4185-b459-4a2e97883cb6 | api | import (
"context"
"fmt"
"os"
"strings"
"sync"
"time"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
e2e "k8s.io/kubernetes/test/e2e/framework"
configclientset "github.com/openshift/client-go/config/clientset/versioned"
clientimagev1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
) | github.com/openshift/openshift-tests-private/pkg/monitor/api.go | package monitor
import (
"context"
"fmt"
"os"
"strings"
"sync"
"time"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
e2e "k8s.io/kubernetes/test/e2e/framework"
configclientset "github.com/openshift/client-go/config/clientset/versioned"
clientimagev1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)
// Start begins monitoring the cluster referenced by the default kube configuration until
// context is finished.
func Start(ctx context.Context) (*Monitor, error) {
m := NewMonitor()
cfg := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{})
clusterConfig, err := cfg.ClientConfig()
if err != nil {
return nil, fmt.Errorf("could not load client configuration: %v", err)
}
client, err := kubernetes.NewForConfig(clusterConfig)
if err != nil {
return nil, err
}
configClient, err := configclientset.NewForConfig(clusterConfig)
if err != nil {
return nil, err
}
if err := startAPIMonitoring(ctx, m, clusterConfig); err != nil {
return nil, err
}
startPodMonitoring(ctx, m, client)
startNodeMonitoring(ctx, m, client)
startEventMonitoring(ctx, m, client)
// Monitor ClusterOperators and ClusterVersions only if we are running against an OpenShift cluster
// This check occurs after suite initialization and before test case start, so we get the cluster type
// directly from the environment variable instead of using exutil.IsKubernetesClusterFlag.
isK8sClusterEnv := os.Getenv(exutil.EnvIsKubernetesCluster)
if isK8sClusterEnv != "yes" {
e2e.Logf("EnvIsKubernetesCluster = %s, start monitoring ClusterOperators and ClusterVersions", isK8sClusterEnv)
startClusterOperatorMonitoring(ctx, m, configClient)
}
m.StartSampling(ctx)
return m, nil
}
func startAPIMonitoring(ctx context.Context, m *Monitor, clusterConfig *rest.Config) error {
pollingConfig := *clusterConfig
pollingConfig.Timeout = 3 * time.Second
pollingClient, err := clientcorev1.NewForConfig(&pollingConfig)
if err != nil {
return err
}
openshiftPollingClient, err := clientimagev1.NewForConfig(&pollingConfig)
if err != nil {
return err
}
m.AddSampler(
StartSampling(ctx, m, time.Second, func(previous bool) (condition *Condition, next bool) {
_, err := pollingClient.Namespaces().Get(ctx, "kube-system", metav1.GetOptions{})
switch {
case err == nil && !previous:
condition = &Condition{
Level: Info,
Locator: "kube-apiserver",
Message: "Kube API started responding to GET requests",
}
case err != nil && previous:
condition = &Condition{
Level: Error,
Locator: "kube-apiserver",
Message: fmt.Sprintf("Kube API started failing: %v", err),
}
}
return condition, err == nil
}).ConditionWhenFailing(&Condition{
Level: Error,
Locator: "kube-apiserver",
Message: fmt.Sprintf("Kube API is not responding to GET requests"),
}),
)
m.AddSampler(
StartSampling(ctx, m, time.Second, func(previous bool) (condition *Condition, next bool) {
_, err := openshiftPollingClient.ImageStreams("openshift-apiserver").Get(ctx, "missing", metav1.GetOptions{})
if !errors.IsUnexpectedServerError(err) && errors.IsNotFound(err) {
err = nil
}
switch {
case err == nil && !previous:
condition = &Condition{
Level: Info,
Locator: "openshift-apiserver",
Message: "OpenShift API started responding to GET requests",
}
case err != nil && previous:
condition = &Condition{
Level: Info,
Locator: "openshift-apiserver",
Message: fmt.Sprintf("OpenShift API started failing: %v", err),
}
}
return condition, err == nil
}).ConditionWhenFailing(&Condition{
Level: Error,
Locator: "openshift-apiserver",
Message: fmt.Sprintf("OpenShift API is not responding to GET requests"),
}),
)
return nil
}
func findContainerStatus(status []corev1.ContainerStatus, name string, position int) *corev1.ContainerStatus {
if position < len(status) {
if status[position].Name == name {
return &status[position]
}
}
for i := range status {
if status[i].Name == name {
return &status[i]
}
}
return nil
}
func findNodeCondition(status []corev1.NodeCondition, name corev1.NodeConditionType, position int) *corev1.NodeCondition {
if position < len(status) {
if status[position].Type == name {
return &status[position]
}
}
for i := range status {
if status[i].Type == name {
return &status[i]
}
}
return nil
}
func locateEvent(event *corev1.Event) string {
if len(event.InvolvedObject.Namespace) > 0 {
return fmt.Sprintf("ns/%s %s/%s", event.InvolvedObject.Namespace, strings.ToLower(event.InvolvedObject.Kind), event.InvolvedObject.Name)
}
return fmt.Sprintf("%s/%s", strings.ToLower(event.InvolvedObject.Kind), event.InvolvedObject.Name)
}
func locatePod(pod *corev1.Pod) string {
return fmt.Sprintf("ns/%s pod/%s node/%s", pod.Namespace, pod.Name, pod.Spec.NodeName)
}
func locateNode(node *corev1.Node) string {
return fmt.Sprintf("node/%s", node.Name)
}
func locatePodContainer(pod *corev1.Pod, containerName string) string {
return fmt.Sprintf("ns/%s pod/%s node/%s container=%s", pod.Namespace, pod.Name, pod.Spec.NodeName, containerName)
}
func filterToSystemNamespaces(obj runtime.Object) bool {
m, ok := obj.(metav1.Object)
if !ok {
return true
}
ns := m.GetNamespace()
if len(ns) == 0 {
return true
}
return strings.HasPrefix(ns, "kube-") || strings.HasPrefix(ns, "openshift-") || ns == "default"
}
type errorRecordingListWatcher struct {
lw cache.ListerWatcher
recorder Recorder
lock sync.Mutex
receivedError bool
}
func NewErrorRecordingListWatcher(recorder Recorder, lw cache.ListerWatcher) cache.ListerWatcher {
return &errorRecordingListWatcher{
lw: lw,
recorder: recorder,
}
}
func (w *errorRecordingListWatcher) List(options metav1.ListOptions) (runtime.Object, error) {
obj, err := w.lw.List(options)
w.handle(err)
return obj, err
}
func (w *errorRecordingListWatcher) Watch(options metav1.ListOptions) (watch.Interface, error) {
obj, err := w.lw.Watch(options)
w.handle(err)
return obj, err
}
func (w *errorRecordingListWatcher) handle(err error) {
w.lock.Lock()
defer w.lock.Unlock()
if err != nil {
if !w.receivedError {
w.recorder.Record(Condition{
Level: Error,
Locator: "kube-apiserver",
Message: fmt.Sprintf("failed contacting the API: %v", err),
})
}
w.receivedError = true
} else {
w.receivedError = false
}
}
| package monitor | ||||
function | openshift/openshift-tests-private | ffe9c8c5-23f9-4846-adeb-fc39fa5d0a6e | Start | ['"context"', '"fmt"', '"os"', '"k8s.io/client-go/kubernetes"', '"k8s.io/client-go/tools/clientcmd"'] | github.com/openshift/openshift-tests-private/pkg/monitor/api.go | func Start(ctx context.Context) (*Monitor, error) {
m := NewMonitor()
cfg := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{})
clusterConfig, err := cfg.ClientConfig()
if err != nil {
return nil, fmt.Errorf("could not load client configuration: %v", err)
}
client, err := kubernetes.NewForConfig(clusterConfig)
if err != nil {
return nil, err
}
configClient, err := configclientset.NewForConfig(clusterConfig)
if err != nil {
return nil, err
}
if err := startAPIMonitoring(ctx, m, clusterConfig); err != nil {
return nil, err
}
startPodMonitoring(ctx, m, client)
startNodeMonitoring(ctx, m, client)
startEventMonitoring(ctx, m, client)
// Monitor ClusterOperators and ClusterVersions only if we are running against an OpenShift cluster
// This check occurs after suite initialization and before test case start, so we get the cluster type
// directly from the environment variable instead of using exutil.IsKubernetesClusterFlag.
isK8sClusterEnv := os.Getenv(exutil.EnvIsKubernetesCluster)
if isK8sClusterEnv != "yes" {
e2e.Logf("EnvIsKubernetesCluster = %s, start monitoring ClusterOperators and ClusterVersions", isK8sClusterEnv)
startClusterOperatorMonitoring(ctx, m, configClient)
}
m.StartSampling(ctx)
return m, nil
} | monitor | ||||
function | openshift/openshift-tests-private | 6ff63200-2fb6-4983-8ec1-bd4437f6f05c | startAPIMonitoring | ['"context"', '"fmt"', '"time"', '"k8s.io/apimachinery/pkg/api/errors"', '"k8s.io/client-go/rest"'] | github.com/openshift/openshift-tests-private/pkg/monitor/api.go | func startAPIMonitoring(ctx context.Context, m *Monitor, clusterConfig *rest.Config) error {
pollingConfig := *clusterConfig
pollingConfig.Timeout = 3 * time.Second
pollingClient, err := clientcorev1.NewForConfig(&pollingConfig)
if err != nil {
return err
}
openshiftPollingClient, err := clientimagev1.NewForConfig(&pollingConfig)
if err != nil {
return err
}
m.AddSampler(
StartSampling(ctx, m, time.Second, func(previous bool) (condition *Condition, next bool) {
_, err := pollingClient.Namespaces().Get(ctx, "kube-system", metav1.GetOptions{})
switch {
case err == nil && !previous:
condition = &Condition{
Level: Info,
Locator: "kube-apiserver",
Message: "Kube API started responding to GET requests",
}
case err != nil && previous:
condition = &Condition{
Level: Error,
Locator: "kube-apiserver",
Message: fmt.Sprintf("Kube API started failing: %v", err),
}
}
return condition, err == nil
}).ConditionWhenFailing(&Condition{
Level: Error,
Locator: "kube-apiserver",
Message: fmt.Sprintf("Kube API is not responding to GET requests"),
}),
)
m.AddSampler(
StartSampling(ctx, m, time.Second, func(previous bool) (condition *Condition, next bool) {
_, err := openshiftPollingClient.ImageStreams("openshift-apiserver").Get(ctx, "missing", metav1.GetOptions{})
if !errors.IsUnexpectedServerError(err) && errors.IsNotFound(err) {
err = nil
}
switch {
case err == nil && !previous:
condition = &Condition{
Level: Info,
Locator: "openshift-apiserver",
Message: "OpenShift API started responding to GET requests",
}
case err != nil && previous:
condition = &Condition{
Level: Info,
Locator: "openshift-apiserver",
Message: fmt.Sprintf("OpenShift API started failing: %v", err),
}
}
return condition, err == nil
}).ConditionWhenFailing(&Condition{
Level: Error,
Locator: "openshift-apiserver",
Message: fmt.Sprintf("OpenShift API is not responding to GET requests"),
}),
)
return nil
} | monitor | ||||
function | openshift/openshift-tests-private | 3e53d6d6-1722-4942-b85f-f541ca4233d8 | findContainerStatus | github.com/openshift/openshift-tests-private/pkg/monitor/api.go | func findContainerStatus(status []corev1.ContainerStatus, name string, position int) *corev1.ContainerStatus {
if position < len(status) {
if status[position].Name == name {
return &status[position]
}
}
for i := range status {
if status[i].Name == name {
return &status[i]
}
}
return nil
} | monitor | |||||
function | openshift/openshift-tests-private | a05364e3-bb5c-4c1a-86d4-5e466f5fa542 | findNodeCondition | github.com/openshift/openshift-tests-private/pkg/monitor/api.go | func findNodeCondition(status []corev1.NodeCondition, name corev1.NodeConditionType, position int) *corev1.NodeCondition {
if position < len(status) {
if status[position].Type == name {
return &status[position]
}
}
for i := range status {
if status[i].Type == name {
return &status[i]
}
}
return nil
} | monitor | |||||
function | openshift/openshift-tests-private | c029f73a-b535-4aa8-82b1-fe526f89c0df | locateEvent | ['"fmt"', '"strings"'] | github.com/openshift/openshift-tests-private/pkg/monitor/api.go | func locateEvent(event *corev1.Event) string {
if len(event.InvolvedObject.Namespace) > 0 {
return fmt.Sprintf("ns/%s %s/%s", event.InvolvedObject.Namespace, strings.ToLower(event.InvolvedObject.Kind), event.InvolvedObject.Name)
}
return fmt.Sprintf("%s/%s", strings.ToLower(event.InvolvedObject.Kind), event.InvolvedObject.Name)
} | monitor | ||||
function | openshift/openshift-tests-private | 98f74dd4-f710-45d2-9016-58c15163a045 | locatePod | ['"fmt"'] | github.com/openshift/openshift-tests-private/pkg/monitor/api.go | func locatePod(pod *corev1.Pod) string {
return fmt.Sprintf("ns/%s pod/%s node/%s", pod.Namespace, pod.Name, pod.Spec.NodeName)
} | monitor | ||||
function | openshift/openshift-tests-private | 9f6a5ed3-8a5c-412e-8783-19c8673c5ece | locateNode | ['"fmt"'] | github.com/openshift/openshift-tests-private/pkg/monitor/api.go | func locateNode(node *corev1.Node) string {
return fmt.Sprintf("node/%s", node.Name)
} | monitor | ||||
function | openshift/openshift-tests-private | ab45e047-da7b-40e1-b195-34462c2c1b13 | locatePodContainer | ['"fmt"'] | github.com/openshift/openshift-tests-private/pkg/monitor/api.go | func locatePodContainer(pod *corev1.Pod, containerName string) string {
return fmt.Sprintf("ns/%s pod/%s node/%s container=%s", pod.Namespace, pod.Name, pod.Spec.NodeName, containerName)
} | monitor | ||||
function | openshift/openshift-tests-private | df0236cc-a7f1-4e6d-9f43-902311b50aba | filterToSystemNamespaces | ['"strings"', '"k8s.io/apimachinery/pkg/runtime"'] | github.com/openshift/openshift-tests-private/pkg/monitor/api.go | func filterToSystemNamespaces(obj runtime.Object) bool {
m, ok := obj.(metav1.Object)
if !ok {
return true
}
ns := m.GetNamespace()
if len(ns) == 0 {
return true
}
return strings.HasPrefix(ns, "kube-") || strings.HasPrefix(ns, "openshift-") || ns == "default"
} | monitor | ||||
function | openshift/openshift-tests-private | 93a4861e-ce97-4d06-ae6f-2fa043e98732 | NewErrorRecordingListWatcher | ['"k8s.io/client-go/tools/cache"'] | ['errorRecordingListWatcher'] | github.com/openshift/openshift-tests-private/pkg/monitor/api.go | func NewErrorRecordingListWatcher(recorder Recorder, lw cache.ListerWatcher) cache.ListerWatcher {
return &errorRecordingListWatcher{
lw: lw,
recorder: recorder,
}
} | monitor | |||
function | openshift/openshift-tests-private | bd886d5f-33d9-478d-918c-3f14e3fe53fe | List | ['"k8s.io/apimachinery/pkg/runtime"'] | ['errorRecordingListWatcher'] | github.com/openshift/openshift-tests-private/pkg/monitor/api.go | func (w *errorRecordingListWatcher) List(options metav1.ListOptions) (runtime.Object, error) {
obj, err := w.lw.List(options)
w.handle(err)
return obj, err
} | monitor | |||
function | openshift/openshift-tests-private | 8dc1b0e1-ba29-4281-a59e-555f98f0333f | Watch | ['"k8s.io/apimachinery/pkg/watch"'] | ['errorRecordingListWatcher'] | github.com/openshift/openshift-tests-private/pkg/monitor/api.go | func (w *errorRecordingListWatcher) Watch(options metav1.ListOptions) (watch.Interface, error) {
obj, err := w.lw.Watch(options)
w.handle(err)
return obj, err
} | monitor | |||
function | openshift/openshift-tests-private | 11015b77-f654-4a6e-b794-44c54e9b2938 | handle | ['"fmt"'] | ['errorRecordingListWatcher'] | github.com/openshift/openshift-tests-private/pkg/monitor/api.go | func (w *errorRecordingListWatcher) handle(err error) {
w.lock.Lock()
defer w.lock.Unlock()
if err != nil {
if !w.receivedError {
w.recorder.Record(Condition{
Level: Error,
Locator: "kube-apiserver",
Message: fmt.Sprintf("failed contacting the API: %v", err),
})
}
w.receivedError = true
} else {
w.receivedError = false
}
} | monitor | |||
test | openshift/openshift-tests-private | cee823b1-3068-4aa3-94d3-45bf5e905612 | cmd | import (
"context"
"fmt"
"io"
"os"
"os/signal"
"syscall"
"time"
) | github.com/openshift/openshift-tests-private/pkg/monitor/cmd.go | package monitor
import (
"context"
"fmt"
"io"
"os"
"os/signal"
"syscall"
"time"
)
// Options is used to run a monitoring process against the provided server as
// a command line interaction.
type Options struct {
Out, ErrOut io.Writer
}
// Run starts monitoring the cluster by invoking Start, periodically printing the
// events accumulated to Out. When the user hits CTRL+C or signals termination, the
// condition intervals (all non-instantaneous events) are reported to Out.
func (opt *Options) Run() error {
ctx, cancelFn := context.WithCancel(context.Background())
defer cancelFn()
abortCh := make(chan os.Signal)
go func() {
<-abortCh
fmt.Fprintf(opt.ErrOut, "Interrupted, terminating\n")
cancelFn()
sig := <-abortCh
fmt.Fprintf(opt.ErrOut, "Interrupted twice, exiting (%s)\n", sig)
switch sig {
case syscall.SIGINT:
os.Exit(130)
default:
os.Exit(0)
}
}()
signal.Notify(abortCh, syscall.SIGINT, syscall.SIGTERM)
m, err := Start(ctx)
if err != nil {
return err
}
go func() {
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
var last time.Time
done := false
for !done {
select {
case <-ticker.C:
case <-ctx.Done():
done = true
}
events := m.Events(last, time.Time{})
if len(events) > 0 {
for _, event := range events {
if !event.From.Equal(event.To) {
continue
}
fmt.Fprintln(opt.Out, event.String())
}
last = events[len(events)-1].From
}
}
}()
<-ctx.Done()
time.Sleep(150 * time.Millisecond)
if events := m.Conditions(time.Time{}, time.Time{}); len(events) > 0 {
fmt.Fprintf(opt.Out, "\nConditions:\n\n")
for _, event := range events {
fmt.Fprintln(opt.Out, event.String())
}
}
return nil
}
| package monitor | ||||
function | openshift/openshift-tests-private | dcdd343f-b440-415f-a713-50f701ec8db9 | Run | ['"context"', '"fmt"', '"os"', '"os/signal"', '"syscall"', '"time"'] | ['Options'] | github.com/openshift/openshift-tests-private/pkg/monitor/cmd.go | func (opt *Options) Run() error {
ctx, cancelFn := context.WithCancel(context.Background())
defer cancelFn()
abortCh := make(chan os.Signal)
go func() {
<-abortCh
fmt.Fprintf(opt.ErrOut, "Interrupted, terminating\n")
cancelFn()
sig := <-abortCh
fmt.Fprintf(opt.ErrOut, "Interrupted twice, exiting (%s)\n", sig)
switch sig {
case syscall.SIGINT:
os.Exit(130)
default:
os.Exit(0)
}
}()
signal.Notify(abortCh, syscall.SIGINT, syscall.SIGTERM)
m, err := Start(ctx)
if err != nil {
return err
}
go func() {
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
var last time.Time
done := false
for !done {
select {
case <-ticker.C:
case <-ctx.Done():
done = true
}
events := m.Events(last, time.Time{})
if len(events) > 0 {
for _, event := range events {
if !event.From.Equal(event.To) {
continue
}
fmt.Fprintln(opt.Out, event.String())
}
last = events[len(events)-1].From
}
}
}()
<-ctx.Done()
time.Sleep(150 * time.Millisecond)
if events := m.Conditions(time.Time{}, time.Time{}); len(events) > 0 {
fmt.Fprintf(opt.Out, "\nConditions:\n\n")
for _, event := range events {
fmt.Fprintln(opt.Out, event.String())
}
}
return nil
} | monitor | |||
file | openshift/openshift-tests-private | 70bba3b1-007f-48dd-9529-99b0487cca5d | event | import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
) | github.com/openshift/openshift-tests-private/pkg/monitor/event.go | package monitor
import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
)
func startEventMonitoring(ctx context.Context, m Recorder, client kubernetes.Interface) {
go func() {
for {
select {
case <-ctx.Done():
return
default:
}
events, err := client.CoreV1().Events("").List(ctx, metav1.ListOptions{Limit: 1})
if err != nil {
continue
}
rv := events.ResourceVersion
for expired := false; !expired; {
w, err := client.CoreV1().Events("").Watch(ctx, metav1.ListOptions{ResourceVersion: rv})
if err != nil {
if errors.IsResourceExpired(err) {
break
}
continue
}
w = watch.Filter(w, func(in watch.Event) (watch.Event, bool) {
return in, filterToSystemNamespaces(in.Object)
})
func() {
defer w.Stop()
for event := range w.ResultChan() {
switch event.Type {
case watch.Added, watch.Modified:
obj, ok := event.Object.(*corev1.Event)
if !ok {
continue
}
message := obj.Message
if obj.Count > 1 {
message += fmt.Sprintf(" (%d times)", obj.Count)
}
condition := Condition{
Level: Info,
Locator: locateEvent(obj),
Message: message,
}
if obj.Type == corev1.EventTypeWarning {
condition.Level = Warning
}
m.Record(condition)
case watch.Error:
var message string
if status, ok := event.Object.(*metav1.Status); ok {
if err := errors.FromObject(status); err != nil && errors.IsResourceExpired(err) {
expired = true
return
}
message = status.Message
} else {
message = fmt.Sprintf("event object was not a Status: %T", event.Object)
}
m.Record(Condition{
Level: Info,
Locator: "kube-apiserver",
Message: fmt.Sprintf("received an error while watching events: %s", message),
})
return
default:
}
}
}()
}
}
}()
}
| package monitor | ||||
function | openshift/openshift-tests-private | 8d9cd4cb-3c58-4c2f-a55d-08550b99ca0b | startEventMonitoring | ['"context"', '"fmt"', '"k8s.io/apimachinery/pkg/api/errors"', '"k8s.io/apimachinery/pkg/watch"', '"k8s.io/client-go/kubernetes"'] | github.com/openshift/openshift-tests-private/pkg/monitor/event.go | func startEventMonitoring(ctx context.Context, m Recorder, client kubernetes.Interface) {
go func() {
for {
select {
case <-ctx.Done():
return
default:
}
events, err := client.CoreV1().Events("").List(ctx, metav1.ListOptions{Limit: 1})
if err != nil {
continue
}
rv := events.ResourceVersion
for expired := false; !expired; {
w, err := client.CoreV1().Events("").Watch(ctx, metav1.ListOptions{ResourceVersion: rv})
if err != nil {
if errors.IsResourceExpired(err) {
break
}
continue
}
w = watch.Filter(w, func(in watch.Event) (watch.Event, bool) {
return in, filterToSystemNamespaces(in.Object)
})
func() {
defer w.Stop()
for event := range w.ResultChan() {
switch event.Type {
case watch.Added, watch.Modified:
obj, ok := event.Object.(*corev1.Event)
if !ok {
continue
}
message := obj.Message
if obj.Count > 1 {
message += fmt.Sprintf(" (%d times)", obj.Count)
}
condition := Condition{
Level: Info,
Locator: locateEvent(obj),
Message: message,
}
if obj.Type == corev1.EventTypeWarning {
condition.Level = Warning
}
m.Record(condition)
case watch.Error:
var message string
if status, ok := event.Object.(*metav1.Status); ok {
if err := errors.FromObject(status); err != nil && errors.IsResourceExpired(err) {
expired = true
return
}
message = status.Message
} else {
message = fmt.Sprintf("event object was not a Status: %T", event.Object)
}
m.Record(Condition{
Level: Info,
Locator: "kube-apiserver",
Message: fmt.Sprintf("received an error while watching events: %s", message),
})
return
default:
}
}
}()
}
}
}()
} | monitor | ||||
file | openshift/openshift-tests-private | 7dabc363-4513-48af-9dcb-4f18169b8f35 | monitor | import (
"context"
"fmt"
"sort"
"sync"
"time"
) | github.com/openshift/openshift-tests-private/pkg/monitor/monitor.go | package monitor
import (
"context"
"fmt"
"sort"
"sync"
"time"
)
// Monitor records events that have occurred in memory and can also periodically
// sample results.
type Monitor struct {
interval time.Duration
samplers []SamplerFunc
lock sync.Mutex
events []*Event
samples []*sample
}
// NewMonitor creates a monitor with the default sampling interval.
func NewMonitor() *Monitor {
return &Monitor{
interval: 15 * time.Second,
}
}
var _ Interface = &Monitor{}
// StartSampling starts sampling every interval until the provided context is done.
// A sample is captured when the context is closed.
func (m *Monitor) StartSampling(ctx context.Context) {
if m.interval == 0 {
return
}
go func() {
ticker := time.NewTicker(m.interval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
case <-ctx.Done():
m.sample()
return
}
m.sample()
}
}()
}
// AddSampler adds a sampler function to the list of samplers to run every interval.
// Conditions discovered this way are recorded with a start and end time if they persist
// across multiple sampling intervals.
func (m *Monitor) AddSampler(fn SamplerFunc) {
m.lock.Lock()
defer m.lock.Unlock()
m.samplers = append(m.samplers, fn)
}
// Record captures one or more conditions at the current time. All conditions are recorded
// in monotonic order as Event objects.
func (m *Monitor) Record(conditions ...Condition) {
if len(conditions) == 0 {
return
}
m.lock.Lock()
defer m.lock.Unlock()
t := time.Now().UTC()
for _, condition := range conditions {
m.events = append(m.events, &Event{
At: t,
Condition: condition,
})
}
}
func (m *Monitor) sample() {
m.lock.Lock()
samplers := m.samplers
m.lock.Unlock()
now := time.Now().UTC()
var conditions []*Condition
for _, fn := range samplers {
conditions = append(conditions, fn(now)...)
}
if len(conditions) == 0 {
return
}
m.lock.Lock()
defer m.lock.Unlock()
t := time.Now().UTC()
m.samples = append(m.samples, &sample{
at: t,
conditions: conditions,
})
}
func (m *Monitor) snapshot() ([]*sample, []*Event) {
m.lock.Lock()
defer m.lock.Unlock()
return m.samples, m.events
}
// Conditions returns all conditions that were sampled in the interval
// between from and to. If that does not include a sample interval, no
// results will be returned. EventIntervals are returned in order of
// their first sampling. A condition that was only sampled once is
// returned with from == to. No duplicate conditions are returned
// unless a sampling interval did not report that value.
func (m *Monitor) Conditions(from, to time.Time) EventIntervals {
samples, _ := m.snapshot()
return filterSamples(samples, from, to)
}
// Events returns all events that occur between from and to, including
// any sampled conditions that were encountered during that period.
// EventIntervals are returned in order of their occurrence.
func (m *Monitor) Events(from, to time.Time) EventIntervals {
samples, events := m.snapshot()
intervals := filterSamples(samples, from, to)
events = filterEvents(events, from, to)
// merge the two sets of inputs
mustSort := len(intervals) > 0
for i := range events {
if i > 0 && events[i-1].At.After(events[i].At) {
fmt.Printf("ERROR: event %d out of order\n %#v\n %#v\n", i, events[i-1], events[i])
}
at := events[i].At
condition := &events[i].Condition
intervals = append(intervals, &EventInterval{
From: at,
To: at,
Condition: condition,
})
}
if mustSort {
sort.Sort(intervals)
}
return intervals
}
func filterSamples(samples []*sample, from, to time.Time) EventIntervals {
if len(samples) == 0 {
return nil
}
if !from.IsZero() {
first := sort.Search(len(samples), func(i int) bool {
return samples[i].at.After(from)
})
if first == -1 {
return nil
}
samples = samples[first:]
}
if !to.IsZero() {
for i, sample := range samples {
if sample.at.After(to) {
samples = samples[:i]
break
}
}
}
if len(samples) == 0 {
return nil
}
intervals := make(EventIntervals, 0, len(samples)*2)
last, next := make(map[Condition]*EventInterval), make(map[Condition]*EventInterval)
for _, sample := range samples {
for _, condition := range sample.conditions {
interval, ok := last[*condition]
if ok {
interval.To = sample.at
next[*condition] = interval
continue
}
interval = &EventInterval{
Condition: condition,
From: sample.at,
To: sample.at,
}
next[*condition] = interval
intervals = append(intervals, interval)
}
for k := range last {
delete(last, k)
}
last, next = next, last
}
return intervals
}
func filterEvents(events []*Event, from, to time.Time) []*Event {
if from.IsZero() && to.IsZero() {
return events
}
first := sort.Search(len(events), func(i int) bool {
return events[i].At.After(from)
})
if first == -1 {
return nil
}
if to.IsZero() {
return events[first:]
}
for i := first; i < len(events); i++ {
if events[i].At.After(to) {
return events[first:i]
}
}
return events[first:]
}
| package monitor | ||||
function | openshift/openshift-tests-private | e60db774-476b-4cd7-8a44-e3739c0791a5 | NewMonitor | ['"time"'] | ['Monitor'] | github.com/openshift/openshift-tests-private/pkg/monitor/monitor.go | func NewMonitor() *Monitor {
return &Monitor{
interval: 15 * time.Second,
}
} | monitor | |||
function | openshift/openshift-tests-private | 228decb6-f666-485c-bf73-a18a4767484e | StartSampling | ['"context"', '"time"'] | ['Monitor'] | github.com/openshift/openshift-tests-private/pkg/monitor/monitor.go | func (m *Monitor) StartSampling(ctx context.Context) {
if m.interval == 0 {
return
}
go func() {
ticker := time.NewTicker(m.interval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
case <-ctx.Done():
m.sample()
return
}
m.sample()
}
}()
} | monitor | |||
function | openshift/openshift-tests-private | e41b6f6a-2517-4f25-be54-9386d723236e | AddSampler | ['Monitor'] | github.com/openshift/openshift-tests-private/pkg/monitor/monitor.go | func (m *Monitor) AddSampler(fn SamplerFunc) {
m.lock.Lock()
defer m.lock.Unlock()
m.samplers = append(m.samplers, fn)
} | monitor | ||||
function | openshift/openshift-tests-private | cb0f648a-973b-41d4-9e33-846982c2c2a7 | Record | ['"time"'] | ['Monitor'] | github.com/openshift/openshift-tests-private/pkg/monitor/monitor.go | func (m *Monitor) Record(conditions ...Condition) {
if len(conditions) == 0 {
return
}
m.lock.Lock()
defer m.lock.Unlock()
t := time.Now().UTC()
for _, condition := range conditions {
m.events = append(m.events, &Event{
At: t,
Condition: condition,
})
}
} | monitor | |||
function | openshift/openshift-tests-private | 9fb22aaa-d66e-474f-af06-2f62ca90cff2 | sample | ['"time"'] | ['Monitor'] | github.com/openshift/openshift-tests-private/pkg/monitor/monitor.go | func (m *Monitor) sample() {
m.lock.Lock()
samplers := m.samplers
m.lock.Unlock()
now := time.Now().UTC()
var conditions []*Condition
for _, fn := range samplers {
conditions = append(conditions, fn(now)...)
}
if len(conditions) == 0 {
return
}
m.lock.Lock()
defer m.lock.Unlock()
t := time.Now().UTC()
m.samples = append(m.samples, &sample{
at: t,
conditions: conditions,
})
} | monitor | |||
function | openshift/openshift-tests-private | 023ff638-661f-4665-b43e-104c9fed63bc | snapshot | ['Monitor'] | github.com/openshift/openshift-tests-private/pkg/monitor/monitor.go | func (m *Monitor) snapshot() ([]*sample, []*Event) {
m.lock.Lock()
defer m.lock.Unlock()
return m.samples, m.events
} | monitor | ||||
function | openshift/openshift-tests-private | 21b8cc2f-62f1-4cdc-8fac-4b96719dbfba | Conditions | ['"time"'] | ['Monitor'] | github.com/openshift/openshift-tests-private/pkg/monitor/monitor.go | func (m *Monitor) Conditions(from, to time.Time) EventIntervals {
samples, _ := m.snapshot()
return filterSamples(samples, from, to)
} | monitor | |||
function | openshift/openshift-tests-private | daea46a3-4c2f-4a3e-bc59-47cba4960f9a | Events | ['"fmt"', '"sort"', '"time"'] | ['Monitor'] | github.com/openshift/openshift-tests-private/pkg/monitor/monitor.go | func (m *Monitor) Events(from, to time.Time) EventIntervals {
samples, events := m.snapshot()
intervals := filterSamples(samples, from, to)
events = filterEvents(events, from, to)
// merge the two sets of inputs
mustSort := len(intervals) > 0
for i := range events {
if i > 0 && events[i-1].At.After(events[i].At) {
fmt.Printf("ERROR: event %d out of order\n %#v\n %#v\n", i, events[i-1], events[i])
}
at := events[i].At
condition := &events[i].Condition
intervals = append(intervals, &EventInterval{
From: at,
To: at,
Condition: condition,
})
}
if mustSort {
sort.Sort(intervals)
}
return intervals
} | monitor | |||
function | openshift/openshift-tests-private | afcdc96e-4619-4cdb-8e03-100f71aaf9f8 | filterSamples | ['"sort"', '"time"'] | github.com/openshift/openshift-tests-private/pkg/monitor/monitor.go | func filterSamples(samples []*sample, from, to time.Time) EventIntervals {
if len(samples) == 0 {
return nil
}
if !from.IsZero() {
first := sort.Search(len(samples), func(i int) bool {
return samples[i].at.After(from)
})
if first == -1 {
return nil
}
samples = samples[first:]
}
if !to.IsZero() {
for i, sample := range samples {
if sample.at.After(to) {
samples = samples[:i]
break
}
}
}
if len(samples) == 0 {
return nil
}
intervals := make(EventIntervals, 0, len(samples)*2)
last, next := make(map[Condition]*EventInterval), make(map[Condition]*EventInterval)
for _, sample := range samples {
for _, condition := range sample.conditions {
interval, ok := last[*condition]
if ok {
interval.To = sample.at
next[*condition] = interval
continue
}
interval = &EventInterval{
Condition: condition,
From: sample.at,
To: sample.at,
}
next[*condition] = interval
intervals = append(intervals, interval)
}
for k := range last {
delete(last, k)
}
last, next = next, last
}
return intervals
} | monitor | ||||
function | openshift/openshift-tests-private | 631ebaf7-9a10-4ede-b06d-525af31ddceb | filterEvents | ['"sort"', '"time"'] | github.com/openshift/openshift-tests-private/pkg/monitor/monitor.go | func filterEvents(events []*Event, from, to time.Time) []*Event {
if from.IsZero() && to.IsZero() {
return events
}
first := sort.Search(len(events), func(i int) bool {
return events[i].At.After(from)
})
if first == -1 {
return nil
}
if to.IsZero() {
return events[first:]
}
for i := first; i < len(events); i++ {
if events[i].At.After(to) {
return events[first:i]
}
}
return events[first:]
} | monitor | ||||
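Putting the pieces of monitor.go together: callers register samplers, start the sampling loop, record point-in-time conditions, and read back merged intervals. The sketch below is illustrative same-package code using only the API shown above; the locator and message strings are made up.

func exampleMonitorUsage(ctx context.Context) EventIntervals {
	m := NewMonitor()
	// A sampler runs every interval; a condition it keeps returning is merged
	// into a single EventInterval spanning consecutive samples.
	m.AddSampler(func(now time.Time) []*Condition {
		return []*Condition{{Level: Info, Locator: "example", Message: "sampler ran"}}
	})
	m.StartSampling(ctx)
	// Recorded conditions become zero-length intervals (From == To).
	m.Record(Condition{Level: Warning, Locator: "example", Message: "something happened"})
	// Zero times mean no bound; Events merges samples and events in time order.
	return m.Events(time.Time{}, time.Time{})
}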
file | openshift/openshift-tests-private | 7ba43380-b369-4558-afed-42bd856f08a4 | monitor_test | import (
"reflect"
"testing"
"time"
"k8s.io/apimachinery/pkg/util/diff"
) | github.com/openshift/openshift-tests-private/pkg/monitor/monitor_test.go | package monitor
import (
"reflect"
"testing"
"time"
"k8s.io/apimachinery/pkg/util/diff"
)
func TestMonitor_Newlines(t *testing.T) {
evt := &Event{Condition: Condition{Message: "a\nb\n"}}
expected := "Jan 01 00:00:00.000 I a\\nb\\n"
if evt.String() != expected {
t.Fatalf("unexpected:\n%s\n%s", expected, evt.String())
}
}
func TestMonitor_Events(t *testing.T) {
tests := []struct {
name string
events []*Event
samples []*sample
from time.Time
to time.Time
want EventIntervals
}{
{
events: []*Event{
{Condition{Message: "1"}, time.Unix(1, 0)},
{Condition{Message: "2"}, time.Unix(2, 0)},
},
want: EventIntervals{
{&Condition{Message: "1"}, time.Unix(1, 0), time.Unix(1, 0)},
{&Condition{Message: "2"}, time.Unix(2, 0), time.Unix(2, 0)},
},
},
{
events: []*Event{
{Condition{Message: "1"}, time.Unix(1, 0)},
{Condition{Message: "2"}, time.Unix(2, 0)},
},
from: time.Unix(1, 0),
want: EventIntervals{
{&Condition{Message: "2"}, time.Unix(2, 0), time.Unix(2, 0)},
},
},
{
events: []*Event{
{Condition{Message: "1"}, time.Unix(1, 0)},
{Condition{Message: "2"}, time.Unix(2, 0)},
},
from: time.Unix(1, 0),
to: time.Unix(2, 0),
want: EventIntervals{
{&Condition{Message: "2"}, time.Unix(2, 0), time.Unix(2, 0)},
},
},
{
events: []*Event{
{Condition{Message: "1"}, time.Unix(1, 0)},
{Condition{Message: "2"}, time.Unix(2, 0)},
},
from: time.Unix(2, 0),
want: nil,
},
{
samples: []*sample{
{time.Unix(1, 0), []*Condition{{Message: "1"}, {Message: "A"}}},
{time.Unix(2, 0), []*Condition{{Message: "2"}}},
{time.Unix(3, 0), []*Condition{{Message: "2"}, {Message: "A"}}},
},
from: time.Unix(1, 0),
want: EventIntervals{
{&Condition{Message: "2"}, time.Unix(2, 0), time.Unix(3, 0)},
{&Condition{Message: "A"}, time.Unix(3, 0), time.Unix(3, 0)},
},
},
{
samples: []*sample{
{time.Unix(1, 0), []*Condition{{Message: "1"}, {Message: "A"}}},
{time.Unix(2, 0), []*Condition{{Message: "2"}}},
{time.Unix(3, 0), []*Condition{{Message: "2"}, {Message: "A"}}},
},
want: EventIntervals{
{&Condition{Message: "1"}, time.Unix(1, 0), time.Unix(1, 0)},
{&Condition{Message: "A"}, time.Unix(1, 0), time.Unix(1, 0)},
{&Condition{Message: "2"}, time.Unix(2, 0), time.Unix(3, 0)},
{&Condition{Message: "A"}, time.Unix(3, 0), time.Unix(3, 0)},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
m := &Monitor{
events: tt.events,
samples: tt.samples,
}
if got := m.Events(tt.from, tt.to); !reflect.DeepEqual(got, tt.want) {
t.Errorf("%s", diff.ObjectReflectDiff(tt.want, got))
}
})
}
}
| package monitor | ||||
function | openshift/openshift-tests-private | f3279cfa-c779-4dee-be4f-19f45d090495 | TestMonitor_Newlines | ['"testing"'] | github.com/openshift/openshift-tests-private/pkg/monitor/monitor_test.go | func TestMonitor_Newlines(t *testing.T) {
evt := &Event{Condition: Condition{Message: "a\nb\n"}}
expected := "Jan 01 00:00:00.000 I a\\nb\\n"
if evt.String() != expected {
t.Fatalf("unexpected:\n%s\n%s", expected, evt.String())
}
} | monitor | ||||
function | openshift/openshift-tests-private | 565ba134-0182-4b89-b041-1aa181bd264d | TestMonitor_Events | ['"reflect"', '"testing"', '"time"', '"k8s.io/apimachinery/pkg/util/diff"'] | github.com/openshift/openshift-tests-private/pkg/monitor/monitor_test.go | func TestMonitor_Events(t *testing.T) {
tests := []struct {
name string
events []*Event
samples []*sample
from time.Time
to time.Time
want EventIntervals
}{
{
events: []*Event{
{Condition{Message: "1"}, time.Unix(1, 0)},
{Condition{Message: "2"}, time.Unix(2, 0)},
},
want: EventIntervals{
{&Condition{Message: "1"}, time.Unix(1, 0), time.Unix(1, 0)},
{&Condition{Message: "2"}, time.Unix(2, 0), time.Unix(2, 0)},
},
},
{
events: []*Event{
{Condition{Message: "1"}, time.Unix(1, 0)},
{Condition{Message: "2"}, time.Unix(2, 0)},
},
from: time.Unix(1, 0),
want: EventIntervals{
{&Condition{Message: "2"}, time.Unix(2, 0), time.Unix(2, 0)},
},
},
{
events: []*Event{
{Condition{Message: "1"}, time.Unix(1, 0)},
{Condition{Message: "2"}, time.Unix(2, 0)},
},
from: time.Unix(1, 0),
to: time.Unix(2, 0),
want: EventIntervals{
{&Condition{Message: "2"}, time.Unix(2, 0), time.Unix(2, 0)},
},
},
{
events: []*Event{
{Condition{Message: "1"}, time.Unix(1, 0)},
{Condition{Message: "2"}, time.Unix(2, 0)},
},
from: time.Unix(2, 0),
want: nil,
},
{
samples: []*sample{
{time.Unix(1, 0), []*Condition{{Message: "1"}, {Message: "A"}}},
{time.Unix(2, 0), []*Condition{{Message: "2"}}},
{time.Unix(3, 0), []*Condition{{Message: "2"}, {Message: "A"}}},
},
from: time.Unix(1, 0),
want: EventIntervals{
{&Condition{Message: "2"}, time.Unix(2, 0), time.Unix(3, 0)},
{&Condition{Message: "A"}, time.Unix(3, 0), time.Unix(3, 0)},
},
},
{
samples: []*sample{
{time.Unix(1, 0), []*Condition{{Message: "1"}, {Message: "A"}}},
{time.Unix(2, 0), []*Condition{{Message: "2"}}},
{time.Unix(3, 0), []*Condition{{Message: "2"}, {Message: "A"}}},
},
want: EventIntervals{
{&Condition{Message: "1"}, time.Unix(1, 0), time.Unix(1, 0)},
{&Condition{Message: "A"}, time.Unix(1, 0), time.Unix(1, 0)},
{&Condition{Message: "2"}, time.Unix(2, 0), time.Unix(3, 0)},
{&Condition{Message: "A"}, time.Unix(3, 0), time.Unix(3, 0)},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
m := &Monitor{
events: tt.events,
samples: tt.samples,
}
if got := m.Events(tt.from, tt.to); !reflect.DeepEqual(got, tt.want) {
t.Errorf("%s", diff.ObjectReflectDiff(tt.want, got))
}
})
}
} | monitor | ||||
file | openshift/openshift-tests-private | 7df1c539-2aef-4874-b00c-d594d352bfc5 | node | import (
"context"
"fmt"
"time"
corev1 "k8s.io/api/core/v1"
informercorev1 "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
) | github.com/openshift/openshift-tests-private/pkg/monitor/node.go | package monitor
import (
"context"
"fmt"
"time"
corev1 "k8s.io/api/core/v1"
informercorev1 "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
)
func startNodeMonitoring(ctx context.Context, m Recorder, client kubernetes.Interface) {
nodeChangeFns := []func(node, oldNode *corev1.Node) []Condition{
func(node, oldNode *corev1.Node) []Condition {
var conditions []Condition
for i := range node.Status.Conditions {
c := &node.Status.Conditions[i]
previous := findNodeCondition(oldNode.Status.Conditions, c.Type, i)
if previous == nil {
continue
}
if c.Status != previous.Status {
conditions = append(conditions, Condition{
Level: Warning,
Locator: locateNode(node),
Message: fmt.Sprintf("condition %s changed", c.Type),
})
}
}
if node.UID != oldNode.UID {
conditions = append(conditions, Condition{
Level: Error,
Locator: locateNode(node),
Message: fmt.Sprintf("node was deleted and recreated"),
})
}
return conditions
},
}
nodeInformer := informercorev1.NewNodeInformer(client, time.Hour, nil)
nodeInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {},
DeleteFunc: func(obj interface{}) {
node, ok := obj.(*corev1.Node)
if !ok {
return
}
m.Record(Condition{
Level: Warning,
Locator: locateNode(node),
Message: "deleted",
})
},
UpdateFunc: func(old, obj interface{}) {
node, ok := obj.(*corev1.Node)
if !ok {
return
}
oldNode, ok := old.(*corev1.Node)
if !ok {
return
}
for _, fn := range nodeChangeFns {
m.Record(fn(node, oldNode)...)
}
},
},
)
m.AddSampler(func(now time.Time) []*Condition {
var conditions []*Condition
for _, obj := range nodeInformer.GetStore().List() {
node, ok := obj.(*corev1.Node)
if !ok {
continue
}
isReady := false
if c := findNodeCondition(node.Status.Conditions, corev1.NodeReady, 0); c != nil {
isReady = c.Status == corev1.ConditionTrue
}
if !isReady {
conditions = append(conditions, &Condition{
Level: Warning,
Locator: locateNode(node),
Message: "node is not ready",
})
}
}
return conditions
})
go nodeInformer.Run(ctx.Done())
}
| package monitor | ||||
function | openshift/openshift-tests-private | 30eeb9e2-8d52-431b-a181-7ca47b07c9eb | startNodeMonitoring | ['"context"', '"fmt"', '"time"', '"k8s.io/client-go/kubernetes"', '"k8s.io/client-go/tools/cache"'] | github.com/openshift/openshift-tests-private/pkg/monitor/node.go | func startNodeMonitoring(ctx context.Context, m Recorder, client kubernetes.Interface) {
nodeChangeFns := []func(node, oldNode *corev1.Node) []Condition{
func(node, oldNode *corev1.Node) []Condition {
var conditions []Condition
for i := range node.Status.Conditions {
c := &node.Status.Conditions[i]
previous := findNodeCondition(oldNode.Status.Conditions, c.Type, i)
if previous == nil {
continue
}
if c.Status != previous.Status {
conditions = append(conditions, Condition{
Level: Warning,
Locator: locateNode(node),
Message: fmt.Sprintf("condition %s changed", c.Type),
})
}
}
if node.UID != oldNode.UID {
conditions = append(conditions, Condition{
Level: Error,
Locator: locateNode(node),
Message: fmt.Sprintf("node was deleted and recreated"),
})
}
return conditions
},
}
nodeInformer := informercorev1.NewNodeInformer(client, time.Hour, nil)
nodeInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {},
DeleteFunc: func(obj interface{}) {
node, ok := obj.(*corev1.Node)
if !ok {
return
}
m.Record(Condition{
Level: Warning,
Locator: locateNode(node),
Message: "deleted",
})
},
UpdateFunc: func(old, obj interface{}) {
node, ok := obj.(*corev1.Node)
if !ok {
return
}
oldNode, ok := old.(*corev1.Node)
if !ok {
return
}
for _, fn := range nodeChangeFns {
m.Record(fn(node, oldNode)...)
}
},
},
)
m.AddSampler(func(now time.Time) []*Condition {
var conditions []*Condition
for _, obj := range nodeInformer.GetStore().List() {
node, ok := obj.(*corev1.Node)
if !ok {
continue
}
isReady := false
if c := findNodeCondition(node.Status.Conditions, corev1.NodeReady, 0); c != nil {
isReady = c.Status == corev1.ConditionTrue
}
if !isReady {
conditions = append(conditions, &Condition{
Level: Warning,
Locator: locateNode(node),
Message: "node is not ready",
})
}
}
return conditions
})
go nodeInformer.Run(ctx.Done())
} | monitor | ||||
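startNodeMonitoring relies on a findNodeCondition helper defined elsewhere in the package; its real implementation is not part of this listing. The sketch below is only a plausible reading of the call sites above, where the third argument looks like a position hint for the common case that conditions keep their order between updates.

// Illustrative sketch only — not the package's actual helper.
func findNodeConditionSketch(conditions []corev1.NodeCondition, t corev1.NodeConditionType, hint int) *corev1.NodeCondition {
	// Fast path: the caller passes the index the condition had in the new object.
	if hint < len(conditions) && conditions[hint].Type == t {
		return &conditions[hint]
	}
	for i := range conditions {
		if conditions[i].Type == t {
			return &conditions[i]
		}
	}
	return nil
}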
file | openshift/openshift-tests-private | 732e769e-3c7d-45ed-a573-bec4dca2b59d | operator | import (
"context"
"fmt"
"strings"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
configv1 "github.com/openshift/api/config/v1"
configclientset "github.com/openshift/client-go/config/clientset/versioned"
) | github.com/openshift/openshift-tests-private/pkg/monitor/operator.go | package monitor
import (
"context"
"fmt"
"strings"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
configv1 "github.com/openshift/api/config/v1"
configclientset "github.com/openshift/client-go/config/clientset/versioned"
)
func startClusterOperatorMonitoring(ctx context.Context, m Recorder, client configclientset.Interface) {
coInformer := cache.NewSharedIndexInformer(
NewErrorRecordingListWatcher(m, &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return client.ConfigV1().ClusterOperators().List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return client.ConfigV1().ClusterOperators().Watch(ctx, options)
},
}),
&configv1.ClusterOperator{},
time.Hour,
nil,
)
coChangeFns := []func(co, oldCO *configv1.ClusterOperator) []Condition{
func(co, oldCO *configv1.ClusterOperator) []Condition {
var conditions []Condition
for i := range co.Status.Conditions {
s := &co.Status.Conditions[i]
previous := findOperatorStatusCondition(oldCO.Status.Conditions, s.Type)
if previous == nil {
continue
}
if s.Status != previous.Status {
var msg string
switch {
case len(s.Reason) > 0 && len(s.Message) > 0:
msg = fmt.Sprintf("changed %s to %s: %s: %s", s.Type, s.Status, s.Reason, s.Message)
case len(s.Message) > 0:
msg = fmt.Sprintf("changed %s to %s: %s", s.Type, s.Status, s.Message)
default:
msg = fmt.Sprintf("changed %s to %s", s.Type, s.Status)
}
level := Warning
if s.Type == configv1.OperatorDegraded && s.Status == configv1.ConditionTrue {
level = Error
}
if s.Type == configv1.ClusterStatusConditionType("Failing") && s.Status == configv1.ConditionTrue {
level = Error
}
conditions = append(conditions, Condition{
Level: level,
Locator: locateClusterOperator(co),
Message: msg,
})
}
}
if changes := findOperatorVersionChange(oldCO.Status.Versions, co.Status.Versions); len(changes) > 0 {
conditions = append(conditions, Condition{
Level: Info,
Locator: locateClusterOperator(co),
Message: fmt.Sprintf("versions: %v", strings.Join(changes, ", ")),
})
}
return conditions
},
}
startTime := time.Now().Add(-time.Minute)
coInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
co, ok := obj.(*configv1.ClusterOperator)
if !ok {
return
}
// filter out old clusteroperators so our monitor doesn't send a big chunk
// of clusteroperator creations
if co.CreationTimestamp.Time.Before(startTime) {
return
}
m.Record(Condition{
Level: Info,
Locator: locateClusterOperator(co),
Message: "created",
})
},
DeleteFunc: func(obj interface{}) {
co, ok := obj.(*configv1.ClusterOperator)
if !ok {
return
}
m.Record(Condition{
Level: Warning,
Locator: locateClusterOperator(co),
Message: "deleted",
})
},
UpdateFunc: func(old, obj interface{}) {
co, ok := obj.(*configv1.ClusterOperator)
if !ok {
return
}
oldCO, ok := old.(*configv1.ClusterOperator)
if !ok {
return
}
if co.UID != oldCO.UID {
return
}
for _, fn := range coChangeFns {
m.Record(fn(co, oldCO)...)
}
},
},
)
go coInformer.Run(ctx.Done())
cvInformer := cache.NewSharedIndexInformer(
NewErrorRecordingListWatcher(m, &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = "metadata.name=version"
return client.ConfigV1().ClusterVersions().List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = "metadata.name=version"
return client.ConfigV1().ClusterVersions().Watch(ctx, options)
},
}),
&configv1.ClusterVersion{},
time.Hour,
nil,
)
cvChangeFns := []func(cv, oldCV *configv1.ClusterVersion) []Condition{
func(cv, oldCV *configv1.ClusterVersion) []Condition {
var conditions []Condition
if len(cv.Status.History) == 0 {
return nil
}
if len(oldCV.Status.History) == 0 {
conditions = append(conditions, Condition{
Level: Warning,
Locator: locateClusterVersion(cv),
Message: fmt.Sprintf("cluster converging to %s", cv.Status.History[0].Version),
})
return conditions
}
cvNew, cvOld := cv.Status.History[0], oldCV.Status.History[0]
switch {
case cvNew.State == configv1.CompletedUpdate && cvOld.State != cvNew.State:
conditions = append(conditions, Condition{
Level: Warning,
Locator: locateClusterVersion(cv),
Message: fmt.Sprintf("cluster reached %s", cvNew.Version),
})
case cvNew.State == configv1.PartialUpdate && cvOld.State == cvNew.State && cvOld.Image != cvNew.Image:
conditions = append(conditions, Condition{
Level: Warning,
Locator: locateClusterVersion(cv),
Message: fmt.Sprintf("cluster upgrading to %s without completing %s", cvNew.Version, cvOld.Version),
})
}
return conditions
},
func(cv, oldCV *configv1.ClusterVersion) []Condition {
var conditions []Condition
for i := range cv.Status.Conditions {
s := &cv.Status.Conditions[i]
previous := findOperatorStatusCondition(oldCV.Status.Conditions, s.Type)
if previous == nil {
continue
}
if s.Status != previous.Status {
var msg string
switch {
case len(s.Reason) > 0 && len(s.Message) > 0:
msg = fmt.Sprintf("changed %s to %s: %s: %s", s.Type, s.Status, s.Reason, s.Message)
case len(s.Message) > 0:
msg = fmt.Sprintf("changed %s to %s: %s", s.Type, s.Status, s.Message)
default:
msg = fmt.Sprintf("changed %s to %s", s.Type, s.Status)
}
level := Warning
if s.Type == configv1.OperatorDegraded && s.Status == configv1.ConditionTrue {
level = Error
}
if s.Type == configv1.ClusterStatusConditionType("Failing") && s.Status == configv1.ConditionTrue {
level = Error
}
conditions = append(conditions, Condition{
Level: level,
Locator: locateClusterVersion(cv),
Message: msg,
})
}
}
return conditions
},
}
cvInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
cv, ok := obj.(*configv1.ClusterVersion)
if !ok {
return
}
// filter out old clusterversions so our monitor doesn't send a big chunk
// of clusterversion creations
if cv.CreationTimestamp.Time.Before(startTime) {
return
}
m.Record(Condition{
Level: Info,
Locator: locateClusterVersion(cv),
Message: "created",
})
},
DeleteFunc: func(obj interface{}) {
cv, ok := obj.(*configv1.ClusterVersion)
if !ok {
return
}
m.Record(Condition{
Level: Warning,
Locator: locateClusterVersion(cv),
Message: "deleted",
})
},
UpdateFunc: func(old, obj interface{}) {
cv, ok := obj.(*configv1.ClusterVersion)
if !ok {
return
}
oldCV, ok := old.(*configv1.ClusterVersion)
if !ok {
return
}
if cv.UID != oldCV.UID {
return
}
for _, fn := range cvChangeFns {
m.Record(fn(cv, oldCV)...)
}
},
},
)
m.AddSampler(func(now time.Time) []*Condition {
var conditions []*Condition
for _, obj := range cvInformer.GetStore().List() {
cv, ok := obj.(*configv1.ClusterVersion)
if !ok {
continue
}
if len(cv.Status.History) > 0 {
if cv.Status.History[0].State != configv1.CompletedUpdate {
conditions = append(conditions, &Condition{
Level: Warning,
Locator: locateClusterVersion(cv),
Message: fmt.Sprintf("cluster is updating to %s", cv.Status.History[0].Version),
})
}
}
}
return conditions
})
go cvInformer.Run(ctx.Done())
}
func locateClusterOperator(co *configv1.ClusterOperator) string {
return fmt.Sprintf("clusteroperator/%s", co.Name)
}
func locateClusterVersion(cv *configv1.ClusterVersion) string {
return fmt.Sprintf("clusterversion/%s", cv.Name)
}
func findOperatorVersionChange(old, new []configv1.OperandVersion) []string {
var changed []string
for i := 0; i < len(new); i++ {
for j := 0; j < len(old); j++ {
p := (j + i) % len(old)
if old[p].Name != new[i].Name {
continue
}
if old[p].Version == new[i].Version {
break
}
changed = append(changed, fmt.Sprintf("%s %s -> %s", new[i].Name, old[p].Version, new[i].Version))
break
}
}
return changed
}
func findOperatorStatusCondition(conditions []configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType) *configv1.ClusterOperatorStatusCondition {
for i := range conditions {
if conditions[i].Type == conditionType {
return &conditions[i]
}
}
return nil
}
| package monitor | ||||
function | openshift/openshift-tests-private | a28ba156-727f-42d1-8cad-a56c0bedc03e | startClusterOperatorMonitoring | ['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/runtime"', '"k8s.io/apimachinery/pkg/watch"', '"k8s.io/client-go/tools/cache"'] | github.com/openshift/openshift-tests-private/pkg/monitor/operator.go | func startClusterOperatorMonitoring(ctx context.Context, m Recorder, client configclientset.Interface) {
coInformer := cache.NewSharedIndexInformer(
NewErrorRecordingListWatcher(m, &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return client.ConfigV1().ClusterOperators().List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return client.ConfigV1().ClusterOperators().Watch(ctx, options)
},
}),
&configv1.ClusterOperator{},
time.Hour,
nil,
)
coChangeFns := []func(co, oldCO *configv1.ClusterOperator) []Condition{
func(co, oldCO *configv1.ClusterOperator) []Condition {
var conditions []Condition
for i := range co.Status.Conditions {
s := &co.Status.Conditions[i]
previous := findOperatorStatusCondition(oldCO.Status.Conditions, s.Type)
if previous == nil {
continue
}
if s.Status != previous.Status {
var msg string
switch {
case len(s.Reason) > 0 && len(s.Message) > 0:
msg = fmt.Sprintf("changed %s to %s: %s: %s", s.Type, s.Status, s.Reason, s.Message)
case len(s.Message) > 0:
msg = fmt.Sprintf("changed %s to %s: %s", s.Type, s.Status, s.Message)
default:
msg = fmt.Sprintf("changed %s to %s", s.Type, s.Status)
}
level := Warning
if s.Type == configv1.OperatorDegraded && s.Status == configv1.ConditionTrue {
level = Error
}
if s.Type == configv1.ClusterStatusConditionType("Failing") && s.Status == configv1.ConditionTrue {
level = Error
}
conditions = append(conditions, Condition{
Level: level,
Locator: locateClusterOperator(co),
Message: msg,
})
}
}
if changes := findOperatorVersionChange(oldCO.Status.Versions, co.Status.Versions); len(changes) > 0 {
conditions = append(conditions, Condition{
Level: Info,
Locator: locateClusterOperator(co),
Message: fmt.Sprintf("versions: %v", strings.Join(changes, ", ")),
})
}
return conditions
},
}
startTime := time.Now().Add(-time.Minute)
coInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
co, ok := obj.(*configv1.ClusterOperator)
if !ok {
return
}
// filter out old clusteroperators so our monitor doesn't send a big chunk
// of clusteroperator creations
if co.CreationTimestamp.Time.Before(startTime) {
return
}
m.Record(Condition{
Level: Info,
Locator: locateClusterOperator(co),
Message: "created",
})
},
DeleteFunc: func(obj interface{}) {
co, ok := obj.(*configv1.ClusterOperator)
if !ok {
return
}
m.Record(Condition{
Level: Warning,
Locator: locateClusterOperator(co),
Message: "deleted",
})
},
UpdateFunc: func(old, obj interface{}) {
co, ok := obj.(*configv1.ClusterOperator)
if !ok {
return
}
oldCO, ok := old.(*configv1.ClusterOperator)
if !ok {
return
}
if co.UID != oldCO.UID {
return
}
for _, fn := range coChangeFns {
m.Record(fn(co, oldCO)...)
}
},
},
)
go coInformer.Run(ctx.Done())
cvInformer := cache.NewSharedIndexInformer(
NewErrorRecordingListWatcher(m, &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = "metadata.name=version"
return client.ConfigV1().ClusterVersions().List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = "metadata.name=version"
return client.ConfigV1().ClusterVersions().Watch(ctx, options)
},
}),
&configv1.ClusterVersion{},
time.Hour,
nil,
)
cvChangeFns := []func(cv, oldCV *configv1.ClusterVersion) []Condition{
func(cv, oldCV *configv1.ClusterVersion) []Condition {
var conditions []Condition
if len(cv.Status.History) == 0 {
return nil
}
if len(oldCV.Status.History) == 0 {
conditions = append(conditions, Condition{
Level: Warning,
Locator: locateClusterVersion(cv),
Message: fmt.Sprintf("cluster converging to %s", cv.Status.History[0].Version),
})
return conditions
}
cvNew, cvOld := cv.Status.History[0], oldCV.Status.History[0]
switch {
case cvNew.State == configv1.CompletedUpdate && cvOld.State != cvNew.State:
conditions = append(conditions, Condition{
Level: Warning,
Locator: locateClusterVersion(cv),
Message: fmt.Sprintf("cluster reached %s", cvNew.Version),
})
case cvNew.State == configv1.PartialUpdate && cvOld.State == cvNew.State && cvOld.Image != cvNew.Image:
conditions = append(conditions, Condition{
Level: Warning,
Locator: locateClusterVersion(cv),
Message: fmt.Sprintf("cluster upgrading to %s without completing %s", cvNew.Version, cvOld.Version),
})
}
return conditions
},
func(cv, oldCV *configv1.ClusterVersion) []Condition {
var conditions []Condition
for i := range cv.Status.Conditions {
s := &cv.Status.Conditions[i]
previous := findOperatorStatusCondition(oldCV.Status.Conditions, s.Type)
if previous == nil {
continue
}
if s.Status != previous.Status {
var msg string
switch {
case len(s.Reason) > 0 && len(s.Message) > 0:
msg = fmt.Sprintf("changed %s to %s: %s: %s", s.Type, s.Status, s.Reason, s.Message)
case len(s.Message) > 0:
msg = fmt.Sprintf("changed %s to %s: %s", s.Type, s.Status, s.Message)
default:
msg = fmt.Sprintf("changed %s to %s", s.Type, s.Status)
}
level := Warning
if s.Type == configv1.OperatorDegraded && s.Status == configv1.ConditionTrue {
level = Error
}
if s.Type == configv1.ClusterStatusConditionType("Failing") && s.Status == configv1.ConditionTrue {
level = Error
}
conditions = append(conditions, Condition{
Level: level,
Locator: locateClusterVersion(cv),
Message: msg,
})
}
}
return conditions
},
}
cvInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
cv, ok := obj.(*configv1.ClusterVersion)
if !ok {
return
}
// filter out old clusterversions so our monitor doesn't send a big chunk
// of clusterversion creations
if cv.CreationTimestamp.Time.Before(startTime) {
return
}
m.Record(Condition{
Level: Info,
Locator: locateClusterVersion(cv),
Message: "created",
})
},
DeleteFunc: func(obj interface{}) {
cv, ok := obj.(*configv1.ClusterVersion)
if !ok {
return
}
m.Record(Condition{
Level: Warning,
Locator: locateClusterVersion(cv),
Message: "deleted",
})
},
UpdateFunc: func(old, obj interface{}) {
cv, ok := obj.(*configv1.ClusterVersion)
if !ok {
return
}
oldCV, ok := old.(*configv1.ClusterVersion)
if !ok {
return
}
if cv.UID != oldCV.UID {
return
}
for _, fn := range cvChangeFns {
m.Record(fn(cv, oldCV)...)
}
},
},
)
m.AddSampler(func(now time.Time) []*Condition {
var conditions []*Condition
for _, obj := range cvInformer.GetStore().List() {
cv, ok := obj.(*configv1.ClusterVersion)
if !ok {
continue
}
if len(cv.Status.History) > 0 {
if cv.Status.History[0].State != configv1.CompletedUpdate {
conditions = append(conditions, &Condition{
Level: Warning,
Locator: locateClusterVersion(cv),
Message: fmt.Sprintf("cluster is updating to %s", cv.Status.History[0].Version),
})
}
}
}
return conditions
})
go cvInformer.Run(ctx.Done())
} | monitor | ||||
function | openshift/openshift-tests-private | 0fb55703-6766-4212-b098-5f4e608a1e34 | locateClusterOperator | ['"fmt"'] | github.com/openshift/openshift-tests-private/pkg/monitor/operator.go | func locateClusterOperator(co *configv1.ClusterOperator) string {
return fmt.Sprintf("clusteroperator/%s", co.Name)
} | monitor | ||||
function | openshift/openshift-tests-private | d4b7709c-6979-4490-b860-e90acb2bb7c0 | locateClusterVersion | ['"fmt"'] | github.com/openshift/openshift-tests-private/pkg/monitor/operator.go | func locateClusterVersion(cv *configv1.ClusterVersion) string {
return fmt.Sprintf("clusterversion/%s", cv.Name)
} | monitor | ||||
function | openshift/openshift-tests-private | 2cfcf840-cf13-4cc5-8279-4cf8c7eeb95b | findOperatorVersionChange | ['"fmt"'] | github.com/openshift/openshift-tests-private/pkg/monitor/operator.go | func findOperatorVersionChange(old, new []configv1.OperandVersion) []string {
var changed []string
for i := 0; i < len(new); i++ {
for j := 0; j < len(old); j++ {
p := (j + i) % len(old)
if old[p].Name != new[i].Name {
continue
}
if old[p].Version == new[i].Version {
break
}
changed = append(changed, fmt.Sprintf("%s %s -> %s", new[i].Name, old[p].Version, new[i].Version))
break
}
}
return changed
} | monitor | ||||
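The inner loop above probes old[(j+i)%len(old)] rather than old[j], so when the two version slices keep the same ordering each operand is matched on the first probe, and the scan only falls back to a full pass when the order differs. A small worked example, written as same-package code with made-up operand names and versions:

func exampleVersionChange() {
	oldVersions := []configv1.OperandVersion{{Name: "operator", Version: "4.9.0"}, {Name: "operand", Version: "4.9.0"}}
	newVersions := []configv1.OperandVersion{{Name: "operator", Version: "4.9.1"}, {Name: "operand", Version: "4.9.0"}}
	// Only the changed operand is reported, formatted as "name old -> new".
	fmt.Println(findOperatorVersionChange(oldVersions, newVersions)) // [operator 4.9.0 -> 4.9.1]
}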
function | openshift/openshift-tests-private | 26ac3974-9412-40eb-809b-114451b50633 | findOperatorStatusCondition | github.com/openshift/openshift-tests-private/pkg/monitor/operator.go | func findOperatorStatusCondition(conditions []configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType) *configv1.ClusterOperatorStatusCondition {
for i := range conditions {
if conditions[i].Type == conditionType {
return &conditions[i]
}
}
return nil
} | monitor | |||||
file | openshift/openshift-tests-private | d2699363-a930-4560-8896-44c3b3cd3267 | operator_test | import (
"reflect"
"testing"
configv1 "github.com/openshift/api/config/v1"
) | github.com/openshift/openshift-tests-private/pkg/monitor/operator_test.go | package monitor
import (
"reflect"
"testing"
configv1 "github.com/openshift/api/config/v1"
)
func Test_findOperatorVersionChange(t *testing.T) {
type args struct {
}
tests := []struct {
name string
old []configv1.OperandVersion
new []configv1.OperandVersion
want []string
}{
{
old: []configv1.OperandVersion{{Name: "a", Version: "1.0.0"}, {Name: "b", Version: "1.0.1"}},
new: []configv1.OperandVersion{{Name: "a", Version: "1.0.0"}, {Name: "b", Version: "1.0.1"}},
},
{
old: []configv1.OperandVersion{{Name: "a", Version: "1.0.0"}, {Name: "b", Version: "1.0.1"}},
new: []configv1.OperandVersion{{Name: "a", Version: "1.0.1"}, {Name: "b", Version: "1.0.1"}},
want: []string{"a 1.0.0 -> 1.0.1"},
},
{
old: []configv1.OperandVersion{{Name: "a", Version: "1.0.0"}, {Name: "b", Version: "1.0.1"}},
new: []configv1.OperandVersion{{Name: "b", Version: "1.0.1"}, {Name: "a", Version: "1.0.0"}},
},
{
old: []configv1.OperandVersion{{Name: "a", Version: "1.0.0"}, {Name: "b", Version: "1.0.1"}},
new: []configv1.OperandVersion{{Name: "b", Version: "1.0.1"}, {Name: "a", Version: "1.0.1"}},
want: []string{"a 1.0.0 -> 1.0.1"},
},
{
old: []configv1.OperandVersion{{Name: "a", Version: "1.0.0"}},
new: []configv1.OperandVersion{{Name: "a", Version: "1.0.1"}},
want: []string{"a 1.0.0 -> 1.0.1"},
},
{
old: []configv1.OperandVersion{{Name: "a", Version: "1.0.0"}},
new: []configv1.OperandVersion{{Name: "a", Version: "1.0.0"}},
},
{
old: []configv1.OperandVersion{{Name: "a", Version: "1.0.0"}},
new: []configv1.OperandVersion{},
},
{
old: []configv1.OperandVersion{},
new: []configv1.OperandVersion{{Name: "a", Version: "1.0.0"}},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := findOperatorVersionChange(tt.old, tt.new); !reflect.DeepEqual(got, tt.want) {
t.Errorf("findOperatorVersionChange() = %v, want %v", got, tt.want)
}
})
}
}
| package monitor | ||||
function | openshift/openshift-tests-private | c0bbc43f-430e-4f88-bae4-6e428259ff8d | Test_findOperatorVersionChange | ['"reflect"', '"testing"'] | github.com/openshift/openshift-tests-private/pkg/monitor/operator_test.go | func Test_findOperatorVersionChange(t *testing.T) {
type args struct {
}
tests := []struct {
name string
old []configv1.OperandVersion
new []configv1.OperandVersion
want []string
}{
{
old: []configv1.OperandVersion{{Name: "a", Version: "1.0.0"}, {Name: "b", Version: "1.0.1"}},
new: []configv1.OperandVersion{{Name: "a", Version: "1.0.0"}, {Name: "b", Version: "1.0.1"}},
},
{
old: []configv1.OperandVersion{{Name: "a", Version: "1.0.0"}, {Name: "b", Version: "1.0.1"}},
new: []configv1.OperandVersion{{Name: "a", Version: "1.0.1"}, {Name: "b", Version: "1.0.1"}},
want: []string{"a 1.0.0 -> 1.0.1"},
},
{
old: []configv1.OperandVersion{{Name: "a", Version: "1.0.0"}, {Name: "b", Version: "1.0.1"}},
new: []configv1.OperandVersion{{Name: "b", Version: "1.0.1"}, {Name: "a", Version: "1.0.0"}},
},
{
old: []configv1.OperandVersion{{Name: "a", Version: "1.0.0"}, {Name: "b", Version: "1.0.1"}},
new: []configv1.OperandVersion{{Name: "b", Version: "1.0.1"}, {Name: "a", Version: "1.0.1"}},
want: []string{"a 1.0.0 -> 1.0.1"},
},
{
old: []configv1.OperandVersion{{Name: "a", Version: "1.0.0"}},
new: []configv1.OperandVersion{{Name: "a", Version: "1.0.1"}},
want: []string{"a 1.0.0 -> 1.0.1"},
},
{
old: []configv1.OperandVersion{{Name: "a", Version: "1.0.0"}},
new: []configv1.OperandVersion{{Name: "a", Version: "1.0.0"}},
},
{
old: []configv1.OperandVersion{{Name: "a", Version: "1.0.0"}},
new: []configv1.OperandVersion{},
},
{
old: []configv1.OperandVersion{},
new: []configv1.OperandVersion{{Name: "a", Version: "1.0.0"}},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := findOperatorVersionChange(tt.old, tt.new); !reflect.DeepEqual(got, tt.want) {
t.Errorf("findOperatorVersionChange() = %v, want %v", got, tt.want)
}
})
}
} | monitor | ||||
file | openshift/openshift-tests-private | 16fb6541-af49-44cb-9f3b-db30af1dc649 | pod | import (
"context"
"fmt"
"time"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
) | github.com/openshift/openshift-tests-private/pkg/monitor/pod.go | package monitor
import (
"context"
"fmt"
"time"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
)
func startPodMonitoring(ctx context.Context, m Recorder, client kubernetes.Interface) {
podInformer := cache.NewSharedIndexInformer(
NewErrorRecordingListWatcher(m, &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
items, err := client.CoreV1().Pods("").List(ctx, options)
if err == nil {
last := 0
for i := range items.Items {
item := &items.Items[i]
if !filterToSystemNamespaces(item) {
continue
}
if i != last {
items.Items[last] = *item
last++
}
}
items.Items = items.Items[:last]
}
return items, err
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
w, err := client.CoreV1().Pods("").Watch(ctx, options)
if err == nil {
w = watch.Filter(w, func(in watch.Event) (watch.Event, bool) {
return in, filterToSystemNamespaces(in.Object)
})
}
return w, err
},
}),
&corev1.Pod{},
time.Hour,
nil,
)
m.AddSampler(func(now time.Time) []*Condition {
var conditions []*Condition
for _, obj := range podInformer.GetStore().List() {
pod, ok := obj.(*corev1.Pod)
if !ok {
continue
}
if pod.Status.Phase == "Pending" {
if now.Sub(pod.CreationTimestamp.Time) > time.Minute {
conditions = append(conditions, &Condition{
Level: Warning,
Locator: locatePod(pod),
Message: "pod has been pending longer than a minute",
})
}
}
}
return conditions
})
podChangeFns := []func(pod, oldPod *corev1.Pod) []Condition{
// check phase transitions
func(pod, oldPod *corev1.Pod) []Condition {
new, old := pod.Status.Phase, oldPod.Status.Phase
if new == old || len(old) == 0 {
return nil
}
var conditions []Condition
switch {
case new == corev1.PodPending && old != corev1.PodUnknown:
switch {
case pod.DeletionTimestamp != nil:
conditions = append(conditions, Condition{
Level: Warning,
Locator: locatePod(pod),
Message: fmt.Sprintf("invariant violation (bug): pod should not transition %s->%s even when terminated", old, new),
})
case len(pod.Annotations["kubernetes.io/config.mirror"]) > 0:
conditions = append(conditions, Condition{
Level: Warning,
Locator: locatePod(pod),
Message: fmt.Sprintf("invariant violation (bug): static pod should not transition %s->%s with same UID", old, new),
})
default:
conditions = append(conditions, Condition{
Level: Error,
Locator: locatePod(pod),
Message: fmt.Sprintf("invariant violation: pod may not transition %s->%s", old, new),
})
}
case new == corev1.PodUnknown:
conditions = append(conditions, Condition{
Level: Warning,
Locator: locatePod(pod),
Message: fmt.Sprintf("pod moved to the Unknown phase"),
})
case new == corev1.PodFailed && old != corev1.PodFailed:
switch pod.Status.Reason {
case "Evicted":
conditions = append(conditions, Condition{
Level: Error,
Locator: locatePod(pod),
Message: fmt.Sprintf("pod evicted: %s", pod.Status.Message),
})
case "Preempting":
conditions = append(conditions, Condition{
Level: Error,
Locator: locatePod(pod),
Message: fmt.Sprintf("pod preempted: %s", pod.Status.Message),
})
default:
conditions = append(conditions, Condition{
Level: Error,
Locator: locatePod(pod),
Message: fmt.Sprintf("pod failed (%s): %s", pod.Status.Reason, pod.Status.Message),
})
}
for _, s := range pod.Status.InitContainerStatuses {
if t := s.State.Terminated; t != nil && t.ExitCode != 0 {
conditions = append(conditions, Condition{
Level: Error,
Locator: locatePodContainer(pod, s.Name),
Message: fmt.Sprintf("container exited with code %d (%s): %s", t.ExitCode, t.Reason, t.Message),
})
}
}
for _, s := range pod.Status.ContainerStatuses {
if t := s.State.Terminated; t != nil && t.ExitCode != 0 {
conditions = append(conditions, Condition{
Level: Error,
Locator: locatePodContainer(pod, s.Name),
Message: fmt.Sprintf("init container exited with code %d (%s): %s", t.ExitCode, t.Reason, t.Message),
})
}
}
}
return conditions
},
// check for transitions to being deleted
func(pod, oldPod *corev1.Pod) []Condition {
var conditions []Condition
if pod.DeletionGracePeriodSeconds != nil && oldPod.DeletionGracePeriodSeconds == nil {
conditions = append(conditions, Condition{
Level: Warning,
Locator: locatePod(pod),
Message: fmt.Sprintf("graceful deletion within %ds", *pod.DeletionGracePeriodSeconds),
})
}
if pod.DeletionGracePeriodSeconds == nil && oldPod.DeletionGracePeriodSeconds != nil {
conditions = append(conditions, Condition{
Level: Error,
Locator: locatePod(pod),
Message: "invariant violation: pod was marked for deletion and then deletion grace period was cleared",
})
}
return conditions
},
// check restarts, readiness drop outs, or other status changes
func(pod, oldPod *corev1.Pod) []Condition {
var conditions []Condition
for i := range pod.Status.ContainerStatuses {
s := &pod.Status.ContainerStatuses[i]
previous := findContainerStatus(oldPod.Status.ContainerStatuses, s.Name, i)
if previous == nil {
continue
}
if t := s.State.Terminated; t != nil && previous.State.Terminated == nil && t.ExitCode != 0 {
conditions = append(conditions, Condition{
Level: Error,
Locator: locatePodContainer(pod, s.Name),
Message: fmt.Sprintf("container exited with code %d (%s): %s", t.ExitCode, t.Reason, t.Message),
})
}
if s.RestartCount != previous.RestartCount {
conditions = append(conditions, Condition{
Level: Warning,
Locator: locatePodContainer(pod, s.Name),
Message: "container restarted",
})
}
if s.State.Terminated == nil && previous.Ready && !s.Ready {
conditions = append(conditions, Condition{
Level: Warning,
Locator: locatePodContainer(pod, s.Name),
Message: "container stopped being ready",
})
}
}
for i := range pod.Status.InitContainerStatuses {
s := &pod.Status.InitContainerStatuses[i]
previous := findContainerStatus(oldPod.Status.InitContainerStatuses, s.Name, i)
if previous == nil {
continue
}
if t := s.State.Terminated; t != nil && previous.State.Terminated == nil && t.ExitCode != 0 {
conditions = append(conditions, Condition{
Level: Error,
Locator: locatePodContainer(pod, s.Name),
Message: fmt.Sprintf("init container exited with code %d (%s): %s", t.ExitCode, t.Reason, t.Message),
})
}
if s.RestartCount != previous.RestartCount {
conditions = append(conditions, Condition{
Level: Warning,
Locator: locatePodContainer(pod, s.Name),
Message: "init container restarted",
})
}
}
return conditions
},
}
startTime := time.Now().Add(-time.Minute)
podInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
pod, ok := obj.(*corev1.Pod)
if !ok {
return
}
// filter out old pods so our monitor doesn't send a big chunk
// of pod creations
if pod.CreationTimestamp.Time.Before(startTime) {
return
}
m.Record(Condition{
Level: Info,
Locator: locatePod(pod),
Message: "created",
})
},
DeleteFunc: func(obj interface{}) {
pod, ok := obj.(*corev1.Pod)
if !ok {
return
}
m.Record(Condition{
Level: Warning,
Locator: locatePod(pod),
Message: "deleted",
})
},
UpdateFunc: func(old, obj interface{}) {
pod, ok := obj.(*corev1.Pod)
if !ok {
return
}
oldPod, ok := old.(*corev1.Pod)
if !ok {
return
}
if pod.UID != oldPod.UID {
return
}
for _, fn := range podChangeFns {
m.Record(fn(pod, oldPod)...)
}
},
},
)
go podInformer.Run(ctx.Done())
}
| package monitor | ||||
function | openshift/openshift-tests-private | 8c9acdbc-2bbd-4297-9c57-605b647d8afd | startPodMonitoring | ['"context"', '"fmt"', '"time"', '"k8s.io/apimachinery/pkg/runtime"', '"k8s.io/apimachinery/pkg/watch"', '"k8s.io/client-go/kubernetes"', '"k8s.io/client-go/tools/cache"'] | github.com/openshift/openshift-tests-private/pkg/monitor/pod.go | func startPodMonitoring(ctx context.Context, m Recorder, client kubernetes.Interface) {
podInformer := cache.NewSharedIndexInformer(
NewErrorRecordingListWatcher(m, &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
items, err := client.CoreV1().Pods("").List(ctx, options)
if err == nil {
last := 0
for i := range items.Items {
item := &items.Items[i]
if !filterToSystemNamespaces(item) {
continue
}
if i != last {
items.Items[last] = *item
last++
}
}
items.Items = items.Items[:last]
}
return items, err
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
w, err := client.CoreV1().Pods("").Watch(ctx, options)
if err == nil {
w = watch.Filter(w, func(in watch.Event) (watch.Event, bool) {
return in, filterToSystemNamespaces(in.Object)
})
}
return w, err
},
}),
&corev1.Pod{},
time.Hour,
nil,
)
m.AddSampler(func(now time.Time) []*Condition {
var conditions []*Condition
for _, obj := range podInformer.GetStore().List() {
pod, ok := obj.(*corev1.Pod)
if !ok {
continue
}
if pod.Status.Phase == "Pending" {
if now.Sub(pod.CreationTimestamp.Time) > time.Minute {
conditions = append(conditions, &Condition{
Level: Warning,
Locator: locatePod(pod),
Message: "pod has been pending longer than a minute",
})
}
}
}
return conditions
})
podChangeFns := []func(pod, oldPod *corev1.Pod) []Condition{
// check phase transitions
func(pod, oldPod *corev1.Pod) []Condition {
new, old := pod.Status.Phase, oldPod.Status.Phase
if new == old || len(old) == 0 {
return nil
}
var conditions []Condition
switch {
case new == corev1.PodPending && old != corev1.PodUnknown:
switch {
case pod.DeletionTimestamp != nil:
conditions = append(conditions, Condition{
Level: Warning,
Locator: locatePod(pod),
Message: fmt.Sprintf("invariant violation (bug): pod should not transition %s->%s even when terminated", old, new),
})
case len(pod.Annotations["kubernetes.io/config.mirror"]) > 0:
conditions = append(conditions, Condition{
Level: Warning,
Locator: locatePod(pod),
Message: fmt.Sprintf("invariant violation (bug): static pod should not transition %s->%s with same UID", old, new),
})
default:
conditions = append(conditions, Condition{
Level: Error,
Locator: locatePod(pod),
Message: fmt.Sprintf("invariant violation: pod may not transition %s->%s", old, new),
})
}
case new == corev1.PodUnknown:
conditions = append(conditions, Condition{
Level: Warning,
Locator: locatePod(pod),
Message: fmt.Sprintf("pod moved to the Unknown phase"),
})
case new == corev1.PodFailed && old != corev1.PodFailed:
switch pod.Status.Reason {
case "Evicted":
conditions = append(conditions, Condition{
Level: Error,
Locator: locatePod(pod),
Message: fmt.Sprintf("pod evicted: %s", pod.Status.Message),
})
case "Preempting":
conditions = append(conditions, Condition{
Level: Error,
Locator: locatePod(pod),
Message: fmt.Sprintf("pod preempted: %s", pod.Status.Message),
})
default:
conditions = append(conditions, Condition{
Level: Error,
Locator: locatePod(pod),
Message: fmt.Sprintf("pod failed (%s): %s", pod.Status.Reason, pod.Status.Message),
})
}
for _, s := range pod.Status.InitContainerStatuses {
if t := s.State.Terminated; t != nil && t.ExitCode != 0 {
conditions = append(conditions, Condition{
Level: Error,
Locator: locatePodContainer(pod, s.Name),
Message: fmt.Sprintf("container exited with code %d (%s): %s", t.ExitCode, t.Reason, t.Message),
})
}
}
for _, s := range pod.Status.ContainerStatuses {
if t := s.State.Terminated; t != nil && t.ExitCode != 0 {
conditions = append(conditions, Condition{
Level: Error,
Locator: locatePodContainer(pod, s.Name),
Message: fmt.Sprintf("init container exited with code %d (%s): %s", t.ExitCode, t.Reason, t.Message),
})
}
}
}
return conditions
},
// check for transitions to being deleted
func(pod, oldPod *corev1.Pod) []Condition {
var conditions []Condition
if pod.DeletionGracePeriodSeconds != nil && oldPod.DeletionGracePeriodSeconds == nil {
conditions = append(conditions, Condition{
Level: Warning,
Locator: locatePod(pod),
Message: fmt.Sprintf("graceful deletion within %ds", *pod.DeletionGracePeriodSeconds),
})
}
if pod.DeletionGracePeriodSeconds == nil && oldPod.DeletionGracePeriodSeconds != nil {
conditions = append(conditions, Condition{
Level: Error,
Locator: locatePod(pod),
Message: "invariant violation: pod was marked for deletion and then deletion grace period was cleared",
})
}
return conditions
},
// check restarts, readiness drop outs, or other status changes
func(pod, oldPod *corev1.Pod) []Condition {
var conditions []Condition
for i := range pod.Status.ContainerStatuses {
s := &pod.Status.ContainerStatuses[i]
previous := findContainerStatus(oldPod.Status.ContainerStatuses, s.Name, i)
if previous == nil {
continue
}
if t := s.State.Terminated; t != nil && previous.State.Terminated == nil && t.ExitCode != 0 {
conditions = append(conditions, Condition{
Level: Error,
Locator: locatePodContainer(pod, s.Name),
Message: fmt.Sprintf("container exited with code %d (%s): %s", t.ExitCode, t.Reason, t.Message),
})
}
if s.RestartCount != previous.RestartCount {
conditions = append(conditions, Condition{
Level: Warning,
Locator: locatePodContainer(pod, s.Name),
Message: "container restarted",
})
}
if s.State.Terminated == nil && previous.Ready && !s.Ready {
conditions = append(conditions, Condition{
Level: Warning,
Locator: locatePodContainer(pod, s.Name),
Message: "container stopped being ready",
})
}
}
for i := range pod.Status.InitContainerStatuses {
s := &pod.Status.InitContainerStatuses[i]
previous := findContainerStatus(oldPod.Status.InitContainerStatuses, s.Name, i)
if previous == nil {
continue
}
if t := s.State.Terminated; t != nil && previous.State.Terminated == nil && t.ExitCode != 0 {
conditions = append(conditions, Condition{
Level: Error,
Locator: locatePodContainer(pod, s.Name),
Message: fmt.Sprintf("init container exited with code %d (%s): %s", t.ExitCode, t.Reason, t.Message),
})
}
if s.RestartCount != previous.RestartCount {
conditions = append(conditions, Condition{
Level: Warning,
Locator: locatePodContainer(pod, s.Name),
Message: "init container restarted",
})
}
}
return conditions
},
}
startTime := time.Now().Add(-time.Minute)
podInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
pod, ok := obj.(*corev1.Pod)
if !ok {
return
}
// filter out old pods so our monitor doesn't send a big chunk
// of pod creations
if pod.CreationTimestamp.Time.Before(startTime) {
return
}
m.Record(Condition{
Level: Info,
Locator: locatePod(pod),
Message: "created",
})
},
DeleteFunc: func(obj interface{}) {
pod, ok := obj.(*corev1.Pod)
if !ok {
return
}
m.Record(Condition{
Level: Warning,
Locator: locatePod(pod),
Message: "deleted",
})
},
UpdateFunc: func(old, obj interface{}) {
pod, ok := obj.(*corev1.Pod)
if !ok {
return
}
oldPod, ok := old.(*corev1.Pod)
if !ok {
return
}
if pod.UID != oldPod.UID {
return
}
for _, fn := range podChangeFns {
m.Record(fn(pod, oldPod)...)
}
},
},
)
go podInformer.Run(ctx.Done())
} | monitor | ||||
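Both the pod and event monitors gate what they record through filterToSystemNamespaces, which is defined elsewhere in the package and receives the listed or watched object. Its actual behavior is not shown in this listing; the sketch below is only an assumption about its shape, namely that it keeps objects from platform namespaces and drops everything else.

// Illustrative sketch only — the real filterToSystemNamespaces may differ.
func filterToSystemNamespacesSketch(obj runtime.Object) bool {
	accessor, err := meta.Accessor(obj) // k8s.io/apimachinery/pkg/api/meta
	if err != nil {
		return true
	}
	ns := accessor.GetNamespace()
	return ns == "default" || strings.HasPrefix(ns, "kube-") || strings.HasPrefix(ns, "openshift-")
}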
test | openshift/openshift-tests-private | 9a5c60fb-f716-4dbc-9aba-37071eb8eafc | sampler | import (
"context"
"sync"
"time"
) | github.com/openshift/openshift-tests-private/pkg/monitor/sampler.go | package monitor
import (
"context"
"sync"
"time"
)
type ConditionalSampler interface {
ConditionWhenFailing(*Condition) SamplerFunc
}
type sampler struct {
lock sync.Mutex
available bool
}
func StartSampling(ctx context.Context, recorder Recorder, interval time.Duration, sampleFn func(previous bool) (*Condition, bool)) ConditionalSampler {
s := &sampler{
available: true,
}
go func() {
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
case <-ctx.Done():
return
}
success := s.isAvailable()
condition, ok := sampleFn(success)
if condition != nil {
recorder.Record(*condition)
}
s.setAvailable(ok)
}
}()
return s
}
func (s *sampler) isAvailable() bool {
s.lock.Lock()
defer s.lock.Unlock()
return s.available
}
func (s *sampler) setAvailable(b bool) {
s.lock.Lock()
defer s.lock.Unlock()
s.available = b
}
func (s *sampler) ConditionWhenFailing(condition *Condition) SamplerFunc {
return func(_ time.Time) []*Condition {
if s.isAvailable() {
return nil
}
return []*Condition{condition}
}
}
| package monitor | ||||
function | openshift/openshift-tests-private | acee28ae-9618-4652-9918-0d62743a5e5c | StartSampling | ['"context"', '"time"'] | ['sampler'] | ['ConditionalSampler'] | github.com/openshift/openshift-tests-private/pkg/monitor/sampler.go | func StartSampling(ctx context.Context, recorder Recorder, interval time.Duration, sampleFn func(previous bool) (*Condition, bool)) ConditionalSampler {
s := &sampler{
available: true,
}
go func() {
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
case <-ctx.Done():
return
}
success := s.isAvailable()
condition, ok := sampleFn(success)
if condition != nil {
recorder.Record(*condition)
}
s.setAvailable(ok)
}
}()
return s
} | monitor | ||
function | openshift/openshift-tests-private | 8f9a5940-8118-4d51-84a4-db7a9ecd4b39 | isAvailable | ['sampler'] | github.com/openshift/openshift-tests-private/pkg/monitor/sampler.go | func (s *sampler) isAvailable() bool {
s.lock.Lock()
defer s.lock.Unlock()
return s.available
} | monitor | ||||
function | openshift/openshift-tests-private | 91565601-09e0-414d-a72d-120ea4549300 | setAvailable | ['sampler'] | github.com/openshift/openshift-tests-private/pkg/monitor/sampler.go | func (s *sampler) setAvailable(b bool) {
s.lock.Lock()
defer s.lock.Unlock()
s.available = b
} | monitor | ||||
function | openshift/openshift-tests-private | eba5d7cc-c598-4ef8-a74d-777c1cbe29a9 | ConditionWhenFailing | ['"time"'] | ['sampler'] | github.com/openshift/openshift-tests-private/pkg/monitor/sampler.go | func (s *sampler) ConditionWhenFailing(condition *Condition) SamplerFunc {
return func(_ time.Time) []*Condition {
if s.isAvailable() {
return nil
}
return []*Condition{condition}
}
} | monitor | |||
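Taken together, StartSampling and ConditionWhenFailing implement an edge-triggered health probe: sampleFn receives the previous availability and returns the condition to record (if any) plus the new availability, while the SamplerFunc registered via AddSampler keeps emitting a fallback condition for as long as the probe is failing. The snippet below is a hypothetical wiring, not code from this repository; the check function, interval, and locator are invented.

```go
package monitorexample

import (
	"context"
	"time"

	"github.com/openshift/openshift-tests-private/pkg/monitor"
)

// watchEndpoint is a hypothetical helper: it polls check() every interval and
// records conditions on state transitions (healthy -> failing, failing -> healthy).
func watchEndpoint(ctx context.Context, recorder monitor.Recorder, check func() error) {
	sampler := monitor.StartSampling(ctx, recorder, 5*time.Second, func(previous bool) (*monitor.Condition, bool) {
		err := check()
		switch {
		case err != nil && previous:
			// First failure after a healthy period: record the transition.
			return &monitor.Condition{
				Level:   monitor.Error,
				Locator: "endpoint/example",
				Message: "check started failing: " + err.Error(),
			}, false
		case err != nil:
			// Still failing: no new condition, stay unavailable.
			return nil, false
		case !previous:
			// Recovery after a failing period.
			return &monitor.Condition{
				Level:   monitor.Info,
				Locator: "endpoint/example",
				Message: "check recovered",
			}, true
		default:
			return nil, true
		}
	})

	// While the sampler is unavailable, every monitor sample also includes this condition.
	recorder.AddSampler(sampler.ConditionWhenFailing(&monitor.Condition{
		Level:   monitor.Error,
		Locator: "endpoint/example",
		Message: "check is failing",
	}))
}
```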
file | openshift/openshift-tests-private | 7b2ae2f9-87b1-4c41-930f-228b6ce1c584 | types | import (
"fmt"
"sort"
"strconv"
"strings"
"time"
) | github.com/openshift/openshift-tests-private/pkg/monitor/types.go | package monitor
import (
"fmt"
"sort"
"strconv"
"strings"
"time"
)
type SamplerFunc func(time.Time) []*Condition
type Interface interface {
Events(from, to time.Time) EventIntervals
Conditions(from, to time.Time) EventIntervals
}
type Recorder interface {
Record(conditions ...Condition)
AddSampler(fn SamplerFunc)
}
type EventLevel int
const (
Info EventLevel = iota
Warning
Error
)
var eventString = []string{
"I",
"W",
"E",
}
type Event struct {
Condition
At time.Time
}
func (e *Event) String() string {
return fmt.Sprintf("%s.%03d %s %s %s", e.At.Format("Jan 02 15:04:05"), e.At.Nanosecond()/1000000, eventString[e.Level], e.Locator, strings.Replace(e.Message, "\n", "\\n", -1))
}
type sample struct {
at time.Time
conditions []*Condition
}
type Condition struct {
Level EventLevel
Locator string
Message string
}
type EventInterval struct {
*Condition
From time.Time
To time.Time
}
func (i *EventInterval) String() string {
if i.From.Equal(i.To) {
return fmt.Sprintf("%s.%03d %s %s %s", i.From.Format("Jan 02 15:04:05"), i.From.Nanosecond()/int(time.Millisecond), eventString[i.Level], i.Locator, strings.Replace(i.Message, "\n", "\\n", -1))
}
return fmt.Sprintf("%s.%03d - %-5s %s %s %s", i.From.Format("Jan 02 15:04:05"), i.From.Nanosecond()/int(time.Millisecond), strconv.Itoa(int(i.To.Sub(i.From)/time.Second))+"s", eventString[i.Level], i.Locator, strings.Replace(i.Message, "\n", "\\n", -1))
}
type EventIntervals []*EventInterval
var _ sort.Interface = EventIntervals{}
func (intervals EventIntervals) Less(i, j int) bool {
switch d := intervals[i].From.Sub(intervals[j].From); {
case d < 0:
return true
case d > 0:
return false
}
switch d := intervals[i].To.Sub(intervals[j].To); {
case d < 0:
return true
case d > 0:
return false
}
return intervals[i].Message < intervals[j].Message
}
func (intervals EventIntervals) Len() int { return len(intervals) }
func (intervals EventIntervals) Swap(i, j int) {
intervals[i], intervals[j] = intervals[j], intervals[i]
}
| package monitor | ||||
function | openshift/openshift-tests-private | a267679b-5289-461c-bc82-9f4b472d9733 | String | ['"fmt"', '"strings"'] | ['Event'] | github.com/openshift/openshift-tests-private/pkg/monitor/types.go | func (e *Event) String() string {
return fmt.Sprintf("%s.%03d %s %s %s", e.At.Format("Jan 02 15:04:05"), e.At.Nanosecond()/1000000, eventString[e.Level], e.Locator, strings.Replace(e.Message, "\n", "\\n", -1))
} | {'eventString': '[]string{\n\t"I",\n\t"W",\n\t"E",\n}'} | monitor | ||
function | openshift/openshift-tests-private | 7837a05e-b444-4596-9ab0-f8db8693fb1c | String | ['"fmt"', '"strconv"', '"strings"', '"time"'] | ['EventInterval'] | github.com/openshift/openshift-tests-private/pkg/monitor/types.go | func (i *EventInterval) String() string {
if i.From.Equal(i.To) {
return fmt.Sprintf("%s.%03d %s %s %s", i.From.Format("Jan 02 15:04:05"), i.From.Nanosecond()/int(time.Millisecond), eventString[i.Level], i.Locator, strings.Replace(i.Message, "\n", "\\n", -1))
}
return fmt.Sprintf("%s.%03d - %-5s %s %s %s", i.From.Format("Jan 02 15:04:05"), i.From.Nanosecond()/int(time.Millisecond), strconv.Itoa(int(i.To.Sub(i.From)/time.Second))+"s", eventString[i.Level], i.Locator, strings.Replace(i.Message, "\n", "\\n", -1))
} | {'eventString': '[]string{\n\t"I",\n\t"W",\n\t"E",\n}'} | monitor | ||
function | openshift/openshift-tests-private | 100f1fc7-0ca1-4a95-bd56-9bd4550367fd | Less | github.com/openshift/openshift-tests-private/pkg/monitor/types.go | func (intervals EventIntervals) Less(i, j int) bool {
switch d := intervals[i].From.Sub(intervals[j].From); {
case d < 0:
return true
case d > 0:
return false
}
switch d := intervals[i].To.Sub(intervals[j].To); {
case d < 0:
return true
case d > 0:
return false
}
return intervals[i].Message < intervals[j].Message
} | monitor | |||||
function | openshift/openshift-tests-private | 1a6c6150-a297-4d09-98cf-ff57eb50b33f | Len | github.com/openshift/openshift-tests-private/pkg/monitor/types.go | func (intervals EventIntervals) Len() int { return len(intervals) } | monitor | |||||
function | openshift/openshift-tests-private | 4ac5446c-c2bc-474d-86fc-61e1ab28d702 | Swap | github.com/openshift/openshift-tests-private/pkg/monitor/types.go | func (intervals EventIntervals) Swap(i, j int) {
intervals[i], intervals[j] = intervals[j], intervals[i]
} | monitor | |||||
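Event and EventInterval are plain value types, so their formatting and ordering can be exercised in isolation. The sketch below builds two intervals, sorts them with the Less/Len/Swap implementation shown above, and prints them; the timestamps, locators, and messages are invented for illustration.

```go
package main

import (
	"fmt"
	"sort"
	"time"

	"github.com/openshift/openshift-tests-private/pkg/monitor"
)

func main() {
	base := time.Date(2024, 1, 2, 15, 4, 5, 0, time.UTC)

	intervals := monitor.EventIntervals{
		{
			Condition: &monitor.Condition{Level: monitor.Warning, Locator: "node/worker-0", Message: "not ready"},
			From:      base.Add(30 * time.Second),
			To:        base.Add(90 * time.Second),
		},
		{
			Condition: &monitor.Condition{Level: monitor.Info, Locator: "pod/ns/demo", Message: "created"},
			From:      base,
			To:        base, // a zero-length interval prints as a single timestamp
		},
	}

	// EventIntervals sorts by From, then To, then Message.
	sort.Sort(intervals)
	for _, interval := range intervals {
		fmt.Println(interval.String())
	}
}
```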
file | openshift/openshift-tests-private | 91447f2d-876d-4abb-b878-390a0ad9495f | cmd_runsuite | import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"os/signal"
"path/filepath"
"sort"
"strings"
"syscall"
"time"
"github.com/openshift/openshift-tests-private/pkg/monitor"
) | github.com/openshift/openshift-tests-private/pkg/test/ginkgo/cmd_runsuite.go | package ginkgo
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"os/signal"
"path/filepath"
"sort"
"strings"
"syscall"
"time"
"github.com/openshift/openshift-tests-private/pkg/monitor"
)
// Options is used to run a suite of tests by invoking each test
// as a call to a child worker (the run-tests command).
type Options struct {
Parallelism int
Count int
Timeout time.Duration
JUnitDir string
TestFile string
OutFile string
Regex string
IncludeSuccessOutput bool
Provider string
SuiteOptions string
Suites []*TestSuite
DryRun bool
PrintCommands bool
Out, ErrOut io.Writer
}
func (opt *Options) AsEnv() []string {
var args []string
args = append(args, fmt.Sprintf("TEST_PROVIDER=%s", opt.Provider))
args = append(args, fmt.Sprintf("TEST_SUITE_OPTIONS=%s", opt.SuiteOptions))
return args
}
func (opt *Options) Run(args []string) error {
var suite *TestSuite
if len(opt.TestFile) > 0 {
var in []byte
var err error
if opt.TestFile == "-" {
in, err = ioutil.ReadAll(os.Stdin)
if err != nil {
return err
}
} else {
in, err = ioutil.ReadFile(opt.TestFile)
}
if err != nil {
return err
}
suite, err = newSuiteFromFile("files", in)
if err != nil {
return fmt.Errorf("could not read test suite from input: %v", err)
}
}
if suite == nil && len(args) == 0 {
fmt.Fprintf(opt.ErrOut, SuitesString(opt.Suites, "Select a test suite to run against the server:\n\n"))
return fmt.Errorf("specify a test suite to run, for example: %s run %s", filepath.Base(os.Args[0]), opt.Suites[0].Name)
}
if suite == nil && len(args) > 0 {
for _, s := range opt.Suites {
if s.Name == args[0] {
suite = s
break
}
}
}
if suite == nil {
fmt.Fprintf(opt.ErrOut, SuitesString(opt.Suites, "Select a test suite to run against the server:\n\n"))
return fmt.Errorf("suite %q does not exist", args[0])
}
if len(opt.Regex) > 0 {
if err := filterWithRegex(suite, opt.Regex); err != nil {
return fmt.Errorf("regular expression for filtering tests is invalid: %v", err)
}
}
tests, err := testsForSuite()
if err != nil {
return err
}
tests = suite.Filter(tests)
if len(tests) == 0 {
return fmt.Errorf("suite %q does not contain any tests", suite.Name)
}
count := opt.Count
if count == 0 {
count = suite.Count
}
if count > 1 {
var newTests []*testCase
for i := 0; i < count; i++ {
newTests = append(newTests, tests...)
}
tests = newTests
}
if opt.PrintCommands {
status := newTestStatus(opt.Out, true, len(tests), time.Minute, &monitor.Monitor{}, opt.AsEnv())
newParallelTestQueue(tests).Execute(context.Background(), 1, status.OutputCommand)
return nil
}
if opt.DryRun {
for _, test := range sortedTests(tests) {
fmt.Fprintf(opt.Out, "%q\n", test.name)
}
return nil
}
if len(opt.JUnitDir) > 0 {
if _, err := os.Stat(opt.JUnitDir); err != nil {
if !os.IsNotExist(err) {
return fmt.Errorf("could not access --junit-dir: %v", err)
}
if err := os.MkdirAll(opt.JUnitDir, 0755); err != nil {
return fmt.Errorf("could not create --junit-dir: %v", err)
}
}
}
parallelism := opt.Parallelism
if parallelism == 0 {
parallelism = suite.Parallelism
}
if parallelism == 0 {
parallelism = 3
}
timeout := opt.Timeout
if timeout == 0 {
timeout = suite.TestTimeout
}
if timeout == 0 {
timeout = 15 * time.Minute
}
ctx, cancelFn := context.WithCancel(context.Background())
defer cancelFn()
abortCh := make(chan os.Signal)
go func() {
<-abortCh
fmt.Fprintf(opt.ErrOut, "Interrupted, terminating tests\n")
cancelFn()
sig := <-abortCh
fmt.Fprintf(opt.ErrOut, "Interrupted twice, exiting (%s)\n", sig)
switch sig {
case syscall.SIGINT:
os.Exit(130)
default:
os.Exit(0)
}
}()
signal.Notify(abortCh, syscall.SIGINT, syscall.SIGTERM)
m, err := monitor.Start(ctx)
if err != nil {
return err
}
// if we run a single test, always include success output
includeSuccess := opt.IncludeSuccessOutput
if len(tests) == 1 {
includeSuccess = true
}
status := newTestStatus(opt.Out, includeSuccess, len(tests), timeout, m, opt.AsEnv())
smoke, normal := splitTests(tests, func(t *testCase) bool {
return strings.Contains(t.name, "[Smoke]")
})
// run the tests
start := time.Now()
// run our smoke tests first
q := newParallelTestQueue(smoke)
q.Execute(ctx, parallelism, status.Run)
// run other tests next
q = newParallelTestQueue(normal)
q.Execute(ctx, parallelism, status.Run)
duration := time.Now().Sub(start).Round(time.Second / 10)
if duration > time.Minute {
duration = duration.Round(time.Second)
}
pass, fail, skip, failing := summarizeTests(tests)
// monitor the cluster while the tests are running and report any detected
// anomalies
var syntheticTestResults []*JUnitTestCase
if events := m.Events(time.Time{}, time.Time{}); len(events) > 0 {
buf, errBuf := &bytes.Buffer{}, &bytes.Buffer{}
fmt.Fprintf(buf, "\nTimeline:\n\n")
errorCount := 0
for _, test := range tests {
if !test.failed {
continue
}
events = append(events,
&monitor.EventInterval{
From: test.start,
To: test.end,
Condition: &monitor.Condition{
Level: monitor.Info,
Locator: fmt.Sprintf("test=%q", test.name),
Message: "running",
},
},
&monitor.EventInterval{
From: test.end,
To: test.end,
Condition: &monitor.Condition{
Level: monitor.Info,
Locator: fmt.Sprintf("test=%q", test.name),
Message: "failed",
},
},
)
}
sort.Sort(events)
for _, event := range events {
if event.Level == monitor.Error {
errorCount++
fmt.Fprintln(errBuf, event.String())
}
fmt.Fprintln(buf, event.String())
}
fmt.Fprintln(buf)
if errorCount > 0 {
syntheticTestResults = append(syntheticTestResults, &JUnitTestCase{
Name: "Monitor cluster while tests execute",
SystemOut: buf.String(),
Duration: duration.Seconds(),
FailureOutput: &FailureOutput{
Output: fmt.Sprintf("%d error level events were detected during this test run:\n\n%s", errorCount, errBuf.String()),
},
})
}
if strings.EqualFold(os.Getenv("ENABLE_PRINT_EVENT_STDOUT"), "true") {
opt.Out.Write(buf.Bytes())
}
}
// attempt to retry failures to do flake detection
if fail > 0 && fail <= suite.MaximumAllowedFlakes {
var retries []*testCase
for _, test := range failing {
retries = append(retries, test.Retry())
if len(retries) > suite.MaximumAllowedFlakes {
break
}
}
q := newParallelTestQueue(retries)
status := newTestStatus(ioutil.Discard, opt.IncludeSuccessOutput, len(retries), timeout, m, opt.AsEnv())
q.Execute(ctx, parallelism, status.Run)
var flaky []string
var repeatFailures []*testCase
for _, test := range retries {
if test.success {
flaky = append(flaky, test.name)
} else {
repeatFailures = append(repeatFailures, test)
}
}
if len(flaky) > 0 {
failing = repeatFailures
sort.Strings(flaky)
fmt.Fprintf(opt.Out, "Flaky tests:\n\n%s\n\n", strings.Join(flaky, "\n"))
}
}
if len(failing) > 0 {
names := testNames(failing)
sort.Strings(names)
fmt.Fprintf(opt.Out, "Failing tests:\n\n%s\n\n", strings.Join(names, "\n"))
}
if len(opt.JUnitDir) > 0 {
if err := writeJUnitReport("junit_e2e", "openshift-tests-private", tests, opt.JUnitDir, duration, opt.ErrOut, syntheticTestResults...); err != nil {
fmt.Fprintf(opt.Out, "error: Unable to write e2e JUnit results: %v", err)
}
}
if fail > 0 {
if len(failing) > 0 || suite.MaximumAllowedFlakes == 0 {
return fmt.Errorf("%d fail, %d pass, %d skip (%s)", fail, pass, skip, duration)
}
fmt.Fprintf(opt.Out, "%d flakes detected, suite allows passing with only flakes\n\n", fail)
}
fmt.Fprintf(opt.Out, "%d pass, %d skip (%s)\n", pass, skip, duration)
return ctx.Err()
}
| package ginkgo | ||||
function | openshift/openshift-tests-private | af5a5f22-d1ee-479a-80ec-27e7553769a2 | AsEnv | ['"fmt"'] | ['Options'] | github.com/openshift/openshift-tests-private/pkg/test/ginkgo/cmd_runsuite.go | func (opt *Options) AsEnv() []string {
var args []string
args = append(args, fmt.Sprintf("TEST_PROVIDER=%s", opt.Provider))
args = append(args, fmt.Sprintf("TEST_SUITE_OPTIONS=%s", opt.SuiteOptions))
return args
} | ginkgo | |||
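AsEnv only renders the provider and suite options; per the comment on Options, each test runs as a child process, so these values are presumably handed to the child via its environment. The following is a hedged sketch of that hand-off; the binary path, subcommand, test name, and provider JSON are placeholders, and the real invocation lives elsewhere in this package.

```go
package main

import (
	"log"
	"os"
	"os/exec"

	"github.com/openshift/openshift-tests-private/pkg/test/ginkgo"
)

func main() {
	opt := &ginkgo.Options{
		Provider:     `{"type":"aws","region":"us-east-1"}`, // assumption: provider payload format is not defined here
		SuiteOptions: "",
		Out:          os.Stdout,
		ErrOut:       os.Stderr,
	}

	// Hypothetical hand-off: run one test in a child process with the suite's
	// environment variables attached.
	cmd := exec.Command("/usr/bin/extended-platform-tests", "run-tests", "[sig-example] some test name")
	cmd.Env = append(os.Environ(), opt.AsEnv()...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		log.Printf("test process exited: %v", err)
	}
}
```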
function | openshift/openshift-tests-private | 8a594dd0-fd55-43cd-aa14-5e81d371b5ba | Run | ['"bytes"', '"context"', '"fmt"', '"io/ioutil"', '"os"', '"os/signal"', '"path/filepath"', '"sort"', '"strings"', '"syscall"', '"time"', '"github.com/openshift/openshift-tests-private/pkg/monitor"'] | ['Options'] | github.com/openshift/openshift-tests-private/pkg/test/ginkgo/cmd_runsuite.go | func (opt *Options) Run(args []string) error {
var suite *TestSuite
if len(opt.TestFile) > 0 {
var in []byte
var err error
if opt.TestFile == "-" {
in, err = ioutil.ReadAll(os.Stdin)
if err != nil {
return err
}
} else {
in, err = ioutil.ReadFile(opt.TestFile)
}
if err != nil {
return err
}
suite, err = newSuiteFromFile("files", in)
if err != nil {
return fmt.Errorf("could not read test suite from input: %v", err)
}
}
if suite == nil && len(args) == 0 {
fmt.Fprintf(opt.ErrOut, SuitesString(opt.Suites, "Select a test suite to run against the server:\n\n"))
return fmt.Errorf("specify a test suite to run, for example: %s run %s", filepath.Base(os.Args[0]), opt.Suites[0].Name)
}
if suite == nil && len(args) > 0 {
for _, s := range opt.Suites {
if s.Name == args[0] {
suite = s
break
}
}
}
if suite == nil {
fmt.Fprintf(opt.ErrOut, SuitesString(opt.Suites, "Select a test suite to run against the server:\n\n"))
return fmt.Errorf("suite %q does not exist", args[0])
}
if len(opt.Regex) > 0 {
if err := filterWithRegex(suite, opt.Regex); err != nil {
return fmt.Errorf("regular expression for filtering tests is invalid: %v", err)
}
}
tests, err := testsForSuite()
if err != nil {
return err
}
tests = suite.Filter(tests)
if len(tests) == 0 {
return fmt.Errorf("suite %q does not contain any tests", suite.Name)
}
count := opt.Count
if count == 0 {
count = suite.Count
}
if count > 1 {
var newTests []*testCase
for i := 0; i < count; i++ {
newTests = append(newTests, tests...)
}
tests = newTests
}
if opt.PrintCommands {
status := newTestStatus(opt.Out, true, len(tests), time.Minute, &monitor.Monitor{}, opt.AsEnv())
newParallelTestQueue(tests).Execute(context.Background(), 1, status.OutputCommand)
return nil
}
if opt.DryRun {
for _, test := range sortedTests(tests) {
fmt.Fprintf(opt.Out, "%q\n", test.name)
}
return nil
}
if len(opt.JUnitDir) > 0 {
if _, err := os.Stat(opt.JUnitDir); err != nil {
if !os.IsNotExist(err) {
return fmt.Errorf("could not access --junit-dir: %v", err)
}
if err := os.MkdirAll(opt.JUnitDir, 0755); err != nil {
return fmt.Errorf("could not create --junit-dir: %v", err)
}
}
}
parallelism := opt.Parallelism
if parallelism == 0 {
parallelism = suite.Parallelism
}
if parallelism == 0 {
parallelism = 3
}
timeout := opt.Timeout
if timeout == 0 {
timeout = suite.TestTimeout
}
if timeout == 0 {
timeout = 15 * time.Minute
}
ctx, cancelFn := context.WithCancel(context.Background())
defer cancelFn()
abortCh := make(chan os.Signal)
go func() {
<-abortCh
fmt.Fprintf(opt.ErrOut, "Interrupted, terminating tests\n")
cancelFn()
sig := <-abortCh
fmt.Fprintf(opt.ErrOut, "Interrupted twice, exiting (%s)\n", sig)
switch sig {
case syscall.SIGINT:
os.Exit(130)
default:
os.Exit(0)
}
}()
signal.Notify(abortCh, syscall.SIGINT, syscall.SIGTERM)
m, err := monitor.Start(ctx)
if err != nil {
return err
}
// if we run a single test, always include success output
includeSuccess := opt.IncludeSuccessOutput
if len(tests) == 1 {
includeSuccess = true
}
status := newTestStatus(opt.Out, includeSuccess, len(tests), timeout, m, opt.AsEnv())
smoke, normal := splitTests(tests, func(t *testCase) bool {
return strings.Contains(t.name, "[Smoke]")
})
// run the tests
start := time.Now()
// run our smoke tests first
q := newParallelTestQueue(smoke)
q.Execute(ctx, parallelism, status.Run)
// run other tests next
q = newParallelTestQueue(normal)
q.Execute(ctx, parallelism, status.Run)
duration := time.Now().Sub(start).Round(time.Second / 10)
if duration > time.Minute {
duration = duration.Round(time.Second)
}
pass, fail, skip, failing := summarizeTests(tests)
// monitor the cluster while the tests are running and report any detected
// anomalies
var syntheticTestResults []*JUnitTestCase
if events := m.Events(time.Time{}, time.Time{}); len(events) > 0 {
buf, errBuf := &bytes.Buffer{}, &bytes.Buffer{}
fmt.Fprintf(buf, "\nTimeline:\n\n")
errorCount := 0
for _, test := range tests {
if !test.failed {
continue
}
events = append(events,
&monitor.EventInterval{
From: test.start,
To: test.end,
Condition: &monitor.Condition{
Level: monitor.Info,
Locator: fmt.Sprintf("test=%q", test.name),
Message: "running",
},
},
&monitor.EventInterval{
From: test.end,
To: test.end,
Condition: &monitor.Condition{
Level: monitor.Info,
Locator: fmt.Sprintf("test=%q", test.name),
Message: "failed",
},
},
)
}
sort.Sort(events)
for _, event := range events {
if event.Level == monitor.Error {
errorCount++
fmt.Fprintln(errBuf, event.String())
}
fmt.Fprintln(buf, event.String())
}
fmt.Fprintln(buf)
if errorCount > 0 {
syntheticTestResults = append(syntheticTestResults, &JUnitTestCase{
Name: "Monitor cluster while tests execute",
SystemOut: buf.String(),
Duration: duration.Seconds(),
FailureOutput: &FailureOutput{
Output: fmt.Sprintf("%d error level events were detected during this test run:\n\n%s", errorCount, errBuf.String()),
},
})
}
if strings.EqualFold(os.Getenv("ENABLE_PRINT_EVENT_STDOUT"), "true") {
opt.Out.Write(buf.Bytes())
}
}
// attempt to retry failures to do flake detection
if fail > 0 && fail <= suite.MaximumAllowedFlakes {
var retries []*testCase
for _, test := range failing {
retries = append(retries, test.Retry())
if len(retries) > suite.MaximumAllowedFlakes {
break
}
}
q := newParallelTestQueue(retries)
status := newTestStatus(ioutil.Discard, opt.IncludeSuccessOutput, len(retries), timeout, m, opt.AsEnv())
q.Execute(ctx, parallelism, status.Run)
var flaky []string
var repeatFailures []*testCase
for _, test := range retries {
if test.success {
flaky = append(flaky, test.name)
} else {
repeatFailures = append(repeatFailures, test)
}
}
if len(flaky) > 0 {
failing = repeatFailures
sort.Strings(flaky)
fmt.Fprintf(opt.Out, "Flaky tests:\n\n%s\n\n", strings.Join(flaky, "\n"))
}
}
if len(failing) > 0 {
names := testNames(failing)
sort.Strings(names)
fmt.Fprintf(opt.Out, "Failing tests:\n\n%s\n\n", strings.Join(names, "\n"))
}
if len(opt.JUnitDir) > 0 {
if err := writeJUnitReport("junit_e2e", "openshift-tests-private", tests, opt.JUnitDir, duration, opt.ErrOut, syntheticTestResults...); err != nil {
fmt.Fprintf(opt.Out, "error: Unable to write e2e JUnit results: %v", err)
}
}
if fail > 0 {
if len(failing) > 0 || suite.MaximumAllowedFlakes == 0 {
return fmt.Errorf("%d fail, %d pass, %d skip (%s)", fail, pass, skip, duration)
}
fmt.Fprintf(opt.Out, "%d flakes detected, suite allows passing with only flakes\n\n", fail)
}
fmt.Fprintf(opt.Out, "%d pass, %d skip (%s)\n", pass, skip, duration)
return ctx.Err()
} | ginkgo | |||
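Run ties the pieces together: pick a suite, start the cluster monitor, execute smoke tests and then the rest in parallel, retry failures for flake detection, and emit a JUnit report. Below is a hedged sketch of driving it programmatically; the suite name, regex, and directories are placeholders, and suite registration is assumed to happen elsewhere (here the slice is left empty purely for illustration).

```go
package main

import (
	"fmt"
	"os"
	"time"

	"github.com/openshift/openshift-tests-private/pkg/test/ginkgo"
)

// suites would normally be populated by the test binary's static suite
// definitions; it is empty here and only illustrates the wiring.
var suites []*ginkgo.TestSuite

func main() {
	opt := &ginkgo.Options{
		Parallelism:          10,
		Timeout:              30 * time.Minute,
		JUnitDir:             "/tmp/junit",
		Regex:                `\[sig-example\]`, // only run matching tests
		IncludeSuccessOutput: false,
		Suites:               suites,
		Out:                  os.Stdout,
		ErrOut:               os.Stderr,
	}
	if err := opt.Run([]string{"some/suite"}); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```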
file | openshift/openshift-tests-private | 2b65620b-df41-4229-a2e7-4dc25a82887b | cmd_runtest | import (
"fmt"
"io"
"os"
"regexp"
"strings"
"time"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/ginkgo/v2/types"
) | github.com/openshift/openshift-tests-private/pkg/test/ginkgo/cmd_runtest.go | package ginkgo
import (
"fmt"
"io"
"os"
"regexp"
"strings"
"time"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/ginkgo/v2/types"
)
type ExitError struct {
Code int
}
func (e ExitError) Error() string {
return fmt.Sprintf("exit with code %d", e.Code)
}
// TestOptions handles running a single test.
type TestOptions struct {
DryRun bool
Out io.Writer
ErrOut io.Writer
}
var _ ginkgo.GinkgoTestingT = &TestOptions{}
func (opt *TestOptions) Run(args []string) error {
if len(args) != 1 {
return fmt.Errorf("only a single test name may be passed")
}
// Ignore the upstream suite behavior within test execution
ginkgo.GetSuite().ClearBeforeAndAfterSuiteNodes()
tests, err := testsForSuite()
if err != nil {
return err
}
var test *testCase
for _, t := range tests {
if t.name == args[0] {
test = t
break
}
}
if test == nil {
return fmt.Errorf("no test exists with that name: %s", args[0])
}
if opt.DryRun {
fmt.Fprintf(opt.Out, "Running test (dry-run)\n")
return nil
}
suiteConfig, reporterConfig := ginkgo.GinkgoConfiguration()
suiteConfig.FocusStrings = []string{fmt.Sprintf("^ %s$", regexp.QuoteMeta(test.name))}
// These settings are matched to upstream's ginkgo configuration. See:
// https://github.com/kubernetes/kubernetes/blob/v1.25.0/test/e2e/framework/test_context.go#L354-L355
// Turn on EmitSpecProgress to get spec progress (especially on interrupt)
	suiteConfig.EmitSpecProgress = true // this option was removed when origin bumped to k8s 1.28, but we want to keep it.
// Randomize specs as well as suites
suiteConfig.RandomizeAllSpecs = true
// turn off stdout/stderr capture see https://github.com/kubernetes/kubernetes/pull/111240
	suiteConfig.OutputInterceptorMode = "none" // this option was removed when origin bumped to k8s 1.28, but we want to keep it.
// https://github.com/kubernetes/kubernetes/blob/v1.25.0/hack/ginkgo-e2e.sh#L172-L173
suiteConfig.Timeout = 24 * time.Hour
reporterConfig.NoColor = true
	reporterConfig.Succinct = true // simplify the information printed at the beginning by SuiteWillBegin.
	// reporterConfig.Verbose = true // would show g.By output after the origin bump to k8s 1.28; we do not use it because we already provide exutil.By
ginkgo.SetReporterConfig(reporterConfig)
cwd, err := os.Getwd()
if err != nil {
return err
}
ginkgo.GetSuite().RunSpec(test.spec, ginkgo.Labels{}, "openshift extended e2e", cwd, ginkgo.GetFailer(), ginkgo.GetWriter(), suiteConfig, reporterConfig)
var summary types.SpecReport
for _, report := range ginkgo.GetSuite().GetReport().SpecReports {
if report.NumAttempts > 0 {
summary = report
}
}
switch {
case summary.State == types.SpecStatePassed:
case summary.State == types.SpecStateSkipped:
if len(summary.Failure.Message) > 0 {
fmt.Fprintf(opt.ErrOut, "skip [%s:%d]: %s\n", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.Message)
}
if len(summary.Failure.ForwardedPanic) > 0 {
fmt.Fprintf(opt.ErrOut, "skip [%s:%d]: %s\n", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.ForwardedPanic)
}
return ExitError{Code: 3}
case summary.State == types.SpecStateFailed, summary.State == types.SpecStatePanicked, summary.State == types.SpecStateInterrupted:
if len(summary.Failure.ForwardedPanic) > 0 {
if len(summary.Failure.Location.FullStackTrace) > 0 {
fmt.Fprintf(opt.ErrOut, "\n%s\n", summary.Failure.Location.FullStackTrace)
}
fmt.Fprintf(opt.ErrOut, "fail [%s:%d]: Test Panicked: %s\n", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.ForwardedPanic)
return ExitError{Code: 1}
}
fmt.Fprintf(opt.ErrOut, "fail [%s:%d]: %s\n", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.Message)
return ExitError{Code: 1}
default:
return fmt.Errorf("unrecognized test case outcome: %#v", summary)
}
return nil
}
func (opt *TestOptions) Fail() {
	// this function allows us to pass TestOptions as the first argument;
	// it's empty because the failure check mechanism is implemented above.
}
func lastFilenameSegment(filename string) string {
if parts := strings.Split(filename, "/vendor/"); len(parts) > 1 {
return parts[len(parts)-1]
}
if parts := strings.Split(filename, "/src/"); len(parts) > 1 {
return parts[len(parts)-1]
}
return filename
}
| package ginkgo | ||||
function | openshift/openshift-tests-private | 88f00d12-cd84-4b4f-83d9-baa3dbbfba0c | Error | ['"fmt"'] | ['ExitError'] | github.com/openshift/openshift-tests-private/pkg/test/ginkgo/cmd_runtest.go | func (e ExitError) Error() string {
return fmt.Sprintf("exit with code %d", e.Code)
} | ginkgo | |||
function | openshift/openshift-tests-private | f3eaef84-275c-46e4-8805-7b2466a38a90 | Run | ['"fmt"', '"os"', '"regexp"', '"time"', '"github.com/onsi/ginkgo/v2/types"'] | ['ExitError', 'TestOptions'] | github.com/openshift/openshift-tests-private/pkg/test/ginkgo/cmd_runtest.go | func (opt *TestOptions) Run(args []string) error {
if len(args) != 1 {
return fmt.Errorf("only a single test name may be passed")
}
// Ignore the upstream suite behavior within test execution
ginkgo.GetSuite().ClearBeforeAndAfterSuiteNodes()
tests, err := testsForSuite()
if err != nil {
return err
}
var test *testCase
for _, t := range tests {
if t.name == args[0] {
test = t
break
}
}
if test == nil {
return fmt.Errorf("no test exists with that name: %s", args[0])
}
if opt.DryRun {
fmt.Fprintf(opt.Out, "Running test (dry-run)\n")
return nil
}
suiteConfig, reporterConfig := ginkgo.GinkgoConfiguration()
suiteConfig.FocusStrings = []string{fmt.Sprintf("^ %s$", regexp.QuoteMeta(test.name))}
// These settings are matched to upstream's ginkgo configuration. See:
// https://github.com/kubernetes/kubernetes/blob/v1.25.0/test/e2e/framework/test_context.go#L354-L355
// Turn on EmitSpecProgress to get spec progress (especially on interrupt)
	suiteConfig.EmitSpecProgress = true // this option was removed when origin bumped to k8s 1.28, but we want to keep it.
// Randomize specs as well as suites
suiteConfig.RandomizeAllSpecs = true
// turn off stdout/stderr capture see https://github.com/kubernetes/kubernetes/pull/111240
	suiteConfig.OutputInterceptorMode = "none" // this option was removed when origin bumped to k8s 1.28, but we want to keep it.
// https://github.com/kubernetes/kubernetes/blob/v1.25.0/hack/ginkgo-e2e.sh#L172-L173
suiteConfig.Timeout = 24 * time.Hour
reporterConfig.NoColor = true
	reporterConfig.Succinct = true // simplify the information printed at the beginning by SuiteWillBegin.
	// reporterConfig.Verbose = true // would show g.By output after the origin bump to k8s 1.28; we do not use it because we already provide exutil.By
ginkgo.SetReporterConfig(reporterConfig)
cwd, err := os.Getwd()
if err != nil {
return err
}
ginkgo.GetSuite().RunSpec(test.spec, ginkgo.Labels{}, "openshift extended e2e", cwd, ginkgo.GetFailer(), ginkgo.GetWriter(), suiteConfig, reporterConfig)
var summary types.SpecReport
for _, report := range ginkgo.GetSuite().GetReport().SpecReports {
if report.NumAttempts > 0 {
summary = report
}
}
switch {
case summary.State == types.SpecStatePassed:
case summary.State == types.SpecStateSkipped:
if len(summary.Failure.Message) > 0 {
fmt.Fprintf(opt.ErrOut, "skip [%s:%d]: %s\n", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.Message)
}
if len(summary.Failure.ForwardedPanic) > 0 {
fmt.Fprintf(opt.ErrOut, "skip [%s:%d]: %s\n", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.ForwardedPanic)
}
return ExitError{Code: 3}
case summary.State == types.SpecStateFailed, summary.State == types.SpecStatePanicked, summary.State == types.SpecStateInterrupted:
if len(summary.Failure.ForwardedPanic) > 0 {
if len(summary.Failure.Location.FullStackTrace) > 0 {
fmt.Fprintf(opt.ErrOut, "\n%s\n", summary.Failure.Location.FullStackTrace)
}
fmt.Fprintf(opt.ErrOut, "fail [%s:%d]: Test Panicked: %s\n", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.ForwardedPanic)
return ExitError{Code: 1}
}
fmt.Fprintf(opt.ErrOut, "fail [%s:%d]: %s\n", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.Message)
return ExitError{Code: 1}
default:
return fmt.Errorf("unrecognized test case outcome: %#v", summary)
}
return nil
} | ginkgo | |||
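The exit codes are the contract between the parent suite runner and this single-test entry point: 0 for pass, 1 for fail or panic, 3 for skip. Below is a hedged sketch of calling it directly and mapping the returned error back to a result; the test name is a placeholder.

```go
package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/openshift/openshift-tests-private/pkg/test/ginkgo"
)

func main() {
	opt := &ginkgo.TestOptions{Out: os.Stdout, ErrOut: os.Stderr}
	err := opt.Run([]string{"[sig-example] a single test name"})

	var exitErr ginkgo.ExitError
	switch {
	case err == nil:
		fmt.Println("result: pass")
	case errors.As(err, &exitErr) && exitErr.Code == 3:
		fmt.Println("result: skip")
	case errors.As(err, &exitErr):
		fmt.Printf("result: fail (exit code %d)\n", exitErr.Code)
		os.Exit(exitErr.Code)
	default:
		fmt.Fprintln(os.Stderr, "runner error:", err)
		os.Exit(1)
	}
}
```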
function | openshift/openshift-tests-private | d8cc8fc4-f467-41ba-8874-c63bf6492254 | Fail | ['TestOptions'] | github.com/openshift/openshift-tests-private/pkg/test/ginkgo/cmd_runtest.go | func (opt *TestOptions) Fail() {
	// this function allows us to pass TestOptions as the first argument;
	// it's empty because the failure check mechanism is implemented above.
} | ginkgo | ||||
function | openshift/openshift-tests-private | a40fcf6f-8272-4b45-a2f2-a1c29ea046bd | lastFilenameSegment | ['"strings"'] | github.com/openshift/openshift-tests-private/pkg/test/ginkgo/cmd_runtest.go | func lastFilenameSegment(filename string) string {
if parts := strings.Split(filename, "/vendor/"); len(parts) > 1 {
return parts[len(parts)-1]
}
if parts := strings.Split(filename, "/src/"); len(parts) > 1 {
return parts[len(parts)-1]
}
return filename
} | ginkgo | ||||
file | openshift/openshift-tests-private | 93297e9a-96b8-4de7-b95f-8baec005104b | ginkgo | import (
"math/rand"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/ginkgo/v2/types"
"k8s.io/apimachinery/pkg/util/errors"
) | github.com/openshift/openshift-tests-private/pkg/test/ginkgo/ginkgo.go | package ginkgo
import (
"math/rand"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/ginkgo/v2/types"
"k8s.io/apimachinery/pkg/util/errors"
)
func testsForSuite() ([]*testCase, error) {
var tests []*testCase
var errs []error
// Avoid building the tree multiple times
if !ginkgo.GetSuite().InPhaseBuildTree() {
_ = ginkgo.GetSuite().BuildTree()
}
ginkgo.GetSuite().WalkTests(func(name string, spec types.TestSpec) {
// if append, ok := generated.Annotations[name]; ok {
// spec.AppendText(name)
// } else {
// panic(fmt.Sprintf("unable to find test %s", name))
// }
tc, err := newTestCaseFromGinkgoSpec(spec)
if err != nil {
errs = append(errs, err)
}
tests = append(tests, tc)
})
if len(errs) > 0 {
return nil, errors.NewAggregate(errs)
}
suiteConfig, _ := ginkgo.GinkgoConfiguration()
r := rand.New(rand.NewSource(suiteConfig.RandomSeed))
r.Shuffle(len(tests), func(i, j int) { tests[i], tests[j] = tests[j], tests[i] })
return tests, nil
}
| package ginkgo | ||||
function | openshift/openshift-tests-private | 5e2030be-12a9-47cf-b4b2-b43a9738af0d | testsForSuite | ['"math/rand"', '"github.com/onsi/ginkgo/v2/types"', '"k8s.io/apimachinery/pkg/util/errors"'] | github.com/openshift/openshift-tests-private/pkg/test/ginkgo/ginkgo.go | func testsForSuite() ([]*testCase, error) {
var tests []*testCase
var errs []error
// Avoid building the tree multiple times
if !ginkgo.GetSuite().InPhaseBuildTree() {
_ = ginkgo.GetSuite().BuildTree()
}
ginkgo.GetSuite().WalkTests(func(name string, spec types.TestSpec) {
// if append, ok := generated.Annotations[name]; ok {
// spec.AppendText(name)
// } else {
// panic(fmt.Sprintf("unable to find test %s", name))
// }
tc, err := newTestCaseFromGinkgoSpec(spec)
if err != nil {
errs = append(errs, err)
}
tests = append(tests, tc)
})
if len(errs) > 0 {
return nil, errors.NewAggregate(errs)
}
suiteConfig, _ := ginkgo.GinkgoConfiguration()
r := rand.New(rand.NewSource(suiteConfig.RandomSeed))
r.Shuffle(len(tests), func(i, j int) { tests[i], tests[j] = tests[j], tests[i] })
return tests, nil
} | ginkgo | ||||
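Because the shuffle is seeded with the ginkgo suite's RandomSeed, the test order is reproducible for any given seed. That property is plain math/rand behavior, demonstrated below with strings standing in for test cases; the seeds and names are invented.

```go
package main

import (
	"fmt"
	"math/rand"
)

func shuffled(seed int64, items []string) []string {
	out := append([]string(nil), items...)
	r := rand.New(rand.NewSource(seed))
	r.Shuffle(len(out), func(i, j int) { out[i], out[j] = out[j], out[i] })
	return out
}

func main() {
	tests := []string{"test-a", "test-b", "test-c", "test-d"}
	fmt.Println(shuffled(42, tests))
	fmt.Println(shuffled(42, tests)) // identical order: same seed, same shuffle
	fmt.Println(shuffled(7, tests))  // different seed, usually a different order
}
```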
file | openshift/openshift-tests-private | e2944dfa-aa27-46ce-a73b-212ebf4a1d2f | junit | import (
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"path/filepath"
"strings"
"time"
) | github.com/openshift/openshift-tests-private/pkg/test/ginkgo/junit.go | package ginkgo
import (
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"path/filepath"
"strings"
"time"
)
// The below types are directly marshalled into XML. The types correspond to jUnit
// XML schema, but do not contain all valid fields. For instance, the class name
// field for test cases is omitted, as this concept does not directly apply to Go.
// For XML specifications see http://help.catchsoftware.com/display/ET/JUnit+Format
// or view the XSD included in this package as 'junit.xsd'
// TestSuites represents a flat collection of jUnit test suites.
type JUnitTestSuites struct {
XMLName xml.Name `xml:"testsuites"`
// Suites are the jUnit test suites held in this collection
Suites []*JUnitTestSuite `xml:"testsuite"`
}
// TestSuite represents a single jUnit test suite, potentially holding child suites.
type JUnitTestSuite struct {
XMLName xml.Name `xml:"testsuite"`
// Name is the name of the test suite
Name string `xml:"name,attr"`
// NumTests records the number of tests in the TestSuite
NumTests uint `xml:"tests,attr"`
// NumSkipped records the number of skipped tests in the suite
NumSkipped uint `xml:"skipped,attr"`
// NumFailed records the number of failed tests in the suite
NumFailed uint `xml:"failures,attr"`
// Duration is the time taken in seconds to run all tests in the suite
Duration float64 `xml:"time,attr"`
// Properties holds other properties of the test suite as a mapping of name to value
Properties []*TestSuiteProperty `xml:"properties,omitempty"`
// TestCases are the test cases contained in the test suite
TestCases []*JUnitTestCase `xml:"testcase"`
// Children holds nested test suites
Children []*JUnitTestSuite `xml:"testsuite"`
}
// TestSuiteProperty contains a mapping of a property name to a value
type TestSuiteProperty struct {
XMLName xml.Name `xml:"property"`
Name string `xml:"name,attr"`
Value string `xml:"value,attr"`
}
// JUnitTestCase represents a jUnit test case
type JUnitTestCase struct {
XMLName xml.Name `xml:"testcase"`
// Name is the name of the test case
Name string `xml:"name,attr"`
// Classname is an attribute set by the package type and is required
Classname string `xml:"classname,attr,omitempty"`
// Duration is the time taken in seconds to run the test
Duration float64 `xml:"time,attr"`
// SkipMessage holds the reason why the test was skipped
SkipMessage *SkipMessage `xml:"skipped"`
// FailureOutput holds the output from a failing test
FailureOutput *FailureOutput `xml:"failure"`
// SystemOut is output written to stdout during the execution of this test case
SystemOut string `xml:"system-out,omitempty"`
// SystemErr is output written to stderr during the execution of this test case
SystemErr string `xml:"system-err,omitempty"`
}
// SkipMessage holds a message explaining why a test was skipped
type SkipMessage struct {
XMLName xml.Name `xml:"skipped"`
// Message explains why the test was skipped
Message string `xml:"message,attr,omitempty"`
}
// FailureOutput holds the output from a failing test
type FailureOutput struct {
XMLName xml.Name `xml:"failure"`
// Message holds the failure message from the test
Message string `xml:"message,attr"`
// Output holds verbose failure output from the test
Output string `xml:",chardata"`
}
// TestResult is the result of a test case
type TestResult string
const (
TestResultPass TestResult = "pass"
TestResultSkip TestResult = "skip"
TestResultFail TestResult = "fail"
)
func writeJUnitReport(filePrefix, name string, tests []*testCase, dir string, duration time.Duration, errOut io.Writer, additionalResults ...*JUnitTestCase) error {
s := &JUnitTestSuite{
Name: name,
Duration: duration.Seconds(),
}
for _, test := range tests {
switch {
case test.skipped:
s.NumTests++
s.NumSkipped++
s.TestCases = append(s.TestCases, &JUnitTestCase{
Name: test.name,
SystemOut: string(test.out),
Duration: test.duration.Seconds(),
SkipMessage: &SkipMessage{
Message: lastLinesUntil(string(test.out), 100, "skip ["),
},
})
case test.failed:
s.NumTests++
s.NumFailed++
s.TestCases = append(s.TestCases, &JUnitTestCase{
Name: test.name,
SystemOut: string(test.out),
Duration: test.duration.Seconds(),
FailureOutput: &FailureOutput{
Output: lastLinesUntil(string(test.out), 100, "fail ["),
},
})
case test.success:
			s.NumTests++ // successes count toward the total; NumFailed is only bumped for failures
s.TestCases = append(s.TestCases, &JUnitTestCase{
Name: test.name,
Duration: test.duration.Seconds(),
})
}
}
for _, result := range additionalResults {
switch {
case result.SkipMessage != nil:
s.NumSkipped++
case result.FailureOutput != nil:
s.NumFailed++
}
s.NumTests++
s.TestCases = append(s.TestCases, result)
}
out, err := xml.Marshal(s)
if err != nil {
return err
}
path := filepath.Join(dir, fmt.Sprintf("%s_%s.xml", filePrefix, time.Now().UTC().Format("20060102-150405")))
fmt.Fprintf(errOut, "Writing JUnit report to %s\n\n", path)
return ioutil.WriteFile(path, out, 0640)
}
func lastLinesUntil(output string, max int, until ...string) string {
output = strings.TrimSpace(output)
index := len(output) - 1
if index < 0 || max == 0 {
return output
}
for max > 0 {
next := strings.LastIndex(output[:index], "\n")
if next <= 0 {
return strings.TrimSpace(output)
}
// empty lines don't count
line := strings.TrimSpace(output[next+1 : index])
if len(line) > 0 {
max--
}
index = next
if stringStartsWithAny(line, until) {
break
}
}
return strings.TrimSpace(output[index:])
}
func stringStartsWithAny(s string, contains []string) bool {
for _, match := range contains {
if strings.HasPrefix(s, match) {
return true
}
}
return false
}
| package ginkgo | ||||
function | openshift/openshift-tests-private | d2965b7a-4483-4907-bc95-f31e01a5b2a6 | writeJUnitReport | ['"encoding/xml"', '"fmt"', '"io"', '"io/ioutil"', '"path/filepath"', '"time"'] | ['JUnitTestSuite', 'JUnitTestCase', 'SkipMessage', 'FailureOutput'] | github.com/openshift/openshift-tests-private/pkg/test/ginkgo/junit.go | func writeJUnitReport(filePrefix, name string, tests []*testCase, dir string, duration time.Duration, errOut io.Writer, additionalResults ...*JUnitTestCase) error {
s := &JUnitTestSuite{
Name: name,
Duration: duration.Seconds(),
}
for _, test := range tests {
switch {
case test.skipped:
s.NumTests++
s.NumSkipped++
s.TestCases = append(s.TestCases, &JUnitTestCase{
Name: test.name,
SystemOut: string(test.out),
Duration: test.duration.Seconds(),
SkipMessage: &SkipMessage{
Message: lastLinesUntil(string(test.out), 100, "skip ["),
},
})
case test.failed:
s.NumTests++
s.NumFailed++
s.TestCases = append(s.TestCases, &JUnitTestCase{
Name: test.name,
SystemOut: string(test.out),
Duration: test.duration.Seconds(),
FailureOutput: &FailureOutput{
Output: lastLinesUntil(string(test.out), 100, "fail ["),
},
})
case test.success:
			s.NumTests++ // successes count toward the total; NumFailed is only bumped for failures
s.TestCases = append(s.TestCases, &JUnitTestCase{
Name: test.name,
Duration: test.duration.Seconds(),
})
}
}
for _, result := range additionalResults {
switch {
case result.SkipMessage != nil:
s.NumSkipped++
case result.FailureOutput != nil:
s.NumFailed++
}
s.NumTests++
s.TestCases = append(s.TestCases, result)
}
out, err := xml.Marshal(s)
if err != nil {
return err
}
path := filepath.Join(dir, fmt.Sprintf("%s_%s.xml", filePrefix, time.Now().UTC().Format("20060102-150405")))
fmt.Fprintf(errOut, "Writing JUnit report to %s\n\n", path)
return ioutil.WriteFile(path, out, 0640)
} | ginkgo |
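The report is just these structs marshalled with encoding/xml, so the output shape can be previewed without running any tests. A small sketch follows; the test names, durations, and failure text are invented.

```go
package main

import (
	"encoding/xml"
	"fmt"

	"github.com/openshift/openshift-tests-private/pkg/test/ginkgo"
)

func main() {
	suite := &ginkgo.JUnitTestSuite{
		Name:      "openshift-tests-private",
		NumTests:  2,
		NumFailed: 1,
		Duration:  12.3,
		TestCases: []*ginkgo.JUnitTestCase{
			{Name: "[sig-example] passing test", Duration: 4.2},
			{
				Name:          "[sig-example] failing test",
				Duration:      8.1,
				FailureOutput: &ginkgo.FailureOutput{Output: "fail [example.go:42]: assertion failed"},
			},
		},
	}
	out, err := xml.MarshalIndent(suite, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```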
No dataset card yet
Downloads last month: 55