element_type | project_name | uuid | name | imports | structs | interfaces | file_location | code | global_vars | package | tags
---|---|---|---|---|---|---|---|---|---|---|---
function | openshift/openshift-tests-private | caa59a95-9fca-4d4e-b2e3-5d21d62e003c | getPodvmEnableGPU | ['"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func getPodvmEnableGPU(oc *exutil.CLI, opNamespace, cmName string) (enGPU string) {
jsonpath := "-o=jsonpath={.data.ENABLE_NVIDIA_GPU}"
msg, err := oc.AsAdmin().Run("get").Args("configmap", cmName, "-n", opNamespace, jsonpath).Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Could not find %v in %v\n Error: %v", jsonpath, cmName, err))
e2e.Logf("ENABLE_NVIDIA_GPU is %v", msg)
return msg
} | kata | ||||
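A hedged usage sketch (not taken from the suite): callers would typically gate GPU-specific assertions on the returned value. The configmap name `peer-pods-cm` matches the one used elsewhere in this file; the skip behavior itself is an assumption.

```go
// Hypothetical usage: skip GPU-specific checks unless the peer-pods
// configmap explicitly enables NVIDIA GPUs.
if getPodvmEnableGPU(oc, opNamespace, "peer-pods-cm") != "true" {
	g.Skip("ENABLE_NVIDIA_GPU is not \"true\"; skipping GPU-specific test")
}
```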
function | openshift/openshift-tests-private | b8b12ed2-8901-4681-adb5-6dc601745c02 | installKataContainerRPM | ['"fmt"', '"strings"'] | ['TestRunDescription'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func installKataContainerRPM(oc *exutil.CLI, testrun *TestRunDescription) (rpmName string, err error) {
workerNodeList, err := exutil.GetClusterNodesBy(oc, "worker")
if err != nil || len(workerNodeList) < 1 {
err = fmt.Errorf("Error: no worker nodes: %v, %v", workerNodeList, err)
return rpmName, err
}
rpmName, err = checkNodesForKataContainerRPM(oc, testrun, workerNodeList)
if err != nil {
return rpmName, err
}
errors := ""
cmd := fmt.Sprintf("mount -o remount,rw /usr; rpm -Uvh /var/local/%v", rpmName)
for index := range workerNodeList {
msg, err := exutil.DebugNodeWithOptionsAndChroot(oc, workerNodeList[index], []string{"-q"}, "/bin/sh", "-c", cmd)
if !(strings.Contains(msg, "already installed") || strings.Contains(msg, "installing")) {
if err != nil {
errors = fmt.Sprintf("%vError trying to rpm -Uvh %v on %v: %v %v\n", errors, rpmName, workerNodeList[index], msg, err)
}
}
}
if errors != "" {
err = fmt.Errorf("Error: Scratch rpm errors: %v", errors)
}
return rpmName, err
} | kata | |||
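The function above accumulates per-node failures into a single string. A minimal sketch of the same aggregation with `errors.Join`, assuming Go 1.20+; `runOnNode` is a hypothetical stand-in for `exutil.DebugNodeWithOptionsAndChroot`.

```go
package kata // illustrative placement

import (
	"errors"
	"fmt"
	"strings"
)

// installOnNodes mirrors the per-node error accumulation above, but
// aggregates with errors.Join instead of building one string.
func installOnNodes(nodes []string, runOnNode func(node string) (string, error)) error {
	var errs []error
	for _, node := range nodes {
		msg, err := runOnNode(node)
		if err != nil && !strings.Contains(msg, "already installed") {
			errs = append(errs, fmt.Errorf("node %s: %s: %w", node, msg, err))
		}
	}
	return errors.Join(errs...) // nil when every node succeeded
}
```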
function | openshift/openshift-tests-private | cefbc343-50e1-435e-a7ad-3b2bfeada91f | checkNodesForKataContainerRPM | ['"fmt"', '"strings"'] | ['TestRunDescription'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func checkNodesForKataContainerRPM(oc *exutil.CLI, testrun *TestRunDescription, workerNodeList []string) (rpmName string, err error) {
// check if rpm exists
errors := ""
msg := ""
cmd := "ls -1 /var/local | grep '^kata-containers.*rpm$'"
for index := range workerNodeList {
msg, err = exutil.DebugNodeWithOptionsAndChroot(oc, workerNodeList[index], []string{"-q"}, "/bin/sh", "-c", cmd)
if strings.Contains(msg, "kata-containers") && strings.Contains(msg, ".rpm") {
rpmName = strings.TrimRight(msg, "\n") // strip the trailing newline from the ls output
}
if rpmName == "" {
errors = fmt.Sprintf("%vError finding /var/local/kata-containers.*rpm on %v: %v %v\n", errors, workerNodeList[index], msg, err)
}
}
if errors != "" {
err = fmt.Errorf("Errors finding rpm in /var/local: %v", errors)
}
return rpmName, err
} | kata | |||
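`strings.TrimRight` above only strips the trailing newline, so if more than one rpm were ever present in `/var/local`, `rpmName` would keep an embedded newline. A hedged sketch of a line-wise alternative; `firstKataRPM` is a hypothetical helper, not part of the suite.

```go
package kata // illustrative placement

import "strings"

// firstKataRPM returns the first kata-containers rpm named in multi-line
// `ls` output, ignoring blank lines and surrounding whitespace.
func firstKataRPM(lsOutput string) string {
	for _, line := range strings.Split(lsOutput, "\n") {
		line = strings.TrimSpace(line)
		if strings.HasPrefix(line, "kata-containers") && strings.HasSuffix(line, ".rpm") {
			return line
		}
	}
	return ""
}
```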
function | openshift/openshift-tests-private | a807e8f7-8262-425d-a765-8c02347f8014 | verifyImageCreationJobSuccess | ['"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | ['PeerpodParam'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func verifyImageCreationJobSuccess(oc *exutil.CLI, namespace string, ppParam PeerpodParam, ciCmName string, provider string) (msg string, err error) {
var jobPodName string
err = wait.PollImmediate(10*time.Second, 15*time.Minute, func() (bool, error) {
msg, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "--field-selector=status.phase=Succeeded", "--selector=job-name=osc-podvm-image-creation", "-o=jsonpath={.items[0].metadata.name}").Output()
if err != nil || msg == "" {
e2e.Logf("Waiting for PodVM image creation job to complete")
return false, nil
}
jobPodName = msg
return true, nil
})
if err != nil {
return "", fmt.Errorf("Image creation job did not succeed within the expected time")
}
logs, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args(jobPodName, "-n", namespace).Output()
if err != nil {
return "", fmt.Errorf("Error retrieving logs: %v", err)
}
configmapData, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", ciCmName, "-n", namespace, "-o=jsonpath={.data}").Output()
if err != nil {
e2e.Failf("%v Configmap created by QE CI has error: %v", ciCmName, err)
}
ppParam, err = parseCIPpConfigMapData(provider, configmapData)
if err != nil {
e2e.Failf("Error getting ppParam %v", err)
}
if ppParam.LIBVIRT_PODVM_IMAGE_URI != "" {
if !strings.Contains(logs, "Checksum of the PodVM image:") {
return "", fmt.Errorf("Pulling image from LIBVIRT_PODVM_IMAGE_URI failed")
}
e2e.Logf("PodVM image pull logs validated successfully")
}
if !strings.Contains(logs, "Uploaded the image successfully") || !strings.Contains(logs, "configmap/peer-pods-cm patched") {
logLines := strings.Split(logs, "\n")
start := len(logLines) - 30
if start < 0 {
start = 0
}
endLogs := logLines[start:]
trimmedLogs := strings.Join(endLogs, "\n")
e2e.Logf("Job logs do not contain success messages: %v", trimmedLogs)
return "", fmt.Errorf("Failed to get expected success message from the job logs")
}
configMapOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("cm", "peer-pods-cm", "-n", namespace, "-o=jsonpath={.data.LIBVIRT_IMAGE_ID}").Output()
if err != nil {
return "", fmt.Errorf("Failed to retrieve LIBVIRT_IMAGE_ID from ConfigMap: %v", err)
}
if !strings.Contains(logs, fmt.Sprintf("vol-upload: found option <vol>: %s", configMapOutput)) {
return "", fmt.Errorf("LIBVIRT_IMAGE_ID in ConfigMap does not match the logs")
}
return logs, nil
} | kata | |||
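The last-30-lines trimming above is a small self-contained pattern; a sketch of it as a reusable helper (`tailLines` is a hypothetical name, not part of the suite):

```go
// tailLines returns at most the last n lines of logs, mirroring the
// trimming logic in verifyImageCreationJobSuccess.
func tailLines(logs string, n int) string {
	lines := strings.Split(logs, "\n")
	if start := len(lines) - n; start > 0 {
		lines = lines[start:]
	}
	return strings.Join(lines, "\n")
}
```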
function | openshift/openshift-tests-private | 234ee384-6597-4a8e-811f-e04714cb3795 | checkSEEnabled | ['"fmt"', '"os/exec"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func checkSEEnabled(oc *exutil.CLI, podName, namespace string) error {
var errors []string
result, err := oc.AsAdmin().WithoutNamespace().Run("exec").Args(podName, "-n", namespace, "--", "cat", "/sys/firmware/uv/prot_virt_guest").Output()
if err != nil || result != "1" {
errors = append(errors, fmt.Sprintf("prot_virt_guest is not 1, got %v", result))
}
result, err = oc.AsAdmin().WithoutNamespace().Run("exec").Args(podName, "-n", namespace, "--", "grep", "facilities", "/proc/cpuinfo").Output()
if err != nil || !strings.Contains(result, "158") {
errors = append(errors, fmt.Sprintf("'facilities' in /proc/cpuinfo does not contain 158, got %v", result))
}
if len(errors) > 0 {
return fmt.Errorf("SE-enabled checks failed: %v", strings.Join(errors, "; "))
}
g.By("SE checks passed for pod " + podName)
return nil
} | kata | ||||
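The two guest checks above (the `prot_virt_guest` flag and facility bit 158 in `/proc/cpuinfo`) could also be expressed table-driven; a hedged sketch, where `execInPod` is a hypothetical wrapper around `oc exec`:

```go
checks := []struct {
	cmd    []string
	passed func(out string) bool
	desc   string
}{
	{[]string{"cat", "/sys/firmware/uv/prot_virt_guest"},
		func(out string) bool { return out == "1" }, "prot_virt_guest must be 1"},
	{[]string{"grep", "facilities", "/proc/cpuinfo"},
		func(out string) bool { return strings.Contains(out, "158") }, "facility bit 158 must be listed"},
}
for _, c := range checks {
	out, err := execInPod(oc, podName, namespace, c.cmd...) // hypothetical helper
	if err != nil || !c.passed(out) {
		e2e.Logf("SE check failed: %s (output %q, err %v)", c.desc, out, err)
	}
}
```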
function | openshift/openshift-tests-private | 371c4722-d3ef-49da-844d-807b54f19542 | deleteOperator | ['"fmt"', '"time"'] | ['SubscriptionDescription'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func deleteOperator(oc *exutil.CLI, sub SubscriptionDescription) (msg string, err error) {
//get csv from sub
csvName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", sub.subName, "-n", sub.namespace, "-o=jsonpath={.status.installedCSV}").Output()
o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("ERROR: cannot get sub %v installedCSV %v %v", sub.subName, csvName, err))
o.Expect(csvName).NotTo(o.BeEmpty(), fmt.Sprintf("installedCSV value is empty: %v", csvName))
//delete csv
msg, err = deleteResource(oc, "csv", csvName, sub.namespace, resSnoose*time.Second, 10*time.Second)
if err == nil {
//delete sub
msg, err = deleteResource(oc, "sub", sub.subName, sub.namespace, resSnoose*time.Second, 10*time.Second)
}
return msg, err
} | kata | |||
function | openshift/openshift-tests-private | 14349929-9c18-436c-8b7a-e26ee372345f | testControlPod | ['"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func testControlPod(oc *exutil.CLI, namespace, resType, resName, desiredCountJsonPath, actualCountJsonPath, podLabel string) (msg string, err error) {
// Check the resource Type for desired count by looking at the jsonpath
// Check the actual count at this jsonpath
// Wait until the actual count == desired count then set expectedPods to the actual count
// Verify count of "Running" pods with podLabel matches expectedPods
expectedPods, msg, err := checkResourceJsonpathMatch(oc, resType, resName, namespace, desiredCountJsonPath, actualCountJsonPath)
if err == nil {
if msg == "" {
return "", fmt.Errorf("%v does not match %v in %v %v %v %v", desiredCountJsonPath, actualCountJsonPath, resName, resType, msg, err)
}
msg, err = checkLabeledPodsExpectedRunning(oc, namespace, podLabel, expectedPods)
if msg == "" {
return "", fmt.Errorf("Could not find pods labeled %v %v %v", podLabel, msg, err)
}
}
return msg, err
} | kata | ||||
function | openshift/openshift-tests-private | 3469a463-9a40-4eb1-a313-650579fd2c0c | configureTrustee | ['"encoding/json"', '"fmt"', '"net/http"', '"path/filepath"', '"strings"'] | ['SubscriptionDescription'] | github.com/openshift/openshift-tests-private/test/extended/kata/kata_util.go | func configureTrustee(oc *exutil.CLI, trusteeSubscription SubscriptionDescription, testDataDir, startingTrusteeURL string) (trusteeURL string, err error) {
var (
trusteeKbsconfigTemplate = filepath.Join(testDataDir, "kbsconfig-template.yaml")
rvpsReferenceValuesCMTemplate = filepath.Join(testDataDir, "rvps-reference-values-template.yaml")
resourcePolicyCMTemplate = filepath.Join(testDataDir, "resource-policy-template.yaml")
securityPolicyCMTemplate = filepath.Join(testDataDir, "security-policy-template.json")
kbsconfigCMTemplate = filepath.Join(testDataDir, "kbs-config-cm-template.yaml")
trusteeCosignPublicKey = filepath.Join(testDataDir, "trustee-cosign-publickey.pem")
trusteeKbsPublicKey = filepath.Join(testDataDir, "kbs-auth-public-key")
)
msg, err := oc.AsAdmin().Run("create").Args("secret", "generic", "kbs-auth-public-key",
"--from-file=publicKey="+trusteeKbsPublicKey, "-n", trusteeSubscription.namespace).Output()
if err != nil {
e2e.Logf("TRUSTEE Created kbs-auth-public-key secret: %v %v", msg, err)
}
templateArgs := fmt.Sprintf("NAME=%v INSECUREHTTP=true", trusteeSubscription.namespace)
kbsConfigCMFile, _ := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f",
kbsconfigCMTemplate, "-p", templateArgs).OutputToFile(getRandomString() + "kbs-config-cm.json")
err = ensureConfigmapIsApplied(oc, trusteeSubscription.namespace, kbsConfigCMFile)
if err != nil {
return trusteeURL, err
}
e2e.Logf("TRUSTEE Created kbs-config-cm: %v", err)
templateArgs = fmt.Sprintf("NAMESPACE=%v", trusteeSubscription.namespace)
rvpsReferenceValuesCMFile, _ := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f",
rvpsReferenceValuesCMTemplate, "-p", templateArgs).OutputToFile(getRandomString() + "rvps-reference-values.json")
err = ensureConfigmapIsApplied(oc, trusteeSubscription.namespace, rvpsReferenceValuesCMFile)
if err != nil {
return trusteeURL, err
}
e2e.Logf("TRUSTEE Created rvps-reference-values: %v", err)
msg, err = oc.AsAdmin().Run("create").Args("secret", "generic", "kbsres1", "--from-literal",
"key1=res1val1", "--from-literal", "key2=res1val2", "-n", trusteeSubscription.namespace).Output()
if err != nil {
e2e.Logf("TRUSTEE Created kbres1 secret: %v %v", msg, err)
}
trusteePolicyRego := "package policy default allow = true"
resourcePolicyCMFile, _ := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f",
resourcePolicyCMTemplate, "-n", trusteeSubscription.namespace, "-p", "NAMESPACE="+trusteeSubscription.namespace,
"-p", "POLICYREGO="+trusteePolicyRego).OutputToFile(getRandomString() + "resource-policy.json")
err = ensureConfigmapIsApplied(oc, trusteeSubscription.namespace, resourcePolicyCMFile)
if err != nil {
return trusteeURL, err
}
e2e.Logf("TRUSTEE Created resource-policy cm: %v", err)
// Attestation Policy goes here
// secret security-policy DONE
msg, err = oc.AsAdmin().Run("create").Args("secret", "generic", "security-policy",
"--from-file=osc="+securityPolicyCMTemplate, "-n", trusteeSubscription.namespace).Output()
if err != nil {
e2e.Logf("TRUSTEE Created security-policy secret: %v %v", msg, err)
}
// secret cosign-public-key DONE
msg, err = oc.AsAdmin().Run("create").Args("secret", "generic", "cosign-public-key",
"--from-file=test="+trusteeCosignPublicKey, "-n", trusteeSubscription.namespace).Output()
if err != nil {
e2e.Logf("TRUSTEE Created cosign-public-key secret: %v %v", msg, err)
}
// need to ensureSecret?
// kbsconfig
kbsSecretResources := `["kbsres1","security-policy", "cosign-public-key"]`
kbsconfigFile, _ := oc.AsAdmin().Run("process").Args("--ignore-unknown-parameters=true", "-f",
trusteeKbsconfigTemplate, "-n", trusteeSubscription.namespace,
"-p", "KBSSECRETRESOURCES="+kbsSecretResources).OutputToFile(getRandomString() + "kbsconfig.json")
msg, err = oc.AsAdmin().Run("apply").Args("-f", kbsconfigFile, "-n", trusteeSubscription.namespace).Output()
e2e.Logf("TRUSTEE Applied kbsconfig %v: %v %v", kbsconfigFile, msg, err)
if startingTrusteeURL == "" { // use internal trustee
node, err := exutil.GetFirstWorkerNode(oc)
if err != nil || node == "" {
return trusteeURL, fmt.Errorf("could not get 1st worker node: %v err: %v", node, err)
}
msg, err = oc.AsAdmin().Run("get").Args("node", node, "-o=jsonpath={.status.addresses..address}").Output()
if err != nil || msg == "" {
return trusteeURL, fmt.Errorf("Could not get ip of %v: %v %v", node, msg, err)
}
nodeIP := strings.Fields(msg)[0]
nodePort, err := oc.AsAdmin().Run("get").Args("-n", trusteeSubscription.namespace,
"service", "kbs-service", "-o=jsonpath={.spec.ports..nodePort}").Output()
if err != nil {
return trusteeURL, fmt.Errorf("Could not retrieve nodePort from kbs-service: %v %v", nodePort, err)
}
trusteeURL = fmt.Sprintf("http://%v:%v", nodeIP, nodePort)
}
return trusteeURL, err
} | kata | |||
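When `startingTrusteeURL` is empty, the function derives an internal URL from the first worker's address and the `kbs-service` NodePort. A hedged follow-up sketch probing that URL; the function's import list includes `net/http`, but this exact probe is an assumption, not the suite's code.

```go
// Quick reachability probe of the derived trustee endpoint; any HTTP
// response at all shows the NodePort is routable from the test runner.
resp, err := http.Get(trusteeURL)
if err != nil {
	e2e.Logf("trustee not reachable at %v: %v", trusteeURL, err)
} else {
	resp.Body.Close()
	e2e.Logf("trustee at %v answered with status %v", trusteeURL, resp.Status)
}
```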
file | openshift/openshift-tests-private | d3eff9a7-3e23-41c9-89cc-f3bd48e43cb9 | const | github.com/openshift/openshift-tests-private/test/extended/logging/const.go | package logging
const (
// the namespace where legacy clusterlogging and clusterlogforwarder are in
loggingNS = "openshift-logging"
// the namespace where cluster-logging-operator is in
cloNS = "openshift-logging"
// the namespace where loki-operator is in
loNS = "openshift-operators-redhat"
apiPath = "/api/logs/v1/"
queryPath = "/loki/api/v1/query"
queryRangePath = "/loki/api/v1/query_range"
labelsPath = "/loki/api/v1/labels"
labelValuesPath = "/loki/api/v1/label/%s/values"
seriesPath = "/loki/api/v1/series"
tailPath = "/loki/api/v1/tail"
rulesPath = "/loki/api/v1/rules"
minioNS = "minio-aosqe"
minioSecret = "minio-creds"
javaExc = `com.google.devtools.search.cloud.feeder.MakeLog: RuntimeException: Run from this message!
at com.my.app.Object.do$a1(MakeLog.java:50)
at java.lang.Thing.call(Thing.java:10)
at com.my.app.Object.help(MakeLog.java:40)
at sun.javax.API.method(API.java:100)
at com.jetty.Framework.main(MakeLog.java:30)
`
complexJavaExc = `javax.servlet.ServletException: Something bad happened
at com.example.myproject.OpenSessionInViewFilter.doFilter(OpenSessionInViewFilter.java:60)
at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1157)
at com.example.myproject.ExceptionHandlerFilter.doFilter(ExceptionHandlerFilter.java:28)
at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1157)
at com.example.myproject.OutputBufferFilter.doFilter(OutputBufferFilter.java:33)
at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1157)
at org.mortbay.jetty.servlet.ServletHandler.handle(ServletHandler.java:388)
at org.mortbay.jetty.security.SecurityHandler.handle(SecurityHandler.java:216)
at org.mortbay.jetty.servlet.SessionHandler.handle(SessionHandler.java:182)
at org.mortbay.jetty.handler.ContextHandler.handle(ContextHandler.java:765)
at org.mortbay.jetty.webapp.WebAppContext.handle(WebAppContext.java:418)
at org.mortbay.jetty.handler.HandlerWrapper.handle(HandlerWrapper.java:152)
at org.mortbay.jetty.Server.handle(Server.java:326)
at org.mortbay.jetty.HttpConnection.handleRequest(HttpConnection.java:542)
at org.mortbay.jetty.HttpConnection$RequestHandler.content(HttpConnection.java:943)
at org.mortbay.jetty.HttpParser.parseNext(HttpParser.java:756)
at org.mortbay.jetty.HttpParser.parseAvailable(HttpParser.java:218)
at org.mortbay.jetty.HttpConnection.handle(HttpConnection.java:404)
at org.mortbay.jetty.bio.SocketConnector$Connection.run(SocketConnector.java:228)
at org.mortbay.thread.QueuedThreadPool$PoolThread.run(QueuedThreadPool.java:582)
Caused by: com.example.myproject.MyProjectServletException
at com.example.myproject.MyServlet.doPost(MyServlet.java:169)
at javax.servlet.http.HttpServlet.service(HttpServlet.java:727)
at javax.servlet.http.HttpServlet.service(HttpServlet.java:820)
at org.mortbay.jetty.servlet.ServletHolder.handle(ServletHolder.java:511)
at org.mortbay.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1166)
at com.example.myproject.OpenSessionInViewFilter.doFilter(OpenSessionInViewFilter.java:30)
... 27 common frames omitted
`
nestedJavaExc = `java.lang.RuntimeException: javax.mail.SendFailedException: Invalid Addresses;
nested exception is:
com.sun.mail.smtp.SMTPAddressFailedException: 550 5.7.1 <[REDACTED_EMAIL_ADDRESS]>... Relaying denied
at com.nethunt.crm.api.server.adminsync.AutomaticEmailFacade.sendWithSmtp(AutomaticEmailFacade.java:236)
at com.nethunt.crm.api.server.adminsync.AutomaticEmailFacade.sendSingleEmail(AutomaticEmailFacade.java:285)
at com.nethunt.crm.api.server.adminsync.AutomaticEmailFacade.lambda$sendSingleEmail$3(AutomaticEmailFacade.java:254)
at java.util.Optional.ifPresent(Optional.java:159)
at com.nethunt.crm.api.server.adminsync.AutomaticEmailFacade.sendSingleEmail(AutomaticEmailFacade.java:253)
at com.nethunt.crm.api.server.adminsync.AutomaticEmailFacade.sendSingleEmail(AutomaticEmailFacade.java:249)
at com.nethunt.crm.api.email.EmailSender.lambda$notifyPerson$0(EmailSender.java:80)
at com.nethunt.crm.api.util.ManagedExecutor.lambda$execute$0(ManagedExecutor.java:36)
at com.nethunt.crm.api.util.RequestContextActivator.lambda$withRequestContext$0(RequestContextActivator.java:36)
at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.base/java.lang.Thread.run(Thread.java:748)
Caused by: javax.mail.SendFailedException: Invalid Addresses;
nested exception is:
com.sun.mail.smtp.SMTPAddressFailedException: 550 5.7.1 <[REDACTED_EMAIL_ADDRESS]>... Relaying denied
at com.sun.mail.smtp.SMTPTransport.rcptTo(SMTPTransport.java:2064)
at com.sun.mail.smtp.SMTPTransport.sendMessage(SMTPTransport.java:1286)
at com.nethunt.crm.api.server.adminsync.AutomaticEmailFacade.sendWithSmtp(AutomaticEmailFacade.java:229)
... 12 more
Caused by: com.sun.mail.smtp.SMTPAddressFailedException: 550 5.7.1 <[REDACTED_EMAIL_ADDRESS]>... Relaying denied
`
nodeJsExc = `ReferenceError: myArray is not defined
at next (/app/node_modules/express/lib/router/index.js:256:14)
at /app/node_modules/express/lib/router/index.js:615:15
at next (/app/node_modules/express/lib/router/index.js:271:10)
at Function.process_params (/app/node_modules/express/lib/router/index.js:330:12)
at /app/node_modules/express/lib/router/index.js:277:22
at Layer.handle [as handle_request] (/app/node_modules/express/lib/router/layer.js:95:5)
at Route.dispatch (/app/node_modules/express/lib/router/route.js:112:3)
at next (/app/node_modules/express/lib/router/route.js:131:13)
at Layer.handle [as handle_request] (/app/node_modules/express/lib/router/layer.js:95:5)
at /app/app.js:52:3
`
clientJsExc = `Error
at bls (<anonymous>:3:9)
at <anonymous>:6:4
at a_function_name
at Object.InjectedScript._evaluateOn (http://<anonymous>/file.js?foo=bar:875:140)
at Object.InjectedScript.evaluate (<anonymous>)
`
v8JsExc = `V8 errors stack trace
eval at Foo.a (eval at Bar.z (myscript.js:10:3))
at new Contructor.Name (native)
at new FunctionName (unknown location)
at Type.functionName [as methodName] (file(copy).js?query='yes':12:9)
at functionName [as methodName] (native)
at Type.main(sample(copy).js:6:4)
`
pythonExc = `Traceback (most recent call last):
File "/base/data/home/runtimes/python27/python27_lib/versions/third_party/webapp2-2.5.2/webapp2.py", line 1535, in __call__
rv = self.handle_exception(request, response, e)
File "/base/data/home/apps/s~nearfieldspy/1.378705245900539993/nearfieldspy.py", line 17, in start
return get()
File "/base/data/home/apps/s~nearfieldspy/1.378705245900539993/nearfieldspy.py", line 5, in get
raise Exception('spam', 'eggs')
Exception: ('spam', 'eggs')
`
phpExc = `exception 'Exception' with message 'Custom exception' in /home/joe/work/test-php/test.php:5
Stack trace:
#0 /home/joe/work/test-php/test.php(9): func1()
#1 /home/joe/work/test-php/test.php(13): func2()
#2 {main}
`
phpOnGaeExc = `PHP Fatal error: Uncaught exception 'Exception' with message 'message' in /base/data/home/apps/s~crash-example-php/1.388306779641080894/errors.php:60
Stack trace:
#0 [internal function]: ErrorEntryGenerator::{closure}()
#1 /base/data/home/apps/s~crash-example-php/1.388306779641080894/errors.php(20): call_user_func_array(Object(Closure), Array)
#2 /base/data/home/apps/s~crash-example-php/1.388306779641080894/index.php(36): ErrorEntry->__call('raise', Array)
#3 /base/data/home/apps/s~crash-example-php/1.388306779641080894/index.php(36): ErrorEntry->raise()
#4 {main}
thrown in /base/data/home/apps/s~crash-example-php/1.388306779641080894/errors.php on line 60
`
goExc = `panic: my panic
goroutine 4 [running]:
panic(0x45cb40, 0x47ad70)
/usr/local/go/src/runtime/panic.go:542 +0x46c fp=0xc42003f7b8 sp=0xc42003f710 pc=0x422f7c
main.main.func1(0xc420024120)
foo.go:6 +0x39 fp=0xc42003f7d8 sp=0xc42003f7b8 pc=0x451339
runtime.goexit()
/usr/local/go/src/runtime/asm_amd64.s:2337 +0x1 fp=0xc42003f7e0 sp=0xc42003f7d8 pc=0x44b4d1
created by main.main
foo.go:5 +0x58
goroutine 1 [chan receive]:
runtime.gopark(0x4739b8, 0xc420024178, 0x46fcd7, 0xc, 0xc420028e17, 0x3)
/usr/local/go/src/runtime/proc.go:280 +0x12c fp=0xc420053e30 sp=0xc420053e00 pc=0x42503c
runtime.goparkunlock(0xc420024178, 0x46fcd7, 0xc, 0x1000f010040c217, 0x3)
/usr/local/go/src/runtime/proc.go:286 +0x5e fp=0xc420053e70 sp=0xc420053e30 pc=0x42512e
runtime.chanrecv(0xc420024120, 0x0, 0xc420053f01, 0x4512d8)
/usr/local/go/src/runtime/chan.go:506 +0x304 fp=0xc420053f20 sp=0xc420053e70 pc=0x4046b4
runtime.chanrecv1(0xc420024120, 0x0)
/usr/local/go/src/runtime/chan.go:388 +0x2b fp=0xc420053f50 sp=0xc420053f20 pc=0x40439b
main.main()
foo.go:9 +0x6f fp=0xc420053f80 sp=0xc420053f50 pc=0x4512ef
runtime.main()
/usr/local/go/src/runtime/proc.go:185 +0x20d fp=0xc420053fe0 sp=0xc420053f80 pc=0x424bad
runtime.goexit()
/usr/local/go/src/runtime/asm_amd64.s:2337 +0x1 fp=0xc420053fe8 sp=0xc420053fe0 pc=0x44b4d1
goroutine 2 [force gc (idle)]:
runtime.gopark(0x4739b8, 0x4ad720, 0x47001e, 0xf, 0x14, 0x1)
/usr/local/go/src/runtime/proc.go:280 +0x12c fp=0xc42003e768 sp=0xc42003e738 pc=0x42503c
runtime.goparkunlock(0x4ad720, 0x47001e, 0xf, 0xc420000114, 0x1)
/usr/local/go/src/runtime/proc.go:286 +0x5e fp=0xc42003e7a8 sp=0xc42003e768 pc=0x42512e
runtime.forcegchelper()
/usr/local/go/src/runtime/proc.go:238 +0xcc fp=0xc42003e7e0 sp=0xc42003e7a8 pc=0x424e5c
runtime.goexit()
/usr/local/go/src/runtime/asm_amd64.s:2337 +0x1 fp=0xc42003e7e8 sp=0xc42003e7e0 pc=0x44b4d1
created by runtime.init.4
/usr/local/go/src/runtime/proc.go:227 +0x35
goroutine 3 [GC sweep wait]:
runtime.gopark(0x4739b8, 0x4ad7e0, 0x46fdd2, 0xd, 0x419914, 0x1)
/usr/local/go/src/runtime/proc.go:280 +0x12c fp=0xc42003ef60 sp=0xc42003ef30 pc=0x42503c
runtime.goparkunlock(0x4ad7e0, 0x46fdd2, 0xd, 0x14, 0x1)
/usr/local/go/src/runtime/proc.go:286 +0x5e fp=0xc42003efa0 sp=0xc42003ef60 pc=0x42512e
runtime.bgsweep(0xc42001e150)
/usr/local/go/src/runtime/mgcsweep.go:52 +0xa3 fp=0xc42003efd8 sp=0xc42003efa0 pc=0x419973
runtime.goexit()
/usr/local/go/src/runtime/asm_amd64.s:2337 +0x1 fp=0xc42003efe0 sp=0xc42003efd8 pc=0x44b4d1
created by runtime.gcenable
/usr/local/go/src/runtime/mgc.go:216 +0x58
`
goOnGaeExc = `panic: runtime error: index out of range
goroutine 12 [running]:
main88989.memoryAccessException()
crash_example_go.go:58 +0x12a
main88989.handler(0x2afb7042a408, 0xc01042f880, 0xc0104d3450)
crash_example_go.go:36 +0x7ec
net/http.HandlerFunc.ServeHTTP(0x13e5128, 0x2afb7042a408, 0xc01042f880, 0xc0104d3450)
go/src/net/http/server.go:1265 +0x56
net/http.(*ServeMux).ServeHTTP(0xc01045cab0, 0x2afb7042a408, 0xc01042f880, 0xc0104d3450)
go/src/net/http/server.go:1541 +0x1b4
appengine_internal.executeRequestSafely(0xc01042f880, 0xc0104d3450)
go/src/appengine_internal/api_prod.go:288 +0xb7
appengine_internal.(*server).HandleRequest(0x15819b0, 0xc010401560, 0xc0104c8180, 0xc010431380, 0x0, 0x0)
go/src/appengine_internal/api_prod.go:222 +0x102b
reflect.Value.call(0x1243fe0, 0x15819b0, 0x113, 0x12c8a20, 0x4, 0xc010485f78, 0x3, 0x3, 0x0, 0x0, ...)
/tmp/appengine/go/src/reflect/value.go:419 +0x10fd
reflect.Value.Call(0x1243fe0, 0x15819b0, 0x113, 0xc010485f78, 0x3, 0x3, 0x0, 0x0, 0x0)
/tmp/ap
`
goSignalExc = `panic: runtime error: invalid memory address or nil pointer dereference
[signal SIGSEGV: segmentation violation code=0x1 addr=0x0 pc=0x7fd34f]
goroutine 5 [running]:
panics.nilPtrDereference()
panics/panics.go:33 +0x1f
panics.Wait()
panics/panics.go:16 +0x3b
created by main.main
server.go:20 +0x91
`
goHTTP = `http: panic serving [::1]:54143: test panic
goroutine 24 [running]:
net/http.(*conn).serve.func1(0xc00007eaa0)
/usr/local/go/src/net/http/server.go:1746 +0xd0
panic(0x12472a0, 0x12ece10)
/usr/local/go/src/runtime/panic.go:513 +0x1b9
main.doPanic(0x12f0ea0, 0xc00010e1c0, 0xc000104400)
/Users/ingvar/src/go/src/httppanic.go:8 +0x39
net/http.HandlerFunc.ServeHTTP(0x12be2e8, 0x12f0ea0, 0xc00010e1c0, 0xc000104400)
/usr/local/go/src/net/http/server.go:1964 +0x44
net/http.(*ServeMux).ServeHTTP(0x14a17a0, 0x12f0ea0, 0xc00010e1c0, 0xc000104400)
/usr/local/go/src/net/http/server.go:2361 +0x127
net/http.serverHandler.ServeHTTP(0xc000085040, 0x12f0ea0, 0xc00010e1c0, 0xc000104400)
/usr/local/go/src/net/http/server.go:2741 +0xab
net/http.(*conn).serve(0xc00007eaa0, 0x12f10a0, 0xc00008a780)
/usr/local/go/src/net/http/server.go:1847 +0x646
created by net/http.(*Server).Serve
/usr/local/go/src/net/http/server.go:2851 +0x2f5
`
rubyExc = `NoMethodError (undefined method ` + "`" + `resursivewordload' for #<BooksController:0x007f8dd9a0c738>):
app/controllers/books_controller.rb:69:in ` + "`" + `recursivewordload'
app/controllers/books_controller.rb:75:in ` + "`" + `loadword'
app/controllers/books_controller.rb:79:in ` + "`" + `loadline'
app/controllers/books_controller.rb:83:in ` + "`" + `loadparagraph'
app/controllers/books_controller.rb:87:in ` + "`" + `loadpage'
app/controllers/books_controller.rb:91:in ` + "`" + `onload'
app/controllers/books_controller.rb:95:in ` + "`" + `loadrecursive'
app/controllers/books_controller.rb:99:in ` + "`" + `requestload'
app/controllers/books_controller.rb:118:in ` + "`" + `generror'
config/error_reporting_logger.rb:62:in ` + "`" + `tagged'
`
// Please be careful when editing this file: the first line of railsExc below must keep its 2 leading blank spaces (before "ActionController::RoutingError ...").
railsExc = ` ActionController::RoutingError (No route matches [GET] "/settings"):
actionpack (5.1.4) lib/action_dispatch/middleware/debug_exceptions.rb:63:in ` + "`" + `call'
actionpack (5.1.4) lib/action_dispatch/middleware/show_exceptions.rb:31:in ` + "`" + `call'
railties (5.1.4) lib/rails/rack/logger.rb:36:in ` + "`" + `call_app'
railties (5.1.4) lib/rails/rack/logger.rb:24:in ` + "`" + `block in call'
activesupport (5.1.4) lib/active_support/tagged_logging.rb:69:in ` + "`" + `block in tagged'
activesupport (5.1.4) lib/active_support/tagged_logging.rb:26:in ` + "`" + `tagged'
activesupport (5.1.4) lib/active_support/tagged_logging.rb:69:in ` + "`" + `tagged'
railties (5.1.4) lib/rails/rack/logger.rb:24:in ` + "`" + `call'
actionpack (5.1.4) lib/action_dispatch/middleware/remote_ip.rb:79:in ` + "`" + `call'
actionpack (5.1.4) lib/action_dispatch/middleware/request_id.rb:25:in ` + "`" + `call'
rack (2.0.3) lib/rack/method_override.rb:22:in ` + "`" + `call'
rack (2.0.3) lib/rack/runtime.rb:22:in ` + "`" + `call'
activesupport (5.1.4) lib/active_support/cache/strategy/local_cache_middleware.rb:27:in ` + "`" + `call'
actionpack (5.1.4) lib/action_dispatch/middleware/executor.rb:12:in ` + "`" + `call'
rack (2.0.3) lib/rack/sendfile.rb:111:in ` + "`" + `call'
railties (5.1.4) lib/rails/engine.rb:522:in ` + "`" + `call'
puma (3.10.0) lib/puma/configuration.rb:225:in ` + "`" + `call'
puma (3.10.0) lib/puma/server.rb:605:in ` + "`" + `handle_request'
puma (3.10.0) lib/puma/server.rb:437:in ` + "`" + `process_client'
puma (3.10.0) lib/puma/server.rb:301:in ` + "`" + `block in run'
puma (3.10.0) lib/puma/thread_pool.rb:120:in ` + "`" + `block in spawn_thread'
`
csharpExc = `System.Collections.Generic.KeyNotFoundException: The given key was not present in the dictionary.
at System.Collections.Generic.Dictionary` + "`" + `2[System.String,System.Collections.Generic.Dictionary` + "`" + `2[System.Int32,System.Double]].get_Item (System.String key) [0x00000] in <filename unknown>:0
at File3.Consolidator_Class.Function5 (System.Collections.Generic.Dictionary` + "`" + `2 names, System.Text.StringBuilder param_4) [0x00007] in /usr/local/google/home/Csharp/another file.csharp:9
at File3.Consolidator_Class.Function4 (System.Text.StringBuilder param_4, System.Double[,,] array) [0x00013] in /usr/local/google/home/Csharp/another file.csharp:23
at File3.Consolidator_Class.Function3 (Int32 param_3) [0x0000f] in /usr/local/google/home/Csharp/another file.csharp:27
at File3.Consolidator_Class.Function3 (System.Text.StringBuilder param_3) [0x00007] in /usr/local/google/home/Csharp/another file.csharp:32
at File2.Processor.Function2 (System.Int32& param_2, System.Collections.Generic.Stack` + "`" + `1& numbers) [0x00003] in /usr/local/google/home/Csharp/File2.csharp:19
at File2.Processor.Random2 () [0x00037] in /usr/local/google/home/Csharp/File2.csharp:28
at File2.Processor.Function1 (Int32 param_1, System.Collections.Generic.Dictionary` + "`" + `2 map) [0x00007] in /usr/local/google/home/Csharp/File2.csharp:34
at Main.Welcome+<Main>c__AnonStorey0.<>m__0 () [0x00006] in /usr/local/google/home/Csharp/hello.csharp:48
at System.Threading.Thread.StartInternal () [0x00000] in <filename unknown>:0
`
csharpNestedExc = `System.InvalidOperationException: This is the outer exception ---> System.InvalidOperationException: This is the inner exception
at ExampleApp.NestedExceptionExample.LowestLevelMethod() in c:/ExampleApp/ExampleApp/NestedExceptionExample.cs:line 33
at ExampleApp.NestedExceptionExample.ThirdLevelMethod() in c:/ExampleApp/ExampleApp/NestedExceptionExample.cs:line 28
at ExampleApp.NestedExceptionExample.SecondLevelMethod() in c:/ExampleApp/ExampleApp/NestedExceptionExample.cs:line 18
--- End of inner exception stack trace ---
at ExampleApp.NestedExceptionExample.SecondLevelMethod() in c:/ExampleApp/ExampleApp/NestedExceptionExample.cs:line 22
at ExampleApp.NestedExceptionExample.TopLevelMethod() in c:/ExampleApp/ExampleApp/NestedExceptionExample.cs:line 11
at ExampleApp.Program.Main(String[] args) in c:/ExampleApp/ExampleApp/Program.cs:line 11
`
csharpAsyncExc = `System.InvalidOperationException: This is an exception
at ExampleApp2.AsyncExceptionExample.LowestLevelMethod() in c:/ExampleApp/ExampleApp/AsyncExceptionExample.cs:line 36
at ExampleApp2.AsyncExceptionExample.<ThirdLevelMethod>d__2.MoveNext() in c:/ExampleApp/ExampleApp/AsyncExceptionExample.cs:line 31
--- End of stack trace from previous location where exception was thrown ---
at System.Runtime.CompilerServices.TaskAwaiter.ThrowForNonSuccess(Task task)
at System.Runtime.CompilerServices.TaskAwaiter.HandleNonSuccessAndDebuggerNotification(Task task)
at System.Runtime.CompilerServices.TaskAwaiter.GetResult()
at ExampleApp2.AsyncExceptionExample.<SecondLevelMethod>d__1.MoveNext() in c:/ExampleApp/ExampleApp/AsyncExceptionExample.cs:line 25
--- End of stack trace from previous location where exception was thrown ---
at System.Runtime.CompilerServices.TaskAwaiter.ThrowForNonSuccess(Task task)
at System.Runtime.CompilerServices.TaskAwaiter.HandleNonSuccessAndDebuggerNotification(Task task)
at System.Runtime.CompilerServices.TaskAwaiter.GetResult()
at ExampleApp2.AsyncExceptionExample.<TopLevelMethod>d__0.MoveNext() in c:/ExampleApp/ExampleApp/AsyncExceptionExample.cs:line 14
`
dartErr = `Unhandled exception:
Instance of 'MyError'
#0 main.<anonymous closure> (file:///path/to/code/dartFile.dart:15:20)
#1 printError (file:///path/to/code/dartFile.dart:37:13)
#2 main (file:///path/to/code/dartFile.dart:15:3)
#3 _startIsolate.<anonymous closure> (dart:isolate-patch/isolate_patch.dart:265)
#4 _RawReceivePortImpl._handleMessage (dart:isolate-patch/isolate_patch.dart:151)
`
dartExc = `Unhandled exception:
Exception: exception message
#0 main.<anonymous closure> (file:///path/to/code/dartFile.dart:17:20)
#1 printError (file:///path/to/code/dartFile.dart:37:13)
#2 main (file:///path/to/code/dartFile.dart:17:3)
#3 _startIsolate.<anonymous closure> (dart:isolate-patch/isolate_patch.dart:265)
#4 _RawReceivePortImpl._handleMessage (dart:isolate-patch/isolate_patch.dart:151)
`
dartAsyncErr = `Unhandled exception:
Bad state: oops
#0 handleFailure (file:///test/example/http/handling_an_httprequest_error.dart:16:3)
#1 main (file:///test/example/http/handling_an_httprequest_error.dart:24:5)
<asynchronous suspension>
#2 _startIsolate.<anonymous closure> (dart:isolate-patch/isolate_patch.dart:265)
#3 _RawReceivePortImpl._handleMessage (dart:isolate-patch/isolate_patch.dart:151)
`
dartDivideByZeroErr = `Unhandled exception:
IntegerDivisionByZeroException
#0 main.<anonymous closure> (file:///path/to/code/dartFile.dart:27:20)
#1 printError (file:///path/to/code/dartFile.dart:42:13)
#2 main (file:///path/to/code/dartFile.dart:27:3)
#3 _startIsolate.<anonymous closure> (dart:isolate-patch/isolate_patch.dart:265)
#4 _RawReceivePortImpl._handleMessage (dart:isolate-patch/isolate_patch.dart:151)
`
dartArgumentErr = `Unhandled exception:
Invalid argument(s): invalid argument
#0 main.<anonymous closure> (file:///path/to/code/dartFile.dart:23:20)
#1 printError (file:///path/to/code/dartFile.dart:42:13)
#2 main (file:///path/to/code/dartFile.dart:23:3)
#3 _startIsolate.<anonymous closure> (dart:isolate-patch/isolate_patch.dart:265)
#4 _RawReceivePortImpl._handleMessage (dart:isolate-patch/isolate_patch.dart:151)
`
dartRangeErr = `Unhandled exception:
RangeError (index): Invalid value: Valid value range is empty: 1
#0 List.[] (dart:core-patch/growable_array.dart:151)
#1 main.<anonymous closure> (file:///path/to/code/dartFile.dart:31:23)
#2 printError (file:///path/to/code/dartFile.dart:42:13)
#3 main (file:///path/to/code/dartFile.dart:29:3)
#4 _startIsolate.<anonymous closure> (dart:isolate-patch/isolate_patch.dart:265)
#5 _RawReceivePortImpl._handleMessage (dart:isolate-patch/isolate_patch.dart:151)
`
dartAssertionErr = `Unhandled exception:
Assertion failed
#0 main.<anonymous closure> (file:///path/to/code/dartFile.dart:9:20)
#1 printError (file:///path/to/code/dartFile.dart:36:13)
#2 main (file:///path/to/code/dartFile.dart:9:3)
#3 _startIsolate.<anonymous closure> (dart:isolate-patch/isolate_patch.dart:265)
#4 _RawReceivePortImpl._handleMessage (dart:isolate-patch/isolate_patch.dart:151)
`
dartAbstractClassErr = `Unhandled exception:
Cannot instantiate abstract class LNClassName: _url 'null' line null
#0 main.<anonymous closure> (file:///path/to/code/dartFile.dart:12:20)
#1 printError (file:///path/to/code/dartFile.dart:36:13)
#2 main (file:///path/to/code/dartFile.dart:12:3)
#3 _startIsolate.<anonymous closure> (dart:isolate-patch/isolate_patch.dart:265)
#4 _RawReceivePortImpl._handleMessage (dart:isolate-patch/isolate_patch.dart:151)
`
dartReadStaticErr = `Unhandled exception:
Reading static variable 'variable' during its initialization
#0 main.<anonymous closure> (file:///path/to/code/dartFile.dart:28:20)
#1 printError (file:///path/to/code/dartFile.dart:43:13)
#2 main (file:///path/to/code/dartFile.dart:28:3)
#3 _startIsolate.<anonymous closure> (dart:isolate-patch/isolate_patch.dart:265)
#4 _RawReceivePortImpl._handleMessage (dart:isolate-patch/isolate_patch.dart:151)
`
dartUnimplementedErr = `Unhandled exception:
UnimplementedError: unimplemented
#0 main.<anonymous closure> (file:///path/to/code/dartFile.dart:38:20)
#1 printError (file:///path/to/code/dartFile.dart:61:13)
#2 main (file:///path/to/code/dartFile.dart:38:3)
#3 _startIsolate.<anonymous closure> (dart:isolate-patch/isolate_patch.dart:265)
#4 _RawReceivePortImpl._handleMessage (dart:isolate-patch/isolate_patch.dart:151)
`
dartUnsupportedErr = `Unhandled exception:
Unsupported operation: unsupported
#0 main.<anonymous closure> (file:///path/to/code/dartFile.dart:36:20)
#1 printError (file:///path/to/code/dartFile.dart:61:13)
#2 main (file:///path/to/code/dartFile.dart:36:3)
#3 _startIsolate.<anonymous closure> (dart:isolate-patch/isolate_patch.dart:265)
#4 _RawReceivePortImpl._handleMessage (dart:isolate-patch/isolate_patch.dart:151)
`
dartConcurrentModificationErr = `Unhandled exception:
Concurrent modification during iteration.
#0 main.<anonymous closure> (file:///path/to/code/dartFile.dart:35:20)
#1 printError (file:///path/to/code/dartFile.dart:61:13)
#2 main (file:///path/to/code/dartFile.dart:35:3)
#3 _startIsolate.<anonymous closure> (dart:isolate-patch/isolate_patch.dart:265)
#4 _RawReceivePortImpl._handleMessage (dart:isolate-patch/isolate_patch.dart:151)
`
dartOOMErr = `Unhandled exception:
Out of Memory
#0 main.<anonymous closure> (file:///path/to/code/dartFile.dart:34:20)
#1 printError (file:///path/to/code/dartFile.dart:61:13)
#2 main (file:///path/to/code/dartFile.dart:34:3)
#3 _startIsolate.<anonymous closure> (dart:isolate-patch/isolate_patch.dart:265)
#4 _RawReceivePortImpl._handleMessage (dart:isolate-patch/isolate_patch.dart:151)
`
dartStackOverflowErr = `Unhandled exception:
Stack Overflow
#0 main.<anonymous closure> (file:///path/to/code/dartFile.dart:33:20)
#1 printError (file:///path/to/code/dartFile.dart:61:13)
#2 main (file:///path/to/code/dartFile.dart:33:3)
#3 _startIsolate.<anonymous closure> (dart:isolate-patch/isolate_patch.dart:265)
#4 _RawReceivePortImpl._handleMessage (dart:isolate-patch/isolate_patch.dart:151)
`
dartFallthroughErr = `Unhandled exception:
'null': Switch case fall-through at line null.
#0 main.<anonymous closure> (file:///path/to/code/dartFile.dart:39:20)
#1 printError (file:///path/to/code/dartFile.dart:51:13)
#2 main (file:///path/to/code/dartFile.dart:39:3)
#3 _startIsolate.<anonymous closure> (dart:isolate-patch/isolate_patch.dart:265)
#4 _RawReceivePortImpl._handleMessage (dart:isolate-patch/isolate_patch.dart:151)
`
dartTypeErr = `Unhandled exception:
'file:///path/to/code/dartFile.dart': malformed type: line 7 pos 24: cannot resolve class 'NoType' from '::'
printError( () { new NoType(); } );
^
#0 _TypeError._throwNew (dart:core-patch/errors_patch.dart:82)
#1 main.<anonymous closure> (file:///path/to/code/dartFile.dart:7:24)
#2 printError (file:///path/to/code/dartFile.dart:36:13)
#3 main (file:///path/to/code/dartFile.dart:7:3)
#4 _startIsolate.<anonymous closure> (dart:isolate-patch/isolate_patch.dart:265)
#5 _RawReceivePortImpl._handleMessage (dart:isolate-patch/isolate_patch.dart:151)
`
dartFormatErr = `Unhandled exception:
FormatException: format exception
#0 main.<anonymous closure> (file:///path/to/code/dartFile.dart:25:20)
#1 printError (file:///path/to/code/dartFile.dart:42:13)
#2 main (file:///path/to/code/dartFile.dart:25:3)
#3 _startIsolate.<anonymous closure> (dart:isolate-patch/isolate_patch.dart:265)
#4 _RawReceivePortImpl._handleMessage (dart:isolate-patch/isolate_patch.dart:151)
`
dartFormatWithCodeErr = `Unhandled exception:
FormatException: Invalid base64 data (at line 3, character 8)
this is not valid
^
#0 main.<anonymous closure> (file:///path/to/code/dartFile.dart:24:20)
#1 printError (file:///path/to/code/dartFile.dart:42:13)
#2 main (file:///path/to/code/dartFile.dart:24:3)
#3 _startIsolate.<anonymous closure> (dart:isolate-patch/isolate_patch.dart:265)
#4 _RawReceivePortImpl._handleMessage (dart:isolate-patch/isolate_patch.dart:151)
`
dartNoMethodErr = `Unhandled exception:
NoSuchMethodError: No constructor 'TypeError' with matching arguments declared in class 'TypeError'.
Receiver: Type: class 'TypeError'
Tried calling: new TypeError("Invalid base64 data", "invalid", 36)
Found: new TypeError()
#0 NoSuchMethodError._throwNew (dart:core-patch/errors_patch.dart:196)
#1 main.<anonymous closure> (file:///path/to/code/dartFile.dart:8:39)
#2 printError (file:///path/to/code/dartFile.dart:36:13)
#3 main (file:///path/to/code/dartFile.dart:8:3)
#4 _startIsolate.<anonymous closure> (dart:isolate-patch/isolate_patch.dart:265)
#5 _RawReceivePortImpl._handleMessage (dart:isolate-patch/isolate_patch.dart:151)
`
dartNoMethodGlobalErr = `Unhandled exception:
NoSuchMethodError: No top-level method 'noMethod' declared.
Receiver: top-level
Tried calling: noMethod()
#0 NoSuchMethodError._throwNew (dart:core-patch/errors_patch.dart:196)
#1 main.<anonymous closure> (file:///path/to/code/dartFile.dart:10:20)
#2 printError (file:///path/to/code/dartFile.dart:36:13)
#3 main (file:///path/to/code/dartFile.dart:10:3)
#4 _startIsolate.<anonymous closure> (dart:isolate-patch/isolate_patch.dart:265)
#5 _RawReceivePortImpl._handleMessage (dart:isolate-patch/isolate_patch.dart:151)
`
)
| package logging | |||||
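The constants above read as fixtures for exercising multiline exception detection in collected logs. For illustration only (an assumption, not the suite's actual detector), a minimal heuristic that groups stack-trace continuation lines into one record:

```go
package logging // illustrative placement

import "strings"

// groupMultiline folds flat log lines into records: a line starting with
// whitespace, "Caused by:", or "..." continues the previous record. A
// rough heuristic; real collectors use language-aware multiline rules.
func groupMultiline(lines []string) [][]string {
	var records [][]string
	for _, line := range lines {
		cont := strings.HasPrefix(line, " ") || strings.HasPrefix(line, "\t") ||
			strings.HasPrefix(line, "Caused by:") || strings.HasPrefix(line, "...")
		if cont && len(records) > 0 {
			records[len(records)-1] = append(records[len(records)-1], line)
		} else {
			records = append(records, []string{line})
		}
	}
	return records
}
```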
file | openshift/openshift-tests-private | f16aa807-473d-4c16-bfbd-f2eabbb961df | elasticsearch_utils | import (
"context"
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
) | github.com/openshift/openshift-tests-private/test/extended/logging/elasticsearch_utils.go | package logging
import (
"context"
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
type externalES struct {
namespace string
version string // support 6 and 7
serverName string // ES cluster name, configmap/sa/deploy/svc name
httpSSL bool // `true` means enable `xpack.security.http.ssl`
clientAuth bool // `true` means `xpack.security.http.ssl.client_authentication: required`; can only be set to `true` when httpSSL is `true`
clientPrivateKeyPassphrase string // only works when clientAuth is true
userAuth bool // `true` means enable user auth
username string // shouldn't be empty when `userAuth: true`
password string // shouldn't be empty when `userAuth: true`
secretName string // the name of the secret for the collector to use; it shouldn't be empty when `httpSSL: true` or `userAuth: true`
loggingNS string // the namespace where the collector pods are deployed
}
func (es externalES) createPipelineSecret(oc *exutil.CLI, keysPath string) {
// create pipeline secret if needed
cmd := []string{"secret", "generic", es.secretName, "-n", es.loggingNS}
if es.clientAuth {
cmd = append(cmd, "--from-file=tls.key="+keysPath+"/client.key", "--from-file=tls.crt="+keysPath+"/client.crt", "--from-file=ca-bundle.crt="+keysPath+"/ca.crt")
if es.clientPrivateKeyPassphrase != "" {
cmd = append(cmd, "--from-literal=passphrase="+es.clientPrivateKeyPassphrase)
}
} else if es.httpSSL && !es.clientAuth {
cmd = append(cmd, "--from-file=ca-bundle.crt="+keysPath+"/ca.crt")
}
if es.userAuth {
cmd = append(cmd, "--from-literal=username="+es.username, "--from-literal=password="+es.password)
}
err := oc.AsAdmin().WithoutNamespace().Run("create").Args(cmd...).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
resource{"secret", es.secretName, es.loggingNS}.WaitForResourceToAppear(oc)
}
func (es externalES) deploy(oc *exutil.CLI) {
// create SA
sa := resource{"serviceaccount", es.serverName, es.namespace}
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("serviceaccount", sa.name, "-n", sa.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
sa.WaitForResourceToAppear(oc)
if es.userAuth {
o.Expect(es.username).NotTo(o.BeEmpty(), "Please provide username!")
o.Expect(es.password).NotTo(o.BeEmpty(), "Please provide password!")
}
if es.httpSSL || es.clientAuth || es.userAuth {
o.Expect(es.secretName).NotTo(o.BeEmpty(), "Please provide pipeline secret name!")
// create a temporary directory
baseDir := exutil.FixturePath("testdata", "logging")
keysPath := filepath.Join(baseDir, "temp"+getRandomString())
defer exec.Command("rm", "-r", keysPath).Output()
err = os.MkdirAll(keysPath, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
cert := certsConf{es.serverName, es.namespace, es.clientPrivateKeyPassphrase}
cert.generateCerts(oc, keysPath)
// create secret for ES if needed
if es.httpSSL || es.clientAuth {
r := resource{"secret", es.serverName, es.namespace}
err = oc.WithoutNamespace().Run("create").Args("secret", "generic", "-n", r.namespace, r.name, "--from-file=elasticsearch.key="+keysPath+"/server.key", "--from-file=elasticsearch.crt="+keysPath+"/server.crt", "--from-file=admin-ca="+keysPath+"/ca.crt").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
r.WaitForResourceToAppear(oc)
}
// create pipeline secret in logging project
es.createPipelineSecret(oc, keysPath)
}
// get file path per the configurations
filePath := []string{"testdata", "logging", "external-log-stores", "elasticsearch", es.version}
if es.httpSSL {
filePath = append(filePath, "https")
} else {
o.Expect(es.clientAuth).NotTo(o.BeTrue(), "Unsupported configuration, please correct it!")
filePath = append(filePath, "http")
}
if es.userAuth {
filePath = append(filePath, "user_auth")
} else {
filePath = append(filePath, "no_user")
}
// create configmap
cm := resource{"configmap", es.serverName, es.namespace}
cmFilePath := append(filePath, "configmap.yaml")
cmFile := exutil.FixturePath(cmFilePath...)
cmPatch := []string{"-f", cmFile, "-n", cm.namespace, "-p", "NAMESPACE=" + es.namespace, "-p", "NAME=" + es.serverName}
if es.userAuth {
cmPatch = append(cmPatch, "-p", "USERNAME="+es.username, "-p", "PASSWORD="+es.password)
}
if es.httpSSL {
if es.clientAuth {
cmPatch = append(cmPatch, "-p", "CLIENT_AUTH=required")
} else {
cmPatch = append(cmPatch, "-p", "CLIENT_AUTH=none")
}
}
// set xpack.ml.enable to false when the architecture is not amd64
nodes, err := exutil.GetSchedulableLinuxWorkerNodes(oc)
o.Expect(err).NotTo(o.HaveOccurred())
for _, node := range nodes {
if node.Status.NodeInfo.Architecture != "amd64" {
cmPatch = append(cmPatch, "-p", "MACHINE_LEARNING=false")
break
}
}
cm.applyFromTemplate(oc, cmPatch...)
// create deployment and expose svc
deploy := resource{"deployment", es.serverName, es.namespace}
deployFilePath := append(filePath, "deployment.yaml")
deployFile := exutil.FixturePath(deployFilePath...)
err = deploy.applyFromTemplate(oc, "-f", deployFile, "-n", es.namespace, "-p", "NAMESPACE="+es.namespace, "-p", "NAME="+es.serverName)
o.Expect(err).NotTo(o.HaveOccurred())
WaitForDeploymentPodsToBeReady(oc, es.namespace, es.serverName)
err = oc.AsAdmin().WithoutNamespace().Run("expose").Args("-n", es.namespace, "deployment", es.serverName, "--name="+es.serverName).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// expose route
if es.httpSSL {
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-n", es.namespace, "route", "passthrough", "--service="+es.serverName, "--port=9200").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} else {
err = oc.AsAdmin().WithoutNamespace().Run("expose").Args("svc/"+es.serverName, "-n", es.namespace, "--port=9200").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
}
func (es externalES) remove(oc *exutil.CLI) {
resource{"route", es.serverName, es.namespace}.clear(oc)
resource{"service", es.serverName, es.namespace}.clear(oc)
resource{"configmap", es.serverName, es.namespace}.clear(oc)
resource{"deployment", es.serverName, es.namespace}.clear(oc)
resource{"serviceaccount", es.serverName, es.namespace}.clear(oc)
if es.httpSSL || es.userAuth {
resource{"secret", es.secretName, es.loggingNS}.clear(oc)
}
if es.httpSSL {
resource{"secret", es.serverName, es.namespace}.clear(oc)
}
}
func (es externalES) getPodName(oc *exutil.CLI) string {
esPods, err := oc.AdminKubeClient().CoreV1().Pods(es.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app=" + es.serverName})
o.Expect(err).NotTo(o.HaveOccurred())
var names []string
for i := 0; i < len(esPods.Items); i++ {
names = append(names, esPods.Items[i].Name)
}
return names[0]
}
func (es externalES) baseCurlString() string {
curlString := "curl -H \"Content-Type: application/json\""
if es.userAuth {
curlString += " -u " + es.username + ":" + es.password
}
if es.httpSSL {
if es.clientAuth {
curlString += " --cert /usr/share/elasticsearch/config/secret/elasticsearch.crt --key /usr/share/elasticsearch/config/secret/elasticsearch.key"
}
curlString += " --cacert /usr/share/elasticsearch/config/secret/admin-ca -s https://localhost:9200/"
} else {
curlString += " -s http://localhost:9200/"
}
return curlString
}
func (es externalES) getIndices(oc *exutil.CLI) ([]ESIndex, error) {
cmd := es.baseCurlString() + "_cat/indices?format=JSON"
stdout, err := e2eoutput.RunHostCmdWithRetries(es.namespace, es.getPodName(oc), cmd, 3*time.Second, 9*time.Second)
indices := []ESIndex{}
json.Unmarshal([]byte(stdout), &indices)
return indices, err
}
func (es externalES) waitForIndexAppear(oc *exutil.CLI, indexName string) {
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
indices, err := es.getIndices(oc)
count := 0
for _, index := range indices {
if strings.Contains(index.Index, indexName) {
if index.Health != "red" {
docCount, _ := strconv.Atoi(index.DocsCount)
count += docCount
}
}
}
if count > 0 && err == nil {
return true, nil
}
return false, err
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Index %s didn't appear or the doc count is 0 in last 3 minutes.", indexName))
}
func (es externalES) getDocCount(oc *exutil.CLI, indexName string, queryString string) (int64, error) {
cmd := es.baseCurlString() + indexName + "*/_count?format=JSON -d '" + queryString + "'"
stdout, err := e2eoutput.RunHostCmdWithRetries(es.namespace, es.getPodName(oc), cmd, 5*time.Second, 30*time.Second)
res := CountResult{}
json.Unmarshal([]byte(stdout), &res)
return res.Count, err
}
func (es externalES) waitForProjectLogsAppear(oc *exutil.CLI, projectName string, indexName string) {
query := "{\"query\": {\"match_phrase\": {\"kubernetes.namespace_name\": \"" + projectName + "\"}}}"
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
logCount, err := es.getDocCount(oc, indexName, query)
if err != nil {
return false, err
}
if logCount > 0 {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The logs of project %s didn't collected to index %s in last 180 seconds.", projectName, indexName))
}
func (es externalES) searchDocByQuery(oc *exutil.CLI, indexName string, queryString string) SearchResult {
cmd := es.baseCurlString() + indexName + "*/_search?format=JSON -d '" + queryString + "'"
stdout, err := e2eoutput.RunHostCmdWithRetries(es.namespace, es.getPodName(oc), cmd, 3*time.Second, 30*time.Second)
o.Expect(err).ShouldNot(o.HaveOccurred())
res := SearchResult{}
//data := bytes.NewReader([]byte(stdout))
//_ = json.NewDecoder(data).Decode(&res)
json.Unmarshal([]byte(stdout), &res)
return res
}
func (es externalES) removeIndices(oc *exutil.CLI, indexName string) {
cmd := es.baseCurlString() + indexName + " -X DELETE"
_, err := e2eoutput.RunHostCmdWithRetries(es.namespace, es.getPodName(oc), cmd, 3*time.Second, 30*time.Second)
o.Expect(err).ShouldNot(o.HaveOccurred())
}
| package logging | ||||
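A hedged end-to-end usage sketch of `externalES` (names are illustrative; `oc` and the `loggingNS` constant come from the surrounding suite):

```go
// Deploy a TLS-enabled external Elasticsearch 7 receiver, then wait for
// application logs to arrive in an "app" index.
es := externalES{
	namespace:  "e2e-external-es",
	version:    "7",
	serverName: "elasticsearch-server",
	httpSSL:    true,
	secretName: "es-pipeline-secret",
	loggingNS:  loggingNS,
}
defer es.remove(oc)
es.deploy(oc)
es.waitForIndexAppear(oc, "app")
```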
function | openshift/openshift-tests-private | 4731ec21-9b9c-49bb-928d-0037e123cc02 | createPipelineSecret | ['externalES'] | github.com/openshift/openshift-tests-private/test/extended/logging/elasticsearch_utils.go | func (es externalES) createPipelineSecret(oc *exutil.CLI, keysPath string) {
// create pipeline secret if needed
cmd := []string{"secret", "generic", es.secretName, "-n", es.loggingNS}
if es.clientAuth {
cmd = append(cmd, "--from-file=tls.key="+keysPath+"/client.key", "--from-file=tls.crt="+keysPath+"/client.crt", "--from-file=ca-bundle.crt="+keysPath+"/ca.crt")
if es.clientPrivateKeyPassphrase != "" {
cmd = append(cmd, "--from-literal=passphrase="+es.clientPrivateKeyPassphrase)
}
} else if es.httpSSL && !es.clientAuth {
cmd = append(cmd, "--from-file=ca-bundle.crt="+keysPath+"/ca.crt")
}
if es.userAuth {
cmd = append(cmd, "--from-literal=username="+es.username, "--from-literal=password="+es.password)
}
err := oc.AsAdmin().WithoutNamespace().Run("create").Args(cmd...).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
resource{"secret", es.secretName, es.loggingNS}.WaitForResourceToAppear(oc)
} | logging | ||||
function | openshift/openshift-tests-private | 68968c30-0f77-4abb-a3be-a9763b0ffe53 | deploy | ['"os"', '"os/exec"', '"path/filepath"'] | ['externalES'] | github.com/openshift/openshift-tests-private/test/extended/logging/elasticsearch_utils.go | func (es externalES) deploy(oc *exutil.CLI) {
// create SA
sa := resource{"serviceaccount", es.serverName, es.namespace}
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("serviceaccount", sa.name, "-n", sa.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
sa.WaitForResourceToAppear(oc)
if es.userAuth {
o.Expect(es.username).NotTo(o.BeEmpty(), "Please provide username!")
o.Expect(es.password).NotTo(o.BeEmpty(), "Please provide password!")
}
if es.httpSSL || es.clientAuth || es.userAuth {
o.Expect(es.secretName).NotTo(o.BeEmpty(), "Please provide pipeline secret name!")
// create a temporary directory
baseDir := exutil.FixturePath("testdata", "logging")
keysPath := filepath.Join(baseDir, "temp"+getRandomString())
defer exec.Command("rm", "-r", keysPath).Output()
err = os.MkdirAll(keysPath, 0755)
o.Expect(err).NotTo(o.HaveOccurred())
cert := certsConf{es.serverName, es.namespace, es.clientPrivateKeyPassphrase}
cert.generateCerts(oc, keysPath)
// create secret for ES if needed
if es.httpSSL || es.clientAuth {
r := resource{"secret", es.serverName, es.namespace}
err = oc.WithoutNamespace().Run("create").Args("secret", "generic", "-n", r.namespace, r.name, "--from-file=elasticsearch.key="+keysPath+"/server.key", "--from-file=elasticsearch.crt="+keysPath+"/server.crt", "--from-file=admin-ca="+keysPath+"/ca.crt").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
r.WaitForResourceToAppear(oc)
}
// create pipeline secret in logging project
es.createPipelineSecret(oc, keysPath)
}
// get file path per the configurations
filePath := []string{"testdata", "logging", "external-log-stores", "elasticsearch", es.version}
if es.httpSSL {
filePath = append(filePath, "https")
} else {
o.Expect(es.clientAuth).NotTo(o.BeTrue(), "Unsupported configuration, please correct it!")
filePath = append(filePath, "http")
}
if es.userAuth {
filePath = append(filePath, "user_auth")
} else {
filePath = append(filePath, "no_user")
}
// create configmap
cm := resource{"configmap", es.serverName, es.namespace}
cmFilePath := append(filePath, "configmap.yaml")
cmFile := exutil.FixturePath(cmFilePath...)
cmPatch := []string{"-f", cmFile, "-n", cm.namespace, "-p", "NAMESPACE=" + es.namespace, "-p", "NAME=" + es.serverName}
if es.userAuth {
cmPatch = append(cmPatch, "-p", "USERNAME="+es.username, "-p", "PASSWORD="+es.password)
}
if es.httpSSL {
if es.clientAuth {
cmPatch = append(cmPatch, "-p", "CLIENT_AUTH=required")
} else {
cmPatch = append(cmPatch, "-p", "CLIENT_AUTH=none")
}
}
// set xpack.ml.enable to false when the architecture is not amd64
nodes, err := exutil.GetSchedulableLinuxWorkerNodes(oc)
o.Expect(err).NotTo(o.HaveOccurred())
for _, node := range nodes {
if node.Status.NodeInfo.Architecture != "amd64" {
cmPatch = append(cmPatch, "-p", "MACHINE_LEARNING=false")
break
}
}
cm.applyFromTemplate(oc, cmPatch...)
// create deployment and expose svc
deploy := resource{"deployment", es.serverName, es.namespace}
deployFilePath := append(filePath, "deployment.yaml")
deployFile := exutil.FixturePath(deployFilePath...)
err = deploy.applyFromTemplate(oc, "-f", deployFile, "-n", es.namespace, "-p", "NAMESPACE="+es.namespace, "-p", "NAME="+es.serverName)
o.Expect(err).NotTo(o.HaveOccurred())
WaitForDeploymentPodsToBeReady(oc, es.namespace, es.serverName)
err = oc.AsAdmin().WithoutNamespace().Run("expose").Args("-n", es.namespace, "deployment", es.serverName, "--name="+es.serverName).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// expose route
if es.httpSSL {
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-n", es.namespace, "route", "passthrough", "--service="+es.serverName, "--port=9200").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} else {
err = oc.AsAdmin().WithoutNamespace().Run("expose").Args("svc/"+es.serverName, "-n", es.namespace, "--port=9200").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
} | logging | |||
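For context, a minimal usage sketch of the deploy/remove pair above (field values are illustrative and the surrounding test namespaces are assumptions; the externalES struct itself is defined elsewhere in this package):

	es := externalES{
		serverName: "elasticsearch-server", // illustrative
		namespace:  esProj,                 // an existing test namespace (assumption)
		version:    "8",                    // must match a fixture directory under external-log-stores/elasticsearch
		httpSSL:    true,
		userAuth:   true,
		username:   "user1",
		password:   getRandomString(),
		secretName: "ees-https",
		loggingNS:  clfNS, // namespace where the pipeline secret is created (assumption)
	}
	defer es.remove(oc)
	es.deploy(oc)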
function | openshift/openshift-tests-private | 7e51bea1-60ac-47e1-abc8-e04dae051e63 | remove | ['externalES'] | github.com/openshift/openshift-tests-private/test/extended/logging/elasticsearch_utils.go | func (es externalES) remove(oc *exutil.CLI) {
resource{"route", es.serverName, es.namespace}.clear(oc)
resource{"service", es.serverName, es.namespace}.clear(oc)
resource{"configmap", es.serverName, es.namespace}.clear(oc)
resource{"deployment", es.serverName, es.namespace}.clear(oc)
resource{"serviceaccount", es.serverName, es.namespace}.clear(oc)
if es.httpSSL || es.userAuth {
resource{"secret", es.secretName, es.loggingNS}.clear(oc)
}
if es.httpSSL {
resource{"secret", es.serverName, es.namespace}.clear(oc)
}
} | logging | ||||
function | openshift/openshift-tests-private | 4254b7c6-2249-432f-9663-bd6304283da2 | getPodName | ['"context"'] | ['externalES'] | github.com/openshift/openshift-tests-private/test/extended/logging/elasticsearch_utils.go | func (es externalES) getPodName(oc *exutil.CLI) string {
esPods, err := oc.AdminKubeClient().CoreV1().Pods(es.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app=" + es.serverName})
o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(esPods.Items).NotTo(o.BeEmpty(), "no pod found with label app="+es.serverName)
	var names []string
for i := 0; i < len(esPods.Items); i++ {
names = append(names, esPods.Items[i].Name)
}
return names[0]
} | logging | |||
function | openshift/openshift-tests-private | d4f36ccc-4827-4cfa-8874-dce978113b6a | baseCurlString | ['"encoding/json"'] | ['externalES'] | github.com/openshift/openshift-tests-private/test/extended/logging/elasticsearch_utils.go | func (es externalES) baseCurlString() string {
curlString := "curl -H \"Content-Type: application/json\""
if es.userAuth {
curlString += " -u " + es.username + ":" + es.password
}
if es.httpSSL {
if es.clientAuth {
curlString += " --cert /usr/share/elasticsearch/config/secret/elasticsearch.crt --key /usr/share/elasticsearch/config/secret/elasticsearch.key"
}
curlString += " --cacert /usr/share/elasticsearch/config/secret/admin-ca -s https://localhost:9200/"
} else {
curlString += " -s http://localhost:9200/"
}
return curlString
} | logging | |||
function | openshift/openshift-tests-private | 1b9857fe-060c-4c1d-85d9-0f187ae96a5a | getIndices | ['"encoding/json"', '"time"'] | ['externalES'] | github.com/openshift/openshift-tests-private/test/extended/logging/elasticsearch_utils.go | func (es externalES) getIndices(oc *exutil.CLI) ([]ESIndex, error) {
cmd := es.baseCurlString() + "_cat/indices?format=JSON"
stdout, err := e2eoutput.RunHostCmdWithRetries(es.namespace, es.getPodName(oc), cmd, 3*time.Second, 9*time.Second)
indices := []ESIndex{}
json.Unmarshal([]byte(stdout), &indices)
return indices, err
} | logging | |||
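A hedged sketch of listing indices with the helper above (assumes es was deployed as in the earlier sketch; the ESIndex fields are the ones referenced later in this file):

	indices, err := es.getIndices(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	for _, idx := range indices {
		e2e.Logf("index=%s health=%s docs=%s", idx.Index, idx.Health, idx.DocsCount)
	}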
function | openshift/openshift-tests-private | 47e9e3ca-f9a7-49fe-b7d2-33ba37f4c7fa | waitForIndexAppear | ['"context"', '"fmt"', '"strconv"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | ['externalES'] | github.com/openshift/openshift-tests-private/test/extended/logging/elasticsearch_utils.go | func (es externalES) waitForIndexAppear(oc *exutil.CLI, indexName string) {
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
indices, err := es.getIndices(oc)
count := 0
for _, index := range indices {
if strings.Contains(index.Index, indexName) {
if index.Health != "red" {
docCount, _ := strconv.Atoi(index.DocsCount)
count += docCount
}
}
}
if count > 0 && err == nil {
return true, nil
}
return false, err
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Index %s didn't appear or the doc count is 0 in last 3 minutes.", indexName))
} | logging | |||
function | openshift/openshift-tests-private | aa35c505-6293-4eea-add8-62e121ed6211 | getDocCount | ['"encoding/json"', '"time"'] | ['externalES'] | github.com/openshift/openshift-tests-private/test/extended/logging/elasticsearch_utils.go | func (es externalES) getDocCount(oc *exutil.CLI, indexName string, queryString string) (int64, error) {
cmd := es.baseCurlString() + indexName + "*/_count?format=JSON -d '" + queryString + "'"
stdout, err := e2eoutput.RunHostCmdWithRetries(es.namespace, es.getPodName(oc), cmd, 5*time.Second, 30*time.Second)
res := CountResult{}
json.Unmarshal([]byte(stdout), &res)
return res.Count, err
} | logging | |||
function | openshift/openshift-tests-private | 96604fe0-3ff0-46a5-ba8e-7b424fe42492 | waitForProjectLogsAppear | ['"context"', '"fmt"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | ['externalES'] | github.com/openshift/openshift-tests-private/test/extended/logging/elasticsearch_utils.go | func (es externalES) waitForProjectLogsAppear(oc *exutil.CLI, projectName string, indexName string) {
query := "{\"query\": {\"match_phrase\": {\"kubernetes.namespace_name\": \"" + projectName + "\"}}}"
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
logCount, err := es.getDocCount(oc, indexName, query)
if err != nil {
return false, err
}
if logCount > 0 {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("The logs of project %s didn't collected to index %s in last 180 seconds.", projectName, indexName))
} | logging | |||
function | openshift/openshift-tests-private | 0dfd243b-1a9b-4310-b394-50426ad7f847 | searchDocByQuery | ['"encoding/json"', '"time"'] | ['externalES'] | github.com/openshift/openshift-tests-private/test/extended/logging/elasticsearch_utils.go | func (es externalES) searchDocByQuery(oc *exutil.CLI, indexName string, queryString string) SearchResult {
cmd := es.baseCurlString() + indexName + "*/_search?format=JSON -d '" + queryString + "'"
stdout, err := e2eoutput.RunHostCmdWithRetries(es.namespace, es.getPodName(oc), cmd, 3*time.Second, 30*time.Second)
o.Expect(err).ShouldNot(o.HaveOccurred())
res := SearchResult{}
json.Unmarshal([]byte(stdout), &res)
return res
} | logging | |||
function | openshift/openshift-tests-private | a95bcaf8-5e90-45fc-b061-e40140791e4e | removeIndices | ['"time"'] | ['externalES'] | github.com/openshift/openshift-tests-private/test/extended/logging/elasticsearch_utils.go | func (es externalES) removeIndices(oc *exutil.CLI, indexName string) {
cmd := es.baseCurlString() + indexName + " -X DELETE"
_, err := e2eoutput.RunHostCmdWithRetries(es.namespace, es.getPodName(oc), cmd, 3*time.Second, 30*time.Second)
o.Expect(err).ShouldNot(o.HaveOccurred())
} | logging | |||
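Taken together, a typical verification flow with these helpers might look like the following sketch (the index name/pattern is illustrative):

	es.waitForIndexAppear(oc, "app")
	count, err := es.getDocCount(oc, "app", "{\"query\": {\"match_all\": {}}}")
	o.Expect(err).NotTo(o.HaveOccurred())
	e2e.Logf("found %d docs in app indices", count)
	es.removeIndices(oc, "app*")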
file | openshift/openshift-tests-private | 1a29fdfb-8cdd-483e-96ff-589e65bbc414 | splunk_util | import (
"context"
"encoding/base64"
"encoding/json"
"encoding/xml"
"fmt"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
"time"
"github.com/google/uuid"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/logging/splunk_util.go | package logging
import (
"context"
"encoding/base64"
"encoding/json"
"encoding/xml"
"fmt"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
"time"
"github.com/google/uuid"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
func (s *splunkPodServer) checkLogs(query string) bool {
err := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
searchID, err := s.requestSearchTask(query)
if err != nil {
e2e.Logf("error getting search ID: %v", err)
return false, nil
}
searchResult, err := s.getSearchResult(searchID)
if err != nil {
e2e.Logf("hit error when querying logs with %s: %v, try next round", query, err)
return false, nil
}
if searchResult == nil || len(searchResult.Results) == 0 {
e2e.Logf("no logs found for the query: %s, try next round", query)
return false, nil
}
e2e.Logf("found records for the query: %s", query)
return true, nil
})
return err == nil
}
func (s *splunkPodServer) auditLogFound() bool {
return s.checkLogs("log_type=audit|head 1")
}
func (s *splunkPodServer) anyLogFound() bool {
for _, logType := range []string{"infrastructure", "application", "audit"} {
if s.checkLogs("log_type=" + logType + "|head 1") {
return true
}
}
return false
}
func (s *splunkPodServer) allQueryFound(queries []string) bool {
if len(queries) == 0 {
queries = []string{
"log_type=application|head 1",
"log_type=\"infrastructure\" _SYSTEMD_INVOCATION_ID |head 1",
"log_type=\"infrastructure\" container_image|head 1",
"log_type=\"audit\" .linux-audit.log|head 1",
"log_type=\"audit\" .ovn-audit.log|head 1",
"log_type=\"audit\" .k8s-audit.log|head 1",
"log_type=\"audit\" .openshift-audit.log|head 1",
}
}
//return false if any query fails
foundAll := true
for _, query := range queries {
if !s.checkLogs(query) {
foundAll = false
}
}
return foundAll
}
func (s *splunkPodServer) allTypeLogsFound() bool {
queries := []string{
"log_type=\"infrastructure\" _SYSTEMD_INVOCATION_ID |head 1",
"log_type=\"infrastructure\" container_image|head 1",
"log_type=application|head 1",
"log_type=audit|head 1",
}
return s.allQueryFound(queries)
}
func (s *splunkPodServer) getSearchResult(searchID string) (*splunkSearchResult, error) {
h := make(http.Header)
h.Add("Content-Type", "application/json")
h.Add(
"Authorization",
"Basic "+base64.StdEncoding.EncodeToString([]byte(s.adminUser+":"+s.adminPassword)),
)
params := url.Values{}
params.Add("output_mode", "json")
var searchResult *splunkSearchResult
resp, err1 := doHTTPRequest(h, "https://"+s.splunkdRoute, "/services/search/jobs/"+searchID+"/results", params.Encode(), "GET", true, 5, nil, 200)
if err1 != nil {
return nil, fmt.Errorf("failed to get response: %v", err1)
}
err2 := json.Unmarshal(resp, &searchResult)
if err2 != nil {
return nil, fmt.Errorf("failed to unmarshal splunk response: %v", err2)
}
return searchResult, nil
}
func (s *splunkPodServer) searchLogs(query string) (*splunkSearchResult, error) {
searchID, err := s.requestSearchTask(query)
if err != nil {
return nil, fmt.Errorf("error getting search ID: %v", err)
}
return s.getSearchResult(searchID)
}
func (s *splunkPodServer) requestSearchTask(query string) (string, error) {
h := make(http.Header)
h.Add("Content-Type", "application/json")
h.Add(
"Authorization",
"Basic "+base64.StdEncoding.EncodeToString([]byte(s.adminUser+":"+s.adminPassword)),
)
params := url.Values{}
params.Set("search", "search "+query)
resp, err := doHTTPRequest(h, "https://"+s.splunkdRoute, "/services/search/jobs", "", "POST", true, 2, strings.NewReader(params.Encode()), 201)
if err != nil {
return "", err
}
resmap := splunkSearchResp{}
err = xml.Unmarshal(resp, &resmap)
if err != nil {
return "", err
}
return resmap.Sid, nil
}
// Set the default values on the splunkPodServer object
func (s *splunkPodServer) init() {
s.adminUser = "admin"
s.adminPassword = getRandomString()
s.hecToken = uuid.New().String()
//https://idelta.co.uk/generate-hec-tokens-with-python/,https://docs.splunk.com/Documentation/SplunkCloud/9.0.2209/Security/Passwordbestpracticesforadministrators
if s.name == "" {
		s.name = "splunk-default"
	}
	// derive the service name/URL only after the default name has been applied
	s.serviceName = s.name + "-0"
	s.serviceURL = s.serviceName + "." + s.namespace + ".svc"
//authType must be one of "http|tls_serveronly|tls_clientauth"
//Note: when authType==http, you can still access splunk via https://${splunk_route}
if s.authType == "" {
s.authType = "http"
}
if s.version == "" {
s.version = "9.0"
}
//Fail fast if any of caFile, keyFile, certFile is empty
	if s.authType == "tls_clientauth" || s.authType == "tls_serveronly" {
		o.Expect(s.caFile).NotTo(o.BeEmpty())
		o.Expect(s.keyFile).NotTo(o.BeEmpty())
		o.Expect(s.certFile).NotTo(o.BeEmpty())
	}
}
func (s *splunkPodServer) deploy(oc *exutil.CLI) {
// Get route URL of splunk service
appDomain, err := getAppDomain(oc)
o.Expect(err).NotTo(o.HaveOccurred())
//splunkd route URL
s.splunkdRoute = s.name + "-splunkd-" + s.namespace + "." + appDomain
//splunkd hec URL
s.hecRoute = s.name + "-hec-" + s.namespace + "." + appDomain
s.webRoute = s.name + "-web-" + s.namespace + "." + appDomain
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-scc-to-user", "nonroot", "-z", "default", "-n", s.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// Create secret used by splunk
switch s.authType {
case "http":
s.deployHTTPSplunk(oc)
case "tls_clientauth":
s.deployCustomCertClientForceSplunk(oc)
case "tls_serveronly":
s.deployCustomCertSplunk(oc)
default:
s.deployHTTPSplunk(oc)
}
//In general, splunk takes about 1 minute to start; wait 30 seconds before calling waitForStatefulsetReady
time.Sleep(30 * time.Second)
waitForStatefulsetReady(oc, s.namespace, s.name)
}
func (s *splunkPodServer) deployHTTPSplunk(oc *exutil.CLI) {
filePath := exutil.FixturePath("testdata", "logging", "external-log-stores", "splunk")
//Create secret for splunk Statefulset
secretTemplate := filepath.Join(filePath, "secret_splunk_template.yaml")
secret := resource{"secret", s.name, s.namespace}
err := secret.applyFromTemplate(oc, "-f", secretTemplate, "-p", "NAME="+secret.name, "-p", "HEC_TOKEN="+s.hecToken, "-p", "PASSWORD="+s.adminPassword)
o.Expect(err).NotTo(o.HaveOccurred())
//create splunk StatefulSet
statefulsetTemplate := filepath.Join(filePath, "statefulset_splunk-"+s.version+"_template.yaml")
splunkSfs := resource{"StatefulSet", s.name, s.namespace}
err = splunkSfs.applyFromTemplate(oc, "-f", statefulsetTemplate, "-p", "NAME="+s.name)
o.Expect(err).NotTo(o.HaveOccurred())
//create route for splunk service
routeHecTemplate := filepath.Join(filePath, "route-edge_splunk_template.yaml")
routeHec := resource{"route", s.name + "-hec", s.namespace}
err = routeHec.applyFromTemplate(oc, "-f", routeHecTemplate, "-p", "NAME="+routeHec.name, "-p", "SERVICE_NAME="+s.serviceName, "-p", "PORT_NAME=http-hec", "-p", "ROUTE_HOST="+s.hecRoute)
o.Expect(err).NotTo(o.HaveOccurred())
routeSplunkdTemplate := filepath.Join(filePath, "route-passthrough_splunk_template.yaml")
routeSplunkd := resource{"route", s.name + "-splunkd", s.namespace}
err = routeSplunkd.applyFromTemplate(oc, "-f", routeSplunkdTemplate, "-p", "NAME="+routeSplunkd.name, "-p", "SERVICE_NAME="+s.serviceName, "-p", "PORT_NAME=https-splunkd", "-p", "ROUTE_HOST="+s.splunkdRoute)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (s *splunkPodServer) genHecPemFile(hecFile string) error {
	// HEC expects an all-in-one PEM: server cert, then private key, then CA cert
	dat1, err := os.ReadFile(s.certFile)
	if err != nil {
		e2e.Logf("Cannot read the certFile %s", s.certFile)
		return err
	}
	dat2, err := os.ReadFile(s.keyFile)
	if err != nil {
		e2e.Logf("Cannot read the keyFile %s", s.keyFile)
		return err
	}
	dat3, err := os.ReadFile(s.caFile)
	if err != nil {
		e2e.Logf("Cannot read the caFile %s", s.caFile)
		return err
	}
	buf := []byte{}
	buf = append(buf, dat1...)
	buf = append(buf, dat2...)
	buf = append(buf, dat3...)
	return os.WriteFile(hecFile, buf, 0644)
}
func (s *splunkPodServer) deployCustomCertSplunk(oc *exutil.CLI) {
//Create basic secret content for splunk Statefulset
filePath := exutil.FixturePath("testdata", "logging", "external-log-stores", "splunk")
secretTemplate := filepath.Join(filePath, "secret_tls_splunk_template.yaml")
if s.passphrase != "" {
secretTemplate = filepath.Join(filePath, "secret_tls_passphrase_splunk_template.yaml")
}
secret := resource{"secret", s.name, s.namespace}
if s.passphrase != "" {
err := secret.applyFromTemplate(oc, "-f", secretTemplate, "-p", "NAME="+secret.name, "-p", "HEC_TOKEN="+s.hecToken, "-p", "PASSWORD="+s.adminPassword, "-p", "PASSPHASE="+s.passphrase)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
err := secret.applyFromTemplate(oc, "-f", secretTemplate, "-p", "NAME="+secret.name, "-p", "HEC_TOKEN="+s.hecToken, "-p", "PASSWORD="+s.adminPassword)
o.Expect(err).NotTo(o.HaveOccurred())
}
//HEC needs an all-in-one PEM file (server cert, private key and CA concatenated).
hecPemFile := "/tmp/" + getRandomString() + "hecAllKeys.crt"
defer os.Remove(hecPemFile)
err := s.genHecPemFile(hecPemFile)
o.Expect(err).NotTo(o.HaveOccurred())
//The secret will be mounted into splunk pods and used in server.conf, inputs.conf
args := []string{"data", "secret/" + secret.name, "-n", secret.namespace}
args = append(args, "--from-file=hec.pem="+hecPemFile)
args = append(args, "--from-file=ca.pem="+s.caFile)
args = append(args, "--from-file=key.pem="+s.keyFile)
args = append(args, "--from-file=cert.pem="+s.certFile)
if s.passphrase != "" {
args = append(args, "--from-literal=passphrase="+s.passphrase)
}
err = oc.AsAdmin().WithoutNamespace().Run("set").Args(args...).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//create splunk StatefulSet
statefulsetTemplate := filepath.Join(filePath, "statefulset_splunk-"+s.version+"_template.yaml")
splunkSfs := resource{"StatefulSet", s.name, s.namespace}
err = splunkSfs.applyFromTemplate(oc, "-f", statefulsetTemplate, "-p", "NAME="+splunkSfs.name)
o.Expect(err).NotTo(o.HaveOccurred())
//create route for splunk service
routeHecTemplate := filepath.Join(filePath, "route-passthrough_splunk_template.yaml")
routeHec := resource{"route", s.name + "-hec", s.namespace}
err = routeHec.applyFromTemplate(oc, "-f", routeHecTemplate, "-p", "NAME="+routeHec.name, "-p", "SERVICE_NAME="+s.serviceName, "-p", "PORT_NAME=http-hec", "-p", "ROUTE_HOST="+s.hecRoute)
o.Expect(err).NotTo(o.HaveOccurred())
routeSplunkdTemplate := filepath.Join(filePath, "route-passthrough_splunk_template.yaml")
routeSplunkd := resource{"route", s.name + "-splunkd", s.namespace}
err = routeSplunkd.applyFromTemplate(oc, "-f", routeSplunkdTemplate, "-p", "NAME="+routeSplunkd.name, "-p", "SERVICE_NAME="+s.serviceName, "-p", "PORT_NAME=https-splunkd", "-p", "ROUTE_HOST="+s.splunkdRoute)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (s *splunkPodServer) deployCustomCertClientForceSplunk(oc *exutil.CLI) {
//Create secret for splunk Statefulset
filePath := exutil.FixturePath("testdata", "logging", "external-log-stores", "splunk")
secretTemplate := filepath.Join(filePath, "secret_tls_splunk_template.yaml")
if s.passphrase != "" {
secretTemplate = filepath.Join(filePath, "secret_tls_passphrase_splunk_template.yaml")
}
secret := resource{"secret", s.name, s.namespace}
if s.passphrase != "" {
err := secret.applyFromTemplate(oc, "-f", secretTemplate, "-p", "NAME="+secret.name, "-p", "HEC_TOKEN="+s.hecToken, "-p", "PASSWORD="+s.adminPassword, "-p", "HEC_CLIENTAUTH=True", "-p", "PASSPHASE="+s.passphrase)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
err := secret.applyFromTemplate(oc, "-f", secretTemplate, "-p", "NAME="+secret.name, "-p", "HEC_TOKEN="+s.hecToken, "-p", "PASSWORD="+s.adminPassword, "-p", "HEC_CLIENTAUTH=True")
o.Expect(err).NotTo(o.HaveOccurred())
}
//HEC needs an all-in-one PEM file (server cert, private key and CA concatenated).
hecPemFile := "/tmp/" + getRandomString() + "hecAllKeys.crt"
defer os.Remove(hecPemFile)
err := s.genHecPemFile(hecPemFile)
o.Expect(err).NotTo(o.HaveOccurred())
//The secret will be mounted into splunk pods and used in server.conf, inputs.conf
secretArgs := []string{"data", "secret/" + secret.name, "-n", secret.namespace}
secretArgs = append(secretArgs, "--from-file=hec.pem="+hecPemFile)
secretArgs = append(secretArgs, "--from-file=ca.pem="+s.caFile)
secretArgs = append(secretArgs, "--from-file=key.pem="+s.keyFile)
secretArgs = append(secretArgs, "--from-file=cert.pem="+s.certFile)
if s.passphrase != "" {
secretArgs = append(secretArgs, "--from-literal=passphrase="+s.passphrase)
}
err = oc.AsAdmin().WithoutNamespace().Run("set").Args(secretArgs...).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//create splunk StatefulSet
statefulsetTemplate := filepath.Join(filePath, "statefulset_splunk-"+s.version+"_template.yaml")
splunkSfs := resource{"StatefulSet", s.name, s.namespace}
err = splunkSfs.applyFromTemplate(oc, "-f", statefulsetTemplate, "-p", "NAME="+splunkSfs.name)
o.Expect(err).NotTo(o.HaveOccurred())
//create route for splunk service
routeHecTemplate := filepath.Join(filePath, "route-passthrough_splunk_template.yaml")
routeHec := resource{"route", s.name + "-hec", s.namespace}
err = routeHec.applyFromTemplate(oc, "-f", routeHecTemplate, "-p", "NAME="+routeHec.name, "-p", "SERVICE_NAME="+s.serviceName, "-p", "PORT_NAME=http-hec", "-p", "ROUTE_HOST="+s.hecRoute)
o.Expect(err).NotTo(o.HaveOccurred())
routeSplunkdTemplate := filepath.Join(filePath, "route-passthrough_splunk_template.yaml")
routeSplunkd := resource{"route", s.name + "-splunkd", s.namespace}
err = routeSplunkd.applyFromTemplate(oc, "-f", routeSplunkdTemplate, "-p", "NAME="+routeSplunkd.name, "-p", "SERVICE_NAME="+s.serviceName, "-p", "PORT_NAME=https-splunkd", "-p", "ROUTE_HOST="+s.splunkdRoute)
o.Expect(err).NotTo(o.HaveOccurred())
}
func (s *splunkPodServer) destroy(oc *exutil.CLI) {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("route", s.name+"-hec", "-n", s.namespace).Execute()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("route", s.name+"-splunkd", "-n", s.namespace).Execute()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("statefulset", s.name, "-n", s.namespace).Execute()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", s.name, "-n", s.namespace).Execute()
oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-scc-from-user", "nonroot", "-z", "default", "-n", s.namespace).Execute()
}
// createIndexes adds custom index(es) into splunk
func (s *splunkPodServer) createIndexes(oc *exutil.CLI, indexes ...string) error {
splunkPod, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", s.namespace, "pod", "-l", "app.kubernetes.io/instance="+s.name, "-ojsonpath={.items[0].metadata.name}").Output()
if err != nil {
return fmt.Errorf("error getting splunk pod: %v", err)
}
for _, index := range indexes {
// curl -k -u admin:gjc2t9jx https://localhost:8089/servicesNS/admin/search/data/indexes -d name=devtutorial
cmd := "curl -k -u admin:" + s.adminPassword + " https://localhost:8089/servicesNS/admin/search/data/indexes -d name=" + index
stdout, err := oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("exec").Args("-n", s.namespace, splunkPod, "--", "/bin/sh", "-x", "-c", cmd).Output()
if err != nil {
e2e.Logf("query output: %v", stdout)
return fmt.Errorf("can't create index %s, error: %v", index, err)
}
}
return nil
}
// Create the secret which is used in CLF
func (toSp *toSplunkSecret) create(oc *exutil.CLI) {
secretArgs := []string{"secret", "generic", toSp.name, "-n", toSp.namespace}
if toSp.hecToken != "" {
secretArgs = append(secretArgs, "--from-literal=hecToken="+toSp.hecToken)
}
if toSp.caFile != "" {
secretArgs = append(secretArgs, "--from-file=ca-bundle.crt="+toSp.caFile)
}
if toSp.keyFile != "" {
secretArgs = append(secretArgs, "--from-file=tls.key="+toSp.keyFile)
}
if toSp.certFile != "" {
secretArgs = append(secretArgs, "--from-file=tls.crt="+toSp.certFile)
}
if toSp.passphrase != "" {
secretArgs = append(secretArgs, "--from-literal=passphrase="+toSp.passphrase)
}
err := oc.AsAdmin().WithoutNamespace().Run("create").Args(secretArgs...).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func (toSp *toSplunkSecret) delete(oc *exutil.CLI) {
s := resource{"secret", toSp.name, toSp.namespace}
s.clear(oc)
}
| package logging | ||||
function | openshift/openshift-tests-private | 1a77b0c5-4d1c-4b49-b8de-caa3bfc3d8d4 | checkLogs | ['"context"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/logging/splunk_util.go | func (s *splunkPodServer) checkLogs(query string) bool {
err := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
searchID, err := s.requestSearchTask(query)
if err != nil {
e2e.Logf("error getting search ID: %v", err)
return false, nil
}
searchResult, err := s.getSearchResult(searchID)
if err != nil {
e2e.Logf("hit error when querying logs with %s: %v, try next round", query, err)
return false, nil
}
if searchResult == nil || len(searchResult.Results) == 0 {
e2e.Logf("no logs found for the query: %s, try next round", query)
return false, nil
}
e2e.Logf("found records for the query: %s", query)
return true, nil
})
return err == nil
} | logging | ||||
function | openshift/openshift-tests-private | 78f1f205-7b01-44fa-8db8-718071654507 | auditLogFound | github.com/openshift/openshift-tests-private/test/extended/logging/splunk_util.go | func (s *splunkPodServer) auditLogFound() bool {
return s.checkLogs("log_type=audit|head 1")
} | logging | |||||
function | openshift/openshift-tests-private | 810b2b28-bcb5-4945-a87e-63e1379794b0 | anyLogFound | github.com/openshift/openshift-tests-private/test/extended/logging/splunk_util.go | func (s *splunkPodServer) anyLogFound() bool {
for _, logType := range []string{"infrastructure", "application", "audit"} {
if s.checkLogs("log_type=" + logType + "|head 1") {
return true
}
}
return false
} | logging | |||||
function | openshift/openshift-tests-private | 676cfab8-d761-423a-99e6-44551712a40b | allQueryFound | github.com/openshift/openshift-tests-private/test/extended/logging/splunk_util.go | func (s *splunkPodServer) allQueryFound(queries []string) bool {
if len(queries) == 0 {
queries = []string{
"log_type=application|head 1",
"log_type=\"infrastructure\" _SYSTEMD_INVOCATION_ID |head 1",
"log_type=\"infrastructure\" container_image|head 1",
"log_type=\"audit\" .linux-audit.log|head 1",
"log_type=\"audit\" .ovn-audit.log|head 1",
"log_type=\"audit\" .k8s-audit.log|head 1",
"log_type=\"audit\" .openshift-audit.log|head 1",
}
}
//return false if any query fails
foundAll := true
for _, query := range queries {
if !s.checkLogs(query) {
foundAll = false
}
}
return foundAll
} | logging | |||||
function | openshift/openshift-tests-private | 7e2a3d83-5fcb-42d2-bcfb-059f28c1a067 | allTypeLogsFound | github.com/openshift/openshift-tests-private/test/extended/logging/splunk_util.go | func (s *splunkPodServer) allTypeLogsFound() bool {
queries := []string{
"log_type=\"infrastructure\" _SYSTEMD_INVOCATION_ID |head 1",
"log_type=\"infrastructure\" container_image|head 1",
"log_type=application|head 1",
"log_type=audit|head 1",
}
return s.allQueryFound(queries)
} | logging | |||||
function | openshift/openshift-tests-private | f7a3461b-3eb2-4442-9876-8aba699a51dd | getSearchResult | ['"encoding/base64"', '"encoding/json"', '"fmt"', '"net/http"', '"net/url"'] | github.com/openshift/openshift-tests-private/test/extended/logging/splunk_util.go | func (s *splunkPodServer) getSearchResult(searchID string) (*splunkSearchResult, error) {
h := make(http.Header)
h.Add("Content-Type", "application/json")
h.Add(
"Authorization",
"Basic "+base64.StdEncoding.EncodeToString([]byte(s.adminUser+":"+s.adminPassword)),
)
params := url.Values{}
params.Add("output_mode", "json")
var searchResult *splunkSearchResult
resp, err1 := doHTTPRequest(h, "https://"+s.splunkdRoute, "/services/search/jobs/"+searchID+"/results", params.Encode(), "GET", true, 5, nil, 200)
if err1 != nil {
return nil, fmt.Errorf("failed to get response: %v", err1)
}
err2 := json.Unmarshal(resp, &searchResult)
if err2 != nil {
return nil, fmt.Errorf("failed to unmarshal splunk response: %v", err2)
}
return searchResult, nil
} | logging | ||||
function | openshift/openshift-tests-private | ac5296a8-4311-49b4-ab27-d9330f0424ba | searchLogs | ['"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/logging/splunk_util.go | func (s *splunkPodServer) searchLogs(query string) (*splunkSearchResult, error) {
searchID, err := s.requestSearchTask(query)
if err != nil {
return nil, fmt.Errorf("error getting search ID: %v", err)
}
return s.getSearchResult(searchID)
} | logging | ||||
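A hedged sketch of an ad-hoc query through searchLogs (assumes a deployed splunkPodServer in sp; the query is illustrative SPL):

	result, err := sp.searchLogs("log_type=audit | head 5")
	o.Expect(err).NotTo(o.HaveOccurred())
	if result != nil {
		e2e.Logf("got %d result rows", len(result.Results))
	}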
function | openshift/openshift-tests-private | 8de06750-8c8c-48b0-95bd-3481fd56014f | requestSearchTask | ['"encoding/base64"', '"encoding/json"', '"encoding/xml"', '"net/http"', '"net/url"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/logging/splunk_util.go | func (s *splunkPodServer) requestSearchTask(query string) (string, error) {
h := make(http.Header)
h.Add("Content-Type", "application/json")
h.Add(
"Authorization",
"Basic "+base64.StdEncoding.EncodeToString([]byte(s.adminUser+":"+s.adminPassword)),
)
params := url.Values{}
params.Set("search", "search "+query)
resp, err := doHTTPRequest(h, "https://"+s.splunkdRoute, "/services/search/jobs", "", "POST", true, 2, strings.NewReader(params.Encode()), 201)
if err != nil {
return "", err
}
resmap := splunkSearchResp{}
err = xml.Unmarshal(resp, &resmap)
if err != nil {
return "", err
}
return resmap.Sid, nil
} | logging | ||||
function | openshift/openshift-tests-private | a92d3d7d-7c5a-4afd-a44a-bcd0fda21832 | init | ['"net/http"', '"github.com/google/uuid"'] | github.com/openshift/openshift-tests-private/test/extended/logging/splunk_util.go | func (s *splunkPodServer) init() {
s.adminUser = "admin"
s.adminPassword = getRandomString()
s.hecToken = uuid.New().String()
//https://idelta.co.uk/generate-hec-tokens-with-python/,https://docs.splunk.com/Documentation/SplunkCloud/9.0.2209/Security/Passwordbestpracticesforadministrators
if s.name == "" {
		s.name = "splunk-default"
	}
	// derive the service name/URL only after the default name has been applied
	s.serviceName = s.name + "-0"
	s.serviceURL = s.serviceName + "." + s.namespace + ".svc"
//authType must be one of "http|tls_serveronly|tls_clientauth"
//Note: when authType==http, you can still access splunk via https://${splunk_route}
if s.authType == "" {
s.authType = "http"
}
if s.version == "" {
s.version = "9.0"
}
//Fail fast if any of caFile, keyFile, certFile is empty
	if s.authType == "tls_clientauth" || s.authType == "tls_serveronly" {
		o.Expect(s.caFile).NotTo(o.BeEmpty())
		o.Expect(s.keyFile).NotTo(o.BeEmpty())
		o.Expect(s.certFile).NotTo(o.BeEmpty())
	}
} | logging | ||||
function | openshift/openshift-tests-private | e4f7ceef-fadb-4d92-b4b4-dd2031ec049c | deploy | ['"net/http"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/logging/splunk_util.go | func (s *splunkPodServer) deploy(oc *exutil.CLI) {
// Get route URL of splunk service
appDomain, err := getAppDomain(oc)
o.Expect(err).NotTo(o.HaveOccurred())
//splunkd route URL
s.splunkdRoute = s.name + "-splunkd-" + s.namespace + "." + appDomain
//splunkd hec URL
s.hecRoute = s.name + "-hec-" + s.namespace + "." + appDomain
s.webRoute = s.name + "-web-" + s.namespace + "." + appDomain
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-scc-to-user", "nonroot", "-z", "default", "-n", s.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// Create secret used by splunk
switch s.authType {
case "http":
s.deployHTTPSplunk(oc)
case "tls_clientauth":
s.deployCustomCertClientForceSplunk(oc)
case "tls_serveronly":
s.deployCustomCertSplunk(oc)
default:
s.deployHTTPSplunk(oc)
}
//In general, splunk takes about 1 minute to start; wait 30 seconds before calling waitForStatefulsetReady
time.Sleep(30 * time.Second)
waitForStatefulsetReady(oc, s.namespace, s.name)
} | logging | ||||
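For context, a hedged end-to-end sketch (names are illustrative; init must run before deploy so the defaults above are applied):

	sp := splunkPodServer{
		name:      "splunk-aosqe", // illustrative
		namespace: splunkProj,     // an existing test namespace (assumption)
		authType:  "http",
	}
	sp.init()
	defer sp.destroy(oc)
	sp.deploy(oc)
	o.Expect(sp.anyLogFound()).To(o.BeTrue())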
function | openshift/openshift-tests-private | a8bb5843-9932-4c7a-a18f-5e3be679eab7 | deployHTTPSplunk | ['"net/http"', '"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/splunk_util.go | func (s *splunkPodServer) deployHTTPSplunk(oc *exutil.CLI) {
filePath := exutil.FixturePath("testdata", "logging", "external-log-stores", "splunk")
//Create secret for splunk Statefulset
secretTemplate := filepath.Join(filePath, "secret_splunk_template.yaml")
secret := resource{"secret", s.name, s.namespace}
err := secret.applyFromTemplate(oc, "-f", secretTemplate, "-p", "NAME="+secret.name, "-p", "HEC_TOKEN="+s.hecToken, "-p", "PASSWORD="+s.adminPassword)
o.Expect(err).NotTo(o.HaveOccurred())
//create splunk StatefulSet
statefulsetTemplate := filepath.Join(filePath, "statefulset_splunk-"+s.version+"_template.yaml")
splunkSfs := resource{"StatefulSet", s.name, s.namespace}
err = splunkSfs.applyFromTemplate(oc, "-f", statefulsetTemplate, "-p", "NAME="+s.name)
o.Expect(err).NotTo(o.HaveOccurred())
//create route for splunk service
routeHecTemplate := filepath.Join(filePath, "route-edge_splunk_template.yaml")
routeHec := resource{"route", s.name + "-hec", s.namespace}
err = routeHec.applyFromTemplate(oc, "-f", routeHecTemplate, "-p", "NAME="+routeHec.name, "-p", "SERVICE_NAME="+s.serviceName, "-p", "PORT_NAME=http-hec", "-p", "ROUTE_HOST="+s.hecRoute)
o.Expect(err).NotTo(o.HaveOccurred())
routeSplunkdTemplate := filepath.Join(filePath, "route-passthrough_splunk_template.yaml")
routeSplunkd := resource{"route", s.name + "-splunkd", s.namespace}
err = routeSplunkd.applyFromTemplate(oc, "-f", routeSplunkdTemplate, "-p", "NAME="+routeSplunkd.name, "-p", "SERVICE_NAME="+s.serviceName, "-p", "PORT_NAME=https-splunkd", "-p", "ROUTE_HOST="+s.splunkdRoute)
o.Expect(err).NotTo(o.HaveOccurred())
} | logging | ||||
function | openshift/openshift-tests-private | 6d677810-53ae-4dfc-9e7b-d4b280469b47 | genHecPemFile | ['"os"'] | github.com/openshift/openshift-tests-private/test/extended/logging/splunk_util.go | func (s *splunkPodServer) genHecPemFile(hecFile string) error {
// HEC expects an all-in-one PEM: server cert, then private key, then CA cert
	dat1, err := os.ReadFile(s.certFile)
	if err != nil {
		e2e.Logf("Cannot read the certFile %s", s.certFile)
		return err
	}
	dat2, err := os.ReadFile(s.keyFile)
	if err != nil {
		e2e.Logf("Cannot read the keyFile %s", s.keyFile)
		return err
	}
	dat3, err := os.ReadFile(s.caFile)
	if err != nil {
		e2e.Logf("Cannot read the caFile %s", s.caFile)
		return err
	}
	buf := []byte{}
	buf = append(buf, dat1...)
	buf = append(buf, dat2...)
	buf = append(buf, dat3...)
	return os.WriteFile(hecFile, buf, 0644)
}
function | openshift/openshift-tests-private | 578b425d-ff34-4e8e-8a9a-9cb4b9ed4fbc | deployCustomCertSplunk | ['"net/http"', '"os"', '"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/splunk_util.go | func (s *splunkPodServer) deployCustomCertSplunk(oc *exutil.CLI) {
//Create basic secret content for splunk Statefulset
filePath := exutil.FixturePath("testdata", "logging", "external-log-stores", "splunk")
secretTemplate := filepath.Join(filePath, "secret_tls_splunk_template.yaml")
if s.passphrase != "" {
secretTemplate = filepath.Join(filePath, "secret_tls_passphrase_splunk_template.yaml")
}
secret := resource{"secret", s.name, s.namespace}
if s.passphrase != "" {
err := secret.applyFromTemplate(oc, "-f", secretTemplate, "-p", "NAME="+secret.name, "-p", "HEC_TOKEN="+s.hecToken, "-p", "PASSWORD="+s.adminPassword, "-p", "PASSPHASE="+s.passphrase)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
err := secret.applyFromTemplate(oc, "-f", secretTemplate, "-p", "NAME="+secret.name, "-p", "HEC_TOKEN="+s.hecToken, "-p", "PASSWORD="+s.adminPassword)
o.Expect(err).NotTo(o.HaveOccurred())
}
//HEC needs an all-in-one PEM file (server cert, private key and CA concatenated).
hecPemFile := "/tmp/" + getRandomString() + "hecAllKeys.crt"
defer os.Remove(hecPemFile)
err := s.genHecPemFile(hecPemFile)
o.Expect(err).NotTo(o.HaveOccurred())
//The secret will be mounted into splunk pods and used in server.conf, inputs.conf
args := []string{"data", "secret/" + secret.name, "-n", secret.namespace}
args = append(args, "--from-file=hec.pem="+hecPemFile)
args = append(args, "--from-file=ca.pem="+s.caFile)
args = append(args, "--from-file=key.pem="+s.keyFile)
args = append(args, "--from-file=cert.pem="+s.certFile)
if s.passphrase != "" {
args = append(args, "--from-literal=passphrase="+s.passphrase)
}
err = oc.AsAdmin().WithoutNamespace().Run("set").Args(args...).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//create splunk StatefulSet
statefulsetTemplate := filepath.Join(filePath, "statefulset_splunk-"+s.version+"_template.yaml")
splunkSfs := resource{"StatefulSet", s.name, s.namespace}
err = splunkSfs.applyFromTemplate(oc, "-f", statefulsetTemplate, "-p", "NAME="+splunkSfs.name)
o.Expect(err).NotTo(o.HaveOccurred())
//create route for splunk service
routeHecTemplate := filepath.Join(filePath, "route-passthrough_splunk_template.yaml")
routeHec := resource{"route", s.name + "-hec", s.namespace}
err = routeHec.applyFromTemplate(oc, "-f", routeHecTemplate, "-p", "NAME="+routeHec.name, "-p", "SERVICE_NAME="+s.serviceName, "-p", "PORT_NAME=http-hec", "-p", "ROUTE_HOST="+s.hecRoute)
o.Expect(err).NotTo(o.HaveOccurred())
routeSplunkdTemplate := filepath.Join(filePath, "route-passthrough_splunk_template.yaml")
routeSplunkd := resource{"route", s.name + "-splunkd", s.namespace}
err = routeSplunkd.applyFromTemplate(oc, "-f", routeSplunkdTemplate, "-p", "NAME="+routeSplunkd.name, "-p", "SERVICE_NAME="+s.serviceName, "-p", "PORT_NAME=https-splunkd", "-p", "ROUTE_HOST="+s.splunkdRoute)
o.Expect(err).NotTo(o.HaveOccurred())
} | logging | ||||
function | openshift/openshift-tests-private | 90872e13-9931-4980-b00e-6daee07a3aa7 | deployCustomCertClientForceSplunk | ['"net/http"', '"os"', '"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/splunk_util.go | func (s *splunkPodServer) deployCustomCertClientForceSplunk(oc *exutil.CLI) {
//Create secret for splunk Statefulset
filePath := exutil.FixturePath("testdata", "logging", "external-log-stores", "splunk")
secretTemplate := filepath.Join(filePath, "secret_tls_splunk_template.yaml")
if s.passphrase != "" {
secretTemplate = filepath.Join(filePath, "secret_tls_passphrase_splunk_template.yaml")
}
secret := resource{"secret", s.name, s.namespace}
if s.passphrase != "" {
err := secret.applyFromTemplate(oc, "-f", secretTemplate, "-p", "NAME="+secret.name, "-p", "HEC_TOKEN="+s.hecToken, "-p", "PASSWORD="+s.adminPassword, "-p", "HEC_CLIENTAUTH=True", "-p", "PASSPHASE="+s.passphrase)
o.Expect(err).NotTo(o.HaveOccurred())
} else {
err := secret.applyFromTemplate(oc, "-f", secretTemplate, "-p", "NAME="+secret.name, "-p", "HEC_TOKEN="+s.hecToken, "-p", "PASSWORD="+s.adminPassword, "-p", "HEC_CLIENTAUTH=True")
o.Expect(err).NotTo(o.HaveOccurred())
}
//HEC needs an all-in-one PEM file (server cert, private key and CA concatenated).
hecPemFile := "/tmp/" + getRandomString() + "hecAllKeys.crt"
defer os.Remove(hecPemFile)
err := s.genHecPemFile(hecPemFile)
o.Expect(err).NotTo(o.HaveOccurred())
//The secret will be mounted into splunk pods and used in server.conf, inputs.conf
secretArgs := []string{"data", "secret/" + secret.name, "-n", secret.namespace}
secretArgs = append(secretArgs, "--from-file=hec.pem="+hecPemFile)
secretArgs = append(secretArgs, "--from-file=ca.pem="+s.caFile)
secretArgs = append(secretArgs, "--from-file=key.pem="+s.keyFile)
secretArgs = append(secretArgs, "--from-file=cert.pem="+s.certFile)
if s.passphrase != "" {
secretArgs = append(secretArgs, "--from-literal=passphrase="+s.passphrase)
}
err = oc.AsAdmin().WithoutNamespace().Run("set").Args(secretArgs...).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//create splunk StatefulSet
statefulsetTemplate := filepath.Join(filePath, "statefulset_splunk-"+s.version+"_template.yaml")
splunkSfs := resource{"StatefulSet", s.name, s.namespace}
err = splunkSfs.applyFromTemplate(oc, "-f", statefulsetTemplate, "-p", "NAME="+splunkSfs.name)
o.Expect(err).NotTo(o.HaveOccurred())
//create route for splunk service
routeHecTemplate := filepath.Join(filePath, "route-passthrough_splunk_template.yaml")
routeHec := resource{"route", s.name + "-hec", s.namespace}
err = routeHec.applyFromTemplate(oc, "-f", routeHecTemplate, "-p", "NAME="+routeHec.name, "-p", "SERVICE_NAME="+s.serviceName, "-p", "PORT_NAME=http-hec", "-p", "ROUTE_HOST="+s.hecRoute)
o.Expect(err).NotTo(o.HaveOccurred())
routeSplunkdTemplate := filepath.Join(filePath, "route-passthrough_splunk_template.yaml")
routeSplunkd := resource{"route", s.name + "-splunkd", s.namespace}
err = routeSplunkd.applyFromTemplate(oc, "-f", routeSplunkdTemplate, "-p", "NAME="+routeSplunkd.name, "-p", "SERVICE_NAME="+s.serviceName, "-p", "PORT_NAME=https-splunkd", "-p", "ROUTE_HOST="+s.splunkdRoute)
o.Expect(err).NotTo(o.HaveOccurred())
} | logging | ||||
function | openshift/openshift-tests-private | d867cfea-cdb5-4f51-b028-8a348de66c55 | destroy | github.com/openshift/openshift-tests-private/test/extended/logging/splunk_util.go | func (s *splunkPodServer) destroy(oc *exutil.CLI) {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("route", s.name+"-hec", "-n", s.namespace).Execute()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("route", s.name+"-splunkd", "-n", s.namespace).Execute()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("statefulset", s.name, "-n", s.namespace).Execute()
oc.AsAdmin().WithoutNamespace().Run("delete").Args("secret", s.name, "-n", s.namespace).Execute()
oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "remove-scc-from-user", "nonroot", "-z", "default", "-n", s.namespace).Execute()
} | logging | |||||
function | openshift/openshift-tests-private | 972fcf5c-f131-40cf-9440-677e2e64ab84 | createIndexes | ['"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/logging/splunk_util.go | func (s *splunkPodServer) createIndexes(oc *exutil.CLI, indexes ...string) error {
splunkPod, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", s.namespace, "pod", "-l", "app.kubernetes.io/instance="+s.name, "-ojsonpath={.items[0].metadata.name}").Output()
if err != nil {
return fmt.Errorf("error getting splunk pod: %v", err)
}
for _, index := range indexes {
// curl -k -u admin:gjc2t9jx https://localhost:8089/servicesNS/admin/search/data/indexes -d name=devtutorial
cmd := "curl -k -u admin:" + s.adminPassword + " https://localhost:8089/servicesNS/admin/search/data/indexes -d name=" + index
stdout, err := oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("exec").Args("-n", s.namespace, splunkPod, "--", "/bin/sh", "-x", "-c", cmd).Output()
if err != nil {
e2e.Logf("query output: %v", stdout)
return fmt.Errorf("can't create index %s, error: %v", index, err)
}
}
return nil
} | logging | ||||
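A minimal usage sketch of createIndexes (the index name is illustrative, echoing the curl example in the comment above):

	err := sp.createIndexes(oc, "devtutorial")
	o.Expect(err).NotTo(o.HaveOccurred())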
function | openshift/openshift-tests-private | fc3bf88f-f53d-4167-b535-6a79a464c4cf | create | github.com/openshift/openshift-tests-private/test/extended/logging/splunk_util.go | func (toSp *toSplunkSecret) create(oc *exutil.CLI) {
secretArgs := []string{"secret", "generic", toSp.name, "-n", toSp.namespace}
if toSp.hecToken != "" {
secretArgs = append(secretArgs, "--from-literal=hecToken="+toSp.hecToken)
}
if toSp.caFile != "" {
secretArgs = append(secretArgs, "--from-file=ca-bundle.crt="+toSp.caFile)
}
if toSp.keyFile != "" {
secretArgs = append(secretArgs, "--from-file=tls.key="+toSp.keyFile)
}
if toSp.certFile != "" {
secretArgs = append(secretArgs, "--from-file=tls.crt="+toSp.certFile)
}
if toSp.passphrase != "" {
secretArgs = append(secretArgs, "--from-literal=passphrase="+toSp.passphrase)
}
err := oc.AsAdmin().WithoutNamespace().Run("create").Args(secretArgs...).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} | logging | |||||
function | openshift/openshift-tests-private | 753de088-1311-4f26-b67a-4576ba61ef8f | delete | github.com/openshift/openshift-tests-private/test/extended/logging/splunk_util.go | func (toSp *toSplunkSecret) delete(oc *exutil.CLI) {
s := resource{"secret", toSp.name, toSp.namespace}
s.clear(oc)
} | logging | |||||
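A hedged sketch of wiring this secret up for a forwarder test (assumes sp was initialized as in the earlier sketch; clfNS is an assumed test namespace):

	toSp := toSplunkSecret{
		name:      "to-splunk-secret", // illustrative
		namespace: clfNS,
		hecToken:  sp.hecToken,
	}
	defer toSp.delete(oc)
	toSp.create(oc)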
test | openshift/openshift-tests-private | 06a26d56-9762-479d-b72a-e2ed50808b7d | vector_http | import (
"fmt"
"os"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/logging/vector_http.go | package logging
import (
"fmt"
"os"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-openshift-logging] Logging NonPreRelease", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("logfwdhttp", exutil.KubeConfigPath())
loggingBaseDir string
)
g.Context("vector forward logs to external store over http", func() {
g.BeforeEach(func() {
loggingBaseDir = exutil.FixturePath("testdata", "logging")
g.By("deploy CLO")
CLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml"),
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
CLO.SubscribeOperator(oc)
})
// author [email protected]
g.It("Author:anli-CPaasrunOnly-Medium-61253-VA-IAC.03-vector forward logs to fluentdserver over http - mtls", func() {
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
clfNS := oc.Namespace()
g.By("deploy fluentd server")
oc.SetupProject()
fluentdProj := oc.Namespace()
keyPassphrase := getRandomString()
fluentdS := fluentdServer{
serverName: "fluentdtest",
namespace: fluentdProj,
serverAuth: true,
clientAuth: true,
clientPrivateKeyPassphrase: keyPassphrase,
secretName: "to-fluentd-61253",
loggingNS: clfNS,
inPluginType: "http",
}
defer fluentdS.remove(oc)
fluentdS.deploy(oc)
g.By("create clusterlogforwarder/instance")
clf := clusterlogforwarder{
name: "clf-" + getRandomString(),
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "https-output-mtls.yaml"),
secretName: fluentdS.secretName,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL=https://"+fluentdS.serverName+"."+fluentdS.namespace+".svc:24224")
g.By("check logs in fluentd server")
fluentdS.checkData(oc, true, "app.log")
fluentdS.checkData(oc, true, "audit.log")
fluentdS.checkData(oc, true, "infra.log")
})
g.It("Author:anli-CPaasrunOnly-High-60933-vector Forward logs to fluentd over http - https", func() {
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("deploy fluentd server")
oc.SetupProject()
fluentdProj := oc.Namespace()
fluentdS := fluentdServer{
serverName: "fluentdtest",
namespace: fluentdProj,
serverAuth: true,
clientAuth: false,
secretName: "to-fluentd-60933",
loggingNS: fluentdProj,
inPluginType: "http",
}
defer fluentdS.remove(oc)
fluentdS.deploy(oc)
g.By("create clusterlogforwarder/instance")
clf := clusterlogforwarder{
name: "clf-60933",
namespace: fluentdProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "https-output-ca.yaml"),
secretName: fluentdS.secretName,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL=https://"+fluentdS.serverName+"."+fluentdS.namespace+".svc:24224")
g.By("check logs in fluentd server")
fluentdS.checkData(oc, true, "app.log")
fluentdS.checkData(oc, true, "audit.log")
fluentdS.checkData(oc, true, "infra.log")
})
g.It("Author:anli-CPaasrunOnly-Medium-60926-vector Forward logs to fluentd over http - http", func() {
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("deploy fluentd server")
oc.SetupProject()
fluentdProj := oc.Namespace()
fluentdS := fluentdServer{
serverName: "fluentdtest",
namespace: fluentdProj,
serverAuth: false,
clientAuth: false,
loggingNS: fluentdProj,
inPluginType: "http",
}
defer fluentdS.remove(oc)
fluentdS.deploy(oc)
g.By("create clusterlogforwarder/instance")
clf := clusterlogforwarder{
name: "clf-60926",
namespace: fluentdProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "http-output.yaml"),
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL=http://"+fluentdS.serverName+"."+fluentdS.namespace+".svc:24224")
g.By("check logs in fluentd server")
fluentdS.checkData(oc, true, "app.log")
fluentdS.checkData(oc, true, "audit.log")
fluentdS.checkData(oc, true, "infra.log")
})
g.It("Author:anli-CPaasrunOnly-Medium-60936-vector Forward logs to fluentd over http - TLSSkipVerify", func() {
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("deploy fluentd server")
oc.SetupProject()
fluentdProj := oc.Namespace()
fluentdS := fluentdServer{
serverName: "fluentdtest",
namespace: fluentdProj,
serverAuth: true,
clientAuth: false,
secretName: "to-fluentd-60936",
loggingNS: fluentdProj,
inPluginType: "http",
}
defer fluentdS.remove(oc)
fluentdS.deploy(oc)
//Create a fake secret from the cluster root CA, used to exercise TLSSkipVerify
fakeSecret := resource{"secret", "fake-bundle-60936", fluentdProj}
defer fakeSecret.clear(oc)
dirname := "/tmp/60936-keys"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/kube-root-ca.crt", "-n", loggingNS, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", fakeSecret.name, "-n", fakeSecret.namespace, "--from-file=ca-bundle.crt="+dirname+"/ca.crt").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("create clusterlogforwarder/instance")
clf := clusterlogforwarder{
name: "clf-60936",
namespace: fluentdProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "https-output-skipverify.yaml"),
secretName: fakeSecret.name,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL=https://"+fluentdS.serverName+"."+fluentdS.namespace+".svc:24224")
g.By("check logs in fluentd server")
fluentdS.checkData(oc, true, "app.log")
fluentdS.checkData(oc, true, "audit.log")
fluentdS.checkData(oc, true, "infra.log")
})
g.It("Author:ikanse-CPaasrunOnly-High-61567-Collector-External HTTP output sink Fluentd complies with the tlsSecurityProfile configuration.[Slow][Disruptive]", func() {
g.By("Configure the global tlsSecurityProfile to use Old profile")
ogTLS, er := oc.AsAdmin().WithoutNamespace().Run("get").Args("apiserver/cluster", "-o", "jsonpath={.spec.tlsSecurityProfile}").Output()
o.Expect(er).NotTo(o.HaveOccurred())
if ogTLS == "" {
ogTLS = "null"
}
ogPatch := fmt.Sprintf(`[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": %s}]`, ogTLS)
defer func() {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", ogPatch).Execute()
waitForOperatorsRunning(oc)
}()
patch := `[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": {"old":{},"type":"Old"}}]`
er = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patch).Execute()
o.Expect(er).NotTo(o.HaveOccurred())
g.By("Make sure that all the Cluster Operators are in healthy state before progressing.")
waitForOperatorsRunning(oc)
g.By("Deploy the log generator app")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("deploy fluentd server")
oc.SetupProject()
fluentdProj := oc.Namespace()
fluentdS := fluentdServer{
serverName: "fluentdtest",
namespace: fluentdProj,
serverAuth: true,
clientAuth: false,
secretName: "to-fluentd-60933",
loggingNS: fluentdProj,
inPluginType: "http",
}
defer fluentdS.remove(oc)
fluentdS.deploy(oc)
g.By("create clusterlogforwarder/instance")
clf := clusterlogforwarder{
name: "clf-61567",
namespace: fluentdProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "https-61567.yaml"),
secretName: fluentdS.secretName,
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL=https://"+fluentdS.serverName+"."+fluentdS.namespace+".svc:24224")
g.By("The HTTP sink in Vector config must use the Old tlsSecurityProfile")
searchString := `[sinks.output_httpout_app.tls]
min_tls_version = "VersionTLS10"
ciphersuites = "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,DHE-RSA-AES128-GCM-SHA256,DHE-RSA-AES256-GCM-SHA384,DHE-RSA-CHACHA20-POLY1305,ECDHE-ECDSA-AES128-SHA256,ECDHE-RSA-AES128-SHA256,ECDHE-ECDSA-AES128-SHA,ECDHE-RSA-AES128-SHA,ECDHE-ECDSA-AES256-SHA384,ECDHE-RSA-AES256-SHA384,ECDHE-ECDSA-AES256-SHA,ECDHE-RSA-AES256-SHA,DHE-RSA-AES128-SHA256,DHE-RSA-AES256-SHA256,AES128-GCM-SHA256,AES256-GCM-SHA384,AES128-SHA256,AES256-SHA256,AES128-SHA,AES256-SHA,DES-CBC3-SHA"
ca_file = "/var/run/ocp-collector/secrets/to-fluentd-60933/ca-bundle.crt"`
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("check logs in fluentd server")
fluentdS.checkData(oc, true, "app.log")
g.By("Set Intermediate tlsSecurityProfile for the External HTTP output.")
patch = `[{"op": "add", "path": "/spec/outputs/0/tls/securityProfile", "value": {"type": "Intermediate"}}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
g.By("The HTTP sink in Vector config must use the Intermediate tlsSecurityProfile")
searchString = `[sinks.output_httpout_app.tls]
min_tls_version = "VersionTLS12"
ciphersuites = "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,DHE-RSA-AES128-GCM-SHA256,DHE-RSA-AES256-GCM-SHA384"
ca_file = "/var/run/ocp-collector/secrets/to-fluentd-60933/ca-bundle.crt"`
result, err = checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("Check for errors in collector pod logs.")
e2e.Logf("Wait for a minute before the collector logs are generated.")
time.Sleep(60 * time.Second)
collectorLogs, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", clf.namespace, "--selector=app.kubernetes.io/component=collector").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(collectorLogs, "Error trying to connect")).ShouldNot(o.BeTrue(), "Unable to connect to the external HTTP (Fluentd) server.")
g.By("Delete the Fluentdserver pod to recollect logs")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pods", "-n", fluentdProj, "-l", "component=fluentdtest").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodReadyWithLabel(oc, fluentdProj, "component=fluentdtest")
g.By("check logs in fluentd server")
fluentdS.checkData(oc, true, "app.log")
})
g.It("Author:anli-CPaasrunOnly-Critical-65131-VA-IAC.03 mCLF Inputs.receiver.http over http with default values", func() {
clfNS := oc.Namespace()
fluentdNS := clfNS
g.By("deploy fluentd server")
keyPassphase := getRandomString()
fluentdS := fluentdServer{
serverName: "fluentdtest",
namespace: fluentdNS,
serverAuth: true,
clientAuth: true,
clientPrivateKeyPassphrase: keyPassphase,
secretName: "to-fluentd-65131",
loggingNS: clfNS,
inPluginType: "http",
}
defer fluentdS.remove(oc)
fluentdS.deploy(oc)
g.By("create clusterlogforwarder/instance")
clf := clusterlogforwarder{
name: "http-to-http",
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "httpserver-to-httpoutput.yaml"),
secretName: fluentdS.secretName,
serviceAccountName: "clf-" + getRandomString(),
collectAuditLogs: false,
waitForPodReady: true,
}
defer clf.delete(oc)
clf.create(oc, "URL=https://"+fluentdS.serverName+"."+fluentdS.namespace+".svc:24224")
g.By("send two records to httpserver")
o.Expect(postDataToHttpserver(oc, clfNS, "https://"+clf.name+"-httpserver."+clfNS+".svc:8443", `{"data":"record1"}`)).To(o.BeTrue())
o.Expect(postDataToHttpserver(oc, clfNS, "https://"+clf.name+"-httpserver."+clfNS+".svc:8443", `{"data":"record2"}`)).To(o.BeTrue())
g.By("check auditlogs in fluentd server")
fluentdS.checkData(oc, true, "audit.log")
})
})
})
| package logging | ||||
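Several tests in this file assert on the rendered Vector configuration through a checkCollectorConfiguration helper that is defined elsewhere in the suite. The following is a minimal sketch of what such a helper could look like, assuming the rendered vector.toml is stored under the vector.toml key of a secret named <clf-name>-config in the collector namespace; the resource kind, key name, and exact signature are assumptions, not the verified implementation.

// Hypothetical sketch: report whether every expected snippet appears verbatim
// in the rendered collector configuration. Assumes imports: strings, exutil.
func checkCollectorConfiguration(oc *exutil.CLI, ns, name string, searchStrings ...string) (bool, error) {
    // Extract the rendered vector.toml to stdout (assumed to live in a secret).
    cfg, err := oc.AsAdmin().WithoutNamespace().NotShowInfo().Run("extract").Args("secret/"+name, "-n", ns, "--keys=vector.toml", "--to=-").Output()
    if err != nil {
        return false, err
    }
    for _, s := range searchStrings {
        if !strings.Contains(cfg, s) {
            return false, nil
        }
    }
    return true, nil
}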
test case | openshift/openshift-tests-private | 2b51ca61-652d-487c-96b6-8aa745b3b998 | Author:anli-CPaasrunOnly-Medium-61253-VA-IAC.03-vector forward logs to fluentdserver over http - mtls | ['"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/vector_http.go | g.It("Author:anli-CPaasrunOnly-Medium-61253-VA-IAC.03-vector forward logs to fluentdserver over http - mtls", func() {
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
clfNS := oc.Namespace()
g.By("deploy fluentd server")
oc.SetupProject()
fluentdProj := oc.Namespace()
keyPassphase := getRandomString()
fluentdS := fluentdServer{
serverName: "fluentdtest",
namespace: fluentdProj,
serverAuth: true,
clientAuth: true,
clientPrivateKeyPassphrase: keyPassphase,
secretName: "to-fluentd-61253",
loggingNS: clfNS,
inPluginType: "http",
}
defer fluentdS.remove(oc)
fluentdS.deploy(oc)
g.By("create clusterlogforwarder/instance")
clf := clusterlogforwarder{
name: "clf-" + getRandomString(),
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "https-output-mtls.yaml"),
secretName: fluentdS.secretName,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL=https://"+fluentdS.serverName+"."+fluentdS.namespace+".svc:24224")
g.By("check logs in fluentd server")
fluentdS.checkData(oc, true, "app.log")
fluentdS.checkData(oc, true, "audit.log")
fluentdS.checkData(oc, true, "infra.log")
}) | |||||
test case | openshift/openshift-tests-private | 468a9c61-8283-4c56-a10c-16f12fd5b30d | Author:anli-CPaasrunOnly-High-60933-vector Forward logs to fluentd over http - https | ['"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/vector_http.go | g.It("Author:anli-CPaasrunOnly-High-60933-vector Forward logs to fluentd over http - https", func() {
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("deploy fluentd server")
oc.SetupProject()
fluentdProj := oc.Namespace()
fluentdS := fluentdServer{
serverName: "fluentdtest",
namespace: fluentdProj,
serverAuth: true,
clientAuth: false,
secretName: "to-fluentd-60933",
loggingNS: fluentdProj,
inPluginType: "http",
}
defer fluentdS.remove(oc)
fluentdS.deploy(oc)
g.By("create clusterlogforwarder/instance")
clf := clusterlogforwarder{
name: "clf-60933",
namespace: fluentdProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "https-output-ca.yaml"),
secretName: fluentdS.secretName,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL=https://"+fluentdS.serverName+"."+fluentdS.namespace+".svc:24224")
g.By("check logs in fluentd server")
fluentdS.checkData(oc, true, "app.log")
fluentdS.checkData(oc, true, "audit.log")
fluentdS.checkData(oc, true, "infra.log")
}) | |||||
test case | openshift/openshift-tests-private | 7f7eea51-9153-4a3f-b80d-6369dfe7fa0a | Author:anli-CPaasrunOnly-Medium-60926-vector Forward logs to fluentd over http - http | ['"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/vector_http.go | g.It("Author:anli-CPaasrunOnly-Medium-60926-vector Forward logs to fluentd over http - http", func() {
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("deploy fluentd server")
oc.SetupProject()
fluentdProj := oc.Namespace()
fluentdS := fluentdServer{
serverName: "fluentdtest",
namespace: fluentdProj,
serverAuth: false,
clientAuth: false,
loggingNS: fluentdProj,
inPluginType: "http",
}
defer fluentdS.remove(oc)
fluentdS.deploy(oc)
g.By("create clusterlogforwarder/instance")
clf := clusterlogforwarder{
name: "clf-60926",
namespace: fluentdProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "http-output.yaml"),
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL=http://"+fluentdS.serverName+"."+fluentdS.namespace+".svc:24224")
g.By("check logs in fluentd server")
fluentdS.checkData(oc, true, "app.log")
fluentdS.checkData(oc, true, "audit.log")
fluentdS.checkData(oc, true, "infra.log")
}) | |||||
test case | openshift/openshift-tests-private | 2aae19fc-d3a1-4ba0-a837-c80e6ff50da0 | Author:anli-CPaasrunOnly-Medium-60936-vector Forward logs to fluentd over http - TLSSkipVerify | ['"os"', '"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/vector_http.go | g.It("Author:anli-CPaasrunOnly-Medium-60936-vector Forward logs to fluentd over http - TLSSkipVerify", func() {
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("deploy fluentd server")
oc.SetupProject()
fluentdProj := oc.Namespace()
fluentdS := fluentdServer{
serverName: "fluentdtest",
namespace: fluentdProj,
serverAuth: true,
clientAuth: false,
secretName: "to-fluentd-60936",
loggingNS: fluentdProj,
inPluginType: "http",
}
defer fluentdS.remove(oc)
fluentdS.deploy(oc)
// Create a fake secret from the cluster's root CA; it is used to exercise TLSSkipVerify
fakeSecret := resource{"secret", "fake-bundle-60936", fluentdProj}
defer fakeSecret.clear(oc)
dirname := "/tmp/60936-keys"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/kube-root-ca.crt", "-n", loggingNS, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", fakeSecret.name, "-n", fakeSecret.namespace, "--from-file=ca-bundle.crt="+dirname+"/ca.crt").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("create clusterlogforwarder/instance")
clf := clusterlogforwarder{
name: "clf-60936",
namespace: fluentdProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "https-output-skipverify.yaml"),
secretName: fakeSecret.name,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL=https://"+fluentdS.serverName+"."+fluentdS.namespace+".svc:24224")
g.By("check logs in fluentd server")
fluentdS.checkData(oc, true, "app.log")
fluentdS.checkData(oc, true, "audit.log")
fluentdS.checkData(oc, true, "infra.log")
}) | |||||
test case | openshift/openshift-tests-private | 889eca2d-17f9-4a53-84c9-697394be87f2 | Author:ikanse-CPaasrunOnly-High-61567-Collector-External HTTP output sink Fluentd complies with the tlsSecurityProfile configuration.[Slow][Disruptive] | ['"fmt"', '"path/filepath"', '"strings"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/logging/vector_http.go | g.It("Author:ikanse-CPaasrunOnly-High-61567-Collector-External HTTP output sink Fluentd complies with the tlsSecurityProfile configuration.[Slow][Disruptive]", func() {
g.By("Configure the global tlsSecurityProfile to use Old profile")
ogTLS, er := oc.AsAdmin().WithoutNamespace().Run("get").Args("apiserver/cluster", "-o", "jsonpath={.spec.tlsSecurityProfile}").Output()
o.Expect(er).NotTo(o.HaveOccurred())
if ogTLS == "" {
ogTLS = "null"
}
ogPatch := fmt.Sprintf(`[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": %s}]`, ogTLS)
defer func() {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", ogPatch).Execute()
waitForOperatorsRunning(oc)
}()
patch := `[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": {"old":{},"type":"Old"}}]`
er = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patch).Execute()
o.Expect(er).NotTo(o.HaveOccurred())
g.By("Make sure that all the Cluster Operators are in healthy state before progressing.")
waitForOperatorsRunning(oc)
g.By("Deploy the log generator app")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("deploy fluentd server")
oc.SetupProject()
fluentdProj := oc.Namespace()
fluentdS := fluentdServer{
serverName: "fluentdtest",
namespace: fluentdProj,
serverAuth: true,
clientAuth: false,
secretName: "to-fluentd-60933",
loggingNS: fluentdProj,
inPluginType: "http",
}
defer fluentdS.remove(oc)
fluentdS.deploy(oc)
g.By("create clusterlogforwarder/instance")
clf := clusterlogforwarder{
name: "clf-61567",
namespace: fluentdProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "https-61567.yaml"),
secretName: fluentdS.secretName,
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL=https://"+fluentdS.serverName+"."+fluentdS.namespace+".svc:24224")
g.By("The HTTP sink in Vector config must use the Old tlsSecurityProfile")
searchString := `[sinks.output_httpout_app.tls]
min_tls_version = "VersionTLS10"
ciphersuites = "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,DHE-RSA-AES128-GCM-SHA256,DHE-RSA-AES256-GCM-SHA384,DHE-RSA-CHACHA20-POLY1305,ECDHE-ECDSA-AES128-SHA256,ECDHE-RSA-AES128-SHA256,ECDHE-ECDSA-AES128-SHA,ECDHE-RSA-AES128-SHA,ECDHE-ECDSA-AES256-SHA384,ECDHE-RSA-AES256-SHA384,ECDHE-ECDSA-AES256-SHA,ECDHE-RSA-AES256-SHA,DHE-RSA-AES128-SHA256,DHE-RSA-AES256-SHA256,AES128-GCM-SHA256,AES256-GCM-SHA384,AES128-SHA256,AES256-SHA256,AES128-SHA,AES256-SHA,DES-CBC3-SHA"
ca_file = "/var/run/ocp-collector/secrets/to-fluentd-60933/ca-bundle.crt"`
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("check logs in fluentd server")
fluentdS.checkData(oc, true, "app.log")
g.By("Set Intermediate tlsSecurityProfile for the External HTTP output.")
patch = `[{"op": "add", "path": "/spec/outputs/0/tls/securityProfile", "value": {"type": "Intermediate"}}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
g.By("The HTTP sink in Vector config must use the Intermediate tlsSecurityProfile")
searchString = `[sinks.output_httpout_app.tls]
min_tls_version = "VersionTLS12"
ciphersuites = "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,DHE-RSA-AES128-GCM-SHA256,DHE-RSA-AES256-GCM-SHA384"
ca_file = "/var/run/ocp-collector/secrets/to-fluentd-60933/ca-bundle.crt"`
result, err = checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("Check for errors in collector pod logs.")
e2e.Logf("Wait for a minute before the collector logs are generated.")
time.Sleep(60 * time.Second)
collectorLogs, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", clf.namespace, "--selector=app.kubernetes.io/component=collector").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(collectorLogs, "Error trying to connect")).ShouldNot(o.BeTrue(), "Unable to connect to the external HTTP (Fluentd) server.")
g.By("Delete the Fluentdserver pod to recollect logs")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pods", "-n", fluentdProj, "-l", "component=fluentdtest").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodReadyWithLabel(oc, fluentdProj, "component=fluentdtest")
g.By("check logs in fluentd server")
fluentdS.checkData(oc, true, "app.log")
}) | |||||
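Test 61567 above switches the output's tlsSecurityProfile at runtime through clf.update, whose implementation is outside this excerpt. A rough sketch, assuming it is a thin wrapper around oc patch (the resource short name, the unused template-file parameter, and the error handling are guesses):

// Hypothetical sketch: apply an inline patch to the ClusterLogForwarder.
// The call sites in this file only exercise the inline-patch branch, so the
// template-file parameter is ignored here when empty.
func (c clusterlogforwarder) update(oc *exutil.CLI, file, patch string, options ...string) {
    args := []string{"clusterlogforwarder/" + c.name, "-n", c.namespace, "-p", patch}
    args = append(args, options...)
    err := oc.AsAdmin().WithoutNamespace().Run("patch").Args(args...).Execute()
    o.Expect(err).NotTo(o.HaveOccurred())
}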
test case | openshift/openshift-tests-private | 0d1e9670-ee08-4dcf-b22a-9239bac000f1 | Author:anli-CPaasrunOnly-Critical-65131-VA-IAC.03 mCLF Inputs.receiver.http over http with default values | ['"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/vector_http.go | g.It("Author:anli-CPaasrunOnly-Critical-65131-VA-IAC.03 mCLF Inputs.receiver.http over http with default values", func() {
clfNS := oc.Namespace()
fluentdNS := clfNS
g.By("deploy fluentd server")
keyPassphase := getRandomString()
fluentdS := fluentdServer{
serverName: "fluentdtest",
namespace: fluentdNS,
serverAuth: true,
clientAuth: true,
clientPrivateKeyPassphrase: keyPassphase,
secretName: "to-fluentd-65131",
loggingNS: clfNS,
inPluginType: "http",
}
defer fluentdS.remove(oc)
fluentdS.deploy(oc)
g.By("create clusterlogforwarder/instance")
clf := clusterlogforwarder{
name: "http-to-http",
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "httpserver-to-httpoutput.yaml"),
secretName: fluentdS.secretName,
serviceAccountName: "clf-" + getRandomString(),
collectAuditLogs: false,
waitForPodReady: true,
}
defer clf.delete(oc)
clf.create(oc, "URL=https://"+fluentdS.serverName+"."+fluentdS.namespace+".svc:24224")
g.By("send two records to httpserver")
o.Expect(postDataToHttpserver(oc, clfNS, "https://"+clf.name+"-httpserver."+clfNS+".svc:8443", `{"data":"record1"}`)).To(o.BeTrue())
o.Expect(postDataToHttpserver(oc, clfNS, "https://"+clf.name+"-httpserver."+clfNS+".svc:8443", `{"data":"record2"}`)).To(o.BeTrue())
g.By("check auditlogs in fluentd server")
fluentdS.checkData(oc, true, "audit.log")
}) | |||||
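Test 65131 drives the CLF-managed HTTP receiver through postDataToHttpserver, another helper outside this excerpt. A plausible sketch, assuming curl is available in a collector pod, the receiver accepts a service-account bearer token, and certificate verification may be skipped for the self-signed endpoint; the pod selector, token subject, and status-code check are all assumptions.

// Hypothetical sketch: POST one JSON record to the receiver from inside a
// collector pod. Assumes imports: context, fmt, strings, metav1, e2eoutput.
func postDataToHttpserver(oc *exutil.CLI, ns, url, record string) bool {
    pods, err := oc.AdminKubeClient().CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/component=collector"})
    if err != nil || len(pods.Items) == 0 {
        return false
    }
    // Mint a short-lived token for the namespace's default service account.
    token, err := oc.AsAdmin().WithoutNamespace().Run("create").Args("token", "default", "-n", ns).Output()
    if err != nil {
        return false
    }
    cmd := fmt.Sprintf("curl -ksw '%%{http_code}' -o /dev/null -H 'Authorization: Bearer %s' -d '%s' %s", strings.TrimSpace(token), record, url)
    out, err := e2eoutput.RunHostCmd(ns, pods.Items[0].Name, cmd)
    return err == nil && strings.Contains(out, "200")
}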
test | openshift/openshift-tests-private | 82694511-48a5-4829-9bc8-97bc1d409aff | vector_rsyslog | import (
"context"
"fmt"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
) | github.com/openshift/openshift-tests-private/test/extended/logging/vector_rsyslog.go | package logging
import (
"context"
"fmt"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
var _ = g.Describe("[sig-openshift-logging] Logging NonPreRelease", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("vector-syslog", exutil.KubeConfigPath())
loggingBaseDir string
)
g.Context("Test logforwarding to syslog via vector as collector", func() {
g.BeforeEach(func() {
loggingBaseDir = exutil.FixturePath("testdata", "logging")
CLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml"),
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
g.By("Deploy CLO")
CLO.SubscribeOperator(oc)
oc.SetupProject()
})
// author [email protected]
g.It("Author:gkarager-CPaasrunOnly-High-60699-Vector-Forward logs to syslog(RFCRFCThirtyOneSixtyFour)", func() {
g.By("Create log producer")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploy rsyslog server")
oc.SetupProject()
syslogProj := oc.Namespace()
rsyslog := rsyslog{
serverName: "rsyslog",
namespace: syslogProj,
tls: false,
loggingNS: syslogProj,
}
defer rsyslog.remove(oc)
rsyslog.deploy(oc)
g.By("Create clusterlogforwarder/instance")
clf := clusterlogforwarder{
name: "clf-60699",
namespace: syslogProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "syslog.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "RFC=RFC3164", "URL=udp://"+rsyslog.serverName+"."+rsyslog.namespace+".svc:514")
g.By("Check logs in rsyslog server")
rsyslog.checkData(oc, true, "app-container.log")
rsyslog.checkData(oc, true, "infra-container.log")
rsyslog.checkData(oc, true, "audit.log")
rsyslog.checkData(oc, true, "infra.log")
})
g.It("Author:gkarager-CPaasrunOnly-WRS-Critical-61479-VA-IAC.03-Vector-Forward logs to syslog(tls)", func() {
g.By("Create log producer")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploy rsyslog server")
oc.SetupProject()
syslogProj := oc.Namespace()
rsyslog := rsyslog{
serverName: "rsyslog",
namespace: syslogProj,
tls: true,
secretName: "rsyslog-tls",
loggingNS: syslogProj,
}
defer rsyslog.remove(oc)
rsyslog.deploy(oc)
g.By("Create clusterlogforwarder/instance")
clf := clusterlogforwarder{
name: "instance",
namespace: syslogProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "rsyslog-serverAuth.yaml"),
secretName: rsyslog.secretName,
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL=tls://"+rsyslog.serverName+"."+rsyslog.namespace+".svc:6514")
g.By("Check logs in rsyslog server")
rsyslog.checkData(oc, true, "app-container.log")
rsyslog.checkData(oc, true, "infra-container.log")
rsyslog.checkData(oc, true, "audit.log")
rsyslog.checkData(oc, true, "infra.log")
})
g.It("Author:gkarager-CPaasrunOnly-High-61477-Vector-Forward logs to syslog - mtls with private key passphrase", func() {
g.By("Create log producer")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
clfNS := oc.Namespace()
g.By("Deploy rsyslog server")
oc.SetupProject()
syslogProj := oc.Namespace()
rsyslog := rsyslog{
serverName: "rsyslog",
namespace: syslogProj,
tls: true,
loggingNS: clfNS,
clientKeyPassphrase: "test-rsyslog-mtls",
secretName: "rsyslog-mtls",
}
defer rsyslog.remove(oc)
rsyslog.deploy(oc)
g.By("Create clusterlogforwarder/instance")
clf := clusterlogforwarder{
name: "clf-61477",
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "rsyslog-mtls.yaml"),
secretName: rsyslog.secretName,
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL=tls://"+rsyslog.serverName+"."+rsyslog.namespace+".svc:6514")
g.By("Check logs in rsyslog server")
rsyslog.checkData(oc, true, "app-container.log")
rsyslog.checkData(oc, true, "infra-container.log")
rsyslog.checkData(oc, true, "audit.log")
rsyslog.checkData(oc, true, "infra.log")
searchString := `key_file = "/var/run/ocp-collector/secrets/rsyslog-mtls/tls.key"
crt_file = "/var/run/ocp-collector/secrets/rsyslog-mtls/tls.crt"
ca_file = "/var/run/ocp-collector/secrets/rsyslog-mtls/ca-bundle.crt"`
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString, rsyslog.clientKeyPassphrase)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue())
})
g.It("Author:ikanse-CPaasrunOnly-High-62527-Collector External syslog output complies with the tlsSecurityProfile configuration.[Slow][Disruptive]", func() {
g.By("Configure the global tlsSecurityProfile to use custom profile")
ogTLS, er := oc.AsAdmin().WithoutNamespace().Run("get").Args("apiserver/cluster", "-o", "jsonpath={.spec.tlsSecurityProfile}").Output()
o.Expect(er).NotTo(o.HaveOccurred())
if ogTLS == "" {
ogTLS = "null"
}
ogPatch := fmt.Sprintf(`[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": %s}]`, ogTLS)
defer func() {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", ogPatch).Execute()
waitForOperatorsRunning(oc)
}()
patch := `[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": {"custom":{"ciphers":["ECDHE-ECDSA-CHACHA20-POLY1305","ECDHE-RSA-CHACHA20-POLY1305","ECDHE-RSA-AES128-GCM-SHA256","ECDHE-ECDSA-AES128-GCM-SHA256"],"minTLSVersion":"VersionTLS12"},"type":"Custom"}}]`
er = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patch).Execute()
o.Expect(er).NotTo(o.HaveOccurred())
g.By("Make sure that all the Cluster Operators are in healthy state before progressing.")
waitForOperatorsRunning(oc)
g.By("Create log producer")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploy rsyslog server")
oc.SetupProject()
syslogProj := oc.Namespace()
rsyslog := rsyslog{
serverName: "rsyslog",
namespace: syslogProj,
tls: true,
secretName: "rsyslog-tls",
loggingNS: syslogProj,
}
defer rsyslog.remove(oc)
rsyslog.deploy(oc)
g.By("Create clusterlogforwarder")
clf := clusterlogforwarder{
name: "clf-62527",
namespace: syslogProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "rsyslog-serverAuth.yaml"),
secretName: rsyslog.secretName,
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL=tls://"+rsyslog.serverName+"."+rsyslog.namespace+".svc:6514")
g.By("The Syslog sink in Vector config must use the Custom tlsSecurityProfile")
searchString := `[sinks.output_external_syslog.tls]
enabled = true
min_tls_version = "VersionTLS12"
ciphersuites = "ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES128-GCM-SHA256"
ca_file = "/var/run/ocp-collector/secrets/rsyslog-tls/ca-bundle.crt"`
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("Check logs in rsyslog server")
rsyslog.checkData(oc, true, "app-container.log")
rsyslog.checkData(oc, true, "infra-container.log")
rsyslog.checkData(oc, true, "audit.log")
rsyslog.checkData(oc, true, "infra.log")
g.By("Set Intermediate tlsSecurityProfile for the External Syslog output.")
patch = `[{"op": "add", "path": "/spec/outputs/0/tls/securityProfile", "value": {"type": "Intermediate"}}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
g.By("The Syslog sink in Vector config must use the Intermediate tlsSecurityProfile")
searchString = `[sinks.output_external_syslog.tls]
enabled = true
min_tls_version = "VersionTLS12"
ciphersuites = "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,DHE-RSA-AES128-GCM-SHA256,DHE-RSA-AES256-GCM-SHA384"
ca_file = "/var/run/ocp-collector/secrets/rsyslog-tls/ca-bundle.crt"`
result, err = checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("Check for errors in collector pod logs.")
e2e.Logf("Wait for a minute before the collector logs are generated.")
time.Sleep(60 * time.Second)
collectorLogs, err := oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("logs").Args("-n", clf.namespace, "--selector=app.kubernetes.io/component=collector").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(collectorLogs, "Error trying to connect")).ShouldNot(o.BeTrue(), "Unable to connect to the external Syslog server.")
g.By("Delete the rsyslog pod to recollect logs")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pods", "-n", syslogProj, "-l", "component=rsyslog").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodReadyWithLabel(oc, syslogProj, "component=rsyslog")
g.By("Check logs in rsyslog server")
rsyslog.checkData(oc, true, "app-container.log")
rsyslog.checkData(oc, true, "infra-container.log")
rsyslog.checkData(oc, true, "audit.log")
rsyslog.checkData(oc, true, "infra.log")
})
g.It("Author:qitang-CPaasrunOnly-Medium-71143-Collect or exclude audit logs.", func() {
exutil.By("Deploy rsyslog server")
syslogProj := oc.Namespace()
rsyslog := rsyslog{
serverName: "rsyslog",
namespace: syslogProj,
tls: true,
secretName: "rsyslog-tls",
loggingNS: syslogProj,
}
defer rsyslog.remove(oc)
rsyslog.deploy(oc)
exutil.By("Create clusterlogforwarder")
clf := clusterlogforwarder{
name: "clf-71143",
namespace: syslogProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "rsyslog-serverAuth.yaml"),
secretName: rsyslog.secretName,
waitForPodReady: true,
collectAuditLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL=tls://"+rsyslog.serverName+"."+rsyslog.namespace+".svc:6514", "INPUTREFS=[\"audit\"]")
exutil.By("Check logs in rsyslog server")
rsyslog.checkData(oc, true, "audit.log")
exutil.By("Update CLF to collect linux audit logs")
patch := `[{"op": "add", "path": "/spec/inputs", "value": [{"name": "selected-audit", "type": "audit", "audit": {"sources":["auditd"]}}]},{"op": "replace", "path": "/spec/pipelines/0/inputRefs", "value": ["selected-audit"]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "-n", rsyslog.namespace, "-l", "component="+rsyslog.serverName).Execute()
WaitForDeploymentPodsToBeReady(oc, rsyslog.namespace, rsyslog.serverName)
nodeName, err := genLinuxAuditLogsOnWorker(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer deleteLinuxAuditPolicyFromNode(oc, nodeName)
exutil.By("Check data in log store, only linux audit logs should be collected")
rsyslog.checkData(oc, true, "audit-linux.log")
rsyslog.checkData(oc, false, "audit-ovn.log")
rsyslog.checkData(oc, false, "audit-kubeAPI.log")
rsyslog.checkData(oc, false, "audit-openshiftAPI.log")
exutil.By("Update CLF to collect kubeAPI audit logs")
patch = `[{"op": "replace", "path": "/spec/inputs/0/audit/sources", "value": ["kubeAPI"]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "-n", rsyslog.namespace, "-l", "component="+rsyslog.serverName).Execute()
WaitForDeploymentPodsToBeReady(oc, rsyslog.namespace, rsyslog.serverName)
exutil.By("Check data in log store, only kubeAPI audit logs should be collected")
rsyslog.checkData(oc, true, "audit-kubeAPI.log")
rsyslog.checkData(oc, false, "audit-linux.log")
rsyslog.checkData(oc, false, "audit-ovn.log")
rsyslog.checkData(oc, false, "audit-openshiftAPI.log")
exutil.By("Update CLF to collect openshiftAPI audit logs")
patch = `[{"op": "replace", "path": "/spec/inputs/0/audit/sources", "value": ["openshiftAPI"]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "-n", rsyslog.namespace, "-l", "component="+rsyslog.serverName).Execute()
WaitForDeploymentPodsToBeReady(oc, rsyslog.namespace, rsyslog.serverName)
exutil.By("Check data in log store, only openshiftAPI audit logs should be collected")
rsyslog.checkData(oc, true, "audit-openshiftAPI.log")
rsyslog.checkData(oc, false, "audit-kubeAPI.log")
rsyslog.checkData(oc, false, "audit-linux.log")
rsyslog.checkData(oc, false, "audit-ovn.log")
if strings.Contains(checkNetworkType(oc), "ovnkubernetes") {
exutil.By("Update CLF to collect OVN audit logs")
patch := `[{"op": "replace", "path": "/spec/inputs/0/audit/sources", "value": ["ovn"]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "-n", rsyslog.namespace, "-l", "component="+rsyslog.serverName).Execute()
WaitForDeploymentPodsToBeReady(oc, rsyslog.namespace, rsyslog.serverName)
exutil.By("Create a test project, enable OVN network log collection on it, add the OVN log app and network policies for the project")
oc.SetupProject()
ovnProj := oc.Namespace()
ovn := resource{"deployment", "ovn-app", ovnProj}
ovnAuditTemplate := filepath.Join(loggingBaseDir, "generatelog", "42981.yaml")
err := ovn.applyFromTemplate(oc, "-n", ovn.namespace, "-f", ovnAuditTemplate, "-p", "NAMESPACE="+ovn.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
WaitForDeploymentPodsToBeReady(oc, ovnProj, ovn.name)
g.By("Access the OVN app pod from another pod in the same project to generate OVN ACL messages")
ovnPods, err := oc.AdminKubeClient().CoreV1().Pods(ovnProj).List(context.Background(), metav1.ListOptions{LabelSelector: "app=ovn-app"})
o.Expect(err).NotTo(o.HaveOccurred())
podIP := ovnPods.Items[0].Status.PodIP
e2e.Logf("Pod IP is %s ", podIP)
var ovnCurl string
if strings.Contains(podIP, ":") {
ovnCurl = "curl --globoff [" + podIP + "]:8080"
} else {
ovnCurl = "curl --globoff " + podIP + ":8080"
}
_, err = e2eoutput.RunHostCmdWithRetries(ovnProj, ovnPods.Items[1].Name, ovnCurl, 3*time.Second, 30*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Check for the generated OVN audit logs on the OpenShift cluster nodes")
nodeLogs, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("-n", ovnProj, "node-logs", "-l", "beta.kubernetes.io/os=linux", "--path=/ovn/acl-audit-log.log").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(nodeLogs, ovnProj)).Should(o.BeTrue(), "The OVN logs don't contain logs from project %s", ovnProj)
exutil.By("Check data in log store, only ovn audit logs should be collected")
rsyslog.checkData(oc, true, "audit-ovn.log")
rsyslog.checkData(oc, false, "audit-kubeAPI.log")
rsyslog.checkData(oc, false, "audit-openshiftAPI.log")
rsyslog.checkData(oc, false, "audit-linux.log")
}
})
})
})
| package logging | ||||
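Test 71143 in this file generates linux audit records through genLinuxAuditLogsOnWorker and cleans up with deleteLinuxAuditPolicyFromNode; neither helper is shown in this excerpt. A minimal sketch, assuming auditctl is available on the node's host filesystem and that a file watch is a sufficient trigger (the watched path and rule key are illustrative):

// Hypothetical sketch: install a temporary auditd watch on one worker so reads
// and writes of /etc/hosts emit audit records, then trigger one event.
func genLinuxAuditLogsOnWorker(oc *exutil.CLI) (string, error) {
    workers, err := exutil.GetClusterNodesBy(oc, "worker")
    if err != nil || len(workers) == 0 {
        return "", fmt.Errorf("no worker nodes available: %v", err)
    }
    node := workers[0]
    cmd := "auditctl -w /etc/hosts -p rwa -k logging-qe-test && cat /etc/hosts > /dev/null"
    _, err = exutil.DebugNodeWithOptionsAndChroot(oc, node, []string{"-q"}, "/bin/sh", "-c", cmd)
    return node, err
}

// Hypothetical sketch: remove the watch installed above (best effort).
func deleteLinuxAuditPolicyFromNode(oc *exutil.CLI, node string) {
    _, _ = exutil.DebugNodeWithOptionsAndChroot(oc, node, []string{"-q"}, "/bin/sh", "-c", "auditctl -W /etc/hosts -p rwa -k logging-qe-test")
}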
test case | openshift/openshift-tests-private | adac3d8d-39cd-4662-a6bf-b5b333040dfc | Author:gkarager-CPaasrunOnly-High-60699-Vector-Forward logs to syslog(RFC3164) | ['"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/vector_rsyslog.go | g.It("Author:gkarager-CPaasrunOnly-High-60699-Vector-Forward logs to syslog(RFC3164)", func() {
g.By("Create log producer")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploy rsyslog server")
oc.SetupProject()
syslogProj := oc.Namespace()
rsyslog := rsyslog{
serverName: "rsyslog",
namespace: syslogProj,
tls: false,
loggingNS: syslogProj,
}
defer rsyslog.remove(oc)
rsyslog.deploy(oc)
g.By("Create clusterlogforwarder/instance")
clf := clusterlogforwarder{
name: "clf-60699",
namespace: syslogProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "syslog.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "RFC=RFC3164", "URL=udp://"+rsyslog.serverName+"."+rsyslog.namespace+".svc:514")
g.By("Check logs in rsyslog server")
rsyslog.checkData(oc, true, "app-container.log")
rsyslog.checkData(oc, true, "infra-container.log")
rsyslog.checkData(oc, true, "audit.log")
rsyslog.checkData(oc, true, "infra.log")
}) | |||||
test case | openshift/openshift-tests-private | e7e8bba6-2b5c-442c-9d73-03fc65ff759f | Author:gkarager-CPaasrunOnly-WRS-Critical-61479-VA-IAC.03-Vector-Forward logs to syslog(tls) | ['"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/vector_rsyslog.go | g.It("Author:gkarager-CPaasrunOnly-WRS-Critical-61479-VA-IAC.03-Vector-Forward logs to syslog(tls)", func() {
g.By("Create log producer")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploy rsyslog server")
oc.SetupProject()
syslogProj := oc.Namespace()
rsyslog := rsyslog{
serverName: "rsyslog",
namespace: syslogProj,
tls: true,
secretName: "rsyslog-tls",
loggingNS: syslogProj,
}
defer rsyslog.remove(oc)
rsyslog.deploy(oc)
g.By("Create clusterlogforwarder/instance")
clf := clusterlogforwarder{
name: "instance",
namespace: syslogProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "rsyslog-serverAuth.yaml"),
secretName: rsyslog.secretName,
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL=tls://"+rsyslog.serverName+"."+rsyslog.namespace+".svc:6514")
g.By("Check logs in rsyslog server")
rsyslog.checkData(oc, true, "app-container.log")
rsyslog.checkData(oc, true, "infra-container.log")
rsyslog.checkData(oc, true, "audit.log")
rsyslog.checkData(oc, true, "infra.log")
}) | |||||
test case | openshift/openshift-tests-private | 0abb3028-e948-43d2-9d61-d3a0300e5b22 | Author:gkarager-CPaasrunOnly-High-61477-Vector-Forward logs to syslog - mtls with private key passphrase | ['"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/vector_rsyslog.go | g.It("Author:gkarager-CPaasrunOnly-High-61477-Vector-Forward logs to syslog - mtls with private key passphrase", func() {
g.By("Create log producer")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
clfNS := oc.Namespace()
g.By("Deploy rsyslog server")
oc.SetupProject()
syslogProj := oc.Namespace()
rsyslog := rsyslog{
serverName: "rsyslog",
namespace: syslogProj,
tls: true,
loggingNS: clfNS,
clientKeyPassphrase: "test-rsyslog-mtls",
secretName: "rsyslog-mtls",
}
defer rsyslog.remove(oc)
rsyslog.deploy(oc)
g.By("Create clusterlogforwarder/instance")
clf := clusterlogforwarder{
name: "clf-61477",
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "rsyslog-mtls.yaml"),
secretName: rsyslog.secretName,
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL=tls://"+rsyslog.serverName+"."+rsyslog.namespace+".svc:6514")
g.By("Check logs in rsyslog server")
rsyslog.checkData(oc, true, "app-container.log")
rsyslog.checkData(oc, true, "infra-container.log")
rsyslog.checkData(oc, true, "audit.log")
rsyslog.checkData(oc, true, "infra.log")
searchString := `key_file = "/var/run/ocp-collector/secrets/rsyslog-mtls/tls.key"
crt_file = "/var/run/ocp-collector/secrets/rsyslog-mtls/tls.crt"
ca_file = "/var/run/ocp-collector/secrets/rsyslog-mtls/ca-bundle.crt"`
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString, rsyslog.clientKeyPassphrase)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | efa14a87-ce46-41e1-bc5b-a090d3840422 | Author:ikanse-CPaasrunOnly-High-62527-Collector External syslog output complies with the tlsSecurityProfile configuration.[Slow][Disruptive] | ['"fmt"', '"path/filepath"', '"strings"', '"time"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"'] | github.com/openshift/openshift-tests-private/test/extended/logging/vector_rsyslog.go | g.It("Author:ikanse-CPaasrunOnly-High-62527-Collector External syslog output complies with the tlsSecurityProfile configuration.[Slow][Disruptive]", func() {
g.By("Configure the global tlsSecurityProfile to use custom profile")
ogTLS, er := oc.AsAdmin().WithoutNamespace().Run("get").Args("apiserver/cluster", "-o", "jsonpath={.spec.tlsSecurityProfile}").Output()
o.Expect(er).NotTo(o.HaveOccurred())
if ogTLS == "" {
ogTLS = "null"
}
ogPatch := fmt.Sprintf(`[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": %s}]`, ogTLS)
defer func() {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", ogPatch).Execute()
waitForOperatorsRunning(oc)
}()
patch := `[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": {"custom":{"ciphers":["ECDHE-ECDSA-CHACHA20-POLY1305","ECDHE-RSA-CHACHA20-POLY1305","ECDHE-RSA-AES128-GCM-SHA256","ECDHE-ECDSA-AES128-GCM-SHA256"],"minTLSVersion":"VersionTLS12"},"type":"Custom"}}]`
er = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patch).Execute()
o.Expect(er).NotTo(o.HaveOccurred())
g.By("Make sure that all the Cluster Operators are in healthy state before progressing.")
waitForOperatorsRunning(oc)
g.By("Create log producer")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploy rsyslog server")
oc.SetupProject()
syslogProj := oc.Namespace()
rsyslog := rsyslog{
serverName: "rsyslog",
namespace: syslogProj,
tls: true,
secretName: "rsyslog-tls",
loggingNS: syslogProj,
}
defer rsyslog.remove(oc)
rsyslog.deploy(oc)
g.By("Create clusterlogforwarder")
clf := clusterlogforwarder{
name: "clf-62527",
namespace: syslogProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "rsyslog-serverAuth.yaml"),
secretName: rsyslog.secretName,
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL=tls://"+rsyslog.serverName+"."+rsyslog.namespace+".svc:6514")
g.By("The Syslog sink in Vector config must use the Custom tlsSecurityProfile")
searchString := `[sinks.output_external_syslog.tls]
enabled = true
min_tls_version = "VersionTLS12"
ciphersuites = "ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES128-GCM-SHA256"
ca_file = "/var/run/ocp-collector/secrets/rsyslog-tls/ca-bundle.crt"`
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("Check logs in rsyslog server")
rsyslog.checkData(oc, true, "app-container.log")
rsyslog.checkData(oc, true, "infra-container.log")
rsyslog.checkData(oc, true, "audit.log")
rsyslog.checkData(oc, true, "infra.log")
g.By("Set Intermediate tlsSecurityProfile for the External Syslog output.")
patch = `[{"op": "add", "path": "/spec/outputs/0/tls/securityProfile", "value": {"type": "Intermediate"}}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
g.By("The Syslog sink in Vector config must use the Intermediate tlsSecurityProfile")
searchString = `[sinks.output_external_syslog.tls]
enabled = true
min_tls_version = "VersionTLS12"
ciphersuites = "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,DHE-RSA-AES128-GCM-SHA256,DHE-RSA-AES256-GCM-SHA384"
ca_file = "/var/run/ocp-collector/secrets/rsyslog-tls/ca-bundle.crt"`
result, err = checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("Check for errors in collector pod logs.")
e2e.Logf("Wait for a minute before the collector logs are generated.")
time.Sleep(60 * time.Second)
collectorLogs, err := oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("logs").Args("-n", clf.namespace, "--selector=app.kubernetes.io/component=collector").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(collectorLogs, "Error trying to connect")).ShouldNot(o.BeTrue(), "Unable to connect to the external Syslog server.")
g.By("Delete the rsyslog pod to recollect logs")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pods", "-n", syslogProj, "-l", "component=rsyslog").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodReadyWithLabel(oc, syslogProj, "component=rsyslog")
g.By("Check logs in rsyslog server")
rsyslog.checkData(oc, true, "app-container.log")
rsyslog.checkData(oc, true, "infra-container.log")
rsyslog.checkData(oc, true, "audit.log")
rsyslog.checkData(oc, true, "infra.log")
}) | |||||
test case | openshift/openshift-tests-private | c674cd10-28fe-43ab-a1ce-2dd6a5692603 | Author:qitang-CPaasrunOnly-Medium-71143-Collect or exclude audit logs. | ['"context"', '"path/filepath"', '"strings"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/logging/vector_rsyslog.go | g.It("Author:qitang-CPaasrunOnly-Medium-71143-Collect or exclude audit logs.", func() {
exutil.By("Deploy rsyslog server")
syslogProj := oc.Namespace()
rsyslog := rsyslog{
serverName: "rsyslog",
namespace: syslogProj,
tls: true,
secretName: "rsyslog-tls",
loggingNS: syslogProj,
}
defer rsyslog.remove(oc)
rsyslog.deploy(oc)
exutil.By("Create clusterlogforwarder")
clf := clusterlogforwarder{
name: "clf-71143",
namespace: syslogProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "rsyslog-serverAuth.yaml"),
secretName: rsyslog.secretName,
waitForPodReady: true,
collectAuditLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "URL=tls://"+rsyslog.serverName+"."+rsyslog.namespace+".svc:6514", "INPUTREFS=[\"audit\"]")
exutil.By("Check logs in rsyslog server")
rsyslog.checkData(oc, true, "audit.log")
exutil.By("Update CLF to collect linux audit logs")
patch := `[{"op": "add", "path": "/spec/inputs", "value": [{"name": "selected-audit", "type": "audit", "audit": {"sources":["auditd"]}}]},{"op": "replace", "path": "/spec/pipelines/0/inputRefs", "value": ["selected-audit"]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "-n", rsyslog.namespace, "-l", "component="+rsyslog.serverName).Execute()
WaitForDeploymentPodsToBeReady(oc, rsyslog.namespace, rsyslog.serverName)
nodeName, err := genLinuxAuditLogsOnWorker(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer deleteLinuxAuditPolicyFromNode(oc, nodeName)
exutil.By("Check data in log store, only linux audit logs should be collected")
rsyslog.checkData(oc, true, "audit-linux.log")
rsyslog.checkData(oc, false, "audit-ovn.log")
rsyslog.checkData(oc, false, "audit-kubeAPI.log")
rsyslog.checkData(oc, false, "audit-openshiftAPI.log")
exutil.By("Update CLF to collect kubeAPI audit logs")
patch = `[{"op": "replace", "path": "/spec/inputs/0/audit/sources", "value": ["kubeAPI"]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "-n", rsyslog.namespace, "-l", "component="+rsyslog.serverName).Execute()
WaitForDeploymentPodsToBeReady(oc, rsyslog.namespace, rsyslog.serverName)
exutil.By("Check data in log store, only kubeAPI audit logs should be collected")
rsyslog.checkData(oc, true, "audit-kubeAPI.log")
rsyslog.checkData(oc, false, "audit-linux.log")
rsyslog.checkData(oc, false, "audit-ovn.log")
rsyslog.checkData(oc, false, "audit-openshiftAPI.log")
exutil.By("Update CLF to collect openshiftAPI audit logs")
patch = `[{"op": "replace", "path": "/spec/inputs/0/audit/sources", "value": ["openshiftAPI"]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "-n", rsyslog.namespace, "-l", "component="+rsyslog.serverName).Execute()
WaitForDeploymentPodsToBeReady(oc, rsyslog.namespace, rsyslog.serverName)
exutil.By("Check data in log store, only openshiftAPI audit logs should be collected")
rsyslog.checkData(oc, true, "audit-openshiftAPI.log")
rsyslog.checkData(oc, false, "audit-kubeAPI.log")
rsyslog.checkData(oc, false, "audit-linux.log")
rsyslog.checkData(oc, false, "audit-ovn.log")
if strings.Contains(checkNetworkType(oc), "ovnkubernetes") {
exutil.By("Update CLF to collect OVN audit logs")
patch := `[{"op": "replace", "path": "/spec/inputs/0/audit/sources", "value": ["ovn"]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", "-n", rsyslog.namespace, "-l", "component="+rsyslog.serverName).Execute()
WaitForDeploymentPodsToBeReady(oc, rsyslog.namespace, rsyslog.serverName)
exutil.By("Create a test project, enable OVN network log collection on it, add the OVN log app and network policies for the project")
oc.SetupProject()
ovnProj := oc.Namespace()
ovn := resource{"deployment", "ovn-app", ovnProj}
ovnAuditTemplate := filepath.Join(loggingBaseDir, "generatelog", "42981.yaml")
err := ovn.applyFromTemplate(oc, "-n", ovn.namespace, "-f", ovnAuditTemplate, "-p", "NAMESPACE="+ovn.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
WaitForDeploymentPodsToBeReady(oc, ovnProj, ovn.name)
g.By("Access the OVN app pod from another pod in the same project to generate OVN ACL messages")
ovnPods, err := oc.AdminKubeClient().CoreV1().Pods(ovnProj).List(context.Background(), metav1.ListOptions{LabelSelector: "app=ovn-app"})
o.Expect(err).NotTo(o.HaveOccurred())
podIP := ovnPods.Items[0].Status.PodIP
e2e.Logf("Pod IP is %s ", podIP)
var ovnCurl string
if strings.Contains(podIP, ":") {
ovnCurl = "curl --globoff [" + podIP + "]:8080"
} else {
ovnCurl = "curl --globoff " + podIP + ":8080"
}
_, err = e2eoutput.RunHostCmdWithRetries(ovnProj, ovnPods.Items[1].Name, ovnCurl, 3*time.Second, 30*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Check for the generated OVN audit logs on the OpenShift cluster nodes")
nodeLogs, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("-n", ovnProj, "node-logs", "-l", "beta.kubernetes.io/os=linux", "--path=/ovn/acl-audit-log.log").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(nodeLogs, ovnProj)).Should(o.BeTrue(), "The OVN logs don't contain logs from project %s", ovnProj)
exutil.By("Check data in log store, only ovn audit logs should be collected")
rsyslog.checkData(oc, true, "audit-ovn.log")
rsyslog.checkData(oc, false, "audit-kubeAPI.log")
rsyslog.checkData(oc, false, "audit-openshiftAPI.log")
rsyslog.checkData(oc, false, "audit-linux.log")
}
}) | |||||
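The rsyslog fixture's checkData calls are the main assertion primitive in these tests. A minimal sketch of what it could look like, assuming the server routes each source to its own file under /var/log/clf/ and that the pod carries the component=<serverName> label; the path, selector, and timeouts are assumptions.

// Hypothetical sketch: poll the rsyslog pod until the given log file appears
// (expect=true) or until the retries exhaust the timeout (expect=false).
// Assumes imports: context, time, metav1, e2eoutput, gomega.
func (r rsyslog) checkData(oc *exutil.CLI, expect bool, filename string) {
    pods, err := oc.AdminKubeClient().CoreV1().Pods(r.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "component=" + r.serverName})
    o.Expect(err).NotTo(o.HaveOccurred())
    o.Expect(pods.Items).NotTo(o.BeEmpty())
    out, err := e2eoutput.RunHostCmdWithRetries(r.namespace, pods.Items[0].Name, "ls /var/log/clf/"+filename, 10*time.Second, 180*time.Second)
    if expect {
        o.Expect(err).NotTo(o.HaveOccurred(), "log file %s did not appear in the rsyslog pod", filename)
        o.Expect(out).To(o.ContainSubstring(filename))
    } else {
        o.Expect(err).To(o.HaveOccurred(), "log file %s unexpectedly exists in the rsyslog pod", filename)
    }
}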
test | openshift/openshift-tests-private | 632c2c2a-6ab5-4793-8fb6-0dc5870b2ec7 | vector_es | import (
"context"
"encoding/json"
"fmt"
"path/filepath"
"reflect"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
) | github.com/openshift/openshift-tests-private/test/extended/logging/vector_es.go | package logging
import (
"context"
"encoding/json"
"fmt"
"path/filepath"
"reflect"
"strconv"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)
var _ = g.Describe("[sig-openshift-logging] Logging NonPreRelease", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("vector-es", exutil.KubeConfigPath())
loggingBaseDir string
)
g.Context("Vector User-Managed-ES tests", func() {
g.BeforeEach(func() {
loggingBaseDir = exutil.FixturePath("testdata", "logging")
g.By("deploy CLO")
CLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml"),
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
CLO.SubscribeOperator(oc)
oc.SetupProject()
})
g.It("CPaasrunOnly-Author:ikanse-Critical-49390-Vector Collecting Kubernetes events using event router", func() {
g.By("Create external Elasticsearch instance")
esProj := oc.Namespace()
es := externalES{
namespace: esProj,
version: "8",
serverName: "elasticsearch-server",
httpSSL: true,
clientAuth: true,
secretName: "ees-49390",
loggingNS: esProj,
}
defer es.remove(oc)
es.deploy(oc)
g.By("Create ClusterLogForwarder instance")
clf := clusterlogforwarder{
name: "clf-49390",
namespace: esProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch-mtls.yaml"),
secretName: es.secretName,
waitForPodReady: true,
collectInfrastructureLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ES_URL=https://"+es.serverName+"."+esProj+".svc:9200", "ES_VERSION="+es.version, "INPUT_REFS=[\"infrastructure\"]")
g.By("Deploy the Event Router")
evt := eventRouter{
name: "logging-eventrouter",
namespace: cloNS,
template: filepath.Join(loggingBaseDir, "eventrouter", "eventrouter.yaml"),
}
defer evt.delete(oc)
evt.deploy(oc)
g.By("Check event logs in the Event Router pod")
podList, err := oc.AdminKubeClient().CoreV1().Pods(evt.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "component=eventrouter"})
o.Expect(err).NotTo(o.HaveOccurred())
checkLogsFromRs(oc, "pods", podList.Items[0].Name, evt.namespace, "kube-eventrouter", "ADDED")
checkLogsFromRs(oc, "pods", podList.Items[0].Name, evt.namespace, "kube-eventrouter", "Update")
g.By("Check for Event Router logs in Elasticsearch")
checkLog := "{\"sort\": [{\"@timestamp\": {\"order\":\"desc\"}}], \"query\": {\"match_phrase\": {\"kubernetes.pod_name\": \"" + podList.Items[0].Name + "\"}}}"
err = wait.PollUntilContextTimeout(context.Background(), 20*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
logs := es.searchDocByQuery(oc, "infra", checkLog)
return len(logs.Hits.DataHits) > 0, nil
})
exutil.AssertWaitPollNoErr(err, "No Event Router logs found when using vector as log collector.")
})
g.It("Author:ikanse-CPaasrunOnly-Critical-53995-Vector Collect OVN audit logs", func() {
g.By("Check the network type for the test")
networkType := checkNetworkType(oc)
if !strings.Contains(networkType, "ovnkubernetes") {
g.Skip("Skip for non-supported network type, type is not OVNKubernetes!!!")
}
oc.SetupProject()
esProj := oc.Namespace()
ees := externalES{
namespace: esProj,
version: "7",
serverName: "external-es",
httpSSL: true,
secretName: "clf-53995",
loggingNS: esProj,
}
defer ees.remove(oc)
ees.deploy(oc)
g.By("create clusterlogforwarder")
clf := clusterlogforwarder{
name: "clf-53995",
namespace: esProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch-https.yaml"),
secretName: ees.secretName,
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ES_URL=https://"+ees.serverName+"."+ees.namespace+".svc:9200", "ES_VERSION="+ees.version)
g.By("Check audit index in ES pod")
ees.waitForIndexAppear(oc, "audit")
g.By("Create a test project, enable OVN network log collection on it, add the OVN log app and network policies for the project")
oc.SetupProject()
ovnProj := oc.Namespace()
ovn := resource{"deployment", "ovn-app", ovnProj}
ovnTemplate := filepath.Join(loggingBaseDir, "generatelog", "42981.yaml")
err := ovn.applyFromTemplate(oc, "-n", ovn.namespace, "-f", ovnTemplate, "-p", "NAMESPACE="+ovn.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
WaitForDeploymentPodsToBeReady(oc, ovnProj, ovn.name)
g.By("Access the OVN app pod from another pod in the same project to generate OVN ACL messages")
ovnPods, err := oc.AdminKubeClient().CoreV1().Pods(ovnProj).List(context.Background(), metav1.ListOptions{LabelSelector: "app=ovn-app"})
o.Expect(err).NotTo(o.HaveOccurred())
podIP := ovnPods.Items[0].Status.PodIP
e2e.Logf("Pod IP is %s ", podIP)
var ovnCurl string
if strings.Contains(podIP, ":") {
ovnCurl = "curl --globoff [" + podIP + "]:8080"
} else {
ovnCurl = "curl --globoff " + podIP + ":8080"
}
_, err = e2eoutput.RunHostCmdWithRetries(ovnProj, ovnPods.Items[1].Name, ovnCurl, 3*time.Second, 30*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Check for the generated OVN audit logs on the OpenShift cluster nodes")
nodeLogs, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("-n", ovnProj, "node-logs", "-l", "beta.kubernetes.io/os=linux", "--path=/ovn/acl-audit-log.log").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeLogs).Should(o.ContainSubstring(ovnProj), "The OVN logs don't contain logs from project %s", ovnProj)
g.By("Check for the generated OVN audit logs in Elasticsearch")
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
query := "{\"query\":{\"query_string\":{\"query\":\"verdict=allow AND severity=alert AND tcp,vlan_tci AND tcp_flags=ack\",\"default_field\":\"message\"}}}"
res := ees.searchDocByQuery(oc, "audit", query)
return len(res.Hits.DataHits) > 0, nil
})
exutil.AssertWaitPollNoErr(err, "The ovn audit logs are not collected")
})
// author [email protected]
g.It("Author:qitang-CPaasrunOnly-Medium-76073-Medium-74944-Send logs from containers in the same pod to separate indices and parse json logs", func() {
app := oc.Namespace()
containerName := "log-76073-" + getRandomString()
multiContainerJSONLog := filepath.Join(loggingBaseDir, "generatelog", "multi_container_json_log_template.yaml")
err := oc.WithoutNamespace().Run("new-app").Args("-f", multiContainerJSONLog, "-n", app, "-p", "CONTAINER="+containerName).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
esProj := oc.Namespace()
ees := externalES{
namespace: esProj,
version: "7",
serverName: "external-es",
httpSSL: true,
secretName: "json-log-76073",
loggingNS: esProj,
}
defer ees.remove(oc)
ees.deploy(oc)
eesURL := "https://" + ees.serverName + "." + ees.namespace + ".svc:9200"
g.By("create clusterlogforwarder")
clf := clusterlogforwarder{
name: "clf-76073",
namespace: esProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch-https.yaml"),
secretName: ees.secretName,
collectApplicationLogs: true,
collectInfrastructureLogs: true,
collectAuditLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ES_URL="+eesURL, "ES_VERSION="+ees.version, "INDEX={.kubernetes.container_name||.log_type||\"none\"}")
patch := `[{"op": "add", "path": "/spec/filters", "value": [{"name": "parse-json-logs", "type": "parse"}]}, {"op": "add", "path": "/spec/pipelines/0/filterRefs", "value":["parse-json-logs"]}]`
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
// for container logs, they're indexed by container name
// for non-container logs, they're indexed by log_type
g.By("check indices in externale ES")
ees.waitForIndexAppear(oc, containerName+"-0")
ees.waitForIndexAppear(oc, containerName+"-1")
ees.waitForIndexAppear(oc, containerName+"-2")
ees.waitForIndexAppear(oc, "kube-") // infra container logs
ees.waitForIndexAppear(oc, "infrastructure")
ees.waitForIndexAppear(oc, "audit")
exutil.By("check logs in ES, json logs should be parsed to json format")
queryContainerLog := func(container string) string {
return "{\"size\": 1, \"sort\": [{\"@timestamp\": {\"order\":\"desc\"}}], \"query\": {\"match_phrase\": {\"kubernetes.container_name\": \"" + container + "\"}}}"
}
// in index app-$containerName-0, only logs in container $containerName-0 are stored in it
log0 := ees.searchDocByQuery(oc, containerName+"-0", queryContainerLog(containerName+"-0"))
o.Expect(len(log0.Hits.DataHits) > 0).To(o.BeTrue())
o.Expect(log0.Hits.DataHits[0].Source.Structured.Message != "").Should(o.BeTrue())
log01 := ees.searchDocByQuery(oc, containerName+"-0", queryContainerLog(containerName+"-1"))
o.Expect(len(log01.Hits.DataHits) == 0).To(o.BeTrue())
log02 := ees.searchDocByQuery(oc, containerName+"-0", queryContainerLog(containerName+"-2"))
o.Expect(len(log02.Hits.DataHits) == 0).To(o.BeTrue())
// in index app-$containerName-1, only logs in container $containerName-1 are stored in it
log1 := ees.searchDocByQuery(oc, containerName+"-1", queryContainerLog(containerName+"-1"))
o.Expect(len(log1.Hits.DataHits) > 0).To(o.BeTrue())
o.Expect(log1.Hits.DataHits[0].Source.Structured.Message != "").Should(o.BeTrue())
log10 := ees.searchDocByQuery(oc, containerName+"-1", queryContainerLog(containerName+"-0"))
o.Expect(len(log10.Hits.DataHits) == 0).To(o.BeTrue())
log12 := ees.searchDocByQuery(oc, containerName+"-1", queryContainerLog(containerName+"-2"))
o.Expect(len(log12.Hits.DataHits) == 0).To(o.BeTrue())
// in index app-$containerName-2, only logs in container $containerName-2 are stored in it
log2 := ees.searchDocByQuery(oc, containerName+"-2", queryContainerLog(containerName+"-2"))
o.Expect(len(log2.Hits.DataHits) > 0).To(o.BeTrue())
o.Expect(log2.Hits.DataHits[0].Source.Structured.Message != "").Should(o.BeTrue())
log20 := ees.searchDocByQuery(oc, containerName+"-2", queryContainerLog(containerName+"-0"))
o.Expect(len(log20.Hits.DataHits) == 0).To(o.BeTrue())
log21 := ees.searchDocByQuery(oc, containerName+"-2", queryContainerLog(containerName+"-1"))
o.Expect(len(log21.Hits.DataHits) == 0).To(o.BeTrue())
})
// author [email protected]
g.It("CPaasrunOnly-Author:qitang-Medium-52131-Vector Logs from different projects are forwarded to the same index if the pods have same annotation", func() {
containerName := "log-52131-" + getRandomString()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
app1 := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", app1, "-p", "CONTAINER="+containerName).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
app2 := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", app2, "-p", "CONTAINER="+containerName).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
esProj := oc.Namespace()
ees := externalES{
namespace: esProj,
version: "7",
serverName: "external-es",
httpSSL: true,
secretName: "json-log-52131",
loggingNS: esProj,
}
defer ees.remove(oc)
ees.deploy(oc)
eesURL := "https://" + ees.serverName + "." + ees.namespace + ".svc:9200"
g.By("create clusterlogforwarder")
clf := clusterlogforwarder{
name: "clf-52131",
namespace: esProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch-https.yaml"),
secretName: ees.secretName,
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "INDEX={.kubernetes.container_name||\"none-container-logs\"}", "ES_URL="+eesURL, "ES_VERSION="+ees.version, "INPUT_REFS=[\"application\"]")
g.By("check indices in externale ES")
ees.waitForIndexAppear(oc, containerName)
g.By("check data in ES")
for _, proj := range []string{app1, app2} {
count, err := ees.getDocCount(oc, containerName, "{\"query\": {\"match_phrase\": {\"kubernetes.namespace_name\": \""+proj+"\"}}}")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(count > 0).To(o.BeTrue())
}
})
g.It("Author:qitang-CPaasrunOnly-Medium-74947-New filter openshiftLabels testing", func() {
exutil.By("Create Elasticsearch")
esProj := oc.Namespace()
ees := externalES{
namespace: esProj,
version: "6",
serverName: "elasticsearch-server",
loggingNS: esProj,
}
defer ees.remove(oc)
ees.deploy(oc)
exutil.By("Create project for app logs and deploy the log generator app")
oc.SetupProject()
appProj := oc.Namespace()
loglabeltemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create ClusterLogForwarder")
clf := clusterlogforwarder{
name: "clf-74947",
namespace: esProj,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
serviceAccountName: "test-clf-" + getRandomString(),
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch.yaml"),
}
defer clf.delete(oc)
clf.create(oc, "ES_URL=http://"+getRouteAddress(oc, ees.namespace, ees.serverName)+":80", "ES_VERSION="+ees.version)
exutil.By("Check logs in ES")
ees.waitForIndexAppear(oc, "app")
ees.waitForIndexAppear(oc, "infra")
ees.waitForIndexAppear(oc, "audit")
exutil.By("Add new filter to the ClusterLogForwarder")
patch := `[{"op": "add", "path": "/spec/filters", "value": [{"name": "openshift-labels", "type": "openshiftLabels", "openshiftLabels": {"label-test": "ocp-74947", "clf/observability.openshift.io": "logging-74947"}}]}, {"op": "add", "path": "/spec/pipelines/0/filterRefs", "value": ["openshift-labels"]}]`
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
exutil.By("Check logs with label in ES")
checkLog := `{"size": 1, "sort": [{"@timestamp": {"order":"desc"}}], "query": {"bool": {"must": [{"match": {"openshift.labels.label-test": "ocp-74947"}},{"match": {"openshift.labels.clf_observability_openshift_io": "logging-74947"}}]}}}`
indexName := []string{"app", "infra", "audit"}
for i := 0; i < len(indexName); i++ {
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 60*time.Second, true, func(context.Context) (done bool, err error) {
logs := ees.searchDocByQuery(oc, indexName[i], checkLog)
if logs.Hits.Total > 0 || len(logs.Hits.DataHits) > 0 {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("No %s logs found with label in external ES", indexName[i]))
}
})
g.It("CPaasrunOnly-Author:ikanse-Medium-48593-Vector ClusterLogForwarder Label each message type differently and send all to the same output", func() {
g.By("Create external Elasticsearch instance")
esProj := oc.Namespace()
ees := externalES{
namespace: esProj,
version: "6",
serverName: "elasticsearch-server",
loggingNS: esProj,
}
defer ees.remove(oc)
ees.deploy(oc)
g.By("Create project for app logs and deploy the log generator app")
oc.SetupProject()
appProj := oc.Namespace()
loglabeltemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create ClusterLogForwarder instance")
clf := clusterlogforwarder{
name: "clf-48593",
namespace: esProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "48593.yaml"),
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ES_URL=http://"+ees.serverName+"."+esProj+".svc:9200", "ES_VERSION="+ees.version)
g.By("Check logs in external ES")
ees.waitForIndexAppear(oc, "app")
ees.waitForIndexAppear(oc, "infra")
ees.waitForIndexAppear(oc, "audit")
g.By("Check logs with pipeline label in external ES")
indexName := []string{"app", "infra", "audit"}
for i := 0; i < len(indexName); i++ {
checkLog := "{\"size\": 1, \"sort\": [{\"@timestamp\": {\"order\":\"desc\"}}], \"query\": {\"match\": {\"openshift.labels.logging\": \"" + indexName[i] + "-logs\"}}}"
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 60*time.Second, true, func(context.Context) (done bool, err error) {
logs := ees.searchDocByQuery(oc, indexName[i], checkLog)
if logs.Hits.Total > 0 {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("No %s logs found with pipeline label in external ES", indexName[i]))
}
})
g.It("CPaasrunOnly-Author:ikanse-High-46882-High-47061-Vector ClusterLogForwarder forward logs to Elasticsearch insecure forward and metadata check", func() {
g.By("Create external Elasticsearch instance")
esProj := oc.Namespace()
ees := externalES{
namespace: esProj,
version: "6",
serverName: "elasticsearch-server",
httpSSL: false,
loggingNS: esProj,
}
defer ees.remove(oc)
ees.deploy(oc)
g.By("Create project for app logs and deploy the log generator app")
oc.SetupProject()
app := oc.Namespace()
// to test fix for LOG-3463, add labels to the app project
_, err := exutil.AddLabelsToSpecificResource(oc, "ns/"+app, "", "app=logging-apps", "app.kubernetes.io/instance=logging-apps-test", "app.test=test")
o.Expect(err).NotTo(o.HaveOccurred())
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_non_json_log_template.json")
err = oc.WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", app).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterID, err := getClusterID(oc)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create ClusterLogForwarder")
clf := clusterlogforwarder{
name: "clf-46882",
namespace: esProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch.yaml"),
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ES_URL=http://"+ees.serverName+"."+esProj+".svc:9200", "ES_VERSION="+ees.version)
g.By("Check logs in external ES")
ees.waitForIndexAppear(oc, "app")
ees.waitForIndexAppear(oc, "infra")
ees.waitForIndexAppear(oc, "audit")
ees.waitForProjectLogsAppear(oc, app, "app")
appLogs := ees.searchDocByQuery(oc, "app", "{\"query\": {\"match_phrase\": {\"kubernetes.namespace_name\": \""+app+"\"}}}")
log := appLogs.Hits.DataHits[0].Source
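// the multi-byte UTF-8 message must round-trip unchanged through the collector and Elasticsearch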
o.Expect(log.Message == "ㄅㄉˇˋㄓˊ˙ㄚㄞㄢㄦㄆ 中国 883.317µs ā á ǎ à ō ó ▅ ▆ ▇ █ 々").Should(o.BeTrue())
o.Expect(log.OpenShift.ClusterID == clusterID).Should(o.BeTrue())
o.Expect(log.OpenShift.Sequence > 0).Should(o.BeTrue())
o.Expect(log.Kubernetes.NamespaceLabels["app_kubernetes_io_instance"] == "logging-apps-test").Should(o.BeTrue())
o.Expect(log.Kubernetes.NamespaceLabels["app_test"] == "test").Should(o.BeTrue())
infraLogs := ees.searchDocByQuery(oc, "infra", "")
o.Expect(infraLogs.Hits.DataHits[0].Source.OpenShift.ClusterID == clusterID).Should(o.BeTrue())
auditLogs := ees.searchDocByQuery(oc, "audit", "")
o.Expect(auditLogs.Hits.DataHits[0].Source.OpenShift.ClusterID == clusterID).Should(o.BeTrue())
for _, logType := range []string{"app", "infra", "audit"} {
for _, field := range []string{"@timestamp", "openshift.cluster_id", "openshift.sequence"} {
count, err := ees.getDocCount(oc, logType, "{\"query\": {\"bool\": {\"must_not\": {\"exists\": {\"field\": \""+field+"\"}}}}}")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(count == 0).Should(o.BeTrue())
}
}
})
g.It("Author:ikanse-CPaasrunOnly-High-55396-alert rule CollectorNodeDown testing", func() {
g.By("Create external Elasticsearch instance")
esProj := oc.Namespace()
ees := externalES{
namespace: esProj,
version: "6",
serverName: "elasticsearch-server",
httpSSL: true,
secretName: "ees-https",
loggingNS: esProj,
}
defer ees.remove(oc)
ees.deploy(oc)
g.By("Create project for app logs and deploy the log generator app")
oc.SetupProject()
appProj := oc.Namespace()
loglabeltemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create ClusterLogForwarder instance")
clf := clusterlogforwarder{
name: "clf-55396",
namespace: esProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch-https.yaml"),
secretName: ees.secretName,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
serviceAccountName: "test-clf-" + getRandomString(),
enableMonitoring: true,
}
defer clf.delete(oc)
clf.create(oc, "ES_URL=https://"+ees.serverName+"."+esProj+".svc:9200", "ES_VERSION="+ees.version, `SECURITY_PROFILE={"type": "Old"}`)
g.By("Check logs in external ES")
ees.waitForIndexAppear(oc, "app")
ees.waitForIndexAppear(oc, "infra")
ees.waitForIndexAppear(oc, "audit")
g.By("Patch the collector Prometheus Rule for alert CollectorNodeDown to set alert firing time to 2m")
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("prometheusrules", "collector", "--type=json", "-p", `[{"op": "replace", "path": "/spec/groups/0/rules/0/for", "value":"10m"}]`, "-n", cloNS).Execute()
er := oc.AsAdmin().WithoutNamespace().Run("patch").Args("prometheusrules", "collector", "--type=json", "-p", `[{"op": "replace", "path": "/spec/groups/0/rules/0/for", "value":"2m"}]`, "-n", cloNS).Execute()
o.Expect(er).NotTo(o.HaveOccurred())
g.By("Update clusterlogforwarder to set the cpu and memory for collector pods")
resource := `[{"op": "replace", "path": "/spec/collector/resources", "value": {"limits": {"memory": "128Mi", "cpu": "10m"}, "requests": {"cpu": "1m", "memory": "2Mi"}}}]`
clf.update(oc, "", resource, "--type=json")
g.By("Check the alert CollectorNodeDown is in state firing or pending")
checkAlert(oc, getSAToken(oc, "prometheus-k8s", "openshift-monitoring"), "CollectorNodeDown", "firing/pending", 5)
})
g.It("CPaasrunOnly-Author:ikanse-Medium-55200-Medium-47753-Vector Forward logs to external Elasticsearch with username password HTTP ES 6.x", func() {
g.By("Create external Elasticsearch instance")
esProj := oc.Namespace()
ees := externalES{
namespace: esProj,
version: "6",
serverName: "elasticsearch-server",
userAuth: true,
username: "user1",
password: getRandomString(),
secretName: "ees-http",
loggingNS: esProj,
}
defer ees.remove(oc)
ees.deploy(oc)
g.By("Create project for app logs and deploy the log generator app")
oc.SetupProject()
appProj := oc.Namespace()
loglabeltemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create ClusterLogForwarder instance")
clf := clusterlogforwarder{
name: "clf-47753",
namespace: esProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch-userauth.yaml"),
secretName: ees.secretName,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ES_URL=http://"+ees.serverName+"."+esProj+".svc:9200", "ES_VERSION="+ees.version)
g.By("Check logs in external ES")
ees.waitForIndexAppear(oc, "app")
ees.waitForIndexAppear(oc, "infra")
ees.waitForIndexAppear(oc, "audit")
})
g.It("CPaasrunOnly-Author:ikanse-Medium-55199-Medium-47755-Vector Forward logs to external Elasticsearch with username password HTTPS ES 7.x", func() {
oc.SetupProject()
clfNS := oc.Namespace()
g.By("Create external Elasticsearch instance")
esProj := oc.Namespace()
ees := externalES{
namespace: esProj,
version: "7",
serverName: "elasticsearch-server",
httpSSL: true,
userAuth: true,
username: "user1",
password: getRandomString(),
secretName: "ees-47755",
loggingNS: clfNS,
}
defer ees.remove(oc)
ees.deploy(oc)
g.By("Create project for app logs and deploy the log generator app")
oc.SetupProject()
appProj := oc.Namespace()
loglabeltemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create ClusterLogForwarder instance")
clf := clusterlogforwarder{
name: "clf-55199",
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch-userauth-https.yaml"),
secretName: ees.secretName,
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
enableMonitoring: true,
}
defer clf.delete(oc)
clf.create(oc, "ES_URL=https://"+ees.serverName+"."+esProj+".svc:9200", "ES_VERSION="+ees.version)
g.By("Check logs in external ES")
ees.waitForIndexAppear(oc, "app")
ees.waitForIndexAppear(oc, "infra")
ees.waitForIndexAppear(oc, "audit")
})
g.It("CPaasrunOnly-Author:ikanse-High-61450-Collector-External Elasticsearch output complies with the tlsSecurityProfile config.[Slow][Disruptive]", func() {
g.By("Configure the global tlsSecurityProfile to use custom profile")
ogTLS, er := oc.AsAdmin().WithoutNamespace().Run("get").Args("apiserver/cluster", "-o", "jsonpath={.spec.tlsSecurityProfile}").Output()
o.Expect(er).NotTo(o.HaveOccurred())
if ogTLS == "" {
ogTLS = "null"
}
ogPatch := fmt.Sprintf(`[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": %s}]`, ogTLS)
defer func() {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", ogPatch).Execute()
waitForOperatorsRunning(oc)
}()
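// Custom profile: minTLSVersion VersionTLS10 plus an extended cipher list; the collector's ES sink must mirror these values (checked against vector.toml below)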
patch := `[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": {"custom":{"ciphers":["ECDHE-ECDSA-CHACHA20-POLY1305","ECDHE-RSA-CHACHA20-POLY1305","ECDHE-RSA-AES128-GCM-SHA256","ECDHE-ECDSA-AES128-GCM-SHA256","TLS_AES_128_GCM_SHA256","TLS_AES_256_GCM_SHA384","TLS_CHACHA20_POLY1305_SHA256","ECDHE-ECDSA-AES256-GCM-SHA384","ECDHE-RSA-AES256-GCM-SHA384","ECDHE-ECDSA-CHACHA20-POLY1305","ECDHE-RSA-CHACHA20-POLY1305","DHE-RSA-AES128-GCM-SHA256","DHE-RSA-AES256-GCM-SHA384","DHE-RSA-CHACHA20-POLY1305","ECDHE-ECDSA-AES128-SHA256","ECDHE-RSA-AES128-SHA256","ECDHE-ECDSA-AES128-SHA","ECDHE-RSA-AES128-SHA","ECDHE-ECDSA-AES256-SHA384","ECDHE-RSA-AES256-SHA384","ECDHE-ECDSA-AES256-SHA","ECDHE-RSA-AES256-SHA","DHE-RSA-AES128-SHA256","DHE-RSA-AES256-SHA256","AES128-GCM-SHA256","AES256-GCM-SHA384","AES128-SHA256","AES256-SHA256"],"minTLSVersion":"VersionTLS10"},"type":"Custom"}}]`
er = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patch).Execute()
o.Expect(er).NotTo(o.HaveOccurred())
g.By("Make sure that all the Cluster Operators are in healthy state before progressing.")
waitForOperatorsRunning(oc)
g.By("Create external Elasticsearch instance")
esProj := oc.Namespace()
ees := externalES{
namespace: esProj,
version: "6",
serverName: "elasticsearch-server",
httpSSL: true,
clientAuth: true,
secretName: "ees-https",
loggingNS: esProj,
}
defer ees.remove(oc)
ees.deploy(oc)
g.By("Create project for app logs and deploy the log generator app")
oc.SetupProject()
appProj := oc.Namespace()
loglabeltemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create ClusterLogForwarder instance")
clf := clusterlogforwarder{
name: "clf-61450",
namespace: esProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch-mtls.yaml"),
secretName: ees.secretName,
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ES_URL=https://"+ees.serverName+"."+esProj+".svc:9200", "ES_VERSION="+ees.version)
g.By("The Elasticsearch sink in Vector config must use the Custom tlsSecurityProfile")
searchString := `[sinks.output_es_created_by_user.tls]
min_tls_version = "VersionTLS10"
ciphersuites = "ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES128-GCM-SHA256,TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,DHE-RSA-AES128-GCM-SHA256,DHE-RSA-AES256-GCM-SHA384,DHE-RSA-CHACHA20-POLY1305,ECDHE-ECDSA-AES128-SHA256,ECDHE-RSA-AES128-SHA256,ECDHE-ECDSA-AES128-SHA,ECDHE-RSA-AES128-SHA,ECDHE-ECDSA-AES256-SHA384,ECDHE-RSA-AES256-SHA384,ECDHE-ECDSA-AES256-SHA,ECDHE-RSA-AES256-SHA,DHE-RSA-AES128-SHA256,DHE-RSA-AES256-SHA256,AES128-GCM-SHA256,AES256-GCM-SHA384,AES128-SHA256,AES256-SHA256"
key_file = "/var/run/ocp-collector/secrets/ees-https/tls.key"
crt_file = "/var/run/ocp-collector/secrets/ees-https/tls.crt"
ca_file = "/var/run/ocp-collector/secrets/ees-https/ca-bundle.crt"`
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("Check logs in external ES")
ees.waitForIndexAppear(oc, "app")
ees.waitForIndexAppear(oc, "infra")
ees.waitForIndexAppear(oc, "audit")
g.By("Set Old tlsSecurityProfile for the External ES output.")
patch = `[{"op": "add", "path": "/spec/outputs/0/tls/securityProfile", "value": {"type": "Old"}}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
g.By("The Elasticsearch sink in Vector config must use the Old tlsSecurityProfile")
searchString = `[sinks.output_es_created_by_user.tls]
min_tls_version = "VersionTLS10"
ciphersuites = "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,DHE-RSA-AES128-GCM-SHA256,DHE-RSA-AES256-GCM-SHA384,DHE-RSA-CHACHA20-POLY1305,ECDHE-ECDSA-AES128-SHA256,ECDHE-RSA-AES128-SHA256,ECDHE-ECDSA-AES128-SHA,ECDHE-RSA-AES128-SHA,ECDHE-ECDSA-AES256-SHA384,ECDHE-RSA-AES256-SHA384,ECDHE-ECDSA-AES256-SHA,ECDHE-RSA-AES256-SHA,DHE-RSA-AES128-SHA256,DHE-RSA-AES256-SHA256,AES128-GCM-SHA256,AES256-GCM-SHA384,AES128-SHA256,AES256-SHA256,AES128-SHA,AES256-SHA,DES-CBC3-SHA"
key_file = "/var/run/ocp-collector/secrets/ees-https/tls.key"
crt_file = "/var/run/ocp-collector/secrets/ees-https/tls.crt"
ca_file = "/var/run/ocp-collector/secrets/ees-https/ca-bundle.crt"`
result, err = checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("Check for errors in collector pod logs.")
e2e.Logf("Wait for a minute before the collector logs are generated.")
time.Sleep(60 * time.Second)
collectorLogs, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", clf.namespace, "--selector=app.kubernetes.io/component=collector").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(collectorLogs, "Error trying to connect")).ShouldNot(o.BeTrue(), "Unable to connect to the external Elasticsearch server.")
g.By("Delete the Elasticsearch server pod to recollect logs")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pods", "-n", esProj, "-l", "app=elasticsearch-server").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodReadyWithLabel(oc, esProj, "app=elasticsearch-server")
g.By("Check logs in external ES")
ees.waitForIndexAppear(oc, "app")
ees.waitForIndexAppear(oc, "infra")
ees.waitForIndexAppear(oc, "audit")
})
g.It("CPaasrunOnly-Author:qitang-High-71000-Collect or exclude logs by namespace[Slow]", func() {
exutil.By("Deploy Elasticsearch")
esProj := oc.Namespace()
ees := externalES{
namespace: esProj,
version: "7",
serverName: "elasticsearch-server-71000",
httpSSL: true,
clientAuth: true,
secretName: "ees-https-71000",
loggingNS: esProj,
}
defer ees.remove(oc)
ees.deploy(oc)
exutil.By("Deploy CLF to exclude some logs by setting excludeNamespaces")
clf := clusterlogforwarder{
name: "clf-71000",
namespace: esProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch-mtls.yaml"),
secretName: ees.secretName,
collectApplicationLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "INPUT_REFS=[\"application\"]", "ES_URL=https://"+ees.serverName+"."+esProj+".svc:9200", "ES_VERSION="+ees.version)
patch := `[{"op": "add", "path": "/spec/inputs", "value": [{"name": "new-app", "type": "application", "application": {"excludes": [{"namespace":"logging-project-71000-2"}]}}]}, {"op": "replace", "path": "/spec/pipelines/0/inputRefs", "value": ["new-app"]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
exutil.By("Create project for app logs and deploy the log generator")
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
for i := 0; i < 3; i++ {
ns := "logging-project-71000-" + strconv.Itoa(i)
defer oc.DeleteSpecifiedNamespaceAsAdmin(ns)
oc.CreateSpecifiedNamespaceAsAdmin(ns)
err := oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
appNS := "logging-71000-test-1"
defer oc.DeleteSpecifiedNamespaceAsAdmin(appNS)
oc.CreateSpecifiedNamespaceAsAdmin(appNS)
err := oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", appNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check data in ES, logs from project/logging-project-71000-2 shouldn't be collected")
ees.waitForIndexAppear(oc, "app")
ees.waitForProjectLogsAppear(oc, "logging-project-71000-0", "app")
ees.waitForProjectLogsAppear(oc, "logging-project-71000-1", "app")
ees.waitForProjectLogsAppear(oc, appNS, "app")
count, err := ees.getDocCount(oc, "app", "{\"query\": {\"regexp\": {\"kubernetes.namespace_name\": \"logging-project-71000-2\"}}}")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(count == 0).Should(o.BeTrue())
exutil.By("Update CLF to exclude all namespaces")
patch = `[{"op": "replace", "path": "/spec/inputs/0/application/excludes/0/namespace", "value": "*"}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
exutil.By("Check data in ES, no logs should be collected")
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
ees.removeIndices(oc, "application")
// sleep 10 seconds for collector pods to work with new configurations
time.Sleep(10 * time.Second)
indices, err := ees.getIndices(oc)
o.Expect(err).NotTo(o.HaveOccurred())
if len(indices) > 0 {
for _, i := range indices {
o.Expect(strings.Contains(i.Index, "app")).ShouldNot(o.BeTrue())
}
}
exutil.By("Update CLF to set include namespaces")
patch = `[{"op": "add", "path": "/spec/inputs/0/application/includes", "value": [{"namespace": "logging-project-71000*"}]}, {"op": "replace", "path": "/spec/inputs/0/application/excludes/0/namespace", "value": "logging-project-71000-2"}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
ees.removeIndices(oc, "application")
exutil.By("Check data in ES, logs from project/logging-project-71000-2 and " + appNS + "shouldn't be collected")
ees.waitForIndexAppear(oc, "app")
ees.waitForProjectLogsAppear(oc, "logging-project-71000-0", "app")
ees.waitForProjectLogsAppear(oc, "logging-project-71000-1", "app")
for _, ns := range []string{appNS, "logging-project-71000-2"} {
count, err = ees.getDocCount(oc, "app", "{\"query\": {\"regexp\": {\"kubernetes.namespace_name\": \""+ns+"\"}}}")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(count == 0).Should(o.BeTrue(), "find logs from project "+ns+", this is not expected")
}
exutil.By("Remove excludes from CLF")
patch = `[{"op": "remove", "path": "/spec/inputs/0/application/excludes"}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
ees.removeIndices(oc, "application")
exutil.By("Check data in ES, logs from logging-project-71000*, other logs shouldn't be collected")
ees.waitForIndexAppear(oc, "app")
ees.waitForProjectLogsAppear(oc, "logging-project-71000-0", "app")
ees.waitForProjectLogsAppear(oc, "logging-project-71000-1", "app")
ees.waitForProjectLogsAppear(oc, "logging-project-71000-2", "app")
count, err = ees.getDocCount(oc, "app", "{\"query\": {\"regexp\": {\"kubernetes.namespace_name\": \""+appNS+"\"}}}")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(count == 0).Should(o.BeTrue(), "find logs from project "+appNS+", this is not expected")
exutil.By("Update CLF to include all namespaces")
patch = `[{"op": "replace", "path": "/spec/inputs/0/application/includes/0/namespace", "value": "*"}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
exutil.By("Check data in ES, all application logs should be collected, but no logs from infra projects")
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
ees.removeIndices(oc, "application")
for _, ns := range []string{appNS, "logging-project-71000-0", "logging-project-71000-1", "logging-project-71000-2"} {
ees.waitForProjectLogsAppear(oc, ns, "app")
}
count, err = ees.getDocCount(oc, "app", "{\"query\": {\"regexp\": {\"kubernetes.namespace_name\": \"openshift@\"}}}")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(count == 0).Should(o.BeTrue(), "find logs from project openshift*, this is not expected")
})
//author [email protected]
g.It("Author:qitang-CPaasrunOnly-High-51740-Vector Preserve k8s Common Labels", func() {
loglabeltemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
labels := map[string]string{
"app.kubernetes.io/name": "test",
"app.kubernetes.io/instance": "functionaltest",
"app.kubernetes.io/version": "123",
"app.kubernetes.io/component": "thecomponent",
"app.kubernetes.io/part-of": "clusterlogging",
"app.kubernetes.io/managed-by": "clusterloggingoperator",
"app.kubernetes.io/created-by": "anoperator",
"run": "test-51740",
"test": "test-logging-51740",
}
processedLabels := map[string]string{
"app_kubernetes_io_name": "test",
"app_kubernetes_io_instance": "functionaltest",
"app_kubernetes_io_version": "123",
"app_kubernetes_io_component": "thecomponent",
"app_kubernetes_io_part-of": "clusterlogging",
"app_kubernetes_io_managed-by": "clusterloggingoperator",
"app_kubernetes_io_created-by": "anoperator",
"run": "test-51740",
"test": "test-logging-51740",
}
labelJSON, _ := json.Marshal(labels)
labelStr := string(labelJSON)
app := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-f", loglabeltemplate, "-n", app, "-p", "LABELS="+labelStr).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// For this case, we need to cover both ES and non-ES log stores and check the log entity in each;
// to keep the functions simple, external Loki is used here as the non-ES log store
g.By("Create Loki project and deploy Loki Server")
oc.SetupProject()
lokiNS := oc.Namespace()
loki := externalLoki{"loki-server", lokiNS}
defer loki.remove(oc)
loki.deployLoki(oc)
g.By("Create external Elasticsearch instance")
esProj := oc.Namespace()
ees := externalES{
namespace: esProj,
version: "6",
serverName: "elasticsearch-server",
httpSSL: false,
loggingNS: esProj,
}
defer ees.remove(oc)
ees.deploy(oc)
g.By("Create ClusterLogForwarder")
clf := clusterlogforwarder{
name: "clf-51740",
namespace: esProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch.yaml"),
collectApplicationLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ES_URL=http://"+ees.serverName+"."+esProj+".svc:9200", "ES_VERSION="+ees.version, "INPUT_REFS=[\"application\"]")
lokiURL := "http://" + loki.name + "." + lokiNS + ".svc:3100"
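// add a second output (external Loki) to the same pipeline so the processed labels can be verified in both log stores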
patch := `[{"op": "add", "path": "/spec/outputs/-", "value": {"name": "loki-server", "type": "loki", "loki": {"url": "` + lokiURL + `"}}}, {"op": "add", "path": "/spec/pipelines/0/outputRefs/-", "value": "loki-server"}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
g.By("check data in ES")
ees.waitForIndexAppear(oc, "app")
ees.waitForProjectLogsAppear(oc, app, "app")
dataInES := ees.searchDocByQuery(oc, "app", "{\"size\": 1, \"sort\": [{\"@timestamp\": {\"order\":\"desc\"}}], \"query\": {\"match_phrase\": {\"kubernetes.namespace_name\": \""+app+"\"}}}")
k8sLabelsInES := dataInES.Hits.DataHits[0].Source.Kubernetes.Lables
o.Expect(len(k8sLabelsInES) > 0).Should(o.BeTrue())
o.Expect(reflect.DeepEqual(processedLabels, k8sLabelsInES)).Should(o.BeTrue())
g.By("check data in Loki")
route := "http://" + getRouteAddress(oc, loki.namespace, loki.name)
lc := newLokiClient(route)
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := lc.searchByNamespace("", app)
if err != nil {
return false, err
}
if appLogs.Status == "success" && len(appLogs.Data.Result) > 0 {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "can't find app logs")
dataInLoki, _ := lc.searchByNamespace("", app)
lokiLog := extractLogEntities(dataInLoki)
k8sLabelsInLoki := lokiLog[0].Kubernetes.Lables
o.Expect(reflect.DeepEqual(processedLabels, k8sLabelsInLoki)).Should(o.BeTrue())
flatLabelsInLoki := lokiLog[0].Kubernetes.FlatLabels
o.Expect(len(flatLabelsInLoki) == 0).Should(o.BeTrue())
})
g.It("Author:qitang-CPaasrunOnly-Critical-74927-Forward logs to elasticsearch 8.x.", func() {
exutil.By("Create external Elasticsearch instance")
esProj := oc.Namespace()
ees := externalES{
namespace: esProj,
version: "8",
serverName: "elasticsearch-server",
httpSSL: true,
clientAuth: true,
userAuth: true,
username: "user1",
password: getRandomString(),
secretName: "ees-74927",
loggingNS: esProj,
}
defer ees.remove(oc)
ees.deploy(oc)
exutil.By("Create project for app logs and deploy the log generator app")
oc.SetupProject()
appProj := oc.Namespace()
loglabeltemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create ClusterLogForwarder")
clf := clusterlogforwarder{
name: "clf-74927",
namespace: esProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch-userauth-mtls.yaml"),
secretName: ees.secretName,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ES_URL=https://"+ees.serverName+"."+esProj+".svc:9200", "ES_VERSION="+ees.version, "INDEX=logging-74927.{.log_type||\"none-typed-logs\"}-write",
`TUNING={"compression": "zlib", "deliveryMode": "AtLeastOnce", "maxRetryDuration": 30, "maxWrite": "20M", "minRetryDuration": 10}`)
clf.update(oc, "", `[{"op": "add", "path": "/spec/outputs/0/rateLimit", value: {"maxRecordsPerSecond": 5000}}]`, "--type=json")
clf.waitForCollectorPodsReady(oc)
exutil.By("Check logs in ES")
ees.waitForIndexAppear(oc, "logging-74927.application-write")
ees.waitForIndexAppear(oc, "logging-74927.infrastructure-write")
ees.waitForIndexAppear(oc, "logging-74927.audit-write")
exutil.By("Check configurations in collector pods")
expectedConfigs := []string{
`[transforms.output_es_created_by_user_throttle]
type = "throttle"
inputs = ["pipeline_forward_to_external_es_viaqdedot_2"]
window_secs = 1
threshold = 5000`,
`compression = "zlib"`,
`[sinks.output_es_created_by_user.batch]
max_bytes = 20000000`,
`[sinks.output_es_created_by_user.buffer]
type = "disk"
when_full = "block"
max_size = 268435488`,
`[sinks.output_es_created_by_user.request]
retry_initial_backoff_secs = 10
retry_max_duration_secs = 30`,
}
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", expectedConfigs...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).Should(o.BeTrue())
})
})
})
| package logging | ||||
test case | openshift/openshift-tests-private | 57506e1e-5d77-4ac6-a58e-2c38472d174c | CPaasrunOnly-Author:ikanse-Critical-49390-Vector Collecting Kubernetes events using event router | ['"context"', '"path/filepath"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/logging/vector_es.go | g.It("CPaasrunOnly-Author:ikanse-Critical-49390-Vector Collecting Kubernetes events using event router", func() {
g.By("Create external Elasticsearch instance")
esProj := oc.Namespace()
es := externalES{
namespace: esProj,
version: "8",
serverName: "elasticsearch-server",
httpSSL: true,
clientAuth: true,
secretName: "ees-49390",
loggingNS: esProj,
}
defer es.remove(oc)
es.deploy(oc)
g.By("Create ClusterLogForwarder instance")
clf := clusterlogforwarder{
name: "clf-49390",
namespace: esProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch-mtls.yaml"),
secretName: es.secretName,
waitForPodReady: true,
collectInfrastructureLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ES_URL=https://"+es.serverName+"."+esProj+".svc:9200", "ES_VERSION="+es.version, "INPUT_REFS=[\"infrastructure\"]")
g.By("Deploy the Event Router")
evt := eventRouter{
name: "logging-eventrouter",
namespace: cloNS,
template: filepath.Join(loggingBaseDir, "eventrouter", "eventrouter.yaml"),
}
defer evt.delete(oc)
evt.deploy(oc)
g.By("Check event logs in the Event Router pod")
podList, err := oc.AdminKubeClient().CoreV1().Pods(evt.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "component=eventrouter"})
o.Expect(err).NotTo(o.HaveOccurred())
checkLogsFromRs(oc, "pods", podList.Items[0].Name, evt.namespace, "kube-eventrouter", "ADDED")
checkLogsFromRs(oc, "pods", podList.Items[0].Name, evt.namespace, "kube-eventrouter", "Update")
g.By("Check for Event Router logs in Elasticsearch")
checkLog := "{\"sort\": [{\"@timestamp\": {\"order\":\"desc\"}}], \"query\": {\"match_phrase\": {\"kubernetes.pod_name\": \"" + podList.Items[0].Name + "\"}}}"
err = wait.PollUntilContextTimeout(context.Background(), 20*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
logs := es.searchDocByQuery(oc, "infra", checkLog)
return len(logs.Hits.DataHits) > 0, nil
})
exutil.AssertWaitPollNoErr(err, "No Event Router logs found when using vector as log collector.")
}) | |||||
test case | openshift/openshift-tests-private | 787f3232-bf39-43ab-803b-7040e0a3ce55 | Author:ikanse-CPaasrunOnly-Critical-53995-Vector Collect OVN audit logs | ['"context"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/logging/vector_es.go | g.It("Author:ikanse-CPaasrunOnly-Critical-53995-Vector Collect OVN audit logs", func() {
g.By("Check the network type for the test")
networkType := checkNetworkType(oc)
if !strings.Contains(networkType, "ovnkubernetes") {
g.Skip("Skip for non-supported network type, type is not OVNKubernetes!!!")
}
oc.SetupProject()
esProj := oc.Namespace()
ees := externalES{
namespace: esProj,
version: "7",
serverName: "external-es",
httpSSL: true,
secretName: "clf-53995",
loggingNS: esProj,
}
defer ees.remove(oc)
ees.deploy(oc)
g.By("create clusterlogforwarder")
clf := clusterlogforwarder{
name: "clf-53995",
namespace: esProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch-https.yaml"),
secretName: ees.secretName,
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ES_URL=https://"+ees.serverName+"."+ees.namespace+".svc:9200", "ES_VERSION="+ees.version)
g.By("Check audit index in ES pod")
ees.waitForIndexAppear(oc, "audit")
g.By("Create a test project, enable OVN network log collection on it, add the OVN log app and network policies for the project")
oc.SetupProject()
ovnProj := oc.Namespace()
ovn := resource{"deployment", "ovn-app", ovnProj}
ovnTemplate := filepath.Join(loggingBaseDir, "generatelog", "42981.yaml")
err := ovn.applyFromTemplate(oc, "-n", ovn.namespace, "-f", ovnTemplate, "-p", "NAMESPACE="+ovn.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
WaitForDeploymentPodsToBeReady(oc, ovnProj, ovn.name)
g.By("Access the OVN app pod from another pod in the same project to generate OVN ACL messages")
ovnPods, err := oc.AdminKubeClient().CoreV1().Pods(ovnProj).List(context.Background(), metav1.ListOptions{LabelSelector: "app=ovn-app"})
o.Expect(err).NotTo(o.HaveOccurred())
podIP := ovnPods.Items[0].Status.PodIP
e2e.Logf("Pod IP is %s ", podIP)
var ovnCurl string
if strings.Contains(podIP, ":") {
ovnCurl = "curl --globoff [" + podIP + "]:8080"
} else {
ovnCurl = "curl --globoff " + podIP + ":8080"
}
_, err = e2eoutput.RunHostCmdWithRetries(ovnProj, ovnPods.Items[1].Name, ovnCurl, 3*time.Second, 30*time.Second)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Check for the generated OVN audit logs on the OpenShift cluster nodes")
nodeLogs, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("-n", ovnProj, "node-logs", "-l", "beta.kubernetes.io/os=linux", "--path=/ovn/acl-audit-log.log").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(nodeLogs).Should(o.ContainSubstring(ovnProj), "The OVN logs don't contain logs from project %s", ovnProj)
g.By("Check for the generated OVN audit logs in Elasticsearch")
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
query := "{\"query\":{\"query_string\":{\"query\":\"verdict=allow AND severity=alert AND tcp,vlan_tci AND tcp_flags=ack\",\"default_field\":\"message\"}}}"
res := ees.searchDocByQuery(oc, "audit", query)
return len(res.Hits.DataHits) > 0, nil
})
exutil.AssertWaitPollNoErr(err, "The ovn audit logs are not collected")
}) | |||||
test case | openshift/openshift-tests-private | a7007df1-7d8a-4587-a2a3-de0a184c7bf9 | Author:qitang-CPaasrunOnly-Medium-76073-Medium-74944-Send logs from containers in the same pod to separate indices and parse json logs | ['"encoding/json"', '"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/vector_es.go | g.It("Author:qitang-CPaasrunOnly-Medium-76073-Medium-74944-Send logs from containers in the same pod to separate indices and parse json logs", func() {
app := oc.Namespace()
containerName := "log-76073-" + getRandomString()
multiContainerJSONLog := filepath.Join(loggingBaseDir, "generatelog", "multi_container_json_log_template.yaml")
err := oc.WithoutNamespace().Run("new-app").Args("-f", multiContainerJSONLog, "-n", app, "-p", "CONTAINER="+containerName).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
esProj := oc.Namespace()
ees := externalES{
namespace: esProj,
version: "7",
serverName: "external-es",
httpSSL: true,
secretName: "json-log-76073",
loggingNS: esProj,
}
defer ees.remove(oc)
ees.deploy(oc)
eesURL := "https://" + ees.serverName + "." + ees.namespace + ".svc:9200"
g.By("create clusterlogforwarder")
clf := clusterlogforwarder{
name: "clf-76073",
namespace: esProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch-https.yaml"),
secretName: ees.secretName,
collectApplicationLogs: true,
collectInfrastructureLogs: true,
collectAuditLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ES_URL="+eesURL, "ES_VERSION="+ees.version, "INDEX={.kubernetes.container_name||.log_type||\"none\"}")
patch := `[{"op": "add", "path": "/spec/filters", "value": [{"name": "parse-json-logs", "type": "parse"}]}, {"op": "add", "path": "/spec/pipelines/0/filterRefs", "value":["parse-json-logs"]}]`
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
// for container logs, they're indexed by container name
// for non-container logs, they're indexed by log_type
g.By("check indices in externale ES")
ees.waitForIndexAppear(oc, containerName+"-0")
ees.waitForIndexAppear(oc, containerName+"-1")
ees.waitForIndexAppear(oc, containerName+"-2")
ees.waitForIndexAppear(oc, "kube-") // infra container logs
ees.waitForIndexAppear(oc, "infrastructure")
ees.waitForIndexAppear(oc, "audit")
exutil.By("check logs in ES, json logs should be parsed to json format")
queryContainerLog := func(container string) string {
return "{\"size\": 1, \"sort\": [{\"@timestamp\": {\"order\":\"desc\"}}], \"query\": {\"match_phrase\": {\"kubernetes.container_name\": \"" + container + "\"}}}"
}
// in index app-$containerName-0, only logs in container $containerName-0 are stored in it
log0 := ees.searchDocByQuery(oc, containerName+"-0", queryContainerLog(containerName+"-0"))
o.Expect(len(log0.Hits.DataHits) > 0).To(o.BeTrue())
o.Expect(log0.Hits.DataHits[0].Source.Structured.Message != "").Should(o.BeTrue())
log01 := ees.searchDocByQuery(oc, containerName+"-0", queryContainerLog(containerName+"-1"))
o.Expect(len(log01.Hits.DataHits) == 0).To(o.BeTrue())
log02 := ees.searchDocByQuery(oc, containerName+"-0", queryContainerLog(containerName+"-2"))
o.Expect(len(log02.Hits.DataHits) == 0).To(o.BeTrue())
// in index app-$containerName-1, only logs in container $containerName-1 are stored in it
log1 := ees.searchDocByQuery(oc, containerName+"-1", queryContainerLog(containerName+"-1"))
o.Expect(len(log1.Hits.DataHits) > 0).To(o.BeTrue())
o.Expect(log1.Hits.DataHits[0].Source.Structured.Message != "").Should(o.BeTrue())
log10 := ees.searchDocByQuery(oc, containerName+"-1", queryContainerLog(containerName+"-0"))
o.Expect(len(log10.Hits.DataHits) == 0).To(o.BeTrue())
log12 := ees.searchDocByQuery(oc, containerName+"-1", queryContainerLog(containerName+"-2"))
o.Expect(len(log12.Hits.DataHits) == 0).To(o.BeTrue())
// in index app-$containerName-2, only logs in container $containerName-2 are stored in it
log2 := ees.searchDocByQuery(oc, containerName+"-2", queryContainerLog(containerName+"-2"))
o.Expect(len(log2.Hits.DataHits) > 0).To(o.BeTrue())
o.Expect(log2.Hits.DataHits[0].Source.Structured.Message != "").Should(o.BeTrue())
log20 := ees.searchDocByQuery(oc, containerName+"-2", queryContainerLog(containerName+"-0"))
o.Expect(len(log20.Hits.DataHits) == 0).To(o.BeTrue())
log21 := ees.searchDocByQuery(oc, containerName+"-2", queryContainerLog(containerName+"-1"))
o.Expect(len(log21.Hits.DataHits) == 0).To(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | ed750d58-54c8-4d70-9b6b-74b6ef0b4577 | CPaasrunOnly-Author:qitang-Medium-52131-Vector Logs from different projects are forwarded to the same index if the pods have same annotation | ['"encoding/json"', '"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/vector_es.go | g.It("CPaasrunOnly-Author:qitang-Medium-52131-Vector Logs from different projects are forwarded to the same index if the pods have same annotation", func() {
containerName := "log-52131-" + getRandomString()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
app1 := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", app1, "-p", "CONTAINER="+containerName).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
app2 := oc.Namespace()
err = oc.WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", app2, "-p", "CONTAINER="+containerName).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
esProj := oc.Namespace()
ees := externalES{
namespace: esProj,
version: "7",
serverName: "external-es",
httpSSL: true,
secretName: "json-log-52131",
loggingNS: esProj,
}
defer ees.remove(oc)
ees.deploy(oc)
eesURL := "https://" + ees.serverName + "." + ees.namespace + ".svc:9200"
g.By("create clusterlogforwarder")
clf := clusterlogforwarder{
name: "clf-52131",
namespace: esProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch-https.yaml"),
secretName: ees.secretName,
waitForPodReady: true,
collectApplicationLogs: true,
serviceAccountName: "clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "INDEX={.kubernetes.container_name||\"none-container-logs\"}", "ES_URL="+eesURL, "ES_VERSION="+ees.version, "INPUT_REFS=[\"application\"]")
g.By("check indices in externale ES")
ees.waitForIndexAppear(oc, containerName)
g.By("check data in ES")
for _, proj := range []string{app1, app2} {
count, err := ees.getDocCount(oc, containerName, "{\"query\": {\"match_phrase\": {\"kubernetes.namespace_name\": \""+proj+"\"}}}")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(count > 0).To(o.BeTrue())
}
}) | |||||
test case | openshift/openshift-tests-private | 29c07c30-4ce0-4638-b656-7e887a80f846 | Author:qitang-CPaasrunOnly-Medium-74947-New filter openshiftLabels testing | ['"context"', '"encoding/json"', '"fmt"', '"path/filepath"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/logging/vector_es.go | g.It("Author:qitang-CPaasrunOnly-Medium-74947-New filter openshiftLabels testing", func() {
exutil.By("Create Elasticsearch")
esProj := oc.Namespace()
ees := externalES{
namespace: esProj,
version: "6",
serverName: "elasticsearch-server",
loggingNS: esProj,
}
defer ees.remove(oc)
ees.deploy(oc)
exutil.By("Create project for app logs and deploy the log generator app")
oc.SetupProject()
appProj := oc.Namespace()
loglabeltemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create ClusterLogForwarder")
clf := clusterlogforwarder{
name: "clf-74947",
namespace: esProj,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
serviceAccountName: "test-clf-" + getRandomString(),
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch.yaml"),
}
defer clf.delete(oc)
clf.create(oc, "ES_URL=http://"+getRouteAddress(oc, ees.namespace, ees.serverName)+":80", "ES_VERSION="+ees.version)
exutil.By("Check logs in ES")
ees.waitForIndexAppear(oc, "app")
ees.waitForIndexAppear(oc, "infra")
ees.waitForIndexAppear(oc, "audit")
exutil.By("Add new filter to the ClusterLogForwarder")
patch := `[{"op": "add", "path": "/spec/filters", "value": [{"name": "openshift-labels", "type": "openshiftLabels", "openshiftLabels": {"label-test": "ocp-74947", "clf/observability.openshift.io": "logging-74947"}}]}, {"op": "add", "path": "/spec/pipelines/0/filterRefs", "value": ["openshift-labels"]}]`
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
exutil.By("Check logs with label in ES")
checkLog := `{"size": 1, "sort": [{"@timestamp": {"order":"desc"}}], "query": {"bool": {"must": [{"match": {"openshift.labels.label-test": "ocp-74947"}},{"match": {"openshift.labels.clf_observability_openshift_io": "logging-74947"}}]}}}`
indexName := []string{"app", "infra", "audit"}
for i := 0; i < len(indexName); i++ {
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 60*time.Second, true, func(context.Context) (done bool, err error) {
logs := ees.searchDocByQuery(oc, indexName[i], checkLog)
if logs.Hits.Total > 0 || len(logs.Hits.DataHits) > 0 {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("No %s logs found with label in external ES", indexName[i]))
}
}) | |||||
test case | openshift/openshift-tests-private | 7c164cd8-b929-4a6e-a38a-969601366080 | CPaasrunOnly-Author:ikanse-Medium-48593-Vector ClusterLogForwarder Label each message type differently and send all to the same output | ['"context"', '"encoding/json"', '"fmt"', '"path/filepath"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"'] | github.com/openshift/openshift-tests-private/test/extended/logging/vector_es.go | g.It("CPaasrunOnly-Author:ikanse-Medium-48593-Vector ClusterLogForwarder Label each message type differently and send all to the same output", func() {
g.By("Create external Elasticsearch instance")
esProj := oc.Namespace()
ees := externalES{
namespace: esProj,
version: "6",
serverName: "elasticsearch-server",
loggingNS: esProj,
}
defer ees.remove(oc)
ees.deploy(oc)
g.By("Create project for app logs and deploy the log generator app")
oc.SetupProject()
appProj := oc.Namespace()
loglabeltemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create ClusterLogForwarder instance")
clf := clusterlogforwarder{
name: "clf-48593",
namespace: esProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "48593.yaml"),
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ES_URL=http://"+ees.serverName+"."+esProj+".svc:9200", "ES_VERSION="+ees.version)
g.By("Check logs in external ES")
ees.waitForIndexAppear(oc, "app")
ees.waitForIndexAppear(oc, "infra")
ees.waitForIndexAppear(oc, "audit")
g.By("Check logs with pipeline label in external ES")
indexName := []string{"app", "infra", "audit"}
for i := 0; i < len(indexName); i++ {
checkLog := "{\"size\": 1, \"sort\": [{\"@timestamp\": {\"order\":\"desc\"}}], \"query\": {\"match\": {\"openshift.labels.logging\": \"" + indexName[i] + "-logs\"}}}"
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 60*time.Second, true, func(context.Context) (done bool, err error) {
logs := ees.searchDocByQuery(oc, indexName[i], checkLog)
if logs.Hits.Total > 0 {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("No %s logs found with pipeline label in extranl ES", indexName[i]))
}
}) | |||||
test case | openshift/openshift-tests-private | 262a49b7-62c7-4d17-90d0-9386be9eefd8 | CPaasrunOnly-Author:ikanse-High-46882-High-47061-Vector ClusterLogForwarder forward logs to Elasticsearch insecure forward and metadata check | ['"encoding/json"', '"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/vector_es.go | g.It("CPaasrunOnly-Author:ikanse-High-46882-High-47061-Vector ClusterLogForwarder forward logs to Elasticsearch insecure forward and metadata check", func() {
g.By("Create external Elasticsearch instance")
esProj := oc.Namespace()
ees := externalES{
namespace: esProj,
version: "6",
serverName: "elasticsearch-server",
httpSSL: false,
loggingNS: esProj,
}
defer ees.remove(oc)
ees.deploy(oc)
g.By("Create project for app logs and deploy the log generator app")
oc.SetupProject()
app := oc.Namespace()
// to test fix for LOG-3463, add labels to the app project
_, err := exutil.AddLabelsToSpecificResource(oc, "ns/"+app, "", "app=logging-apps", "app.kubernetes.io/instance=logging-apps-test", "app.test=test")
o.Expect(err).NotTo(o.HaveOccurred())
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_non_json_log_template.json")
err = oc.WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", app).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
clusterID, err := getClusterID(oc)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create ClusterLogForwarder")
clf := clusterlogforwarder{
name: "clf-46882",
namespace: esProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch.yaml"),
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ES_URL=http://"+ees.serverName+"."+esProj+".svc:9200", "ES_VERSION="+ees.version)
g.By("Check logs in external ES")
ees.waitForIndexAppear(oc, "app")
ees.waitForIndexAppear(oc, "infra")
ees.waitForIndexAppear(oc, "audit")
ees.waitForProjectLogsAppear(oc, app, "app")
appLogs := ees.searchDocByQuery(oc, "app", "{\"query\": {\"match_phrase\": {\"kubernetes.namespace_name\": \""+app+"\"}}}")
log := appLogs.Hits.DataHits[0].Source
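// the non-JSON generator emits a fixed message with CJK, pinyin and µs characters; an exact match proves multi-byte content survives forwarding intact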
o.Expect(log.Message == "ㄅㄉˇˋㄓˊ˙ㄚㄞㄢㄦㄆ 中国 883.317µs ā á ǎ à ō ó ▅ ▆ ▇ █ 々").Should(o.BeTrue())
o.Expect(log.OpenShift.ClusterID == clusterID).Should(o.BeTrue())
o.Expect(log.OpenShift.Sequence > 0).Should(o.BeTrue())
o.Expect(log.Kubernetes.NamespaceLabels["app_kubernetes_io_instance"] == "logging-apps-test").Should(o.BeTrue())
o.Expect(log.Kubernetes.NamespaceLabels["app_test"] == "test").Should(o.BeTrue())
infraLogs := ees.searchDocByQuery(oc, "infra", "")
o.Expect(infraLogs.Hits.DataHits[0].Source.OpenShift.ClusterID == clusterID).Should(o.BeTrue())
auditLogs := ees.searchDocByQuery(oc, "audit", "")
o.Expect(auditLogs.Hits.DataHits[0].Source.OpenShift.ClusterID == clusterID).Should(o.BeTrue())
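// cross-check required metadata: no document of any log type may be missing @timestamp, openshift.cluster_id or openshift.sequence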
for _, logType := range []string{"app", "infra", "audit"} {
for _, field := range []string{"@timestamp", "openshift.cluster_id", "openshift.sequence"} {
count, err := ees.getDocCount(oc, logType, "{\"query\": {\"bool\": {\"must_not\": {\"exists\": {\"field\": \""+field+"\"}}}}}")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(count == 0).Should(o.BeTrue())
}
}
}) | |||||
test case | openshift/openshift-tests-private | ead5a5de-10d0-4983-954f-9d6a96adf68c | Author:ikanse-CPaasrunOnly-High-55396-alert rule CollectorNodeDown testing | ['"encoding/json"', '"path/filepath"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/logging/vector_es.go | g.It("Author:ikanse-CPaasrunOnly-High-55396-alert rule CollectorNodeDown testing", func() {
g.By("Create external Elasticsearch instance")
esProj := oc.Namespace()
ees := externalES{
namespace: esProj,
version: "6",
serverName: "elasticsearch-server",
httpSSL: true,
secretName: "ees-https",
loggingNS: esProj,
}
defer ees.remove(oc)
ees.deploy(oc)
g.By("Create project for app logs and deploy the log generator app")
oc.SetupProject()
appProj := oc.Namespace()
loglabeltemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create ClusterLogForwarder instance")
clf := clusterlogforwarder{
name: "clf-55396",
namespace: esProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch-https.yaml"),
secretName: ees.secretName,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
serviceAccountName: "test-clf-" + getRandomString(),
enableMonitoring: true,
}
defer clf.delete(oc)
clf.create(oc, "ES_URL=https://"+ees.serverName+"."+esProj+".svc:9200", "ES_VERSION="+ees.version, `SECURITY_PROFILE={"type": "Old"}`)
g.By("Check logs in external ES")
ees.waitForIndexAppear(oc, "app")
ees.waitForIndexAppear(oc, "infra")
ees.waitForIndexAppear(oc, "audit")
g.By("Patch the collector Prometheus Rule for alert CollectorNodeDown to set alert firing time to 2m")
defer oc.AsAdmin().WithoutNamespace().Run("patch").Args("prometheusrules", "collector", "--type=json", "-p", `[{"op": "replace", "path": "/spec/groups/0/rules/0/for", "value":"10m"}]`, "-n", cloNS).Execute()
er := oc.AsAdmin().WithoutNamespace().Run("patch").Args("prometheusrules", "collector", "--type=json", "-p", `[{"op": "replace", "path": "/spec/groups/0/rules/0/for", "value":"2m"}]`, "-n", cloNS).Execute()
o.Expect(er).NotTo(o.HaveOccurred())
g.By("Update clusterlogforwarder to set the cpu and memory for collector pods")
resource := `[{"op": "replace", "path": "/spec/collector/resources", "value": {"limits": {"memory": "128Mi", "cpu": "10m"}, "requests": {"cpu": "1m", "memory": "2Mi"}}}]`
clf.update(oc, "", resource, "--type=json")
g.By("Check the alert CollectorNodeDown is in state firing or pending")
checkAlert(oc, getSAToken(oc, "prometheus-k8s", "openshift-monitoring"), "CollectorNodeDown", "firing/pending", 5)
}) | |||||
test case | openshift/openshift-tests-private | 26aa99ee-d544-405e-9c93-c39ea509efbc | CPaasrunOnly-Author:ikanse-Medium-55200-Medium-47753-Vector Forward logs to external Elasticsearch with username password HTTP ES 6.x | ['"encoding/json"', '"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/vector_es.go | g.It("CPaasrunOnly-Author:ikanse-Medium-55200-Medium-47753-Vector Forward logs to external Elasticsearch with username password HTTP ES 6.x", func() {
g.By("Create external Elasticsearch instance")
esProj := oc.Namespace()
ees := externalES{
namespace: esProj,
version: "6",
serverName: "elasticsearch-server",
userAuth: true,
username: "user1",
password: getRandomString(),
secretName: "ees-http",
loggingNS: esProj,
}
defer ees.remove(oc)
ees.deploy(oc)
g.By("Create project for app logs and deploy the log generator app")
oc.SetupProject()
appProj := oc.Namespace()
loglabeltemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create ClusterLogForwarder instance")
clf := clusterlogforwarder{
name: "clf-47753",
namespace: esProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch-userauth.yaml"),
secretName: ees.secretName,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ES_URL=http://"+ees.serverName+"."+esProj+".svc:9200", "ES_VERSION="+ees.version)
g.By("Check logs in external ES")
ees.waitForIndexAppear(oc, "app")
ees.waitForIndexAppear(oc, "infra")
ees.waitForIndexAppear(oc, "audit")
}) | |||||
test case | openshift/openshift-tests-private | 3e90c90a-5a73-4c78-b471-9a7b03eccb0b | CPaasrunOnly-Author:ikanse-Medium-55199-Medium-47755-Vector Forward logs to external Elasticsearch with username password HTTPS ES 7.x | ['"encoding/json"', '"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/vector_es.go | g.It("CPaasrunOnly-Author:ikanse-Medium-55199-Medium-47755-Vector Forward logs to external Elasticsearch with username password HTTPS ES 7.x", func() {
oc.SetupProject()
clfNS := oc.Namespace()
g.By("Create external Elasticsearch instance")
esProj := oc.Namespace()
ees := externalES{
namespace: esProj,
version: "7",
serverName: "elasticsearch-server",
httpSSL: true,
userAuth: true,
username: "user1",
password: getRandomString(),
secretName: "ees-47755",
loggingNS: clfNS,
}
defer ees.remove(oc)
ees.deploy(oc)
g.By("Create project for app logs and deploy the log generator app")
oc.SetupProject()
appProj := oc.Namespace()
loglabeltemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create ClusterLogForwarder instance")
clf := clusterlogforwarder{
name: "clf-55199",
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch-userauth-https.yaml"),
secretName: ees.secretName,
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
enableMonitoring: true,
}
defer clf.delete(oc)
clf.create(oc, "ES_URL=https://"+ees.serverName+"."+esProj+".svc:9200", "ES_VERSION="+ees.version)
g.By("Check logs in external ES")
ees.waitForIndexAppear(oc, "app")
ees.waitForIndexAppear(oc, "infra")
ees.waitForIndexAppear(oc, "audit")
}) | |||||
test case | openshift/openshift-tests-private | a3439e67-d6db-4113-a49e-c2175fe3bfce | CPaasrunOnly-Author:ikanse-High-61450-Collector-External Elasticsearch output complies with the tlsSecurityProfile config.[Slow][Disruptive] | ['"encoding/json"', '"fmt"', '"path/filepath"', '"strings"', '"time"', 'e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"'] | github.com/openshift/openshift-tests-private/test/extended/logging/vector_es.go | g.It("CPaasrunOnly-Author:ikanse-High-61450-Collector-External Elasticsearch output complies with the tlsSecurityProfile config.[Slow][Disruptive]", func() {
g.By("Configure the global tlsSecurityProfile to use custom profile")
ogTLS, er := oc.AsAdmin().WithoutNamespace().Run("get").Args("apiserver/cluster", "-o", "jsonpath={.spec.tlsSecurityProfile}").Output()
o.Expect(er).NotTo(o.HaveOccurred())
if ogTLS == "" {
ogTLS = "null"
}
ogPatch := fmt.Sprintf(`[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": %s}]`, ogTLS)
defer func() {
oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", ogPatch).Execute()
waitForOperatorsRunning(oc)
}()
patch := `[{"op": "replace", "path": "/spec/tlsSecurityProfile", "value": {"custom":{"ciphers":["ECDHE-ECDSA-CHACHA20-POLY1305","ECDHE-RSA-CHACHA20-POLY1305","ECDHE-RSA-AES128-GCM-SHA256","ECDHE-ECDSA-AES128-GCM-SHA256","TLS_AES_128_GCM_SHA256","TLS_AES_256_GCM_SHA384","TLS_CHACHA20_POLY1305_SHA256","ECDHE-ECDSA-AES256-GCM-SHA384","ECDHE-RSA-AES256-GCM-SHA384","ECDHE-ECDSA-CHACHA20-POLY1305","ECDHE-RSA-CHACHA20-POLY1305","DHE-RSA-AES128-GCM-SHA256","DHE-RSA-AES256-GCM-SHA384","DHE-RSA-CHACHA20-POLY1305","ECDHE-ECDSA-AES128-SHA256","ECDHE-RSA-AES128-SHA256","ECDHE-ECDSA-AES128-SHA","ECDHE-RSA-AES128-SHA","ECDHE-ECDSA-AES256-SHA384","ECDHE-RSA-AES256-SHA384","ECDHE-ECDSA-AES256-SHA","ECDHE-RSA-AES256-SHA","DHE-RSA-AES128-SHA256","DHE-RSA-AES256-SHA256","AES128-GCM-SHA256","AES256-GCM-SHA384","AES128-SHA256","AES256-SHA256"],"minTLSVersion":"VersionTLS10"},"type":"Custom"}}]`
er = oc.AsAdmin().WithoutNamespace().Run("patch").Args("apiserver/cluster", "--type=json", "-p", patch).Execute()
o.Expect(er).NotTo(o.HaveOccurred())
g.By("Make sure that all the Cluster Operators are in healthy state before progressing.")
waitForOperatorsRunning(oc)
g.By("Create external Elasticsearch instance")
esProj := oc.Namespace()
ees := externalES{
namespace: esProj,
version: "6",
serverName: "elasticsearch-server",
httpSSL: true,
clientAuth: true,
secretName: "ees-https",
loggingNS: esProj,
}
defer ees.remove(oc)
ees.deploy(oc)
g.By("Create project for app logs and deploy the log generator app")
oc.SetupProject()
appProj := oc.Namespace()
loglabeltemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create ClusterLogForwarder instance")
clf := clusterlogforwarder{
name: "clf-61450",
namespace: esProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch-mtls.yaml"),
secretName: ees.secretName,
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ES_URL=https://"+ees.serverName+"."+esProj+".svc:9200", "ES_VERSION="+ees.version)
g.By("The Elasticsearch sink in Vector config must use the Custom tlsSecurityProfile")
searchString := `[sinks.output_es_created_by_user.tls]
min_tls_version = "VersionTLS10"
ciphersuites = "ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES128-GCM-SHA256,TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,DHE-RSA-AES128-GCM-SHA256,DHE-RSA-AES256-GCM-SHA384,DHE-RSA-CHACHA20-POLY1305,ECDHE-ECDSA-AES128-SHA256,ECDHE-RSA-AES128-SHA256,ECDHE-ECDSA-AES128-SHA,ECDHE-RSA-AES128-SHA,ECDHE-ECDSA-AES256-SHA384,ECDHE-RSA-AES256-SHA384,ECDHE-ECDSA-AES256-SHA,ECDHE-RSA-AES256-SHA,DHE-RSA-AES128-SHA256,DHE-RSA-AES256-SHA256,AES128-GCM-SHA256,AES256-GCM-SHA384,AES128-SHA256,AES256-SHA256"
key_file = "/var/run/ocp-collector/secrets/ees-https/tls.key"
crt_file = "/var/run/ocp-collector/secrets/ees-https/tls.crt"
ca_file = "/var/run/ocp-collector/secrets/ees-https/ca-bundle.crt"`
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("Check logs in external ES")
ees.waitForIndexAppear(oc, "app")
ees.waitForIndexAppear(oc, "infra")
ees.waitForIndexAppear(oc, "audit")
g.By("Set Old tlsSecurityProfile for the External ES output.")
patch = `[{"op": "add", "path": "/spec/outputs/0/tls/securityProfile", "value": {"type": "Old"}}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
g.By("The Elasticsearch sink in Vector config must use the Old tlsSecurityProfile")
searchString = `[sinks.output_es_created_by_user.tls]
min_tls_version = "VersionTLS10"
ciphersuites = "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-CHACHA20-POLY1305,ECDHE-RSA-CHACHA20-POLY1305,DHE-RSA-AES128-GCM-SHA256,DHE-RSA-AES256-GCM-SHA384,DHE-RSA-CHACHA20-POLY1305,ECDHE-ECDSA-AES128-SHA256,ECDHE-RSA-AES128-SHA256,ECDHE-ECDSA-AES128-SHA,ECDHE-RSA-AES128-SHA,ECDHE-ECDSA-AES256-SHA384,ECDHE-RSA-AES256-SHA384,ECDHE-ECDSA-AES256-SHA,ECDHE-RSA-AES256-SHA,DHE-RSA-AES128-SHA256,DHE-RSA-AES256-SHA256,AES128-GCM-SHA256,AES256-GCM-SHA384,AES128-SHA256,AES256-SHA256,AES128-SHA,AES256-SHA,DES-CBC3-SHA"
key_file = "/var/run/ocp-collector/secrets/ees-https/tls.key"
crt_file = "/var/run/ocp-collector/secrets/ees-https/tls.crt"
ca_file = "/var/run/ocp-collector/secrets/ees-https/ca-bundle.crt"`
result, err = checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", searchString)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).To(o.BeTrue(), "the configuration %s is not in vector.toml", searchString)
g.By("Check for errors in collector pod logs.")
e2e.Logf("Wait for a minute before the collector logs are generated.")
time.Sleep(60 * time.Second)
collectorLogs, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("-n", clf.namespace, "--selector=app.kubernetes.io/component=collector").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(collectorLogs, "Error trying to connect")).ShouldNot(o.BeTrue(), "Unable to connect to the external Elasticsearch server.")
g.By("Delete the Elasticsearch server pod to recollect logs")
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pods", "-n", esProj, "-l", "app=elasticsearch-server").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodReadyWithLabel(oc, esProj, "app=elasticsearch-server")
g.By("Check logs in external ES")
ees.waitForIndexAppear(oc, "app")
ees.waitForIndexAppear(oc, "infra")
ees.waitForIndexAppear(oc, "audit")
}) | |||||
test case | openshift/openshift-tests-private | 485ebb27-093b-4fcf-ba64-6e5472baff1a | CPaasrunOnly-Author:qitang-High-71000-Collect or exclude logs by namespace[Slow] | ['"encoding/json"', '"path/filepath"', '"strconv"', '"strings"', '"time"'] | github.com/openshift/openshift-tests-private/test/extended/logging/vector_es.go | g.It("CPaasrunOnly-Author:qitang-High-71000-Collect or exclude logs by namespace[Slow]", func() {
exutil.By("Deploy Elasticsearch")
esProj := oc.Namespace()
ees := externalES{
namespace: esProj,
version: "7",
serverName: "elasticsearch-server-71000",
httpSSL: true,
clientAuth: true,
secretName: "ees-https-71000",
loggingNS: esProj,
}
defer ees.remove(oc)
ees.deploy(oc)
exutil.By("Deploy CLF to exclude some logs by setting excludeNamespaces")
clf := clusterlogforwarder{
name: "clf-71000",
namespace: esProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch-mtls.yaml"),
secretName: ees.secretName,
collectApplicationLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "INPUT_REFS=[\"application\"]", "ES_URL=https://"+ees.serverName+"."+esProj+".svc:9200", "ES_VERSION="+ees.version)
patch := `[{"op": "add", "path": "/spec/inputs", "value": [{"name": "new-app", "type": "application", "application": {"excludes": [{"namespace":"logging-project-71000-2"}]}}]}, {"op": "replace", "path": "/spec/pipelines/0/inputRefs", "value": ["new-app"]}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
exutil.By("Create project for app logs and deploy the log generator")
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
for i := 0; i < 3; i++ {
ns := "logging-project-71000-" + strconv.Itoa(i)
defer oc.DeleteSpecifiedNamespaceAsAdmin(ns)
oc.CreateSpecifiedNamespaceAsAdmin(ns)
err := oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", ns).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
appNS := "logging-71000-test-1"
defer oc.DeleteSpecifiedNamespaceAsAdmin(appNS)
oc.CreateSpecifiedNamespaceAsAdmin(appNS)
err := oc.AsAdmin().WithoutNamespace().Run("new-app").Args("-f", jsonLogFile, "-n", appNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Check data in ES, logs from project/logging-project-71000-2 shouldn't be collected")
ees.waitForIndexAppear(oc, "app")
ees.waitForProjectLogsAppear(oc, "logging-project-71000-0", "app")
ees.waitForProjectLogsAppear(oc, "logging-project-71000-1", "app")
ees.waitForProjectLogsAppear(oc, appNS, "app")
count, err := ees.getDocCount(oc, "app", "{\"query\": {\"regexp\": {\"kubernetes.namespace_name\": \"logging-project-71000-2\"}}}")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(count == 0).Should(o.BeTrue())
exutil.By("Update CLF to exclude all namespaces")
patch = `[{"op": "replace", "path": "/spec/inputs/0/application/excludes/0/namespace", "value": "*"}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
exutil.By("Check data in ES, no logs should be collected")
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
ees.removeIndices(oc, "application")
// sleep 10 seconds for collector pods to work with new configurations
time.Sleep(10 * time.Second)
indices, err := ees.getIndices(oc)
o.Expect(err).NotTo(o.HaveOccurred())
if len(indices) > 0 {
for _, i := range indices {
o.Expect(strings.Contains(i.Index, "app")).ShouldNot(o.BeTrue())
}
}
exutil.By("Update CLF to set include namespaces")
patch = `[{"op": "add", "path": "/spec/inputs/0/application/includes", "value": [{"namespace": "logging-project-71000*"}]}, {"op": "replace", "path": "/spec/inputs/0/application/excludes/0/namespace", "value": "logging-project-71000-2"}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
ees.removeIndices(oc, "application")
exutil.By("Check data in ES, logs from project/logging-project-71000-2 and " + appNS + "shouldn't be collected")
ees.waitForIndexAppear(oc, "app")
ees.waitForProjectLogsAppear(oc, "logging-project-71000-0", "app")
ees.waitForProjectLogsAppear(oc, "logging-project-71000-1", "app")
for _, ns := range []string{appNS, "logging-project-71000-2"} {
count, err = ees.getDocCount(oc, "app", "{\"query\": {\"regexp\": {\"kubernetes.namespace_name\": \""+ns+"\"}}}")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(count == 0).Should(o.BeTrue(), "find logs from project "+ns+", this is not expected")
}
exutil.By("Remove excludes from CLF")
patch = `[{"op": "remove", "path": "/spec/inputs/0/application/excludes"}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
ees.removeIndices(oc, "application")
exutil.By("Check data in ES, logs from logging-project-71000*, other logs shouldn't be collected")
ees.waitForIndexAppear(oc, "app")
ees.waitForProjectLogsAppear(oc, "logging-project-71000-0", "app")
ees.waitForProjectLogsAppear(oc, "logging-project-71000-1", "app")
ees.waitForProjectLogsAppear(oc, "logging-project-71000-2", "app")
count, err = ees.getDocCount(oc, "app", "{\"query\": {\"regexp\": {\"kubernetes.namespace_name\": \""+appNS+"\"}}}")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(count == 0).Should(o.BeTrue(), "find logs from project "+appNS+", this is not expected")
exutil.By("Update CLF to include all namespaces")
patch = `[{"op": "replace", "path": "/spec/inputs/0/application/includes/0/namespace", "value": "*"}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
exutil.By("Check data in ES, all application logs should be collected, but no logs from infra projects")
// sleep 10 seconds for collector pods to send the cached records
time.Sleep(10 * time.Second)
ees.removeIndices(oc, "application")
for _, ns := range []string{appNS, "logging-project-71000-0", "logging-project-71000-1", "logging-project-71000-2"} {
ees.waitForProjectLogsAppear(oc, ns, "app")
}
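// in ES regexp syntax "@" matches any string, so "openshift@" covers every namespace starting with "openshift"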
count, err = ees.getDocCount(oc, "app", "{\"query\": {\"regexp\": {\"kubernetes.namespace_name\": \"openshift@\"}}}")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(count == 0).Should(o.BeTrue(), "find logs from project openshift*, this is not expected")
}) | |||||
test case | openshift/openshift-tests-private | 09d83755-d9bb-416d-8447-b55941a4a914 | Author:qitang-CPaasrunOnly-High-51740-Vector Preserve k8s Common Labels | ['"context"', '"encoding/json"', '"path/filepath"', '"reflect"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/logging/vector_es.go | g.It("Author:qitang-CPaasrunOnly-High-51740-Vector Preserve k8s Common Labels", func() {
loglabeltemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
labels := map[string]string{
"app.kubernetes.io/name": "test",
"app.kubernetes.io/instance": "functionaltest",
"app.kubernetes.io/version": "123",
"app.kubernetes.io/component": "thecomponent",
"app.kubernetes.io/part-of": "clusterlogging",
"app.kubernetes.io/managed-by": "clusterloggingoperator",
"app.kubernetes.io/created-by": "anoperator",
"run": "test-51740",
"test": "test-logging-51740",
}
processedLabels := map[string]string{
"app_kubernetes_io_name": "test",
"app_kubernetes_io_instance": "functionaltest",
"app_kubernetes_io_version": "123",
"app_kubernetes_io_component": "thecomponent",
"app_kubernetes_io_part-of": "clusterlogging",
"app_kubernetes_io_managed-by": "clusterloggingoperator",
"app_kubernetes_io_created-by": "anoperator",
"run": "test-51740",
"test": "test-logging-51740",
}
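// forwarding flattens "." and "/" in label keys to "_" while "-" is kept, as the expected map above shows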
labelJSON, _ := json.Marshal(labels)
labelStr := string(labelJSON)
app := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-f", loglabeltemplate, "-n", app, "-p", "LABELS="+labelStr).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
//For this case, we need to cover both ES and non-ES log stores and check the log entity in each;
//to keep the functions simple, use external Loki as the non-ES log store
g.By("Create Loki project and deploy Loki Server")
oc.SetupProject()
lokiNS := oc.Namespace()
loki := externalLoki{"loki-server", lokiNS}
defer loki.remove(oc)
loki.deployLoki(oc)
g.By("Create external Elasticsearch instance")
esProj := oc.Namespace()
ees := externalES{
namespace: esProj,
version: "6",
serverName: "elasticsearch-server",
httpSSL: false,
loggingNS: esProj,
}
defer ees.remove(oc)
ees.deploy(oc)
g.By("Create ClusterLogForwarder")
clf := clusterlogforwarder{
name: "clf-51740",
namespace: esProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch.yaml"),
collectApplicationLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ES_URL=http://"+ees.serverName+"."+esProj+".svc:9200", "ES_VERSION="+ees.version, "INPUT_REFS=[\"application\"]")
lokiURL := "http://" + loki.name + "." + lokiNS + ".svc:3100"
patch := `[{"op": "add", "path": "/spec/outputs/-", "value": {"name": "loki-server", "type": "loki", "loki": {"url": "` + lokiURL + `"}}}, {"op": "add", "path": "/spec/pipelines/0/outputRefs/-", "value": "loki-server"}]`
clf.update(oc, "", patch, "--type=json")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
g.By("check data in ES")
ees.waitForIndexAppear(oc, "app")
ees.waitForProjectLogsAppear(oc, app, "app")
dataInES := ees.searchDocByQuery(oc, "app", "{\"size\": 1, \"sort\": [{\"@timestamp\": {\"order\":\"desc\"}}], \"query\": {\"match_phrase\": {\"kubernetes.namespace_name\": \""+app+"\"}}}")
k8sLabelsInES := dataInES.Hits.DataHits[0].Source.Kubernetes.Lables
o.Expect(len(k8sLabelsInES) > 0).Should(o.BeTrue())
o.Expect(reflect.DeepEqual(processedLabels, k8sLabelsInES)).Should(o.BeTrue())
g.By("check data in Loki")
route := "http://" + getRouteAddress(oc, loki.namespace, loki.name)
lc := newLokiClient(route)
err = wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
appLogs, err := lc.searchByNamespace("", app)
if err != nil {
return false, err
}
if appLogs.Status == "success" && len(appLogs.Data.Result) > 0 {
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, "can't find app logs")
dataInLoki, _ := lc.searchByNamespace("", app)
lokiLog := extractLogEntities(dataInLoki)
k8sLabelsInLoki := lokiLog[0].Kubernetes.Lables
o.Expect(reflect.DeepEqual(processedLabels, k8sLabelsInLoki)).Should(o.BeTrue())
flatLabelsInLoki := lokiLog[0].Kubernetes.FlatLabels
o.Expect(len(flatLabelsInLoki) == 0).Should(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | 28f6f035-155f-49e4-a820-8fb44e3d1873 | Author:qitang-CPaasrunOnly-Critical-74927-Forward logs to elasticsearch 8.x. | ['"encoding/json"', '"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/vector_es.go | g.It("Author:qitang-CPaasrunOnly-Critical-74927-Forward logs to elasticsearch 8.x.", func() {
exutil.By("Create external Elasticsearch instance")
esProj := oc.Namespace()
ees := externalES{
namespace: esProj,
version: "8",
serverName: "elasticsearch-server",
httpSSL: true,
clientAuth: true,
userAuth: true,
username: "user1",
password: getRandomString(),
secretName: "ees-74927",
loggingNS: esProj,
}
defer ees.remove(oc)
ees.deploy(oc)
exutil.By("Create project for app logs and deploy the log generator app")
oc.SetupProject()
appProj := oc.Namespace()
loglabeltemplate := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", loglabeltemplate).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create ClusterLogForwarder")
clf := clusterlogforwarder{
name: "clf-74927",
namespace: esProj,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch-userauth-mtls.yaml"),
secretName: ees.secretName,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ES_URL=https://"+ees.serverName+"."+esProj+".svc:9200", "ES_VERSION="+ees.version, "INDEX=logging-74927.{.log_type||\"none-typed-logs\"}-write",
`TUNING={"compression": "zlib", "deliveryMode": "AtLeastOnce", "maxRetryDuration": 30, "maxWrite": "20M", "minRetryDuration": 10}`)
clf.update(oc, "", `[{"op": "add", "path": "/spec/outputs/0/rateLimit", value: {"maxRecordsPerSecond": 5000}}]`, "--type=json")
clf.waitForCollectorPodsReady(oc)
exutil.By("Check logs in ES")
ees.waitForIndexAppear(oc, "logging-74927.application-write")
ees.waitForIndexAppear(oc, "logging-74927.infrastructure-write")
ees.waitForIndexAppear(oc, "logging-74927.audit-write")
exutil.By("Check configurations in collector pods")
expectedConfigs := []string{
`[transforms.output_es_created_by_user_throttle]
type = "throttle"
inputs = ["pipeline_forward_to_external_es_viaqdedot_2"]
window_secs = 1
threshold = 5000`,
`compression = "zlib"`,
`[sinks.output_es_created_by_user.batch]
max_bytes = 20000000`,
`[sinks.output_es_created_by_user.buffer]
type = "disk"
when_full = "block"
max_size = 268435488`,
`[sinks.output_es_created_by_user.request]
retry_initial_backoff_secs = 10
retry_max_duration_secs = 30`,
}
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", expectedConfigs...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).Should(o.BeTrue())
}) | |||||
test | openshift/openshift-tests-private | 04b702dc-9d40-4ddd-b6d9-5aedc316748a | acceptance | import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/logging/acceptance.go | // Package logging is used to test openshift-logging features
package logging
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-openshift-logging] LOGGING Logging", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("log-accept", exutil.KubeConfigPath())
loggingBaseDir string
CLO, LO SubscriptionObjects
)
g.BeforeEach(func() {
exutil.SkipBaselineCaps(oc, "None")
loggingBaseDir = exutil.FixturePath("testdata", "logging")
subTemplate := filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml")
CLO = SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
SkipCaseWhenFailed: true,
}
LO = SubscriptionObjects{
OperatorName: "loki-operator-controller-manager",
Namespace: loNS,
PackageName: "loki-operator",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
SkipCaseWhenFailed: true,
}
g.By("deploy CLO")
CLO.SubscribeOperator(oc)
oc.SetupProject()
})
// author [email protected]
g.It("Author:qitang-CPaasrunBoth-Critical-74397-[InterOps] Forward logs to lokistack.[Slow][Serial]", func() {
g.By("deploy LO")
LO.SubscribeOperator(oc)
s := getStorageType(oc)
sc, err := getStorageClassName(oc)
if err != nil || len(sc) == 0 {
g.Skip("can't get storageclass from cluster, skip this case")
}
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
if !hasMaster(oc) {
nodeName, err := genLinuxAuditLogsOnWorker(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer deleteLinuxAuditPolicyFromNode(oc, nodeName)
}
g.By("deploy loki stack")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-74397",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-74397",
storageClass: sc,
bucketName: "logging-loki-74397-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("deploy logfilesmetricexporter")
lfme := logFileMetricExporter{
name: "instance",
namespace: loggingNS,
template: filepath.Join(loggingBaseDir, "logfilemetricexporter", "lfme.yaml"),
waitPodsReady: true,
}
defer lfme.delete(oc)
lfme.create(oc)
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "clf-74397",
namespace: loggingNS,
serviceAccountName: "logcollector-74397",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-74397",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
enableMonitoring: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
//check logs in loki stack
g.By("check logs in loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"application", "infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
labels, err := lc.listLabels(logType, "")
o.Expect(err).NotTo(o.HaveOccurred(), "got error when checking %s log labels", logType)
e2e.Logf("\nthe %s log labels are: %v\n", logType, labels)
}
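// log entries carrying no kubernetes_namespace_name label are node journald logs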
journalLog, err := lc.searchLogsInLoki("infrastructure", `{log_type = "infrastructure", kubernetes_namespace_name !~ ".+"}`)
o.Expect(err).NotTo(o.HaveOccurred())
journalLogs := extractLogEntities(journalLog)
o.Expect(len(journalLogs) > 0).Should(o.BeTrue(), "can't find journal logs in lokistack")
e2e.Logf("find journal logs")
lc.waitForLogsAppearByProject("application", appProj)
g.By("Check if the ServiceMonitor object for Vector is created.")
resource{"servicemonitor", clf.name, clf.namespace}.WaitForResourceToAppear(oc)
promToken := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
g.By("check metrics exposed by collector")
for _, job := range []string{clf.name, "logfilesmetricexporter"} {
checkMetric(oc, promToken, "{job=\""+job+"\"}", 3)
}
for _, metric := range []string{"log_logged_bytes_total", "vector_component_received_events_total"} {
checkMetric(oc, promToken, metric, 3)
}
g.By("check metrics exposed by loki")
svcs, err := oc.AdminKubeClient().CoreV1().Services(ls.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/created-by=lokistack-controller"})
o.Expect(err).NotTo(o.HaveOccurred())
for _, svc := range svcs.Items {
if !strings.Contains(svc.Name, "grpc") && !strings.Contains(svc.Name, "ring") {
checkMetric(oc, promToken, "{job=\""+svc.Name+"\"}", 3)
}
}
for _, metric := range []string{"loki_boltdb_shipper_compactor_running", "loki_distributor_bytes_received_total", "loki_inflight_requests", "workqueue_work_duration_seconds_bucket{namespace=\"" + loNS + "\", job=\"loki-operator-controller-manager-metrics-service\"}", "loki_build_info", "loki_ingester_streams_created_total"} {
checkMetric(oc, promToken, metric, 3)
}
exutil.By("Validate log streams are pushed to external storage bucket/container")
ls.validateExternalObjectStorageForLogs(oc, []string{"application", "audit", "infrastructure"})
})
g.It("Author:qitang-CPaasrunBoth-ConnectedOnly-Critical-74926-[InterOps] Forward logs to cloudwatch.", func() {
clfNS := oc.Namespace()
cw := cloudwatchSpec{
collectorSAName: "cloudwatch-" + getRandomString(),
groupName: "logging-74926-" + getInfrastructureName(oc) + `.{.log_type||"none-typed-logs"}`,
logTypes: []string{"infrastructure", "application", "audit"},
secretNamespace: clfNS,
secretName: "logging-74926-" + getRandomString(),
}
cw.init(oc)
defer cw.deleteResources(oc)
g.By("Create log producer")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
if !cw.hasMaster {
nodeName, err := genLinuxAuditLogsOnWorker(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer deleteLinuxAuditPolicyFromNode(oc, nodeName)
}
g.By("Create clusterlogforwarder")
var template string
if cw.stsEnabled {
template = filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-iamRole.yaml")
} else {
template = filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-accessKey.yaml")
}
clf := clusterlogforwarder{
name: "clf-74926",
namespace: clfNS,
secretName: cw.secretName,
templateFile: template,
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
enableMonitoring: true,
serviceAccountName: cw.collectorSAName,
}
defer clf.delete(oc)
clf.createServiceAccount(oc)
cw.createClfSecret(oc)
clf.create(oc, "REGION="+cw.awsRegion, "GROUP_NAME="+cw.groupName, `TUNING={"compression": "snappy", "deliveryMode": "AtMostOnce", "maxRetryDuration": 20, "maxWrite": "10M", "minRetryDuration": 5}`)
nodes, err := clf.getCollectorNodeNames(oc)
o.Expect(err).NotTo(o.HaveOccurred())
cw.nodes = append(cw.nodes, nodes...)
g.By("Check logs in Cloudwatch")
o.Expect(cw.logsFound()).To(o.BeTrue())
exutil.By("check tuning in collector configurations")
expectedConfigs := []string{
`compression = "snappy"`,
`[sinks.output_cloudwatch.batch]
max_bytes = 10000000`,
`[sinks.output_cloudwatch.buffer]
when_full = "drop_newest"`,
`[sinks.output_cloudwatch.request]
retry_initial_backoff_secs = 5
retry_max_duration_secs = 20`,
}
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", expectedConfigs...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).Should(o.BeTrue())
})
//author [email protected]
g.It("Author:qitang-CPaasrunBoth-ConnectedOnly-Critical-74924-Forward logs to GCL", func() {
projectID, err := getGCPProjectID(oc)
o.Expect(err).NotTo(o.HaveOccurred())
gcl := googleCloudLogging{
projectID: projectID,
logName: getInfrastructureName(oc) + "-74924",
}
defer gcl.removeLogs()
g.By("Create log producer")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
clfNS := oc.Namespace()
gcpSecret := resource{"secret", "gcp-secret-74924", clfNS}
defer gcpSecret.clear(oc)
err = createSecretForGCL(oc, gcpSecret.name, gcpSecret.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
clf := clusterlogforwarder{
name: "clf-74924",
namespace: clfNS,
secretName: gcpSecret.name,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "googleCloudLogging.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ID_TYPE=project", "ID_VALUE="+gcl.projectID, "LOG_ID="+gcl.logName)
for _, logType := range []string{"infrastructure", "audit", "application"} {
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
logs, err := gcl.getLogByType(logType)
if err != nil {
return false, err
}
return len(logs) > 0, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("%s logs are not found", logType))
}
err = gcl.waitForLogsAppearByNamespace(appProj)
exutil.AssertWaitPollNoErr(err, "can't find app logs from project/"+appProj)
// Check tuning options for GCL under collector configMap
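// the retry values (10s initial backoff, 20s max) presumably come from a tuning block baked into the googleCloudLogging.yaml template, since no TUNING parameter is passed here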
expectedConfigs := []string{"[sinks.output_gcp_logging.batch]", "[sinks.output_gcp_logging.buffer]", "[sinks.output_gcp_logging.request]", "retry_initial_backoff_secs = 10", "retry_max_duration_secs = 20"}
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", expectedConfigs...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).Should(o.BeTrue())
})
//author [email protected]
g.It("Author:anli-CPaasrunBoth-ConnectedOnly-Critical-71772-Forward logs to az Log Analytics -- full options", func() {
platform := exutil.CheckPlatform(oc)
if platform == "azure" && exutil.IsWorkloadIdentityCluster(oc) {
g.Skip("Skip on the workload identity enabled cluster!")
}
var (
resourceGroupName string
location string
)
infraName := getInfrastructureName(oc)
if platform != "azure" {
if !readAzureCredentials() {
g.Skip("Skip for the platform is not Azure and can't get credentials from env vars.")
}
resourceGroupName = infraName + "-logging-71772-rg"
azureSubscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID")
cred := createNewDefaultAzureCredential()
location = "westus" //TODO: define default location
_, err := createAzureResourceGroup(resourceGroupName, azureSubscriptionID, location, cred)
defer deleteAzureResourceGroup(resourceGroupName, azureSubscriptionID, cred)
if err != nil {
g.Skip("Failed to create azure resource group: " + err.Error() + ", skip the case.")
}
e2e.Logf("Successfully created resource group %s", resourceGroupName)
} else {
cloudName := getAzureCloudName(oc)
if !(cloudName == "azurepubliccloud" || cloudName == "azureusgovernmentcloud") {
g.Skip("The case can only be running on Azure Public and Azure US Goverment now!")
}
resourceGroupName, _ = exutil.GetAzureCredentialFromCluster(oc)
}
g.By("Prepre Azure Log Storage Env")
workSpaceName := infraName + "case71772"
azLog, err := newAzureLog(oc, location, resourceGroupName, workSpaceName, "case71772")
defer azLog.deleteWorkspace()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create log producer")
clfNS := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err = oc.WithoutNamespace().Run("new-app").Args("-n", clfNS, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploy CLF to send logs to Log Analytics")
azureSecret := resource{"secret", "azure-secret-71772", clfNS}
defer azureSecret.clear(oc)
err = azLog.createSecret(oc, azureSecret.name, azureSecret.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
clf := clusterlogforwarder{
name: "clf-71772",
namespace: clfNS,
secretName: azureSecret.name,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "azureMonitor.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "PREFIX_OR_NAME="+azLog.tPrefixOrName, "CUSTOMER_ID="+azLog.customerID, "RESOURCE_ID="+azLog.workspaceID, "AZURE_HOST="+azLog.host)
g.By("Verify the test result")
for _, tableName := range []string{azLog.tPrefixOrName + "infra_log_CL", azLog.tPrefixOrName + "audit_log_CL", azLog.tPrefixOrName + "app_log_CL"} {
_, err := azLog.getLogByTable(tableName)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("can't find logs from %s in AzureLogWorkspace", tableName))
}
})
})
| package logging | ||||
test case | openshift/openshift-tests-private | d8ef1eb1-8be3-4b69-b0bf-8a0d5bd5e087 | Author:qitang-CPaasrunBoth-Critical-74397-[InterOps] Forward logs to lokistack.[Slow][Serial] | ['"context"', '"path/filepath"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/logging/acceptance.go | g.It("Author:qitang-CPaasrunBoth-Critical-74397-[InterOps] Forward logs to lokistack.[Slow][Serial]", func() {
g.By("deploy LO")
LO.SubscribeOperator(oc)
s := getStorageType(oc)
sc, err := getStorageClassName(oc)
if err != nil || len(sc) == 0 {
g.Skip("can't get storageclass from cluster, skip this case")
}
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
if !hasMaster(oc) {
nodeName, err := genLinuxAuditLogsOnWorker(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer deleteLinuxAuditPolicyFromNode(oc, nodeName)
}
g.By("deploy loki stack")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-74397",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-74397",
storageClass: sc,
bucketName: "logging-loki-74397-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("deploy logfilesmetricexporter")
lfme := logFileMetricExporter{
name: "instance",
namespace: loggingNS,
template: filepath.Join(loggingBaseDir, "logfilemetricexporter", "lfme.yaml"),
waitPodsReady: true,
}
defer lfme.delete(oc)
lfme.create(oc)
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "clf-74397",
namespace: loggingNS,
serviceAccountName: "logcollector-74397",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-74397",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
enableMonitoring: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
//check logs in loki stack
g.By("check logs in loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"application", "infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
labels, err := lc.listLabels(logType, "")
o.Expect(err).NotTo(o.HaveOccurred(), "got error when checking %s log labels", logType)
e2e.Logf("\nthe %s log labels are: %v\n", logType, labels)
}
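// log entries carrying no kubernetes_namespace_name label are node journald logs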
journalLog, err := lc.searchLogsInLoki("infrastructure", `{log_type = "infrastructure", kubernetes_namespace_name !~ ".+"}`)
o.Expect(err).NotTo(o.HaveOccurred())
journalLogs := extractLogEntities(journalLog)
o.Expect(len(journalLogs) > 0).Should(o.BeTrue(), "can't find journal logs in lokistack")
e2e.Logf("find journal logs")
lc.waitForLogsAppearByProject("application", appProj)
g.By("Check if the ServiceMonitor object for Vector is created.")
resource{"servicemonitor", clf.name, clf.namespace}.WaitForResourceToAppear(oc)
promToken := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
g.By("check metrics exposed by collector")
for _, job := range []string{clf.name, "logfilesmetricexporter"} {
checkMetric(oc, promToken, "{job=\""+job+"\"}", 3)
}
for _, metric := range []string{"log_logged_bytes_total", "vector_component_received_events_total"} {
checkMetric(oc, promToken, metric, 3)
}
g.By("check metrics exposed by loki")
svcs, err := oc.AdminKubeClient().CoreV1().Services(ls.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/created-by=lokistack-controller"})
o.Expect(err).NotTo(o.HaveOccurred())
for _, svc := range svcs.Items {
if !strings.Contains(svc.Name, "grpc") && !strings.Contains(svc.Name, "ring") {
checkMetric(oc, promToken, "{job=\""+svc.Name+"\"}", 3)
}
}
for _, metric := range []string{"loki_boltdb_shipper_compactor_running", "loki_distributor_bytes_received_total", "loki_inflight_requests", "workqueue_work_duration_seconds_bucket{namespace=\"" + loNS + "\", job=\"loki-operator-controller-manager-metrics-service\"}", "loki_build_info", "loki_ingester_streams_created_total"} {
checkMetric(oc, promToken, metric, 3)
}
exutil.By("Validate log streams are pushed to external storage bucket/container")
ls.validateExternalObjectStorageForLogs(oc, []string{"application", "audit", "infrastructure"})
}) | |||||
test case | openshift/openshift-tests-private | 5b3e9140-d365-4b2c-8a15-8b0bf2c9f051 | Author:qitang-CPaasrunBoth-ConnectedOnly-Critical-74926-[InterOps] Forward logs to cloudwatch. | ['"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/acceptance.go | g.It("Author:qitang-CPaasrunBoth-ConnectedOnly-Critical-74926-[InterOps] Forward logs to cloudwatch.", func() {
clfNS := oc.Namespace()
cw := cloudwatchSpec{
collectorSAName: "cloudwatch-" + getRandomString(),
groupName: "logging-74926-" + getInfrastructureName(oc) + `.{.log_type||"none-typed-logs"}`,
logTypes: []string{"infrastructure", "application", "audit"},
secretNamespace: clfNS,
secretName: "logging-74926-" + getRandomString(),
}
cw.init(oc)
defer cw.deleteResources(oc)
g.By("Create log producer")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
if !cw.hasMaster {
nodeName, err := genLinuxAuditLogsOnWorker(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer deleteLinuxAuditPolicyFromNode(oc, nodeName)
}
g.By("Create clusterlogforwarder")
var template string
if cw.stsEnabled {
template = filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-iamRole.yaml")
} else {
template = filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "cloudwatch-accessKey.yaml")
}
clf := clusterlogforwarder{
name: "clf-74926",
namespace: clfNS,
secretName: cw.secretName,
templateFile: template,
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
enableMonitoring: true,
serviceAccountName: cw.collectorSAName,
}
defer clf.delete(oc)
clf.createServiceAccount(oc)
cw.createClfSecret(oc)
clf.create(oc, "REGION="+cw.awsRegion, "GROUP_NAME="+cw.groupName, `TUNING={"compression": "snappy", "deliveryMode": "AtMostOnce", "maxRetryDuration": 20, "maxWrite": "10M", "minRetryDuration": 5}`)
nodes, err := clf.getCollectorNodeNames(oc)
o.Expect(err).NotTo(o.HaveOccurred())
cw.nodes = append(cw.nodes, nodes...)
g.By("Check logs in Cloudwatch")
o.Expect(cw.logsFound()).To(o.BeTrue())
exutil.By("check tuning in collector configurations")
expectedConfigs := []string{
`compression = "snappy"`,
`[sinks.output_cloudwatch.batch]
max_bytes = 10000000`,
`[sinks.output_cloudwatch.buffer]
when_full = "drop_newest"`,
`[sinks.output_cloudwatch.request]
retry_initial_backoff_secs = 5
retry_max_duration_secs = 20`,
}
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", expectedConfigs...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).Should(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | db016dc3-1f19-4201-9f3c-6a169d85993e | Author:qitang-CPaasrunBoth-ConnectedOnly-Critical-74924-Forward logs to GCL | ['"context"', '"fmt"', '"path/filepath"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/logging/acceptance.go | g.It("Author:qitang-CPaasrunBoth-ConnectedOnly-Critical-74924-Forward logs to GCL", func() {
projectID, err := getGCPProjectID(oc)
o.Expect(err).NotTo(o.HaveOccurred())
gcl := googleCloudLogging{
projectID: projectID,
logName: getInfrastructureName(oc) + "-74924",
}
defer gcl.removeLogs()
g.By("Create log producer")
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
oc.SetupProject()
clfNS := oc.Namespace()
gcpSecret := resource{"secret", "gcp-secret-74924", clfNS}
defer gcpSecret.clear(oc)
err = createSecretForGCL(oc, gcpSecret.name, gcpSecret.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
clf := clusterlogforwarder{
name: "clf-74924",
namespace: clfNS,
secretName: gcpSecret.name,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "googleCloudLogging.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "ID_TYPE=project", "ID_VALUE="+gcl.projectID, "LOG_ID="+gcl.logName)
for _, logType := range []string{"infrastructure", "audit", "application"} {
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
logs, err := gcl.getLogByType(logType)
if err != nil {
return false, err
}
return len(logs) > 0, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("%s logs are not found", logType))
}
err = gcl.waitForLogsAppearByNamespace(appProj)
exutil.AssertWaitPollNoErr(err, "can't find app logs from project/"+appProj)
// Check tuning options for GCL under collector configMap
expectedConfigs := []string{"[sinks.output_gcp_logging.batch]", "[sinks.output_gcp_logging.buffer]", "[sinks.output_gcp_logging.request]", "retry_initial_backoff_secs = 10", "retry_max_duration_secs = 20"}
result, err := checkCollectorConfiguration(oc, clf.namespace, clf.name+"-config", expectedConfigs...)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(result).Should(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | cfbc1bf1-f0ed-4096-ba53-2d3651edb0dd | Author:anli-CPaasrunBoth-ConnectedOnly-Critical-71772-Forward logs to az Log Analytics -- full options | ['"fmt"', '"os"', '"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/acceptance.go | g.It("Author:anli-CPaasrunBoth-ConnectedOnly-Critical-71772-Forward logs to az Log Analytics -- full options", func() {
platform := exutil.CheckPlatform(oc)
if platform == "azure" && exutil.IsWorkloadIdentityCluster(oc) {
g.Skip("Skip on the workload identity enabled cluster!")
}
var (
resourceGroupName string
location string
)
infraName := getInfrastructureName(oc)
if platform != "azure" {
if !readAzureCredentials() {
g.Skip("Skip for the platform is not Azure and can't get credentials from env vars.")
}
resourceGroupName = infraName + "-logging-71772-rg"
azureSubscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID")
cred := createNewDefaultAzureCredential()
location = "westus" //TODO: define default location
_, err := createAzureResourceGroup(resourceGroupName, azureSubscriptionID, location, cred)
defer deleteAzureResourceGroup(resourceGroupName, azureSubscriptionID, cred)
if err != nil {
g.Skip("Failed to create azure resource group: " + err.Error() + ", skip the case.")
}
e2e.Logf("Successfully created resource group %s", resourceGroupName)
} else {
cloudName := getAzureCloudName(oc)
if !(cloudName == "azurepubliccloud" || cloudName == "azureusgovernmentcloud") {
g.Skip("The case can only be running on Azure Public and Azure US Goverment now!")
}
resourceGroupName, _ = exutil.GetAzureCredentialFromCluster(oc)
}
g.By("Prepre Azure Log Storage Env")
workSpaceName := infraName + "case71772"
azLog, err := newAzureLog(oc, location, resourceGroupName, workSpaceName, "case71772")
defer azLog.deleteWorkspace()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Create log producer")
clfNS := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err = oc.WithoutNamespace().Run("new-app").Args("-n", clfNS, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploy CLF to send logs to Log Analytics")
azureSecret := resource{"secret", "azure-secret-71772", clfNS}
defer azureSecret.clear(oc)
err = azLog.createSecret(oc, azureSecret.name, azureSecret.namespace)
o.Expect(err).NotTo(o.HaveOccurred())
clf := clusterlogforwarder{
name: "clf-71772",
namespace: clfNS,
secretName: azureSecret.name,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "azureMonitor.yaml"),
waitForPodReady: true,
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
serviceAccountName: "test-clf-" + getRandomString(),
}
defer clf.delete(oc)
clf.create(oc, "PREFIX_OR_NAME="+azLog.tPrefixOrName, "CUSTOMER_ID="+azLog.customerID, "RESOURCE_ID="+azLog.workspaceID, "AZURE_HOST="+azLog.host)
g.By("Verify the test result")
for _, tableName := range []string{azLog.tPrefixOrName + "infra_log_CL", azLog.tPrefixOrName + "audit_log_CL", azLog.tPrefixOrName + "app_log_CL"} {
_, err := azLog.getLogByTable(tableName)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("can't find logs from %s in AzureLogWorkspace", tableName))
}
}) | |||||
test | openshift/openshift-tests-private | 26b9e31b-96f3-480c-bc1e-c386689698b4 | aws_utils | import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
awsConfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs"
"github.com/aws/aws-sdk-go-v2/service/iam"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/sts"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | package logging
import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
awsConfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs"
"github.com/aws/aws-sdk-go-v2/service/iam"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/sts"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
// Check if credentials exist for STS clusters
func checkAWSCredentials() bool {
//set AWS_SHARED_CREDENTIALS_FILE from CLUSTER_PROFILE_DIR as the first priority
prowConfigDir, present := os.LookupEnv("CLUSTER_PROFILE_DIR")
if present {
awsCredFile := filepath.Join(prowConfigDir, ".awscred")
if _, err := os.Stat(awsCredFile); err == nil {
err := os.Setenv("AWS_SHARED_CREDENTIALS_FILE", awsCredFile)
if err == nil {
e2e.Logf("use CLUSTER_PROFILE_DIR/.awscred")
return true
}
}
}
// check if AWS_SHARED_CREDENTIALS_FILE exist
_, present = os.LookupEnv("AWS_SHARED_CREDENTIALS_FILE")
if present {
e2e.Logf("use Env AWS_SHARED_CREDENTIALS_FILE")
return true
}
// check if AWS_SECRET_ACCESS_KEY exist
_, keyIDPresent := os.LookupEnv("AWS_ACCESS_KEY_ID")
_, keyPresent := os.LookupEnv("AWS_SECRET_ACCESS_KEY")
if keyIDPresent && keyPresent {
e2e.Logf("use Env AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY")
return true
}
// check if $HOME/.aws/credentials exist
home, _ := os.UserHomeDir()
if _, err := os.Stat(home + "/.aws/credentials"); err == nil {
e2e.Logf("use HOME/.aws/credentials")
return true
}
return false
}
func getAWSCredentialFromFile(file string) error {
data, err := os.ReadFile(file)
if err != nil {
return err
}
s := strings.Split(string(data), "\n")
for i := 0; i < len(s); i++ {
if strings.Contains(s[i], "aws_access_key_id") {
aws_access_key_id := strings.TrimSpace(strings.Split(s[i], "=")[1])
os.Setenv("AWS_ACCESS_KEY_ID", aws_access_key_id)
}
if strings.Contains(s[i], "aws_secret_access_key") {
aws_secret_access_key := strings.TrimSpace(strings.Split(s[i], "=")[1])
os.Setenv("AWS_SECRET_ACCESS_KEY", aws_secret_access_key)
}
}
return nil
}
// get AWS Account ID
func getAwsAccount(stsClient *sts.Client) (string, string) {
result, err := stsClient.GetCallerIdentity(context.TODO(), &sts.GetCallerIdentityInput{})
o.Expect(err).NotTo(o.HaveOccurred())
awsAccount := aws.ToString(result.Account)
awsUserArn := aws.ToString(result.Arn)
return awsAccount, awsUserArn
}
func readDefaultSDKExternalConfigurations(ctx context.Context, region string) aws.Config {
cfg, err := awsConfig.LoadDefaultConfig(ctx,
awsConfig.WithRegion(region),
)
o.Expect(err).NotTo(o.HaveOccurred())
return cfg
}
// initialize a s3 client with credential
func newS3Client(cfg aws.Config) *s3.Client {
return s3.NewFromConfig(cfg)
}
// new AWS STS client
func newStsClient(cfg aws.Config) *sts.Client {
return sts.NewFromConfig(cfg)
}
// Create AWS IAM client
func newIamClient(cfg aws.Config) *iam.Client {
return iam.NewFromConfig(cfg)
}
// aws iam create-role
func iamCreateRole(iamClient *iam.Client, trustPolicy string, roleName string) string {
e2e.Logf("Create iam role %v", roleName)
result, err := iamClient.CreateRole(context.TODO(), &iam.CreateRoleInput{
AssumeRolePolicyDocument: aws.String(trustPolicy),
RoleName: aws.String(roleName),
})
o.Expect(err).NotTo(o.HaveOccurred(), "couldn't create role "+roleName)
roleArn := aws.ToString(result.Role.Arn)
return roleArn
}
// aws iam delete-role
func iamDeleteRole(iamClient *iam.Client, roleName string) {
_, err := iamClient.DeleteRole(context.TODO(), &iam.DeleteRoleInput{
RoleName: aws.String(roleName),
})
if err != nil {
e2e.Logf("Couldn't delete role %s: %v", roleName, err)
}
}
// aws iam create-policy
func iamCreatePolicy(iamClient *iam.Client, mgmtPolicy string, policyName string) string {
e2e.Logf("Create iam policy %v", policyName)
result, err := iamClient.CreatePolicy(context.TODO(), &iam.CreatePolicyInput{
PolicyDocument: aws.String(mgmtPolicy),
PolicyName: aws.String(policyName),
})
o.Expect(err).NotTo(o.HaveOccurred(), "Couldn't create policy"+policyName)
policyArn := aws.ToString(result.Policy.Arn)
return policyArn
}
// aws iam delete-policy
func iamDeletePolicy(iamClient *iam.Client, policyArn string) {
_, err := iamClient.DeletePolicy(context.TODO(), &iam.DeletePolicyInput{
PolicyArn: aws.String(policyArn),
})
if err != nil {
e2e.Logf("Couldn't delete policy %v: %v", policyArn, err)
}
}
// This func creates a IAM role, attaches custom trust policy and managed permission policy
func createIAMRoleOnAWS(iamClient *iam.Client, trustPolicy string, roleName string, policyArn string) string {
result, err := iamClient.CreateRole(context.TODO(), &iam.CreateRoleInput{
AssumeRolePolicyDocument: aws.String(trustPolicy),
RoleName: aws.String(roleName),
})
o.Expect(err).NotTo(o.HaveOccurred(), "Couldn't create role %v", roleName)
roleArn := aws.ToString(result.Role.Arn)
//Adding managed permission policy if provided
if policyArn != "" {
_, err = iamClient.AttachRolePolicy(context.TODO(), &iam.AttachRolePolicyInput{
PolicyArn: aws.String(policyArn),
RoleName: aws.String(roleName),
})
o.Expect(err).NotTo(o.HaveOccurred())
}
return roleArn
}
// Deletes IAM role and attached policies
func deleteIAMroleonAWS(iamClient *iam.Client, roleName string) {
// List attached policies of the IAM role
listAttachedPoliciesOutput, err := iamClient.ListAttachedRolePolicies(context.TODO(), &iam.ListAttachedRolePoliciesInput{
RoleName: aws.String(roleName),
})
if err != nil {
e2e.Logf("Error listing attached policies of IAM role %s", roleName)
}
if len(listAttachedPoliciesOutput.AttachedPolicies) == 0 {
e2e.Logf("No attached policies under IAM role: %s", roleName)
}
if len(listAttachedPoliciesOutput.AttachedPolicies) != 0 {
// Detach attached policy from the IAM role
for _, policy := range listAttachedPoliciesOutput.AttachedPolicies {
_, err := iamClient.DetachRolePolicy(context.TODO(), &iam.DetachRolePolicyInput{
RoleName: aws.String(roleName),
PolicyArn: policy.PolicyArn,
})
if err != nil {
e2e.Logf("Error detaching policy: %s", *policy.PolicyName)
} else {
e2e.Logf("Detached policy: %s", *policy.PolicyName)
}
}
}
// Delete the IAM role
_, err = iamClient.DeleteRole(context.TODO(), &iam.DeleteRoleInput{
RoleName: aws.String(roleName),
})
if err != nil {
e2e.Logf("Error deleting IAM role: %s", roleName)
} else {
e2e.Logf("IAM role deleted successfully: %s", roleName)
}
}
// Create role_arn required for Loki deployment on STS clusters
func createIAMRoleForLokiSTSDeployment(iamClient *iam.Client, oidcName, awsAccountID, partition, lokiNamespace, lokiStackName, roleName string) string {
policyArn := "arn:" + partition + ":iam::aws:policy/AmazonS3FullAccess"
lokiTrustPolicy := `{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Federated": "arn:%s:iam::%s:oidc-provider/%s"
},
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringEquals": {
"%s:sub": [
"system:serviceaccount:%s:%s",
"system:serviceaccount:%s:%s-ruler"
]
}
}
}
]
}`
lokiTrustPolicy = fmt.Sprintf(lokiTrustPolicy, partition, awsAccountID, oidcName, oidcName, lokiNamespace, lokiStackName, lokiNamespace, lokiStackName)
roleArn := createIAMRoleOnAWS(iamClient, lokiTrustPolicy, roleName, policyArn)
return roleArn
}
// Creates Loki object storage secret on AWS STS cluster
func createObjectStorageSecretOnAWSSTSCluster(oc *exutil.CLI, region, storageSecret, bucketName, namespace string) {
err := oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", storageSecret, "--from-literal=region="+region, "--from-literal=bucketnames="+bucketName, "--from-literal=audience=openshift", "-n", namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
// Function to check if tenant logs are present under the S3 bucket.
// Returns success if any one of the tenants under tenants[] are found.
func validatesIfLogsArePushedToS3Bucket(s3Client *s3.Client, bucketName string, tenants []string) {
// Poll to check contents of the s3 bucket
err := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
listObjectsOutput, err := s3Client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{
Bucket: aws.String(bucketName),
})
if err != nil {
return false, err
}
for _, object := range listObjectsOutput.Contents {
for _, tenantName := range tenants {
if strings.Contains(*object.Key, tenantName) {
e2e.Logf("Logs %s found under the bucket: %s", *object.Key, bucketName)
return true, nil
}
}
}
e2e.Logf("Waiting for data to be available under bucket: %s", bucketName)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Timed out...No data is available under the bucket: "+bucketName)
}
// cloudWatchSpec the basic object which describe all common test options
type cloudwatchSpec struct {
awsRoleName string
awsRoleArn string
awsRegion string
awsPolicyName string
awsPolicyArn string
awsPartition string //The partition in which the resource is located, valid when the cluster is STS, ref: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference-arns.html#arns-syntax
clusterPlatformType string
collectorSAName string // the service account for collector pod to use
cwClient *cloudwatchlogs.Client
groupName string // the strategy for grouping logstreams, for example: '{.log_type||"none"}'
hasMaster bool // whether the cluster has master nodes or not
iamClient *iam.Client
logTypes []string //default: "['infrastructure','application', 'audit']"
nodes []string // Cluster Nodes Names, required when checking infrastructure/audit logs and strict=true
ovnEnabled bool // if ovn is enabled
secretName string // the name of the secret for the collector to use
secretNamespace string // the namespace where the collector pods to be deployed
stsEnabled bool // Is sts enabled on the cluster
selAppNamespaces []string //The app namespaces should be collected and verified
selNamespacesID []string // The UUIDs of all app namespaces should be collected
disAppNamespaces []string //The namespaces should not be collected and verified
}
// Set the default values on the cloudwatchSpec object; override the defaults in the It block if needed
func (cw *cloudwatchSpec) init(oc *exutil.CLI) {
if checkNetworkType(oc) == "ovnkubernetes" {
cw.ovnEnabled = true
}
cw.hasMaster = hasMaster(oc)
cw.clusterPlatformType = exutil.CheckPlatform(oc)
if cw.clusterPlatformType == "aws" {
if exutil.IsSTSCluster(oc) {
if !checkAWSCredentials() {
g.Skip("Skip since no AWS credetials.")
}
cw.stsEnabled = true
} else {
clusterinfra.GetAwsCredentialFromCluster(oc)
}
} else {
credFile, filePresent := os.LookupEnv("AWS_SHARED_CREDENTIALS_FILE")
if filePresent {
err := getAWSCredentialFromFile(credFile)
if err != nil {
g.Skip("Skip for the platform is not AWS and can't get credentials from file " + credFile)
}
} else {
_, keyIDPresent := os.LookupEnv("AWS_ACCESS_KEY_ID")
_, secretKeyPresent := os.LookupEnv("AWS_SECRET_ACCESS_KEY")
if !keyIDPresent || !secretKeyPresent {
g.Skip("Skip for the platform is not AWS and there is no AWS credentials set")
}
}
}
if cw.awsRegion == "" {
region, _ := exutil.GetAWSClusterRegion(oc)
if region != "" {
cw.awsRegion = region
} else {
// use us-east-2 as default region
cw.awsRegion = "us-east-2"
}
}
if cw.stsEnabled {
//Note: AWS China is not added, and the partition is `aws-cn`.
if strings.HasPrefix(cw.awsRegion, "us-gov") {
cw.awsPartition = "aws-us-gov"
} else {
cw.awsPartition = "aws"
}
//Create IAM roles for cloudwatch
cw.createIAMCloudwatchRole(oc)
}
cw.newCloudwatchClient()
e2e.Logf("Init cloudwatchSpec done")
}
func (cw *cloudwatchSpec) setGroupName(groupName string) {
cw.groupName = groupName
}
func (cw *cloudwatchSpec) newCloudwatchClient() {
cfg, err := awsConfig.LoadDefaultConfig(context.TODO(), awsConfig.WithRegion(cw.awsRegion))
o.Expect(err).NotTo(o.HaveOccurred())
// Create a Cloudwatch service client
cw.cwClient = cloudwatchlogs.NewFromConfig(cfg)
}
func (cw *cloudwatchSpec) newIamClient() {
cfg, err := awsConfig.LoadDefaultConfig(context.TODO(), awsConfig.WithRegion(cw.awsRegion))
o.Expect(err).NotTo(o.HaveOccurred())
cw.iamClient = iam.NewFromConfig(cfg)
}
func (cw *cloudwatchSpec) newIamRole(oc *exutil.CLI) {
oidcProvider, e := getOIDC(oc)
o.Expect(e).NotTo(o.HaveOccurred())
awscfg, err := awsConfig.LoadDefaultConfig(context.TODO(), awsConfig.WithRegion(cw.awsRegion))
o.Expect(err).NotTo(o.HaveOccurred(), "failed to load AWS configuration")
stsClient := sts.NewFromConfig(awscfg)
accountID, _ := getAwsAccount(stsClient)
trustPolicy := `{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Federated": "arn:%s:iam::%s:oidc-provider/%s"
},
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringEquals": {
"%s:sub": "system:serviceaccount:%s:%s"
}
}
}
]
}`
trustPolicy = fmt.Sprintf(trustPolicy, cw.awsPartition, accountID, oidcProvider, oidcProvider, cw.secretNamespace, cw.collectorSAName)
cw.awsRoleArn = iamCreateRole(cw.iamClient, trustPolicy, cw.awsRoleName)
}
func (cw *cloudwatchSpec) newIamPolicy() {
mgmtPolicy := `{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogGroups",
"logs:DescribeLogStreams",
"logs:PutLogEvents",
"logs:PutRetentionPolicy"
],
"Resource": "arn:%s:logs:*:*:*"
}
]
}`
cw.awsPolicyArn = iamCreatePolicy(cw.iamClient, fmt.Sprintf(mgmtPolicy, cw.awsPartition), cw.awsPolicyName)
}
func (cw *cloudwatchSpec) createIAMCloudwatchRole(oc *exutil.CLI) {
if os.Getenv("AWS_CLOUDWATCH_ROLE_ARN") != "" {
cw.awsRoleArn = os.Getenv("AWS_CLOUDWATCH_ROLE_ARN")
return
}
cw.awsRoleName = cw.secretName + "-" + getInfrastructureName(oc)
cw.awsPolicyName = cw.awsRoleName
cw.newIamClient()
e2e.Logf("Created aws iam role: %v", cw.awsRoleName)
cw.newIamRole(oc)
cw.newIamPolicy()
_, err := cw.iamClient.AttachRolePolicy(context.TODO(), &iam.AttachRolePolicyInput{
PolicyArn: &cw.awsPolicyArn,
RoleName: &cw.awsRoleName,
})
o.Expect(err).NotTo(o.HaveOccurred())
}
func (cw *cloudwatchSpec) deleteIAMCloudwatchRole() {
cw.iamClient.DetachRolePolicy(context.TODO(), &iam.DetachRolePolicyInput{
PolicyArn: aws.String(cw.awsPolicyArn),
RoleName: aws.String(cw.awsRoleName),
},
)
iamDeleteRole(cw.iamClient, cw.awsRoleName)
iamDeletePolicy(cw.iamClient, cw.awsPolicyArn)
}
// Create Cloudwatch Secret. Note: using credential files avoids leaking credentials in the output
func (cw *cloudwatchSpec) createClfSecret(oc *exutil.CLI) {
var err error
if cw.stsEnabled {
token, _ := oc.AsAdmin().WithoutNamespace().Run("create").Args("token", cw.collectorSAName, "--audience=openshift", "--duration=24h", "-n", cw.secretNamespace).Output()
err = oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", cw.secretName, "--from-literal=role_arn="+cw.awsRoleArn, "--from-literal=token="+token, "-n", cw.secretNamespace).Execute()
} else {
err = oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", cw.secretName, "--from-literal=aws_access_key_id="+os.Getenv("AWS_ACCESS_KEY_ID"), "--from-literal=aws_secret_access_key="+os.Getenv("AWS_SECRET_ACCESS_KEY"), "-n", cw.secretNamespace).Execute()
}
o.Expect(err).NotTo(o.HaveOccurred())
}
// trigger DeleteLogGroup. Sometimes the API returns success but the resources are still there, so poll for up to 90 seconds to make the deletion as reliable as possible.
func (cw *cloudwatchSpec) deleteGroups(groupPrefix string) {
wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 90*time.Second, true, func(context.Context) (done bool, err error) {
logGroupNames, _ := cw.getLogGroupNames(groupPrefix)
if len(logGroupNames) == 0 {
return true, nil
}
for _, name := range logGroupNames {
_, err := cw.cwClient.DeleteLogGroup(context.TODO(), &cloudwatchlogs.DeleteLogGroupInput{LogGroupName: &name})
if err != nil {
e2e.Logf("Can't delete log group: %s", name)
} else {
e2e.Logf("Log group %s is deleted", name)
}
}
return false, nil
})
}
// clean the Cloudwatch resources
func (cw *cloudwatchSpec) deleteResources(oc *exutil.CLI) {
resource{"secret", cw.secretName, cw.secretNamespace}.clear(oc)
cw.deleteGroups("")
//delete roles when the role is created in case
if cw.stsEnabled && os.Getenv("AWS_CLOUDWATCH_ROLE_ARN") == "" {
cw.deleteIAMCloudwatchRole()
}
}
// Return Cloudwatch GroupNames
func (cw cloudwatchSpec) getLogGroupNames(groupPrefix string) ([]string, error) {
var (
groupNames []string
)
if groupPrefix == "" {
if strings.Contains(cw.groupName, "{") {
groupPrefix = strings.Split(cw.groupName, "{")[0]
} else {
groupPrefix = cw.groupName
}
}
logGroupDesc, err := cw.cwClient.DescribeLogGroups(context.TODO(), &cloudwatchlogs.DescribeLogGroupsInput{
LogGroupNamePrefix: &groupPrefix,
})
if err != nil {
return groupNames, fmt.Errorf("can't get log groups from cloudwatch: %v", err)
}
for _, group := range logGroupDesc.LogGroups {
groupNames = append(groupNames, *group.LogGroupName)
}
nextToken := logGroupDesc.NextToken
for nextToken != nil {
logGroupDesc, err = cw.cwClient.DescribeLogGroups(context.TODO(), &cloudwatchlogs.DescribeLogGroupsInput{
LogGroupNamePrefix: &groupPrefix,
NextToken: nextToken,
})
if err != nil {
return groupNames, fmt.Errorf("can't get log groups from cloudwatch: %v", err)
}
for _, group := range logGroupDesc.LogGroups {
groupNames = append(groupNames, *group.LogGroupName)
}
nextToken = logGroupDesc.NextToken
}
return groupNames, nil
}
func (cw *cloudwatchSpec) waitForLogGroupsAppear(groupPrefix, keyword string) error {
if groupPrefix == "" {
if strings.Contains(cw.groupName, "{") {
groupPrefix = strings.Split(cw.groupName, "{")[0]
} else {
groupPrefix = cw.groupName
}
}
err := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
groups, err := cw.getLogGroupNames(groupPrefix)
if err != nil {
e2e.Logf("error getting log groups: %v", err)
return false, nil
}
if len(groups) == 0 {
e2e.Logf("no log groups match the prefix: %s", groupPrefix)
return false, nil
}
e2e.Logf("the log group names %v", groups)
if keyword != "" {
return containSubstring(groups, keyword), nil
}
return true, nil
})
if err != nil {
return fmt.Errorf("can't find log groups with prefix: %s", groupPrefix)
}
return nil
}
// Get Stream names matching the logTypes and project names.
func (cw *cloudwatchSpec) getLogStreamNames(groupName string, streamPrefix string) ([]string, error) {
var (
logStreamNames []string
err error
logStreamDesc *cloudwatchlogs.DescribeLogStreamsOutput
logStreamsInput cloudwatchlogs.DescribeLogStreamsInput
)
if streamPrefix == "" {
logStreamsInput = cloudwatchlogs.DescribeLogStreamsInput{
LogGroupName: &groupName,
}
} else {
logStreamsInput = cloudwatchlogs.DescribeLogStreamsInput{
LogGroupName: &groupName,
LogStreamNamePrefix: &streamPrefix,
}
}
logStreamDesc, err = cw.cwClient.DescribeLogStreams(context.TODO(), &logStreamsInput)
if err != nil {
return logStreamNames, fmt.Errorf("can't get log streams: %v", err)
}
for _, stream := range logStreamDesc.LogStreams {
logStreamNames = append(logStreamNames, *stream.LogStreamName)
}
nextToken := logStreamDesc.NextToken
for nextToken != nil {
if streamPrefix == "" {
logStreamsInput = cloudwatchlogs.DescribeLogStreamsInput{
LogGroupName: &groupName,
NextToken: nextToken,
}
} else {
logStreamsInput = cloudwatchlogs.DescribeLogStreamsInput{
LogGroupName: &groupName,
LogStreamNamePrefix: &streamPrefix,
NextToken: nextToken,
}
}
logStreamDesc, err = cw.cwClient.DescribeLogStreams(context.TODO(), &logStreamsInput)
if err != nil {
return logStreamNames, fmt.Errorf("can't get log streams from cloudwatch: %v", err)
}
for _, stream := range logStreamDesc.LogStreams {
logStreamNames = append(logStreamNames, *stream.LogStreamName)
}
nextToken = logStreamDesc.NextToken
}
return logStreamNames, nil
}
// In this function, verify if the infra container logs are forwarded to Cloudwatch or not
func (cw *cloudwatchSpec) checkInfraContainerLogs(strict bool) bool {
var (
infraLogGroupNames []string
logStreams []string
)
logGroupNames, err := cw.getLogGroupNames("")
o.Expect(err).NotTo(o.HaveOccurred())
if len(logGroupNames) == 0 {
return false
}
if strings.Contains(cw.groupName, "{.log_type") {
for _, e := range logGroupNames {
r, _ := regexp.Compile(`.*\.infrastructure$`)
match := r.MatchString(e)
if match {
infraLogGroupNames = append(infraLogGroupNames, e)
}
}
}
if len(infraLogGroupNames) == 0 {
infraLogGroupNames = logGroupNames
}
e2e.Logf("the log group names for infra container logs are %v", infraLogGroupNames)
// get all the log streams under the log groups
for _, group := range infraLogGroupNames {
streams, _ := cw.getLogStreamNames(group, "")
for _, stream := range streams {
if strings.Contains(stream, ".openshift-") {
logStreams = append(logStreams, stream)
}
}
}
// when strict=true, return true only if a pod log stream is found for every node
if strict {
if len(cw.nodes) == 0 {
e2e.Logf("node name is empty, please get node names at first")
return false
}
for _, node := range cw.nodes {
if !containSubstring(logStreams, node+".openshift-") {
e2e.Logf("can't find log stream %s", node+".openshift-")
return false
}
}
return true
} else {
return len(logStreams) > 0
}
}
// In this function, verify the node (journal) logs are present on Cloudwatch
func (cw *cloudwatchSpec) checkInfraNodeLogs(strict bool) bool {
var (
infraLogGroupNames []string
logStreams []string
)
logGroupNames, err := cw.getLogGroupNames("")
if err != nil || len(logGroupNames) == 0 {
return false
}
for _, group := range logGroupNames {
r, _ := regexp.Compile(`.*\.infrastructure$`)
match := r.MatchString(group)
if match {
infraLogGroupNames = append(infraLogGroupNames, group)
}
}
if len(infraLogGroupNames) == 0 {
infraLogGroupNames = logGroupNames
}
e2e.Logf("the infra node log group names are %v", infraLogGroupNames)
// get all the log streams under the log groups
for _, group := range infraLogGroupNames {
streams, _ := cw.getLogStreamNames(group, "")
for _, stream := range streams {
if strings.Contains(stream, ".journal.system") {
logStreams = append(logStreams, stream)
}
}
}
e2e.Logf("the infrastructure node log streams: %v", logStreams)
// when strict=true, return true only if log streams are found for every node
if strict {
var expectedStreamNames []string
if len(cw.nodes) == 0 {
e2e.Logf("node name is empty, please get node names at first")
return false
}
//stream name: ip-10-0-152-69.journal.system
if cw.clusterPlatformType == "aws" {
for _, node := range cw.nodes {
expectedStreamNames = append(expectedStreamNames, strings.Split(node, ".")[0])
}
} else {
expectedStreamNames = append(expectedStreamNames, cw.nodes...)
}
for _, name := range expectedStreamNames {
streamName := name + ".journal.system"
if !contain(logStreams, streamName) {
e2e.Logf("can't find log stream %s", streamName)
return false
}
}
return true
} else {
return len(logStreams) > 0
}
}
// In this function, verify both the infra container logs and the node journal logs are present on Cloudwatch
func (cw *cloudwatchSpec) infrastructureLogsFound(strict bool) bool {
return cw.checkInfraContainerLogs(strict) && cw.checkInfraNodeLogs(strict)
}
/*
In this function, verify the audit logs can be found.
when strict=true, the test passes only if an audit log stream is found for every node
when strict=false, the test passes if any audit log stream is found
example stream name:
ip-10-0-90-156.us-east-2.compute.internal
*/
func (cw *cloudwatchSpec) auditLogsFound(strict bool) bool {
var (
auditLogGroupNames []string
logStreams []string
)
logGroupNames, err := cw.getLogGroupNames("")
if err != nil || len(logGroupNames) == 0 {
return false
}
for _, e := range logGroupNames {
r, _ := regexp.Compile(`.*\.audit$`)
match := r.MatchString(e)
if match {
auditLogGroupNames = append(auditLogGroupNames, e)
}
}
if len(auditLogGroupNames) == 0 {
auditLogGroupNames = logGroupNames
}
e2e.Logf("the log group names for audit logs are %v", auditLogGroupNames)
// stream name: ip-10-0-74-46.us-east-2.compute.internal
// get all the log streams under the log groups
for _, group := range auditLogGroupNames {
streams, _ := cw.getLogStreamNames(group, "")
logStreams = append(logStreams, streams...)
}
// when strict=true, return true only if an audit log stream is found for every node
if strict {
if len(cw.nodes) == 0 {
e2e.Logf("node name is empty, please get node names at first")
return false
}
for _, node := range cw.nodes {
if !containSubstring(logStreams, node) {
e2e.Logf("can't find log stream from node: %s", node)
return false
}
}
return true
} else {
return len(logStreams) > 0
}
}
// check if the container logs are grouped by namespace_id
func (cw *cloudwatchSpec) checkLogGroupByNamespaceID() bool {
var (
groupPrefix string
)
if strings.Contains(cw.groupName, ".kubernetes.namespace_id") {
groupPrefix = strings.Split(cw.groupName, "{")[0]
} else {
e2e.Logf("the group name doesn't contain .kubernetes.namespace_id, no need to call this function")
return false
}
for _, namespaceID := range cw.selNamespacesID {
groupErr := cw.waitForLogGroupsAppear(groupPrefix, namespaceID)
if groupErr != nil {
e2e.Logf("can't find log group named %s", namespaceID)
return false
}
}
return true
}
// check if the container logs are grouped by namespace_name
func (cw *cloudwatchSpec) checkLogGroupByNamespaceName() bool {
var (
groupPrefix string
)
if strings.Contains(cw.groupName, ".kubernetes.namespace_name") {
groupPrefix = strings.Split(cw.groupName, "{")[0]
} else {
e2e.Logf("the group name doesn't contain .kubernetes.namespace_name, no need to call this function")
return false
}
for _, namespaceName := range cw.selAppNamespaces {
groupErr := cw.waitForLogGroupsAppear(groupPrefix, namespaceName)
if groupErr != nil {
e2e.Logf("can't find log group named %s", namespaceName)
return false
}
}
for _, ns := range cw.disAppNamespaces {
groups, err := cw.getLogGroupNames(groupPrefix)
if err != nil {
return false
}
if containSubstring(groups, ns) {
return false
}
}
return true
}
func (cw *cloudwatchSpec) getApplicationLogStreams() ([]string, error) {
var (
appLogGroupNames []string
logStreams []string
)
logGroupNames, err := cw.getLogGroupNames("")
if err != nil || len(logGroupNames) == 0 {
return logStreams, err
}
for _, e := range logGroupNames {
r, _ := regexp.Compile(`.*\.application$`)
match := r.MatchString(e)
if match {
appLogGroupNames = append(appLogGroupNames, e)
}
}
if len(appLogGroupNames) == 0 {
appLogGroupNames = logGroupNames
}
e2e.Logf("the log group names for application logs are %v", appLogGroupNames)
for _, group := range appLogGroupNames {
streams, _ := cw.getLogStreamNames(group, "")
for _, stream := range streams {
if !strings.Contains(stream, "ip-10-0") {
logStreams = append(logStreams, stream)
}
}
}
return logStreams, nil
}
// The index to find application logs
// GroupType
//
// logType: anli48022-gwbb4.application
// namespaceName: anli48022-gwbb4.aosqe-log-json-1638788875
// namespaceUUID: anli48022-gwbb4.0471c739-e38c-4590-8a96-fdd5298d47ae,uuid.audit,uuid.infrastructure
func (cw *cloudwatchSpec) applicationLogsFound() bool {
if (len(cw.selAppNamespaces) > 0 || len(cw.disAppNamespaces) > 0) && strings.Contains(cw.groupName, ".kubernetes.namespace_name") {
return cw.checkLogGroupByNamespaceName()
}
if len(cw.selNamespacesID) > 0 {
return cw.checkLogGroupByNamespaceID()
}
logStreams, err := cw.getApplicationLogStreams()
if err != nil || len(logStreams) == 0 {
return false
}
for _, ns := range cw.selAppNamespaces {
if !containSubstring(logStreams, ns) {
e2e.Logf("can't find logs from project %s", ns)
return false
}
}
for _, ns := range cw.disAppNamespaces {
if containSubstring(logStreams, ns) {
e2e.Logf("find logs from project %s, this is not expected", ns)
return false
}
}
return true
}
// The common function to verify if logs can be found or not. In general, customize the cloudwatchSpec before calling this function
func (cw *cloudwatchSpec) logsFound() bool {
var (
appLogSuccess = true
infraLogSuccess = true
auditLogSuccess = true
)
for _, logType := range cw.logTypes {
switch logType {
case "infrastructure":
err := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
return cw.infrastructureLogsFound(true), nil
})
if err != nil {
e2e.Logf("can't find infrastructure in given time")
infraLogSuccess = false
}
case "audit":
err := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
return cw.auditLogsFound(false), nil
})
if err != nil {
e2e.Logf("can't find audit logs in given time")
auditLogSuccess = false
}
case "application":
err := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
return cw.applicationLogsFound(), nil
})
if err != nil {
e2e.Logf("can't find application logs in given time")
appLogSuccess = false
}
}
}
return infraLogSuccess && auditLogSuccess && appLogSuccess
}
func (cw *cloudwatchSpec) getLogRecordsByNamespace(limit int32, logGroupName string, namespaceName string) ([]LogEntity, error) {
var (
output *cloudwatchlogs.FilterLogEventsOutput
logs []LogEntity
)
streamNames, streamErr := cw.getLogStreamNames(logGroupName, namespaceName)
if streamErr != nil {
return logs, streamErr
}
e2e.Logf("the log streams: %v", streamNames)
err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
output, err = cw.filterLogEvents(limit, logGroupName, "", streamNames...)
if err != nil {
e2e.Logf("get error when filter events in cloudwatch, try next time")
return false, nil
}
if len(output.Events) == 0 {
return false, nil
}
return true, nil
})
if err != nil {
return nil, fmt.Errorf("the query is not completed in 5 minutes or there is no log record matches the query: %v", err)
}
for _, event := range output.Events {
var log LogEntity
json.Unmarshal([]byte(*event.Message), &log)
logs = append(logs, log)
}
return logs, nil
}
// aws logs filter-log-events --log-group-name logging-47052-qitang-fips-zfpgd.application --log-stream-name-prefix=var.log.pods.e2e-test-logfwd-namespace-x8mzw
func (cw *cloudwatchSpec) filterLogEvents(limit int32, logGroupName, logStreamNamePrefix string, logStreamNames ...string) (*cloudwatchlogs.FilterLogEventsOutput, error) {
if len(logStreamNamePrefix) > 0 && len(logStreamNames) > 0 {
return nil, fmt.Errorf("invalidParameterException: logStreamNamePrefix and logStreamNames are specified")
}
var (
err error
output *cloudwatchlogs.FilterLogEventsOutput
)
if len(logStreamNamePrefix) > 0 {
output, err = cw.cwClient.FilterLogEvents(context.TODO(), &cloudwatchlogs.FilterLogEventsInput{
LogGroupName: &logGroupName,
LogStreamNamePrefix: &logStreamNamePrefix,
Limit: &limit,
})
} else if len(logStreamNames) > 0 {
output, err = cw.cwClient.FilterLogEvents(context.TODO(), &cloudwatchlogs.FilterLogEventsInput{
LogGroupName: &logGroupName,
LogStreamNames: logStreamNames,
Limit: &limit,
})
}
return output, err
}
| package logging | ||||
function | openshift/openshift-tests-private | c4292030-c462-4439-8379-f955294972db | checkAWSCredentials | ['"os"', '"path/filepath"', '"github.com/aws/aws-sdk-go-v2/aws"'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func checkAWSCredentials() bool {
//set AWS_SHARED_CREDENTIALS_FILE from CLUSTER_PROFILE_DIR as the first priority
prowConfigDir, present := os.LookupEnv("CLUSTER_PROFILE_DIR")
if present {
awsCredFile := filepath.Join(prowConfigDir, ".awscred")
if _, err := os.Stat(awsCredFile); err == nil {
err := os.Setenv("AWS_SHARED_CREDENTIALS_FILE", awsCredFile)
if err == nil {
e2e.Logf("use CLUSTER_PROFILE_DIR/.awscred")
return true
}
}
}
// check if AWS_SHARED_CREDENTIALS_FILE exist
_, present = os.LookupEnv("AWS_SHARED_CREDENTIALS_FILE")
if present {
e2e.Logf("use Env AWS_SHARED_CREDENTIALS_FILE")
return true
}
// check if AWS_SECRET_ACCESS_KEY exist
_, keyIDPresent := os.LookupEnv("AWS_ACCESS_KEY_ID")
_, keyPresent := os.LookupEnv("AWS_SECRET_ACCESS_KEY")
if keyIDPresent && keyPresent {
e2e.Logf("use Env AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY")
return true
}
// check if $HOME/.aws/credentials exist
home, _ := os.UserHomeDir()
if _, err := os.Stat(home + "/.aws/credentials"); err == nil {
e2e.Logf("use HOME/.aws/credentials")
return true
}
return false
} | logging | ||||
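A minimal usage sketch of the credential guard above, assuming the same SDK packages this file already imports; the region literal is a placeholder:

// Sketch only: skip the test when no credentials were discovered,
// otherwise build a CloudWatch Logs client the same way the spec does.
if !checkAWSCredentials() {
	g.Skip("no AWS credentials found in any supported location")
}
cfg, err := awsConfig.LoadDefaultConfig(context.TODO(), awsConfig.WithRegion("us-east-2")) // region is a placeholder
o.Expect(err).NotTo(o.HaveOccurred())
cwClient := cloudwatchlogs.NewFromConfig(cfg)
_ = cwClient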
function | openshift/openshift-tests-private | 992ee3eb-e7e3-4ec4-97d7-33a24675fc38 | getAWSCredentialFromFile | ['"os"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func getAWSCredentialFromFile(file string) error {
data, err := os.ReadFile(file)
if err != nil {
return err
}
s := strings.Split(string(data), "\n")
for i := 0; i < len(s); i++ {
if strings.Contains(s[i], "aws_access_key_id") {
aws_access_key_id := strings.TrimSpace(strings.Split(s[i], "=")[1])
os.Setenv("AWS_ACCESS_KEY_ID", aws_access_key_id)
}
if strings.Contains(s[i], "aws_secret_access_key") {
aws_secret_access_key := strings.TrimSpace(strings.Split(s[i], "=")[1])
os.Setenv("AWS_SECRET_ACCESS_KEY", aws_secret_access_key)
}
}
return nil
} | logging | ||||
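A sketch of feeding this parser, assuming the standard AWS shared-credentials INI layout; the key values are placeholders, not real credentials:

// Sketch: write a placeholder credentials file and let the helper
// export AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY from it.
creds := "[default]\naws_access_key_id = AKIAEXAMPLEKEY\naws_secret_access_key = exampleSecretValue\n"
tmp := filepath.Join(os.TempDir(), "awscred-example")
if err := os.WriteFile(tmp, []byte(creds), 0600); err == nil {
	defer os.Remove(tmp)
	_ = getAWSCredentialFromFile(tmp)
}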
function | openshift/openshift-tests-private | d7c1fff0-42f8-4446-a841-93aaaa263ff3 | getAwsAccount | ['"context"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/service/sts"'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func getAwsAccount(stsClient *sts.Client) (string, string) {
result, err := stsClient.GetCallerIdentity(context.TODO(), &sts.GetCallerIdentityInput{})
o.Expect(err).NotTo(o.HaveOccurred())
awsAccount := aws.ToString(result.Account)
awsUserArn := aws.ToString(result.Arn)
return awsAccount, awsUserArn
} | logging | ||||
function | openshift/openshift-tests-private | 4e7684d7-ced6-4fe7-81a5-6b5a6520bc40 | readDefaultSDKExternalConfigurations | ['"context"', '"github.com/aws/aws-sdk-go-v2/aws"'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func readDefaultSDKExternalConfigurations(ctx context.Context, region string) aws.Config {
cfg, err := awsConfig.LoadDefaultConfig(ctx,
awsConfig.WithRegion(region),
)
o.Expect(err).NotTo(o.HaveOccurred())
return cfg
} | logging | ||||
function | openshift/openshift-tests-private | ab66b81b-3ced-4123-b5ee-87144c34a5cb | newS3Client | ['"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/service/s3"'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func newS3Client(cfg aws.Config) *s3.Client {
return s3.NewFromConfig(cfg)
} | logging | ||||
function | openshift/openshift-tests-private | 03b7f19a-0874-441a-ab90-6c079d7d9629 | newStsClient | ['"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/service/sts"'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func newStsClient(cfg aws.Config) *sts.Client {
return sts.NewFromConfig(cfg)
} | logging | ||||
function | openshift/openshift-tests-private | 8720bb19-01c7-4218-ac9c-f91f879b2f0b | newIamClient | ['"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/service/iam"'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func newIamClient(cfg aws.Config) *iam.Client {
return iam.NewFromConfig(cfg)
} | logging | ||||
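A short sketch chaining the constructors above into the usual client set; the region is a placeholder:

// Sketch: one shared aws.Config can feed all of the service clients.
cfg := readDefaultSDKExternalConfigurations(context.TODO(), "us-east-2")
stsClient := newStsClient(cfg)
iamClient := newIamClient(cfg)
s3Client := newS3Client(cfg)
account, userArn := getAwsAccount(stsClient)
e2e.Logf("running as %s in account %s", userArn, account)
_, _ = iamClient, s3Client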
function | openshift/openshift-tests-private | fa1ada78-7d4f-4de9-bcdb-3580c067211c | iamCreateRole | ['"context"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/service/iam"'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func iamCreateRole(iamClient *iam.Client, trustPolicy string, roleName string) string {
e2e.Logf("Create iam role %v", roleName)
result, err := iamClient.CreateRole(context.TODO(), &iam.CreateRoleInput{
AssumeRolePolicyDocument: aws.String(trustPolicy),
RoleName: aws.String(roleName),
})
o.Expect(err).NotTo(o.HaveOccurred(), "couldn't create role "+roleName)
roleArn := aws.ToString(result.Role.Arn)
return roleArn
} | logging | ||||
function | openshift/openshift-tests-private | 7c2c4f91-ca9f-4a89-aea8-4b550cd9d350 | iamDeleteRole | ['"context"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/service/iam"'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func iamDeleteRole(iamClient *iam.Client, roleName string) {
_, err := iamClient.DeleteRole(context.TODO(), &iam.DeleteRoleInput{
RoleName: aws.String(roleName),
})
if err != nil {
e2e.Logf("Couldn't delete role %s: %v", roleName, err)
}
} | logging | ||||
function | openshift/openshift-tests-private | 3d5ba680-3c65-4a7c-b424-15fae0c9ad95 | iamCreatePolicy | ['"context"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/service/iam"'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func iamCreatePolicy(iamClient *iam.Client, mgmtPolicy string, policyName string) string {
e2e.Logf("Create iam policy %v", policyName)
result, err := iamClient.CreatePolicy(context.TODO(), &iam.CreatePolicyInput{
PolicyDocument: aws.String(mgmtPolicy),
PolicyName: aws.String(policyName),
})
o.Expect(err).NotTo(o.HaveOccurred(), "Couldn't create policy"+policyName)
policyArn := aws.ToString(result.Policy.Arn)
return policyArn
} | logging | ||||
function | openshift/openshift-tests-private | c2c0b972-5815-4fe3-91a8-3826af87b9fc | iamDeletePolicy | ['"context"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/service/iam"'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func iamDeletePolicy(iamClient *iam.Client, policyArn string) {
_, err := iamClient.DeletePolicy(context.TODO(), &iam.DeletePolicyInput{
PolicyArn: aws.String(policyArn),
})
if err != nil {
e2e.Logf("Couldn't delete policy %v: %v", policyArn, err)
}
} | logging | ||||
function | openshift/openshift-tests-private | bacbc662-cda6-4859-9165-5c3d1d3d9fa7 | createIAMRoleOnAWS | ['"context"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/service/iam"'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func createIAMRoleOnAWS(iamClient *iam.Client, trustPolicy string, roleName string, policyArn string) string {
result, err := iamClient.CreateRole(context.TODO(), &iam.CreateRoleInput{
AssumeRolePolicyDocument: aws.String(trustPolicy),
RoleName: aws.String(roleName),
})
o.Expect(err).NotTo(o.HaveOccurred(), "Couldn't create role %v", roleName)
roleArn := aws.ToString(result.Role.Arn)
//Adding managed permission policy if provided
if policyArn != "" {
_, err = iamClient.AttachRolePolicy(context.TODO(), &iam.AttachRolePolicyInput{
PolicyArn: aws.String(policyArn),
RoleName: aws.String(roleName),
})
o.Expect(err).NotTo(o.HaveOccurred())
}
return roleArn
} | logging | ||||
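A usage sketch, assuming an iamClient and accountID were obtained with the helpers above; the role name is a placeholder and the policy ARN is the AWS-managed ReadOnlyAccess policy:

// Sketch: a simple same-account trust policy plus an AWS-managed policy.
trust := fmt.Sprintf(`{
  "Version": "2012-10-17",
  "Statement": [{
    "Effect": "Allow",
    "Principal": {"AWS": "arn:aws:iam::%s:root"},
    "Action": "sts:AssumeRole"
  }]
}`, accountID)
roleArn := createIAMRoleOnAWS(iamClient, trust, "logging-example-role", "arn:aws:iam::aws:policy/ReadOnlyAccess")
defer deleteIAMroleonAWS(iamClient, "logging-example-role")
e2e.Logf("created role %s", roleArn)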
function | openshift/openshift-tests-private | e7f2d143-7e9f-4772-ac83-dd8fcc178f24 | deleteIAMroleonAWS | ['"context"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/service/iam"'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func deleteIAMroleonAWS(iamClient *iam.Client, roleName string) {
// List attached policies of the IAM role
listAttachedPoliciesOutput, err := iamClient.ListAttachedRolePolicies(context.TODO(), &iam.ListAttachedRolePoliciesInput{
RoleName: aws.String(roleName),
})
if err != nil {
e2e.Logf("Error listing attached policies of IAM role %s", roleName)
}
if len(listAttachedPoliciesOutput.AttachedPolicies) == 0 {
e2e.Logf("No attached policies under IAM role: %s", roleName)
}
if len(listAttachedPoliciesOutput.AttachedPolicies) != 0 {
// Detach attached policy from the IAM role
for _, policy := range listAttachedPoliciesOutput.AttachedPolicies {
_, err := iamClient.DetachRolePolicy(context.TODO(), &iam.DetachRolePolicyInput{
RoleName: aws.String(roleName),
PolicyArn: policy.PolicyArn,
})
if err != nil {
e2e.Logf("Error detaching policy: %s", *policy.PolicyName)
} else {
e2e.Logf("Detached policy: %s", *policy.PolicyName)
}
}
}
// Delete the IAM role
_, err = iamClient.DeleteRole(context.TODO(), &iam.DeleteRoleInput{
RoleName: aws.String(roleName),
})
if err != nil {
e2e.Logf("Error deleting IAM role: %s", roleName)
} else {
e2e.Logf("IAM role deleted successfully: %s", roleName)
}
} | logging | ||||
function | openshift/openshift-tests-private | 4d9cf732-f753-47f3-977c-d5cd1e6f1061 | createIAMRoleForLokiSTSDeployment | ['"fmt"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/service/iam"', '"github.com/aws/aws-sdk-go-v2/service/sts"'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func createIAMRoleForLokiSTSDeployment(iamClient *iam.Client, oidcName, awsAccountID, partition, lokiNamespace, lokiStackName, roleName string) string {
policyArn := "arn:" + partition + ":iam::aws:policy/AmazonS3FullAccess"
lokiTrustPolicy := `{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Federated": "arn:%s:iam::%s:oidc-provider/%s"
},
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringEquals": {
"%s:sub": [
"system:serviceaccount:%s:%s",
"system:serviceaccount:%s:%s-ruler"
]
}
}
}
]
}`
lokiTrustPolicy = fmt.Sprintf(lokiTrustPolicy, partition, awsAccountID, oidcName, oidcName, lokiNamespace, lokiStackName, lokiNamespace, lokiStackName)
roleArn := createIAMRoleOnAWS(iamClient, lokiTrustPolicy, roleName, policyArn)
return roleArn
} | logging | ||||
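A call sketch; every argument below is a placeholder that would normally come from the cluster under test:

// Sketch: role creation for a LokiStack named lokistack-sample in
// openshift-logging on a standard (partition "aws") STS cluster.
roleArn := createIAMRoleForLokiSTSDeployment(iamClient,
	"oidc.example.com/abc123", // OIDC provider host/path
	"123456789012",            // AWS account ID
	"aws",                     // partition
	"openshift-logging",       // LokiStack namespace
	"lokistack-sample",        // LokiStack name
	"lokistack-sample-role")   // IAM role name
defer deleteIAMroleonAWS(iamClient, "lokistack-sample-role")
e2e.Logf("Loki role: %s", roleArn)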
function | openshift/openshift-tests-private | 1a3ea640-77de-4028-b5c2-861841d272a8 | createObjectStorageSecretOnAWSSTSCluster | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func createObjectStorageSecretOnAWSSTSCluster(oc *exutil.CLI, region, storageSecret, bucketName, namespace string) {
err := oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", storageSecret, "--from-literal=region="+region, "--from-literal=bucketnames="+bucketName, "--from-literal=audience=openshift", "-n", namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
} | logging | |||||
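A usage sketch; the region, secret name, bucket, and namespace are placeholders:

// Sketch: the secret a LokiStack references via spec.storage.secret.name.
createObjectStorageSecretOnAWSSTSCluster(oc, "us-east-2", "logging-loki-s3", "my-loki-bucket", "openshift-logging")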
function | openshift/openshift-tests-private | 574cd939-1761-46b7-a259-b694d6fc7509 | validatesIfLogsArePushedToS3Bucket | ['"context"', '"strings"', '"time"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/service/s3"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func validatesIfLogsArePushedToS3Bucket(s3Client *s3.Client, bucketName string, tenants []string) {
// Poll to check contents of the s3 bucket
err := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
listObjectsOutput, err := s3Client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{
Bucket: aws.String(bucketName),
})
if err != nil {
return false, err
}
for _, object := range listObjectsOutput.Contents {
for _, tenantName := range tenants {
if strings.Contains(*object.Key, tenantName) {
e2e.Logf("Logs %s found under the bucket: %s", *object.Key, bucketName)
return true, nil
}
}
}
e2e.Logf("Waiting for data to be available under bucket: %s", bucketName)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Timed out...No data is available under the bucket: "+bucketName)
} | logging | ||||
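A usage sketch pairing the S3 client constructor with this validator; the region and bucket name are placeholders:

// Sketch: after forwarding has run for a while, require that at least
// one tenant prefix shows up under the bucket.
cfg := readDefaultSDKExternalConfigurations(context.TODO(), "us-east-2")
s3Client := newS3Client(cfg)
validatesIfLogsArePushedToS3Bucket(s3Client, "my-loki-bucket", []string{"application", "audit", "infrastructure"})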
function | openshift/openshift-tests-private | 87394528-b6df-4bf6-8a42-4b6363cfba93 | init | ['"os"', '"strings"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"'] | ['cloudwatchSpec'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func (cw *cloudwatchSpec) init(oc *exutil.CLI) {
if checkNetworkType(oc) == "ovnkubernetes" {
cw.ovnEnabled = true
}
cw.hasMaster = hasMaster(oc)
cw.clusterPlatformType = exutil.CheckPlatform(oc)
if cw.clusterPlatformType == "aws" {
if exutil.IsSTSCluster(oc) {
if !checkAWSCredentials() {
g.Skip("Skip since no AWS credetials.")
}
cw.stsEnabled = true
} else {
clusterinfra.GetAwsCredentialFromCluster(oc)
}
} else {
credFile, filePresent := os.LookupEnv("AWS_SHARED_CREDENTIALS_FILE")
if filePresent {
err := getAWSCredentialFromFile(credFile)
if err != nil {
g.Skip("Skip for the platform is not AWS and can't get credentials from file " + credFile)
}
} else {
_, keyIDPresent := os.LookupEnv("AWS_ACCESS_KEY_ID")
_, secretKeyPresent := os.LookupEnv("AWS_SECRET_ACCESS_KEY")
if !keyIDPresent || !secretKeyPresent {
g.Skip("Skip for the platform is not AWS and there is no AWS credentials set")
}
}
}
if cw.awsRegion == "" {
region, _ := exutil.GetAWSClusterRegion(oc)
if region != "" {
cw.awsRegion = region
} else {
// use us-east-2 as default region
cw.awsRegion = "us-east-2"
}
}
if cw.stsEnabled {
//Note: AWS China is not added, and the partition is `aws-cn`.
if strings.HasPrefix(cw.awsRegion, "us-gov") {
cw.awsPartition = "aws-us-gov"
} else {
cw.awsPartition = "aws"
}
//Create IAM roles for cloudwatch
cw.createIAMCloudwatchRole(oc)
}
cw.newCloudwatchClient()
e2e.Logf("Init cloudwatchSpec done")
} | logging | |||
function | openshift/openshift-tests-private | de6bc4e6-df73-4c1d-8c04-dbeecb9bcd04 | setGroupName | ['cloudwatchSpec'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func (cw *cloudwatchSpec) setGroupName(groupName string) {
cw.groupName = groupName
} | logging | ||||
function | openshift/openshift-tests-private | 505c71f0-3a31-424c-9c5a-19db61e9b4a5 | newCloudwatchClient | ['"context"', '"github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs"'] | ['cloudwatchSpec'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func (cw *cloudwatchSpec) newCloudwatchClient() {
cfg, err := awsConfig.LoadDefaultConfig(context.TODO(), awsConfig.WithRegion(cw.awsRegion))
o.Expect(err).NotTo(o.HaveOccurred())
// Create a Cloudwatch service client
cw.cwClient = cloudwatchlogs.NewFromConfig(cfg)
} | logging | |||
function | openshift/openshift-tests-private | 5cb00b40-0baf-4a17-aae7-5747a397a36e | newIamClient | ['"context"', '"github.com/aws/aws-sdk-go-v2/service/iam"'] | ['cloudwatchSpec'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func (cw *cloudwatchSpec) newIamClient() {
cfg, err := awsConfig.LoadDefaultConfig(context.TODO(), awsConfig.WithRegion(cw.awsRegion))
o.Expect(err).NotTo(o.HaveOccurred())
cw.iamClient = iam.NewFromConfig(cfg)
} | logging | |||
function | openshift/openshift-tests-private | 6119f3e4-4285-49c3-8366-014cfe43c7b2 | newIamRole | ['"context"', '"fmt"', '"github.com/aws/aws-sdk-go-v2/service/iam"', '"github.com/aws/aws-sdk-go-v2/service/sts"'] | ['cloudwatchSpec'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func (cw *cloudwatchSpec) newIamRole(oc *exutil.CLI) {
oidcProvider, e := getOIDC(oc)
o.Expect(e).NotTo(o.HaveOccurred())
awscfg, err := awsConfig.LoadDefaultConfig(context.TODO(), awsConfig.WithRegion(cw.awsRegion))
o.Expect(err).NotTo(o.HaveOccurred(), "failed to load AWS configuration")
stsClient := sts.NewFromConfig(awscfg)
accountID, _ := getAwsAccount(stsClient)
trustPolicy := `{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Federated": "arn:%s:iam::%s:oidc-provider/%s"
},
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringEquals": {
"%s:sub": "system:serviceaccount:%s:%s"
}
}
}
]
}`
trustPolicy = fmt.Sprintf(trustPolicy, cw.awsPartition, accountID, oidcProvider, oidcProvider, cw.secretNamespace, cw.collectorSAName)
cw.awsRoleArn = iamCreateRole(cw.iamClient, trustPolicy, cw.awsRoleName)
} | logging | |||
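For reference, substituting example values into the Sprintf above yields a document like this (account, OIDC host, namespace, and service account are placeholders):

// Rendered trust policy for partition "aws", account "123456789012",
// OIDC provider "oidc.example.com/abc", namespace "openshift-logging",
// and service account "cloudwatch-collector":
const renderedTrustPolicy = `{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": {
                "Federated": "arn:aws:iam::123456789012:oidc-provider/oidc.example.com/abc"
            },
            "Action": "sts:AssumeRoleWithWebIdentity",
            "Condition": {
                "StringEquals": {
                    "oidc.example.com/abc:sub": "system:serviceaccount:openshift-logging:cloudwatch-collector"
                }
            }
        }
    ]
}`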
function | openshift/openshift-tests-private | 24687bce-d978-4395-9bb0-603061adad2b | newIamPolicy | ['"fmt"'] | ['cloudwatchSpec'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func (cw *cloudwatchSpec) newIamPolicy() {
mgmtPolicy := `{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogGroups",
"logs:DescribeLogStreams",
"logs:PutLogEvents",
"logs:PutRetentionPolicy"
],
"Resource": "arn:%s:logs:*:*:*"
}
]
}`
cw.awsPolicyArn = iamCreatePolicy(cw.iamClient, fmt.Sprintf(mgmtPolicy, cw.awsPartition), cw.awsPolicyName)
} | logging | |||
function | openshift/openshift-tests-private | 0fb856b4-7e78-4443-ac60-669cf3b37449 | createIAMCloudwatchRole | ['"context"', '"os"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/service/iam"'] | ['cloudwatchSpec'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func (cw *cloudwatchSpec) createIAMCloudwatchRole(oc *exutil.CLI) {
if os.Getenv("AWS_CLOUDWATCH_ROLE_ARN") != "" {
cw.awsRoleArn = os.Getenv("AWS_CLOUDWATCH_ROLE_ARN")
return
}
cw.awsRoleName = cw.secretName + "-" + getInfrastructureName(oc)
cw.awsPolicyName = cw.awsRoleName
cw.newIamClient()
e2e.Logf("Created aws iam role: %v", cw.awsRoleName)
cw.newIamRole(oc)
cw.newIamPolicy()
_, err := cw.iamClient.AttachRolePolicy(context.TODO(), &iam.AttachRolePolicyInput{
PolicyArn: &cw.awsPolicyArn,
RoleName: &cw.awsRoleName,
})
o.Expect(err).NotTo(o.HaveOccurred())
} | logging | |||
function | openshift/openshift-tests-private | 1b295e87-a4ff-4ab6-b75d-14f1b259e8da | deleteIAMCloudwatchRole | ['"context"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/service/iam"'] | ['cloudwatchSpec'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func (cw *cloudwatchSpec) deleteIAMCloudwatchRole() {
cw.iamClient.DetachRolePolicy(context.TODO(), &iam.DetachRolePolicyInput{
PolicyArn: aws.String(cw.awsPolicyArn),
RoleName: aws.String(cw.awsRoleName),
},
)
iamDeleteRole(cw.iamClient, cw.awsRoleName)
iamDeletePolicy(cw.iamClient, cw.awsPolicyArn)
} | logging | |||
function | openshift/openshift-tests-private | 4f91920d-b9e1-4b27-93de-b4a7164ca28e | createClfSecret | ['"os"'] | ['cloudwatchSpec'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func (cw *cloudwatchSpec) createClfSecret(oc *exutil.CLI) {
var err error
if cw.stsEnabled {
token, _ := oc.AsAdmin().WithoutNamespace().Run("create").Args("token", cw.collectorSAName, "--audience=openshift", "--duration=24h", "-n", cw.secretNamespace).Output()
err = oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", cw.secretName, "--from-literal=role_arn="+cw.awsRoleArn, "--from-literal=token="+token, "-n", cw.secretNamespace).Execute()
} else {
err = oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", cw.secretName, "--from-literal=aws_access_key_id="+os.Getenv("AWS_ACCESS_KEY_ID"), "--from-literal=aws_secret_access_key="+os.Getenv("AWS_SECRET_ACCESS_KEY"), "-n", cw.secretNamespace).Execute()
}
o.Expect(err).NotTo(o.HaveOccurred())
} | logging | |||
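A sketch of driving this helper; field values are placeholders, and the secret keys are the literal ones used above:

// Sketch: minimal spec fields needed before creating the secret.
cw := cloudwatchSpec{
	secretName:      "my-cw-secret",          // placeholder
	secretNamespace: "openshift-logging",     // placeholder
	collectorSAName: "cloudwatch-collector",  // placeholder
}
cw.init(oc)            // sets stsEnabled and, on STS, the role ARN
cw.createClfSecret(oc) // role_arn+token on STS; aws_access_key_id/aws_secret_access_key otherwise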
function | openshift/openshift-tests-private | da073119-167f-4870-9665-edee60878ce8 | deleteGroups | ['"context"', '"time"', '"github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs"', '"k8s.io/apimachinery/pkg/util/wait"'] | ['cloudwatchSpec'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func (cw *cloudwatchSpec) deleteGroups(groupPrefix string) {
wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 90*time.Second, true, func(context.Context) (done bool, err error) {
logGroupNames, _ := cw.getLogGroupNames(groupPrefix)
if len(logGroupNames) == 0 {
return true, nil
}
for _, name := range logGroupNames {
_, err := cw.cwClient.DeleteLogGroup(context.TODO(), &cloudwatchlogs.DeleteLogGroupInput{LogGroupName: &name})
if err != nil {
e2e.Logf("Can't delete log group: %s", name)
} else {
e2e.Logf("Log group %s is deleted", name)
}
}
return false, nil
})
} | logging |
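A teardown sketch mirroring how the tests above use these helpers; the group prefix is a placeholder:

// Sketch: typical cleanup order at the end of a case.
defer cw.deleteResources(oc) // secret + log groups + IAM role/policy (when created here)
// or, to clear only the groups under a given prefix:
cw.deleteGroups("logging-74926-")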