element_type | project_name | uuid | name | imports | structs | interfaces | file_location | code | global_vars | package | tags |
---|---|---|---|---|---|---|---|---|---|---|---|
function | openshift/openshift-tests-private | d4a41189-74f7-4b55-a308-1aa1123b8e49 | deleteResources | ['"os"'] | ['cloudwatchSpec'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func (cw *cloudwatchSpec) deleteResources(oc *exutil.CLI) {
resource{"secret", cw.secretName, cw.secretNamespace}.clear(oc)
cw.deleteGroups("")
// delete the IAM role only when it was created by this test case
if cw.stsEnabled && os.Getenv("AWS_CLOUDWATCH_ROLE_ARN") == "" {
cw.deleteIAMCloudwatchRole()
}
} | logging | |||
function | openshift/openshift-tests-private | 6f6ae98e-6ac1-477c-aa5f-1beb3aff92fe | getLogGroupNames | ['"context"', '"fmt"', '"strings"', '"github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs"'] | ['cloudwatchSpec'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func (cw cloudwatchSpec) getLogGroupNames(groupPrefix string) ([]string, error) {
var (
groupNames []string
)
if groupPrefix == "" {
if strings.Contains(cw.groupName, "{") {
groupPrefix = strings.Split(cw.groupName, "{")[0]
} else {
groupPrefix = cw.groupName
}
}
logGroupDesc, err := cw.cwClient.DescribeLogGroups(context.TODO(), &cloudwatchlogs.DescribeLogGroupsInput{
LogGroupNamePrefix: &groupPrefix,
})
if err != nil {
return groupNames, fmt.Errorf("can't get log groups from cloudwatch: %v", err)
}
for _, group := range logGroupDesc.LogGroups {
groupNames = append(groupNames, *group.LogGroupName)
}
nextToken := logGroupDesc.NextToken
for nextToken != nil {
logGroupDesc, err = cw.cwClient.DescribeLogGroups(context.TODO(), &cloudwatchlogs.DescribeLogGroupsInput{
LogGroupNamePrefix: &groupPrefix,
NextToken: nextToken,
})
if err != nil {
return groupNames, fmt.Errorf("can't get log groups from cloudwatch: %v", err)
}
for _, group := range logGroupDesc.LogGroups {
groupNames = append(groupNames, *group.LogGroupName)
}
nextToken = logGroupDesc.NextToken
}
return groupNames, nil
} | logging | |||
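The manual `NextToken` loop above can also be expressed with the SDK's generated paginator. A minimal standalone sketch of the same pagination technique, assuming default AWS credentials and a hypothetical `uid-` group prefix; this is an illustration, not the helper used by the tests:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs"
)

// listLogGroups collects every log group name matching prefix, letting the
// SDK paginator drive NextToken handling instead of a manual loop.
func listLogGroups(ctx context.Context, client *cloudwatchlogs.Client, prefix string) ([]string, error) {
	var names []string
	paginator := cloudwatchlogs.NewDescribeLogGroupsPaginator(client, &cloudwatchlogs.DescribeLogGroupsInput{
		LogGroupNamePrefix: &prefix,
	})
	for paginator.HasMorePages() {
		page, err := paginator.NextPage(ctx)
		if err != nil {
			return names, fmt.Errorf("can't get log groups from cloudwatch: %w", err)
		}
		for _, group := range page.LogGroups {
			names = append(names, *group.LogGroupName)
		}
	}
	return names, nil
}

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	// "uid-" is a placeholder prefix; the tests derive theirs from cw.groupName.
	names, err := listLogGroups(context.TODO(), cloudwatchlogs.NewFromConfig(cfg), "uid-")
	fmt.Println(names, err)
}
```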
function | openshift/openshift-tests-private | f8c0b973-2593-41b9-89d7-b69d3d375ed4 | waitForLogGroupsAppear | ['"context"', '"fmt"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | ['cloudwatchSpec'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func (cw *cloudwatchSpec) waitForLogGroupsAppear(groupPrefix, keyword string) error {
if groupPrefix == "" {
if strings.Contains(cw.groupName, "{") {
groupPrefix = strings.Split(cw.groupName, "{")[0]
} else {
groupPrefix = cw.groupName
}
}
err := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
groups, err := cw.getLogGroupNames(groupPrefix)
if err != nil {
e2e.Logf("error getting log groups: %v", err)
return false, nil
}
if len(groups) == 0 {
e2e.Logf("no log groups match the prefix: %s", groupPrefix)
return false, nil
}
e2e.Logf("the log group names %v", groups)
if keyword != "" {
return containSubstring(groups, keyword), nil
}
return true, nil
})
if err != nil {
return fmt.Errorf("can't find log groups with prefix: %s", groupPrefix)
}
return nil
} | logging | |||
function | openshift/openshift-tests-private | 08329698-290b-48f1-b6c7-b95cde552991 | getLogStreamNames | ['"context"', '"fmt"', '"github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs"'] | ['cloudwatchSpec'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func (cw *cloudwatchSpec) getLogStreamNames(groupName string, streamPrefix string) ([]string, error) {
var (
logStreamNames []string
err error
logStreamDesc *cloudwatchlogs.DescribeLogStreamsOutput
logStreamsInput cloudwatchlogs.DescribeLogStreamsInput
)
if streamPrefix == "" {
logStreamsInput = cloudwatchlogs.DescribeLogStreamsInput{
LogGroupName: &groupName,
}
} else {
logStreamsInput = cloudwatchlogs.DescribeLogStreamsInput{
LogGroupName: &groupName,
LogStreamNamePrefix: &streamPrefix,
}
}
logStreamDesc, err = cw.cwClient.DescribeLogStreams(context.TODO(), &logStreamsInput)
if err != nil {
return logStreamNames, fmt.Errorf("can't get log streams: %v", err)
}
for _, stream := range logStreamDesc.LogStreams {
logStreamNames = append(logStreamNames, *stream.LogStreamName)
}
nextToken := logStreamDesc.NextToken
for nextToken != nil {
if streamPrefix == "" {
logStreamsInput = cloudwatchlogs.DescribeLogStreamsInput{
LogGroupName: &groupName,
NextToken: nextToken,
}
} else {
logStreamsInput = cloudwatchlogs.DescribeLogStreamsInput{
LogGroupName: &groupName,
LogStreamNamePrefix: &streamPrefix,
NextToken: nextToken,
}
}
logStreamDesc, err = cw.cwClient.DescribeLogStreams(context.TODO(), &logStreamsInput)
if err != nil {
return logStreamNames, fmt.Errorf("can't get log streams from cloudwatch: %v", err)
}
for _, stream := range logStreamDesc.LogStreams {
logStreamNames = append(logStreamNames, *stream.LogStreamName)
}
nextToken = logStreamDesc.NextToken
}
return logStreamNames, nil
} | logging | |||
function | openshift/openshift-tests-private | d0b1f7f8-e098-4b74-8f0d-a695f51b848a | checkInfraContainerLogs | ['"regexp"', '"strings"'] | ['cloudwatchSpec'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func (cw *cloudwatchSpec) checkInfraContainerLogs(strict bool) bool {
var (
infraLogGroupNames []string
logStreams []string
)
logGroupNames, err := cw.getLogGroupNames("")
o.Expect(err).NotTo(o.HaveOccurred())
if len(logGroupNames) == 0 {
return false
}
if strings.Contains(cw.groupName, "{.log_type") {
for _, e := range logGroupNames {
r, _ := regexp.Compile(`.*\.infrastructure$`)
match := r.MatchString(e)
if match {
infraLogGroupNames = append(infraLogGroupNames, e)
}
}
}
if len(infraLogGroupNames) == 0 {
infraLogGroupNames = logGroupNames
}
e2e.Logf("the log group names for infra container logs are %v", infraLogGroupNames)
// get all the log streams under the log groups
for _, group := range infraLogGroupNames {
streams, _ := cw.getLogStreamNames(group, "")
for _, stream := range streams {
if strings.Contains(stream, ".openshift-") {
logStreams = append(logStreams, stream)
}
}
}
// when strict=true, return true only if we can find pod log streams for all nodes
if strict {
if len(cw.nodes) == 0 {
e2e.Logf("node name is empty, please get node names at first")
return false
}
for _, node := range cw.nodes {
if !containSubstring(logStreams, node+".openshift-") {
e2e.Logf("can't find log stream %s", node+".openshift-")
return false
}
}
return true
} else {
return len(logStreams) > 0
}
} | logging | |||
function | openshift/openshift-tests-private | 736822f7-a59e-438b-9c23-dbbf9b9de8e5 | checkInfraNodeLogs | ['"regexp"', '"strings"', '"github.com/aws/aws-sdk-go-v2/aws"'] | ['cloudwatchSpec'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func (cw *cloudwatchSpec) checkInfraNodeLogs(strict bool) bool {
var (
infraLogGroupNames []string
logStreams []string
)
logGroupNames, err := cw.getLogGroupNames("")
if err != nil || len(logGroupNames) == 0 {
return false
}
for _, group := range logGroupNames {
r, _ := regexp.Compile(`.*\.infrastructure$`)
match := r.MatchString(group)
if match {
infraLogGroupNames = append(infraLogGroupNames, group)
}
}
if len(infraLogGroupNames) == 0 {
infraLogGroupNames = logGroupNames
}
e2e.Logf("the infra node log group names are %v", infraLogGroupNames)
// get all the log streams under the log groups
for _, group := range infraLogGroupNames {
streams, _ := cw.getLogStreamNames(group, "")
for _, stream := range streams {
if strings.Contains(stream, ".journal.system") {
logStreams = append(logStreams, stream)
}
}
}
e2e.Logf("the infrastructure node log streams: %v", logStreams)
// when strict=true, return true only if we can find log streams from all nodes
if strict {
var expectedStreamNames []string
if len(cw.nodes) == 0 {
e2e.Logf("node name is empty, please get node names at first")
return false
}
//stream name: ip-10-0-152-69.journal.system
if cw.clusterPlatformType == "aws" {
for _, node := range cw.nodes {
expectedStreamNames = append(expectedStreamNames, strings.Split(node, ".")[0])
}
} else {
expectedStreamNames = append(expectedStreamNames, cw.nodes...)
}
for _, name := range expectedStreamNames {
streamName := name + ".journal.system"
if !contain(logStreams, streamName) {
e2e.Logf("can't find log stream %s", streamName)
return false
}
}
return true
} else {
return len(logStreams) > 0
}
} | logging | |||
function | openshift/openshift-tests-private | 2567643f-e557-40a1-b640-2580af626ab7 | infrastructureLogsFound | ['cloudwatchSpec'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func (cw *cloudwatchSpec) infrastructureLogsFound(strict bool) bool {
return cw.checkInfraContainerLogs(strict) && cw.checkInfraNodeLogs(strict)
} | logging | ||||
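`containSubstring` and `contain` are shared helpers defined elsewhere in the logging package and are not shown in this section. A hypothetical reconstruction consistent with how they are called above (a string slice plus a search term, returning a bool); the real implementations may differ:

```go
package logging

import "strings"

// contain reports whether the slice holds an exact match for s.
// Hypothetical sketch; the real helper lives elsewhere in the package.
func contain(items []string, s string) bool {
	for _, item := range items {
		if item == s {
			return true
		}
	}
	return false
}

// containSubstring reports whether any element of the slice contains sub.
// Hypothetical sketch; the real helper lives elsewhere in the package.
func containSubstring(items []string, sub string) bool {
	for _, item := range items {
		if strings.Contains(item, sub) {
			return true
		}
	}
	return false
}
```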
function | openshift/openshift-tests-private | afba0b42-92f8-4051-94a7-c1bcc4d59709 | auditLogsFound | ['"regexp"'] | ['cloudwatchSpec'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func (cw *cloudwatchSpec) auditLogsFound(strict bool) bool {
var (
auditLogGroupNames []string
logStreams []string
)
logGroupNames, err := cw.getLogGroupNames("")
if err != nil || len(logGroupNames) == 0 {
return false
}
for _, e := range logGroupNames {
r, _ := regexp.Compile(`.*\.audit$`)
match := r.MatchString(e)
if match {
auditLogGroupNames = append(auditLogGroupNames, e)
}
}
if len(auditLogGroupNames) == 0 {
auditLogGroupNames = logGroupNames
}
e2e.Logf("the log group names for audit logs are %v", auditLogGroupNames)
// stream name: ip-10-0-74-46.us-east-2.compute.internal
// get all the log streams under the log groups
for _, group := range auditLogGroupNames {
streams, _ := cw.getLogStreamNames(group, "")
logStreams = append(logStreams, streams...)
}
// when strict=true, return true only if we can find audit log streams for all nodes
if strict {
if len(cw.nodes) == 0 {
e2e.Logf("node name is empty, please get node names at first")
return false
}
for _, node := range cw.nodes {
if !containSubstring(logStreams, node) {
e2e.Logf("can't find log stream from node: %s", node)
return false
}
}
return true
} else {
return len(logStreams) > 0
}
} | logging | |||
function | openshift/openshift-tests-private | 14db4c25-3d4a-4a2b-83bf-b55bff35ffe4 | checkLogGroupByNamespaceID | ['"strings"'] | ['cloudwatchSpec'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func (cw *cloudwatchSpec) checkLogGroupByNamespaceID() bool {
var (
groupPrefix string
)
if strings.Contains(cw.groupName, ".kubernetes.namespace_id") {
groupPrefix = strings.Split(cw.groupName, "{")[0]
} else {
e2e.Logf("the group name doesn't contain .kubernetes.namespace_id, no need to call this function")
return false
}
for _, namespaceID := range cw.selNamespacesID {
groupErr := cw.waitForLogGroupsAppear(groupPrefix, namespaceID)
if groupErr != nil {
e2e.Logf("can't find log group named %s", namespaceID)
return false
}
}
return true
} | logging | |||
function | openshift/openshift-tests-private | 38275a74-51dd-4d45-831a-98dbc25ba245 | checkLogGroupByNamespaceName | ['"strings"'] | ['cloudwatchSpec'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func (cw *cloudwatchSpec) checkLogGroupByNamespaceName() bool {
var (
groupPrefix string
)
if strings.Contains(cw.groupName, ".kubernetes.namespace_name") {
groupPrefix = strings.Split(cw.groupName, "{")[0]
} else {
e2e.Logf("the group name doesn't contain .kubernetes.namespace_name, no need to call this function")
return false
}
for _, namespaceName := range cw.selAppNamespaces {
groupErr := cw.waitForLogGroupsAppear(groupPrefix, namespaceName)
if groupErr != nil {
e2e.Logf("can't find log group named %s", namespaceName)
return false
}
}
for _, ns := range cw.disAppNamespaces {
groups, err := cw.getLogGroupNames(groupPrefix)
if err != nil {
return false
}
if containSubstring(groups, ns) {
return false
}
}
return true
} | logging | |||
function | openshift/openshift-tests-private | 38070f18-be62-4597-8927-04ba9f1cd9f3 | getApplicationLogStreams | ['"regexp"', '"strings"'] | ['cloudwatchSpec'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func (cw *cloudwatchSpec) getApplicationLogStreams() ([]string, error) {
var (
appLogGroupNames []string
logStreams []string
)
logGroupNames, err := cw.getLogGroupNames("")
if err != nil || len(logGroupNames) == 0 {
return logStreams, err
}
for _, e := range logGroupNames {
r, _ := regexp.Compile(`.*\.application$`)
match := r.MatchString(e)
if match {
appLogGroupNames = append(appLogGroupNames, e)
}
}
if len(appLogGroupNames) == 0 {
appLogGroupNames = logGroupNames
}
e2e.Logf("the log group names for application logs are %v", appLogGroupNames)
for _, group := range appLogGroupNames {
streams, _ := cw.getLogStreamNames(group, "")
for _, stream := range streams {
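// node-level streams are named after AWS hosts (e.g. ip-10-0-152-69...), so streams containing "ip-10-0" are filtered out to keep only application pod streams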
if !strings.Contains(stream, "ip-10-0") {
logStreams = append(logStreams, stream)
}
}
}
return logStreams, nil
} | logging | |||
function | openshift/openshift-tests-private | 332dabc5-8430-4c9a-9fb7-0231b9540ad0 | applicationLogsFound | ['"strings"'] | ['cloudwatchSpec'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func (cw *cloudwatchSpec) applicationLogsFound() bool {
if (len(cw.selAppNamespaces) > 0 || len(cw.disAppNamespaces) > 0) && strings.Contains(cw.groupName, ".kubernetes.namespace_name") {
return cw.checkLogGroupByNamespaceName()
}
if len(cw.selNamespacesID) > 0 {
return cw.checkLogGroupByNamespaceID()
}
logStreams, err := cw.getApplicationLogStreams()
if err != nil || len(logStreams) == 0 {
return false
}
for _, ns := range cw.selAppNamespaces {
if !containSubstring(logStreams, ns) {
e2e.Logf("can't find logs from project %s", ns)
return false
}
}
for _, ns := range cw.disAppNamespaces {
if containSubstring(logStreams, ns) {
e2e.Logf("find logs from project %s, this is not expected", ns)
return false
}
}
return true
} | logging | |||
function | openshift/openshift-tests-private | 8652c359-24a0-4f4c-855f-b7202f5890a8 | logsFound | ['"context"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | ['cloudwatchSpec'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func (cw *cloudwatchSpec) logsFound() bool {
var (
appLogSuccess = true
infraLogSuccess = true
auditLogSuccess = true
)
for _, logType := range cw.logTypes {
switch logType {
case "infrastructure":
err := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
return cw.infrastructureLogsFound(true), nil
})
if err != nil {
e2e.Logf("can't find infrastructure in given time")
infraLogSuccess = false
}
case "audit":
err := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
return cw.auditLogsFound(false), nil
})
if err != nil {
e2e.Logf("can't find audit logs in given time")
auditLogSuccess = false
}
case "application":
err := wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
return cw.applicationLogsFound(), nil
})
if err != nil {
e2e.Logf("can't find application logs in given time")
appLogSuccess = false
}
}
}
return infraLogSuccess && auditLogSuccess && appLogSuccess
} | logging | |||
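A hypothetical end-of-test assertion showing how `logsFound` is meant to be consumed; `cw` is assumed to be a `cloudwatchSpec` whose `logTypes`, `groupName`, `nodes` and `cwClient` fields were populated earlier in the test case:

```go
// Hypothetical usage inside a Ginkgo It(...) block of this package.
o.Expect(cw.logsFound()).To(o.BeTrue(), "not every selected log type reached CloudWatch in time")
```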
function | openshift/openshift-tests-private | 4c22772b-d856-4ef6-aab9-3d6fbd30c237 | getLogRecordsByNamespace | ['"context"', '"encoding/json"', '"fmt"', '"time"', '"github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs"', '"k8s.io/apimachinery/pkg/util/wait"'] | ['cloudwatchSpec'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func (cw *cloudwatchSpec) getLogRecordsByNamespace(limit int32, logGroupName string, namespaceName string) ([]LogEntity, error) {
var (
output *cloudwatchlogs.FilterLogEventsOutput
logs []LogEntity
)
streamNames, streamErr := cw.getLogStreamNames(logGroupName, namespaceName)
if streamErr != nil {
return logs, streamErr
}
e2e.Logf("the log streams: %v", streamNames)
err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
output, err = cw.filterLogEvents(limit, logGroupName, "", streamNames...)
if err != nil {
e2e.Logf("get error when filter events in cloudwatch, try next time")
return false, nil
}
if len(output.Events) == 0 {
return false, nil
}
return true, nil
})
if err != nil {
return nil, fmt.Errorf("the query is not completed in 5 minutes or there is no log record matches the query: %v", err)
}
for _, event := range output.Events {
var log LogEntity
json.Unmarshal([]byte(*event.Message), &log)
logs = append(logs, log)
}
return logs, nil
} | logging | |||
function | openshift/openshift-tests-private | 21ac3a63-a360-4b7b-b7c2-29cb72c78b71 | filterLogEvents | ['"context"', '"fmt"', '"github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs"'] | ['cloudwatchSpec'] | github.com/openshift/openshift-tests-private/test/extended/logging/aws_utils.go | func (cw *cloudwatchSpec) filterLogEvents(limit int32, logGroupName, logStreamNamePrefix string, logStreamNames ...string) (*cloudwatchlogs.FilterLogEventsOutput, error) {
if len(logStreamNamePrefix) > 0 && len(logStreamNames) > 0 {
return nil, fmt.Errorf("invalidParameterException: logStreamNamePrefix and logStreamNames are specified")
}
var (
err error
output *cloudwatchlogs.FilterLogEventsOutput
)
if len(logStreamNamePrefix) > 0 {
output, err = cw.cwClient.FilterLogEvents(context.TODO(), &cloudwatchlogs.FilterLogEventsInput{
LogGroupName: &logGroupName,
LogStreamNamePrefix: &logStreamNamePrefix,
Limit: &limit,
})
} else if len(logStreamNames) > 0 {
output, err = cw.cwClient.FilterLogEvents(context.TODO(), &cloudwatchlogs.FilterLogEventsInput{
LogGroupName: &logGroupName,
LogStreamNames: logStreamNames,
Limit: &limit,
})
}
return output, err
} | logging | |||
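`filterLogEvents` enforces the CloudWatch Logs rule that `LogStreamNamePrefix` and `LogStreamNames` are mutually exclusive in a `FilterLogEvents` request. A minimal standalone sketch of the prefix variant against the SDK, assuming default credentials; the group and stream names are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := cloudwatchlogs.NewFromConfig(cfg)

	// Placeholder names: fetch up to 10 events from streams whose names start
	// with "kubernetes." in the group "uid-.application".
	out, err := client.FilterLogEvents(context.TODO(), &cloudwatchlogs.FilterLogEventsInput{
		LogGroupName:        aws.String("uid-.application"),
		LogStreamNamePrefix: aws.String("kubernetes."),
		Limit:               aws.Int32(10),
		// Setting LogStreamNames here as well would be rejected by the API with an
		// InvalidParameterException, which is exactly what filterLogEvents guards against.
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, event := range out.Events {
		fmt.Println(*event.Message)
	}
}
```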
file | openshift/openshift-tests-private | e3440393-6e0d-4a0c-b812-29cc0d4cf3b7 | azure_utils | import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
azarm "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
azcloud "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
azpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
azRuntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
azto "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
"github.com/Azure/azure-sdk-for-go/sdk/monitor/query/azlogs"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/operationalinsights/armoperationalinsights"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/google/uuid"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/logging/azure_utils.go | package logging
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
azarm "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
azcloud "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
azpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
azRuntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
azto "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
"github.com/Azure/azure-sdk-for-go/sdk/monitor/query/azlogs"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/operationalinsights/armoperationalinsights"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/google/uuid"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
// Creates a new default Azure credential
func createNewDefaultAzureCredential() *azidentity.DefaultAzureCredential {
cred, err := azidentity.NewDefaultAzureCredential(nil)
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to obtain a credential")
return cred
}
// Function to create a managed identity on Azure
func createManagedIdentityOnAzure(defaultAzureCred *azidentity.DefaultAzureCredential, azureSubscriptionID, lokiStackName, resourceGroup, region string) (string, string) {
// Create the MSI client
client, err := armmsi.NewUserAssignedIdentitiesClient(azureSubscriptionID, defaultAzureCred, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to create MSI client")
// Configure the managed identity
identity := armmsi.Identity{
Location: ®ion,
}
// Create the identity
result, err := client.CreateOrUpdate(context.Background(), resourceGroup, lokiStackName, identity, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to create or update the identity")
return *result.Properties.ClientID, *result.Properties.PrincipalID
}
// Function to create Federated Credentials on Azure
func createFederatedCredentialforLoki(defaultAzureCred *azidentity.DefaultAzureCredential, azureSubscriptionID, managedIdentityName, lokiServiceAccount, lokiStackNS, federatedCredentialName, serviceAccountIssuer, resourceGroup string) {
subjectName := "system:serviceaccount:" + lokiStackNS + ":" + lokiServiceAccount
// Create the Federated Identity Credentials client
client, err := armmsi.NewFederatedIdentityCredentialsClient(azureSubscriptionID, defaultAzureCred, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to create federated identity credentials client")
// Create or update the federated identity credential
result, err := client.CreateOrUpdate(
context.Background(),
resourceGroup,
managedIdentityName,
federatedCredentialName,
armmsi.FederatedIdentityCredential{
Properties: &armmsi.FederatedIdentityCredentialProperties{
Issuer: &serviceAccountIssuer,
Subject: &subjectName,
Audiences: []*string{azto.Ptr("api://AzureADTokenExchange")},
},
},
nil,
)
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to create or update the federated credential: "+federatedCredentialName)
e2e.Logf("Federated credential created/updated successfully: %s\n", *result.Name)
}
// Assigns a role to an Azure Managed Identity at subscription-level scope
func createRoleAssignmentForManagedIdentity(defaultAzureCred *azidentity.DefaultAzureCredential, azureSubscriptionID, identityPrincipalID string) {
clientFactory, err := armauthorization.NewClientFactory(azureSubscriptionID, defaultAzureCred, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to create instance of ClientFactory")
scope := "/subscriptions/" + azureSubscriptionID
// Below is standard role definition ID for Storage Blob Data Contributor built-in role
roleDefinitionID := scope + "/providers/Microsoft.Authorization/roleDefinitions/ba92f5b4-2d11-453d-a403-e96b0029c9fe"
// Create or update a role assignment by scope and name
_, err = clientFactory.NewRoleAssignmentsClient().Create(context.Background(), scope, uuid.NewString(), armauthorization.RoleAssignmentCreateParameters{
Properties: &armauthorization.RoleAssignmentProperties{
PrincipalID: azto.Ptr(identityPrincipalID),
PrincipalType: azto.Ptr(armauthorization.PrincipalTypeServicePrincipal),
RoleDefinitionID: azto.Ptr(roleDefinitionID),
},
}, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "Role Assignment operation failure....")
}
// Creates Azure storage account
func createStorageAccountOnAzure(defaultAzureCred *azidentity.DefaultAzureCredential, azureSubscriptionID, resourceGroup, region string) string {
storageAccountName := "aosqelogging" + getRandomString()
// Create the storage account
storageClient, err := armstorage.NewAccountsClient(azureSubscriptionID, defaultAzureCred, nil)
o.Expect(err).NotTo(o.HaveOccurred())
result, err := storageClient.BeginCreate(context.Background(), resourceGroup, storageAccountName, armstorage.AccountCreateParameters{
Location: azto.Ptr(region),
SKU: &armstorage.SKU{
Name: azto.Ptr(armstorage.SKUNameStandardLRS),
},
Kind: azto.Ptr(armstorage.KindStorageV2),
}, nil)
o.Expect(err).NotTo(o.HaveOccurred())
// Poll until the Storage account is ready
_, err = result.PollUntilDone(context.Background(), &azRuntime.PollUntilDoneOptions{
Frequency: 10 * time.Second,
})
o.Expect(err).NotTo(o.HaveOccurred(), "Storage account is not ready...")
os.Setenv("LOKI_OBJECT_STORAGE_STORAGE_ACCOUNT", storageAccountName)
return storageAccountName
}
// Returns the Azure environment and storage account URI suffixes
func getStorageAccountURISuffixAndEnvForAzure(oc *exutil.CLI) (string, string) {
// To return account URI suffix and env
cloudName, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.azure.cloudName}").Output()
storageAccountURISuffix := ".blob.core.windows.net"
environment := "AzureGlobal"
// Currently we don't have template support for STS/WIF on Azure Government
// The below code should be ok to run when support is added for WIF
if strings.ToLower(cloudName) == "azureusgovernmentcloud" {
storageAccountURISuffix = ".blob.core.usgovcloudapi.net"
environment = "AzureUSGovernment"
}
if strings.ToLower(cloudName) == "azurechinacloud" {
storageAccountURISuffix = ".blob.core.chinacloudapi.cn"
environment = "AzureChinaCloud"
}
if strings.ToLower(cloudName) == "azuregermancloud" {
environment = "AzureGermanCloud"
storageAccountURISuffix = ".blob.core.cloudapi.de"
}
return environment, storageAccountURISuffix
}
// Creates a blob container under the provided storageAccount
func createBlobContaineronAzure(defaultAzureCred *azidentity.DefaultAzureCredential, storageAccountName, storageAccountURISuffix, containerName string) {
blobServiceClient, err := azblob.NewClient(fmt.Sprintf("https://%s%s", storageAccountName, storageAccountURISuffix), defaultAzureCred, nil)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = blobServiceClient.CreateContainer(context.Background(), containerName, nil)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("%s container created successfully: ", containerName)
}
// Creates Loki object storage secret required on Azure STS/WIF clusters
func createLokiObjectStorageSecretForWIF(oc *exutil.CLI, lokiStackNS, objectStorageSecretName, environment, containerName, storageAccountName string) error {
return oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", "-n", lokiStackNS, objectStorageSecretName, "--from-literal=environment="+environment, "--from-literal=container="+containerName, "--from-literal=account_name="+storageAccountName).Execute()
}
// Deletes a storage account in Microsoft Azure
func deleteAzureStorageAccount(defaultAzureCred *azidentity.DefaultAzureCredential, azureSubscriptionID, resourceGroupName, storageAccountName string) {
clientFactory, err := armstorage.NewClientFactory(azureSubscriptionID, defaultAzureCred, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to create instance of ClientFactory for storage account deletion")
_, err = clientFactory.NewAccountsClient().Delete(context.Background(), resourceGroupName, storageAccountName, nil)
if err != nil {
e2e.Logf("Error while deleting storage account: %s", err.Error())
} else {
e2e.Logf("storage account deleted successfully..")
}
}
// Deletes the Azure Managed identity
func deleteManagedIdentityOnAzure(defaultAzureCred *azidentity.DefaultAzureCredential, azureSubscriptionID, resourceGroupName, identityName string) {
client, err := armmsi.NewUserAssignedIdentitiesClient(azureSubscriptionID, defaultAzureCred, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to create MSI client for identity deletion")
_, err = client.Delete(context.Background(), resourceGroupName, identityName, nil)
if err != nil {
e2e.Logf("Error deleting identity: %s", err.Error())
} else {
e2e.Logf("managed identity deleted successfully...")
}
}
// patches CLIENT_ID, SUBSCRIPTION_ID, TENANT_ID AND REGION into Loki subscription on Azure WIF clusters
func patchLokiConfigIntoLokiSubscription(oc *exutil.CLI, azureSubscriptionID, identityClientID, region string) {
patchConfig := `{
"spec": {
"config": {
"env": [
{
"name": "CLIENTID",
"value": "%s"
},
{
"name": "TENANTID",
"value": "%s"
},
{
"name": "SUBSCRIPTIONID",
"value": "%s"
},
{
"name": "REGION",
"value": "%s"
}
]
}
}
}`
err := oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("patch").Args("sub", "loki-operator", "-n", loNS, "-p", fmt.Sprintf(patchConfig, identityClientID, os.Getenv("AZURE_TENANT_ID"), azureSubscriptionID, region), "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred(), "Patching Loki Operator failed...")
waitForPodReadyWithLabel(oc, loNS, "name=loki-operator-controller-manager")
}
// Performs creation of Managed Identity, Associated Federated credentials, Role assignment to the managed identity and object storage creation on Azure
func performManagedIdentityAndSecretSetupForAzureWIF(oc *exutil.CLI, lokistackName, lokiStackNS, azureContainerName, lokiStackStorageSecretName string) {
region, err := getAzureClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
serviceAccountIssuer, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("authentication.config", "cluster", "-o=jsonpath={.spec.serviceAccountIssuer}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
resourceGroup, err := getAzureResourceGroupFromCluster(oc)
o.Expect(err).NotTo(o.HaveOccurred())
azureSubscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID")
cred := createNewDefaultAzureCredential()
identityClientID, identityPrincipalID := createManagedIdentityOnAzure(cred, azureSubscriptionID, lokistackName, resourceGroup, region)
createFederatedCredentialforLoki(cred, azureSubscriptionID, lokistackName, lokistackName, lokiStackNS, "openshift-logging-"+lokistackName, serviceAccountIssuer, resourceGroup)
createFederatedCredentialforLoki(cred, azureSubscriptionID, lokistackName, lokistackName+"-ruler", lokiStackNS, "openshift-logging-"+lokistackName+"-ruler", serviceAccountIssuer, resourceGroup)
createRoleAssignmentForManagedIdentity(cred, azureSubscriptionID, identityPrincipalID)
patchLokiConfigIntoLokiSubscription(oc, azureSubscriptionID, identityClientID, region)
storageAccountName := createStorageAccountOnAzure(cred, azureSubscriptionID, resourceGroup, region)
environment, storageAccountURISuffix := getStorageAccountURISuffixAndEnvForAzure(oc)
createBlobContaineronAzure(cred, storageAccountName, storageAccountURISuffix, azureContainerName)
err = createLokiObjectStorageSecretForWIF(oc, lokiStackNS, lokiStackStorageSecretName, environment, azureContainerName, storageAccountName)
o.Expect(err).NotTo(o.HaveOccurred())
}
// Function to check if tenant logs are present under the Azure blob Container.
// Use getStorageAccountURISuffixAndEnvForAzure() to get the storage account URI suffix.
// Returns success if any one of the tenants under tenants[] are found.
func validatesIfLogsArePushedToAzureContainer(storageAccountURISuffix, storageAccountName, containerName string, tenants []string) {
cred := createNewDefaultAzureCredential()
// Create a new Blob service client
serviceClient, err := azblob.NewClient("https://"+storageAccountName+storageAccountURISuffix, cred, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to create service client..")
// Poll to check log streams are flushed to container referenced under loki object storage secret
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
// Create a client to interact with the container and List blobs in the container
pager := serviceClient.NewListBlobsFlatPager(containerName, nil)
for pager.More() {
// advance to the next page
page, err := pager.NextPage(context.TODO())
o.Expect(err).NotTo(o.HaveOccurred())
// check the blob names for this page
for _, blob := range page.Segment.BlobItems {
for _, tenantName := range tenants {
if strings.Contains(*blob.Name, tenantName) {
e2e.Logf("Logs %s found under the container: %s", *blob.Name, containerName)
return true, nil
}
}
}
}
e2e.Logf("Waiting for data to be available under container: %s", containerName)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Timed out...No data is available under the container: "+containerName)
}
func getAzureResourceGroupFromCluster(oc *exutil.CLI) (string, error) {
resourceGroup, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructures", "cluster", "-o=jsonpath={.status.platformStatus.azure.resourceGroupName}").Output()
return resourceGroup, err
}
// Get region/location of cluster running on Azure Cloud
func getAzureClusterRegion(oc *exutil.CLI) (string, error) {
return oc.AsAdmin().WithoutNamespace().Run("get").Args("node", `-ojsonpath={.items[].metadata.labels.topology\.kubernetes\.io/region}`).Output()
}
// Creates an Azure resource group.
func createAzureResourceGroup(resourceGroupName, subscriptionId, location string, credential azcore.TokenCredential) (armresources.ResourceGroupsClientCreateOrUpdateResponse, error) {
rgClient, _ := armresources.NewResourceGroupsClient(subscriptionId, credential, nil)
param := armresources.ResourceGroup{
Location: azto.Ptr(location),
}
return rgClient.CreateOrUpdate(context.Background(), resourceGroupName, param, nil)
}
// Delete a resource group.
func deleteAzureResourceGroup(resourceGroupName, subscriptionId string, credential azcore.TokenCredential) error {
rgClient, _ := armresources.NewResourceGroupsClient(subscriptionId, credential, nil)
poller, err := rgClient.BeginDelete(context.Background(), resourceGroupName, nil)
if err != nil {
return err
}
if _, err := poller.PollUntilDone(context.Background(), nil); err != nil {
return err
}
e2e.Logf("Successfully deleted resource group: %s", resourceGroupName)
return nil
}
type azureCredentials struct {
SubscriptionID string `json:"subscriptionId"`
ClientID string `json:"clientId"`
ClientSecret string `json:"clientSecret"`
TenantID string `json:"tenantId"`
}
// To read Azure subscription json file from local disk.
// Also injects ENV vars needed to perform certain operations on Managed Identities.
func readAzureCredentials() bool {
var (
azureCredFile string
azureCred azureCredentials
)
authFile, present := os.LookupEnv("AZURE_AUTH_LOCATION")
if present {
azureCredFile = authFile
} else {
envDir, present := os.LookupEnv("CLUSTER_PROFILE_DIR")
if present {
azureCredFile = filepath.Join(envDir, "osServicePrincipal.json")
}
}
if len(azureCredFile) > 0 {
fileContent, err := os.ReadFile(azureCredFile)
if err != nil {
e2e.Logf("can't read file %s: %v", azureCredFile, err)
return false
}
json.Unmarshal(fileContent, &azureCred)
os.Setenv("AZURE_SUBSCRIPTION_ID", azureCred.SubscriptionID)
os.Setenv("AZURE_TENANT_ID", azureCred.TenantID)
os.Setenv("AZURE_CLIENT_ID", azureCred.ClientID)
os.Setenv("AZURE_CLIENT_SECRET", azureCred.ClientSecret)
return true
}
return false
}
type azureMonitorLog struct {
azCred *azidentity.DefaultAzureCredential
clientOpts azpolicy.ClientOptions
customerID string
host string
location string
primaryKey string
resourceGroupName string
secondaryKey string
subscriptionID string
tPrefixOrName string // Depending on how the logType is defined in the CLF template, this can be the table name or the table name prefix.
workspaceID string
workspaceName string
}
// check the cloud type of the cluster's platform
func getAzureCloudName(oc *exutil.CLI) string {
cloudName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.azure.cloudName}").Output()
if err == nil && len(cloudName) > 0 {
return strings.ToLower(cloudName)
}
return ""
}
func (azLog *azureMonitorLog) getSourceGroupLocation() error {
resourceGroupClient, err := armresources.NewResourceGroupsClient(azLog.subscriptionID, azLog.azCred,
&azarm.ClientOptions{
ClientOptions: azLog.clientOpts,
},
)
if err != nil {
return err
}
ctx := context.Background()
resourceGroupGetResponse, err := resourceGroupClient.Get(
ctx,
azLog.resourceGroupName,
nil,
)
if err != nil {
return err
}
azLog.location = *resourceGroupGetResponse.ResourceGroup.Location
return nil
}
func (azLog *azureMonitorLog) createLogWorkspace() error {
e2e.Logf("Creating workspace")
workspacesClient, err := armoperationalinsights.NewWorkspacesClient(azLog.subscriptionID, azLog.azCred,
&azarm.ClientOptions{
ClientOptions: azLog.clientOpts,
},
)
if err != nil {
return err
}
ctx := context.Background()
pollerResp, err := workspacesClient.BeginCreateOrUpdate(
ctx,
azLog.resourceGroupName,
azLog.workspaceName,
armoperationalinsights.Workspace{
Location: azto.Ptr(azLog.location),
Properties: &armoperationalinsights.WorkspaceProperties{},
},
nil,
)
if err != nil {
return err
}
workspace, err := pollerResp.PollUntilDone(ctx, nil)
if err != nil {
return err
}
azLog.workspaceID = *workspace.ID
azLog.workspaceName = *workspace.Name
azLog.customerID = *workspace.Properties.CustomerID
shareKeyClient, err := armoperationalinsights.NewSharedKeysClient(azLog.subscriptionID, azLog.azCred,
&azarm.ClientOptions{
ClientOptions: azLog.clientOpts,
},
)
if err != nil {
return err
}
resp, err := shareKeyClient.GetSharedKeys(ctx, azLog.resourceGroupName, azLog.workspaceName, nil)
if err != nil {
return err
}
azLog.primaryKey = *resp.PrimarySharedKey
azLog.secondaryKey = *resp.SecondarySharedKey
return nil
}
// Builds an azureMonitorLog from env vars and the cluster, then creates or updates the Log Analytics workspace.
func newAzureLog(oc *exutil.CLI, location, resourceGroupName, workspaceName, tPrefixOrName string) (azureMonitorLog, error) {
var (
azLog azureMonitorLog
err error
)
azLog.tPrefixOrName = tPrefixOrName
azLog.workspaceName = workspaceName
azLog.resourceGroupName = resourceGroupName
// The workspace name must be between 4 and 63 characters.
// The workspace name can contain only letters, numbers and '-'. The '-' shouldn't be the first or the last symbol.
azLog.subscriptionID = os.Getenv("AZURE_SUBSCRIPTION_ID")
if len(azLog.subscriptionID) == 0 {
dat, err := oc.AsAdmin().WithoutNamespace().Run("get", "-n", "kube-system", "secret/azure-credentials", "-ojsonpath={.data.azure_subscription_id}").Output()
if err != nil {
return azLog, fmt.Errorf("failed to get secret/azure-credentials")
}
data, err := base64.StdEncoding.DecodeString(dat)
if err != nil {
return azLog, fmt.Errorf("failed to decode subscription_id from secret/azure-credentials")
}
azLog.subscriptionID = string(data)
if len(azLog.subscriptionID) == 0 {
return azLog, fmt.Errorf("failed as subscriptionID is empty")
}
}
platform := exutil.CheckPlatform(oc)
if platform == "azure" {
cloudName := getAzureCloudName(oc)
switch cloudName {
case "azurepubliccloud":
azLog.clientOpts = azcore.ClientOptions{Cloud: azcloud.AzurePublic}
azLog.host = "ods.opinsights.azure.com"
case "azureusgovernmentcloud":
azLog.clientOpts = azcore.ClientOptions{Cloud: azcloud.AzureGovernment}
azLog.host = "ods.opinsights.azure.us"
case "azurechinacloud":
//azLog.clientOpts = azcore.ClientOptions{Cloud: azcloud.AzureChina}
return azLog, fmt.Errorf("skip on AzureChinaCloud")
case "azuregermancloud":
return azLog, fmt.Errorf("skip on AzureGermanCloud")
case "azurestackcloud":
return azLog, fmt.Errorf("skip on AzureStackCloud")
default:
return azLog, fmt.Errorf("skip on %s", cloudName)
}
} else {
//TODO: get az cloud type from env vars
azLog.clientOpts = azcore.ClientOptions{Cloud: azcloud.AzurePublic}
azLog.host = "ods.opinsights.azure.com"
}
azLog.azCred, err = azidentity.NewDefaultAzureCredential(
&azidentity.DefaultAzureCredentialOptions{ClientOptions: azLog.clientOpts},
)
if err != nil {
return azLog, err
}
if location != "" {
azLog.location = location
} else {
err = azLog.getSourceGroupLocation()
if err != nil {
return azLog, err
}
}
err = azLog.createLogWorkspace()
if err != nil {
return azLog, err
}
return azLog, nil
}
// Create a secret for collector pods to forward logs to Log Analytics workspaces.
func (azLog *azureMonitorLog) createSecret(oc *exutil.CLI, name, namespace string) error {
return oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", name, "-n", namespace, "--from-literal=shared_key="+azLog.primaryKey).Execute()
}
// query logs per table in Log Analytics workspaces.
func (azLog *azureMonitorLog) getLogByTable(logTable string) ([]azlogs.Row, error) {
queryString := logTable + "| where TimeGenerated > ago(5m)|top 10 by TimeGenerated"
e2e.Logf("query %v", queryString)
var entries []azlogs.Row
client, err := azlogs.NewClient(azLog.azCred,
&azlogs.ClientOptions{
ClientOptions: azLog.clientOpts,
},
)
if err != nil {
return entries, err
}
//https://learn.microsoft.com/en-us/cli/azure/monitor/log-analytics?view=azure-cli-latest
//https://learn.microsoft.com/en-us/azure/data-explorer/kusto/query/
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
res, err1 := client.QueryWorkspace(
context.TODO(),
azLog.customerID,
azlogs.QueryBody{
Query: azto.Ptr(queryString),
},
nil)
if err1 != nil {
e2e.Logf("azlogs QueryWorkspace error: %v. continue", err1)
return false, nil
}
if res.Error != nil {
e2e.Logf("azlogs QueryWorkspace response error: %v, continue", res.Error)
return false, nil
}
for _, table := range res.Tables {
entries = append(entries, table.Rows...)
}
return len(entries) > 0, nil
})
return entries, err
}
// Delete LogWorkspace
func (azLog *azureMonitorLog) deleteWorkspace() error {
e2e.Logf("Delete workspace %v", azLog.workspaceName)
ctx := context.Background()
workspacesClient, err := armoperationalinsights.NewWorkspacesClient(azLog.subscriptionID, azLog.azCred,
&azarm.ClientOptions{
ClientOptions: azLog.clientOpts,
},
)
if err != nil {
return err
}
workspacesClient.BeginDelete(ctx, azLog.resourceGroupName, azLog.workspaceName, &armoperationalinsights.WorkspacesClientBeginDeleteOptions{Force: new(bool)})
return nil
}
| package logging | ||||
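`getLogByTable` in the file above is a thin wrapper around `azlogs.QueryWorkspace`. A standalone sketch of the same query technique, assuming a default Azure credential; the workspace customer ID and table name below are placeholders (the tests derive theirs from the workspace they create and from `tPrefixOrName`):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/monitor/query/azlogs"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := azlogs.NewClient(cred, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder workspace customer ID and KQL query over a placeholder table.
	workspaceID := "00000000-0000-0000-0000-000000000000"
	query := "aosqe_app_CL | where TimeGenerated > ago(5m) | top 10 by TimeGenerated"

	res, err := client.QueryWorkspace(context.TODO(), workspaceID, azlogs.QueryBody{Query: &query}, nil)
	if err != nil {
		log.Fatal(err)
	}
	if res.Error != nil {
		log.Fatalf("query returned a partial error: %v", res.Error)
	}
	for _, table := range res.Tables {
		fmt.Printf("returned %d rows\n", len(table.Rows))
	}
}
```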
function | openshift/openshift-tests-private | 40d06183-efbe-43ce-8d93-08834c9a627e | createNewDefaultAzureCredential | ['azto "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"', '"github.com/Azure/azure-sdk-for-go/sdk/azidentity"'] | github.com/openshift/openshift-tests-private/test/extended/logging/azure_utils.go | func createNewDefaultAzureCredential() *azidentity.DefaultAzureCredential {
cred, err := azidentity.NewDefaultAzureCredential(nil)
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to obtain a credential")
return cred
} | logging | ||||
function | openshift/openshift-tests-private | 2ea2577e-0748-4ff7-b982-5ee4378cb6b2 | createManagedIdentityOnAzure | ['"context"', 'azto "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"', '"github.com/Azure/azure-sdk-for-go/sdk/azidentity"', '"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi"'] | github.com/openshift/openshift-tests-private/test/extended/logging/azure_utils.go | func createManagedIdentityOnAzure(defaultAzureCred *azidentity.DefaultAzureCredential, azureSubscriptionID, lokiStackName, resourceGroup, region string) (string, string) {
// Create the MSI client
client, err := armmsi.NewUserAssignedIdentitiesClient(azureSubscriptionID, defaultAzureCred, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to create MSI client")
// Configure the managed identity
identity := armmsi.Identity{
Location: ®ion,
}
// Create the identity
result, err := client.CreateOrUpdate(context.Background(), resourceGroup, lokiStackName, identity, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to create or update the identity")
return *result.Properties.ClientID, *result.Properties.PrincipalID
} | logging | ||||
function | openshift/openshift-tests-private | 16f871fa-2262-4e1b-a351-12b47dcf0e86 | createFederatedCredentialforLoki | ['"context"', 'azto "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"', '"github.com/Azure/azure-sdk-for-go/sdk/azidentity"', '"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi"'] | github.com/openshift/openshift-tests-private/test/extended/logging/azure_utils.go | func createFederatedCredentialforLoki(defaultAzureCred *azidentity.DefaultAzureCredential, azureSubscriptionID, managedIdentityName, lokiServiceAccount, lokiStackNS, federatedCredentialName, serviceAccountIssuer, resourceGroup string) {
subjectName := "system:serviceaccount:" + lokiStackNS + ":" + lokiServiceAccount
// Create the Federated Identity Credentials client
client, err := armmsi.NewFederatedIdentityCredentialsClient(azureSubscriptionID, defaultAzureCred, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to create federated identity credentials client")
// Create or update the federated identity credential
result, err := client.CreateOrUpdate(
context.Background(),
resourceGroup,
managedIdentityName,
federatedCredentialName,
armmsi.FederatedIdentityCredential{
Properties: &armmsi.FederatedIdentityCredentialProperties{
Issuer: &serviceAccountIssuer,
Subject: &subjectName,
Audiences: []*string{azto.Ptr("api://AzureADTokenExchange")},
},
},
nil,
)
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to create or update the federated credential: "+federatedCredentialName)
e2e.Logf("Federated credential created/updated successfully: %s\n", *result.Name)
} | logging | ||||
function | openshift/openshift-tests-private | afadb165-4134-4b45-a97e-3c3b8815d992 | createRoleAssignmentForManagedIdentity | ['"context"', 'azto "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"', '"github.com/Azure/azure-sdk-for-go/sdk/azidentity"', '"github.com/google/uuid"'] | github.com/openshift/openshift-tests-private/test/extended/logging/azure_utils.go | func createRoleAssignmentForManagedIdentity(defaultAzureCred *azidentity.DefaultAzureCredential, azureSubscriptionID, identityPrincipalID string) {
clientFactory, err := armauthorization.NewClientFactory(azureSubscriptionID, defaultAzureCred, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to create instance of ClientFactory")
scope := "/subscriptions/" + azureSubscriptionID
// Below is standard role definition ID for Storage Blob Data Contributor built-in role
roleDefinitionID := scope + "/providers/Microsoft.Authorization/roleDefinitions/ba92f5b4-2d11-453d-a403-e96b0029c9fe"
// Create or update a role assignment by scope and name
_, err = clientFactory.NewRoleAssignmentsClient().Create(context.Background(), scope, uuid.NewString(), armauthorization.RoleAssignmentCreateParameters{
Properties: &armauthorization.RoleAssignmentProperties{
PrincipalID: azto.Ptr(identityPrincipalID),
PrincipalType: azto.Ptr(armauthorization.PrincipalTypeServicePrincipal),
RoleDefinitionID: azto.Ptr(roleDefinitionID),
},
}, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "Role Assignment operation failure....")
} | logging | ||||
function | openshift/openshift-tests-private | 9289ec30-856f-4af7-b9b2-12ddc5b72f13 | createStorageAccountOnAzure | ['"context"', '"os"', '"time"', '"github.com/Azure/azure-sdk-for-go/sdk/azidentity"', '"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage"'] | github.com/openshift/openshift-tests-private/test/extended/logging/azure_utils.go | func createStorageAccountOnAzure(defaultAzureCred *azidentity.DefaultAzureCredential, azureSubscriptionID, resourceGroup, region string) string {
storageAccountName := "aosqelogging" + getRandomString()
// Create the storage account
storageClient, err := armstorage.NewAccountsClient(azureSubscriptionID, defaultAzureCred, nil)
o.Expect(err).NotTo(o.HaveOccurred())
result, err := storageClient.BeginCreate(context.Background(), resourceGroup, storageAccountName, armstorage.AccountCreateParameters{
Location: azto.Ptr(region),
SKU: &armstorage.SKU{
Name: azto.Ptr(armstorage.SKUNameStandardLRS),
},
Kind: azto.Ptr(armstorage.KindStorageV2),
}, nil)
o.Expect(err).NotTo(o.HaveOccurred())
// Poll until the Storage account is ready
_, err = result.PollUntilDone(context.Background(), &azRuntime.PollUntilDoneOptions{
Frequency: 10 * time.Second,
})
o.Expect(err).NotTo(o.HaveOccurred(), "Storage account is not ready...")
os.Setenv("LOKI_OBJECT_STORAGE_STORAGE_ACCOUNT", storageAccountName)
return storageAccountName
} | logging | ||||
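A sketch of how the storage helpers above chain together inside this package, for example when provisioning Loki object storage in a test. The function name, resource group and region are placeholders, and `AZURE_SUBSCRIPTION_ID` is assumed to be exported (directly or via `readAzureCredentials`):

```go
// provisionLokiObjectStorage is a hypothetical helper name used only for illustration.
func provisionLokiObjectStorage(resourceGroup, region, containerName string) (storageAccountName string, cleanup func()) {
	cred := createNewDefaultAzureCredential()
	subscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID") // set directly or populated by readAzureCredentials()
	storageAccountName = createStorageAccountOnAzure(cred, subscriptionID, resourceGroup, region)
	// ".blob.core.windows.net" is the public-cloud suffix; use
	// getStorageAccountURISuffixAndEnvForAzure(oc) to pick the right suffix per cloud.
	createBlobContaineronAzure(cred, storageAccountName, ".blob.core.windows.net", containerName)
	cleanup = func() {
		deleteAzureStorageAccount(cred, subscriptionID, resourceGroup, storageAccountName)
	}
	return storageAccountName, cleanup
}
```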
function | openshift/openshift-tests-private | ef75a2e6-57aa-4849-ad26-aab781717819 | getStorageAccountURISuffixAndEnvForAzure | ['"strings"', 'azto "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"'] | github.com/openshift/openshift-tests-private/test/extended/logging/azure_utils.go | func getStorageAccountURISuffixAndEnvForAzure(oc *exutil.CLI) (string, string) {
// To return account URI suffix and env
cloudName, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.azure.cloudName}").Output()
storageAccountURISuffix := ".blob.core.windows.net"
environment := "AzureGlobal"
// Currently we don't have template support for STS/WIF on Azure Government
// The below code should be ok to run when support is added for WIF
if strings.ToLower(cloudName) == "azureusgovernmentcloud" {
storageAccountURISuffix = ".blob.core.usgovcloudapi.net"
environment = "AzureUSGovernment"
}
if strings.ToLower(cloudName) == "azurechinacloud" {
storageAccountURISuffix = ".blob.core.chinacloudapi.cn"
environment = "AzureChinaCloud"
}
if strings.ToLower(cloudName) == "azuregermancloud" {
environment = "AzureGermanCloud"
storageAccountURISuffix = ".blob.core.cloudapi.de"
}
return environment, storageAccountURISuffix
} | logging | ||||
function | openshift/openshift-tests-private | 088a98d8-ba61-46eb-b639-1b4b92987ae2 | createBlobContaineronAzure | ['"context"', '"fmt"', '"github.com/Azure/azure-sdk-for-go/sdk/azidentity"', '"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"'] | github.com/openshift/openshift-tests-private/test/extended/logging/azure_utils.go | func createBlobContaineronAzure(defaultAzureCred *azidentity.DefaultAzureCredential, storageAccountName, storageAccountURISuffix, containerName string) {
blobServiceClient, err := azblob.NewClient(fmt.Sprintf("https://%s%s", storageAccountName, storageAccountURISuffix), defaultAzureCred, nil)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = blobServiceClient.CreateContainer(context.Background(), containerName, nil)
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("%s container created successfully: ", containerName)
} | logging | ||||
function | openshift/openshift-tests-private | a05a2a6c-8081-4b8e-a34f-a2065d6118ea | createLokiObjectStorageSecretForWIF | github.com/openshift/openshift-tests-private/test/extended/logging/azure_utils.go | func createLokiObjectStorageSecretForWIF(oc *exutil.CLI, lokiStackNS, objectStorageSecretName, environment, containerName, storageAccountName string) error {
return oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", "-n", lokiStackNS, objectStorageSecretName, "--from-literal=environment="+environment, "--from-literal=container="+containerName, "--from-literal=account_name="+storageAccountName).Execute()
} | logging | |||||
function | openshift/openshift-tests-private | 22278cb0-0892-46cb-a37e-6bfe8ddfb2f8 | deleteAzureStorageAccount | ['"context"', 'azto "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"', '"github.com/Azure/azure-sdk-for-go/sdk/azidentity"', '"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage"'] | github.com/openshift/openshift-tests-private/test/extended/logging/azure_utils.go | func deleteAzureStorageAccount(defaultAzureCred *azidentity.DefaultAzureCredential, azureSubscriptionID, resourceGroupName, storageAccountName string) {
clientFactory, err := armstorage.NewClientFactory(azureSubscriptionID, defaultAzureCred, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to create instance of ClientFactory for storage account deletion")
_, err = clientFactory.NewAccountsClient().Delete(context.Background(), resourceGroupName, storageAccountName, nil)
if err != nil {
e2e.Logf("Error while deleting storage account: %s", err.Error())
} else {
e2e.Logf("storage account deleted successfully..")
}
} | logging | ||||
function | openshift/openshift-tests-private | 084c3703-d6c2-4d51-8a52-ed1bb9d060f1 | deleteManagedIdentityOnAzure | ['"context"', 'azto "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"', '"github.com/Azure/azure-sdk-for-go/sdk/azidentity"', '"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi"'] | github.com/openshift/openshift-tests-private/test/extended/logging/azure_utils.go | func deleteManagedIdentityOnAzure(defaultAzureCred *azidentity.DefaultAzureCredential, azureSubscriptionID, resourceGroupName, identityName string) {
client, err := armmsi.NewUserAssignedIdentitiesClient(azureSubscriptionID, defaultAzureCred, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to create MSI client for identity deletion")
_, err = client.Delete(context.Background(), resourceGroupName, identityName, nil)
if err != nil {
e2e.Logf("Error deleting identity: %s", err.Error())
} else {
e2e.Logf("managed identity deleted successfully...")
}
} | logging | ||||
function | openshift/openshift-tests-private | 20db0e80-c43c-4242-a356-dc61ab3d396b | patchLokiConfigIntoLokiSubscription | ['"fmt"', '"os"'] | github.com/openshift/openshift-tests-private/test/extended/logging/azure_utils.go | func patchLokiConfigIntoLokiSubscription(oc *exutil.CLI, azureSubscriptionID, identityClientID, region string) {
patchConfig := `{
"spec": {
"config": {
"env": [
{
"name": "CLIENTID",
"value": "%s"
},
{
"name": "TENANTID",
"value": "%s"
},
{
"name": "SUBSCRIPTIONID",
"value": "%s"
},
{
"name": "REGION",
"value": "%s"
}
]
}
}
}`
err := oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("patch").Args("sub", "loki-operator", "-n", loNS, "-p", fmt.Sprintf(patchConfig, identityClientID, os.Getenv("AZURE_TENANT_ID"), azureSubscriptionID, region), "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred(), "Patching Loki Operator failed...")
waitForPodReadyWithLabel(oc, loNS, "name=loki-operator-controller-manager")
} | logging | ||||
function | openshift/openshift-tests-private | fc301f1d-e058-4782-ba0a-e87253dc6d77 | performManagedIdentityAndSecretSetupForAzureWIF | ['"os"'] | github.com/openshift/openshift-tests-private/test/extended/logging/azure_utils.go | func performManagedIdentityAndSecretSetupForAzureWIF(oc *exutil.CLI, lokistackName, lokiStackNS, azureContainerName, lokiStackStorageSecretName string) {
region, err := getAzureClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
serviceAccountIssuer, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("authentication.config", "cluster", "-o=jsonpath={.spec.serviceAccountIssuer}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
resourceGroup, err := getAzureResourceGroupFromCluster(oc)
o.Expect(err).NotTo(o.HaveOccurred())
azureSubscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID")
cred := createNewDefaultAzureCredential()
identityClientID, identityPrincipalID := createManagedIdentityOnAzure(cred, azureSubscriptionID, lokistackName, resourceGroup, region)
createFederatedCredentialforLoki(cred, azureSubscriptionID, lokistackName, lokistackName, lokiStackNS, "openshift-logging-"+lokistackName, serviceAccountIssuer, resourceGroup)
createFederatedCredentialforLoki(cred, azureSubscriptionID, lokistackName, lokistackName+"-ruler", lokiStackNS, "openshift-logging-"+lokistackName+"-ruler", serviceAccountIssuer, resourceGroup)
createRoleAssignmentForManagedIdentity(cred, azureSubscriptionID, identityPrincipalID)
patchLokiConfigIntoLokiSubscription(oc, azureSubscriptionID, identityClientID, region)
storageAccountName := createStorageAccountOnAzure(cred, azureSubscriptionID, resourceGroup, region)
environment, storageAccountURISuffix := getStorageAccountURISuffixAndEnvForAzure(oc)
createBlobContaineronAzure(cred, storageAccountName, storageAccountURISuffix, azureContainerName)
err = createLokiObjectStorageSecretForWIF(oc, lokiStackNS, lokiStackStorageSecretName, environment, azureContainerName, storageAccountName)
o.Expect(err).NotTo(o.HaveOccurred())
} | logging | ||||
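A usage sketch, assuming an Azure WIF/STS cluster with AZURE_SUBSCRIPTION_ID and AZURE_TENANT_ID exported; the LokiStack, container and secret names are hypothetical test values.
// Sketch: prepare managed identity, federated credentials, role assignment, storage account and object-storage secret
// before deploying a LokiStack named "loki-wif-test" in the openshift-logging namespace.
performManagedIdentityAndSecretSetupForAzureWIF(oc, "loki-wif-test", loggingNS, "loki-wif-container", "loki-wif-storage-secret")
// The LokiStack CR can then reference "loki-wif-storage-secret" as its object storage secret.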
function | openshift/openshift-tests-private | 8041b08d-9c2b-460a-acff-0e52c970d818 | validatesIfLogsArePushedToAzureContainer | ['"context"', '"strings"', '"time"', 'azto "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"', '"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/logging/azure_utils.go | func validatesIfLogsArePushedToAzureContainer(storageAccountURISuffix, storageAccountName, containerName string, tenants []string) {
cred := createNewDefaultAzureCredential()
// Create a new Blob service client
serviceClient, err := azblob.NewClient("https://"+storageAccountName+storageAccountURISuffix, cred, nil)
o.Expect(err).NotTo(o.HaveOccurred(), "failed to create service client..")
// Poll to check log streams are flushed to container referenced under loki object storage secret
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
// Create a client to interact with the container and List blobs in the container
pager := serviceClient.NewListBlobsFlatPager(containerName, nil)
for pager.More() {
// advance to the next page
page, err := pager.NextPage(context.TODO())
o.Expect(err).NotTo(o.HaveOccurred())
// check the blob names for this page
for _, blob := range page.Segment.BlobItems {
for _, tenantName := range tenants {
if strings.Contains(*blob.Name, tenantName) {
e2e.Logf("Logs %s found under the container: %s", *blob.Name, containerName)
return true, nil
}
}
}
}
e2e.Logf("Waiting for data to be available under container: %s", containerName)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Timed out...No data is available under the container: "+containerName)
} | logging | ||||
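A brief usage sketch; the storage account and container names are hypothetical, and the tenant list mirrors the standard LokiStack log types.
// Sketch: confirm each tenant's log stream reached the blob container (account and container names are hypothetical).
_, storageAccountURISuffix := getStorageAccountURISuffixAndEnvForAzure(oc)
validatesIfLogsArePushedToAzureContainer(storageAccountURISuffix, "aosqelogging123", "loki-wif-container", []string{"application", "infrastructure", "audit"})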
function | openshift/openshift-tests-private | 18008482-e761-4d63-826b-6dbc97d052a3 | getAzureResourceGroupFromCluster | github.com/openshift/openshift-tests-private/test/extended/logging/azure_utils.go | func getAzureResourceGroupFromCluster(oc *exutil.CLI) (string, error) {
resourceGroup, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructures", "cluster", "-o=jsonpath={.status.platformStatus.azure.resourceGroupName}").Output()
return resourceGroup, err
} | logging | |||||
function | openshift/openshift-tests-private | 9a9bb22b-2481-4567-9e70-60968f42bada | getAzureClusterRegion | github.com/openshift/openshift-tests-private/test/extended/logging/azure_utils.go | func getAzureClusterRegion(oc *exutil.CLI) (string, error) {
return oc.AsAdmin().WithoutNamespace().Run("get").Args("node", `-ojsonpath={.items[].metadata.labels.topology\.kubernetes\.io/region}`).Output()
} | logging | |||||
function | openshift/openshift-tests-private | bfb20844-2a58-42e4-a8eb-536675489b87 | createAzureResourceGroup | ['"context"', '"github.com/Azure/azure-sdk-for-go/sdk/azcore"', '"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"'] | github.com/openshift/openshift-tests-private/test/extended/logging/azure_utils.go | func createAzureResourceGroup(resourceGroupName, subscriptionId, location string, credential azcore.TokenCredential) (armresources.ResourceGroupsClientCreateOrUpdateResponse, error) {
rgClient, _ := armresources.NewResourceGroupsClient(subscriptionId, credential, nil)
param := armresources.ResourceGroup{
Location: azto.Ptr(location),
}
return rgClient.CreateOrUpdate(context.Background(), resourceGroupName, param, nil)
} | logging | ||||
function | openshift/openshift-tests-private | 65a203f3-a805-45f8-9c1a-09d9422710cc | deleteAzureResourceGroup | ['"context"', '"github.com/Azure/azure-sdk-for-go/sdk/azcore"', '"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"'] | github.com/openshift/openshift-tests-private/test/extended/logging/azure_utils.go | func deleteAzureResourceGroup(resourceGroupName, subscriptionId string, credential azcore.TokenCredential) error {
rgClient, _ := armresources.NewResourceGroupsClient(subscriptionId, credential, nil)
poller, err := rgClient.BeginDelete(context.Background(), resourceGroupName, nil)
if err != nil {
return err
}
if _, err := poller.PollUntilDone(context.Background(), nil); err != nil {
return err
}
e2e.Logf("Successfully deleted resource group: %s", resourceGroupName)
return nil
} | logging | ||||
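A minimal sketch of the create/delete pair, assuming AZURE_SUBSCRIPTION_ID is exported and the default credential chain resolves; the group name and location are hypothetical.
// Sketch: provision a throwaway resource group for a test and remove it afterwards.
cred := createNewDefaultAzureCredential()
subscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID")
rgName := "logging-test-rg-" + getRandomString() // hypothetical name
_, err := createAzureResourceGroup(rgName, subscriptionID, "eastus", cred)
o.Expect(err).NotTo(o.HaveOccurred())
defer deleteAzureResourceGroup(rgName, subscriptionID, cred) // deletion error is intentionally ignored in this sketch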
function | openshift/openshift-tests-private | 756d349f-fcf5-4d64-aa39-79509c9fe190 | readAzureCredentials | ['"encoding/json"', '"os"', '"path/filepath"'] | ['azureCredentials'] | github.com/openshift/openshift-tests-private/test/extended/logging/azure_utils.go | func readAzureCredentials() bool {
var (
azureCredFile string
azureCred azureCredentials
)
authFile, present := os.LookupEnv("AZURE_AUTH_LOCATION")
if present {
azureCredFile = authFile
} else {
envDir, present := os.LookupEnv("CLUSTER_PROFILE_DIR")
if present {
azureCredFile = filepath.Join(envDir, "osServicePrincipal.json")
}
}
if len(azureCredFile) > 0 {
fileContent, err := os.ReadFile(azureCredFile)
if err != nil {
e2e.Logf("can't read file %s: %v", azureCredFile, err)
return false
}
if err = json.Unmarshal(fileContent, &azureCred); err != nil {
e2e.Logf("can't parse credentials from file %s: %v", azureCredFile, err)
return false
}
os.Setenv("AZURE_SUBSCRIPTION_ID", azureCred.SubscriptionID)
os.Setenv("AZURE_TENANT_ID", azureCred.TenantID)
os.Setenv("AZURE_CLIENT_ID", azureCred.ClientID)
os.Setenv("AZURE_CLIENT_SECRET", azureCred.ClientSecret)
return true
}
return false
} | logging | |||
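A typical guard sketch for specs that need Azure credentials; the skip message wording is an assumption.
// Sketch: skip the spec when neither AZURE_AUTH_LOCATION nor CLUSTER_PROFILE_DIR provides usable credentials.
if !readAzureCredentials() {
g.Skip("Azure credentials are not available, skip this case.")
}
// On success, AZURE_SUBSCRIPTION_ID, AZURE_TENANT_ID, AZURE_CLIENT_ID and AZURE_CLIENT_SECRET are exported,
// so the azidentity default credential chain used by the helpers above can authenticate.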
function | openshift/openshift-tests-private | 1a96d85d-2331-4cf7-919e-6d373469f874 | getAzureCloudName | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/logging/azure_utils.go | func getAzureCloudName(oc *exutil.CLI) string {
cloudName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.azure.cloudName}").Output()
if err == nil && len(cloudName) > 0 {
return strings.ToLower(cloudName)
}
return ""
} | logging | ||||
function | openshift/openshift-tests-private | de446b21-1b94-4d4b-8d45-fd33790e7e4e | getSourceGroupLocation | ['"context"', '"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"'] | ['azureMonitorLog'] | github.com/openshift/openshift-tests-private/test/extended/logging/azure_utils.go | func (azLog *azureMonitorLog) getSourceGroupLocation() error {
resourceGroupClient, err := armresources.NewResourceGroupsClient(azLog.subscriptionID, azLog.azCred,
&azarm.ClientOptions{
ClientOptions: azLog.clientOpts,
},
)
if err != nil {
return err
}
ctx := context.Background()
resourceGroupGetResponse, err := resourceGroupClient.Get(
ctx,
azLog.resourceGroupName,
nil,
)
if err != nil {
return err
}
azLog.location = *resourceGroupGetResponse.ResourceGroup.Location
return nil
} | logging | |||
function | openshift/openshift-tests-private | 742107b2-c003-42f2-876b-bf9093e507a8 | createLogWorkspace | ['"context"', '"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/operationalinsights/armoperationalinsights"'] | ['azureMonitorLog'] | github.com/openshift/openshift-tests-private/test/extended/logging/azure_utils.go | func (azLog *azureMonitorLog) createLogWorkspace() error {
e2e.Logf("Creating workspace")
workspacesClient, err := armoperationalinsights.NewWorkspacesClient(azLog.subscriptionID, azLog.azCred,
&azarm.ClientOptions{
ClientOptions: azLog.clientOpts,
},
)
if err != nil {
return err
}
ctx := context.Background()
pollerResp, err := workspacesClient.BeginCreateOrUpdate(
ctx,
azLog.resourceGroupName,
azLog.workspaceName,
armoperationalinsights.Workspace{
Location: azto.Ptr(azLog.location),
Properties: &armoperationalinsights.WorkspaceProperties{},
},
nil,
)
if err != nil {
return err
}
workspace, err := pollerResp.PollUntilDone(ctx, nil)
if err != nil {
return err
}
azLog.workspaceID = *workspace.ID
azLog.workspaceName = *workspace.Name
azLog.customerID = *workspace.Properties.CustomerID
shareKeyClient, err := armoperationalinsights.NewSharedKeysClient(azLog.subscriptionID, azLog.azCred,
&azarm.ClientOptions{
ClientOptions: azLog.clientOpts,
},
)
if err != nil {
return err
}
resp, err := shareKeyClient.GetSharedKeys(ctx, azLog.resourceGroupName, azLog.workspaceName, nil)
if err != nil {
return err
}
azLog.primaryKey = *resp.PrimarySharedKey
azLog.secondaryKey = *resp.SecondarySharedKey
return nil
} | logging | |||
function | openshift/openshift-tests-private | 7a80dc26-cd36-4b81-8938-7acfacfcac58 | newAzureLog | ['"encoding/base64"', '"fmt"', '"os"', '"github.com/Azure/azure-sdk-for-go/sdk/azcore"', 'azcloud "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"', 'azto "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"', '"github.com/Azure/azure-sdk-for-go/sdk/azidentity"'] | ['azureMonitorLog'] | github.com/openshift/openshift-tests-private/test/extended/logging/azure_utils.go | func newAzureLog(oc *exutil.CLI, location, resourceGroupName, workspaceName, tPrefixOrName string) (azureMonitorLog, error) {
var (
azLog azureMonitorLog
err error
)
azLog.tPrefixOrName = tPrefixOrName
azLog.workspaceName = workspaceName
azLog.resourceGroupName = resourceGroupName
// The workspace name must be between 4 and 63 characters.
// The workspace name can contain only letters, numbers and '-'. The '-' shouldn't be the first or the last symbol.
azLog.subscriptionID = os.Getenv("AZURE_SUBSCRIPTION_ID")
if len(azLog.subscriptionID) == 0 {
dat, err := oc.AsAdmin().WithoutNamespace().Run("get", "-n", "kube-system", "secret/azure-credentials", "-ojsonpath={.data.azure_subscription_id}").Output()
if err != nil {
return azLog, fmt.Errorf("failed to get secret/azure-credentials")
}
data, err := base64.StdEncoding.DecodeString(dat)
if err != nil {
return azLog, fmt.Errorf("failed to decode subscription_id from secret/azure-credentials")
}
azLog.subscriptionID = string(data)
if len(azLog.subscriptionID) == 0 {
return azLog, fmt.Errorf("failed as subscriptionID is empty")
}
}
platform := exutil.CheckPlatform(oc)
if platform == "azure" {
cloudName := getAzureCloudName(oc)
switch cloudName {
case "azurepubliccloud":
azLog.clientOpts = azcore.ClientOptions{Cloud: azcloud.AzurePublic}
azLog.host = "ods.opinsights.azure.com"
case "azureusgovernmentcloud":
azLog.clientOpts = azcore.ClientOptions{Cloud: azcloud.AzureGovernment}
azLog.host = "ods.opinsights.azure.us"
case "azurechinacloud":
//azLog.clientOpts = azcore.ClientOptions{Cloud: azcloud.AzureChina}
return azLog, fmt.Errorf("skip on AzureChinaCloud")
case "azuregermancloud":
return azLog, fmt.Errorf("skip on AzureGermanCloud")
case "azurestackcloud":
return azLog, fmt.Errorf("skip on AzureStackCloud")
default:
return azLog, fmt.Errorf("skip on %s", cloudName)
}
} else {
//TODO: get az cloud type from env vars
azLog.clientOpts = azcore.ClientOptions{Cloud: azcloud.AzurePublic}
azLog.host = "ods.opinsights.azure.com"
}
azLog.azCred, err = azidentity.NewDefaultAzureCredential(
&azidentity.DefaultAzureCredentialOptions{ClientOptions: azLog.clientOpts},
)
if err != nil {
return azLog, err
}
if location != "" {
azLog.location = location
} else {
err = azLog.getSourceGroupLocation()
if err != nil {
return azLog, err
}
}
err = azLog.createLogWorkspace()
if err != nil {
return azLog, err
}
return azLog, nil
} | logging | |||
function | openshift/openshift-tests-private | e77e8874-8b63-4229-b494-2894ceb778db | createSecret | ['azureMonitorLog'] | github.com/openshift/openshift-tests-private/test/extended/logging/azure_utils.go | func (azLog *azureMonitorLog) createSecret(oc *exutil.CLI, name, namespace string) error {
return oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", name, "-n", namespace, "--from-literal=shared_key="+azLog.primaryKey).Execute()
} | logging | ||||
function | openshift/openshift-tests-private | 56c136f1-7a8c-43ca-8332-2399216511d7 | getLogByTable | ['"context"', '"time"', '"github.com/Azure/azure-sdk-for-go/sdk/monitor/query/azlogs"', '"k8s.io/apimachinery/pkg/util/wait"'] | ['azureMonitorLog'] | github.com/openshift/openshift-tests-private/test/extended/logging/azure_utils.go | func (azLog *azureMonitorLog) getLogByTable(logTable string) ([]azlogs.Row, error) {
queryString := logTable + "| where TimeGenerated > ago(5m)|top 10 by TimeGenerated"
e2e.Logf("query %v", queryString)
var entries []azlogs.Row
client, err := azlogs.NewClient(azLog.azCred,
&azlogs.ClientOptions{
ClientOptions: azLog.clientOpts,
},
)
if err != nil {
return entries, err
}
//https://learn.microsoft.com/en-us/cli/azure/monitor/log-analytics?view=azure-cli-latest
//https://learn.microsoft.com/en-us/azure/data-explorer/kusto/query/
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
res, err1 := client.QueryWorkspace(
context.TODO(),
azLog.customerID,
azlogs.QueryBody{
Query: azto.Ptr(queryString),
},
nil)
if err1 != nil {
e2e.Logf("azlogs QueryWorkspace error: %v. continue", err1)
return false, nil
}
if res.Error != nil {
e2e.Logf("azlogs QueryWorkspace response error: %v, continue", res.Error)
return false, nil
}
for _, table := range res.Tables {
entries = append(entries, table.Rows...)
}
return len(entries) > 0, nil
})
return entries, err
} | logging | |||
function | openshift/openshift-tests-private | 6f0084f5-3911-4e89-9a77-4d0f15e57c93 | deleteWorkspace | ['"context"', '"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/operationalinsights/armoperationalinsights"'] | ['azureMonitorLog'] | github.com/openshift/openshift-tests-private/test/extended/logging/azure_utils.go | func (azLog *azureMonitorLog) deleteWorkspace() error {
e2e.Logf("Delete workspace %v", azLog.workspaceName)
ctx := context.Background()
workspacesClient, err := armoperationalinsights.NewWorkspacesClient(azLog.subscriptionID, azLog.azCred,
&azarm.ClientOptions{
ClientOptions: azLog.clientOpts,
},
)
if err != nil {
return err
}
_, err = workspacesClient.BeginDelete(ctx, azLog.resourceGroupName, azLog.workspaceName, &armoperationalinsights.WorkspacesClientBeginDeleteOptions{Force: new(bool)})
return err
} | logging | |||
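A lifecycle sketch tying the azureMonitorLog helpers together, reusing the cluster's resource group; the workspace name, table prefix, secret name and table name are hypothetical.
// Sketch: create a Log Analytics workspace, store its primary shared key as a secret, then query a custom table.
resourceGroup, err := getAzureResourceGroupFromCluster(oc)
o.Expect(err).NotTo(o.HaveOccurred())
azLog, err := newAzureLog(oc, "", resourceGroup, "logging-75234-workspace", "ocp75234")
o.Expect(err).NotTo(o.HaveOccurred())
defer azLog.deleteWorkspace()
defer resource{"secret", "azure-monitor-secret", loggingNS}.clear(oc)
o.Expect(azLog.createSecret(oc, "azure-monitor-secret", loggingNS)).NotTo(o.HaveOccurred())
// ...forward logs with a ClusterLogForwarder azureMonitor output that references this secret, then poll the workspace:
rows, err := azLog.getLogByTable("ocp75234_infra_log_CL") // hypothetical table name; custom Log Analytics tables end in "_CL"
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(rows).NotTo(o.BeEmpty())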
test | openshift/openshift-tests-private | ec203056-23f8-4c6c-ab8b-89b65e679cfd | logging_operators | import (
"context"
"encoding/json"
"fmt"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/logging/logging_operators.go | package logging
import (
"context"
"encoding/json"
"fmt"
"path/filepath"
"strings"
"time"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-openshift-logging] Logging NonPreRelease vector-loki upgrade testing", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("logging-loki-upgrade", exutil.KubeConfigPath())
loggingBaseDir string
)
g.BeforeEach(func() {
if len(getStorageType(oc)) == 0 {
g.Skip("Current cluster doesn't have a proper object storage for this test!")
}
if !validateInfraForLoki(oc) {
g.Skip("Current platform not supported!")
}
loggingBaseDir = exutil.FixturePath("testdata", "logging")
clo := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
}
lo := SubscriptionObjects{
OperatorName: "loki-operator-controller-manager",
Namespace: "openshift-operators-redhat",
PackageName: "loki-operator",
}
g.By("uninstall CLO and LO")
clo.uninstallOperator(oc)
lo.uninstallOperator(oc)
for _, crd := range []string{"alertingrules.loki.grafana.com", "lokistacks.loki.grafana.com", "recordingrules.loki.grafana.com", "rulerconfigs.loki.grafana.com"} {
_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("crd", crd).Execute()
}
})
g.AfterEach(func() {
for _, crd := range []string{"alertingrules.loki.grafana.com", "lokistacks.loki.grafana.com", "recordingrules.loki.grafana.com", "rulerconfigs.loki.grafana.com"} {
_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("crd", crd).Execute()
}
})
// author [email protected]
g.It("Longduration-CPaasrunOnly-Author:qitang-Critical-53407-Cluster Logging upgrade with Vector as collector - minor version.[Serial][Slow]", func() {
g.Skip("Skip for logging 6.2 is not released!")
var targetchannel = "stable-6.2"
var oh OperatorHub
g.By("check source/redhat-operators status in operatorhub")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("operatorhub/cluster", "-ojson").Output()
o.Expect(err).NotTo(o.HaveOccurred())
json.Unmarshal([]byte(output), &oh)
var disabled bool
for _, source := range oh.Status.Sources {
if source.Name == "redhat-operators" {
disabled = source.Disabled
break
}
}
if disabled {
g.Skip("source/redhat-operators is disabled, skip this case.")
}
g.By(fmt.Sprintf("Subscribe operators to %s channel", targetchannel))
source := CatalogSourceObjects{
Channel: targetchannel,
SourceName: "redhat-operators",
SourceNamespace: "openshift-marketplace",
}
subTemplate := filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml")
preCLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
CatalogSource: source,
}
preLO := SubscriptionObjects{
OperatorName: "loki-operator-controller-manager",
Namespace: loNS,
PackageName: "loki-operator",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
CatalogSource: source,
}
defer preCLO.uninstallOperator(oc)
preCLO.SubscribeOperator(oc)
defer preLO.uninstallOperator(oc)
preLO.SubscribeOperator(oc)
g.By("Deploy lokistack")
sc, err := getStorageClassName(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls := lokiStack{
name: "loki-53407",
namespace: loggingNS,
tSize: "1x.demo",
storageType: getStorageType(oc),
storageSecret: "storage-secret-53407",
storageClass: sc,
bucketName: "logging-loki-53407-" + getInfrastructureName(oc),
template: filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml"),
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance",
namespace: loggingNS,
serviceAccountName: "logcollector",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
enableMonitoring: true,
}
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
exutil.By("deploy logfilesmetricexporter")
lfme := logFileMetricExporter{
name: "instance",
namespace: loggingNS,
template: filepath.Join(loggingBaseDir, "logfilemetricexporter", "lfme.yaml"),
waitPodsReady: true,
}
defer lfme.delete(oc)
lfme.create(oc)
//get current csv version
preCloCSV := preCLO.getInstalledCSV(oc)
preLoCSV := preLO.getInstalledCSV(oc)
// get currentCSV in packagemanifests
currentCloCSV := getCurrentCSVFromPackage(oc, "qe-app-registry", targetchannel, preCLO.PackageName)
currentLoCSV := getCurrentCSVFromPackage(oc, "qe-app-registry", targetchannel, preLO.PackageName)
var upgraded = false
//change source to qe-app-registry if needed, and wait for the new operators to be ready
if preCloCSV != currentCloCSV {
g.By(fmt.Sprintf("upgrade CLO to %s", currentCloCSV))
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", preCLO.Namespace, "sub/"+preCLO.PackageName, "-p", "{\"spec\": {\"source\": \"qe-app-registry\"}}", "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
checkResource(oc, true, true, currentCloCSV, []string{"sub", preCLO.PackageName, "-n", preCLO.Namespace, "-ojsonpath={.status.currentCSV}"})
WaitForDeploymentPodsToBeReady(oc, preCLO.Namespace, preCLO.OperatorName)
upgraded = true
}
if preLoCSV != currentLoCSV {
g.By(fmt.Sprintf("upgrade LO to %s", currentLoCSV))
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", preLO.Namespace, "sub/"+preLO.PackageName, "-p", "{\"spec\": {\"source\": \"qe-app-registry\"}}", "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
checkResource(oc, true, true, currentLoCSV, []string{"sub", preLO.PackageName, "-n", preLO.Namespace, "-ojsonpath={.status.currentCSV}"})
WaitForDeploymentPodsToBeReady(oc, preLO.Namespace, preLO.OperatorName)
upgraded = true
}
if upgraded {
g.By("waiting for the Loki and Vector pods to be ready after upgrade")
ls.waitForLokiStackToBeReady(oc)
clf.waitForCollectorPodsReady(oc)
WaitForDaemonsetPodsToBeReady(oc, lfme.namespace, "logfilesmetricexporter")
// In upgrade testing, a pod may occasionally be not ready even though its deployment/statefulset reports ready,
// so explicitly check the pods' status here.
waitForPodReadyWithLabel(oc, ls.namespace, "app.kubernetes.io/instance="+ls.name)
g.By("checking if the collector can collect logs after upgrading")
oc.SetupProject()
appProj := oc.Namespace()
defer removeClusterRoleFromServiceAccount(oc, appProj, "default", "cluster-admin")
addClusterRoleToServiceAccount(oc, appProj, "default", "cluster-admin")
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", appProj)
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
res, err := lc.searchByNamespace("application", appProj)
if err != nil {
e2e.Logf("\ngot err when getting application logs: %v, continue\n", err)
return false, nil
}
if len(res.Data.Result) > 0 {
return true, nil
}
e2e.Logf("\n len(res.Data.Result) not > 0, continue\n")
return false, nil
})
exutil.AssertWaitPollNoErr(err, "application logs are not found")
exutil.By("Check if the cm/grafana-dashboard-cluster-logging is created or not after upgrading")
resource{"configmap", "grafana-dashboard-cluster-logging", "openshift-config-managed"}.WaitForResourceToAppear(oc)
}
})
// author: [email protected]
g.It("Longduration-CPaasrunOnly-Author:qitang-Critical-53404-Cluster Logging upgrade with Vector as collector - major version.[Serial][Slow]", func() {
// to add logging 6.0, create a new catalog source with image: quay.io/openshift-qe-optional-operators/aosqe-index
catsrcTemplate := exutil.FixturePath("testdata", "logging", "subscription", "catsrc.yaml")
catsrc := resource{"catsrc", "logging-upgrade-" + getRandomString(), "openshift-marketplace"}
tag, err := getIndexImageTag(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer catsrc.clear(oc)
catsrc.applyFromTemplate(oc, "-f", catsrcTemplate, "-n", catsrc.namespace, "-p", "NAME="+catsrc.name, "-p", "IMAGE=quay.io/openshift-qe-optional-operators/aosqe-index:v"+tag)
waitForPodReadyWithLabel(oc, catsrc.namespace, "olm.catalogSource="+catsrc.name)
// for 6.2, test upgrade from 6.1 to 6.2
preSource := CatalogSourceObjects{"stable-6.1", catsrc.name, catsrc.namespace}
g.By(fmt.Sprintf("Subscribe operators to %s channel", preSource.Channel))
subTemplate := filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml")
preCLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
CatalogSource: preSource,
}
preLO := SubscriptionObjects{
OperatorName: "loki-operator-controller-manager",
Namespace: loNS,
PackageName: "loki-operator",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
CatalogSource: preSource,
}
defer preCLO.uninstallOperator(oc)
preCLO.SubscribeOperator(oc)
defer preLO.uninstallOperator(oc)
preLO.SubscribeOperator(oc)
g.By("Deploy lokistack")
sc, err := getStorageClassName(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls := lokiStack{
name: "loki-53404",
namespace: loggingNS,
tSize: "1x.demo",
storageType: getStorageType(oc),
storageSecret: "storage-secret-53404",
storageClass: sc,
bucketName: "logging-loki-53404-" + getInfrastructureName(oc),
template: filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml"),
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("deploy logfilesmetricexporter")
lfme := logFileMetricExporter{
name: "instance",
namespace: loggingNS,
template: filepath.Join(loggingBaseDir, "logfilemetricexporter", "lfme.yaml"),
waitPodsReady: true,
}
defer lfme.delete(oc)
lfme.create(oc)
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance",
namespace: loggingNS,
serviceAccountName: "logcollector",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
enableMonitoring: true,
}
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
//change channel, and wait for the new operators to be ready
var source = CatalogSourceObjects{"stable-6.1", "qe-app-registry", "openshift-marketplace"}
version := strings.Split(source.Channel, "-")[1]
g.By(fmt.Sprintf("upgrade CLO&LO to %s", source.Channel))
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", preCLO.Namespace, "sub/"+preCLO.PackageName, "-p", "{\"spec\": {\"channel\": \""+source.Channel+"\", \"source\": \""+source.SourceName+"\", \"sourceNamespace\": \""+source.SourceNamespace+"\"}}", "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", preLO.Namespace, "sub/"+preLO.PackageName, "-p", "{\"spec\": {\"channel\": \""+source.Channel+"\", \"source\": \""+source.SourceName+"\", \"sourceNamespace\": \""+source.SourceNamespace+"\"}}", "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
checkResource(oc, true, false, version, []string{"sub", preCLO.PackageName, "-n", preCLO.Namespace, "-ojsonpath={.status.currentCSV}"})
cloCurrentCSV, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", "-n", preCLO.Namespace, preCLO.PackageName, "-ojsonpath={.status.currentCSV}").Output()
resource{"csv", cloCurrentCSV, preCLO.Namespace}.WaitForResourceToAppear(oc)
checkResource(oc, true, true, "Succeeded", []string{"csv", cloCurrentCSV, "-n", preCLO.Namespace, "-ojsonpath={.status.phase}"})
WaitForDeploymentPodsToBeReady(oc, preCLO.Namespace, preCLO.OperatorName)
checkResource(oc, true, false, version, []string{"sub", preLO.PackageName, "-n", preLO.Namespace, "-ojsonpath={.status.currentCSV}"})
loCurrentCSV, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", "-n", preLO.Namespace, preLO.PackageName, "-ojsonpath={.status.currentCSV}").Output()
resource{"csv", loCurrentCSV, preLO.Namespace}.WaitForResourceToAppear(oc)
checkResource(oc, true, true, "Succeeded", []string{"csv", loCurrentCSV, "-n", preLO.Namespace, "-ojsonpath={.status.phase}"})
WaitForDeploymentPodsToBeReady(oc, preLO.Namespace, preLO.OperatorName)
ls.waitForLokiStackToBeReady(oc)
clf.waitForCollectorPodsReady(oc)
WaitForDaemonsetPodsToBeReady(oc, lfme.namespace, "logfilesmetricexporter")
// In upgrade testing, a pod may occasionally be not ready even though its deployment/statefulset reports ready,
// so explicitly check the pods' status here.
waitForPodReadyWithLabel(oc, ls.namespace, "app.kubernetes.io/instance="+ls.name)
g.By("checking if the collector can collect logs after upgrading")
oc.SetupProject()
appProj := oc.Namespace()
defer removeClusterRoleFromServiceAccount(oc, appProj, "default", "cluster-admin")
addClusterRoleToServiceAccount(oc, appProj, "default", "cluster-admin")
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", appProj)
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
res, err := lc.searchByNamespace("application", appProj)
if err != nil {
e2e.Logf("\ngot err when getting application logs: %v, continue\n", err)
return false, nil
}
if len(res.Data.Result) > 0 {
return true, nil
}
e2e.Logf("\n len(res.Data.Result) not > 0, continue\n")
return false, nil
})
exutil.AssertWaitPollNoErr(err, "application logs are not found")
// Creating cluster roles to allow read access from LokiStack
defer deleteLokiClusterRolesForReadAccess(oc)
createLokiClusterRolesForReadAccess(oc)
g.By("checking if regular user can view his logs after upgrading")
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-user", "cluster-logging-application-view", oc.Username(), "-n", appProj).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
userToken, err := oc.Run("whoami").Args("-t").Output()
o.Expect(err).NotTo(o.HaveOccurred())
lc0 := newLokiClient(route).withToken(userToken).retry(5)
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
res, err := lc0.searchByNamespace("application", appProj)
if err != nil {
e2e.Logf("\ngot err when getting application logs: %v, continue\n", err)
return false, nil
}
if len(res.Data.Result) > 0 {
return true, nil
}
e2e.Logf("\n len(res.Data.Result) not > 0, continue\n")
return false, nil
})
exutil.AssertWaitPollNoErr(err, "can't get application logs with normal user")
exutil.By("Check if the cm/grafana-dashboard-cluster-logging is created or not after upgrading")
resource{"configmap", "grafana-dashboard-cluster-logging", "openshift-config-managed"}.WaitForResourceToAppear(oc)
})
})
var _ = g.Describe("[sig-openshift-logging] Logging NonPreRelease operator deployments", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("logging-operators", exutil.KubeConfigPath())
loggingBaseDir string
)
g.BeforeEach(func() {
loggingBaseDir = exutil.FixturePath("testdata", "logging")
})
g.It("CPaasrunOnly-Author:anli-Low-65518-deploy cluster-logging-operator after datadog-agent is deployed [Disruptive]", func() {
oc.SetupProject()
datadogNS := oc.Namespace()
subTemplate := filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml")
ogPath := filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml")
podLabel := "app.kubernetes.io/name=datadog-operator"
g.By("Make the datadog operator ready")
sourceCert := CatalogSourceObjects{
Channel: "stable",
SourceName: "certified-operators",
SourceNamespace: "openshift-marketplace",
}
subDog := SubscriptionObjects{
OperatorName: "datadog-operator-certified",
PackageName: "datadog-operator-certified",
Namespace: datadogNS,
Subscription: subTemplate,
OperatorPodLabel: podLabel,
OperatorGroup: ogPath,
CatalogSource: sourceCert,
SkipCaseWhenFailed: true,
}
subDog.SubscribeOperator(oc)
g.By("Delete cluster-logging operator if exist")
sourceQE := CatalogSourceObjects{
Channel: "stable-6.1",
SourceName: "qe-app-registry",
SourceNamespace: "openshift-marketplace",
}
subCLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: "openshift-logging",
PackageName: "cluster-logging",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
CatalogSource: sourceQE,
}
subCLO.uninstallOperator(oc)
g.By("deploy cluster-logging operator")
subCLO.SubscribeOperator(oc)
})
})
var _ = g.Describe("[sig-openshift-logging] Logging NonPreRelease multi-mode testing", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("logging-multiple-mode", exutil.KubeConfigPath())
loggingBaseDir string
)
g.BeforeEach(func() {
loggingBaseDir = exutil.FixturePath("testdata", "logging")
subTemplate := filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml")
CLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
LO := SubscriptionObjects{
OperatorName: "loki-operator-controller-manager",
Namespace: loNS,
PackageName: "loki-operator",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
g.By("deploy CLO and LO")
CLO.SubscribeOperator(oc)
LO.SubscribeOperator(oc)
oc.SetupProject()
})
// author: [email protected]
g.It("CPaasrunOnly-Author:qitang-Medium-64147-Deploy Logfilesmetricexporter as an independent pod.[Serial]", func() {
template := filepath.Join(loggingBaseDir, "logfilemetricexporter", "lfme.yaml")
lfme := logFileMetricExporter{
name: "instance",
namespace: loggingNS,
template: template,
waitPodsReady: true,
}
defer lfme.delete(oc)
lfme.create(oc)
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
g.By("check metrics exposed by logfilemetricexporter")
checkMetric(oc, token, "{job=\"logfilesmetricexporter\"}", 5)
sc, err := getStorageClassName(oc)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploying LokiStack CR")
ls := lokiStack{
name: "loki-64147",
namespace: loggingNS,
tSize: "1x.demo",
storageType: getStorageType(oc),
storageSecret: "storage-64147",
storageClass: sc,
bucketName: "logging-loki-64147-" + getInfrastructureName(oc),
template: filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml"),
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance",
namespace: loggingNS,
serviceAccountName: "logcollector",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
enableMonitoring: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
g.By("Remove clusterlogforwarder")
clf.delete(oc)
g.By("Check LFME pods, they should not be removed")
WaitForDaemonsetPodsToBeReady(oc, lfme.namespace, "logfilesmetricexporter")
g.By("Remove LFME, the pods should be removed")
lfme.delete(oc)
g.By("Create LFME with invalid name")
lfmeInvalidName := resource{
kind: "logfilemetricexporters.logging.openshift.io",
name: "test-lfme-64147",
namespace: loggingNS,
}
defer lfmeInvalidName.clear(oc)
err = lfmeInvalidName.applyFromTemplate(oc, "-f", template, "-p", "NAME="+lfmeInvalidName.name, "-p", "NAMESPACE="+lfmeInvalidName.namespace)
o.Expect(strings.Contains(err.Error(), "metadata.name: Unsupported value: \""+lfmeInvalidName.name+"\": supported values: \"instance\"")).Should(o.BeTrue())
g.By("Create LFME with invalid namespace")
lfmeInvalidNamespace := logFileMetricExporter{
name: "instance",
namespace: oc.Namespace(),
template: filepath.Join(loggingBaseDir, "logfilemetricexporter", "lfme.yaml"),
}
defer lfmeInvalidNamespace.delete(oc)
lfmeInvalidNamespace.create(oc)
checkResource(oc, true, false, "validation failed: Invalid namespace name \""+lfmeInvalidNamespace.namespace+"\", instance must be in \"openshift-logging\" namespace", []string{"lfme/" + lfmeInvalidNamespace.name, "-n", lfmeInvalidNamespace.namespace, "-ojsonpath={.status.conditions[*].message}"})
})
// author [email protected]
g.It("CPaasrunOnly-Author:qitang-Medium-65407-ClusterLogForwarder validation for the serviceaccount.[Slow]", func() {
clfNS := oc.Namespace()
exutil.By("Deploy ES server")
ees := externalES{
namespace: clfNS,
version: "8",
serverName: "elasticsearch-server",
loggingNS: clfNS,
}
defer ees.remove(oc)
ees.deploy(oc)
logFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
exutil.By("create pod to generate logs")
oc.SetupProject()
proj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", proj, "-f", logFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create clusterlogforwarder with a non-existing serviceaccount")
clf := clusterlogforwarder{
name: "collector-65407",
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch.yaml"),
}
defer clf.delete(oc)
clf.create(oc, "ES_URL=http://"+ees.serverName+"."+ees.namespace+".svc:9200", "ES_VERSION="+ees.version, "SERVICE_ACCOUNT_NAME=logcollector", "INPUT_REFS=[\"application\"]")
checkResource(oc, true, false, `ServiceAccount "logcollector" not found`, []string{"clf/" + clf.name, "-n", clf.namespace, "-ojsonpath={.status.conditions[*].message}"})
ds := resource{
kind: "daemonset",
name: clf.name,
namespace: clf.namespace,
}
dsErr := ds.WaitUntilResourceIsGone(oc)
o.Expect(dsErr).NotTo(o.HaveOccurred())
exutil.By("Create the serviceaccount and create rolebinding to bind clusterrole to the serviceaccount")
sa := resource{
kind: "serviceaccount",
name: "logcollector",
namespace: clfNS,
}
defer sa.clear(oc)
err = createServiceAccount(oc, sa.namespace, sa.name)
o.Expect(err).NotTo(o.HaveOccurred(), "get error when creating serviceaccount "+sa.name)
defer oc.AsAdmin().WithoutNamespace().Run("policy").Args("remove-role-from-user", "collect-application-logs", fmt.Sprintf("system:serviceaccount:%s:%s", sa.namespace, sa.name), "-n", sa.namespace).Execute()
err = oc.AsAdmin().WithoutNamespace().Run("policy").Args("add-role-to-user", "collect-application-logs", fmt.Sprintf("system:serviceaccount:%s:%s", sa.namespace, sa.name), "-n", sa.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// wait for 2 minutes for CLO to update the status in CLF
time.Sleep(2 * time.Minute)
checkResource(oc, true, false, `insufficient permissions on service account, not authorized to collect ["application"] logs`, []string{"clf/" + clf.name, "-n", clf.namespace, "-ojsonpath={.status.conditions[*].message}"})
dsErr = ds.WaitUntilResourceIsGone(oc)
o.Expect(dsErr).NotTo(o.HaveOccurred())
exutil.By("Create clusterrolebinding to bind clusterrole to the serviceaccount")
defer removeClusterRoleFromServiceAccount(oc, sa.namespace, sa.name, "collect-application-logs")
addClusterRoleToServiceAccount(oc, sa.namespace, sa.name, "collect-application-logs")
// wait for 2 minutes for CLO to update the status in CLF
time.Sleep(2 * time.Minute)
checkResource(oc, true, false, "True", []string{"clf/" + clf.name, "-n", clf.namespace, "-ojsonpath={.status.conditions[?(@.type == \"Ready\")].status}"})
exutil.By("Collector pods should be deployed and logs can be forwarded to external log store")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
ees.waitForIndexAppear(oc, "app")
exutil.By("Delete the serviceaccount, the collector pods should be removed")
err = sa.clear(oc)
o.Expect(err).NotTo(o.HaveOccurred())
checkResource(oc, true, false, "ServiceAccount \""+sa.name+"\" not found", []string{"clf/" + clf.name, "-n", clf.namespace, "-ojsonpath={.status.conditions[*].message}"})
dsErr = ds.WaitUntilResourceIsGone(oc)
o.Expect(dsErr).NotTo(o.HaveOccurred())
exutil.By("Recreate the sa and add proper clusterroles to it, the collector pods should be recreated")
err = createServiceAccount(oc, sa.namespace, sa.name)
o.Expect(err).NotTo(o.HaveOccurred(), "get error when creating serviceaccount "+sa.name)
addClusterRoleToServiceAccount(oc, sa.namespace, sa.name, "collect-application-logs")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
exutil.By("Remove spec.serviceAccount from CLF")
msg, err := clf.patch(oc, `[{"op": "remove", "path": "/spec/serviceAccount"}]`)
o.Expect(err).To(o.HaveOccurred())
o.Expect(strings.Contains(msg, "spec.serviceAccount: Required value")).To(o.BeTrue())
})
// author [email protected]
g.It("CPaasrunOnly-Author:qitang-Medium-65408-ClusterLogForwarder validation when roles don't match.", func() {
clfNS := oc.Namespace()
loki := externalLoki{"loki-server", clfNS}
defer loki.remove(oc)
loki.deployLoki(oc)
exutil.By("Create ClusterLogForwarder with a serviceaccount which doesn't have proper clusterroles")
clf := clusterlogforwarder{
name: "collector-65408",
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "loki.yaml"),
serviceAccountName: "clf-collector",
}
defer clf.delete(oc)
clf.create(oc, "URL=http://"+loki.name+"."+loki.namespace+".svc:3100")
checkResource(oc, true, false, `insufficient permissions on service account, not authorized to collect ["application" "audit" "infrastructure"] logs`, []string{"clf/" + clf.name, "-n", clf.namespace, "-ojsonpath={.status.conditions[*].message}"})
ds := resource{
kind: "daemonset",
name: clf.name,
namespace: clf.namespace,
}
dsErr := ds.WaitUntilResourceIsGone(oc)
o.Expect(dsErr).NotTo(o.HaveOccurred())
exutil.By("Create a new sa and add clusterrole/collect-application-logs to the new sa, then update the CLF to use the new sa")
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("sa", "collect-application-logs", "-n", clf.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, "collect-application-logs", "collect-application-logs")
err = addClusterRoleToServiceAccount(oc, clf.namespace, "collect-application-logs", "collect-application-logs")
o.Expect(err).NotTo(o.HaveOccurred())
clf.update(oc, "", "{\"spec\": {\"serviceAccount\": {\"name\": \"collect-application-logs\"}}}", "--type=merge")
checkResource(oc, true, false, `insufficient permissions on service account, not authorized to collect ["audit" "infrastructure"] logs`, []string{"clf/" + clf.name, "-n", clf.namespace, "-ojsonpath={.status.conditions[*].message}"})
dsErr = ds.WaitUntilResourceIsGone(oc)
o.Expect(dsErr).NotTo(o.HaveOccurred())
exutil.By("Create a new sa and add clusterrole/collect-infrastructure-logs to the new sa, then update the CLF to use the new sa")
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("sa", "collect-infrastructure-logs", "-n", clf.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, "collect-infrastructure-logs", "collect-infrastructure-logs")
err = addClusterRoleToServiceAccount(oc, clf.namespace, "collect-infrastructure-logs", "collect-infrastructure-logs")
o.Expect(err).NotTo(o.HaveOccurred())
clf.update(oc, "", "{\"spec\": {\"serviceAccount\": {\"name\": \"collect-infrastructure-logs\"}}}", "--type=merge")
checkResource(oc, true, false, `insufficient permissions on service account, not authorized to collect ["application" "audit"] logs`, []string{"clf/" + clf.name, "-n", clf.namespace, "-ojsonpath={.status.conditions[*].message}"})
dsErr = ds.WaitUntilResourceIsGone(oc)
o.Expect(dsErr).NotTo(o.HaveOccurred())
exutil.By("Create a new sa and add clusterrole/collect-audit-logs to the new sa, then update the CLF to use the new sa")
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("sa", "collect-audit-logs", "-n", clf.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, "collect-audit-logs", "collect-audit-logs")
err = addClusterRoleToServiceAccount(oc, clf.namespace, "collect-audit-logs", "collect-audit-logs")
o.Expect(err).NotTo(o.HaveOccurred())
clf.update(oc, "", "{\"spec\": {\"serviceAccount\": {\"name\": \"collect-audit-logs\"}}}", "--type=merge")
checkResource(oc, true, false, `insufficient permissions on service account, not authorized to collect ["application" "infrastructure"] logs`, []string{"clf/" + clf.name, "-n", clf.namespace, "-ojsonpath={.status.conditions[*].message}"})
dsErr = ds.WaitUntilResourceIsGone(oc)
o.Expect(dsErr).NotTo(o.HaveOccurred())
exutil.By("Create a new sa and add all clusterroles to the new sa, then update the CLF to use the new sa")
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("sa", "collect-all-logs", "-n", clf.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
for _, logType := range []string{"application", "infrastructure", "audit"} {
role := "collect-" + logType + "-logs"
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, "collect-all-logs", role)
err = addClusterRoleToServiceAccount(oc, clf.namespace, "collect-all-logs", role)
o.Expect(err).NotTo(o.HaveOccurred())
}
clf.update(oc, "", "{\"spec\": {\"serviceAccount\": {\"name\": \"collect-all-logs\"}}}", "--type=merge")
checkResource(oc, true, false, "True", []string{"clf/" + clf.name, "-n", clf.namespace, "-ojsonpath={.status.conditions[?(@.type == \"Ready\")].status}"})
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
exutil.By("Remove clusterrole from the serviceaccount, the collector pods should be removed")
err = removeClusterRoleFromServiceAccount(oc, clf.namespace, "collect-all-logs", "collect-audit-logs")
o.Expect(err).NotTo(o.HaveOccurred())
checkResource(oc, true, false, `insufficient permissions on service account, not authorized to collect ["audit"] logs`, []string{"clf/" + clf.name, "-n", clf.namespace, "-ojsonpath={.status.conditions[*].message}"})
dsErr = ds.WaitUntilResourceIsGone(oc)
o.Expect(dsErr).NotTo(o.HaveOccurred())
})
// author [email protected]
g.It("CPaasrunOnly-Author:qitang-High-65685-Deploy CLO to all namespaces and verify prometheusrule/collector and cm/grafana-dashboard-cluster-logging are created along with the CLO.", func() {
csvs, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csv", "-n", "default", "-oname").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(csvs, "cluster-logging")).Should(o.BeTrue())
prometheusrule := resource{
kind: "prometheusrule",
name: "collector",
namespace: loggingNS,
}
prometheusrule.WaitForResourceToAppear(oc)
configmap := resource{
kind: "configmap",
name: "grafana-dashboard-cluster-logging",
namespace: "openshift-config-managed",
}
configmap.WaitForResourceToAppear(oc)
})
g.It("Author:qitang-CPaasrunOnly-Critical-74398-Manage logging collector pods via CLF.[Serial]", func() {
s := getStorageType(oc)
sc, err := getStorageClassName(oc)
if err != nil || len(sc) == 0 {
g.Skip("can't get storageclass from cluster, skip this case")
}
exutil.By("deploy loki stack")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-74398",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-74398",
storageClass: sc,
bucketName: "logging-loki-74398-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "clf-74398",
namespace: loggingNS,
serviceAccountName: "logcollector-74398",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-74398",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
}
exutil.By("check configurations in collector pods")
checkResource(oc, true, true, `{"limits":{"cpu":"6","memory":"2Gi"},"requests":{"cpu":"500m","memory":"64Mi"}}`, []string{"daemonset", clf.name, "-n", clf.namespace, "-ojsonpath={.spec.template.spec.containers[].resources}"})
checkResource(oc, true, true, `{"kubernetes.io/os":"linux"}`, []string{"daemonset", clf.name, "-n", clf.namespace, "-ojsonpath={.spec.template.spec.nodeSelector}"})
checkResource(oc, true, true, `[{"effect":"NoSchedule","key":"node-role.kubernetes.io/master","operator":"Exists"},{"effect":"NoSchedule","key":"node.kubernetes.io/disk-pressure","operator":"Exists"}]`, []string{"daemonset", clf.name, "-n", clf.namespace, "-ojsonpath={.spec.template.spec.tolerations}"})
exutil.By("update collector configurations in CLF")
patch := `[{"op":"add","path":"/spec/collector","value":{"nodeSelector":{"logging":"test"},"resources":{"limits":{"cpu":1,"memory":"3Gi"},"requests":{"cpu":1,"memory":"1Gi","ephemeral-storage":"2Gi"}},"tolerations":[{"effect":"NoExecute","key":"test","operator":"Equal","tolerationSeconds":3000,"value":"logging"}]}}]`
clf.update(oc, "", patch, "--type=json")
WaitUntilPodsAreGone(oc, clf.namespace, "app.kubernetes.io/component=collector")
checkResource(oc, true, true, `{"limits":{"cpu":"1","memory":"3Gi"},"requests":{"cpu":"1","ephemeral-storage":"2Gi","memory":"1Gi"}}`, []string{"daemonset", clf.name, "-n", clf.namespace, "-ojsonpath={.spec.template.spec.containers[].resources}"})
checkResource(oc, true, true, `{"kubernetes.io/os":"linux","logging":"test"}`, []string{"daemonset", clf.name, "-n", clf.namespace, "-ojsonpath={.spec.template.spec.nodeSelector}"})
checkResource(oc, true, true, `[{"effect":"NoSchedule","key":"node-role.kubernetes.io/master","operator":"Exists"},{"effect":"NoSchedule","key":"node.kubernetes.io/disk-pressure","operator":"Exists"},{"effect":"NoExecute","key":"test","operator":"Equal","tolerationSeconds":3000,"value":"logging"}]`, []string{"daemonset", clf.name, "-n", clf.namespace, "-ojsonpath={.spec.template.spec.tolerations}"})
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("remove the nodeSelector, collector pods should be deployed")
patch = `[{"op": "remove", "path": "/spec/collector/nodeSelector"}]`
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
lc.waitForLogsAppearByProject("application", appProj)
})
})
var _ = g.Describe("[sig-openshift-logging] Logging NonPreRelease rapidast scan", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("logging-dast", exutil.KubeConfigPath())
loggingBaseDir string
)
g.BeforeEach(func() {
loggingBaseDir = exutil.FixturePath("testdata", "logging")
nodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{LabelSelector: "kubernetes.io/os=linux,kubernetes.io/arch=amd64"})
if err != nil || len(nodes.Items) == 0 {
g.Skip("Skip for the cluster doesn't have amd64 node")
}
})
// author [email protected]
g.It("Author:anli-CPaasrunOnly-Critical-75070-clo operator should pass DAST", func() {
CLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml"),
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
CLO.SubscribeOperator(oc)
proj := oc.Namespace()
configFile := filepath.Join(loggingBaseDir, "rapidast/data_rapidastconfig_observability_v1.yaml")
policyFile := filepath.Join(loggingBaseDir, "rapidast/customscan.policy")
_, err1 := rapidastScan(oc, proj, configFile, policyFile, "observability.openshift.io_v1")
configFile = filepath.Join(loggingBaseDir, "rapidast/data_rapidastconfig_logging_v1.yaml")
_, err2 := rapidastScan(oc, proj, configFile, policyFile, "logging.openshift.io_v1")
configFile = filepath.Join(loggingBaseDir, "rapidast/data_rapidastconfig_logging_v1alpha1.yaml")
_, err3 := rapidastScan(oc, proj, configFile, policyFile, "logging.openshift.io_v1alpha1")
if err1 != nil || err2 != nil || err3 != nil {
e2e.Failf("rapidast test failed, please check the result for more detail")
}
})
// author [email protected]
g.It("Author:anli-CPaasrunOnly-Critical-67424-Loki Operator should pass DAST test", func() {
LO := SubscriptionObjects{
OperatorName: "loki-operator-controller-manager",
Namespace: loNS,
PackageName: "loki-operator",
Subscription: filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml"),
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
LO.SubscribeOperator(oc)
proj := oc.Namespace()
configFile := filepath.Join(loggingBaseDir, "rapidast/data_rapidastconfig_loki_v1.yaml")
policyFile := filepath.Join(loggingBaseDir, "rapidast/customscan.policy")
_, err := rapidastScan(oc, proj, configFile, policyFile, "loki.grafana.com_v1")
o.Expect(err).NotTo(o.HaveOccurred())
})
})
| package logging | ||||
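A condensed sketch of the subscription patch pattern the upgrade specs above rely on; the channel, catalog source and expected CSV version are assumptions for illustration.
// Sketch: move an installed operator subscription to a new channel/catalog source, then wait for the new CSV to roll out.
patch := `{"spec": {"channel": "stable-6.2", "source": "qe-app-registry", "sourceNamespace": "openshift-marketplace"}}`
err := oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", cloNS, "sub/cluster-logging", "-p", patch, "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
checkResource(oc, true, false, "6.2", []string{"sub", "cluster-logging", "-n", cloNS, "-ojsonpath={.status.currentCSV}"})
WaitForDeploymentPodsToBeReady(oc, cloNS, "cluster-logging-operator")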
test case | openshift/openshift-tests-private | bc61736a-9804-4d1f-8f80-e48cd505d8f7 | Longduration-CPaasrunOnly-Author:qitang-Critical-53407-Cluster Logging upgrade with Vector as collector - minor version.[Serial][Slow] | ['"context"', '"encoding/json"', '"fmt"', '"path/filepath"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/logging/logging_operators.go | g.It("Longduration-CPaasrunOnly-Author:qitang-Critical-53407-Cluster Logging upgrade with Vector as collector - minor version.[Serial][Slow]", func() {
g.Skip("Skip for logging 6.2 is not released!")
var targetchannel = "stable-6.2"
var oh OperatorHub
g.By("check source/redhat-operators status in operatorhub")
output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("operatorhub/cluster", "-ojson").Output()
o.Expect(err).NotTo(o.HaveOccurred())
json.Unmarshal([]byte(output), &oh)
var disabled bool
for _, source := range oh.Status.Sources {
if source.Name == "redhat-operators" {
disabled = source.Disabled
break
}
}
if disabled {
g.Skip("source/redhat-operators is disabled, skip this case.")
}
g.By(fmt.Sprintf("Subscribe operators to %s channel", targetchannel))
source := CatalogSourceObjects{
Channel: targetchannel,
SourceName: "redhat-operators",
SourceNamespace: "openshift-marketplace",
}
subTemplate := filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml")
preCLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
CatalogSource: source,
}
preLO := SubscriptionObjects{
OperatorName: "loki-operator-controller-manager",
Namespace: loNS,
PackageName: "loki-operator",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
CatalogSource: source,
}
defer preCLO.uninstallOperator(oc)
preCLO.SubscribeOperator(oc)
defer preLO.uninstallOperator(oc)
preLO.SubscribeOperator(oc)
g.By("Deploy lokistack")
sc, err := getStorageClassName(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls := lokiStack{
name: "loki-53407",
namespace: loggingNS,
tSize: "1x.demo",
storageType: getStorageType(oc),
storageSecret: "storage-secret-53407",
storageClass: sc,
bucketName: "logging-loki-53407-" + getInfrastructureName(oc),
template: filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml"),
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance",
namespace: loggingNS,
serviceAccountName: "logcollector",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
enableMonitoring: true,
}
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
exutil.By("deploy logfilesmetricexporter")
lfme := logFileMetricExporter{
name: "instance",
namespace: loggingNS,
template: filepath.Join(loggingBaseDir, "logfilemetricexporter", "lfme.yaml"),
waitPodsReady: true,
}
defer lfme.delete(oc)
lfme.create(oc)
//get current csv version
preCloCSV := preCLO.getInstalledCSV(oc)
preLoCSV := preLO.getInstalledCSV(oc)
// get currentCSV in packagemanifests
currentCloCSV := getCurrentCSVFromPackage(oc, "qe-app-registry", targetchannel, preCLO.PackageName)
currentLoCSV := getCurrentCSVFromPackage(oc, "qe-app-registry", targetchannel, preLO.PackageName)
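// Track whether either operator actually moved to a newer CSV; the post-upgrade validation below only runs when it did.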
var upgraded = false
//change source to qe-app-registry if needed, and wait for the new operators to be ready
if preCloCSV != currentCloCSV {
g.By(fmt.Sprintf("upgrade CLO to %s", currentCloCSV))
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", preCLO.Namespace, "sub/"+preCLO.PackageName, "-p", "{\"spec\": {\"source\": \"qe-app-registry\"}}", "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
checkResource(oc, true, true, currentCloCSV, []string{"sub", preCLO.PackageName, "-n", preCLO.Namespace, "-ojsonpath={.status.currentCSV}"})
WaitForDeploymentPodsToBeReady(oc, preCLO.Namespace, preCLO.OperatorName)
upgraded = true
}
if preLoCSV != currentLoCSV {
g.By(fmt.Sprintf("upgrade LO to %s", currentLoCSV))
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", preLO.Namespace, "sub/"+preLO.PackageName, "-p", "{\"spec\": {\"source\": \"qe-app-registry\"}}", "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
checkResource(oc, true, true, currentLoCSV, []string{"sub", preLO.PackageName, "-n", preLO.Namespace, "-ojsonpath={.status.currentCSV}"})
WaitForDeploymentPodsToBeReady(oc, preLO.Namespace, preLO.OperatorName)
upgraded = true
}
if upgraded {
g.By("waiting for the Loki and Vector pods to be ready after upgrade")
ls.waitForLokiStackToBeReady(oc)
clf.waitForCollectorPodsReady(oc)
WaitForDaemonsetPodsToBeReady(oc, lfme.namespace, "logfilesmetricexporter")
// In upgrade testing, a pod can occasionally be not ready even though its deployment/statefulset reports ready,
// so explicitly check the pods' status here
waitForPodReadyWithLabel(oc, ls.namespace, "app.kubernetes.io/instance="+ls.name)
g.By("checking if the collector can collect logs after upgrading")
oc.SetupProject()
appProj := oc.Namespace()
defer removeClusterRoleFromServiceAccount(oc, appProj, "default", "cluster-admin")
addClusterRoleToServiceAccount(oc, appProj, "default", "cluster-admin")
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", appProj)
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
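// Poll the LokiStack gateway every 30s (for up to 3 minutes) until application logs from the test project show up.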
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
res, err := lc.searchByNamespace("application", appProj)
if err != nil {
e2e.Logf("\ngot err when getting application logs: %v, continue\n", err)
return false, nil
}
if len(res.Data.Result) > 0 {
return true, nil
}
e2e.Logf("\n len(res.Data.Result) not > 0, continue\n")
return false, nil
})
exutil.AssertWaitPollNoErr(err, "application logs are not found")
exutil.By("Check if the cm/grafana-dashboard-cluster-logging is created or not after upgrading")
resource{"configmap", "grafana-dashboard-cluster-logging", "openshift-config-managed"}.WaitForResourceToAppear(oc)
}
}) | |||||
test case | openshift/openshift-tests-private | 324e8624-ccc9-4453-819a-a479c923cbc7 | Longduration-CPaasrunOnly-Author:qitang-Critical-53404-Cluster Logging upgrade with Vector as collector - major version.[Serial][Slow] | ['"context"', '"encoding/json"', '"fmt"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/logging/logging_operators.go | g.It("Longduration-CPaasrunOnly-Author:qitang-Critical-53404-Cluster Logging upgrade with Vector as collector - major version.[Serial][Slow]", func() {
// to make the pre-upgrade logging release available, create a new catalog source with image: quay.io/openshift-qe-optional-operators/aosqe-index
catsrcTemplate := exutil.FixturePath("testdata", "logging", "subscription", "catsrc.yaml")
catsrc := resource{"catsrc", "logging-upgrade-" + getRandomString(), "openshift-marketplace"}
tag, err := getIndexImageTag(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer catsrc.clear(oc)
catsrc.applyFromTemplate(oc, "-f", catsrcTemplate, "-n", catsrc.namespace, "-p", "NAME="+catsrc.name, "-p", "IMAGE=quay.io/openshift-qe-optional-operators/aosqe-index:v"+tag)
waitForPodReadyWithLabel(oc, catsrc.namespace, "olm.catalogSource="+catsrc.name)
// for 6.2, test upgrade from 6.1 to 6.2
preSource := CatalogSourceObjects{"stable-6.1", catsrc.name, catsrc.namespace}
g.By(fmt.Sprintf("Subscribe operators to %s channel", preSource.Channel))
subTemplate := filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml")
preCLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
CatalogSource: preSource,
}
preLO := SubscriptionObjects{
OperatorName: "loki-operator-controller-manager",
Namespace: loNS,
PackageName: "loki-operator",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
CatalogSource: preSource,
}
defer preCLO.uninstallOperator(oc)
preCLO.SubscribeOperator(oc)
defer preLO.uninstallOperator(oc)
preLO.SubscribeOperator(oc)
g.By("Deploy lokistack")
sc, err := getStorageClassName(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls := lokiStack{
name: "loki-53404",
namespace: loggingNS,
tSize: "1x.demo",
storageType: getStorageType(oc),
storageSecret: "storage-secret-53404",
storageClass: sc,
bucketName: "logging-loki-53404-" + getInfrastructureName(oc),
template: filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml"),
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("deploy logfilesmetricexporter")
lfme := logFileMetricExporter{
name: "instance",
namespace: loggingNS,
template: filepath.Join(loggingBaseDir, "logfilemetricexporter", "lfme.yaml"),
waitPodsReady: true,
}
defer lfme.delete(oc)
lfme.create(oc)
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance",
namespace: loggingNS,
serviceAccountName: "logcollector",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
enableMonitoring: true,
}
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
//change channel, and wait for the new operators to be ready
var source = CatalogSourceObjects{"stable-6.1", "qe-app-registry", "openshift-marketplace"}
version := strings.Split(source.Channel, "-")[1]
g.By(fmt.Sprintf("upgrade CLO&LO to %s", source.Channel))
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", preCLO.Namespace, "sub/"+preCLO.PackageName, "-p", "{\"spec\": {\"channel\": \""+source.Channel+"\", \"source\": \""+source.SourceName+"\", \"sourceNamespace\": \""+source.SourceNamespace+"\"}}", "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("-n", preLO.Namespace, "sub/"+preLO.PackageName, "-p", "{\"spec\": {\"channel\": \""+source.Channel+"\", \"source\": \""+source.SourceName+"\", \"sourceNamespace\": \""+source.SourceNamespace+"\"}}", "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
checkResource(oc, true, false, version, []string{"sub", preCLO.PackageName, "-n", preCLO.Namespace, "-ojsonpath={.status.currentCSV}"})
cloCurrentCSV, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", "-n", preCLO.Namespace, preCLO.PackageName, "-ojsonpath={.status.currentCSV}").Output()
resource{"csv", cloCurrentCSV, preCLO.Namespace}.WaitForResourceToAppear(oc)
checkResource(oc, true, true, "Succeeded", []string{"csv", cloCurrentCSV, "-n", preCLO.Namespace, "-ojsonpath={.status.phase}"})
WaitForDeploymentPodsToBeReady(oc, preCLO.Namespace, preCLO.OperatorName)
checkResource(oc, true, false, version, []string{"sub", preLO.PackageName, "-n", preLO.Namespace, "-ojsonpath={.status.currentCSV}"})
loCurrentCSV, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("sub", "-n", preLO.Namespace, preLO.PackageName, "-ojsonpath={.status.currentCSV}").Output()
resource{"csv", loCurrentCSV, preLO.Namespace}.WaitForResourceToAppear(oc)
checkResource(oc, true, true, "Succeeded", []string{"csv", loCurrentCSV, "-n", preLO.Namespace, "-ojsonpath={.status.phase}"})
WaitForDeploymentPodsToBeReady(oc, preLO.Namespace, preLO.OperatorName)
ls.waitForLokiStackToBeReady(oc)
clf.waitForCollectorPodsReady(oc)
WaitForDaemonsetPodsToBeReady(oc, lfme.namespace, "logfilesmetricexporter")
// In upgrade testing, a pod can occasionally be not ready even though its deployment/statefulset reports ready,
// so explicitly check the pods' status here
waitForPodReadyWithLabel(oc, ls.namespace, "app.kubernetes.io/instance="+ls.name)
g.By("checking if the collector can collect logs after upgrading")
oc.SetupProject()
appProj := oc.Namespace()
defer removeClusterRoleFromServiceAccount(oc, appProj, "default", "cluster-admin")
addClusterRoleToServiceAccount(oc, appProj, "default", "cluster-admin")
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", appProj)
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
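// Poll the LokiStack gateway every 30s (for up to 3 minutes) until application logs from the test project show up.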
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
res, err := lc.searchByNamespace("application", appProj)
if err != nil {
e2e.Logf("\ngot err when getting application logs: %v, continue\n", err)
return false, nil
}
if len(res.Data.Result) > 0 {
return true, nil
}
e2e.Logf("\n len(res.Data.Result) not > 0, continue\n")
return false, nil
})
exutil.AssertWaitPollNoErr(err, "application logs are not found")
// Creating cluster roles to allow read access from LokiStack
defer deleteLokiClusterRolesForReadAccess(oc)
createLokiClusterRolesForReadAccess(oc)
g.By("checking if regular user can view his logs after upgrading")
err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("policy", "add-cluster-role-to-user", "cluster-logging-application-view", oc.Username(), "-n", appProj).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
userToken, err := oc.Run("whoami").Args("-t").Output()
o.Expect(err).NotTo(o.HaveOccurred())
lc0 := newLokiClient(route).withToken(userToken).retry(5)
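// With the application-view role granted, the regular user's token should be able to query that user's own project logs via the gateway.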
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 180*time.Second, true, func(context.Context) (done bool, err error) {
res, err := lc0.searchByNamespace("application", appProj)
if err != nil {
e2e.Logf("\ngot err when getting application logs: %v, continue\n", err)
return false, nil
}
if len(res.Data.Result) > 0 {
return true, nil
}
e2e.Logf("\n len(res.Data.Result) not > 0, continue\n")
return false, nil
})
exutil.AssertWaitPollNoErr(err, "can't get application logs with normal user")
exutil.By("Check if the cm/grafana-dashboard-cluster-logging is created or not after upgrading")
resource{"configmap", "grafana-dashboard-cluster-logging", "openshift-config-managed"}.WaitForResourceToAppear(oc)
}) | |||||
test case | openshift/openshift-tests-private | 50504de0-2546-4579-9063-3ae9acc7cf8c | CPaasrunOnly-Author:anli-Low-65518-deploy cluster-logging-operator after datadog-agent is deployed [Disruptive] | ['"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/logging_operators.go | g.It("CPaasrunOnly-Author:anli-Low-65518-deploy cluster-logging-operator after datadog-agent is deployed [Disruptive]", func() {
oc.SetupProject()
datadogNS := oc.Namespace()
subTemplate := filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml")
ogPath := filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml")
podLabel := "app.kubernetes.io/name=datadog-operator"
g.By("Make the datadog operator ready")
sourceCert := CatalogSourceObjects{
Channel: "stable",
SourceName: "certified-operators",
SourceNamespace: "openshift-marketplace",
}
subDog := SubscriptionObjects{
OperatorName: "datadog-operator-certified",
PackageName: "datadog-operator-certified",
Namespace: datadogNS,
Subscription: subTemplate,
OperatorPodLabel: podLabel,
OperatorGroup: ogPath,
CatalogSource: sourceCert,
SkipCaseWhenFailed: true,
}
subDog.SubscribeOperator(oc)
g.By("Delete cluster-logging operator if exist")
sourceQE := CatalogSourceObjects{
Channel: "stable-6.1",
SourceName: "qe-app-registry",
SourceNamespace: "openshift-marketplace",
}
subCLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: "openshift-logging",
PackageName: "cluster-logging",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
CatalogSource: sourceQE,
}
subCLO.uninstallOperator(oc)
g.By("deploy cluster-logging operator")
subCLO.SubscribeOperator(oc)
}) | |||||
test case | openshift/openshift-tests-private | 9fcc1193-d86a-4488-a0cf-1f6833b0ddda | CPaasrunOnly-Author:qitang-Medium-64147-Deploy Logfilesmetricexporter as an independent pod.[Serial] | ['"path/filepath"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/logging/logging_operators.go | g.It("CPaasrunOnly-Author:qitang-Medium-64147-Deploy Logfilesmetricexporter as an independent pod.[Serial]", func() {
template := filepath.Join(loggingBaseDir, "logfilemetricexporter", "lfme.yaml")
lfme := logFileMetricExporter{
name: "instance",
namespace: loggingNS,
template: template,
waitPodsReady: true,
}
defer lfme.delete(oc)
lfme.create(oc)
token := getSAToken(oc, "prometheus-k8s", "openshift-monitoring")
g.By("check metrics exposed by logfilemetricexporter")
checkMetric(oc, token, "{job=\"logfilesmetricexporter\"}", 5)
sc, err := getStorageClassName(oc)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploying LokiStack CR")
ls := lokiStack{
name: "loki-64147",
namespace: loggingNS,
tSize: "1x.demo",
storageType: getStorageType(oc),
storageSecret: "storage-64147",
storageClass: sc,
bucketName: "logging-loki-64147-" + getInfrastructureName(oc),
template: filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml"),
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "instance",
namespace: loggingNS,
serviceAccountName: "logcollector",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
enableMonitoring: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
g.By("Remove clusterlogforwarder")
clf.delete(oc)
g.By("Check LFME pods, they should not be removed")
WaitForDaemonsetPodsToBeReady(oc, lfme.namespace, "logfilesmetricexporter")
g.By("Remove LFME, the pods should be removed")
lfme.delete(oc)
g.By("Create LFME with invalid name")
lfmeInvalidName := resource{
kind: "logfilemetricexporters.logging.openshift.io",
name: "test-lfme-64147",
namespace: loggingNS,
}
defer lfmeInvalidName.clear(oc)
err = lfmeInvalidName.applyFromTemplate(oc, "-f", template, "-p", "NAME="+lfmeInvalidName.name, "-p", "NAMESPACE="+lfmeInvalidName.namespace)
o.Expect(strings.Contains(err.Error(), "metadata.name: Unsupported value: \""+lfmeInvalidName.name+"\": supported values: \"instance\"")).Should(o.BeTrue())
g.By("Create LFME with invalid namespace")
lfmeInvalidNamespace := logFileMetricExporter{
name: "instance",
namespace: oc.Namespace(),
template: filepath.Join(loggingBaseDir, "logfilemetricexporter", "lfme.yaml"),
}
defer lfmeInvalidNamespace.delete(oc)
lfmeInvalidNamespace.create(oc)
checkResource(oc, true, false, "validation failed: Invalid namespace name \""+lfmeInvalidNamespace.namespace+"\", instance must be in \"openshift-logging\" namespace", []string{"lfme/" + lfmeInvalidNamespace.name, "-n", lfmeInvalidNamespace.namespace, "-ojsonpath={.status.conditions[*].message}"})
}) | |||||
test case | openshift/openshift-tests-private | eec2bced-a054-498f-a9b3-a9aea04b476c | CPaasrunOnly-Author:qitang-Medium-65407-ClusterLogForwarder validation for the serviceaccount.[Slow] | ['"encoding/json"', '"fmt"', '"path/filepath"', '"strings"', '"time"', '"k8s.io/apimachinery/pkg/util/wait"'] | github.com/openshift/openshift-tests-private/test/extended/logging/logging_operators.go | g.It("CPaasrunOnly-Author:qitang-Medium-65407-ClusterLogForwarder validation for the serviceaccount.[Slow]", func() {
clfNS := oc.Namespace()
exutil.By("Deploy ES server")
ees := externalES{
namespace: clfNS,
version: "8",
serverName: "elasticsearch-server",
loggingNS: clfNS,
}
defer ees.remove(oc)
ees.deploy(oc)
logFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
exutil.By("create pod to generate logs")
oc.SetupProject()
proj := oc.Namespace()
err := oc.WithoutNamespace().Run("new-app").Args("-n", proj, "-f", logFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Create clusterlogforwarder with a non-existing serviceaccount")
clf := clusterlogforwarder{
name: "collector-65407",
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "elasticsearch.yaml"),
}
defer clf.delete(oc)
clf.create(oc, "ES_URL=http://"+ees.serverName+"."+ees.namespace+".svc:9200", "ES_VERSION="+ees.version, "SERVICE_ACCOUNT_NAME=logcollector", "INPUT_REFS=[\"application\"]")
checkResource(oc, true, false, `ServiceAccount "logcollector" not found`, []string{"clf/" + clf.name, "-n", clf.namespace, "-ojsonpath={.status.conditions[*].message}"})
ds := resource{
kind: "daemonset",
name: clf.name,
namespace: clf.namespace,
}
dsErr := ds.WaitUntilResourceIsGone(oc)
o.Expect(dsErr).NotTo(o.HaveOccurred())
exutil.By("Create the serviceaccount and create rolebinding to bind clusterrole to the serviceaccount")
sa := resource{
kind: "serviceaccount",
name: "logcollector",
namespace: clfNS,
}
defer sa.clear(oc)
err = createServiceAccount(oc, sa.namespace, sa.name)
o.Expect(err).NotTo(o.HaveOccurred(), "get error when creating serviceaccount "+sa.name)
defer oc.AsAdmin().WithoutNamespace().Run("policy").Args("remove-role-from-user", "collect-application-logs", fmt.Sprintf("system:serviceaccount:%s:%s", sa.namespace, sa.name), "-n", sa.namespace).Execute()
err = oc.AsAdmin().WithoutNamespace().Run("policy").Args("add-role-to-user", "collect-application-logs", fmt.Sprintf("system:serviceaccount:%s:%s", sa.namespace, sa.name), "-n", sa.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// wait for 2 minutes for CLO to update the status in CLF
time.Sleep(2 * time.Minute)
checkResource(oc, true, false, `insufficient permissions on service account, not authorized to collect ["application"] logs`, []string{"clf/" + clf.name, "-n", clf.namespace, "-ojsonpath={.status.conditions[*].message}"})
dsErr = ds.WaitUntilResourceIsGone(oc)
o.Expect(dsErr).NotTo(o.HaveOccurred())
exutil.By("Create clusterrolebinding to bind clusterrole to the serviceaccount")
defer removeClusterRoleFromServiceAccount(oc, sa.namespace, sa.name, "collect-application-logs")
addClusterRoleToServiceAccount(oc, sa.namespace, sa.name, "collect-application-logs")
// wait for 2 minutes for CLO to update the status in CLF
time.Sleep(2 * time.Minute)
checkResource(oc, true, false, "True", []string{"clf/" + clf.name, "-n", clf.namespace, "-ojsonpath={.status.conditions[?(@.type == \"Ready\")].status}"})
exutil.By("Collector pods should be deployed and logs can be forwarded to external log store")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
ees.waitForIndexAppear(oc, "app")
exutil.By("Delete the serviceaccount, the collector pods should be removed")
err = sa.clear(oc)
o.Expect(err).NotTo(o.HaveOccurred())
checkResource(oc, true, false, "ServiceAccount \""+sa.name+"\" not found", []string{"clf/" + clf.name, "-n", clf.namespace, "-ojsonpath={.status.conditions[*].message}"})
dsErr = ds.WaitUntilResourceIsGone(oc)
o.Expect(dsErr).NotTo(o.HaveOccurred())
exutil.By("Recreate the sa and add proper clusterroles to it, the collector pods should be recreated")
err = createServiceAccount(oc, sa.namespace, sa.name)
o.Expect(err).NotTo(o.HaveOccurred(), "get error when creating serviceaccount "+sa.name)
addClusterRoleToServiceAccount(oc, sa.namespace, sa.name, "collect-application-logs")
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
exutil.By("Remove spec.serviceAccount from CLF")
msg, err := clf.patch(oc, `[{"op": "remove", "path": "/spec/serviceAccount"}]`)
o.Expect(err).To(o.HaveOccurred())
o.Expect(strings.Contains(msg, "spec.serviceAccount: Required value")).To(o.BeTrue())
}) | |||||
test case | openshift/openshift-tests-private | e763ce42-411d-490f-bbf1-15c22ac7e890 | CPaasrunOnly-Author:qitang-Medium-65408-ClusterLogForwarder validation when roles don't match. | ['"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/logging_operators.go | g.It("CPaasrunOnly-Author:qitang-Medium-65408-ClusterLogForwarder validation when roles don't match.", func() {
clfNS := oc.Namespace()
loki := externalLoki{"loki-server", clfNS}
defer loki.remove(oc)
loki.deployLoki(oc)
exutil.By("Create ClusterLogForwarder with a serviceaccount which doesn't have proper clusterroles")
clf := clusterlogforwarder{
name: "collector-65408",
namespace: clfNS,
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "loki.yaml"),
serviceAccountName: "clf-collector",
}
defer clf.delete(oc)
clf.create(oc, "URL=http://"+loki.name+"."+loki.namespace+".svc:3100")
checkResource(oc, true, false, `insufficient permissions on service account, not authorized to collect ["application" "audit" "infrastructure"] logs`, []string{"clf/" + clf.name, "-n", clf.namespace, "-ojsonpath={.status.conditions[*].message}"})
ds := resource{
kind: "daemonset",
name: clf.name,
namespace: clf.namespace,
}
dsErr := ds.WaitUntilResourceIsGone(oc)
o.Expect(dsErr).NotTo(o.HaveOccurred())
exutil.By("Create a new sa and add clusterrole/collect-application-logs to the new sa, then update the CLF to use the new sa")
err := oc.AsAdmin().WithoutNamespace().Run("create").Args("sa", "collect-application-logs", "-n", clf.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, "collect-application-logs", "collect-application-logs")
err = addClusterRoleToServiceAccount(oc, clf.namespace, "collect-application-logs", "collect-application-logs")
o.Expect(err).NotTo(o.HaveOccurred())
clf.update(oc, "", "{\"spec\": {\"serviceAccount\": {\"name\": \"collect-application-logs\"}}}", "--type=merge")
checkResource(oc, true, false, `insufficient permissions on service account, not authorized to collect ["audit" "infrastructure"] logs`, []string{"clf/" + clf.name, "-n", clf.namespace, "-ojsonpath={.status.conditions[*].message}"})
dsErr = ds.WaitUntilResourceIsGone(oc)
o.Expect(dsErr).NotTo(o.HaveOccurred())
exutil.By("Create a new sa and add clusterrole/collect-infrastructure-logs to the new sa, then update the CLF to use the new sa")
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("sa", "collect-infrastructure-logs", "-n", clf.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, "collect-infrastructure-logs", "collect-infrastructure-logs")
err = addClusterRoleToServiceAccount(oc, clf.namespace, "collect-infrastructure-logs", "collect-infrastructure-logs")
o.Expect(err).NotTo(o.HaveOccurred())
clf.update(oc, "", "{\"spec\": {\"serviceAccount\": {\"name\": \"collect-infrastructure-logs\"}}}", "--type=merge")
checkResource(oc, true, false, `insufficient permissions on service account, not authorized to collect ["application" "audit"] logs`, []string{"clf/" + clf.name, "-n", clf.namespace, "-ojsonpath={.status.conditions[*].message}"})
dsErr = ds.WaitUntilResourceIsGone(oc)
o.Expect(dsErr).NotTo(o.HaveOccurred())
exutil.By("Create a new sa and add clusterrole/collect-audit-logs to the new sa, then update the CLF to use the new sa")
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("sa", "collect-audit-logs", "-n", clf.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, "collect-audit-logs", "collect-audit-logs")
err = addClusterRoleToServiceAccount(oc, clf.namespace, "collect-audit-logs", "collect-audit-logs")
o.Expect(err).NotTo(o.HaveOccurred())
clf.update(oc, "", "{\"spec\": {\"serviceAccount\": {\"name\": \"collect-audit-logs\"}}}", "--type=merge")
checkResource(oc, true, false, `insufficient permissions on service account, not authorized to collect ["application" "infrastructure"] logs`, []string{"clf/" + clf.name, "-n", clf.namespace, "-ojsonpath={.status.conditions[*].message}"})
dsErr = ds.WaitUntilResourceIsGone(oc)
o.Expect(dsErr).NotTo(o.HaveOccurred())
exutil.By("Create a new sa and add all clusterroles to the new sa, then update the CLF to use the new sa")
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("sa", "collect-all-logs", "-n", clf.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
for _, logType := range []string{"application", "infrastructure", "audit"} {
role := "collect-" + logType + "-logs"
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, "collect-all-logs", role)
err = addClusterRoleToServiceAccount(oc, clf.namespace, "collect-all-logs", role)
o.Expect(err).NotTo(o.HaveOccurred())
}
clf.update(oc, "", "{\"spec\": {\"serviceAccount\": {\"name\": \"collect-all-logs\"}}}", "--type=merge")
checkResource(oc, true, false, "True", []string{"clf/" + clf.name, "-n", clf.namespace, "-ojsonpath={.status.conditions[?(@.type == \"Ready\")].status}"})
WaitForDaemonsetPodsToBeReady(oc, clf.namespace, clf.name)
exutil.By("Remove clusterrole from the serviceaccount, the collector pods should be removed")
err = removeClusterRoleFromServiceAccount(oc, clf.namespace, "collect-all-logs", "collect-audit-logs")
o.Expect(err).NotTo(o.HaveOccurred())
checkResource(oc, true, false, `insufficient permissions on service account, not authorized to collect ["audit"] logs`, []string{"clf/" + clf.name, "-n", clf.namespace, "-ojsonpath={.status.conditions[*].message}"})
dsErr = ds.WaitUntilResourceIsGone(oc)
o.Expect(dsErr).NotTo(o.HaveOccurred())
}) | |||||
test case | openshift/openshift-tests-private | 25334439-a5ab-4dff-8c1f-46b00d6d7933 | CPaasrunOnly-Author:qitang-High-65685-Deploy CLO to all namespaces and verify prometheusrule/collector and cm/grafana-dashboard-cluster-logging are created along with the CLO. | ['"strings"'] | github.com/openshift/openshift-tests-private/test/extended/logging/logging_operators.go | g.It("CPaasrunOnly-Author:qitang-High-65685-Deploy CLO to all namespaces and verify prometheusrule/collector and cm/grafana-dashboard-cluster-logging are created along with the CLO.", func() {
csvs, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("csv", "-n", "default", "-oname").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(strings.Contains(csvs, "cluster-logging")).Should(o.BeTrue())
prometheusrule := resource{
kind: "prometheusrule",
name: "collector",
namespace: loggingNS,
}
prometheusrule.WaitForResourceToAppear(oc)
configmap := resource{
kind: "configmap",
name: "grafana-dashboard-cluster-logging",
namespace: "openshift-config-managed",
}
configmap.WaitForResourceToAppear(oc)
}) | |||||
test case | openshift/openshift-tests-private | e3aa106b-0514-4ab9-90d5-d0a83b3b7484 | Author:qitang-CPaasrunOnly-Critical-74398-Manage logging collector pods via CLF.[Serial] | ['"encoding/json"', '"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/logging_operators.go | g.It("Author:qitang-CPaasrunOnly-Critical-74398-Manage logging collector pods via CLF.[Serial]", func() {
s := getStorageType(oc)
sc, err := getStorageClassName(oc)
if err != nil || len(sc) == 0 {
g.Skip("can't get storageclass from cluster, skip this case")
}
exutil.By("deploy loki stack")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-74398",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-74398",
storageClass: sc,
bucketName: "logging-loki-74398-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "clf-74398",
namespace: loggingNS,
serviceAccountName: "logcollector-74398",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret-74398",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
}
exutil.By("check configurations in collector pods")
checkResource(oc, true, true, `{"limits":{"cpu":"6","memory":"2Gi"},"requests":{"cpu":"500m","memory":"64Mi"}}`, []string{"daemonset", clf.name, "-n", clf.namespace, "-ojsonpath={.spec.template.spec.containers[].resources}"})
checkResource(oc, true, true, `{"kubernetes.io/os":"linux"}`, []string{"daemonset", clf.name, "-n", clf.namespace, "-ojsonpath={.spec.template.spec.nodeSelector}"})
checkResource(oc, true, true, `[{"effect":"NoSchedule","key":"node-role.kubernetes.io/master","operator":"Exists"},{"effect":"NoSchedule","key":"node.kubernetes.io/disk-pressure","operator":"Exists"}]`, []string{"daemonset", clf.name, "-n", clf.namespace, "-ojsonpath={.spec.template.spec.tolerations}"})
exutil.By("update collector configurations in CLF")
patch := `[{"op":"add","path":"/spec/collector","value":{"nodeSelector":{"logging":"test"},"resources":{"limits":{"cpu":1,"memory":"3Gi"},"requests":{"cpu":1,"memory":"1Gi","ephemeral-storage":"2Gi"}},"tolerations":[{"effect":"NoExecute","key":"test","operator":"Equal","tolerationSeconds":3000,"value":"logging"}]}}]`
clf.update(oc, "", patch, "--type=json")
WaitUntilPodsAreGone(oc, clf.namespace, "app.kubernetes.io/component=collector")
checkResource(oc, true, true, `{"limits":{"cpu":"1","memory":"3Gi"},"requests":{"cpu":"1","ephemeral-storage":"2Gi","memory":"1Gi"}}`, []string{"daemonset", clf.name, "-n", clf.namespace, "-ojsonpath={.spec.template.spec.containers[].resources}"})
checkResource(oc, true, true, `{"kubernetes.io/os":"linux","logging":"test"}`, []string{"daemonset", clf.name, "-n", clf.namespace, "-ojsonpath={.spec.template.spec.nodeSelector}"})
checkResource(oc, true, true, `[{"effect":"NoSchedule","key":"node-role.kubernetes.io/master","operator":"Exists"},{"effect":"NoSchedule","key":"node.kubernetes.io/disk-pressure","operator":"Exists"},{"effect":"NoExecute","key":"test","operator":"Equal","tolerationSeconds":3000,"value":"logging"}]`, []string{"daemonset", clf.name, "-n", clf.namespace, "-ojsonpath={.spec.template.spec.tolerations}"})
appProj := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err = oc.WithoutNamespace().Run("new-app").Args("-n", appProj, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("remove the nodeSelector, collector pods should be deployed")
patch = `[{"op": "remove", "path": "/spec/collector/nodeSelector"}]`
clf.update(oc, "", patch, "--type=json")
clf.waitForCollectorPodsReady(oc)
lc.waitForLogsAppearByProject("application", appProj)
}) | |||||
test case | openshift/openshift-tests-private | 07aef4c0-c7f0-4e9a-9f6d-97d351ec7291 | Author:anli-CPaasrunOnly-Critical-75070-clo operator should pass DAST | ['"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/logging_operators.go | g.It("Author:anli-CPaasrunOnly-Critical-75070-clo operator should pass DAST", func() {
CLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml"),
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
CLO.SubscribeOperator(oc)
proj := oc.Namespace()
configFile := filepath.Join(loggingBaseDir, "rapidast/data_rapidastconfig_observability_v1.yaml")
policyFile := filepath.Join(loggingBaseDir, "rapidast/customscan.policy")
_, err1 := rapidastScan(oc, proj, configFile, policyFile, "observability.openshift.io_v1")
configFile = filepath.Join(loggingBaseDir, "rapidast/data_rapidastconfig_logging_v1.yaml")
_, err2 := rapidastScan(oc, proj, configFile, policyFile, "logging.openshift.io_v1")
configFile = filepath.Join(loggingBaseDir, "rapidast/data_rapidastconfig_logging_v1alpha1.yaml")
_, err3 := rapidastScan(oc, proj, configFile, policyFile, "logging.openshift.io_v1alpha1")
if err1 != nil || err2 != nil || err3 != nil {
e2e.Failf("rapidast test failed, please check the result for more detail")
}
}) | |||||
test case | openshift/openshift-tests-private | 99017a16-94d5-46d8-bd7b-f2bd2489435b | Author:anli-CPaasrunOnly-Critical-67424-Loki Operator should pass DAST test | ['"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/logging_operators.go | g.It("Author:anli-CPaasrunOnly-Critical-67424-Loki Operator should pass DAST test", func() {
LO := SubscriptionObjects{
OperatorName: "loki-operator-controller-manager",
Namespace: loNS,
PackageName: "loki-operator",
Subscription: filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml"),
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
LO.SubscribeOperator(oc)
proj := oc.Namespace()
configFile := filepath.Join(loggingBaseDir, "rapidast/data_rapidastconfig_loki_v1.yaml")
policyFile := filepath.Join(loggingBaseDir, "rapidast/customscan.policy")
_, err := rapidastScan(oc, proj, configFile, policyFile, "loki.grafana.com_v1")
o.Expect(err).NotTo(o.HaveOccurred())
}) | |||||
test | openshift/openshift-tests-private | 283943b5-5722-44bf-8813-f9ef79978a5b | loki | import (
"context"
"os"
"path/filepath"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"gopkg.in/yaml.v3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/logging/loki.go | package logging
import (
"context"
"os"
"path/filepath"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"gopkg.in/yaml.v3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
var _ = g.Describe("[sig-openshift-logging] Logging NonPreRelease", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("loki-stack", exutil.KubeConfigPath())
loggingBaseDir string
)
g.Context("Loki Stack testing", func() {
g.BeforeEach(func() {
loggingBaseDir = exutil.FixturePath("testdata", "logging")
subTemplate := filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml")
CLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
LO := SubscriptionObjects{
OperatorName: "loki-operator-controller-manager",
Namespace: loNS,
PackageName: "loki-operator",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
g.By("deploy CLO and LO")
CLO.SubscribeOperator(oc)
LO.SubscribeOperator(oc)
oc.SetupProject()
})
g.It("CPaasrunOnly-ConnectedOnly-Author:kbharti-Critical-48607-High-66088-High-64961-Loki Operator - Verify replica support and PodDisruptionBudget 1x.extra-small, 1x.small and 1x.medium t-shirt size[Serial]", func() {
// This test needs an m5.8xlarge (AWS) instance type, or a comparable instance size on other public clouds
objectStorage := getStorageType(oc)
if len(objectStorage) == 0 {
g.Skip("Current cluster doesn't have a proper object storage for this test!")
}
if !validateInfraAndResourcesForLoki(oc, "150Gi", "64") {
g.Skip("Current platform not supported/resources not available for this test!")
}
sc, err := getStorageClassName(oc)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploying LokiStack CR for 1x.extra-small tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-66088",
namespace: cloNS,
tSize: "1x.extra-small",
storageType: objectStorage,
storageSecret: "storage-secret-66088",
storageClass: sc,
bucketName: "logging-loki-66088-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
g.By("Validate component replicas for 1x.extra-small bucket size")
lokiComponents := []string{"distributor", "gateway", "index-gateway", "query-frontend", "ingester", "querier", "ruler"}
for _, component := range lokiComponents {
if component == "gateway" {
component = "lokistack-gateway"
}
replicaCount, err := getPodNames(oc, ls.namespace, "app.kubernetes.io/component="+component)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(replicaCount) == 2).Should(o.BeTrue())
}
replicacount, err := getPodNames(oc, ls.namespace, "app.kubernetes.io/component=compactor")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(replicacount) == 1).Should(o.BeTrue())
g.By("Check PodDisruptionBudgets set for 1x.extra-small bucket size")
for _, component := range lokiComponents {
minAvailable, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", ls.name+"-"+component, "-n", cloNS, "-o=jsonpath={.spec.minAvailable}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(minAvailable == "1").Should(o.BeTrue())
disruptionsAllowed, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", ls.name+"-"+component, "-n", cloNS, "-o=jsonpath={.status.disruptionsAllowed}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(disruptionsAllowed == "1").Should(o.BeTrue())
}
g.By("Deploying LokiStack CR for 1x.small tshirt size")
ls.removeLokiStack(oc)
newls := ls.setTSize("1x.small")
err = newls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
newls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack redeployed")
g.By("Checking Replica count for 1x.small tshirt size")
replicacount, err = getPodNames(oc, ls.namespace, "app.kubernetes.io/component=compactor")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(replicacount) == 1).Should(o.BeTrue())
for _, component := range lokiComponents {
if component == "gateway" {
component = "lokistack-gateway"
}
replicaCount, err := getPodNames(oc, ls.namespace, "app.kubernetes.io/component="+component)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(replicaCount) == 2).Should(o.BeTrue())
}
g.By("Check PodDisruptionBudgets set for 1x.small bucket size")
for _, component := range lokiComponents {
minAvailable, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", ls.name+"-"+component, "-n", cloNS, "-o=jsonpath={.spec.minAvailable}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(minAvailable == "1").Should(o.BeTrue())
disruptionsAllowed, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", ls.name+"-"+component, "-n", cloNS, "-o=jsonpath={.status.disruptionsAllowed}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(disruptionsAllowed == "1").Should(o.BeTrue())
}
g.By("Redeploying LokiStack with 1x.medium tshirt size")
ls.removeLokiStack(oc)
newls = ls.setTSize("1x.medium")
err = newls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
newls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack redeployed")
g.By("Checking Replica replica for 1x.medium tshirt size")
replicacount, err = getPodNames(oc, ls.namespace, "app.kubernetes.io/component=compactor")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(replicacount) == 1).Should(o.BeTrue())
for _, component := range lokiComponents {
if component == "gateway" {
component = "lokistack-gateway"
}
replicaCount, err := getPodNames(oc, ls.namespace, "app.kubernetes.io/component="+component)
o.Expect(err).NotTo(o.HaveOccurred())
if component == "ingester" || component == "querier" {
o.Expect(len(replicaCount) == 3).Should(o.BeTrue())
} else {
o.Expect(len(replicaCount) == 2).Should(o.BeTrue())
}
}
g.By("Check PodDisruptionBudgets set for 1x.medium bucket size")
for _, component := range lokiComponents {
if component == "ingester" || component == "querier" {
minAvailable, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", ls.name+"-"+component, "-n", cloNS, "-o=jsonpath={.spec.minAvailable}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(minAvailable == "2").Should(o.BeTrue())
disruptionsAllowed, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", ls.name+"-"+component, "-n", cloNS, "-o=jsonpath={.status.disruptionsAllowed}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(disruptionsAllowed == "1").Should(o.BeTrue())
} else {
minAvailable, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", ls.name+"-"+component, "-n", cloNS, "-o=jsonpath={.spec.minAvailable}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(minAvailable == "1").Should(o.BeTrue())
disruptionsAllowed, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", ls.name+"-"+component, "-n", cloNS, "-o=jsonpath={.status.disruptionsAllowed}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(disruptionsAllowed == "1").Should(o.BeTrue())
}
}
})
//Author: [email protected] (GitHub: kabirbhartiRH)
g.It("Author:kbharti-CPaasrunOnly-ConnectedOnly-High-48608-Loki Operator-Reconcile and re-create objects on accidental user deletes[Serial]", func() {
objectStorage := getStorageType(oc)
if len(objectStorage) == 0 {
g.Skip("Current cluster doesn't have a proper object storage for this test!")
}
sc, err := getStorageClassName(oc)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploying LokiStack CR for 1x.demo tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-48608",
namespace: loggingNS,
tSize: "1x.demo",
storageType: objectStorage,
storageSecret: "storage-secret-48608",
storageClass: sc,
bucketName: "logging-loki-48608-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
e2e.Logf("Getting List of configmaps managed by Loki Controller")
lokiCMList, err := oc.AdminKubeClient().CoreV1().ConfigMaps(ls.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/created-by=lokistack-controller"})
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(lokiCMList.Items) == 5).Should(o.BeTrue())
e2e.Logf("Deleting Loki Configmaps")
for _, items := range lokiCMList.Items {
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("cm/"+items.Name, "-n", ls.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
e2e.Logf("Deleting Loki Distributor deployment")
distributorPods, err := getPodNames(oc, ls.namespace, "app.kubernetes.io/component=distributor")
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("deployment/"+ls.name+"-distributor", "-n", ls.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range distributorPods {
er := resource{"pod", pod, ls.namespace}.WaitUntilResourceIsGone(oc)
o.Expect(er).NotTo(o.HaveOccurred())
}
e2e.Logf("Check to see reconciliation of Loki Distributor by Controller....")
ls.waitForLokiStackToBeReady(oc)
podList, err := oc.AdminKubeClient().CoreV1().Pods(ls.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/component=distributor"})
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(podList.Items) == 1).Should(o.BeTrue())
e2e.Logf("Distributor deployment reconciled!")
e2e.Logf("Check to see reconciliation of configmaps by Controller....")
lokiCMList, err = oc.AdminKubeClient().CoreV1().ConfigMaps(ls.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/created-by=lokistack-controller"})
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(lokiCMList.Items) == 5).Should(o.BeTrue())
e2e.Logf("Loki Configmaps are reconciled \n")
})
g.It("CPaasrunOnly-ConnectedOnly-Author:kbharti-High-48679-High-48616-Define limits and overrides per tenant for Loki and restart loki components on config change[Serial]", func() {
objectStorage := getStorageType(oc)
if len(objectStorage) == 0 {
g.Skip("Current cluster doesn't have a proper object storage for this test!")
}
sc, err := getStorageClassName(oc)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploying LokiStack CR for 1x.demo tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-48679",
namespace: cloNS,
tSize: "1x.demo",
storageType: objectStorage,
storageSecret: "storage-secret-48679",
storageClass: sc,
bucketName: "logging-loki-48679-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
// Get names of some lokistack components before patching
querierPodNameBeforePatch, err := getPodNames(oc, ls.namespace, "app.kubernetes.io/component=querier")
o.Expect(err).NotTo(o.HaveOccurred())
queryFrontendPodNameBeforePatch, err := getPodNames(oc, ls.namespace, "app.kubernetes.io/component=query-frontend")
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Patching lokiStack with limits and overrides")
patchConfig := `
spec:
limits:
tenants:
application:
ingestion:
ingestionRate: 20
maxLabelNameLength: 2048
maxLabelValueLength: 1024
infrastructure:
ingestion:
ingestionRate: 15
audit:
ingestion:
ingestionRate: 10
`
_, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("lokistack", ls.name, "-n", ls.namespace, "--type", "merge", "-p", patchConfig).Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Check lokistack components are restarted")
existingPodNames := []string{querierPodNameBeforePatch[0], queryFrontendPodNameBeforePatch[0]}
for _, podName := range existingPodNames {
err := resource{"pod", podName, ls.namespace}.WaitUntilResourceIsGone(oc)
o.Expect(err).NotTo(o.HaveOccurred())
}
ls.waitForLokiStackToBeReady(oc)
g.By("Validate limits and overrides per tenant under runtime-config.yaml")
dirname := "/tmp/" + oc.Namespace() + "-comp-restart"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/"+ls.name+"-config", "-n", ls.namespace, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
_, err = os.Stat(dirname + "/runtime-config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
lokiStackConf, err := os.ReadFile(dirname + "/runtime-config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
runtimeConfig := RuntimeConfig{}
err = yaml.Unmarshal(lokiStackConf, &runtimeConfig)
o.Expect(err).NotTo(o.HaveOccurred())
// validating overrides for application tenant
o.Expect(*runtimeConfig.Overrides.Application.IngestionRateMb).Should(o.Equal(20))
o.Expect(*runtimeConfig.Overrides.Application.MaxLabelNameLength).Should(o.Equal(2048))
o.Expect(*runtimeConfig.Overrides.Application.MaxLabelValueLength).Should(o.Equal(1024))
//validating overrides for infra tenant
o.Expect(*runtimeConfig.Overrides.Infrastructure.IngestionRateMb).Should(o.Equal(15))
o.Expect(runtimeConfig.Overrides.Infrastructure.MaxLabelNameLength).To(o.BeNil())
o.Expect(runtimeConfig.Overrides.Infrastructure.MaxLabelValueLength).To(o.BeNil())
//validating overrides for audit tenant
o.Expect(*runtimeConfig.Overrides.Audit.IngestionRateMb).Should(o.Equal(10))
o.Expect(runtimeConfig.Overrides.Audit.MaxLabelNameLength).To(o.BeNil())
o.Expect(runtimeConfig.Overrides.Audit.MaxLabelValueLength).To(o.BeNil())
e2e.Logf("overrides have been validated!")
})
g.It("Author:qitang-CPaasrunOnly-ConnectedOnly-High-76729-LokiStack 1x.pico Support[Serial]", func() {
if !validateInfraAndResourcesForLoki(oc, "18Gi", "8") {
g.Skip("Skip this case for the cluster does't have enough resources")
}
objectStorage := getStorageType(oc)
if len(objectStorage) == 0 {
g.Skip("Current cluster doesn't have a proper object storage for this test!")
}
sc, err := getStorageClassName(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Deploying LokiStack CR for 1x.pico tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-76729",
namespace: cloNS,
tSize: "1x.pico",
storageType: objectStorage,
storageSecret: "storage-secret-76729",
storageClass: sc,
bucketName: "logging-loki-76729-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("Validate component replicas for 1x.pico bucket size")
lokiComponents := []string{"distributor", "gateway", "index-gateway", "query-frontend", "querier", "ruler"}
for _, component := range lokiComponents {
if component == "gateway" {
component = "lokistack-gateway"
}
replicaCount, err := getPodNames(oc, ls.namespace, "app.kubernetes.io/component="+component)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(replicaCount) == 2).Should(o.BeTrue())
}
replicacount, err := getPodNames(oc, ls.namespace, "app.kubernetes.io/component=compactor")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(replicacount) == 1).Should(o.BeTrue())
ingesterReplicaCount, err := getPodNames(oc, ls.namespace, "app.kubernetes.io/component=ingester")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(ingesterReplicaCount) == 3).Should(o.BeTrue())
exutil.By("Check PodDisruptionBudgets set for 1x.pico bucket size")
for _, component := range lokiComponents {
minAvailable, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", ls.name+"-"+component, "-n", cloNS, "-o=jsonpath={.spec.minAvailable}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(minAvailable == "1").Should(o.BeTrue())
disruptionsAllowed, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", ls.name+"-"+component, "-n", cloNS, "-o=jsonpath={.status.disruptionsAllowed}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(disruptionsAllowed == "1").Should(o.BeTrue())
}
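// The ingester runs 3 replicas with minAvailable=1, so its PDB should allow 2 disruptions.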
minAvailable, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", ls.name+"-ingester", "-n", cloNS, "-o=jsonpath={.spec.minAvailable}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(minAvailable == "1").Should(o.BeTrue())
disruptionsAllowed, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", ls.name+"-ingester", "-n", cloNS, "-o=jsonpath={.status.disruptionsAllowed}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(disruptionsAllowed == "2").Should(o.BeTrue())
})
})
})
| package logging | ||||
test case | openshift/openshift-tests-private | df0f4707-395e-4b45-aa97-52a90b37feab | CPaasrunOnly-ConnectedOnly-Author:kbharti-Critical-48607-High-66088-High-64961-Loki Operator - Verify replica support and PodDisruptionBudget 1x.extra-small, 1x.small and 1x.medium t-shirt size[Serial] | ['"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki.go | g.It("CPaasrunOnly-ConnectedOnly-Author:kbharti-Critical-48607-High-66088-High-64961-Loki Operator - Verify replica support and PodDisruptionBudget 1x.extra-small, 1x.small and 1x.medium t-shirt size[Serial]", func() {
// This test needs an m5.8xlarge instance type on AWS, or a similarly sized instance on other public clouds
objectStorage := getStorageType(oc)
if len(objectStorage) == 0 {
g.Skip("Current cluster doesn't have a proper object storage for this test!")
}
if !validateInfraAndResourcesForLoki(oc, "150Gi", "64") {
g.Skip("Current platform not supported/resources not available for this test!")
}
sc, err := getStorageClassName(oc)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploying LokiStack CR for 1x.extra-small tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-66088",
namespace: cloNS,
tSize: "1x.extra-small",
storageType: objectStorage,
storageSecret: "storage-secret-66088",
storageClass: sc,
bucketName: "logging-loki-66088-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
g.By("Validate component replicas for 1x.extra-small bucket size")
lokiComponents := []string{"distributor", "gateway", "index-gateway", "query-frontend", "ingester", "querier", "ruler"}
for _, component := range lokiComponents {
if component == "gateway" {
component = "lokistack-gateway"
}
replicaCount, err := getPodNames(oc, ls.namespace, "app.kubernetes.io/component="+component)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(replicaCount) == 2).Should(o.BeTrue())
}
replicacount, err := getPodNames(oc, ls.namespace, "app.kubernetes.io/component=compactor")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(replicacount) == 1).Should(o.BeTrue())
g.By("Check PodDisruptionBudgets set for 1x.extra-small bucket size")
for _, component := range lokiComponents {
minAvailable, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", ls.name+"-"+component, "-n", cloNS, "-o=jsonpath={.spec.minAvailable}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(minAvailable == "1").Should(o.BeTrue())
disruptionsAllowed, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", ls.name+"-"+component, "-n", cloNS, "-o=jsonpath={.status.disruptionsAllowed}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(disruptionsAllowed == "1").Should(o.BeTrue())
}
g.By("Deploying LokiStack CR for 1x.small tshirt size")
ls.removeLokiStack(oc)
newls := ls.setTSize("1x.small")
err = newls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
newls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack redeployed")
g.By("Checking Replica count for 1x.small tshirt size")
replicacount, err = getPodNames(oc, ls.namespace, "app.kubernetes.io/component=compactor")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(replicacount) == 1).Should(o.BeTrue())
for _, component := range lokiComponents {
if component == "gateway" {
component = "lokistack-gateway"
}
replicaCount, err := getPodNames(oc, ls.namespace, "app.kubernetes.io/component="+component)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(replicaCount) == 2).Should(o.BeTrue())
}
g.By("Check PodDisruptionBudgets set for 1x.small bucket size")
for _, component := range lokiComponents {
minAvailable, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", ls.name+"-"+component, "-n", cloNS, "-o=jsonpath={.spec.minAvailable}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(minAvailable == "1").Should(o.BeTrue())
disruptionsAllowed, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", ls.name+"-"+component, "-n", cloNS, "-o=jsonpath={.status.disruptionsAllowed}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(disruptionsAllowed == "1").Should(o.BeTrue())
}
g.By("Redeploying LokiStack with 1x.medium tshirt size")
ls.removeLokiStack(oc)
newls = ls.setTSize("1x.medium")
err = newls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
newls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack redeployed")
g.By("Checking Replica replica for 1x.medium tshirt size")
replicacount, err = getPodNames(oc, ls.namespace, "app.kubernetes.io/component=compactor")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(replicacount) == 1).Should(o.BeTrue())
for _, component := range lokiComponents {
if component == "gateway" {
component = "lokistack-gateway"
}
replicaCount, err := getPodNames(oc, ls.namespace, "app.kubernetes.io/component="+component)
o.Expect(err).NotTo(o.HaveOccurred())
if component == "ingester" || component == "querier" {
o.Expect(len(replicaCount) == 3).Should(o.BeTrue())
} else {
o.Expect(len(replicaCount) == 2).Should(o.BeTrue())
}
}
g.By("Check PodDisruptionBudgets set for 1x.medium bucket size")
for _, component := range lokiComponents {
if component == "ingester" || component == "querier" {
minAvailable, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", ls.name+"-"+component, "-n", cloNS, "-o=jsonpath={.spec.minAvailable}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(minAvailable == "2").Should(o.BeTrue())
disruptionsAllowed, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", ls.name+"-"+component, "-n", cloNS, "-o=jsonpath={.status.disruptionsAllowed}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(disruptionsAllowed == "1").Should(o.BeTrue())
} else {
minAvailable, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", ls.name+"-"+component, "-n", cloNS, "-o=jsonpath={.spec.minAvailable}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(minAvailable == "1").Should(o.BeTrue())
disruptionsAllowed, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", ls.name+"-"+component, "-n", cloNS, "-o=jsonpath={.status.disruptionsAllowed}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(disruptionsAllowed == "1").Should(o.BeTrue())
}
}
}) | |||||
test case | openshift/openshift-tests-private | 5fe4981c-72d7-4c25-9d50-5db26899aa03 | Author:kbharti-CPaasrunOnly-ConnectedOnly-High-48608-Loki Operator-Reconcile and re-create objects on accidental user deletes[Serial] | ['"context"', '"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki.go | g.It("Author:kbharti-CPaasrunOnly-ConnectedOnly-High-48608-Loki Operator-Reconcile and re-create objects on accidental user deletes[Serial]", func() {
objectStorage := getStorageType(oc)
if len(objectStorage) == 0 {
g.Skip("Current cluster doesn't have a proper object storage for this test!")
}
sc, err := getStorageClassName(oc)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploying LokiStack CR for 1x.demo tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-48608",
namespace: loggingNS,
tSize: "1x.demo",
storageType: objectStorage,
storageSecret: "storage-secret-48608",
storageClass: sc,
bucketName: "logging-loki-48608-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
e2e.Logf("Getting List of configmaps managed by Loki Controller")
lokiCMList, err := oc.AdminKubeClient().CoreV1().ConfigMaps(ls.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/created-by=lokistack-controller"})
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(lokiCMList.Items) == 5).Should(o.BeTrue())
e2e.Logf("Deleting Loki Configmaps")
for _, items := range lokiCMList.Items {
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("cm/"+items.Name, "-n", ls.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
e2e.Logf("Deleting Loki Distributor deployment")
distributorPods, err := getPodNames(oc, ls.namespace, "app.kubernetes.io/component=distributor")
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("delete").Args("deployment/"+ls.name+"-distributor", "-n", ls.namespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
for _, pod := range distributorPods {
er := resource{"pod", pod, ls.namespace}.WaitUntilResourceIsGone(oc)
o.Expect(er).NotTo(o.HaveOccurred())
}
e2e.Logf("Check to see reconciliation of Loki Distributor by Controller....")
ls.waitForLokiStackToBeReady(oc)
podList, err := oc.AdminKubeClient().CoreV1().Pods(ls.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/component=distributor"})
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(podList.Items) == 1).Should(o.BeTrue())
e2e.Logf("Distributor deployment reconciled!")
e2e.Logf("Check to see reconciliation of configmaps by Controller....")
lokiCMList, err = oc.AdminKubeClient().CoreV1().ConfigMaps(ls.namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/created-by=lokistack-controller"})
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(lokiCMList.Items) == 5).Should(o.BeTrue())
e2e.Logf("Loki Configmaps are reconciled \n")
}) | |||||
test case | openshift/openshift-tests-private | bc649a06-af8b-4cf8-bc7b-934a132c09dd | CPaasrunOnly-ConnectedOnly-Author:kbharti-High-48679-High-48616-Define limits and overrides per tenant for Loki and restart loki components on config change[Serial] | ['"os"', '"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki.go | g.It("CPaasrunOnly-ConnectedOnly-Author:kbharti-High-48679-High-48616-Define limits and overrides per tenant for Loki and restart loki components on config change[Serial]", func() {
objectStorage := getStorageType(oc)
if len(objectStorage) == 0 {
g.Skip("Current cluster doesn't have a proper object storage for this test!")
}
sc, err := getStorageClassName(oc)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Deploying LokiStack CR for 1x.demo tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-48679",
namespace: cloNS,
tSize: "1x.demo",
storageType: objectStorage,
storageSecret: "storage-secret-48679",
storageClass: sc,
bucketName: "logging-loki-48679-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
// Get names of some lokistack components before patching
querierPodNameBeforePatch, err := getPodNames(oc, ls.namespace, "app.kubernetes.io/component=querier")
o.Expect(err).NotTo(o.HaveOccurred())
queryFrontendPodNameBeforePatch, err := getPodNames(oc, ls.namespace, "app.kubernetes.io/component=query-frontend")
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Patching lokiStack with limits and overrides")
patchConfig := `
spec:
limits:
tenants:
application:
ingestion:
ingestionRate: 20
maxLabelNameLength: 2048
maxLabelValueLength: 1024
infrastructure:
ingestion:
ingestionRate: 15
audit:
ingestion:
ingestionRate: 10
`
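// The per-tenant ingestionRate values above are in MB/s; the operator renders them into
// runtime-config.yaml as Loki per-tenant overrides (e.g. ingestion_rate_mb), which is what
// the RuntimeConfig checks below assert on.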
_, err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("lokistack", ls.name, "-n", ls.namespace, "--type", "merge", "-p", patchConfig).Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By("Check lokistack components are restarted")
existingPodNames := []string{querierPodNameBeforePatch[0], queryFrontendPodNameBeforePatch[0]}
for _, podName := range existingPodNames {
err := resource{"pod", podName, ls.namespace}.WaitUntilResourceIsGone(oc)
o.Expect(err).NotTo(o.HaveOccurred())
}
ls.waitForLokiStackToBeReady(oc)
g.By("Validate limits and overrides per tenant under runtime-config.yaml")
dirname := "/tmp/" + oc.Namespace() + "-comp-restart"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/"+ls.name+"-config", "-n", ls.namespace, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
_, err = os.Stat(dirname + "/runtime-config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
lokiStackConf, err := os.ReadFile(dirname + "/runtime-config.yaml")
o.Expect(err).NotTo(o.HaveOccurred())
runtimeConfig := RuntimeConfig{}
err = yaml.Unmarshal(lokiStackConf, &runtimeConfig)
o.Expect(err).NotTo(o.HaveOccurred())
// validating overrides for application tenant
o.Expect(*runtimeConfig.Overrides.Application.IngestionRateMb).Should(o.Equal(20))
o.Expect(*runtimeConfig.Overrides.Application.MaxLabelNameLength).Should(o.Equal(2048))
o.Expect(*runtimeConfig.Overrides.Application.MaxLabelValueLength).Should(o.Equal(1024))
//validating overrides for infra tenant
o.Expect(*runtimeConfig.Overrides.Infrastructure.IngestionRateMb).Should(o.Equal(15))
o.Expect(runtimeConfig.Overrides.Infrastructure.MaxLabelNameLength).To(o.BeNil())
o.Expect(runtimeConfig.Overrides.Infrastructure.MaxLabelValueLength).To(o.BeNil())
//validating overrides for audit tenant
o.Expect(*runtimeConfig.Overrides.Audit.IngestionRateMb).Should(o.Equal(10))
o.Expect(runtimeConfig.Overrides.Audit.MaxLabelNameLength).To(o.BeNil())
o.Expect(runtimeConfig.Overrides.Audit.MaxLabelValueLength).To(o.BeNil())
e2e.Logf("overrides have been validated!")
}) | |||||
test case | openshift/openshift-tests-private | e1b1cc32-25ba-4f5d-9f74-f85f68990937 | Author:qitang-CPaasrunOnly-ConnectedOnly-High-76729-LokiStack 1x.pico Support[Serial] | ['"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki.go | g.It("Author:qitang-CPaasrunOnly-ConnectedOnly-High-76729-LokiStack 1x.pico Support[Serial]", func() {
if !validateInfraAndResourcesForLoki(oc, "18Gi", "8") {
g.Skip("Skip this case for the cluster does't have enough resources")
}
objectStorage := getStorageType(oc)
if len(objectStorage) == 0 {
g.Skip("Current cluster doesn't have a proper object storage for this test!")
}
sc, err := getStorageClassName(oc)
o.Expect(err).NotTo(o.HaveOccurred())
exutil.By("Deploying LokiStack CR for 1x.pico tshirt size")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "loki-76729",
namespace: cloNS,
tSize: "1x.pico",
storageType: objectStorage,
storageSecret: "storage-secret-76729",
storageClass: sc,
bucketName: "logging-loki-76729-" + getInfrastructureName(oc),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
e2e.Logf("LokiStack deployed")
exutil.By("Validate component replicas for 1x.pico bucket size")
lokiComponents := []string{"distributor", "gateway", "index-gateway", "query-frontend", "querier", "ruler"}
for _, component := range lokiComponents {
if component == "gateway" {
component = "lokistack-gateway"
}
replicaCount, err := getPodNames(oc, ls.namespace, "app.kubernetes.io/component="+component)
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(replicaCount) == 2).Should(o.BeTrue())
}
replicacount, err := getPodNames(oc, ls.namespace, "app.kubernetes.io/component=compactor")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(replicacount) == 1).Should(o.BeTrue())
ingesterReplicaCount, err := getPodNames(oc, ls.namespace, "app.kubernetes.io/component=ingester")
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(len(ingesterReplicaCount) == 3).Should(o.BeTrue())
exutil.By("Check PodDisruptionBudgets set for 1x.pico bucket size")
for _, component := range lokiComponents {
minAvailable, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", ls.name+"-"+component, "-n", cloNS, "-o=jsonpath={.spec.minAvailable}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(minAvailable == "1").Should(o.BeTrue())
disruptionsAllowed, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", ls.name+"-"+component, "-n", cloNS, "-o=jsonpath={.status.disruptionsAllowed}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(disruptionsAllowed == "1").Should(o.BeTrue())
}
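// The ingester runs 3 replicas with minAvailable=1, so its PDB should allow 2 disruptions.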
minAvailable, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", ls.name+"-ingester", "-n", cloNS, "-o=jsonpath={.spec.minAvailable}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(minAvailable == "1").Should(o.BeTrue())
disruptionsAllowed, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("PodDisruptionBudget", ls.name+"-ingester", "-n", cloNS, "-o=jsonpath={.status.disruptionsAllowed}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(disruptionsAllowed == "2").Should(o.BeTrue())
}) | |||||
test | openshift/openshift-tests-private | a07d8b5a-d700-4bc2-805e-265d1c890b88 | loki_managed_sts_wif | import (
"path/filepath"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
) | github.com/openshift/openshift-tests-private/test/extended/logging/loki_managed_sts_wif.go | package logging
import (
"path/filepath"
"strings"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
)
var _ = g.Describe("[sig-openshift-logging] Logging NonPreRelease Loki - Managed auth/STS mode", func() {
defer g.GinkgoRecover()
var (
oc = exutil.NewCLI("loki-sts-wif-support", exutil.KubeConfigPath())
loggingBaseDir, s, sc string
)
g.BeforeEach(func() {
if !exutil.IsWorkloadIdentityCluster(oc) {
g.Skip("Not a STS/WIF cluster")
}
s = getStorageType(oc)
if len(s) == 0 {
g.Skip("Current cluster doesn't have a proper object storage for this test!")
}
sc, _ = getStorageClassName(oc)
if len(sc) == 0 {
g.Skip("The cluster doesn't have a storage class for this test!")
}
loggingBaseDir = exutil.FixturePath("testdata", "logging")
subTemplate := filepath.Join(loggingBaseDir, "subscription", "sub-template.yaml")
CLO := SubscriptionObjects{
OperatorName: "cluster-logging-operator",
Namespace: cloNS,
PackageName: "cluster-logging",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
LO := SubscriptionObjects{
OperatorName: "loki-operator-controller-manager",
Namespace: loNS,
PackageName: "loki-operator",
Subscription: subTemplate,
OperatorGroup: filepath.Join(loggingBaseDir, "subscription", "allnamespace-og.yaml"),
}
g.By("deploy CLO and Loki Operator")
CLO.SubscribeOperator(oc)
LO.SubscribeOperator(oc)
})
g.It("Author:kbharti-CPaasrunOnly-Critical-71534-Verify CCO support on AWS STS cluster and forward logs to default Loki[Serial]", func() {
currentPlatform := exutil.CheckPlatform(oc)
if strings.ToLower(currentPlatform) != "aws" {
g.Skip("The platform is not AWS. Skipping case..")
}
g.By("Create log producer")
appNS := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appNS, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
nodeName, err := genLinuxAuditLogsOnWorker(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer deleteLinuxAuditPolicyFromNode(oc, nodeName)
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-71534",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-71534",
storageClass: sc,
bucketName: "logging-loki-71534-" + getInfrastructureName(oc) + "-" + exutil.GetRandomString(),
template: lokiStackTemplate,
}
exutil.By("Deploy LokiStack")
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "clf-71534",
namespace: loggingNS,
serviceAccountName: "logcollector",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
enableMonitoring: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
exutil.By("Validate Logs in Loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"infrastructure", "audit", "application"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
}
exutil.By("Validate that log streams are pushed to S3 bucket")
ls.validateExternalObjectStorageForLogs(oc, []string{"application", "audit", "infrastructure"})
})
// Case for Microsoft Azure WIF cluster
g.It("Author:kbharti-CPaasrunOnly-Critical-71773-Verify CCO support with custom region on a WIF cluster and forward logs to lokiStack logstore[Serial]", func() {
currentPlatform := exutil.CheckPlatform(oc)
if currentPlatform != "azure" {
g.Skip("The platform is not Azure. Skipping case..")
}
exutil.By("Deploy LokiStack")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-71773",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-71773",
storageClass: sc,
bucketName: "loki-71773-" + exutil.GetRandomString(),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
// Validate that credentials request created by LO has same region as the cluster (non-default scenario)
clusterRegion, err := getAzureClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
credentialsRequestRegion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("CredentialsRequest", ls.name, "-n", ls.namespace, `-o=jsonpath={.spec.providerSpec.azureRegion}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(credentialsRequestRegion).Should(o.Equal(clusterRegion))
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "clf-71773",
namespace: loggingNS,
serviceAccountName: "logcollector",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
enableMonitoring: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
exutil.By("Validate Logs in Loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
}
exutil.By("Validate log streams are pushed to external Azure Blob container")
ls.validateExternalObjectStorageForLogs(oc, []string{"application", "audit", "infrastructure"})
})
// Case for Microsoft Azure WIF cluster
g.It("Author:kbharti-CPaasrunOnly-Critical-71794-Verify CCO support with default region on a WIF cluster and forward logs to lokiStack logstore[Serial]", func() {
currentPlatform := exutil.CheckPlatform(oc)
if currentPlatform != "azure" {
g.Skip("The platform is not Azure. Skipping case..")
}
exutil.By("Deploy LokiStack")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-71794",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-71794",
storageClass: sc,
bucketName: "loki-71794-" + exutil.GetRandomString(),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
// Patch to remove region from Loki Operator subscription (default case)
removeRegion := `[
{
"op": "remove",
"path": "/spec/config/env/3"
}
]`
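// Removing index 3 assumes the region variable is the fourth entry of .spec.config.env in the Subscription; the patch index must be updated if that ordering ever changes.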
err = oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("patch").Args("sub", "loki-operator", "-n", loNS, "-p", removeRegion, "--type=json").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// LO controller pod will restart with new configuration after region is removed
waitForPodReadyWithLabel(oc, loNS, "name=loki-operator-controller-manager")
// Create LokiStack CR
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
// Validate that credentials request created by LO has region as 'centralus' for default case
defaultRegion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("CredentialsRequest", ls.name, "-n", ls.namespace, `-o=jsonpath={.spec.providerSpec.azureRegion}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(defaultRegion).Should(o.Equal("centralus"))
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "clf-71794",
namespace: loggingNS,
serviceAccountName: "logcollector",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
enableMonitoring: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
exutil.By("Validate Logs in Loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
}
exutil.By("Validate log streams are pushed to external Azure Blob container")
ls.validateExternalObjectStorageForLogs(oc, []string{"application", "audit", "infrastructure"})
})
})
| package logging | ||||
test case | openshift/openshift-tests-private | 0a630ed8-8fc5-4330-aa41-804ee0ddec64 | Author:kbharti-CPaasrunOnly-Critical-71534-Verify CCO support on AWS STS cluster and forward logs to default Loki[Serial] | ['"path/filepath"', '"strings"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_managed_sts_wif.go | g.It("Author:kbharti-CPaasrunOnly-Critical-71534-Verify CCO support on AWS STS cluster and forward logs to default Loki[Serial]", func() {
currentPlatform := exutil.CheckPlatform(oc)
if strings.ToLower(currentPlatform) != "aws" {
g.Skip("The platform is not AWS. Skipping case..")
}
g.By("Create log producer")
appNS := oc.Namespace()
jsonLogFile := filepath.Join(loggingBaseDir, "generatelog", "container_json_log_template.json")
err := oc.WithoutNamespace().Run("new-app").Args("-n", appNS, "-f", jsonLogFile).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
nodeName, err := genLinuxAuditLogsOnWorker(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer deleteLinuxAuditPolicyFromNode(oc, nodeName)
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-71534",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-71534",
storageClass: sc,
bucketName: "logging-loki-71534-" + getInfrastructureName(oc) + "-" + exutil.GetRandomString(),
template: lokiStackTemplate,
}
exutil.By("Deploy LokiStack")
defer ls.removeObjectStorage(oc)
err = ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "clf-71534",
namespace: loggingNS,
serviceAccountName: "logcollector",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
enableMonitoring: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
exutil.By("Validate Logs in Loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"infrastructure", "audit", "application"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
}
exutil.By("Validate that log streams are pushed to S3 bucket")
ls.validateExternalObjectStorageForLogs(oc, []string{"application", "audit", "infrastructure"})
}) | |||||
test case | openshift/openshift-tests-private | 7733a4ef-d572-43e4-b48b-d7d840543a7a | Author:kbharti-CPaasrunOnly-Critical-71773-Verify CCO support with custom region on a WIF cluster and forward logs to lokiStack logstore[Serial] | ['"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_managed_sts_wif.go | g.It("Author:kbharti-CPaasrunOnly-Critical-71773-Verify CCO support with custom region on a WIF cluster and forward logs to lokiStack logstore[Serial]", func() {
currentPlatform := exutil.CheckPlatform(oc)
if currentPlatform != "azure" {
g.Skip("The platform is not Azure. Skipping case..")
}
exutil.By("Deploy LokiStack")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-71773",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-71773",
storageClass: sc,
bucketName: "loki-71773-" + exutil.GetRandomString(),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
// Validate that credentials request created by LO has same region as the cluster (non-default scenario)
clusterRegion, err := getAzureClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
credentialsRequestRegion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("CredentialsRequest", ls.name, "-n", ls.namespace, `-o=jsonpath={.spec.providerSpec.azureRegion}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(credentialsRequestRegion).Should(o.Equal(clusterRegion))
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "clf-71773",
namespace: loggingNS,
serviceAccountName: "logcollector",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
enableMonitoring: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
exutil.By("Validate Logs in Loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
}
exutil.By("Validate log streams are pushed to external Azure Blob container")
ls.validateExternalObjectStorageForLogs(oc, []string{"application", "audit", "infrastructure"})
}) | |||||
test case | openshift/openshift-tests-private | fe22ee0e-6aab-4d3d-898e-519aaf1ed90b | Author:kbharti-CPaasrunOnly-Critical-71794-Verify CCO support with default region on a WIF cluster and forward logs to lokiStack logstore[Serial] | ['"path/filepath"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_managed_sts_wif.go | g.It("Author:kbharti-CPaasrunOnly-Critical-71794-Verify CCO support with default region on a WIF cluster and forward logs to lokiStack logstore[Serial]", func() {
currentPlatform := exutil.CheckPlatform(oc)
if currentPlatform != "azure" {
g.Skip("The platform is not Azure. Skipping case..")
}
exutil.By("Deploy LokiStack")
lokiStackTemplate := filepath.Join(loggingBaseDir, "lokistack", "lokistack-simple.yaml")
ls := lokiStack{
name: "lokistack-71794",
namespace: loggingNS,
tSize: "1x.demo",
storageType: s,
storageSecret: "storage-secret-71794",
storageClass: sc,
bucketName: "loki-71794-" + exutil.GetRandomString(),
template: lokiStackTemplate,
}
defer ls.removeObjectStorage(oc)
err := ls.prepareResourcesForLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
// Patch to remove region from Loki Operator subscription (default case)
removeRegion := `[
{
"op": "remove",
"path": "/spec/config/env/3"
}
]`
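// Removing index 3 assumes the region variable is the fourth entry of .spec.config.env in the Subscription; the patch index must be updated if that ordering ever changes.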
err = oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("patch").Args("sub", "loki-operator", "-n", loNS, "-p", removeRegion, "--type=json").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// LO controller pod will restart with new configuration after region is removed
waitForPodReadyWithLabel(oc, loNS, "name=loki-operator-controller-manager")
// Create LokiStack CR
defer ls.removeLokiStack(oc)
err = ls.deployLokiStack(oc)
o.Expect(err).NotTo(o.HaveOccurred())
ls.waitForLokiStackToBeReady(oc)
// Validate that credentials request created by LO has region as 'centralus' for default case
defaultRegion, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("CredentialsRequest", ls.name, "-n", ls.namespace, `-o=jsonpath={.spec.providerSpec.azureRegion}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(defaultRegion).Should(o.Equal("centralus"))
exutil.By("create a CLF to test forward to lokistack")
clf := clusterlogforwarder{
name: "clf-71794",
namespace: loggingNS,
serviceAccountName: "logcollector",
templateFile: filepath.Join(loggingBaseDir, "observability.openshift.io_clusterlogforwarder", "lokistack.yaml"),
secretName: "lokistack-secret",
collectApplicationLogs: true,
collectAuditLogs: true,
collectInfrastructureLogs: true,
waitForPodReady: true,
enableMonitoring: true,
}
clf.createServiceAccount(oc)
defer removeClusterRoleFromServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
err = addClusterRoleToServiceAccount(oc, clf.namespace, clf.serviceAccountName, "logging-collector-logs-writer")
o.Expect(err).NotTo(o.HaveOccurred())
defer resource{"secret", clf.secretName, clf.namespace}.clear(oc)
ls.createSecretFromGateway(oc, clf.secretName, clf.namespace, "")
defer clf.delete(oc)
clf.create(oc, "LOKISTACK_NAME="+ls.name, "LOKISTACK_NAMESPACE="+ls.namespace)
exutil.By("Validate Logs in Loki")
defer removeClusterRoleFromServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
err = addClusterRoleToServiceAccount(oc, oc.Namespace(), "default", "cluster-admin")
o.Expect(err).NotTo(o.HaveOccurred())
bearerToken := getSAToken(oc, "default", oc.Namespace())
route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
lc := newLokiClient(route).withToken(bearerToken).retry(5)
for _, logType := range []string{"infrastructure", "audit"} {
lc.waitForLogsAppearByKey(logType, "log_type", logType)
}
exutil.By("Validate log streams are pushed to external Azure Blob container")
ls.validateExternalObjectStorageForLogs(oc, []string{"application", "audit", "infrastructure"})
}) | |||||
file | openshift/openshift-tests-private | d61da851-4061-4e57-8841-940e96b486ab | loki_utils | import (
"context"
"crypto/tls"
"encoding/base64"
"encoding/json"
"fmt"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
"cloud.google.com/go/storage"
"github.com/aws/aws-sdk-go-v2/aws"
awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"google.golang.org/api/cloudresourcemanager/v1"
"google.golang.org/api/iam/v1"
"google.golang.org/api/iterator"
apierrors "k8s.io/apimachinery/pkg/api/errors"
k8sresource "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
) | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | package logging
import (
"context"
"crypto/tls"
"encoding/base64"
"encoding/json"
"fmt"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
"cloud.google.com/go/storage"
"github.com/aws/aws-sdk-go-v2/aws"
awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
g "github.com/onsi/ginkgo/v2"
o "github.com/onsi/gomega"
exutil "github.com/openshift/openshift-tests-private/test/extended/util"
"google.golang.org/api/cloudresourcemanager/v1"
"google.golang.org/api/iam/v1"
"google.golang.org/api/iterator"
apierrors "k8s.io/apimachinery/pkg/api/errors"
k8sresource "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
e2e "k8s.io/kubernetes/test/e2e/framework"
)
// s3Credential defines the s3 credentials
type s3Credential struct {
Region string
AccessKeyID string
SecretAccessKey string
Endpoint string // the endpoint of the s3 service; set for S3-compatible stores (e.g. ODF, MinIO) and left empty for AWS S3
}
func getAWSCredentialFromCluster(oc *exutil.CLI) s3Credential {
region, err := exutil.GetAWSClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
dirname := "/tmp/" + oc.Namespace() + "-creds"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/aws-creds", "-n", "kube-system", "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
accessKeyID, err := os.ReadFile(dirname + "/aws_access_key_id")
o.Expect(err).NotTo(o.HaveOccurred())
secretAccessKey, err := os.ReadFile(dirname + "/aws_secret_access_key")
o.Expect(err).NotTo(o.HaveOccurred())
cred := s3Credential{Region: region, AccessKeyID: string(accessKeyID), SecretAccessKey: string(secretAccessKey)}
return cred
}
func getMinIOCreds(oc *exutil.CLI, ns string) s3Credential {
dirname := "/tmp/" + oc.Namespace() + "-creds"
defer os.RemoveAll(dirname)
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/"+minioSecret, "-n", ns, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
accessKeyID, err := os.ReadFile(dirname + "/access_key_id")
o.Expect(err).NotTo(o.HaveOccurred())
secretAccessKey, err := os.ReadFile(dirname + "/secret_access_key")
o.Expect(err).NotTo(o.HaveOccurred())
endpoint := "http://" + getRouteAddress(oc, ns, "minio")
return s3Credential{Endpoint: endpoint, AccessKeyID: string(accessKeyID), SecretAccessKey: string(secretAccessKey)}
}
func generateS3Config(cred s3Credential) aws.Config {
var err error
var cfg aws.Config
if len(cred.Endpoint) > 0 {
customResolver := aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...interface{}) (aws.Endpoint, error) {
return aws.Endpoint{
URL: cred.Endpoint,
HostnameImmutable: true,
Source: aws.EndpointSourceCustom,
}, nil
})
// ODF and MinIO are deployed inside OCP clusters.
// In some clusters they can't be reached without a proxy, so add proxy settings to the s3 client when http_proxy or https_proxy is set in the environment.
httpClient := awshttp.NewBuildableClient().WithTransportOptions(func(tr *http.Transport) {
proxy := getProxyFromEnv()
if len(proxy) > 0 {
proxyURL, err := url.Parse(proxy)
o.Expect(err).NotTo(o.HaveOccurred())
tr.Proxy = http.ProxyURL(proxyURL)
}
tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
})
cfg, err = config.LoadDefaultConfig(context.TODO(),
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(cred.AccessKeyID, cred.SecretAccessKey, "")),
config.WithEndpointResolverWithOptions(customResolver),
config.WithHTTPClient(httpClient),
config.WithRegion("auto"))
} else {
// aws s3
cfg, err = config.LoadDefaultConfig(context.TODO(),
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(cred.AccessKeyID, cred.SecretAccessKey, "")),
config.WithRegion(cred.Region))
}
o.Expect(err).NotTo(o.HaveOccurred())
return cfg
}
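// newS3ClientFromCredential is an illustrative helper sketched here for clarity; it is
// not part of the original test utilities, and the name is an assumption. It shows how
// the aws.Config produced by generateS3Config becomes the *s3.Client consumed by the
// bucket helpers below.
func newS3ClientFromCredential(cred s3Credential) *s3.Client {
// generateS3Config picks the right code path for plain AWS S3 (Region set) or an
// S3-compatible store such as ODF/MinIO (Endpoint set).
cfg := generateS3Config(cred)
return s3.NewFromConfig(cfg)
}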
func createS3Bucket(client *s3.Client, bucketName, region string) error {
// check if the bucket exists or not
// if exists, clear all the objects in the bucket
// if not, create the bucket
exist := false
buckets, err := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{})
o.Expect(err).NotTo(o.HaveOccurred())
for _, bu := range buckets.Buckets {
if *bu.Name == bucketName {
exist = true
break
}
}
// clear all the objects in the bucket
if exist {
return emptyS3Bucket(client, bucketName)
}
/*
Per https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html#API_CreateBucket_RequestBody,
us-east-1 is the default region and is not a valid value for LocationConstraint;
using `LocationConstraint: types.BucketLocationConstraint("us-east-1")` returns the error `InvalidLocationConstraint`.
Therefore the CreateBucketConfiguration is omitted when the region is us-east-1.
*/
if len(region) == 0 || region == "us-east-1" {
_, err = client.CreateBucket(context.TODO(), &s3.CreateBucketInput{Bucket: &bucketName})
return err
}
_, err = client.CreateBucket(context.TODO(), &s3.CreateBucketInput{Bucket: &bucketName, CreateBucketConfiguration: &types.CreateBucketConfiguration{LocationConstraint: types.BucketLocationConstraint(region)}})
return err
}
func deleteS3Bucket(client *s3.Client, bucketName string) error {
// empty bucket
err := emptyS3Bucket(client, bucketName)
if err != nil {
return err
}
// delete bucket
_, err = client.DeleteBucket(context.TODO(), &s3.DeleteBucketInput{Bucket: &bucketName})
return err
}
func emptyS3Bucket(client *s3.Client, bucketName string) error {
// List objects in the bucket
objects, err := client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{
Bucket: &bucketName,
})
if err != nil {
return err
}
// Delete objects in the bucket
if len(objects.Contents) > 0 {
objectIdentifiers := make([]types.ObjectIdentifier, len(objects.Contents))
for i, object := range objects.Contents {
objectIdentifiers[i] = types.ObjectIdentifier{Key: object.Key}
}
quiet := true
_, err = client.DeleteObjects(context.TODO(), &s3.DeleteObjectsInput{
Bucket: &bucketName,
Delete: &types.Delete{
Objects: objectIdentifiers,
Quiet: &quiet,
},
})
if err != nil {
return err
}
}
// Check if there are more objects to delete and handle pagination
if objects.IsTruncated != nil && *objects.IsTruncated {
return emptyS3Bucket(client, bucketName)
}
return nil
}
// createSecretForAWSS3Bucket creates a secret for Loki to connect to s3 bucket
func createSecretForAWSS3Bucket(oc *exutil.CLI, bucketName, secretName, ns string, cred s3Credential) error {
if len(secretName) == 0 {
return fmt.Errorf("secret name shouldn't be empty")
}
endpoint := "https://s3." + cred.Region + ".amazonaws.com"
return oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", secretName, "--from-literal=access_key_id="+cred.AccessKeyID, "--from-literal=access_key_secret="+cred.SecretAccessKey, "--from-literal=region="+cred.Region, "--from-literal=bucketnames="+bucketName, "--from-literal=endpoint="+endpoint, "-n", ns).Execute()
}
func createSecretForODFBucket(oc *exutil.CLI, bucketName, secretName, ns string) error {
if len(secretName) == 0 {
return fmt.Errorf("secret name shouldn't be empty")
}
dirname := "/tmp/" + oc.Namespace() + "-creds"
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(dirname)
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/noobaa-admin", "-n", "openshift-storage", "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
endpoint := "http://s3.openshift-storage.svc:80"
return oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", secretName, "--from-file=access_key_id="+dirname+"/AWS_ACCESS_KEY_ID", "--from-file=access_key_secret="+dirname+"/AWS_SECRET_ACCESS_KEY", "--from-literal=bucketnames="+bucketName, "--from-literal=endpoint="+endpoint, "-n", ns).Execute()
}
func createSecretForMinIOBucket(oc *exutil.CLI, bucketName, secretName, ns string, cred s3Credential) error {
if len(secretName) == 0 {
return fmt.Errorf("secret name shouldn't be empty")
}
return oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", secretName, "--from-literal=access_key_id="+cred.AccessKeyID, "--from-literal=access_key_secret="+cred.SecretAccessKey, "--from-literal=bucketnames="+bucketName, "--from-literal=endpoint="+cred.Endpoint, "-n", ns).Execute()
}
func getGCPProjectNumber(projectID string) (string, error) {
crmService, err := cloudresourcemanager.NewService(context.Background())
if err != nil {
return "", err
}
project, err := crmService.Projects.Get(projectID).Do()
if err != nil {
return "", err
}
return strconv.FormatInt(project.ProjectNumber, 10), nil
}
func getGCPAudience(providerName string) (string, error) {
ctx := context.Background()
service, err := iam.NewService(ctx)
if err != nil {
return "", fmt.Errorf("iam.NewService: %w", err)
}
audience, err := service.Projects.Locations.WorkloadIdentityPools.Providers.Get(providerName).Do()
if err != nil {
return "", fmt.Errorf("can't get audience: %v", err)
}
return audience.Oidc.AllowedAudiences[0], nil
}
func generateServiceAccountNameForGCS(clusterName string) string {
// Service Account should be between 6-30 characters long
name := clusterName + getRandomString()
return name
}
func createServiceAccountOnGCP(projectID, name string) (*iam.ServiceAccount, error) {
e2e.Logf("start to creating serviceaccount on GCP")
ctx := context.Background()
service, err := iam.NewService(ctx)
if err != nil {
return nil, fmt.Errorf("iam.NewService: %w", err)
}
request := &iam.CreateServiceAccountRequest{
AccountId: name,
ServiceAccount: &iam.ServiceAccount{
DisplayName: "Service Account for " + name,
},
}
account, err := service.Projects.ServiceAccounts.Create("projects/"+projectID, request).Do()
if err != nil {
return nil, fmt.Errorf("failed to create serviceaccount: %w", err)
}
e2e.Logf("Created service account: %v", account)
return account, nil
}
// ref: https://github.com/GoogleCloudPlatform/golang-samples/blob/main/iam/quickstart/quickstart.go
func addBinding(projectID, member, role string) error {
crmService, err := cloudresourcemanager.NewService(context.Background())
if err != nil {
return fmt.Errorf("cloudresourcemanager.NewService: %v", err)
}
policy, err := getPolicy(crmService, projectID)
if err != nil {
return fmt.Errorf("error getting policy: %v", err)
}
// Find the policy binding for role. Only one binding can have the role.
var binding *cloudresourcemanager.Binding
for _, b := range policy.Bindings {
if b.Role == role {
binding = b
break
}
}
if binding != nil {
// If the binding exists, adds the member to the binding
binding.Members = append(binding.Members, member)
} else {
// If the binding does not exist, adds a new binding to the policy
binding = &cloudresourcemanager.Binding{
Role: role,
Members: []string{member},
}
policy.Bindings = append(policy.Bindings, binding)
}
return setPolicy(crmService, projectID, policy)
}
// removeMember removes the member from the project's IAM policy
func removeMember(projectID, member, role string) error {
crmService, err := cloudresourcemanager.NewService(context.Background())
if err != nil {
return fmt.Errorf("cloudresourcemanager.NewService: %v", err)
}
policy, err := getPolicy(crmService, projectID)
if err != nil {
return fmt.Errorf("error getting policy: %v", err)
}
// Find the policy binding for role. Only one binding can have the role.
var binding *cloudresourcemanager.Binding
var bindingIndex int
for i, b := range policy.Bindings {
if b.Role == role {
binding = b
bindingIndex = i
break
}
}
if len(binding.Members) == 1 && binding.Members[0] == member {
// If the member is the only member in the binding, removes the binding
last := len(policy.Bindings) - 1
policy.Bindings[bindingIndex] = policy.Bindings[last]
policy.Bindings = policy.Bindings[:last]
} else {
// If there is more than one member in the binding, removes the member
var memberIndex int
var exist bool
for i, mm := range binding.Members {
if mm == member {
memberIndex = i
exist = true
break
}
}
if exist {
last := len(policy.Bindings[bindingIndex].Members) - 1
binding.Members[memberIndex] = binding.Members[last]
binding.Members = binding.Members[:last]
}
}
return setPolicy(crmService, projectID, policy)
}
// getPolicy gets the project's IAM policy
func getPolicy(crmService *cloudresourcemanager.Service, projectID string) (*cloudresourcemanager.Policy, error) {
request := new(cloudresourcemanager.GetIamPolicyRequest)
policy, err := crmService.Projects.GetIamPolicy(projectID, request).Do()
if err != nil {
return nil, err
}
return policy, nil
}
// setPolicy sets the project's IAM policy
func setPolicy(crmService *cloudresourcemanager.Service, projectID string, policy *cloudresourcemanager.Policy) error {
request := new(cloudresourcemanager.SetIamPolicyRequest)
request.Policy = policy
_, err := crmService.Projects.SetIamPolicy(projectID, request).Do()
return err
}
func grantPermissionsToGCPServiceAccount(poolID, projectID, projectNumber, lokiNS, lokiStackName, serviceAccountEmail string) error {
gcsRoles := []string{
"roles/iam.workloadIdentityUser",
"roles/storage.objectAdmin",
}
subjects := []string{
"system:serviceaccount:" + lokiNS + ":" + lokiStackName,
"system:serviceaccount:" + lokiNS + ":" + lokiStackName + "-ruler",
}
for _, role := range gcsRoles {
err := addBinding(projectID, "serviceAccount:"+serviceAccountEmail, role)
if err != nil {
return fmt.Errorf("error adding role %s to %s: %v", role, serviceAccountEmail, err)
}
for _, sub := range subjects {
err := addBinding(projectID, "principal://iam.googleapis.com/projects/"+projectNumber+"/locations/global/workloadIdentityPools/"+poolID+"/subject/"+sub, role)
if err != nil {
return fmt.Errorf("error adding role %s to %s: %v", role, sub, err)
}
}
}
return nil
}
func removePermissionsFromGCPServiceAccount(poolID, projectID, projectNumber, lokiNS, lokiStackName, serviceAccountEmail string) error {
gcsRoles := []string{
"roles/iam.workloadIdentityUser",
"roles/storage.objectAdmin",
}
subjects := []string{
"system:serviceaccount:" + lokiNS + ":" + lokiStackName,
"system:serviceaccount:" + lokiNS + ":" + lokiStackName + "-ruler",
}
for _, role := range gcsRoles {
err := removeMember(projectID, "serviceAccount:"+serviceAccountEmail, role)
if err != nil {
return fmt.Errorf("error removing role %s from %s: %v", role, serviceAccountEmail, err)
}
for _, sub := range subjects {
err := removeMember(projectID, "principal://iam.googleapis.com/projects/"+projectNumber+"/locations/global/workloadIdentityPools/"+poolID+"/subject/"+sub, role)
if err != nil {
return fmt.Errorf("error removing role %s from %s: %v", role, sub, err)
}
}
}
return nil
}
func removeServiceAccountFromGCP(name string) error {
ctx := context.Background()
service, err := iam.NewService(ctx)
if err != nil {
return fmt.Errorf("iam.NewService: %w", err)
}
_, err = service.Projects.ServiceAccounts.Delete(name).Do()
if err != nil {
return fmt.Errorf("can't remove service account: %v", err)
}
return nil
}
func createSecretForGCSBucketWithSTS(oc *exutil.CLI, namespace, secretName, bucketName string) error {
return oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", "-n", namespace, secretName, "--from-literal=bucketname="+bucketName).Execute()
}
// creates a secret for Loki to connect to gcs bucket
func createSecretForGCSBucket(oc *exutil.CLI, bucketName, secretName, ns string) error {
if len(secretName) == 0 {
return fmt.Errorf("secret name shouldn't be empty")
}
//get gcp-credentials from env var GOOGLE_APPLICATION_CREDENTIALS
gcsCred := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
return oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", secretName, "-n", ns, "--from-literal=bucketname="+bucketName, "--from-file=key.json="+gcsCred).Execute()
}
// creates a secret for Loki to connect to azure container
func createSecretForAzureContainer(oc *exutil.CLI, bucketName, secretName, ns string) error {
environment := "AzureGlobal"
cloudName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.azure.cloudName}").Output()
if err != nil {
return fmt.Errorf("can't get azure cluster type %v", err)
}
if strings.ToLower(cloudName) == "azureusgovernmentcloud" {
environment = "AzureUSGovernment"
}
if strings.ToLower(cloudName) == "azurechinacloud" {
environment = "AzureChinaCloud"
}
if strings.ToLower(cloudName) == "azuregermancloud" {
environment = "AzureGermanCloud"
}
accountName, accountKey, err1 := exutil.GetAzureStorageAccountFromCluster(oc)
if err1 != nil {
return fmt.Errorf("can't get azure storage account from cluster: %v", err1)
}
return oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", "-n", ns, secretName, "--from-literal=environment="+environment, "--from-literal=container="+bucketName, "--from-literal=account_name="+accountName, "--from-literal=account_key="+accountKey).Execute()
}
func createSecretForSwiftContainer(oc *exutil.CLI, containerName, secretName, ns string, cred *exutil.OpenstackCredentials) error {
userID, domainID := exutil.GetOpenStackUserIDAndDomainID(cred)
err := oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", "-n", ns, secretName,
"--from-literal=auth_url="+cred.Clouds.Openstack.Auth.AuthURL,
"--from-literal=username="+cred.Clouds.Openstack.Auth.Username,
"--from-literal=user_domain_name="+cred.Clouds.Openstack.Auth.UserDomainName,
"--from-literal=user_domain_id="+domainID,
"--from-literal=user_id="+userID,
"--from-literal=password="+cred.Clouds.Openstack.Auth.Password,
"--from-literal=domain_id="+domainID,
"--from-literal=domain_name="+cred.Clouds.Openstack.Auth.UserDomainName,
"--from-literal=container_name="+containerName,
"--from-literal=project_id="+cred.Clouds.Openstack.Auth.ProjectID,
"--from-literal=project_name="+cred.Clouds.Openstack.Auth.ProjectName,
"--from-literal=project_domain_id="+domainID,
"--from-literal=project_domain_name="+cred.Clouds.Openstack.Auth.UserDomainName).Execute()
return err
}
// checkODF check if the ODF is installed in the cluster or not
// it only checks the expected ODF storage classes and the svc/s3 service in the openshift-storage namespace
func checkODF(oc *exutil.CLI) bool {
svcFound := false
expectedSC := []string{"openshift-storage.noobaa.io", "ocs-storagecluster-ceph-rbd", "ocs-storagecluster-cephfs"}
var scInCluster []string
scs, err := oc.AdminKubeClient().StorageV1().StorageClasses().List(context.Background(), metav1.ListOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
for _, sc := range scs.Items {
scInCluster = append(scInCluster, sc.Name)
}
for _, s := range expectedSC {
if !contain(scInCluster, s) {
return false
}
}
_, err = oc.AdminKubeClient().CoreV1().Services("openshift-storage").Get(context.Background(), "s3", metav1.GetOptions{})
if err == nil {
svcFound = true
}
return svcFound
}
func createObjectBucketClaim(oc *exutil.CLI, ns, name string) error {
template := exutil.FixturePath("testdata", "logging", "odf", "objectBucketClaim.yaml")
obc := resource{"objectbucketclaims", name, ns}
err := obc.applyFromTemplate(oc, "-f", template, "-n", ns, "-p", "NAME="+name, "NAMESPACE="+ns)
if err != nil {
return err
}
obc.WaitForResourceToAppear(oc)
resource{"objectbuckets", "obc-" + ns + "-" + name, ns}.WaitForResourceToAppear(oc)
assertResourceStatus(oc, "objectbucketclaims", name, ns, "{.status.phase}", "Bound")
return nil
}
func deleteObjectBucketClaim(oc *exutil.CLI, ns, name string) error {
obc := resource{"objectbucketclaims", name, ns}
err := obc.clear(oc)
if err != nil {
return err
}
return obc.WaitUntilResourceIsGone(oc)
}
// checkMinIO checks whether a running MinIO pod and its service are available in the given namespace
func checkMinIO(oc *exutil.CLI, ns string) (bool, error) {
podReady, svcFound := false, false
pod, err := oc.AdminKubeClient().CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{LabelSelector: "app=minio"})
if err != nil {
return false, err
}
if len(pod.Items) > 0 && pod.Items[0].Status.Phase == "Running" {
podReady = true
}
_, err = oc.AdminKubeClient().CoreV1().Services(ns).Get(context.Background(), "minio", metav1.GetOptions{})
if err == nil {
svcFound = true
}
return podReady && svcFound, err
}
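// useExtraObjectStorage returns which extra object storage backend is available for Loki:
// "odf" if ODF is installed, "minio" if a MinIO instance exists or can be deployed, or "" when neither can be used.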
func useExtraObjectStorage(oc *exutil.CLI) string {
if checkODF(oc) {
e2e.Logf("use the existing ODF storage service")
return "odf"
}
ready, err := checkMinIO(oc, minioNS)
if ready {
e2e.Logf("use existing MinIO storage service")
return "minio"
}
if err != nil && (strings.Contains(err.Error(), "No resources found") || strings.Contains(err.Error(), "not found")) {
e2e.Logf("deploy MinIO and use this MinIO as storage service")
deployMinIO(oc)
return "minio"
}
return ""
}
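// patchLokiOperatorWithAWSRoleArn injects the ROLEARN env var into the loki-operator subscription
// so the operator pods are redeployed with the IAM role to assume on AWS STS clusters.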
func patchLokiOperatorWithAWSRoleArn(oc *exutil.CLI, subName, subNamespace, roleArn string) {
roleArnPatchConfig := `{
"spec": {
"config": {
"env": [
{
"name": "ROLEARN",
"value": "%s"
}
]
}
}
}`
err := oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("patch").Args("sub", subName, "-n", subNamespace, "-p", fmt.Sprintf(roleArnPatchConfig, roleArn), "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodReadyWithLabel(oc, loNS, "name=loki-operator-controller-manager")
}
// getStorageType returns the backend storage type to use based on the cluster platform
func getStorageType(oc *exutil.CLI) string {
platform := exutil.CheckPlatform(oc)
switch platform {
case "aws":
{
return "s3"
}
case "gcp":
{
return "gcs"
}
case "azure":
{
return "azure"
}
case "openstack":
{
return "swift"
}
default:
{
return useExtraObjectStorage(oc)
}
}
}
// lokiStack contains the configurations of loki stack
type lokiStack struct {
name string // lokiStack name
namespace string // lokiStack namespace
tSize string // size
storageType string // the backend storage type, currently support s3, gcs, azure, swift, ODF and minIO
storageSecret string // the secret name for loki to use to connect to backend storage
storageClass string // storage class name
bucketName string // the bucket or container name where loki stores its data
template string // the file used to create the loki stack
}
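// Below is a minimal, illustrative sketch of how a test case might fill in a lokiStack value before
// preparing storage and deploying the CR; the size, storage class, secret and template names are
// hypothetical placeholders, not fixed values used by this file.
//
//	ls := lokiStack{
//		name:          "loki-sample",
//		namespace:     "openshift-logging",
//		tSize:         "1x.demo",
//		storageType:   getStorageType(oc),
//		storageSecret: "storage-secret-" + getRandomString(),
//		storageClass:  "gp3-csi",
//		bucketName:    "logging-loki-" + getInfrastructureName(oc),
//		template:      exutil.FixturePath("testdata", "logging", "lokistack", "lokistack-simple.yaml"),
//	}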
func (l lokiStack) setTSize(size string) lokiStack {
l.tSize = size
return l
}
// prepareResourcesForLokiStack creates buckets/containers in backend storage provider, and creates the secret for Loki to use
func (l lokiStack) prepareResourcesForLokiStack(oc *exutil.CLI) error {
var err error
if len(l.bucketName) == 0 {
return fmt.Errorf("the bucketName should not be empty")
}
switch l.storageType {
case "s3":
{
var cfg aws.Config
region, err := exutil.GetAWSClusterRegion(oc)
if err != nil {
return err
}
if exutil.IsWorkloadIdentityCluster(oc) {
if !checkAWSCredentials() {
g.Skip("Skip since no AWS credetial! No Env AWS_SHARED_CREDENTIALS_FILE, Env CLUSTER_PROFILE_DIR or $HOME/.aws/credentials file")
}
partition := "aws"
if strings.HasPrefix(region, "us-gov") {
partition = "aws-us-gov"
}
cfg = readDefaultSDKExternalConfigurations(context.TODO(), region)
iamClient := newIamClient(cfg)
stsClient := newStsClient(cfg)
awsAccountID, _ := getAwsAccount(stsClient)
oidcName, err := getOIDC(oc)
o.Expect(err).NotTo(o.HaveOccurred())
lokiIAMRoleName := l.name + "-" + exutil.GetRandomString()
roleArn := createIAMRoleForLokiSTSDeployment(iamClient, oidcName, awsAccountID, partition, l.namespace, l.name, lokiIAMRoleName)
os.Setenv("LOKI_ROLE_NAME_ON_STS", lokiIAMRoleName)
patchLokiOperatorWithAWSRoleArn(oc, "loki-operator", loNS, roleArn)
createObjectStorageSecretOnAWSSTSCluster(oc, region, l.storageSecret, l.bucketName, l.namespace)
} else {
cred := getAWSCredentialFromCluster(oc)
cfg = generateS3Config(cred)
err = createSecretForAWSS3Bucket(oc, l.bucketName, l.storageSecret, l.namespace, cred)
o.Expect(err).NotTo(o.HaveOccurred())
}
client := newS3Client(cfg)
err = createS3Bucket(client, l.bucketName, region)
if err != nil {
return err
}
}
case "azure":
{
if exutil.IsWorkloadIdentityCluster(oc) {
if !readAzureCredentials() {
g.Skip("Azure Credentials not found. Skip case!")
} else {
performManagedIdentityAndSecretSetupForAzureWIF(oc, l.name, l.namespace, l.bucketName, l.storageSecret)
}
} else {
accountName, accountKey, err1 := exutil.GetAzureStorageAccountFromCluster(oc)
if err1 != nil {
return fmt.Errorf("can't get azure storage account from cluster: %v", err1)
}
client, err2 := exutil.NewAzureContainerClient(oc, accountName, accountKey, l.bucketName)
if err2 != nil {
return err2
}
err = exutil.CreateAzureStorageBlobContainer(client)
if err != nil {
return err
}
err = createSecretForAzureContainer(oc, l.bucketName, l.storageSecret, l.namespace)
}
}
case "gcs":
{
projectID, errGetID := exutil.GetGcpProjectID(oc)
o.Expect(errGetID).NotTo(o.HaveOccurred())
err = exutil.CreateGCSBucket(projectID, l.bucketName)
if err != nil {
return err
}
if exutil.IsWorkloadIdentityCluster(oc) {
clusterName := getInfrastructureName(oc)
gcsSAName := generateServiceAccountNameForGCS(clusterName)
os.Setenv("LOGGING_GCS_SERVICE_ACCOUNT_NAME", gcsSAName)
projectNumber, err1 := getGCPProjectNumber(projectID)
if err1 != nil {
return fmt.Errorf("can't get GCP project number: %v", err1)
}
poolID, err2 := getPoolID(oc)
if err2 != nil {
return fmt.Errorf("can't get pool ID: %v", err2)
}
sa, err3 := createServiceAccountOnGCP(projectID, gcsSAName)
if err3 != nil {
return fmt.Errorf("can't create service account: %v", err3)
}
os.Setenv("LOGGING_GCS_SERVICE_ACCOUNT_EMAIL", sa.Email)
err4 := grantPermissionsToGCPServiceAccount(poolID, projectID, projectNumber, l.namespace, l.name, sa.Email)
if err4 != nil {
return fmt.Errorf("can't add roles to the serviceaccount: %v", err4)
}
patchLokiOperatorOnGCPSTSforCCO(oc, loNS, projectNumber, poolID, sa.Email)
err = createSecretForGCSBucketWithSTS(oc, l.namespace, l.storageSecret, l.bucketName)
} else {
err = createSecretForGCSBucket(oc, l.bucketName, l.storageSecret, l.namespace)
}
}
case "swift":
{
cred, err1 := exutil.GetOpenStackCredentials(oc)
o.Expect(err1).NotTo(o.HaveOccurred())
client := exutil.NewOpenStackClient(cred, "object-store")
err = exutil.CreateOpenStackContainer(client, l.bucketName)
if err != nil {
return err
}
err = createSecretForSwiftContainer(oc, l.bucketName, l.storageSecret, l.namespace, cred)
}
case "odf":
{
err = createObjectBucketClaim(oc, l.namespace, l.bucketName)
if err != nil {
return err
}
err = createSecretForODFBucket(oc, l.bucketName, l.storageSecret, l.namespace)
}
case "minio":
{
cred := getMinIOCreds(oc, minioNS)
cfg := generateS3Config(cred)
client := newS3Client(cfg)
err = createS3Bucket(client, l.bucketName, "")
if err != nil {
return err
}
err = createSecretForMinIOBucket(oc, l.bucketName, l.storageSecret, l.namespace, cred)
}
}
return err
}
// deployLokiStack creates the lokiStack CR with basic settings: name, namespace, size, storage.secret.name, storage.secret.type, storageClassName
// optionalParameters is designed for adding parameters to deploy lokiStack with different tenants or some other settings
func (l lokiStack) deployLokiStack(oc *exutil.CLI, optionalParameters ...string) error {
var storage string
if l.storageType == "odf" || l.storageType == "minio" {
storage = "s3"
} else {
storage = l.storageType
}
lokistackTemplate := l.template
if GetIPVersionStackType(oc) == "ipv6single" {
lokistackTemplate = strings.Replace(l.template, ".yaml", "-ipv6.yaml", -1)
}
parameters := []string{"-f", lokistackTemplate, "-n", l.namespace, "-p", "NAME=" + l.name, "NAMESPACE=" + l.namespace, "SIZE=" + l.tSize, "SECRET_NAME=" + l.storageSecret, "STORAGE_TYPE=" + storage, "STORAGE_CLASS=" + l.storageClass}
if len(optionalParameters) != 0 {
parameters = append(parameters, optionalParameters...)
}
file, err := processTemplate(oc, parameters...)
defer os.Remove(file)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Can not process %v", parameters))
err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", file, "-n", l.namespace).Execute()
ls := resource{"lokistack", l.name, l.namespace}
ls.WaitForResourceToAppear(oc)
return err
}
func (l lokiStack) waitForLokiStackToBeReady(oc *exutil.CLI) {
for _, deploy := range []string{l.name + "-gateway", l.name + "-distributor", l.name + "-querier", l.name + "-query-frontend"} {
WaitForDeploymentPodsToBeReady(oc, l.namespace, deploy)
}
for _, ss := range []string{l.name + "-index-gateway", l.name + "-compactor", l.name + "-ruler", l.name + "-ingester"} {
waitForStatefulsetReady(oc, l.namespace, ss)
}
if exutil.IsWorkloadIdentityCluster(oc) {
currentPlatform := exutil.CheckPlatform(oc)
switch currentPlatform {
case "aws", "azure", "gcp":
validateCredentialsRequestGenerationOnSTS(oc, l.name, l.namespace)
}
}
}
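// Putting the helpers above together, an illustrative lifecycle for a LokiStack in a test case is:
//
//	defer ls.removeObjectStorage(oc)
//	err := ls.prepareResourcesForLokiStack(oc)
//	o.Expect(err).NotTo(o.HaveOccurred())
//	defer ls.removeLokiStack(oc)
//	err = ls.deployLokiStack(oc)
//	o.Expect(err).NotTo(o.HaveOccurred())
//	ls.waitForLokiStackToBeReady(oc)
//
// Any extra template parameters for deployLokiStack are passed as additional "KEY=value" strings.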
/*
// update existing lokistack CR
// if template is specified, then run command `oc process -f template -p patches | oc apply -f -`
// if template is not specified, then run command `oc patch lokistack/${l.name} -p patches`
// if use patch, should add `--type=` in the end of patches
func (l lokiStack) update(oc *exutil.CLI, template string, patches ...string) {
var err error
if template != "" {
parameters := []string{"-f", template, "-p", "NAME=" + l.name, "NAMESPACE=" + l.namespace}
if len(patches) > 0 {
parameters = append(parameters, patches...)
}
file, processErr := processTemplate(oc, parameters...)
defer os.Remove(file)
if processErr != nil {
e2e.Failf("error processing file: %v", processErr)
}
err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", file, "-n", l.namespace).Execute()
} else {
parameters := []string{"lokistack/" + l.name, "-n", l.namespace, "-p"}
parameters = append(parameters, patches...)
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args(parameters...).Execute()
}
if err != nil {
e2e.Failf("error updating lokistack: %v", err)
}
}
*/
func (l lokiStack) removeLokiStack(oc *exutil.CLI) {
resource{"lokistack", l.name, l.namespace}.clear(oc)
_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pvc", "-n", l.namespace, "-l", "app.kubernetes.io/instance="+l.name).Execute()
}
func (l lokiStack) removeObjectStorage(oc *exutil.CLI) {
resource{"secret", l.storageSecret, l.namespace}.clear(oc)
var err error
switch l.storageType {
case "s3":
{
var cfg aws.Config
if exutil.IsWorkloadIdentityCluster(oc) {
region, err := exutil.GetAWSClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
cfg = readDefaultSDKExternalConfigurations(context.TODO(), region)
iamClient := newIamClient(cfg)
deleteIAMroleonAWS(iamClient, os.Getenv("LOKI_ROLE_NAME_ON_STS"))
os.Unsetenv("LOKI_ROLE_NAME_ON_STS")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("sub", "loki-operator", "-n", loNS, "-p", `[{"op": "remove", "path": "/spec/config"}]`, "--type=json").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodReadyWithLabel(oc, loNS, "name=loki-operator-controller-manager")
} else {
cred := getAWSCredentialFromCluster(oc)
cfg = generateS3Config(cred)
}
client := newS3Client(cfg)
err = deleteS3Bucket(client, l.bucketName)
}
case "azure":
{
if exutil.IsWorkloadIdentityCluster(oc) {
resourceGroup, err := getAzureResourceGroupFromCluster(oc)
o.Expect(err).NotTo(o.HaveOccurred())
azureSubscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID")
cred := createNewDefaultAzureCredential()
deleteManagedIdentityOnAzure(cred, azureSubscriptionID, resourceGroup, l.name)
deleteAzureStorageAccount(cred, azureSubscriptionID, resourceGroup, os.Getenv("LOKI_OBJECT_STORAGE_STORAGE_ACCOUNT"))
os.Unsetenv("LOKI_OBJECT_STORAGE_STORAGE_ACCOUNT")
} else {
accountName, accountKey, err1 := exutil.GetAzureStorageAccountFromCluster(oc)
o.Expect(err1).NotTo(o.HaveOccurred())
client, err2 := exutil.NewAzureContainerClient(oc, accountName, accountKey, l.bucketName)
o.Expect(err2).NotTo(o.HaveOccurred())
err = exutil.DeleteAzureStorageBlobContainer(client)
}
}
case "gcs":
{
if exutil.IsWorkloadIdentityCluster(oc) {
sa := os.Getenv("LOGGING_GCS_SERVICE_ACCOUNT_NAME")
if sa == "" {
e2e.Logf("LOGGING_GCS_SERVICE_ACCOUNT_NAME is not set, no need to delete the serviceaccount")
} else {
os.Unsetenv("LOGGING_GCS_SERVICE_ACCOUNT_NAME")
email := os.Getenv("LOGGING_GCS_SERVICE_ACCOUNT_EMAIL")
if email == "" {
e2e.Logf("LOGGING_GCS_SERVICE_ACCOUNT_EMAIL is not set, no need to delete the policies")
} else {
os.Unsetenv("LOGGING_GCS_SERVICE_ACCOUNT_EMAIL")
projectID, errGetID := exutil.GetGcpProjectID(oc)
o.Expect(errGetID).NotTo(o.HaveOccurred())
projectNumber, _ := getGCPProjectNumber(projectID)
poolID, _ := getPoolID(oc)
err = removePermissionsFromGCPServiceAccount(poolID, projectID, projectNumber, l.namespace, l.name, email)
o.Expect(err).NotTo(o.HaveOccurred())
err = removeServiceAccountFromGCP("projects/" + projectID + "/serviceAccounts/" + email)
o.Expect(err).NotTo(o.HaveOccurred())
}
}
}
err = exutil.DeleteGCSBucket(l.bucketName)
}
case "swift":
{
cred, err1 := exutil.GetOpenStackCredentials(oc)
o.Expect(err1).NotTo(o.HaveOccurred())
client := exutil.NewOpenStackClient(cred, "object-store")
err = exutil.DeleteOpenStackContainer(client, l.bucketName)
}
case "odf":
{
err = deleteObjectBucketClaim(oc, l.namespace, l.bucketName)
}
case "minio":
{
cred := getMinIOCreds(oc, minioNS)
cfg := generateS3Config(cred)
client := newS3Client(cfg)
err = deleteS3Bucket(client, l.bucketName)
}
}
o.Expect(err).NotTo(o.HaveOccurred())
}
func (l lokiStack) createSecretFromGateway(oc *exutil.CLI, name, namespace, token string) {
dirname := "/tmp/" + oc.Namespace() + getRandomString()
defer os.RemoveAll(dirname)
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/"+l.name+"-gateway-ca-bundle", "-n", l.namespace, "--keys=service-ca.crt", "--confirm", "--to="+dirname).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
if token != "" {
err = oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", name, "-n", namespace, "--from-file=ca-bundle.crt="+dirname+"/service-ca.crt", "--from-literal=token="+token).Execute()
} else {
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", name, "-n", namespace, "--from-file=ca-bundle.crt="+dirname+"/service-ca.crt").Execute()
}
o.Expect(err).NotTo(o.HaveOccurred())
}
// TODO: add an option to provide TLS config
type lokiClient struct {
username string //Username for HTTP basic auth.
password string //Password for HTTP basic auth
address string //Server address.
orgID string //adds X-Scope-OrgID to API requests for representing tenant ID. Useful for requesting tenant data when bypassing an auth gateway.
bearerToken string //adds the Authorization header to API requests for authentication purposes.
bearerTokenFile string //adds the Authorization header to API requests for authentication purposes.
retries int //How many times to retry each query when getting an error response from Loki.
queryTags string //adds X-Query-Tags header to API requests.
quiet bool //Suppress query metadata.
}
// newLokiClient initializes a lokiClient with server address
func newLokiClient(routeAddress string) *lokiClient {
client := &lokiClient{}
client.address = routeAddress
client.retries = 5
client.quiet = true
return client
}
// retry sets how many times to retry each query
func (c *lokiClient) retry(retry int) *lokiClient {
nc := *c
nc.retries = retry
return &nc
}
// withToken sets the token used to do query
func (c *lokiClient) withToken(bearerToken string) *lokiClient {
nc := *c
nc.bearerToken = bearerToken
return &nc
}
func (c *lokiClient) withBasicAuth(username string, password string) *lokiClient {
nc := *c
nc.username = username
nc.password = password
return &nc
}
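// A minimal sketch of how the builder helpers are typically chained; the route, token and label
// values here are placeholders:
//
//	route := "https://" + getRouteAddress(oc, ls.namespace, ls.name)
//	lc := newLokiClient(route).withToken(bearerToken).retry(5)
//	lc.waitForLogsAppearByKey("application", "log_type", "application")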
/*
func (c *lokiClient) withTokenFile(bearerTokenFile string) *lokiClient {
nc := *c
nc.bearerTokenFile = bearerTokenFile
return &nc
}
*/
func (c *lokiClient) getHTTPRequestHeader() (http.Header, error) {
h := make(http.Header)
if c.username != "" && c.password != "" {
h.Set(
"Authorization",
"Basic "+base64.StdEncoding.EncodeToString([]byte(c.username+":"+c.password)),
)
}
h.Set("User-Agent", "loki-logcli")
if c.orgID != "" {
h.Set("X-Scope-OrgID", c.orgID)
}
if c.queryTags != "" {
h.Set("X-Query-Tags", c.queryTags)
}
if (c.username != "" || c.password != "") && (len(c.bearerToken) > 0 || len(c.bearerTokenFile) > 0) {
return nil, fmt.Errorf("at most one of HTTP basic auth (username/password), bearer-token & bearer-token-file is allowed to be configured")
}
if len(c.bearerToken) > 0 && len(c.bearerTokenFile) > 0 {
return nil, fmt.Errorf("at most one of the options bearer-token & bearer-token-file is allowed to be configured")
}
if c.bearerToken != "" {
h.Set("Authorization", "Bearer "+c.bearerToken)
}
if c.bearerTokenFile != "" {
b, err := os.ReadFile(c.bearerTokenFile)
if err != nil {
return nil, fmt.Errorf("unable to read authorization credentials file %s: %s", c.bearerTokenFile, err)
}
bearerToken := strings.TrimSpace(string(b))
h.Set("Authorization", "Bearer "+bearerToken)
}
return h, nil
}
func (c *lokiClient) doRequest(path, query string, out interface{}) error {
h, err := c.getHTTPRequestHeader()
if err != nil {
return err
}
resp, err := doHTTPRequest(h, c.address, path, query, "GET", c.quiet, c.retries, nil, 200)
if err != nil {
return err
}
return json.Unmarshal(resp, out)
}
func (c *lokiClient) doQuery(path string, query string) (*lokiQueryResponse, error) {
var err error
var r lokiQueryResponse
if err = c.doRequest(path, query, &r); err != nil {
return nil, err
}
return &r, nil
}
// query uses the /api/v1/query endpoint to execute an instant query
// lc.query("application", "sum by(kubernetes_namespace_name)(count_over_time({kubernetes_namespace_name=\"multiple-containers\"}[5m]))", 30, false, time.Now())
func (c *lokiClient) query(tenant string, queryStr string, limit int, forward bool, time time.Time) (*lokiQueryResponse, error) {
direction := func() string {
if forward {
return "FORWARD"
}
return "BACKWARD"
}
qsb := newQueryStringBuilder()
qsb.setString("query", queryStr)
qsb.setInt("limit", int64(limit))
qsb.setInt("time", time.UnixNano())
qsb.setString("direction", direction())
var logPath string
if len(tenant) > 0 {
logPath = apiPath + tenant + queryRangePath
} else {
logPath = queryRangePath
}
return c.doQuery(logPath, qsb.encode())
}
// queryRange uses the /api/v1/query_range endpoint to execute a range query
// tenant: application, infrastructure, audit
// queryStr: string to filter logs, for example: "{kubernetes_namespace_name="test"}"
// limit: max log count
// start: Start looking for logs at this absolute time (inclusive), e.g.: time.Now().Add(time.Duration(-1)*time.Hour) means 1 hour ago
// end: Stop looking for logs at this absolute time (exclusive)
// forward: true means scan forwards through logs, false means scan backwards through logs
func (c *lokiClient) queryRange(tenant string, queryStr string, limit int, start, end time.Time, forward bool) (*lokiQueryResponse, error) {
direction := func() string {
if forward {
return "FORWARD"
}
return "BACKWARD"
}
params := newQueryStringBuilder()
params.setString("query", queryStr)
params.setInt32("limit", limit)
params.setInt("start", start.UnixNano())
params.setInt("end", end.UnixNano())
params.setString("direction", direction())
var logPath string
if len(tenant) > 0 {
logPath = apiPath + tenant + queryRangePath
} else {
logPath = queryRangePath
}
return c.doQuery(logPath, params.encode())
}
func (c *lokiClient) searchLogsInLoki(tenant, query string) (*lokiQueryResponse, error) {
res, err := c.queryRange(tenant, query, 5, time.Now().Add(time.Duration(-1)*time.Hour), time.Now(), false)
return res, err
}
func (c *lokiClient) waitForLogsAppearByQuery(tenant, query string) error {
return wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
logs, err := c.searchLogsInLoki(tenant, query)
if err != nil {
e2e.Logf("\ngot err when searching logs: %v, retrying...\n", err)
return false, nil
}
if len(logs.Data.Result) > 0 {
e2e.Logf(`find logs by %s`, query)
return true, nil
}
return false, nil
})
}
func (c *lokiClient) searchByKey(tenant, key, value string) (*lokiQueryResponse, error) {
res, err := c.searchLogsInLoki(tenant, "{"+key+"=\""+value+"\"}")
return res, err
}
func (c *lokiClient) waitForLogsAppearByKey(tenant, key, value string) {
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
logs, err := c.searchByKey(tenant, key, value)
if err != nil {
e2e.Logf("\ngot err when searching logs: %v, retrying...\n", err)
return false, nil
}
if len(logs.Data.Result) > 0 {
e2e.Logf(`find logs by {%s="%s"}`, key, value)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf(`can't find logs by {%s="%s"} within 5 minutes`, key, value))
}
func (c *lokiClient) searchByNamespace(tenant, projectName string) (*lokiQueryResponse, error) {
res, err := c.searchLogsInLoki(tenant, "{kubernetes_namespace_name=\""+projectName+"\"}")
return res, err
}
func (c *lokiClient) waitForLogsAppearByProject(tenant, projectName string) {
err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
logs, err := c.searchByNamespace(tenant, projectName)
if err != nil {
e2e.Logf("\ngot err when searching logs: %v, retrying...\n", err)
return false, nil
}
if len(logs.Data.Result) > 0 {
e2e.Logf("find logs from %s project", projectName)
return true, nil
}
return false, nil
})
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("can't find logs from %s project in last 5 minutes", projectName))
}
// extractLogEntities extracts the log entities from a loki query response, designed for checking the content of log data in Loki
func extractLogEntities(lokiQueryResult *lokiQueryResponse) []LogEntity {
var lokiLogs []LogEntity
for _, res := range lokiQueryResult.Data.Result {
for _, value := range res.Values {
lokiLog := LogEntity{}
// only process log data, drop timestamp
json.Unmarshal([]byte(convertInterfaceToArray(value)[1]), &lokiLog)
lokiLogs = append(lokiLogs, lokiLog)
}
}
return lokiLogs
}
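// For example (illustrative), the entities returned by a query can be extracted and inspected like:
//
//	res, err := lc.searchByNamespace("application", appProj)
//	o.Expect(err).NotTo(o.HaveOccurred())
//	logs := extractLogEntities(res)
//	o.Expect(len(logs) > 0).To(o.BeTrue())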
// listLabelValues uses the /api/v1/label endpoint to list label values
func (c *lokiClient) listLabelValues(tenant, name string, start, end time.Time) (*labelResponse, error) {
lpath := fmt.Sprintf(labelValuesPath, url.PathEscape(name))
var labelResponse labelResponse
params := newQueryStringBuilder()
params.setInt("start", start.UnixNano())
params.setInt("end", end.UnixNano())
path := ""
if len(tenant) > 0 {
path = apiPath + tenant + lpath
} else {
path = lpath
}
if err := c.doRequest(path, params.encode(), &labelResponse); err != nil {
return nil, err
}
return &labelResponse, nil
}
// listLabelNames uses the /api/v1/label endpoint to list label names
func (c *lokiClient) listLabelNames(tenant string, start, end time.Time) (*labelResponse, error) {
var labelResponse labelResponse
params := newQueryStringBuilder()
params.setInt("start", start.UnixNano())
params.setInt("end", end.UnixNano())
path := ""
if len(tenant) > 0 {
path = apiPath + tenant + labelsPath
} else {
path = labelsPath
}
if err := c.doRequest(path, params.encode(), &labelResponse); err != nil {
return nil, err
}
return &labelResponse, nil
}
// listLabels gets the label names or values
func (c *lokiClient) listLabels(tenant, labelName string) ([]string, error) {
var labelResponse *labelResponse
var err error
start := time.Now().Add(time.Duration(-2) * time.Hour)
end := time.Now()
if len(labelName) > 0 {
labelResponse, err = c.listLabelValues(tenant, labelName, start, end)
} else {
labelResponse, err = c.listLabelNames(tenant, start, end)
}
if err != nil {
return nil, err
}
return labelResponse.Data, nil
}
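// Illustrative calls: an empty labelName lists the label names for a tenant, while a non-empty
// labelName lists that label's values:
//
//	names, err := lc.listLabels("application", "")
//	values, err := lc.listLabels("infrastructure", "kubernetes_namespace_name")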
func (c *lokiClient) queryRules(tenant, ns string) ([]byte, error) {
path := apiPath + tenant + rulesPath
params := url.Values{}
if ns != "" {
params.Add("kubernetes_namespace_name", ns)
}
h, err := c.getHTTPRequestHeader()
if err != nil {
return nil, err
}
resp, err := doHTTPRequest(h, c.address, path, params.Encode(), "GET", c.quiet, c.retries, nil, 200)
if err != nil {
/*
Ignore error "unexpected EOF", adding `h.Add("Accept-Encoding", "identity")` doesn't resolve the error.
This seems to be an issue in lokistack when tenant=application: recording rules are missing from the response.
There is no such error when tenant=infrastructure.
*/
if strings.Contains(err.Error(), "unexpected EOF") && len(resp) > 0 {
e2e.Logf("got error %s when reading the response, but ignore it", err.Error())
return resp, nil
}
return nil, err
}
return resp, nil
}
type queryStringBuilder struct {
values url.Values
}
func newQueryStringBuilder() *queryStringBuilder {
return &queryStringBuilder{
values: url.Values{},
}
}
func (b *queryStringBuilder) setString(name, value string) {
b.values.Set(name, value)
}
func (b *queryStringBuilder) setInt(name string, value int64) {
b.setString(name, strconv.FormatInt(value, 10))
}
func (b *queryStringBuilder) setInt32(name string, value int) {
b.setString(name, strconv.Itoa(value))
}
/*
func (b *queryStringBuilder) setStringArray(name string, values []string) {
for _, v := range values {
b.values.Add(name, v)
}
}
func (b *queryStringBuilder) setFloat32(name string, value float32) {
b.setString(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
}
func (b *queryStringBuilder) setFloat(name string, value float64) {
b.setString(name, strconv.FormatFloat(value, 'f', -1, 64))
}
*/
// encode returns the URL-encoded query string based on key-value
// parameters added to the builder calling Set functions.
func (b *queryStringBuilder) encode() string {
return b.values.Encode()
}
// compareClusterResources compares the remaining cluster resources with the resources requested by the user
func compareClusterResources(oc *exutil.CLI, cpu, memory string) bool {
nodes, err := exutil.GetSchedulableLinuxWorkerNodes(oc)
o.Expect(err).NotTo(o.HaveOccurred())
var remainingCPU, remainingMemory int64
re := exutil.GetRemainingResourcesNodesMap(oc, nodes)
for _, node := range nodes {
remainingCPU += re[node.Name].CPU
remainingMemory += re[node.Name].Memory
}
requiredCPU, _ := k8sresource.ParseQuantity(cpu)
requiredMemory, _ := k8sresource.ParseQuantity(memory)
e2e.Logf("the required cpu is: %d, and the required memory is: %d", requiredCPU.MilliValue(), requiredMemory.MilliValue())
e2e.Logf("the remaining cpu is: %d, and the remaning memory is: %d", remainingCPU, remainingMemory)
return remainingCPU > requiredCPU.MilliValue() && remainingMemory > requiredMemory.MilliValue()
}
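// The cpu and memory arguments are parsed as Kubernetes resource quantities, so callers pass
// values such as (hypothetical numbers):
//
//	hasEnoughResources := compareClusterResources(oc, "6", "16Gi")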
// validateInfraForLoki checks platform type
// supportedPlatforms the platform types which the case can be executed on, if it's empty, then skip this check
func validateInfraForLoki(oc *exutil.CLI, supportedPlatforms ...string) bool {
currentPlatform := exutil.CheckPlatform(oc)
if len(supportedPlatforms) > 0 {
return contain(supportedPlatforms, currentPlatform)
}
return true
}
// validateInfraAndResourcesForLoki checks the cluster's remaining resources and platform type
// supportedPlatforms the platform types which the case can be executed on, if it's empty, then skip this check
func validateInfraAndResourcesForLoki(oc *exutil.CLI, reqMemory, reqCPU string, supportedPlatforms ...string) bool {
return validateInfraForLoki(oc, supportedPlatforms...) && compareClusterResources(oc, reqCPU, reqMemory)
}
type externalLoki struct {
name string
namespace string
}
func (l externalLoki) deployLoki(oc *exutil.CLI) {
//Create configmap for Loki
cmTemplate := exutil.FixturePath("testdata", "logging", "external-log-stores", "loki", "loki-configmap.yaml")
lokiCM := resource{"configmap", l.name, l.namespace}
err := lokiCM.applyFromTemplate(oc, "-n", l.namespace, "-f", cmTemplate, "-p", "LOKINAMESPACE="+l.namespace, "-p", "LOKICMNAME="+l.name)
o.Expect(err).NotTo(o.HaveOccurred())
//Create Deployment for Loki
deployTemplate := exutil.FixturePath("testdata", "logging", "external-log-stores", "loki", "loki-deployment.yaml")
lokiDeploy := resource{"deployment", l.name, l.namespace}
err = lokiDeploy.applyFromTemplate(oc, "-n", l.namespace, "-f", deployTemplate, "-p", "LOKISERVERNAME="+l.name, "-p", "LOKINAMESPACE="+l.namespace, "-p", "LOKICMNAME="+l.name)
o.Expect(err).NotTo(o.HaveOccurred())
//Expose Loki as a Service
WaitForDeploymentPodsToBeReady(oc, l.namespace, l.name)
err = oc.AsAdmin().WithoutNamespace().Run("expose").Args("-n", l.namespace, "deployment", l.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// expose loki route
err = oc.AsAdmin().WithoutNamespace().Run("expose").Args("-n", l.namespace, "svc", l.name).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
func (l externalLoki) remove(oc *exutil.CLI) {
resource{"configmap", l.name, l.namespace}.clear(oc)
resource{"deployment", l.name, l.namespace}.clear(oc)
resource{"svc", l.name, l.namespace}.clear(oc)
resource{"route", l.name, l.namespace}.clear(oc)
}
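// An illustrative flow for using this plain Loki deployment as an external log store (names are placeholders):
//
//	loki := externalLoki{name: "loki-server", namespace: oc.Namespace()}
//	defer loki.remove(oc)
//	loki.deployLoki(oc)
//	route := "http://" + getRouteAddress(oc, loki.namespace, loki.name)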
func deployMinIO(oc *exutil.CLI) {
// create namespace
_, err := oc.AdminKubeClient().CoreV1().Namespaces().Get(context.Background(), minioNS, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("namespace", minioNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
// create secret
_, err = oc.AdminKubeClient().CoreV1().Secrets(minioNS).Get(context.Background(), minioSecret, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", minioSecret, "-n", minioNS, "--from-literal=access_key_id="+getRandomString(), "--from-literal=secret_access_key=passwOOrd"+getRandomString()).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
}
// deploy minIO
deployTemplate := exutil.FixturePath("testdata", "logging", "minIO", "deploy.yaml")
deployFile, err := processTemplate(oc, "-n", minioNS, "-f", deployTemplate, "-p", "NAMESPACE="+minioNS, "NAME=minio", "SECRET_NAME="+minioSecret)
defer os.Remove(deployFile)
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().Run("apply").Args("-f", deployFile, "-n", minioNS).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
// wait for minio to be ready
for _, rs := range []string{"deployment", "svc", "route"} {
resource{rs, "minio", minioNS}.WaitForResourceToAppear(oc)
}
WaitForDeploymentPodsToBeReady(oc, minioNS, "minio")
}
/*
func removeMinIO(oc *exutil.CLI) {
deleteNamespace(oc, minioNS)
}
*/
// queryAlertManagerForActiveAlerts queries the user-workload alert manager when the isUserWorkloadAM parameter is true, otherwise the platform alertmanager-main.
// All active alerts should be returned when querying Alert Managers
func queryAlertManagerForActiveAlerts(oc *exutil.CLI, token string, isUserWorkloadAM bool, alertName string, timeInMinutes int) {
var err error
if !isUserWorkloadAM {
alertManagerRoute := getRouteAddress(oc, "openshift-monitoring", "alertmanager-main")
h := make(http.Header)
h.Add("Content-Type", "application/json")
h.Add("Authorization", "Bearer "+token)
params := url.Values{}
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, time.Duration(timeInMinutes)*time.Minute, true, func(context.Context) (done bool, err error) {
resp, err := doHTTPRequest(h, "https://"+alertManagerRoute, "/api/v2/alerts", params.Encode(), "GET", true, 5, nil, 200)
if err != nil {
return false, err
}
if strings.Contains(string(resp), alertName) {
return true, nil
}
e2e.Logf("Waiting for alert %s to be in Firing state", alertName)
return false, nil
})
} else {
userWorkloadAlertManagerURL := "https://alertmanager-user-workload.openshift-user-workload-monitoring.svc:9095/api/v2/alerts"
authBearer := " \"Authorization: Bearer " + token + "\""
cmd := "curl -k -H" + authBearer + " " + userWorkloadAlertManagerURL
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, time.Duration(timeInMinutes)*time.Minute, true, func(context.Context) (done bool, err error) {
alerts, err := exutil.RemoteShPod(oc, "openshift-monitoring", "prometheus-k8s-0", "/bin/sh", "-x", "-c", cmd)
if err != nil {
return false, err
}
if strings.Contains(string(alerts), alertName) {
return true, nil
}
e2e.Logf("Waiting for alert %s to be in Firing state", alertName)
return false, nil
})
}
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Alert %s is not firing after %d minutes", alertName, timeInMinutes))
}
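// Example (illustrative; the alert name and token are placeholders) of waiting up to 10 minutes
// for an alert to become active in the platform alertmanager:
//
//	queryAlertManagerForActiveAlerts(oc, token, false, "LokiStackStorageSchemaWarning", 10)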
// Deletes cluster-monitoring-config and user-workload-monitoring-config if they exist and recreates the configmaps.
// deleteUserWorkloadManifests() should be called once resources are created by enableUserWorkloadMonitoringForLogging()
func enableUserWorkloadMonitoringForLogging(oc *exutil.CLI) {
oc.AsAdmin().WithoutNamespace().Run("delete").Args("ConfigMap", "cluster-monitoring-config", "-n", "openshift-monitoring", "--ignore-not-found").Execute()
clusterMonitoringConfigPath := exutil.FixturePath("testdata", "logging", "loki-log-alerts", "cluster-monitoring-config.yaml")
clusterMonitoringConfig := resource{"configmap", "cluster-monitoring-config", "openshift-monitoring"}
err := clusterMonitoringConfig.applyFromTemplate(oc, "-n", clusterMonitoringConfig.namespace, "-f", clusterMonitoringConfigPath)
o.Expect(err).NotTo(o.HaveOccurred())
oc.AsAdmin().WithoutNamespace().Run("delete").Args("ConfigMap", "user-workload-monitoring-config", "-n", "openshift-user-workload-monitoring", "--ignore-not-found").Execute()
userWorkloadMConfigPath := exutil.FixturePath("testdata", "logging", "loki-log-alerts", "user-workload-monitoring-config.yaml")
userworkloadConfig := resource{"configmap", "user-workload-monitoring-config", "openshift-user-workload-monitoring"}
err = userworkloadConfig.applyFromTemplate(oc, "-n", userworkloadConfig.namespace, "-f", userWorkloadMConfigPath)
o.Expect(err).NotTo(o.HaveOccurred())
}
func deleteUserWorkloadManifests(oc *exutil.CLI) {
clusterMonitoringConfig := resource{"configmap", "cluster-monitoring-config", "openshift-monitoring"}
clusterMonitoringConfig.clear(oc)
userworkloadConfig := resource{"configmap", "user-workload-monitoring-config", "openshift-user-workload-monitoring"}
userworkloadConfig.clear(oc)
}
// To check CredentialsRequest is generated by Loki Operator on STS clusters for CCO flow
func validateCredentialsRequestGenerationOnSTS(oc *exutil.CLI, lokiStackName, lokiNamespace string) {
exutil.By("Validate that Loki Operator creates a CredentialsRequest object")
err := oc.AsAdmin().WithoutNamespace().Run("get").Args("CredentialsRequest", lokiStackName, "-n", lokiNamespace).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
cloudTokenPath, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("CredentialsRequest", lokiStackName, "-n", lokiNamespace, `-o=jsonpath={.spec.cloudTokenPath}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(cloudTokenPath).Should(o.Equal("/var/run/secrets/storage/serviceaccount/token"))
serviceAccountNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("CredentialsRequest", lokiStackName, "-n", lokiNamespace, `-o=jsonpath={.spec.serviceAccountNames}`).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(serviceAccountNames).Should(o.Equal(fmt.Sprintf(`["%s","%s-ruler"]`, lokiStackName, lokiStackName)))
}
// Function to check if tenant logs are present under the Google Cloud Storage bucket.
// Returns success if any one of the tenants in tenants[] is found.
func validatesIfLogsArePushedToGCSBucket(bucketName string, tenants []string) {
// Create a new GCS client
client, err := storage.NewClient(context.Background())
o.Expect(err).NotTo(o.HaveOccurred(), "Failed to create GCS client")
// Get a reference to the bucket
bucket := client.Bucket(bucketName)
// Create a query to list objects in the bucket
query := &storage.Query{}
// List objects in the bucket and check for tenant object
err = wait.PollUntilContextTimeout(context.Background(), 30*time.Second, 300*time.Second, true, func(context.Context) (done bool, err error) {
itr := bucket.Objects(context.Background(), query)
for {
objAttrs, err := itr.Next()
if err == iterator.Done {
break
}
if err != nil {
return false, err
}
for _, tenantName := range tenants {
if strings.Contains(objAttrs.Name, tenantName) {
e2e.Logf("Logs %s found under the bucket: %s", objAttrs.Name, bucketName)
return true, nil
}
}
}
e2e.Logf("Waiting for data to be available under bucket: %s", bucketName)
return false, nil
})
exutil.AssertWaitPollNoErr(err, "Timed out...No data is available under the bucket: "+bucketName)
}
// Global function to check if logs are pushed to external storage.
// Currently supports Amazon S3, Azure Blob Storage and Google Cloud Storage bucket.
func (l lokiStack) validateExternalObjectStorageForLogs(oc *exutil.CLI, tenants []string) {
switch l.storageType {
case "s3":
{
// For Amazon S3
var cfg aws.Config
if exutil.IsSTSCluster(oc) {
region, err := exutil.GetAWSClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
cfg = readDefaultSDKExternalConfigurations(context.TODO(), region)
} else {
cred := getAWSCredentialFromCluster(oc)
cfg = generateS3Config(cred)
}
s3Client := newS3Client(cfg)
validatesIfLogsArePushedToS3Bucket(s3Client, l.bucketName, tenants)
}
case "azure":
{
// For Azure Container Storage
var accountName string
var err error
_, storageAccountURISuffix := getStorageAccountURISuffixAndEnvForAzure(oc)
if exutil.IsSTSCluster(oc) {
accountName = os.Getenv("LOKI_OBJECT_STORAGE_STORAGE_ACCOUNT")
} else {
_, err = exutil.GetAzureCredentialFromCluster(oc)
o.Expect(err).NotTo(o.HaveOccurred())
accountName, _, err = exutil.GetAzureStorageAccountFromCluster(oc)
o.Expect(err).NotTo(o.HaveOccurred())
}
validatesIfLogsArePushedToAzureContainer(storageAccountURISuffix, accountName, l.bucketName, tenants)
}
case "gcs":
{
// For Google Cloud Storage Bucket
validatesIfLogsArePushedToGCSBucket(l.bucketName, tenants)
}
case "swift":
{
e2e.Logf("Currently swift is not supported")
// TODO swift code here
}
default:
{
e2e.Logf("Currently minio is not supported")
// TODO minio code here
}
}
}
// This function creates the cluster roles 'cluster-logging-application-view', 'cluster-logging-infrastructure-view' and 'cluster-logging-audit-view' introduced
// for fine grained read access to LokiStack logs. The ownership of these roles is moved to Cluster Observability Operator (COO) from Cluster Logging Operator (CLO) in Logging 6.0+
func createLokiClusterRolesForReadAccess(oc *exutil.CLI) {
rbacFile := exutil.FixturePath("testdata", "logging", "lokistack", "fine-grained-access-roles.yaml")
msg, err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", rbacFile).Output()
o.Expect(err).NotTo(o.HaveOccurred(), msg)
}
func deleteLokiClusterRolesForReadAccess(oc *exutil.CLI) {
roles := []string{"cluster-logging-application-view", "cluster-logging-infrastructure-view", "cluster-logging-audit-view"}
for _, role := range roles {
msg, err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("clusterrole", role).Output()
if err != nil {
e2e.Logf("Failed to delete Loki RBAC role '%s': %s", role, msg)
}
}
}
// Patches Loki Operator running on a GCP WIF cluster. Operator is deployed with CCO mode after patching.
func patchLokiOperatorOnGCPSTSforCCO(oc *exutil.CLI, namespace string, projectNumber string, poolID string, serviceAccount string) {
patchConfig := `{
"spec": {
"config": {
"env": [
{
"name": "PROJECT_NUMBER",
"value": "%s"
},
{
"name": "POOL_ID",
"value": "%s"
},
{
"name": "PROVIDER_ID",
"value": "%s"
},
{
"name": "SERVICE_ACCOUNT_EMAIL",
"value": "%s"
}
]
}
}
}`
err := oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("patch").Args("sub", "loki-operator", "-n", namespace, "-p", fmt.Sprintf(patchConfig, projectNumber, poolID, poolID, serviceAccount), "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodReadyWithLabel(oc, loNS, "name=loki-operator-controller-manager")
}
| package logging | ||||
function | openshift/openshift-tests-private | ca7cc5ae-9e9d-4cde-b59e-7e334e1ffe2b | getAWSCredentialFromCluster | ['"os"', '"github.com/aws/aws-sdk-go-v2/aws"'] | ['s3Credential'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func getAWSCredentialFromCluster(oc *exutil.CLI) s3Credential {
region, err := exutil.GetAWSClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
dirname := "/tmp/" + oc.Namespace() + "-creds"
defer os.RemoveAll(dirname)
err = os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/aws-creds", "-n", "kube-system", "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
accessKeyID, err := os.ReadFile(dirname + "/aws_access_key_id")
o.Expect(err).NotTo(o.HaveOccurred())
secretAccessKey, err := os.ReadFile(dirname + "/aws_secret_access_key")
o.Expect(err).NotTo(o.HaveOccurred())
cred := s3Credential{Region: region, AccessKeyID: string(accessKeyID), SecretAccessKey: string(secretAccessKey)}
return cred
} | logging | |||
function | openshift/openshift-tests-private | 3c0718e7-297d-4bbb-8d58-2966497a136b | getMinIOCreds | ['"net/http"', '"os"', 'awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"'] | ['s3Credential'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func getMinIOCreds(oc *exutil.CLI, ns string) s3Credential {
dirname := "/tmp/" + oc.Namespace() + "-creds"
defer os.RemoveAll(dirname)
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/"+minioSecret, "-n", ns, "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
accessKeyID, err := os.ReadFile(dirname + "/access_key_id")
o.Expect(err).NotTo(o.HaveOccurred())
secretAccessKey, err := os.ReadFile(dirname + "/secret_access_key")
o.Expect(err).NotTo(o.HaveOccurred())
endpoint := "http://" + getRouteAddress(oc, ns, "minio")
return s3Credential{Endpoint: endpoint, AccessKeyID: string(accessKeyID), SecretAccessKey: string(secretAccessKey)}
} | logging | |||
function | openshift/openshift-tests-private | 213b2a45-fc4f-43ad-b68e-30df332d5591 | generateS3Config | ['"context"', '"crypto/tls"', '"net/http"', '"net/url"', '"github.com/aws/aws-sdk-go-v2/aws"', 'awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"', '"github.com/aws/aws-sdk-go-v2/config"', '"github.com/aws/aws-sdk-go-v2/credentials"', '"github.com/aws/aws-sdk-go-v2/service/s3"'] | ['s3Credential'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func generateS3Config(cred s3Credential) aws.Config {
var err error
var cfg aws.Config
if len(cred.Endpoint) > 0 {
customResolver := aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...interface{}) (aws.Endpoint, error) {
return aws.Endpoint{
URL: cred.Endpoint,
HostnameImmutable: true,
Source: aws.EndpointSourceCustom,
}, nil
})
// For ODF and Minio, they're deployed in OCP clusters
// In some clusters, we can't connect to it without a proxy, so add proxy settings to the s3 client when http_proxy or https_proxy is set in the environment
httpClient := awshttp.NewBuildableClient().WithTransportOptions(func(tr *http.Transport) {
proxy := getProxyFromEnv()
if len(proxy) > 0 {
proxyURL, err := url.Parse(proxy)
o.Expect(err).NotTo(o.HaveOccurred())
tr.Proxy = http.ProxyURL(proxyURL)
}
tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
})
cfg, err = config.LoadDefaultConfig(context.TODO(),
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(cred.AccessKeyID, cred.SecretAccessKey, "")),
config.WithEndpointResolverWithOptions(customResolver),
config.WithHTTPClient(httpClient),
config.WithRegion("auto"))
} else {
// aws s3
cfg, err = config.LoadDefaultConfig(context.TODO(),
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(cred.AccessKeyID, cred.SecretAccessKey, "")),
config.WithRegion(cred.Region))
}
o.Expect(err).NotTo(o.HaveOccurred())
return cfg
} | logging | |||
function | openshift/openshift-tests-private | 9c2a3b82-3559-4161-ac5b-7c95381c68eb | createS3Bucket | ['"context"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/service/s3"', '"github.com/aws/aws-sdk-go-v2/service/s3/types"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func createS3Bucket(client *s3.Client, bucketName, region string) error {
// check if the bucket exists or not
// if exists, clear all the objects in the bucket
// if not, create the bucket
exist := false
buckets, err := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{})
o.Expect(err).NotTo(o.HaveOccurred())
for _, bu := range buckets.Buckets {
if *bu.Name == bucketName {
exist = true
break
}
}
// clear all the objects in the bucket
if exist {
return emptyS3Bucket(client, bucketName)
}
/*
Per https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html#API_CreateBucket_RequestBody,
us-east-1 is the default region and it's not a valid value of LocationConstraint,
using `LocationConstraint: types.BucketLocationConstraint("us-east-1")` gets error `InvalidLocationConstraint`.
Here remove the configuration when the region is us-east-1
*/
if len(region) == 0 || region == "us-east-1" {
_, err = client.CreateBucket(context.TODO(), &s3.CreateBucketInput{Bucket: &bucketName})
return err
}
_, err = client.CreateBucket(context.TODO(), &s3.CreateBucketInput{Bucket: &bucketName, CreateBucketConfiguration: &types.CreateBucketConfiguration{LocationConstraint: types.BucketLocationConstraint(region)}})
return err
} | logging | ||||
function | openshift/openshift-tests-private | 6a1d3ba1-bf82-4982-93f1-2674a5e2edca | deleteS3Bucket | ['"context"', '"github.com/aws/aws-sdk-go-v2/service/s3"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func deleteS3Bucket(client *s3.Client, bucketName string) error {
// empty bucket
err := emptyS3Bucket(client, bucketName)
if err != nil {
return err
}
// delete bucket
_, err = client.DeleteBucket(context.TODO(), &s3.DeleteBucketInput{Bucket: &bucketName})
return err
} | logging | ||||
function | openshift/openshift-tests-private | 60bc12f6-4f27-48cc-9015-22c4567c0d83 | emptyS3Bucket | ['"context"', '"github.com/aws/aws-sdk-go-v2/service/s3"', '"github.com/aws/aws-sdk-go-v2/service/s3/types"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func emptyS3Bucket(client *s3.Client, bucketName string) error {
// List objects in the bucket
objects, err := client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{
Bucket: &bucketName,
})
if err != nil {
return err
}
// Delete objects in the bucket
if len(objects.Contents) > 0 {
objectIdentifiers := make([]types.ObjectIdentifier, len(objects.Contents))
for i, object := range objects.Contents {
objectIdentifiers[i] = types.ObjectIdentifier{Key: object.Key}
}
quiet := true
_, err = client.DeleteObjects(context.TODO(), &s3.DeleteObjectsInput{
Bucket: &bucketName,
Delete: &types.Delete{
Objects: objectIdentifiers,
Quiet: &quiet,
},
})
if err != nil {
return err
}
}
// Check if there are more objects to delete and handle pagination
if objects.IsTruncated != nil && *objects.IsTruncated {
return emptyS3Bucket(client, bucketName)
}
return nil
} | logging | ||||
function | openshift/openshift-tests-private | ba2cf33d-bb9c-4898-bec9-8117cde06399 | createSecretForAWSS3Bucket | ['"fmt"', '"github.com/aws/aws-sdk-go-v2/service/s3"'] | ['s3Credential'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func createSecretForAWSS3Bucket(oc *exutil.CLI, bucketName, secretName, ns string, cred s3Credential) error {
if len(secretName) == 0 {
return fmt.Errorf("secret name shouldn't be empty")
}
endpoint := "https://s3." + cred.Region + ".amazonaws.com"
return oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", secretName, "--from-literal=access_key_id="+cred.AccessKeyID, "--from-literal=access_key_secret="+cred.SecretAccessKey, "--from-literal=region="+cred.Region, "--from-literal=bucketnames="+bucketName, "--from-literal=endpoint="+endpoint, "-n", ns).Execute()
} | logging | |||
function | openshift/openshift-tests-private | 97c610e5-e72b-4bf9-8db5-3f08bc28a0b7 | createSecretForODFBucket | ['"fmt"', '"net/http"', '"os"', '"cloud.google.com/go/storage"', 'awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"', '"github.com/aws/aws-sdk-go-v2/service/s3"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func createSecretForODFBucket(oc *exutil.CLI, bucketName, secretName, ns string) error {
if len(secretName) == 0 {
return fmt.Errorf("secret name shouldn't be empty")
}
dirname := "/tmp/" + oc.Namespace() + "-creds"
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
defer os.RemoveAll(dirname)
_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/noobaa-admin", "-n", "openshift-storage", "--confirm", "--to="+dirname).Output()
o.Expect(err).NotTo(o.HaveOccurred())
endpoint := "http://s3.openshift-storage.svc:80"
return oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", secretName, "--from-file=access_key_id="+dirname+"/AWS_ACCESS_KEY_ID", "--from-file=access_key_secret="+dirname+"/AWS_SECRET_ACCESS_KEY", "--from-literal=bucketnames="+bucketName, "--from-literal=endpoint="+endpoint, "-n", ns).Execute()
} | logging | ||||
function | openshift/openshift-tests-private | 8242cc38-6e05-448c-ba7b-af443e580a9d | createSecretForMinIOBucket | ['"fmt"'] | ['s3Credential'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func createSecretForMinIOBucket(oc *exutil.CLI, bucketName, secretName, ns string, cred s3Credential) error {
if len(secretName) == 0 {
return fmt.Errorf("secret name shouldn't be empty")
}
return oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", secretName, "--from-literal=access_key_id="+cred.AccessKeyID, "--from-literal=access_key_secret="+cred.SecretAccessKey, "--from-literal=bucketnames="+bucketName, "--from-literal=endpoint="+cred.Endpoint, "-n", ns).Execute()
} | logging | |||
function | openshift/openshift-tests-private | e0748421-6264-4925-9c03-63bafb5f23fc | getGCPProjectNumber | ['"context"', '"strconv"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func getGCPProjectNumber(projectID string) (string, error) {
crmService, err := cloudresourcemanager.NewService(context.Background())
if err != nil {
return "", err
}
project, err := crmService.Projects.Get(projectID).Do()
if err != nil {
return "", err
}
return strconv.FormatInt(project.ProjectNumber, 10), nil
} | logging | ||||
function | openshift/openshift-tests-private | d0121711-8e76-4499-8416-3dbb85de3a81 | getGCPAudience | ['"context"', '"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func getGCPAudience(providerName string) (string, error) {
ctx := context.Background()
service, err := iam.NewService(ctx)
if err != nil {
return "", fmt.Errorf("iam.NewService: %w", err)
}
audience, err := service.Projects.Locations.WorkloadIdentityPools.Providers.Get(providerName).Do()
if err != nil {
return "", fmt.Errorf("can't get audience: %v", err)
}
return audience.Oidc.AllowedAudiences[0], nil
} | logging | ||||
function | openshift/openshift-tests-private | 52138b42-65c5-4eaf-88d0-b36141cc3cb6 | generateServiceAccountNameForGCS | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func generateServiceAccountNameForGCS(clusterName string) string {
// Service Account should be between 6-30 characters long
name := clusterName + getRandomString()
return name
} | logging | |||||
function | openshift/openshift-tests-private | f6af6cb7-4c58-479a-a006-f740a1075166 | createServiceAccountOnGCP | ['"context"', '"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func createServiceAccountOnGCP(projectID, name string) (*iam.ServiceAccount, error) {
e2e.Logf("start to creating serviceaccount on GCP")
ctx := context.Background()
service, err := iam.NewService(ctx)
if err != nil {
return nil, fmt.Errorf("iam.NewService: %w", err)
}
request := &iam.CreateServiceAccountRequest{
AccountId: name,
ServiceAccount: &iam.ServiceAccount{
DisplayName: "Service Account for " + name,
},
}
account, err := service.Projects.ServiceAccounts.Create("projects/"+projectID, request).Do()
if err != nil {
return nil, fmt.Errorf("failed to create serviceaccount: %w", err)
}
e2e.Logf("Created service account: %v", account)
return account, nil
} | logging | ||||
function | openshift/openshift-tests-private | ecb51523-a3af-4135-898a-cb9f34b657b7 | addBinding | ['"context"', '"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func addBinding(projectID, member, role string) error {
crmService, err := cloudresourcemanager.NewService(context.Background())
if err != nil {
return fmt.Errorf("cloudresourcemanager.NewService: %v", err)
}
policy, err := getPolicy(crmService, projectID)
if err != nil {
return fmt.Errorf("error getting policy: %v", err)
}
// Find the policy binding for role. Only one binding can have the role.
var binding *cloudresourcemanager.Binding
for _, b := range policy.Bindings {
if b.Role == role {
binding = b
break
}
}
if binding != nil {
// If the binding exists, add the member to it
binding.Members = append(binding.Members, member)
} else {
// If the binding does not exist, add a new binding to the policy
binding = &cloudresourcemanager.Binding{
Role: role,
Members: []string{member},
}
policy.Bindings = append(policy.Bindings, binding)
}
return setPolicy(crmService, projectID, policy)
} | logging | ||||
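A sketch of the grant/revoke cycle these IAM helpers implement, assuming the surrounding test package; the project ID, member and role are placeholders (removeMember is the companion helper shown next).
func exampleIAMBinding() {
    project := "my-gcp-project"
    member := "serviceAccount:loki-tests@my-gcp-project.iam.gserviceaccount.com" // placeholder
    role := "roles/storage.objectAdmin"
    // grant the role, and revert the grant when the caller finishes
    o.Expect(addBinding(project, member, role)).NotTo(o.HaveOccurred())
    defer func() {
        o.Expect(removeMember(project, member, role)).NotTo(o.HaveOccurred())
    }()
}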
function | openshift/openshift-tests-private | 76f70203-d1e6-4bc7-b866-5cf2f59356dc | removeMember | ['"context"', '"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func removeMember(projectID, member, role string) error {
crmService, err := cloudresourcemanager.NewService(context.Background())
if err != nil {
return fmt.Errorf("cloudresourcemanager.NewService: %v", err)
}
policy, err := getPolicy(crmService, projectID)
if err != nil {
return fmt.Errorf("error getting policy: %v", err)
}
// Find the policy binding for role. Only one binding can have the role.
var binding *cloudresourcemanager.Binding
var bindingIndex int
for i, b := range policy.Bindings {
if b.Role == role {
binding = b
bindingIndex = i
break
}
}
if binding == nil {
// the role has no binding in this policy, so there is nothing to remove
return nil
}
if len(binding.Members) == 1 && binding.Members[0] == member {
// If the member is the only member in the binding, remove the whole binding
last := len(policy.Bindings) - 1
policy.Bindings[bindingIndex] = policy.Bindings[last]
policy.Bindings = policy.Bindings[:last]
} else {
// If there is more than one member in the binding, remove only that member
var memberIndex int
var exist bool
for i, mm := range binding.Members {
if mm == member {
memberIndex = i
exist = true
break
}
}
if exist {
last := len(policy.Bindings[bindingIndex].Members) - 1
binding.Members[memberIndex] = binding.Members[last]
binding.Members = binding.Members[:last]
}
}
return setPolicy(crmService, projectID, policy)
} | logging | ||||
function | openshift/openshift-tests-private | a7cedc70-a5f2-451d-820d-a881392f0958 | getPolicy | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func getPolicy(crmService *cloudresourcemanager.Service, projectID string) (*cloudresourcemanager.Policy, error) {
request := new(cloudresourcemanager.GetIamPolicyRequest)
policy, err := crmService.Projects.GetIamPolicy(projectID, request).Do()
if err != nil {
return nil, err
}
return policy, nil
} | logging | |||||
function | openshift/openshift-tests-private | b2122c5c-3333-4a02-9aba-dd59dad629bd | setPolicy | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func setPolicy(crmService *cloudresourcemanager.Service, projectID string, policy *cloudresourcemanager.Policy) error {
request := new(cloudresourcemanager.SetIamPolicyRequest)
request.Policy = policy
_, err := crmService.Projects.SetIamPolicy(projectID, request).Do()
return err
} | logging | |||||
function | openshift/openshift-tests-private | d66bd676-c1dc-422a-9ab8-c47e0777de6f | grantPermissionsToGCPServiceAccount | ['"fmt"', '"cloud.google.com/go/storage"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func grantPermissionsToGCPServiceAccount(poolID, projectID, projectNumber, lokiNS, lokiStackName, serviceAccountEmail string) error {
gcsRoles := []string{
"roles/iam.workloadIdentityUser",
"roles/storage.objectAdmin",
}
subjects := []string{
"system:serviceaccount:" + lokiNS + ":" + lokiStackName,
"system:serviceaccount:" + lokiNS + ":" + lokiStackName + "-ruler",
}
for _, role := range gcsRoles {
err := addBinding(projectID, "serviceAccount:"+serviceAccountEmail, role)
if err != nil {
return fmt.Errorf("error adding role %s to %s: %v", role, serviceAccountEmail, err)
}
for _, sub := range subjects {
err := addBinding(projectID, "principal://iam.googleapis.com/projects/"+projectNumber+"/locations/global/workloadIdentityPools/"+poolID+"/subject/"+sub, role)
if err != nil {
return fmt.Errorf("error adding role %s to %s: %v", role, sub, err)
}
}
}
return nil
} | logging | ||||
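A hedged sketch of how the GCS workload-identity helpers above fit together before a LokiStack deployment; the cluster name, LokiStack name and namespace are placeholders, and getPoolID is assumed to be the pool lookup helper referenced later in this file.
func exampleGCSWorkloadIdentitySetup(oc *exutil.CLI) {
    projectID, err := exutil.GetGcpProjectID(oc)
    o.Expect(err).NotTo(o.HaveOccurred())
    projectNumber, err := getGCPProjectNumber(projectID)
    o.Expect(err).NotTo(o.HaveOccurred())
    poolID, err := getPoolID(oc)
    o.Expect(err).NotTo(o.HaveOccurred())
    // create a dedicated service account and bind the roles Loki needs
    sa, err := createServiceAccountOnGCP(projectID, generateServiceAccountNameForGCS("mycluster"))
    o.Expect(err).NotTo(o.HaveOccurred())
    err = grantPermissionsToGCPServiceAccount(poolID, projectID, projectNumber, "openshift-logging", "lokistack-sample", sa.Email)
    o.Expect(err).NotTo(o.HaveOccurred())
}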
function | openshift/openshift-tests-private | 9deff7b4-7f18-44b4-b2a2-05bbc06e37b7 | removePermissionsFromGCPServiceAccount | ['"fmt"', '"cloud.google.com/go/storage"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func removePermissionsFromGCPServiceAccount(poolID, projectID, projectNumber, lokiNS, lokiStackName, serviceAccountEmail string) error {
gcsRoles := []string{
"roles/iam.workloadIdentityUser",
"roles/storage.objectAdmin",
}
subjects := []string{
"system:serviceaccount:" + lokiNS + ":" + lokiStackName,
"system:serviceaccount:" + lokiNS + ":" + lokiStackName + "-ruler",
}
for _, role := range gcsRoles {
err := removeMember(projectID, "serviceAccount:"+serviceAccountEmail, role)
if err != nil {
return fmt.Errorf("error removing role %s from %s: %v", role, serviceAccountEmail, err)
}
for _, sub := range subjects {
err := removeMember(projectID, "principal://iam.googleapis.com/projects/"+projectNumber+"/locations/global/workloadIdentityPools/"+poolID+"/subject/"+sub, role)
if err != nil {
return fmt.Errorf("error removing role %s from %s: %v", role, sub, err)
}
}
}
return nil
} | logging | ||||
function | openshift/openshift-tests-private | b64ebed0-47c6-493c-a7e9-61111839484f | removeServiceAccountFromGCP | ['"context"', '"fmt"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func removeServiceAccountFromGCP(name string) error {
ctx := context.Background()
service, err := iam.NewService(ctx)
if err != nil {
return fmt.Errorf("iam.NewService: %w", err)
}
_, err = service.Projects.ServiceAccounts.Delete(name).Do()
if err != nil {
return fmt.Errorf("can't remove service account: %v", err)
}
return nil
} | logging | ||||
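A small sketch only: removeServiceAccountFromGCP expects the fully-qualified resource name, the same format used during cleanup later in this file.
func exampleRemoveGCPServiceAccount(projectID, email string) {
    err := removeServiceAccountFromGCP("projects/" + projectID + "/serviceAccounts/" + email)
    o.Expect(err).NotTo(o.HaveOccurred())
}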
function | openshift/openshift-tests-private | be03e32d-8cb7-4741-a66f-12fb836708d4 | createSecretForGCSBucketWithSTS | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func createSecretForGCSBucketWithSTS(oc *exutil.CLI, namespace, secretName, bucketName string) error {
return oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", "-n", namespace, secretName, "--from-literal=bucketname="+bucketName).Execute()
} | logging | |||||
function | openshift/openshift-tests-private | f6acbe57-4223-4ca3-85ba-008aec0035e7 | createSecretForGCSBucket | ['"encoding/json"', '"fmt"', '"os"', '"github.com/aws/aws-sdk-go-v2/credentials"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func createSecretForGCSBucket(oc *exutil.CLI, bucketName, secretName, ns string) error {
if len(secretName) == 0 {
return fmt.Errorf("secret name shouldn't be empty")
}
// get the GCP credentials file path from the env var GOOGLE_APPLICATION_CREDENTIALS
gcsCred := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
return oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", secretName, "-n", ns, "--from-literal=bucketname="+bucketName, "--from-file=key.json="+gcsCred).Execute()
} | logging | ||||
function | openshift/openshift-tests-private | 073d4aeb-b315-4440-9979-2fa1291161aa | createSecretForAzureContainer | ['"fmt"', '"strings"', '"cloud.google.com/go/storage"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func createSecretForAzureContainer(oc *exutil.CLI, bucketName, secretName, ns string) error {
environment := "AzureGlobal"
cloudName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.azure.cloudName}").Output()
if err != nil {
return fmt.Errorf("can't get azure cluster type %v", err)
}
if strings.ToLower(cloudName) == "azureusgovernmentcloud" {
environment = "AzureUSGovernment"
}
if strings.ToLower(cloudName) == "azurechinacloud" {
environment = "AzureChinaCloud"
}
if strings.ToLower(cloudName) == "azuregermancloud" {
environment = "AzureGermanCloud"
}
accountName, accountKey, err1 := exutil.GetAzureStorageAccountFromCluster(oc)
if err1 != nil {
return fmt.Errorf("can't get azure storage account from cluster: %v", err1)
}
return oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", "-n", ns, secretName, "--from-literal=environment="+environment, "--from-literal=container="+bucketName, "--from-literal=account_name="+accountName, "--from-literal=account_key="+accountKey).Execute()
} | logging | ||||
function | openshift/openshift-tests-private | dc944a6b-964c-4af5-84f4-700704245d60 | createSecretForSwiftContainer | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func createSecretForSwiftContainer(oc *exutil.CLI, containerName, secretName, ns string, cred *exutil.OpenstackCredentials) error {
userID, domainID := exutil.GetOpenStackUserIDAndDomainID(cred)
err := oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", "-n", ns, secretName,
"--from-literal=auth_url="+cred.Clouds.Openstack.Auth.AuthURL,
"--from-literal=username="+cred.Clouds.Openstack.Auth.Username,
"--from-literal=user_domain_name="+cred.Clouds.Openstack.Auth.UserDomainName,
"--from-literal=user_domain_id="+domainID,
"--from-literal=user_id="+userID,
"--from-literal=password="+cred.Clouds.Openstack.Auth.Password,
"--from-literal=domain_id="+domainID,
"--from-literal=domain_name="+cred.Clouds.Openstack.Auth.UserDomainName,
"--from-literal=container_name="+containerName,
"--from-literal=project_id="+cred.Clouds.Openstack.Auth.ProjectID,
"--from-literal=project_name="+cred.Clouds.Openstack.Auth.ProjectName,
"--from-literal=project_domain_id="+domainID,
"--from-literal=project_domain_name="+cred.Clouds.Openstack.Auth.UserDomainName).Execute()
return err
} | logging | |||||
function | openshift/openshift-tests-private | 8ffbd646-3810-4dbe-9b57-8b0bbf21731e | checkODF | ['"context"', '"cloud.google.com/go/storage"', '"github.com/aws/aws-sdk-go-v2/service/s3"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func checkODF(oc *exutil.CLI) bool {
svcFound := false
expectedSC := []string{"openshift-storage.noobaa.io", "ocs-storagecluster-ceph-rbd", "ocs-storagecluster-cephfs"}
var scInCluster []string
scs, err := oc.AdminKubeClient().StorageV1().StorageClasses().List(context.Background(), metav1.ListOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
for _, sc := range scs.Items {
scInCluster = append(scInCluster, sc.Name)
}
for _, s := range expectedSC {
if !contain(scInCluster, s) {
return false
}
}
_, err = oc.AdminKubeClient().CoreV1().Services("openshift-storage").Get(context.Background(), "s3", metav1.GetOptions{})
if err == nil {
svcFound = true
}
return svcFound
} | logging | ||||
function | openshift/openshift-tests-private | 8dbaa366-acb9-44e0-9b14-341bdae1c32e | createObjectBucketClaim | ['k8sresource "k8s.io/apimachinery/pkg/api/resource"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func createObjectBucketClaim(oc *exutil.CLI, ns, name string) error {
template := exutil.FixturePath("testdata", "logging", "odf", "objectBucketClaim.yaml")
obc := resource{"objectbucketclaims", name, ns}
err := obc.applyFromTemplate(oc, "-f", template, "-n", ns, "-p", "NAME="+name, "NAMESPACE="+ns)
if err != nil {
return err
}
obc.WaitForResourceToAppear(oc)
resource{"objectbuckets", "obc-" + ns + "-" + name, ns}.WaitForResourceToAppear(oc)
assertResourceStatus(oc, "objectbucketclaims", name, ns, "{.status.phase}", "Bound")
return nil
} | logging | ||||
function | openshift/openshift-tests-private | c61a27ca-a77b-42e2-97bb-6f17981bb764 | deleteObjectBucketClaim | ['k8sresource "k8s.io/apimachinery/pkg/api/resource"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func deleteObjectBucketClaim(oc *exutil.CLI, ns, name string) error {
obc := resource{"objectbucketclaims", name, ns}
err := obc.clear(oc)
if err != nil {
return err
}
return obc.WaitUntilResourceIsGone(oc)
} | logging | ||||
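A sketch of the ODF object-storage path used by these tests, assuming the surrounding package; the namespace and claim name are placeholders, and createSecretForODFBucket is the helper whose tail appears at the top of this section.
func exampleODFBucket(oc *exutil.CLI) {
    ns, bucket := "openshift-logging", "loki-odf-bucket" // placeholders
    o.Expect(createObjectBucketClaim(oc, ns, bucket)).NotTo(o.HaveOccurred())
    o.Expect(createSecretForODFBucket(oc, bucket, "odf-secret", ns)).NotTo(o.HaveOccurred())
    // remove the claim (and its bucket) when the caller finishes
    defer func() {
        o.Expect(deleteObjectBucketClaim(oc, ns, bucket)).NotTo(o.HaveOccurred())
    }()
}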
function | openshift/openshift-tests-private | 4767d8e1-0718-4bff-883c-d73af99b6562 | checkMinIO | ['"context"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func checkMinIO(oc *exutil.CLI, ns string) (bool, error) {
podReady, svcFound := false, false
pod, err := oc.AdminKubeClient().CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{LabelSelector: "app=minio"})
if err != nil {
return false, err
}
if len(pod.Items) > 0 && pod.Items[0].Status.Phase == "Running" {
podReady = true
}
_, err = oc.AdminKubeClient().CoreV1().Services(ns).Get(context.Background(), "minio", metav1.GetOptions{})
if err == nil {
svcFound = true
}
return podReady && svcFound, err
} | logging | ||||
function | openshift/openshift-tests-private | c357048f-3dc8-4743-babb-301f2456e02b | useExtraObjectStorage | ['"strings"', '"cloud.google.com/go/storage"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func useExtraObjectStorage(oc *exutil.CLI) string {
if checkODF(oc) {
e2e.Logf("use the existing ODF storage service")
return "odf"
}
ready, err := checkMinIO(oc, minioNS)
if ready {
e2e.Logf("use existing MinIO storage service")
return "minio"
}
// err can be nil when the MinIO pod exists but is not ready yet, so guard before inspecting it
if err != nil && (strings.Contains(err.Error(), "No resources found") || strings.Contains(err.Error(), "not found")) {
e2e.Logf("deploy MinIO and use this MinIO as storage service")
deployMinIO(oc)
return "minio"
}
return ""
} | logging | ||||
function | openshift/openshift-tests-private | 886aa7d5-26b3-4c3d-a0d9-0b5c66bf7087 | patchLokiOperatorWithAWSRoleArn | ['"fmt"', '"github.com/aws/aws-sdk-go-v2/config"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func patchLokiOperatorWithAWSRoleArn(oc *exutil.CLI, subName, subNamespace, roleArn string) {
roleArnPatchConfig := `{
"spec": {
"config": {
"env": [
{
"name": "ROLEARN",
"value": "%s"
}
]
}
}
}`
err := oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("patch").Args("sub", subName, "-n", subNamespace, "-p", fmt.Sprintf(roleArnPatchConfig, roleArn), "--type=merge").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodReadyWithLabel(oc, loNS, "name=loki-operator-controller-manager")
} | logging | ||||
function | openshift/openshift-tests-private | 97a1324a-c7d0-4b6e-9ed2-5c2e87c2efaf | getStorageType | ['"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/service/s3"'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func getStorageType(oc *exutil.CLI) string {
platform := exutil.CheckPlatform(oc)
switch platform {
case "aws":
{
return "s3"
}
case "gcp":
{
return "gcs"
}
case "azure":
{
return "azure"
}
case "openstack":
{
return "swift"
}
default:
{
return useExtraObjectStorage(oc)
}
}
} | logging | ||||
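A short sketch of how getStorageType is typically consulted at the start of a LokiStack test; the skip message wording is illustrative.
func exampleStorageSelection(oc *exutil.CLI) {
    storageType := getStorageType(oc)
    if len(storageType) == 0 {
        g.Skip("Current cluster doesn't have a proper object storage for this test!")
    }
    e2e.Logf("using %s as the LokiStack object storage", storageType)
}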
function | openshift/openshift-tests-private | 44085b8e-1241-4fac-895a-cc7a63bd5b7e | setTSize | ['lokiStack'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (l lokiStack) setTSize(size string) lokiStack {
l.tSize = size
return l
} | logging | ||||
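A hedged sketch of building a lokiStack value and adjusting its size: the field names are inferred from their usage elsewhere in this file, all literal values (names, storage class, template path) are placeholders, and because setTSize uses a value receiver the returned copy must be captured.
func exampleLokiStack(oc *exutil.CLI) lokiStack {
    ls := lokiStack{
        name:          "lokistack-sample",
        namespace:     "openshift-logging",
        tSize:         "1x.demo",
        storageType:   getStorageType(oc),
        storageSecret: "loki-storage-secret",
        storageClass:  "gp3-csi", // placeholder storage class
        bucketName:    "loki-bucket-" + getRandomString(),
        template:      exutil.FixturePath("testdata", "logging", "lokistack", "lokistack-simple.yaml"), // placeholder template
    }
    // setTSize returns a modified copy, so reassign or chain it
    return ls.setTSize("1x.extra-small")
}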
function | openshift/openshift-tests-private | f31bcc21-44f3-4fda-bf5f-d587c3ffa4b9 | prepareResourcesForLokiStack | ['"context"', '"fmt"', '"os"', '"strings"', '"cloud.google.com/go/storage"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/credentials"', '"github.com/aws/aws-sdk-go-v2/service/s3"'] | ['lokiStack'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (l lokiStack) prepareResourcesForLokiStack(oc *exutil.CLI) error {
var err error
if len(l.bucketName) == 0 {
return fmt.Errorf("the bucketName should not be empty")
}
switch l.storageType {
case "s3":
{
var cfg aws.Config
region, err := exutil.GetAWSClusterRegion(oc)
if err != nil {
return err
}
if exutil.IsWorkloadIdentityCluster(oc) {
if !checkAWSCredentials() {
g.Skip("Skip since no AWS credetial! No Env AWS_SHARED_CREDENTIALS_FILE, Env CLUSTER_PROFILE_DIR or $HOME/.aws/credentials file")
}
partition := "aws"
if strings.HasPrefix(region, "us-gov") {
partition = "aws-us-gov"
}
cfg = readDefaultSDKExternalConfigurations(context.TODO(), region)
iamClient := newIamClient(cfg)
stsClient := newStsClient(cfg)
awsAccountID, _ := getAwsAccount(stsClient)
oidcName, err := getOIDC(oc)
o.Expect(err).NotTo(o.HaveOccurred())
lokiIAMRoleName := l.name + "-" + exutil.GetRandomString()
roleArn := createIAMRoleForLokiSTSDeployment(iamClient, oidcName, awsAccountID, partition, l.namespace, l.name, lokiIAMRoleName)
os.Setenv("LOKI_ROLE_NAME_ON_STS", lokiIAMRoleName)
patchLokiOperatorWithAWSRoleArn(oc, "loki-operator", loNS, roleArn)
createObjectStorageSecretOnAWSSTSCluster(oc, region, l.storageSecret, l.bucketName, l.namespace)
} else {
cred := getAWSCredentialFromCluster(oc)
cfg = generateS3Config(cred)
err = createSecretForAWSS3Bucket(oc, l.bucketName, l.storageSecret, l.namespace, cred)
o.Expect(err).NotTo(o.HaveOccurred())
}
client := newS3Client(cfg)
err = createS3Bucket(client, l.bucketName, region)
if err != nil {
return err
}
}
case "azure":
{
if exutil.IsWorkloadIdentityCluster(oc) {
if !readAzureCredentials() {
g.Skip("Azure Credentials not found. Skip case!")
} else {
performManagedIdentityAndSecretSetupForAzureWIF(oc, l.name, l.namespace, l.bucketName, l.storageSecret)
}
} else {
accountName, accountKey, err1 := exutil.GetAzureStorageAccountFromCluster(oc)
if err1 != nil {
return fmt.Errorf("can't get azure storage account from cluster: %v", err1)
}
client, err2 := exutil.NewAzureContainerClient(oc, accountName, accountKey, l.bucketName)
if err2 != nil {
return err2
}
err = exutil.CreateAzureStorageBlobContainer(client)
if err != nil {
return err
}
err = createSecretForAzureContainer(oc, l.bucketName, l.storageSecret, l.namespace)
}
}
case "gcs":
{
projectID, errGetID := exutil.GetGcpProjectID(oc)
o.Expect(errGetID).NotTo(o.HaveOccurred())
err = exutil.CreateGCSBucket(projectID, l.bucketName)
if err != nil {
return err
}
if exutil.IsWorkloadIdentityCluster(oc) {
clusterName := getInfrastructureName(oc)
gcsSAName := generateServiceAccountNameForGCS(clusterName)
os.Setenv("LOGGING_GCS_SERVICE_ACCOUNT_NAME", gcsSAName)
projectNumber, err1 := getGCPProjectNumber(projectID)
if err1 != nil {
return fmt.Errorf("can't get GCP project number: %v", err1)
}
poolID, err2 := getPoolID(oc)
if err2 != nil {
return fmt.Errorf("can't get pool ID: %v", err2)
}
sa, err3 := createServiceAccountOnGCP(projectID, gcsSAName)
if err3 != nil {
return fmt.Errorf("can't create service account: %v", err3)
}
os.Setenv("LOGGING_GCS_SERVICE_ACCOUNT_EMAIL", sa.Email)
err4 := grantPermissionsToGCPServiceAccount(poolID, projectID, projectNumber, l.namespace, l.name, sa.Email)
if err4 != nil {
return fmt.Errorf("can't add roles to the serviceaccount: %v", err4)
}
patchLokiOperatorOnGCPSTSforCCO(oc, loNS, projectNumber, poolID, sa.Email)
err = createSecretForGCSBucketWithSTS(oc, l.namespace, l.storageSecret, l.bucketName)
} else {
err = createSecretForGCSBucket(oc, l.bucketName, l.storageSecret, l.namespace)
}
}
case "swift":
{
cred, err1 := exutil.GetOpenStackCredentials(oc)
o.Expect(err1).NotTo(o.HaveOccurred())
client := exutil.NewOpenStackClient(cred, "object-store")
err = exutil.CreateOpenStackContainer(client, l.bucketName)
if err != nil {
return err
}
err = createSecretForSwiftContainer(oc, l.bucketName, l.storageSecret, l.namespace, cred)
}
case "odf":
{
err = createObjectBucketClaim(oc, l.namespace, l.bucketName)
if err != nil {
return err
}
err = createSecretForODFBucket(oc, l.bucketName, l.storageSecret, l.namespace)
}
case "minio":
{
cred := getMinIOCreds(oc, minioNS)
cfg := generateS3Config(cred)
client := newS3Client(cfg)
err = createS3Bucket(client, l.bucketName, "")
if err != nil {
return err
}
err = createSecretForMinIOBucket(oc, l.bucketName, l.storageSecret, l.namespace, cred)
}
}
return err
} | logging | |||
function | openshift/openshift-tests-private | 80277b0f-17ef-4308-a50e-68a3e4996430 | deployLokiStack | ['"fmt"', '"os"', '"strings"', '"cloud.google.com/go/storage"', '"github.com/aws/aws-sdk-go-v2/service/s3"', 'k8sresource "k8s.io/apimachinery/pkg/api/resource"'] | ['lokiStack'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (l lokiStack) deployLokiStack(oc *exutil.CLI, optionalParameters ...string) error {
var storage string
if l.storageType == "odf" || l.storageType == "minio" {
storage = "s3"
} else {
storage = l.storageType
}
lokistackTemplate := l.template
if GetIPVersionStackType(oc) == "ipv6single" {
lokistackTemplate = strings.Replace(l.template, ".yaml", "-ipv6.yaml", -1)
}
parameters := []string{"-f", lokistackTemplate, "-n", l.namespace, "-p", "NAME=" + l.name, "NAMESPACE=" + l.namespace, "SIZE=" + l.tSize, "SECRET_NAME=" + l.storageSecret, "STORAGE_TYPE=" + storage, "STORAGE_CLASS=" + l.storageClass}
if len(optionalParameters) != 0 {
parameters = append(parameters, optionalParameters...)
}
file, err := processTemplate(oc, parameters...)
defer os.Remove(file)
exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Can not process %v", parameters))
err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", file, "-n", l.namespace).Execute()
ls := resource{"lokistack", l.name, l.namespace}
ls.WaitForResourceToAppear(oc)
return err
} | logging | |||
function | openshift/openshift-tests-private | bbafcbc0-f877-4b7c-94ba-0788864e1cbc | waitForLokiStackToBeReady | ['"github.com/aws/aws-sdk-go-v2/aws"'] | ['lokiStack'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (l lokiStack) waitForLokiStackToBeReady(oc *exutil.CLI) {
for _, deploy := range []string{l.name + "-gateway", l.name + "-distributor", l.name + "-querier", l.name + "-query-frontend"} {
WaitForDeploymentPodsToBeReady(oc, l.namespace, deploy)
}
for _, ss := range []string{l.name + "-index-gateway", l.name + "-compactor", l.name + "-ruler", l.name + "-ingester"} {
waitForStatefulsetReady(oc, l.namespace, ss)
}
if exutil.IsWorkloadIdentityCluster(oc) {
currentPlatform := exutil.CheckPlatform(oc)
switch currentPlatform {
case "aws", "azure", "gcp":
validateCredentialsRequestGenerationOnSTS(oc, l.name, l.namespace)
}
}
} | logging | |||
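A sketch of the end-to-end order in which the lokiStack helpers above are invoked in a test, assuming the lokiStack value is built as in the earlier sketch; the deferred cleanups mirror the pattern used by the test cases that consume these helpers.
func exampleLokiStackLifecycle(oc *exutil.CLI, ls lokiStack) {
    // register cleanups first, then create the storage, the LokiStack, and wait for it
    defer ls.removeObjectStorage(oc)
    o.Expect(ls.prepareResourcesForLokiStack(oc)).NotTo(o.HaveOccurred())
    defer ls.removeLokiStack(oc)
    o.Expect(ls.deployLokiStack(oc)).NotTo(o.HaveOccurred())
    ls.waitForLokiStackToBeReady(oc)
}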
function | openshift/openshift-tests-private | 12d5ea14-2a66-4c84-9611-0ecc069f3c42 | removeLokiStack | ['k8sresource "k8s.io/apimachinery/pkg/api/resource"'] | ['lokiStack'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (l lokiStack) removeLokiStack(oc *exutil.CLI) {
resource{"lokistack", l.name, l.namespace}.clear(oc)
_ = oc.AsAdmin().WithoutNamespace().Run("delete").Args("pvc", "-n", l.namespace, "-l", "app.kubernetes.io/instance="+l.name).Execute()
} | logging | |||
function | openshift/openshift-tests-private | 00f0f918-4224-412c-a085-09db9924c6f0 | removeObjectStorage | ['"context"', '"encoding/json"', '"os"', '"github.com/aws/aws-sdk-go-v2/aws"', '"github.com/aws/aws-sdk-go-v2/config"', '"github.com/aws/aws-sdk-go-v2/service/s3"', 'k8sresource "k8s.io/apimachinery/pkg/api/resource"'] | ['lokiStack'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (l lokiStack) removeObjectStorage(oc *exutil.CLI) {
resource{"secret", l.storageSecret, l.namespace}.clear(oc)
var err error
switch l.storageType {
case "s3":
{
var cfg aws.Config
if exutil.IsWorkloadIdentityCluster(oc) {
region, err := exutil.GetAWSClusterRegion(oc)
o.Expect(err).NotTo(o.HaveOccurred())
cfg = readDefaultSDKExternalConfigurations(context.TODO(), region)
iamClient := newIamClient(cfg)
deleteIAMroleonAWS(iamClient, os.Getenv("LOKI_ROLE_NAME_ON_STS"))
os.Unsetenv("LOKI_ROLE_NAME_ON_STS")
err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("sub", "loki-operator", "-n", loNS, "-p", `[{"op": "remove", "path": "/spec/config"}]`, "--type=json").Execute()
o.Expect(err).NotTo(o.HaveOccurred())
waitForPodReadyWithLabel(oc, loNS, "name=loki-operator-controller-manager")
} else {
cred := getAWSCredentialFromCluster(oc)
cfg = generateS3Config(cred)
}
client := newS3Client(cfg)
err = deleteS3Bucket(client, l.bucketName)
}
case "azure":
{
if exutil.IsWorkloadIdentityCluster(oc) {
resourceGroup, err := getAzureResourceGroupFromCluster(oc)
o.Expect(err).NotTo(o.HaveOccurred())
azureSubscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID")
cred := createNewDefaultAzureCredential()
deleteManagedIdentityOnAzure(cred, azureSubscriptionID, resourceGroup, l.name)
deleteAzureStorageAccount(cred, azureSubscriptionID, resourceGroup, os.Getenv("LOKI_OBJECT_STORAGE_STORAGE_ACCOUNT"))
os.Unsetenv("LOKI_OBJECT_STORAGE_STORAGE_ACCOUNT")
} else {
accountName, accountKey, err1 := exutil.GetAzureStorageAccountFromCluster(oc)
o.Expect(err1).NotTo(o.HaveOccurred())
client, err2 := exutil.NewAzureContainerClient(oc, accountName, accountKey, l.bucketName)
o.Expect(err2).NotTo(o.HaveOccurred())
err = exutil.DeleteAzureStorageBlobContainer(client)
}
}
case "gcs":
{
if exutil.IsWorkloadIdentityCluster(oc) {
sa := os.Getenv("LOGGING_GCS_SERVICE_ACCOUNT_NAME")
if sa == "" {
e2e.Logf("LOGGING_GCS_SERVICE_ACCOUNT_NAME is not set, no need to delete the serviceaccount")
} else {
os.Unsetenv("LOGGING_GCS_SERVICE_ACCOUNT_NAME")
email := os.Getenv("LOGGING_GCS_SERVICE_ACCOUNT_EMAIL")
if email == "" {
e2e.Logf("LOGGING_GCS_SERVICE_ACCOUNT_EMAIL is not set, no need to delete the policies")
} else {
os.Unsetenv("LOGGING_GCS_SERVICE_ACCOUNT_EMAIL")
projectID, errGetID := exutil.GetGcpProjectID(oc)
o.Expect(errGetID).NotTo(o.HaveOccurred())
projectNumber, _ := getGCPProjectNumber(projectID)
poolID, _ := getPoolID(oc)
err = removePermissionsFromGCPServiceAccount(poolID, projectID, projectNumber, l.namespace, l.name, email)
o.Expect(err).NotTo(o.HaveOccurred())
err = removeServiceAccountFromGCP("projects/" + projectID + "/serviceAccounts/" + email)
o.Expect(err).NotTo(o.HaveOccurred())
}
}
}
err = exutil.DeleteGCSBucket(l.bucketName)
}
case "swift":
{
cred, err1 := exutil.GetOpenStackCredentials(oc)
o.Expect(err1).NotTo(o.HaveOccurred())
client := exutil.NewOpenStackClient(cred, "object-store")
err = exutil.DeleteOpenStackContainer(client, l.bucketName)
}
case "odf":
{
err = deleteObjectBucketClaim(oc, l.namespace, l.bucketName)
}
case "minio":
{
cred := getMinIOCreds(oc, minioNS)
cfg := generateS3Config(cred)
client := newS3Client(cfg)
err = deleteS3Bucket(client, l.bucketName)
}
}
o.Expect(err).NotTo(o.HaveOccurred())
} | logging | |||
function | openshift/openshift-tests-private | 35741e22-18c6-430c-a445-3a960713793c | createSecretFromGateway | ['"os"'] | ['lokiStack'] | github.com/openshift/openshift-tests-private/test/extended/logging/loki_utils.go | func (l lokiStack) createSecretFromGateway(oc *exutil.CLI, name, namespace, token string) {
dirname := "/tmp/" + oc.Namespace() + getRandomString()
defer os.RemoveAll(dirname)
err := os.MkdirAll(dirname, 0777)
o.Expect(err).NotTo(o.HaveOccurred())
err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("cm/"+l.name+"-gateway-ca-bundle", "-n", l.namespace, "--keys=service-ca.crt", "--confirm", "--to="+dirname).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
if token != "" {
err = oc.NotShowInfo().AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", name, "-n", namespace, "--from-file=ca-bundle.crt="+dirname+"/service-ca.crt", "--from-literal=token="+token).Execute()
} else {
err = oc.AsAdmin().WithoutNamespace().Run("create").Args("secret", "generic", name, "-n", namespace, "--from-file=ca-bundle.crt="+dirname+"/service-ca.crt").Execute()
}
o.Expect(err).NotTo(o.HaveOccurred())
} | logging |
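A hedged sketch of feeding a ServiceAccount token into createSecretFromGateway; the "logcollector" ServiceAccount, secret name and namespace are placeholders, and the token is obtained here with `oc create token` purely for illustration.
func exampleGatewaySecret(oc *exutil.CLI, ls lokiStack) {
    token, err := oc.AsAdmin().WithoutNamespace().Run("create").Args("token", "logcollector", "-n", "openshift-logging").Output()
    o.Expect(err).NotTo(o.HaveOccurred())
    // bundle the gateway CA and the bearer token into one secret for the log forwarder
    ls.createSecretFromGateway(oc, ls.name+"-gateway-token", "openshift-logging", token)
}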