filename (string, 4-198 chars) | content (string, 25-939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, 2-3.9k chars) | lang (string, 3 classes) | constargcount (float64, 0-129, ⌀) | variableargcount (float64, 0, ⌀) | sentence (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
pkg/jx/cmd/clients/factory.go | package clients
import (
"flag"
"fmt"
"io"
"net/url"
"os"
"path/filepath"
"github.com/jenkins-x/jx/pkg/builds"
"github.com/jenkins-x/golang-jenkins"
"github.com/jenkins-x/jx/pkg/io/secrets"
"github.com/jenkins-x/jx/pkg/vault"
certmngclient "github.com/jetstack/cert-manager/pkg/client/clientset/versioned"
"github.com/jenkins-x/jx/pkg/helm"
"github.com/jenkins-x/jx/pkg/kube/services"
kubevault "github.com/jenkins-x/jx/pkg/kube/vault"
"github.com/jenkins-x/jx/pkg/log"
"github.com/heptio/sonobuoy/pkg/client"
"github.com/heptio/sonobuoy/pkg/dynamic"
"github.com/jenkins-x/jx/pkg/gits"
"github.com/jenkins-x/jx/pkg/jenkins"
"github.com/jenkins-x/jx/pkg/kube"
"github.com/jenkins-x/jx/pkg/table"
"github.com/pkg/errors"
"gopkg.in/AlecAivazis/survey.v1/terminal"
"github.com/jenkins-x/jx/pkg/auth"
"github.com/jenkins-x/jx/pkg/client/clientset/versioned"
"github.com/jenkins-x/jx/pkg/util"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
vaultoperatorclient "github.com/banzaicloud/bank-vaults/operator/pkg/client/clientset/versioned"
tektonclient "github.com/knative/build-pipeline/pkg/client/clientset/versioned"
build "github.com/knative/build/pkg/client/clientset/versioned"
apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
// this is so that we load the auth plugins so we can connect to, say, GCP
_ "k8s.io/client-go/plugin/pkg/client/auth"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type factory struct {
Batch bool
kubeConfig kube.Kuber
impersonateUser string
bearerToken string
secretLocation secrets.SecretLocation
useVault bool
offline bool
}
var _ Factory = (*factory)(nil)
// NewFactory creates a factory with the default Kubernetes resources defined
// if optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig.
// if optionalClientConfig is not nil, then this factory will make use of it.
func NewFactory() Factory {
f := &factory{}
f.kubeConfig = kube.NewKubeConfig()
return f
}
func (f *factory) SetBatch(batch bool) {
f.Batch = batch
}
func (f *factory) SetOffline(offline bool) {
f.offline = offline
}
// ImpersonateUser returns a new factory impersonating the given user
func (f *factory) ImpersonateUser(user string) Factory {
copy := *f
copy.impersonateUser = user
return &copy
}
// WithBearerToken returns a new factory with bearer token
func (f *factory) WithBearerToken(token string) Factory {
copy := *f
copy.bearerToken = token
return &copy
}
// CreateJenkinsClient creates a new Jenkins client
func (f *factory) CreateJenkinsClient(kubeClient kubernetes.Interface, ns string, in terminal.FileReader, out terminal.FileWriter, errOut io.Writer) (gojenkins.JenkinsClient, error) {
svc, err := f.CreateJenkinsAuthConfigService(kubeClient, ns, "")
if err != nil {
return nil, err
}
url, err := f.GetJenkinsURL(kubeClient, ns)
if err != nil {
return nil, fmt.Errorf("%s. Try switching to the Development Tools environment via: jx env dev", err)
}
return jenkins.GetJenkinsClient(url, f.Batch, svc, in, out, errOut)
}
// CreateCustomJenkinsClient creates a new Jenkins client for the given custom Jenkins App
func (f *factory) CreateCustomJenkinsClient(kubeClient kubernetes.Interface, ns string, jenkinsServiceName string, in terminal.FileReader, out terminal.FileWriter, errOut io.Writer) (gojenkins.JenkinsClient, error) {
svc, err := f.CreateJenkinsAuthConfigService(kubeClient, ns, jenkinsServiceName)
if err != nil {
return nil, err
}
url, err := f.GetCustomJenkinsURL(kubeClient, ns, jenkinsServiceName)
if err != nil {
return nil, fmt.Errorf("%s. Try switching to the Development Tools environment via: jx env dev", err)
}
return jenkins.GetJenkinsClient(url, f.Batch, svc, in, out, errOut)
}
// GetJenkinsURL gets the Jenkins URL for the given namespace
func (f *factory) GetJenkinsURL(kubeClient kubernetes.Interface, ns string) (string, error) {
// lets find the Kubernetes service
client, ns, err := f.CreateKubeClient()
if err != nil {
return "", errors.Wrap(err, "failed to create the kube client")
}
url, err := services.FindServiceURL(client, ns, kube.ServiceJenkins)
if err != nil {
// lets try the real environment
realNS, _, err := kube.GetDevNamespace(client, ns)
if err != nil {
return "", errors.Wrapf(err, "failed to get the dev namespace from '%s' namespace", ns)
}
if realNS != ns {
url, err = services.FindServiceURL(client, realNS, kube.ServiceJenkins)
if err != nil {
return "", fmt.Errorf("%s in namespaces %s and %s", err, realNS, ns)
}
return url, nil
}
}
if err != nil {
return "", fmt.Errorf("%s in namespace %s", err, ns)
}
return url, err
}
// GetCustomJenkinsURL gets a custom jenkins App service URL
func (f *factory) GetCustomJenkinsURL(kubeClient kubernetes.Interface, ns string, jenkinsServiceName string) (string, error) {
// lets find the Kubernetes service
client, ns, err := f.CreateKubeClient()
if err != nil {
return "", errors.Wrap(err, "failed to create the kube client")
}
url, err := services.FindServiceURL(client, ns, jenkinsServiceName)
if err != nil {
// lets try the real environment
realNS, _, err := kube.GetDevNamespace(client, ns)
if err != nil {
return "", errors.Wrapf(err, "failed to get the dev namespace from '%s' namespace", ns)
}
if realNS != ns {
url, err = services.FindServiceURL(client, realNS, jenkinsServiceName)
if err != nil {
return "", errors.Wrapf(err, "failed to find service URL for %s in namespaces %s and %s", jenkinsServiceName, realNS, ns)
}
return url, nil
}
}
if err != nil {
return "", fmt.Errorf("%s in namespace %s", err, ns)
}
return url, err
}
func (f *factory) CreateJenkinsAuthConfigService(c kubernetes.Interface, ns string, jenkinsServiceName string) (auth.ConfigService, error) {
authConfigSvc, err := f.CreateAuthConfigService(auth.JenkinsAuthConfigFile)
if jenkinsServiceName == "" {
jenkinsServiceName = kube.SecretJenkins
}
if err != nil {
return authConfigSvc, err
}
config, err := authConfigSvc.LoadConfig()
if err != nil {
return authConfigSvc, err
}
customJenkins := jenkinsServiceName != kube.SecretJenkins
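// a custom Jenkins App keeps its admin API token in the "<service>-auth" secret and its admin password in the "<service>" secret, so both secrets are consulted below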
if len(config.Servers) == 0 || customJenkins {
secretName := jenkinsServiceName
if customJenkins {
secretName = jenkinsServiceName + "-auth"
}
userAuth := auth.UserAuth{}
s, err := c.CoreV1().Secrets(ns).Get(secretName, metav1.GetOptions{})
if err != nil {
if !customJenkins {
return authConfigSvc, err
}
}
if s != nil {
userAuth.Username = string(s.Data[kube.JenkinsAdminUserField])
userAuth.ApiToken = string(s.Data[kube.JenkinsAdminApiToken])
userAuth.BearerToken = string(s.Data[kube.JenkinsBearTokenField])
}
if customJenkins {
s, err = c.CoreV1().Secrets(ns).Get(jenkinsServiceName, metav1.GetOptions{})
if err == nil {
if userAuth.Username == "" {
userAuth.Username = string(s.Data[kube.JenkinsAdminUserField])
}
userAuth.Password = string(s.Data[kube.JenkinsAdminPasswordField])
}
}
svc, err := c.CoreV1().Services(ns).Get(jenkinsServiceName, metav1.GetOptions{})
if err != nil {
return authConfigSvc, err
}
svcURL := services.GetServiceURL(svc)
if svcURL == "" {
return authConfigSvc, fmt.Errorf("unable to find external URL annotation on service %s in namespace %s", svc.Name, ns)
}
u, err := url.Parse(svcURL)
if err != nil {
return authConfigSvc, err
}
if !userAuth.IsInvalid() || (customJenkins && userAuth.Password != "") {
if len(config.Servers) == 0 {
config.Servers = []*auth.AuthServer{
{
Name: u.Host,
URL: svcURL,
Users: []*auth.UserAuth{&userAuth},
},
}
} else {
server := config.GetOrCreateServer(svcURL)
server.Name = u.Host
server.Users = []*auth.UserAuth{&userAuth}
}
// lets save the file so that if we call LoadConfig() again we still have this defaulted user auth
err = authConfigSvc.SaveConfig()
if err != nil {
return authConfigSvc, err
}
}
}
return authConfigSvc, err
}
func (f *factory) CreateChartmuseumAuthConfigService() (auth.ConfigService, error) {
authConfigSvc, err := f.CreateAuthConfigService(auth.ChartmuseumAuthConfigFile)
if err != nil {
return authConfigSvc, err
}
_, err = authConfigSvc.LoadConfig()
if err != nil {
return authConfigSvc, err
}
return authConfigSvc, err
}
func (f *factory) CreateIssueTrackerAuthConfigService(secrets *corev1.SecretList) (auth.ConfigService, error) {
authConfigSvc, err := f.CreateAuthConfigService(auth.IssuesAuthConfigFile)
if err != nil {
return authConfigSvc, err
}
if secrets != nil {
config, err := authConfigSvc.LoadConfig()
if err != nil {
return authConfigSvc, err
}
f.AuthMergePipelineSecrets(config, secrets, kube.ValueKindIssue, f.IsInCDPipeline())
}
return authConfigSvc, err
}
func (f *factory) CreateChatAuthConfigService(secrets *corev1.SecretList) (auth.ConfigService, error) {
authConfigSvc, err := f.CreateAuthConfigService(auth.ChatAuthConfigFile)
if err != nil {
return authConfigSvc, err
}
if secrets != nil {
config, err := authConfigSvc.LoadConfig()
if err != nil {
return authConfigSvc, err
}
f.AuthMergePipelineSecrets(config, secrets, kube.ValueKindChat, f.IsInCDPipeline())
}
return authConfigSvc, err
}
func (f *factory) CreateAddonAuthConfigService(secrets *corev1.SecretList) (auth.ConfigService, error) {
authConfigSvc, err := f.CreateAuthConfigService(auth.AddonAuthConfigFile)
if err != nil {
return authConfigSvc, err
}
if secrets != nil {
config, err := authConfigSvc.LoadConfig()
if err != nil {
return authConfigSvc, err
}
f.AuthMergePipelineSecrets(config, secrets, kube.ValueKindAddon, f.IsInCDPipeline())
}
return authConfigSvc, err
}
func (f *factory) AuthMergePipelineSecrets(config *auth.AuthConfig, secrets *corev1.SecretList, kind string, isCDPipeline bool) error {
if config == nil || secrets == nil {
return nil
}
for _, secret := range secrets.Items {
labels := secret.Labels
annotations := secret.Annotations
data := secret.Data
if labels != nil && labels[kube.LabelKind] == kind && annotations != nil {
u := annotations[kube.AnnotationURL]
name := annotations[kube.AnnotationName]
k := labels[kube.LabelServiceKind]
if u != "" {
server := config.GetOrCreateServer(u)
if server != nil {
// lets use the latest values from the credential
if k != "" {
server.Kind = k
}
if name != "" {
server.Name = name
}
if data != nil {
username := data[kube.SecretDataUsername]
pwd := data[kube.SecretDataPassword]
if len(username) > 0 && isCDPipeline {
userAuth := config.FindUserAuth(u, string(username))
if userAuth == nil {
userAuth = &auth.UserAuth{
Username: string(username),
ApiToken: string(pwd),
}
} else if len(pwd) > 0 {
userAuth.ApiToken = string(pwd)
}
config.SetUserAuth(u, userAuth)
config.UpdatePipelineServer(server, userAuth)
}
}
}
}
}
}
return nil
}
// CreateAuthConfigService creates a new service saving auth config under the provided name. Depending on the factory,
// It will either save the config to the local file-system, or a Vault
func (f *factory) CreateAuthConfigService(configName string) (auth.ConfigService, error) {
if f.SecretsLocation() == secrets.VaultLocationKind {
vaultClient, err := f.CreateSystemVaultClient(kube.DefaultNamespace)
authService := auth.NewVaultAuthConfigService(configName, vaultClient)
return authService, err
} else {
return auth.NewFileAuthConfigService(configName)
}
}
// SecretsLocation indicates the location where the secrets are stored
func (f *factory) SecretsLocation() secrets.SecretsLocationKind {
client, namespace, err := f.CreateKubeClient()
if err != nil {
return secrets.FileSystemLocationKind
}
if f.secretLocation == nil {
f.secretLocation = secrets.NewSecretLocation(client, namespace)
}
return f.secretLocation.Location()
}
// SetSecretsLocation configures the secrets location. It will persist the value in a config map
// if the persist flag is set.
func (f *factory) SetSecretsLocation(location secrets.SecretsLocationKind, persist bool) error {
if f.secretLocation == nil {
client, namespace, err := f.CreateKubeClient()
if err != nil {
return errors.Wrap(err, "creating the kube client")
}
f.secretLocation = secrets.NewSecretLocation(client, namespace)
}
err := f.secretLocation.SetLocation(location, persist)
if err != nil {
return errors.Wrapf(err, "setting the secrets location %q", location)
}
return nil
}
// ResetSecretsLocation resets the location of the secrets stored in memory
func (f *factory) ResetSecretsLocation() {
f.secretLocation = nil
}
// CreateSystemVaultClient gets the system vault client for managing the secrets
func (f *factory) CreateSystemVaultClient(namespace string) (vault.Client, error) {
name, err := f.getVaultName(namespace)
if err != nil {
return nil, err
}
return f.CreateVaultClient(name, namespace)
}
func (f *factory) getVaultName(namespace string) (string, error) {
name, err := kubevault.SystemVaultName(f.kubeConfig)
if err != nil {
// if we cannot load the cluster name from the kube context, try to load it from the install values
kubeClient, _, err := f.CreateKubeClient()
if err != nil {
return name, err
}
data, err := kube.ReadInstallValues(kubeClient, namespace)
if err != nil {
return name, errors.Wrapf(err, "cannot find cluster name as no ConfigMap %s in namespace %s", kube.ConfigMapNameJXInstallConfig, namespace)
}
name = data[kube.SystemVaultName]
if name == "" {
name = kubevault.SystemVaultNameForCluster(data[kube.ClusterName])
}
}
if name == "" {
return name, fmt.Errorf("could not find the cluster name in namespace %s", namespace)
}
return name, nil
}
// CreateVaultClient returns the given vault client for managing secrets
// Will use default values for name and namespace if nil values are applied
func (f *factory) CreateVaultClient(name string, namespace string) (vault.Client, error) {
vopClient, err := f.CreateVaultOperatorClient()
kubeClient, defaultNamespace, err := f.CreateKubeClient()
if err != nil {
return nil, err
}
// Use defaults if nothing is specified by the user
if namespace == "" {
devNamespace, _, err := kube.GetDevNamespace(kubeClient, defaultNamespace)
if err != nil {
return nil, errors.Wrapf(err, "getting the dev namesapce from current namesapce %q",
defaultNamespace)
}
namespace = devNamespace
}
if name == "" {
name, err = f.getVaultName(namespace)
if err != nil {
return nil, err
}
}
if !kubevault.FindVault(vopClient, name, namespace) {
return nil, fmt.Errorf("no '%s' vault found in namespace '%s'", name, namespace)
}
clientFactory, err := kubevault.NewVaultClientFactory(kubeClient, vopClient, namespace)
if err != nil {
return nil, errors.Wrap(err, "creating vault client")
}
vaultClient, err := clientFactory.NewVaultClient(name, namespace)
return vault.NewVaultClient(vaultClient), err
}
func (f *factory) CreateJXClient() (versioned.Interface, string, error) {
config, err := f.CreateKubeConfig()
if err != nil {
return nil, "", err
}
kubeConfig, _, err := f.kubeConfig.LoadConfig()
if err != nil {
return nil, "", err
}
ns := kube.CurrentNamespace(kubeConfig)
client, err := versioned.NewForConfig(config)
if err != nil {
return nil, ns, err
}
return client, ns, err
}
func (f *factory) CreateKnativeBuildClient() (build.Interface, string, error) {
config, err := f.CreateKubeConfig()
if err != nil {
return nil, "", err
}
kubeConfig, _, err := f.kubeConfig.LoadConfig()
if err != nil {
return nil, "", err
}
ns := kube.CurrentNamespace(kubeConfig)
client, err := build.NewForConfig(config)
if err != nil {
return nil, ns, err
}
return client, ns, err
}
func (f *factory) CreateTektonClient() (tektonclient.Interface, string, error) {
config, err := f.CreateKubeConfig()
if err != nil {
return nil, "", err
}
kubeConfig, _, err := f.kubeConfig.LoadConfig()
if err != nil {
return nil, "", err
}
ns := kube.CurrentNamespace(kubeConfig)
client, err := tektonclient.NewForConfig(config)
if err != nil {
return nil, ns, err
}
return client, ns, err
}
func (f *factory) CreateDynamicClient() (*dynamic.APIHelper, string, error) {
config, err := f.CreateKubeConfig()
if err != nil {
return nil, "", err
}
kubeConfig, _, err := f.kubeConfig.LoadConfig()
if err != nil {
return nil, "", err
}
ns := kube.CurrentNamespace(kubeConfig)
client, err := dynamic.NewAPIHelperFromRESTConfig(config)
if err != nil {
return nil, ns, err
}
return client, ns, err
}
func (f *factory) CreateApiExtensionsClient() (apiextensionsclientset.Interface, error) {
config, err := f.CreateKubeConfig()
if err != nil {
return nil, err
}
return apiextensionsclientset.NewForConfig(config)
}
func (f *factory) CreateMetricsClient() (*metricsclient.Clientset, error) {
config, err := f.CreateKubeConfig()
if err != nil {
return nil, err
}
return metricsclient.NewForConfig(config)
}
func (f *factory) CreateKubeClient() (kubernetes.Interface, string, error) {
cfg, err := f.CreateKubeConfig()
if err != nil {
return nil, "", err
}
client, err := kubernetes.NewForConfig(cfg)
if err != nil {
return nil, "", err
}
if client == nil {
return nil, "", fmt.Errorf("Failed to create Kubernetes Client")
}
ns := ""
config, _, err := f.kubeConfig.LoadConfig()
if err != nil {
return client, ns, err
}
ns = kube.CurrentNamespace(config)
// TODO allow namespace to be specified as a CLI argument!
return client, ns, nil
}
func (f *factory) CreateGitProvider(gitURL string, message string, authConfigSvc auth.ConfigService, gitKind string, batchMode bool, gitter gits.Gitter, in terminal.FileReader, out terminal.FileWriter, errOut io.Writer) (gits.GitProvider, error) {
gitInfo, err := gits.ParseGitURL(gitURL)
if err != nil {
return nil, err
}
return gitInfo.CreateProvider(f.IsInCluster(), authConfigSvc, gitKind, gitter, batchMode, in, out, errOut)
}
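// kubeConfigCache memoises the parsed --kubeconfig flag: flag.String may only register a given flag name once per process, so later calls must reuse the first result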
var kubeConfigCache *string
func createKubeConfig(offline bool) *string {
if offline {
panic("not supposed to be making a network connection")
}
var kubeconfig *string
if kubeConfigCache != nil {
return kubeConfigCache
}
if home := util.HomeDir(); home != "" {
kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
} else {
kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
}
kubeConfigCache = kubeconfig
return kubeconfig
}
func (f *factory) CreateKubeConfig() (*rest.Config, error) {
masterURL := ""
kubeConfigEnv := os.Getenv("KUBECONFIG")
if kubeConfigEnv != "" {
pathList := filepath.SplitList(kubeConfigEnv)
return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
&clientcmd.ClientConfigLoadingRules{Precedence: pathList},
&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterURL}}).ClientConfig()
}
kubeconfig := createKubeConfig(f.offline)
var config *rest.Config
var err error
if kubeconfig != nil {
exists, err := util.FileExists(*kubeconfig)
if err == nil && exists {
// use the current context in kubeconfig
config, err = clientcmd.BuildConfigFromFlags(masterURL, *kubeconfig)
if err != nil {
return nil, err
}
}
}
if config == nil {
config, err = rest.InClusterConfig()
if err != nil {
return nil, err
}
}
if config != nil && f.bearerToken != "" {
config.BearerToken = f.bearerToken
return config, nil
}
user := f.getImpersonateUser()
if config != nil && user != "" && config.Impersonate.UserName == "" {
config.Impersonate.UserName = user
}
return config, nil
}
func (f *factory) getImpersonateUser() string {
user := f.impersonateUser
if user == "" {
// this is really only used for testing
user = os.Getenv("JX_IMPERSONATE_USER")
}
return user
}
func (f *factory) CreateTable(out io.Writer) table.Table {
return table.CreateTable(out)
}
// IsInCDPipeline we should only load the git / issue tracker API tokens if the current pod
// is in a pipeline and running as the Jenkins service account
func (f *factory) IsInCDPipeline() bool {
// TODO should we let RBAC decide if we can see the Secrets in the dev namespace?
// or we should test if we are in the cluster and get the current ServiceAccount name?
buildNumber := builds.GetBuildNumber()
return buildNumber != ""
}
// IsInCluster tells whether we are running in-cluster
func (f *factory) IsInCluster() bool {
_, err := rest.InClusterConfig()
if err != nil {
return false
}
return true
}
// CreateComplianceClient creates a new Sonobuoy compliance client
func (f *factory) CreateComplianceClient() (*client.SonobuoyClient, error) {
config, err := f.CreateKubeConfig()
if err != nil {
return nil, errors.Wrap(err, "compliance client failed to load the Kubernetes configuration")
}
skc, err := dynamic.NewAPIHelperFromRESTConfig(config)
if err != nil {
return nil, errors.Wrap(err, "compliance dynamic client failed to be created")
}
return client.NewSonobuoyClient(config, skc)
}
// CreateVaultOperatorClient creates a new vault operator client
func (f *factory) CreateVaultOperatorClient() (vaultoperatorclient.Interface, error) {
config, err := f.CreateKubeConfig()
if err != nil {
return nil, err
}
return vaultoperatorclient.NewForConfig(config)
}
// CreateHelm creates a new Helm client
func (f *factory) CreateHelm(verbose bool,
helmBinary string,
noTiller bool,
helmTemplate bool) helm.Helmer {
if helmBinary == "" {
helmBinary = "helm"
}
featureFlag := "none"
if helmTemplate {
featureFlag = "template-mode"
} else if noTiller {
featureFlag = "no-tiller-server"
}
if verbose {
log.Infof("Using helmBinary %s with feature flag: %s\n", util.ColorInfo(helmBinary), util.ColorInfo(featureFlag))
}
helmCLI := helm.NewHelmCLI(helmBinary, helm.V2, "", verbose)
var h helm.Helmer = helmCLI
if helmTemplate {
kubeClient, ns, _ := f.CreateKubeClient()
h = helm.NewHelmTemplate(helmCLI, "", kubeClient, ns)
} else {
h = helmCLI
}
if noTiller && !helmTemplate {
h.SetHost(helm.GetTillerAddress())
helm.StartLocalTillerIfNotRunning()
}
return h
}
// CreateCertManagerClient creates a new Kubernetes client for cert-manager resources
func (f *factory) CreateCertManagerClient() (certmngclient.Interface, error) {
config, err := f.CreateKubeConfig()
if err != nil {
return nil, err
}
return certmngclient.NewForConfig(config)
}
| ["\"KUBECONFIG\"", "\"JX_IMPERSONATE_USER\""] | [] | ["JX_IMPERSONATE_USER", "KUBECONFIG"] | [] | ["JX_IMPERSONATE_USER", "KUBECONFIG"] | go | 2 | 0 | |
test/performance/heterogeneous/dgl/hgt_main.py | """
Performance check of AutoGL model + DGL (trainer + dataset)
"""
import os
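# the AUTOGL_BACKEND choice must be exported before any autogl import so the backend is picked up at import time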
os.environ["AUTOGL_BACKEND"] = "dgl"
import numpy as np
from tqdm import tqdm
import torch
import torch.nn.functional as F
from autogl.module.model.dgl import AutoHGT, AutoHeteroRGCN
from autogl.solver.utils import set_seed
import argparse
from autogl.datasets import build_dataset_from_name
def get_n_params(model):
pp=0
for p in list(model.parameters()):
nn=1
for s in list(p.size()):
nn = nn*s
pp += nn
return pp
def test(model, graph, idx, labels):
model.eval()
pred = model(graph)[idx].max(1)[1]
acc = (pred == labels[idx]).float().mean()
return acc.item()
def train(model, G, args, train_mask, labels):
optimizer = torch.optim.AdamW(model.parameters())
scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, total_steps=args.n_epoch, max_lr = args.max_lr)
for epoch in np.arange(args.n_epoch) + 1:
model.train()
logits = model(G)
# The loss is computed only for labeled nodes.
loss = F.cross_entropy(logits[train_mask], labels[train_mask].to(args.device))
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
optimizer.step()
scheduler.step()
if __name__=='__main__':
parser = argparse.ArgumentParser(description='hetero dgl model')
parser.add_argument('--n_epoch', type=int, default=200)
parser.add_argument('--n_inp', type=int, default=256)
parser.add_argument('--clip', type=int, default=1.0)
parser.add_argument('--max_lr', type=float, default=1e-3)
parser.add_argument('--repeat', type=int, default=50)
parser.add_argument('--device', type=str, default='cuda')
parser.add_argument('--model', type=str, choices=['hgt', 'HeteroRGCN'], default='hgt')
args = parser.parse_args()
torch.manual_seed(0)
dataset = build_dataset_from_name("hetero-acm-hgt")
G = dataset[0].to(args.device)
print(G)
target_field = dataset.schema["target_node_type"]
labels = G.nodes[target_field].data["label"].to(args.device)
train_mask = G.nodes[target_field].data["train_mask"].nonzero().flatten()
val_mask = G.nodes[target_field].data["val_mask"].nonzero().flatten()
test_mask = G.nodes[target_field].data["test_mask"].nonzero().flatten()
num_features = G.nodes[target_field].data["feat"].size(1)
num_classes = labels.max().item() + 1
accs = []
for seed in tqdm(range(args.repeat)):
set_seed(seed)
if args.model=='hgt':
model = AutoHGT(dataset=dataset,
num_features=num_features,
num_classes=num_classes,
device=args.device,
init=False
).from_hyper_parameter({
"num_layers": 2,
"hidden": [256,256],
"heads": 4,
"dropout": 0.2,
"act": "gelu",
"use_norm": True,
}).model
elif args.model=='HeteroRGCN':
model = AutoHeteroRGCN(dataset=dataset,
num_features=num_features,
num_classes=num_classes,
device=args.device,
init=False
).from_hyper_parameter({
"num_layers": 2,
"hidden": [256],
"act": "leaky_relu",
}).model
model.to(args.device)
train(model, G, args, train_mask, labels)
accs.append(test(model, G, test_mask, labels))
print('{:.4f} ~ {:.4f}'.format(np.mean(accs), np.std(accs)))
| [] | [] | ["AUTOGL_BACKEND"] | [] | ["AUTOGL_BACKEND"] | python | 1 | 0 | |
code/evaluation/eval.py | import sys
sys.path.append('../code')
import argparse
import GPUtil
import os
from pyhocon import ConfigFactory
import torch
import numpy as np
import cvxpy as cp
from PIL import Image
import math
import utils.general as utils
import utils.plots as plt
from utils import rend_util
from utils import vis_util
from model.sg_render import compute_envmap
import imageio
# import pyexr
def evaluate(**kwargs):
torch.set_default_dtype(torch.float32)
conf = ConfigFactory.parse_file(kwargs['conf'])
exps_folder_name = kwargs['exps_folder_name']
evals_folder_name = kwargs['evals_folder_name']
expname = conf.get_string('train.expname') + '-' + kwargs['expname']
if kwargs['timestamp'] == 'latest':
if os.path.exists(os.path.join('../', kwargs['exps_folder_name'], expname)):
timestamps = os.listdir(os.path.join('../', kwargs['exps_folder_name'], expname))
if (len(timestamps)) == 0:
print('WRONG EXP FOLDER')
exit()
else:
timestamp = sorted(timestamps)[-1]
else:
print('WRONG EXP FOLDER')
exit()
else:
timestamp = kwargs['timestamp']
utils.mkdir_ifnotexists(os.path.join('../', evals_folder_name))
expdir = os.path.join('../', exps_folder_name, expname)
evaldir = os.path.join('../', evals_folder_name, expname, os.path.basename(kwargs['data_split_dir']))
model = utils.get_class(conf.get_string('train.model_class'))(conf=conf.get_config('model'))
if torch.cuda.is_available():
model.cuda()
eval_dataset = utils.get_class(conf.get_string('train.dataset_class'))(kwargs['gamma'],
kwargs['data_split_dir'],
train_cameras=False)
eval_dataloader = torch.utils.data.DataLoader(eval_dataset,
batch_size=1,
shuffle=False,
collate_fn=eval_dataset.collate_fn
)
total_pixels = eval_dataset.total_pixels
img_res = eval_dataset.img_res
old_checkpnts_dir = os.path.join(expdir, timestamp, 'checkpoints')
ckpt_path = os.path.join(old_checkpnts_dir, 'ModelParameters', str(kwargs['checkpoint']) + ".pth")
saved_model_state = torch.load(ckpt_path)
model.load_state_dict(saved_model_state["model_state_dict"])
epoch = saved_model_state['epoch']
print('Loaded checkpoint: ', ckpt_path)
if kwargs['geometry'].endswith('.pth'):
print('Reloading geometry from: ', kwargs['geometry'])
geometry = torch.load(kwargs['geometry'])['model_state_dict']
geometry = {k: v for k, v in geometry.items() if 'implicit_network' in k}
print(geometry.keys())
model_dict = model.state_dict()
model_dict.update(geometry)
model.load_state_dict(model_dict)
#####################################################################################################
# reset lighting
#####################################################################################################
relight = False
if kwargs['light_sg'].endswith('.npy'):
print('Loading light from: ', kwargs['light_sg'])
model.envmap_material_network.load_light(kwargs['light_sg'])
evaldir = evaldir + '_relight'
relight = True
edit_diffuse = False
if len(kwargs['diffuse_albedo']) > 0:
print('Setting diffuse albedo to: ', kwargs['diffuse_albedo'])
evaldir = evaldir + '_editdiffuse'
edit_diffuse = True
utils.mkdir_ifnotexists(evaldir)
print('Output directory is: ', evaldir)
with open(os.path.join(evaldir, 'ckpt_path.txt'), 'w') as fp:
fp.write(ckpt_path + '\n')
####################################################################################################################
print("evaluating...")
model.eval()
# extract mesh
if (not edit_diffuse) and (not relight) and eval_dataset.has_groundtruth:
with torch.no_grad():
mesh = plt.get_surface_high_res_mesh(
sdf=lambda x: model.implicit_network(x)[:, 0],
resolution=kwargs['resolution']
)
# Taking the biggest connected component
components = mesh.split(only_watertight=False)
areas = np.array([c.area for c in components], dtype=float)
mesh_clean = components[areas.argmax()]
mesh_clean.export('{0}/mesh.obj'.format(evaldir), 'obj')
# generate images
images_dir = evaldir
all_frames = []
psnrs = []
for data_index, (indices, model_input, ground_truth) in enumerate(eval_dataloader):
if eval_dataset.has_groundtruth:
out_img_name = os.path.basename(eval_dataset.image_paths[indices[0]])[:-4]
else:
out_img_name = '{}'.format(indices[0])
if len(kwargs['view_name']) > 0 and out_img_name != kwargs['view_name']:
print('Skipping: ', out_img_name)
continue
print('Evaluating data_index: ', data_index, len(eval_dataloader))
model_input["intrinsics"] = model_input["intrinsics"].cuda()
model_input["uv"] = model_input["uv"].cuda()
model_input["object_mask"] = model_input["object_mask"].cuda()
model_input['pose'] = model_input['pose'].cuda()
split = utils.split_input(model_input, total_pixels)
res = []
for s in split:
out = model(s)
res.append({
'points': out['points'].detach(),
'idr_rgb_values': out['idr_rgb_values'].detach(),
'sg_rgb_values': out['sg_rgb_values'].detach(),
'normal_values': out['normal_values'].detach(),
'network_object_mask': out['network_object_mask'].detach(),
'object_mask': out['object_mask'].detach(),
'sg_diffuse_albedo_values': out['sg_diffuse_albedo_values'].detach(),
'sg_diffuse_rgb_values': out['sg_diffuse_rgb_values'].detach(),
'sg_specular_rgb_values': out['sg_specular_rgb_values'].detach(),
})
batch_size = ground_truth['rgb'].shape[0]
model_outputs = utils.merge_output(res, total_pixels, batch_size)
### re-render with updated diffuse albedo
if edit_diffuse:
diffuse_albedo = imageio.imread(kwargs['diffuse_albedo']).astype(np.float32)[:, :, :3]
if not kwargs['diffuse_albedo'].endswith('.exr'):
diffuse_albedo /= 255.
diffuse_albedo = torch.from_numpy(diffuse_albedo).cuda().reshape((-1, 3))
ray_dirs, _ = rend_util.get_camera_params(model_input["uv"],
model_input['pose'],
model_input["intrinsics"])
sg_ret = model.render_sg_rgb(mask=model_outputs['network_object_mask'],
normals=model_outputs['normal_values'],
view_dirs=-ray_dirs.reshape((-1, 3)),
diffuse_albedo=diffuse_albedo)
for x in sorted(sg_ret.keys()):
assert (x in model_outputs)
model_outputs[x] = sg_ret[x]
tonemap_img = lambda x: np.power(x, 1./eval_dataset.gamma)
clip_img = lambda x: np.clip(x, 0., 1.)
assert (batch_size == 1)
if kwargs['write_idr']:
rgb_eval = model_outputs['idr_rgb_values']
rgb_eval = rgb_eval.reshape(batch_size, total_pixels, 3)
rgb_eval = plt.lin2img(rgb_eval, img_res).detach().cpu().numpy()[0]
rgb_eval = rgb_eval.transpose(1, 2, 0)
rgb_eval = clip_img(tonemap_img(rgb_eval))
img = Image.fromarray((rgb_eval * 255).astype(np.uint8))
img.save('{0}/idr_rgb_{1}.png'.format(images_dir, out_img_name))
rgb_eval = model_outputs['sg_rgb_values']
rgb_eval = rgb_eval.reshape(batch_size, total_pixels, 3)
rgb_eval = plt.lin2img(rgb_eval, img_res).detach().cpu().numpy()[0]
rgb_eval = rgb_eval.transpose(1, 2, 0)
if kwargs['save_exr']:
imageio.imwrite('{0}/sg_rgb_{1}.exr'.format(images_dir, out_img_name), rgb_eval)
# pyexr.write('{0}/sg_rgb_{1}.exr'.format(images_dir, out_img_name), rgb_eval)
# np.save('{0}/sg_rgb_{1}.npy'.format(images_dir, out_img_name), rgb_eval)
else:
rgb_eval = clip_img(tonemap_img(rgb_eval))
img = Image.fromarray((rgb_eval * 255).astype(np.uint8))
img.save('{0}/sg_rgb_{1}.png'.format(images_dir, out_img_name))
all_frames.append(np.array(img))
# network_object_mask = model_outputs['network_object_mask']
# network_object_mask = network_object_mask.reshape(batch_size, total_pixels, 3)
# network_object_mask = plt.lin2img(network_object_mask, img_res).detach().cpu().numpy()[0]
# network_object_mask = network_object_mask.transpose(1, 2, 0)
# img = Image.fromarray((network_object_mask * 255).astype(np.uint8))
# img.save('{0}/object_mask_{1}.png'.format(images_dir, out_img_name))
normal = model_outputs['normal_values']
normal = normal.reshape(batch_size, total_pixels, 3)
normal = (normal + 1.) / 2.
normal = plt.lin2img(normal, img_res).detach().cpu().numpy()[0]
normal = normal.transpose(1, 2, 0)
if kwargs['save_exr']:
imageio.imwrite('{0}/normal_{1}.exr'.format(images_dir, out_img_name), normal)
# pyexr.write('{0}/normal_{1}.exr'.format(images_dir, out_img_name), normal)
# np.save('{0}/normal_{1}.npy'.format(images_dir, out_img_name), normal)
else:
img = Image.fromarray((normal * 255).astype(np.uint8))
img.save('{0}/normal_{1}.png'.format(images_dir, out_img_name))
if (not relight) and eval_dataset.has_groundtruth:
depth = torch.ones(batch_size * total_pixels).cuda().float()
network_object_mask = model_outputs['network_object_mask'] & model_outputs['object_mask']
depth_valid = rend_util.get_depth(model_outputs['points'].reshape(batch_size, total_pixels, 3),
model_input['pose']).reshape(-1)[network_object_mask]
depth[network_object_mask] = depth_valid
depth[~network_object_mask] = 0.98 * depth_valid.min()
assert (batch_size == 1)
network_object_mask = network_object_mask.float().reshape(img_res[0], img_res[1]).cpu()
depth = depth.reshape(img_res[0], img_res[1]).cpu()
if kwargs['save_exr']:
depth = depth * network_object_mask
depth = depth.numpy()
imageio.imwrite('{0}/depth_{1}.exr'.format(images_dir, out_img_name), depth)
# pyexr.write('{0}/depth_{1}.exr'.format(images_dir, out_img_name), depth)
# np.save('{0}/depth_{1}.npy'.format(images_dir, out_img_name), depth)
else:
depth = vis_util.colorize(depth, cmap_name='jet')
depth = depth * network_object_mask.unsqueeze(-1) + (1. - network_object_mask.unsqueeze(-1))
depth = depth.numpy()
img = Image.fromarray((depth * 255).astype(np.uint8))
img.save('{0}/depth_{1}.png'.format(images_dir, out_img_name))
# write lighting and materials
envmap = compute_envmap(lgtSGs=model.envmap_material_network.get_light(), H=256, W=512, upper_hemi=model.envmap_material_network.upper_hemi)
envmap = envmap.cpu().numpy()
imageio.imwrite(os.path.join(images_dir, 'envmap.exr'), envmap)
roughness, specular_reflectance = model.envmap_material_network.get_base_materials()
with open(os.path.join(images_dir, 'relight_material.txt'), 'w') as fp:
for i in range(roughness.shape[0]):
fp.write('Material {}:\n'.format(i))
fp.write('\troughness: {}\n'.format(roughness[i, 0].item()))
fp.write('\tspecular_reflectance: ')
for j in range(3):
fp.write('{}, '.format(specular_reflectance[i, j].item()))
fp.write('\n\n')
rgb_gt = ground_truth['rgb']
rgb_gt = plt.lin2img(rgb_gt, img_res).numpy()[0].transpose(1, 2, 0)
if kwargs['save_exr']:
imageio.imwrite('{0}/gt_{1}.exr'.format(images_dir, out_img_name), rgb_gt)
# pyexr.write('{0}/gt_{1}.exr'.format(images_dir, out_img_name), rgb_gt)
# np.save('{0}/gt_{1}.npy'.format(images_dir, out_img_name), rgb_gt)
else:
rgb_gt = clip_img(tonemap_img(rgb_gt))
img = Image.fromarray((rgb_gt * 255).astype(np.uint8))
img.save('{0}/gt_{1}.png'.format(images_dir, out_img_name))
mask = model_input['object_mask']
mask = plt.lin2img(mask.unsqueeze(-1), img_res).cpu().numpy()[0]
mask = mask.transpose(1, 2, 0)
rgb_eval_masked = rgb_eval * mask
rgb_gt_masked = rgb_gt * mask
psnr = calculate_psnr(rgb_eval_masked, rgb_gt_masked, mask)
psnrs.append(psnr)
# verbose mode
rgb_eval = model_outputs['sg_diffuse_albedo_values']
rgb_eval = rgb_eval.reshape(batch_size, total_pixels, 3)
rgb_eval = plt.lin2img(rgb_eval, img_res).detach().cpu().numpy()[0]
rgb_eval = rgb_eval.transpose(1, 2, 0)
if kwargs['save_exr']:
imageio.imwrite('{0}/sg_diffuse_albedo_{1}.exr'.format(images_dir, out_img_name), rgb_eval)
# pyexr.write('{0}/sg_diffuse_albedo_{1}.exr'.format(images_dir, out_img_name), rgb_eval)
# np.save('{0}/sg_diffuse_albedo_{1}.npy'.format(images_dir, out_img_name), rgb_eval)
else:
rgb_eval = clip_img(rgb_eval)
img = Image.fromarray((rgb_eval * 255).astype(np.uint8))
img.save('{0}/sg_diffuse_albedo_{1}.png'.format(images_dir, out_img_name))
rgb_eval = model_outputs['sg_diffuse_rgb_values']
rgb_eval = rgb_eval.reshape(batch_size, total_pixels, 3)
rgb_eval = plt.lin2img(rgb_eval, img_res).detach().cpu().numpy()[0]
rgb_eval = rgb_eval.transpose(1, 2, 0)
if kwargs['save_exr']:
imageio.imwrite('{0}/sg_diffuse_rgb_{1}.exr'.format(images_dir, out_img_name), rgb_eval)
# pyexr.write('{0}/sg_diffuse_rgb_{1}.exr'.format(images_dir, out_img_name), rgb_eval)
# np.save('{0}/sg_diffuse_rgb_{1}.npy'.format(images_dir, out_img_name), rgb_eval)
else:
rgb_eval = clip_img(rgb_eval)
img = Image.fromarray((rgb_eval * 255).astype(np.uint8))
img.save('{0}/sg_diffuse_rgb_{1}.png'.format(images_dir, out_img_name))
rgb_eval = model_outputs['sg_specular_rgb_values']
rgb_eval = rgb_eval.reshape(batch_size, total_pixels, 3)
rgb_eval = plt.lin2img(rgb_eval, img_res).detach().cpu().numpy()[0]
rgb_eval = rgb_eval.transpose(1, 2, 0)
if kwargs['save_exr']:
imageio.imwrite('{0}/sg_specular_rgb_{1}.exr'.format(images_dir, out_img_name), rgb_eval)
# pyexr.write('{0}/sg_specular_rgb_{1}.exr'.format(images_dir, out_img_name), rgb_eval)
# np.save('{0}/sg_specular_rgb_{1}.npy'.format(images_dir, out_img_name), rgb_eval)
else:
rgb_eval = clip_img(rgb_eval)
img = Image.fromarray((rgb_eval * 255).astype(np.uint8))
img.save('{0}/sg_specular_rgb_{1}.png'.format(images_dir, out_img_name))
if not kwargs['save_exr']:
imageio.mimwrite(os.path.join(images_dir, 'video_rgb.mp4'), all_frames, fps=15, quality=9)
print('Done rendering', images_dir)
if len(psnrs) > 0:
psnrs = np.array(psnrs).astype(np.float64)
# print("RENDERING EVALUATION {2}: psnr mean = {0} ; psnr std = {1}".format("%.2f" % psnrs.mean(), "%.2f" % psnrs.std(), scan_id))
print("RENDERING EVALUATION: psnr mean = {0} ; psnr std = {1}".format("%.2f" % psnrs.mean(), "%.2f" % psnrs.std()))
def get_cameras_accuracy(pred_Rs, gt_Rs, pred_ts, gt_ts,):
''' Align predicted pose to gt pose and print cameras accuracy'''
# find rotation
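# Orthogonal Procrustes via SVD: Q = sum_i gt_R_i @ pred_R_i^T, and R_opt = Uq diag(1, 1, det(Uq Vq^T)) Vq^T keeps det(R_opt) = +1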
d = pred_Rs.shape[-1]
n = pred_Rs.shape[0]
Q = torch.addbmm(torch.zeros(d, d, dtype=torch.double), gt_Rs, pred_Rs.transpose(1, 2))
Uq, _, Vq = torch.svd(Q)
sv = torch.ones(d, dtype=torch.double)
sv[-1] = torch.det(Uq @ Vq.transpose(0, 1))
R_opt = Uq @ torch.diag(sv) @ Vq.transpose(0, 1)
R_fixed = torch.bmm(R_opt.repeat(n, 1, 1), pred_Rs)
# find translation
pred_ts = pred_ts @ R_opt.transpose(0, 1)
c_opt = cp.Variable()
t_opt = cp.Variable((1, d))
constraints = []
obj = cp.Minimize(cp.sum(
cp.norm(gt_ts.numpy() - (c_opt * pred_ts.numpy() + np.ones((n, 1), dtype=np.double) @ t_opt), axis=1)))
prob = cp.Problem(obj, constraints)
prob.solve()
t_fixed = c_opt.value * pred_ts.numpy() + np.ones((n, 1), dtype=np.double) * t_opt.value
# Calculate translation error
t_error = np.linalg.norm(t_fixed - gt_ts.numpy(), axis=-1)
t_error = t_error
t_error_mean = np.mean(t_error)
t_error_medi = np.median(t_error)
# Calculate rotation error
R_error = compare_rotations(R_fixed, gt_Rs)
R_error = R_error.numpy()
R_error_mean = np.mean(R_error)
R_error_medi = np.median(R_error)
print('CAMERAS EVALUATION: R error mean = {0} ; t error mean = {1} ; R error median = {2} ; t error median = {3}'
.format("%.2f" % R_error_mean, "%.2f" % t_error_mean, "%.2f" % R_error_medi, "%.2f" % t_error_medi))
# return alignment and aligned pose
return R_opt.numpy(), t_opt.value, c_opt.value, R_fixed.numpy(), t_fixed
def compare_rotations(R1, R2):
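# relative rotation angle from the trace identity: cos(theta) = (trace(R1 R2^T) - 1) / 2, converted to degrees below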
cos_err = (torch.bmm(R1, R2.transpose(1, 2))[:, torch.arange(3), torch.arange(3)].sum(dim=-1) - 1) / 2
cos_err[cos_err > 1] = 1
cos_err[cos_err < -1] = -1
return cos_err.acos() * 180 / np.pi
def calculate_psnr(img1, img2, mask):
# img1 and img2 have range [0, 1]
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
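# masked PSNR: the full-image MSE is rescaled by (H*W)/mask.sum() so only pixels inside the mask contribute, then PSNR = 20*log10(1/sqrt(mse)) for a [0, 1] range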
mse = np.mean((img1 - img2)**2) * (img2.shape[0] * img2.shape[1]) / mask.sum()
if mse == 0:
return float('inf')
return 20 * math.log10(1.0 / math.sqrt(mse))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--conf', type=str, default='./confs/default.conf')
parser.add_argument('--data_split_dir', type=str, default='')
parser.add_argument('--gamma', type=float, default=1., help='gamma correction coefficient')
parser.add_argument('--save_exr', default=False, action="store_true", help='')
parser.add_argument('--light_sg', type=str, default='', help='')
parser.add_argument('--geometry', type=str, default='', help='')
parser.add_argument('--diffuse_albedo', type=str, default='', help='')
parser.add_argument('--view_name', type=str, default='', help='')
parser.add_argument('--expname', type=str, default='', help='The experiment name to be evaluated.')
parser.add_argument('--exps_folder', type=str, default='exps', help='The experiments folder name.')
parser.add_argument('--timestamp', default='latest', type=str, help='The experiment timestamp to test.')
parser.add_argument('--checkpoint', default='latest',type=str,help='The trained model checkpoint to test')
parser.add_argument('--write_idr', default=False, action="store_true", help='')
parser.add_argument('--resolution', default=512, type=int, help='Grid resolution for marching cube')
parser.add_argument('--is_uniform_grid', default=False, action="store_true", help='If set, evaluate marching cube with uniform grid.')
parser.add_argument('--gpu', type=str, default='auto', help='GPU to use [default: GPU auto]')
opt = parser.parse_args()
if opt.gpu == "auto":
deviceIDs = GPUtil.getAvailable(order='memory', limit=1, maxLoad=0.5, maxMemory=0.5, includeNan=False, excludeID=[], excludeUUID=[])
gpu = deviceIDs[0]
else:
gpu = opt.gpu
if (not gpu == 'ignore'):
os.environ["CUDA_VISIBLE_DEVICES"] = '{0}'.format(gpu)
evaluate(conf=opt.conf,
write_idr=opt.write_idr,
gamma=opt.gamma,
data_split_dir=opt.data_split_dir,
expname=opt.expname,
exps_folder_name=opt.exps_folder,
evals_folder_name='evals',
timestamp=opt.timestamp,
checkpoint=opt.checkpoint,
resolution=opt.resolution,
save_exr=opt.save_exr,
light_sg=opt.light_sg,
geometry=opt.geometry,
view_name=opt.view_name,
diffuse_albedo=opt.diffuse_albedo,
)
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
code/lambda_function.py | #!/usr/bin/env python3.6
import sys
sys.path.insert(1, 'lib')
import os
import json
import logging
import requests
from requests.packages.urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Retry until AWS Lambda function times out
retries = Retry(total=None,
status_forcelist=[500, 502, 503, 504])
session = requests.Session()
session.mount('http://', HTTPAdapter(max_retries=retries))
session.mount('https://', HTTPAdapter(max_retries=retries))
def lambda_handler(event, context):
try:
logger.info('event: {}'.format(json.dumps(event)))
url = os.environ['url']
response = session.get(url)
logger.info('response: {}\n{}'.format(
response.status_code, response.content.decode("utf-8")))
except Exception as e:
logger.error("Unexpected Error: {}".format(e))
if __name__ == "__main__":
import sys
logging.basicConfig(stream=sys.stderr)
timer='townclock-ping-timercheck-demo'
seconds='1200'
os.environ['url'] = 'https://timercheck.io/'+timer+'/'+seconds
lambda_handler({
"source": "townclock.chime",
"type": "chime",
"version": "3.0",
"timestamp": "2017-05-20 01:45 UTC",
"year": "2017",
"month": "05",
"day": "20",
"hour": "01",
"minute": "45",
"day_of_week": "Sat",
"unique_id": "02976691-0e70-4dbd-8191-a2f26819e1f7",
"region": "us-east-1",
"sns_topic_arn": "arn:aws:sns:us-east-1:522480313337:unreliable-town-clock-topic-178F1OQACHTYF",
"reference": "http://townclock.io",
"support": "First Last <[email protected]>",
"disclaimer": "UNRELIABLE SERVICE"
}, None)
| [] | [] | ["url"] | [] | ["url"] | python | 1 | 0 | |
engine.py | import os
import types
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
from timeit import default_timer as timer
import pygame
from ui_controls import *
from draw_utils import *
class App():
""" Encapsulates ingame loop among of other things
"""
(MODE_PLAY, MODE_EDIT) = range(1, 3)
DOUBLECLICK_DELAY = 250 # ms
def __init__(self, title=None, window_res=(640, 480), fps=60, dpi_aware=False, resizeable=False, vsync=True):
self._window_res = window_res
self._title = title
self._fps = fps
self._dpi_aware = dpi_aware
self._bgcolor = COLOR_BACKGROUND
self._mode = self.MODE_PLAY # not currently used
self._resizeable = resizeable
self._vsync = vsync
self._is_running = True
self._hide_gui = False
self._clear_screen = True
self._on_draw_cb = None
self._on_post_draw_cb = None
self._on_event_cb = None
self._on_pre_draw_cb = None
self._on_init_cb = None
self._on_quit_cb = None #return false to postpone quit
self._controls = Layout() # controls that are directly attached to the application
self._next_user_event = pygame.USEREVENT + 1
self._events = {} # holds even states
self._idle_ticks = 0 # used by some controls
self._anim_timer = 0 # used to animate sprites
self._last_mouse_click_pos = (0, 0)
self._last_mouse_click_ticks = 0
self._last_mouse_click_button = pygame.BUTTON_LEFT
self._scaled_fullscreen = False
self._pushed_btn = None
self._shadow_offset = 6
self._draged_controls = []
self._clicked_control = None
self._selected_control = None
self._unsettling_events = frozenset([ pygame.KEYDOWN, pygame.MOUSEBUTTONDOWN, pygame.MOUSEMOTION, pygame.MOUSEBUTTONUP ])
def __setattr__(self, name, value):
""" setting app attribute with a control causes it to be added to the app
Note that it will add that control to the default layout unless a control
already has a layout
"""
if not name.startswith("_") and isinstance(value, (BaseControl, Layout)):
value._name = name
if value.layout is None and not value.layout is self._controls:
self._controls.add( value )
value._app = self
super().__setattr__(name, value)
@staticmethod
def _set_dpi_aware():
import sys
if sys.platform == 'win32':
from ctypes import windll
windll.user32.SetProcessDPIAware(True)
def set_mode(self, mode):
self._mode = mode
@property
def screen_size(self):
return self._screen.get_size()
@property
def screen_width(self):
return self._screen.get_width()
@property
def screen_height(self):
return self._screen.get_height()
def on_event(self, f_cb):
self._on_event_cb = types.MethodType(f_cb, self)
on_event = property(fset=on_event)
def on_init(self, f_cb):
self._on_init_cb = types.MethodType(f_cb, self)
on_init = property(fset=on_init)
def on_gui_draw(self, f_cb):
""" Drawn before gui is drawn """
self._on_pre_draw_cb = types.MethodType(f_cb, self)
on_gui_draw = property(fset=on_gui_draw)
def on_draw(self, f_cb):
""" set on draw callback """
self._on_draw_cb = types.MethodType(f_cb, self)
on_draw = property(fset=on_draw) #write only
def on_quit(self, f_cb):
""" set on quit callback
Note: return False to postpone quitting
"""
self._on_quit_cb = types.MethodType(f_cb, self)
on_quit = property(fset=on_quit)
def new_event(self, millis=0, once=0):
event_id = self._next_user_event
self._next_user_event += 1
event = dict(event_id=event_id, millis=millis, once=once)
self._events[event_id] = event
if millis > 0:
pygame.time.set_timer(event_id, millis, once)
return event_id
def resume_event(self, event_id, millis=None, once=None):
if not event_id in self._events:
raise ValueError('only user events can be disabled')
event = self._events[event_id]
if millis is not None and millis<=0:
raise ValueError('millis have to be > 0')
event['millis']=millis
if once is not None:
event['once'] = once
pygame.time.set_timer(event_id, event['millis'], event['once'])
def pause_event(self, event_id):
""" pause user event timer from firing """
if not event_id in self._events:
raise ValueError('only user events can be paused')
event = self._events[event_id]
pygame.time.set_timer(event_id, 0)
def toggle_scaled_fullscreen(self):
if self._scaled_fullscreen:
self._exit_scaled_fullscreen()
else:
self._go_scaled_fullscreen()
@property
def anim_timer(self):
return self._anim_timer
@property
def controls(self):
return self._controls
def _go_scaled_fullscreen(self):
#fullscreen_res = pygame.display.get_desktop_sizes()[0]
self._screen = pygame.display.set_mode(self._window_res, self._flags | pygame.SCALED | pygame.FULLSCREEN, vsync=self._vsync)
self._scaled_fullscreen = True
def _exit_scaled_fullscreen(self):
self._screen = pygame.display.set_mode(self._window_res, self._flags, vsync=self._vsync)
self._scaled_fullscreen = False
def _init_pygame(self):
if self._dpi_aware:
self._set_dpi_aware()
pygame.init()
self._flags = pygame.DOUBLEBUF
if self._resizeable:
self._flags = self._flags | pygame.RESIZABLE
self._screen = pygame.display.set_mode(self._window_res, self._flags, vsync=self._vsync)
if self._title is not None:
pygame.display.set_caption(self._title)
#print('pygame.display.Info()', pygame.display.Info())
#print('pygame.display.get_driver()', pygame.display.get_driver())
self._clock = pygame.time.Clock()
self.get_ticks = pygame.time.get_ticks
self._EVENT_CAPTURE_FRAME = self.new_event()
self.EVENT_ANIM_HEARTBEAT = self.new_event(25)
self.EVENT_DOUBLECLICK = self.new_event()
self._shadow_surface = pygame.Surface((self.screen_width, self.screen_height)).convert()
self.metrics_fps = 0
def _set_selected_control(self, control):
if self._selected_control is not None and self._selected_control!=control:
self._selected_control.selected = False
self._selected_control = control
if control is not None:
control.selected = True
def _dispatch_events(self):
""" default engine's event dispatched that would also call
a cutsom event handler cb here
"""
for event in pygame.event.get():
if event.type == pygame.QUIT:
if self._on_quit_cb is None or self._on_quit_cb():
self.quit()
if event.type==self.EVENT_ANIM_HEARTBEAT:
self._anim_timer += 1
continue
if event.type in self._unsettling_events:
self._idle_ticks = 0
if event.type==pygame.MOUSEBUTTONDOWN:
mouse_pos = event.pos
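# a double click is the same button hitting the same pixel within DOUBLECLICK_DELAY ms of the previous click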
is_doubleclick = (self._last_mouse_click_ticks > (self.get_ticks() - self.DOUBLECLICK_DELAY) and
self._last_mouse_click_button==event.button and self._last_mouse_click_pos==mouse_pos)
self._last_mouse_click_ticks = self.get_ticks()
self._last_mouse_click_button = event.button
self._last_mouse_click_pos = event.pos
if is_doubleclick:
doubleclick_event = pygame.event.Event(self.EVENT_DOUBLECLICK, {"pos": mouse_pos, "button": event.button})
pygame.event.post(doubleclick_event)
#self._selected_control_old = self._selected_control
selected_control = None
for ctr in [ctrl for ctrl in self._controls if ctrl._visible and not self._hide_gui]:
drag_mode = ctr.drag_test(*mouse_pos)
if drag_mode is not None:
self._draged_controls += [(ctr, event.button, drag_mode)]
if ctr.click_test(*mouse_pos):
if isinstance(ctr, ButtonCtrl):
if self._pushed_btn is None:
self._pushed_btn = ctr
ctr.is_pushed = True
else:
if is_doubleclick:
ctr.doubleclicked(*mouse_pos, event.button, self)
ctr.clicked(*mouse_pos, event.button, self)
self._clicked_control = ctr
if ctr._selectable:
selected_control = ctr
else:
if ctr.layout.parent is not None:
selected_control = ctr.layout.parent
else:
selected_control = None
self._set_selected_control( selected_control )
elif event.type==pygame.MOUSEBUTTONUP:
if self._pushed_btn is not None:
self._pushed_btn.is_pushed=False
self._pushed_btn.clicked(*pygame.mouse.get_pos(), event.button, self)
self._pushed_btn=None
self._draged_controls = []
self._clicked_control = None
elif event.type==pygame.MOUSEMOTION:
for ctrl, button, drag_mode in self._draged_controls:
ctrl.drag_move(drag_mode, *event.pos, *event.rel, self, button)
elif event.type==pygame.KEYDOWN:
if event.key==pygame.K_ESCAPE:
if self._selected_control is not None:
self._selected_control.selected = False
self._selected_control = None
elif event.key==pygame.K_RETURN:
if pygame.key.get_mods() & pygame.KMOD_ALT:
self.toggle_scaled_fullscreen()
if self._selected_control is not None:
self._selected_control.key_pressed(event.key, self)
if event.type == pygame.VIDEORESIZE:
self._screen_size = (event.w , event.h)
self._shadow_surface = pygame.Surface((self.screen_width, self.screen_height)).convert()
# if event.type==self._EVENT_CAPTURE_FRAME:
# pass
if callable(self._on_event_cb):
self._on_event_cb(event)
self._idle_ticks += 1
def run(self):
""" main pygame loop
"""
self._init_pygame()
if self._on_init_cb is not None:
self._on_init_cb()
while self._is_running:
when = timer()
self._dispatch_events()
if self._clear_screen:
self._screen.fill(self._bgcolor)
if self._on_pre_draw_cb is not None:
self._on_pre_draw_cb()
# display shadow
shadow = self._shadow_surface
shadow.fill((0,0,0))
shadow_color = (25, 23, 19)
if not self._hide_gui:
for control in [ctrl for ctrl in self._controls if ctrl._visible and ctrl._drop_shadow and isinstance(ctrl, BaseControl)]:
sh_off = self._shadow_offset
pygame.draw.rect(shadow, shadow_color, (control.right, control.y + sh_off, sh_off, control.height))
pygame.draw.rect(shadow, shadow_color, (control.x+sh_off, control.bottom, control.width, sh_off))
self._screen.blit(shadow, (0, 0), special_flags=pygame.BLEND_SUB)
for control in [ctrl for ctrl in self._controls if ctrl._visible]:
control.draw(self._screen)
if callable(self._on_draw_cb):
self._on_draw_cb()
pygame.display.flip()
took = timer() - when
self._clock.tick(self._fps)
self.metrics_fps = 1.0 / took
print('exited')
pygame.quit()
def quit(self):
self._is_running = False
@property
def screen(self):
return self._screen
def blit(self, surf, where=(0,0), *args, **kwargs):
self.screen.blit(surf, where, *args, **kwargs)
def capture_gif(self, duration_secs, fps=5, rect=None):
self._gif_frame_delay = 1000 // int(fps)
self._capture_ends = self.get_ticks() + duration_secs*1000
if rect is None:
self._gif_rect = self.screen.get_rect()
else:
self._gif_rect = rect
self.resume_event(self._EVENT_CAPTURE_FRAME, millis=self._gif_frame_delay, once=False)
def _capture_gif_frame(self):
frame_no = getattr(self, '_frame_no', 1)
cropped = pygame.Surface((self._gif_rect[2], self._gif_rect[3]))
cropped.blit(self.screen, (0, 0), self._gif_rect)
img_filename = "image_%d.png" % frame_no
self._frame_no = frame_no + 1
pygame.image.save(cropped, os.path.join("anims", img_filename))
def _savegif(filename, source_path="anims/image_*.png", frame_delay=75, loop=1):
import glob
from PIL import Image
#https://stackoverflow.com/questions/753190/programmatically-generate-video-or-animated-gif-in-python
# save series of images to gif
# by Kris
#https://stackoverflow.com/questions/64971675/pil-adding-text-to-a-gif-frames-adds-noise-to-the-picture
# disable dithering
# by fdermishin
img, *imgs=[Image.open(f).quantize(method=Image.MEDIANCUT) for f in sorted(glob.glob(source_path))]
img.save(fp=filename, format='GIF', append_images=imgs,
save_all=True, duration=frame_delay, loop=loop)
| [] | [] | ["PYGAME_HIDE_SUPPORT_PROMPT"] | [] | ["PYGAME_HIDE_SUPPORT_PROMPT"] | python | 1 | 0 | |
env.py | import json
import os
from pathlib import Path
import subprocess as sp
import sys
from textwrap import dedent
import venv
assert __name__ == "__main__"
name_kernel = "binary-embedding"
path_venv = Path(".venv").resolve()
if "-h" in sys.argv or "--help" in sys.argv:
print(
dedent(f"""\
Prepare an isolated environment for running the binary similarity notebook.
Usage:
{sys.executable} {__file__} [ARG]
If the Python virtual environment is active, ARG designates the name to give
to the Jupyter kernel in which to run the notebook; by default, we use the
name `{name_kernel}'.
If the Python virtual environment is not yet active, ARG designates the path
in which to set up the virtual environment. By default, we use
{path_venv}
Once the environment has been created, you should activate it (according to
instructions in https://docs.python.org/3/library/venv.html) and run this
script again to prepare the Jupyter kernel.
""".rstrip()),
file=sys.stderr
)
sys.exit(0)
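# Phase 2: an activated virtual environment exports VIRTUAL_ENV, so install the Jupyter pieces and register the kernel; otherwise phase 1 below creates the venv.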
if "VIRTUAL_ENV" in os.environ:
# Environment is active.
if len(sys.argv) >= 2:
name_kernel = sys.argv[1]
jupyter = []
ipykernel = []
r = sp.run(
["jupyter", "kernelspec", "list", "--json"],
encoding="utf-8",
stdout=sp.PIPE
)
if r.returncode == 0:
specs = set(json.loads(r.stdout).get("kernelspecs", {}).keys())
if name_kernel not in specs:
ipykernel = ["ipykernel"]
else:
jupyter = ["jupyterlab", "ipykernel"]
try:
if jupyter or ipykernel:
print(f"Must install: {' '.join([*jupyter, *ipykernel])}")
sp.run(["pip", "install", *jupyter, *ipykernel], check=True)
sp.run(
[
"python",
"-m",
"ipykernel",
"install",
"--user",
"--name",
name_kernel
],
check=True
)
print()
r = sp.run(
["pip", "list", "--format", "json"],
check=True,
encoding="utf-8",
stdout=sp.PIPE
)
dependencies = set([p.get("name", "") for p in json.loads(r.stdout)])
if "jupyterlab" in dependencies:
print("Ready to go! Run `jupyter lab' to get started.")
else:
print("Kernel deployed! Load the notebook in your running Jupyter and set")
print(f"the kernel to {name_kernel} to get going.")
except sp.CalledProcessError as err:
sys.exit(err.returncode)
else:
# Environment is not active.
if len(sys.argv) >= 2:
path_venv = Path(sys.argv[1])
if str(path_venv).startswith("-"):
print(f"Invalid environment path: {path_venv}")
sys.exit(28)
if not path_venv.is_dir():
print("Creating virtual environment")
venv.create(path_venv, with_pip=True, upgrade_deps=True)
print()
print("*** Environment ready! Activate it, then run this script once again to finalize setup. ***")
| []
| []
| []
| [] | [] | python | 0 | 0 | |
backend/gps_32199/wsgi.py | """
WSGI config for gps_32199 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gps_32199.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
metrics/registry_test.go | // Copyright (c) 2018 Palantir Technologies. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package metrics_test
import (
"os"
"os/exec"
"reflect"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/palantir/pkg/metrics"
)
func TestRegistryRegistration(t *testing.T) {
// register root metrics
root := metrics.NewRootMetricsRegistry()
// register metric
_ = root.Counter("my-counter")
// create subregistry and register metric on it
sub := root.Subregistry("subregistry")
_ = sub.Gauge("sub-gauge")
wantNames := []string{
"my-counter",
"subregistry.sub-gauge",
}
var gotNames []string
root.Each(metrics.MetricVisitor(func(name string, tags metrics.Tags, metric metrics.MetricVal) {
gotNames = append(gotNames, name)
assert.NotNil(t, metric)
}))
assert.Equal(t, wantNames, gotNames)
}
func TestMetricsWithTags(t *testing.T) {
root := metrics.NewRootMetricsRegistry()
// register metric with tags
_ = root.Counter("my-counter", metrics.MustNewTag("region", "nw"))
_ = root.Counter("my-counter", metrics.MustNewTag("region", "ne"))
_ = root.Counter("my-counter", metrics.MustNewTag("region", "se"), metrics.MustNewTag("application", "database"))
var gotNames []string
var gotTags [][]metrics.Tag
root.Each(metrics.MetricVisitor(func(name string, tags metrics.Tags, metric metrics.MetricVal) {
gotNames = append(gotNames, name)
gotTags = append(gotTags, tags)
assert.NotNil(t, metric)
}))
// output is sorted by metric name and then by tag names (which themselves are sorted alphabetically)
wantNames := []string{
"my-counter",
"my-counter",
"my-counter",
}
wantTags := [][]metrics.Tag{
{metrics.MustNewTag("application", "database"), metrics.MustNewTag("region", "se")},
{metrics.MustNewTag("region", "ne")},
{metrics.MustNewTag("region", "nw")},
}
assert.Equal(t, wantNames, gotNames)
assert.Equal(t, wantTags, gotTags)
}
// Prefix should be used as provided (no case conversion/normalization), while tags should always be converted to
// lowercase.
func TestMetricsCasing(t *testing.T) {
root := metrics.NewRootMetricsRegistry()
// register metric with tags
_ = root.Counter("my-COUNTER", metrics.MustNewTag("REGION", "nW"))
_ = root.Counter("my-counter", metrics.MustNewTag("region", "NE"))
var gotNames []string
var gotTags [][]metrics.Tag
root.Each(metrics.MetricVisitor(func(name string, tags metrics.Tags, metric metrics.MetricVal) {
gotNames = append(gotNames, name)
gotTags = append(gotTags, tags)
assert.NotNil(t, metric)
}))
// output is sorted by metric name and then by tag names (which themselves are sorted alphabetically)
wantNames := []string{
"my-COUNTER",
"my-counter",
}
wantTags := [][]metrics.Tag{
{metrics.MustNewTag("region", "nw")},
{metrics.MustNewTag("region", "ne")},
}
assert.Equal(t, wantNames, gotNames)
assert.Equal(t, wantTags, gotTags)
}
func TestRegistryRegistrationWithMemStats(t *testing.T) {
// register root metrics
root := metrics.NewRootMetricsRegistry()
metrics.CaptureRuntimeMemStats(root, time.Hour)
// register metric
_ = root.Counter("my-counter")
// create subregistry and register metric on it
sub := root.Subregistry("subregistry")
_ = sub.Gauge("sub-gauge")
wantNames := []string{
"go.runtime.MemStats.Alloc",
"go.runtime.MemStats.Frees",
"go.runtime.MemStats.GCCPUFraction",
"go.runtime.MemStats.HeapAlloc",
"go.runtime.MemStats.HeapIdle",
"go.runtime.MemStats.HeapInuse",
"go.runtime.MemStats.HeapObjects",
"go.runtime.MemStats.HeapReleased",
"go.runtime.MemStats.HeapSys",
"go.runtime.MemStats.Mallocs",
"go.runtime.MemStats.NumGC",
"go.runtime.MemStats.PauseNs",
"go.runtime.MemStats.PauseTotalNs",
"go.runtime.MemStats.StackInuse",
"go.runtime.MemStats.StackSys",
"go.runtime.NumCgoCall",
"go.runtime.NumGoroutine",
"go.runtime.NumThread",
"go.runtime.ReadMemStats",
"my-counter",
"subregistry.sub-gauge",
}
var gotNames []string
root.Each(metrics.MetricVisitor(func(name string, tags metrics.Tags, metric metrics.MetricVal) {
gotNames = append(gotNames, name)
assert.NotNil(t, metric)
}))
assert.Equal(t, wantNames, gotNames)
}
func concurrentMetricTest(t *testing.T) {
root := metrics.NewRootMetricsRegistry()
commonMetric := "test-counter"
increments := 100
var waitGroup sync.WaitGroup
waitGroup.Add(2)
go func() {
for i := 0; i < increments; i++ {
root.Counter(commonMetric).Inc(1)
}
waitGroup.Done()
}()
go func() {
for i := 0; i < increments; i++ {
root.Counter(commonMetric).Inc(1)
}
waitGroup.Done()
}()
waitGroup.Wait()
require.Equal(t, int64(2*increments), root.Counter(commonMetric).Count())
}
// It is hard to catch the goroutine exits and have them impact actual test reporting. We end up having
// to simulate the testing ourselves, but it also means that if this test fails, it takes a bit of work to figure out why.
func TestManyConcurrentMetrics(t *testing.T) {
if os.Getenv("CRASH_IF_FAILS") == "1" {
concurrentMetricTest(t)
return
}
cmd := exec.Command(os.Args[0], "-test.run=TestManyConcurrentMetrics")
cmd.Env = append(os.Environ(), "CRASH_IF_FAILS=1")
err := cmd.Run()
require.NoError(t, err, "Error while checking for concurrent metric handling!")
}
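// A minimal sketch of the same re-exec pattern as a reusable helper (this
// helper is hypothetical and not part of the package): the parent process
// re-runs its own test binary with an environment marker set, and only the
// child branch executes the potentially crashing code.
//
//   func runInSubprocess(t *testing.T, testName string, body func()) {
//       if os.Getenv("CRASH_IF_FAILS") == "1" {
//           body()
//           return
//       }
//       cmd := exec.Command(os.Args[0], "-test.run="+testName)
//       cmd.Env = append(os.Environ(), "CRASH_IF_FAILS=1")
//       require.NoError(t, cmd.Run())
//   }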
func TestSubregistry_Each(t *testing.T) {
rootRegistry := metrics.NewRootMetricsRegistry()
subRegistry := rootRegistry.Subregistry("prefix.")
subRegistry.Gauge("gauge1").Update(0)
subRegistry.Gauge("gauge2").Update(1)
gauge1Count := 0
gauge2Count := 0
subRegistry.Each(metrics.MetricVisitor(func(name string, tags metrics.Tags, metric metrics.MetricVal) {
assert.NotNil(t, metric)
assert.Empty(t, tags)
switch name {
case "gauge1":
gauge1Count++
case "gauge2":
gauge2Count++
default:
assert.Fail(t, "unexpected metric %s", name)
}
}))
assert.Equal(t, 1, gauge1Count)
assert.Equal(t, 1, gauge2Count)
}
func TestSubregistry_Unregister(t *testing.T) {
registry := metrics.NewRootMetricsRegistry().Subregistry("prefix.")
registry.Gauge("gauge1", metrics.MustNewTag("tagKey", "tagValue1")).Update(0)
registry.Gauge("gauge1", metrics.MustNewTag("tagKey", "tagValue2")).Update(0)
registry.Gauge("gauge2").Update(0)
assert.True(t, registryContains(t, registry, "gauge1", []metrics.Tag{metrics.MustNewTag("tagKey", "tagValue1")}))
assert.True(t, registryContains(t, registry, "gauge1", []metrics.Tag{metrics.MustNewTag("tagKey", "tagValue2")}))
assert.True(t, registryContains(t, registry, "gauge2", nil))
assert.Equal(t, 3, registrySize(t, registry))
registry.Unregister("gauge1", metrics.MustNewTag("tagKey", "tagValue1"))
assert.True(t, registryContains(t, registry, "gauge1", []metrics.Tag{metrics.MustNewTag("tagKey", "tagValue2")}))
assert.True(t, registryContains(t, registry, "gauge2", nil))
assert.Equal(t, 2, registrySize(t, registry))
registry.Unregister("gauge1", metrics.MustNewTag("tagKey", "tagValue2"))
assert.True(t, registryContains(t, registry, "gauge2", nil))
assert.Equal(t, 1, registrySize(t, registry))
registry.Unregister("gauge2")
assert.Equal(t, 0, registrySize(t, registry))
}
func TestRootRegistry_Unregister(t *testing.T) {
registry := metrics.NewRootMetricsRegistry()
registry.Gauge("gauge1", metrics.MustNewTag("tagKey", "tagValue1")).Update(0)
registry.Gauge("gauge1", metrics.MustNewTag("tagKey", "tagValue2")).Update(0)
registry.Gauge("gauge2").Update(0)
assert.True(t, registryContains(t, registry, "gauge1", []metrics.Tag{metrics.MustNewTag("tagKey", "tagValue1")}))
assert.True(t, registryContains(t, registry, "gauge1", []metrics.Tag{metrics.MustNewTag("tagKey", "tagValue2")}))
assert.True(t, registryContains(t, registry, "gauge2", nil))
assert.Equal(t, 3, registrySize(t, registry))
registry.Unregister("gauge1", metrics.MustNewTag("tagKey", "tagValue1"))
assert.True(t, registryContains(t, registry, "gauge1", []metrics.Tag{metrics.MustNewTag("tagKey", "tagValue2")}))
assert.True(t, registryContains(t, registry, "gauge2", nil))
assert.Equal(t, 2, registrySize(t, registry))
registry.Unregister("gauge1", metrics.MustNewTag("tagKey", "tagValue2"))
assert.True(t, registryContains(t, registry, "gauge2", nil))
assert.Equal(t, 1, registrySize(t, registry))
registry.Unregister("gauge2")
assert.Equal(t, 0, registrySize(t, registry))
}
func TestRootRegistry_SubregistryWithTags(t *testing.T) {
rootRegistry := metrics.NewRootMetricsRegistry()
permanentTag := metrics.MustNewTag("permanentKey", "permanentValue")
subregistry := rootRegistry.Subregistry("subregistry", permanentTag)
runtimeTag := metrics.MustNewTag("key", "value")
subregistry.Counter("counter", runtimeTag).Count()
subregistry.Gauge("gauge", runtimeTag).Update(0)
subregistry.GaugeFloat64("gaugeFloat64", runtimeTag).Update(0)
subregistry.Meter("meter", runtimeTag).Mark(0)
subregistry.Timer("timer", runtimeTag).Update(0)
subregistry.Histogram("histogram", runtimeTag).Update(0)
subregistry.HistogramWithSample("histogramWithSample", metrics.DefaultSample(), runtimeTag).Update(0)
registered := map[string]map[string]string{}
subregistry.Each(func(name string, tags metrics.Tags, metric metrics.MetricVal) {
registered[name] = tags.ToMap()
})
assert.Equal(t,
map[string]map[string]string{
"counter": metrics.Tags{permanentTag, runtimeTag}.ToMap(),
"gauge": metrics.Tags{permanentTag, runtimeTag}.ToMap(),
"gaugeFloat64": metrics.Tags{permanentTag, runtimeTag}.ToMap(),
"meter": metrics.Tags{permanentTag, runtimeTag}.ToMap(),
"timer": metrics.Tags{permanentTag, runtimeTag}.ToMap(),
"histogram": metrics.Tags{permanentTag, runtimeTag}.ToMap(),
"histogramWithSample": metrics.Tags{permanentTag, runtimeTag}.ToMap(),
},
registered,
)
subregistry.Unregister("counter", runtimeTag)
subregistry.Unregister("gauge", runtimeTag)
subregistry.Unregister("gaugeFloat64", runtimeTag)
subregistry.Unregister("meter", runtimeTag)
subregistry.Unregister("timer", runtimeTag)
subregistry.Unregister("histogram", runtimeTag)
subregistry.Unregister("histogramWithSample", runtimeTag)
subregistry.Each(metrics.MetricVisitor(func(name string, tags metrics.Tags, metric metrics.MetricVal) {
assert.Fail(t, "there should be no metrics registered")
}))
}
func registrySize(t *testing.T, registry metrics.Registry) int {
count := 0
registry.Each(metrics.MetricVisitor(func(name string, tags metrics.Tags, metric metrics.MetricVal) {
count++
}))
return count
}
func registryContains(t *testing.T, registry metrics.Registry, name string, tags metrics.Tags) bool {
contains := false
tagStrings := []string{}
for _, tag := range tags {
tagStrings = append(tagStrings, tag.String())
}
registry.Each(metrics.MetricVisitor(func(eachName string, eachTags metrics.Tags, metric metrics.MetricVal) {
eachTagStrings := []string{}
for _, eachTag := range eachTags {
eachTagStrings = append(eachTagStrings, eachTag.String())
}
if eachName == name && reflect.DeepEqual(eachTagStrings, tagStrings) {
contains = true
}
}))
return contains
}
| [
"\"CRASH_IF_FAILS\""
]
| []
| [
"CRASH_IF_FAILS"
]
| [] | ["CRASH_IF_FAILS"] | go | 1 | 0 | |
src/fora/logger.py | """
Provides logging utilities.
"""
import argparse
import difflib
import os
from dataclasses import dataclass
import sys
from types import TracebackType
from typing import Any, Optional, Type, cast
import fora
@dataclass
class State:
"""Global state for logging."""
indentation_level: int = 0
"""The current global indentation level."""
state: State = State()
"""The global logger state."""
def use_color() -> bool:
"""Returns true if color should be used."""
if not isinstance(cast(Any, fora.args), argparse.Namespace):
return os.getenv("NO_COLOR") is None
return not fora.args.no_color
def col(color_code: str) -> str:
"""Returns the given argument only if color is enabled."""
return color_code if use_color() else ""
class IndentationContext:
"""A context manager to modify the indentation level."""
def __enter__(self) -> None:
state.indentation_level += 1
def __exit__(self, exc_type: Optional[Type[BaseException]], exc: Optional[BaseException], traceback: Optional[TracebackType]) -> None:
_ = (exc_type, exc, traceback)
state.indentation_level -= 1
def ellipsis(s: str, width: int) -> str:
"""
Shrinks the given string to width (including an ellipsis character).
Parameters
----------
s
The string.
width
The maximum width.
Returns
-------
str
A modified string with at most `width` characters.
"""
if len(s) > width:
s = s[:width - 1] + "…"
return s
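# Example (illustrative): ellipsis("hello world", 5) returns "hell…", while
# ellipsis("hi", 5) returns "hi" unchanged.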
def indent() -> IndentationContext:
"""Retruns a context manager that increases the indentation level."""
return IndentationContext()
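# Example (illustrative sketch): nesting output one level deeper while a block
# of work runs.
#   print_indented("host alpha")
#   with indent():
#       print_indented("script deploy.py")   # rendered with one extra indent prefix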
def indent_prefix() -> str:
"""Returns the indentation prefix for the current indentation level."""
if not use_color():
return " " * state.indentation_level
ret = ""
for i in range(state.indentation_level):
if i % 2 == 0:
ret += "[90m│[m "
else:
ret += "[90m╵[m "
return ret
def debug(msg: str) -> None:
"""Prints the given message only in debug mode."""
if not fora.args.debug:
return
print(f" [1;34mDEBUG[m: {msg}", file=sys.stderr)
def debug_args(msg: str, args: dict[str, Any]) -> None:
"""Prints all given arguments when in debug mode."""
if not fora.args.debug:
return
str_args = ""
args = {k: v for k,v in args.items() if k != "self"}
if len(args) > 0:
str_args = " " + ", ".join(f"{k}={v}" for k,v in args.items())
print(f" [1;34mDEBUG[m: {msg}{str_args}", file=sys.stderr)
def print_indented(msg: str, **kwargs: Any) -> None:
"""Same as print(), but prefixes the message with the indentation prefix."""
print(f"{indent_prefix()}{msg}", **kwargs)
def connection_init(connector: Any) -> None:
"""Prints connection initialization information."""
print_indented(f"{col('[1;34m')}host{col('[m')} {connector.host.name} via {col('[1;33m')}{connector.host.url}{col('[m')}", flush=True)
def connection_failed(error_msg: str) -> None:
"""Signals that an error has occurred while establishing the connection."""
print(col("[1;31m") + "ERR" + col("[m"))
print_indented(f" {col('[90m')}└{col('[m')} " + f"{col('[31m')}{error_msg}{col('[m')}")
def connection_established() -> None:
"""Signals that the connection has been successfully established."""
#print(col("[1;32m") + "OK" + col("[m"))
def run_script(script: str, name: Optional[str] = None) -> None:
"""Prints the script file and name that is being executed next."""
if name is not None:
print_indented(f"{col('[33;1m')}script{col('[m')} {script} {col('[90m')}({name}){col('[m')}")
else:
print_indented(f"{col('[33;1m')}script{col('[m')} {script}")
def print_operation_title(op: Any, title_color: str, end: str = "\n") -> None:
"""Prints the operation title and description."""
name_if_given = (" " + col('[90m') + f"({op.name})" + col('[m')) if op.name is not None else ""
dry_run_info = f" {col('[90m')}(dry){col('[m')}" if fora.args.dry else ""
print_indented(f"{title_color}{op.op_name}{col('[m')}{dry_run_info} {op.description}{name_if_given}", end=end, flush=True)
def print_operation_early(op: Any) -> None:
"""Prints the operation title and description before the final status is known."""
title_color = col("[1;33m")
# Only overwrite status later if debugging is not enabled.
print_operation_title(op, title_color, end=" (early status)\n" if fora.args.debug else "")
def decode_escape(data: bytes, encoding: str = 'utf-8') -> str:
"""
Tries to decode the given data with the given encoding, but replaces all non-decodeable
and non-printable characters with backslash escape sequences.
Example:
```python
>>> decode_escape(b'It is Wednesday\\nmy dudes\\r\\n🐸\\xff\\0')
'It is Wednesday\\\\nmy dudes\\\\r\\\\n🐸\\\\xff\\\\0'
```
Parameters
----------
data
The content that should be decoded and escaped.
encoding
The encoding that should be tried. To preserve utf-8 symbols, use 'utf-8',
to replace any non-ascii character with an escape sequence use 'ascii'.
Returns
-------
str
The decoded and escaped string.
"""
def escape_char(c: str) -> str:
special = {'\x00': '\\0', '\n': '\\n', '\r': '\\r', '\t': '\\t'}
if c in special:
return special[c]
num = ord(c)
if not c.isprintable() and num <= 0xff:
return f"\\x{num:02x}"
return c
return ''.join([escape_char(c) for c in data.decode(encoding, 'backslashreplace')])
def diff(filename: str, old: Optional[bytes], new: Optional[bytes], color: bool = True) -> list[str]:
"""
Creates a diff between the old and new content of the given filename,
that can be printed to the console. This function returns the diff
output as an array of lines. The lines in the output array are not
terminated by newlines.
If color is True, the diff is colored using ANSI escape sequences.
If you want to provide an alternative diffing function, beware that
the input can theoretically contain any bytes and therefore should
be decoded as utf-8 if possible, but non-decodeable
or non-printable characters should be replaced with human readable
variants such as `\\x00`, `^@` or similar representations.
Your diffing function should still be able to work on the raw bytes
representation. After you acquire the diff and before you apply colors,
your output should be made printable with a function such as `fora.logger.decode_escape`:
```python
# First decode and escape
line = logger.decode_escape(byteline)
# Add coloring afterwards so ANSI escape sequences are not escaped
```
Parameters
----------
filename
The filename of the file that is being diffed.
old
The old content, or None if the file didn't exist before.
new
The new content, or None if the file was deleted.
color
Whether the output should be colored (with ANSI color sequences).
Returns
-------
list[str]
The lines of the diff output. The individual lines will not have a terminating newline.
"""
bdiff = list(difflib.diff_bytes(difflib.unified_diff,
a=[] if old is None else old.split(b'\n'),
b=[] if new is None else new.split(b'\n'),
lineterm=b''))
# Strip file name header and decode diff to be human readable.
difflines = map(decode_escape, bdiff[2:])
# Create custom file name header
action = 'created' if old is None else 'deleted' if new is None else 'modified'
title = f"{action}: {filename}"
N = len(title)
header = ['─' * N, title, '─' * N]
# Apply coloring if desired
if color:
def apply_color(line: str) -> str:
linecolor = {
'+': '[32m',
'-': '[31m',
'@': '[34m',
}
return linecolor.get(line[0], '[90m') + line + '[m'
# Apply color to diff
difflines = map(apply_color, difflines)
# Apply color to header
header = list(map(lambda line: f"[33m{line}[m", header))
return header + list(difflines)
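# Example (illustrative sketch): printing a colored diff for a changed file.
#   for line in diff("/etc/motd", b"old contents\n", b"new contents\n"):
#       print_indented(line)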
# TODO: move functions to operation api. cleaner and has type access.
def _operation_state_infos(result: Any) -> list[str]:
def to_str(v: Any) -> str:
return v.hex() if isinstance(v, bytes) else str(v)
# Print "key: value" pairs with changes
state_infos: list[str] = []
for k,final_v in result.final.items():
if final_v is None:
continue
initial_v = result.initial[k]
str_initial_v = to_str(initial_v)
str_final_v = to_str(final_v)
# Add ellipsis on long strings, if we are not in verbose mode
if fora.args.verbose == 0:
k = ellipsis(k, 12)
str_initial_v = ellipsis(to_str(initial_v), 9)
str_final_v = ellipsis(to_str(final_v), 9+3+9 if initial_v is None else 9)
if initial_v == final_v:
if fora.args.verbose >= 1:
# TODO = instead of : for better readability
entry_str = f"{col('[90m')}{k}: {str_initial_v}{col('[m')}"
state_infos.append(entry_str)
else:
if initial_v is None:
entry_str = f"{col('[33m')}{k}: {col('[32m')}{str_final_v}{col('[m')}"
else:
entry_str = f"{col('[33m')}{k}: {col('[31m')}{str_initial_v}{col('[33m')} → {col('[32m')}{str_final_v}{col('[m')}"
state_infos.append(entry_str)
return state_infos
def print_operation(op: Any, result: Any) -> None:
"""Prints the operation summary after it has finished execution."""
if result.success:
title_color = col("[1;32m") if result.changed else col("[1;90m")
else:
title_color = col("[1;31m")
# Print title and name, overwriting the transitive status
print("\r", end="")
print_operation_title(op, title_color)
if not result.success:
print_indented(f" {col('[90m')}└{col('[m')} " + f"{col('[31m')}{result.failure_message}{col('[m')}")
return
if not fora.args.changes:
return
# Cache number of upcoming diffs to determine what box character to print
n_diffs = len(op.diffs) if fora.args.diff else 0
box_char = '└' if n_diffs == 0 else '├'
# Print "key: value" pairs with changes
state_infos = _operation_state_infos(result)
if len(state_infos) > 0:
print_indented(f"{col('[90m')}{box_char}{col('[m')} " + f"{col('[90m')},{col('[m')} ".join(state_infos))
if fora.args.diff:
diff_lines = []
# Generate diffs
for file, old, new in op.diffs:
diff_lines.extend(diff(file, old, new))
# Print diffs with block character line
if len(diff_lines) > 0:
for l in diff_lines[:-1]:
print_indented(f"{col('[90m')}│ {col('[m')}" + l)
print_indented(f"{col('[90m')}└ {col('[m')}" + diff_lines[-1])
| []
| []
| [
"NO_COLOR"
]
| [] | ["NO_COLOR"] | python | 1 | 0 | |
pkg/startup/startup.go | // Copyright (c) 2016,2020 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package startup
import (
"context"
cryptorand "crypto/rand"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"reflect"
"regexp"
"strconv"
"strings"
"time"
"github.com/projectcalico/libcalico-go/lib/apiconfig"
api "github.com/projectcalico/libcalico-go/lib/apis/v3"
client "github.com/projectcalico/libcalico-go/lib/clientv3"
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
"github.com/projectcalico/libcalico-go/lib/logutils"
"github.com/projectcalico/libcalico-go/lib/names"
cnet "github.com/projectcalico/libcalico-go/lib/net"
"github.com/projectcalico/libcalico-go/lib/numorstring"
"github.com/projectcalico/libcalico-go/lib/options"
"github.com/projectcalico/libcalico-go/lib/selector"
"github.com/projectcalico/libcalico-go/lib/upgrade/migrator"
"github.com/projectcalico/libcalico-go/lib/upgrade/migrator/clients"
"github.com/projectcalico/node/pkg/calicoclient"
"github.com/projectcalico/node/pkg/startup/autodetection"
"github.com/projectcalico/node/pkg/startup/autodetection/ipv4"
log "github.com/sirupsen/logrus"
kapiv1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
const (
DEFAULT_IPV4_POOL_CIDR = "192.168.0.0/16"
DEFAULT_IPV4_POOL_BLOCK_SIZE = 26
DEFAULT_IPV6_POOL_BLOCK_SIZE = 122
DEFAULT_IPV4_POOL_NAME = "default-ipv4-ippool"
DEFAULT_IPV6_POOL_NAME = "default-ipv6-ippool"
AUTODETECTION_METHOD_FIRST = "first-found"
AUTODETECTION_METHOD_CAN_REACH = "can-reach="
AUTODETECTION_METHOD_INTERFACE = "interface="
AUTODETECTION_METHOD_SKIP_INTERFACE = "skip-interface="
// KubeadmConfigConfigMap is defined in k8s.io/kubernetes, which we can't import due to versioning issues.
KubeadmConfigConfigMap = "kubeadm-config"
)
// Version string, set during build.
var VERSION string
// For testing purposes we define an exit function that we can override.
var exitFunction = os.Exit
var (
// Default values, names for different configs.
defaultLogSeverity = "Info"
globalFelixConfigName = "default"
felixNodeConfigNamePrefix = "node."
)
// This file contains the main startup processing for the calico/node. This
// includes:
// - Detecting IP address and Network to use for BGP
// - Configuring the node resource with IP/AS information provided in the
// environment, or autodetected.
// - Creating default IP Pools for quick-start use
func Run() {
// Check $CALICO_STARTUP_LOGLEVEL to capture early log statements
configureLogging()
// Determine the name for this node.
nodeName := determineNodeName()
// Create the Calico API cli.
cfg, cli := calicoclient.CreateClient()
ctx := context.Background()
// An explicit value of true is required to wait for the datastore.
if os.Getenv("WAIT_FOR_DATASTORE") == "true" {
waitForConnection(ctx, cli)
log.Info("Datastore is ready")
} else {
log.Info("Skipping datastore connection test")
}
if cfg.Spec.DatastoreType == apiconfig.Kubernetes {
if err := ensureKDDMigrated(cfg, cli); err != nil {
log.WithError(err).Errorf("Unable to ensure datastore is migrated.")
terminate()
}
}
// Query the current Node resources. We update our node resource with
// updated IP data and use the full list of nodes for validation.
node := getNode(ctx, cli, nodeName)
var kubeadmConfig *v1.ConfigMap
// If Calico is running in policy only mode we don't need to write
// BGP related details to the Node.
if os.Getenv("CALICO_NETWORKING_BACKEND") != "none" {
// Configure and verify the node IP addresses and subnets.
checkConflicts, err := configureIPsAndSubnets(node)
if err != nil {
clearv4 := os.Getenv("IP") == "autodetect"
clearv6 := os.Getenv("IP6") == "autodetect"
if node.ResourceVersion != "" {
// If we're auto-detecting an IP on an existing node and hit an error, clear the previous
// IP addresses from the node since they are no longer valid.
clearNodeIPs(ctx, cli, node, clearv4, clearv6)
}
terminate()
}
// If we report an IP change (v4 or v6) we should verify there are no
// conflicts between Nodes.
if checkConflicts && os.Getenv("DISABLE_NODE_IP_CHECK") != "true" {
v4conflict, v6conflict, err := checkConflictingNodes(ctx, cli, node)
if err != nil {
// If we've auto-detected a new IP address for an existing node that now conflicts, clear the old IP address(es)
// from the node in the datastore. This frees the address in case it needs to be used for another node.
clearv4 := (os.Getenv("IP") == "autodetect") && v4conflict
clearv6 := (os.Getenv("IP6") == "autodetect") && v6conflict
if node.ResourceVersion != "" {
clearNodeIPs(ctx, cli, node, clearv4, clearv6)
}
terminate()
}
}
// Configure the node AS number.
configureASNumber(node)
// If running under kubernetes with secrets to call k8s API
if config, err := rest.InClusterConfig(); err == nil {
// default timeout is 30 seconds, which isn't appropriate for this kind of
// startup action because network services, like kube-proxy, might not be
// running and we don't want to block the full 30 seconds if they are just
// a few seconds behind.
config.Timeout = 2 * time.Second
// Creates the k8s clientset.
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
log.WithError(err).Error("Failed to create clientset")
return
}
log.Info("Setting NetworkUnavailable to False")
err = setNodeNetworkUnavailableFalse(*clientset, nodeName)
if err != nil {
log.WithError(err).Error("Unable to set NetworkUnavailable to False")
}
kubeadmConfig, err = clientset.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(KubeadmConfigConfigMap, metav1.GetOptions{})
if err != nil {
// Any error other than not finding kubeadm's config map should be serious enough
// that we ought to stop here and return.
if !errors.IsNotFound(err) {
log.WithError(err).Error("failed to query kubeadm's config map")
terminate()
return
}
}
}
}
// Populate a reference to the node based on orchestrator node identifiers.
configureNodeRef(node)
// Check expected filesystem
ensureFilesystemAsExpected()
// Apply the updated node resource.
if _, err := CreateOrUpdate(ctx, cli, node); err != nil {
log.WithError(err).Errorf("Unable to set node resource configuration")
terminate()
}
// Configure IP Pool configuration.
configureIPPools(ctx, cli, kubeadmConfig)
// Set default configuration required for the cluster.
if err := ensureDefaultConfig(ctx, cfg, cli, node, kubeadmConfig); err != nil {
log.WithError(err).Errorf("Unable to set global default configuration")
terminate()
}
// Write config files now that we are ready to start other components.
writeNodeConfig(nodeName)
// Tell the user what the name of the node is.
log.Infof("Using node name: %s", nodeName)
}
// configureNodeRef will attempt to discover the cluster type it is running on, check to ensure we
// have not already set it on this Node, and set it if need be.
func configureNodeRef(node *api.Node) {
orchestrator := "k8s"
nodeRef := ""
// Sort out what type of cluster we're running on.
if nodeRef = os.Getenv("CALICO_K8S_NODE_REF"); nodeRef == "" {
return
}
node.Spec.OrchRefs = []api.OrchRef{api.OrchRef{NodeName: nodeRef, Orchestrator: orchestrator}}
}
// CreateOrUpdate creates the Node if ResourceVersion is not specified,
// or Update if it's specified.
func CreateOrUpdate(ctx context.Context, client client.Interface, node *api.Node) (*api.Node, error) {
if node.ResourceVersion != "" {
return client.Nodes().Update(ctx, node, options.SetOptions{})
}
return client.Nodes().Create(ctx, node, options.SetOptions{})
}
func clearNodeIPs(ctx context.Context, client client.Interface, node *api.Node, clearv4, clearv6 bool) {
if clearv4 {
log.WithField("IP", node.Spec.BGP.IPv4Address).Info("Clearing out-of-date IPv4 address from this node")
node.Spec.BGP.IPv4Address = ""
}
if clearv6 {
log.WithField("IP", node.Spec.BGP.IPv6Address).Info("Clearing out-of-date IPv6 address from this node")
node.Spec.BGP.IPv6Address = ""
}
// If the BGP spec is empty, then set it to nil.
if node.Spec.BGP != nil && reflect.DeepEqual(*node.Spec.BGP, api.NodeBGPSpec{}) {
node.Spec.BGP = nil
}
if clearv4 || clearv6 {
_, err := client.Nodes().Update(ctx, node, options.SetOptions{})
if err != nil {
log.WithError(err).Warnf("Failed to clear node addresses")
}
}
}
func configureLogging() {
// Log to stdout. This prevents our logs from being interpreted as errors by, for example,
// fluentd's default configuration.
log.SetOutput(os.Stdout)
// Set log formatting.
log.SetFormatter(&logutils.Formatter{})
// Install a hook that adds file and line number information.
log.AddHook(&logutils.ContextHook{})
// Default to info level logging
logLevel := log.InfoLevel
rawLogLevel := os.Getenv("CALICO_STARTUP_LOGLEVEL")
if rawLogLevel != "" {
parsedLevel, err := log.ParseLevel(rawLogLevel)
if err == nil {
logLevel = parsedLevel
} else {
log.WithError(err).Error("Failed to parse log level, defaulting to info.")
}
}
log.SetLevel(logLevel)
log.Infof("Early log level set to %v", logLevel)
}
// determineNodeName is called to determine the node name to use for this instance
// of calico/node.
func determineNodeName() string {
var nodeName string
var err error
// Determine the name of this node. Precedence is:
// - NODENAME
// - Value stored in our nodename file.
// - HOSTNAME (lowercase)
// - os.Hostname (lowercase).
// We use the names.Hostname which lowercases and trims the name.
if nodeName = strings.TrimSpace(os.Getenv("NODENAME")); nodeName != "" {
log.Infof("Using NODENAME environment for node name")
} else if nodeName = nodenameFromFile(); nodeName != "" {
log.Info("Using stored node name from " + nodenameFileName())
} else if nodeName = strings.ToLower(strings.TrimSpace(os.Getenv("HOSTNAME"))); nodeName != "" {
log.Infof("Using HOSTNAME environment (lowercase) for node name")
} else if nodeName, err = names.Hostname(); err != nil {
log.WithError(err).Error("Unable to determine hostname")
terminate()
} else {
log.Warn("Using auto-detected node name. It is recommended that an explicit value is supplied using " +
"the NODENAME environment variable.")
}
log.Infof("Determined node name: %s", nodeName)
return nodeName
}
func nodenameFileName() string {
fn := os.Getenv("CALICO_NODENAME_FILE")
if fn == "" {
return defaultNodenameFile
}
return fn
}
// nodenameFromFile reads the nodename file if it exists and
// returns the nodename within.
func nodenameFromFile() string {
filename := nodenameFileName()
data, err := ioutil.ReadFile(filename)
if err != nil {
if os.IsNotExist(err) {
// File doesn't exist, return empty string.
log.Debug("File does not exist: " + filename)
return ""
}
log.WithError(err).Error("Failed to read " + filename)
terminate()
}
return string(data)
}
// waitForConnection waits for the datastore to become accessible.
func waitForConnection(ctx context.Context, c client.Interface) {
log.Info("Checking datastore connection")
for {
// Query some arbitrary configuration to see if the connection
// is working. Getting a specific Node is a good option, even
// if the Node does not exist.
_, err := c.Nodes().Get(ctx, "foo", options.GetOptions{})
// We only care about a couple of error cases, all others would
// suggest the datastore is accessible.
if err != nil {
switch err.(type) {
case cerrors.ErrorConnectionUnauthorized:
log.Warn("Connection to the datastore is unauthorized")
terminate()
case cerrors.ErrorDatastoreError:
log.WithError(err).Info("Hit error connecting to datastore - retry")
time.Sleep(1000 * time.Millisecond)
continue
}
}
// We've connected to the datastore - break out of the loop.
break
}
log.Info("Datastore connection verified")
}
// writeNodeConfig writes out this node's configuration to disk for use by other components.
// Specifically, it creates:
// - nodenameFileName() - used to persist the determined node name to disk for future use.
func writeNodeConfig(nodeName string) {
filename := nodenameFileName()
log.Debugf("Writing %s to "+filename, nodeName)
if err := ioutil.WriteFile(filename, []byte(nodeName), 0644); err != nil {
log.WithError(err).Error("Unable to write to " + filename)
terminate()
}
}
// getNode returns the current node configuration. If this node has not yet
// been created, it returns a blank node resource.
func getNode(ctx context.Context, client client.Interface, nodeName string) *api.Node {
node, err := client.Nodes().Get(ctx, nodeName, options.GetOptions{})
if err != nil {
if _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {
log.WithError(err).WithField("Name", nodeName).Info("Unable to query node configuration")
log.Warn("Unable to access datastore to query node configuration")
terminate()
}
log.WithField("Name", nodeName).Info("Building new node resource")
node = api.NewNode()
node.Name = nodeName
}
return node
}
// configureIPsAndSubnets updates the supplied node resource with IP and Subnet
// information to use for BGP. This returns true if we detect a change in Node IP address.
func configureIPsAndSubnets(node *api.Node) (bool, error) {
// If the node resource currently has no BGP configuration, add an empty
// set of configuration as it makes the processing below easier, and we
// must end up configuring some BGP fields before we complete.
if node.Spec.BGP == nil {
log.Info("Initialize BGP data")
node.Spec.BGP = &api.NodeBGPSpec{}
}
oldIpv4 := node.Spec.BGP.IPv4Address
oldIpv6 := node.Spec.BGP.IPv6Address
// Determine the autodetection type for IPv4 and IPv6. Note that we
// only autodetect IPv4 when it has not been specified. IPv6 must be
// explicitly requested using the "autodetect" value.
//
// If we aren't auto-detecting then we need to validate the configured
// value and possibly fix up missing subnet configuration.
ipv4Env := os.Getenv("IP")
if ipv4Env == "autodetect" || (ipv4Env == "" && node.Spec.BGP.IPv4Address == "") {
adm := os.Getenv("IP_AUTODETECTION_METHOD")
cidr := autoDetectCIDR(adm, 4)
if cidr != nil {
// We autodetected an IPv4 address so update the value in the node.
node.Spec.BGP.IPv4Address = cidr.String()
} else if node.Spec.BGP.IPv4Address == "" {
// No IPv4 address is configured, but we always require one, so exit.
log.Warn("Couldn't autodetect an IPv4 address. If auto-detecting, choose a different autodetection method. Otherwise provide an explicit address.")
return false, fmt.Errorf("Failed to autodetect an IPv4 address")
} else {
// No IPv4 autodetected, but a previous one was configured.
// Tell the user we are leaving the value unchanged. We
// will validate that the IP matches one on the interface.
log.Warnf("Autodetection of IPv4 address failed, keeping existing value: %s", node.Spec.BGP.IPv4Address)
validateIP(node.Spec.BGP.IPv4Address)
}
} else if ipv4Env != "none" {
if ipv4Env != "" {
node.Spec.BGP.IPv4Address = parseIPEnvironment("IP", ipv4Env, 4)
}
validateIP(node.Spec.BGP.IPv4Address)
}
ipv6Env := os.Getenv("IP6")
if ipv6Env == "autodetect" {
adm := os.Getenv("IP6_AUTODETECTION_METHOD")
cidr := autoDetectCIDR(adm, 6)
if cidr != nil {
// We autodetected an IPv6 address so update the value in the node.
node.Spec.BGP.IPv6Address = cidr.String()
} else if node.Spec.BGP.IPv6Address == "" {
// No IPv6 address is configured, but we have requested one, so exit.
log.Warn("Couldn't autodetect an IPv6 address. If auto-detecting, choose a different autodetection method. Otherwise provide an explicit address.")
return false, fmt.Errorf("Failed to autodetect an IPv6 address")
} else {
// No IPv6 autodetected, but a previous one was configured.
// Tell the user we are leaving the value unchanged. We
// will validate that the IP matches one on the interface.
log.Warnf("Autodetection of IPv6 address failed, keeping existing value: %s", node.Spec.BGP.IPv6Address)
validateIP(node.Spec.BGP.IPv6Address)
}
} else if ipv6Env != "none" {
if ipv6Env != "" {
node.Spec.BGP.IPv6Address = parseIPEnvironment("IP6", ipv6Env, 6)
}
validateIP(node.Spec.BGP.IPv6Address)
}
if ipv4Env == "none" && (ipv6Env == "" || ipv6Env == "none") {
log.Warn("No IP Addresses configured, and autodetection is not enabled")
terminate()
}
// Detect if we've seen the IP address change, and flag that we need to check for conflicting Nodes
if node.Spec.BGP.IPv4Address != oldIpv4 {
log.Info("Node IPv4 changed, will check for conflicts")
return true, nil
}
if node.Spec.BGP.IPv6Address != oldIpv6 {
log.Info("Node IPv6 changed, will check for conflicts")
return true, nil
}
return false, nil
}
// parseIPEnvironment parses an IP or CIDR value supplied through an
// environment variable, terminating if the value is not a valid address
// of the requested IP version.
func parseIPEnvironment(envName, envValue string, version int) string {
// To parse the environment (which could be an IP or a CIDR), convert
// to a JSON string and use the UnmarshalJSON method on the IPNet
// struct to parse the value.
ip := &cnet.IPNet{}
err := ip.UnmarshalJSON([]byte("\"" + envValue + "\""))
if err != nil || ip.Version() != version {
log.Warnf("Environment does not contain a valid IPv%d address: %s=%s", version, envName, envValue)
terminate()
}
log.Infof("Using IPv%d address from environment: %s=%s", ip.Version(), envName, envValue)
return ip.String()
}
// validateIP checks that the IP address is actually on one of the host
// interfaces and warns if not.
func validateIP(ipn string) {
// No validation required if no IP address is specified.
if ipn == "" {
return
}
ipAddr, _, err := cnet.ParseCIDROrIP(ipn)
if err != nil {
log.WithError(err).Errorf("Failed to parse autodetected CIDR '%s'", ipn)
terminate()
}
// Get a complete list of interfaces with their addresses and check if
// the IP address can be found.
ifaces, err := autodetection.GetInterfaces(nil, nil, ipAddr.Version())
if err != nil {
log.WithError(err).Error("Unable to query host interfaces")
terminate()
}
if len(ifaces) == 0 {
log.Info("No interfaces found for validating IP configuration")
}
for _, i := range ifaces {
for _, c := range i.Cidrs {
if ipAddr.Equal(c.IP) {
log.Infof("IPv%d address %s discovered on interface %s", ipAddr.Version(), ipAddr.String(), i.Name)
return
}
}
}
log.Warnf("Unable to confirm IPv%d address %s is assigned to this host", ipAddr.Version(), ipAddr)
}
func parseBlockSizeEnvironment(envValue string) int {
i, err := strconv.Atoi(envValue)
if err != nil {
log.WithError(err).Error("Unable to convert blocksize to int")
terminate()
}
return i
}
// validateBlockSize check if blockSize is valid
func validateBlockSize(version int, blockSize int) {
// 20 to 32 (inclusive) for IPv4 and 116 to 128 (inclusive) for IPv6
if version == 4 {
if blockSize < 20 || blockSize > 32 {
log.Errorf("Invalid blocksize %d for version %d", blockSize, version)
terminate()
}
} else if version == 6 {
if blockSize < 116 || blockSize > 128 {
log.Errorf("Invalid blocksize %d for version %d", blockSize, version)
terminate()
}
}
}
// validateNodeSelector checks if selector is valid
func validateNodeSelector(version int, s string) {
_, err := selector.Parse(s)
if err != nil {
log.Errorf("Invalid node selector '%s' for version %d: %s", s, version, err)
terminate()
}
}
// evaluateENVBool evaluates a passed environment variable
// Returns True if the envVar is defined and set to true.
// Returns False if the envVar is defined and set to false.
// Returns defaultValue if the envVar is not defined.
// A log entry will always be written.
func evaluateENVBool(envVar string, defaultValue bool) bool {
envValue, isSet := os.LookupEnv(envVar)
if isSet {
switch strings.ToLower(envValue) {
case "false", "0", "no", "n", "f":
log.Infof("%s is %t through environment variable", envVar, false)
return false
}
log.Infof("%s is %t through environment variable", envVar, true)
return true
}
log.Infof("%s is %t (defaulted) through environment variable", envVar, defaultValue)
return defaultValue
}
// autoDetectCIDR auto-detects the IP and Network using the requested
// detection method.
func autoDetectCIDR(method string, version int) *cnet.IPNet {
if method == "" || method == AUTODETECTION_METHOD_FIRST {
// Autodetect the IP by enumerating all interfaces (excluding
// known internal interfaces).
return autoDetectCIDRFirstFound(version)
} else if strings.HasPrefix(method, AUTODETECTION_METHOD_INTERFACE) {
// Autodetect the IP from the specified interface.
ifStr := strings.TrimPrefix(method, AUTODETECTION_METHOD_INTERFACE)
// Regexes are passed in as a string separated by ","
ifRegexes := regexp.MustCompile(`\s*,\s*`).Split(ifStr, -1)
return autoDetectCIDRByInterface(ifRegexes, version)
} else if strings.HasPrefix(method, AUTODETECTION_METHOD_CAN_REACH) {
// Autodetect the IP by connecting a UDP socket to a supplied address.
destStr := strings.TrimPrefix(method, AUTODETECTION_METHOD_CAN_REACH)
return autoDetectCIDRByReach(destStr, version)
} else if strings.HasPrefix(method, AUTODETECTION_METHOD_SKIP_INTERFACE) {
// Autodetect the IP by enumerating all interfaces (excluding
// known internal interfaces and any interfaces whose name
// matches the given regexes).
ifStr := strings.TrimPrefix(method, AUTODETECTION_METHOD_SKIP_INTERFACE)
// Regexes are passed in as a string separated by ","
ifRegexes := regexp.MustCompile(`\s*,\s*`).Split(ifStr, -1)
return autoDetectCIDRBySkipInterface(ifRegexes, version)
}
// The autodetection method is not recognised and is required. Exit.
log.Errorf("Invalid IP autodetection method: %s", method)
terminate()
return nil
}
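// For example (illustrative), the following values of IP_AUTODETECTION_METHOD
// select the different strategies above:
//
//   IP_AUTODETECTION_METHOD=first-found
//   IP_AUTODETECTION_METHOD=interface=eth.*,ens.*
//   IP_AUTODETECTION_METHOD=can-reach=8.8.8.8
//   IP_AUTODETECTION_METHOD=skip-interface=docker0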
// autoDetectCIDRFirstFound auto-detects the first valid Network it finds across
// all interfaces (excluding common known internal interface names).
func autoDetectCIDRFirstFound(version int) *cnet.IPNet {
incl := []string{}
iface, cidr, err := autodetection.FilteredEnumeration(incl, DEFAULT_INTERFACES_TO_EXCLUDE, version)
if err != nil {
log.Warnf("Unable to auto-detect an IPv%d address: %s", version, err)
return nil
}
log.Infof("Using autodetected IPv%d address on interface %s: %s", version, iface.Name, cidr.String())
return cidr
}
// autoDetectCIDRByInterface auto-detects the first valid Network on the interfaces
// matching the supplied interface regex.
func autoDetectCIDRByInterface(ifaceRegexes []string, version int) *cnet.IPNet {
iface, cidr, err := autodetection.FilteredEnumeration(ifaceRegexes, nil, version)
if err != nil {
log.Warnf("Unable to auto-detect an IPv%d address using interface regexes %v: %s", version, ifaceRegexes, err)
return nil
}
log.Infof("Using autodetected IPv%d address %s on matching interface %s", version, cidr.String(), iface.Name)
return cidr
}
// autoDetectCIDRByReach auto-detects the IP and Network by setting up a UDP
// connection to a "reach" address.
func autoDetectCIDRByReach(dest string, version int) *cnet.IPNet {
if cidr, err := autodetection.ReachDestination(dest, version); err != nil {
log.Warnf("Unable to auto-detect IPv%d address by connecting to %s: %s", version, dest, err)
return nil
} else {
log.Infof("Using autodetected IPv%d address %s, detected by connecting to %s", version, cidr.String(), dest)
return cidr
}
}
// autoDetectCIDRBySkipInterface auto-detects the first valid Network on the interfaces
// matching the supplied interface regexes.
func autoDetectCIDRBySkipInterface(ifaceRegexes []string, version int) *cnet.IPNet {
incl := []string{}
excl := DEFAULT_INTERFACES_TO_EXCLUDE
excl = append(excl, ifaceRegexes...)
iface, cidr, err := autodetection.FilteredEnumeration(incl, excl, version)
if err != nil {
log.Warnf("Unable to auto-detect an IPv%d address while excluding %v: %s", version, ifaceRegexes, err)
return nil
}
log.Infof("Using autodetected IPv%d address on interface %s: %s while skipping matching interfaces", version, iface.Name, cidr.String())
return cidr
}
// configureASNumber configures the Node resource with the AS number specified
// in the environment, or is a no-op if not specified.
func configureASNumber(node *api.Node) {
// Extract the AS number from the environment
asStr := os.Getenv("AS")
if asStr != "" {
if asNum, err := numorstring.ASNumberFromString(asStr); err != nil {
log.WithError(err).Errorf("The AS number specified in the environment (AS=%s) is not valid", asStr)
terminate()
} else {
log.Infof("Using AS number specified in environment (AS=%s)", asNum)
node.Spec.BGP.ASNumber = &asNum
}
} else {
if node.Spec.BGP.ASNumber == nil {
log.Info("No AS number configured on node resource, using global value")
} else {
log.Infof("Using AS number %s configured in node resource", node.Spec.BGP.ASNumber)
}
}
}
// GenerateIPv6ULAPrefix returns a randomly generated ULA IPv6 prefix as per RFC 4193. The pool
// is generated from bytes pulled from a secure random source.
func GenerateIPv6ULAPrefix() (string, error) {
ulaAddr := []byte{0xfd, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
_, err := cryptorand.Read(ulaAddr[1:6])
if err != nil {
return "", err
}
ipNet := net.IPNet{
IP: net.IP(ulaAddr),
Mask: net.CIDRMask(48, 128),
}
return ipNet.String(), nil
}
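// For example (illustrative), GenerateIPv6ULAPrefix might return a value such
// as "fd4f:2a3b:1c5d::/48"; the five bytes after the leading 0xfd are random,
// so each cluster gets a different /48 prefix.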
// configureIPPools ensures that default IP pools are created (unless explicitly requested otherwise).
func configureIPPools(ctx context.Context, client client.Interface, kubeadmConfig *v1.ConfigMap) {
// Read in environment variables for use here and later.
ipv4Pool := os.Getenv("CALICO_IPV4POOL_CIDR")
ipv6Pool := os.Getenv("CALICO_IPV6POOL_CIDR")
if strings.ToLower(os.Getenv("NO_DEFAULT_POOLS")) == "true" {
if len(ipv4Pool) > 0 || len(ipv6Pool) > 0 {
log.Error("Invalid configuration with NO_DEFAULT_POOLS defined and CALICO_IPV4POOL_CIDR or CALICO_IPV6POOL_CIDR defined.")
terminate()
}
log.Info("Skipping IP pool configuration")
return
}
// If CIDRs weren't specified through the environment variables, check if they're present in kubeadm's
// config map.
if (len(ipv4Pool) == 0 || len(ipv6Pool) == 0) && kubeadmConfig != nil {
v4, v6, err := extractKubeadmCIDRs(kubeadmConfig)
if err == nil {
if len(ipv4Pool) == 0 {
ipv4Pool = v4
log.Infof("found v4=%s in the kubeadm config map", ipv4Pool)
}
if len(ipv6Pool) == 0 {
ipv6Pool = v6
log.Infof("found v6=%s in the kubeadm config map", ipv6Pool)
}
} else {
log.WithError(err).Warn("Failed to extract CIDRs from kubeadm config.")
}
}
ipv4IpipModeEnvVar := strings.ToLower(os.Getenv("CALICO_IPV4POOL_IPIP"))
ipv4VXLANModeEnvVar := strings.ToLower(os.Getenv("CALICO_IPV4POOL_VXLAN"))
var (
ipv4BlockSize int
ipv6BlockSize int
)
ipv4BlockSizeEnvVar := os.Getenv("CALICO_IPV4POOL_BLOCK_SIZE")
if ipv4BlockSizeEnvVar != "" {
ipv4BlockSize = parseBlockSizeEnvironment(ipv4BlockSizeEnvVar)
} else {
ipv4BlockSize = DEFAULT_IPV4_POOL_BLOCK_SIZE
}
validateBlockSize(4, ipv4BlockSize)
ipv6BlockSizeEnvVar := os.Getenv("CALICO_IPV6POOL_BLOCK_SIZE")
if ipv6BlockSizeEnvVar != "" {
ipv6BlockSize = parseBlockSizeEnvironment(ipv6BlockSizeEnvVar)
} else {
ipv6BlockSize = DEFAULT_IPV6_POOL_BLOCK_SIZE
}
validateBlockSize(6, ipv6BlockSize)
ipv4NodeSelector := os.Getenv("CALICO_IPV4POOL_NODE_SELECTOR")
validateNodeSelector(4, ipv4NodeSelector)
ipv6NodeSelector := os.Getenv("CALICO_IPV6POOL_NODE_SELECTOR")
validateNodeSelector(6, ipv6NodeSelector)
// Get a list of all IP Pools
poolList, err := client.IPPools().List(ctx, options.ListOptions{})
if err != nil {
log.WithError(err).Error("Unable to fetch IP pool list")
terminate()
return // not really needed but allows testing to function
}
// Check for IPv4 and IPv6 pools.
ipv4Present := false
ipv6Present := false
for _, p := range poolList.Items {
ip, _, err := cnet.ParseCIDR(p.Spec.CIDR)
if err != nil {
log.Warnf("Error parsing CIDR '%s'. Skipping the IPPool.", p.Spec.CIDR)
continue
}
version := ip.Version()
ipv4Present = ipv4Present || (version == 4)
ipv6Present = ipv6Present || (version == 6)
if ipv4Present && ipv6Present {
break
}
}
// Read the IPv4 CIDR from the environment if set, otherwise select a default pool, then parse and validate it.
if ipv4Pool == "" {
ipv4Pool = DEFAULT_IPV4_POOL_CIDR
_, preferredNet, _ := net.ParseCIDR(DEFAULT_IPV4_POOL_CIDR)
if selectedPool, err := ipv4.GetDefaultIPv4Pool(preferredNet); err == nil {
ipv4Pool = selectedPool.String()
}
log.Infof("Selected default IP pool is '%s'", ipv4Pool)
}
_, ipv4Cidr, err := cnet.ParseCIDR(ipv4Pool)
if err != nil || ipv4Cidr.Version() != 4 {
log.Errorf("Invalid CIDR specified in CALICO_IPV4POOL_CIDR '%s'", ipv4Pool)
terminate()
return // not really needed but allows testing to function
}
// If no IPv6 pool is specified, generate one.
if ipv6Pool == "" {
ipv6Pool, err = GenerateIPv6ULAPrefix()
if err != nil {
log.Errorf("Failed to generate an IPv6 default pool")
terminate()
}
}
_, ipv6Cidr, err := cnet.ParseCIDR(ipv6Pool)
if err != nil || ipv6Cidr.Version() != 6 {
log.Errorf("Invalid CIDR specified in CALICO_IPV6POOL_CIDR '%s'", ipv6Pool)
terminate()
return // not really needed but allows testing to function
}
// Ensure there are pools created for each IP version.
if !ipv4Present {
log.Debug("Create default IPv4 IP pool")
outgoingNATEnabled := evaluateENVBool("CALICO_IPV4POOL_NAT_OUTGOING", true)
createIPPool(ctx, client, ipv4Cidr, DEFAULT_IPV4_POOL_NAME, ipv4IpipModeEnvVar, ipv4VXLANModeEnvVar, outgoingNATEnabled, ipv4BlockSize, ipv4NodeSelector)
}
if !ipv6Present && ipv6Supported() {
log.Debug("Create default IPv6 IP pool")
outgoingNATEnabled := evaluateENVBool("CALICO_IPV6POOL_NAT_OUTGOING", false)
createIPPool(ctx, client, ipv6Cidr, DEFAULT_IPV6_POOL_NAME, string(api.IPIPModeNever), string(api.VXLANModeNever), outgoingNATEnabled, ipv6BlockSize, ipv6NodeSelector)
}
}
// createIPPool creates an IP pool using the specified CIDR. This
// method is a no-op if the pool already exists.
func createIPPool(ctx context.Context, client client.Interface, cidr *cnet.IPNet, poolName, ipipModeName, vxlanModeName string, isNATOutgoingEnabled bool, blockSize int, nodeSelector string) {
version := cidr.Version()
var ipipMode api.IPIPMode
var vxlanMode api.VXLANMode
// Parse the given IPIP mode.
switch strings.ToLower(ipipModeName) {
case "", "off", "never":
ipipMode = api.IPIPModeNever
case "crosssubnet", "cross-subnet":
ipipMode = api.IPIPModeCrossSubnet
case "always":
ipipMode = api.IPIPModeAlways
default:
log.Errorf("Unrecognized IPIP mode specified in CALICO_IPV4POOL_IPIP '%s'", ipipModeName)
terminate()
}
// Parse the given VXLAN mode.
switch strings.ToLower(vxlanModeName) {
case "", "off", "never":
vxlanMode = api.VXLANModeNever
case "crosssubnet", "cross-subnet":
vxlanMode = api.VXLANModeCrossSubnet
case "always":
vxlanMode = api.VXLANModeAlways
default:
log.Errorf("Unrecognized VXLAN mode specified in CALICO_IPV4POOL_VXLAN'%s'", vxlanModeName)
terminate()
}
pool := &api.IPPool{
ObjectMeta: metav1.ObjectMeta{
Name: poolName,
},
Spec: api.IPPoolSpec{
CIDR: cidr.String(),
NATOutgoing: isNATOutgoingEnabled,
IPIPMode: ipipMode,
VXLANMode: vxlanMode,
BlockSize: blockSize,
NodeSelector: nodeSelector,
},
}
log.Infof("Ensure default IPv%d pool is created. IPIP mode: %s, VXLAN mode: %s", version, ipipMode, vxlanMode)
// Create the pool. There is a small chance that another node may
// beat us to it, so handle the fact that the pool already exists.
if _, err := client.IPPools().Create(ctx, pool, options.SetOptions{}); err != nil {
if _, ok := err.(cerrors.ErrorResourceAlreadyExists); !ok {
log.WithError(err).Errorf("Failed to create default IPv%d IP pool: %s", version, cidr.String())
terminate()
}
} else {
log.Infof("Created default IPv%d pool (%s) with NAT outgoing %t. IPIP mode: %s, VXLAN mode: %s",
version, cidr, isNATOutgoingEnabled, ipipMode, vxlanMode)
}
}
// checkConflictingNodes checks whether any other nodes have been configured
// with the same IP addresses.
func checkConflictingNodes(ctx context.Context, client client.Interface, node *api.Node) (v4conflict, v6conflict bool, retErr error) {
// Get the full set of nodes.
var nodes []api.Node
if nodeList, err := client.Nodes().List(ctx, options.ListOptions{}); err != nil {
log.WithError(err).Errorf("Unable to query node configuration")
retErr = err
return
} else {
nodes = nodeList.Items
}
ourIPv4, _, err := cnet.ParseCIDROrIP(node.Spec.BGP.IPv4Address)
if err != nil && node.Spec.BGP.IPv4Address != "" {
log.WithError(err).Errorf("Error parsing IPv4 CIDR '%s' for node '%s'", node.Spec.BGP.IPv4Address, node.Name)
retErr = err
return
}
ourIPv6, _, err := cnet.ParseCIDROrIP(node.Spec.BGP.IPv6Address)
if err != nil && node.Spec.BGP.IPv6Address != "" {
log.WithError(err).Errorf("Error parsing IPv6 CIDR '%s' for node '%s'", node.Spec.BGP.IPv6Address, node.Name)
retErr = err
return
}
for _, theirNode := range nodes {
if theirNode.Spec.BGP == nil {
// Skip nodes that don't have BGP configured. We know
// that this node does have BGP since we only perform
// this check after configuring BGP.
continue
}
theirIPv4, _, err := cnet.ParseCIDROrIP(theirNode.Spec.BGP.IPv4Address)
if err != nil && theirNode.Spec.BGP.IPv4Address != "" {
log.WithError(err).Errorf("Error parsing IPv4 CIDR '%s' for node '%s'", theirNode.Spec.BGP.IPv4Address, theirNode.Name)
retErr = err
return
}
theirIPv6, _, err := cnet.ParseCIDROrIP(theirNode.Spec.BGP.IPv6Address)
if err != nil && theirNode.Spec.BGP.IPv6Address != "" {
log.WithError(err).Errorf("Error parsing IPv6 CIDR '%s' for node '%s'", theirNode.Spec.BGP.IPv6Address, theirNode.Name)
retErr = err
return
}
// If this is our node (based on the name), check if the IP
// addresses have changed. If so warn the user as it could be
// an indication of multiple nodes using the same name. This
// is not an error condition as the IPs could actually change.
if theirNode.Name == node.Name {
if theirIPv4.IP != nil && ourIPv4.IP != nil && !theirIPv4.IP.Equal(ourIPv4.IP) {
fields := log.Fields{"node": theirNode.Name, "original": theirIPv4.String(), "updated": ourIPv4.String()}
log.WithFields(fields).Warnf("IPv4 address has changed. This could happen if there are multiple nodes with the same name.")
}
if theirIPv6.IP != nil && ourIPv6.IP != nil && !theirIPv6.IP.Equal(ourIPv6.IP) {
fields := log.Fields{"node": theirNode.Name, "original": theirIPv6.String(), "updated": ourIPv6.String()}
log.WithFields(fields).Warnf("IPv6 address has changed. This could happen if there are multiple nodes with the same name.")
}
continue
}
// Check that other nodes aren't using the same IP addresses.
// This is an error condition.
if theirIPv4.IP != nil && ourIPv4.IP != nil && theirIPv4.IP.Equal(ourIPv4.IP) {
log.Warnf("Calico node '%s' is already using the IPv4 address %s.", theirNode.Name, ourIPv4.String())
retErr = fmt.Errorf("IPv4 address conflict")
v4conflict = true
}
if theirIPv6.IP != nil && ourIPv6.IP != nil && theirIPv6.IP.Equal(ourIPv6.IP) {
log.Warnf("Calico node '%s' is already using the IPv6 address %s.", theirNode.Name, ourIPv6.String())
retErr = fmt.Errorf("IPv6 address conflict")
v6conflict = true
}
}
return
}
// ensureDefaultConfig ensures all of the required default settings are
// configured.
func ensureDefaultConfig(ctx context.Context, cfg *apiconfig.CalicoAPIConfig, c client.Interface, node *api.Node, kubeadmConfig *v1.ConfigMap) error {
// Ensure the ClusterInformation is populated.
// Get the ClusterType from ENV var. This is set from the manifest.
clusterType := os.Getenv("CLUSTER_TYPE")
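// e.g. (illustrative) CLUSTER_TYPE="k8s,bgp" on a kubeadm-managed cluster becomes "k8s,bgp,kubeadm".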
if kubeadmConfig != nil {
if len(clusterType) == 0 {
clusterType = "kubeadm"
} else {
clusterType += ",kubeadm"
}
}
if err := c.EnsureInitialized(ctx, VERSION, clusterType); err != nil {
return err
}
// By default we set the global reporting interval to 0 - this is
// different from the defaults defined in Felix.
//
// Logging to file is disabled in the felix.cfg config file. This
// should always be disabled for calico/node. By default we log to
// screen - set the default logging value that we desire.
felixConf, err := c.FelixConfigurations().Get(ctx, globalFelixConfigName, options.GetOptions{})
if err != nil {
// Create the default config if it doesn't already exist.
if _, ok := err.(cerrors.ErrorResourceDoesNotExist); ok {
newFelixConf := api.NewFelixConfiguration()
newFelixConf.Name = globalFelixConfigName
newFelixConf.Spec.ReportingInterval = &metav1.Duration{Duration: 0}
newFelixConf.Spec.LogSeverityScreen = defaultLogSeverity
_, err = c.FelixConfigurations().Create(ctx, newFelixConf, options.SetOptions{})
if err != nil {
if conflict, ok := err.(cerrors.ErrorResourceAlreadyExists); ok {
log.Infof("Ignoring conflict when setting value %s", conflict.Identifier)
} else {
log.WithError(err).WithField("FelixConfig", newFelixConf).Errorf("Error creating Felix global config")
return err
}
}
} else {
log.WithError(err).WithField("FelixConfig", globalFelixConfigName).Errorf("Error getting Felix global config")
return err
}
} else {
updateNeeded := false
if felixConf.Spec.ReportingInterval == nil {
felixConf.Spec.ReportingInterval = &metav1.Duration{Duration: 0}
updateNeeded = true
} else {
log.WithField("ReportingInterval", felixConf.Spec.ReportingInterval).Debug("Global Felix value already assigned")
}
if felixConf.Spec.LogSeverityScreen == "" {
felixConf.Spec.LogSeverityScreen = defaultLogSeverity
updateNeeded = true
} else {
log.WithField("LogSeverityScreen", felixConf.Spec.LogSeverityScreen).Debug("Global Felix value already assigned")
}
if updateNeeded {
_, err = c.FelixConfigurations().Update(ctx, felixConf, options.SetOptions{})
if err != nil {
if conflict, ok := err.(cerrors.ErrorResourceUpdateConflict); ok {
log.Infof("Ignoring conflict when setting value %s", conflict.Identifier)
} else {
log.WithError(err).WithField("FelixConfig", felixConf).Errorf("Error updating Felix global config")
return err
}
}
}
}
// Configure Felix to allow traffic from the containers to the host (if
// not otherwise firewalled by the host administrator or profiles).
// This is important for container deployments, where it is common
// for containers to speak to services running on the host (e.g. k8s
// pods speaking to k8s api-server, and mesos tasks registering with agent
// on startup). Note: KDD does not yet support per-node felix config.
if cfg.Spec.DatastoreType != apiconfig.Kubernetes {
felixNodeCfg, err := c.FelixConfigurations().Get(ctx, fmt.Sprintf("%s%s", felixNodeConfigNamePrefix, node.Name), options.GetOptions{})
if err != nil {
// Create the default config if it doesn't already exist.
if _, ok := err.(cerrors.ErrorResourceDoesNotExist); ok {
newFelixNodeCfg := api.NewFelixConfiguration()
newFelixNodeCfg.Name = fmt.Sprintf("%s%s", felixNodeConfigNamePrefix, node.Name)
newFelixNodeCfg.Spec.DefaultEndpointToHostAction = "Return"
_, err = c.FelixConfigurations().Create(ctx, newFelixNodeCfg, options.SetOptions{})
if err != nil {
if exists, ok := err.(cerrors.ErrorResourceAlreadyExists); ok {
log.Infof("Ignoring resource exists error when setting value %s", exists.Identifier)
} else {
log.WithError(err).WithField("FelixConfig", newFelixNodeCfg).Errorf("Error creating Felix node config")
return err
}
}
} else {
log.WithError(err).WithField("FelixConfig", felixNodeConfigNamePrefix).Errorf("Error getting Felix node config")
return err
}
} else {
if felixNodeCfg.Spec.DefaultEndpointToHostAction == "" {
felixNodeCfg.Spec.DefaultEndpointToHostAction = "Return"
_, err = c.FelixConfigurations().Update(ctx, felixNodeCfg, options.SetOptions{})
if err != nil {
if conflict, ok := err.(cerrors.ErrorResourceUpdateConflict); ok {
log.Infof("Ignoring conflict when setting value %s", conflict.Identifier)
} else {
log.WithError(err).WithField("FelixConfig", felixNodeCfg).Errorf("Error updating Felix node config")
return err
}
}
} else {
log.WithField("DefaultEndpointToHostAction", felixNodeCfg.Spec.DefaultEndpointToHostAction).Debug("Host Felix value already assigned")
}
}
}
return nil
}
// ensureKDDMigrated ensures any data migration needed is done.
func ensureKDDMigrated(cfg *apiconfig.CalicoAPIConfig, cv3 client.Interface) error {
cv1, err := clients.LoadKDDClientV1FromAPIConfigV3(cfg)
if err != nil {
return err
}
m := migrator.New(cv3, cv1, nil)
yes, err := m.ShouldMigrate()
if err != nil {
return err
} else if yes {
log.Infof("Running migration")
if _, err = m.Migrate(); err != nil {
return fmt.Errorf("Migration failed: %v", err)
}
log.Infof("Migration successful")
} else {
log.Debugf("Migration is not needed")
}
return nil
}
// Set Kubernetes NodeNetworkUnavailable to false when starting
// https://kubernetes.io/docs/concepts/architecture/nodes/#condition
func setNodeNetworkUnavailableFalse(clientset kubernetes.Clientset, nodeName string) error {
condition := kapiv1.NodeCondition{
Type: kapiv1.NodeNetworkUnavailable,
Status: kapiv1.ConditionFalse,
Reason: "CalicoIsUp",
Message: "Calico is running on this node",
LastTransitionTime: metav1.Now(),
LastHeartbeatTime: metav1.Now(),
}
raw, err := json.Marshal(&[]kapiv1.NodeCondition{condition})
if err != nil {
return err
}
patch := []byte(fmt.Sprintf(`{"status":{"conditions":%s}}`, raw))
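// The resulting status patch looks like (illustrative):
//   {"status":{"conditions":[{"type":"NetworkUnavailable","status":"False","reason":"CalicoIsUp",...}]}}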
to := time.After(30 * time.Second)
for {
select {
case <-to:
err = fmt.Errorf("timed out patching node, last error was: %s", err.Error())
return err
default:
_, err = clientset.CoreV1().Nodes().PatchStatus(nodeName, patch)
if err != nil {
log.WithError(err).Warnf("Failed to set NetworkUnavailable to False; will retry")
} else {
// Success!
return nil
}
}
}
}
// terminate prints a terminate message and exists with status 1.
func terminate() {
log.Warn("Terminating")
exitFunction(1)
}
// extractKubeadmCIDRs looks through the config map and parses lines starting with 'podSubnet'.
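// For example (illustrative), a kubeadm entry "podSubnet: 192.168.0.0/16,fd00:10:244::/64" yields
// v4="192.168.0.0/16" and v6="fd00:10:244::/64"; a single-family entry fills only one of them.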
func extractKubeadmCIDRs(kubeadmConfig *v1.ConfigMap) (string, string, error) {
var v4, v6 string
var line []string
var err error
if kubeadmConfig == nil {
return "", "", fmt.Errorf("Invalid config map.")
}
// Look through the config map for lines starting with 'podSubnet', then assign the right variable
// according to the IP family of the matching string.
re := regexp.MustCompile(`podSubnet: (.*)`)
for _, l := range kubeadmConfig.Data {
if line = re.FindStringSubmatch(l); line != nil {
break
}
}
if len(line) != 0 {
// IPv4 and IPv6 CIDRs will be separated by a comma in a dual stack setup.
for _, cidr := range strings.Split(line[1], ",") {
addr, _, err := net.ParseCIDR(cidr)
if err != nil {
break
}
if addr.To4() == nil {
if len(v6) == 0 {
v6 = cidr
}
} else {
if len(v4) == 0 {
v4 = cidr
}
}
if len(v6) != 0 && len(v4) != 0 {
break
}
}
}
return v4, v6, err
}
| [
"\"WAIT_FOR_DATASTORE\"",
"\"CALICO_NETWORKING_BACKEND\"",
"\"IP\"",
"\"IP6\"",
"\"DISABLE_NODE_IP_CHECK\"",
"\"IP\"",
"\"IP6\"",
"\"CALICO_K8S_NODE_REF\"",
"\"CALICO_STARTUP_LOGLEVEL\"",
"\"NODENAME\"",
"\"HOSTNAME\"",
"\"CALICO_NODENAME_FILE\"",
"\"IP\"",
"\"IP_AUTODETECTION_METHOD\"",
"\"IP6\"",
"\"IP6_AUTODETECTION_METHOD\"",
"\"AS\"",
"\"CALICO_IPV4POOL_CIDR\"",
"\"CALICO_IPV6POOL_CIDR\"",
"\"NO_DEFAULT_POOLS\"",
"\"CALICO_IPV4POOL_IPIP\"",
"\"CALICO_IPV4POOL_VXLAN\"",
"\"CALICO_IPV4POOL_BLOCK_SIZE\"",
"\"CALICO_IPV6POOL_BLOCK_SIZE\"",
"\"CALICO_IPV4POOL_NODE_SELECTOR\"",
"\"CALICO_IPV6POOL_NODE_SELECTOR\"",
"\"CLUSTER_TYPE\""
]
| []
| [
"WAIT_FOR_DATASTORE",
"CALICO_IPV6POOL_CIDR",
"IP6",
"CALICO_K8S_NODE_REF",
"HOSTNAME",
"CLUSTER_TYPE",
"CALICO_IPV4POOL_CIDR",
"CALICO_NETWORKING_BACKEND",
"NODENAME",
"CALICO_IPV6POOL_BLOCK_SIZE",
"IP_AUTODETECTION_METHOD",
"IP6_AUTODETECTION_METHOD",
"CALICO_NODENAME_FILE",
"CALICO_STARTUP_LOGLEVEL",
"CALICO_IPV4POOL_BLOCK_SIZE",
"NO_DEFAULT_POOLS",
"IP",
"CALICO_IPV6POOL_NODE_SELECTOR",
"CALICO_IPV4POOL_VXLAN",
"CALICO_IPV4POOL_NODE_SELECTOR",
"CALICO_IPV4POOL_IPIP",
"AS",
"DISABLE_NODE_IP_CHECK"
]
| [] | ["WAIT_FOR_DATASTORE", "CALICO_IPV6POOL_CIDR", "IP6", "CALICO_K8S_NODE_REF", "HOSTNAME", "CLUSTER_TYPE", "CALICO_IPV4POOL_CIDR", "CALICO_NETWORKING_BACKEND", "NODENAME", "CALICO_IPV6POOL_BLOCK_SIZE", "IP_AUTODETECTION_METHOD", "IP6_AUTODETECTION_METHOD", "CALICO_NODENAME_FILE", "CALICO_STARTUP_LOGLEVEL", "CALICO_IPV4POOL_BLOCK_SIZE", "NO_DEFAULT_POOLS", "IP", "CALICO_IPV6POOL_NODE_SELECTOR", "CALICO_IPV4POOL_VXLAN", "CALICO_IPV4POOL_NODE_SELECTOR", "CALICO_IPV4POOL_IPIP", "AS", "DISABLE_NODE_IP_CHECK"] | go | 23 | 0 | |
main.go | package main
import (
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/url"
"os"
"strings"
"text/tabwriter"
"time"
"github.com/ahmetb/go-cursor"
"github.com/charmbracelet/glamour"
"golang.org/x/crypto/ssh"
terminal "golang.org/x/term"
)
// optimize for terminals with 72 char width
//
// i haven't figured out how to get the terminal width from the ssh session
//
// for the sake of time, i'm hardcoding it.
const globalTerminalWidth = 72
func typewrite(w io.Writer, speed time.Duration, content string) {
chars := strings.Split(content, "")
for _, c := range chars {
fmt.Fprint(w, c)
time.Sleep(speed)
}
}
func typewriteLines(w io.Writer, speed time.Duration, lines []string) {
for _, line := range lines {
typewrite(w, speed, line)
}
}
type gistCache struct {
Expiration time.Time
Content string
Rendered string
}
type GistService struct {
files [][]string
cachedGists map[string]gistCache
}
func NewGistService(files [][]string) GistService {
return GistService{
files: files,
cachedGists: map[string]gistCache{},
}
}
func (g GistService) FileNames() []string {
fileNames := make([]string, g.Count())
for i, f := range g.files {
fileNames[i] = f[0]
}
return fileNames
}
func (g GistService) Count() int {
return len(g.files)
}
// returns URL if file exists, empty string if not
func (g GistService) FileURL(fileName string) string {
var url string
for _, f := range g.files {
if fileName == f[0] {
url = f[1]
}
}
return url
}
func (g GistService) FileExists(fileName string) bool {
return g.FileURL(fileName) != ""
}
type GistServiceFileType int
const (
GistServiceFileTypeGist GistServiceFileType = iota
GistServiceFileTypeRepoFile
)
func (g GistService) urlType(fileURL string) (GistServiceFileType, error) {
u, err := url.Parse(fileURL)
if err != nil {
return -1, err
}
if strings.HasPrefix(u.Host, "gist") {
return GistServiceFileTypeGist, nil
}
if strings.Contains(u.Path, "/blob/") {
return GistServiceFileTypeRepoFile, nil
}
return -1, errors.New("GistServiceFileType of fileURL not recognized")
}
func (g GistService) fetchRemoteGistContents(gistURL string) (string, error) {
fileType, err := g.urlType(gistURL)
if err != nil {
return "", err
}
var rawGistURL string
switch fileType {
case GistServiceFileTypeGist:
rawGistURL = gistURL + "/raw"
case GistServiceFileTypeRepoFile:
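// Rewrite a repo "blob" URL to its raw equivalent, e.g. (illustrative)
// https://github.com/hackclub/jobs/blob/main/directory/README.md
// becomes https://raw.githubusercontent.com/hackclub/jobs/main/directory/README.md.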
u, err := url.Parse(gistURL)
if err != nil {
return "", err
}
u.Path = strings.Replace(u.Path, "blob/", "", 1)
u.Host = "raw.githubusercontent.com"
rawGistURL = u.String()
default:
return "", errors.New("GistServiceFileType case not handled")
}
fmt.Println(rawGistURL)
resp, err := http.Get(rawGistURL)
if err != nil {
return "", err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
return string(body), nil
}
func (g GistService) FileContents(fileName string) (string, error) {
gistURL := g.FileURL(fileName)
if gistURL == "" {
return "", errors.New("file " + fileName + " does not exist")
}
var cachedGist gistCache
if cached, exists := g.cachedGists[fileName]; exists {
cachedGist = cached
}
if time.Now().After(cachedGist.Expiration) {
content, err := g.fetchRemoteGistContents(gistURL)
if err != nil {
return "", fmt.Errorf("error fetching remote gist: %v", err)
}
cachedGist.Content = content
cachedGist.Expiration = time.Now().Add(5 * time.Minute)
}
g.cachedGists[fileName] = cachedGist
return cachedGist.Content, nil
}
func (g GistService) FileRendered(fileName string, darkOrLight string) (string, error) {
var cachedGist gistCache
if cached, exists := g.cachedGists[fileName]; exists {
cachedGist = cached
}
if darkOrLight != "light" && darkOrLight != "dark" && darkOrLight != "" {
return "", errors.New("invalid style")
}
if darkOrLight == "" {
darkOrLight = "dark"
}
// if possible, just return the prerendered stuff we have
if time.Now().Before(cachedGist.Expiration) && cachedGist.Rendered != "" {
return cachedGist.Rendered, nil
}
// else, do the whole shebang...
raw, err := g.FileContents(fileName)
if err != nil {
return "", err
}
r, err := glamour.NewTermRenderer(
glamour.WithStandardStyle(darkOrLight),
glamour.WithWordWrap(int(globalTerminalWidth-3)), // 72 default width, (-3 for space for line numbers)
glamour.WithBaseURL(g.FileURL(fileName)),
)
if err != nil {
return "", err
}
rendered, err := r.Render(raw)
if err != nil {
return "", err
}
// custom formatting changes
var content string
lines := strings.Split(string(rendered), "\n")
for i, l := range lines {
// remove first and last two lines (which are blank)
if i == 0 || i >= len(lines)-2 {
continue
}
// add line numbers (and left pad them)
content += fmt.Sprintf("%2v.", i) + l
// add new lines where needed
if i+1 < len(lines) {
content += "\n"
}
}
// change escaped \- to just - (for the signature at the end of the JDs)
content = strings.ReplaceAll(content, `\-`, "-")
cachedGist.Rendered = content
g.cachedGists[fileName] = cachedGist
return cachedGist.Rendered, nil
}
func main() {
var sshPort string
envSshPort := os.Getenv("SSH_PORT")
if envSshPort == "" {
sshPort = ":9999"
} else {
sshPort = ":" + envSshPort
}
files := [][]string{
{"README.md", "https://github.com/hackclub/jobs/blob/main/directory/README.md"},
{"tech_lead.md", "https://github.com/hackclub/jobs/blob/main/directory/tech_lead.md"},
{"hired_philanthropy_position.md", "https://github.com/hackclub/jobs/blob/main/directory/philanthropy_position.md"},
{"hired_education_engineer.md", "https://github.com/hackclub/jobs/blob/main/directory/education_engineer.md"},
{"hired_executive_assistant.md", "https://github.com/hackclub/jobs/blob/main/directory/executive_assistant.md"},
{"hired_communications_manager.md", "https://github.com/hackclub/jobs/blob/main/directory/communications_manager.md"},
{"hired_events_designer.md", "https://github.com/hackclub/jobs/blob/main/directory/events_designer.md"},
{"club-operations-lead.md", "https://github.com/hackclub/jobs/blob/main/directory/club-operations-lead.md"},
{"education-engineer.md", "https://github.com/hackclub/jobs/blob/main/directory/education-engineer.md"},
{"communications-manager.md", "https://github.com/hackclub/jobs/blob/main/directory/communications-manager.md"},
{"events_designer.md", "https://github.com/hackclub/jobs/blob/main/directory/events_designer.md"},
{"philanthropy_position.md", "https://github.com/hackclub/jobs/blob/main/directory/philanthropy_position.md"},
{"executive_assistant.md", "https://github.com/hackclub/jobs/blob/main/directory/executive_assistant.md"},
{"hired_clubs_lead.md", "https://gist.github.com/zachlatta/ef83904bfcfddc04bc823355e5bcd280"},
{"hired_bank_ops_associate.md", "https://github.com/hackclub/v3/blob/main/components/jobs/bank-ops-associate/jd.mdx"},
{"hired_bank_ops_lead.md", "https://github.com/hackclub/v3/blob/main/components/jobs/bank-ops-lead/jd.mdx"},
{"hired_game_designer.md", "https://gist.github.com/zachlatta/a00579cabbd94c98561377eaf369e9a6"},
}
gists := NewGistService(files)
config := &ssh.ServerConfig{
NoClientAuth: true,
}
// create tmp/ if it doesn't exist
if _, err := os.Stat("tmp/"); os.IsNotExist(err) {
os.Mkdir("tmp/", os.ModeDir)
}
// add ed25519 key
privateBytes25519, err := ioutil.ReadFile("tmp/id_ed25519")
if err != nil {
panic("Failed to open private key from disk. Try running `ssh-keygen -t ed25519` in tmp/ to create one.")
}
private25519, err := ssh.ParsePrivateKey(privateBytes25519)
if err != nil {
panic("Failed to parse ed25519 private key")
}
config.AddHostKey(private25519)
// add rsa key
privateBytesRSA, err := ioutil.ReadFile("tmp/id_rsa")
if err != nil {
panic("Failed to open private key from disk. Try running `ssh-keygen` in tmp/ to create one.")
}
privateRSA, err := ssh.ParsePrivateKey(privateBytesRSA)
if err != nil {
panic("Failed to parse rsa private key")
}
config.AddHostKey(privateRSA)
listener, err := net.Listen("tcp", fmt.Sprintf("0.0.0.0%s", sshPort))
if err != nil {
panic("failed to listen for connection")
}
fmt.Println("SSH server running at 0.0.0.0" + sshPort)
for {
nConn, err := listener.Accept()
if err != nil {
panic("failed to accept incoming connection")
}
go func() {
// ssh handshake must be performed
_, chans, reqs, err := ssh.NewServerConn(nConn, config)
if err != nil {
fmt.Println("failed to handshake with new client:", err)
return
}
// ssh connections can make "requests" outside of the main tcp pipe
// for the connection. receive and discard all of those.
go ssh.DiscardRequests(reqs)
for newChannel := range chans {
if newChannel.ChannelType() != "session" {
newChannel.Reject(ssh.UnknownChannelType, "unknown channel type")
continue
}
channel, requests, err := newChannel.Accept()
if err != nil {
fmt.Println("could not accept channel:", err)
return
}
go func(in <-chan *ssh.Request) {
for req := range in {
if req.Type == "shell" {
req.Reply(true, nil)
}
}
}(requests)
go func() {
defer channel.Close()
connecting := []string{
"\x1b[33m...connecting...\x1b[0m\r",
"\x1b[35m...c..o..n..n..e..c..t..i..n..g...\x1b[0m\r",
}
connectingSpeed := 100
for _, l := range connecting {
for _, c := range strings.Split(l, "") {
fmt.Fprint(channel, c)
time.Sleep(time.Duration(connectingSpeed) * time.Millisecond)
}
connectingSpeed += 50
}
connected := []string{
"\r\x1b[2m..........................................................\x1b[0m\n\r",
"\n\r",
" \x1b[35m(ノ◕ヮ◕)ノ*:・゚✧ ~*~ CONNECTED! ~*~ ✧゚・: *ヽ(◕ヮ◕ヽ)\x1b[0m\n\r",
"\n\r",
"\x1b[2m..........................................................\x1b[0m\n\r",
"\n\r",
"\x1b[1mWELCOME TO THE HACK CLUB JOBS TERMINAL.\x1b[0m PLEASE TYPE `help` TO BEGIN.\n\r",
"\n\r",
}
typewriteLines(channel, 25*time.Millisecond, connected)
term := terminal.NewTerminal(channel, "\x1b[36m\\(•◡•)/ ~> \x1b[1m$\x1b[0m ")
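// Tab completion is only wired up for `cat`: typing "cat READ" and hitting tab
// completes to "cat README.md" when exactly one file name matches (illustrative).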
term.AutoCompleteCallback = func(line string, pos int, key rune) (newLine string, newPos int, ok bool) {
// only autocomplete when they hit tab
if key != '\t' {
return newLine, newPos, ok
}
lineParts := strings.Split(line, " ")
// only autocomplete if they're typing a file into cat
if lineParts[0] != "cat" {
return newLine, newPos, ok
}
var givenFile string
if len(lineParts) > 1 {
givenFile = lineParts[1]
}
files := gists.FileNames()
fileMatches := []string{}
for _, fileName := range files {
if strings.HasPrefix(fileName, givenFile) {
fileMatches = append(fileMatches, fileName)
}
}
if len(fileMatches) > 1 {
fmt.Fprintln(term, strings.Join(fileMatches, "\t")+"\n")
} else if len(fileMatches) == 1 {
newLine = strings.Join([]string{"cat", fileMatches[0]}, " ")
newPos = len(newLine)
ok = true
}
return newLine, newPos, ok
}
for {
cmds := map[string]func([]string){
"help": func(args []string) {
fmt.Fprintln(term, "\x1b[1mHACK CLUB JOBS TERMINAL\x1b[0m \x1b[2mversion 1.0.1-release (x86_64)\x1b[0m"+`
These shell commands are defined internally. Type `+"`help`"+` to see this
list.
`)
// use tabwriter to neatly format command help
helpWriter := tabwriter.NewWriter(term, 8, 8, 0, '\t', 0)
commands := [][]string{
{"ls", "list contents of current directory"},
{"cat [file] [dark or light]", "display contents of current file"},
{"clear", "summon the v o i d"},
{"exit", "exit the terminal"},
}
for _, command := range commands {
fmt.Fprintf(helpWriter, " %s\t%s\r\n", command[0], command[1])
}
helpWriter.Flush()
fmt.Fprintln(term, "\npsst! try running `ls` to get started")
},
"ls": func(args []string) {
files := gists.FileNames()
for i, file := range files {
if file == "README.md" {
files[i] = "\x1b[1m" + file + "\x1b[0m"
} else if strings.HasPrefix(file, "hired_") {
files[i] = "\x1b[2m" + file + "\x1b[0m"
}
}
fmt.Fprintln(term, "\x1b[1;2myou dust off the shelves and find the following files laying about...\x1b[0m\n\r")
fmt.Fprintln(term, strings.Join(files, "\n"))
},
"clear": func(args []string) {
fmt.Fprint(term, "\x1b[H\x1b[2J")
},
"cat": func(args []string) {
if len(args) == 0 {
fmt.Fprintln(term, "meow! please pass me a file! i can't do anything without one!")
return
}
argFile := args[0]
var darkOrLight string
if len(args) > 1 {
darkOrLight = args[1]
}
if !gists.FileExists(argFile) {
fmt.Fprintln(term, "meow! i can't find the file", argFile)
return
}
meowText := " m e e o o o w ! "
typewrite(term, 100*time.Millisecond, meowText)
content, err := gists.FileRendered(argFile, darkOrLight)
if err != nil {
fmt.Println(err)
fmt.Fprintln(term, "meow... i am having trouble accessing my brain (file retrieval error)")
return
}
// clear the meow
fmt.Fprint(term, "\r"+strings.Repeat(" ", len(meowText))+"\r")
contentLines := strings.Split(content, "\n")
linesToShow := 14
var exitMsg string
if darkOrLight == "" || darkOrLight == "dark" {
exitMsg += " ~ psst. you can switch to light mode with `cat [file] light` ~"
} else {
exitMsg += " ~ psst. you can switch to dark mode with `cat [file] dark` ~"
}
exitMsg += "\r\n\n easier to read this file online? " + gists.FileURL(argFile) + " ~(˘▾˘~)"
// if we don't need to page, print and exit
if len(contentLines) <= linesToShow {
fmt.Fprintln(term, content)
fmt.Fprintln(term, exitMsg)
return
}
// page!
input := make(chan string, 1)
finishedPrinting := false
go func() {
fmt.Println("ATTEMPTING TO PAGE")
totalLines := len(contentLines)
currentLine := 0
// print the first n lines
fmt.Fprintln(term, strings.Join(contentLines[currentLine:linesToShow], "\n"))
currentLine += linesToShow
for range input {
nextCurrentLine := currentLine + linesToShow
if nextCurrentLine > totalLines {
nextCurrentLine = totalLines
}
fmt.Fprint(term, cursor.MoveUp(1))
fmt.Fprintln(term, strings.Join(contentLines[currentLine:nextCurrentLine], "\n"))
currentLine = nextCurrentLine
if currentLine >= totalLines {
finishedPrinting = true
break
}
}
}()
for !finishedPrinting {
line, err := term.ReadPassword(" ~(press enter to print more...)~")
if err != nil {
break
}
input <- line
}
fmt.Fprint(term, cursor.MoveUp(1))
fmt.Fprintln(term, exitMsg)
},
"pwd": func(args []string) {
typewrite(term, 75*time.Millisecond, "you look up, you look down, you look all around. you are completely and utterly lost.\n\r")
},
"cd": func(args []string) {
typewrite(term, 75*time.Millisecond, "what even IS a directory? this is the HACK CLUB JOBS TERMINAL. there are only jobs here.\r\n")
},
"whoami": func(args []string) {
typewrite(term, 75*time.Millisecond, "who ARE you? why are we here? what IS this all about?\r\n")
},
"exit": func(args []string) {
goodbye := []string{
"\x1b[1;34mJOBS TERMINAL OUT. SEE YOU LATER!\x1b[0m\r\n",
"CODE AT https://github.com/hackclub/jobs\r\n",
"WANT TO TRY SOMETHING FUN? RUN $ ssh sshtron.zachlatta.com\r\n",
"(~˘▾˘)~\n\n",
}
typewriteLines(term, 25*time.Millisecond, goodbye)
channel.Close()
},
}
line, err := term.ReadLine()
if err != nil {
break
}
log.Println(nConn.RemoteAddr(), "ran command:", line)
trimmedInput := strings.TrimSpace(line)
inputElements := strings.Split(trimmedInput, " ")
inputCmd := inputElements[0]
inputArgs := inputElements[1:]
if cmd, ok := cmds[inputCmd]; ok {
fmt.Fprintln(term, "")
cmd(inputArgs)
fmt.Fprintln(term, "")
} else if inputCmd != "" {
fmt.Fprintln(term, "")
fmt.Fprintln(term, inputCmd, `is not a known command.
p.s. this is a custom SSH server, with a custom shell, written in Go. open source at https://github.com/hackclub/jobs!`)
fmt.Fprintln(term, "")
}
}
}()
}
}()
}
}
| [
"\"SSH_PORT\""
]
| []
| [
"SSH_PORT"
]
| [] | ["SSH_PORT"] | go | 1 | 0 | |
pkg/kube_events_manager/util.go | package kube_events_manager
import (
"bytes"
"context"
"encoding/json"
"fmt"
"os"
"os/exec"
"runtime/trace"
"strings"
. "github.com/flant/libjq-go"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/fields"
. "github.com/flant/shell-operator/pkg/kube_events_manager/types"
"github.com/flant/shell-operator/pkg/app"
"github.com/flant/shell-operator/pkg/executor"
utils_checksum "github.com/flant/shell-operator/pkg/utils/checksum"
)
// ApplyJqFilter filter object json representation with jq expression, calculate checksum
// over result and return ObjectAndFilterResult. If jqFilter is empty, no filter
// is required and checksum is calculated over full json representation of the object.
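// For example (illustrative), a jqFilter of ".metadata.labels" reduces the object to its
// labels, so the checksum only changes when the labels change.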
func ApplyJqFilter(jqFilter string, obj *unstructured.Unstructured) (*ObjectAndFilterResult, error) {
defer trace.StartRegion(context.Background(), "ApplyJqFilter").End()
res := &ObjectAndFilterResult{
Object: obj,
}
res.Metadata.JqFilter = jqFilter
res.Metadata.ResourceId = ResourceId(obj)
data, err := json.Marshal(obj)
if err != nil {
return nil, err
}
res.ObjectBytes = int64(len(data))
if jqFilter == "" {
res.Metadata.Checksum = utils_checksum.CalculateChecksum(string(data))
} else {
var err error
var filtered string
if os.Getenv("JQ_EXEC") == "yes" {
stdout, stderr, err := execJq(jqFilter, data)
if err != nil {
return nil, fmt.Errorf("failed exec jq: \nerr: '%s'\nstderr: '%s'", err, stderr)
}
filtered = stdout
} else {
filtered, err = Jq().WithLibPath(app.JqLibraryPath).Program(jqFilter).Cached().Run(string(data))
if err != nil {
return nil, fmt.Errorf("failed jq filter '%s': '%s'", jqFilter, err)
}
}
res.FilterResult = filtered
res.Metadata.Checksum = utils_checksum.CalculateChecksum(filtered)
}
return res, nil
}
// TODO: Can be removed after testing with libjq-go
// execJq run jq in locked mode with executor
func execJq(jqFilter string, jsonData []byte) (stdout string, stderr string, err error) {
var cmd *exec.Cmd
if app.JqLibraryPath == "" {
cmd = exec.Command("/usr/bin/jq", jqFilter)
} else {
cmd = exec.Command("/usr/bin/jq", "-L", app.JqLibraryPath, jqFilter)
}
var stdinBuf bytes.Buffer
_, err = stdinBuf.WriteString(string(jsonData))
if err != nil {
panic(err)
}
cmd.Stdin = &stdinBuf
var stdoutBuf bytes.Buffer
cmd.Stdout = &stdoutBuf
var stderrBuf bytes.Buffer
cmd.Stderr = &stderrBuf
err = executor.Run(cmd)
stdout = strings.TrimSpace(stdoutBuf.String())
stderr = strings.TrimSpace(stderrBuf.String())
return
}
func ResourceId(obj *unstructured.Unstructured) string {
return fmt.Sprintf("%s/%s/%s", obj.GetNamespace(), obj.GetKind(), obj.GetName())
}
func FormatLabelSelector(selector *metav1.LabelSelector) (string, error) {
res, err := metav1.LabelSelectorAsSelector(selector)
if err != nil {
return "", err
}
return res.String(), nil
}
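// FormatFieldSelector renders a FieldSelector's MatchExpressions as a Kubernetes field-selector
// string, e.g. (illustrative) {Field: "metadata.namespace", Operator: "==", Value: "default"}
// becomes "metadata.namespace=default".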
func FormatFieldSelector(selector *FieldSelector) (string, error) {
if selector == nil || selector.MatchExpressions == nil {
return "", nil
}
requirements := make([]fields.Selector, 0)
for _, req := range selector.MatchExpressions {
switch req.Operator {
case "=", "==", "Equals":
requirements = append(requirements, fields.OneTermEqualSelector(req.Field, req.Value))
case "!=", "NotEquals":
requirements = append(requirements, fields.OneTermNotEqualSelector(req.Field, req.Value))
default:
return "", fmt.Errorf("%s%s%s: operator '%s' is not recognized", req.Field, req.Operator, req.Value, req.Operator)
}
}
return fields.AndSelectors(requirements...).String(), nil
}
| [
"\"JQ_EXEC\""
]
| []
| [
"JQ_EXEC"
]
| [] | ["JQ_EXEC"] | go | 1 | 0 | |
test/install_k8s_test.go | // Copyright 2018 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This tests the k8s installation. It validates the CNI plugin configuration
// and the existence of the CNI plugin binary locations.
package install_test
import (
"fmt"
"os"
"testing"
//"github.com/nsf/jsondiff"
"istio.io/cni/deployments/kubernetes/install/test"
)
var (
PreConfDir = "data/pre"
ExpectedConfDir = "data/expected"
TestWorkDir, _ = os.Getwd()
Hub = "gcr.io/istio-release"
Tag = "master-latest-daily"
)
type testCase struct {
name string
preConfFile string
resultFileName string
expectedOutputFile string
expectedPostCleanFile string
}
func doTest(testNum int, tc testCase, t *testing.T) {
os.Setenv("HUB", Hub)
os.Setenv("TAG", Tag)
t.Logf("Running install CNI test with HUB=%s, TAG=%s", Hub, Tag)
test.RunInstallCNITest(testNum, tc.preConfFile, tc.resultFileName, tc.expectedOutputFile, tc.expectedPostCleanFile, t)
}
func TestInstall(t *testing.T) {
envHub := os.Getenv("HUB")
if envHub != "" {
Hub = envHub
}
envTag := os.Getenv("TAG")
if envTag != "" {
Tag = envTag
}
t.Logf("HUB=%s, TAG=%s", Hub, Tag)
testDataDir := TestWorkDir + "/../deployments/kubernetes/install/test/data"
cases := []testCase{
{
name: "First file with pre-plugins",
preConfFile: "NONE",
resultFileName: "10-calico.conflist",
expectedOutputFile: testDataDir + "/expected/10-calico.conflist-istioconfig",
expectedPostCleanFile: "",
},
{
name: "File with pre-plugins",
preConfFile: "10-calico.conflist",
resultFileName: "10-calico.conflist",
expectedOutputFile: testDataDir + "/expected/10-calico.conflist-istioconfig",
expectedPostCleanFile: "",
},
{
name: "File without pre-plugins",
preConfFile: "minikube_cni.conf",
resultFileName: "minikube_cni.conflist",
expectedOutputFile: testDataDir + "/expected/minikube_cni.conflist.expected",
expectedPostCleanFile: testDataDir + "/expected/minikube_cni.conflist.clean",
},
}
for i, c := range cases {
t.Run(fmt.Sprintf("case %d %s", i, c.name), func(t *testing.T) {
t.Logf("%s: Test preconf %s, expected %s", c.name, c.preConfFile, c.expectedOutputFile)
doTest(i, c, t)
})
}
}
| [
"\"HUB\"",
"\"TAG\""
]
| []
| [
"HUB",
"TAG"
]
| [] | ["HUB", "TAG"] | go | 2 | 0 | |
utils/jwt_token.go | package utils
import (
"errors"
"fmt"
"os"
"time"
"github.com/dgrijalva/jwt-go"
)
type jwtService struct {
secretKey string
issure string
}
func NewJWTService() *jwtService {
return &jwtService{
secretKey: os.Getenv("JWT_SECRET"),
issure: "book-api",
}
}
type Claim struct {
Sum string `json:"sum"`
jwt.StandardClaims
}
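// GenerateToken signs an HS256 token whose "sum" claim carries the given id and which
// expires after two hours. Illustrative usage (assumes the JWT_SECRET env var is set):
//   token, err := NewJWTService().GenerateToken("some-user-id")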
func (s *jwtService) GenerateToken(id string) (string, error) {
if len(os.Getenv("JWT_SECRET")) == 0 {
return "", errors.New("JWT SECRET not found")
}
claim := &Claim{
id,
jwt.StandardClaims{
ExpiresAt: time.Now().Add(time.Hour * 2).Unix(),
Issuer: s.issure,
IssuedAt: time.Now().Unix(),
},
}
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claim)
t, err := token.SignedString([]byte(s.secretKey))
if err != nil {
return "", err
}
return t, nil
}
func (s *jwtService) ValidateToken(token string) bool {
_, err := jwt.Parse(token, func(t *jwt.Token) (interface{}, error) {
if _, isValid := t.Method.(*jwt.SigningMethodHMAC); !isValid {
return nil, fmt.Errorf("invalid token: %v", token)
}
return []byte(s.secretKey), nil
})
return err == nil
}
func (s *jwtService) GetIDFromToken(t string) (string, error) {
token, err := jwt.Parse(t, func(token *jwt.Token) (interface{}, error) {
if _, isvalid := token.Method.(*jwt.SigningMethodHMAC); !isvalid {
return nil, fmt.Errorf("invalid Token: %v", t)
}
return []byte(s.secretKey), nil
})
if err != nil {
return "", err
}
if claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {
id := claims["sum"].(string)
return id, nil
}
return "", err
}
| [
"\"JWT_SECRET\"",
"\"JWT_SECRET\""
]
| []
| [
"JWT_SECRET"
]
| [] | ["JWT_SECRET"] | go | 1 | 0 | |
cmd/server-main.go | /*
* Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"fmt"
"net/http"
"os"
"os/signal"
"strings"
"syscall"
"github.com/minio/cli"
"github.com/minio/dsync"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/certs"
)
func init() {
logger.Init(GOPATH)
logger.RegisterUIError(fmtError)
}
var serverFlags = []cli.Flag{
cli.StringFlag{
Name: "address",
Value: ":" + globalMinioPort,
Usage: "Bind to a specific ADDRESS:PORT, ADDRESS can be an IP or hostname.",
},
}
var serverCmd = cli.Command{
Name: "server",
Usage: "Start object storage server.",
Flags: append(serverFlags, globalFlags...),
Action: serverMain,
CustomHelpTemplate: `NAME:
{{.HelpName}} - {{.Usage}}
USAGE:
{{.HelpName}} {{if .VisibleFlags}}[FLAGS] {{end}}DIR1 [DIR2..]
{{.HelpName}} {{if .VisibleFlags}}[FLAGS] {{end}}DIR{1...64}
DIR:
DIR points to a directory on a filesystem. When you want to combine
multiple drives into a single large system, pass one directory per
filesystem separated by space. You may also use a '...' convention
to abbreviate the directory arguments. Remote directories in a
distributed setup are encoded as HTTP(s) URIs.
{{if .VisibleFlags}}
FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
ENVIRONMENT VARIABLES:
ACCESS:
MINIO_ACCESS_KEY: Custom username or access key of minimum 3 characters in length.
MINIO_SECRET_KEY: Custom password or secret key of minimum 8 characters in length.
ENDPOINTS:
MINIO_ENDPOINTS: List of all endpoints delimited by ' '.
BROWSER:
MINIO_BROWSER: To disable web browser access, set this value to "off".
CACHE:
MINIO_CACHE_DRIVES: List of mounted drives or directories delimited by ";".
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ";".
MINIO_CACHE_EXPIRY: Cache expiry duration in days.
DOMAIN:
MINIO_DOMAIN: To enable virtual-host-style requests, set this value to Minio host domain name.
WORM:
MINIO_WORM: To turn on Write-Once-Read-Many in server, set this value to "on".
EXAMPLES:
1. Start minio server on "/home/shared" directory.
$ {{.HelpName}} /home/shared
2. Start minio server bound to a specific ADDRESS:PORT.
$ {{.HelpName}} --address 192.168.1.101:9000 /home/shared
3. Start minio server and enable virtual-host-style requests.
$ export MINIO_DOMAIN=mydomain.com
$ {{.HelpName}} --address mydomain.com:9000 /mnt/export
4. Start minio server on 64 disks server with endpoints through environment variable.
$ export MINIO_ENDPOINTS=/mnt/export{1...64}
$ {{.HelpName}}
5. Start distributed minio server on an 8 node setup with 8 drives each. Run following command on all the 8 nodes.
$ export MINIO_ACCESS_KEY=minio
$ export MINIO_SECRET_KEY=miniostorage
$ {{.HelpName}} http://node{1...8}.example.com/mnt/export/{1...8}
6. Start minio server with edge caching enabled.
$ export MINIO_CACHE_DRIVES="/mnt/drive1;/mnt/drive2;/mnt/drive3;/mnt/drive4"
$ export MINIO_CACHE_EXCLUDE="bucket1/*;*.png"
$ export MINIO_CACHE_EXPIRY=40
$ {{.HelpName}} /home/shared
`,
}
// Checks if endpoints are either available through environment
// or command line, returns false if both fails.
func endpointsPresent(ctx *cli.Context) bool {
_, ok := os.LookupEnv("MINIO_ENDPOINTS")
if !ok {
ok = ctx.Args().Present()
}
return ok
}
func serverHandleCmdArgs(ctx *cli.Context) {
// Handle common command args.
handleCommonCmdArgs(ctx)
// Server address.
serverAddr := ctx.String("address")
logger.FatalIf(CheckLocalServerAddr(serverAddr), "Unable to validate passed arguments")
var setupType SetupType
var err error
if len(ctx.Args()) > serverCommandLineArgsMax {
uErr := uiErrInvalidErasureEndpoints(nil).Msg(fmt.Sprintf("Invalid total number of endpoints (%d) passed, supported up to 32 unique arguments",
len(ctx.Args())))
logger.FatalIf(uErr, "Unable to validate passed endpoints")
}
endpoints := strings.Fields(os.Getenv("MINIO_ENDPOINTS"))
if len(endpoints) > 0 {
globalMinioAddr, globalEndpoints, setupType, globalXLSetCount, globalXLSetDriveCount, err = createServerEndpoints(serverAddr, endpoints...)
} else {
globalMinioAddr, globalEndpoints, setupType, globalXLSetCount, globalXLSetDriveCount, err = createServerEndpoints(serverAddr, ctx.Args()...)
}
logger.FatalIf(err, "Invalid command line arguments")
globalMinioHost, globalMinioPort = mustSplitHostPort(globalMinioAddr)
// On macOS, if a process already listens on LOCALIPADDR:PORT, net.Listen() falls back
// to IPv6 address ie minio will start listening on IPv6 address whereas another
// (non-)minio process is listening on IPv4 of given port.
// To avoid this error situation we check for port availability.
logger.FatalIf(checkPortAvailability(globalMinioPort), "Unable to start the server")
globalIsXL = (setupType == XLSetupType)
globalIsDistXL = (setupType == DistXLSetupType)
if globalIsDistXL {
globalIsXL = true
}
}
func serverHandleEnvVars() {
// Handle common environment variables.
handleCommonEnvVars()
if serverRegion := os.Getenv("MINIO_REGION"); serverRegion != "" {
// region Envs are set globally.
globalIsEnvRegion = true
globalServerRegion = serverRegion
}
}
// serverMain handler called for 'minio server' command.
func serverMain(ctx *cli.Context) {
if ctx.Args().First() == "help" || !endpointsPresent(ctx) {
cli.ShowCommandHelpAndExit(ctx, "server", 1)
}
// Disable logging until server initialization is complete, any
// error during initialization will be shown as a fatal message
logger.Disable = true
// Get "json" flag from command line argument and
// enable json and quiet modes if the json flag is turned on.
jsonFlag := ctx.IsSet("json") || ctx.GlobalIsSet("json")
if jsonFlag {
logger.EnableJSON()
}
// Get quiet flag from command line argument.
quietFlag := ctx.IsSet("quiet") || ctx.GlobalIsSet("quiet")
if quietFlag {
logger.EnableQuiet()
}
// Handle all server command args.
serverHandleCmdArgs(ctx)
// Handle all server environment vars.
serverHandleEnvVars()
// Create certs path.
logger.FatalIf(createConfigDir(), "Unable to initialize configuration files")
// Initialize server config.
initConfig()
// Check and load SSL certificates.
var err error
globalPublicCerts, globalRootCAs, globalTLSCerts, globalIsSSL, err = getSSLConfig()
logger.FatalIf(err, "Unable to load the TLS configuration")
// Is distributed setup, error out if no certificates are found for HTTPS endpoints.
if globalIsDistXL {
if globalEndpoints.IsHTTPS() && !globalIsSSL {
logger.Fatal(uiErrNoCertsAndHTTPSEndpoints(nil), "Unable to start the server")
}
if !globalEndpoints.IsHTTPS() && globalIsSSL {
logger.Fatal(uiErrCertsAndHTTPEndpoints(nil), "Unable to start the server")
}
}
if !quietFlag {
// Check for new updates from dl.minio.io.
mode := globalMinioModeFS
if globalIsDistXL {
mode = globalMinioModeDistXL
} else if globalIsXL {
mode = globalMinioModeXL
}
checkUpdate(mode)
}
// Set system resources to maximum.
logger.LogIf(context.Background(), setMaxResources())
// Set nodes for dsync for distributed setup.
if globalIsDistXL {
globalDsync, err = dsync.New(newDsyncNodes(globalEndpoints))
if err != nil {
logger.Fatal(err, "Unable to initialize distributed locking on %s", globalEndpoints)
}
}
// Initialize name space lock.
initNSLock(globalIsDistXL)
// Init global heal state
initAllHealState(globalIsXL)
// Configure server.
var handler http.Handler
handler, err = configureServerHandler(globalEndpoints)
if err != nil {
logger.Fatal(uiErrUnexpectedError(err), "Unable to configure one of server's RPC services")
}
// Create new notification system.
globalNotificationSys, err = NewNotificationSys(globalServerConfig, globalEndpoints)
if err != nil {
logger.Fatal(err, "Unable to initialize the notification system")
}
// Create new policy system.
globalPolicySys = NewPolicySys()
// Initialize Admin Peers inter-node communication only in distributed setup.
initGlobalAdminPeers(globalEndpoints)
var getCert certs.GetCertificateFunc
if globalTLSCerts != nil {
getCert = globalTLSCerts.GetCertificate
}
globalHTTPServer = xhttp.NewServer([]string{globalMinioAddr}, handler, getCert)
globalHTTPServer.UpdateBytesReadFunc = globalConnStats.incInputBytes
globalHTTPServer.UpdateBytesWrittenFunc = globalConnStats.incOutputBytes
go func() {
globalHTTPServerErrorCh <- globalHTTPServer.Start()
}()
signal.Notify(globalOSSignalCh, os.Interrupt, syscall.SIGTERM)
newObject, err := newObjectLayer(globalEndpoints)
if err != nil {
// Stop watching for any certificate changes.
globalTLSCerts.Stop()
globalHTTPServer.Shutdown()
logger.FatalIf(err, "Unable to initialize backend")
}
globalObjLayerMutex.Lock()
globalObjectAPI = newObject
globalObjLayerMutex.Unlock()
// Prints the formatted startup message once object layer is initialized.
apiEndpoints := getAPIEndpoints(globalMinioAddr)
printStartupMessage(apiEndpoints)
// Set uptime time after object layer has initialized.
globalBootTime = UTCNow()
// Re-enable logging
logger.Disable = false
handleSignals()
}
// Initialize object layer with the supplied disks, objectLayer is nil upon any error.
func newObjectLayer(endpoints EndpointList) (newObject ObjectLayer, err error) {
// For FS only, directly use the disk.
isFS := len(endpoints) == 1
if isFS {
// Initialize new FS object layer.
return NewFSObjectLayer(endpoints[0].Path)
}
format, err := waitForFormatXL(context.Background(), endpoints[0].IsLocal, endpoints, globalXLSetCount, globalXLSetDriveCount)
if err != nil {
return nil, err
}
return newXLSets(endpoints, format, len(format.XL.Sets), len(format.XL.Sets[0]))
}
| [
"\"MINIO_ENDPOINTS\"",
"\"MINIO_REGION\""
]
| []
| [
"MINIO_REGION",
"MINIO_ENDPOINTS"
]
| [] | ["MINIO_REGION", "MINIO_ENDPOINTS"] | go | 2 | 0 | |
auth/providers/ldap/options.go | /*
Copyright The Guard Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ldap
import (
"crypto/x509"
"fmt"
"io/ioutil"
"os"
"github.com/appscode/go/types"
"github.com/go-ldap/ldap"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"gopkg.in/jcmturner/gokrb5.v4/keytab"
apps "k8s.io/api/apps/v1"
core "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
type Options struct {
ServerAddress string
ServerPort string
// The connector uses this DN in credentials to search for users and groups.
// Not required if the LDAP server provides access for anonymous auth.
BindDN string
// The connector uses this Password in credentials to search for users and groups.
// Not required if the LDAP server provides access for anonymous auth.
BindPassword string
// BaseDN to start the search user
UserSearchDN string
// filter to apply when searching user
// default : (objectClass=person)
UserSearchFilter string
// Ldap username attribute
// default : uid
UserAttribute string
//BaseDN to start the search group
GroupSearchDN string
// filter to apply when searching the groups that user is member of
// default : (objectClass=groupOfNames)
GroupSearchFilter string
// Ldap group member attribute
// default: member
GroupMemberAttribute string
// Ldap group name attribute
// default: cn
GroupNameAttribute string
SkipTLSVerification bool
// for LDAP over SSL
IsSecureLDAP bool
// for start tls connection
StartTLS bool
// path to the caCert file, needed for self signed server certificate
CaCertFile string
CaCertPool *x509.CertPool
// LDAP user authentication mechanism
// 0 for simple authentication
// 1 for kerberos(via GSSAPI)
AuthenticationChoice AuthChoice
// path to the keytab file
// it contains LDAP service principal keys
// required for kerberos
// default : 0
KeytabFile string
// keytab contains service principal and encryption key
keytab keytab.Keytab
// The serviceAccountName needs to be defined when using Active Directory
// where the SPN is mapped to a user account. If this is not required it
// should be set to an empty string ""
// default : ""
ServiceAccountName string
}
func NewOptions() Options {
return Options{
BindDN: os.Getenv("LDAP_BIND_DN"),
BindPassword: os.Getenv("LDAP_BIND_PASSWORD"),
}
}
// if ca cert is provided then create CA Cert Pool
// if keytab file is provided then load it
func (o *Options) Configure() error {
// caCertPool for self signed LDAP server certificate
if o.CaCertFile != "" {
caCert, err := ioutil.ReadFile(o.CaCertFile)
if err != nil {
return errors.Wrap(err, "unable to read ca cert file")
}
o.CaCertPool = x509.NewCertPool()
ok := o.CaCertPool.AppendCertsFromPEM(caCert)
if !ok {
return errors.New("Failed to add CA cert in CertPool for LDAP")
}
}
// keytab required for kerberos
if o.AuthenticationChoice == AuthChoiceKerberos {
var err error
if o.KeytabFile == "" {
return errors.New("keytab not provided")
}
o.keytab, err = keytab.Load(o.KeytabFile)
if err != nil {
return errors.Wrap(err, "unable to parse keytab file")
}
}
return nil
}
func (o *Options) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&o.ServerAddress, "ldap.server-address", o.ServerAddress, "Host or IP of the LDAP server")
fs.StringVar(&o.ServerPort, "ldap.server-port", "389", "LDAP server port")
fs.StringVar(&o.BindDN, "ldap.bind-dn", o.BindDN, "The connector uses this DN in credentials to search for users and groups. Not required if the LDAP server provides access for anonymous auth.")
fs.StringVar(&o.BindPassword, "ldap.bind-password", o.BindPassword, "The connector uses this password in credentials to search for users and groups. Not required if the LDAP server provides access for anonymous auth.")
fs.StringVar(&o.UserSearchDN, "ldap.user-search-dn", o.UserSearchDN, "BaseDN to start the search user")
fs.StringVar(&o.UserSearchFilter, "ldap.user-search-filter", DefaultUserSearchFilter, "Filter to apply when searching user")
fs.StringVar(&o.UserAttribute, "ldap.user-attribute", DefaultUserAttribute, "Ldap username attribute")
fs.StringVar(&o.GroupSearchDN, "ldap.group-search-dn", o.GroupSearchDN, "BaseDN to start the search group")
fs.StringVar(&o.GroupSearchFilter, "ldap.group-search-filter", DefaultGroupSearchFilter, "Filter to apply when searching the groups that user is member of")
fs.StringVar(&o.GroupMemberAttribute, "ldap.group-member-attribute", DefaultGroupMemberAttribute, "Ldap group member attribute")
fs.StringVar(&o.GroupNameAttribute, "ldap.group-name-attribute", DefaultGroupNameAttribute, "Ldap group name attribute")
fs.BoolVar(&o.SkipTLSVerification, "ldap.skip-tls-verification", false, "Skip LDAP server TLS verification, default : false")
fs.BoolVar(&o.IsSecureLDAP, "ldap.is-secure-ldap", false, "Secure LDAP (LDAPS)")
fs.BoolVar(&o.StartTLS, "ldap.start-tls", false, "Start tls connection")
fs.StringVar(&o.CaCertFile, "ldap.ca-cert-file", "", "ca cert file used for self signed server certificate")
fs.Var(&o.AuthenticationChoice, "ldap.auth-choice", "LDAP user authentication mechanisms Simple/Kerberos(via GSSAPI)")
fs.StringVar(&o.KeytabFile, "ldap.keytab-file", "", "path to the keytab file, it's contain LDAP service principal keys")
fs.StringVar(&o.ServiceAccountName, "ldap.service-account", "", "service account name")
}
// request to search user
func (o *Options) newUserSearchRequest(username string) *ldap.SearchRequest {
userFilter := fmt.Sprintf("(&%s(%s=%s))", o.UserSearchFilter, o.UserAttribute, username)
return &ldap.SearchRequest{
BaseDN: o.UserSearchDN,
Scope: ldap.ScopeWholeSubtree,
DerefAliases: ldap.NeverDerefAliases,
SizeLimit: 2, // limit number of entries in result
TimeLimit: 10,
TypesOnly: false,
Filter: userFilter, // filter default format : (&(objectClass=person)(uid=%s))
}
}
// request to get user group list
func (o *Options) newGroupSearchRequest(userDN string) *ldap.SearchRequest {
groupFilter := fmt.Sprintf("(&%s(%s=%s))", o.GroupSearchFilter, o.GroupMemberAttribute, userDN)
return &ldap.SearchRequest{
BaseDN: o.GroupSearchDN,
Scope: ldap.ScopeWholeSubtree,
DerefAliases: ldap.NeverDerefAliases,
SizeLimit: 0, // limit number of entries in result, 0 values means no limitations
TimeLimit: 10,
TypesOnly: false,
Filter: groupFilter, // filter default format : (&(objectClass=groupOfNames)(member=%s))
Attributes: []string{o.GroupNameAttribute},
}
}
func (o *Options) Validate() []error {
var errs []error
if o.ServerAddress == "" {
errs = append(errs, errors.New("ldap.server-address must be non-empty"))
}
if o.ServerPort == "" {
errs = append(errs, errors.New("ldap.server-port must be non-empty"))
}
if o.UserSearchDN == "" {
errs = append(errs, errors.New("ldap.user-search-dn must be non-empty"))
}
if o.UserAttribute == "" {
errs = append(errs, errors.New("ldap.user-attribute must be non-empty"))
}
if o.GroupSearchDN == "" {
errs = append(errs, errors.New("ldap.group-search-dn must be non-empty"))
}
if o.GroupMemberAttribute == "" {
errs = append(errs, errors.New("ldap.group-member-attribute must be non-empty"))
}
if o.GroupNameAttribute == "" {
errs = append(errs, errors.New("ldap.group-name-attribute must be non-empty"))
}
if o.IsSecureLDAP && o.StartTLS {
errs = append(errs, errors.New("ldap.is-secure-ldap and ldap.start-tls both can not be true at the same time"))
}
if o.AuthenticationChoice == AuthChoiceKerberos && o.KeytabFile == "" {
errs = append(errs, errors.New("for kerberos ldap.keytab-file must be non-empty"))
}
return errs
}
func (o Options) Apply(d *apps.Deployment) (extraObjs []runtime.Object, err error) {
container := d.Spec.Template.Spec.Containers[0]
// create auth secret
ldapData := map[string][]byte{
"bind-dn": []byte(o.BindDN), // username kept in secret, since password is in secret
"bind-password": []byte(o.BindPassword),
}
if o.CaCertFile != "" {
cert, err := ioutil.ReadFile(o.CaCertFile)
if err != nil {
return nil, err
}
ldapData["ca.crt"] = cert
}
if o.KeytabFile != "" {
key, err := ioutil.ReadFile(o.KeytabFile)
if err != nil {
return nil, err
}
ldapData["krb5.keytab"] = key
}
authSecret := &core.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "guard-ldap-auth",
Namespace: d.Namespace,
Labels: d.Labels,
},
Data: ldapData,
}
extraObjs = append(extraObjs, authSecret)
// mount auth secret into deployment
volMount := core.VolumeMount{
Name: authSecret.Name,
MountPath: "/etc/guard/auth/ldap",
}
container.VolumeMounts = append(container.VolumeMounts, volMount)
vol := core.Volume{
Name: authSecret.Name,
VolumeSource: core.VolumeSource{
Secret: &core.SecretVolumeSource{
SecretName: authSecret.Name,
DefaultMode: types.Int32P(0444),
},
},
}
d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, vol)
// use auth secret in container[0] args
container.Env = append(container.Env,
core.EnvVar{
Name: "LDAP_BIND_DN",
ValueFrom: &core.EnvVarSource{
SecretKeyRef: &core.SecretKeySelector{
LocalObjectReference: core.LocalObjectReference{
Name: authSecret.Name,
},
Key: "bind-dn",
},
},
},
core.EnvVar{
Name: "LDAP_BIND_PASSWORD",
ValueFrom: &core.EnvVarSource{
SecretKeyRef: &core.SecretKeySelector{
LocalObjectReference: core.LocalObjectReference{
Name: authSecret.Name,
},
Key: "bind-password",
},
},
},
)
args := container.Args
if o.ServerAddress != "" {
args = append(args, fmt.Sprintf("--ldap.server-address=%s", o.ServerAddress))
}
if o.ServerPort != "" {
args = append(args, fmt.Sprintf("--ldap.server-port=%s", o.ServerPort))
}
if o.UserSearchDN != "" {
args = append(args, fmt.Sprintf("--ldap.user-search-dn=%s", o.UserSearchDN))
}
if o.UserSearchFilter != "" {
args = append(args, fmt.Sprintf("--ldap.user-search-filter=%s", o.UserSearchFilter))
}
if o.UserSearchFilter != "" {
args = append(args, fmt.Sprintf("--ldap.user-attribute=%s", o.UserAttribute))
}
if o.GroupSearchDN != "" {
args = append(args, fmt.Sprintf("--ldap.group-search-dn=%s", o.GroupSearchDN))
}
if o.GroupSearchFilter != "" {
args = append(args, fmt.Sprintf("--ldap.group-search-filter=%s", o.GroupSearchFilter))
}
if o.GroupMemberAttribute != "" {
args = append(args, fmt.Sprintf("--ldap.group-member-attribute=%s", o.GroupMemberAttribute))
}
if o.GroupNameAttribute != "" {
args = append(args, fmt.Sprintf("--ldap.group-name-attribute=%s", o.GroupNameAttribute))
}
if o.SkipTLSVerification {
args = append(args, "--ldap.skip-tls-verification")
}
if o.IsSecureLDAP {
args = append(args, "--ldap.is-secure-ldap")
}
if o.StartTLS {
args = append(args, "--ldap.start-tls")
}
if o.CaCertFile != "" {
args = append(args, fmt.Sprintf("--ldap.ca-cert-file=/etc/guard/auth/ldap/ca.crt"))
}
if o.ServiceAccountName != "" {
args = append(args, fmt.Sprintf("--ldap.service-account=%s", o.ServiceAccountName))
}
if o.KeytabFile != "" {
args = append(args, fmt.Sprintf("--ldap.keytab-file=/etc/guard/auth/ldap/krb5.keytab"))
}
args = append(args, fmt.Sprintf("--ldap.auth-choice=%v", o.AuthenticationChoice))
container.Args = args
d.Spec.Template.Spec.Containers[0] = container
return extraObjs, nil
}
| [
"\"LDAP_BIND_DN\"",
"\"LDAP_BIND_PASSWORD\""
]
| []
| [
"LDAP_BIND_PASSWORD",
"LDAP_BIND_DN"
]
| [] | ["LDAP_BIND_PASSWORD", "LDAP_BIND_DN"] | go | 2 | 0 | |
build.go | // +build ignore
package main
import (
"bytes"
"crypto/md5"
"crypto/sha256"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"time"
)
var (
versionRe = regexp.MustCompile(`-[0-9]{1,3}-g[0-9a-f]{5,10}`)
goarch string
goos string
gocc string
gocxx string
cgo string
pkgArch string
version string = "v1"
// deb & rpm does not support semver so have to handle their version a little differently
linuxPackageVersion string = "v1"
linuxPackageIteration string = ""
race bool
phjsToRelease string
workingDir string
includeBuildNumber bool = true
buildNumber int = 0
binaries []string = []string{"grafana-server", "grafana-cli"}
)
const minGoVersion = 1.8
func main() {
log.SetOutput(os.Stdout)
log.SetFlags(0)
ensureGoPath()
flag.StringVar(&goarch, "goarch", runtime.GOARCH, "GOARCH")
flag.StringVar(&goos, "goos", runtime.GOOS, "GOOS")
flag.StringVar(&gocc, "cc", "", "CC")
flag.StringVar(&gocxx, "cxx", "", "CXX")
flag.StringVar(&cgo, "cgo-enabled", "", "CGO_ENABLED")
flag.StringVar(&pkgArch, "pkg-arch", "", "PKG ARCH")
flag.StringVar(&phjsToRelease, "phjs", "", "PhantomJS binary")
flag.BoolVar(&race, "race", race, "Use race detector")
flag.BoolVar(&includeBuildNumber, "includeBuildNumber", includeBuildNumber, "IncludeBuildNumber in package name")
flag.IntVar(&buildNumber, "buildNumber", 0, "Build number from CI system")
flag.Parse()
readVersionFromPackageJson()
log.Printf("Version: %s, Linux Version: %s, Package Iteration: %s\n", version, linuxPackageVersion, linuxPackageIteration)
if flag.NArg() == 0 {
log.Println("Usage: go run build.go build")
return
}
workingDir, _ = os.Getwd()
for _, cmd := range flag.Args() {
switch cmd {
case "setup":
setup()
case "build-cli":
clean()
build("grafana-cli", "./pkg/cmd/grafana-cli", []string{})
case "build":
clean()
for _, binary := range binaries {
build(binary, "./pkg/cmd/"+binary, []string{})
}
case "test":
test("./pkg/...")
grunt("test")
case "package":
grunt(gruntBuildArg("release")...)
if runtime.GOOS != "windows" {
createLinuxPackages()
}
case "pkg-rpm":
grunt(gruntBuildArg("release")...)
createRpmPackages()
case "pkg-deb":
grunt(gruntBuildArg("release")...)
createDebPackages()
case "sha-dist":
shaFilesInDist()
case "latest":
makeLatestDistCopies()
case "clean":
clean()
default:
log.Fatalf("Unknown command %q", cmd)
}
}
}
func makeLatestDistCopies() {
files, err := ioutil.ReadDir("dist")
if err != nil {
log.Fatalf("failed to create latest copies. Cannot read from /dist")
}
latestMapping := map[string]string{
".deb": "dist/grafana_latest_amd64.deb",
".rpm": "dist/grafana-latest-1.x86_64.rpm",
".tar.gz": "dist/grafana-latest.linux-x64.tar.gz",
}
for _, file := range files {
for extension, fullName := range latestMapping {
if strings.HasSuffix(file.Name(), extension) {
runError("cp", path.Join("dist", file.Name()), fullName)
}
}
}
}
func readVersionFromPackageJson() {
reader, err := os.Open("package.json")
if err != nil {
log.Fatal("Failed to open package.json")
return
}
defer reader.Close()
jsonObj := map[string]interface{}{}
jsonParser := json.NewDecoder(reader)
if err := jsonParser.Decode(&jsonObj); err != nil {
log.Fatal("Failed to decode package.json")
}
version = jsonObj["version"].(string)
linuxPackageVersion = version
linuxPackageIteration = ""
// handle the pre-release part of the version (deb / rpm do not support semver)
parts := strings.Split(version, "-")
if len(parts) > 1 {
linuxPackageVersion = parts[0]
linuxPackageIteration = parts[1]
}
// add timestamp to iteration
if includeBuildNumber {
if buildNumber != 0 {
linuxPackageIteration = fmt.Sprintf("%d%s", buildNumber, linuxPackageIteration)
} else {
linuxPackageIteration = fmt.Sprintf("%d%s", time.Now().Unix(), linuxPackageIteration)
}
}
}
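// Illustrative sketch (not part of the original build script): how a pre-release
// version read from package.json is split by the logic above, assuming a
// hypothetical "5.1.0-beta1" version and buildNumber 42.
func exampleVersionSplit() (pkgVersion string, pkgIteration string) {
	parts := strings.Split("5.1.0-beta1", "-")
	pkgVersion = parts[0] // "5.1.0"
	if len(parts) > 1 {
		pkgIteration = fmt.Sprintf("%d%s", 42, parts[1]) // "42beta1"
	}
	return pkgVersion, pkgIteration
}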
type linuxPackageOptions struct {
packageType string
homeDir string
binPath string
serverBinPath string
cliBinPath string
configDir string
ldapFilePath string
etcDefaultPath string
etcDefaultFilePath string
initdScriptFilePath string
systemdServiceFilePath string
postinstSrc string
initdScriptSrc string
defaultFileSrc string
systemdFileSrc string
depends []string
}
func createDebPackages() {
createPackage(linuxPackageOptions{
packageType: "deb",
homeDir: "/usr/share/grafana",
binPath: "/usr/sbin",
configDir: "/etc/grafana",
etcDefaultPath: "/etc/default",
etcDefaultFilePath: "/etc/default/grafana-server",
initdScriptFilePath: "/etc/init.d/grafana-server",
systemdServiceFilePath: "/usr/lib/systemd/system/grafana-server.service",
postinstSrc: "packaging/deb/control/postinst",
initdScriptSrc: "packaging/deb/init.d/grafana-server",
defaultFileSrc: "packaging/deb/default/grafana-server",
systemdFileSrc: "packaging/deb/systemd/grafana-server.service",
depends: []string{"adduser", "libfontconfig"},
})
}
func createRpmPackages() {
createPackage(linuxPackageOptions{
packageType: "rpm",
homeDir: "/usr/share/grafana",
binPath: "/usr/sbin",
configDir: "/etc/grafana",
etcDefaultPath: "/etc/sysconfig",
etcDefaultFilePath: "/etc/sysconfig/grafana-server",
initdScriptFilePath: "/etc/init.d/grafana-server",
systemdServiceFilePath: "/usr/lib/systemd/system/grafana-server.service",
postinstSrc: "packaging/rpm/control/postinst",
initdScriptSrc: "packaging/rpm/init.d/grafana-server",
defaultFileSrc: "packaging/rpm/sysconfig/grafana-server",
systemdFileSrc: "packaging/rpm/systemd/grafana-server.service",
depends: []string{"/sbin/service", "fontconfig", "freetype", "urw-fonts"},
})
}
func createLinuxPackages() {
createDebPackages()
createRpmPackages()
}
func createPackage(options linuxPackageOptions) {
packageRoot, _ := ioutil.TempDir("", "grafana-linux-pack")
// create directories
runPrint("mkdir", "-p", filepath.Join(packageRoot, options.homeDir))
runPrint("mkdir", "-p", filepath.Join(packageRoot, options.configDir))
runPrint("mkdir", "-p", filepath.Join(packageRoot, "/etc/init.d"))
runPrint("mkdir", "-p", filepath.Join(packageRoot, options.etcDefaultPath))
runPrint("mkdir", "-p", filepath.Join(packageRoot, "/usr/lib/systemd/system"))
runPrint("mkdir", "-p", filepath.Join(packageRoot, "/usr/sbin"))
// copy binary
for _, binary := range binaries {
runPrint("cp", "-p", filepath.Join(workingDir, "tmp/bin/"+binary), filepath.Join(packageRoot, "/usr/sbin/"+binary))
}
// copy init.d script
runPrint("cp", "-p", options.initdScriptSrc, filepath.Join(packageRoot, options.initdScriptFilePath))
// copy environment var file
runPrint("cp", "-p", options.defaultFileSrc, filepath.Join(packageRoot, options.etcDefaultFilePath))
// copy systemd file
runPrint("cp", "-p", options.systemdFileSrc, filepath.Join(packageRoot, options.systemdServiceFilePath))
// copy release files
runPrint("cp", "-a", filepath.Join(workingDir, "tmp")+"/.", filepath.Join(packageRoot, options.homeDir))
// remove bin path
runPrint("rm", "-rf", filepath.Join(packageRoot, options.homeDir, "bin"))
args := []string{
"-s", "dir",
"--description", "Grafana",
"-C", packageRoot,
"--vendor", "Grafana",
"--url", "https://grafana.com",
"--license", "\"Apache 2.0\"",
"--maintainer", "[email protected]",
"--config-files", options.initdScriptFilePath,
"--config-files", options.etcDefaultFilePath,
"--config-files", options.systemdServiceFilePath,
"--after-install", options.postinstSrc,
"--name", "grafana",
"--version", linuxPackageVersion,
"-p", "./dist",
}
if options.packageType == "rpm" {
args = append(args, "--rpm-posttrans", "packaging/rpm/control/posttrans")
}
if options.packageType == "deb" {
args = append(args, "--deb-no-default-config-files")
}
if pkgArch != "" {
args = append(args, "-a", pkgArch)
}
if linuxPackageIteration != "" {
args = append(args, "--iteration", linuxPackageIteration)
}
// add dependencies
for _, dep := range options.depends {
args = append(args, "--depends", dep)
}
args = append(args, ".")
fmt.Println("Creating package: ", options.packageType)
runPrint("fpm", append([]string{"-t", options.packageType}, args...)...)
}
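// For a deb build, the fpm invocation above expands to roughly the following command
// line (illustrative only; the exact output depends on the temp dir and version):
//   fpm -t deb -s dir --description Grafana -C <packageRoot> --vendor Grafana \
//     --url https://grafana.com --license "Apache 2.0" --maintainer [email protected] \
//     --config-files /etc/init.d/grafana-server --after-install packaging/deb/control/postinst \
//     --name grafana --version <linuxPackageVersion> -p ./dist --deb-no-default-config-files .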
func verifyGitRepoIsClean() {
rs, err := runError("git", "ls-files", "--modified")
if err != nil {
log.Fatalf("Failed to check if git tree was clean, %v, %v\n", string(rs), err)
return
}
count := len(string(rs))
if count > 0 {
log.Fatalf("Git repository has modified files, aborting")
}
log.Println("Git repository is clean")
}
func ensureGoPath() {
if os.Getenv("GOPATH") == "" {
cwd, err := os.Getwd()
if err != nil {
log.Fatal(err)
}
gopath := filepath.Clean(filepath.Join(cwd, "../../../../"))
log.Println("GOPATH is", gopath)
os.Setenv("GOPATH", gopath)
}
}
func ChangeWorkingDir(dir string) {
os.Chdir(dir)
}
func grunt(params ...string) {
if runtime.GOOS == "windows" {
runPrint(`.\node_modules\.bin\grunt`, params...)
} else {
runPrint("./node_modules/.bin/grunt", params...)
}
}
func gruntBuildArg(task string) []string {
args := []string{task}
if includeBuildNumber {
args = append(args, fmt.Sprintf("--pkgVer=%v-%v", linuxPackageVersion, linuxPackageIteration))
} else {
args = append(args, fmt.Sprintf("--pkgVer=%v", version))
}
if pkgArch != "" {
args = append(args, fmt.Sprintf("--arch=%v", pkgArch))
}
if phjsToRelease != "" {
args = append(args, fmt.Sprintf("--phjsToRelease=%v", phjsToRelease))
}
return args
}
func setup() {
runPrint("go", "get", "-v", "github.com/kardianos/govendor")
runPrint("go", "install", "-v", "./pkg/cmd/grafana-server")
}
func test(pkg string) {
setBuildEnv()
runPrint("go", "test", "-short", "-timeout", "60s", pkg)
}
func build(binaryName, pkg string, tags []string) {
binary := "./bin/" + binaryName
if goos == "windows" {
binary += ".exe"
}
rmr(binary, binary+".md5")
args := []string{"build", "-ldflags", ldflags()}
if len(tags) > 0 {
args = append(args, "-tags", strings.Join(tags, ","))
}
if race {
args = append(args, "-race")
}
args = append(args, "-o", binary)
args = append(args, pkg)
setBuildEnv()
runPrint("go", "version")
runPrint("go", args...)
// Create an md5 checksum of the binary, to be included in the archive for
// automatic upgrades.
err := md5File(binary)
if err != nil {
log.Fatal(err)
}
}
func ldflags() string {
var b bytes.Buffer
b.WriteString("-w")
b.WriteString(fmt.Sprintf(" -X main.version=%s", version))
b.WriteString(fmt.Sprintf(" -X main.commit=%s", getGitSha()))
b.WriteString(fmt.Sprintf(" -X main.buildstamp=%d", buildStamp()))
return b.String()
}
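// The resulting linker flags look roughly like this (illustrative values only):
//   -w -X main.version=5.1.0-beta1 -X main.commit=abc1234 -X main.buildstamp=1514764800
// so the grafana-server and grafana-cli binaries report the embedded version, commit and build time.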
func rmr(paths ...string) {
for _, path := range paths {
log.Println("rm -r", path)
os.RemoveAll(path)
}
}
func clean() {
rmr("dist")
rmr("tmp")
rmr(filepath.Join(os.Getenv("GOPATH"), fmt.Sprintf("pkg/%s_%s/github.com/grafana", goos, goarch)))
}
func setBuildEnv() {
os.Setenv("GOOS", goos)
if strings.HasPrefix(goarch, "armv") {
os.Setenv("GOARCH", "arm")
os.Setenv("GOARM", goarch[4:])
} else {
os.Setenv("GOARCH", goarch)
}
if goarch == "386" {
os.Setenv("GO386", "387")
}
if cgo != "" {
os.Setenv("CGO_ENABLED", cgo)
}
if gocc != "" {
os.Setenv("CC", gocc)
}
if gocxx != "" {
os.Setenv("CXX", gocxx)
}
}
func getGitSha() string {
v, err := runError("git", "rev-parse", "--short", "HEAD")
if err != nil {
return "unknown-dev"
}
return string(v)
}
func buildStamp() int64 {
bs, err := runError("git", "show", "-s", "--format=%ct")
if err != nil {
return time.Now().Unix()
}
s, _ := strconv.ParseInt(string(bs), 10, 64)
return s
}
func buildArch() string {
os := goos
if os == "darwin" {
os = "macosx"
}
return fmt.Sprintf("%s-%s", os, goarch)
}
func run(cmd string, args ...string) []byte {
bs, err := runError(cmd, args...)
if err != nil {
log.Println(cmd, strings.Join(args, " "))
log.Println(string(bs))
log.Fatal(err)
}
return bytes.TrimSpace(bs)
}
func runError(cmd string, args ...string) ([]byte, error) {
ecmd := exec.Command(cmd, args...)
bs, err := ecmd.CombinedOutput()
if err != nil {
return nil, err
}
return bytes.TrimSpace(bs), nil
}
func runPrint(cmd string, args ...string) {
log.Println(cmd, strings.Join(args, " "))
ecmd := exec.Command(cmd, args...)
ecmd.Stdout = os.Stdout
ecmd.Stderr = os.Stderr
err := ecmd.Run()
if err != nil {
log.Fatal(err)
}
}
func md5File(file string) error {
fd, err := os.Open(file)
if err != nil {
return err
}
defer fd.Close()
h := md5.New()
_, err = io.Copy(h, fd)
if err != nil {
return err
}
out, err := os.Create(file + ".md5")
if err != nil {
return err
}
_, err = fmt.Fprintf(out, "%x\n", h.Sum(nil))
if err != nil {
return err
}
return out.Close()
}
func shaFilesInDist() {
filepath.Walk("./dist", func(path string, f os.FileInfo, err error) error {
if path == "./dist" {
return nil
}
if strings.Contains(path, ".sha256") == false {
err := shaFile(path)
if err != nil {
log.Printf("Failed to create sha file. error: %v\n", err)
}
}
return nil
})
}
func shaFile(file string) error {
fd, err := os.Open(file)
if err != nil {
return err
}
defer fd.Close()
h := sha256.New()
_, err = io.Copy(h, fd)
if err != nil {
return err
}
out, err := os.Create(file + ".sha256")
if err != nil {
return err
}
_, err = fmt.Fprintf(out, "%x\n", h.Sum(nil))
if err != nil {
return err
}
return out.Close()
}
| [
"\"GOPATH\"",
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
testing/testing.go | package testing
import (
"crypto/tls"
"fmt"
"net/http"
"os"
"strconv"
"strings"
stripe "github.com/stripe/stripe-go/v72"
"github.com/stripe/stripe-go/v72/form"
"golang.org/x/net/http2"
)
// This file should contain any testing helpers that should be commonly
// available across all tests in the Stripe package.
//
// There's not much in here because it's a relatively recent addition to the
// package, but it should be used as appropriate for any new changes.
const (
// MockMinimumVersion is the minimum acceptable version for stripe-mock.
// It's here so that if the library depends on new endpoints or features
// added in a more recent version of stripe-mock, we can show people a
// better error message instead of the test suite crashing with a bunch of
// confusing 404 errors or the like.
MockMinimumVersion = "0.103.0"
// TestMerchantID is a token that can be used to represent a merchant ID in
// simple tests.
TestMerchantID = "acct_123"
)
func init() {
// Enable strict mode on form encoding so that we'll panic if any kind of
// malformed param struct is detected
form.Strict = true
port := os.Getenv("STRIPE_MOCK_PORT")
if port == "" {
port = "12112"
}
// stripe-mock's certificate for localhost is self-signed so configure a
// specialized client that skips the certificate authority check.
transport := &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
}
// Go can often enable HTTP/2 automatically if it's supported, but
// confusingly, if you set `TLSClientConfig`, it disables it and you have
// to explicitly invoke http2's `ConfigureTransport` to get it back.
//
// See the incorrectly closed bug report here:
//
// https://github.com/golang/go/issues/20645
//
err := http2.ConfigureTransport(transport)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to initialize HTTP/2 transport: %v\n", err)
os.Exit(1)
}
httpClient := &http.Client{
Transport: transport,
}
resp, err := httpClient.Get("https://localhost:" + port)
if err != nil {
fmt.Fprintf(os.Stderr, "Couldn't reach stripe-mock at `localhost:%s` (%v). Is "+
"it running? Please see README for setup instructions.\n", port, err)
os.Exit(1)
}
version := resp.Header.Get("Stripe-Mock-Version")
if version != "master" && compareVersions(version, MockMinimumVersion) > 0 {
fmt.Fprintf(os.Stderr, "Your version of stripe-mock (%s) is too old. The "+
"minimum version to run this test suite is %s. Please see its "+
"repository for upgrade instructions.\n", version, MockMinimumVersion)
os.Exit(1)
}
stripe.Key = "sk_test_myTestKey"
// Configure a backend for stripe-mock and set it for both the API and
// Uploads (unlike the real Stripe API, stripe-mock supports both these
// backends).
stripeMockBackend := stripe.GetBackendWithConfig(
stripe.APIBackend,
&stripe.BackendConfig{
URL: stripe.String("https://localhost:" + port),
HTTPClient: httpClient,
LeveledLogger: stripe.DefaultLeveledLogger,
},
)
stripe.SetBackend(stripe.APIBackend, stripeMockBackend)
stripe.SetBackend(stripe.UploadsBackend, stripeMockBackend)
}
// compareVersions compares two semantic version strings. We need this because
// with more complex double-digit numbers, lexical comparison breaks down.
func compareVersions(a, b string) (ret int) {
as := strings.Split(a, ".")
bs := strings.Split(b, ".")
loopMax := len(bs)
if len(as) > len(bs) {
loopMax = len(as)
}
for i := 0; i < loopMax; i++ {
var x, y string
if len(as) > i {
x = as[i]
}
if len(bs) > i {
y = bs[i]
}
xi, _ := strconv.Atoi(x)
yi, _ := strconv.Atoi(y)
if xi > yi {
ret = -1
} else if xi < yi {
ret = 1
}
if ret != 0 {
break
}
}
return
}
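// Usage sketch (added for clarity, not part of the original helper): compareVersions
// returns a negative value when the first version is newer, so the init() check above
// treats a positive result as "stripe-mock is older than the minimum".
//   compareVersions("0.110.0", "0.103.0") // -1: newer than the minimum
//   compareVersions("0.103.0", "0.103.0") //  0: exactly the minimum
//   compareVersions("0.99.0", "0.103.0")  //  1: older than the minimum, test suite exits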
| [
"\"STRIPE_MOCK_PORT\""
]
| []
| [
"STRIPE_MOCK_PORT"
]
| [] | ["STRIPE_MOCK_PORT"] | go | 1 | 0 | |
build/PureCloudPlatformClientV2/models/system_prompt_asset_entity_listing.py | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class SystemPromptAssetEntityListing(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
SystemPromptAssetEntityListing - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'entities': 'list[SystemPromptAsset]',
'page_size': 'int',
'page_number': 'int',
'total': 'int',
'first_uri': 'str',
'self_uri': 'str',
'next_uri': 'str',
'previous_uri': 'str',
'last_uri': 'str',
'page_count': 'int'
}
self.attribute_map = {
'entities': 'entities',
'page_size': 'pageSize',
'page_number': 'pageNumber',
'total': 'total',
'first_uri': 'firstUri',
'self_uri': 'selfUri',
'next_uri': 'nextUri',
'previous_uri': 'previousUri',
'last_uri': 'lastUri',
'page_count': 'pageCount'
}
self._entities = None
self._page_size = None
self._page_number = None
self._total = None
self._first_uri = None
self._self_uri = None
self._next_uri = None
self._previous_uri = None
self._last_uri = None
self._page_count = None
@property
def entities(self):
"""
Gets the entities of this SystemPromptAssetEntityListing.
:return: The entities of this SystemPromptAssetEntityListing.
:rtype: list[SystemPromptAsset]
"""
return self._entities
@entities.setter
def entities(self, entities):
"""
Sets the entities of this SystemPromptAssetEntityListing.
:param entities: The entities of this SystemPromptAssetEntityListing.
:type: list[SystemPromptAsset]
"""
self._entities = entities
@property
def page_size(self):
"""
Gets the page_size of this SystemPromptAssetEntityListing.
:return: The page_size of this SystemPromptAssetEntityListing.
:rtype: int
"""
return self._page_size
@page_size.setter
def page_size(self, page_size):
"""
Sets the page_size of this SystemPromptAssetEntityListing.
:param page_size: The page_size of this SystemPromptAssetEntityListing.
:type: int
"""
self._page_size = page_size
@property
def page_number(self):
"""
Gets the page_number of this SystemPromptAssetEntityListing.
:return: The page_number of this SystemPromptAssetEntityListing.
:rtype: int
"""
return self._page_number
@page_number.setter
def page_number(self, page_number):
"""
Sets the page_number of this SystemPromptAssetEntityListing.
:param page_number: The page_number of this SystemPromptAssetEntityListing.
:type: int
"""
self._page_number = page_number
@property
def total(self):
"""
Gets the total of this SystemPromptAssetEntityListing.
:return: The total of this SystemPromptAssetEntityListing.
:rtype: int
"""
return self._total
@total.setter
def total(self, total):
"""
Sets the total of this SystemPromptAssetEntityListing.
:param total: The total of this SystemPromptAssetEntityListing.
:type: int
"""
self._total = total
@property
def first_uri(self):
"""
Gets the first_uri of this SystemPromptAssetEntityListing.
:return: The first_uri of this SystemPromptAssetEntityListing.
:rtype: str
"""
return self._first_uri
@first_uri.setter
def first_uri(self, first_uri):
"""
Sets the first_uri of this SystemPromptAssetEntityListing.
:param first_uri: The first_uri of this SystemPromptAssetEntityListing.
:type: str
"""
self._first_uri = first_uri
@property
def self_uri(self):
"""
Gets the self_uri of this SystemPromptAssetEntityListing.
:return: The self_uri of this SystemPromptAssetEntityListing.
:rtype: str
"""
return self._self_uri
@self_uri.setter
def self_uri(self, self_uri):
"""
Sets the self_uri of this SystemPromptAssetEntityListing.
:param self_uri: The self_uri of this SystemPromptAssetEntityListing.
:type: str
"""
self._self_uri = self_uri
@property
def next_uri(self):
"""
Gets the next_uri of this SystemPromptAssetEntityListing.
:return: The next_uri of this SystemPromptAssetEntityListing.
:rtype: str
"""
return self._next_uri
@next_uri.setter
def next_uri(self, next_uri):
"""
Sets the next_uri of this SystemPromptAssetEntityListing.
:param next_uri: The next_uri of this SystemPromptAssetEntityListing.
:type: str
"""
self._next_uri = next_uri
@property
def previous_uri(self):
"""
Gets the previous_uri of this SystemPromptAssetEntityListing.
:return: The previous_uri of this SystemPromptAssetEntityListing.
:rtype: str
"""
return self._previous_uri
@previous_uri.setter
def previous_uri(self, previous_uri):
"""
Sets the previous_uri of this SystemPromptAssetEntityListing.
:param previous_uri: The previous_uri of this SystemPromptAssetEntityListing.
:type: str
"""
self._previous_uri = previous_uri
@property
def last_uri(self):
"""
Gets the last_uri of this SystemPromptAssetEntityListing.
:return: The last_uri of this SystemPromptAssetEntityListing.
:rtype: str
"""
return self._last_uri
@last_uri.setter
def last_uri(self, last_uri):
"""
Sets the last_uri of this SystemPromptAssetEntityListing.
:param last_uri: The last_uri of this SystemPromptAssetEntityListing.
:type: str
"""
self._last_uri = last_uri
@property
def page_count(self):
"""
Gets the page_count of this SystemPromptAssetEntityListing.
:return: The page_count of this SystemPromptAssetEntityListing.
:rtype: int
"""
return self._page_count
@page_count.setter
def page_count(self, page_count):
"""
Sets the page_count of this SystemPromptAssetEntityListing.
:param page_count: The page_count of this SystemPromptAssetEntityListing.
:type: int
"""
self._page_count = page_count
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| []
| []
| []
| [] | [] | python | null | null | null |
server/src/app.py | # -*- coding:utf-8 -*-
from flask import Flask, render_template
import os
from fund.smtam import *
import inspect
from jinja2 import Environment, FileSystemLoader
app = Flask(__name__, static_folder='static', template_folder='templates')
env = Environment(
loader=FileSystemLoader(
"./templates/",
encoding="utf8",
)
)
@app.route("/")
def root():
return render_template("index.html")
@app.route("/isin/<isin_code>")
def root_isin(isin_code: str):
# Requests under /isin/ are rendered with SmtamTemplate
rep = SmtamTemplate(isin_code)
params = inspect.getmembers(rep)
return render_template(rep.template_file, **dict(params))
@app.route("/design_b/<isin_code>")
def root_bulma(isin_code: str):
# Requests under /design_b/ are rendered with template_2
rep = SmtamTemplate(isin_code)
rep.template_file = 'smtam_template_2.html'
params = inspect.getmembers(rep)
return render_template(rep.template_file, **dict(params))
if __name__ == "__main__":
# Start the development server
app.run(debug=True, host='0.0.0.0', port=int(os.environ.get('PORT', 8080)))
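# Example requests against the routes above (illustrative; the ISIN code is a placeholder):
#   curl http://localhost:8080/isin/JP90C000XXXX      # rendered with the default template
#   curl http://localhost:8080/design_b/JP90C000XXXX  # rendered with smtam_template_2.html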
| []
| []
| [
"PORT"
]
| [] | ["PORT"] | python | 1 | 0 | |
cloud-control-manager/cloud-driver/drivers/azure/main/Test_Resources.go | package main
import (
"fmt"
cblog "github.com/cloud-barista/cb-log"
azdrv "github.com/cloud-barista/poc-cicd-spider/cloud-control-manager/cloud-driver/drivers/azure"
idrv "github.com/cloud-barista/poc-cicd-spider/cloud-control-manager/cloud-driver/interfaces"
irs "github.com/cloud-barista/poc-cicd-spider/cloud-control-manager/cloud-driver/interfaces/resources"
"github.com/davecgh/go-spew/spew"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v3"
"io/ioutil"
"os"
)
var cblogger *logrus.Logger
func init() {
// cblog is a global variable.
cblogger = cblog.GetLogger("CB-SPIDER")
}
func testImageHandler(config Config) {
resourceHandler, err := getResourceHandler("image")
if err != nil {
cblogger.Error(err)
}
imageHandler := resourceHandler.(irs.ImageHandler)
cblogger.Info("Test ImageHandler")
cblogger.Info("1. ListImage()")
cblogger.Info("2. GetImage()")
cblogger.Info("3. CreateImage()")
cblogger.Info("4. DeleteImage()")
cblogger.Info("5. Exit")
imageId := "Canonical:UbuntuServer:18.04-LTS:18.04.201804262"
Loop:
for {
var commandNum int
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
cblogger.Error(err)
}
if inputCnt == 1 {
switch commandNum {
case 1:
cblogger.Info("Start ListImage() ...")
if _, err := imageHandler.ListImage(); err != nil {
cblogger.Error(err)
} else {
//spew.Dump(list)
}
cblogger.Info("Finish ListImage()")
case 2:
cblogger.Info("Start GetImage() ...")
if imageInfo, err := imageHandler.GetImage(irs.IID{NameId: imageId}); err != nil {
cblogger.Error(err)
} else {
spew.Dump(imageInfo)
}
cblogger.Info("Finish GetImage()")
case 3:
cblogger.Info("Start CreateImage() ...")
/*reqInfo := irs.ImageReqInfo{Name: imageId}
if imageInfo, err := imageHandler.CreateImage(reqInfo); err != nil {
cblogger.Error(err)
} else {
spew.Dump(imageInfo)
}*/
cblogger.Info("Finish CreateImage()")
case 4:
cblogger.Info("Start DeleteImage() ...")
/*if ok, err := imageHandler.DeleteImage(imageId); !ok {
cblogger.Error(err)
}*/
cblogger.Info("Finish DeleteImage()")
case 5:
cblogger.Info("Exit")
break Loop
}
}
}
}
/*func testPublicIPHanlder(config Config) {
resourceHandler, err := getResourceHandler("publicip")
if err != nil {
cblogger.Error(err)
}
publicIPHandler := resourceHandler.(irs.PublicIPHandler)
cblogger.Info("Test PublicIPHandler")
cblogger.Info("1. ListPublicIP()")
cblogger.Info("2. GetPublicIP()")
cblogger.Info("3. CreatePublicIP()")
cblogger.Info("4. DeletePublicIP()")
cblogger.Info("5. Exit")
publicIPId := "CB-PublicIP"
Loop:
for {
var commandNum int
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
cblogger.Error(err)
}
if inputCnt == 1 {
switch commandNum {
case 1:
cblogger.Info("Start ListPublicIP() ...")
if list, err := publicIPHandler.ListPublicIP(); err != nil {
cblogger.Error(err)
} else {
spew.Dump(list)
}
cblogger.Info("Finish ListPublicIP()")
case 2:
cblogger.Info("Start GetPublicIP() ...")
if publicIpInfo, err := publicIPHandler.GetPublicIP(publicIPId); err != nil {
cblogger.Error(err)
} else {
spew.Dump(publicIpInfo)
}
cblogger.Info("Finish GetPublicIP()")
case 3:
cblogger.Info("Start CreatePublicIP() ...")
reqInfo := irs.PublicIPReqInfo{Name: publicIPId}
if publicIpInfo, err := publicIPHandler.CreatePublicIP(reqInfo); err != nil {
cblogger.Error(err)
} else {
spew.Dump(publicIpInfo)
}
cblogger.Info("Finish CreatePublicIP()")
case 4:
cblogger.Info("Start DeletePublicIP() ...")
if ok, err := publicIPHandler.DeletePublicIP(publicIPId); !ok {
cblogger.Error(err)
}
cblogger.Info("Finish DeletePublicIP()")
case 5:
cblogger.Info("Exit")
break Loop
}
}
}
}*/
func testSecurityHandler(config Config) {
resourceHandler, err := getResourceHandler("security")
if err != nil {
cblogger.Error(err)
}
securityHandler := resourceHandler.(irs.SecurityHandler)
cblogger.Info("Test SecurityHandler")
cblogger.Info("1. ListSecurity()")
cblogger.Info("2. GetSecurity()")
cblogger.Info("3. CreateSecurity()")
cblogger.Info("4. DeleteSecurity()")
cblogger.Info("5. Exit")
iid := irs.IID{
NameId: "CB-SecGroup",
SystemId: "CB-SecGroup",
}
Loop:
for {
var commandNum int
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
cblogger.Error(err)
}
if inputCnt == 1 {
switch commandNum {
case 1:
cblogger.Info("Start ListSecurity() ...")
if list, err := securityHandler.ListSecurity(); err != nil {
cblogger.Error(err)
} else {
spew.Dump(list)
}
cblogger.Info("Finish ListSecurity()")
case 2:
cblogger.Info("Start GetSecurity() ...")
if securityInfo, err := securityHandler.GetSecurity(iid); err != nil {
cblogger.Error(err)
} else {
spew.Dump(securityInfo)
}
cblogger.Info("Finish GetSecurity()")
case 3:
cblogger.Info("Start CreateSecurity() ...")
reqInfo := irs.SecurityReqInfo{
IId: iid,
SecurityRules: &[]irs.SecurityRuleInfo{
{
FromPort: "22",
ToPort: "22",
IPProtocol: "TCP",
Direction: "inbound",
},
{
FromPort: "3306",
ToPort: "3306",
IPProtocol: "TCP",
Direction: "inbound",
},
},
}
if securityInfo, err := securityHandler.CreateSecurity(reqInfo); err != nil {
cblogger.Error(err)
} else {
spew.Dump(securityInfo)
}
cblogger.Info("Finish CreateSecurity()")
case 4:
cblogger.Info("Start DeleteSecurity() ...")
if ok, err := securityHandler.DeleteSecurity(iid); !ok {
cblogger.Error(err)
}
cblogger.Info("Finish DeleteSecurity()")
case 5:
cblogger.Info("Exit")
break Loop
}
}
}
}
/*func testVNetworkHandler(config Config) {
resourceHandler, err := getResourceHandler("vnetwork")
if err != nil {
cblogger.Error(err)
}
vNetworkHandler := resourceHandler.(irs.VNetworkHandler)
cblogger.Info("Test VNetworkHandler")
cblogger.Info("1. ListVNetwork()")
cblogger.Info("2. GetVNetwork()")
cblogger.Info("3. CreateVNetwork()")
cblogger.Info("4. DeleteVNetwork()")
cblogger.Info("5. Exit")
vNetworkId := "CB-Subnet"
Loop:
for {
var commandNum int
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
cblogger.Error(err)
}
if inputCnt == 1 {
switch commandNum {
case 1:
cblogger.Info("Start ListVNetwork() ...")
if list, err := vNetworkHandler.ListVNetwork(); err != nil {
cblogger.Error(err)
} else {
spew.Dump(list)
}
cblogger.Info("Finish ListVNetwork()")
case 2:
cblogger.Info("Start GetVNetwork() ...")
if vNetInfo, err := vNetworkHandler.GetVNetwork(vNetworkId); err != nil {
cblogger.Error(err)
} else {
spew.Dump(vNetInfo)
}
cblogger.Info("Finish GetVNetwork()")
case 3:
cblogger.Info("Start CreateVNetwork() ...")
reqInfo := irs.VNetworkReqInfo{Name: vNetworkId}
if vNetInfo, err := vNetworkHandler.CreateVNetwork(reqInfo); err != nil {
cblogger.Error(err)
} else {
spew.Dump(vNetInfo)
}
cblogger.Info("Finish CreateVNetwork()")
case 4:
cblogger.Info("Start DeleteVNetwork() ...")
if ok, err := vNetworkHandler.DeleteVNetwork(vNetworkId); !ok {
cblogger.Error(err)
}
cblogger.Info("Finish DeleteVNetwork()")
case 5:
cblogger.Info("Exit")
break Loop
}
}
}
}*/
/*func testVNicHandler(config Config) {
resourceHandler, err := getResourceHandler("vnic")
if err != nil {
cblogger.Error(err)
}
vNicHandler := resourceHandler.(irs.VNicHandler)
cblogger.Info("Test VNicHandler")
cblogger.Info("1. ListVNic()")
cblogger.Info("2. GetVNic()")
cblogger.Info("3. CreateVNic()")
cblogger.Info("4. DeleteVNic()")
cblogger.Info("5. Exit Program")
vNicId := "CB-VNic"
subetName := "CB-Subnet"
segGroupId := "/subscriptions/cb592624-b77b-4a8f-bb13-0e5a48cae40f/resourceGroups/CB-GROUP/providers/Microsoft.Network/networkSecurityGroups/CB-SecGroup"
publicIpId := "/subscriptions/cb592624-b77b-4a8f-bb13-0e5a48cae40f/resourceGroups/CB-GROUP/providers/Microsoft.Network/publicIPAddresses/CB-PublicIP"
Loop:
for {
var commandNum int
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
cblogger.Error(err)
}
if inputCnt == 1 {
switch commandNum {
case 1:
cblogger.Info("Start ListVNic() ...")
if list, err := vNicHandler.ListVNic(); err != nil {
cblogger.Error(err)
} else {
spew.Dump(list)
}
cblogger.Info("Finish ListVNic()")
case 2:
cblogger.Info("Start GetVNic() ...")
if vNicInfo, err := vNicHandler.GetVNic(vNicId); err != nil {
cblogger.Error(err)
} else {
spew.Dump(vNicInfo)
}
cblogger.Info("Finish GetVNic()")
case 3:
cblogger.Info("Start CreateVNic() ...")
reqInfo := irs.VNicReqInfo{
Name: vNicId,
VNetName: subetName,
SecurityGroupIds: []string{segGroupId},
PublicIPid: publicIpId,
}
if vNicInfo, err := vNicHandler.CreateVNic(reqInfo); err != nil {
cblogger.Error(err)
} else {
spew.Dump(vNicInfo)
}
cblogger.Info("Finish CreateVNic()")
case 4:
cblogger.Info("Start DeleteVNic() ...")
if ok, err := vNicHandler.DeleteVNic(vNicId); !ok {
cblogger.Error(err)
}
cblogger.Info("Finish DeleteVNic()")
case 5:
cblogger.Info("Exit Program")
break Loop
}
}
}
}*/
func testVPCHandler(config Config) {
resourceHandler, err := getResourceHandler("vpc")
if err != nil {
cblogger.Error(err)
}
vpcHandler := resourceHandler.(irs.VPCHandler)
cblogger.Info("Test VPCHandler")
cblogger.Info("1. ListVPC()")
cblogger.Info("2. GetVPC()")
cblogger.Info("3. CreateVPC()")
cblogger.Info("4. DeleteVPC()")
cblogger.Info("5. Exit")
vpcId := irs.IID{NameId: "CB-VNet"}
Loop:
for {
var commandNum int
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
cblogger.Error(err)
}
if inputCnt == 1 {
switch commandNum {
case 1:
cblogger.Info("Start ListVPC() ...")
if list, err := vpcHandler.ListVPC(); err != nil {
cblogger.Error(err)
} else {
spew.Dump(list)
}
cblogger.Info("Finish ListVPC()")
case 2:
cblogger.Info("Start GetVPC() ...")
if vNetInfo, err := vpcHandler.GetVPC(vpcId); err != nil {
cblogger.Error(err)
} else {
spew.Dump(vNetInfo)
}
cblogger.Info("Finish GetVPC()")
case 3:
cblogger.Info("Start CreateVPC() ...")
reqInfo := irs.VPCReqInfo{
IId: vpcId,
IPv4_CIDR: "130.0.0.0/16",
SubnetInfoList: []irs.SubnetInfo{
{
IId: irs.IID{
NameId: vpcId.NameId + "-subnet-1",
},
IPv4_CIDR: "130.0.0.0/24",
},
{
IId: irs.IID{
NameId: vpcId.NameId + "-subnet-2",
},
IPv4_CIDR: "130.0.1.0/24",
},
},
}
if vNetInfo, err := vpcHandler.CreateVPC(reqInfo); err != nil {
cblogger.Error(err)
} else {
spew.Dump(vNetInfo)
}
cblogger.Info("Finish CreateVPC()")
case 4:
cblogger.Info("Start DeleteVPC() ...")
if ok, err := vpcHandler.DeleteVPC(vpcId); !ok {
cblogger.Error(err)
}
cblogger.Info("Finish DeleteVPC()")
case 5:
cblogger.Info("Exit")
break Loop
}
}
}
}
func testKeypairHandler(config Config) {
resourceHandler, err := getResourceHandler("keypair")
if err != nil {
cblogger.Error(err)
}
keypairHandler := resourceHandler.(irs.KeyPairHandler)
cblogger.Info("Test KeypairHandler")
cblogger.Info("1. ListKeyPair()")
cblogger.Info("2. GetKeyPair()")
cblogger.Info("3. CreateKeyPair()")
cblogger.Info("4. DeleteKeyPair()")
cblogger.Info("5. Exit Program")
iid := irs.IID{
NameId: "CB-Keypair",
SystemId: "CB-Keypair",
}
Loop:
for {
var commandNum int
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
cblogger.Error(err)
}
if inputCnt == 1 {
switch commandNum {
case 1:
cblogger.Info("Start ListKeyPair() ...")
if list, err := keypairHandler.ListKey(); err != nil {
cblogger.Error(err)
} else {
spew.Dump(list)
}
cblogger.Info("Finish ListKeyPair()")
case 2:
cblogger.Info("Start GetKeyPair() ...")
if vNicInfo, err := keypairHandler.GetKey(iid); err != nil {
cblogger.Error(err)
} else {
spew.Dump(vNicInfo)
}
cblogger.Info("Finish GetKeyPair()")
case 3:
cblogger.Info("Start CreateKeyPair() ...")
reqInfo := irs.KeyPairReqInfo{
IId: iid,
}
if vNicInfo, err := keypairHandler.CreateKey(reqInfo); err != nil {
cblogger.Error(err)
} else {
spew.Dump(vNicInfo)
}
cblogger.Info("Finish CreateKeyPair()")
case 4:
cblogger.Info("Start DeleteKeyPair() ...")
if ok, err := keypairHandler.DeleteKey(iid); !ok {
cblogger.Error(err)
}
cblogger.Info("Finish DeleteKeyPair()")
case 5:
cblogger.Info("Exit Program")
break Loop
}
}
}
}
func testVmSpecHandler(config Config) {
resourceHandler, err := getResourceHandler("vmspec")
if err != nil {
cblogger.Error(err)
}
vmSpecHandler := resourceHandler.(irs.VMSpecHandler)
cblogger.Info("Test VmSpecHandler")
cblogger.Info("1. ListVmSpec()")
cblogger.Info("2. GetVmSpec()")
cblogger.Info("3. ListOrgVmSpec()")
cblogger.Info("4. GetOrgVmSpec()")
cblogger.Info("9. Exit")
vmSpecName := "Standard_F72s_v2ojpijipo"
Loop:
for {
var commandNum int
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
cblogger.Error(err)
}
region := config.Azure.Location
if inputCnt == 1 {
switch commandNum {
case 1:
cblogger.Info("Start ListVmSpec() ...")
if list, err := vmSpecHandler.ListVMSpec(region); err != nil {
cblogger.Error(err)
} else {
spew.Dump(list)
}
cblogger.Info("Finish ListVmSpec()")
case 2:
cblogger.Info("Start GetVmSpec() ...")
if vmSpec, err := vmSpecHandler.GetVMSpec(region, vmSpecName); err != nil {
cblogger.Error(err)
} else {
spew.Dump(vmSpec)
}
cblogger.Info("Finish GetVmSpec()")
case 3:
cblogger.Info("Start ListOrgVmSpec() ...")
if listStr, err := vmSpecHandler.ListOrgVMSpec(region); err != nil {
cblogger.Error(err)
} else {
fmt.Println(listStr)
}
cblogger.Info("Finish ListOrgVmSpec()")
case 4:
cblogger.Info("Start GetOrgVmSpec() ...")
if vmSpecStr, err := vmSpecHandler.GetOrgVMSpec(region, vmSpecName); err != nil {
cblogger.Error(err)
} else {
fmt.Println(vmSpecStr)
}
cblogger.Info("Finish GetOrgVmSpec()")
case 9:
cblogger.Info("Exit")
break Loop
}
}
}
}
func getResourceHandler(resourceType string) (interface{}, error) {
var cloudDriver idrv.CloudDriver
cloudDriver = new(azdrv.AzureDriver)
config := readConfigFile()
connectionInfo := idrv.ConnectionInfo{
CredentialInfo: idrv.CredentialInfo{
ClientId: config.Azure.ClientId,
ClientSecret: config.Azure.ClientSecret,
TenantId: config.Azure.TenantId,
SubscriptionId: config.Azure.SubscriptionID,
},
RegionInfo: idrv.RegionInfo{
Region: config.Azure.Location,
ResourceGroup: config.Azure.GroupName,
},
}
cloudConnection, _ := cloudDriver.ConnectCloud(connectionInfo)
var resourceHandler interface{}
var err error
switch resourceType {
case "image":
resourceHandler, err = cloudConnection.CreateImageHandler()
case "publicip":
//resourceHandler, err = cloudConnection.CreatePublicIPHandler()
case "security":
resourceHandler, err = cloudConnection.CreateSecurityHandler()
case "vnetwork":
//resourceHandler, err = cloudConnection.CreateVNetworkHandler()
case "vpc":
resourceHandler, err = cloudConnection.CreateVPCHandler()
case "vnic":
//resourceHandler, err = cloudConnection.CreateVNicHandler()
case "keypair":
resourceHandler, err = cloudConnection.CreateKeyPairHandler()
case "vmspec":
resourceHandler, err = cloudConnection.CreateVMSpecHandler()
}
if err != nil {
return nil, err
}
return resourceHandler, nil
}
func showTestHandlerInfo() {
cblogger.Info("==========================================================")
cblogger.Info("[Test ResourceHandler]")
cblogger.Info("1. ImageHandler")
cblogger.Info("2. PublicIPHandler")
cblogger.Info("3. SecurityHandler")
cblogger.Info("4. VPCHandler")
cblogger.Info("5. VNicHandler")
cblogger.Info("6. KeyPairHandler")
cblogger.Info("7. VmSpecHandler")
cblogger.Info("8. Exit")
cblogger.Info("==========================================================")
}
func main() {
showTestHandlerInfo() // print the ResourceHandler test menu
config := readConfigFile() // load config.yaml
Loop:
for {
var commandNum int
inputCnt, err := fmt.Scan(&commandNum)
if err != nil {
cblogger.Error(err)
}
if inputCnt == 1 {
switch commandNum {
case 1:
testImageHandler(config)
showTestHandlerInfo()
case 2:
//testPublicIPHanlder(config)
showTestHandlerInfo()
case 3:
testSecurityHandler(config)
showTestHandlerInfo()
case 4:
//testVNetworkHandler(config)
testVPCHandler(config)
showTestHandlerInfo()
case 5:
//testVNicHandler(config)
showTestHandlerInfo()
case 6:
testKeypairHandler(config)
showTestHandlerInfo()
case 7:
testVmSpecHandler(config)
//testKeypairHandler(config)
showTestHandlerInfo()
case 8:
cblogger.Info("Exit Test ResourceHandler Program")
break Loop
}
}
}
}
type Config struct {
Azure struct {
ClientId string `yaml:"client_id"`
ClientSecret string `yaml:"client_secret"`
TenantId string `yaml:"tenant_id"`
SubscriptionID string `yaml:"subscription_id"`
GroupName string `yaml:"group_name"`
VMName string `yaml:"vm_name"`
Location string `yaml:"location"`
VMSize string `yaml:"vm_size"`
Image struct {
Publisher string `yaml:"publisher"`
Offer string `yaml:"offer"`
Sku string `yaml:"sku"`
Version string `yaml:"version"`
} `yaml:"image"`
Os struct {
ComputeName string `yaml:"compute_name"`
AdminUsername string `yaml:"admin_username"`
AdminPassword string `yaml:"admin_password"`
} `yaml:"os"`
Network struct {
ID string `yaml:"id"`
Primary bool `yaml:"primary"`
} `yaml:"network"`
ServerId string `yaml:"server_id"`
ImageInfo struct {
GroupName string `yaml:"group_name"`
Name string `yaml:"name"`
} `yaml:"image_info"`
PublicIP struct {
GroupName string `yaml:"group_name"`
Name string `yaml:"name"`
} `yaml:"public_ip"`
Security struct {
GroupName string `yaml:"group_name"`
Name string `yaml:"name"`
} `yaml:"security_group"`
VNetwork struct {
GroupName string `yaml:"group_name"`
Name string `yaml:"name"`
} `yaml:"virtual_network"`
VNic struct {
GroupName string `yaml:"group_name"`
Name string `yaml:"name"`
} `yaml:"network_interface"`
} `yaml:"azure"`
}
func readConfigFile() Config {
// Set Environment Value of Project Root Path
rootPath := os.Getenv("CBSPIDER_PATH")
fmt.Println(rootPath)
data, err := ioutil.ReadFile(rootPath + "/conf/config.yaml")
if err != nil {
cblogger.Error(err)
}
var config Config
err = yaml.Unmarshal(data, &config)
if err != nil {
cblogger.Error(err)
}
return config
}
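// The loader above reads $CBSPIDER_PATH/conf/config.yaml. A minimal fragment covering the
// fields used by these tests might look like this (placeholder values, not real credentials):
//   azure:
//     client_id: "<application-client-id>"
//     client_secret: "<application-client-secret>"
//     tenant_id: "<tenant-id>"
//     subscription_id: "<subscription-id>"
//     group_name: "CB-GROUP"
//     location: "koreacentral"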
| [
"\"CBSPIDER_PATH\""
]
| []
| [
"CBSPIDER_PATH"
]
| [] | ["CBSPIDER_PATH"] | go | 1 | 0 | |
plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.hypervisor.xenserver.resource;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.net.URLConnection;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Properties;
import java.util.Queue;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.TimeoutException;
import javax.naming.ConfigurationException;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.BooleanUtils;
import org.apache.log4j.Logger;
import org.apache.xmlrpc.XmlRpcException;
import org.joda.time.Duration;
import org.w3c.dom.Document;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
import com.cloud.agent.IAgentControl;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.Command;
import com.cloud.agent.api.GetHostStatsCommand;
import com.cloud.agent.api.GetVmStatsCommand;
import com.cloud.agent.api.HostStatsEntry;
import com.cloud.agent.api.HostVmStateReportEntry;
import com.cloud.agent.api.PingCommand;
import com.cloud.agent.api.PingRoutingCommand;
import com.cloud.agent.api.PingRoutingWithNwGroupsCommand;
import com.cloud.agent.api.PingRoutingWithOvsCommand;
import com.cloud.agent.api.RebootAnswer;
import com.cloud.agent.api.RebootCommand;
import com.cloud.agent.api.SetupGuestNetworkCommand;
import com.cloud.agent.api.StartAnswer;
import com.cloud.agent.api.StartCommand;
import com.cloud.agent.api.StartupCommand;
import com.cloud.agent.api.StartupRoutingCommand;
import com.cloud.agent.api.StartupStorageCommand;
import com.cloud.agent.api.StopAnswer;
import com.cloud.agent.api.StopCommand;
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.agent.api.VgpuTypesInfo;
import com.cloud.agent.api.VmStatsEntry;
import com.cloud.agent.api.routing.IpAssocCommand;
import com.cloud.agent.api.routing.IpAssocVpcCommand;
import com.cloud.agent.api.routing.NetworkElementCommand;
import com.cloud.agent.api.routing.SetNetworkACLCommand;
import com.cloud.agent.api.routing.SetSourceNatCommand;
import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.DiskTO;
import com.cloud.agent.api.to.GPUDeviceTO;
import com.cloud.agent.api.to.IpAddressTO;
import com.cloud.agent.api.to.NfsTO;
import com.cloud.agent.api.to.NicTO;
import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.agent.resource.virtualnetwork.VRScripts;
import com.cloud.agent.resource.virtualnetwork.VirtualRouterDeployer;
import com.cloud.agent.resource.virtualnetwork.VirtualRoutingResource;
import com.cloud.exception.InternalErrorException;
import com.cloud.host.Host.Type;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.hypervisor.xenserver.resource.wrapper.xenbase.CitrixRequestWrapper;
import com.cloud.hypervisor.xenserver.resource.wrapper.xenbase.XenServerUtilitiesHelper;
import com.cloud.network.Networks;
import com.cloud.network.Networks.BroadcastDomainType;
import com.cloud.network.Networks.TrafficType;
import com.cloud.resource.ServerResource;
import com.cloud.resource.hypervisor.HypervisorResource;
import com.cloud.storage.Storage;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.resource.StorageSubsystemCommandHandler;
import com.cloud.storage.resource.StorageSubsystemCommandHandlerBase;
import com.cloud.template.VirtualMachineTemplate.BootloaderType;
import com.cloud.utils.ExecutionResult;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.Pair;
import com.cloud.utils.PropertiesUtil;
import com.cloud.utils.StringUtils;
import com.cloud.utils.Ternary;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.net.NetUtils;
import com.cloud.utils.script.Script;
import com.cloud.utils.ssh.SSHCmdHelper;
import com.cloud.utils.ssh.SshHelper;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachine.PowerState;
import com.cloud.vm.VmDetailConstants;
import com.trilead.ssh2.SCPClient;
import com.xensource.xenapi.Bond;
import com.xensource.xenapi.Connection;
import com.xensource.xenapi.Console;
import com.xensource.xenapi.Host;
import com.xensource.xenapi.HostCpu;
import com.xensource.xenapi.HostMetrics;
import com.xensource.xenapi.Network;
import com.xensource.xenapi.PBD;
import com.xensource.xenapi.PIF;
import com.xensource.xenapi.Pool;
import com.xensource.xenapi.SR;
import com.xensource.xenapi.Session;
import com.xensource.xenapi.Task;
import com.xensource.xenapi.Types;
import com.xensource.xenapi.Types.BadServerResponse;
import com.xensource.xenapi.Types.VmPowerState;
import com.xensource.xenapi.Types.XenAPIException;
import com.xensource.xenapi.VBD;
import com.xensource.xenapi.VDI;
import com.xensource.xenapi.VIF;
import com.xensource.xenapi.VLAN;
import com.xensource.xenapi.VM;
import com.xensource.xenapi.XenAPIObject;
/**
* CitrixResourceBase encapsulates the calls to the XenServer Xapi process to
* perform the required functionalities for CloudStack.
*
* ==============> READ THIS <============== Because the XenServer objects can
* expire when the session expires, we cannot keep any of the actual XenServer
* objects in this class. The only thing that is constant is the UUID of the
* XenServer objects but not the objects themselves! This is very important
* before you do any changes in this code here.
*
*/
public abstract class CitrixResourceBase implements ServerResource, HypervisorResource, VirtualRouterDeployer {
/**
* used to describe what type of resource a storage device is of
*/
public enum SRType {
EXT, ISO, LVM, LVMOHBA, LVMOISCSI,
/**
* used for resigning metadata (like SR UUID and VDI UUID when a
* particular storage manager is installed on a XenServer host (for back-end snapshots to work))
*/
RELVMOISCSI, NFS;
String _str;
private SRType() {
_str = super.toString().toLowerCase();
}
public boolean equals(final String type) {
return _str.equalsIgnoreCase(type);
}
@Override
public String toString() {
return _str;
}
}
private final static int BASE_TO_CONVERT_BYTES_INTO_KILOBYTES = 1024;
private static final XenServerConnectionPool ConnPool = XenServerConnectionPool.getInstance();
// static min values for guests on xenserver
private static final long mem_128m = 134217728L;
static final Random Rand = new Random(System.currentTimeMillis());
private static final Logger s_logger = Logger.getLogger(CitrixResourceBase.class);
protected static final HashMap<VmPowerState, PowerState> s_powerStatesTable;
private String xenServer70plusGuestToolsName = "guest-tools.iso";
private String xenServerBefore70GuestToolsName = "xs-tools.iso";
static {
s_powerStatesTable = new HashMap<VmPowerState, PowerState>();
s_powerStatesTable.put(VmPowerState.HALTED, PowerState.PowerOff);
s_powerStatesTable.put(VmPowerState.PAUSED, PowerState.PowerOff);
s_powerStatesTable.put(VmPowerState.RUNNING, PowerState.PowerOn);
s_powerStatesTable.put(VmPowerState.SUSPENDED, PowerState.PowerOff);
s_powerStatesTable.put(VmPowerState.UNRECOGNIZED, PowerState.PowerUnknown);
}
private static PowerState convertToPowerState(final VmPowerState ps) {
final PowerState powerState = s_powerStatesTable.get(ps);
return powerState == null ? PowerState.PowerUnknown : powerState;
}
private static boolean isAlienVm(final VM vm, final Connection conn) throws XenAPIException, XmlRpcException {
// TODO : we need a better way to tell whether or not the VM belongs to
// CloudStack
final String vmName = vm.getNameLabel(conn);
if (vmName.matches("^[ivs]-\\d+-.+")) {
return false;
}
return true;
}
protected IAgentControl _agentControl;
protected boolean _canBridgeFirewall = false;
protected String _cluster;
// Guest and Host Performance Statistics
protected String _consolidationFunction = "AVERAGE";
protected long _dcId;
protected String _guestNetworkName;
protected int _heartbeatInterval = 60;
protected int _heartbeatTimeout = 120;
protected XsHost _host = new XsHost();
protected String _instance; // instance name (default is usually "VM")
protected boolean _isOvs = false;
protected String _linkLocalPrivateNetworkName;
protected int _maxNics = 7;
final int _maxWeight = 256;
protected int _migratewait;
protected String _name;
protected Queue<String> _password = new LinkedList<String>();
protected String _pod;
protected int _pollingIntervalInSeconds = 60;
protected String _privateNetworkName;
protected String _publicNetworkName;
protected final int _retry = 100;
protected boolean _securityGroupEnabled;
protected final int _sleep = 10000;
protected String _storageNetworkName1;
protected String _storageNetworkName2;
protected List<VIF> _tmpDom0Vif = new ArrayList<VIF>();
protected String _username;
protected VirtualRoutingResource _vrResource;
protected String _configDriveIsopath = "/opt/xensource/packages/configdrive_iso/";
protected String _configDriveSRName = "ConfigDriveISOs";
public String _attachIsoDeviceNum = "3";
protected XenServerUtilitiesHelper xenServerUtilitiesHelper = new XenServerUtilitiesHelper();
protected int _wait;
// Hypervisor specific params with generic value, may need to be overridden
// for specific versions
long _xsMemoryUsed = 128 * 1024 * 1024L; // xenserver hypervisor used 128 M
double _xsVirtualizationFactor = 63.0 / 64.0; // 1 - virtualization overhead
protected StorageSubsystemCommandHandler storageHandler;
private static final String XENSTORE_DATA_IP = "vm-data/ip";
private static final String XENSTORE_DATA_GATEWAY = "vm-data/gateway";
private static final String XENSTORE_DATA_NETMASK = "vm-data/netmask";
private static final String XENSTORE_DATA_CS_INIT = "vm-data/cloudstack/init";
public CitrixResourceBase() {
}
/**
* Replaces the old password with the new password used to connect to the host.
*
* @param password - the new host password.
* @return the old password.
*/
public String replaceOldPasswdInQueue(final String password) {
final String oldPasswd = _password.poll();
_password.add(password);
return oldPasswd;
}
public String getPwdFromQueue() {
return _password.peek();
}
public XenServerUtilitiesHelper getXenServerUtilitiesHelper() {
return xenServerUtilitiesHelper;
}
protected StorageSubsystemCommandHandler buildStorageHandler() {
final XenServerStorageProcessor processor = new XenServerStorageProcessor(this);
return new StorageSubsystemCommandHandlerBase(processor);
}
public String callHostPlugin(final Connection conn, final String plugin, final String cmd, final String... params) {
final Map<String, String> args = new HashMap<String, String>();
String msg;
try {
for (int i = 0; i < params.length; i += 2) {
args.put(params[i], params[i + 1]);
}
if (s_logger.isTraceEnabled()) {
s_logger.trace("callHostPlugin executing for command " + cmd + " with " + getArgsString(args));
}
final Host host = Host.getByUuid(conn, _host.getUuid());
final String result = host.callPlugin(conn, plugin, cmd, args);
if (s_logger.isTraceEnabled()) {
s_logger.trace("callHostPlugin Result: " + result);
}
return result.replace("\n", "");
} catch (final XenAPIException e) {
msg = "callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to " + e.toString();
s_logger.warn(msg);
} catch (final XmlRpcException e) {
msg = "callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to " + e.getMessage();
s_logger.debug(msg);
}
throw new CloudRuntimeException(msg);
}
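// Illustrative usage (not from the original source): parameters are passed to
// callHostPlugin as alternating key/value varargs, e.g.
//   callHostPlugin(conn, "vmops", "can_bridge_firewall",
//                  "host_uuid", _host.getUuid(), "instance", _instance);
// which mirrors the call made in canBridgeFirewall(Connection) further down.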
protected String callHostPluginAsync(final Connection conn, final String plugin, final String cmd, final int wait, final Map<String, String> params) {
final int timeout = wait * 1000;
final Map<String, String> args = new HashMap<String, String>();
Task task = null;
try {
for (final Map.Entry<String, String> entry : params.entrySet()) {
args.put(entry.getKey(), entry.getValue());
}
if (s_logger.isTraceEnabled()) {
s_logger.trace("callHostPlugin executing for command " + cmd + " with " + getArgsString(args));
}
final Host host = Host.getByUuid(conn, _host.getUuid());
task = host.callPluginAsync(conn, plugin, cmd, args);
// poll every 1 seconds
waitForTask(conn, task, 1000, timeout);
checkForSuccess(conn, task);
final String result = task.getResult(conn);
if (s_logger.isTraceEnabled()) {
s_logger.trace("callHostPlugin Result: " + result);
}
return result.replace("<value>", "").replace("</value>", "").replace("\n", "");
} catch (final Types.HandleInvalid e) {
s_logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to HandleInvalid clazz:" + e.clazz + ", handle:" + e.handle);
} catch (final Exception e) {
s_logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to " + e.toString(), e);
} finally {
if (task != null) {
try {
task.destroy(conn);
} catch (final Exception e1) {
s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.getUuid() + ") due to " + e1.toString());
}
}
}
return null;
}
protected String callHostPluginAsync(final Connection conn, final String plugin, final String cmd, final int wait, final String... params) {
final int timeout = wait * 1000;
final Map<String, String> args = new HashMap<String, String>();
Task task = null;
try {
for (int i = 0; i < params.length; i += 2) {
args.put(params[i], params[i + 1]);
}
if (s_logger.isTraceEnabled()) {
s_logger.trace("callHostPlugin executing for command " + cmd + " with " + getArgsString(args));
}
final Host host = Host.getByUuid(conn, _host.getUuid());
task = host.callPluginAsync(conn, plugin, cmd, args);
// poll every 1 seconds
waitForTask(conn, task, 1000, timeout);
checkForSuccess(conn, task);
final String result = task.getResult(conn);
if (s_logger.isTraceEnabled()) {
s_logger.trace("callHostPlugin Result: " + result);
}
return result.replace("<value>", "").replace("</value>", "").replace("\n", "");
} catch (final Types.HandleInvalid e) {
s_logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to HandleInvalid clazz:" + e.clazz + ", handle:" + e.handle);
} catch (final XenAPIException e) {
s_logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to " + e.toString(), e);
} catch (final Exception e) {
s_logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to " + e.getMessage(), e);
} finally {
if (task != null) {
try {
task.destroy(conn);
} catch (final Exception e1) {
s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.getUuid() + ") due to " + e1.toString());
}
}
}
return null;
}
public String callHostPluginPremium(final Connection conn, final String cmd, final String... params) {
return callHostPlugin(conn, "vmopspremium", cmd, params);
}
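// Unlike the per-host helpers above, callHostPluginThroughMaster routes the plugin call through
// the pool master. It assumes exactly one Pool record is visible on the connection; anything
// else is treated as an error.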
protected String callHostPluginThroughMaster(final Connection conn, final String plugin, final String cmd, final String... params) {
final Map<String, String> args = new HashMap<String, String>();
try {
final Map<Pool, Pool.Record> poolRecs = Pool.getAllRecords(conn);
if (poolRecs.size() != 1) {
throw new CloudRuntimeException("There are " + poolRecs.size() + " pool for host :" + _host.getUuid());
}
final Host master = poolRecs.values().iterator().next().master;
for (int i = 0; i < params.length; i += 2) {
args.put(params[i], params[i + 1]);
}
if (s_logger.isTraceEnabled()) {
s_logger.trace("callHostPlugin executing for command " + cmd + " with " + getArgsString(args));
}
final String result = master.callPlugin(conn, plugin, cmd, args);
if (s_logger.isTraceEnabled()) {
s_logger.trace("callHostPlugin Result: " + result);
}
return result.replace("\n", "");
} catch (final Types.HandleInvalid e) {
s_logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to HandleInvalid clazz:" + e.clazz + ", handle:" + e.handle);
} catch (final XenAPIException e) {
s_logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to " + e.toString(), e);
} catch (final XmlRpcException e) {
s_logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to " + e.getMessage(), e);
}
return null;
}
public boolean canBridgeFirewall() {
return _canBridgeFirewall;
}
public boolean canBridgeFirewall(final Connection conn) {
return Boolean.valueOf(callHostPlugin(conn, "vmops", "can_bridge_firewall", "host_uuid", _host.getUuid(), "instance", _instance));
}
public void checkForSuccess(final Connection c, final Task task) throws XenAPIException, XmlRpcException {
if (task.getStatus(c) == Types.TaskStatusType.SUCCESS) {
if (s_logger.isTraceEnabled()) {
s_logger.trace("Task " + task.getNameLabel(c) + " (" + task.getUuid(c) + ") completed");
}
return;
} else {
final String msg = "Task failed! Task record: " + task.getRecord(c);
s_logger.warn(msg);
task.cancel(c);
task.destroy(c);
throw new Types.BadAsyncResult(msg);
}
}
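// checkSR verifies that the SR has a PBD attached on this host. For shared NFS SRs it also
// forces the "nosubdir" sm-config flag and, when this host has no PBD yet, clones an existing
// PBD record from another host and plugs it here. For non-shared SRs it simply re-plugs any
// PBDs that are not currently attached.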
protected boolean checkSR(final Connection conn, final SR sr) {
try {
final SR.Record srr = sr.getRecord(conn);
final Set<PBD> pbds = sr.getPBDs(conn);
if (pbds.size() == 0) {
final String msg = "There is no PBDs for this SR: " + srr.nameLabel + " on host:" + _host.getUuid();
s_logger.warn(msg);
return false;
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Checking " + srr.nameLabel + " or SR " + srr.uuid + " on " + _host);
}
if (srr.shared) {
if (SRType.NFS.equals(srr.type)) {
final Map<String, String> smConfig = srr.smConfig;
if (!smConfig.containsKey("nosubdir")) {
smConfig.put("nosubdir", "true");
sr.setSmConfig(conn, smConfig);
}
}
final Host host = Host.getByUuid(conn, _host.getUuid());
boolean found = false;
for (final PBD pbd : pbds) {
final PBD.Record pbdr = pbd.getRecord(conn);
if (host.equals(pbdr.host)) {
if (!pbdr.currentlyAttached) {
pbdPlug(conn, pbd, pbdr.uuid);
}
found = true;
break;
}
}
if (!found) {
final PBD.Record pbdr = srr.PBDs.iterator().next().getRecord(conn);
pbdr.host = host;
pbdr.uuid = "";
final PBD pbd = PBD.create(conn, pbdr);
pbdPlug(conn, pbd, pbd.getUuid(conn));
}
} else {
for (final PBD pbd : pbds) {
final PBD.Record pbdr = pbd.getRecord(conn);
if (!pbdr.currentlyAttached) {
pbdPlug(conn, pbd, pbdr.uuid);
}
}
}
} catch (final Exception e) {
final String msg = "checkSR failed host:" + _host + " due to " + e.toString();
s_logger.warn(msg, e);
return false;
}
return true;
}
private void CheckXenHostInfo() throws ConfigurationException {
final Connection conn = ConnPool.getConnect(_host.getIp(), _username, _password);
if (conn == null) {
throw new ConfigurationException("Can not create connection to " + _host.getIp());
}
try {
Host.Record hostRec = null;
try {
final Host host = Host.getByUuid(conn, _host.getUuid());
hostRec = host.getRecord(conn);
final Pool.Record poolRec = Pool.getAllRecords(conn).values().iterator().next();
_host.setPool(poolRec.uuid);
} catch (final Exception e) {
throw new ConfigurationException("Can not get host information from " + _host.getIp());
}
if (!hostRec.address.equals(_host.getIp())) {
final String msg = "Host " + _host.getIp() + " seems be reinstalled, please remove this host and readd";
s_logger.error(msg);
throw new ConfigurationException(msg);
}
} finally {
try {
Session.logout(conn);
} catch (final Exception e) {
}
}
}
@Override
public ExecutionResult cleanupCommand(final NetworkElementCommand cmd) {
if (cmd instanceof IpAssocCommand && !(cmd instanceof IpAssocVpcCommand)) {
return cleanupNetworkElementCommand((IpAssocCommand)cmd);
}
return new ExecutionResult(true, null);
}
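// cleanupHaltedVms destroys VMs that are halted, have affinity to this host, are neither
// templates nor control domains, and are not flagged as alien by isAlienVm. It returns false
// if any destroy attempt fails.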
public boolean cleanupHaltedVms(final Connection conn) throws XenAPIException, XmlRpcException {
final Host host = Host.getByUuid(conn, _host.getUuid());
final Map<VM, VM.Record> vms = VM.getAllRecords(conn);
boolean success = true;
if (vms != null && !vms.isEmpty()) {
for (final Map.Entry<VM, VM.Record> entry : vms.entrySet()) {
final VM vm = entry.getKey();
final VM.Record vmRec = entry.getValue();
if (vmRec.isATemplate || vmRec.isControlDomain) {
continue;
}
if (VmPowerState.HALTED.equals(vmRec.powerState) && vmRec.affinity.equals(host) && !isAlienVm(vm, conn)) {
try {
vm.destroy(conn);
} catch (final Exception e) {
s_logger.warn("Catch Exception " + e.getClass().getName() + ": unable to destroy VM " + vmRec.nameLabel + " due to ", e);
success = false;
}
}
}
}
return success;
}
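// cleanupNetworkElementCommand handles the IP disassociation path: when the last IP on a public
// VLAN is being removed (NETWORK_PUB_LAST_IP is "true" and the IP is not being added), the
// matching VIF on the router is unplugged and destroyed, its removal is recorded via the
// networkUsage "deleteVif" call, and the VLAN network is disabled if necessary. The default
// public interface eth2 of isolated networks is deliberately left alone.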
protected ExecutionResult cleanupNetworkElementCommand(final IpAssocCommand cmd) {
final Connection conn = getConnection();
final String routerName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME);
final String routerIp = cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP);
final String lastIp = cmd.getAccessDetail(NetworkElementCommand.NETWORK_PUB_LAST_IP);
try {
final IpAddressTO[] ips = cmd.getIpAddresses();
for (final IpAddressTO ip : ips) {
final VM router = getVM(conn, routerName);
final NicTO nic = new NicTO();
nic.setMac(ip.getVifMacAddress());
nic.setType(ip.getTrafficType());
if (ip.getBroadcastUri() == null) {
nic.setBroadcastType(BroadcastDomainType.Native);
} else {
final URI uri = BroadcastDomainType.fromString(ip.getBroadcastUri());
nic.setBroadcastType(BroadcastDomainType.getSchemeValue(uri));
nic.setBroadcastUri(uri);
}
nic.setDeviceId(0);
nic.setNetworkRateMbps(ip.getNetworkRate());
nic.setName(ip.getNetworkName());
Network network = getNetwork(conn, nic);
// If we are disassociating the last IP address in the VLAN, we need to remove a VIF
boolean removeVif = false;
// There is only one IP in this public VLAN and we are removing it, so remove the NIC as well
if (org.apache.commons.lang.StringUtils.equalsIgnoreCase(lastIp, "true") && !ip.isAdd()) {
final VIF correctVif = getCorrectVif(conn, router, network);
// in isolated network eth2 is the default public interface. We don't want to delete it.
if (correctVif != null && !correctVif.getDevice(conn).equals("2")) {
removeVif = true;
}
}
if (removeVif) {
// Determine the correct VIF on DomR to associate/disassociate the IP address with
final VIF correctVif = getCorrectVif(conn, router, network);
if (correctVif != null) {
network = correctVif.getNetwork(conn);
// Mark this vif to be removed from network usage
networkUsage(conn, routerIp, "deleteVif", "eth" + correctVif.getDevice(conn));
// Remove the VIF from DomR
correctVif.unplug(conn);
correctVif.destroy(conn);
// Disable the VLAN network if necessary
disableVlanNetwork(conn, network);
}
}
}
} catch (final Exception e) {
s_logger.debug("Ip Assoc failure on applying one ip due to exception: ", e);
return new ExecutionResult(false, e.getMessage());
}
return new ExecutionResult(true, null);
}
public void cleanupTemplateSR(final Connection conn) {
Set<PBD> pbds = null;
try {
final Host host = Host.getByUuid(conn, _host.getUuid());
pbds = host.getPBDs(conn);
} catch (final XenAPIException e) {
s_logger.warn("Unable to get the SRs " + e.toString(), e);
throw new CloudRuntimeException("Unable to get SRs " + e.toString(), e);
} catch (final Exception e) {
throw new CloudRuntimeException("Unable to get SRs " + e.getMessage(), e);
}
for (final PBD pbd : pbds) {
SR sr = null;
SR.Record srRec = null;
try {
sr = pbd.getSR(conn);
srRec = sr.getRecord(conn);
} catch (final Exception e) {
s_logger.warn("pbd.getSR get Exception due to ", e);
continue;
}
final String type = srRec.type;
if (srRec.shared) {
continue;
}
if (SRType.NFS.equals(type) || SRType.ISO.equals(type) && srRec.nameDescription.contains("template")) {
try {
pbd.unplug(conn);
pbd.destroy(conn);
sr.forget(conn);
} catch (final Exception e) {
s_logger.warn("forget SR catch Exception due to ", e);
}
}
}
}
public void cleanUpTmpDomVif(final Connection conn, final Network nw) throws XenAPIException, XmlRpcException {
final Pair<VM, VM.Record> vm = getControlDomain(conn);
final VM dom0 = vm.first();
final Set<VIF> dom0Vifs = dom0.getVIFs(conn);
for (final VIF v : dom0Vifs) {
String vifName = "unknown";
try {
final VIF.Record vifr = v.getRecord(conn);
if (v.getNetwork(conn).getUuid(conn).equals(nw.getUuid(conn))) {
if (vifr != null) {
final Map<String, String> config = vifr.otherConfig;
vifName = config.get("nameLabel");
}
s_logger.debug("A VIF in dom0 for the network is found - so destroy the vif");
v.destroy(conn);
s_logger.debug("Destroy temp dom0 vif" + vifName + " success");
}
} catch (final Exception e) {
s_logger.warn("Destroy temp dom0 vif " + vifName + "failed", e);
}
}
}
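// cloudVDIcopy copies a VDI to the target SR asynchronously, polling the task every second.
// A wait value of 0 falls back to a 2 hour timeout, and the task is always destroyed afterwards.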
protected VDI cloudVDIcopy(final Connection conn, final VDI vdi, final SR sr, int wait) throws Exception {
Task task = null;
if (wait == 0) {
wait = 2 * 60 * 60;
}
try {
task = vdi.copyAsync(conn, sr);
// poll every 1 second, timeout after 2 hours
waitForTask(conn, task, 1000, (long)wait * 1000);
checkForSuccess(conn, task);
final VDI dvdi = Types.toVDI(task, conn);
return dvdi;
} finally {
if (task != null) {
try {
task.destroy(conn);
} catch (final Exception e) {
s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.getUuid() + ") due to " + e.toString());
}
}
}
}
public HashMap<String, String> clusterVMMetaDataSync(final Connection conn) {
final HashMap<String, String> vmMetaDatum = new HashMap<String, String>();
try {
final Map<VM, VM.Record> vm_map = VM.getAllRecords(conn); // USE
if (vm_map != null) {
for (final VM.Record record : vm_map.values()) {
if (record.isControlDomain || record.isASnapshot || record.isATemplate) {
continue; // Skip dom0, snapshots and templates
}
final String platform = StringUtils.mapToString(record.platform);
if (platform.isEmpty()) {
continue; // Skip if platform data is empty
}
vmMetaDatum.put(record.nameLabel, StringUtils.mapToString(record.platform));
}
}
} catch (final Throwable e) {
final String msg = "Unable to get vms through host " + _host.getUuid() + " due to to " + e.toString();
s_logger.warn(msg, e);
throw new CloudRuntimeException(msg);
}
return vmMetaDatum;
}
@Override
public boolean configure(final String name, final Map<String, Object> params) throws ConfigurationException {
_name = name;
try {
_dcId = Long.parseLong((String)params.get("zone"));
} catch (final NumberFormatException e) {
throw new ConfigurationException("Unable to get the zone " + params.get("zone"));
}
_host.setUuid((String)params.get("guid"));
_name = _host.getUuid();
_host.setIp((String)params.get("ipaddress"));
_username = (String)params.get("username");
_password.add((String)params.get("password"));
_pod = (String)params.get("pod");
_cluster = (String)params.get("cluster");
_privateNetworkName = (String)params.get("private.network.device");
_publicNetworkName = (String)params.get("public.network.device");
_guestNetworkName = (String)params.get("guest.network.device");
_instance = (String)params.get("instance.name");
_securityGroupEnabled = Boolean.parseBoolean((String)params.get("securitygroupenabled"));
_linkLocalPrivateNetworkName = (String)params.get("private.linkLocal.device");
if (_linkLocalPrivateNetworkName == null) {
_linkLocalPrivateNetworkName = "cloud_link_local_network";
}
_storageNetworkName1 = (String)params.get("storage.network.device1");
_storageNetworkName2 = (String)params.get("storage.network.device2");
_heartbeatTimeout = NumbersUtil.parseInt((String)params.get("xenserver.heartbeat.timeout"), 120);
_heartbeatInterval = NumbersUtil.parseInt((String)params.get("xenserver.heartbeat.interval"), 60);
String value = (String)params.get("wait");
_wait = NumbersUtil.parseInt(value, 600);
value = (String)params.get("migratewait");
_migratewait = NumbersUtil.parseInt(value, 3600);
_maxNics = NumbersUtil.parseInt((String)params.get("xenserver.nics.max"), 7);
if (_pod == null) {
throw new ConfigurationException("Unable to get the pod");
}
if (_host.getIp() == null) {
throw new ConfigurationException("Unable to get the host address");
}
if (_username == null) {
throw new ConfigurationException("Unable to get the username");
}
if (_password.peek() == null) {
throw new ConfigurationException("Unable to get the password");
}
if (_host.getUuid() == null) {
throw new ConfigurationException("Unable to get the uuid");
}
CheckXenHostInfo();
storageHandler = buildStorageHandler();
_vrResource = new VirtualRoutingResource(this);
if (!_vrResource.configure(name, params)) {
throw new ConfigurationException("Unable to configure VirtualRoutingResource");
}
return true;
}
/**
* This method creates a XenServer network and configures it for being used
* as an L2-in-L3 tunneled network
*/
public synchronized Network configureTunnelNetwork(final Connection conn, final Long networkId, final long hostId, final String bridgeName) {
try {
final Network nw = findOrCreateTunnelNetwork(conn, bridgeName);
// Invoke plugin to set up the bridge which will be used by this network
final String bridge = nw.getBridge(conn);
final Map<String, String> nwOtherConfig = nw.getOtherConfig(conn);
final String configuredHosts = nwOtherConfig.get("ovs-host-setup");
boolean configured = false;
if (configuredHosts != null) {
final String hostIdsStr[] = configuredHosts.split(",");
for (final String hostIdStr : hostIdsStr) {
if (hostIdStr.equals(((Long)hostId).toString())) {
configured = true;
break;
}
}
}
if (!configured) {
String result;
if (bridgeName.startsWith("OVS-DR-VPC-Bridge")) {
result = callHostPlugin(conn, "ovstunnel", "setup_ovs_bridge_for_distributed_routing", "bridge", bridge, "key", bridgeName, "xs_nw_uuid", nw.getUuid(conn), "cs_host_id",
((Long)hostId).toString());
} else {
result = callHostPlugin(conn, "ovstunnel", "setup_ovs_bridge", "bridge", bridge, "key", bridgeName, "xs_nw_uuid", nw.getUuid(conn), "cs_host_id", ((Long)hostId).toString());
}
// Note down the fact that the ovs bridge has been setup
final String[] res = result.split(":");
if (res.length != 2 || !res[0].equalsIgnoreCase("SUCCESS")) {
throw new CloudRuntimeException("Unable to pre-configure OVS bridge " + bridge);
}
}
return nw;
} catch (final Exception e) {
s_logger.warn("createandConfigureTunnelNetwork failed", e);
return null;
}
}
public String connect(final Connection conn, final String vmname, final String ipAddress) {
return connect(conn, vmname, ipAddress, 3922);
}
public String connect(final Connection conn, final String vmName, final String ipAddress, final int port) {
for (int i = 0; i <= _retry; i++) {
try {
final Set<VM> vms = VM.getByNameLabel(conn, vmName);
if (vms.size() < 1) {
final String msg = "VM " + vmName + " is not running";
s_logger.warn(msg);
return msg;
}
} catch (final Exception e) {
final String msg = "VM.getByNameLabel " + vmName + " failed due to " + e.toString();
s_logger.warn(msg, e);
return msg;
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Trying to connect to " + ipAddress + " attempt " + i + " of " + _retry);
}
if (pingdomr(conn, ipAddress, Integer.toString(port))) {
return null;
}
try {
Thread.sleep(_sleep);
} catch (final InterruptedException e) {
}
}
final String msg = "Timeout, Unable to logon to " + ipAddress;
s_logger.debug(msg);
return msg;
}
public String copyVhdFromSecondaryStorage(final Connection conn, final String mountpoint, final String sruuid, final int wait) {
final String nameLabel = "cloud-" + UUID.randomUUID().toString();
final String results = callHostPluginAsync(conn, "vmopspremium", "copy_vhd_from_secondarystorage", wait, "mountpoint", mountpoint, "sruuid", sruuid, "namelabel", nameLabel);
String errMsg = null;
if (results == null || results.isEmpty()) {
errMsg = "copy_vhd_from_secondarystorage return null";
} else {
final String[] tmp = results.split("#");
final String status = tmp[0];
if (status.equals("0")) {
return tmp[1];
} else {
errMsg = tmp[1];
}
}
final String source = mountpoint.substring(mountpoint.lastIndexOf('/') + 1);
if (killCopyProcess(conn, source)) {
destroyVDIbyNameLabel(conn, nameLabel);
}
s_logger.warn(errMsg);
throw new CloudRuntimeException(errMsg);
}
@Override
public ExecutionResult createFileInVR(final String routerIp, final String path, final String filename, final String content) {
final Connection conn = getConnection();
final String hostPath = "/tmp/";
s_logger.debug("Copying VR with ip " + routerIp + " config file into host " + _host.getIp());
try {
SshHelper.scpTo(_host.getIp(), 22, _username, null, _password.peek(), hostPath, content.getBytes(Charset.defaultCharset()), filename, null);
} catch (final Exception e) {
s_logger.warn("scp VR config file into host " + _host.getIp() + " failed with exception " + e.getMessage().toString());
}
final String rc = callHostPlugin(conn, "vmops", "createFileInDomr", "domrip", routerIp, "srcfilepath", hostPath + filename, "dstfilepath", path);
s_logger.debug("VR Config file " + filename + " got created in VR, ip " + routerIp + " with content \n" + content);
return new ExecutionResult(rc.startsWith("succ#"), rc.substring(5));
}
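// createIsoSRbyURI mounts an NFS export (host:path taken from the URI) as an ISO-type SR and
// labels it "<vmName>-ISO". A minimal usage sketch, with a purely illustrative URI value:
//   SR isoSr = createIsoSRbyURI(conn, new URI("nfs://192.0.2.10/export/iso"), vmName, false);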
protected SR createIsoSRbyURI(final Connection conn, final URI uri, final String vmName, final boolean shared) {
try {
final Map<String, String> deviceConfig = new HashMap<String, String>();
String path = uri.getPath();
path = path.replace("//", "/");
deviceConfig.put("location", uri.getHost() + ":" + path);
final Host host = Host.getByUuid(conn, _host.getUuid());
final SR sr = SR.create(conn, host, deviceConfig, new Long(0), uri.getHost() + path, "iso", "iso", "iso", shared, new HashMap<String, String>());
sr.setNameLabel(conn, vmName + "-ISO");
sr.setNameDescription(conn, deviceConfig.get("location"));
sr.scan(conn);
return sr;
} catch (final XenAPIException e) {
final String msg = "createIsoSRbyURI failed! mountpoint: " + uri.getHost() + uri.getPath() + " due to " + e.toString();
s_logger.warn(msg, e);
throw new CloudRuntimeException(msg, e);
} catch (final Exception e) {
final String msg = "createIsoSRbyURI failed! mountpoint: " + uri.getHost() + uri.getPath() + " due to " + e.getMessage();
s_logger.warn(msg, e);
throw new CloudRuntimeException(msg, e);
}
}
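// createNfsSRbyURI creates (or reuses) an NFS SR for the given URI. The SR name is a UUID
// derived deterministically from host+path, so the same secondary storage always maps to the
// same name; stale non-shared SRs with that name are removed first. "nosubdir" is forced in
// the sm-config, and checkSR ensures a PBD is actually attached before the SR is returned.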
protected SR createNfsSRbyURI(final Connection conn, final URI uri, final boolean shared) {
try {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Creating a " + (shared ? "shared SR for " : "not shared SR for ") + uri);
}
final Map<String, String> deviceConfig = new HashMap<String, String>();
String path = uri.getPath();
path = path.replace("//", "/");
deviceConfig.put("server", uri.getHost());
deviceConfig.put("serverpath", path);
final String name = UUID.nameUUIDFromBytes(new String(uri.getHost() + path).getBytes()).toString();
if (!shared) {
final Set<SR> srs = SR.getByNameLabel(conn, name);
for (final SR sr : srs) {
final SR.Record record = sr.getRecord(conn);
if (SRType.NFS.equals(record.type) && record.contentType.equals("user") && !record.shared) {
removeSRSync(conn, sr);
}
}
}
final Host host = Host.getByUuid(conn, _host.getUuid());
final Map<String, String> smConfig = new HashMap<String, String>();
smConfig.put("nosubdir", "true");
final SR sr = SR.create(conn, host, deviceConfig, new Long(0), name, uri.getHost() + uri.getPath(), SRType.NFS.toString(), "user", shared, smConfig);
if (!checkSR(conn, sr)) {
throw new Exception("no attached PBD");
}
if (s_logger.isDebugEnabled()) {
s_logger.debug(logX(sr, "Created a SR; UUID is " + sr.getUuid(conn) + " device config is " + deviceConfig));
}
sr.scan(conn);
return sr;
} catch (final XenAPIException e) {
final String msg = "Can not create second storage SR mountpoint: " + uri.getHost() + uri.getPath() + " due to " + e.toString();
s_logger.warn(msg, e);
throw new CloudRuntimeException(msg, e);
} catch (final Exception e) {
final String msg = "Can not create second storage SR mountpoint: " + uri.getHost() + uri.getPath() + " due to " + e.getMessage();
s_logger.warn(msg, e);
throw new CloudRuntimeException(msg, e);
}
}
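// createPatchVbd attaches systemvm.iso to system VMs. It looks up the built-in tools SR
// ("XenServer Tools", falling back to "XCP-ng Tools"), scans it for a VDI whose name contains
// "systemvm.iso", caches that VDI uuid on the host object, and inserts it into a read-only
// CD VBD on userdevice 3.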
public VBD createPatchVbd(final Connection conn, final String vmName, final VM vm) throws XmlRpcException, XenAPIException {
if (_host.getSystemvmisouuid() == null) {
Set<SR> srs = SR.getByNameLabel(conn, "XenServer Tools");
if (srs.size() != 1) {
s_logger.debug("Failed to find SR by name 'XenServer Tools', will try to find 'XCP-ng Tools' SR");
srs = SR.getByNameLabel(conn, "XCP-ng Tools");
if (srs.size() != 1) {
throw new CloudRuntimeException("There are " + srs.size() + " SRs with name XenServer Tools");
}
}
final SR sr = srs.iterator().next();
sr.scan(conn);
final SR.Record srr = sr.getRecord(conn);
if (_host.getSystemvmisouuid() == null) {
for (final VDI vdi : srr.VDIs) {
final VDI.Record vdir = vdi.getRecord(conn);
if (vdir.nameLabel.contains("systemvm.iso")) {
_host.setSystemvmisouuid(vdir.uuid);
break;
}
}
}
if (_host.getSystemvmisouuid() == null) {
throw new CloudRuntimeException("can not find systemvmiso");
}
}
final VBD.Record cdromVBDR = new VBD.Record();
cdromVBDR.VM = vm;
cdromVBDR.empty = true;
cdromVBDR.bootable = false;
cdromVBDR.userdevice = "3";
cdromVBDR.mode = Types.VbdMode.RO;
cdromVBDR.type = Types.VbdType.CD;
final VBD cdromVBD = VBD.create(conn, cdromVBDR);
cdromVBD.insert(conn, VDI.getByUuid(conn, _host.getSystemvmisouuid()));
return cdromVBD;
}
protected boolean createSecondaryStorageFolder(final Connection conn, final String remoteMountPath, final String newFolder) {
final String result = callHostPlugin(conn, "vmopsSnapshot", "create_secondary_storage_folder", "remoteMountPath", remoteMountPath, "newFolder", newFolder);
return result != null;
}
String createTemplateFromSnapshot(final Connection conn, final String templatePath, final String snapshotPath, final int wait) {
final String tmpltLocalDir = UUID.randomUUID().toString();
final String results = callHostPluginAsync(conn, "vmopspremium", "create_privatetemplate_from_snapshot", wait, "templatePath", templatePath, "snapshotPath", snapshotPath, "tmpltLocalDir",
tmpltLocalDir);
String errMsg = null;
if (results == null || results.isEmpty()) {
errMsg = "create_privatetemplate_from_snapshot return null";
} else {
final String[] tmp = results.split("#");
final String status = tmp[0];
if (status.equals("0")) {
return results;
} else {
errMsg = "create_privatetemplate_from_snapshot failed due to " + tmp[1];
}
}
final String source = "cloud_mount/" + tmpltLocalDir;
killCopyProcess(conn, source);
s_logger.warn(errMsg);
throw new CloudRuntimeException(errMsg);
}
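// createVbd builds the VBD for a disk: ISOs become read-only CD devices on userdevice 3, all
// other volumes become RW disks whose device slot is "autodetect" unless the disk sequence
// number from the DiskTO can be used. ROOT volumes are marked bootable for PyGrub and are not
// unpluggable.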
public VBD createVbd(final Connection conn, final DiskTO volume, final String vmName, final VM vm, final BootloaderType bootLoaderType, VDI vdi) throws XmlRpcException, XenAPIException {
final Volume.Type type = volume.getType();
if (vdi == null) {
vdi = mount(conn, vmName, volume);
}
if (vdi != null) {
if ("detached".equals(vdi.getNameLabel(conn))) {
vdi.setNameLabel(conn, vmName + "-DATA");
}
final Map<String, String> smConfig = vdi.getSmConfig(conn);
for (final String key : smConfig.keySet()) {
if (key.startsWith("host_")) {
vdi.removeFromSmConfig(conn, key);
break;
}
}
}
final VBD.Record vbdr = new VBD.Record();
vbdr.VM = vm;
if (vdi != null) {
vbdr.VDI = vdi;
} else {
vbdr.empty = true;
}
if (type == Volume.Type.ROOT && bootLoaderType == BootloaderType.PyGrub) {
vbdr.bootable = true;
} else if (type == Volume.Type.ISO && bootLoaderType == BootloaderType.CD) {
vbdr.bootable = true;
}
if (volume.getType() == Volume.Type.ISO) {
vbdr.mode = Types.VbdMode.RO;
vbdr.type = Types.VbdType.CD;
vbdr.userdevice = "3";
} else {
vbdr.mode = Types.VbdMode.RW;
vbdr.type = Types.VbdType.DISK;
vbdr.unpluggable = (volume.getType() == Volume.Type.ROOT) ? false : true;
vbdr.userdevice = "autodetect";
final Long deviceId = volume.getDiskSeq();
if (deviceId != null && (!isDeviceUsed(conn, vm, deviceId) || deviceId > 3)) {
vbdr.userdevice = deviceId.toString();
}
}
final VBD vbd = VBD.create(conn, vbdr);
if (s_logger.isDebugEnabled()) {
s_logger.debug("VBD " + vbd.getUuid(conn) + " created for " + volume);
}
return vbd;
}
public VDI createVdi(final SR sr, final String vdiNameLabel, final Long volumeSize) throws Types.XenAPIException, XmlRpcException {
final Connection conn = getConnection();
final VDI.Record vdir = new VDI.Record();
vdir.nameLabel = vdiNameLabel;
vdir.SR = sr;
vdir.type = Types.VdiType.USER;
final long totalSrSpace = sr.getPhysicalSize(conn);
final long unavailableSrSpace = sr.getPhysicalUtilisation(conn);
final long availableSrSpace = totalSrSpace - unavailableSrSpace;
if (availableSrSpace < volumeSize) {
throw new CloudRuntimeException("Available space for SR cannot be less than " + volumeSize + ".");
}
vdir.virtualSize = volumeSize;
return VDI.create(conn, vdir);
}
public void createVGPU(final Connection conn, final StartCommand cmd, final VM vm, final GPUDeviceTO gpuDevice) throws XenAPIException, XmlRpcException {
}
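// createVif wires up a VIF for the NIC. The otherConfig map carries the identifiers the Nicira
// and OVS integrations rely on (nicira-iface-id, nicira-vm-id, cloudstack-nic-id,
// cloudstack-vm-id, cloudstack-network-id), and a "ratelimit" QoS entry is added when a network
// rate is set: kbps = rate in Mbps * 128.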
public VIF createVif(final Connection conn, final String vmName, final VM vm, final VirtualMachineTO vmSpec, final NicTO nic) throws XmlRpcException, XenAPIException {
assert nic.getUuid() != null : "Nic should have a uuid value";
if (s_logger.isDebugEnabled()) {
s_logger.debug("Creating VIF for " + vmName + " on nic " + nic);
}
VIF.Record vifr = new VIF.Record();
vifr.VM = vm;
vifr.device = Integer.toString(nic.getDeviceId());
vifr.MAC = nic.getMac();
// Nicira needs these IDs to find the NIC
vifr.otherConfig = new HashMap<String, String>();
vifr.otherConfig.put("nicira-iface-id", nic.getUuid());
vifr.otherConfig.put("nicira-vm-id", vm.getUuid(conn));
// Provide XAPI with the cloudstack vm and nic uids.
vifr.otherConfig.put("cloudstack-nic-id", nic.getUuid());
if (vmSpec != null) {
vifr.otherConfig.put("cloudstack-vm-id", vmSpec.getUuid());
}
// The OVS plugin looks at the network UUID in the vif 'otherconfig' details to
// group VIFs & tunnel ports as part of a tier
// when the bridge is set up for distributed routing
vifr.otherConfig.put("cloudstack-network-id", nic.getNetworkUuid());
vifr.network = getNetwork(conn, nic);
if (nic.getNetworkRateMbps() != null && nic.getNetworkRateMbps().intValue() != -1) {
vifr.qosAlgorithmType = "ratelimit";
vifr.qosAlgorithmParams = new HashMap<String, String>();
// convert Mbps to kilobytes per second (1 Mbps is treated as 128 KB/s here)
vifr.qosAlgorithmParams.put("kbps", Integer.toString(nic.getNetworkRateMbps() * 128));
}
vifr.lockingMode = Types.VifLockingMode.NETWORK_DEFAULT;
final VIF vif = VIF.create(conn, vifr);
if (s_logger.isDebugEnabled()) {
vifr = vif.getRecord(conn);
if (vifr != null) {
s_logger.debug("Created a vif " + vifr.uuid + " on " + nic.getDeviceId());
}
}
return vif;
}
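// createVmFromTemplate clones the record of the built-in guest-OS template, rewrites it for the
// requested VM (name, affinity, crash/shutdown behaviour, memory and VCPU limits) and creates
// the VM. With dynamic memory control available and dynamic scaling requested, the static
// memory bounds come from the guest OS recommendations; otherwise static and dynamic limits are
// pinned to the requested min/max RAM. CPU weight and cap are then applied, boot arguments are
// passed via PV args and xenstore, and the boot loader (eliloader or pygrub) is configured for
// non-Windows guests.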
public VM createVmFromTemplate(final Connection conn, final VirtualMachineTO vmSpec, final Host host) throws XenAPIException, XmlRpcException {
final String guestOsTypeName = getGuestOsType(vmSpec.getPlatformEmulator());
final Set<VM> templates = VM.getByNameLabel(conn, guestOsTypeName);
if (templates == null || templates.isEmpty()) {
throw new CloudRuntimeException("Cannot find template " + guestOsTypeName + " on XenServer host");
}
assert templates.size() == 1 : "Should only have 1 template but found " + templates.size();
final VM template = templates.iterator().next();
final VM.Record vmr = template.getRecord(conn);
vmr.affinity = host;
vmr.otherConfig.remove("disks");
vmr.otherConfig.remove("default_template");
vmr.otherConfig.remove("mac_seed");
vmr.isATemplate = false;
vmr.nameLabel = vmSpec.getName();
vmr.actionsAfterCrash = Types.OnCrashBehaviour.DESTROY;
vmr.actionsAfterShutdown = Types.OnNormalExit.DESTROY;
vmr.otherConfig.put("vm_uuid", vmSpec.getUuid());
vmr.VCPUsMax = (long)vmSpec.getCpus(); // FIXME: in case of dynamic scaling this VCPUs max should
// be the minimum of the recommended value for that template and the capacity remaining on the host
long recommendedMemoryMin = 0l;
long recommendedMemoryMax = 0l;
Map<String, String> guestOsDetails = vmSpec.getGuestOsDetails();
if (guestOsDetails != null) {
if (guestOsDetails.containsKey("xenserver.dynamicMin")) {
recommendedMemoryMin = Long.valueOf(guestOsDetails.get("xenserver.dynamicMin")).longValue();
}
if (guestOsDetails.containsKey("xenserver.dynamicMax")) {
recommendedMemoryMax = Long.valueOf(guestOsDetails.get("xenserver.dynamicMax")).longValue();
}
}
if (isDmcEnabled(conn, host) && vmSpec.isEnableDynamicallyScaleVm()) {
// scaling is allowed
vmr.memoryStaticMin = getStaticMin(vmSpec.getOs(), vmSpec.getBootloader() == BootloaderType.CD, vmSpec.getMinRam(), vmSpec.getMaxRam(), recommendedMemoryMin);
vmr.memoryStaticMax = getStaticMax(vmSpec.getOs(), vmSpec.getBootloader() == BootloaderType.CD, vmSpec.getMinRam(), vmSpec.getMaxRam(), recommendedMemoryMax);
vmr.memoryDynamicMin = vmSpec.getMinRam();
vmr.memoryDynamicMax = vmSpec.getMaxRam();
if (guestOsTypeName.toLowerCase().contains("windows")) {
vmr.VCPUsMax = (long)vmSpec.getCpus();
} else {
if (vmSpec.getVcpuMaxLimit() != null) {
vmr.VCPUsMax = (long)vmSpec.getVcpuMaxLimit();
}
}
} else {
// scaling disallowed, set static memory target
if (vmSpec.isEnableDynamicallyScaleVm() && !isDmcEnabled(conn, host)) {
s_logger.warn("Host " + host.getHostname(conn) + " does not support dynamic scaling, so the vm " + vmSpec.getName() + " is not dynamically scalable");
}
vmr.memoryStaticMin = vmSpec.getMinRam();
vmr.memoryStaticMax = vmSpec.getMaxRam();
vmr.memoryDynamicMin = vmSpec.getMinRam();
vmr.memoryDynamicMax = vmSpec.getMaxRam();
vmr.VCPUsMax = (long)vmSpec.getCpus();
}
vmr.VCPUsAtStartup = (long)vmSpec.getCpus();
vmr.consoles.clear();
vmr.xenstoreData.clear();
//Add xenstore data for the NetscalerVM
if (vmSpec.getType() == VirtualMachine.Type.NetScalerVm) {
NicTO mgmtNic = vmSpec.getNics()[0];
if (mgmtNic != null) {
Map<String, String> xenstoreData = new HashMap<String, String>(3);
xenstoreData.put(XENSTORE_DATA_IP, mgmtNic.getIp().toString().trim());
xenstoreData.put(XENSTORE_DATA_GATEWAY, mgmtNic.getGateway().toString().trim());
xenstoreData.put(XENSTORE_DATA_NETMASK, mgmtNic.getNetmask().toString().trim());
vmr.xenstoreData = xenstoreData;
}
}
final VM vm = VM.create(conn, vmr);
s_logger.debug("Created VM " + vm.getUuid(conn) + " for " + vmSpec.getName());
final Map<String, String> vcpuParams = new HashMap<String, String>();
final Integer speed = vmSpec.getMinSpeed();
if (speed != null) {
int cpuWeight = _maxWeight; // cpu_weight
int utilization = 0; // max CPU cap, default is unlimited
// weight based allocation, CPU weight is calculated per VCPU
cpuWeight = (int)(speed * 0.99 / _host.getSpeed() * _maxWeight);
if (cpuWeight > _maxWeight) {
cpuWeight = _maxWeight;
}
if (vmSpec.getLimitCpuUse()) {
// CPU cap is per VM, so need to assign cap based on the number
// of vcpus
utilization = (int)(vmSpec.getMaxSpeed() * 0.99 * vmSpec.getCpus() / _host.getSpeed() * 100);
}
vcpuParams.put("weight", Integer.toString(cpuWeight));
vcpuParams.put("cap", Integer.toString(utilization));
}
if (vcpuParams.size() > 0) {
vm.setVCPUsParams(conn, vcpuParams);
}
final String bootArgs = vmSpec.getBootArgs();
if (bootArgs != null && bootArgs.length() > 0) {
// send boot args for PV instances
String pvargs = vm.getPVArgs(conn);
pvargs = pvargs + vmSpec.getBootArgs().replaceAll(" ", "%");
vm.setPVArgs(conn, pvargs);
s_logger.debug("PV args are " + pvargs);
// send boot args into xenstore-data for HVM instances
Map<String, String> xenstoreData = new HashMap<>();
xenstoreData.put(XENSTORE_DATA_CS_INIT, bootArgs);
vm.setXenstoreData(conn, xenstoreData);
s_logger.debug("HVM args are " + bootArgs);
}
if (!(guestOsTypeName.startsWith("Windows") || guestOsTypeName.startsWith("Citrix") || guestOsTypeName.startsWith("Other"))) {
if (vmSpec.getBootloader() == BootloaderType.CD) {
final DiskTO[] disks = vmSpec.getDisks();
for (final DiskTO disk : disks) {
if (disk.getType() == Volume.Type.ISO) {
final TemplateObjectTO iso = (TemplateObjectTO)disk.getData();
final String osType = iso.getGuestOsType();
if (osType != null) {
final String isoGuestOsName = getGuestOsType(vmSpec.getPlatformEmulator());
if (!isoGuestOsName.equals(guestOsTypeName)) {
vmSpec.setBootloader(BootloaderType.PyGrub);
}
}
}
}
}
if (vmSpec.getBootloader() == BootloaderType.CD) {
vm.setPVBootloader(conn, "eliloader");
if (!vm.getOtherConfig(conn).containsKey("install-repository")) {
vm.addToOtherConfig(conn, "install-repository", "cdrom");
}
} else if (vmSpec.getBootloader() == BootloaderType.PyGrub) {
vm.setPVBootloader(conn, "pygrub");
vm.setPVBootloaderArgs(conn, CitrixHelper.getPVbootloaderArgs(guestOsTypeName));
} else {
vm.destroy(conn);
throw new CloudRuntimeException("Unable to handle boot loader type: " + vmSpec.getBootloader());
}
}
try {
finalizeVmMetaData(vm, conn, vmSpec);
} catch (final Exception e) {
throw new CloudRuntimeException("Unable to finalize VM MetaData: " + vmSpec);
}
return vm;
}
public VM createWorkingVM(final Connection conn, final String vmName, final String guestOSType, final String platformEmulator, final List<VolumeObjectTO> listVolumeTo)
throws BadServerResponse, Types.VmBadPowerState, Types.SrFull, Types.OperationNotAllowed, XenAPIException, XmlRpcException {
// The assignment below is redundant, but it is kept for consistency and code readability
final String guestOsTypeName = platformEmulator;
if (guestOsTypeName == null) {
final String msg = " Hypervisor " + this.getClass().getName() + " doesn't support guest OS type " + guestOSType + ". you can choose 'Other install media' to run it as HVM";
s_logger.warn(msg);
throw new CloudRuntimeException(msg);
}
final VM template = getVM(conn, guestOsTypeName);
final VM vm = template.createClone(conn, vmName);
vm.setIsATemplate(conn, false);
final Map<VDI, VolumeObjectTO> vdiMap = new HashMap<VDI, VolumeObjectTO>();
for (final VolumeObjectTO volume : listVolumeTo) {
final String vdiUuid = volume.getPath();
try {
final VDI vdi = VDI.getByUuid(conn, vdiUuid);
vdiMap.put(vdi, volume);
} catch (final Types.UuidInvalid e) {
s_logger.warn("Unable to find vdi by uuid: " + vdiUuid + ", skip it");
}
}
for (final Map.Entry<VDI, VolumeObjectTO> entry : vdiMap.entrySet()) {
final VDI vdi = entry.getKey();
final VolumeObjectTO volumeTO = entry.getValue();
final VBD.Record vbdr = new VBD.Record();
vbdr.VM = vm;
vbdr.VDI = vdi;
if (volumeTO.getVolumeType() == Volume.Type.ROOT) {
vbdr.bootable = true;
vbdr.unpluggable = false;
} else {
vbdr.bootable = false;
vbdr.unpluggable = true;
}
vbdr.userdevice = "autodetect";
vbdr.mode = Types.VbdMode.RW;
vbdr.type = Types.VbdType.DISK;
Long deviceId = volumeTO.getDeviceId();
if (deviceId != null && (!isDeviceUsed(conn, vm, deviceId) || deviceId > 3)) {
vbdr.userdevice = deviceId.toString();
}
VBD.create(conn, vbdr);
}
return vm;
}
protected boolean deleteSecondaryStorageFolder(final Connection conn, final String remoteMountPath, final String folder) {
final String details = callHostPlugin(conn, "vmopsSnapshot", "delete_secondary_storage_folder", "remoteMountPath", remoteMountPath, "folder", folder);
return details != null && details.equals("1");
}
protected String deleteSnapshotBackup(final Connection conn, final Long dcId, final Long accountId, final Long volumeId, final String secondaryStorageMountPath, final String backupUUID) {
// If anybody modifies the formatting below again, I'll skin them
final String result = callHostPlugin(conn, "vmopsSnapshot", "deleteSnapshotBackup", "backupUUID", backupUUID, "dcId", dcId.toString(), "accountId", accountId.toString(), "volumeId",
volumeId.toString(), "secondaryStorageMountPath", secondaryStorageMountPath);
return result;
}
public void destroyPatchVbd(final Connection conn, final String vmName) throws XmlRpcException, XenAPIException {
try {
if (!vmName.startsWith("r-") && !vmName.startsWith("s-") && !vmName.startsWith("v-")) {
return;
}
final Set<VM> vms = VM.getByNameLabel(conn, vmName);
for (final VM vm : vms) {
final Set<VBD> vbds = vm.getVBDs(conn);
for (final VBD vbd : vbds) {
if (vbd.getType(conn) == Types.VbdType.CD) {
vbd.eject(conn);
vbd.destroy(conn);
break;
}
}
}
} catch (final Exception e) {
s_logger.debug("Cannot destory CD-ROM device for VM " + vmName + " due to " + e.toString(), e);
}
}
public synchronized void destroyTunnelNetwork(final Connection conn, final Network nw, final long hostId) {
try {
final String bridge = nw.getBridge(conn);
final String result = callHostPlugin(conn, "ovstunnel", "destroy_ovs_bridge", "bridge", bridge, "cs_host_id", ((Long)hostId).toString());
final String[] res = result.split(":");
if (res.length != 2 || !res[0].equalsIgnoreCase("SUCCESS")) {
throw new CloudRuntimeException("Unable to remove OVS bridge " + bridge + ":" + result);
}
return;
} catch (final Exception e) {
s_logger.warn("destroyTunnelNetwork failed:", e);
return;
}
}
void destroyVDIbyNameLabel(final Connection conn, final String nameLabel) {
try {
final Set<VDI> vdis = VDI.getByNameLabel(conn, nameLabel);
if (vdis.size() != 1) {
s_logger.warn("destoryVDIbyNameLabel failed due to there are " + vdis.size() + " VDIs with name " + nameLabel);
return;
}
for (final VDI vdi : vdis) {
try {
vdi.destroy(conn);
} catch (final Exception e) {
final String msg = "Failed to destroy VDI : " + nameLabel + "due to " + e.toString() + "\n Force deleting VDI using system 'rm' command";
s_logger.warn(msg);
try {
final String srUUID = vdi.getSR(conn).getUuid(conn);
final String vdiUUID = vdi.getUuid(conn);
final String vdifile = "/var/run/sr-mount/" + srUUID + "/" + vdiUUID + ".vhd";
callHostPluginAsync(conn, "vmopspremium", "remove_corrupt_vdi", 10, "vdifile", vdifile);
} catch (final Exception e2) {
s_logger.warn(e2);
}
}
}
} catch (final Exception e) {
}
}
public void disableVlanNetwork(final Connection conn, final Network network) {
}
@Override
public void disconnected() {
}
public boolean doPingTest(final Connection conn, final String computingHostIp) {
final com.trilead.ssh2.Connection sshConnection = new com.trilead.ssh2.Connection(_host.getIp(), 22);
try {
sshConnection.connect(null, 60000, 60000);
if (!sshConnection.authenticateWithPassword(_username, _password.peek())) {
throw new CloudRuntimeException("Unable to authenticate");
}
final String cmd = "ping -c 2 " + computingHostIp;
if (!SSHCmdHelper.sshExecuteCmd(sshConnection, cmd)) {
throw new CloudRuntimeException("Cannot ping host " + computingHostIp + " from host " + _host.getIp());
}
return true;
} catch (final Exception e) {
s_logger.warn("Catch exception " + e.toString(), e);
return false;
} finally {
sshConnection.close();
}
}
public boolean doPingTest(final Connection conn, final String domRIp, final String vmIp) {
final String args = "-i " + domRIp + " -p " + vmIp;
final String result = callHostPlugin(conn, "vmops", "pingtest", "args", args);
if (result == null || result.isEmpty()) {
return false;
}
return true;
}
/**
* enableVlanNetwork creates a Network object, Vlan object, and thereby a
* tagged PIF object in Xapi.
*
* In XenServer, VLAN is added by - Create a network, which is unique
* cluster wide. - Find the PIF that you want to create the VLAN on. -
* Create a VLAN using the network and the PIF. As a result of this
* operation, a tagged PIF object is also created.
*
* Here is a list of problems with the clustered Xapi implementation that we
* are trying to circumvent. - There can be multiple Networks with the same
* name-label, so searching by name-label is not unique. - There is no way to
* search for Networks other than listing all of them, which is not efficient
* in our implementation because we can have over 4000 VLAN networks. - In a
* clustered situation, it is possible for both hosts to detect that the
* Network is missing and for both to create it. This causes a lot of
* problems, as one host may be using one Network and another may be using a
* different network for its VMs. This causes problems in migration because
* the VMs are logically attached to different networks in Xapi's database
* but in reality they are attached to the same network.
*
* To work around these problems, we do the following.
*
* - When creating the VLAN network, we name it "VLAN-<UUID of the Network it
* is created on>-<VLAN tag>". Because the VLAN tag is unique within one
* particular network, this gives a unique name-label with which to quickly
* retrieve the VLAN network when we need it again. - When we create the VLAN
* network, we add a timestamp and a random number as a tag on the network.
* Then, instead of creating the VLAN on that network, we retrieve the
* Network again and this time use the VLAN network with the lowest timestamp
* or lowest random number as the VLAN network. This allows VLAN creation to
* happen on multiple hosts concurrently, but even if two VLAN networks were
* created with the same name, only one of them is used.
*
* One caveat about this approach is that it relies on the timestamps being
* relatively accurate across the different hosts.
*
* @param conn
* Xapi Connection
* @param tag
* VLAN tag
* @param network
* network on this host to create the VLAN on.
* @return VLAN Network created.
* @throws XenAPIException
* @throws XmlRpcException
*/
protected Network enableVlanNetwork(final Connection conn, final long tag, final XsLocalNetwork network) throws XenAPIException, XmlRpcException {
Network vlanNetwork = null;
final String oldName = "VLAN" + Long.toString(tag);
final String newName = "VLAN-" + network.getNetworkRecord(conn).uuid + "-" + tag;
XsLocalNetwork vlanNic = getNetworkByName(conn, newName);
if (vlanNic == null) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Couldn't find vlan network with the new name so trying old name: " + oldName);
}
vlanNic = getNetworkByName(conn, oldName);
if (vlanNic != null) {
s_logger.info("Renaming VLAN with old name " + oldName + " to " + newName);
vlanNic.getNetwork().setNameLabel(conn, newName);
}
}
if (vlanNic == null) { // Can't find it, then create it.
if (s_logger.isDebugEnabled()) {
s_logger.debug("Creating VLAN network for " + tag + " on host " + _host.getIp());
}
final Network.Record nwr = new Network.Record();
nwr.nameLabel = newName;
nwr.tags = new HashSet<String>();
nwr.tags.add(generateTimeStamp());
vlanNetwork = Network.create(conn, nwr);
vlanNic = getNetworkByName(conn, newName);
if (vlanNic == null) { // vlanNic still being null means we could not
// create it for some reason and no exception was thrown.
throw new CloudRuntimeException("Could not find/create vlan network with name: " + newName);
}
}
final PIF nPif = network.getPif(conn);
final PIF.Record nPifr = network.getPifRecord(conn);
vlanNetwork = vlanNic.getNetwork();
if (vlanNic.getPif(conn) != null) {
return vlanNetwork;
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Creating VLAN " + tag + " on host " + _host.getIp() + " on device " + nPifr.device);
}
final VLAN vlan = VLAN.create(conn, nPif, tag, vlanNetwork);
if (vlan != null) {
final VLAN.Record vlanr = vlan.getRecord(conn);
if (vlanr != null) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("VLAN is created for " + tag + ". The uuid is " + vlanr.uuid);
}
}
}
return vlanNetwork;
}
@Override
public RebootAnswer execute(final RebootCommand cmd) {
throw new CloudRuntimeException("The method has been replaced but the implementation CitrixRebootCommandWrapper. "
+ "Please use the new design in order to keep compatibility. Once all ServerResource implementation are refactored those methods will dissapper.");
}
@Override
public StartAnswer execute(final StartCommand cmd) {
throw new CloudRuntimeException("The method has been replaced but the implementation CitrixStartCommandWrapper. "
+ "Please use the new design in order to keep compatibility. Once all ServerResource implementation are refactored those methods will dissapper.");
}
@Override
public StopAnswer execute(final StopCommand cmd) {
throw new CloudRuntimeException("The method has been replaced but the implementation CitrixStopCommandWrapper. "
+ "Please use the new design in order to keep compatibility. Once all ServerResource implementation are refactored those methods will dissapper.");
}
@Override
public ExecutionResult executeInVR(final String routerIP, final String script, final String args) {
// Timeout is 120 seconds by default
return executeInVR(routerIP, script, args, VRScripts.VR_SCRIPT_EXEC_TIMEOUT);
}
@Override
public ExecutionResult executeInVR(final String routerIP, final String script, final String args, final Duration timeout) {
Pair<Boolean, String> result;
String cmdline = "/opt/cloud/bin/router_proxy.sh " + script + " " + routerIP + " " + args;
// semicolons need to be escaped for bash
cmdline = cmdline.replaceAll(";", "\\\\;");
try {
s_logger.debug("Executing command in VR: " + cmdline);
result = SshHelper.sshExecute(_host.getIp(), 22, _username, null, _password.peek(), cmdline, VRScripts.CONNECTION_TIMEOUT, VRScripts.CONNECTION_TIMEOUT, timeout);
} catch (final Exception e) {
return new ExecutionResult(false, e.getMessage());
}
return new ExecutionResult(result.first(), result.second());
}
@Override
public Answer executeRequest(final Command cmd) {
final CitrixRequestWrapper wrapper = CitrixRequestWrapper.getInstance();
try {
return wrapper.execute(cmd, this);
} catch (final Exception e) {
return Answer.createUnsupportedCommandAnswer(cmd);
}
}
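// fillHostInfo populates the StartupRoutingCommand with what this host reports via XAPI:
// product brand and version, capabilities, CPU speed/sockets/count, and memory. Reported memory
// is total host memory minus dom0's static max and the reserved _xsMemoryUsed, scaled by
// _xsVirtualizationFactor. Private/public/storage addresses are taken from the corresponding
// PIF records, falling back to the configured host IP with a /24 netmask for the private
// network.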
protected void fillHostInfo(final Connection conn, final StartupRoutingCommand cmd) {
final StringBuilder caps = new StringBuilder();
try {
final Host host = Host.getByUuid(conn, _host.getUuid());
final Host.Record hr = host.getRecord(conn);
Map<String, String> details = cmd.getHostDetails();
if (details == null) {
details = new HashMap<String, String>();
}
String productBrand = hr.softwareVersion.get("product_brand");
if (productBrand == null) {
productBrand = hr.softwareVersion.get("platform_name");
}
details.put("product_brand", productBrand);
details.put("product_version", _host.getProductVersion());
if (hr.softwareVersion.get("product_version_text_short") != null) {
details.put("product_version_text_short", hr.softwareVersion.get("product_version_text_short"));
cmd.setHypervisorVersion(hr.softwareVersion.get("product_version_text_short"));
cmd.setHypervisorVersion(_host.getProductVersion());
}
if (_privateNetworkName != null) {
details.put("private.network.device", _privateNetworkName);
}
cmd.setHostDetails(details);
cmd.setName(hr.nameLabel);
cmd.setGuid(_host.getUuid());
cmd.setPool(_host.getPool());
cmd.setDataCenter(Long.toString(_dcId));
for (final String cap : hr.capabilities) {
if (cap.length() > 0) {
caps.append(cap).append(" , ");
}
}
if (caps.length() > 0) {
caps.delete(caps.length() - 3, caps.length());
}
cmd.setCaps(caps.toString());
cmd.setSpeed(_host.getSpeed());
cmd.setCpuSockets(_host.getCpuSockets());
cmd.setCpus(_host.getCpus());
final HostMetrics hm = host.getMetrics(conn);
long ram = 0;
long dom0Ram = 0;
ram = hm.getMemoryTotal(conn);
final Set<VM> vms = host.getResidentVMs(conn);
for (final VM vm : vms) {
if (vm.getIsControlDomain(conn)) {
dom0Ram = vm.getMemoryStaticMax(conn);
break;
}
}
ram = (long)((ram - dom0Ram - _xsMemoryUsed) * _xsVirtualizationFactor);
cmd.setMemory(ram);
cmd.setDom0MinMemory(dom0Ram);
if (s_logger.isDebugEnabled()) {
s_logger.debug("Total Ram: " + ram + " dom0 Ram: " + dom0Ram);
}
PIF pif = PIF.getByUuid(conn, _host.getPrivatePif());
PIF.Record pifr = pif.getRecord(conn);
if (pifr.IP != null && pifr.IP.length() > 0) {
cmd.setPrivateIpAddress(pifr.IP);
cmd.setPrivateMacAddress(pifr.MAC);
cmd.setPrivateNetmask(pifr.netmask);
} else {
cmd.setPrivateIpAddress(_host.getIp());
cmd.setPrivateMacAddress(pifr.MAC);
cmd.setPrivateNetmask("255.255.255.0");
}
pif = PIF.getByUuid(conn, _host.getPublicPif());
pifr = pif.getRecord(conn);
if (pifr.IP != null && pifr.IP.length() > 0) {
cmd.setPublicIpAddress(pifr.IP);
cmd.setPublicMacAddress(pifr.MAC);
cmd.setPublicNetmask(pifr.netmask);
}
if (_host.getStoragePif1() != null) {
pif = PIF.getByUuid(conn, _host.getStoragePif1());
pifr = pif.getRecord(conn);
if (pifr.IP != null && pifr.IP.length() > 0) {
cmd.setStorageIpAddress(pifr.IP);
cmd.setStorageMacAddress(pifr.MAC);
cmd.setStorageNetmask(pifr.netmask);
}
}
if (_host.getStoragePif2() != null) {
pif = PIF.getByUuid(conn, _host.getStoragePif2());
pifr = pif.getRecord(conn);
if (pifr.IP != null && pifr.IP.length() > 0) {
cmd.setStorageIpAddressDeux(pifr.IP);
cmd.setStorageMacAddressDeux(pifr.MAC);
cmd.setStorageNetmaskDeux(pifr.netmask);
}
}
final Map<String, String> configs = hr.otherConfig;
cmd.setIqn(configs.get("iscsi_iqn"));
cmd.setPod(_pod);
cmd.setVersion(CitrixResourceBase.class.getPackage().getImplementationVersion());
try {
final String cmdLine = "xe sm-list | grep \"resigning of duplicates\"";
final XenServerUtilitiesHelper xenServerUtilitiesHelper = getXenServerUtilitiesHelper();
Pair<Boolean, String> result = xenServerUtilitiesHelper.executeSshWrapper(_host.getIp(), 22, _username, null, getPwdFromQueue(), cmdLine);
boolean supportsClonedVolumes = result != null && result.first() != null && result.first() && result.second() != null && result.second().length() > 0;
cmd.setSupportsClonedVolumes(supportsClonedVolumes);
} catch (NumberFormatException ex) {
s_logger.warn("Issue sending 'xe sm-list' via SSH to XenServer host: " + ex.getMessage());
}
} catch (final XmlRpcException e) {
throw new CloudRuntimeException("XML RPC Exception: " + e.getMessage(), e);
} catch (final XenAPIException e) {
throw new CloudRuntimeException("XenAPIException: " + e.toString(), e);
} catch (final Exception e) {
throw new CloudRuntimeException("Exception: " + e.toString(), e);
}
}
protected void finalizeVmMetaData(final VM vm, final Connection conn, final VirtualMachineTO vmSpec) throws Exception {
final Map<String, String> details = vmSpec.getDetails();
if (details != null) {
final String platformstring = details.get(VmDetailConstants.PLATFORM);
if (platformstring != null && !platformstring.isEmpty()) {
final Map<String, String> platform = StringUtils.stringToMap(platformstring);
vm.setPlatform(conn, platform);
} else {
final String timeoffset = details.get(VmDetailConstants.TIME_OFFSET);
if (timeoffset != null) {
final Map<String, String> platform = vm.getPlatform(conn);
platform.put(VmDetailConstants.TIME_OFFSET, timeoffset);
vm.setPlatform(conn, platform);
}
final String coresPerSocket = details.get(VmDetailConstants.CPU_CORE_PER_SOCKET);
if (coresPerSocket != null) {
final Map<String, String> platform = vm.getPlatform(conn);
platform.put("cores-per-socket", coresPerSocket);
vm.setPlatform(conn, platform);
}
}
if (!BootloaderType.CD.equals(vmSpec.getBootloader())) {
final String xenservertoolsversion = details.get(VmDetailConstants.HYPERVISOR_TOOLS_VERSION);
if ((xenservertoolsversion == null || !xenservertoolsversion.equalsIgnoreCase("xenserver61")) && vmSpec.getGpuDevice() == null) {
final Map<String, String> platform = vm.getPlatform(conn);
platform.remove("device_id");
vm.setPlatform(conn, platform);
}
}
}
}
/**
* This method just creates a XenServer network following the tunnel network
* naming convention
*/
public synchronized Network findOrCreateTunnelNetwork(final Connection conn, final String nwName) {
try {
Network nw = null;
final Network.Record rec = new Network.Record();
final Set<Network> networks = Network.getByNameLabel(conn, nwName);
if (networks.size() == 0) {
rec.nameDescription = "tunnel network id# " + nwName;
rec.nameLabel = nwName;
// Initialize the ovs-host-setup to avoid error when doing
// get-param in plugin
final Map<String, String> otherConfig = new HashMap<String, String>();
otherConfig.put("ovs-host-setup", "");
// Mark 'internal network' as shared so bridge gets
// automatically created on each host in the cluster
// when VM with vif connected to this internal network is
// started
otherConfig.put("assume_network_is_shared", "true");
rec.otherConfig = otherConfig;
nw = Network.create(conn, rec);
s_logger.debug("### XenServer network for tunnels created:" + nwName);
} else {
nw = networks.iterator().next();
s_logger.debug("XenServer network for tunnels found:" + nwName);
}
return nw;
} catch (final Exception e) {
s_logger.warn("createTunnelNetwork failed", e);
return null;
}
}
void forceShutdownVM(final Connection conn, final VM vm) {
try {
final Long domId = vm.getDomid(conn);
callHostPlugin(conn, "vmopspremium", "forceShutdownVM", "domId", domId.toString());
vm.powerStateReset(conn);
vm.destroy(conn);
} catch (final Exception e) {
final String msg = "forceShutdown failed due to " + e.toString();
s_logger.warn(msg, e);
throw new CloudRuntimeException(msg);
}
}
protected String generateTimeStamp() {
return new StringBuilder("CsCreateTime-").append(System.currentTimeMillis()).append("-").append(Rand.nextInt(Integer.MAX_VALUE)).toString();
}
@Override
public IAgentControl getAgentControl() {
return _agentControl;
}
protected String getArgsString(final Map<String, String> args) {
final StringBuilder argString = new StringBuilder();
for (final Map.Entry<String, String> arg : args.entrySet()) {
argString.append(arg.getKey() + ": " + arg.getValue() + ", ");
}
return argString.toString();
}
@Override
public Map<String, Object> getConfigParams() {
return null;
}
public Connection getConnection() {
return ConnPool.connect(_host.getUuid(), _host.getPool(), _host.getIp(), _username, _password, _wait);
}
protected Pair<VM, VM.Record> getControlDomain(final Connection conn) throws XenAPIException, XmlRpcException {
final Host host = Host.getByUuid(conn, _host.getUuid());
Set<VM> vms = null;
vms = host.getResidentVMs(conn);
for (final VM vm : vms) {
if (vm.getIsControlDomain(conn)) {
return new Pair<VM, VM.Record>(vm, vm.getRecord(conn));
}
}
throw new CloudRuntimeException("Com'on no control domain? What the crap?!#@!##$@");
}
protected VIF getCorrectVif(final Connection conn, final VM router, final IpAddressTO ip) throws XmlRpcException, XenAPIException {
final NicTO nic = new NicTO();
nic.setType(ip.getTrafficType());
nic.setName(ip.getNetworkName());
if (ip.getBroadcastUri() == null) {
nic.setBroadcastType(BroadcastDomainType.Native);
} else {
final URI uri = BroadcastDomainType.fromString(ip.getBroadcastUri());
nic.setBroadcastType(BroadcastDomainType.getSchemeValue(uri));
nic.setBroadcastUri(uri);
}
final Network network = getNetwork(conn, nic);
// Determine the correct VIF on DomR to associate/disassociate the
// IP address with
final Set<VIF> routerVIFs = router.getVIFs(conn);
for (final VIF vif : routerVIFs) {
final Network vifNetwork = vif.getNetwork(conn);
if (vifNetwork.getUuid(conn).equals(network.getUuid(conn))) {
return vif;
}
}
return null;
}
protected VIF getCorrectVif(final Connection conn, final VM router, final Network network) throws XmlRpcException, XenAPIException {
final Set<VIF> routerVIFs = router.getVIFs(conn);
for (final VIF vif : routerVIFs) {
final Network vifNetwork = vif.getNetwork(conn);
if (vifNetwork.getUuid(conn).equals(network.getUuid(conn))) {
return vif;
}
}
return null;
}
@Override
public PingCommand getCurrentStatus(final long id) {
try {
if (!pingXAPI()) {
Thread.sleep(1000);
if (!pingXAPI()) {
s_logger.warn("can not ping xenserver " + _host.getUuid());
return null;
}
}
final Connection conn = getConnection();
if (!_canBridgeFirewall && !_isOvs) {
return new PingRoutingCommand(getType(), id, getHostVmStateReport(conn));
} else if (_isOvs) {
final List<Pair<String, Long>> ovsStates = ovsFullSyncStates();
return new PingRoutingWithOvsCommand(getType(), id, getHostVmStateReport(conn), ovsStates);
} else {
final HashMap<String, Pair<Long, Long>> nwGrpStates = syncNetworkGroups(conn, id);
return new PingRoutingWithNwGroupsCommand(getType(), id, getHostVmStateReport(conn), nwGrpStates);
}
} catch (final Exception e) {
s_logger.warn("Unable to get current status", e);
return null;
}
}
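// getDataAverage averages one column of an RRD-style XML data node over the last numRows rows,
// skipping NaN samples. If no usable rows are found it returns 0 (the "dummy" value), logging a
// warning when the accumulated value is infinite or NaN.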
protected double getDataAverage(final Node dataNode, final int col, final int numRows) {
double value = 0;
final double dummy = 0;
int numRowsUsed = 0;
for (int row = 0; row < numRows; row++) {
final Node data = dataNode.getChildNodes().item(numRows - 1 - row).getChildNodes().item(col + 1);
final Double currentDataAsDouble = Double.valueOf(getXMLNodeValue(data));
if (!currentDataAsDouble.equals(Double.NaN)) {
numRowsUsed += 1;
value += currentDataAsDouble;
}
}
if (numRowsUsed == 0) {
if (!Double.isInfinite(value) && !Double.isNaN(value)) {
return value;
} else {
s_logger.warn("Found an invalid value (infinity/NaN) in getDataAverage(), numRows=0");
return dummy;
}
} else {
if (!Double.isInfinite(value / numRowsUsed) && !Double.isNaN(value / numRowsUsed)) {
return value / numRowsUsed;
} else {
s_logger.warn("Found an invalid value (infinity/NaN) in getDataAverage(), numRows>0");
return dummy;
}
}
}
public HashMap<String, HashMap<String, VgpuTypesInfo>> getGPUGroupDetails(final Connection conn) throws XenAPIException, XmlRpcException {
return null;
}
protected String getGuestOsType(String platformEmulator) {
if (org.apache.commons.lang.StringUtils.isBlank(platformEmulator)) {
s_logger.debug("no guest OS type, start it as HVM guest");
platformEmulator = "Other install media";
}
return platformEmulator;
}
public XsHost getHost() {
return _host;
}
public int getMigrateWait() {
return _migratewait;
}
public StorageSubsystemCommandHandler getStorageHandler() {
return storageHandler;
}
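/**
* Populates the XsHost object (_host) with CPU, product version and network (private, guest,
* public and storage) information retrieved from the XenServer host. Returns false if the
* information cannot be obtained.
*/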
protected boolean getHostInfo(final Connection conn) throws IllegalArgumentException {
try {
final Host myself = Host.getByUuid(conn, _host.getUuid());
Set<HostCpu> hcs = null;
for (int i = 0; i < 10; i++) {
hcs = myself.getHostCPUs(conn);
if (hcs != null) {
_host.setCpus(hcs.size());
if (_host.getCpus() > 0) {
break;
}
}
Thread.sleep(5000);
}
if (_host.getCpus() <= 0) {
throw new CloudRuntimeException("Cannot get the numbers of cpu from XenServer host " + _host.getIp());
}
final Map<String, String> cpuInfo = myself.getCpuInfo(conn);
if (cpuInfo.get("socket_count") != null) {
_host.setCpuSockets(Integer.parseInt(cpuInfo.get("socket_count")));
}
// If hcs were null, we would have thrown an exception on the
// (_host.getCpus() <= 0) check above.
for (final HostCpu hc : hcs) {
_host.setSpeed(hc.getSpeed(conn).intValue());
break;
}
final Host.Record hr = myself.getRecord(conn);
_host.setProductVersion(CitrixHelper.getProductVersion(hr));
final XsLocalNetwork privateNic = getManagementNetwork(conn);
_privateNetworkName = privateNic.getNetworkRecord(conn).nameLabel;
_host.setPrivatePif(privateNic.getPifRecord(conn).uuid);
_host.setPrivateNetwork(privateNic.getNetworkRecord(conn).uuid);
_host.setSystemvmisouuid(null);
XsLocalNetwork guestNic = null;
if (_guestNetworkName != null && !_guestNetworkName.equals(_privateNetworkName)) {
guestNic = getNetworkByName(conn, _guestNetworkName);
if (guestNic == null) {
s_logger.warn("Unable to find guest network " + _guestNetworkName);
throw new IllegalArgumentException("Unable to find guest network " + _guestNetworkName + " for host " + _host.getIp());
}
} else {
guestNic = privateNic;
_guestNetworkName = _privateNetworkName;
}
_host.setGuestNetwork(guestNic.getNetworkRecord(conn).uuid);
_host.setGuestPif(guestNic.getPifRecord(conn).uuid);
XsLocalNetwork publicNic = null;
if (_publicNetworkName != null && !_publicNetworkName.equals(_guestNetworkName)) {
publicNic = getNetworkByName(conn, _publicNetworkName);
if (publicNic == null) {
s_logger.warn("Unable to find public network " + _publicNetworkName + " for host " + _host.getIp());
throw new IllegalArgumentException("Unable to find public network " + _publicNetworkName + " for host " + _host.getIp());
}
} else {
publicNic = guestNic;
_publicNetworkName = _guestNetworkName;
}
_host.setPublicPif(publicNic.getPifRecord(conn).uuid);
_host.setPublicNetwork(publicNic.getNetworkRecord(conn).uuid);
if (_storageNetworkName1 == null) {
_storageNetworkName1 = _guestNetworkName;
}
XsLocalNetwork storageNic1 = null;
storageNic1 = getNetworkByName(conn, _storageNetworkName1);
if (storageNic1 == null) {
s_logger.warn("Unable to find storage network " + _storageNetworkName1 + " for host " + _host.getIp());
throw new IllegalArgumentException("Unable to find storage network " + _storageNetworkName1 + " for host " + _host.getIp());
} else {
_host.setStorageNetwork1(storageNic1.getNetworkRecord(conn).uuid);
_host.setStoragePif1(storageNic1.getPifRecord(conn).uuid);
}
XsLocalNetwork storageNic2 = null;
if (_storageNetworkName2 != null) {
storageNic2 = getNetworkByName(conn, _storageNetworkName2);
if (storageNic2 != null) {
_host.setStoragePif2(storageNic2.getPifRecord(conn).uuid);
}
}
s_logger.info("XenServer Version is " + _host.getProductVersion() + " for host " + _host.getIp());
s_logger.info("Private Network is " + _privateNetworkName + " for host " + _host.getIp());
s_logger.info("Guest Network is " + _guestNetworkName + " for host " + _host.getIp());
s_logger.info("Public Network is " + _publicNetworkName + " for host " + _host.getIp());
return true;
} catch (final XenAPIException e) {
s_logger.warn("Unable to get host information for " + _host.getIp(), e);
return false;
} catch (final Exception e) {
s_logger.warn("Unable to get host information for " + _host.getIp(), e);
return false;
}
}
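/**
* Collects host statistics (CPU utilization, memory and eth0 network throughput) from the
* host's RRD data and returns them as a HostStatsEntry, or null if no RRD data is available.
*/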
public HostStatsEntry getHostStats(final Connection conn, final GetHostStatsCommand cmd, final String hostGuid, final long hostId) {
final HostStatsEntry hostStats = new HostStatsEntry(hostId, 0, 0, 0, "host", 0, 0, 0, 0);
final Object[] rrdData = getRRDData(conn, 1); // call rrd method with 1
// for host
if (rrdData == null) {
return null;
}
final Integer numRows = (Integer)rrdData[0];
final Integer numColumns = (Integer)rrdData[1];
final Node legend = (Node)rrdData[2];
final Node dataNode = (Node)rrdData[3];
final NodeList legendChildren = legend.getChildNodes();
for (int col = 0; col < numColumns; col++) {
if (legendChildren == null || legendChildren.item(col) == null) {
continue;
}
final String columnMetadata = getXMLNodeValue(legendChildren.item(col));
if (columnMetadata == null) {
continue;
}
final String[] columnMetadataList = columnMetadata.split(":");
if (columnMetadataList.length != 4) {
continue;
}
final String type = columnMetadataList[1];
final String param = columnMetadataList[3];
if (type.equalsIgnoreCase("host")) {
if (param.matches("pif_eth0_rx")) {
hostStats.setNetworkReadKBs(getDataAverage(dataNode, col, numRows) / 1000);
} else if (param.matches("pif_eth0_tx")) {
hostStats.setNetworkWriteKBs(getDataAverage(dataNode, col, numRows) / 1000);
} else if (param.contains("memory_total_kib")) {
hostStats.setTotalMemoryKBs(getDataAverage(dataNode, col, numRows));
} else if (param.contains("memory_free_kib")) {
hostStats.setFreeMemoryKBs(getDataAverage(dataNode, col, numRows));
} else if (param.matches("cpu_avg")) {
// hostStats.setNumCpus(hostStats.getNumCpus() + 1);
hostStats.setCpuUtilization(hostStats.getCpuUtilization() + getDataAverage(dataNode, col, numRows));
}
/*
* if (param.contains("loadavg")) {
* hostStats.setAverageLoad((hostStats.getAverageLoad() +
* getDataAverage(dataNode, col, numRows))); }
*/
}
}
// add the host cpu utilization
/*
* if (hostStats.getNumCpus() != 0) {
* hostStats.setCpuUtilization(hostStats.getCpuUtilization() /
* hostStats.getNumCpus()); s_logger.debug("Host cpu utilization " +
* hostStats.getCpuUtilization()); }
*/
return hostStats;
}
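/**
* Builds a map of VM name-label to power state for all VMs resident on this host, skipping
* the control domain, snapshots and templates.
*/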
protected HashMap<String, HostVmStateReportEntry> getHostVmStateReport(final Connection conn) {
final HashMap<String, HostVmStateReportEntry> vmStates = new HashMap<String, HostVmStateReportEntry>();
Map<VM, VM.Record> vm_map = null;
for (int i = 0; i < 2; i++) {
try {
vm_map = VM.getAllRecords(conn);
break;
} catch (final Throwable e) {
s_logger.warn("Unable to get vms", e);
}
try {
Thread.sleep(1000);
} catch (final InterruptedException ex) {
}
}
if (vm_map == null) {
return vmStates;
}
for (final VM.Record record : vm_map.values()) {
if (record.isControlDomain || record.isASnapshot || record.isATemplate) {
continue; // Skip the control domain (dom0), snapshots and templates
}
final VmPowerState ps = record.powerState;
final Host host = record.residentOn;
String host_uuid = null;
if (!isRefNull(host)) {
try {
host_uuid = host.getUuid(conn);
} catch (final BadServerResponse e) {
s_logger.error("Failed to get host uuid for host " + host.toWireString(), e);
} catch (final XenAPIException e) {
s_logger.error("Failed to get host uuid for host " + host.toWireString(), e);
} catch (final XmlRpcException e) {
s_logger.error("Failed to get host uuid for host " + host.toWireString(), e);
}
if (host_uuid != null && host_uuid.equalsIgnoreCase(_host.getUuid())) {
vmStates.put(record.nameLabel, new HostVmStateReportEntry(convertToPowerState(ps), host_uuid));
}
}
}
return vmStates;
}
public SR getIscsiSR(final Connection conn, final String srNameLabel, final String target, String path, final String chapInitiatorUsername, final String chapInitiatorPassword,
final boolean ignoreIntroduceException) {
return getIscsiSR(conn, srNameLabel, target, path, chapInitiatorUsername, chapInitiatorPassword, false, SRType.LVMOISCSI.toString(), ignoreIntroduceException);
}
public SR getIscsiSR(final Connection conn, final String srNameLabel, final String target, String path, final String chapInitiatorUsername, final String chapInitiatorPassword,
final boolean resignature, final boolean ignoreIntroduceException) {
return getIscsiSR(conn, srNameLabel, target, path, chapInitiatorUsername, chapInitiatorPassword, resignature, SRType.LVMOISCSI.toString(), ignoreIntroduceException);
}
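/**
* Finds or creates an iSCSI SR for the given target and path (expected format: /targetIQN/LUN).
* If a pool UUID can be probed on the LUN, the SR is (optionally resignatured,) introduced and
* plugged on all hosts; otherwise a brand new SR is created. The SR is scanned before returning.
*/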
public SR getIscsiSR(final Connection conn, final String srNameLabel, final String target, String path, final String chapInitiatorUsername, final String chapInitiatorPassword,
final boolean resignature, final String srType, final boolean ignoreIntroduceException) {
synchronized (srNameLabel.intern()) {
final Map<String, String> deviceConfig = new HashMap<String, String>();
try {
if (path.endsWith("/")) {
path = path.substring(0, path.length() - 1);
}
final String tmp[] = path.split("/");
if (tmp.length != 3) {
final String msg = "Wrong iscsi path " + path + " it should be /targetIQN/LUN";
s_logger.warn(msg);
throw new CloudRuntimeException(msg);
}
final String targetiqn = tmp[1].trim();
final String lunid = tmp[2].trim();
String scsiid = "";
//Throws an exception if SR already exists and is attached
checkIfIscsiSrExisits(conn, srNameLabel, target, targetiqn, lunid);
// We now know the SR is not attached to the XenServer. We probe the
// LUN to see if an SR already exists on it; if so, we just
// attach it, otherwise we create a brand new SR.
deviceConfig.put("target", target);
deviceConfig.put("targetIQN", targetiqn);
if (StringUtils.isNotBlank(chapInitiatorUsername) && StringUtils.isNotBlank(chapInitiatorPassword)) {
deviceConfig.put("chapuser", chapInitiatorUsername);
deviceConfig.put("chappassword", chapInitiatorPassword);
}
final Host host = Host.getByUuid(conn, _host.getUuid());
final Map<String, String> smConfig = new HashMap<String, String>();
SR sr = null;
String pooluuid = null;
if (SRType.LVMOISCSI.equals(srType)) {
scsiid = probeScisiId(conn, host, deviceConfig, srType, srNameLabel, lunid, smConfig);
deviceConfig.put("SCSIid", scsiid);
String result = SR.probe(conn, host, deviceConfig, srType, smConfig);
if (result.indexOf("<UUID>") != -1) {
pooluuid = result.substring(result.indexOf("<UUID>") + 6, result.indexOf("</UUID>")).trim();
}
}
if (pooluuid == null || pooluuid.length() != 36) {
sr = SR.create(conn, host, deviceConfig, new Long(0), srNameLabel, srNameLabel, srType, "user", true, smConfig);
} else {
if (resignature) {
// We resignature the SR for managed storage if needed. At the end of this
// we have an SR which is ready to be attached. For VHDoISCSI SR,
// we don't need to resignature
pooluuid = resignatureIscsiSr(conn, host, deviceConfig, srNameLabel, smConfig);
}
sr = introduceAndPlugIscsiSr(conn, pooluuid, srNameLabel, srType, smConfig, deviceConfig, ignoreIntroduceException);
}
sr.scan(conn);
return sr;
} catch (final XenAPIException e) {
final String msg = "Unable to create Iscsi SR " + deviceConfig + " due to " + e.toString();
s_logger.warn(msg, e);
throw new CloudRuntimeException(msg, e);
} catch (final Exception e) {
final String msg = "Unable to create Iscsi SR " + deviceConfig + " due to " + e.getMessage();
s_logger.warn(msg, e);
throw new CloudRuntimeException(msg, e);
}
}
}
private SR introduceAndPlugIscsiSr(Connection conn, String pooluuid, String srNameLabel, String type, Map<String, String> smConfig, Map<String, String> deviceConfig,
boolean ignoreIntroduceException) throws XmlRpcException, XenAPIException {
SR sr = null;
try {
sr = SR.introduce(conn, pooluuid, srNameLabel, srNameLabel, type, "user", true, smConfig);
} catch (final XenAPIException ex) {
if (ignoreIntroduceException) {
return sr;
}
throw ex;
}
final Set<Host> setHosts = Host.getAll(conn);
if (setHosts == null) {
final String msg = "Unable to create iSCSI SR " + deviceConfig + " due to hosts not available.";
s_logger.warn(msg);
throw new CloudRuntimeException(msg);
}
for (final Host currentHost : setHosts) {
final PBD.Record rec = new PBD.Record();
rec.deviceConfig = deviceConfig;
rec.host = currentHost;
rec.SR = sr;
final PBD pbd = PBD.create(conn, rec);
pbd.plug(conn);
}
return sr;
}
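/**
* Resignatures the SR metadata for managed storage by issuing SR.create with the RELVMOISCSI
* type (which is expected to fail with a "successfully resigned" XenAPIException) and then
* probing for the resulting pool UUID, which is returned.
*/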
private String resignatureIscsiSr(Connection conn, Host host, Map<String, String> deviceConfig, String srNameLabel, Map<String, String> smConfig) throws XmlRpcException, XenAPIException {
String pooluuid;
try {
SR.create(conn, host, deviceConfig, new Long(0), srNameLabel, srNameLabel, SRType.RELVMOISCSI.toString(), "user", true, smConfig);
// The successful outcome of SR.create (right above) is to throw an exception of type XenAPIException (with expected
// toString() text) after resigning the metadata (we indicated to perform a resign by passing in SRType.RELVMOISCSI.toString()).
// That being the case, if this CloudRuntimeException statement is executed, there appears to have been some kind
// of failure in the execution of the above SR.create (resign) method.
throw new CloudRuntimeException("Problem resigning the metadata");
} catch (XenAPIException ex) {
String msg = ex.toString();
if (!msg.contains("successfully resigned")) {
throw ex;
}
String type = SRType.LVMOISCSI.toString();
String result = SR.probe(conn, host, deviceConfig, type, smConfig);
pooluuid = null;
if (result.indexOf("<UUID>") != -1) {
pooluuid = result.substring(result.indexOf("<UUID>") + 6, result.indexOf("</UUID>")).trim();
}
if (pooluuid == null || pooluuid.length() != 36) {
throw new CloudRuntimeException("Non-existent or invalid SR UUID");
}
}
return pooluuid;
}
private void checkIfIscsiSrExisits(Connection conn, String srNameLabel, String target, String targetiqn, String lunid) throws XenAPIException, XmlRpcException {
final Set<SR> srs = SR.getByNameLabel(conn, srNameLabel);
for (final SR sr : srs) {
if (!(SRType.LVMOISCSI.equals(sr.getType(conn)))) {
continue;
}
final Set<PBD> pbds = sr.getPBDs(conn);
if (pbds.isEmpty()) {
continue;
}
final PBD pbd = pbds.iterator().next();
final Map<String, String> dc = pbd.getDeviceConfig(conn);
if (dc == null) {
continue;
}
if (dc.get("target") == null) {
continue;
}
if (dc.get("targetIQN") == null) {
continue;
}
if (dc.get("lunid") == null) {
continue;
}
if (target.equals(dc.get("target")) && targetiqn.equals(dc.get("targetIQN")) && lunid.equals(dc.get("lunid"))) {
throw new CloudRuntimeException("There is a SR using the same configuration target:" + dc.get("target") + ", targetIQN:" + dc.get("targetIQN") + ", lunid:" + dc.get("lunid")
+ " for pool " + srNameLabel + "on host:" + _host.getUuid());
}
}
}
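/**
* Determines the SCSI id of the given LUN by issuing SR.create and parsing the expected
* SR_BACKEND_FAILURE_107 error, which lists the LUNs and their SCSI ids.
*/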
private String probeScisiId(Connection conn, Host host, Map<String, String> deviceConfig, String type, String srNameLabel, String lunid, Map<String, String> smConfig)
throws XenAPIException, XmlRpcException {
String scsiid = null;
try {
SR.create(conn, host, deviceConfig, new Long(0), srNameLabel, srNameLabel, type, "user", true, smConfig);
} catch (final XenAPIException e) {
final String errmsg = e.toString();
if (errmsg.contains("SR_BACKEND_FAILURE_107")) {
final String lun[] = errmsg.split("<LUN>");
boolean found = false;
for (int i = 1; i < lun.length; i++) {
final int blunindex = lun[i].indexOf("<LUNid>") + 7;
final int elunindex = lun[i].indexOf("</LUNid>");
String ilun = lun[i].substring(blunindex, elunindex);
ilun = ilun.trim();
if (ilun.equals(lunid)) {
final int bscsiindex = lun[i].indexOf("<SCSIid>") + 8;
final int escsiindex = lun[i].indexOf("</SCSIid>");
scsiid = lun[i].substring(bscsiindex, escsiindex);
scsiid = scsiid.trim();
found = true;
break;
}
}
if (!found) {
final String msg = "can not find LUN " + lunid + " in " + errmsg;
s_logger.warn(msg);
throw new CloudRuntimeException(msg);
}
} else {
final String msg = "Unable to create Iscsi SR " + deviceConfig + " due to " + e.toString();
s_logger.warn(msg, e);
throw new CloudRuntimeException(msg, e);
}
}
return scsiid;
}
public SR getISOSRbyVmName(final Connection conn, final String vmName) {
try {
final Set<SR> srs = SR.getByNameLabel(conn, vmName + "-ISO");
if (srs.size() == 0) {
return null;
} else if (srs.size() == 1) {
return srs.iterator().next();
} else {
final String msg = "getIsoSRbyVmName failed due to there are more than 1 SR having same Label";
s_logger.warn(msg);
}
} catch (final XenAPIException e) {
final String msg = "getIsoSRbyVmName failed due to " + e.toString();
s_logger.warn(msg, e);
} catch (final Exception e) {
final String msg = "getIsoSRbyVmName failed due to " + e.getMessage();
s_logger.warn(msg, e);
}
return null;
}
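/**
* Resolves an ISO VDI from its URL. For "xs-tools" ISOs the guest tools VDI is looked up by
* name-label; otherwise the ISO SR for the VM is located (or created from the URL's mount
* point) and the VDI is searched by location within it.
*/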
public VDI getIsoVDIByURL(final Connection conn, final String vmName, final String isoURL) {
SR isoSR = null;
String mountpoint = null;
if (isoURL.startsWith("xs-tools")) {
try {
final String actualIsoURL = getActualIsoTemplate(conn);
final Set<VDI> vdis = VDI.getByNameLabel(conn, actualIsoURL);
if (vdis.isEmpty()) {
throw new CloudRuntimeException("Could not find ISO with URL: " + actualIsoURL);
}
return vdis.iterator().next();
} catch (final XenAPIException e) {
throw new CloudRuntimeException("Unable to get pv iso: " + isoURL + " due to " + e.toString());
} catch (final Exception e) {
throw new CloudRuntimeException("Unable to get pv iso: " + isoURL + " due to " + e.toString());
}
}
final int index = isoURL.lastIndexOf("/");
mountpoint = isoURL.substring(0, index);
URI uri;
try {
uri = new URI(mountpoint);
} catch (final URISyntaxException e) {
throw new CloudRuntimeException("isoURL is wrong: " + isoURL);
}
isoSR = getISOSRbyVmName(conn, vmName);
if (isoSR == null) {
isoSR = createIsoSRbyURI(conn, uri, vmName, false);
}
final String isoName = isoURL.substring(index + 1);
final VDI isoVDI = getVDIbyLocationandSR(conn, isoName, isoSR);
if (isoVDI != null) {
return isoVDI;
} else {
throw new CloudRuntimeException("Could not find ISO with URL: " + isoURL);
}
}
/**
* Retrieve the actual ISO 'name-label' to be used.
* We base our decision on the XenServer version.
* <ul>
* <li> for XenServer 7.0+, we use {@value #xenServer70plusGuestToolsName};
* <li> for versions before 7.0, we use {@value #xenServerBefore70GuestToolsName}.
* </ul>
*
* For XCP we always use {@value #xenServerBefore70GuestToolsName}.
*/
protected String getActualIsoTemplate(Connection conn) throws XenAPIException, XmlRpcException {
Host host = Host.getByUuid(conn, _host.getUuid());
Host.Record record = host.getRecord(conn);
String xenBrand = record.softwareVersion.get("product_brand");
String xenVersion = record.softwareVersion.get("product_version");
String[] items = xenVersion.split("\\.");
if ((xenBrand.equals("XenServer") || xenBrand.equals("XCP-ng")) && Integer.parseInt(items[0]) >= 7) {
return xenServer70plusGuestToolsName;
}
return xenServerBefore70GuestToolsName;
}
public String getLabel() {
final Connection conn = getConnection();
final String result = callHostPlugin(conn, "ovstunnel", "getLabel");
return result;
}
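/**
* Returns the lowest unused VIF device number for the given VM. Unplugged VIFs on user VMs
* are destroyed along the way so their device numbers can be reused.
*/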
public String getLowestAvailableVIFDeviceNum(final Connection conn, final VM vm) {
String vmName = "";
try {
vmName = vm.getNameLabel(conn);
final List<Integer> usedDeviceNums = new ArrayList<Integer>();
final Set<VIF> vifs = vm.getVIFs(conn);
final Iterator<VIF> vifIter = vifs.iterator();
while (vifIter.hasNext()) {
final VIF vif = vifIter.next();
try {
final String deviceId = vif.getDevice(conn);
if (vm.getIsControlDomain(conn) || vif.getCurrentlyAttached(conn)) {
usedDeviceNums.add(Integer.valueOf(deviceId));
} else {
s_logger.debug("Found unplugged VIF " + deviceId + " in VM " + vmName + " destroy it");
vif.destroy(conn);
}
} catch (final NumberFormatException e) {
final String msg = "Obtained an invalid value for an allocated VIF device number for VM: " + vmName;
s_logger.debug(msg, e);
throw new CloudRuntimeException(msg);
}
}
for (Integer i = 0; i < _maxNics; i++) {
if (!usedDeviceNums.contains(i)) {
s_logger.debug("Lowest available Vif device number: " + i + " for VM: " + vmName);
return i.toString();
}
}
} catch (final XmlRpcException e) {
final String msg = "Caught XmlRpcException: " + e.getMessage();
s_logger.warn(msg, e);
} catch (final XenAPIException e) {
final String msg = "Caught XenAPIException: " + e.toString();
s_logger.warn(msg, e);
}
throw new CloudRuntimeException("Could not find available VIF slot in VM with name: " + vmName);
}
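/**
* Finds the management network of this host by looking for the management PIF. Throws if the
* management interface is on a VLAN or on a bond slave, since those configurations are not
* supported.
*/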
protected XsLocalNetwork getManagementNetwork(final Connection conn) throws XmlRpcException, XenAPIException {
PIF mgmtPif = null;
PIF.Record mgmtPifRec = null;
final Host host = Host.getByUuid(conn, _host.getUuid());
final Set<PIF> hostPifs = host.getPIFs(conn);
for (final PIF pif : hostPifs) {
final PIF.Record rec = pif.getRecord(conn);
if (rec.management) {
if (rec.VLAN != null && rec.VLAN != -1) {
final String msg = new StringBuilder("Unsupported configuration. Management network is on a VLAN. host=").append(_host.getUuid()).append("; pif=").append(rec.uuid)
.append("; vlan=").append(rec.VLAN).toString();
s_logger.warn(msg);
throw new CloudRuntimeException(msg);
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Management network is on pif=" + rec.uuid);
}
mgmtPif = pif;
mgmtPifRec = rec;
break;
}
}
if (mgmtPif == null) {
final String msg = "Unable to find management network for " + _host.getUuid();
s_logger.warn(msg);
throw new CloudRuntimeException(msg);
}
final Bond bond = mgmtPifRec.bondSlaveOf;
if (!isRefNull(bond)) {
final String msg = "Management interface is on slave(" + mgmtPifRec.uuid + ") of bond(" + bond.getUuid(conn) + ") on host(" + _host.getUuid()
+ "), please move management interface to bond!";
s_logger.warn(msg);
throw new CloudRuntimeException(msg);
}
final Network nk = mgmtPifRec.network;
final Network.Record nkRec = nk.getRecord(conn);
return new XsLocalNetwork(this, nk, nkRec, mgmtPif, mgmtPifRec);
}
@Override
public String getName() {
return _name;
}
public XsLocalNetwork getNativeNetworkForTraffic(final Connection conn, final TrafficType type, final String name) throws XenAPIException, XmlRpcException {
if (name != null) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Looking for network named " + name);
}
return getNetworkByName(conn, name);
}
if (type == TrafficType.Guest) {
return new XsLocalNetwork(this, Network.getByUuid(conn, _host.getGuestNetwork()), null, PIF.getByUuid(conn, _host.getGuestPif()), null);
} else if (type == TrafficType.Control) {
setupLinkLocalNetwork(conn);
return new XsLocalNetwork(this, Network.getByUuid(conn, _host.getLinkLocalNetwork()));
} else if (type == TrafficType.Management) {
return new XsLocalNetwork(this, Network.getByUuid(conn, _host.getPrivateNetwork()), null, PIF.getByUuid(conn, _host.getPrivatePif()), null);
} else if (type == TrafficType.Public) {
return new XsLocalNetwork(this, Network.getByUuid(conn, _host.getPublicNetwork()), null, PIF.getByUuid(conn, _host.getPublicPif()), null);
} else if (type == TrafficType.Storage) {
/*
* TrafficType.Storage is for secondary storage, while
* storageNetwork1 is for primary storage, we need better name here
*/
return new XsLocalNetwork(this, Network.getByUuid(conn, _host.getStorageNetwork1()), null, PIF.getByUuid(conn, _host.getStoragePif1()), null);
}
throw new CloudRuntimeException("Unsupported network type: " + type);
}
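/**
* Resolves the XenServer Network to use for the given NIC, based on its traffic type and
* broadcast domain type (native, VLAN, vSwitch/tunnel, storage, Lswitch or PVLAN).
*/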
public Network getNetwork(final Connection conn, final NicTO nic) throws XenAPIException, XmlRpcException {
final String name = nic.getName();
final XsLocalNetwork network = getNativeNetworkForTraffic(conn, nic.getType(), name);
if (network == null) {
s_logger.error("Network is not configured on the backend for nic " + nic.toString());
throw new CloudRuntimeException("Network for the backend is not configured correctly for network broadcast domain: " + nic.getBroadcastUri());
}
final URI uri = nic.getBroadcastUri();
final BroadcastDomainType type = nic.getBroadcastType();
if (uri != null && uri.toString().contains("untagged")) {
return network.getNetwork();
} else if (uri != null && type == BroadcastDomainType.Vlan) {
assert BroadcastDomainType.getSchemeValue(uri) == BroadcastDomainType.Vlan;
final long vlan = Long.parseLong(BroadcastDomainType.getValue(uri));
return enableVlanNetwork(conn, vlan, network);
} else if (type == BroadcastDomainType.Native || type == BroadcastDomainType.LinkLocal) {
return network.getNetwork();
} else if (uri != null && type == BroadcastDomainType.Vswitch) {
final String header = uri.toString().substring(Networks.BroadcastDomainType.Vswitch.scheme().length() + "://".length());
if (header.startsWith("vlan")) {
_isOvs = true;
return setupvSwitchNetwork(conn);
} else {
return findOrCreateTunnelNetwork(conn, getOvsTunnelNetworkName(uri.getAuthority()));
}
} else if (type == BroadcastDomainType.Storage) {
if (uri == null) {
return network.getNetwork();
} else {
final long vlan = Long.parseLong(BroadcastDomainType.getValue(uri));
return enableVlanNetwork(conn, vlan, network);
}
} else if (type == BroadcastDomainType.Lswitch) {
// Nicira Logical Switch
return network.getNetwork();
} else if (uri != null && type == BroadcastDomainType.Pvlan) {
assert BroadcastDomainType.getSchemeValue(uri) == BroadcastDomainType.Pvlan;
// should we consider moving this NetUtils method to
// BroadcastDomainType?
final long vlan = Long.parseLong(NetUtils.getPrimaryPvlanFromUri(uri));
return enableVlanNetwork(conn, vlan, network);
}
throw new CloudRuntimeException("Unable to support this type of network broadcast domain: " + nic.getBroadcastUri());
}
/**
* getNetworkByName() retrieves what the server thinks is the actual network
* used by the XenServer host. This method should always be used to
* retrieve a network by name, because of the problems in using the
* name label as the way to find the Network.
*
* To see how we are working around these problems, take a look at
* enableVlanNetwork(). The following description assumes you have looked at
* the description on that method.
*
* In order to understand this, we have to see what type of networks are
* within a XenServer that's under CloudStack control.
*
* - Native Networks: these are networks that are untagged on the XenServer
* and are used to create VLAN networks on. These are created by the user and
* are assumed to be one per cluster. - VLAN Networks: these are dynamically
* created by CloudStack and can have problems with duplicated names. -
* LinkLocal Networks: these are dynamically created by CloudStack and can
* also have problems with duplicated names but these don't have actual
* PIFs.
*
* In order to speed up the retrieval of a network, we do the following: - We
* retrieve by the name. If only one network is retrieved, we assume we
* retrieved the right network. - If more than one network is retrieved, we
* check to see which one has the pif for the local host and use that. - If
* a pif is not found, then we look at the tags and find the one with the
* lowest timestamp. (See enableVlanNetwork())
*
* @param conn
* Xapi connection
* @param name
* name of the network
* @return XsNic an object that contains network, network record, pif, and
* pif record.
* @throws XenAPIException
* @throws XmlRpcException
*
* @see CitrixResourceBase#enableVlanNetwork
*/
public XsLocalNetwork getNetworkByName(final Connection conn, final String name) throws XenAPIException, XmlRpcException {
final Set<Network> networks = Network.getByNameLabel(conn, name);
if (networks.size() == 1) {
return new XsLocalNetwork(this, networks.iterator().next(), null, null, null);
}
if (networks.size() == 0) {
return null;
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Found more than one network with the name " + name);
}
Network earliestNetwork = null;
Network.Record earliestNetworkRecord = null;
long earliestTimestamp = Long.MAX_VALUE;
int earliestRandom = Integer.MAX_VALUE;
for (final Network network : networks) {
final XsLocalNetwork nic = new XsLocalNetwork(this, network);
if (nic.getPif(conn) != null) {
return nic;
}
final Network.Record record = network.getRecord(conn);
if (record.tags != null) {
for (final String tag : record.tags) {
final Pair<Long, Integer> stamp = parseTimestamp(tag);
if (stamp == null) {
continue;
}
if (stamp.first() < earliestTimestamp || stamp.first() == earliestTimestamp && stamp.second() < earliestRandom) {
earliestTimestamp = stamp.first();
earliestRandom = stamp.second();
earliestNetwork = network;
earliestNetworkRecord = record;
}
}
}
}
return earliestNetwork != null ? new XsLocalNetwork(this, earliestNetwork, earliestNetworkRecord, null, null) : null;
}
public long[] getNetworkStats(final Connection conn, final String privateIP) {
final String result = networkUsage(conn, privateIP, "get", null);
final long[] stats = new long[2];
if (result != null) {
final String[] splitResult = result.split(":");
int i = 0;
while (i < splitResult.length - 1) {
stats[0] += Long.parseLong(splitResult[i++]);
stats[1] += Long.parseLong(splitResult[i++]);
}
}
return stats;
}
public SR getNfsSR(final Connection conn, final String poolid, final String uuid, final String server, String serverpath, final String pooldesc) {
final Map<String, String> deviceConfig = new HashMap<String, String>();
try {
serverpath = serverpath.replace("//", "/");
final Set<SR> srs = SR.getAll(conn);
if (srs != null && !srs.isEmpty()) {
for (final SR sr : srs) {
if (!SRType.NFS.equals(sr.getType(conn))) {
continue;
}
final Set<PBD> pbds = sr.getPBDs(conn);
if (pbds.isEmpty()) {
continue;
}
final PBD pbd = pbds.iterator().next();
final Map<String, String> dc = pbd.getDeviceConfig(conn);
if (dc == null) {
continue;
}
if (dc.get("server") == null) {
continue;
}
if (dc.get("serverpath") == null) {
continue;
}
if (server.equals(dc.get("server")) && serverpath.equals(dc.get("serverpath"))) {
throw new CloudRuntimeException(
"There is a SR using the same configuration server:" + dc.get("server") + ", serverpath:" + dc.get("serverpath") + " for pool " + uuid + " on host:" + _host.getUuid());
}
}
}
deviceConfig.put("server", server);
deviceConfig.put("serverpath", serverpath);
final Host host = Host.getByUuid(conn, _host.getUuid());
final Map<String, String> smConfig = new HashMap<String, String>();
smConfig.put("nosubdir", "true");
final SR sr = SR.create(conn, host, deviceConfig, new Long(0), uuid, poolid, SRType.NFS.toString(), "user", true, smConfig);
sr.scan(conn);
return sr;
} catch (final XenAPIException e) {
throw new CloudRuntimeException("Unable to create NFS SR " + pooldesc, e);
} catch (final XmlRpcException e) {
throw new CloudRuntimeException("Unable to create NFS SR " + pooldesc, e);
}
}
private String getOvsTunnelNetworkName(final String broadcastUri) {
if (broadcastUri.contains(".")) {
final String[] parts = broadcastUri.split("\\.");
return "OVS-DR-VPC-Bridge" + parts[0];
} else {
return "OVSTunnel" + broadcastUri;
}
}
protected List<File> getPatchFiles() {
String patch = getPatchFilePath();
String patchfilePath = Script.findScript("", patch);
if (patchfilePath == null) {
throw new CloudRuntimeException("Unable to find patch file " + patch);
}
List<File> files = new ArrayList<File>();
files.add(new File(patchfilePath));
return files;
}
protected abstract String getPatchFilePath();
public String getPerfMon(final Connection conn, final Map<String, String> params, final int wait) {
String result = null;
try {
result = callHostPluginAsync(conn, "vmopspremium", "asmonitor", 60, params);
if (result != null) {
return result;
}
} catch (final Exception e) {
s_logger.error("Can not get performance monitor for AS due to ", e);
}
return null;
}
protected Object[] getRRDData(final Connection conn, final int flag) {
/*
* Note: 1 => called from host, hence host stats 2 => called from vm,
* hence vm stats
*/
Document doc = null;
try {
doc = getStatsRawXML(conn, flag == 1);
} catch (final Exception e1) {
s_logger.warn("Error whilst collecting raw stats from plugin: ", e1);
return null;
}
if (doc == null) { // stats are null when the host plugin call fails
// (host down state)
return null;
}
final NodeList firstLevelChildren = doc.getChildNodes();
final NodeList secondLevelChildren = firstLevelChildren.item(0).getChildNodes();
final Node metaNode = secondLevelChildren.item(0);
final Node dataNode = secondLevelChildren.item(1);
Integer numRows = 0;
Integer numColumns = 0;
Node legend = null;
final NodeList metaNodeChildren = metaNode.getChildNodes();
for (int i = 0; i < metaNodeChildren.getLength(); i++) {
final Node n = metaNodeChildren.item(i);
if (n.getNodeName().equals("rows")) {
numRows = Integer.valueOf(getXMLNodeValue(n));
} else if (n.getNodeName().equals("columns")) {
numColumns = Integer.valueOf(getXMLNodeValue(n));
} else if (n.getNodeName().equals("legend")) {
legend = n;
}
}
return new Object[] {numRows, numColumns, legend, dataNode};
}
@Override
public int getRunLevel() {
return 0;
}
protected SR getSRByNameLabelandHost(final Connection conn, final String name) throws BadServerResponse, XenAPIException, XmlRpcException {
final Set<SR> srs = SR.getByNameLabel(conn, name);
SR ressr = null;
for (final SR sr : srs) {
Set<PBD> pbds;
pbds = sr.getPBDs(conn);
for (final PBD pbd : pbds) {
final PBD.Record pbdr = pbd.getRecord(conn);
if (pbdr.host != null && pbdr.host.getUuid(conn).equals(_host.getUuid())) {
if (!pbdr.currentlyAttached) {
pbd.plug(conn);
}
ressr = sr;
break;
}
}
}
return ressr;
}
private long getStaticMax(final String os, final boolean b, final long dynamicMinRam, final long dynamicMaxRam, final long recommendedValue) {
if (recommendedValue == 0) {
s_logger.warn("No recommended value found for dynamic max, setting static max and dynamic max equal");
return dynamicMaxRam;
}
final long staticMax = Math.min(recommendedValue, 4L * dynamicMinRam); // XenServer constraint for stability
if (dynamicMaxRam > staticMax) { // XenServer constraint: dynamic max <= static max
s_logger.warn("Dynamic max " + dynamicMaxRam + " cannot be greater than static max " + staticMax + "; this can lead to stability issues. Setting static max equal to dynamic max.");
return dynamicMaxRam;
}
return staticMax;
}
private long getStaticMin(final String os, final boolean b, final long dynamicMinRam, final long dynamicMaxRam, final long recommendedValue) {
if (recommendedValue == 0) {
s_logger.warn("No recommended value found for dynamic min");
return dynamicMinRam;
}
if (dynamicMinRam < recommendedValue) { // XenServer constraint: dynamic min >= static min
s_logger.warn("VM is set to dynamic min " + dynamicMinRam + ", less than the recommended static min " + recommendedValue + "; this could lead to stability issues");
}
return dynamicMinRam;
}
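/**
* Fetches the raw RRD statistics XML from the host's /rrd_updates endpoint (host or VM stats
* depending on the flag) and parses it into a DOM Document, or returns null on failure.
*/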
protected Document getStatsRawXML(final Connection conn, final boolean host) {
final Date currentDate = new Date();
String urlStr = "http://" + _host.getIp() + "/rrd_updates?";
urlStr += "session_id=" + conn.getSessionReference();
urlStr += "&host=" + (host ? "true" : "false");
urlStr += "&cf=" + _consolidationFunction;
urlStr += "&interval=" + _pollingIntervalInSeconds;
urlStr += "&start=" + (currentDate.getTime() / 1000 - 1000 - 100);
URL url;
BufferedReader in = null;
try {
url = new URL(urlStr);
final URLConnection uc = url.openConnection();
in = new BufferedReader(new InputStreamReader(uc.getInputStream()));
final InputSource statsSource = new InputSource(in);
return DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(statsSource);
} catch (final MalformedURLException e) {
s_logger.warn("Malformed URL? come on...." + urlStr);
return null;
} catch (final IOException e) {
s_logger.warn("Problems getting stats using " + urlStr, e);
return null;
} catch (final SAXException e) {
s_logger.warn("Problems getting stats using " + urlStr, e);
return null;
} catch (final ParserConfigurationException e) {
s_logger.warn("Problems getting stats using " + urlStr, e);
return null;
} finally {
if (in != null) {
try {
in.close();
} catch (final IOException e) {
s_logger.warn("Unable to close the buffer ", e);
}
}
}
}
public SR getStorageRepository(final Connection conn, final String srNameLabel) {
Set<SR> srs;
try {
srs = SR.getByNameLabel(conn, srNameLabel);
} catch (final XenAPIException e) {
throw new CloudRuntimeException("Unable to get SR " + srNameLabel + " due to " + e.toString(), e);
} catch (final Exception e) {
throw new CloudRuntimeException("Unable to get SR " + srNameLabel + " due to " + e.getMessage(), e);
}
if (srs.size() > 1) {
throw new CloudRuntimeException("More than one storage repository was found for pool with uuid: " + srNameLabel);
} else if (srs.size() == 1) {
final SR sr = srs.iterator().next();
if (s_logger.isDebugEnabled()) {
s_logger.debug("SR retrieved for " + srNameLabel);
}
if (checkSR(conn, sr)) {
return sr;
}
throw new CloudRuntimeException("SR check failed for storage pool: " + srNameLabel + "on host:" + _host.getUuid());
} else {
throw new CloudRuntimeException("Can not see storage pool: " + srNameLabel + " from on host:" + _host.getUuid());
}
}
protected Storage.StorageResourceType getStorageResourceType() {
return Storage.StorageResourceType.STORAGE_POOL;
}
@Override
public Type getType() {
return com.cloud.host.Host.Type.Routing;
}
protected VDI getVDIbyLocationandSR(final Connection conn, final String loc, final SR sr) {
try {
final Set<VDI> vdis = sr.getVDIs(conn);
for (final VDI vdi : vdis) {
if (vdi.getLocation(conn).startsWith(loc)) {
return vdi;
}
}
final String msg = "can not getVDIbyLocationandSR " + loc;
s_logger.warn(msg);
return null;
} catch (final XenAPIException e) {
final String msg = "getVDIbyLocationandSR exception " + loc + " due to " + e.toString();
s_logger.warn(msg, e);
throw new CloudRuntimeException(msg, e);
} catch (final Exception e) {
final String msg = "getVDIbyLocationandSR exception " + loc + " due to " + e.getMessage();
s_logger.warn(msg, e);
throw new CloudRuntimeException(msg, e);
}
}
public VDI getVDIbyUuid(final Connection conn, final String uuid) {
return getVDIbyUuid(conn, uuid, true);
}
public VDI getVDIbyUuid(final Connection conn, final String uuid, final boolean throwExceptionIfNotFound) {
try {
return VDI.getByUuid(conn, uuid);
} catch (final Exception e) {
if (throwExceptionIfNotFound) {
final String msg = "Catch Exception " + e.getClass().getName() + " :VDI getByUuid for uuid: " + uuid + " failed due to " + e.toString();
s_logger.debug(msg);
throw new CloudRuntimeException(msg, e);
}
return null;
}
}
public String getVhdParent(final Connection conn, final String primaryStorageSRUuid, final String snapshotUuid, final Boolean isISCSI) {
final String parentUuid = callHostPlugin(conn, "vmopsSnapshot", "getVhdParent", "primaryStorageSRUuid", primaryStorageSRUuid, "snapshotUuid", snapshotUuid, "isISCSI", isISCSI.toString());
if (parentUuid == null || parentUuid.isEmpty() || parentUuid.equalsIgnoreCase("None")) {
s_logger.debug("Unable to get parent of VHD " + snapshotUuid + " in SR " + primaryStorageSRUuid);
// errString is already logged.
return null;
}
return parentUuid;
}
public VIF getVifByMac(final Connection conn, final VM router, String mac) throws XmlRpcException, XenAPIException {
final Set<VIF> routerVIFs = router.getVIFs(conn);
mac = mac.trim();
for (final VIF vif : routerVIFs) {
final String lmac = vif.getMAC(conn);
if (lmac.trim().equals(mac)) {
return vif;
}
}
return null;
}
public VirtualRoutingResource getVirtualRoutingResource() {
return _vrResource;
}
public VM getVM(final Connection conn, final String vmName) {
// Look up VMs with the specified name
Set<VM> vms;
try {
vms = VM.getByNameLabel(conn, vmName);
} catch (final XenAPIException e) {
throw new CloudRuntimeException("Unable to get " + vmName + ": " + e.toString(), e);
} catch (final Exception e) {
throw new CloudRuntimeException("Unable to get " + vmName + ": " + e.getMessage(), e);
}
// If there are no VMs, throw an exception
if (vms.size() == 0) {
throw new CloudRuntimeException("VM with name: " + vmName + " does not exist.");
}
// If there is more than one VM, print a warning
if (vms.size() > 1) {
s_logger.warn("Found " + vms.size() + " VMs with name: " + vmName);
}
// Return the first VM in the set
return vms.iterator().next();
}
public String getVMInstanceName() {
return _instance;
}
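/**
* Calculates the total physical size of a VM snapshot chain for the given volume, adding the
* snapshot VDIs, their VHD parents and, for ROOT volumes, any memory VDIs of the VM snapshot.
*/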
public long getVMSnapshotChainSize(final Connection conn, final VolumeObjectTO volumeTo, final String vmName, final String vmSnapshotName)
throws BadServerResponse, XenAPIException, XmlRpcException {
if (volumeTo.getVolumeType() == Volume.Type.DATADISK) {
final VDI dataDisk = VDI.getByUuid(conn, volumeTo.getPath());
if (dataDisk != null) {
final String dataDiskName = dataDisk.getNameLabel(conn);
if (dataDiskName != null && !dataDiskName.isEmpty()) {
volumeTo.setName(dataDiskName);
}
}
}
final Set<VDI> allvolumeVDIs = VDI.getByNameLabel(conn, volumeTo.getName());
long size = 0;
for (final VDI vdi : allvolumeVDIs) {
try {
if (vdi.getIsASnapshot(conn) && vdi.getSmConfig(conn).get("vhd-parent") != null) {
final String parentUuid = vdi.getSmConfig(conn).get("vhd-parent");
final VDI parentVDI = VDI.getByUuid(conn, parentUuid);
// add size of snapshot vdi node, usually this only contains
// meta data
size = size + vdi.getPhysicalUtilisation(conn);
// add size of snapshot vdi parent, this contains data
if (!isRefNull(parentVDI)) {
size = size + parentVDI.getPhysicalUtilisation(conn).longValue();
}
}
} catch (final Exception e) {
s_logger.debug("Exception occurs when calculate snapshot capacity for volumes: due to " + e.toString());
continue;
}
}
if (volumeTo.getVolumeType() == Volume.Type.ROOT) {
VM vm = getVM(conn, vmName);
if (vm != null) {
Set<VM> vmSnapshots = vm.getSnapshots(conn);
if (vmSnapshots != null) {
for (VM vmsnap : vmSnapshots) {
try {
final String vmSnapName = vmsnap.getNameLabel(conn);
s_logger.debug("snapname " + vmSnapName);
if (vmSnapName != null && vmSnapName.contains(vmSnapshotName) && vmsnap.getIsASnapshot(conn)) {
s_logger.debug("snapname " + vmSnapName + "isASnapshot");
VDI memoryVDI = vmsnap.getSuspendVDI(conn);
if (!isRefNull(memoryVDI)) {
size = size + memoryVDI.getPhysicalUtilisation(conn);
s_logger.debug("memoryVDI size :" + size);
String parentUuid = memoryVDI.getSmConfig(conn).get("vhd-parent");
VDI pMemoryVDI = VDI.getByUuid(conn, parentUuid);
if (!isRefNull(pMemoryVDI)) {
size = size + pMemoryVDI.getPhysicalUtilisation(conn);
}
s_logger.debug("memoryVDI size+parent :" + size);
}
}
} catch (Exception e) {
s_logger.debug("Exception occurs when calculate snapshot capacity for memory: due to " + e.toString());
continue;
}
}
}
}
}
return size;
}
public PowerState getVmState(final Connection conn, final String vmName) {
int retry = 3;
while (retry-- > 0) {
try {
final Set<VM> vms = VM.getByNameLabel(conn, vmName);
for (final VM vm : vms) {
return convertToPowerState(vm.getPowerState(conn));
}
} catch (final BadServerResponse e) {
// There is a race condition within XenServer such that if a VM is
// deleted and we happen to ask for it, it throws this response. If
// this happens, we wait briefly and try again, which avoids the race
// condition because the VM's information has been cleaned up by
// XenServer by then. The error is as follows:
// com.xensource.xenapi.Types$BadServerResponse
// [HANDLE_INVALID, VM, 3dde93f9-c1df-55a7-2cde-55e1dce431ab]
s_logger.info("Unable to get a vm PowerState due to " + e.toString() + ". We are retrying. Count: " + retry);
try {
Thread.sleep(3000);
} catch (final InterruptedException ex) {
}
} catch (final XenAPIException e) {
final String msg = "Unable to get a vm PowerState due to " + e.toString();
s_logger.warn(msg, e);
break;
} catch (final XmlRpcException e) {
final String msg = "Unable to get a vm PowerState due to " + e.getMessage();
s_logger.warn(msg, e);
break;
}
}
return PowerState.PowerOff;
}
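/**
* Collects per-VM statistics (CPU, network, disk and memory) from the host's RRD data for the
* requested VM UUIDs and returns them keyed by VM UUID, or null if no RRD data is available.
*/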
public HashMap<String, VmStatsEntry> getVmStats(final Connection conn, final GetVmStatsCommand cmd, final List<String> vmUUIDs, final String hostGuid) {
final HashMap<String, VmStatsEntry> vmResponseMap = new HashMap<String, VmStatsEntry>();
for (final String vmUUID : vmUUIDs) {
vmResponseMap.put(vmUUID, new VmStatsEntry(0, 0, 0, 0, 0, 0, 0, "vm"));
}
final Object[] rrdData = getRRDData(conn, 2); // call rrddata with 2 for
// vm
if (rrdData == null) {
return null;
}
final Integer numRows = (Integer)rrdData[0];
final Integer numColumns = (Integer)rrdData[1];
final Node legend = (Node)rrdData[2];
final Node dataNode = (Node)rrdData[3];
final NodeList legendChildren = legend.getChildNodes();
for (int col = 0; col < numColumns; col++) {
if (legendChildren == null || legendChildren.item(col) == null) {
continue;
}
final String columnMetadata = getXMLNodeValue(legendChildren.item(col));
if (columnMetadata == null) {
continue;
}
final String[] columnMetadataList = columnMetadata.split(":");
if (columnMetadataList.length != 4) {
continue;
}
final String type = columnMetadataList[1];
final String uuid = columnMetadataList[2];
final String param = columnMetadataList[3];
if (type.equals("vm") && vmResponseMap.keySet().contains(uuid)) {
final VmStatsEntry vmStatsAnswer = vmResponseMap.get(uuid);
vmStatsAnswer.setEntityType("vm");
if (param.contains("cpu")) {
vmStatsAnswer.setNumCPUs(vmStatsAnswer.getNumCPUs() + 1);
vmStatsAnswer.setCPUUtilization(vmStatsAnswer.getCPUUtilization() + getDataAverage(dataNode, col, numRows));
} else if (param.matches("vif_\\d*_rx")) {
vmStatsAnswer.setNetworkReadKBs(vmStatsAnswer.getNetworkReadKBs() + getDataAverage(dataNode, col, numRows) / BASE_TO_CONVERT_BYTES_INTO_KILOBYTES);
} else if (param.matches("vif_\\d*_tx")) {
vmStatsAnswer.setNetworkWriteKBs(vmStatsAnswer.getNetworkWriteKBs() + getDataAverage(dataNode, col, numRows) / BASE_TO_CONVERT_BYTES_INTO_KILOBYTES);
} else if (param.matches("vbd_.*_read")) {
vmStatsAnswer.setDiskReadKBs(vmStatsAnswer.getDiskReadKBs() + getDataAverage(dataNode, col, numRows) / BASE_TO_CONVERT_BYTES_INTO_KILOBYTES);
} else if (param.matches("vbd_.*_write")) {
vmStatsAnswer.setDiskWriteKBs(vmStatsAnswer.getDiskWriteKBs() + getDataAverage(dataNode, col, numRows) / BASE_TO_CONVERT_BYTES_INTO_KILOBYTES);
} else if (param.contains("memory_internal_free")) {
vmStatsAnswer.setIntFreeMemoryKBs(vmStatsAnswer.getIntFreeMemoryKBs() + getDataAverage(dataNode, col, numRows) / BASE_TO_CONVERT_BYTES_INTO_KILOBYTES);
} else if (param.contains("memory_target")) {
vmStatsAnswer.setTargetMemoryKBs(vmStatsAnswer.getTargetMemoryKBs() + getDataAverage(dataNode, col, numRows) / BASE_TO_CONVERT_BYTES_INTO_KILOBYTES);
} else if (param.contains("memory")) {
vmStatsAnswer.setMemoryKBs(vmStatsAnswer.getMemoryKBs() + getDataAverage(dataNode, col, numRows) / BASE_TO_CONVERT_BYTES_INTO_KILOBYTES);
}
}
}
for (final Map.Entry<String, VmStatsEntry> entry : vmResponseMap.entrySet()) {
final VmStatsEntry vmStatsAnswer = entry.getValue();
if (vmStatsAnswer.getNumCPUs() != 0) {
vmStatsAnswer.setCPUUtilization(vmStatsAnswer.getCPUUtilization() / vmStatsAnswer.getNumCPUs());
}
vmStatsAnswer.setCPUUtilization(vmStatsAnswer.getCPUUtilization() * 100);
if (s_logger.isDebugEnabled()) {
s_logger.debug("Vm cpu utilization " + vmStatsAnswer.getCPUUtilization());
}
}
return vmResponseMap;
}
public String getVncUrl(final Connection conn, final VM vm) {
VM.Record record;
Console c;
try {
record = vm.getRecord(conn);
final Set<Console> consoles = record.consoles;
if (consoles.isEmpty()) {
s_logger.warn("There are no Consoles available to the vm : " + record.nameDescription);
return null;
}
final Iterator<Console> i = consoles.iterator();
while (i.hasNext()) {
c = i.next();
if (c.getProtocol(conn) == Types.ConsoleProtocol.RFB) {
return c.getLocation(conn);
}
}
} catch (final XenAPIException e) {
final String msg = "Unable to get console url due to " + e.toString();
s_logger.warn(msg, e);
return null;
} catch (final XmlRpcException e) {
final String msg = "Unable to get console url due to " + e.getMessage();
s_logger.warn(msg, e);
return null;
}
return null;
}
protected String getXMLNodeValue(final Node n) {
return n.getChildNodes().item(0).getNodeValue();
}
public void handleSrAndVdiDetach(final String iqn, final Connection conn) throws Exception {
final SR sr = getStorageRepository(conn, iqn);
removeSR(conn, sr);
}
protected void destroyUnattachedVBD(Connection conn, VM vm) {
try {
for (VBD vbd : vm.getVBDs(conn)) {
if (Types.VbdType.DISK.equals(vbd.getType(conn)) && !vbd.getCurrentlyAttached(conn)) {
vbd.destroy(conn);
}
}
} catch (final Exception e) {
s_logger.debug("Failed to destroy unattached VBD due to ", e);
}
}
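/**
* Cleans up after a failed VM start: the VM is hard-shut-down and destroyed if needed, its
* VBDs and VIFs are unplugged and destroyed, and any VLAN networks it used are disabled.
* Returns the failure message.
*/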
public String handleVmStartFailure(final Connection conn, final String vmName, final VM vm, final String message, final Throwable th) {
final String msg = "Unable to start " + vmName + " due to " + message;
s_logger.warn(msg, th);
if (vm == null) {
return msg;
}
try {
final VM.Record vmr = vm.getRecord(conn);
final List<Network> networks = new ArrayList<Network>();
for (final VIF vif : vmr.VIFs) {
try {
final VIF.Record rec = vif.getRecord(conn);
if (rec != null) {
networks.add(rec.network);
} else {
s_logger.warn("Unable to cleanup VIF: " + vif.toWireString() + " As vif record is null");
}
} catch (final Exception e) {
s_logger.warn("Unable to cleanup VIF", e);
}
}
if (vmr.powerState == VmPowerState.RUNNING) {
try {
vm.hardShutdown(conn);
} catch (final Exception e) {
s_logger.warn("VM hardshutdown failed due to ", e);
}
}
if (vm.getPowerState(conn) == VmPowerState.HALTED) {
try {
vm.destroy(conn);
} catch (final Exception e) {
s_logger.warn("VM destroy failed due to ", e);
}
}
for (final VBD vbd : vmr.VBDs) {
try {
vbd.unplug(conn);
vbd.destroy(conn);
} catch (final Exception e) {
s_logger.warn("Unable to clean up VBD due to ", e);
}
}
for (final VIF vif : vmr.VIFs) {
try {
vif.unplug(conn);
vif.destroy(conn);
} catch (final Exception e) {
s_logger.warn("Unable to cleanup VIF", e);
}
}
for (final Network network : networks) {
if (network.getNameLabel(conn).startsWith("VLAN")) {
disableVlanNetwork(conn, network);
}
}
} catch (final Exception e) {
s_logger.warn("VM getRecord failed due to ", e);
}
return msg;
}
@Override
public StartupCommand[] initialize() throws IllegalArgumentException {
final Connection conn = getConnection();
if (!getHostInfo(conn)) {
s_logger.warn("Unable to get host information for " + _host.getIp());
return null;
}
final StartupRoutingCommand cmd = new StartupRoutingCommand();
fillHostInfo(conn, cmd);
cmd.setHypervisorType(HypervisorType.XenServer);
cmd.setCluster(_cluster);
cmd.setPoolSync(false);
try {
final Pool pool = Pool.getByUuid(conn, _host.getPool());
final Pool.Record poolr = pool.getRecord(conn);
poolr.master.getRecord(conn);
} catch (final Throwable e) {
s_logger.warn("Check for master failed, failing the FULL Cluster sync command");
}
List<StartupStorageCommand> startUpLocalStorageCommands = null;
try {
startUpLocalStorageCommands = initializeLocalSrs(conn);
} catch (XenAPIException | XmlRpcException e) {
s_logger.warn("Could not initialize local SRs on host: " + _host.getUuid(), e);
}
if (CollectionUtils.isEmpty(startUpLocalStorageCommands)) {
return new StartupCommand[] {cmd};
}
return createStartupCommandsArray(cmd, startUpLocalStorageCommands);
}
/**
* We simply create an array and add the {@link StartupRoutingCommand} as the first element of the array. Then, we add all elements from startUpLocalStorageCommands
*/
private StartupCommand[] createStartupCommandsArray(StartupRoutingCommand startupRoutingCommand, List<StartupStorageCommand> startUpLocalStorageCommands) {
StartupCommand[] startupCommands = new StartupCommand[startUpLocalStorageCommands.size() + 1];
startupCommands[0] = startupRoutingCommand;
for (int i = 1; i < startupCommands.length; i++) {
startupCommands[i] = startUpLocalStorageCommands.get(i - 1);
}
return startupCommands;
}
/**
* This method will return a list of all local SRs.
* An SR is considered local if it meets all of the following criteria:
* <ul>
* <li> {@link Record#shared} is equal to false
* <li> The PBDs of the SR ({@link Record#PBDs}) are connected to host {@link #_host}
* <li> SR type is equal to the {@link SRType} sent as parameter
* </ul>
*/
protected List<SR> getAllLocalSrForType(Connection conn, SRType srType) throws XenAPIException, XmlRpcException {
List<SR> localSrs = new ArrayList<>();
Map<SR, SR.Record> allSrRecords = SR.getAllRecords(conn);
if (MapUtils.isEmpty(allSrRecords)) {
return localSrs;
}
for (Map.Entry<SR, SR.Record> entry : allSrRecords.entrySet()) {
SR.Record srRec = entry.getValue();
if (!srType.equals(srRec.type)) {
continue;
}
if (BooleanUtils.toBoolean(srRec.shared)) {
continue;
}
Set<PBD> pbds = srRec.PBDs;
if (CollectionUtils.isEmpty(pbds)) {
continue;
}
for (PBD pbd : pbds) {
Host host = pbd.getHost(conn);
if (!isRefNull(host) && org.apache.commons.lang3.StringUtils.equals(host.getUuid(conn), _host.getUuid())) {
if (!pbd.getCurrentlyAttached(conn)) {
s_logger.debug(String.format("PBD [%s] of local SR [%s] was unplugged, pluggin it now", pbd.getUuid(conn), srRec.uuid));
pbd.plug(conn);
}
s_logger.debug("Scanning local SR: " + srRec.uuid);
SR sr = entry.getKey();
sr.scan(conn);
localSrs.add(sr);
}
}
}
s_logger.debug(String.format("Found %d local storage of type [%s] for host [%s]", localSrs.size(), srType.toString(), _host.getUuid()));
return localSrs;
}
/**
* This method will prepare Local SRs to be used by Apache CloudStack.
*/
protected List<StartupStorageCommand> initializeLocalSrs(Connection conn) throws XenAPIException, XmlRpcException {
List<StartupStorageCommand> localStorageStartupCommands = new ArrayList<>();
List<SR> allLocalSrs = getAllLocalSrs(conn);
for (SR sr : allLocalSrs) {
long totalCapacity = sr.getPhysicalSize(conn);
if (totalCapacity > 0) {
StartupStorageCommand cmd = createStartUpStorageCommand(conn, sr);
localStorageStartupCommands.add(cmd);
}
}
return localStorageStartupCommands;
}
/**
* This method will retrieve all Local SRs according to {@link #getAllLocalSrForType(Connection, SRType)}.
* The types used are {@link SRType#LVM} and {@link SRType#EXT}.
*
*/
protected List<SR> getAllLocalSrs(Connection conn) throws XenAPIException, XmlRpcException {
List<SR> allLocalSrLvmType = getAllLocalSrForType(conn, SRType.LVM);
List<SR> allLocalSrExtType = getAllLocalSrForType(conn, SRType.EXT);
List<SR> allLocalSrs = new ArrayList<>(allLocalSrLvmType);
allLocalSrs.addAll(allLocalSrExtType);
return allLocalSrs;
}
/**
* This method creates the StartUp storage command for the local SR.
* We will configure 'name-label' and 'description' using {@link #configureStorageNameAndDescription(Connection, SR)}.
* Then, we will create the POJO {@link StoragePoolInfo} with SR's information using method {@link #createStoragePoolInfo(Connection, SR)}.
*/
protected StartupStorageCommand createStartUpStorageCommand(Connection conn, SR sr) throws XenAPIException, XmlRpcException {
configureStorageNameAndDescription(conn, sr);
StoragePoolInfo storagePoolInfo = createStoragePoolInfo(conn, sr);
StartupStorageCommand cmd = new StartupStorageCommand();
cmd.setPoolInfo(storagePoolInfo);
cmd.setGuid(_host.getUuid());
cmd.setDataCenter(Long.toString(_dcId));
cmd.setResourceType(Storage.StorageResourceType.STORAGE_POOL);
String.format("StartUp command created for local storage [%s] of type [%s] on host [%s]", storagePoolInfo.getUuid(), storagePoolInfo.getPoolType(), _host.getUuid());
return cmd;
}
/**
* Instantiate {@link StoragePoolInfo} with SR's information.
*/
protected StoragePoolInfo createStoragePoolInfo(Connection conn, SR sr) throws XenAPIException, XmlRpcException {
long totalCapacity = sr.getPhysicalSize(conn);
String srUuid = sr.getUuid(conn);
Host host = Host.getByUuid(conn, _host.getUuid());
String address = host.getAddress(conn);
long availableCapacity = totalCapacity - sr.getPhysicalUtilisation(conn);
String srType = sr.getType(conn).toUpperCase();
return new StoragePoolInfo(srUuid, address, srType, srType, StoragePoolType.valueOf(srType), totalCapacity, availableCapacity);
}
protected void configureStorageNameAndDescription(Connection conn, SR sr) throws XenAPIException, XmlRpcException {
String srUuid = sr.getUuid(conn);
sr.setNameLabel(conn, srUuid);
String nameFormat = "Cloud Stack Local (%s) Storage Pool for %s";
sr.setNameDescription(conn, String.format(nameFormat, sr.getType(conn), _host.getUuid()));
}
public boolean isDeviceUsed(final Connection conn, final VM vm, final Long deviceId) {
// Figure out the disk number to attach the VM to
String msg = null;
try {
final Set<String> allowedVBDDevices = vm.getAllowedVBDDevices(conn);
return !allowedVBDDevices.contains(deviceId.toString());
} catch (final XmlRpcException e) {
msg = "Catch XmlRpcException due to: " + e.getMessage();
s_logger.warn(msg, e);
} catch (final XenAPIException e) {
msg = "Catch XenAPIException due to: " + e.toString();
s_logger.warn(msg, e);
}
throw new CloudRuntimeException("Error while checking whether device " + deviceId + " is used: " + msg);
}
/**
* When Dynamic Memory Control (DMC) is enabled - xenserver allows scaling
* the guest memory while the guest is running
*
* By default this is disallowed, override the specific xenserver resource
* if this is enabled
*/
public boolean isDmcEnabled(final Connection conn, final Host host) throws XenAPIException, XmlRpcException {
return false;
}
public boolean IsISCSI(final String type) {
return SRType.LVMOHBA.equals(type) || SRType.LVMOISCSI.equals(type) || SRType.LVM.equals(type);
}
public boolean isNetworkSetupByName(final String nameTag) throws XenAPIException, XmlRpcException {
if (nameTag != null) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Looking for network setup by name " + nameTag);
}
final Connection conn = getConnection();
final XsLocalNetwork network = getNetworkByName(conn, nameTag);
if (network == null) {
return false;
}
}
return true;
}
public boolean isOvs() {
return _isOvs;
}
public boolean isRefNull(final XenAPIObject object) {
return object == null || object.toWireString().equals("OpaqueRef:NULL") || object.toWireString().equals("<not in database>");
}
public boolean isSecurityGroupEnabled() {
return _securityGroupEnabled;
}
public boolean isXcp() {
final Connection conn = getConnection();
final String result = callHostPlugin(conn, "ovstunnel", "is_xcp");
return result.equals("XCP");
}
boolean killCopyProcess(final Connection conn, final String nameLabel) {
final String results = callHostPluginAsync(conn, "vmops", "kill_copy_process", 60, "namelabel", nameLabel);
String errMsg = null;
if (results == null || results.equals("false")) {
errMsg = "kill_copy_process failed";
s_logger.warn(errMsg);
return false;
} else {
return true;
}
}
public boolean launchHeartBeat(final Connection conn) {
final String result = callHostPluginPremium(conn, "heartbeat", "host", _host.getUuid(), "timeout", Integer.toString(_heartbeatTimeout), "interval", Integer.toString(_heartbeatInterval));
if (result == null || !result.contains("> DONE <")) {
s_logger.warn("Unable to launch the heartbeat process on " + _host.getIp());
return false;
}
return true;
}
protected String logX(final XenAPIObject obj, final String msg) {
return new StringBuilder("Host ").append(_host.getIp()).append(" ").append(obj.toWireString()).append(": ").append(msg).toString();
}
public void migrateVM(final Connection conn, final Host destHost, final VM vm, final String vmName) throws Exception {
Task task = null;
try {
final Map<String, String> other = new HashMap<String, String>();
other.put("live", "true");
task = vm.poolMigrateAsync(conn, destHost, other);
try {
// poll every second
final long timeout = _migratewait * 1000L;
waitForTask(conn, task, 1000, timeout);
checkForSuccess(conn, task);
} catch (final Types.HandleInvalid e) {
if (vm.getResidentOn(conn).equals(destHost)) {
task = null;
return;
}
throw new CloudRuntimeException("migrate VM catch HandleInvalid and VM is not running on dest host");
}
} catch (final XenAPIException e) {
final String msg = "Unable to migrate VM(" + vmName + ") from host(" + _host.getUuid() + ")";
s_logger.warn(msg, e);
throw new CloudRuntimeException(msg);
} finally {
if (task != null) {
try {
task.destroy(conn);
} catch (final Exception e1) {
s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.getUuid() + ") due to " + e1.toString());
}
}
}
}
protected VDI mount(final Connection conn, final StoragePoolType poolType, final String volumeFolder, final String volumePath) {
return getVDIbyUuid(conn, volumePath);
}
protected VDI mount(final Connection conn, final String vmName, final DiskTO volume) throws XmlRpcException, XenAPIException {
final DataTO data = volume.getData();
final Volume.Type type = volume.getType();
if (type == Volume.Type.ISO) {
final TemplateObjectTO iso = (TemplateObjectTO)data;
final DataStoreTO store = iso.getDataStore();
if (store == null) {
// It's a fake iso
return null;
}
// corner case: xenserver pv driver iso
final String templateName = iso.getName();
if (templateName.startsWith("xs-tools")) {
try {
final String actualTemplateName = getActualIsoTemplate(conn);
final Set<VDI> vdis = VDI.getByNameLabel(conn, actualTemplateName);
if (vdis.isEmpty()) {
throw new CloudRuntimeException("Could not find ISO with URL: " + actualTemplateName);
}
return vdis.iterator().next();
} catch (final XenAPIException e) {
throw new CloudRuntimeException("Unable to get pv iso: " + templateName + " due to " + e.toString());
} catch (final Exception e) {
throw new CloudRuntimeException("Unable to get pv iso: " + templateName + " due to " + e.toString());
}
}
if (!(store instanceof NfsTO)) {
throw new CloudRuntimeException("Only mounting an ISO from an NFS store is supported");
}
final NfsTO nfsStore = (NfsTO)store;
final String isoPath = nfsStore.getUrl() + File.separator + iso.getPath();
final int index = isoPath.lastIndexOf("/");
final String mountpoint = isoPath.substring(0, index);
URI uri;
try {
uri = new URI(mountpoint);
} catch (final URISyntaxException e) {
throw new CloudRuntimeException("Incorrect uri " + mountpoint, e);
}
final SR isoSr = createIsoSRbyURI(conn, uri, vmName, false);
final String isoname = isoPath.substring(index + 1);
final VDI isoVdi = getVDIbyLocationandSR(conn, isoname, isoSr);
if (isoVdi == null) {
throw new CloudRuntimeException("Unable to find ISO " + isoPath);
}
return isoVdi;
} else {
final VolumeObjectTO vol = (VolumeObjectTO)data;
return VDI.getByUuid(conn, vol.getPath());
}
}
public String networkUsage(final Connection conn, final String privateIpAddress, final String option, final String vif) {
if (option.equals("get")) {
return "0:0";
}
return null;
}
private List<Pair<String, Long>> ovsFullSyncStates() {
final Connection conn = getConnection();
final String result = callHostPlugin(conn, "ovsgre", "ovs_get_vm_log", "host_uuid", _host.getUuid());
final String[] logs = result != null ? result.split(";") : new String[0];
final List<Pair<String, Long>> states = new ArrayList<Pair<String, Long>>();
for (final String log : logs) {
final String[] info = log.split(",");
if (info.length != 5) {
s_logger.warn("Wrong element number in ovs log(" + log + ")");
continue;
}
// ','.join([bridge, vmName, vmId, seqno, tag])
try {
states.add(new Pair<String, Long>(info[0], Long.parseLong(info[3])));
} catch (final NumberFormatException nfe) {
states.add(new Pair<String, Long>(info[0], -1L));
}
}
return states;
}
public HashMap<String, String> parseDefaultOvsRuleComamnd(final String str) {
final HashMap<String, String> cmd = new HashMap<String, String>();
final String[] sarr = str.split("/");
for (int i = 0; i < sarr.length; i++) {
String c = sarr[i];
c = c.startsWith("/") ? c.substring(1) : c;
c = c.endsWith("/") ? c.substring(0, c.length() - 1) : c;
final String[] p = c.split(";");
if (p.length != 2) {
continue;
}
if (p[0].equalsIgnoreCase("vlans")) {
p[1] = p[1].replace("@", "[");
p[1] = p[1].replace("#", "]");
}
cmd.put(p[0], p[1]);
}
return cmd;
}
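// Illustrative example (hypothetical input, derived only from the parsing logic above):
// "vlans;@100,200#/action;accept" would yield {"vlans" -> "[100,200]", "action" -> "accept"},
// since entries are split on '/', key/value pairs on ';', and '@'/'#' are mapped back to '[' and ']'.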
protected Pair<Long, Integer> parseTimestamp(final String timeStampStr) {
final String[] tokens = timeStampStr.split("-");
if (tokens.length != 3) {
s_logger.debug("timeStamp in network has wrong pattern: " + timeStampStr);
return null;
}
if (!tokens[0].equals("CsCreateTime")) {
s_logger.debug("timeStamp in network doesn't start with CsCreateTime: " + timeStampStr);
return null;
}
return new Pair<Long, Integer>(Long.parseLong(tokens[1]), Integer.parseInt(tokens[2]));
}
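// Illustrative example (hypothetical values): "CsCreateTime-1537459200-7" parses to Pair(1537459200L, 7);
// any string that does not split into exactly three '-' separated tokens starting with "CsCreateTime" returns null.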
private void pbdPlug(final Connection conn, final PBD pbd, final String uuid) {
try {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Plugging in PBD " + uuid + " for " + _host);
}
pbd.plug(conn);
} catch (final Exception e) {
final String msg = "PBD " + uuid + " is not attached! and PBD plug failed due to " + e.toString() + ". Please check this PBD in " + _host;
s_logger.warn(msg, e);
throw new CloudRuntimeException(msg);
}
}
protected boolean pingdomr(final Connection conn, final String host, final String port) {
String status;
status = callHostPlugin(conn, "vmops", "pingdomr", "host", host, "port", port);
if (status == null || status.isEmpty()) {
return false;
}
return true;
}
public boolean pingXAPI() {
final Connection conn = getConnection();
try {
final Host host = Host.getByUuid(conn, _host.getUuid());
if (!host.getEnabled(conn)) {
s_logger.debug("Host " + _host.getIp() + " is not enabled!");
return false;
}
} catch (final Exception e) {
s_logger.debug("cannot get host enabled status, host " + _host.getIp() + " due to " + e.toString(), e);
return false;
}
try {
callHostPlugin(conn, "echo", "main");
} catch (final Exception e) {
s_logger.debug("cannot ping host " + _host.getIp() + " due to " + e.toString(), e);
return false;
}
return true;
}
protected void plugDom0Vif(final Connection conn, final VIF dom0Vif) throws XmlRpcException, XenAPIException {
if (dom0Vif != null) {
dom0Vif.plug(conn);
}
}
protected boolean postCreatePrivateTemplate(final Connection conn, final String templatePath, final String tmpltFilename, final String templateName, String templateDescription, String checksum,
final long size, final long virtualSize, final long templateId) {
if (templateDescription == null) {
templateDescription = "";
}
if (checksum == null) {
checksum = "";
}
final String result = callHostPlugin(conn, "vmopsSnapshot", "post_create_private_template", "templatePath", templatePath, "templateFilename", tmpltFilename, "templateName", templateName,
"templateDescription", templateDescription, "checksum", checksum, "size", String.valueOf(size), "virtualSize", String.valueOf(virtualSize), "templateId", String.valueOf(templateId));
boolean success = false;
if (result != null && !result.isEmpty()) {
// Else, command threw an exception which has already been logged.
if (result.equalsIgnoreCase("1")) {
s_logger.debug("Successfully created template.properties file on secondary storage for " + tmpltFilename);
success = true;
} else {
s_logger.warn("Could not create template.properties file on secondary storage for " + tmpltFilename + " for templateId: " + templateId);
}
}
return success;
}
@Override
public ExecutionResult prepareCommand(final NetworkElementCommand cmd) {
// Update IP used to access router
cmd.setRouterAccessIp(cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP));
assert cmd.getRouterAccessIp() != null;
if (cmd instanceof IpAssocVpcCommand) {
return prepareNetworkElementCommand((IpAssocVpcCommand)cmd);
} else if (cmd instanceof IpAssocCommand) {
return prepareNetworkElementCommand((IpAssocCommand)cmd);
} else if (cmd instanceof SetupGuestNetworkCommand) {
return prepareNetworkElementCommand((SetupGuestNetworkCommand)cmd);
} else if (cmd instanceof SetSourceNatCommand) {
return prepareNetworkElementCommand((SetSourceNatCommand)cmd);
} else if (cmd instanceof SetNetworkACLCommand) {
return prepareNetworkElementCommand((SetNetworkACLCommand)cmd);
}
return new ExecutionResult(true, null);
}
public void prepareISO(final Connection conn, final String vmName, List<String[]> vmDataList, String configDriveLabel) throws XmlRpcException, XenAPIException {
final Set<VM> vms = VM.getByNameLabel(conn, vmName);
if (vms == null || vms.size() != 1) {
throw new CloudRuntimeException("There are " + (vms == null ? "0" : vms.size()) + " VMs named " + vmName);
}
final VM vm = vms.iterator().next();
if (vmDataList != null) {
// create SR
SR sr = createLocalIsoSR(conn, _configDriveSRName + getHost().getIp());
// 1. create vm data files
createVmdataFiles(vmName, vmDataList, configDriveLabel);
// 2. copy config drive iso to host
copyConfigDriveIsoToHost(conn, sr, vmName);
}
final Set<VBD> vbds = vm.getVBDs(conn);
for (final VBD vbd : vbds) {
final VBD.Record vbdr = vbd.getRecord(conn);
if (vbdr.type == Types.VbdType.CD && !vbdr.empty && vbdr.userdevice.equals(_attachIsoDeviceNum)) {
final VDI vdi = vbdr.VDI;
final SR sr = vdi.getSR(conn);
final Set<PBD> pbds = sr.getPBDs(conn);
if (pbds == null) {
throw new CloudRuntimeException("There is no pbd for sr " + sr);
}
for (final PBD pbd : pbds) {
final PBD.Record pbdr = pbd.getRecord(conn);
if (pbdr.host.getUuid(conn).equals(_host.getUuid())) {
return;
}
}
sr.setShared(conn, true);
final Host host = Host.getByUuid(conn, _host.getUuid());
final PBD.Record pbdr = pbds.iterator().next().getRecord(conn);
pbdr.host = host;
pbdr.uuid = "";
final PBD pbd = PBD.create(conn, pbdr);
pbdPlug(conn, pbd, pbd.getUuid(conn));
break;
}
}
}
// The idea here is to see if the DiskTO in question is from managed storage and does not yet have an SR.
// If no SR, create it and create a VDI in it.
public VDI prepareManagedDisk(final Connection conn, final DiskTO disk, final long vmId, final String vmName) throws Exception {
final Map<String, String> details = disk.getDetails();
if (details == null) {
return null;
}
final boolean isManaged = Boolean.parseBoolean(details.get(DiskTO.MANAGED));
if (!isManaged) {
return null;
}
final String iqn = details.get(DiskTO.IQN);
final Set<SR> srNameLabels = SR.getByNameLabel(conn, iqn);
if (srNameLabels.size() != 0) {
return null;
}
final String vdiNameLabel = Volume.Type.ROOT.equals(disk.getType()) ? ("ROOT-" + vmId) : (vmName + "-DATA");
return prepareManagedStorage(conn, details, null, vdiNameLabel);
}
protected SR prepareManagedSr(final Connection conn, final Map<String, String> details) {
final String iScsiName = details.get(DiskTO.IQN);
final String storageHost = details.get(DiskTO.STORAGE_HOST);
final String chapInitiatorUsername = details.get(DiskTO.CHAP_INITIATOR_USERNAME);
final String chapInitiatorSecret = details.get(DiskTO.CHAP_INITIATOR_SECRET);
final String mountpoint = details.get(DiskTO.MOUNT_POINT);
final String protocoltype = details.get(DiskTO.PROTOCOL_TYPE);
if (StoragePoolType.NetworkFilesystem.toString().equalsIgnoreCase(protocoltype)) {
final String poolid = storageHost + ":" + mountpoint;
final String nameLabel = mountpoint;
final String volumedesc = storageHost + ":" + mountpoint;
return getNfsSR(conn, poolid, nameLabel, storageHost, mountpoint, volumedesc);
} else {
return getIscsiSR(conn, iScsiName, storageHost, iScsiName, chapInitiatorUsername, chapInitiatorSecret, false, SRType.LVMOISCSI.toString(), true);
}
}
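// Illustrative sketch of the details map this method consumes (values are made up for illustration):
// DiskTO.STORAGE_HOST -> "10.0.0.5", DiskTO.MOUNT_POINT -> "/export/primary",
// DiskTO.PROTOCOL_TYPE -> "NetworkFilesystem" selects the NFS branch; any other protocol type takes the
// iSCSI branch using DiskTO.IQN plus the CHAP initiator credentials.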
protected VDI prepareManagedStorage(final Connection conn, final Map<String, String> details, final String path, final String vdiNameLabel) throws Exception {
final SR sr = prepareManagedSr(conn, details);
VDI vdi = getVDIbyUuid(conn, path, false);
final Long volumeSize = Long.parseLong(details.get(DiskTO.VOLUME_SIZE));
Set<VDI> vdisInSr = sr.getVDIs(conn);
// If a VDI already exists in the SR (in case we cloned from a template cache), use that.
if (vdisInSr.size() == 1) {
vdi = vdisInSr.iterator().next();
}
if (vdi == null) {
vdi = createVdi(sr, vdiNameLabel, volumeSize);
} else {
// If vdi is not null, it must have already been created, so check whether a resize of the volume was performed.
// If true, resize the VDI to the volume size.
s_logger.info("Checking for the resize of the datadisk");
final long vdiVirtualSize = vdi.getVirtualSize(conn);
if (vdiVirtualSize != volumeSize) {
s_logger.info("Resizing the data disk (VDI) from vdiVirtualSize: " + vdiVirtualSize + " to volumeSize: " + volumeSize);
try {
vdi.resize(conn, volumeSize);
} catch (final Exception e) {
s_logger.warn("Unable to resize volume", e);
}
}
// change the name-label in case of a cloned VDI
if (!Objects.equals(vdi.getNameLabel(conn), vdiNameLabel)) {
try {
vdi.setNameLabel(conn, vdiNameLabel);
} catch (final Exception e) {
s_logger.warn("Unable to rename volume", e);
}
}
}
return vdi;
}
protected ExecutionResult prepareNetworkElementCommand(final IpAssocCommand cmd) {
final Connection conn = getConnection();
final String routerName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME);
final String routerIp = cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP);
try {
final IpAddressTO[] ips = cmd.getIpAddresses();
for (final IpAddressTO ip : ips) {
final VM router = getVM(conn, routerName);
final NicTO nic = new NicTO();
nic.setMac(ip.getVifMacAddress());
nic.setType(ip.getTrafficType());
if (ip.getBroadcastUri() == null) {
nic.setBroadcastType(BroadcastDomainType.Native);
} else {
final URI uri = BroadcastDomainType.fromString(ip.getBroadcastUri());
nic.setBroadcastType(BroadcastDomainType.getSchemeValue(uri));
nic.setBroadcastUri(uri);
}
nic.setDeviceId(0);
nic.setNetworkRateMbps(ip.getNetworkRate());
nic.setName(ip.getNetworkName());
final Network network = getNetwork(conn, nic);
// Determine the correct VIF on DomR to associate/disassociate the IP address with
VIF correctVif = getCorrectVif(conn, router, network);
// If we are associating an IP address and DomR doesn't have a VIF for the specified VLAN ID, we need to add a VIF.
// If we are disassociating the last IP address in the VLAN, we need to remove a VIF.
boolean addVif = false;
if (ip.isAdd() && correctVif == null) {
addVif = true;
}
if (addVif) {
// Add a new VIF to DomR
final String vifDeviceNum = getLowestAvailableVIFDeviceNum(conn, router);
if (vifDeviceNum == null) {
throw new InternalErrorException("There were no more available slots for a new VIF on router: " + router.getNameLabel(conn));
}
nic.setDeviceId(Integer.parseInt(vifDeviceNum));
correctVif = createVif(conn, routerName, router, null, nic);
correctVif.plug(conn);
// Add iptables rule for network usage
networkUsage(conn, routerIp, "addVif", "eth" + correctVif.getDevice(conn));
}
if (ip.isAdd() && correctVif == null) {
throw new InternalErrorException("Failed to find DomR VIF to associate/disassociate IP with.");
}
if (correctVif != null) {
ip.setNicDevId(Integer.valueOf(correctVif.getDevice(conn)));
ip.setNewNic(addVif);
}
}
} catch (final InternalErrorException e) {
s_logger.error("Ip Assoc failure on applying one ip due to exception: ", e);
return new ExecutionResult(false, e.getMessage());
} catch (final Exception e) {
return new ExecutionResult(false, e.getMessage());
}
return new ExecutionResult(true, null);
}
protected ExecutionResult prepareNetworkElementCommand(final IpAssocVpcCommand cmd) {
final Connection conn = getConnection();
final String routerName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME);
try {
final IpAddressTO[] ips = cmd.getIpAddresses();
for (final IpAddressTO ip : ips) {
final VM router = getVM(conn, routerName);
final VIF correctVif = getVifByMac(conn, router, ip.getVifMacAddress());
setNicDevIdIfCorrectVifIsNotNull(conn, ip, correctVif);
}
} catch (final Exception e) {
s_logger.error("Ip Assoc failure on applying one ip due to exception: ", e);
return new ExecutionResult(false, e.getMessage());
}
return new ExecutionResult(true, null);
}
protected ExecutionResult prepareNetworkElementCommand(final SetNetworkACLCommand cmd) {
final Connection conn = getConnection();
final String routerName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME);
try {
final VM router = getVM(conn, routerName);
final NicTO nic = cmd.getNic();
if (nic != null) {
final VIF vif = getVifByMac(conn, router, nic.getMac());
if (vif == null) {
final String msg = "Prepare SetNetworkACL failed because the VIF is null for: " + nic.getMac() + " with router name: " + routerName;
s_logger.error(msg);
return new ExecutionResult(false, msg);
}
nic.setDeviceId(Integer.parseInt(vif.getDevice(conn)));
} else {
final String msg = "Prepare SetNetworkACL failed because the nic is null for: " + routerName;
s_logger.error(msg);
return new ExecutionResult(false, msg);
}
} catch (final Exception e) {
final String msg = "Prepare SetNetworkACL failed due to " + e.toString();
s_logger.error(msg, e);
return new ExecutionResult(false, msg);
}
return new ExecutionResult(true, null);
}
protected ExecutionResult prepareNetworkElementCommand(final SetSourceNatCommand cmd) {
final Connection conn = getConnection();
final String routerName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME);
final IpAddressTO pubIp = cmd.getIpAddress();
try {
final VM router = getVM(conn, routerName);
final VIF correctVif = getCorrectVif(conn, router, pubIp);
pubIp.setNicDevId(Integer.valueOf(correctVif.getDevice(conn)));
} catch (final Exception e) {
final String msg = "Ip SNAT failure due to " + e.toString();
s_logger.error(msg, e);
return new ExecutionResult(false, msg);
}
return new ExecutionResult(true, null);
}
/**
* @param cmd
* @return
*/
private ExecutionResult prepareNetworkElementCommand(final SetupGuestNetworkCommand cmd) {
final Connection conn = getConnection();
final NicTO nic = cmd.getNic();
final String domrName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME);
try {
final Set<VM> vms = VM.getByNameLabel(conn, domrName);
if (vms == null || vms.isEmpty()) {
return new ExecutionResult(false, "Can not find VM " + domrName);
}
final VM vm = vms.iterator().next();
final String mac = nic.getMac();
VIF domrVif = null;
for (final VIF vif : vm.getVIFs(conn)) {
final String lmac = vif.getMAC(conn);
if (lmac.equals(mac)) {
domrVif = vif;
// Do not break it! We have 2 routers.
// break;
}
}
if (domrVif == null) {
return new ExecutionResult(false, "Can not find vif with mac " + mac + " for VM " + domrName);
}
nic.setDeviceId(Integer.parseInt(domrVif.getDevice(conn)));
} catch (final Exception e) {
final String msg = "Creating guest network failed due to " + e.toString();
s_logger.warn(msg, e);
return new ExecutionResult(false, msg);
}
return new ExecutionResult(true, null);
}
public void rebootVM(final Connection conn, final VM vm, final String vmName) throws Exception {
Task task = null;
try {
task = vm.cleanRebootAsync(conn);
try {
// poll every second, timeout after 10 minutes
waitForTask(conn, task, 1000, 10 * 60 * 1000);
checkForSuccess(conn, task);
} catch (final Types.HandleInvalid e) {
if (vm.getPowerState(conn) == VmPowerState.RUNNING) {
task = null;
return;
}
throw new CloudRuntimeException("Reboot VM catch HandleInvalid and VM is not in RUNNING state");
}
} catch (final XenAPIException e) {
s_logger.debug("Unable to Clean Reboot VM(" + vmName + ") on host(" + _host.getUuid() + ") due to " + e.toString() + ", try hard reboot");
try {
vm.hardReboot(conn);
} catch (final Exception e1) {
final String msg = "Unable to hard Reboot VM(" + vmName + ") on host(" + _host.getUuid() + ") due to " + e.toString();
s_logger.warn(msg, e1);
throw new CloudRuntimeException(msg);
}
} finally {
if (task != null) {
try {
task.destroy(conn);
} catch (final Exception e1) {
s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.getUuid() + ") due to " + e1.toString());
}
}
}
}
protected void skipOrRemoveSR(Connection conn, SR sr) {
if (sr == null) {
return;
}
if (s_logger.isDebugEnabled()) {
s_logger.debug(logX(sr, "Removing SR"));
}
try {
Set<VDI> vdis = sr.getVDIs(conn);
for (VDI vdi : vdis) {
if (MapUtils.isEmpty(vdi.getCurrentOperations(conn))) {
continue;
}
return;
}
removeSR(conn, sr);
return;
} catch (XenAPIException | XmlRpcException e) {
s_logger.warn(logX(sr, "Unable to get current operations " + e.toString()), e);
}
String msg = "Remove SR failed";
s_logger.warn(msg);
}
public void removeSR(final Connection conn, final SR sr) {
if (sr == null) {
return;
}
if (s_logger.isDebugEnabled()) {
s_logger.debug(logX(sr, "Removing SR"));
}
for (int i = 0; i < 2; i++) {
try {
final Set<VDI> vdis = sr.getVDIs(conn);
for (final VDI vdi : vdis) {
vdi.forget(conn);
}
Set<PBD> pbds = sr.getPBDs(conn);
for (final PBD pbd : pbds) {
if (s_logger.isDebugEnabled()) {
s_logger.debug(logX(pbd, "Unplugging pbd"));
}
// if (pbd.getCurrentlyAttached(conn)) {
pbd.unplug(conn);
// }
pbd.destroy(conn);
}
pbds = sr.getPBDs(conn);
if (pbds.size() == 0) {
if (s_logger.isDebugEnabled()) {
s_logger.debug(logX(sr, "Forgetting"));
}
sr.forget(conn);
return;
}
if (s_logger.isDebugEnabled()) {
s_logger.debug(logX(sr, "There is still one or more PBDs attached."));
if (s_logger.isTraceEnabled()) {
for (final PBD pbd : pbds) {
s_logger.trace(logX(pbd, " Still attached"));
}
}
}
} catch (final XenAPIException e) {
s_logger.debug(logX(sr, "Catch XenAPIException: " + e.toString()));
} catch (final XmlRpcException e) {
s_logger.debug(logX(sr, "Catch Exception: " + e.getMessage()));
}
}
s_logger.warn(logX(sr, "Unable to remove SR"));
}
protected String removeSRSync(final Connection conn, final SR sr) {
if (sr == null) {
return null;
}
if (s_logger.isDebugEnabled()) {
s_logger.debug(logX(sr, "Removing SR"));
}
long waittime = 0;
try {
final Set<VDI> vdis = sr.getVDIs(conn);
for (final VDI vdi : vdis) {
final Map<java.lang.String, Types.VdiOperations> currentOperation = vdi.getCurrentOperations(conn);
if (currentOperation == null || currentOperation.size() == 0) {
continue;
}
if (waittime >= 1800000) {
final String msg = "This template is being used, try again later";
s_logger.warn(msg);
return msg;
}
waittime += 30000;
try {
Thread.sleep(30000);
} catch (final InterruptedException ex) {
}
}
removeSR(conn, sr);
return null;
} catch (final XenAPIException e) {
s_logger.warn(logX(sr, "Unable to get current operations " + e.toString()), e);
} catch (final XmlRpcException e) {
s_logger.warn(logX(sr, "Unable to get current operations " + e.getMessage()), e);
}
final String msg = "Remove SR failed";
s_logger.warn(msg);
return msg;
}
public String revertToSnapshot(final Connection conn, final VM vmSnapshot, final String vmName, final String oldVmUuid, final Boolean snapshotMemory, final String hostUUID)
throws XenAPIException, XmlRpcException {
final String results = callHostPluginAsync(conn, "vmopsSnapshot", "revert_memory_snapshot", 10 * 60 * 1000, "snapshotUUID", vmSnapshot.getUuid(conn), "vmName", vmName, "oldVmUuid", oldVmUuid,
"snapshotMemory", snapshotMemory.toString(), "hostUUID", hostUUID);
String errMsg = null;
if (results == null || results.isEmpty()) {
errMsg = "revert_memory_snapshot return null";
} else {
if (results.equals("0")) {
return results;
} else {
errMsg = "revert_memory_snapshot exception";
}
}
s_logger.warn(errMsg);
throw new CloudRuntimeException(errMsg);
}
public void scaleVM(final Connection conn, final VM vm, final VirtualMachineTO vmSpec, final Host host) throws XenAPIException, XmlRpcException {
final Long staticMemoryMax = vm.getMemoryStaticMax(conn);
final Long staticMemoryMin = vm.getMemoryStaticMin(conn);
final Long newDynamicMemoryMin = vmSpec.getMinRam();
final Long newDynamicMemoryMax = vmSpec.getMaxRam();
if (staticMemoryMin > newDynamicMemoryMin || newDynamicMemoryMax > staticMemoryMax) {
throw new CloudRuntimeException("Cannot scale up the vm because of memory constraint violation: " + "0 <= memory-static-min(" + staticMemoryMin + ") <= memory-dynamic-min("
+ newDynamicMemoryMin + ") <= memory-dynamic-max(" + newDynamicMemoryMax + ") <= memory-static-max(" + staticMemoryMax + ")");
}
vm.setMemoryDynamicRange(conn, newDynamicMemoryMin, newDynamicMemoryMax);
vm.setVCPUsNumberLive(conn, (long)vmSpec.getCpus());
final Integer speed = vmSpec.getMinSpeed();
if (speed != null) {
int cpuWeight = _maxWeight; // cpu_weight
// weight based allocation
cpuWeight = (int)(speed * 0.99 / _host.getSpeed() * _maxWeight);
if (cpuWeight > _maxWeight) {
cpuWeight = _maxWeight;
}
if (vmSpec.getLimitCpuUse()) {
long utilization = 0; // max CPU cap, default is unlimited
utilization = (int)(vmSpec.getMaxSpeed() * 0.99 * vmSpec.getCpus() / _host.getSpeed() * 100);
// vm.addToVCPUsParamsLive(conn, "cap",
// Long.toString(utilization)); currently XenServer does not
// support adding VCPUs params live through Xapi.
callHostPlugin(conn, "vmops", "add_to_VCPUs_params_live", "key", "cap", "value", Long.toString(utilization), "vmname", vmSpec.getName());
}
// vm.addToVCPUsParamsLive(conn, "weight",
// Integer.toString(cpuWeight));
callHostPlugin(conn, "vmops", "add_to_VCPUs_params_live", "key", "weight", "value", Integer.toString(cpuWeight), "vmname", vmSpec.getName());
}
}
@Override
public void setAgentControl(final IAgentControl agentControl) {
_agentControl = agentControl;
}
public void setCanBridgeFirewall(final boolean canBridgeFirewall) {
_canBridgeFirewall = canBridgeFirewall;
}
@Override
public void setConfigParams(final Map<String, Object> params) {
}
public boolean setIptables(final Connection conn) {
final String result = callHostPlugin(conn, "vmops", "setIptables");
if (result == null || result.isEmpty()) {
return false;
}
return true;
}
public void setIsOvs(final boolean isOvs) {
_isOvs = isOvs;
}
/**
* WARN: static-min <= dynamic-min <= dynamic-max <= static-max
*
* @see XcpServerResource#setMemory(com.xensource.xenapi.Connection,
* com.xensource.xenapi.VM, long, long)
* @param conn
* @param vm
* @param minMemsize
* @param maxMemsize
* @throws XmlRpcException
* @throws XenAPIException
*/
protected void setMemory(final Connection conn, final VM vm, final long minMemsize, final long maxMemsize) throws XmlRpcException, XenAPIException {
vm.setMemoryLimits(conn, mem_128m, maxMemsize, minMemsize, maxMemsize);
}
@Override
public void setName(final String name) {
}
protected void setNicDevIdIfCorrectVifIsNotNull(final Connection conn, final IpAddressTO ip, final VIF correctVif)
throws InternalErrorException, BadServerResponse, XenAPIException, XmlRpcException {
if (correctVif == null) {
if (ip.isAdd()) {
throw new InternalErrorException("Failed to find DomR VIF to associate IP with.");
} else {
s_logger.debug("VIF to disassociate IP with does not exist, return success");
}
} else {
ip.setNicDevId(Integer.valueOf(correctVif.getDevice(conn)));
}
}
@Override
public void setRunLevel(final int level) {
}
public String setupHeartbeatSr(final Connection conn, final SR sr, final boolean force) throws XenAPIException, XmlRpcException {
final SR.Record srRec = sr.getRecord(conn);
final String srUuid = srRec.uuid;
if (!srRec.shared || !SRType.LVMOHBA.equals(srRec.type) && !SRType.LVMOISCSI.equals(srRec.type) && !SRType.NFS.equals(srRec.type)) {
return srUuid;
}
String result = null;
final Host host = Host.getByUuid(conn, _host.getUuid());
final Set<String> tags = host.getTags(conn);
if (force || !tags.contains("cloud-heartbeat-" + srUuid)) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Setting up the heartbeat sr for host " + _host.getIp() + " and sr " + srUuid);
}
final Set<PBD> pbds = sr.getPBDs(conn);
for (final PBD pbd : pbds) {
final PBD.Record pbdr = pbd.getRecord(conn);
if (!pbdr.currentlyAttached && pbdr.host.getUuid(conn).equals(_host.getUuid())) {
pbd.plug(conn);
break;
}
}
result = callHostPluginThroughMaster(conn, "vmopspremium", "setup_heartbeat_sr", "host", _host.getUuid(), "sr", srUuid);
if (result == null || !result.split("#")[1].equals("0")) {
throw new CloudRuntimeException("Unable to setup heartbeat sr on SR " + srUuid + " due to " + result);
}
if (!tags.contains("cloud-heartbeat-" + srUuid)) {
tags.add("cloud-heartbeat-" + srUuid);
host.setTags(conn, tags);
}
}
result = callHostPluginPremium(conn, "setup_heartbeat_file", "host", _host.getUuid(), "sr", srUuid, "add", "true");
if (result == null || !result.split("#")[1].equals("0")) {
throw new CloudRuntimeException("Unable to setup heartbeat file entry on SR " + srUuid + " due to " + result);
}
return srUuid;
}
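// Note (assumption, inferred only from the checks above): the host plugin calls are expected to return a
// '#'-separated string whose second token is "0" on success, e.g. a hypothetical "true#0"; anything else is
// treated as a failure and raises a CloudRuntimeException.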
public void setupLinkLocalNetwork(final Connection conn) {
try {
final Network.Record rec = new Network.Record();
final Set<Network> networks = Network.getByNameLabel(conn, _linkLocalPrivateNetworkName);
Network linkLocal = null;
if (networks.size() == 0) {
rec.nameDescription = "link local network used by system vms";
rec.nameLabel = _linkLocalPrivateNetworkName;
final Map<String, String> configs = new HashMap<String, String>();
configs.put("ip_begin", NetUtils.getLinkLocalGateway());
configs.put("ip_end", NetUtils.getLinkLocalIpEnd());
configs.put("netmask", NetUtils.getLinkLocalNetMask());
configs.put("vswitch-disable-in-band", "true");
rec.otherConfig = configs;
linkLocal = Network.create(conn, rec);
} else {
linkLocal = networks.iterator().next();
if (!linkLocal.getOtherConfig(conn).containsKey("vswitch-disable-in-band")) {
linkLocal.addToOtherConfig(conn, "vswitch-disable-in-band", "true");
}
}
/* Make sure there is a physical bridge on this network */
VIF dom0vif = null;
final Pair<VM, VM.Record> vm = getControlDomain(conn);
final VM dom0 = vm.first();
final Set<VIF> vifs = dom0.getVIFs(conn);
if (vifs.size() != 0) {
for (final VIF vif : vifs) {
final Map<String, String> otherConfig = vif.getOtherConfig(conn);
if (otherConfig != null) {
final String nameLabel = otherConfig.get("nameLabel");
if (nameLabel != null && nameLabel.equalsIgnoreCase("link_local_network_vif")) {
dom0vif = vif;
}
}
}
}
/* create temp VIF0 */
if (dom0vif == null) {
s_logger.debug("Can't find a vif on dom0 for link local, creating a new one");
final VIF.Record vifr = new VIF.Record();
vifr.VM = dom0;
vifr.device = getLowestAvailableVIFDeviceNum(conn, dom0);
if (vifr.device == null) {
s_logger.debug("Failed to create link local network, no vif available");
return;
}
final Map<String, String> config = new HashMap<String, String>();
config.put("nameLabel", "link_local_network_vif");
vifr.otherConfig = config;
vifr.MAC = "FE:FF:FF:FF:FF:FF";
vifr.network = linkLocal;
vifr.lockingMode = Types.VifLockingMode.NETWORK_DEFAULT;
dom0vif = VIF.create(conn, vifr);
plugDom0Vif(conn, dom0vif);
} else {
s_logger.debug("already have a vif on dom0 for link local network");
if (!dom0vif.getCurrentlyAttached(conn)) {
plugDom0Vif(conn, dom0vif);
}
}
final String brName = linkLocal.getBridge(conn);
callHostPlugin(conn, "vmops", "setLinkLocalIP", "brName", brName);
_host.setLinkLocalNetwork(linkLocal.getUuid(conn));
} catch (final XenAPIException e) {
s_logger.warn("Unable to create local link network", e);
throw new CloudRuntimeException("Unable to create local link network due to " + e.toString(), e);
} catch (final XmlRpcException e) {
s_logger.warn("Unable to create local link network", e);
throw new CloudRuntimeException("Unable to create local link network due to " + e.toString(), e);
}
}
/* return : if setup is needed */
public boolean setupServer(final Connection conn, final Host host) {
final String packageVersion = CitrixResourceBase.class.getPackage().getImplementationVersion();
final String version = this.getClass().getName() + "-" + (packageVersion == null ? Long.toString(System.currentTimeMillis()) : packageVersion);
try {
/* push patches to XenServer */
final Host.Record hr = host.getRecord(conn);
final Iterator<String> it = hr.tags.iterator();
while (it.hasNext()) {
final String tag = it.next();
if (tag.startsWith("vmops-version-")) {
if (tag.contains(version)) {
s_logger.info(logX(host, "Host " + hr.address + " is already setup."));
return false;
} else {
it.remove();
}
}
}
final com.trilead.ssh2.Connection sshConnection = new com.trilead.ssh2.Connection(hr.address, 22);
try {
sshConnection.connect(null, 60000, 60000);
if (!sshConnection.authenticateWithPassword(_username, _password.peek())) {
throw new CloudRuntimeException("Unable to authenticate");
}
final String cmd = "mkdir -p /opt/cloud/bin /var/log/cloud";
if (!SSHCmdHelper.sshExecuteCmd(sshConnection, cmd)) {
throw new CloudRuntimeException("Cannot create directory /opt/cloud/bin on XenServer hosts");
}
final SCPClient scp = new SCPClient(sshConnection);
final List<File> files = getPatchFiles();
if (files == null || files.isEmpty()) {
throw new CloudRuntimeException("Can not find patch file");
}
for (final File file : files) {
final String path = file.getParentFile().getAbsolutePath() + "/";
final Properties props = PropertiesUtil.loadFromFile(file);
for (final Map.Entry<Object, Object> entry : props.entrySet()) {
final String k = (String)entry.getKey();
final String v = (String)entry.getValue();
assert k != null && k.length() > 0 && v != null && v.length() > 0 : "Problems with " + k + "=" + v;
final String[] tokens = v.split(",");
String f = null;
if (tokens.length == 3 && tokens[0].length() > 0) {
if (tokens[0].startsWith("/")) {
f = tokens[0];
} else if (tokens[0].startsWith("~")) {
final String homedir = System.getenv("HOME");
f = homedir + tokens[0].substring(1) + k;
} else {
f = path + tokens[0] + '/' + k;
}
} else {
f = path + k;
}
final String directoryPath = tokens[tokens.length - 1];
f = f.replace('/', File.separatorChar);
String permissions = "0755";
if (tokens.length == 3) {
permissions = tokens[1];
} else if (tokens.length == 2) {
permissions = tokens[0];
}
if (!new File(f).exists()) {
s_logger.warn("We cannot locate " + f);
continue;
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Copying " + f + " to " + directoryPath + " on " + hr.address + " with permission " + permissions);
}
if (!SSHCmdHelper.sshExecuteCmd(sshConnection, "mkdir -m 700 -p " + directoryPath)) {
s_logger.debug("Unable to create destination path: " + directoryPath + " on " + hr.address + ".");
}
try {
scp.put(f, directoryPath, permissions);
} catch (final IOException e) {
final String msg = "Unable to copy file " + f + " to path " + directoryPath + " with permissions " + permissions;
s_logger.debug(msg);
throw new CloudRuntimeException("Unable to setup the server: " + msg, e);
}
}
}
} catch (final IOException e) {
throw new CloudRuntimeException("Unable to setup the server correctly", e);
} finally {
sshConnection.close();
}
hr.tags.add("vmops-version-" + version);
host.setTags(conn, hr.tags);
return true;
} catch (final XenAPIException e) {
final String msg = "XenServer setup failed due to " + e.toString();
s_logger.warn(msg, e);
throw new CloudRuntimeException("Unable to get host information " + e.toString(), e);
} catch (final XmlRpcException e) {
final String msg = "XenServer setup failed due to " + e.getMessage();
s_logger.warn(msg, e);
throw new CloudRuntimeException("Unable to get host information ", e);
}
}
public synchronized Network setupvSwitchNetwork(final Connection conn) {
try {
if (_host.getVswitchNetwork() == null) {
Network vswitchNw = null;
final Network.Record rec = new Network.Record();
final String nwName = Networks.BroadcastScheme.VSwitch.toString();
final Set<Network> networks = Network.getByNameLabel(conn, nwName);
if (networks.size() == 0) {
rec.nameDescription = "vswitch network for " + nwName;
rec.nameLabel = nwName;
vswitchNw = Network.create(conn, rec);
} else {
vswitchNw = networks.iterator().next();
}
_host.setVswitchNetwork(vswitchNw);
}
return _host.getVswitchNetwork();
} catch (final BadServerResponse e) {
s_logger.error("Failed to setup vswitch network", e);
} catch (final XenAPIException e) {
s_logger.error("Failed to setup vswitch network", e);
} catch (final XmlRpcException e) {
s_logger.error("Failed to setup vswitch network", e);
}
return null;
}
public void shutdownVM(final Connection conn, final VM vm, final String vmName, final boolean forcedStop) throws XmlRpcException {
Task task = null;
try {
if (forcedStop) {
task = vm.hardShutdownAsync(conn);
} else {
task = vm.cleanShutdownAsync(conn);
}
try {
// poll every second, timeout after 10 minutes
waitForTask(conn, task, 1000, 10 * 60 * 1000);
checkForSuccess(conn, task);
} catch (final TimeoutException e) {
if (vm.getPowerState(conn) == VmPowerState.HALTED) {
task = null;
return;
}
throw new CloudRuntimeException("Shutdown VM timed out and VM is not in HALTED state");
}
} catch (final XenAPIException e) {
s_logger.debug("Unable to shutdown VM(" + vmName + ") with force=" + forcedStop + " on host(" + _host.getUuid() + ") due to " + e.toString());
try {
VmPowerState state = vm.getPowerState(conn);
if (state == VmPowerState.RUNNING) {
try {
vm.hardShutdown(conn);
} catch (final Exception e1) {
s_logger.debug("Unable to hardShutdown VM(" + vmName + ") on host(" + _host.getUuid() + ") due to " + e.toString());
state = vm.getPowerState(conn);
if (state == VmPowerState.RUNNING) {
forceShutdownVM(conn, vm);
}
return;
}
} else if (state == VmPowerState.HALTED) {
return;
} else {
final String msg = "After cleanShutdown the VM status is " + state.toString() + ", that is not expected";
s_logger.warn(msg);
throw new CloudRuntimeException(msg);
}
} catch (final Exception e1) {
final String msg = "Unable to hardShutdown VM(" + vmName + ") on host(" + _host.getUuid() + ") due to " + e.toString();
s_logger.warn(msg, e1);
throw new CloudRuntimeException(msg);
}
} finally {
if (task != null) {
try {
task.destroy(conn);
} catch (final Exception e1) {
s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.getUuid() + ") due to " + e1.toString());
}
}
}
}
@Override
public boolean start() {
return true;
}
public void startVM(final Connection conn, final Host host, final VM vm, final String vmName) throws Exception {
Task task = null;
try {
task = vm.startOnAsync(conn, host, false, true);
try {
// poll every second, timeout after 10 minutes
waitForTask(conn, task, 1000, 10 * 60 * 1000);
checkForSuccess(conn, task);
} catch (final Types.HandleInvalid e) {
if (vm.getPowerState(conn) == VmPowerState.RUNNING) {
s_logger.debug("VM " + vmName + " is in Running status", e);
task = null;
return;
}
throw new CloudRuntimeException("Start VM " + vmName + " catch HandleInvalid and VM is not in RUNNING state");
} catch (final TimeoutException e) {
if (vm.getPowerState(conn) == VmPowerState.RUNNING) {
s_logger.debug("VM " + vmName + " is in Running status", e);
task = null;
return;
}
throw new CloudRuntimeException("Start VM " + vmName + " timed out and VM is not in RUNNING state");
}
} catch (final XenAPIException e) {
final String msg = "Unable to start VM(" + vmName + ") on host(" + _host.getUuid() + ") due to " + e.toString();
s_logger.warn(msg, e);
throw new CloudRuntimeException(msg);
} finally {
if (task != null) {
try {
task.destroy(conn);
} catch (final Exception e1) {
s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.getUuid() + ") due to " + e1.toString());
}
}
}
}
protected void startvmfailhandle(final Connection conn, final VM vm, final List<Ternary<SR, VDI, VolumeVO>> mounts) {
if (vm != null) {
try {
if (vm.getPowerState(conn) == VmPowerState.RUNNING) {
try {
vm.hardShutdown(conn);
} catch (final Exception e) {
final String msg = "VM hardshutdown failed due to " + e.toString();
s_logger.warn(msg, e);
}
}
if (vm.getPowerState(conn) == VmPowerState.HALTED) {
try {
vm.destroy(conn);
} catch (final Exception e) {
final String msg = "VM destroy failed due to " + e.toString();
s_logger.warn(msg, e);
}
}
} catch (final Exception e) {
final String msg = "VM getPowerState failed due to " + e.toString();
s_logger.warn(msg, e);
}
}
if (mounts != null) {
for (final Ternary<SR, VDI, VolumeVO> mount : mounts) {
final VDI vdi = mount.second();
Set<VBD> vbds = null;
try {
vbds = vdi.getVBDs(conn);
} catch (final Exception e) {
final String msg = "VDI getVBDS failed due to " + e.toString();
s_logger.warn(msg, e);
continue;
}
for (final VBD vbd : vbds) {
try {
vbd.unplug(conn);
vbd.destroy(conn);
} catch (final Exception e) {
final String msg = "VBD destroy failed due to " + e.toString();
s_logger.warn(msg, e);
}
}
}
}
}
@Override
public boolean stop() {
disconnected();
return true;
}
private HashMap<String, Pair<Long, Long>> syncNetworkGroups(final Connection conn, final long id) {
final HashMap<String, Pair<Long, Long>> states = new HashMap<String, Pair<Long, Long>>();
final String result = callHostPlugin(conn, "vmops", "get_rule_logs_for_vms", "host_uuid", _host.getUuid());
s_logger.trace("syncNetworkGroups: id=" + id + " got: " + result);
final String[] rulelogs = result != null ? result.split(";") : new String[0];
for (final String rulesforvm : rulelogs) {
final String[] log = rulesforvm.split(",");
if (log.length != 6) {
continue;
}
// output = ','.join([vmName, vmID, vmIP, domID, signature, seqno])
try {
states.put(log[0], new Pair<Long, Long>(Long.parseLong(log[1]), Long.parseLong(log[5])));
} catch (final NumberFormatException nfe) {
states.put(log[0], new Pair<Long, Long>(-1L, -1L));
}
}
return states;
}
public boolean transferManagementNetwork(final Connection conn, final Host host, final PIF src, final PIF.Record spr, final PIF dest) throws XmlRpcException, XenAPIException {
dest.reconfigureIp(conn, spr.ipConfigurationMode, spr.IP, spr.netmask, spr.gateway, spr.DNS);
Host.managementReconfigure(conn, dest);
String hostUuid = null;
int count = 0;
while (count < 10) {
try {
Thread.sleep(10000);
hostUuid = host.getUuid(conn);
if (hostUuid != null) {
break;
}
++count;
} catch (final XmlRpcException e) {
s_logger.debug("Waiting for host to come back: " + e.getMessage());
} catch (final XenAPIException e) {
s_logger.debug("Waiting for host to come back: " + e.getMessage());
} catch (final InterruptedException e) {
s_logger.debug("Interrupted while waiting for the host to come back, aborting");
return false;
}
}
if (hostUuid == null) {
s_logger.warn("Unable to transfer the management network from " + spr.uuid);
return false;
}
src.reconfigureIp(conn, Types.IpConfigurationMode.NONE, null, null, null, null);
return true;
}
protected void umount(final Connection conn, final VDI vdi) {
}
public void umountSnapshotDir(final Connection conn, final Long dcId) {
try {
callHostPlugin(conn, "vmopsSnapshot", "unmountSnapshotsDir", "dcId", dcId.toString());
} catch (final Exception e) {
s_logger.debug("Failed to umount snapshot dir", e);
}
}
public String upgradeSnapshot(final Connection conn, final String templatePath, final String snapshotPath) {
final String results = callHostPluginAsync(conn, "vmopspremium", "upgrade_snapshot", 2 * 60 * 60, "templatePath", templatePath, "snapshotPath", snapshotPath);
if (results == null || results.isEmpty()) {
final String msg = "upgrade_snapshot return null";
s_logger.warn(msg);
throw new CloudRuntimeException(msg);
}
final String[] tmp = results.split("#");
final String status = tmp[0];
if (status.equals("0")) {
return results;
} else {
s_logger.warn(results);
throw new CloudRuntimeException(results);
}
}
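// Note (assumption, inferred only from the parsing above): the vmopspremium "upgrade_snapshot" plugin call is
// expected to return a '#'-separated string whose first token is "0" on success (e.g. a hypothetical "0#<path>");
// any other status is logged and surfaces as a CloudRuntimeException.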
public void waitForTask(final Connection c, final Task task, final long pollInterval, final long timeout) throws XenAPIException, XmlRpcException, TimeoutException {
final long beginTime = System.currentTimeMillis();
if (s_logger.isTraceEnabled()) {
s_logger.trace("Task " + task.getNameLabel(c) + " (" + task.getUuid(c) + ") sent to " + c.getSessionReference() + " is pending completion with a " + timeout + "ms timeout");
}
while (task.getStatus(c) == Types.TaskStatusType.PENDING) {
try {
if (s_logger.isTraceEnabled()) {
s_logger.trace("Task " + task.getNameLabel(c) + " (" + task.getUuid(c) + ") is pending, sleeping for " + pollInterval + "ms");
}
Thread.sleep(pollInterval);
} catch (final InterruptedException e) {
}
if (System.currentTimeMillis() - beginTime > timeout) {
final String msg = "Async " + timeout / 1000 + " seconds timeout for task " + task.toString();
s_logger.warn(msg);
task.cancel(c);
task.destroy(c);
throw new TimeoutException(msg);
}
}
}
public boolean createAndAttachConfigDriveIsoForVM(final Connection conn, final VM vm, final List<String[]> vmDataList, final String configDriveLabel) throws XenAPIException, XmlRpcException {
final String vmName = vm.getNameLabel(conn);
// create SR
final SR sr = createLocalIsoSR(conn, _configDriveSRName + _host.getIp());
if (sr == null) {
s_logger.debug("Failed to create local SR for the config drive");
return false;
}
s_logger.debug("Creating vm data files in config drive for vm " + vmName);
// 1. create vm data files
if (!createVmdataFiles(vmName, vmDataList, configDriveLabel)) {
s_logger.debug("Failed to create vm data files in config drive for vm " + vmName);
return false;
}
// 2. copy config drive iso to host
if (!copyConfigDriveIsoToHost(conn, sr, vmName)) {
return false;
}
// 3. attachIsoToVM
if (!attachConfigDriveIsoToVm(conn, vm)) {
return false;
}
return true;
}
public boolean createVmdataFiles(final String vmName, final List<String[]> vmDataList, final String configDriveLabel) {
// add vm iso to the isolibrary
final String isoPath = "/tmp/" + vmName + "/configDrive/";
final String configDriveName = "cloudstack/";
//create folder for the VM
//Remove the folder before creating it.
try {
// isoPath already starts with /tmp/, so use it directly instead of prefixing it again
deleteLocalFolder(isoPath);
} catch (final IOException e) {
s_logger.debug("Failed to delete the existing config drive for vm " + vmName + " " + e.getMessage());
} catch (final Exception e) {
s_logger.debug("Failed to delete the existing config drive for vm " + vmName + " " + e.getMessage());
}
if (vmDataList != null) {
for (final String[] item : vmDataList) {
final String dataType = item[0];
final String fileName = item[1];
final String content = item[2];
// create file with content in folder
if (dataType != null && !dataType.isEmpty()) {
//create folder
final String folder = isoPath + configDriveName + dataType;
if (folder != null && !folder.isEmpty()) {
final File dir = new File(folder);
final boolean result = true;
try {
if (!dir.exists()) {
dir.mkdirs();
}
} catch (final SecurityException ex) {
s_logger.debug("Failed to create dir " + ex.getMessage());
return false;
}
if (result && content != null && !content.isEmpty()) {
File file = new File(folder + "/" + fileName + ".txt");
try (OutputStreamWriter fw = new OutputStreamWriter(new FileOutputStream(file.getAbsoluteFile()), "UTF-8");
BufferedWriter bw = new BufferedWriter(fw);) {
bw.write(content);
s_logger.debug("created file: " + file + " in folder:" + folder);
} catch (final IOException ex) {
s_logger.debug("Failed to create file " + ex.getMessage());
return false;
}
}
}
}
}
s_logger.debug("Created the vm data in " + isoPath);
}
String s = null;
try {
final String cmd = "mkisofs -iso-level 3 -V " + configDriveLabel + " -o " + isoPath + vmName + ".iso " + isoPath;
final Process p = Runtime.getRuntime().exec(cmd);
final BufferedReader stdInput = new BufferedReader(new InputStreamReader(p.getInputStream(), Charset.defaultCharset()));
final BufferedReader stdError = new BufferedReader(new InputStreamReader(p.getErrorStream(), Charset.defaultCharset()));
// read the output from the command
while ((s = stdInput.readLine()) != null) {
s_logger.debug(s);
}
// read any errors from the attempted command
while ((s = stdError.readLine()) != null) {
s_logger.debug(s);
}
s_logger.debug(" Created config drive ISO using the command " + cmd + " in the host " + _host.getIp());
} catch (final IOException e) {
s_logger.debug(e.getMessage());
return false;
}
return true;
}
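// Illustrative sketch (hypothetical names, derived only from the path handling above): for vmName "i-2-10-VM"
// with a vmDataList entry of the form [dataType, fileName, content] such as ["userdata", "user_data", "..."],
// this method writes /tmp/i-2-10-VM/configDrive/cloudstack/userdata/user_data.txt and then packs the
// /tmp/i-2-10-VM/configDrive/ directory into /tmp/i-2-10-VM/configDrive/i-2-10-VM.iso via mkisofs.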
public boolean copyConfigDriveIsoToHost(final Connection conn, final SR sr, final String vmName) {
final String vmIso = "/tmp/" + vmName + "/configDrive/" + vmName + ".iso";
//scp file into the host
final com.trilead.ssh2.Connection sshConnection = new com.trilead.ssh2.Connection(_host.getIp(), 22);
try {
sshConnection.connect(null, 60000, 60000);
if (!sshConnection.authenticateWithPassword(_username, _password.peek())) {
throw new CloudRuntimeException("Unable to authenticate");
}
s_logger.debug("scp config drive iso file " + vmIso + " to host " + _host.getIp() + " path " + _configDriveIsopath);
final SCPClient scp = new SCPClient(sshConnection);
final String p = "0755";
scp.put(vmIso, _configDriveIsopath, p);
sr.scan(conn);
s_logger.debug("copied config drive iso to host " + _host);
} catch (final IOException e) {
s_logger.debug("failed to copy configdrive iso " + vmIso + " to host " + _host, e);
return false;
} catch (final XmlRpcException e) {
s_logger.debug("Failed to scan config drive iso SR " + _configDriveSRName + _host.getIp() + " in host " + _host, e);
return false;
} finally {
sshConnection.close();
//clean up the config drive files
final String configDir = "/tmp/" + vmName;
try {
deleteLocalFolder(configDir);
s_logger.debug("Successfully cleaned up config drive directory " + configDir + " after copying it to host ");
} catch (final Exception e) {
s_logger.debug("Failed to delete config drive folder :" + configDir + " for VM " + vmName + " " + e.getMessage());
}
}
return true;
}
public boolean attachConfigDriveIsoToVm(final Connection conn, final VM vm) throws XenAPIException, XmlRpcException {
final String vmName = vm.getNameLabel(conn);
final String isoURL = _configDriveIsopath + vmName + ".iso";
VDI srVdi;
//1. find the vdi of the iso
//2. find the vbd for the vdi
//3. attach iso to vm
try {
final Set<VDI> vdis = VDI.getByNameLabel(conn, vmName + ".iso");
if (vdis.isEmpty()) {
throw new CloudRuntimeException("Could not find ISO with URL: " + isoURL);
}
srVdi = vdis.iterator().next();
} catch (final XenAPIException e) {
s_logger.debug("Unable to get config drive iso: " + isoURL + " due to " + e.toString());
return false;
} catch (final Exception e) {
s_logger.debug("Unable to get config drive iso: " + isoURL + " due to " + e.toString());
return false;
}
VBD isoVBD = null;
// Find the VM's CD-ROM VBD
final Set<VBD> vbds = vm.getVBDs(conn);
for (final VBD vbd : vbds) {
final Types.VbdType type = vbd.getType(conn);
final VBD.Record vbdr = vbd.getRecord(conn);
// if the device exists then attach it
if (!vbdr.userdevice.equals(_attachIsoDeviceNum) && type == Types.VbdType.CD) {
isoVBD = vbd;
break;
}
}
if (isoVBD == null) {
//create vbd
final VBD.Record cfgDriveVbdr = new VBD.Record();
cfgDriveVbdr.VM = vm;
cfgDriveVbdr.empty = true;
cfgDriveVbdr.bootable = false;
cfgDriveVbdr.userdevice = "autodetect";
cfgDriveVbdr.mode = Types.VbdMode.RO;
cfgDriveVbdr.type = Types.VbdType.CD;
final VBD cfgDriveVBD = VBD.create(conn, cfgDriveVbdr);
isoVBD = cfgDriveVBD;
s_logger.debug("Created CD-ROM VBD for VM: " + vm);
}
if (isoVBD != null) {
// If an ISO is already inserted, eject it
if (!isoVBD.getEmpty(conn)) {
isoVBD.eject(conn);
}
try {
// Insert the new ISO
isoVBD.insert(conn, srVdi);
s_logger.debug("Attached config drive iso to vm " + vmName);
} catch (final XmlRpcException ex) {
s_logger.debug("Failed to attach config drive iso to vm " + vmName);
return false;
}
}
return true;
}
public SR createLocalIsoSR(final Connection conn, final String srName) throws XenAPIException, XmlRpcException {
// if config drive sr already exists then return
SR sr = getSRByNameLabelandHost(conn, _configDriveSRName + _host.getIp());
if (sr != null) {
s_logger.debug("Config drive SR already exists, returning it");
return sr;
}
try {
final Map<String, String> deviceConfig = new HashMap<String, String>();
final com.trilead.ssh2.Connection sshConnection = new com.trilead.ssh2.Connection(_host.getIp(), 22);
try {
sshConnection.connect(null, 60000, 60000);
if (!sshConnection.authenticateWithPassword(_username, _password.peek())) {
throw new CloudRuntimeException("Unable to authenticate");
}
final String cmd = "mkdir -p " + _configDriveIsopath;
if (!SSHCmdHelper.sshExecuteCmd(sshConnection, cmd)) {
throw new CloudRuntimeException("Cannot create directory configdrive_iso on XenServer hosts");
}
} catch (final IOException e) {
throw new CloudRuntimeException("Unable to create iso folder", e);
} finally {
sshConnection.close();
}
s_logger.debug("Created the config drive SR " + srName + " folder path " + _configDriveIsopath);
deviceConfig.put("location", _configDriveIsopath);
deviceConfig.put("legacy_mode", "true");
final Host host = Host.getByUuid(conn, _host.getUuid());
final String type = SRType.ISO.toString();
sr = SR.create(conn, host, deviceConfig, new Long(0), _configDriveIsopath, "iso", type, "iso", false, new HashMap<String, String>());
sr.setNameLabel(conn, srName);
sr.setNameDescription(conn, deviceConfig.get("location"));
sr.scan(conn);
s_logger.debug("Config drive ISO SR at the path " + _configDriveIsopath + " got created in host " + _host);
return sr;
} catch (final XenAPIException e) {
final String msg = "createLocalIsoSR failed! mountpoint " + e.toString();
s_logger.warn(msg, e);
throw new CloudRuntimeException(msg, e);
} catch (final Exception e) {
final String msg = "createLocalIsoSR failed! mountpoint: due to " + e.getMessage();
s_logger.warn(msg, e);
throw new CloudRuntimeException(msg, e);
}
}
public void deleteLocalFolder(final String directory) throws Exception {
if (directory == null || directory.isEmpty()) {
final String msg = "Invalid directory path (null/empty) detected. Cannot delete specified directory.";
s_logger.debug(msg);
throw new Exception(msg);
}
try {
FileUtils.deleteDirectory(new File(directory));
} catch (final IOException e) {
// IOException here means failure to delete. Not swallowing it here to
// let the caller handle with appropriate contextual log message.
throw e;
}
}
protected SR getSRByNameLabel(Connection conn, String name) throws BadServerResponse, XenAPIException, XmlRpcException {
Set<SR> srs = SR.getByNameLabel(conn, name);
SR ressr = null;
for (SR sr : srs) {
Set<PBD> pbds;
pbds = sr.getPBDs(conn);
for (PBD pbd : pbds) {
PBD.Record pbdr = pbd.getRecord(conn);
if (pbdr.host != null) {
ressr = sr;
break;
}
}
}
return ressr;
}
public boolean attachConfigDriveToMigratedVm(Connection conn, String vmName, String ipAddr) {
// attach the config drive in destination host
try {
s_logger.debug("Attaching config drive iso device for the VM " + vmName + " in host " + ipAddr);
Set<VM> vms = VM.getByNameLabel(conn, vmName);
SR sr = getSRByNameLabel(conn, _configDriveSRName + ipAddr);
//Here you will find only two VDIs with the <vmname>.iso:
//one from the source host and one from the destination host
Set<VDI> vdis = VDI.getByNameLabel(conn, vmName + ".iso");
if (vdis.isEmpty()) {
s_logger.debug("Could not find config drive ISO: " + vmName);
return false;
}
VDI configdriveVdi = null;
for (VDI vdi : vdis) {
SR vdiSr = vdi.getSR(conn);
if (vdiSr.getUuid(conn).equals(sr.getUuid(conn))) {
//get this vdi to attach to vbd
configdriveVdi = vdi;
s_logger.debug("VDI for the config drive ISO " + vdi);
} else {
// delete the VDI on the source host so that the <vmname>.iso file gets removed
s_logger.debug("Removing the source host VDI for the config drive ISO " + vdi);
vdi.destroy(conn);
}
}
if (configdriveVdi == null) {
s_logger.debug("Config drive ISO VDI is not found ");
return false;
}
for (VM vm : vms) {
//create vbd
VBD.Record cfgDriveVbdr = new VBD.Record();
cfgDriveVbdr.VM = vm;
cfgDriveVbdr.empty = true;
cfgDriveVbdr.bootable = false;
cfgDriveVbdr.userdevice = "autodetect";
cfgDriveVbdr.mode = Types.VbdMode.RO;
cfgDriveVbdr.type = Types.VbdType.CD;
VBD cfgDriveVBD = VBD.create(conn, cfgDriveVbdr);
s_logger.debug("Inserting vbd " + configdriveVdi);
cfgDriveVBD.insert(conn, configdriveVdi);
break;
}
return true;
} catch (BadServerResponse e) {
s_logger.warn("Failed to attach config drive ISO to the VM " + vmName + " In host " + ipAddr + " due to a bad server response.", e);
return false;
} catch (XenAPIException e) {
s_logger.warn("Failed to attach config drive ISO to the VM " + vmName + " In host " + ipAddr + " due to a xapi problem.", e);
return false;
} catch (XmlRpcException e) {
s_logger.warn("Failed to attach config drive ISO to the VM " + vmName + " In host " + ipAddr + " due to a problem in a remote call.", e);
return false;
}
}
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | java | 1 | 0 | |
src/radical/entk/appman/appmanager.py | __copyright__ = "Copyright 2017-2018, http://radical.rutgers.edu"
__author__ = "Vivek Balasubramanian <[email protected]>"
__license__ = "MIT"
import radical.utils as ru
from radical.entk.exceptions import *
from radical.entk.pipeline.pipeline import Pipeline
from radical.entk.stage.stage import Stage
from radical.entk.task.task import Task
from radical.entk.utils.prof_utils import write_session_description
from radical.entk.utils.prof_utils import write_workflow
from wfprocessor import WFprocessor
import sys
import time
import os
import Queue
import pika
import json
from threading import Thread, Event
from radical.entk import states
class AppManager(object):
"""
An application manager takes responsibility for setting up the communication infrastructure and instantiating the
ResourceManager, TaskManager and WFProcessor objects along with all their threads and processes. This is the Master object
running in the main process and it is designed to recover from errors in all other objects, threads and processes.
:Arguments:
:config_path: path to the config directory to be read by the AppManager
:hostname: host on which the RabbitMQ server is running
:port: port at which rabbitmq can be accessed
:reattempts: number of attempts to re-invoke any failed EnTK components
:resubmit_failed: resubmit failed tasks (True/False)
:autoterminate: terminate resource reservation upon execution of all tasks of first workflow (True/False)
:write_workflow: write workflow and mapping to rts entities to a file (post-termination)
:rts: Specify RTS to use. Current options: 'mock', 'radical.pilot' (default if unspecified)
:rmq_cleanup: Cleanup all queues created in RabbitMQ server for current execution (default is True)
:rts_config: Configuration for the RTS, accepts {"sandbox_cleanup": True/False,"db_cleanup": True/False} when RTS is RP
:name: Name of the Application. It should be unique between executions. (default is randomly assigned)
"""
def __init__(self,
config_path=None,
hostname=None,
port=None,
reattempts=None,
resubmit_failed=None,
autoterminate=None,
write_workflow=None,
rts=None,
rmq_cleanup=None,
rts_config=None,
name=None):
# Create a session for each EnTK script execution
if name:
self._name = name
self._sid = name
else:
self._name = str()
self._sid = ru.generate_id('re.session', ru.ID_PRIVATE)
self._read_config(config_path, hostname, port, reattempts,
resubmit_failed, autoterminate, write_workflow,
rts, rmq_cleanup, rts_config)
# Create an uid + logger + profiles for AppManager, under the sid
# namespace
path = os.getcwd() + '/' + self._sid
self._uid = ru.generate_id('appmanager.%(item_counter)04d', ru.ID_CUSTOM, namespace=self._sid)
self._logger = ru.Logger('radical.entk.%s' % self._uid, path=path, targets=['2','.'])
self._prof = ru.Profiler(name='radical.entk.%s' % self._uid, path=path)
self._report = ru.Reporter(name='radical.entk.%s' % self._uid)
self._report.info('EnTK session: %s\n' % self._sid)
self._prof.prof('create amgr obj', uid=self._uid)
self._report.info('Creating AppManager')
self._resource_manager = None
# RabbitMQ Queues
self._pending_queue = list()
self._completed_queue = list()
# Global parameters to have default values
self._mqs_setup = False
self._resource_desc = None
self._task_manager = None
self._workflow = None
self._cur_attempt = 1
self._shared_data = list()
self._rmq_ping_interval = int(os.getenv('RMQ_PING_INTERVAL', 10))
self._logger.info('Application Manager initialized')
self._prof.prof('amgr obj created', uid=self._uid)
self._report.ok('>>ok\n')
def _read_config(self, config_path, hostname, port, reattempts,
resubmit_failed, autoterminate, write_workflow,
rts, rmq_cleanup, rts_config):
if not config_path:
config_path = os.path.dirname(os.path.abspath(__file__))
config = ru.read_json(os.path.join(config_path, 'config.json'))
self._mq_hostname = hostname if hostname else str(config['hostname'])
self._port = port if port else config['port']
self._reattempts = reattempts if reattempts else config['reattempts']
self._resubmit_failed = resubmit_failed if resubmit_failed is not None else config['resubmit_failed']
self._autoterminate = autoterminate if autoterminate is not None else config['autoterminate']
self._write_workflow = write_workflow if write_workflow is not None else config['write_workflow']
self._rts = rts if rts in ['radical.pilot', 'mock'] else str(config['rts'])
self._rmq_cleanup = rmq_cleanup if rmq_cleanup is not None else config['rmq_cleanup']
self._rts_config = rts_config if rts_config is not None else config['rts_config']
self._num_pending_qs = config['pending_qs']
self._num_completed_qs = config['completed_qs']
# ------------------------------------------------------------------------------------------------------------------
# Getter functions
# ------------------------------------------------------------------------------------------------------------------
@property
def name(self):
"""
Name of the application manager. Allows the user to set up the name of
the application manager as well as its session ID. This name should be
unique between different EnTK executions, otherwise it will produce an
error.
:getter: Returns the name of the application manager
:setter: Assigns the name of the application manager
:type: String
"""
return self._name
@property
def sid(self):
"""
Get the session ID of the current EnTK execution
:getter: Returns the session ID of the EnTK execution
:type: String
"""
return self._sid
@property
def resource_desc(self):
"""
:getter: Returns the resource description
:setter: Assigns a resource description
"""
return self._resource_desc
@property
def workflow(self):
"""
:getter: Return the workflow assigned for execution
:setter: Assign workflow to be executed
"""
return self._workflow
@property
def shared_data(self):
"""
:getter: Return list of filenames that are shared between multiple tasks of the application
:setter: Assign a list of names of files that need to be staged to the remote machine
"""
return self._shared_data
# ------------------------------------------------------------------------------------------------------------------
# Setter functions
# ------------------------------------------------------------------------------------------------------------------
@name.setter
def name(self, value):
if not isinstance(value, str):
raise TypeError(expected_type=str, actual_type=type(value))
else:
self._name = value
@resource_desc.setter
def resource_desc(self, value):
if self._rts == 'radical.pilot':
from radical.entk.execman.rp import ResourceManager
self._resource_manager = ResourceManager(resource_desc=value,
sid=self._sid,
rts_config=self._rts_config)
elif self._rts == 'mock':
from radical.entk.execman.mock import ResourceManager
self._resource_manager = ResourceManager(resource_desc=value,
sid=self._sid)
self._report.info('Validating and assigning resource manager')
if self._resource_manager._validate_resource_desc():
self._resource_manager._populate()
self._resource_manager.shared_data = self._shared_data
else:
self._logger.error('Could not validate resource description')
raise EnTKError(text='Could not validate resource description')
self._report.ok('>>ok\n')
@workflow.setter
def workflow(self, workflow):
self._prof.prof('assigning workflow', uid=self._uid)
for p in workflow:
if not isinstance(p, Pipeline):
self._logger.info('workflow type incorrect')
raise TypeError(expected_type=['Pipeline', 'set of Pipelines'], actual_type=type(p))
p._validate()
self._workflow = workflow
self._logger.info('Workflow assigned to Application Manager')
@shared_data.setter
def shared_data(self, data):
if not isinstance(data, list):
data = [data]
for val in data:
if not isinstance(val, str):
raise TypeError(expected_type=str, actual_type=type(val))
if self._resource_manager:
self._resource_manager.shared_data = data
# ------------------------------------------------------------------------------------------------------------------
# Public methods
# ------------------------------------------------------------------------------------------------------------------
def run(self):
"""
**Purpose**: Run the application manager. Once the workflow and resource manager have been assigned, invoking this
method sets up the communication infrastructure, submits a resource request and then
submits all the tasks.
"""
try:
# Set None objects local to each run
self._wfp = None
self._sync_thread = None
self._terminate_sync = Event()
self._resubmit_failed = False
self._cur_attempt = 1
if not self._workflow:
self._logger.error('No workflow assigned currently, please check your script')
raise MissingError(obj=self._uid, missing_attribute='workflow')
if not self._resource_manager:
self._logger.error(
'No resource manager assigned currently, please create and add a valid resource manager')
raise MissingError(obj=self._uid, missing_attribute='resource_manager')
self._prof.prof('amgr run started', uid=self._uid)
# Setup rabbitmq stuff
if not self._mqs_setup:
self._report.info('Setting up RabbitMQ system')
setup = self._setup_mqs()
if not setup:
self._logger.error('RabbitMQ system not available')
raise EnTKError(text="RabbitMQ setup failed")
self._mqs_setup = True
self._report.ok('>>ok\n')
# Create WFProcessor object
self._prof.prof('creating wfp obj', uid=self._uid)
self._wfp = WFprocessor(sid=self._sid,
workflow=self._workflow,
pending_queue=self._pending_queue,
completed_queue=self._completed_queue,
mq_hostname=self._mq_hostname,
port=self._port,
resubmit_failed=self._resubmit_failed)
self._wfp._initialize_workflow()
self._workflow = self._wfp.workflow
# Submit resource request if not resource allocation done till now or
# resubmit a new one if the old one has completed
if self._resource_manager:
res_alloc_state = self._resource_manager.get_resource_allocation_state()
if (not res_alloc_state) or (res_alloc_state in self._resource_manager.get_completed_states()):
self._logger.info('Starting resource request submission')
self._prof.prof('init rreq submission', uid=self._uid)
self._resource_manager._submit_resource_request()
else:
self._logger.error(
'Cannot run without resource manager, please create and assign a resource manager')
raise EnTKError(text='Missing resource manager')
# Start synchronizer thread
if not self._sync_thread:
self._logger.info('Starting synchronizer thread')
self._sync_thread = Thread(target=self._synchronizer, name='synchronizer-thread')
self._prof.prof('starting synchronizer thread', uid=self._uid)
self._sync_thread.start()
# Start WFprocessor
self._logger.info('Starting WFProcessor process from AppManager')
self._wfp.start_processor()
self._report.ok('All components created\n')
# Create tmgr object only if it does not already exist
if self._rts == 'radical.pilot':
from radical.entk.execman.rp import TaskManager
elif self._rts == 'mock':
from radical.entk.execman.mock import TaskManager
if not self._task_manager:
self._prof.prof('creating tmgr obj', uid=self._uid)
self._task_manager = TaskManager(sid=self._sid,
pending_queue=self._pending_queue,
completed_queue=self._completed_queue,
mq_hostname=self._mq_hostname,
rmgr=self._resource_manager,
port=self._port
)
self._logger.info('Starting task manager process from AppManager')
self._task_manager.start_manager()
self._task_manager.start_heartbeat()
active_pipe_count = len(self._workflow)
finished_pipe_uids = []
# We wait till all pipelines of the workflow are marked
# complete
while ((active_pipe_count > 0) and
(self._wfp.workflow_incomplete()) and
(self._resource_manager.get_resource_allocation_state() not
in self._resource_manager.get_completed_states())):
if active_pipe_count > 0:
for pipe in self._workflow:
with pipe.lock:
if (pipe.completed) and (pipe.uid not in finished_pipe_uids):
self._logger.info('Pipe %s completed' % pipe.uid)
finished_pipe_uids.append(pipe.uid)
active_pipe_count -= 1
self._logger.info('Active pipes: %s' % active_pipe_count)
if (not self._sync_thread.is_alive()) and (self._cur_attempt <= self._reattempts):
self._sync_thread = Thread(target=self._synchronizer,
name='synchronizer-thread')
self._logger.info('Restarting synchronizer thread')
self._prof.prof('restarting synchronizer', uid=self._uid)
self._sync_thread.start()
self._cur_attempt += 1
if (not self._wfp.check_processor()) and (self._cur_attempt <= self._reattempts):
"""
If WFP dies, both child threads are also cleaned out.
We simply recreate the wfp object with a copy of the workflow
in the appmanager and start the processor.
"""
self._prof.prof('recreating wfp obj', uid=self._uid)
self._wfp = WFprocessor(
sid=self._sid,
workflow=self._workflow,
pending_queue=self._pending_queue,
completed_queue=self._completed_queue,
mq_hostname=self._mq_hostname,
port=self._port,
resubmit_failed=self._resubmit_failed)
self._logger.info('Restarting WFProcessor process from AppManager')
self._wfp.start_processor()
self._cur_attempt += 1
if (not self._task_manager.check_heartbeat()) and (self._cur_attempt <= self._reattempts):
"""
If the tmgr process or heartbeat dies, we simply start a
new process using the start_manager method. We do not
need to create a new instance of the TaskManager object
itself. We stop and start a new instance of the
heartbeat thread as well.
"""
self._prof.prof('restarting tmgr process and heartbeat', uid=self._uid)
self._logger.info('Terminating heartbeat thread')
self._task_manager.terminate_heartbeat()
self._logger.info('Terminating tmgr process')
self._task_manager.terminate_manager()
self._logger.info('Restarting task manager process')
self._task_manager.start_manager()
self._logger.info('Restarting heartbeat thread')
self._task_manager.start_heartbeat()
self._cur_attempt += 1
self._prof.prof('start termination', uid=self._uid)
# Terminate threads in following order: wfp, helper, synchronizer
self._logger.info('Terminating WFprocessor')
self._wfp.terminate_processor()
self._logger.info('Terminating synchronizer thread')
self._terminate_sync.set()
self._sync_thread.join()
self._logger.info('Synchronizer thread terminated')
if self._autoterminate:
self.resource_terminate()
if self._write_workflow:
write_workflow(self._workflow, self._sid)
self._prof.prof('termination done', uid=self._uid)
except KeyboardInterrupt:
self._prof.prof('start termination', uid=self._uid)
self._logger.error('Execution interrupted by user (you probably hit Ctrl+C), ' +
'trying to cancel enqueuer thread gracefully...')
# Terminate threads in following order: wfp, helper, synchronizer
if self._wfp:
self._logger.info('Terminating WFprocessor')
self._wfp.terminate_processor()
if self._task_manager:
self._logger.info('Terminating task manager process')
self._task_manager.terminate_manager()
self._task_manager.terminate_heartbeat()
if self._sync_thread:
self._logger.info('Terminating synchronizer thread')
self._terminate_sync.set()
self._sync_thread.join()
self._logger.info('Synchronizer thread terminated')
if self._resource_manager:
self._resource_manager._terminate_resource_request()
self._prof.prof('termination done', uid=self._uid)
raise KeyboardInterrupt
except Exception, ex:
self._prof.prof('start termination', uid=self._uid)
self._logger.exception('Error in AppManager: %s' % ex)
# Terminate threads in following order: wfp, helper, synchronizer
if self._wfp:
self._logger.info('Terminating WFprocessor')
self._wfp.terminate_processor()
if self._task_manager:
self._logger.info('Terminating task manager process')
self._task_manager.terminate_manager()
self._task_manager.terminate_heartbeat()
if self._sync_thread:
self._logger.info('Terminating synchronizer thread')
self._terminate_sync.set()
self._sync_thread.join()
self._logger.info('Synchronizer thread terminated')
if self._resource_manager:
self._resource_manager._terminate_resource_request()
self._prof.prof('termination done', uid=self._uid)
raise
def resource_terminate(self):
if self._task_manager:
self._logger.info('Terminating task manager process')
self._task_manager.terminate_manager()
self._task_manager.terminate_heartbeat()
if self._resource_manager:
self._resource_manager._terminate_resource_request()
if os.environ.get('RADICAL_ENTK_PROFILE', False):
write_session_description(self)
if self._rmq_cleanup:
self._cleanup_mqs()
self._report.info('All components terminated\n')
# ------------------------------------------------------------------------------------------------------------------
# Private methods
# ------------------------------------------------------------------------------------------------------------------
def _setup_mqs(self):
"""
**Purpose**: Setup RabbitMQ system on the client side. We instantiate queue(s) 'pendingq-*' for communication
between the enqueuer thread and the task manager process. We instantiate queue(s) 'completedq-*' for
communication between the task manager and dequeuer thread. We instantiate queue 'sync-to-master' for
communication from enqueuer/dequeuer/task_manager to the synchronizer thread. We instantiate queue
'sync-ack' for communication from synchronizer thread to enqueuer/dequeuer/task_manager.
Details: All queues are durable: Even if the RabbitMQ server goes down, the queues are saved to disk and can
be retrieved. This also means that after an erroneous run the queues might still have unacknowledged messages
and will contain messages from that run. Hence, in every new run, we first delete the queue and create a new
one.
"""
try:
self._prof.prof('init mqs setup', uid=self._uid)
self._logger.debug('Setting up mq connection and channel')
mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=self._mq_hostname, port=self._port))
mq_channel = mq_connection.channel()
self._logger.debug('Connection and channel setup successful')
self._logger.debug('Setting up all exchanges and queues')
qs = [
'%s-tmgr-to-sync' % self._sid,
'%s-cb-to-sync' % self._sid,
'%s-enq-to-sync' % self._sid,
'%s-deq-to-sync' % self._sid,
'%s-sync-to-tmgr' % self._sid,
'%s-sync-to-cb' % self._sid,
'%s-sync-to-enq' % self._sid,
'%s-sync-to-deq' % self._sid
]
for i in range(1, self._num_pending_qs + 1):
queue_name = '%s-pendingq-%s' % (self._sid, i)
self._pending_queue.append(queue_name)
qs.append(queue_name)
for i in range(1, self._num_completed_qs + 1):
queue_name = '%s-completedq-%s' % (self._sid, i)
self._completed_queue.append(queue_name)
qs.append(queue_name)
f = open('.%s.txt' % self._sid, 'w')
for q in qs:
# Durable Qs will not be lost if rabbitmq server crashes
mq_channel.queue_declare(queue=q)
f.write(q + '\n')
f.close()
self._logger.debug('All exchanges and queues are setup')
self._prof.prof('mqs setup done', uid=self._uid)
return True
except Exception, ex:
self._logger.error('Error setting RabbitMQ system: %s' % ex)
raise
def _cleanup_mqs(self):
try:
mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=self._mq_hostname, port=self._port))
mq_channel = mq_connection.channel()
mq_channel.queue_delete(queue='%s-tmgr-to-sync' % self._sid)
mq_channel.queue_delete(queue='%s-cb-to-sync' % self._sid)
mq_channel.queue_delete(queue='%s-enq-to-sync' % self._sid)
mq_channel.queue_delete(queue='%s-deq-to-sync' % self._sid)
mq_channel.queue_delete(queue='%s-sync-to-tmgr' % self._sid)
mq_channel.queue_delete(queue='%s-sync-to-cb' % self._sid)
mq_channel.queue_delete(queue='%s-sync-to-enq' % self._sid)
mq_channel.queue_delete(queue='%s-sync-to-deq' % self._sid)
for i in range(1, self._num_pending_qs + 1):
queue_name = '%s-pendingq-%s' % (self._sid, i)
mq_channel.queue_delete(queue=queue_name)
for i in range(1, self._num_completed_qs + 1):
queue_name = '%s-completedq-%s' % (self._sid, i)
mq_channel.queue_delete(queue=queue_name)
except Exception as ex:
self._logger.exception('Message queues not deleted, error: %s' % ex)
raise
def _synchronizer(self):
"""
**Purpose**: Thread in the master process to keep the workflow data
structure in appmanager up to date. We receive pipelines, stages and
tasks objects directly. The respective object is updated in this master
process.
Details: It is important to note that an acknowledgement of the type
channel.basic_ack() is an acknowledgement to the server that the msg
was received. This is not to be confused with the Ack sent to the
enqueuer/dequeuer/task_manager through the sync-ack queue.
"""
try:
self._prof.prof('synchronizer started', uid=self._uid)
self._logger.info('synchronizer thread started')
def task_update(msg, reply_to, corr_id, mq_channel):
completed_task = Task()
completed_task.from_dict(msg['object'])
self._logger.info('Received %s with state %s' % (completed_task.uid, completed_task.state))
found_task = False
# Traverse the entire workflow to find the correct task
for pipe in self._workflow:
if not pipe.completed:
if completed_task.parent_pipeline['uid'] == pipe.uid:
for stage in pipe.stages:
if completed_task.parent_stage['uid'] == stage.uid:
for task in stage.tasks:
if (completed_task.uid == task.uid) and (completed_task.state != task.state):
task.state = str(completed_task.state)
self._logger.debug('Found task %s with state %s' %
(task.uid, task.state))
if completed_task.path:
task.path = str(completed_task.path)
mq_channel.basic_publish(exchange='',
routing_key=reply_to,
properties=pika.BasicProperties(
correlation_id=corr_id),
body='%s-ack' % task.uid)
self._prof.prof('publishing sync ack for obj with state %s' %
msg['object']['state'],
uid=msg['object']['uid']
)
mq_channel.basic_ack(delivery_tag=method_frame.delivery_tag)
self._report.ok('Update: ')
self._report.info('Task %s in state %s\n' % (task.uid, task.state))
found_task = True
if not found_task:
# If there was a Task update, but the Task was not found in any of the Pipelines. This
# means that this was a Task that was added during runtime and the AppManager does not
# know about it. The current solution is going to be: add it to the workflow object in the
# AppManager via the synchronizer.
self._prof.prof('Adap: adding new task')
self._logger.info('Adding new task %s to parent stage: %s' % (completed_task.uid,
stage.uid))
stage.add_tasks(completed_task)
mq_channel.basic_publish(exchange='',
routing_key=reply_to,
properties=pika.BasicProperties(
correlation_id=corr_id),
body='%s-ack' % completed_task.uid)
self._prof.prof('Adap: added new task')
self._prof.prof('publishing sync ack for obj with state %s' %
msg['object']['state'],
uid=msg['object']['uid']
)
mq_channel.basic_ack(delivery_tag=method_frame.delivery_tag)
self._report.ok('Update: ')
self._report.info('Task %s in state %s\n' %
(completed_task.uid, completed_task.state))
def stage_update(msg, reply_to, corr_id, mq_channel):
completed_stage = Stage()
completed_stage.from_dict(msg['object'])
self._logger.info('Received %s with state %s' % (completed_stage.uid, completed_stage.state))
found_stage = False
# Traverse the entire workflow to find the correct stage
for pipe in self._workflow:
if not pipe.completed:
if completed_stage.parent_pipeline['uid'] == pipe.uid:
self._logger.info('Found parent pipeline: %s' % pipe.uid)
for stage in pipe.stages:
if (completed_stage.uid == stage.uid) and (completed_stage.state != stage.state):
self._logger.debug('Found stage %s' % stage.uid)
stage.state = str(completed_stage.state)
mq_channel.basic_publish(exchange='',
routing_key=reply_to,
properties=pika.BasicProperties(
correlation_id=corr_id),
body='%s-ack' % stage.uid)
self._prof.prof('publishing sync ack for obj with state %s' %
msg['object']['state'],
uid=msg['object']['uid']
)
mq_channel.basic_ack(delivery_tag=method_frame.delivery_tag)
self._report.ok('Update: ')
self._report.info('Stage %s in state %s\n' % (stage.uid, stage.state))
found_stage = True
if not found_stage:
# If there was a Stage update, but the Stage was not found in any of the Pipelines. This
# means that this was a Stage that was added during runtime and the AppManager does not
# know about it. The current solution is going to be: add it to the workflow object in the
# AppManager via the synchronizer.
self._prof.prof('Adap: adding new stage', uid=self._uid)
self._logger.info('Adding new stage %s to parent pipeline: %s' % (completed_stage.uid,
pipe.uid))
pipe.add_stages(completed_stage)
mq_channel.basic_publish(exchange='',
routing_key=reply_to,
properties=pika.BasicProperties(
correlation_id=corr_id),
body='%s-ack' % completed_stage.uid)
self._prof.prof('Adap: added new stage', uid=self._uid)
self._prof.prof('publishing sync ack for obj with state %s' %
msg['object']['state'],
uid=msg['object']['uid']
)
mq_channel.basic_ack(delivery_tag=method_frame.delivery_tag)
def pipeline_update(msg, reply_to, corr_id, mq_channel):
completed_pipeline = Pipeline()
completed_pipeline.from_dict(msg['object'])
self._logger.info('Received %s with state %s' % (completed_pipeline.uid, completed_pipeline.state))
# Traverse the entire workflow to find the correct pipeline
for pipe in self._workflow:
if not pipe.completed:
if (completed_pipeline.uid == pipe.uid) and (completed_pipeline.state != pipe.state):
pipe.state = str(completed_pipeline.state)
self._logger.info('Found pipeline %s, state %s, completed %s' % (pipe.uid,
pipe.state,
pipe.completed)
)
# Reply with ack msg to the sender
mq_channel.basic_publish(exchange='',
routing_key=reply_to,
properties=pika.BasicProperties(
correlation_id=corr_id),
body='%s-ack' % pipe.uid)
self._prof.prof('publishing sync ack for obj with state %s' %
msg['object']['state'],
uid=msg['object']['uid']
)
mq_channel.basic_ack(delivery_tag=method_frame.delivery_tag)
# Keep the assignment of the completed flag after sending the acknowledgment
# back. Otherwise the MainThread takes lock over the pipeline because of logging
# and profiling
if completed_pipeline.completed:
pipe._completed_flag.set()
self._report.ok('Update: ')
self._report.info('Pipeline %s in state %s\n' % (pipe.uid, pipe.state))
mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=self._mq_hostname, port=self._port))
mq_channel = mq_connection.channel()
last = time.time()
while not self._terminate_sync.is_set():
#-------------------------------------------------------------------------------------------------------
# Messages between tmgr Main thread and synchronizer -- only Task objects
method_frame, props, body = mq_channel.basic_get(queue='%s-tmgr-to-sync' % self._sid)
"""
The message received is a JSON object with the following structure:
msg = {
'type': 'Pipeline'/'Stage'/'Task',
'object': json/dict
}
"""
if body:
msg = json.loads(body)
self._prof.prof('received obj with state %s for sync' %
msg['object']['state'], uid=msg['object']['uid'])
self._logger.debug('received %s with state %s for sync' %
(msg['object']['uid'], msg['object']['state']))
if msg['type'] == 'Task':
task_update(msg, '%s-sync-to-tmgr' % self._sid, props.correlation_id, mq_channel)
#-------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------
# Messages between callback thread and synchronizer -- only Task objects
method_frame, props, body = mq_channel.basic_get(queue='%s-cb-to-sync' % self._sid)
"""
The message received is a JSON object with the following structure:
msg = {
'type': 'Pipeline'/'Stage'/'Task',
'object': json/dict
}
"""
if body:
msg = json.loads(body)
self._prof.prof('received obj with state %s for sync' %
msg['object']['state'], uid=msg['object']['uid'])
self._logger.debug('received %s with state %s for sync' %
(msg['object']['uid'], msg['object']['state']))
if msg['type'] == 'Task':
task_update(msg, '%s-sync-to-cb' % self._sid, props.correlation_id, mq_channel)
#-------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------
# Messages between enqueue thread and synchronizer -- Task, Stage or Pipeline
method_frame, props, body = mq_channel.basic_get(queue='%s-enq-to-sync' % self._sid)
if body:
msg = json.loads(body)
self._prof.prof('received obj with state %s for sync' %
msg['object']['state'], uid=msg['object']['uid'])
self._logger.debug('received %s with state %s for sync' %
(msg['object']['uid'], msg['object']['state']))
if msg['type'] == 'Task':
task_update(msg, '%s-sync-to-enq' % self._sid, props.correlation_id, mq_channel)
elif msg['type'] == 'Stage':
stage_update(msg, '%s-sync-to-enq' % self._sid, props.correlation_id, mq_channel)
elif msg['type'] == 'Pipeline':
pipeline_update(msg, '%s-sync-to-enq' % self._sid, props.correlation_id, mq_channel)
#-------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------
# Messages between dequeue thread and synchronizer -- Task, Stage or Pipeline
method_frame, props, body = mq_channel.basic_get(queue='%s-deq-to-sync' % self._sid)
if body:
msg = json.loads(body)
self._prof.prof('received obj with state %s for sync' %
msg['object']['state'], uid=msg['object']['uid'])
self._logger.debug('received %s with state %s for sync' %
(msg['object']['uid'], msg['object']['state']))
if msg['type'] == 'Task':
task_update(msg, '%s-sync-to-deq' % self._sid, props.correlation_id, mq_channel)
elif msg['type'] == 'Stage':
stage_update(msg, '%s-sync-to-deq' % self._sid, props.correlation_id, mq_channel)
elif msg['type'] == 'Pipeline':
pipeline_update(msg, '%s-sync-to-deq' % self._sid, props.correlation_id, mq_channel)
#-------------------------------------------------------------------------------------------------------
# Appease pika, because otherwise it thinks the connection is dead
now = time.time()
if now - last >= self._rmq_ping_interval:
mq_connection.process_data_events()
last = now
self._prof.prof('terminating synchronizer', uid=self._uid)
except KeyboardInterrupt:
self._logger.error('Execution interrupted by user (you probably hit Ctrl+C), ' +
'trying to terminate synchronizer thread gracefully...')
raise KeyboardInterrupt
except Exception, ex:
self._logger.exception('Unknown error in synchronizer: %s. \n Terminating thread' % ex)
raise
# ------------------------------------------------------------------------------------------------------------------
| []
| []
| [
"RMQ_PING_INTERVAL",
"RADICAL_ENTK_PROFILE"
]
| [] | ["RMQ_PING_INTERVAL", "RADICAL_ENTK_PROFILE"] | python | 2 | 0 | |
cmd/client/shell.go | /*
* Copyright (C) 2016 Red Hat, Inc.
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package client
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/mitchellh/go-homedir"
"github.com/spf13/cobra"
"github.com/skydive-project/skydive/config"
shttp "github.com/skydive-project/skydive/http"
"github.com/skydive-project/skydive/logging"
)
// ShellCmd skydive shell root command
var ShellCmd = &cobra.Command{
Use: "shell",
Short: "Shell Command Line Interface",
Long: "Skydive Shell Command Line Interface, yet another shell",
SilenceUsage: false,
Run: func(cmd *cobra.Command, args []string) {
shellMain()
},
}
var (
// ErrContinue parser error continue input
ErrContinue = errors.New("<continue input>")
// ErrQuit parser error quit session
ErrQuit = errors.New("<quit session>")
)
func actionGremlinQuery(s *Session, query string) error {
var values interface{}
queryHelper := NewGremlinQueryHelper(&s.authenticationOpts)
if err := queryHelper.Query(query, &values); err != nil {
return err
}
printJSON(values)
return nil
}
func actionSetVarUsername(s *Session, arg string) error {
s.authenticationOpts.Username = arg
return nil
}
func actionSetVarPassword(s *Session, arg string) error {
s.authenticationOpts.Password = arg
return nil
}
func actionSetVarAnalyzer(s *Session, arg string) error {
s.analyzerAddr = arg
config.GetConfig().Set("analyzers", s.analyzerAddr)
return nil
}
var vocaGremlinBase = []string{
"V(",
"Context(",
"Flows(",
}
var vocaGremlinExt = []string{
"Has(",
"Dedup()",
"ShortestPathTo(", // 1 or 2
"Both()",
"Count()",
"Range(", // 2
"Limit(", // 1
"Sort(",
"Out()",
"OutV()",
"OutE()",
"In()",
"InV()",
"InE()",
}
func completeG(s *Session, prefix string) []string {
if prefix == "" {
return vocaGremlinBase
}
return vocaGremlinExt
}
type command struct {
name string
action func(*Session, string) error
complete func(*Session, string) []string
arg string
document string
}
var commands = []command{
{
name: "g",
action: actionGremlinQuery,
complete: completeG,
arg: "<gremlin expression>",
document: "evaluate a gremlin expression",
},
{
name: "username",
action: actionSetVarUsername,
complete: nil,
arg: "<username>",
document: "set the analyzer connection username",
},
{
name: "password",
action: actionSetVarPassword,
complete: nil,
arg: "<password>",
document: "set the analyzer connection password",
},
{
name: "analyzer",
action: actionSetVarAnalyzer,
complete: nil,
arg: "<address:port>",
document: "set the analyzer connection address",
},
}
func (s *Session) completeWord(line string, pos int) (string, []string, string) {
if strings.HasPrefix(line, "g") {
// complete commands
if !strings.Contains(line[0:pos], ".") {
pre, post := line[0:pos], line[pos:]
result := []string{}
for _, command := range commands {
name := command.name
if strings.HasPrefix(name, pre) {
// a non-empty arg means that this command takes an argument (for now)
if !strings.HasPrefix(post, ".") && command.arg != "" {
name = name + "."
}
result = append(result, name)
}
}
return "", result, post
}
// complete command arguments
for _, command := range commands {
if command.complete == nil {
continue
}
cmdPrefix := command.name + "."
if strings.HasPrefix(line, cmdPrefix) && pos >= len(cmdPrefix) {
complLine := ""
if len(line)-len(cmdPrefix) > 0 {
complLine = line[len(cmdPrefix) : len(line)-len(cmdPrefix)]
}
return line, command.complete(s, complLine), ""
}
}
return "", nil, ""
}
if strings.HasPrefix(line, ":") {
// complete commands
if !strings.Contains(line[0:pos], " ") {
pre, post := line[0:pos], line[pos:]
result := []string{}
for _, command := range commands {
name := ":" + command.name
if strings.HasPrefix(name, pre) {
// a non-empty arg means that this command takes an argument (for now)
if !strings.HasPrefix(post, " ") && command.arg != "" {
name = name + " "
}
result = append(result, name)
}
}
return "", result, post
}
// complete command arguments
for _, command := range commands {
if command.complete == nil {
continue
}
cmdPrefix := ":" + command.name + " "
if strings.HasPrefix(line, cmdPrefix) && pos >= len(cmdPrefix) {
return cmdPrefix, command.complete(s, line[len(cmdPrefix):pos]), ""
}
}
return "", nil, ""
}
return "", nil, ""
}
func shellMain() {
s, err := NewSession()
if err != nil {
panic(err)
}
rl := newContLiner()
defer rl.Close()
var historyFile string
home, err := homeDir()
if err != nil {
logging.GetLogger().Errorf("home: %s", err)
} else {
historyFile = filepath.Join(home, "history")
f, err := os.Open(historyFile)
if err != nil {
if !os.IsNotExist(err) {
logging.GetLogger().Errorf("%s", err)
}
} else {
_, err := rl.ReadHistory(f)
if err != nil {
logging.GetLogger().Errorf("while reading history: %s", err)
}
}
}
rl.SetWordCompleter(s.completeWord)
for {
in, err := rl.Prompt()
if err != nil {
if err == io.EOF {
break
}
fmt.Fprintf(os.Stderr, "fatal: %s", err)
os.Exit(1)
}
if in == "" {
continue
}
if err := rl.Reindent(); err != nil {
fmt.Fprintf(os.Stderr, "error: %s\n", err)
rl.Clear()
continue
}
err = s.Eval(in)
if err != nil {
if err == ErrContinue {
continue
} else if err == ErrQuit {
break
}
fmt.Println(err)
}
rl.Accepted()
}
if historyFile != "" {
err := os.MkdirAll(filepath.Dir(historyFile), 0755)
if err != nil {
logging.GetLogger().Errorf("%s", err)
} else {
f, err := os.Create(historyFile)
if err != nil {
logging.GetLogger().Errorf("%s", err)
} else {
_, err := rl.WriteHistory(f)
if err != nil {
logging.GetLogger().Errorf("while saving history: %s", err)
}
}
}
}
}
func homeDir() (home string, err error) {
home = os.Getenv("SKYDIVE_HOME")
if home != "" {
return
}
home, err = homedir.Dir()
if err != nil {
return
}
home = filepath.Join(home, ".skydive")
return
}
// Session describes a shell session
type Session struct {
authenticationOpts shttp.AuthenticationOpts
analyzerAddr string
}
// NewSession create a new shell session
func NewSession() (*Session, error) {
s := &Session{
analyzerAddr: "localhost:8082",
authenticationOpts: shttp.AuthenticationOpts{
Username: "admin",
Password: "password",
},
}
config.GetConfig().Set("analyzers", s.analyzerAddr)
return s, nil
}
// Eval evaluation a input expression
func (s *Session) Eval(in string) error {
logging.GetLogger().Debugf("eval >>> %q", in)
for _, command := range commands {
if command.name == "g" && strings.HasPrefix(in, command.name) {
err := command.action(s, in)
if err != nil {
logging.GetLogger().Errorf("%s: %s", command.name, err)
}
return nil
}
arg := strings.TrimPrefix(in, ":"+command.name)
if arg == in {
continue
}
if arg == "" || strings.HasPrefix(arg, " ") {
arg = strings.TrimSpace(arg)
err := command.action(s, arg)
if err != nil {
if err == ErrQuit {
return err
}
logging.GetLogger().Errorf("%s: %s", command.name, err)
}
return nil
}
}
return nil
}
| [
"\"SKYDIVE_HOME\""
]
| []
| [
"SKYDIVE_HOME"
]
| [] | ["SKYDIVE_HOME"] | go | 1 | 0 | |
mgmt/rest/rest_test.go | // +build medium
/*
http://www.apache.org/licenses/LICENSE-2.0.txt
Copyright 2015 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rest
import (
"io/ioutil"
"net/http"
"os"
"github.com/intelsdi-x/snap/control"
"github.com/intelsdi-x/snap/plugin/helper"
"github.com/intelsdi-x/snap/scheduler"
log "github.com/sirupsen/logrus"
)
// common resources used for medium tests
var (
// Switching this turns on logging for all the REST API calls
LOG_LEVEL = log.WarnLevel
SNAP_PATH = helper.BuildPath
SNAP_AUTODISCOVER_PATH = os.Getenv("SNAP_AUTODISCOVER_PATH")
MOCK_PLUGIN_PATH1 = helper.PluginFilePath("snap-plugin-collector-mock1")
MOCK_PLUGIN_PATH2 = helper.PluginFilePath("snap-plugin-collector-mock2")
FILE_PLUGIN_PATH = helper.PluginFilePath("snap-plugin-publisher-mock-file")
CompressedUpload = true
TotalUploadSize = 0
UploadCount = 0
)
// Since we do not have a global snap package that could be imported,
// we create a mock config struct to mock what is in snapteld.go
type mockConfig struct {
LogLevel int `json:"-" yaml:"-"`
GoMaxProcs int `json:"-" yaml:"-"`
LogPath string `json:"-" yaml:"-"`
Control *control.Config
Scheduler *scheduler.Config `json:"-" yaml:"-"`
RestAPI *Config `json:"-" yaml:"-"`
}
func getDefaultMockConfig() *mockConfig {
return &mockConfig{
LogLevel: 3,
GoMaxProcs: 1,
LogPath: "",
Control: control.GetDefaultConfig(),
Scheduler: scheduler.GetDefaultConfig(),
RestAPI: GetDefaultConfig(),
}
}
type restAPIInstance struct {
port int
server *Server
}
func command() string {
return "curl"
}
func readBody(r *http.Response) []byte {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
log.Fatal(err)
}
r.Body.Close()
return b
}
| [
"\"SNAP_AUTODISCOVER_PATH\""
]
| []
| [
"SNAP_AUTODISCOVER_PATH"
]
| [] | ["SNAP_AUTODISCOVER_PATH"] | go | 1 | 0 | |
sumo_rl/environment/traffic_signal.py | import os
import sys
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
else:
sys.exit("Please declare the environment variable 'SUMO_HOME'")
import traci
import numpy as np
from gym import spaces
class TrafficSignal:
"""
This class represents a Traffic Signal of an intersection
It is responsible for retrieving information and changing the traffic phase using Traci API
"""
def __init__(self, env, ts_id, delta_time, yellow_time, min_green, max_green):
self.id = ts_id
self.env = env
self.delta_time = delta_time
self.yellow_time = yellow_time
self.min_green = min_green
self.max_green = max_green
self.green_phase = 0
self.is_yellow = False
self.time_since_last_phase_change = 0
self.next_action_time = 0
self.last_measure = 0.0
self.last_reward = None
self.phases = traci.trafficlight.getCompleteRedYellowGreenDefinition(self.id)[0].phases
self.num_green_phases = len(self.phases) // 2 # Number of green phases == number of phases (green+yellow) divided by 2
self.lanes = list(dict.fromkeys(traci.trafficlight.getControlledLanes(self.id))) # Remove duplicates and keep order
self.out_lanes = [link[0][1] for link in traci.trafficlight.getControlledLinks(self.id) if link]
self.out_lanes = list(set(self.out_lanes))
"""
Default observation space is a vector R^(#greenPhases + 2 * #lanes)
s = [current phase one-hot encoded, density for each lane, queue for each lane]
You can change this by modifying self.observation_space and the method compute_observation()
Action space is which green phase is going to be open for the next delta_time seconds
"""
self.observation_space = spaces.Box(low=np.zeros(self.num_green_phases + 2*len(self.lanes)), high=np.ones(self.num_green_phases + 2*len(self.lanes)))
self.discrete_observation_space = spaces.Tuple((
spaces.Discrete(self.num_green_phases), # Green Phase
#spaces.Discrete(self.max_green//self.delta_time), # Elapsed time of phase
*(spaces.Discrete(10) for _ in range(2*len(self.lanes))) # Density and stopped-density for each lane
))
self.action_space = spaces.Discrete(self.num_green_phases)
logic = traci.trafficlight.Logic("new-program"+self.id, 0, 0, phases=self.phases)
traci.trafficlight.setCompleteRedYellowGreenDefinition(self.id, logic)
@property
def phase(self):
return traci.trafficlight.getPhase(self.id)
@property
def time_to_act(self):
return self.next_action_time == self.env.sim_step
def update(self):
self.time_since_last_phase_change += 1
if self.is_yellow and self.time_since_last_phase_change == self.yellow_time:
traci.trafficlight.setPhase(self.id, self.green_phase)
self.is_yellow = False
def set_next_phase(self, new_phase):
"""
Sets what will be the next green phase and sets a yellow phase if the next phase is different from the current one
:param new_phase: (int) Number between [0..num_green_phases]
"""
new_phase *= 2
if self.phase == new_phase or self.time_since_last_phase_change < self.min_green + self.yellow_time:
self.green_phase = self.phase
traci.trafficlight.setPhase(self.id, self.green_phase)
self.next_action_time = self.env.sim_step + self.delta_time
else:
self.green_phase = new_phase
traci.trafficlight.setPhase(self.id, self.phase + 1) # turns yellow
self.next_action_time = self.env.sim_step + self.delta_time + self.yellow_time
self.is_yellow = True
self.time_since_last_phase_change = 0
def compute_observation(self):
phase_id = [1 if self.phase//2 == i else 0 for i in range(self.num_green_phases)] # one-hot encoding
#elapsed = self.traffic_signals[ts].time_on_phase / self.max_green
density = self.get_lanes_density()
queue = self.get_lanes_queue()
observation = np.array(phase_id + density + queue)
return observation
def compute_reward(self):
self.last_reward = self._waiting_time_reward()
return self.last_reward
def _pressure_reward(self):
return -self.get_pressure()
def _queue_average_reward(self):
new_average = np.mean(self.get_stopped_vehicles_num())
reward = self.last_measure - new_average
self.last_measure = new_average
return reward
def _queue_reward(self):
return - (sum(self.get_stopped_vehicles_num()))**2
def _waiting_time_reward(self):
ts_wait = sum(self.get_waiting_time_per_lane()) / 100.0
reward = self.last_measure - ts_wait
self.last_measure = ts_wait
return reward
def _waiting_time_reward2(self):
ts_wait = sum(self.get_waiting_time())
self.last_measure = ts_wait
if ts_wait == 0:
reward = 1.0
else:
reward = 1.0/ts_wait
return reward
def _waiting_time_reward3(self):
ts_wait = sum(self.get_waiting_time())
reward = -ts_wait
self.last_measure = ts_wait
return reward
def get_waiting_time_per_lane(self):
wait_time_per_lane = []
for lane in self.lanes:
veh_list = traci.lane.getLastStepVehicleIDs(lane)
wait_time = 0.0
for veh in veh_list:
veh_lane = traci.vehicle.getLaneID(veh)
acc = traci.vehicle.getAccumulatedWaitingTime(veh)
if veh not in self.env.vehicles:
self.env.vehicles[veh] = {veh_lane: acc}
else:
self.env.vehicles[veh][veh_lane] = acc - sum([self.env.vehicles[veh][lane] for lane in self.env.vehicles[veh].keys() if lane != veh_lane])
wait_time += self.env.vehicles[veh][veh_lane]
wait_time_per_lane.append(wait_time)
return wait_time_per_lane
def get_pressure(self):
return abs(sum(traci.lane.getLastStepVehicleNumber(lane) for lane in self.lanes) - sum(traci.lane.getLastStepVehicleNumber(lane) for lane in self.out_lanes))
def get_out_lanes_density(self):
vehicle_size_min_gap = 7.5 # 5(vehSize) + 2.5(minGap)
return [min(1, traci.lane.getLastStepVehicleNumber(lane) / (traci.lane.getLength(lane) / vehicle_size_min_gap)) for lane in self.out_lanes]
def get_lanes_density(self):
vehicle_size_min_gap = 7.5 # 5(vehSize) + 2.5(minGap)
return [min(1, traci.lane.getLastStepVehicleNumber(lane) / (traci.lane.getLength(lane) / vehicle_size_min_gap)) for lane in self.lanes]
def get_queue(self):
vehicle_size_min_gap = 7.5 # 5(vehSize) + 2.5(minGap)
return [traci.lane.getLastStepHaltingNumber(lane) for lane in self.lanes]
def get_lanes_queue(self):
vehicle_size_min_gap = 7.5 # 5(vehSize) + 2.5(minGap)
return [min(1, traci.lane.getLastStepHaltingNumber(lane) / (traci.lane.getLength(lane) / vehicle_size_min_gap)) for lane in self.lanes]
def get_total_queued(self):
return sum([traci.lane.getLastStepHaltingNumber(lane) for lane in self.lanes])
def _get_veh_list(self):
veh_list = []
for lane in self.lanes:
veh_list += traci.lane.getLastStepVehicleIDs(lane)
return veh_list
| []
| []
| [
"SUMO_HOME"
]
| [] | ["SUMO_HOME"] | python | 1 | 0 | |
benchmarks/tpch-fed.py | """
Usage:
tpch-fed.py --file=<file>
Options:
--file=<file> Query file.
-h --help Show this screen.
--version Show version.
"""
import os
import connectorx as cx
from contexttimer import Timer
from docopt import docopt
import pandas as pd
if __name__ == "__main__":
args = docopt(__doc__, version="Naval Fate 2.0")
query_file = args["--file"]
db_map = {
"db1": os.environ["DB1"],
"db2": os.environ["DB2"],
}
print(f"dbs: {db_map}")
with open(query_file, "r") as f:
sql = f.read()
print(f"file: {query_file}")
with Timer() as timer:
df = cx.read_sql(db_map, sql, return_type="pandas")
print("time in total:", timer.elapsed)
print(df)
| []
| []
| [
"DB1",
"DB2"
]
| [] | ["DB1", "DB2"] | python | 2 | 0 | |
td/oauth.py | import os
import pathlib
from flask import Flask
from flask import request
from flask import redirect
from flask import url_for
from flask import session
from flask import render_template
from flask.json import jsonify
from td.app.auth import FlaskTDAuth
from configparser import ConfigParser
# Define the templates folder.
template_folder_path: pathlib.Path = pathlib.Path(__file__).parents[0]
template_folder_path: pathlib.Path = template_folder_path.joinpath('templates')
# Create the App.
app = Flask('TD_oAuth_App', template_folder=template_folder_path.resolve())
@app.route("/")
def home():
"""Step 1: User Authorization.
Redirect the user/resource owner to the OAuth provider (i.e. Github)
using an URL with a few key OAuth parameters.
"""
return render_template("index.html")
@app.route("/login")
def demo():
"""Step 1: User Authorization.
Redirect the user/resource owner to the OAuth provider (i.e. TD Ameritrade)
using a URL with a few key OAuth parameters.
"""
# Build the authorization URL.
auth_tuple = app.config['auth_client'].authorization_url()
# State is used to prevent CSRF, keep this for later.
session['oauth_state'] = auth_tuple[1]
return redirect(auth_tuple[0])
@app.route("/login/callback", methods=["GET"])
def callback():
""" Step 3: Retrieving an access token.
The user has been redirected back from the provider to your registered
callback URL. With this redirection comes an authorization code included
in the redirect URL. We will use that to obtain an access token.
"""
# Grab the Refresh and Access Token.
token_dict = app.config['auth_client'].grab_access_token_and_refresh_token(url=request.url)
# Store it in the Session.
session['oauth_token'] = token_dict
if app.config['call_close']:
return redirect(url_for('shutdown'))
return jsonify(token_dict)
@app.route("/login/refresh", methods=["GET"])
def refresh():
# Grab the Refresh Token.
refresh_token_dict = app.config['auth_client'].grab_refresh_token()
return jsonify(refresh_token_dict)
def shutdown_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
@app.route('/shutdown', methods=['POST'])
def shutdown():
shutdown_server()
return 'Server shutting down...'
def run(flask_client: FlaskTDAuth, close_after: bool = False):
certs_pem = pathlib.Path(__file__).parents[0].joinpath('certs/cert.pem')
certs_key = pathlib.Path(__file__).parents[0].joinpath('certs/key.pem')
app.secret_key = os.environ.get("SECRET_KEY") or os.urandom(24)
app.config['auth_client'] = flask_client
app.config['call_close'] = close_after
app.run(
ssl_context=(certs_pem, certs_key),
host='localhost',
port=5000,
debug=True
)
if __name__ == "__main__":
# Grab configuration values.
config = ConfigParser()
config.read('config/config.ini')
client_id = config.get('main', 'client_id')
redirect_uri = config.get('main', 'redirect_uri')
credentials = config.get('main','json_path')
# Define the Secret Key.
app.secret_key = os.environ.get("SECRET_KEY") or os.urandom(24)
# Define the App Configurations.
app.config['auth_client'] = FlaskTDAuth(
client_id=client_id,
redirect_uri=redirect_uri,
credentials_file=pathlib.Path(credentials)
)
# Run the App.
app.run(
ssl_context=('td/certs/cert.pem', 'td/certs/key.pem'),
host='localhost',
port=5000,
debug=True
)
# flask_td_app = FlaskAppTD(client_id=client_id, redirect_uri=redirect_uri, credentials_file=credentials)
# flask_td_app.run()
# This allows us to use a plain HTTP callback
# os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = "1"
# # app.run(ssl_context="adhoc")
| []
| []
| [
"SECRET_KEY",
"OAUTHLIB_INSECURE_TRANSPORT"
]
| [] | ["SECRET_KEY", "OAUTHLIB_INSECURE_TRANSPORT"] | python | 2 | 0 | |
modules/openapi-generator/src/main/java/org/openapitools/codegen/languages/AbstractKotlinCodegen.java | /*
* Copyright 2018 OpenAPI-Generator Contributors (https://openapi-generator.tech)
* Copyright 2018 SmartBear Software
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openapitools.codegen.languages;
import io.swagger.v3.oas.models.media.ArraySchema;
import io.swagger.v3.oas.models.media.Schema;
import io.swagger.v3.oas.models.media.StringSchema;
import org.apache.commons.io.FilenameUtils;
import org.apache.commons.lang3.StringUtils;
import org.openapitools.codegen.CliOption;
import org.openapitools.codegen.CodegenConfig;
import org.openapitools.codegen.CodegenConstants;
import org.openapitools.codegen.CodegenModel;
import org.openapitools.codegen.CodegenProperty;
import org.openapitools.codegen.DefaultCodegen;
import org.openapitools.codegen.utils.ModelUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.openapitools.codegen.utils.StringUtils.*;
public abstract class AbstractKotlinCodegen extends DefaultCodegen implements CodegenConfig {
public static final String SERIALIZATION_LIBRARY_DESC = "What serialization library to use: 'moshi' (default), or 'gson' or 'jackson'";
public enum SERIALIZATION_LIBRARY_TYPE {moshi, gson, jackson}
public static final String MODEL_MUTABLE = "modelMutable";
public static final String MODEL_MUTABLE_DESC = "Create mutable models";
private static final Logger LOGGER = LoggerFactory.getLogger(AbstractKotlinCodegen.class);
protected String artifactId;
protected String artifactVersion = "1.0.0";
protected String groupId = "org.openapitools";
protected String packageName = "org.openapitools";
protected String apiSuffix = "Api";
protected String sourceFolder = "src/main/kotlin";
protected String testFolder = "src/test/kotlin";
protected String apiDocPath = "docs/";
protected String modelDocPath = "docs/";
protected boolean parcelizeModels = false;
protected boolean serializableModel = false;
protected boolean needsDataClassBody = false;
protected boolean nonPublicApi = false;
protected CodegenConstants.ENUM_PROPERTY_NAMING_TYPE enumPropertyNaming = CodegenConstants.ENUM_PROPERTY_NAMING_TYPE.camelCase;
protected SERIALIZATION_LIBRARY_TYPE serializationLibrary = SERIALIZATION_LIBRARY_TYPE.moshi;
public AbstractKotlinCodegen() {
super();
supportsInheritance = true;
setSortModelPropertiesByRequiredFlag(true);
languageSpecificPrimitives = new HashSet<String>(Arrays.asList(
"kotlin.Byte",
"kotlin.ByteArray",
"kotlin.Short",
"kotlin.Int",
"kotlin.Long",
"kotlin.Float",
"kotlin.Double",
"kotlin.Boolean",
"kotlin.Char",
"kotlin.String",
"kotlin.Array",
"kotlin.collections.List",
"kotlin.collections.Map",
"kotlin.collections.Set"
));
// this includes hard reserved words defined by https://github.com/JetBrains/kotlin/blob/master/core/descriptors/src/org/jetbrains/kotlin/renderer/KeywordStringsGenerated.java
// as well as keywords from https://kotlinlang.org/docs/reference/keyword-reference.html
reservedWords = new HashSet<String>(Arrays.asList(
"as",
"break",
"class",
"continue",
"do",
"else",
"false",
"for",
"fun",
"if",
"in",
"interface",
"is",
"null",
"object",
"package",
"return",
"super",
"this",
"throw",
"true",
"try",
"typealias",
"typeof",
"val",
"var",
"when",
"while"
));
defaultIncludes = new HashSet<String>(Arrays.asList(
"kotlin.Byte",
"kotlin.ByteArray",
"kotlin.Short",
"kotlin.Int",
"kotlin.Long",
"kotlin.Float",
"kotlin.Double",
"kotlin.Boolean",
"kotlin.Char",
"kotlin.Array",
"kotlin.collections.List",
"kotlin.collections.Set",
"kotlin.collections.Map"
));
typeMapping = new HashMap<String, String>();
typeMapping.put("string", "kotlin.String");
typeMapping.put("boolean", "kotlin.Boolean");
typeMapping.put("integer", "kotlin.Int");
typeMapping.put("float", "kotlin.Float");
typeMapping.put("long", "kotlin.Long");
typeMapping.put("double", "kotlin.Double");
typeMapping.put("ByteArray", "kotlin.ByteArray");
typeMapping.put("number", "java.math.BigDecimal");
typeMapping.put("decimal", "java.math.BigDecimal");
typeMapping.put("date-time", "java.time.OffsetDateTime");
typeMapping.put("date", "java.time.LocalDate");
typeMapping.put("file", "java.io.File");
typeMapping.put("array", "kotlin.Array");
typeMapping.put("list", "kotlin.collections.List");
typeMapping.put("set", "kotlin.collections.Set");
typeMapping.put("map", "kotlin.collections.Map");
typeMapping.put("object", "kotlin.Any");
typeMapping.put("binary", "kotlin.ByteArray");
typeMapping.put("Date", "java.time.LocalDate");
typeMapping.put("DateTime", "java.time.OffsetDateTime");
instantiationTypes.put("array", "kotlin.collections.ArrayList");
instantiationTypes.put("list", "kotlin.collections.ArrayList");
instantiationTypes.put("map", "kotlin.collections.HashMap");
importMapping = new HashMap<String, String>();
importMapping.put("BigDecimal", "java.math.BigDecimal");
importMapping.put("UUID", "java.util.UUID");
importMapping.put("URI", "java.net.URI");
importMapping.put("File", "java.io.File");
importMapping.put("Date", "java.time.LocalDate");
importMapping.put("Timestamp", "java.sql.Timestamp");
importMapping.put("DateTime", "java.time.OffsetDateTime");
importMapping.put("LocalDateTime", "java.time.LocalDateTime");
importMapping.put("LocalDate", "java.time.LocalDate");
importMapping.put("LocalTime", "java.time.LocalTime");
specialCharReplacements.put(";", "Semicolon");
cliOptions.clear();
addOption(CodegenConstants.SOURCE_FOLDER, CodegenConstants.SOURCE_FOLDER_DESC, sourceFolder);
addOption(CodegenConstants.PACKAGE_NAME, "Generated artifact package name.", packageName);
addOption(CodegenConstants.API_SUFFIX, CodegenConstants.API_SUFFIX_DESC, apiSuffix);
addOption(CodegenConstants.GROUP_ID, "Generated artifact package's organization (i.e. maven groupId).", groupId);
addOption(CodegenConstants.ARTIFACT_ID, "Generated artifact id (name of jar).", artifactId);
addOption(CodegenConstants.ARTIFACT_VERSION, "Generated artifact's package version.", artifactVersion);
CliOption enumPropertyNamingOpt = new CliOption(CodegenConstants.ENUM_PROPERTY_NAMING, CodegenConstants.ENUM_PROPERTY_NAMING_DESC);
cliOptions.add(enumPropertyNamingOpt.defaultValue(enumPropertyNaming.name()));
CliOption serializationLibraryOpt = new CliOption(CodegenConstants.SERIALIZATION_LIBRARY, SERIALIZATION_LIBRARY_DESC);
cliOptions.add(serializationLibraryOpt.defaultValue(serializationLibrary.name()));
cliOptions.add(new CliOption(CodegenConstants.PARCELIZE_MODELS, CodegenConstants.PARCELIZE_MODELS_DESC));
cliOptions.add(new CliOption(CodegenConstants.SERIALIZABLE_MODEL, CodegenConstants.SERIALIZABLE_MODEL_DESC));
cliOptions.add(new CliOption(CodegenConstants.SORT_PARAMS_BY_REQUIRED_FLAG, CodegenConstants.SORT_PARAMS_BY_REQUIRED_FLAG_DESC));
cliOptions.add(new CliOption(CodegenConstants.SORT_MODEL_PROPERTIES_BY_REQUIRED_FLAG, CodegenConstants.SORT_MODEL_PROPERTIES_BY_REQUIRED_FLAG_DESC));
cliOptions.add(CliOption.newBoolean(MODEL_MUTABLE, MODEL_MUTABLE_DESC, false));
}
@Override
public String apiDocFileFolder() {
return (outputFolder + File.separator + apiDocPath).replace('/', File.separatorChar);
}
@Override
public String apiFileFolder() {
return (outputFolder + File.separator + sourceFolder + File.separator + apiPackage().replace('.', File.separatorChar)).replace('/', File.separatorChar);
}
@Override
public String apiTestFileFolder() {
return (outputFolder + File.separator + testFolder + File.separator + apiPackage().replace('.', File.separatorChar)).replace('/', File.separatorChar);
}
@Override
public String escapeQuotationMark(String input) {
// remove " to avoid code injection
return input.replace("\"", "");
}
@Override
public String escapeReservedWord(String name) {
// TODO: Allow enum escaping as an option (e.g. backticks vs append/prepend underscore vs match model property escaping).
return String.format(Locale.ROOT, "`%s`", name);
}
@Override
public String escapeUnsafeCharacters(String input) {
return input.replace("*/", "*_/").replace("/*", "/_*");
}
public CodegenConstants.ENUM_PROPERTY_NAMING_TYPE getEnumPropertyNaming() {
return this.enumPropertyNaming;
}
public SERIALIZATION_LIBRARY_TYPE getSerializationLibrary() {
return this.serializationLibrary;
}
/**
* Sets the naming convention for Kotlin enum properties
*
* @param enumPropertyNamingType The string representation of the naming convention, as defined by {@link org.openapitools.codegen.CodegenConstants.ENUM_PROPERTY_NAMING_TYPE}
*/
public void setEnumPropertyNaming(final String enumPropertyNamingType) {
try {
this.enumPropertyNaming = CodegenConstants.ENUM_PROPERTY_NAMING_TYPE.valueOf(enumPropertyNamingType);
} catch (IllegalArgumentException ex) {
StringBuilder sb = new StringBuilder(enumPropertyNamingType + " is an invalid enum property naming option. Please choose from:");
for (CodegenConstants.ENUM_PROPERTY_NAMING_TYPE t : CodegenConstants.ENUM_PROPERTY_NAMING_TYPE.values()) {
sb.append("\n ").append(t.name());
}
throw new RuntimeException(sb.toString());
}
}
/**
* Sets the serialization engine for Kotlin
*
* @param enumSerializationLibrary The string representation of the serialization library as defined by
* {@link org.openapitools.codegen.languages.AbstractKotlinCodegen.SERIALIZATION_LIBRARY_TYPE}
*/
public void setSerializationLibrary(final String enumSerializationLibrary) {
try {
this.serializationLibrary = SERIALIZATION_LIBRARY_TYPE.valueOf(enumSerializationLibrary);
} catch (IllegalArgumentException ex) {
StringBuilder sb = new StringBuilder(enumSerializationLibrary + " is an invalid serialization library option. Please choose from:");
for (SERIALIZATION_LIBRARY_TYPE t : SERIALIZATION_LIBRARY_TYPE.values()) {
sb.append("\n ").append(t.name());
}
throw new RuntimeException(sb.toString());
}
}
/**
* returns the OpenAPI type for the property
*
* @param p OpenAPI property object
* @return string presentation of the type
**/
@Override
public String getSchemaType(Schema p) {
String openAPIType = super.getSchemaType(p);
String type;
// This maps, for example, long -> kotlin.Long based on hashes in this type's constructor
if (typeMapping.containsKey(openAPIType)) {
type = typeMapping.get(openAPIType);
if (languageSpecificPrimitives.contains(type)) {
return toModelName(type);
}
} else {
type = openAPIType;
}
return toModelName(type);
}
/**
* Output the type declaration of the property
*
* @param p OpenAPI Property object
* @return a string presentation of the property type
*/
@Override
public String getTypeDeclaration(Schema p) {
Schema<?> schema = ModelUtils.unaliasSchema(this.openAPI, p, importMapping);
Schema<?> target = ModelUtils.isGenerateAliasAsModel() ? p : schema;
if (ModelUtils.isArraySchema(target)) {
Schema<?> items = getSchemaItems((ArraySchema) schema);
return getSchemaType(target) + "<" + getTypeDeclaration(items) + ">";
} else if (ModelUtils.isMapSchema(target)) {
// Note: ModelUtils.isMapSchema(p) returns true when p is a composed schema that also defines
// additionalProperties: true
Schema<?> inner = getAdditionalProperties(target);
if (inner == null) {
LOGGER.error("`{}` (map property) does not have a proper inner type defined. Default to type:string", p.getName());
inner = new StringSchema().description("TODO default missing map inner type to string");
p.setAdditionalProperties(inner);
}
return getSchemaType(target) + "<kotlin.String, " + getTypeDeclaration(inner) + ">";
}
return super.getTypeDeclaration(target);
}
@Override
public String modelDocFileFolder() {
return (outputFolder + "/" + modelDocPath).replace('/', File.separatorChar);
}
@Override
public String modelFileFolder() {
return outputFolder + File.separator + sourceFolder + File.separator + modelPackage().replace('.', File.separatorChar);
}
@Override
public Map<String, Object> postProcessModels(Map<String, Object> objs) {
objs = super.postProcessModelsEnum(objs);
List<Object> models = (List<Object>) objs.get("models");
for (Object _mo : models) {
Map<String, Object> mo = (Map<String, Object>) _mo;
CodegenModel cm = (CodegenModel) mo.get("model");
if (cm.getDiscriminator() != null) {
cm.vendorExtensions.put("x-has-data-class-body", true);
break;
}
for (CodegenProperty var : cm.vars) {
if (var.isEnum || isSerializableModel()) {
cm.vendorExtensions.put("x-has-data-class-body", true);
break;
}
}
}
return postProcessModelsEnum(objs);
}
@Override
public void processOpts() {
super.processOpts();
if (StringUtils.isEmpty(System.getenv("KOTLIN_POST_PROCESS_FILE"))) {
LOGGER.info("Environment variable KOTLIN_POST_PROCESS_FILE not defined so the Kotlin code may not be properly formatted. To define it, try 'export KOTLIN_POST_PROCESS_FILE=\"/usr/local/bin/ktlint -F\"' (Linux/Mac)");
LOGGER.info("NOTE: To enable file post-processing, 'enablePostProcessFile' must be set to `true` (--enable-post-process-file for CLI).");
}
if (additionalProperties.containsKey(CodegenConstants.ENUM_PROPERTY_NAMING)) {
setEnumPropertyNaming((String) additionalProperties.get(CodegenConstants.ENUM_PROPERTY_NAMING));
}
if (additionalProperties.containsKey(CodegenConstants.SERIALIZATION_LIBRARY)) {
setSerializationLibrary((String) additionalProperties.get(CodegenConstants.SERIALIZATION_LIBRARY));
additionalProperties.put(this.serializationLibrary.name(), true);
} else {
additionalProperties.put(this.serializationLibrary.name(), true);
}
if (additionalProperties.containsKey(CodegenConstants.SOURCE_FOLDER)) {
this.setSourceFolder((String) additionalProperties.get(CodegenConstants.SOURCE_FOLDER));
} else {
additionalProperties.put(CodegenConstants.SOURCE_FOLDER, sourceFolder);
}
if (additionalProperties.containsKey(CodegenConstants.PACKAGE_NAME)) {
this.setPackageName((String) additionalProperties.get(CodegenConstants.PACKAGE_NAME));
if (!additionalProperties.containsKey(CodegenConstants.MODEL_PACKAGE))
this.setModelPackage(packageName + ".models");
if (!additionalProperties.containsKey(CodegenConstants.API_PACKAGE))
this.setApiPackage(packageName + ".apis");
} else {
additionalProperties.put(CodegenConstants.PACKAGE_NAME, packageName);
}
if (additionalProperties.containsKey(CodegenConstants.API_SUFFIX)) {
this.setApiSuffix((String) additionalProperties.get(CodegenConstants.API_SUFFIX));
}
if (additionalProperties.containsKey(CodegenConstants.ARTIFACT_ID)) {
this.setArtifactId((String) additionalProperties.get(CodegenConstants.ARTIFACT_ID));
} else {
additionalProperties.put(CodegenConstants.ARTIFACT_ID, artifactId);
}
if (additionalProperties.containsKey(CodegenConstants.GROUP_ID)) {
this.setGroupId((String) additionalProperties.get(CodegenConstants.GROUP_ID));
} else {
additionalProperties.put(CodegenConstants.GROUP_ID, groupId);
}
if (additionalProperties.containsKey(CodegenConstants.ARTIFACT_VERSION)) {
this.setArtifactVersion((String) additionalProperties.get(CodegenConstants.ARTIFACT_VERSION));
} else {
additionalProperties.put(CodegenConstants.ARTIFACT_VERSION, artifactVersion);
}
if (additionalProperties.containsKey(CodegenConstants.INVOKER_PACKAGE)) {
LOGGER.warn(CodegenConstants.INVOKER_PACKAGE + " with " + this.getName() + " generator is ignored. Use " + CodegenConstants.PACKAGE_NAME + ".");
}
if (additionalProperties.containsKey(CodegenConstants.SERIALIZABLE_MODEL)) {
this.setSerializableModel(convertPropertyToBooleanAndWriteBack(CodegenConstants.SERIALIZABLE_MODEL));
} else {
additionalProperties.put(CodegenConstants.SERIALIZABLE_MODEL, serializableModel);
}
if (additionalProperties.containsKey(CodegenConstants.PARCELIZE_MODELS)) {
this.setParcelizeModels(convertPropertyToBooleanAndWriteBack(CodegenConstants.PARCELIZE_MODELS));
} else {
additionalProperties.put(CodegenConstants.PARCELIZE_MODELS, parcelizeModels);
}
if (additionalProperties.containsKey(CodegenConstants.NON_PUBLIC_API)) {
this.setNonPublicApi(convertPropertyToBooleanAndWriteBack(CodegenConstants.NON_PUBLIC_API));
} else {
additionalProperties.put(CodegenConstants.NON_PUBLIC_API, nonPublicApi);
}
additionalProperties.put(CodegenConstants.SORT_PARAMS_BY_REQUIRED_FLAG, getSortParamsByRequiredFlag());
additionalProperties.put(CodegenConstants.SORT_MODEL_PROPERTIES_BY_REQUIRED_FLAG, getSortModelPropertiesByRequiredFlag());
additionalProperties.put(CodegenConstants.API_PACKAGE, apiPackage());
additionalProperties.put(CodegenConstants.MODEL_PACKAGE, modelPackage());
additionalProperties.put("apiDocPath", apiDocPath);
additionalProperties.put("modelDocPath", modelDocPath);
}
public void setArtifactId(String artifactId) {
this.artifactId = artifactId;
}
public void setArtifactVersion(String artifactVersion) {
this.artifactVersion = artifactVersion;
}
public void setGroupId(String groupId) {
this.groupId = groupId;
}
public void setPackageName(String packageName) {
this.packageName = packageName;
}
public void setApiSuffix(String apiSuffix) {
this.apiSuffix = apiSuffix;
}
public void setSourceFolder(String sourceFolder) {
this.sourceFolder = sourceFolder;
}
public void setTestFolder(String testFolder) {
this.testFolder = testFolder;
}
public Boolean getParcelizeModels() {
return parcelizeModels;
}
public void setParcelizeModels(Boolean parcelizeModels) {
this.parcelizeModels = parcelizeModels;
}
public boolean isSerializableModel() {
return serializableModel;
}
public void setSerializableModel(boolean serializableModel) {
this.serializableModel = serializableModel;
}
public boolean nonPublicApi() {
return nonPublicApi;
}
public void setNonPublicApi(boolean nonPublicApi) {
this.nonPublicApi = nonPublicApi;
}
public boolean isNeedsDataClassBody() {
return needsDataClassBody;
}
public void setNeedsDataClassBody(boolean needsDataClassBody) {
this.needsDataClassBody = needsDataClassBody;
}
/**
* Return the sanitized variable name for enum
*
* @param value enum variable name
* @param datatype data type
* @return the sanitized variable name for enum
*/
@Override
public String toEnumVarName(String value, String datatype) {
String modified;
if (value.length() == 0) {
modified = "EMPTY";
} else {
modified = value;
modified = sanitizeKotlinSpecificNames(modified);
}
switch (getEnumPropertyNaming()) {
case original:
// NOTE: This is provided as a last-case allowance, but will still result in reserved words being escaped.
modified = value;
break;
case camelCase:
// NOTE: Removes hyphens and underscores
modified = camelize(modified, true);
break;
case PascalCase:
// NOTE: Removes hyphens and underscores
String result = camelize(modified);
modified = titleCase(result);
break;
case snake_case:
// NOTE: Removes hyphens
modified = underscore(modified);
break;
case UPPERCASE:
modified = underscore(modified).toUpperCase(Locale.ROOT);
break;
}
if (reservedWords.contains(modified)) {
return escapeReservedWord(modified);
}
// NOTE: another sanitize because camelize can create an invalid name
return sanitizeKotlinSpecificNames(modified);
}
@Override
public String toApiName(String name) {
if (name.length() == 0) {
return "DefaultApi";
}
return (this.apiSuffix.isEmpty() ? camelize(name) : camelize(name) + this.apiSuffix);
}
/**
* Return the fully-qualified "Model" name for import
*
* @param name the name of the "Model"
* @return the fully-qualified "Model" name for import
*/
@Override
public String toModelImport(String name) {
// toModelImport is called while processing operations, but DefaultCodegen doesn't
// define imports correctly with fully qualified primitives and models as defined in this generator.
if (needToImport(name)) {
return super.toModelImport(name);
}
return name;
}
/**
* Output the proper model name (capitalized).
* In case the name belongs to the TypeSystem it won't be renamed.
*
* @param name the name of the model
* @return capitalized model name
*/
@Override
public String toModelName(final String name) {
// Allow for explicitly configured kotlin.* and java.* types
if (name.startsWith("kotlin.") || name.startsWith("java.")) {
return name;
}
// If importMapping contains name, assume this is a legitimate model name.
if (importMapping.containsKey(name)) {
return importMapping.get(name);
}
String modifiedName = name.replaceAll("\\.", "");
String sanitizedName = sanitizeKotlinSpecificNames(modifiedName);
String nameWithPrefixSuffix = sanitizedName;
if (!StringUtils.isEmpty(modelNamePrefix)) {
// add '_' so that model name can be camelized correctly
nameWithPrefixSuffix = modelNamePrefix + "_" + nameWithPrefixSuffix;
}
if (!StringUtils.isEmpty(modelNameSuffix)) {
// add '_' so that model name can be camelized correctly
nameWithPrefixSuffix = nameWithPrefixSuffix + "_" + modelNameSuffix;
}
// Camelize name of nested properties
modifiedName = camelize(nameWithPrefixSuffix);
// model name cannot use reserved keyword, e.g. return
if (isReservedWord(modifiedName)) {
final String modelName = "Model" + modifiedName;
LOGGER.warn(modifiedName + " (reserved word) cannot be used as model name. Renamed to " + modelName);
return modelName;
}
// model name starts with number
if (modifiedName.matches("^\\d.*")) {
final String modelName = "Model" + modifiedName; // e.g. 200Response => Model200Response (after camelize)
LOGGER.warn(name + " (model name starts with number) cannot be used as model name. Renamed to " + modelName);
return modelName;
}
return titleCase(modifiedName);
}
/**
* Return the operation ID (method name)
*
* @param operationId operation ID
* @return the sanitized method name
*/
@Override
public String toOperationId(String operationId) {
// throw exception if method name is empty
if (StringUtils.isEmpty(operationId))
throw new RuntimeException("Empty method/operation name (operationId) not allowed");
operationId = camelize(sanitizeName(operationId), true);
// method name cannot use reserved keyword, e.g. return
if (isReservedWord(operationId)) {
String newOperationId = camelize("call_" + operationId, true);
LOGGER.warn(operationId + " (reserved word) cannot be used as method name. Renamed to " + newOperationId);
return newOperationId;
}
// operationId starts with a number
if (operationId.matches("^\\d.*")) {
LOGGER.warn(operationId + " (starting with a number) cannot be used as method sname. Renamed to " + camelize("call_" + operationId), true);
operationId = camelize("call_" + operationId, true);
}
return operationId;
}
@Override
public String toModelFilename(String name) {
// Should be the same as the model name
return toModelName(name);
}
/**
* Sanitize against Kotlin specific naming conventions, which may differ from those required by {@link DefaultCodegen#sanitizeName}.
*
* @param name string to be sanitize
* @return sanitized string
*/
private String sanitizeKotlinSpecificNames(final String name) {
String word = name;
for (Map.Entry<String, String> specialCharacters : specialCharReplacements.entrySet()) {
word = replaceSpecialCharacters(word, specialCharacters);
}
// Fallback, replace unknowns with underscore.
word = word.replaceAll("\\W+", "_");
if (word.matches("\\d.*")) {
word = "_" + word;
}
// _, __, and ___ are reserved in Kotlin. Treat all names with only underscores consistently, regardless of count.
if (word.matches("^_*$")) {
word = word.replaceAll("\\Q_\\E", "Underscore");
}
return word;
}
private String replaceSpecialCharacters(String word, Map.Entry<String, String> specialCharacters) {
String specialChar = specialCharacters.getKey();
String replacementChar = specialCharacters.getValue();
// Underscore is the only special character we'll allow
if (!specialChar.equals("_") && word.contains(specialChar)) {
return replaceCharacters(word, specialChar, replacementChar);
}
return word;
}
private String replaceCharacters(String word, String oldValue, String newValue) {
if (!word.contains(oldValue)) {
return word;
}
if (word.equals(oldValue)) {
return newValue;
}
int i = word.indexOf(oldValue);
String start = word.substring(0, i);
String end = recurseOnEndOfWord(word, oldValue, newValue, i);
return start + newValue + end;
}
private String recurseOnEndOfWord(String word, String oldValue, String newValue, int lastReplacedValue) {
String end = word.substring(lastReplacedValue + 1);
if (!end.isEmpty()) {
end = titleCase(end);
end = replaceCharacters(end, oldValue, newValue);
}
return end;
}
private String titleCase(final String input) {
return input.substring(0, 1).toUpperCase(Locale.ROOT) + input.substring(1);
}
@Override
protected boolean isReservedWord(String word) {
// We want case-sensitive escaping, to avoid unnecessary backtick-escaping.
return reservedWords.contains(word);
}
/**
* Check the type to see if it needs import the library/module/package
*
* @param type name of the type
* @return true if the library/module/package of the corresponding type needs to be imported
*/
@Override
protected boolean needToImport(String type) {
// provides extra protection against improperly trying to import language primitives and java types
boolean imports = !type.startsWith("kotlin.") && !type.startsWith("java.") &&
!defaultIncludes.contains(type) && !languageSpecificPrimitives.contains(type) &&
!type.contains(".");
return imports;
}
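// Builds the codegen model, de-duplicates optional vars, and marks parent-only properties as inherited across allVars/requiredVars/optionalVars.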
@Override
public CodegenModel fromModel(String name, Schema schema) {
CodegenModel m = super.fromModel(name, schema);
m.optionalVars = m.optionalVars.stream().distinct().collect(Collectors.toList());
// Update allVars/requiredVars/optionalVars with isInherited
// Each of these lists contains elements that are similar, but they are all cloned
// via CodegenModel.removeAllDuplicatedProperty and therefore need to be updated
// separately.
// First find only the parent vars via baseName matching
Map<String, CodegenProperty> allVarsMap = m.allVars.stream()
.collect(Collectors.toMap(CodegenProperty::getBaseName, Function.identity()));
allVarsMap.keySet()
.removeAll(m.vars.stream().map(CodegenProperty::getBaseName).collect(Collectors.toSet()));
// Update the allVars
allVarsMap.values().forEach(p -> p.isInherited = true);
// Update any other vars (requiredVars, optionalVars)
Stream.of(m.requiredVars, m.optionalVars)
.flatMap(List::stream)
.filter(p -> allVarsMap.containsKey(p.baseName))
.forEach(p -> p.isInherited = true);
return m;
}
@Override
public String toEnumValue(String value, String datatype) {
if ("kotlin.Int".equals(datatype) || "kotlin.Long".equals(datatype)) {
return value;
} else if ("kotlin.Double".equals(datatype)) {
if (value.contains(".")) {
return value;
} else {
return value + ".0"; // Float and double must have .0
}
} else if ("kotlin.Float".equals(datatype)) {
return value + "f";
} else {
return "\"" + escapeText(value) + "\"";
}
}
@Override
public boolean isDataTypeString(final String dataType) {
return "String".equals(dataType) || "kotlin.String".equals(dataType);
}
@Override
public String toParamName(String name) {
// to avoid conflicts with 'callback' parameter for async call
if ("callback".equals(name)) {
return "paramCallback";
}
// should be the same as variable name
return toVarName(name);
}
@Override
public String toVarName(String name) {
// sanitize name
name = sanitizeName(name, "\\W-[\\$]");
name = sanitizeKotlinSpecificNames(name);
if (name.toLowerCase(Locale.ROOT).matches("^_*class$")) {
return "propertyClass";
}
if ("_".equals(name)) {
name = "_u";
}
// if it's all upper case, do nothing
if (name.matches("^[A-Z0-9_]*$")) {
return name;
}
if (startsWithTwoUppercaseLetters(name)) {
name = name.substring(0, 2).toLowerCase(Locale.ROOT) + name.substring(2);
}
// If name contains special chars -> replace them.
if ((name.chars().anyMatch(character -> specialCharReplacements.keySet().contains("" + ((char) character))))) {
List<String> allowedCharacters = new ArrayList<>();
allowedCharacters.add("_");
allowedCharacters.add("$");
name = escape(name, specialCharReplacements, allowedCharacters, "_");
}
// camelize (lower first character) the variable name
// pet_id => petId
name = camelize(name, true);
// for reserved word or word starting with number or containing dollar symbol, escape it
if (isReservedWord(name) || name.matches("(^\\d.*)|(.*[$].*)")) {
name = escapeReservedWord(name);
}
return name;
}
@Override
public String toRegularExpression(String pattern) {
return escapeText(pattern);
}
private boolean startsWithTwoUppercaseLetters(String name) {
boolean startsWithTwoUppercaseLetters = false;
if (name.length() > 1) {
startsWithTwoUppercaseLetters = name.substring(0, 2).equals(name.substring(0, 2).toUpperCase(Locale.ROOT));
}
return startsWithTwoUppercaseLetters;
}
@Override
public void postProcessFile(File file, String fileType) {
if (file == null) {
return;
}
String kotlinPostProcessFile = System.getenv("KOTLIN_POST_PROCESS_FILE");
if (StringUtils.isEmpty(kotlinPostProcessFile)) {
return; // skip if KOTLIN_POST_PROCESS_FILE env variable is not defined
}
// only process files with kt extension
if ("kt".equals(FilenameUtils.getExtension(file.toString()))) {
String command = kotlinPostProcessFile + " " + file.toString();
try {
Process p = Runtime.getRuntime().exec(command);
p.waitFor();
int exitValue = p.exitValue();
if (exitValue != 0) {
LOGGER.error("Error running the command ({}). Exit value: {}", command, exitValue);
} else {
LOGGER.info("Successfully executed: " + command);
}
} catch (Exception e) {
LOGGER.error("Error running the command ({}). Exception: {}", command, e.getMessage());
}
}
}
@Override
public String toDefaultValue(Schema schema) {
Schema p = ModelUtils.getReferencedSchema(this.openAPI, schema);
if (ModelUtils.isBooleanSchema(p)) {
if (p.getDefault() != null) {
return p.getDefault().toString();
}
} else if (ModelUtils.isDateSchema(p)) {
// TODO
} else if (ModelUtils.isDateTimeSchema(p)) {
// TODO
} else if (ModelUtils.isNumberSchema(p)) {
if (p.getDefault() != null) {
return p.getDefault().toString();
}
} else if (ModelUtils.isIntegerSchema(p)) {
if (p.getDefault() != null) {
return p.getDefault().toString();
}
} else if (ModelUtils.isURISchema(p)) {
if (p.getDefault() != null) {
return "URI.create('" + p.getDefault() + "')";
}
} else if (ModelUtils.isStringSchema(p)) {
if (p.getDefault() != null) {
String _default = (String) p.getDefault();
if (p.getEnum() == null) {
return "\"" + escapeText(_default) + "\"";
} else {
// convert to enum var name later in postProcessModels
return _default;
}
}
return null;
}
return null;
}
}
| [
"\"KOTLIN_POST_PROCESS_FILE\"",
"\"KOTLIN_POST_PROCESS_FILE\""
]
| []
| [
"KOTLIN_POST_PROCESS_FILE"
]
| [] | ["KOTLIN_POST_PROCESS_FILE"] | java | 1 | 0 | |
iuberdata_zeppelin/src/main/java/org/apache/zeppelin/spark/DepInterpreter.java |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.zeppelin.spark;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import com.google.common.reflect.TypeToken;
import com.google.gson.Gson;
import org.apache.spark.repl.SparkILoop;
import org.apache.zeppelin.interpreter.Interpreter;
import org.apache.zeppelin.interpreter.InterpreterContext;
import org.apache.zeppelin.interpreter.InterpreterGroup;
import org.apache.zeppelin.interpreter.InterpreterResult;
import org.apache.zeppelin.interpreter.InterpreterResult.Code;
import org.apache.zeppelin.interpreter.WrappedInterpreter;
import org.apache.zeppelin.interpreter.thrift.InterpreterCompletion;
import org.apache.zeppelin.scheduler.Scheduler;
import org.apache.zeppelin.spark.dep.SparkDependencyContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.sonatype.aether.resolution.ArtifactResolutionException;
import org.sonatype.aether.resolution.DependencyResolutionException;
import scala.Console;
import scala.None;
import scala.Some;
import scala.collection.convert.WrapAsJava$;
import scala.collection.JavaConversions;
import scala.tools.nsc.Settings;
import scala.tools.nsc.interpreter.Completion.Candidates;
import scala.tools.nsc.interpreter.Completion.ScalaCompleter;
import scala.tools.nsc.interpreter.IMain;
import scala.tools.nsc.interpreter.Results;
import scala.tools.nsc.settings.MutableSettings.BooleanSetting;
import scala.tools.nsc.settings.MutableSettings.PathSetting;
/**
* DepInterpreter downloads dependencies and passes them to SparkInterpreter when it is initialized.
* It extends Interpreter but does not create a SparkContext.
*/
public class DepInterpreter extends Interpreter {
/**
* intp - org.apache.spark.repl.SparkIMain (scala 2.10)
* intp - scala.tools.nsc.interpreter.IMain; (scala 2.11)
*/
private Object intp;
private ByteArrayOutputStream out;
private SparkDependencyContext depc;
/**
* completer - org.apache.spark.repl.SparkJLineCompletion (scala 2.10)
*/
private Object completer;
private SparkILoop interpreter;
static final Logger LOGGER = LoggerFactory.getLogger(DepInterpreter.class);
public DepInterpreter(Properties property) {
super(property);
}
public SparkDependencyContext getDependencyContext() {
return depc;
}
public static String getSystemDefault(
String envName,
String propertyName,
String defaultValue) {
if (envName != null && !envName.isEmpty()) {
String envValue = System.getenv().get(envName);
if (envValue != null) {
return envValue;
}
}
if (propertyName != null && !propertyName.isEmpty()) {
String propValue = System.getProperty(propertyName);
if (propValue != null) {
return propValue;
}
}
return defaultValue;
}
@Override
public void close() {
if (intp != null) {
Utils.invokeMethod(intp, "close");
}
}
@Override
public void open() {
out = new ByteArrayOutputStream();
createIMain();
}
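// Builds the embedded Scala REPL (SparkILoop/IMain) with the current classpath and classloader, then binds the dependency context into the interpreter as the variable "z".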
private void createIMain() {
Settings settings = new Settings();
URL[] urls = getClassloaderUrls();
// set classpath for scala compiler
PathSetting pathSettings = settings.classpath();
String classpath = "";
List<File> paths = currentClassPath();
for (File f : paths) {
if (classpath.length() > 0) {
classpath += File.pathSeparator;
}
classpath += f.getAbsolutePath();
}
if (urls != null) {
for (URL u : urls) {
if (classpath.length() > 0) {
classpath += File.pathSeparator;
}
classpath += u.getFile();
}
}
pathSettings.v_$eq(classpath);
settings.scala$tools$nsc$settings$ScalaSettings$_setter_$classpath_$eq(pathSettings);
// set classloader for scala compiler
settings.explicitParentLoader_$eq(new Some<>(Thread.currentThread()
.getContextClassLoader()));
BooleanSetting b = (BooleanSetting) settings.usejavacp();
b.v_$eq(true);
settings.scala$tools$nsc$settings$StandardScalaSettings$_setter_$usejavacp_$eq(b);
interpreter = new SparkILoop((java.io.BufferedReader) null, new PrintWriter(out));
interpreter.settings_$eq(settings);
interpreter.createInterpreter();
intp = Utils.invokeMethod(interpreter, "intp");
if (Utils.isScala2_10()) {
Utils.invokeMethod(intp, "setContextClassLoader");
Utils.invokeMethod(intp, "initializeSynchronous");
}
depc = new SparkDependencyContext(getProperty("zeppelin.dep.localrepo"),
getProperty("zeppelin.dep.additionalRemoteRepository"));
if (Utils.isScala2_10()) {
completer = Utils.instantiateClass(
"org.apache.spark.repl.SparkJLineCompletion",
new Class[]{Utils.findClass("org.apache.spark.repl.SparkIMain")},
new Object[]{intp});
}
interpret("@transient var _binder = new java.util.HashMap[String, Object]()");
Map<String, Object> binder;
if (Utils.isScala2_10()) {
binder = (Map<String, Object>) getValue("_binder");
} else {
binder = (Map<String, Object>) getLastObject();
}
binder.put("depc", depc);
interpret("@transient val z = "
+ "_binder.get(\"depc\")"
+ ".asInstanceOf[org.apache.zeppelin.spark.dep.SparkDependencyContext]");
}
private Results.Result interpret(String line) {
return (Results.Result) Utils.invokeMethod(
intp,
"interpret",
new Class[]{String.class},
new Object[]{line});
}
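// Looks up a term bound in the Scala interpreter, unwrapping Scala Option results (Some/None).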
public Object getValue(String name) {
Object ret = Utils.invokeMethod(
intp, "valueOfTerm", new Class[]{String.class}, new Object[]{name});
if (ret instanceof None) {
return null;
} else if (ret instanceof Some) {
return ((Some) ret).get();
} else {
return ret;
}
}
public Object getLastObject() {
IMain.Request r = (IMain.Request) Utils.invokeMethod(intp, "lastRequest");
Object obj = r.lineRep().call("$result",
JavaConversions.asScalaBuffer(new LinkedList<>()));
return obj;
}
@Override
public InterpreterResult interpret(String st, InterpreterContext context) {
PrintStream printStream = new PrintStream(out);
Console.setOut(printStream);
out.reset();
IUberSparkInterpreter sparkInterpreter = getSparkInterpreter();
if (sparkInterpreter != null && sparkInterpreter.isSparkContextInitialized()) {
return new InterpreterResult(Code.ERROR,
"Must be used before SparkInterpreter (%spark) initialized\n" +
"Hint: put this paragraph before any Spark code and " +
"restart Zeppelin/Interpreter");
}
scala.tools.nsc.interpreter.Results.Result ret = interpret(st);
Code code = getResultCode(ret);
try {
depc.fetch();
} catch (MalformedURLException | DependencyResolutionException
| ArtifactResolutionException e) {
LOGGER.error("Exception in DepInterpreter while interpret ", e);
return new InterpreterResult(Code.ERROR, e.toString());
}
if (code == Code.INCOMPLETE) {
return new InterpreterResult(code, "Incomplete expression");
} else if (code == Code.ERROR) {
return new InterpreterResult(code, out.toString());
} else {
return new InterpreterResult(code, out.toString());
}
}
private Code getResultCode(scala.tools.nsc.interpreter.Results.Result r) {
if (r instanceof scala.tools.nsc.interpreter.Results.Success$) {
return Code.SUCCESS;
} else if (r instanceof scala.tools.nsc.interpreter.Results.Incomplete$) {
return Code.INCOMPLETE;
} else {
return Code.ERROR;
}
}
@Override
public void cancel(InterpreterContext context) {
}
@Override
public FormType getFormType() {
return FormType.NATIVE;
}
@Override
public int getProgress(InterpreterContext context) {
return 0;
}
@Override
public List<InterpreterCompletion> completion(String buf, int cursor) {
if (Utils.isScala2_10()) {
ScalaCompleter c = (ScalaCompleter) Utils.invokeMethod(completer, "completer");
Candidates ret = c.complete(buf, cursor);
List<String> candidates = WrapAsJava$.MODULE$.seqAsJavaList(ret.candidates());
List<InterpreterCompletion> completions = new LinkedList<>();
for (String candidate : candidates) {
completions.add(new InterpreterCompletion(candidate, candidate));
}
return completions;
} else {
return new LinkedList<>();
}
}
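// Collects classpath entries from the context classloader and the java.class.path system property.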
private List<File> currentClassPath() {
List<File> paths = classPath(Thread.currentThread().getContextClassLoader());
String[] cps = System.getProperty("java.class.path").split(File.pathSeparator);
if (cps != null) {
for (String cp : cps) {
paths.add(new File(cp));
}
}
return paths;
}
private List<File> classPath(ClassLoader cl) {
List<File> paths = new LinkedList<>();
if (cl == null) {
return paths;
}
if (cl instanceof URLClassLoader) {
URLClassLoader ucl = (URLClassLoader) cl;
URL[] urls = ucl.getURLs();
if (urls != null) {
for (URL url : urls) {
paths.add(new File(url.getFile()));
}
}
}
return paths;
}
private IUberSparkInterpreter getSparkInterpreter() {
InterpreterGroup intpGroup = getInterpreterGroup();
if (intpGroup == null) {
return null;
}
Interpreter p = getInterpreterInTheSameSessionByClassName(IUberSparkInterpreter.class
.getName());
if (p == null) {
return null;
}
while (p instanceof WrappedInterpreter) {
p = ((WrappedInterpreter) p).getInnerInterpreter();
}
return (IUberSparkInterpreter) p;
}
@Override
public Scheduler getScheduler() {
IUberSparkInterpreter sparkInterpreter = getSparkInterpreter();
if (sparkInterpreter != null) {
return getSparkInterpreter().getScheduler();
} else {
return null;
}
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
extern/sector-storage/manager_calltracker.go | package sectorstorage
import (
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"os"
"time"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)
type WorkID struct {
Method sealtasks.TaskType
Params string // json [...params]
}
func (w WorkID) String() string {
return fmt.Sprintf("%s(%s)", w.Method, w.Params)
}
var _ fmt.Stringer = &WorkID{}
type WorkStatus string
const (
wsStarted WorkStatus = "started" // task started, not scheduled/running on a worker yet
wsRunning WorkStatus = "running" // task running on a worker, waiting for worker return
wsDone WorkStatus = "done" // task returned from the worker, results available
)
type WorkState struct {
ID WorkID
Status WorkStatus
WorkerCall storiface.CallID // Set when entering wsRunning
WorkError string // Status = wsDone, set when failed to start work
WorkerHostname string // hostname of last worker handling this job
StartTime int64 // unix seconds
}
func newWorkID(method sealtasks.TaskType, params ...interface{}) (WorkID, error) {
pb, err := json.Marshal(params)
if err != nil {
return WorkID{}, xerrors.Errorf("marshaling work params: %w", err)
}
if len(pb) > 256 {
s := sha256.Sum256(pb)
pb = []byte(hex.EncodeToString(s[:]))
}
return WorkID{
Method: method,
Params: string(pb),
}, nil
}
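// setupWorkTracker restores persisted work state on startup: entries that never ran or already finished are dropped, and running entries are re-linked to their worker calls.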
func (m *Manager) setupWorkTracker() {
m.workLk.Lock()
defer m.workLk.Unlock()
var ids []WorkState
if err := m.work.List(&ids); err != nil {
log.Error("getting work IDs") // quite bad
return
}
for _, st := range ids {
wid := st.ID
if os.Getenv("LOTUS_MINER_ABORT_UNFINISHED_WORK") == "1" {
st.Status = wsDone
}
switch st.Status {
case wsStarted:
log.Warnf("dropping non-running work %s", wid)
if err := m.work.Get(wid).End(); err != nil {
log.Errorf("cleannig up work state for %s", wid)
}
case wsDone:
// can happen after restart, abandoning work, and another restart
log.Warnf("dropping done work, no result, wid %s", wid)
if err := m.work.Get(wid).End(); err != nil {
log.Errorf("cleannig up work state for %s", wid)
}
case wsRunning:
m.callToWork[st.WorkerCall] = wid
}
}
}
// returns wait=true when the task is already tracked/running
func (m *Manager) getWork(ctx context.Context, method sealtasks.TaskType, params ...interface{}) (wid WorkID, wait bool, cancel func(), err error) {
wid, err = newWorkID(method, params)
if err != nil {
return WorkID{}, false, nil, xerrors.Errorf("creating WorkID: %w", err)
}
m.workLk.Lock()
defer m.workLk.Unlock()
have, err := m.work.Has(wid)
if err != nil {
return WorkID{}, false, nil, xerrors.Errorf("failed to check if the task is already tracked: %w", err)
}
if !have {
err := m.work.Begin(wid, &WorkState{
ID: wid,
Status: wsStarted,
})
if err != nil {
return WorkID{}, false, nil, xerrors.Errorf("failed to track task start: %w", err)
}
return wid, false, func() {
m.workLk.Lock()
defer m.workLk.Unlock()
have, err := m.work.Has(wid)
if err != nil {
log.Errorf("cancel: work has error: %+v", err)
return
}
if !have {
return // expected / happy path
}
var ws WorkState
if err := m.work.Get(wid).Get(&ws); err != nil {
log.Errorf("cancel: get work %s: %+v", wid, err)
return
}
switch ws.Status {
case wsStarted:
log.Warnf("canceling started (not running) work %s", wid)
if err := m.work.Get(wid).End(); err != nil {
log.Errorf("cancel: failed to cancel started work %s: %+v", wid, err)
return
}
case wsDone:
// TODO: still remove?
log.Warnf("cancel called on work %s in 'done' state", wid)
case wsRunning:
log.Warnf("cancel called on work %s in 'running' state (manager shutting down?)", wid)
}
}, nil
}
// already started
return wid, true, func() {
// TODO
}, nil
}
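// startWork returns a callback that records the worker call ID, hostname and start time for the tracked work entry once the call has been scheduled (or records the failure).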
func (m *Manager) startWork(ctx context.Context, w Worker, wk WorkID) func(callID storiface.CallID, err error) error {
return func(callID storiface.CallID, err error) error {
var hostname string
info, ierr := w.Info(ctx)
if ierr != nil {
hostname = "[err]"
} else {
hostname = info.Hostname
}
m.workLk.Lock()
defer m.workLk.Unlock()
if err != nil {
merr := m.work.Get(wk).Mutate(func(ws *WorkState) error {
ws.Status = wsDone
ws.WorkError = err.Error()
return nil
})
if merr != nil {
return xerrors.Errorf("failed to start work and to track the error; merr: %+v, err: %w", merr, err)
}
return err
}
err = m.work.Get(wk).Mutate(func(ws *WorkState) error {
_, ok := m.results[wk]
if ok {
log.Warn("work returned before we started tracking it")
ws.Status = wsDone
} else {
ws.Status = wsRunning
}
ws.WorkerCall = callID
ws.WorkerHostname = hostname
ws.StartTime = time.Now().Unix()
return nil
})
if err != nil {
return xerrors.Errorf("registering running work: %w", err)
}
m.callToWork[callID] = wk
return nil
}
}
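// waitWork blocks until the result for the given work ID is available, also handling results that arrived before we started waiting (e.g. across a manager restart).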
func (m *Manager) waitWork(ctx context.Context, wid WorkID) (interface{}, error) {
m.workLk.Lock()
var ws WorkState
if err := m.work.Get(wid).Get(&ws); err != nil {
m.workLk.Unlock()
return nil, xerrors.Errorf("getting work status: %w", err)
}
if ws.Status == wsStarted {
m.workLk.Unlock()
return nil, xerrors.Errorf("waitWork called for work in 'started' state")
}
// sanity check
wk := m.callToWork[ws.WorkerCall]
if wk != wid {
m.workLk.Unlock()
return nil, xerrors.Errorf("wrong callToWork mapping for call %s; expected %s, got %s", ws.WorkerCall, wid, wk)
}
// make sure we don't have the result ready
cr, ok := m.callRes[ws.WorkerCall]
if ok {
delete(m.callToWork, ws.WorkerCall)
if len(cr) == 1 {
err := m.work.Get(wk).End()
if err != nil {
m.workLk.Unlock()
// Not great, but not worth discarding potentially multi-hour computation over this
log.Errorf("marking work as done: %+v", err)
}
res := <-cr
delete(m.callRes, ws.WorkerCall)
m.workLk.Unlock()
return res.r, res.err
}
m.workLk.Unlock()
return nil, xerrors.Errorf("something else in waiting on callRes")
}
done := func() {
delete(m.results, wid)
_, ok := m.callToWork[ws.WorkerCall]
if ok {
delete(m.callToWork, ws.WorkerCall)
}
err := m.work.Get(wk).End()
if err != nil {
// Not great, but not worth discarding potentially multi-hour computation over this
log.Errorf("marking work as done: %+v", err)
}
}
// the result can already be there if the work was running, manager restarted,
// and the worker has delivered the result before we entered waitWork
res, ok := m.results[wid]
if ok {
done()
m.workLk.Unlock()
return res.r, res.err
}
ch, ok := m.waitRes[wid]
if !ok {
ch = make(chan struct{})
m.waitRes[wid] = ch
}
m.workLk.Unlock()
select {
case <-ch:
m.workLk.Lock()
defer m.workLk.Unlock()
res := m.results[wid]
done()
return res.r, res.err
case <-ctx.Done():
return nil, xerrors.Errorf("waiting for work result: %w", ctx.Err())
}
}
func (m *Manager) waitSimpleCall(ctx context.Context) func(callID storiface.CallID, err error) (interface{}, error) {
return func(callID storiface.CallID, err error) (interface{}, error) {
if err != nil {
return nil, err
}
return m.waitCall(ctx, callID)
}
}
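// waitCall blocks until the result of an untracked (work-less) call arrives or the context is cancelled.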
func (m *Manager) waitCall(ctx context.Context, callID storiface.CallID) (interface{}, error) {
m.workLk.Lock()
_, ok := m.callToWork[callID]
if ok {
m.workLk.Unlock()
return nil, xerrors.Errorf("can't wait for calls related to work")
}
ch, ok := m.callRes[callID]
if !ok {
ch = make(chan result, 1)
m.callRes[callID] = ch
}
m.workLk.Unlock()
defer func() {
m.workLk.Lock()
defer m.workLk.Unlock()
delete(m.callRes, callID)
}()
select {
case res := <-ch:
return res.r, res.err
case <-ctx.Done():
return nil, xerrors.Errorf("waiting for call result: %w", ctx.Err())
}
}
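// returnResult records a result delivered by a worker, routing it either to a plain call waiter or to the tracked work entry, and wakes any goroutine blocked in waitWork.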
func (m *Manager) returnResult(ctx context.Context, callID storiface.CallID, r interface{}, cerr *storiface.CallError) error {
res := result{
r: r,
}
if cerr != nil {
res.err = cerr
}
m.sched.workTracker.onDone(ctx, callID)
m.workLk.Lock()
defer m.workLk.Unlock()
wid, ok := m.callToWork[callID]
if !ok {
rch, ok := m.callRes[callID]
if !ok {
rch = make(chan result, 1)
m.callRes[callID] = rch
}
if len(rch) > 0 {
return xerrors.Errorf("callRes channel already has a response")
}
if cap(rch) == 0 {
return xerrors.Errorf("expected rch to be buffered")
}
rch <- res
return nil
}
_, ok = m.results[wid]
if ok {
return xerrors.Errorf("result for call %v already reported", wid)
}
m.results[wid] = res
err := m.work.Get(wid).Mutate(func(ws *WorkState) error {
ws.Status = wsDone
return nil
})
if err != nil {
// in the unlikely case:
// * manager has restarted, and we're still tracking this work, and
// * the work is abandoned (storage-fsm doesn't do a matching call on the sector), and
// * the call is returned from the worker, and
// * this errors
// the user will get jobs stuck in ret-wait state
log.Errorf("marking work as done: %+v", err)
}
_, found := m.waitRes[wid]
if found {
close(m.waitRes[wid])
delete(m.waitRes, wid)
}
return nil
}
func (m *Manager) Abort(ctx context.Context, call storiface.CallID) error {
// TODO: Allow temp error
return m.returnResult(ctx, call, nil, storiface.Err(storiface.ErrUnknown, xerrors.New("task aborted")))
}
| [
"\"LOTUS_MINER_ABORT_UNFINISHED_WORK\""
]
| []
| [
"LOTUS_MINER_ABORT_UNFINISHED_WORK"
]
| [] | ["LOTUS_MINER_ABORT_UNFINISHED_WORK"] | go | 1 | 0 | |
nuclio/ingest/ingest_example_test.go | package ingest
import (
"fmt"
"github.com/nuclio/nuclio-test-go"
"os"
"testing"
"time"
)
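// TestName runs the ingest handler twice through the nutest harness, binding "db0" to the V3IO endpoint taken from the V3IO_URL environment variable.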
func TestName(t *testing.T) {
data := nutest.DataBind{Name: "db0", Url: os.Getenv("V3IO_URL"), Container: "1"}
tc, err := nutest.NewTestContext(Handler, true, &data)
if err != nil {
t.Fatal(err)
}
err = tc.InitContext(InitContext)
if err != nil {
t.Fatal(err)
}
testEvent := nutest.TestEvent{
Body: []byte(pushEvent),
}
resp, err := tc.Invoke(&testEvent)
tc.Logger.InfoWith("Run complete", "resp", resp, "err", err)
resp, err = tc.Invoke(&testEvent)
time.Sleep(time.Second * 1)
tc.Logger.InfoWith("Run complete", "resp", resp, "err", err)
fmt.Println(resp)
time.Sleep(time.Second * 10)
}
| [
"\"V3IO_URL\""
]
| []
| [
"V3IO_URL"
]
| [] | ["V3IO_URL"] | go | 1 | 0 | |
cmd/frontend/internal/cli/config.go | package cli
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/url"
"os"
"os/user"
"github.com/pkg/errors"
"github.com/sourcegraph/sourcegraph/cmd/frontend/db"
"github.com/sourcegraph/sourcegraph/cmd/frontend/globals"
"github.com/sourcegraph/sourcegraph/cmd/frontend/types"
"github.com/sourcegraph/sourcegraph/pkg/conf"
"github.com/sourcegraph/sourcegraph/pkg/conf/conftypes"
"github.com/sourcegraph/sourcegraph/pkg/db/confdb"
"github.com/sourcegraph/sourcegraph/pkg/jsonc"
log15 "gopkg.in/inconshreveable/log15.v2"
)
func printConfigValidation() {
messages, err := conf.Validate(globals.ConfigurationServerFrontendOnly.Raw())
if err != nil {
log.Printf("Warning: Unable to validate Sourcegraph site configuration: %s", err)
return
}
if len(messages) > 0 {
log15.Warn("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
log15.Warn("⚠️ Warnings related to the Sourcegraph site configuration:")
for _, verr := range messages {
log15.Warn(verr)
}
log15.Warn("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
}
}
// handleConfigOverrides handles allowing dev environments to forcibly override
// the configuration in the database upon startup. This is used to e.g. ensure
// dev environments have a consistent configuration and to load secrets from a
// separate private repository.
func handleConfigOverrides() {
if conf.IsDev(conf.DeployType()) {
raw := conf.Raw()
devOverrideCriticalConfig := os.Getenv("DEV_OVERRIDE_CRITICAL_CONFIG")
if devOverrideCriticalConfig != "" {
critical, err := ioutil.ReadFile(devOverrideCriticalConfig)
if err != nil {
log.Fatal(err)
}
raw.Critical = string(critical)
}
devOverrideSiteConfig := os.Getenv("DEV_OVERRIDE_SITE_CONFIG")
if devOverrideSiteConfig != "" {
site, err := ioutil.ReadFile(devOverrideSiteConfig)
if err != nil {
log.Fatal(err)
}
raw.Site = string(site)
}
if devOverrideCriticalConfig != "" || devOverrideSiteConfig != "" {
err := (&configurationSource{}).Write(context.Background(), raw)
if err != nil {
log.Fatal(err)
}
}
devOverrideExtSvcConfig := os.Getenv("DEV_OVERRIDE_EXTSVC_CONFIG")
if devOverrideExtSvcConfig != "" {
existing, err := db.ExternalServices.List(context.Background(), db.ExternalServicesListOptions{})
if err != nil {
log.Fatal(err)
}
if len(existing) > 0 {
return
}
extsvc, err := ioutil.ReadFile(devOverrideExtSvcConfig)
if err != nil {
log.Fatal(err)
}
var configs map[string][]*json.RawMessage
if err := jsonc.Unmarshal(string(extsvc), &configs); err != nil {
log.Fatal(err)
}
for key, cfgs := range configs {
for i, cfg := range cfgs {
marshaledCfg, err := json.MarshalIndent(cfg, "", " ")
if err != nil {
log.Fatal(err)
}
if err := db.ExternalServices.Create(context.Background(), &types.ExternalService{
Kind: key,
DisplayName: fmt.Sprintf("Dev %s #%d", key, i+1),
Config: string(marshaledCfg),
}); err != nil {
log.Fatal(err)
}
}
}
}
}
}
type configurationSource struct{}
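// Read assembles the unified configuration from the latest critical and site configuration stored in the database.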
func (c configurationSource) Read(ctx context.Context) (conftypes.RawUnified, error) {
critical, err := confdb.CriticalGetLatest(ctx)
if err != nil {
return conftypes.RawUnified{}, errors.Wrap(err, "confdb.CriticalGetLatest")
}
site, err := confdb.SiteGetLatest(ctx)
if err != nil {
return conftypes.RawUnified{}, errors.Wrap(err, "confdb.SiteGetLatest")
}
return conftypes.RawUnified{
Critical: critical.Contents,
Site: site.Contents,
// TODO(slimsag): future: pass GitServers list via this.
ServiceConnections: conftypes.ServiceConnections{
PostgresDSN: postgresDSN(),
},
}, nil
}
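// Write persists the given critical and site configuration, creating new versions only if the currently stored ones are still the latest.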
func (c configurationSource) Write(ctx context.Context, input conftypes.RawUnified) error {
// TODO(slimsag): future: pass lastID through for race prevention
critical, err := confdb.CriticalGetLatest(ctx)
if err != nil {
return errors.Wrap(err, "confdb.CriticalGetLatest")
}
site, err := confdb.SiteGetLatest(ctx)
if err != nil {
return errors.Wrap(err, "confdb.SiteGetLatest")
}
_, err = confdb.CriticalCreateIfUpToDate(ctx, &critical.ID, input.Critical)
if err != nil {
return errors.Wrap(err, "confdb.CriticalCreateIfUpToDate")
}
_, err = confdb.SiteCreateIfUpToDate(ctx, &site.ID, input.Site)
if err != nil {
return errors.Wrap(err, "confdb.SiteCreateIfUpToDate")
}
return nil
}
func postgresDSN() string {
username := ""
if user, err := user.Current(); err == nil {
username = user.Username
}
return doPostgresDSN(username, os.Getenv)
}
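// doPostgresDSN builds a Postgres connection string from the PG* environment variables (read via getenv), falling back to a local default.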
func doPostgresDSN(currentUser string, getenv func(string) string) string {
// PGDATASOURCE is a sourcegraph specific variable for just setting the DSN
if dsn := getenv("PGDATASOURCE"); dsn != "" {
return dsn
}
// TODO match logic in lib/pq
// https://sourcegraph.com/github.com/lib/pq@d6156e141ac6c06345c7c73f450987a9ed4b751f/-/blob/connector.go#L42
dsn := &url.URL{
Scheme: "postgres",
Host: "127.0.0.1:5432",
RawQuery: "sslmode=disable",
}
// Username preference: PGUSER, $USER, postgres
username := "postgres"
if currentUser != "" {
username = currentUser
}
if user := getenv("PGUSER"); user != "" {
username = user
}
if password := getenv("PGPASSWORD"); password != "" {
dsn.User = url.UserPassword(username, password)
} else {
dsn.User = url.User(username)
}
if host := getenv("PGHOST"); host != "" {
dsn.Host = host
}
if port := getenv("PGPORT"); port != "" {
dsn.Host += ":" + port
}
if db := getenv("PGDATABASE"); db != "" {
dsn.Path = db
}
if sslmode := getenv("PGSSLMODE"); sslmode != "" {
qry := dsn.Query()
qry.Set("sslmode", sslmode)
dsn.RawQuery = qry.Encode()
}
return dsn.String()
}
| [
"\"DEV_OVERRIDE_CRITICAL_CONFIG\"",
"\"DEV_OVERRIDE_SITE_CONFIG\"",
"\"DEV_OVERRIDE_EXTSVC_CONFIG\""
]
| []
| [
"DEV_OVERRIDE_EXTSVC_CONFIG",
"DEV_OVERRIDE_CRITICAL_CONFIG",
"DEV_OVERRIDE_SITE_CONFIG"
]
| [] | ["DEV_OVERRIDE_EXTSVC_CONFIG", "DEV_OVERRIDE_CRITICAL_CONFIG", "DEV_OVERRIDE_SITE_CONFIG"] | go | 3 | 0 | |
Books Library/dataflair/wsgi.py | """
WSGI config for dataflair project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dataflair.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
run_tests.py | #!/usr/bin/env python3
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
import shutil
import subprocess
import tempfile
import platform
from mesonbuild import mesonlib
from mesonbuild import mesonmain
from mesonbuild import mlog
from mesonbuild.environment import detect_ninja
from io import StringIO
from enum import Enum
from glob import glob
Backend = Enum('Backend', 'ninja vs xcode')
if mesonlib.is_windows() or mesonlib.is_cygwin():
exe_suffix = '.exe'
else:
exe_suffix = ''
def get_backend_args_for_dir(backend, builddir):
'''
Visual Studio backend needs to be given the solution to build
'''
if backend is Backend.vs:
sln_name = glob(os.path.join(builddir, '*.sln'))[0]
return [os.path.split(sln_name)[-1]]
return []
def find_vcxproj_with_target(builddir, target):
import re, fnmatch
t, ext = os.path.splitext(target)
if ext:
p = '<TargetName>{}</TargetName>\s*<TargetExt>\{}</TargetExt>'.format(t, ext)
else:
p = '<TargetName>{}</TargetName>'.format(t)
for root, dirs, files in os.walk(builddir):
for f in fnmatch.filter(files, '*.vcxproj'):
f = os.path.join(builddir, f)
with open(f, 'r', encoding='utf-8') as o:
if re.search(p, o.read(), flags=re.MULTILINE):
return f
raise RuntimeError('No vcxproj matching {!r} in {!r}'.format(p, builddir))
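# Build the backend-specific argument list for a single target, or for the whole build dir when no target is given.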
def get_builddir_target_args(backend, builddir, target):
dir_args = []
if not target:
dir_args = get_backend_args_for_dir(backend, builddir)
if target is None:
return dir_args
if backend is Backend.vs:
vcxproj = find_vcxproj_with_target(builddir, target)
target_args = [vcxproj]
elif backend is Backend.xcode:
target_args = ['-target', target]
elif backend is Backend.ninja:
target_args = [target]
else:
raise AssertionError('Unknown backend: {!r}'.format(backend))
return target_args + dir_args
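# Return the build, clean, test, install and uninstall command lines for the selected backend.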
def get_backend_commands(backend, debug=False):
install_cmd = []
uninstall_cmd = []
if backend is Backend.vs:
cmd = ['msbuild']
clean_cmd = cmd + ['/target:Clean']
test_cmd = cmd + ['RUN_TESTS.vcxproj']
elif backend is Backend.xcode:
cmd = ['xcodebuild']
clean_cmd = cmd + ['-alltargets', 'clean']
test_cmd = cmd + ['-target', 'RUN_TESTS']
elif backend is Backend.ninja:
# We need at least 1.6 because of -w dupbuild=err
cmd = [detect_ninja('1.6'), '-w', 'dupbuild=err']
if cmd[0] is None:
raise RuntimeError('Could not find Ninja v1.6 or newer')
if debug:
cmd += ['-v']
clean_cmd = cmd + ['clean']
test_cmd = cmd + ['test', 'benchmark']
install_cmd = cmd + ['install']
uninstall_cmd = cmd + ['uninstall']
else:
raise AssertionError('Unknown backend: {!r}'.format(backend))
return cmd, clean_cmd, test_cmd, install_cmd, uninstall_cmd
def ensure_backend_detects_changes(backend):
# We're using a ninja with QuLogic's patch for sub-1s resolution timestamps
# and not running on HFS+ which only stores dates in seconds:
# https://developer.apple.com/legacy/library/technotes/tn/tn1150.html#HFSPlusDates
# FIXME: Upgrade Travis image to Apple FS when that becomes available
if 'MESON_FIXED_NINJA' in os.environ and not mesonlib.is_osx():
return
# This is needed to increase the difference between build.ninja's
# timestamp and the timestamp of whatever you changed due to a Ninja
# bug: https://github.com/ninja-build/ninja/issues/371
if backend is Backend.ninja:
time.sleep(1)
def get_fake_options(prefix):
import argparse
opts = argparse.Namespace()
opts.cross_file = None
opts.wrap_mode = None
opts.prefix = prefix
return opts
def should_run_linux_cross_tests():
return shutil.which('arm-linux-gnueabihf-gcc-6') and not platform.machine().lower().startswith('arm')
def run_configure_inprocess(commandlist):
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
old_stderr = sys.stderr
sys.stderr = mystderr = StringIO()
try:
returncode = mesonmain.run(commandlist[1:], commandlist[0])
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
return returncode, mystdout.getvalue(), mystderr.getvalue()
class FakeEnvironment(object):
def __init__(self):
self.cross_info = None
self.coredata = lambda: None
self.coredata.compilers = {}
def is_cross_build(self):
return False
def print_system_info():
print(mlog.bold('System information.').get_text(mlog.colorize_console))
print('Architecture:', platform.architecture())
print('Machine:', platform.machine())
print('Platform:', platform.system())
print('Processor:', platform.processor())
print('System:', platform.system())
print('')
if __name__ == '__main__':
print_system_info()
# Enable coverage early...
enable_coverage = '--cov' in sys.argv
if enable_coverage:
os.makedirs('.coverage', exist_ok=True)
sys.argv.remove('--cov')
import coverage
coverage.process_startup()
returncode = 0
# Iterate over list in reverse order to find the last --backend arg
backend = Backend.ninja
for arg in reversed(sys.argv[1:]):
if arg.startswith('--backend'):
if arg.startswith('--backend=vs'):
backend = Backend.vs
elif arg == '--backend=xcode':
backend = Backend.xcode
break
# Running on a developer machine? Be nice!
if not mesonlib.is_windows() and not mesonlib.is_haiku() and 'TRAVIS' not in os.environ:
os.nice(20)
# Appveyor sets the `platform` environment variable which completely messes
# up building with the vs2010 and vs2015 backends.
#
# Specifically, MSBuild reads the `platform` environment variable to set
# the configured value for the platform (Win32/x64/arm), which breaks x86
# builds.
#
# Appveyor setting this also breaks our 'native build arch' detection for
# Windows in environment.py:detect_windows_arch() by overwriting the value
# of `platform` set by vcvarsall.bat.
#
# While building for x86, `platform` should be unset.
if 'APPVEYOR' in os.environ and os.environ['arch'] == 'x86':
os.environ.pop('platform')
# Run tests
print(mlog.bold('Running unittests.').get_text(mlog.colorize_console))
print()
# Can't pass arguments to unit tests, so set the backend to use in the environment
env = os.environ.copy()
env['MESON_UNIT_TEST_BACKEND'] = backend.name
with tempfile.TemporaryDirectory() as td:
# Enable coverage on all subsequent processes.
if enable_coverage:
with open(os.path.join(td, 'usercustomize.py'), 'w') as f:
f.write('import coverage\n'
'coverage.process_startup()\n')
env['COVERAGE_PROCESS_START'] = '.coveragerc'
env['PYTHONPATH'] = os.pathsep.join([td] + env['PYTHONPATH'].split(os.pathsep)) if 'PYTHONPATH' in env else td
returncode += subprocess.call([sys.executable, 'run_unittests.py', '-v'], env=env)
# Ubuntu packages do not have a binary without -6 suffix.
if should_run_linux_cross_tests():
print(mlog.bold('Running cross compilation tests.').get_text(mlog.colorize_console))
print()
returncode += subprocess.call([sys.executable, 'run_cross_test.py', 'cross/ubuntu-armhf.txt'], env=env)
returncode += subprocess.call([sys.executable, 'run_project_tests.py'] + sys.argv[1:], env=env)
sys.exit(returncode)
| []
| []
| [
"arch"
]
| [] | ["arch"] | python | 1 | 0 | |
common/src/main/java/com/dremio/config/DremioConfig.java | /*
* Copyright (C) 2017-2019 Dremio Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.dremio.config;
import java.net.InetAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.net.UnknownHostException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import java.util.Map.Entry;
import java.util.Optional;
import org.reflections.util.ClasspathHelper;
import com.dremio.common.config.NestedConfig;
import com.dremio.common.config.SabotConfig;
import com.dremio.common.perf.Timer;
import com.dremio.common.perf.Timer.TimedBlock;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigParseOptions;
import com.typesafe.config.ConfigValue;
import com.typesafe.config.ConfigValueFactory;
/**
* A configuration object that is merged with and validated against the dremio-reference.conf configuration.
*/
public class DremioConfig extends NestedConfig {
private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(DremioConfig.class);
private static final String REFERENCE_CONFIG = "dremio-reference.conf";
private static final String DEFAULT_USER_CONFIG = "dremio.conf";
public static final String LOCAL_WRITE_PATH_STRING = "paths.local";
public static final String DIST_WRITE_PATH_STRING = "paths.dist";
public static final String CREDENTIALS_KEYSTORE_PASSWORD = "security.keystore-password";
public static final String NODE_TAG = "services.node-tag";
public static final String ENABLE_COORDINATOR_BOOL = "services.coordinator.enabled";
public static final String ENABLE_MASTER_BOOL = "services.coordinator.master.enabled";
public static final String ENABLE_EXECUTOR_BOOL = "services.executor.enabled";
public static final String EMBEDDED_MASTER_ZK_ENABLED_BOOL = "services.coordinator.master.embedded-zookeeper.enabled";
public static final String EMBEDDED_MASTER_ZK_ENABLED_PORT_INT = "services.coordinator.master.embedded-zookeeper.port";
public static final String EMBEDDED_MASTER_ZK_ENABLED_PATH_STRING = "services.coordinator.master.embedded-zookeeper.path";
public static final String WEB_ENABLED_BOOL = "services.coordinator.web.enabled";
public static final String WEB_AUTH_TYPE = "services.coordinator.web.auth.type";
public static final String WEB_PORT_INT = "services.coordinator.web.port";
public static final String WEB_TOKEN_CACHE_SIZE = "services.coordinator.web.tokens.cache.size";
public static final String SCHEDULER_SERVICE_THREAD_COUNT = "services.coordinator.scheduler.threads";
public static final String WEB_TOKEN_CACHE_EXPIRATION = "services.coordinator.web.tokens.cache.expiration_minutes";
public static final String TASK_ON_IDLE_LOAD_SHED = "debug.task.on_idle_load_shed";
public static final String TASK_RESCHEDULE_ON_UNBLOCK = "debug.task.reschedule_on_unblock";
public static final String TASK_EAGER_LOAD_SHED = "debug.task.eager_load_shed";
public static final String KERBEROS_PRINCIPAL = "services.kerberos.principal";
public static final String KERBEROS_KEYTAB_PATH = "services.kerberos.keytab.file.path";
public static final String CACHE_DB_PATH = "services.executor.cache.path.db";
public static final String CACHE_FS_PATH_LIST = "services.executor.cache.path.fs";
public static final String CACHE_ENABLED = "services.executor.cache.enabled";
public static final String CACHE_DB_QUOTA = "services.executor.cache.pctquota.db";
public static final String CACHE_FS_QUOTA_LIST = "services.executor.cache.pctquota.fs";
public static final String CACHE_FS_ENSURE_FREE_SPACE_LIST = "services.executor.cache.ensurefreespace.fs";
public static final String JOBS_ENABLED_BOOL = "services.jobs.enabled";
public static final String NO_OP_CLUSTER_COORDINATOR_ENABLED = "debug.noop.cluster" +
".coordinator.enabled";
/**
* Config values related to plan caching
*/
public static final String PLAN_CACHE_TIMEOUT_MINUTES = "dremio.plan.cache.timeout_minutes";
public static final String PLAN_CACHE_MAX_ENTRIES = "dremio.plan.cache.max_entries";
/**
* Config values related to statistics caching
*/
public static final String STATISTICS_CACHE_TIMEOUT_MINUTES = "dremio.statistics.cache.timeout_minutes";
public static final String STATISTICS_CACHE_MAX_ENTRIES = "dremio.statistics.cache.max_entries";
/**
* Path where ui config is located
*/
public static final String WEB_UI_SERVICE_CONFIG = "services.coordinator.web.ui";
/**
* Config values related to plugins
*/
public static final String PLUGINS_ROOT_PATH_PROPERTY = "dremio.plugins.path";
public static final String LEGACY_STORE_VIEWS_ENABLED = "legacy.dremio.store.views.enabled";
public static final String CLIENT_PORT_INT = "services.coordinator.client-endpoint.port";
public static final String SERVER_PORT_INT = "services.fabric.port";
public static final String CONDUIT_PORT_INT = "services.conduit.port";
public static final String FLIGHT_SERVICE_ENABLED_BOOLEAN = "services.flight.enabled";
public static final String FLIGHT_SERVICE_PORT_INT = "services.flight.port";
public static final String FLIGHT_SERVICE_AUTHENTICATION_MODE = "services.flight.auth.mode";
public static final String NESSIE_SERVICE_ENABLED_BOOLEAN = "services.nessie.enabled";
public static final String NESSIE_SERVICE_IN_MEMORY_BOOLEAN = "services.nessie.backend.in-memory";
public static final String NESSIE_SERVICE_KVSTORE_MAX_COMMIT_RETRIES = "services.nessie.kvstore.max-commit-retries";
public static final String AUTOUPGRADE = "services.coordinator.auto-upgrade";
public static final String REGISTRATION_ADDRESS = "registration.publish-host";
public static final String DB_PATH_STRING = "paths.db";
public static final String ACCELERATOR_PATH_STRING = "paths.accelerator";
public static final String DOWNLOADS_PATH_STRING = "paths.downloads";
public static final String UPLOADS_PATH_STRING = "paths.uploads";
public static final String RESULTS_PATH_STRING = "paths.results";
public static final String SCRATCH_PATH_STRING = "paths.scratch";
public static final String SPILLING_PATH_STRING = "paths.spilling";
public static final String METADATA_PATH_STRING = "paths.metadata";
public static final String ZOOKEEPER_QUORUM = "zookeeper";
public static final String ZK_CLIENT_SESSION_TIMEOUT = "zk.client.session.timeout";
public static final String ZK_CLIENT_RETRY_UNLIMITED = "zk.client.retry.unlimited";
public static final String ZK_CONNECTION_HANDLE_ENABLED = "zk.client.connection_handle.enabled";
public static final String ZK_CLIENT_RETRY_LIMIT = "zk.client.retry.limit";
public static final String ZK_CLIENT_INITIAL_TIMEOUT_MS = "zk.client.retry.initial_timeout_ms";
// Provisioning options
public static final String YARN_ENABLED_BOOL = "provisioning.yarn.enabled";
public static final String YARN_JVM_OPTIONS = "provisioning.yarn.jvmoptions";
public static final String YARN_CLASSPATH = "provisioning.yarn.classpath";
public static final String YARN_APP_CLASSPATH = "provisioning.yarn.app.classpath";
public static final String YARN_APP_CLASSPATH_PREFIX = "provisioning.yarn.app.classpath-prefix";
public static final String EC2_EFS_MOUNT_TARGET_IP_ADDRESS = "provisioning.ec2.efs.mountTargetIpAddress";
public static final String MIGRATION_ENABLED = "provisioning.migration.enabled";
// netty options
public static final String NETTY_REFLECTIONS_ACCESSIBLE = "io.netty.tryReflectionSetAccessible";
/**
* Path where debug options are located
*/
public static final String DEBUG_OPTIONS = "debug";
// to enable remote debugging of the DremioDaemon running in YARN container
public static final String DEBUG_YARN_ENABLED = "debug.yarnremote.enabled";
public static final String DEBUG_ENABLED_BOOL = "debug.enabled";
public static final String DEBUG_PREPOPULATE_BOOL = "debug.prepopulate";
public static final String DEBUG_AUTOPORT_BOOL = "debug.autoPort";
public static final String DEBUG_SINGLE_NODE_BOOL = "debug.singleNode";
public static final String DEBUG_ALLOW_TEST_APIS_BOOL = "debug.allowTestApis";
public static final String DEBUG_USE_MEMORY_STRORAGE_BOOL = "debug.useMemoryStorage";
public static final String DEBUG_FORCE_REMOTE_BOOL = "debug.forceRemote";
public static final String DEBUG_ADD_DEFAULT_USER = "debug.addDefaultUser";
public static final String DEBUG_ALLOW_NEWER_KVSTORE = "debug.allowNewerKVStore";
public static final String DEBUG_DISABLE_MASTER_ELECTION_SERVICE_BOOL = "debug.master.election.disabled";
public static final String DEBUG_DIST_ASYNC_ENABLED = "debug.dist.async.enabled";
public static final String DEBUG_DIST_CACHING_ENABLED = "debug.dist.caching.enabled";
public static final String DEBUG_DIST_MAX_CACHE_SPACE_PERCENT = "debug.dist.max.cache.space.percent";
public static final String DEBUG_UPLOADS_ASYNC_ENABLED = "debug.uploads.async.enabled";
public static final String DEBUG_SUPPORT_ASYNC_ENABLED = "debug.support.async.enabled";
public static final String DEBUG_JOBS_ASYNC_ENABLED = "debug.results.async.enabled";
public static final String DEBUG_SCRATCH_ASYNC_ENABLED = "debug.scratch.async.enabled";
public static final String DEBUG_DOWNLOAD_ASYNC_ENABLED = "debug.download.async.enabled";
public static final String DEBUG_METADATA_ASYNC_ENABLED = "debug.metadata.async.enabled";
public static final String DEBUG_LOGS_ASYNC_ENABLED = "debug.logs.async.enabled";
public static final String DEBUG_DIST_S3_FILE_STATUS_CHECK = "debug.dist.s3_file_status_check.enabled";
public static final String FABRIC_MEMORY_RESERVATION = "services.fabric.memory.reservation";
public static final String SSL_ENABLED = "enabled";
public static final String SSL_KEY_STORE_TYPE = "keyStoreType";
public static final String SSL_KEY_STORE_PATH = "keyStore";
public static final String SSL_KEY_STORE_PASSWORD = "keyStorePassword";
public static final String SSL_KEY_PASSWORD = "keyPassword";
public static final String SSL_TRUST_STORE_TYPE = "trustStoreType";
public static final String SSL_TRUST_STORE_PATH = "trustStore";
public static final String SSL_TRUST_STORE_PASSWORD = "trustStorePassword";
public static final String SSL_DISABLE_HOST_VERIFICATION = "disableHostVerification";
public static final String SSL_AUTO_GENERATED_CERTIFICATE = "auto-certificate.enabled";
// web SSL configuration
public static final String WEB_SSL_PREFIX = "services.coordinator.web.ssl.";
// datastore
public static final String DATASTORE_TYPE = "services.datastore.type";
public static final String DATASTORE_CONFIG = "services.datastore.config";
// liveness
public static final String LIVENESS_ENABLED = "services.web-admin.enabled";
public static final String LIVENESS_PORT = "services.web-admin.port";
public static final String LIVENESS_HOST = "services.web-admin.host";
// yarn watchdog
public static final String POLL_TIMEOUT_MS = "provisioning.yarn.watchdog.poll.timeout";
public static final String POLL_INTERVAL_MS = "provisioning.yarn.watchdog.poll.interval";
public static final String MISSED_POLLS_BEFORE_KILL = "provisioning.yarn.watchdog.missed.polls.before.kill";
public static final String MAX_KILL_ATTEMPTS = "provisioning.yarn.watchdog.max.kill.attempts";
public static final String KILL_REATTEMPT_INTERVAL_MS = "provisioning.yarn.watchdog.kill.reattempt.interval";
public static final String YARN_CERTIFICATE_VALIDATION_ENABLED = "provisioning.yarn.nodemanager.certificate-validation.enabled";
public static final String REMOTE_DATASTORE_RPC_TIMEOUT_SECS = "debug.remote.datastore.rpc_timeout_seconds";
private final Config unresolved;
private final Config reference;
private final SabotConfig sabot;
private final String thisNode;
private final boolean isMasterlessEnabled;
/**
* We maintain both the reference and the unresolved data so any withValue layering can be done against unresolved values.
* @param unresolved
* @param reference
*/
private DremioConfig(SabotConfig sabot, Config unresolved, Config reference, String thisNode){
super(inverseMerge(unresolved, reference));
this.unresolved = unresolved;
this.reference = reference;
this.sabot = sabot;
this.thisNode = thisNode;
this.isMasterlessEnabled = Boolean.getBoolean("dremio_masterless");
check();
}
private void check(){
final Config inner = getInnerConfig();
final Config ref = reference.resolve();
// make sure types are right
inner.checkValid(ref);
// make sure we don't have any extra paths. these are typically typos.
List<String> invalidPaths = new ArrayList<>();
for(Entry<String, ConfigValue> entry : inner.entrySet()){
if(!ref.hasPath(entry.getKey())){
invalidPaths.add(entry.getKey());
}
}
if(!invalidPaths.isEmpty()){
StringBuilder sb = new StringBuilder();
sb.append("Failure reading configuration file. The following properties were invalid:\n");
for(String s : invalidPaths){
sb.append("\t");
sb.append(s);
sb.append("\n");
}
throw new RuntimeException(sb.toString());
}
}
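// Illustrative failure mode (not part of the original source): a misspelled key in
// dremio.conf, e.g.
//   services.coordinator.web.prt: 9047
// is not silently ignored; check() rejects it with "Failure reading configuration
// file. The following properties were invalid: ..." because the path does not exist
// in dremio-reference.conf.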
private static String determineNode(){
try (TimedBlock bh = Timer.time("getCanonicalHostName")) {
return InetAddress.getLocalHost().getCanonicalHostName();
} catch (UnknownHostException ex) {
throw new RuntimeException("Failure retrieving hostname from node. Check hosts file.", ex);
}
}
@Override
public DremioConfig withValue(String path, ConfigValue value) {
return new DremioConfig(sabot, unresolved.withValue(path, value), reference, thisNode);
}
public DremioConfig withSabotValue(String path, ConfigValue value) {
return new DremioConfig(sabot.withValue(path, value), unresolved, reference, thisNode);
}
public DremioConfig withSabotValue(String path, Object value) {
return withSabotValue(path, ConfigValueFactory.fromAnyRef(value));
}
public SabotConfig getSabotConfig(){
return sabot;
}
public boolean isMasterlessEnabled() {
return isMasterlessEnabled;
}
private static Config inverseMerge(Config userConfig, Config fallback){
return userConfig.withFallback(fallback).resolve();
}
public DremioConfig withValue(String path, Object value) {
return withValue(path, ConfigValueFactory.fromAnyRef(value));
}
public URI getURI(String path){
try {
return new URI(getString(path));
} catch (URISyntaxException e) {
throw Throwables.propagate(e);
}
}
public static DremioConfig create() {
return create(null);
}
public static DremioConfig create(final URL userConfigPath) {
return create(userConfigPath, SabotConfig.create());
}
public static DremioConfig create(final URL userConfigPath, SabotConfig sabotConfig) {
Config reference = null;
final ClassLoader[] classLoaders = ClasspathHelper.classLoaders();
for (ClassLoader classLoader : classLoaders) {
final URL configUrl = classLoader.getResource(REFERENCE_CONFIG);
if(configUrl == null){
continue;
}
Preconditions.checkArgument(reference == null, "Attempted to load more than one reference configuration.");
reference = ConfigFactory.parseResources(classLoader, REFERENCE_CONFIG);
}
Preconditions.checkNotNull(reference, "Unable to find the reference configuration.");
Config userConfig = null;
if(userConfigPath == null){
for (ClassLoader classLoader : classLoaders) {
final URL configUrl = classLoader.getResource(DEFAULT_USER_CONFIG);
if(configUrl == null){
continue;
}
Preconditions.checkArgument(userConfig == null, "Attempted to load more than one user configuration.");
userConfig = ConfigFactory.parseResources(classLoader, DEFAULT_USER_CONFIG);
}
} else {
userConfig = ConfigFactory.parseURL(userConfigPath, ConfigParseOptions.defaults().setAllowMissing(false));
}
final Config effective;
if(userConfig != null){
effective = userConfig;
} else {
effective = reference;
}
final Config skinned =
applySystemProperties(
applyLegacySystemProperties(effective),
reference);
return new DremioConfig(sabotConfig, skinned, reference, determineNode());
}
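// Illustrative precedence note (inferred from create() above, not a statement from the
// original source): a key such as services.coordinator.web.port is resolved from, in
// order, a JVM system property (-Dservices.coordinator.web.port=9048), then the user
// dremio.conf (or the explicit userConfigPath), then the bundled dremio-reference.conf.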
private static Config setSystemProperty(Config config, String sysProp, String configProp){
String systemProperty = System.getProperty(sysProp);
if(systemProperty != null) {
config = config.withValue(configProp, ConfigValueFactory.fromAnyRef(systemProperty));
logger.info("Applying provided leagcy system property to config: -D{}={}", configProp, systemProperty);
}
return config;
}
/**
* Remove this once all scripts stop referencing these old properties.
*/
@Deprecated
private static Config applyLegacySystemProperties(Config config){
// legacy stuff for now.
config = setSystemProperty(config, "dremd.write", LOCAL_WRITE_PATH_STRING);
config = setSystemProperty(config, "dremio_autoPort", DEBUG_AUTOPORT_BOOL);
config = setSystemProperty(config, "dac_prepopulate", DEBUG_PREPOPULATE_BOOL);
config = setSystemProperty(config, "dremio_allowTestApis", DEBUG_ALLOW_TEST_APIS_BOOL);
config = setSystemProperty(config, "dremd.localPort", SERVER_PORT_INT);
config = setSystemProperty(config, "dremd.httpPort", WEB_PORT_INT);
if("LOCAL".equalsIgnoreCase(System.getProperty("dremd.mode"))){
config = config.withValue(DEBUG_SINGLE_NODE_BOOL, ConfigValueFactory.fromAnyRef(true));
logger.info("Applying provided leagcy system property to config: -Ddremd.mode=LOCAL");
}
return config;
}
public String getThisNode(){
return thisNode;
}
private static Config applySystemProperties(Config config, Config reference){
for (Entry<String, ConfigValue> entry : reference.entrySet()) {
String property = System.getProperty(entry.getKey());
if (property != null && !property.isEmpty()) {
// hack to deal with array of strings
if (property.startsWith("[") && property.endsWith("]")) {
property = property.substring(1, property.length()-1);
if (property.trim().isEmpty()) {
continue;
}
String[] strings = property.split(",");
if (strings != null && strings.length > 0) {
List<String> listStrings = new ArrayList<>();
for (String str : strings) {
listStrings.add(str.trim());
}
config = config.withValue(entry.getKey(), ConfigValueFactory.fromAnyRef(listStrings));
}
} else {
config = config.withValue(entry.getKey(), ConfigValueFactory.fromAnyRef(property));
}
logger.info("Applying provided system property to config: -D{}={}", entry.getKey(), property);
}
}
return config;
}
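// Illustrative example (assumed list-valued entry and values): a list-typed reference
// key such as services.executor.cache.path.fs can be overridden with a bracketed
// system property, e.g.
//   -Dservices.executor.cache.path.fs="[/mnt/cache1,/mnt/cache2]"
// which the bracket branch above splits and trims into a two-element string list
// before applying it to the config.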
public static Path getPluginsRootPath() {
final String pluginsDir = System.getProperty(PLUGINS_ROOT_PATH_PROPERTY);
if (pluginsDir != null) {
return Paths.get(pluginsDir);
}
logger.debug("The system property {} is not set", PLUGINS_ROOT_PATH_PROPERTY);
return Optional.ofNullable(System.getenv("DREMIO_HOME"))
.map(v -> Paths.get(v, "plugins"))
.orElseGet(() -> {
logger.debug("The environment variable DREMIO_HOME is not set.");
return Paths.get(".");
});
}
}
| [
"\"DREMIO_HOME\""
]
| []
| [
"DREMIO_HOME"
]
| [] | ["DREMIO_HOME"] | java | 1 | 0 | |
python/ray/tune/ray_trial_executor.py | # coding: utf-8
import copy
import inspect
import random
from collections import deque
from enum import Enum
from functools import partial
import logging
import os
import time
import traceback
from contextlib import contextmanager
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
)
import ray
from ray.exceptions import GetTimeoutError
from ray.tune.error import AbortTrialExecution, TuneError
from ray.tune.logger import NoopLogger
from ray.tune.result import TRIAL_INFO, STDOUT_FILE, STDERR_FILE
from ray.tune.utils.placement_groups import PlacementGroupManager, get_tune_pg_prefix
from ray.tune.utils.trainable import TrainableUtil
from ray.tune.trial import Trial, Checkpoint, Location, TrialInfo
from ray.tune.trial_executor import TrialExecutor
from ray.tune.utils import warn_if_slow
from ray.tune.utils.resource_updater import ResourceUpdater
from ray.util import log_once
from ray.util.annotations import DeveloperAPI
from ray.util.placement_group import remove_placement_group, PlacementGroup
logger = logging.getLogger(__name__)
DEFAULT_GET_TIMEOUT = 60.0 # seconds
class _ActorClassCache:
"""Caches actor classes.
ray.remote is a registration call. It sends the serialized object to the
key-value store (Redis), from which it is fetched by an arbitrary worker
later. Registration does not use any Ray scheduling resources.
Later, class.remote() actually creates the remote actor. The
actor will be instantiated on some arbitrary machine,
according to the underlying Ray scheduler.
Without this cache, you would register the same serialized object
over and over again. Naturally, since redis doesn’t spill to disk,
this can easily nuke the redis instance (and basically blow up Ray).
This cache instead allows us to register once and only once.
Note that we assume there can be multiple trainables in the
system at once.
"""
def __init__(self):
self._cache = {}
def get(self, trainable_cls):
"""Gets the wrapped trainable_cls, otherwise calls ray.remote."""
runtime_env = {"env_vars": {"TUNE_ORIG_WORKING_DIR": os.getcwd()}}
if trainable_cls not in self._cache:
remote_cls = ray.remote(runtime_env=runtime_env)(trainable_cls)
self._cache[trainable_cls] = remote_cls
return self._cache[trainable_cls]
_class_cache = _ActorClassCache()
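# Illustrative sketch (hypothetical trainable class, not part of this module): repeated
# lookups for the same class reuse a single ray.remote registration.
#   remote_cls = _class_cache.get(MyTrainable)        # registers MyTrainable once
#   remote_cls_again = _class_cache.get(MyTrainable)  # cache hit, no re-registration
#   assert remote_cls is remote_cls_again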
class _LocalWrapper:
def __init__(self, result):
self._result = result
def unwrap(self):
"""Returns the wrapped result."""
return self._result
def post_stop_cleanup(future, pg):
"""Things to be done after a trial is stopped."""
assert isinstance(pg, PlacementGroup)
try:
# This should not be blocking as
# we are only here when triggered.
ray.get(future, timeout=0)
except GetTimeoutError:
if log_once("tune_trial_cleanup_timeout"):
logger.error(
"Timed out when trying to stop the Ray actor gracefully. "
"Consider making `stop` a faster operation."
)
except Exception:
if log_once("tune_trial_cleanup_exception"):
logger.error(
f"An exception occurred when trying to stop the Ray actor:"
f"{traceback.format_exc()}"
)
finally:
remove_placement_group(pg)
class _TrialCleanup:
"""Responsible for triggering force cleanup of remote actors,
without waiting for `Trainable.stop()` to finish.
Only instantiated when `TUNE_FORCE_TRIAL_CLEANUP_S` is set up.
"""
def __init__(self, force_cleanup):
assert force_cleanup
self._force_cleanup = force_cleanup
self._future_to_insert_time = deque()
def add(self, future):
self._future_to_insert_time.append((future, time.time()))
def get_next(self):
"""Get the next future that is eligible to be cleaned up forcibly."""
if (
len(self._future_to_insert_time) > 0
and self._future_to_insert_time[0][1] + self._force_cleanup < time.time()
):
return self._future_to_insert_time.popleft()
else:
return None
def is_empty(self):
return len(self._future_to_insert_time) == 0
def noop_logger_creator(config, logdir):
# Set the working dir in the remote process, for user file writes
os.makedirs(logdir, exist_ok=True)
if not ray.worker._mode() == ray.worker.LOCAL_MODE:
os.chdir(logdir)
return NoopLogger(config, logdir)
class ExecutorEventType(Enum):
"""The executor event type.
Some of the events are internal events to executor while others
are handled by runner."""
NO_RUNNING_TRIAL_TIMEOUT = 1
PG_READY = 2
TRAINING_RESULT = 3
SAVING_RESULT = 4
RESTORING_RESULT = 5
STOP_RESULT = 6 # Internally to executor only.
ERROR = 7 # This is to signal to TrialRunner that there is an error.
YIELD = 8 # Yielding back to TrialRunner's main event loop.
class ExecutorEvent:
"""A struct that describes the event to be processed by TrialRunner."""
def __init__(
self,
event_type: ExecutorEventType,
trial: Optional[Trial] = None,
result: Optional[Any] = None,
):
self.type = event_type
self.trial = trial
self.result = result
def __repr__(self):
return f"[{self.type}] for {self.trial}"
@DeveloperAPI
class RayTrialExecutor(TrialExecutor):
"""An implementation of TrialExecutor based on Ray."""
def __init__(
self,
reuse_actors: bool = False,
result_buffer_length: Optional[int] = None,
refresh_period: Optional[float] = None,
):
super(RayTrialExecutor, self).__init__()
# future --> (type, trial/pg)
self._futures = {}
force_trial_cleanup = int(os.environ.get("TUNE_FORCE_TRIAL_CLEANUP_S", "0"))
self._get_next_event_wait = int(
os.environ.get("TUNE_GET_EXECUTOR_EVENT_WAIT_S", "5")
)
if force_trial_cleanup:
self._trial_cleanup = _TrialCleanup(force_trial_cleanup)
else:
self._trial_cleanup = None
self._resource_updater = ResourceUpdater(refresh_period)
self._has_cleaned_up_pgs = False
self._reuse_actors = reuse_actors
# The maxlen will be updated when `set_max_pending_trials()` is called
self._cached_actor_pg = deque(maxlen=1)
self._pg_manager = PlacementGroupManager(prefix=get_tune_pg_prefix())
self._staged_trials = set()
self._trial_just_finished = False
self._trial_just_finished_before = False
self.last_pg_recon = 0
self.pg_recon_interval = float(
os.environ.get("TUNE_PLACEMENT_GROUP_RECON_INTERVAL", "5")
)
self._buffer_length = result_buffer_length or int(
os.getenv("TUNE_RESULT_BUFFER_LENGTH", 1)
)
self._buffer_min_time_s = float(os.getenv("TUNE_RESULT_BUFFER_MIN_TIME_S", 0.0))
self._buffer_max_time_s = float(
os.getenv("TUNE_RESULT_BUFFER_MAX_TIME_S", 100.0)
)
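# Illustrative configuration sketch (values assumed): exporting, before tune.run(),
#   TUNE_FORCE_TRIAL_CLEANUP_S=600
#   TUNE_RESULT_BUFFER_LENGTH=10
# enables forced actor cleanup 600s after a stop future is issued and buffers up to
# 10 results per remote train call; the remaining knobs keep the defaults read above.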
def set_max_pending_trials(self, max_pending: int) -> None:
if len(self._cached_actor_pg) > 0:
logger.warning(
"Cannot update maximum number of queued actors for reuse "
"during a run."
)
else:
self._cached_actor_pg = deque(maxlen=max_pending)
self._pg_manager.set_max_staging(max_pending)
def _stage_and_update_status(self, trials: Iterable[Trial]):
"""Check and update statuses of scheduled placement groups.
Stages placement groups of all trials.
"""
if not self._has_cleaned_up_pgs:
# Clean up existing placement groups after the tuning run's
# step() method has been triggered for the first time
self._pg_manager.cleanup_existing_pg()
self._has_cleaned_up_pgs = True
for trial in trials:
if trial.status not in (Trial.PENDING, Trial.PAUSED):
continue
if trial in self._staged_trials:
continue
if self._pg_manager.trial_in_use(trial):
continue
if not self._pg_manager.stage_trial_pg(trial):
# Break if we reached the limit of pending placement groups.
break
self._staged_trials.add(trial)
self._pg_manager.update_status()
def get_staged_trial(self):
"""Get a trial whose placement group was successfully staged.
Can also return None if no trial is available.
Returns:
Trial object or None.
"""
# TODO(xwjiang): This method should consider `self._cached_actor_pg`.
for trial in self._staged_trials:
if self._pg_manager.has_ready(trial):
return trial
return None
def _setup_remote_runner(self, trial):
trial.init_logdir()
# We checkpoint metadata here to try mitigating logdir duplication
self._trials_to_cache.add(trial)
logger_creator = partial(noop_logger_creator, logdir=trial.logdir)
if len(self._cached_actor_pg) > 0:
assert self._reuse_actors
existing_runner, pg = self._cached_actor_pg.popleft()
logger.debug(f"Trial {trial}: Reusing cached runner " f"{existing_runner}")
trial.set_runner(existing_runner)
if pg:
self._pg_manager.assign_cached_pg(pg, trial)
if not self.reset_trial(
trial, trial.config, trial.experiment_tag, logger_creator
):
raise AbortTrialExecution(
"Trainable runner reuse requires reset_config() to be "
"implemented and return True."
)
return existing_runner
trainable_cls = trial.get_trainable_cls()
if not trainable_cls:
raise AbortTrialExecution(
f"Invalid trainable: {trial.trainable_name}. If you passed "
f"a string, make sure the trainable was registered before."
)
_actor_cls = _class_cache.get(trainable_cls)
if not self._pg_manager.has_ready(trial):
return None
full_actor_class = self._pg_manager.get_full_actor_cls(trial, _actor_cls)
# Clear the Trial's location (to be updated later on result)
# since we don't know where the remote runner is placed.
trial.set_location(Location())
logger.debug("Trial %s: Setting up new remote runner.", trial)
# Logging for trials is handled centrally by TrialRunner, so
# configure the remote runner to use a noop-logger.
trial_config = copy.deepcopy(trial.config)
trial_config[TRIAL_INFO] = TrialInfo(trial)
stdout_file, stderr_file = trial.log_to_file
trial_config[STDOUT_FILE] = stdout_file
trial_config[STDERR_FILE] = stderr_file
kwargs = {
"config": trial_config,
"logger_creator": logger_creator,
}
if trial.uses_cloud_checkpointing:
# We keep these kwargs separate for backwards compatibility
# with trainables that don't provide these keyword arguments
kwargs["remote_checkpoint_dir"] = trial.remote_checkpoint_dir
kwargs["sync_function_tpl"] = trial.sync_function_tpl
# Throw a meaningful error if trainable does not use the
# new API
sig = inspect.signature(trial.get_trainable_cls())
try:
sig.bind_partial(**kwargs)
except Exception as e:
raise RuntimeError(
"Your trainable class does not accept a "
"`remote_checkpoint_dir` or `sync_function_tpl` argument "
"in its constructor, but you've passed a "
"`upload_dir` to your SyncConfig. Without accepting "
"these parameters and passing them to the base trainable "
"constructor in the init call, cloud checkpointing is "
"effectively disabled. To resolve this issue, add the "
"parameters to your trainable class constructor or "
"disable cloud checkpointing by setting `upload_dir=None`."
) from e
with self._change_working_directory(trial):
return full_actor_class.remote(**kwargs)
def _train(self, trial):
"""Start one iteration of training and save remote id."""
if self._find_future(trial):
logger.debug(
"Trial {} already has a queued future. Skipping this "
"`train` call. This may occur if a trial has "
"been unpaused within a scheduler callback.".format(str(trial))
)
return
assert trial.status == Trial.RUNNING, trial.status
buffer_time_s = max(
self._buffer_min_time_s,
min(self._buffer_max_time_s, len(self._futures) // 10),
)
with self._change_working_directory(trial):
buffer_length = self._buffer_length
if buffer_length > 1 and trial.checkpoint_at_end:
# If a trial checkpoint can be triggered externally,
# it is not safe to buffer results.
if log_once("trial_executor_buffer_checkpoint"):
logger.warning(
"Disabling buffered training as you passed "
"`checkpoint_at_end` to `tune.run()`."
)
buffer_length = 1
if buffer_length > 1:
if trial.checkpoint_freq > 0:
buffer_length = min(buffer_length, trial.checkpoint_freq)
remote = trial.runner.train_buffered.remote(
buffer_time_s, buffer_length
)
else:
remote = trial.runner.train.remote()
# Local Mode
if isinstance(remote, dict):
remote = _LocalWrapper(remote)
self._futures[remote] = (ExecutorEventType.TRAINING_RESULT, trial)
trial_item = self._find_future(trial)
assert len(trial_item) < 2, trial_item
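# Illustrative arithmetic (values assumed, derived from the buffering logic above): with
# TUNE_RESULT_BUFFER_LENGTH=10, the default min/max buffer times and 25 in-flight
# futures, buffer_time_s = max(0.0, min(100.0, 25 // 10)) = 2 seconds, and a trial with
# checkpoint_freq=4 buffers at most min(10, 4) = 4 results per train_buffered call.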
def _start_trial(self, trial: Trial) -> bool:
"""Starts trial and restores last result if trial was paused.
Args:
trial: The trial to start.
Returns:
True if trial was started successfully, False otherwise.
See `RayTrialExecutor.restore` for possible errors raised.
"""
self.set_status(trial, Trial.PENDING)
runner = self._setup_remote_runner(trial)
if not runner:
return False
trial.set_runner(runner)
self.restore(trial)
self.set_status(trial, Trial.RUNNING)
if trial in self._staged_trials:
self._staged_trials.remove(trial)
if not trial.is_restoring:
self._train(trial)
return True
def _stop_trial(self, trial: Trial, error=False, error_msg=None):
"""Stops this trial.
Stops this trial, releasing all allocating resources. If stopping the
trial fails, the run will be marked as terminated in error, but no
exception will be thrown.
Args:
error: Whether to mark this trial as terminated in error.
error_msg: Optional error message.
"""
self.set_status(trial, Trial.ERROR if error else Trial.TERMINATED)
self._trial_just_finished = True
trial.set_location(Location())
try:
trial.write_error_log(error_msg)
if hasattr(trial, "runner") and trial.runner:
if (
not error
and self._reuse_actors
and (
len(self._cached_actor_pg)
< (self._cached_actor_pg.maxlen or float("inf"))
)
):
logger.debug("Reusing actor for %s", trial.runner)
# Move PG into cache (disassociate from trial)
pg = self._pg_manager.cache_trial_pg(trial)
if pg:
# True if a placement group was replaced
self._cached_actor_pg.append((trial.runner, pg))
should_destroy_actor = False
else:
# False if no placement group was replaced. This should
# only be the case if there are no more trials with
# this placement group factory to run
logger.debug(
"Could not cache the actor of trial %s for "
"reuse, as there are no pending trials "
"requiring its resources.",
trial,
)
should_destroy_actor = True
else:
should_destroy_actor = True
if should_destroy_actor:
logger.debug("Trial %s: Destroying actor.", trial)
with self._change_working_directory(trial):
future = trial.runner.stop.remote()
pg = self._pg_manager.remove_from_in_use(trial)
self._futures[future] = (ExecutorEventType.STOP_RESULT, pg)
if self._trial_cleanup: # force trial cleanup within a deadline
self._trial_cleanup.add(future)
if trial in self._staged_trials:
self._staged_trials.remove(trial)
except Exception:
logger.exception("Trial %s: Error stopping runner.", trial)
self.set_status(trial, Trial.ERROR)
finally:
trial.set_runner(None)
def start_trial(self, trial: Trial) -> bool:
"""Starts the trial.
Will not return resources if trial repeatedly fails on start.
Args:
trial: Trial to be started.
Returns:
True if the remote runner has been started. False if trial was
not started (e.g. because of lacking resources/pending PG).
"""
try:
return self._start_trial(trial)
except AbortTrialExecution:
logger.exception("Trial %s: Error starting runner, aborting!", trial)
time.sleep(2)
error_msg = traceback.format_exc()
self._stop_trial(trial, error=True, error_msg=error_msg)
return False
except Exception:
logger.exception("Trial %s: Unexpected error starting runner.", trial)
time.sleep(2)
error_msg = traceback.format_exc()
self._stop_trial(trial, error=True, error_msg=error_msg)
# Note that we don't return the resources, since they may
# have been lost. TODO(ujvl): is this the right thing to do?
return False
def _find_future(self, trial):
out = [rid for rid, t in self._futures.items() if t[1] is trial]
assert (
len(out) <= 1
), "Expecting one future for any given trial at any given time."
return out
def stop_trial(
self, trial: Trial, error: bool = False, error_msg: Optional[str] = None
) -> None:
prior_status = trial.status
self._stop_trial(trial, error=error, error_msg=error_msg)
if prior_status == Trial.RUNNING:
logger.debug("Trial %s: Returning resources.", trial)
out = self._find_future(trial)
for result_id in out:
self._futures.pop(result_id)
def continue_training(self, trial: Trial) -> None:
"""Continues the training of this trial."""
self._train(trial)
def reset_trial(
self,
trial: Trial,
new_config: Dict,
new_experiment_tag: str,
logger_creator: Optional[Callable[[Dict], "ray.tune.Logger"]] = None,
) -> bool:
"""Tries to invoke `Trainable.reset()` to reset trial.
Args:
trial: Trial to be reset.
new_config: New configuration for Trial trainable.
new_experiment_tag: New experiment name for trial.
logger_creator: Function that instantiates a logger on the
actor process.
Returns:
True if `reset_config` is successful else False.
"""
trial.set_experiment_tag(new_experiment_tag)
trial.set_config(new_config)
trainable = trial.runner
# Pass magic variables
extra_config = copy.deepcopy(new_config)
extra_config[TRIAL_INFO] = TrialInfo(trial)
stdout_file, stderr_file = trial.log_to_file
extra_config[STDOUT_FILE] = stdout_file
extra_config[STDERR_FILE] = stderr_file
with self._change_working_directory(trial):
with warn_if_slow("reset"):
try:
reset_val = ray.get(
trainable.reset.remote(extra_config, logger_creator),
timeout=DEFAULT_GET_TIMEOUT,
)
except GetTimeoutError:
logger.exception("Trial %s: reset timed out.", trial)
return False
return reset_val
def has_resources_for_trial(self, trial: Trial) -> bool:
"""Returns whether there are resources available for this trial.
This will return True as long as we didn't reach the maximum number
of pending trials. It will also return True if the trial placement
group is already staged.
Args:
trial: Trial object which should be scheduled.
Returns:
boolean
"""
return (
trial in self._staged_trials
or self._pg_manager.can_stage()
or self._pg_manager.has_ready(trial, update=True)
)
def debug_string(self) -> str:
"""Returns a human readable message for printing to the console."""
total_resources = self._pg_manager.occupied_resources()
return self._resource_updater.debug_string(total_resources)
def on_step_begin(self, trials: List[Trial]) -> None:
"""Before step() is called, update the available resources."""
self._resource_updater.update_avail_resources()
self._trial_just_finished_before = self._trial_just_finished
self._trial_just_finished = False
def on_step_end(self, trials: List[Trial]) -> None:
self._do_force_trial_cleanup()
if time.time() > self.last_pg_recon + self.pg_recon_interval:
# Only do this every now and then - usually the placement groups
# should not get out of sync, and calling this often is inefficient
self._pg_manager.reconcile_placement_groups(trials)
self.last_pg_recon = time.time()
self._pg_manager.cleanup()
def _do_force_trial_cleanup(self) -> None:
if self._trial_cleanup:
while True:
next_future_to_clean = self._trial_cleanup.get_next()
if not next_future_to_clean:
break
if next_future_to_clean in self._futures.keys():
_, pg = self._futures.pop(next_future_to_clean)
post_stop_cleanup(next_future_to_clean, pg)
else:
# This just means that before the deadline reaches,
# the future is already cleaned up.
pass
def force_reconcilation_on_next_step_end(self) -> None:
self.last_pg_recon = -float("inf")
def save(
self,
trial: Trial,
storage: str = Checkpoint.PERSISTENT,
result: Optional[Dict] = None,
) -> Checkpoint:
"""Saves the trial's state to a checkpoint asynchronously.
Args:
trial: The trial to be saved.
storage: Where to store the checkpoint. Defaults to
PERSISTENT.
result: The state of this trial as a dictionary to be saved.
If result is None, the trial's last result will be used.
Returns:
Checkpoint object, or None if an Exception occurs.
"""
logger.debug(f"saving trial {trial}")
result = result or trial.last_result
with self._change_working_directory(trial):
if storage == Checkpoint.MEMORY:
value = trial.runner.save_to_object.remote()
checkpoint = Checkpoint(storage, value, result)
trial.on_checkpoint(checkpoint)
else:
value = trial.runner.save.remote()
checkpoint = Checkpoint(storage, value, result)
trial.saving_to = checkpoint
self._futures[value] = (ExecutorEventType.SAVING_RESULT, trial)
return checkpoint
def restore(self, trial: Trial) -> None:
"""Restores training state from a given model checkpoint.
Args:
trial: The trial to be restored.
Raises:
RuntimeError: This error is raised if no runner is found.
AbortTrialExecution: This error is raised if the trial is
ineligible for restoration, given the Tune input arguments.
"""
checkpoint = trial.checkpoint
if checkpoint.value is None:
return
if trial.runner is None:
raise RuntimeError(
"Trial {}: Unable to restore - no runner found.".format(trial)
)
value = checkpoint.value
if checkpoint.storage == Checkpoint.MEMORY:
logger.debug("Trial %s: Attempting restore from object", trial)
# Note that we don't store the remote since in-memory checkpoints
# don't guarantee fault tolerance and don't need to be waited on.
with self._change_working_directory(trial):
trial.runner.restore_from_object.remote(value)
else:
logger.debug("Trial %s: Attempting restore from %s", trial, value)
if trial.uses_cloud_checkpointing or not trial.sync_on_checkpoint:
# If using cloud checkpointing, trial will get cp from cloud.
# If not syncing to driver, assume it has access to the cp
# on the local fs.
with self._change_working_directory(trial):
remote = trial.runner.restore.remote(value)
elif trial.sync_on_checkpoint:
# This provides FT backwards compatibility in the
# case where no cloud checkpoints are provided.
logger.debug("Trial %s: Reading checkpoint into memory", trial)
obj = TrainableUtil.checkpoint_to_object(value)
with self._change_working_directory(trial):
remote = trial.runner.restore_from_object.remote(obj)
else:
raise AbortTrialExecution(
"Pass in `sync_on_checkpoint=True` for driver-based trial"
"restoration. Pass in an `upload_dir` for remote "
"storage-based restoration"
)
self._futures[remote] = (ExecutorEventType.RESTORING_RESULT, trial)
trial.restoring_from = checkpoint
def export_trial_if_needed(self, trial: Trial) -> Dict:
"""Exports model of this trial based on trial.export_formats.
Return:
A dict that maps ExportFormats to successfully exported models.
"""
if trial.export_formats and len(trial.export_formats) > 0:
with self._change_working_directory(trial):
return ray.get(
trial.runner.export_model.remote(trial.export_formats),
timeout=DEFAULT_GET_TIMEOUT,
)
return {}
def has_gpus(self) -> bool:
return self._resource_updater.get_num_gpus() > 0
def cleanup(self, trials: List[Trial]) -> None:
while True:
if self._trial_cleanup and self._trial_cleanup.is_empty():
break
elif not self._trial_cleanup and len(self._futures) == 0:
break
self._do_force_trial_cleanup()
ready, _ = ray.wait(list(self._futures.keys()), timeout=0)
if not ready:
continue
event_type, trial_or_pg = self._futures.pop(ready[0])
if event_type == ExecutorEventType.STOP_RESULT:
post_stop_cleanup(ready[0], trial_or_pg)
self._pg_manager.reconcile_placement_groups(trials)
self._pg_manager.cleanup(force=True)
self._pg_manager.cleanup_existing_pg(block=True)
@contextmanager
def _change_working_directory(self, trial):
"""Context manager changing working directory to trial logdir.
Used in local mode.
For non-local mode it is no-op.
"""
if ray.worker._mode() == ray.worker.LOCAL_MODE:
old_dir = os.getcwd()
try:
os.chdir(trial.logdir)
yield
finally:
os.chdir(old_dir)
else:
yield
def get_next_executor_event(
self, live_trials: Set[Trial], next_trial_exists: bool
) -> ExecutorEvent:
"""Get the next executor event to be processed in TrialRunner.
In case there are multiple events available for handling, the next
event is determined by the following priority:
1. if there is `next_trial_exists`, and if there is cached resources
to use, PG_READY is emitted.
2. if there is `next_trial_exists` and there is no cached resources
to use, wait on pg future and randomized other futures. If multiple
futures are ready, pg future will take priority to be handled first.
3. if there is no `next_trial_exists`, wait on just randomized other
futures.
An example of #3 would be synchronous hyperband. Although there are pgs
ready, the scheduler is holding back scheduling new trials since the
whole band of trials is waiting for the slowest trial to finish. In
this case, we prioritize handling training result to avoid deadlock
situation.
This is a blocking wait with a timeout (specified with env var).
The reason for the timeout is
we still want to print status info periodically in TrialRunner for
better user experience.
The handling of `ExecutorEvent.STOP_RESULT` is purely internal to
RayTrialExecutor itself. All the other future results are handled by
TrialRunner.
In the future we may want to do most of the handling of
`ExecutorEvent.RESTORING_RESULT` and `SAVING_RESULT` in
RayTrialExecutor itself and only notify TrialRunner to invoke
corresponding callbacks. This view is more consistent with our goal
of TrialRunner responsible for external facing Trial state transition,
while RayTrialExecutor responsible for internal facing transitions,
namely, `is_saving`, `is_restoring` etc.
Also you may notice that the boundary between RayTrialExecutor and
PlacementGroupManager right now is really blurry. This will be
improved once we move to an ActorPool abstraction.
`next_trial_exists` means that there is a trial to run - prioritize
returning PG_READY in this case.
"""
# First update status of staged placement groups
self._stage_and_update_status(live_trials)
while True:
###################################################################
# when next_trial_exists and there are cached resources
###################################################################
# There could be existing PGs from either `self._cached_actor_pg`
# or from `self._pg_manager._ready`. If so and if there is indeed
# a next trial to run, we return `PG_READY` future for trial
# runner. The next trial can then be scheduled on this PG.
if next_trial_exists:
if len(self._cached_actor_pg) > 0:
return ExecutorEvent(ExecutorEventType.PG_READY)
# TODO(xwjiang): Expose proper API when we decide to do
# ActorPool abstraction.
if any(len(r) > 0 for r in self._pg_manager._ready.values()):
return ExecutorEvent(ExecutorEventType.PG_READY)
###################################################################
# Prepare for futures to wait
###################################################################
futures_to_wait = list(self._futures.keys())
random.shuffle(futures_to_wait)
if next_trial_exists:
# Only wait for the pg explicitly if there is a next trial to run.
# In that case, handling PG_READY takes precedence over handling other
# events, since we want to place the pending trial ASAP.
futures_to_wait = (
self._pg_manager.get_staging_future_list() + futures_to_wait
)
logger.debug(
f"get_next_executor_event before wait with futures "
f"{futures_to_wait} and "
f"next_trial_exists={next_trial_exists}"
)
ready_futures, _ = ray.wait(
futures_to_wait, num_returns=1, timeout=self._get_next_event_wait
)
###################################################################
# Dealing with no future returned case.
###################################################################
if len(ready_futures) == 0:
if len(self._futures) == 0:
# No running trial and timing out with wait, could be we may
# have insufficient cluster resources that makes tune run
# infeasible.
# TODO: Move InsufficientResourceManager's logic
# to TrialExecutor. It is not Runner's responsibility!
return ExecutorEvent(ExecutorEventType.NO_RUNNING_TRIAL_TIMEOUT)
else:
# Training simply takes a long time; yield control back to the main
# event loop to print progress info etc.
return ExecutorEvent(ExecutorEventType.YIELD)
###################################################################
# If there is future returned.
###################################################################
assert len(ready_futures) == 1
ready_future = ready_futures[0]
###################################################################
# If it is a PG_READY event.
###################################################################
if ready_future not in self._futures.keys():
# This is a ready future.
self._pg_manager.handle_ready_future(ready_future)
return ExecutorEvent(ExecutorEventType.PG_READY)
###################################################################
# non PG_READY event
###################################################################
result_type, trial_or_pg = self._futures.pop(ready_future)
if result_type == ExecutorEventType.STOP_RESULT:
pg = trial_or_pg
post_stop_cleanup(ready_future, pg)
else:
trial = trial_or_pg
assert isinstance(trial, Trial)
try:
future_result = ray.get(ready_future)
# For local mode
if isinstance(future_result, _LocalWrapper):
future_result = future_result.unwrap()
if result_type in (
ExecutorEventType.TRAINING_RESULT,
ExecutorEventType.SAVING_RESULT,
ExecutorEventType.RESTORING_RESULT,
):
logger.debug(f"Returning [{result_type}] for trial {trial}")
return ExecutorEvent(result_type, trial, result=future_result)
else:
raise TuneError(f"Unexpected future type - [{result_type}]")
except Exception as e:
return ExecutorEvent(
ExecutorEventType.ERROR, trial, (e, traceback.format_exc())
)
| []
| []
| [
"TUNE_GET_EXECUTOR_EVENT_WAIT_S",
"TUNE_PLACEMENT_GROUP_RECON_INTERVAL",
"TUNE_RESULT_BUFFER_MIN_TIME_S",
"TUNE_RESULT_BUFFER_LENGTH",
"TUNE_FORCE_TRIAL_CLEANUP_S",
"TUNE_RESULT_BUFFER_MAX_TIME_S"
]
| [] | ["TUNE_GET_EXECUTOR_EVENT_WAIT_S", "TUNE_PLACEMENT_GROUP_RECON_INTERVAL", "TUNE_RESULT_BUFFER_MIN_TIME_S", "TUNE_RESULT_BUFFER_LENGTH", "TUNE_FORCE_TRIAL_CLEANUP_S", "TUNE_RESULT_BUFFER_MAX_TIME_S"] | python | 6 | 0 | |
alttprbot_racetime/config.py | import os
from dataclasses import dataclass
from alttprbot_racetime import handlers
from alttprbot_racetime.core import SahasrahBotRaceTimeBot
from alttprbot_racetime.handlers.core import SahasrahBotCoreHandler
from config import Config as c
@dataclass
class RacetimeBotConfig:
client_id: str
client_secret: str
handler_class: SahasrahBotCoreHandler
bot_class: SahasrahBotRaceTimeBot = SahasrahBotRaceTimeBot
if c.DEBUG:
RACETIME_CATEGORIES = {
'test': RacetimeBotConfig(
client_id=os.environ.get("RACETIME_CLIENT_ID_TEST"),
client_secret=os.environ.get('RACETIME_CLIENT_SECRET_TEST'),
handler_class=handlers.test.GameHandler
)
}
else:
RACETIME_CATEGORIES = {
'alttpr': RacetimeBotConfig(
client_id=os.environ.get("RACETIME_CLIENT_ID_ALTTPR"),
client_secret=os.environ.get('RACETIME_CLIENT_SECRET_ALTTPR'),
handler_class=handlers.alttpr.GameHandler,
),
'contra': RacetimeBotConfig(
client_id=os.environ.get("RACETIME_CLIENT_ID_CONTRA"),
client_secret=os.environ.get('RACETIME_CLIENT_SECRET_CONTRA'),
handler_class=handlers.contra.GameHandler,
),
'ct-jets': RacetimeBotConfig(
client_id=os.environ.get("RACETIME_CLIENT_ID_CTJETS"),
client_secret=os.environ.get('RACETIME_CLIENT_SECRET_CTJETS'),
handler_class=handlers.ctjets.GameHandler,
),
'ff1r': RacetimeBotConfig(
client_id=os.environ.get("RACETIME_CLIENT_ID_FF1R"),
client_secret=os.environ.get('RACETIME_CLIENT_SECRET_FF1R'),
handler_class=handlers.ff1r.GameHandler,
),
# 'sgl': RacetimeBotConfig(
# client_id=os.environ.get("RACETIME_CLIENT_ID_SGL"),
# client_secret=os.environ.get('RACETIME_CLIENT_SECRET_SGL'),
# handler_class=handlers.sgl.GameHandler,
# ),
'sm': RacetimeBotConfig(
client_id=os.environ.get("RACETIME_CLIENT_ID_SM"),
client_secret=os.environ.get('RACETIME_CLIENT_SECRET_SM'),
handler_class=handlers.sm.GameHandler,
),
'smb3r': RacetimeBotConfig(
client_id=os.environ.get("RACETIME_CLIENT_ID_SMB3R"),
client_secret=os.environ.get('RACETIME_CLIENT_SECRET_SMB3R'),
handler_class=handlers.smb3r.GameHandler,
),
'smr': RacetimeBotConfig(
client_id=os.environ.get("RACETIME_CLIENT_ID_SMR"),
client_secret=os.environ.get('RACETIME_CLIENT_SECRET_SMR'),
handler_class=handlers.smr.GameHandler,
),
'smw-hacks': RacetimeBotConfig(
client_id=os.environ.get("RACETIME_CLIENT_ID_SMWHACKS"),
client_secret=os.environ.get('RACETIME_CLIENT_SECRET_SMWHACKS'),
handler_class=handlers.smwhacks.GameHandler,
),
'smz3': RacetimeBotConfig(
client_id=os.environ.get("RACETIME_CLIENT_ID_SMZ3"),
client_secret=os.environ.get('RACETIME_CLIENT_SECRET_SMZ3'),
handler_class=handlers.smz3.GameHandler,
),
'twwr': RacetimeBotConfig(
client_id=os.environ.get("RACETIME_CLIENT_ID_TWWR"),
client_secret=os.environ.get('RACETIME_CLIENT_SECRET_TWWR'),
handler_class=handlers.twwr.GameHandler,
),
'z1r': RacetimeBotConfig(
client_id=os.environ.get("RACETIME_CLIENT_ID_Z1R"),
client_secret=os.environ.get('RACETIME_CLIENT_SECRET_Z1R'),
handler_class=handlers.z1r.GameHandler,
),
'z2r': RacetimeBotConfig(
client_id=os.environ.get("RACETIME_CLIENT_ID_Z2R"),
client_secret=os.environ.get('RACETIME_CLIENT_SECRET_Z2R'),
handler_class=handlers.z2r.GameHandler,
)
}
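# Illustrative sketch (hypothetical category and env vars, not part of the original
# module): registering an additional category follows the same pattern -- add a handler
# module under alttprbot_racetime.handlers, then append an entry such as
#   RACETIME_CATEGORIES['newgame'] = RacetimeBotConfig(
#       client_id=os.environ.get("RACETIME_CLIENT_ID_NEWGAME"),
#       client_secret=os.environ.get('RACETIME_CLIENT_SECRET_NEWGAME'),
#       handler_class=handlers.newgame.GameHandler,
#   )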
| []
| []
| [
"RACETIME_CLIENT_SECRET_CTJETS",
"RACETIME_CLIENT_SECRET_ALTTPR",
"RACETIME_CLIENT_ID_SMWHACKS",
"RACETIME_CLIENT_SECRET_TWWR",
"RACETIME_CLIENT_SECRET_CONTRA",
"RACETIME_CLIENT_ID_CTJETS",
"RACETIME_CLIENT_SECRET_SMZ3",
"RACETIME_CLIENT_SECRET_SMB3R",
"RACETIME_CLIENT_SECRET_SMR",
"RACETIME_CLIENT_ID_Z1R",
"RACETIME_CLIENT_SECRET_SGL",
"RACETIME_CLIENT_SECRET_FF1R",
"RACETIME_CLIENT_ID_SM",
"RACETIME_CLIENT_SECRET_Z2R",
"RACETIME_CLIENT_ID_SMB3R",
"RACETIME_CLIENT_SECRET_TEST",
"RACETIME_CLIENT_ID_TEST",
"RACETIME_CLIENT_ID_SMZ3",
"RACETIME_CLIENT_ID_SMR",
"RACETIME_CLIENT_SECRET_SM",
"RACETIME_CLIENT_ID_SGL",
"RACETIME_CLIENT_ID_TWWR",
"RACETIME_CLIENT_SECRET_Z1R",
"RACETIME_CLIENT_ID_Z2R",
"RACETIME_CLIENT_ID_ALTTPR",
"RACETIME_CLIENT_SECRET_SMWHACKS",
"RACETIME_CLIENT_ID_FF1R",
"RACETIME_CLIENT_ID_CONTRA"
]
| [] | ["RACETIME_CLIENT_SECRET_CTJETS", "RACETIME_CLIENT_SECRET_ALTTPR", "RACETIME_CLIENT_ID_SMWHACKS", "RACETIME_CLIENT_SECRET_TWWR", "RACETIME_CLIENT_SECRET_CONTRA", "RACETIME_CLIENT_ID_CTJETS", "RACETIME_CLIENT_SECRET_SMZ3", "RACETIME_CLIENT_SECRET_SMB3R", "RACETIME_CLIENT_SECRET_SMR", "RACETIME_CLIENT_ID_Z1R", "RACETIME_CLIENT_SECRET_SGL", "RACETIME_CLIENT_SECRET_FF1R", "RACETIME_CLIENT_ID_SM", "RACETIME_CLIENT_SECRET_Z2R", "RACETIME_CLIENT_ID_SMB3R", "RACETIME_CLIENT_SECRET_TEST", "RACETIME_CLIENT_ID_TEST", "RACETIME_CLIENT_ID_SMZ3", "RACETIME_CLIENT_ID_SMR", "RACETIME_CLIENT_SECRET_SM", "RACETIME_CLIENT_ID_SGL", "RACETIME_CLIENT_ID_TWWR", "RACETIME_CLIENT_SECRET_Z1R", "RACETIME_CLIENT_ID_Z2R", "RACETIME_CLIENT_ID_ALTTPR", "RACETIME_CLIENT_SECRET_SMWHACKS", "RACETIME_CLIENT_ID_FF1R", "RACETIME_CLIENT_ID_CONTRA"] | python | 28 | 0 | |
java-cfenv/src/test/java/io/pivotal/cfenv/core/CfEnvTests.java | /*
* Copyright 2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.pivotal.cfenv.core;
import java.io.File;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import mockit.Mock;
import mockit.MockUp;
import org.junit.Test;
import org.springframework.util.ResourceUtils;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
/**
* @author Mark Pollack
* @author Paul Warren
*/
public class CfEnvTests {
@Test
public void testCfApplicationValues() {
mockVcapEnvVars();
CfEnv cfEnv = new CfEnv();
CfApplication cfApplication = cfEnv.getApp();
assertThat(cfApplication.getApplicationId())
.isEqualTo("fa05c1a9-0fc1-4fbd-bae1-139850dec7a3");
assertThat(cfApplication.getInstanceId())
.isEqualTo("fe98dc76ba549876543210abcd1234");
assertThat(cfApplication.getInstanceIndex()).isEqualTo(0);
assertThat(cfApplication.getHost()).isEqualTo("0.0.0.0");
assertThat(cfApplication.getPort()).isEqualTo(61857);
assertThat(cfApplication.getApplicationVersion())
.isEqualTo("ab12cd34-5678-abcd-0123-abcdef987654");
assertThat(cfApplication.getApplicationName()).isEqualTo("styx-james");
assertThat(cfApplication.getApplicationUris()).contains("my-app.example.com");
assertThat(cfApplication.getVersion())
.isEqualTo("ab12cd34-5678-abcd-0123-abcdef987654");
assertThat(cfApplication.getName()).isEqualTo("my-app");
assertThat(cfApplication.getUris()).contains("my-app.example.com");
assertThat(cfApplication.getCfApi()).isEqualTo("https://api.example.com");
assertThat(cfApplication.getSpaceId()).isEqualTo("06450c72-4669-4dc6-8096-45f9777db68a");
assertThat(cfApplication.getSpaceName()).isEqualTo("my-space");
}
@Test
public void testCfService() {
mockVcapEnvVars();
CfEnv cfEnv = new CfEnv();
List<CfService> cfServices = cfEnv.findAllServices();
assertThat(cfServices.size()).isEqualTo(3);
CfService cfService = cfEnv.findServiceByTag("mysql");
assertThat(cfService.getString("blah")).isNull();
assertThat(cfService.getTags()).containsExactly("mysql", "relational");
assertThat(cfService.getMap()).containsEntry("syslog_drain_url", null)
.containsEntry("volume_mounts", new ArrayList<String>())
.containsEntry("label", "p-mysql").containsEntry("provider", null)
.containsEntry("plan", "100mb").containsEntry("name", "mysql")
.containsKey("credentials");
CfCredentials cfCredentials = cfService.getCredentials();
assertMySqlCredentials(cfCredentials);
// Query methods
assertThat(cfService.existsByTagIgnoreCase()).isFalse();
assertThat(cfService.existsByTagIgnoreCase((String[]) null)).isFalse();
assertThat(cfService.existsByTagIgnoreCase("relational")).isTrue();
assertThat(cfService.existsByTagIgnoreCase("ReLaTiOnAl")).isTrue();
assertThat(cfService.existsByTagIgnoreCase("blah")).isFalse();
assertThat(cfService.existsByTagIgnoreCase("blah", "relational")).isTrue();
assertThat(cfService.existsByUriSchemeStartsWith()).isFalse();
assertThat(cfService.existsByUriSchemeStartsWith((String[]) null)).isFalse();
assertThat(cfService.existsByUriSchemeStartsWith("mysql")).isTrue();
assertThat(cfService.existsByUriSchemeStartsWith("MYSQL")).isFalse();
assertThat(cfService.existsByUriSchemeStartsWith("blah")).isFalse();
assertThat(cfService.existsByUriSchemeStartsWith("blah", "mysql")).isTrue();
assertThat(cfService.existsByCredentialsContainsUriField()).isFalse();
assertThat(cfService.existsByCredentialsContainsUriField((String[]) null))
.isFalse();
assertThat(cfService.existsByCredentialsContainsUriField("mysql")).isFalse();
// TODO sample data does not support testing for .isTrue
// Test Redis Entries
cfService = cfEnv.findServiceByTag("redis");
assertThat(cfService.getTags()).containsExactly("pivotal", "redis");
cfCredentials = cfService.getCredentials();
Map<String, Object> credentialMap = cfCredentials.getMap();
credentialMap = cfCredentials.getMap();
assertThat(credentialMap).containsEntry("host", "10.0.4.30");
assertThat(cfCredentials.getHost()).isEqualTo("10.0.4.30");
assertThat(cfService.getName()).isEqualTo("redis-binding");
// Test Volume Services
cfService = cfEnv.findServiceByName("nfs1");
assertThat(cfService.getTags()).containsExactly("nfs");
List<CfVolume> cfVolumes = cfService.getVolumes();
assertNfsVolumes(cfVolumes);
}
private void assertMySqlCredentials(CfCredentials cfCredentials) {
Map<String, Object> credentialMap = cfCredentials.getMap();
assertThat(credentialMap).containsEntry("hostname", "10.0.4.35")
.containsEntry("port", 3306).containsEntry("name", "mysql_name")
.containsEntry("username", "mysql_username")
.containsEntry("password", "mysql_password")
.containsEntry("uri",
"mysql://mysql_username:[email protected]:3306/cf_2e23d10a_8738_8c3c_66cf_13e44422698c?reconnect=true")
.containsEntry("jdbcUrl",
"jdbc:mysql://10.0.4.35:3306/cf_2e23d10a_8738_8c3c_66cf_13e44422698c?user=mysql_username&password=mysql_password");
assertThat(cfCredentials.getUsername()).isEqualTo("mysql_username");
assertThat(cfCredentials.getPassword()).isEqualTo("mysql_password");
assertThat(cfCredentials.getHost()).isEqualTo("10.0.4.35");
assertThat(cfCredentials.getPort()).isEqualTo("3306");
assertThat(cfCredentials.getUri()).isEqualTo(
"mysql://mysql_username:[email protected]:3306/cf_2e23d10a_8738_8c3c_66cf_13e44422698c?reconnect=true");
UriInfo uriInfo = cfCredentials.getUriInfo("mysql");
assertUriInfo(uriInfo);
uriInfo = cfCredentials.getUriInfo();
assertUriInfo(uriInfo);
// assertThat(cfCredentials.findJdbcUrl()).isEqualTo(
// "jdbc:mysql://10.0.4.35:3306/cf_2e23d10a_8738_8c3c_66cf_13e44422698c?user=mysql_username&password=mysql_password");
}
private void assertUriInfo(UriInfo uriInfo) {
assertThat(uriInfo.getUsername()).isEqualTo("mysql_username");
assertThat(uriInfo.getPassword()).isEqualTo("mysql_password");
assertThat(uriInfo.getHost()).isEqualTo("10.0.4.35");
assertThat(uriInfo.getPort()).isEqualTo(3306);
}
@Test
public void testFindServiceByName() {
mockVcapEnvVars();
CfEnv cfEnv = new CfEnv();
CfService cfService = cfEnv.findServiceByTag("redis");
assertThat(cfService.getLabel()).isEqualTo("p-redis");
assertThat(cfService.getPlan()).isEqualTo("shared-vm");
assertThat(cfService.getName()).isEqualTo("redis-binding");
cfService = cfEnv.findServiceByTag("blah", "redis");
assertThat(cfService.getLabel()).isEqualTo("p-redis");
cfService = cfEnv.findServiceByName(".*sql");
assertThat(cfService.getName()).isEqualTo("mysql");
assertThatThrownBy(() -> {
cfEnv.findServiceByName("blah");
}).isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("No service with name [blah]");
assertThatThrownBy(() -> {
cfEnv.findServiceByName("");
}).isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("No service with name []");
assertThatThrownBy(() -> {
cfEnv.findServiceByName((String[]) null);
}).isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("No service with name [null]");
}
@Test
public void testFindServiceByLabel() {
mockVcapEnvVars();
CfEnv cfEnv = new CfEnv();
CfService cfService = cfEnv.findServiceByLabel("p-redis");
assertThat(cfService.getLabel()).isEqualTo("p-redis");
assertThat(cfService.getPlan()).isEqualTo("shared-vm");
assertThat(cfService.getName()).isEqualTo("redis-binding");
cfService = cfEnv.findServiceByLabel("blah", "p-redis");
assertThat(cfService.getLabel()).isEqualTo("p-redis");
assertThat(cfService.getName()).isEqualTo("redis-binding");
cfService = cfEnv.findServiceByLabel(".*redis");
assertThat(cfService.getLabel()).isEqualTo("p-redis");
assertThat(cfService.getName()).isEqualTo("redis-binding");
assertThatThrownBy(() -> {
cfEnv.findServiceByLabel("blah");
}).isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("No service with label [blah]");
assertThatThrownBy(() -> {
cfEnv.findServiceByLabel("");
}).isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("No service with label []");
assertThatThrownBy(() -> {
cfEnv.findServiceByLabel((String[]) null);
}).isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("No service with label [null]");
}
@Test
public void testFindServiceByTag() {
mockVcapEnvVars();
CfEnv cfEnv = new CfEnv();
CfService cfService = cfEnv.findServiceByTag("redis");
assertThat(cfService.getLabel()).isEqualTo("p-redis");
assertThat(cfService.getPlan()).isEqualTo("shared-vm");
assertThat(cfService.getName()).isEqualTo("redis-binding");
cfService = cfEnv.findServiceByTag("blah", "redis");
assertThat(cfService.getLabel()).isEqualTo("p-redis");
cfService = cfEnv.findServiceByTag(".*sql");
assertThat(cfService.getName()).isEqualTo("mysql");
assertThatThrownBy(() -> {
cfEnv.findServiceByTag("blah");
}).isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("No service with tag [blah]");
assertThatThrownBy(() -> {
cfEnv.findServiceByTag("");
}).isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("No service with tag []");
assertThatThrownBy(() -> {
cfEnv.findServiceByTag((String[]) null);
}).isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("No service with tag [null]");
}
@Test
public void testFindCredentialsByName() {
mockVcapEnvVars();
CfEnv cfEnv = new CfEnv();
CfCredentials cfCredentials = cfEnv.findCredentialsByName("mysql");
assertMySqlCredentials(cfCredentials);
cfCredentials = cfEnv.findCredentialsByName("blah", "mysql");
assertMySqlCredentials(cfCredentials);
cfCredentials = cfEnv.findCredentialsByName(".*sql");
assertMySqlCredentials(cfCredentials);
assertThatThrownBy(() -> {
cfEnv.findCredentialsByName("blah");
}).isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("No service with name [blah]");
assertThatThrownBy(() -> {
cfEnv.findCredentialsByName("");
}).isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("No service with name []");
assertThatThrownBy(() -> {
cfEnv.findCredentialsByName((String[]) null);
}).isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("No service with name [null]");
}
@Test
public void testFindCredentialsByLabel() {
mockVcapEnvVars();
CfEnv cfEnv = new CfEnv();
CfCredentials cfCredentials = cfEnv.findCredentialsByLabel("p-mysql");
assertMySqlCredentials(cfCredentials);
cfCredentials = cfEnv.findCredentialsByLabel("blah", "p-mysql");
assertMySqlCredentials(cfCredentials);
cfCredentials = cfEnv.findCredentialsByLabel(".*mysql");
assertMySqlCredentials(cfCredentials);
assertThatThrownBy(() -> {
cfEnv.findCredentialsByLabel("blah");
}).isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("No service with label [blah]");
assertThatThrownBy(() -> {
cfEnv.findCredentialsByLabel("");
}).isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("No service with label []");
assertThatThrownBy(() -> {
cfEnv.findCredentialsByLabel((String[]) null);
}).isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("No service with label [null]");
}
@Test
public void testFindCredentialsByTag() {
mockVcapEnvVars();
CfEnv cfEnv = new CfEnv();
CfCredentials cfCredentials = cfEnv.findCredentialsByTag("mysql");
assertMySqlCredentials(cfCredentials);
cfCredentials = cfEnv.findCredentialsByTag("blah", "mysql");
assertMySqlCredentials(cfCredentials);
cfCredentials = cfEnv.findCredentialsByTag(".*sql");
assertMySqlCredentials(cfCredentials);
assertThatThrownBy(() -> {
cfEnv.findCredentialsByTag("blah");
}).isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("No service with tag [blah]");
assertThatThrownBy(() -> {
cfEnv.findCredentialsByTag("");
}).isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("No service with tag []");
assertThatThrownBy(() -> {
cfEnv.findCredentialsByTag((String[]) null);
}).isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("No service with tag [null]");
}
@Test
public void testNullCredentials() {
mockVcapEnvVars("vcap-null-credentials.json", "vcap-application.json");
CfEnv cfEnv = new CfEnv();
CfService cfService = cfEnv.findServiceByTag("efs");
// should not throw exception
cfService.existsByCredentialsContainsUriField("foo");
}
private void assertNfsVolumes(List<CfVolume> cfVolumes) {
assertThat(cfVolumes.size()).isEqualTo(1);
Map<String, String> cfVolumeMap = cfVolumes.get(0).getMap();
assertThat(cfVolumeMap)
.containsEntry("container_dir", "/var/vcap/data/78525ee7-196c-4ed4-8ac6-857d15334631")
.containsEntry("device_type", "shared")
.containsEntry("mode", "rw");
CfVolume cfVolume = cfVolumes.get(0);
assertThat(cfVolume.getPath().toString()).isEqualTo("/var/vcap/data/78525ee7-196c-4ed4-8ac6-857d15334631");
assertThat(cfVolume.getMode()).isEqualTo(CfVolume.Mode.READ_WRITE);
}
@Test
public void testMultipleMatchingServices() {
mockVcapEnvVars("vcap-services-multiple-mysql.json", "vcap-application.json");
CfEnv cfEnv = new CfEnv();
List<CfService> services = cfEnv.findAllServices();
assertThat(services.size()).isEqualTo(3);
assertThatThrownBy(() -> {
CfService service = cfEnv.findServiceByName("mysql.*");
}).isInstanceOf(IllegalArgumentException.class).hasMessageContaining(
"No unique service matching by name [mysql.*] was found. Matching service names are [mysql, mysql2]");
assertThatThrownBy(() -> {
CfService service = cfEnv.findServiceByLabel("p-mysql");
}).isInstanceOf(IllegalArgumentException.class).hasMessageContaining(
"No unique service matching by label [p-mysql] was found. Matching service names are [mysql, mysql2]");
assertThatThrownBy(() -> {
CfService service = cfEnv.findServiceByTag("mysql");
}).isInstanceOf(IllegalArgumentException.class).hasMessageContaining(
"No unique service matching by tag [mysql] was found. Matching service names are [mysql, mysql2]");
List<CfService> servicesByName = cfEnv.findServicesByName("mysql.*");
assertThat(servicesByName.size()).isEqualTo(2);
List<CfService> servicesByLabel = cfEnv.findServicesByLabel("p-mysql");
assertThat(servicesByLabel.size()).isEqualTo(2);
List<CfService> servicesByTag = cfEnv.findServicesByTag("relational");
assertThat(servicesByTag.size()).isEqualTo(2);
}
private void mockVcapEnvVars(String vcapServicesFilename,
String vcapApplicationFilename) {
String vcapServicesJson;
try {
File file = ResourceUtils.getFile("classpath:" + vcapServicesFilename);
vcapServicesJson = new String(Files.readAllBytes(file.toPath()));
} catch (Exception e) {
throw new IllegalStateException(e);
}
String vcapAppJson;
try {
File file = ResourceUtils.getFile("classpath:" + vcapApplicationFilename);
vcapAppJson = new String(Files.readAllBytes(file.toPath()));
} catch (Exception e) {
throw new IllegalStateException(e);
}
Map<String, String> env = System.getenv();
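// stub System.getenv via a JMockit MockUp so VCAP_SERVICES and VCAP_APPLICATION return the fixture JSON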
new MockUp<System>() {
@Mock
public String getenv(String name) {
if (name.equalsIgnoreCase("VCAP_SERVICES")) {
return vcapServicesJson;
} else if (name.equalsIgnoreCase("VCAP_APPLICATION")) {
return vcapAppJson;
}
return env.get(name);
}
};
}
private void mockVcapEnvVars() {
mockVcapEnvVars("vcap-services.json", "vcap-application.json");
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
infosight/client.go | package infosight
import (
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"net/http/httputil"
"net/url"
"os"
"strings"
"golang.org/x/oauth2"
"golang.org/x/oauth2/clientcredentials"
)
var (
defaultServer string = "https://infosight.hpe.com/apis/"
)
// ClientOption allows setting custom parameters during construction
type ClientOption func(*Client) error
// WithTrace writes all requests to the log
func WithTrace(trace bool) ClientOption {
return func(c *Client) error {
c.trace = trace
return nil
}
}
// WithUserAgent specifies a user agent string to identify the client
func WithUserAgent(userAgent string) ClientOption {
return func(c *Client) error {
c.userAgent = userAgent
return nil
}
}
// WithContext specifies the context to use for requests
func WithContext(ctx context.Context) ClientOption {
return func(c *Client) error {
c.ctx = ctx
return nil
}
}
// WithLogin specifies the credentials used to authenticate with the API
func WithLogin(user string, password string) ClientOption {
return func(c *Client) error {
c.user = user
c.password = password
return nil
}
}
// WithBaseURL overrides the baseURL.
func WithBaseURL(baseURL string) ClientOption {
return func(c *Client) error {
newBaseURL, err := url.Parse(baseURL)
if err != nil {
return err
}
c.Server = newBaseURL.String()
return nil
}
}
// HTTPRequestDoer performs HTTP requests.
//
// The standard http.Client implements this interface.
type HTTPRequestDoer interface {
Do(req *http.Request) (*http.Response, error)
}
// FaultDetail detailed fault information
type FaultDetail struct {
ErrorCode string `json:"errorcode,omitempty"`
}
// Fault returned if something went wrong
type Fault struct {
FaultString string `json:"faultstring,omitempty"`
Detail *FaultDetail `json:"detail,omitempty"`
}
// FaultResponse is returned by InfoSight
type FaultResponse struct {
Status string
StatusCode int
Fault *Fault `json:"fault,omitempty"`
}
// NewFaultResponse creates a new FaultResponse from an HTTP response
func NewFaultResponse(r *http.Response) (*FaultResponse, error) {
var faultResponse FaultResponse
decoder := json.NewDecoder(r.Body)
err := decoder.Decode(&faultResponse)
if err != nil {
return nil, err
}
faultResponse.Status = r.Status
faultResponse.StatusCode = r.StatusCode
return &faultResponse, nil
}
func (e *FaultResponse) Error() string {
if e.Fault != nil {
return e.Fault.FaultString
}
return e.Status
}
// Status result status
type Status struct {
Message string `json:"message,omitempty"`
}
// PagingInfo request details
type PagingInfo struct {
Skip int `json:"skip,omitempty"`
Limit int `json:"limit,omitempty"`
}
// FilterInfo filter details
type FilterInfo struct {
Query map[string]string `json:"query,omitempty"`
}
type Order []string
type Sorting []Order
// RequestInfo request details
type RequestInfo struct {
Paging *PagingInfo `json:"paging,omitempty"`
Filter *FilterInfo `json:"filter,omitempty"`
Sort *Sorting `json:"sort,omitempty"`
}
// APIResponse returned on success
type APIResponse struct {
Status *Status `json:"status,omitempty"`
Request *RequestInfo `json:"request,omitempty"`
Data []interface{} `json:"data,omitempty"`
}
// Client wraps the api for you
type Client struct {
Server string
Wellness *Wellness
// Doer for performing requests, typically a *http.Client with any
// customized settings, such as certificate chains.
innerClient HTTPRequestDoer
oauthConfig *clientcredentials.Config
ctx context.Context
userAgent string
token *oauth2.Token
user string
password string
insecure bool
trace bool
}
// NewClientFromEnvironment creates a new client from default environment variables
func NewClientFromEnvironment(opts ...ClientOption) (*Client, error) {
baseURL := os.Getenv("INFOSIGHT_URL")
user := os.Getenv("INFOSIGHT_CLIENT_KEY")
password := os.Getenv("INFOSIGHT_CLIENT_SECRET")
opts = append(opts, WithLogin(user, password))
c, err := NewClient(baseURL, opts...)
if err != nil {
return nil, err
}
return c, nil
}
// NewClient returns a new InfoSight API client
func NewClient(baseURL string, opts ...ClientOption) (*Client, error) {
// remove trailing slash (if any) from base URL
baseURL = strings.TrimRight(baseURL, "/")
c := &Client{
Server: baseURL,
userAgent: "go-infosight",
}
// mutate client and add all optional params
for _, o := range opts {
if err := o(c); err != nil {
return nil, err
}
}
if c.ctx == nil {
c.ctx = context.Background()
}
var transport http.RoundTripper = &BearerAuthTransport{http.DefaultTransport}
// Override default HTTP client in ctx
c.ctx = context.WithValue(c.ctx, oauth2.HTTPClient, &http.Client{Transport: transport})
if c.Server == "" {
c.Server = defaultServer
}
// ensure the server URL always has a trailing slash
if !strings.HasSuffix(c.Server, "/") {
c.Server += "/"
}
c.oauthConfig = &clientcredentials.Config{
ClientID: c.user,
ClientSecret: c.password,
TokenURL: c.Server + "oauth/token",
Scopes: []string{""},
}
c.innerClient = c.oauthConfig.Client(c.ctx)
c.Wellness = NewWellness(c)
return c, nil
}
// Errorf logs errors
func (c *Client) Errorf(format string, v ...interface{}) {
log.Printf("[ERROR] %s", fmt.Sprintf(format, v...))
}
// Warnf logs warnings
func (c *Client) Warnf(format string, v ...interface{}) {
log.Printf("[WARN] %s", fmt.Sprintf(format, v...))
}
// Debugf logs debug info
func (c *Client) Debugf(format string, v ...interface{}) {
log.Printf("[DEBUG] %s", fmt.Sprintf(format, v...))
}
// Tracef logs trace info
func (c *Client) Tracef(format string, v ...interface{}) {
log.Printf("[TRACE] %s", fmt.Sprintf(format, v...))
}
// do executes the request and evaluates the response
func (c *Client) do(req *http.Request) (*http.Response, error) {
// ensure we have a valid token
/*
if c.token == nil {
token, err := c.oauthConfig.Token(c.ctx)
if err != nil {
return nil, err
}
c.token = token
}
c.token.TokenType = "Bearer"
*/
req.WithContext(c.ctx)
// Headers for all request
req.Header.Set("User-Agent", c.userAgent)
req.Header.Set("Accept", "application/json")
req.Header.Set("Content-Type", "application/json")
r, e := c.innerClient.Do(req)
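// when tracing is enabled, dump the full request and response to the trace log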
if c.trace {
var reqStr = ""
dump, err := httputil.DumpRequestOut(req, true)
if err == nil {
reqStr = strings.ReplaceAll(strings.TrimRight(string(dump), "\r\n"), "\n", "\n ")
}
if r == nil {
dump = nil
err = nil
} else {
dump, err = httputil.DumpResponse(r, true)
}
if err == nil {
c.Tracef("%s\n\n %s\n", reqStr, strings.ReplaceAll(strings.TrimRight(string(dump), "\r\n"), "\n", "\n "))
}
}
return r, e
}
/* Workaround for wrong token type returned by InfoSight (BearerToken, but expects Bearer in auth header)
https://sgeb.io/posts/2015/05/fix-go-oauth2-case-sensitive-bearer-auth-headers/
*/
// BearerAuthTransport wraps a RoundTripper. It capitalizes bearer token
// authorization headers.
type BearerAuthTransport struct {
rt http.RoundTripper
}
// RoundTrip satisfies the RoundTripper interface. It replaces authorization
// headers of scheme `BearerToken` by changing it to `Bearer` (as per OAuth 2.0 spec).
func (t *BearerAuthTransport) RoundTrip(req *http.Request) (*http.Response, error) {
auth := req.Header.Get("Authorization")
if strings.HasPrefix(auth, "BearerToken ") {
auth = "Bearer " + auth[12:]
}
req2 := cloneRequest(req) // per RoundTripper contract
req2.Header.Set("Authorization", auth)
return t.rt.RoundTrip(req2)
}
// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header, len(r.Header))
for k, s := range r.Header {
r2.Header[k] = append([]string(nil), s...)
}
return r2
}
| [
"\"INFOSIGHT_URL\"",
"\"INFOSIGHT_CLIENT_KEY\"",
"\"INFOSIGHT_CLIENT_SECRET\""
]
| []
| [
"INFOSIGHT_URL",
"INFOSIGHT_CLIENT_SECRET",
"INFOSIGHT_CLIENT_KEY"
]
| [] | ["INFOSIGHT_URL", "INFOSIGHT_CLIENT_SECRET", "INFOSIGHT_CLIENT_KEY"] | go | 3 | 0 | |
1M/W2/Test3.py | # Mock Test III
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'anagram' function below.
#
# The function is expected to return an INTEGER.
# The function accepts STRING s as parameter.
#
from collections import Counter
def anagram(s, ans=0):
if len(s) % 2 != 0: return -1
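# subtracting Counters keeps only the characters over-represented in the first half; their total is the minimum number of changes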
return sum((Counter(s[:len(s)//2]) - Counter(s[len(s)//2:])).values())
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
q = int(input().strip())
for q_itr in range(q):
s = input()
result = anagram(s)
fptr.write(str(result) + '\n')
fptr.close() | []
| []
| [
"OUTPUT_PATH"
]
| [] | ["OUTPUT_PATH"] | python | 1 | 0 | |
jira/examples/issue-field-options/update/update.go | package main
import (
"context"
"github.com/ctreminiom/go-atlassian/jira"
"log"
"os"
)
func main() {
var (
host = os.Getenv("HOST")
mail = os.Getenv("MAIL")
token = os.Getenv("TOKEN")
)
atlassian, err := jira.New(nil, host)
if err != nil {
return
}
atlassian.Auth.SetBasicAuth(mail, token)
var (
fieldID = "customfield_10038"
contextID = 10180
payload = &jira.FieldContextOptionListScheme{
Options: []*jira.CustomFieldContextOptionScheme{
// Single/Multiple Choice example
{
ID: "10064",
Value: "Option 3 - Updated",
Disabled: false,
},
{
ID: "10065",
Value: "Option 4 - Updated",
Disabled: true,
},
///////////////////////////////////////////
/*
// Cascading Choice example
{
OptionID: "1027",
Value: "Argentina",
Disabled: false,
},
{
OptionID: "1027",
Value: "Uruguay",
Disabled: false,
},
*/
}}
)
fieldOptions, response, err := atlassian.Issue.Field.Context.Option.Update(context.Background(), fieldID, contextID, payload)
if err != nil {
if response != nil {
log.Println("Response HTTP Response", string(response.BodyAsBytes))
}
return
}
log.Println("Response HTTP Code", response.StatusCode)
log.Println("HTTP Endpoint Used", response.Endpoint)
for _, option := range fieldOptions.Options {
log.Println(option)
}
}
| [
"\"HOST\"",
"\"MAIL\"",
"\"TOKEN\""
]
| []
| [
"MAIL",
"HOST",
"TOKEN"
]
| [] | ["MAIL", "HOST", "TOKEN"] | go | 3 | 0 | |
fairseq_cli/train.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a new model on one or across multiple GPUs.
"""
import argparse
import logging
import math
import os
import random
import sys
import numpy as np
import torch
from fairseq import (
checkpoint_utils,
distributed_utils,
options,
quantization_utils,
tasks,
utils,
)
from fairseq.data import iterators
from fairseq.logging import meters, metrics, progress_bar
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
from fairseq.trainer import Trainer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.train")
def main(args):
utils.import_user_module(args)
assert (
args.max_tokens is not None or args.max_sentences is not None
), "Must specify batch size either with --max-tokens or --max-sentences"
metrics.reset()
np.random.seed(args.seed)
utils.set_torch_seed(args.seed)
if distributed_utils.is_master(args):
checkpoint_utils.verify_checkpoint_directory(args.save_dir)
# Print args
logger.info(args)
# Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(args)
# Load valid dataset (we load training data below, based on the latest checkpoint)
for valid_sub_split in args.valid_subset.split(","):
task.load_dataset(valid_sub_split, combine=False, epoch=1)
# Build model and criterion
model = task.build_model(args)
criterion = task.build_criterion(args)
logger.info(model)
logger.info("task: {} ({})".format(args.task, task.__class__.__name__))
logger.info("model: {} ({})".format(args.arch, model.__class__.__name__))
logger.info(
"criterion: {} ({})".format(args.criterion, criterion.__class__.__name__)
)
logger.info(
"num. model params: {} (num. trained: {})".format(
sum(p.numel() for p in model.parameters()),
sum(p.numel() for p in model.parameters() if p.requires_grad),
)
)
# (optionally) Configure quantization
if args.quantization_config_path is not None:
quantizer = quantization_utils.Quantizer(
config_path=args.quantization_config_path,
max_epoch=args.max_epoch,
max_update=args.max_update,
)
else:
quantizer = None
# Build trainer
if args.model_parallel_size == 1:
trainer = Trainer(args, task, model, criterion, quantizer)
else:
trainer = MegatronTrainer(args, task, model, criterion)
logger.info(
"training on {} devices (GPUs/TPUs)".format(args.distributed_world_size)
)
logger.info(
"max tokens per GPU = {} and max sentences per GPU = {}".format(
args.max_tokens, args.max_sentences
)
)
# Load the latest checkpoint if one is available and restore the
# corresponding train iterator
extra_state, epoch_itr = checkpoint_utils.load_checkpoint(
args,
trainer,
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
# Train until the learning rate gets too small
max_epoch = args.max_epoch or math.inf
lr = trainer.get_lr()
train_meter = meters.StopwatchMeter()
train_meter.start()
while lr > args.min_lr and epoch_itr.next_epoch_idx <= max_epoch:
# train for one epoch
valid_losses, should_stop = train(args, trainer, task, epoch_itr)
if should_stop:
break
# only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
epoch_itr = trainer.get_train_iterator(
epoch_itr.next_epoch_idx,
# sharded data: get train iterator for next epoch
load_dataset=task.has_sharded_data("train"),
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
train_meter.stop()
logger.info("done training in {:.1f} seconds".format(train_meter.sum))
def should_stop_early(args, valid_loss):
# skip check if no validation was done in the current epoch
if valid_loss is None:
return False
if args.patience <= 0:
return False
def is_better(a, b):
return a > b if args.maximize_best_checkpoint_metric else a < b
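# the best validation value seen so far is persisted across calls as an attribute on this function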
prev_best = getattr(should_stop_early, "best", None)
if prev_best is None or is_better(valid_loss, prev_best):
should_stop_early.best = valid_loss
should_stop_early.num_runs = 0
return False
else:
should_stop_early.num_runs += 1
if should_stop_early.num_runs >= args.patience:
logger.info(
"early stop since valid performance hasn't improved for last {} runs".format(
args.patience
)
)
return True
else:
return False
@metrics.aggregate("train")
def train(args, trainer, task, epoch_itr):
"""Train the model for one epoch and return validation losses."""
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=args.fix_batches_to_gpus,
shuffle=(epoch_itr.next_epoch_idx > args.curriculum),
)
update_freq = (
args.update_freq[epoch_itr.epoch - 1]
if epoch_itr.epoch <= len(args.update_freq)
else args.update_freq[-1]
)
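# group update_freq batches together; each group is consumed by one call to trainer.train_step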
itr = iterators.GroupedIterator(itr, update_freq)
if getattr(args, "tpu", False):
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
tensorboard_logdir=(
args.tensorboard_logdir if distributed_utils.is_master(args) else None
),
default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
)
trainer.begin_epoch(epoch_itr.epoch)
valid_subsets = args.valid_subset.split(",")
should_stop = False
num_updates = trainer.get_num_updates()
for i, samples in enumerate(progress):
with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function(
"train_step-%d" % i
):
log_output = trainer.train_step(samples)
if log_output is not None: # not OOM, overflow, ...
# log mid-epoch stats
num_updates = trainer.get_num_updates()
if num_updates % args.log_interval == 0:
stats = get_training_stats(metrics.get_smoothed_values("train_inner"))
progress.log(stats, tag="train_inner", step=num_updates)
# reset mid-epoch stats after each log interval
# the end-of-epoch stats will still be preserved
metrics.reset_meters("train_inner")
end_of_epoch = not itr.has_next()
valid_losses, should_stop = validate_and_save(
args, trainer, task, epoch_itr, valid_subsets, end_of_epoch
)
if should_stop:
break
# log end-of-epoch stats
logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch))
stats = get_training_stats(metrics.get_smoothed_values("train"))
progress.print(stats, tag="train", step=num_updates)
# reset epoch-level meters
metrics.reset_meters("train")
return valid_losses, should_stop
def validate_and_save(args, trainer, task, epoch_itr, valid_subsets, end_of_epoch):
num_updates = trainer.get_num_updates()
max_update = args.max_update or math.inf
do_save = (
(end_of_epoch and epoch_itr.epoch % args.save_interval == 0)
or num_updates >= max_update
or (
args.save_interval_updates > 0
and num_updates > 0
and num_updates % args.save_interval_updates == 0
and num_updates >= args.validate_after_updates
)
)
do_validate = (
(not end_of_epoch and do_save) # validate during mid-epoch saves
or (end_of_epoch and epoch_itr.epoch % args.validate_interval == 0)
or num_updates >= max_update
or (
args.validate_interval_updates > 0
and num_updates > 0
and num_updates % args.validate_interval_updates == 0
)
) and not args.disable_validation
# Validate
valid_losses = [None]
if do_validate:
valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
# Stopping conditions
should_stop = (
should_stop_early(args, valid_losses[0])
or num_updates >= max_update
or (
args.stop_time_hours > 0
and trainer.cumulative_training_time() / (60 * 60) > args.stop_time_hours
)
)
# Save checkpoint
if do_save or should_stop:
logger.info("begin save checkpoint")
checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
return valid_losses, should_stop
def get_training_stats(stats):
stats["wall"] = round(metrics.get_meter("default", "wall").elapsed_time, 0)
return stats
def validate(args, trainer, task, epoch_itr, subsets):
"""Evaluate the model on the validation set(s) and return the losses."""
if args.fixed_validation_seed is not None:
# set fixed seed for every validation
utils.set_torch_seed(args.fixed_validation_seed)
valid_losses = []
for subset in subsets:
logger.info('begin validation on "{}" subset'.format(subset))
# Initialize data iterator
itr = trainer.get_valid_iterator(subset).next_epoch_itr(shuffle=False)
if getattr(args, "tpu", False):
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
prefix=f"valid on '{subset}' subset",
tensorboard_logdir=(
args.tensorboard_logdir if distributed_utils.is_master(args) else None
),
default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
)
# create a new root metrics aggregator so validation metrics
# don't pollute other aggregators (e.g., train meters)
with metrics.aggregate(new_root=True) as agg:
for sample in progress:
trainer.valid_step(sample)
# log validation stats
stats = get_valid_stats(args, trainer, agg.get_smoothed_values())
progress.print(stats, tag=subset, step=trainer.get_num_updates())
valid_losses.append(stats[args.best_checkpoint_metric])
return valid_losses
def get_valid_stats(args, trainer, stats):
stats["num_updates"] = trainer.get_num_updates()
if hasattr(checkpoint_utils.save_checkpoint, "best"):
key = "best_{0}".format(args.best_checkpoint_metric)
best_function = max if args.maximize_best_checkpoint_metric else min
stats[key] = best_function(
checkpoint_utils.save_checkpoint.best, stats[args.best_checkpoint_metric]
)
return stats
def cli_main(modify_parser=None):
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
if args.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(args, main)
else:
distributed_utils.call_main(args, main)
if __name__ == "__main__":
cli_main()
| []
| []
| [
"LOGLEVEL"
]
| [] | ["LOGLEVEL"] | python | 1 | 0 | |
tests/test_cli/test_dcos_docker/test_cli.py | """
Tests for the Docker CLI.
This mostly provides error case coverage.
We rely mainly on manual testing because automated tests for this would be very slow.
"""
import os
import uuid
from pathlib import Path
from tempfile import mkstemp
from textwrap import dedent
from typing import List
import pytest
from click.testing import CliRunner
from dcos_e2e_cli import dcos_docker, minidcos
_SUBCOMMANDS = [[item] for item in dcos_docker.commands.keys()]
_BASE_COMMAND = [[]] # type: List[List[str]]
_COMMANDS = _BASE_COMMAND + _SUBCOMMANDS
class TestHelp:
"""
Test help texts.
"""
@pytest.mark.parametrize(
'command',
_COMMANDS,
ids=[str(cmd) for cmd in _COMMANDS],
)
def test_help(self, command: List[str]) -> None:
"""
Expected help text is shown for ``minidcos docker`` commands.
This help text is defined in files.
To update these files, run the command
``bash admin/update_cli_tests.sh``.
"""
runner = CliRunner()
arguments = ['docker'] + command + ['--help']
result = runner.invoke(minidcos, arguments, catch_exceptions=False)
assert result.exit_code == 0
help_output_filename = '-'.join(['dcos-docker'] + command) + '.txt'
help_outputs_dir = Path(__file__).parent / 'help_outputs'
expected_help_file = help_outputs_dir / help_output_filename
try:
expected_help = expected_help_file.read_text()
assert result.output == expected_help
except (AssertionError, FileNotFoundError): # pragma: no cover
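# when FIX_CLI_TESTS=1, write the actual output to the expected-output file instead of failing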
if os.getenv('FIX_CLI_TESTS') == '1':
help_outputs_dir.mkdir(exist_ok=True)
expected_help_file.touch()
expected_help_file.write_text(result.output)
else:
raise
class TestCreate:
"""
Tests for the `create` subcommand.
"""
def test_copy_to_master_bad_format(
self,
oss_installer: Path,
) -> None:
"""
An error is shown if ``--copy-to-master`` is given a value in an
invalid format.
"""
runner = CliRunner()
result = runner.invoke(
minidcos,
[
'docker',
'create',
str(oss_installer),
'--copy-to-master',
'/some/path',
],
catch_exceptions=False,
)
assert result.exit_code == 2
# yapf breaks multi-line noqa, see
# https://github.com/google/yapf/issues/524.
# yapf: disable
expected_message = dedent(
"""\
Usage: minidcos docker create [OPTIONS] INSTALLER
Error: Invalid value for "--copy-to-master": "/some/path" is not in the format /absolute/local/path:/remote/path.
""",# noqa: E501,E261
)
# yapf: enable
assert result.output == expected_message
def test_copy_to_master_no_local(self, oss_installer: Path) -> None:
"""
An error is shown if the given local path does not exist.
"""
runner = CliRunner()
result = runner.invoke(
minidcos,
[
'docker',
'create',
str(oss_installer),
'--copy-to-master',
'/some/path:some/remote',
],
catch_exceptions=False,
)
assert result.exit_code == 2
# yapf breaks multi-line noqa, see
# https://github.com/google/yapf/issues/524.
# yapf: disable
expected_message = dedent(
"""\
Usage: minidcos docker create [OPTIONS] INSTALLER
Error: Invalid value for "--copy-to-master": "/some/path" does not exist.
""",# noqa: E501,E261
)
# yapf: enable
assert result.output == expected_message
@pytest.mark.parametrize(
'option',
[
'--custom-volume',
'--custom-master-volume',
'--custom-agent-volume',
'--custom-public-agent-volume',
],
)
def test_custom_volume_bad_mode(
self,
oss_installer: Path,
option: str,
) -> None:
"""
Given volumes must have the mode "rw" or "ro", or no mode.
"""
runner = CliRunner()
result = runner.invoke(
minidcos,
[
'docker',
'create',
str(oss_installer),
option,
'/opt:/opt:ab',
],
catch_exceptions=False,
)
assert result.exit_code == 2
# yapf breaks multi-line noqa, see
# https://github.com/google/yapf/issues/524.
# yapf: disable
expected_message = dedent(
"""\
Usage: minidcos docker create [OPTIONS] INSTALLER
Error: Invalid value for "{option}": Mode in "/opt:/opt:ab" is "ab". If given, the mode must be one of "ro", "rw".
""",# noqa: E501,E261
).format(option=option)
# yapf: enable
assert result.output == expected_message
@pytest.mark.parametrize(
'option',
[
'--custom-volume',
'--custom-master-volume',
'--custom-agent-volume',
'--custom-public-agent-volume',
],
)
def test_custom_volume_bad_format(
self,
oss_installer: Path,
option: str,
) -> None:
"""
Given volumes must have 0, 1 or 2 colons.
"""
runner = CliRunner()
result = runner.invoke(
minidcos,
[
'docker',
'create',
str(oss_installer),
option,
'/opt:/opt:/opt:rw',
],
catch_exceptions=False,
)
assert result.exit_code == 2
# yapf breaks multi-line noqa, see
# https://github.com/google/yapf/issues/524.
# yapf: disable
expected_message = dedent(
"""\
Usage: minidcos docker create [OPTIONS] INSTALLER
Error: Invalid value for "{option}": "/opt:/opt:/opt:rw" is not a valid volume definition. See https://docs.docker.com/engine/reference/run/#volume-shared-filesystems for the syntax to use.
""",# noqa: E501,E261
).format(option=option)
# yapf: enable
assert result.output == expected_message
def test_copy_to_master_relative(
self,
oss_installer: Path,
) -> None:
"""
An error is shown if the given local path is not an absolute path.
"""
_, temporary_file_path = mkstemp(dir='.')
relative_path = Path(temporary_file_path).relative_to(os.getcwd())
runner = CliRunner()
result = runner.invoke(
minidcos,
[
'docker',
'create',
str(oss_installer),
'--copy-to-master',
'{relative}:some/remote'.format(relative=relative_path),
],
catch_exceptions=False,
)
Path(relative_path).unlink()
assert result.exit_code == 2
# yapf breaks multi-line noqa, see
# https://github.com/google/yapf/issues/524.
# yapf: disable
expected_message = dedent(
"""\
Usage: minidcos docker create [OPTIONS] INSTALLER
Error: Invalid value for "--copy-to-master": "some/remote is not an absolute path.
""",# noqa: E501,E261
)
# yapf: enable
assert result.output == expected_message
def test_invalid_installer_path(self) -> None:
"""
An error is shown if an invalid installer path is given.
"""
runner = CliRunner()
result = runner.invoke(
minidcos,
['docker', 'create', '/not/a/path'],
catch_exceptions=False,
)
assert result.exit_code == 2
expected_error = (
'Error: Invalid value for "INSTALLER": '
'File "/not/a/path" does not exist.'
)
assert expected_error in result.output
def test_config_does_not_exist(self, oss_installer: Path) -> None:
"""
An error is shown if the ``--extra-config`` file does not exist.
"""
runner = CliRunner()
invalid_path = '/' + uuid.uuid4().hex
result = runner.invoke(
minidcos,
[
'docker',
'create',
str(oss_installer),
'--extra-config',
invalid_path,
],
catch_exceptions=False,
)
assert result.exit_code == 2
# yapf breaks multi-line noqa, see
# https://github.com/google/yapf/issues/524.
# yapf: disable
expected_message = dedent(
"""\
Usage: minidcos docker create [OPTIONS] INSTALLER
Try "minidcos docker create -h" for help.
Error: Invalid value for "--extra-config": File "{path}" does not exist.
""",# noqa: E501,E261
).format(path=invalid_path)
# yapf: enable
assert result.output == expected_message
def test_invalid_yaml(self, oss_installer: Path, tmp_path: Path) -> None:
"""
An error is shown if invalid YAML is given in the file given to
``--extra-config``.
"""
invalid_file = tmp_path / uuid.uuid4().hex
invalid_file.write_text('@')
runner = CliRunner()
result = runner.invoke(
minidcos,
[
'docker',
'create',
str(oss_installer),
'--extra-config',
str(invalid_file),
],
catch_exceptions=False,
)
assert result.exit_code == 2
# yapf breaks multi-line noqa, see
# https://github.com/google/yapf/issues/524.
# yapf: disable
expected_message = dedent(
"""\
Usage: minidcos docker create [OPTIONS] INSTALLER
Error: Invalid value for "--extra-config": "@" is not valid YAML
""",# noqa: E501,E261
)
# yapf: enable
assert result.output == expected_message
def test_not_key_value(self, oss_installer: Path, tmp_path: Path) -> None:
"""
An error is shown if YAML is given for ``--extra-config`` which is not
a key-value mapping.
"""
invalid_file = tmp_path / uuid.uuid4().hex
invalid_file.write_text('example')
runner = CliRunner()
result = runner.invoke(
minidcos,
[
'docker',
'create',
str(oss_installer),
'--extra-config',
str(invalid_file),
],
catch_exceptions=False,
)
assert result.exit_code == 2
# yapf breaks multi-line noqa, see
# https://github.com/google/yapf/issues/524.
# yapf: disable
expected_message = dedent(
"""\
Usage: minidcos docker create [OPTIONS] INSTALLER
Error: Invalid value for "--extra-config": "example" is not a valid DC/OS configuration
""",# noqa: E501,E261
)
# yapf: enable
assert result.output == expected_message
@pytest.mark.parametrize('invalid_id', ['@', ''])
def test_invalid_cluster_id(
self,
oss_installer: Path,
invalid_id: str,
) -> None:
"""
Cluster IDs must match a certain pattern.
"""
runner = CliRunner()
result = runner.invoke(
minidcos,
[
'docker',
'create',
str(oss_installer),
'--cluster-id',
invalid_id,
],
catch_exceptions=False,
)
assert result.exit_code == 2
# yapf breaks multi-line noqa, see
# https://github.com/google/yapf/issues/524.
# yapf: disable
expected_message = dedent(
"""\
Usage: minidcos docker create [OPTIONS] INSTALLER
Error: Invalid value for "-c" / "--cluster-id": Invalid cluster id "{cluster_id}", only [a-zA-Z0-9][a-zA-Z0-9_.-] are allowed and the cluster ID cannot be empty.
""",# noqa: E501,E261
).format(cluster_id=invalid_id)
# yapf: enable
assert result.output == expected_message
def test_genconf_path_not_exist(self, oss_installer: Path) -> None:
"""
Genconf path must exist.
"""
runner = CliRunner()
result = runner.invoke(
minidcos,
[
'docker',
'create',
str(oss_installer),
'--genconf-dir',
'non-existing',
],
catch_exceptions=False,
)
assert result.exit_code == 2
expected_error = (
'Error: Invalid value for "--genconf-dir": '
'Directory "non-existing" does not exist.'
)
assert expected_error in result.output
def test_genconf_path_is_file(
self,
oss_installer: Path,
tmp_path: Path,
) -> None:
"""
Genconf path must be a directory.
"""
genconf_file = tmp_path / 'testfile'
genconf_file.write_text('test')
runner = CliRunner()
result = runner.invoke(
minidcos,
[
'docker',
'create',
str(oss_installer),
'--genconf-dir',
str(genconf_file),
],
catch_exceptions=False,
)
assert result.exit_code == 2
expected_error = (
'Error: Invalid value for "--genconf-dir": '
'Directory "{path}" is a file.'
).format(path=str(genconf_file))
assert expected_error in result.output
def test_workdir_path_not_exist(self, oss_installer: Path) -> None:
"""
``--workspace-dir`` must exist.
"""
runner = CliRunner()
result = runner.invoke(
minidcos,
[
'docker',
'create',
str(oss_installer),
'--workspace-dir',
'non-existing',
],
catch_exceptions=False,
)
assert result.exit_code == 2
expected_error = (
'Error: Invalid value for "--workspace-dir": '
'Directory "non-existing" does not exist.'
)
assert expected_error in result.output
def test_workspace_path_is_file(
self,
oss_installer: Path,
tmp_path: Path,
) -> None:
"""
``--workspace-dir`` must be a directory.
"""
workspace_file = tmp_path / 'testfile'
workspace_file.write_text('test')
runner = CliRunner()
result = runner.invoke(
minidcos,
[
'docker',
'create',
str(oss_installer),
'--workspace-dir',
str(workspace_file),
],
catch_exceptions=False,
)
assert result.exit_code == 2
expected_error = (
'Error: Invalid value for "--workspace-dir": '
'Directory "{path}" is a file.'
).format(path=str(workspace_file))
assert expected_error in result.output
class TestDestroy:
"""
Tests for the `destroy` subcommand.
"""
def test_cluster_does_not_exist(self) -> None:
"""
An error is shown if the given cluster does not exist.
"""
unique = uuid.uuid4().hex
runner = CliRunner()
result = runner.invoke(
minidcos,
['docker', 'destroy', '--cluster-id', unique],
)
assert result.exit_code == 2
expected_error = 'Cluster "{unique}" does not exist'
expected_error = expected_error.format(unique=unique)
assert expected_error in result.output
class TestDestroyList:
"""
Tests for the `destroy-list` subcommand.
"""
def test_cluster_does_not_exist(self) -> None:
"""
An error is shown if the given cluster does not exist.
"""
unique = uuid.uuid4().hex
runner = CliRunner()
result = runner.invoke(
minidcos,
['docker', 'destroy-list', unique],
catch_exceptions=False,
)
assert result.exit_code == 0
expected_error = 'Cluster "{unique}" does not exist'
expected_error = expected_error.format(unique=unique)
assert expected_error in result.output
def test_multiple_clusters(self) -> None:
"""
It is possible to give multiple cluster IDs.
"""
unique = uuid.uuid4().hex
unique_2 = uuid.uuid4().hex
runner = CliRunner()
result = runner.invoke(
minidcos,
['docker', 'destroy-list', unique, unique_2],
catch_exceptions=False,
)
assert result.exit_code == 0
expected_error = 'Cluster "{unique}" does not exist'
expected_error = expected_error.format(unique=unique)
assert expected_error in result.output
expected_error = expected_error.format(unique=unique_2)
assert expected_error in result.output
class TestInspect:
"""
Tests for the `inspect` subcommand.
"""
def test_cluster_does_not_exist(self) -> None:
"""
An error is shown if the given cluster does not exist.
"""
unique = uuid.uuid4().hex
runner = CliRunner()
result = runner.invoke(
minidcos,
['docker', 'inspect', '--cluster-id', unique],
)
assert result.exit_code == 2
expected_error = 'Cluster "{unique}" does not exist'
expected_error = expected_error.format(unique=unique)
assert expected_error in result.output
class TestWait:
"""
Tests for the ``wait`` subcommand.
"""
def test_cluster_does_not_exist(self) -> None:
"""
An error is shown if the given cluster does not exist.
"""
unique = uuid.uuid4().hex
runner = CliRunner()
result = runner.invoke(
minidcos,
['docker', 'wait', '--cluster-id', unique],
)
assert result.exit_code == 2
expected_error = 'Cluster "{unique}" does not exist'
expected_error = expected_error.format(unique=unique)
assert expected_error in result.output
class TestDoctor:
"""
Tests for the ``doctor`` subcommand.
"""
def test_doctor(self) -> None:
"""
No exception is raised by the ``doctor`` subcommand.
"""
runner = CliRunner()
result = runner.invoke(
minidcos,
['docker', 'doctor'],
catch_exceptions=False,
)
assert result.exit_code == 0
class TestSetupMacNetwork():
"""
Tests for the ``setup-mac-network`` subcommand.
"""
def test_suffix_not_ovpn(self, tmp_path: Path) -> None:
"""
If a configuration file does not have the 'ovpn' suffix, an error is
shown.
"""
configuration_file = tmp_path / 'example.txt'
configuration_file.write_text('example')
runner = CliRunner()
result = runner.invoke(
minidcos,
[
'docker',
'setup-mac-network',
'--configuration-dst',
str(configuration_file),
],
catch_exceptions=False,
)
# yapf breaks multi-line noqa, see
# https://github.com/google/yapf/issues/524.
# yapf: disable
expected_error = dedent(
"""\
Usage: minidcos docker setup-mac-network [OPTIONS]
Error: Invalid value for "--configuration-dst": "{value}" does not have the suffix ".ovpn".
""",# noqa: E501,E261
).format(
value=str(configuration_file),
)
# yapf: enable
assert result.exit_code == 2
assert result.output == expected_error
def test_configuration_already_exists(self, tmp_path: Path) -> None:
"""
If a configuration file already exists at the given location, an error
is shown.
"""
configuration_file = tmp_path / 'example.ovpn'
configuration_file.write_text('example')
runner = CliRunner()
result = runner.invoke(
minidcos,
[
'docker',
'setup-mac-network',
'--configuration-dst',
str(configuration_file),
],
catch_exceptions=False,
)
# yapf breaks multi-line noqa, see
# https://github.com/google/yapf/issues/524.
# yapf: disable
expected_error = dedent(
"""\
Usage: minidcos docker setup-mac-network [OPTIONS]
Error: Invalid value for "--configuration-dst": "{value}" already exists so no new OpenVPN configuration was created.
To use {value}:
1. Install an OpenVPN client such as Tunnelblick (https://tunnelblick.net/downloads.html) or Shimo (https://www.shimovpn.com).
2. Run "open {value}".
3. In your OpenVPN client, connect to the new "example" profile.
4. Run "minidcos docker doctor" to confirm that everything is working.
""",# noqa: E501,E261
).format(
value=str(configuration_file),
)
# yapf: enable
assert result.exit_code == 2
assert result.output == expected_error
class TestCreateLoopbackSidecar:
"""
Tests for the ``create-loopback-sidecar`` subcommand.
"""
def test_sidecar_container_already_exists(self) -> None:
"""
An error is shown if the given sidecar container already exists.
"""
test_sidecar = 'test-sidecar'
runner = CliRunner()
result = runner.invoke(
minidcos,
['docker', 'create-loopback-sidecar', test_sidecar],
)
assert result.exit_code == 0
try:
result = runner.invoke(
minidcos,
['docker', 'create-loopback-sidecar', test_sidecar],
)
assert result.exit_code == 2
expected_error = 'Loopback sidecar "{name}" already exists'
expected_error = expected_error.format(name=test_sidecar)
assert expected_error in result.output
finally:
result = runner.invoke(
minidcos,
['docker', 'destroy-loopback-sidecar', test_sidecar],
)
assert result.exit_code == 0
class TestDestroyLoopbackSidecar:
"""
Tests for the ``destroy-loopback-sidecar`` subcommand.
"""
def test_sidecar_container_does_not_exist(self) -> None:
"""
An error is shown if the given sidecar container does not exist.
"""
does_not_exist = 'does-not-exist'
runner = CliRunner()
result = runner.invoke(
minidcos,
['docker', 'destroy-loopback-sidecar', does_not_exist],
)
assert result.exit_code == 2
expected_error = 'Loopback sidecar "{name}" does not exist'
expected_error = expected_error.format(name=does_not_exist)
assert expected_error in result.output
class TestListLoopbackSidecars:
"""
Tests for the ``list-loopback-sidecars`` subcommand.
"""
def test_no_error(self) -> None:
"""
A success code is given.
"""
runner = CliRunner()
result = runner.invoke(
minidcos,
['docker', 'list-loopback-sidecars'],
catch_exceptions=False,
)
assert result.exit_code == 0
| []
| []
| [
"FIX_CLI_TESTS"
]
| [] | ["FIX_CLI_TESTS"] | python | 1 | 0 | |
examples/db/truncate/truncateTable/main.go | package main
import (
"fmt"
"os"
"go.m3o.com"
"go.m3o.com/db"
)
func main() {
client := m3o.New(os.Getenv("M3O_API_TOKEN"))
rsp, err := client.Db.Truncate(&db.TruncateRequest{
Table: "example",
})
fmt.Println(rsp, err)
}
| [
"\"M3O_API_TOKEN\""
]
| []
| [
"M3O_API_TOKEN"
]
| [] | ["M3O_API_TOKEN"] | go | 1 | 0 | |
topdown/tokens_test.go | package topdown
import (
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"encoding/base64"
"encoding/json"
"fmt"
"os"
"runtime"
"strings"
"testing"
"time"
"github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/internal/jwx/jwk"
"github.com/open-policy-agent/opa/internal/jwx/jws"
"github.com/open-policy-agent/opa/storage"
"github.com/open-policy-agent/opa/storage/inmem"
)
func TestParseTokenConstraints(t *testing.T) {
wallclock := ast.NumberTerm(int64ToJSONNumber(time.Now().UnixNano()))
t.Run("Empty", func(t *testing.T) {
c := ast.NewObject()
constraints, err := parseTokenConstraints(c, wallclock)
if err != nil {
t.Fatalf("parseTokenConstraints: %v", err)
}
if constraints.alg != "" {
t.Errorf("alg: %v", constraints.alg)
}
if constraints.keys != nil {
t.Errorf("key: %v", constraints.keys)
}
})
t.Run("Alg", func(t *testing.T) {
c := ast.NewObject()
c.Insert(ast.StringTerm("alg"), ast.StringTerm("RS256"))
constraints, err := parseTokenConstraints(c, wallclock)
if err != nil {
t.Fatalf("parseTokenConstraints: %v", err)
}
if constraints.alg != "RS256" {
t.Errorf("alg: %v", constraints.alg)
}
})
t.Run("Cert", func(t *testing.T) {
c := ast.NewObject()
c.Insert(ast.StringTerm("cert"), ast.StringTerm(`-----BEGIN CERTIFICATE-----
MIIBcDCCARagAwIBAgIJAMZmuGSIfvgzMAoGCCqGSM49BAMCMBMxETAPBgNVBAMM
CHdoYXRldmVyMB4XDTE4MDgxMDE0Mjg1NFoXDTE4MDkwOTE0Mjg1NFowEzERMA8G
A1UEAwwId2hhdGV2ZXIwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAATPwn3WCEXL
mjp/bFniDwuwsfu7bASlPae2PyWhqGeWwe23Xlyx+tSqxlkXYe4pZ23BkAAscpGj
yn5gXHExyDlKo1MwUTAdBgNVHQ4EFgQUElRjSoVgKjUqY5AXz2o74cLzzS8wHwYD
VR0jBBgwFoAUElRjSoVgKjUqY5AXz2o74cLzzS8wDwYDVR0TAQH/BAUwAwEB/zAK
BggqhkjOPQQDAgNIADBFAiEA4yQ/88ZrUX68c6kOe9G11u8NUaUzd8pLOtkKhniN
OHoCIHmNX37JOqTcTzGn2u9+c8NlnvZ0uDvsd1BmKPaUmjmm
-----END CERTIFICATE-----`))
constraints, err := parseTokenConstraints(c, wallclock)
if err != nil {
t.Fatalf("parseTokenConstraints: %v", err)
}
pubKey := constraints.keys[0].(*ecdsa.PublicKey)
if pubKey.Curve != elliptic.P256() {
t.Errorf("curve: %v", pubKey.Curve)
}
if pubKey.X.Text(16) != "cfc27dd60845cb9a3a7f6c59e20f0bb0b1fbbb6c04a53da7b63f25a1a86796c1" {
t.Errorf("x: %x", pubKey.X)
}
if pubKey.Y.Text(16) != "edb75e5cb1fad4aac6591761ee29676dc190002c7291a3ca7e605c7131c8394a" {
t.Errorf("y: %x", pubKey.Y)
}
})
t.Run("Cert Multi Key", func(t *testing.T) {
c := ast.NewObject()
c.Insert(ast.StringTerm("cert"), ast.StringTerm(`{
"keys": [
{
"kty": "EC",
"use": "sig",
"crv": "P-256",
"kid": "k1",
"x": "9Qq5S5VqMQoH-FOI4atcH6V3bua03C-5ZMZMG1rszwA",
"y": "LLbFxWkGBEBrTm1GMYZJy1OXCH1KLweJMCgIEPIsibU",
"alg": "ES256"
},
{
"kty": "RSA",
"e": "AQAB",
"use": "enc",
"kid": "k2",
"alg": "RS256",
"n": "sGu-fYVE2nq2dPxJlqAMI0Z8G3FD0XcWDnD8mkfO1ddKRGuUQZmfj4gWeZGyIk3cnuoy7KJCEqa3daXc08QHuFZyfn0rH33t8_AFsvb0q0i7R2FK-Gdqs_E0-sGpYMsRJdZWfCioLkYjIHEuVnRbi3DEsWqe484rEGbKF60jNRgGC4b-8pz-E538ZkssWxcqHrYIj5bjGEU36onjS3M_yrTuNvzv_8wRioK4fbcwmGne9bDxu8LcoSReWpPn0CnUkWnfqroRcMJnC87ZuJagDW1ZWCmU3psdsVanmFFh0DP6z0fsA4h8G2n9-qp-LEKFaWwo3IWlOsIzU3MHdcEiGw"
}
]
}
`))
constraints, err := parseTokenConstraints(c, wallclock)
if err != nil {
t.Fatalf("parseTokenConstraints: %v", err)
}
elPubKey := constraints.keys[0].(*ecdsa.PublicKey)
if elPubKey.Curve != elliptic.P256() {
t.Errorf("curve: %v", elPubKey.Curve)
}
rsaPubKey := constraints.keys[1].(*rsa.PublicKey)
if rsaPubKey.Size() != 256 {
t.Errorf("expected size 256 found %d", rsaPubKey.Size())
}
})
t.Run("Time", func(t *testing.T) {
now := time.Now()
wallclock := ast.NumberTerm(int64ToJSONNumber(now.UnixNano()))
t.Run("if provided, is parsed properly", func(t *testing.T) {
c := ast.NewObject()
c.Insert(ast.StringTerm("time"), wallclock)
constraints, err := parseTokenConstraints(c, ast.NumberTerm("12134"))
if err != nil {
t.Fatalf("parseTokenConstraints: %v", err)
}
if exp, act := float64(now.UnixNano()), constraints.time; exp != act {
t.Errorf("expected time constraint to be %f, got %f", exp, act)
}
})
t.Run("unset, defaults to wallclock", func(t *testing.T) {
c := ast.NewObject() // 'time' constraint is unset
constraints, err := parseTokenConstraints(c, wallclock)
if err != nil {
t.Fatalf("parseTokenConstraints: %v", err)
}
if exp, act := float64(now.UnixNano()), constraints.time; exp != act {
t.Errorf("expected time constraint to be %f, got %f", exp, act)
}
})
})
t.Run("Unrecognized", func(t *testing.T) {
c := ast.NewObject()
c.Insert(ast.StringTerm("whatever"), ast.StringTerm("junk"))
_, err := parseTokenConstraints(c, wallclock)
if err == nil {
t.Fatalf("parseTokenConstraints: %v", err)
}
})
}
func TestParseTokenHeader(t *testing.T) {
t.Run("Errors", func(t *testing.T) {
token := &JSONWebToken{
header: "",
}
if err := token.decodeHeader(); err == nil {
t.Fatalf("token.decodeHeader: %v", err)
}
token.header = "###"
if err := token.decodeHeader(); err == nil {
t.Fatalf("token.decodeHeader: %v", err)
}
token.header = base64.RawURLEncoding.EncodeToString([]byte(`{`))
if err := token.decodeHeader(); err == nil {
t.Fatalf("token.decodeHeader: %v", err)
}
token.header = base64.RawURLEncoding.EncodeToString([]byte(`{}`))
if err := token.decodeHeader(); err != nil {
t.Fatalf("token.decodeHeader: %v", err)
}
header, err := parseTokenHeader(token)
if err != nil {
t.Fatalf("parseTokenHeader: %v", err)
}
if header.valid() {
t.Fatalf("tokenHeader valid")
}
})
t.Run("Alg", func(t *testing.T) {
token := &JSONWebToken{
header: base64.RawURLEncoding.EncodeToString([]byte(`{"alg":"RS256"}`)),
}
if err := token.decodeHeader(); err != nil {
t.Fatalf("token.decodeHeader: %v", err)
}
header, err := parseTokenHeader(token)
if err != nil {
t.Fatalf("parseTokenHeader: %v", err)
}
if !header.valid() {
t.Fatalf("tokenHeader !valid")
}
if header.alg != "RS256" {
t.Fatalf("alg: %s", header.alg)
}
})
}
func TestTopDownJWTEncodeSignES256(t *testing.T) {
const examplePayload = `{"iss":"joe",` + "\r\n" + ` "exp":1300819380,` + "\r\n" + ` "http://example.com/is_root":true}`
const es256Hdr = `{"alg":"ES256"}`
const ecKey = `{
"kty":"EC",
"crv":"P-256",
"x":"f83OJ3D2xF1Bg8vub9tLe1gHMzV76e8Tus9uPHvRVEU",
"y":"x_FEzRu9m36HLN_tue659LNpXW6pCyStikYjKIWI5a0",
"d":"jpsQnnGQmL-YBIffH1136cspYG6-0iY7X1fCE9-E9LI"
}`
params := struct {
note string
input1 string
input2 string
input3 string
err string
}{
"https://tools.ietf.org/html/rfc7515#appendix-A.3",
"`" + es256Hdr + "`",
"`" + examplePayload + "`",
"`" + ecKey + "`",
"",
}
type test struct {
note string
rules []string
}
tc := test{
params.note,
[]string{fmt.Sprintf(`p = x { io.jwt.encode_sign_raw(%s, %s, %s, x) }`, params.input1, params.input2, params.input3)},
}
compiler, err := compileRules(nil, tc.rules, nil)
if err != nil {
t.Errorf("%v: Compiler error: %v", tc.note, err)
return
}
store := inmem.New()
path := []string{"generated", "p"}
var inputTerm *ast.Term
ctx := context.Background()
txn := storage.NewTransactionOrDie(ctx, store)
defer store.Abort(ctx, txn)
var lhs *ast.Term
if len(path) == 0 {
lhs = ast.NewTerm(ast.DefaultRootRef)
} else {
lhs = ast.MustParseTerm("data." + strings.Join(path, "."))
}
rhs := ast.VarTerm(ast.WildcardPrefix + "result")
body := ast.NewBody(ast.Equality.Expr(lhs, rhs))
query := NewQuery(body).
WithCompiler(compiler).
WithStore(store).
WithTransaction(txn).
WithInput(inputTerm)
var tracer BufferTracer
if os.Getenv("OPA_TRACE_TEST") != "" {
query = query.WithTracer(&tracer)
}
qrs, err := query.Run(ctx)
if tracer != nil {
PrettyTrace(os.Stdout, tracer)
}
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if len(qrs) == 0 {
t.Fatal("Undefined result")
}
result, err := ast.JSON(qrs[0][rhs.Value.(ast.Var)].Value)
if err != nil {
t.Fatal(err)
}
// Verification
standardHeaders := &jws.StandardHeaders{}
err = json.Unmarshal([]byte(es256Hdr), standardHeaders)
if err != nil {
t.Fatal("Failed to parse header")
}
alg := standardHeaders.GetAlgorithm()
keys, err := jwk.ParseString(ecKey)
if err != nil {
t.Fatal("Failed to parse JWK")
}
key, err := keys.Keys[0].Materialize()
if err != nil {
t.Fatal("Failed to create private key")
}
publicKey, err := jwk.GetPublicKey(key)
if err != nil {
t.Fatalf("failed to get public key: %v", err)
}
// Verify with vendor library
verifiedPayload, err := jws.Verify([]byte(result.(string)), alg, publicKey)
if err != nil || string(verifiedPayload) != examplePayload {
t.Fatal("Failed to verify message")
}
}
// TestTopDownJWTEncodeSignEC needs to perform all tests inline because we do not know the
// expected values beforehand
func TestTopDownJWTEncodeSignES512(t *testing.T) {
const examplePayload = `{"iss":"joe",` + "\r\n" + ` "exp":1300819380,` + "\r\n" + ` "http://example.com/is_root":true}`
const es512Hdr = `{"alg":"ES512"}`
const ecKey = `{
"kty":"EC",
"crv":"P-521",
"x":"AekpBQ8ST8a8VcfVOTNl353vSrDCLLJXmPk06wTjxrrjcBpXp5EOnYG_NjFZ6OvLFV1jSfS9tsz4qUxcWceqwQGk",
"y":"ADSmRA43Z1DSNx_RvcLI87cdL07l6jQyyBXMoxVg_l2Th-x3S1WDhjDly79ajL4Kkd0AZMaZmh9ubmf63e3kyMj2",
"d":"AY5pb7A0UFiB3RELSD64fTLOSV_jazdF7fLYyuTw8lOfRhWg6Y6rUrPAxerEzgdRhajnu0ferB0d53vM9mE15j2C"
}`
params := struct {
note string
input1 string
input2 string
input3 string
err string
}{
"https://tools.ietf.org/html/rfc7515#appendix-A.4",
"`" + es512Hdr + "`",
"`" + examplePayload + "`",
"`" + ecKey + "`",
"",
}
type test struct {
note string
rules []string
}
var tests []test
tests = append(tests, test{
params.note,
[]string{fmt.Sprintf(`p = x { io.jwt.encode_sign_raw(%s, %s, %s, x) }`, params.input1, params.input2, params.input3)},
})
tc := tests[0]
compiler, err := compileRules(nil, tc.rules, nil)
if err != nil {
t.Errorf("%v: Compiler error: %v", tc.note, err)
return
}
store := inmem.New()
path := []string{"generated", "p"}
var inputTerm *ast.Term
ctx := context.Background()
txn := storage.NewTransactionOrDie(ctx, store)
defer store.Abort(ctx, txn)
var lhs *ast.Term
if len(path) == 0 {
lhs = ast.NewTerm(ast.DefaultRootRef)
} else {
lhs = ast.MustParseTerm("data." + strings.Join(path, "."))
}
rhs := ast.VarTerm(ast.WildcardPrefix + "result")
body := ast.NewBody(ast.Equality.Expr(lhs, rhs))
query := NewQuery(body).
WithCompiler(compiler).
WithStore(store).
WithTransaction(txn).
WithInput(inputTerm)
var tracer BufferTracer
if os.Getenv("OPA_TRACE_TEST") != "" {
query = query.WithTracer(&tracer)
}
qrs, err := query.Run(ctx)
if tracer != nil {
PrettyTrace(os.Stdout, tracer)
}
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if len(qrs) == 0 {
t.Fatal("Undefined result")
}
result, err := ast.JSON(qrs[0][rhs.Value.(ast.Var)].Value)
if err != nil {
t.Fatal(err)
}
// Verification
standardHeaders := &jws.StandardHeaders{}
err = json.Unmarshal([]byte(es512Hdr), standardHeaders)
if err != nil {
t.Fatal("Failed to parse header")
}
alg := standardHeaders.GetAlgorithm()
keys, err := jwk.ParseString(ecKey)
if err != nil {
t.Fatalf("Failed to parse JWK: %v", err)
}
key, err := keys.Keys[0].Materialize()
if err != nil {
t.Fatalf("Failed to create private key: %v", err)
}
publicKey, err := jwk.GetPublicKey(key)
if err != nil {
t.Fatalf("Failed to get public key: %v", err)
}
// Verify with vendor library
verifiedPayload, err := jws.Verify([]byte(result.(string)), alg, publicKey)
if err != nil || string(verifiedPayload) != examplePayload {
t.Fatal("Failed to verify message")
}
}
// NOTE(sr): The stdlib ecdsa package will randomly read 1 byte from the source
// and discard it: so passing a fixed-seed `rand.New(rand.NewSource(seed))` via
// `rego.WithSeed` will not do the trick, the output would still randomly be
// one of two possible signatures. To fix that for testing, we're reaching
// deeper here, and use a "constant number generator". It doesn't matter if the
// first byte is discarded, the second one looks just the same.
type cng struct{}
func (*cng) Read(p []byte) (int, error) {
for i := range p {
p[i] = 4
}
return len(p), nil
}
func TestTopdownJWTEncodeSignECWithSeedReturnsSameSignature(t *testing.T) {
query := `io.jwt.encode_sign({"alg": "ES256"},{"pay": "load"},
{"kty":"EC",
"crv":"P-256",
"x":"f83OJ3D2xF1Bg8vub9tLe1gHMzV76e8Tus9uPHvRVEU",
"y":"x_FEzRu9m36HLN_tue659LNpXW6pCyStikYjKIWI5a0",
"d":"jpsQnnGQmL-YBIffH1136cspYG6-0iY7X1fCE9-E9LI"
}, x)`
encodedSigned := "eyJhbGciOiAiRVMyNTYifQ.eyJwYXkiOiAibG9hZCJ9.05wmHY3NomU1jr7yvusBvKwhthRklPuJhUPOkoeIn5e5n_GXvE25EfRs9AJK2wOy6NoY2ljhj07M9BMtV0dfyA"
if !strings.HasPrefix(runtime.Version(), "go1.18") {
encodedSigned = "eyJhbGciOiAiRVMyNTYifQ.eyJwYXkiOiAibG9hZCJ9.-LoHxtbT8t_TnqlLyONI4BtjvfkySO8TcoCFENqTTH2AKxvn29nAjxOdlbY-0EKVM2nJ4ukCx4IGtZtuwXr0VQ"
}
for i := 0; i < 10; i++ {
q := NewQuery(ast.MustParseBody(query)).
WithSeed(&cng{}).
WithStrictBuiltinErrors(true).
WithCompiler(ast.NewCompiler())
qrs, err := q.Run(context.Background())
if err != nil {
t.Fatal(err)
} else if len(qrs) != 1 {
t.Fatal("expected exactly one result but got:", qrs)
}
if exp, act := 1, len(qrs); exp != act {
t.Fatalf("expected %d results, got %d", exp, act)
}
if exp, act := ast.String(encodedSigned), qrs[0][ast.Var("x")].Value; !exp.Equal(act) {
t.Fatalf("unexpected result: want %v, got %v", exp, act)
}
}
}
| [
"\"OPA_TRACE_TEST\"",
"\"OPA_TRACE_TEST\""
]
| []
| [
"OPA_TRACE_TEST"
]
| [] | ["OPA_TRACE_TEST"] | go | 1 | 0 | |
pkg/network/network_linux.go | // Copyright (c) 2021 Apptainer a Series of LF Projects LLC
// For website terms of use, trademark policy, privacy policy and other
// project policies see https://lfprojects.org/policies
// Copyright (c) 2018-2019, Sylabs Inc. All rights reserved.
// This software is licensed under a 3-clause BSD license. Please consult the
// LICENSE.md file distributed with the sources of this project regarding your
// rights to use or distribute this software.
package network
import (
"context"
"fmt"
"net"
"os"
"sort"
"strconv"
"strings"
"time"
"golang.org/x/sys/unix"
"github.com/apptainer/apptainer/internal/pkg/util/env"
"github.com/containernetworking/cni/libcni"
"github.com/containernetworking/cni/pkg/types"
cnitypes "github.com/containernetworking/cni/pkg/types/100"
"github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator"
)
type netError string
func (e netError) Error() string { return string(e) }
const (
// ErrNoCNIConfig corresponds to a missing CNI configuration path
ErrNoCNIConfig = netError("no CNI configuration path provided")
// ErrNoCNIPlugin corresponds to a missing CNI plugin path
ErrNoCNIPlugin = netError("no CNI plugin path provided")
)
// CNIPath contains path to CNI configuration directory and path to executable
// CNI plugins directory
type CNIPath struct {
Conf string
Plugin string
}
// Setup contains network installation setup
type Setup struct {
networks []string
networkConfList []*libcni.NetworkConfigList
runtimeConf []*libcni.RuntimeConf
result []types.Result
cniPath *CNIPath
containerID string
netNS string
envPath string
}
// PortMapEntry describes a port mapping between host and container
type PortMapEntry struct {
HostPort int `json:"hostPort"`
ContainerPort int `json:"containerPort"`
Protocol string `json:"protocol"`
HostIP string `json:"hostIP,omitempty"`
}
// GetAllNetworkConfigList lists configured networks in configuration path directory
// provided by cniPath
func GetAllNetworkConfigList(cniPath *CNIPath) ([]*libcni.NetworkConfigList, error) {
networks := make([]*libcni.NetworkConfigList, 0)
if cniPath == nil {
return networks, ErrNoCNIConfig
}
if cniPath.Conf == "" {
return networks, ErrNoCNIConfig
}
files, err := libcni.ConfFiles(cniPath.Conf, []string{".conf", ".json", ".conflist"})
if err != nil {
return nil, err
} else if len(files) == 0 {
return nil, libcni.NoConfigsFoundError{Dir: cniPath.Conf}
}
sort.Strings(files)
for _, file := range files {
if strings.HasSuffix(file, ".conflist") {
conf, err := libcni.ConfListFromFile(file)
if err != nil {
return nil, fmt.Errorf("%s: %s", file, err)
}
networks = append(networks, conf)
} else {
conf, err := libcni.ConfFromFile(file)
if err != nil {
return nil, fmt.Errorf("%s: %s", file, err)
}
confList, err := libcni.ConfListFromConf(conf)
if err != nil {
return nil, fmt.Errorf("%s: %s", file, err)
}
networks = append(networks, confList)
}
}
return networks, nil
}
// NewSetup creates and returns a network setup to configure, add and remove
// network interfaces in container
func NewSetup(networks []string, containerID string, netNS string, cniPath *CNIPath) (*Setup, error) {
if cniPath == nil {
return nil, ErrNoCNIConfig
}
if cniPath.Conf == "" {
return nil, ErrNoCNIConfig
}
networkConfList := make([]*libcni.NetworkConfigList, len(networks))
for i, network := range networks {
var err error
networkConfList[i], err = libcni.LoadConfList(cniPath.Conf, network)
if err != nil {
return nil, err
}
}
return NewSetupFromConfig(networkConfList, containerID, netNS, cniPath)
}
// NewSetupFromConfig creates and returns network setup to configure from
// a network configuration list
func NewSetupFromConfig(networkConfList []*libcni.NetworkConfigList, containerID string, netNS string, cniPath *CNIPath) (*Setup, error) {
id := containerID
if id == "" {
id = strconv.Itoa(os.Getpid())
}
if cniPath == nil {
return nil, ErrNoCNIConfig
}
if cniPath.Conf == "" {
return nil, ErrNoCNIConfig
}
if cniPath.Plugin == "" {
return nil, ErrNoCNIPlugin
}
runtimeConf := make([]*libcni.RuntimeConf, len(networkConfList))
networks := make([]string, len(networkConfList))
ifIndex := 0
for i, conf := range networkConfList {
runtimeConf[i] = &libcni.RuntimeConf{
ContainerID: containerID,
NetNS: netNS,
IfName: fmt.Sprintf("eth%d", ifIndex),
CapabilityArgs: make(map[string]interface{}),
Args: make([][2]string, 0),
}
networks[i] = conf.Name
ifIndex++
}
return &Setup{
networks: networks,
networkConfList: networkConfList,
runtimeConf: runtimeConf,
cniPath: cniPath,
netNS: netNS,
containerID: id,
},
nil
}
func parseArg(arg string) ([][2]string, error) {
argList := make([][2]string, 0)
pairs := strings.Split(arg, ";")
for _, pair := range pairs {
keyVal := strings.Split(pair, "=")
if len(keyVal) != 2 {
return nil, fmt.Errorf("invalid argument: %s", pair)
}
argList = append(argList, [2]string{keyVal[0], keyVal[1]})
}
return argList, nil
}
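// exampleParseArg is an illustrative sketch added for documentation only; it is not part
// of the original Apptainer source. It shows the shape of parseArg's output for a typical
// semicolon-separated argument string (the key/value pairs below are made-up examples).
func exampleParseArg() {
	args, err := parseArg("IP=10.22.0.5;GATEWAY=10.22.0.1")
	if err != nil {
		fmt.Println("unexpected error:", err)
		return
	}
	// args now holds [][2]string{{"IP", "10.22.0.5"}, {"GATEWAY", "10.22.0.1"}}
	for _, kv := range args {
		fmt.Printf("%s=%s\n", kv[0], kv[1])
	}
}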
// SetCapability sets capability arguments for the corresponding network plugin
// used by a configured network
func (m *Setup) SetCapability(network string, capName string, args interface{}) error {
for i := range m.networks {
if m.networks[i] == network {
hasCap := false
for _, plugin := range m.networkConfList[i].Plugins {
if plugin.Network.Capabilities[capName] {
hasCap = true
break
}
}
if !hasCap {
return fmt.Errorf("%s network doesn't have %s capability", network, capName)
}
switch args := args.(type) {
case PortMapEntry:
if m.runtimeConf[i].CapabilityArgs[capName] == nil {
m.runtimeConf[i].CapabilityArgs[capName] = make([]PortMapEntry, 0)
}
m.runtimeConf[i].CapabilityArgs[capName] = append(
m.runtimeConf[i].CapabilityArgs[capName].([]PortMapEntry),
args,
)
case []allocator.Range:
if m.runtimeConf[i].CapabilityArgs[capName] == nil {
m.runtimeConf[i].CapabilityArgs[capName] = []allocator.RangeSet{args}
}
}
}
}
return nil
}
// SetArgs applies arguments to the corresponding network plugins
func (m *Setup) SetArgs(args []string) error {
if len(m.networks) < 1 {
return fmt.Errorf("there is no configured network in list")
}
// Force plugins to ignore extra CNI_ARGS that they don't consume.
// If we don't do this we get an error when e.g. passing IP= to a
// bridge+ipam config, as bridge now handles args from v1.0.1, but
// doesn't consume IP.
for i := range m.networks {
m.runtimeConf[i].Args = append(m.runtimeConf[i].Args, [2]string{"IgnoreUnknown", "1"})
}
for _, arg := range args {
var splitted []string
networkName := ""
if strings.IndexByte(arg, ':') > strings.IndexByte(arg, '=') {
splitted = []string{m.networks[0], arg}
} else {
splitted = strings.SplitN(arg, ":", 2)
}
if len(splitted) < 1 && len(splitted) > 2 {
return fmt.Errorf("argument must be of form '<network>:KEY1=value1;KEY2=value1' or 'KEY1=value1;KEY2=value1'")
}
n := len(splitted) - 1
if n == 0 {
networkName = m.networks[0]
} else {
networkName = splitted[0]
}
hasNetwork := false
for _, network := range m.networks {
if network == networkName {
hasNetwork = true
break
}
}
if !hasNetwork {
return fmt.Errorf("network %s wasn't specified in --network option", networkName)
}
argList, err := parseArg(splitted[n])
if err != nil {
return err
}
for _, kv := range argList {
key := kv[0]
value := kv[1]
if key == "portmap" {
pm := &PortMapEntry{}
splittedPort := strings.SplitN(value, "/", 2)
if len(splittedPort) != 2 {
return fmt.Errorf("badly formatted portmap argument '%s', must be of form portmap=hostPort:containerPort/protocol", value)
}
pm.Protocol = splittedPort[1]
if pm.Protocol != "tcp" && pm.Protocol != "udp" {
return fmt.Errorf("only tcp and udp protocol can be specified")
}
ports := strings.Split(splittedPort[0], ":")
if len(ports) != 1 && len(ports) != 2 {
return fmt.Errorf("portmap port argument is badly formatted")
}
if n, err := strconv.ParseUint(ports[0], 0, 16); err == nil {
pm.HostPort = int(n)
if pm.HostPort <= 0 || pm.HostPort > 65535 {
return fmt.Errorf("host port must be greater than 0 and less than 65535")
}
} else {
return fmt.Errorf("can't convert host port '%s': %s", ports[0], err)
}
if len(ports) == 2 {
if n, err := strconv.ParseUint(ports[1], 0, 16); err == nil {
pm.ContainerPort = int(n)
if pm.ContainerPort <= 0 || pm.ContainerPort > 65535 {
return fmt.Errorf("container port must be greater than 0 and less than 65535")
}
} else {
return fmt.Errorf("can't convert container port '%s': %s", ports[1], err)
}
} else {
pm.ContainerPort = pm.HostPort
}
if err := m.SetCapability(networkName, "portMappings", *pm); err != nil {
return err
}
} else if key == "ipRange" {
ipRange := make([]allocator.Range, 1)
_, subnet, err := net.ParseCIDR(value)
if err != nil {
return err
}
ipRange[0].Subnet = types.IPNet(*subnet)
if err := m.SetCapability(networkName, "ipRanges", ipRange); err != nil {
return err
}
} else {
for i := range m.networks {
if m.networks[i] == networkName {
m.runtimeConf[i].Args = append(m.runtimeConf[i].Args, kv)
}
}
}
}
}
return nil
}
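// exampleSetArgs is an illustrative sketch, not part of the original file. It shows how
// CLI-style network arguments are typically handed to SetArgs: a portmap entry applied to
// the first configured network, and a plugin argument scoped to a hypothetical network
// named "bridge" (both values are made-up examples).
func exampleSetArgs(setup *Setup) error {
	return setup.SetArgs([]string{
		"portmap=8080:80/tcp", // maps host port 8080 to container port 80 over TCP
		"bridge:IP=10.22.0.5", // passes IP=10.22.0.5 to the "bridge" network's plugins
	})
}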
// GetNetworkIP returns the IP associated with a configured network. If network
// is empty, the function returns the IP for the first configured network
func (m *Setup) GetNetworkIP(network string, version string) (net.IP, error) {
n := network
if n == "" && len(m.networkConfList) > 0 {
n = m.networkConfList[0].Name
}
for i := 0; i < len(m.networkConfList); i++ {
if m.networkConfList[i].Name == n {
res, err := cnitypes.NewResultFromResult(m.result[i])
if err != nil {
return nil, fmt.Errorf("could not convert result: %v", err)
}
for _, ipResult := range res.IPs {
is4 := ipResult.Address.IP.To4() != nil
if (is4 && version == "4") || version == "6" {
return ipResult.Address.IP, nil
}
}
break
}
}
return nil, fmt.Errorf("no IP found for network %s", network)
}
// GetNetworkInterface returns the container network interface associated
// with a network. If network is empty, the function returns the interface
// for the first configured network
func (m *Setup) GetNetworkInterface(network string) (string, error) {
if network == "" && len(m.runtimeConf) > 0 {
return m.runtimeConf[0].IfName, nil
}
for i := 0; i < len(m.networkConfList); i++ {
if m.networkConfList[i].Name == network {
return m.runtimeConf[i].IfName, nil
}
}
return "", fmt.Errorf("no interface found for network %s", network)
}
// SetPortProtection provides a basic mechanism to prevent port hijacking
func (m *Setup) SetPortProtection(network string, lowPort int) error {
idx := -1
for i := 0; i < len(m.networkConfList); i++ {
if m.networkConfList[i].Name == network {
idx = i
break
}
}
if idx < 0 {
return fmt.Errorf("no configuration found for network %s", network)
}
entries, ok := m.runtimeConf[idx].CapabilityArgs["portMappings"].([]PortMapEntry)
if !ok {
return nil
}
for _, e := range entries {
sockProt := unix.IPPROTO_TCP
sockType := unix.SOCK_STREAM
if e.HostPort <= lowPort {
return fmt.Errorf("not authorized to map port under %d", lowPort)
}
if e.Protocol == "udp" {
sockProt = unix.IPPROTO_UDP
sockType = unix.SOCK_DGRAM
}
fd, err := unix.Socket(unix.AF_INET, sockType, sockProt)
if err != nil {
return fmt.Errorf("failed to create %s socket on port %d: %s", e.Protocol, e.HostPort, err)
}
err = unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
if err != nil {
return fmt.Errorf("failed to set reuseport for %s socket on port %d: %s", e.Protocol, e.HostPort, err)
}
sockAddr := &unix.SockaddrInet4{
Port: e.HostPort,
}
err = unix.Bind(fd, sockAddr)
if err != nil {
return fmt.Errorf("failed to bind %s socket on port %d: %s", e.Protocol, e.HostPort, err)
}
if sockType == unix.SOCK_STREAM {
err = unix.Listen(fd, 1)
if err != nil {
return fmt.Errorf("failed to listen on %s socket port %d: %s", e.Protocol, e.HostPort, err)
}
}
}
return nil
}
// SetEnvPath sets a custom PATH environment variable value to use
// during CNI plugin execution
func (m *Setup) SetEnvPath(envPath string) {
m.envPath = envPath
}
// AddNetworks brings up networks interface in container
func (m *Setup) AddNetworks(ctx context.Context) error {
return m.command(ctx, "ADD")
}
// DelNetworks tears down networks interface in container
func (m *Setup) DelNetworks(ctx context.Context) error {
return m.command(ctx, "DEL")
}
func (m *Setup) command(ctx context.Context, command string) error {
if m.envPath != "" {
backupEnv := os.Environ()
os.Clearenv()
os.Setenv("PATH", m.envPath)
defer env.SetFromList(backupEnv)
}
config := &libcni.CNIConfig{Path: []string{m.cniPath.Plugin}}
// set a timeout context for the execution of the CNI plugin
// to interrupt its execution if it takes more than 5 seconds
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
if command == "ADD" {
m.result = make([]types.Result, len(m.networkConfList))
for i := 0; i < len(m.networkConfList); i++ {
var err error
if m.result[i], err = config.AddNetworkList(ctx, m.networkConfList[i], m.runtimeConf[i]); err != nil {
for j := i - 1; j >= 0; j-- {
if err := config.DelNetworkList(ctx, m.networkConfList[j], m.runtimeConf[j]); err != nil {
return err
}
}
return err
}
}
} else if command == "DEL" {
for i := 0; i < len(m.networkConfList); i++ {
if err := config.DelNetworkList(ctx, m.networkConfList[i], m.runtimeConf[i]); err != nil {
return err
}
}
}
return nil
}
| []
| []
| []
| [] | [] | go | null | null | null |
sys/targets/targets.go | // Copyright 2017 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
package targets
import (
"encoding/binary"
"fmt"
"os"
"os/exec"
"runtime"
"strings"
"sync"
)
type Target struct {
osCommon
OS string
Arch string
VMArch string // e.g. amd64 for 386, or arm64 for arm
PtrSize uint64
PageSize uint64
NumPages uint64
DataOffset uint64
Int64Alignment uint64
LittleEndian bool
CFlags []string
Triple string
CCompiler string
Objdump string // name of objdump executable
KernelCompiler string // override CC when running kernel make
KernelLinker string // override LD when running kernel make
KernelArch string
KernelHeaderArch string
BrokenCompiler string
// NeedSyscallDefine is used by csource package to decide when to emit __NR_* defines.
NeedSyscallDefine func(nr uint64) bool
HostEndian binary.ByteOrder
SyscallTrampolines map[string]string
init *sync.Once
initOther *sync.Once
// Target for the other compiler. If SYZ_CLANG says to use gcc, this will be clang. Or the other way around.
other *Target
}
type osCommon struct {
// What OS can build native binaries for this OS.
// If not set, defaults to itself (i.e. native build).
// Later we can extend this to be a list, but so far we don't have more than one OS.
BuildOS string
// Does the OS use syscall numbers (e.g. Linux) or has interface based on functions (e.g. fuchsia).
SyscallNumbers bool
// Syscalls accept int64 arguments (>sizeof(void*)).
Int64SyscallArgs bool
// E.g. "__NR_" or "SYS_".
SyscallPrefix string
// ipc<->executor communication tuning.
// If ExecutorUsesShmem, programs and coverage are passed through shmem, otherwise via pipes.
ExecutorUsesShmem bool
// If ExecutorUsesForkServer, executor uses extended protocol with handshake.
ExecutorUsesForkServer bool
// Special mode for OSes that do not have support for building Go binaries.
// In this mode we run Go binaries on the host machine, only executor runs on target.
HostFuzzer bool
// How to run syz-executor directly.
// Some systems build syz-executor into their images.
// If this flag is not empty, syz-executor will not be copied to the machine, and will be run using
// this command instead.
SyzExecutorCmd string
// Extension of executable files (notably, .exe for windows).
ExeExtension string
// Name of the kernel object file.
KernelObject string
// Name of cpp(1) executable.
CPP string
// Common CFLAGS for this OS.
cflags []string
}
const (
Akaros = "akaros"
FreeBSD = "freebsd"
Fuchsia = "fuchsia"
Linux = "linux"
NetBSD = "netbsd"
OpenBSD = "openbsd"
TestOS = "test"
Trusty = "trusty"
Windows = "windows"
AMD64 = "amd64"
ARM64 = "arm64"
ARM = "arm"
I386 = "386"
MIPS64LE = "mips64le"
PPC64LE = "ppc64le"
S390x = "s390x"
RiscV64 = "riscv64"
TestArch64 = "64"
TestArch64Fork = "64_fork"
TestArch32Shmem = "32_shmem"
TestArch32ForkShmem = "32_fork_shmem"
)
func Get(OS, arch string) *Target {
return GetEx(OS, arch, useClang)
}
func GetEx(OS, arch string, clang bool) *Target {
target := List[OS][arch]
if target == nil {
return nil
}
if clang == useClang {
target.init.Do(target.lazyInit)
return target
}
target.initOther.Do(func() {
other := new(Target)
*other = *target
other.setCompiler(clang)
other.lazyInit()
target.other = other
})
return target.other
}
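// exampleGetTarget is an illustrative sketch, not part of the original syzkaller source.
// It shows how callers typically look up a target description and read a few of its fields.
func exampleGetTarget() {
	target := Get(Linux, AMD64)
	if target == nil {
		fmt.Println("unknown OS/arch combination")
		return
	}
	fmt.Println(target.CCompiler, target.PtrSize, target.KernelHeaderArch)
}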
// nolint: lll
var List = map[string]map[string]*Target{
TestOS: {
TestArch64: {
PtrSize: 8,
PageSize: 4 << 10,
// Compile with -no-pie due to issues with ASan + ASLR on ppc64le.
CFlags: []string{"-m64", "-fsanitize=address", "-no-pie"},
osCommon: osCommon{
SyscallNumbers: true,
SyscallPrefix: "SYS_",
ExecutorUsesShmem: false,
ExecutorUsesForkServer: false,
},
},
TestArch64Fork: {
PtrSize: 8,
PageSize: 8 << 10,
// Compile with -no-pie due to issues with ASan + ASLR on ppc64le.
CFlags: []string{"-m64", "-fsanitize=address", "-no-pie"},
osCommon: osCommon{
SyscallNumbers: true,
SyscallPrefix: "SYS_",
ExecutorUsesShmem: false,
ExecutorUsesForkServer: true,
},
},
TestArch32Shmem: {
PtrSize: 4,
PageSize: 8 << 10,
Int64Alignment: 4,
CFlags: []string{"-m32", "-static"},
osCommon: osCommon{
SyscallNumbers: true,
Int64SyscallArgs: true,
SyscallPrefix: "SYS_",
ExecutorUsesShmem: true,
ExecutorUsesForkServer: false,
},
},
TestArch32ForkShmem: {
PtrSize: 4,
PageSize: 4 << 10,
CFlags: []string{"-m32", "-static"},
osCommon: osCommon{
SyscallNumbers: true,
Int64SyscallArgs: true,
SyscallPrefix: "SYS_",
ExecutorUsesShmem: true,
ExecutorUsesForkServer: true,
HostFuzzer: true,
},
},
},
Linux: {
AMD64: {
PtrSize: 8,
PageSize: 4 << 10,
LittleEndian: true,
CFlags: []string{"-m64"},
Triple: "x86_64-linux-gnu",
KernelArch: "x86_64",
KernelHeaderArch: "x86",
NeedSyscallDefine: func(nr uint64) bool {
// Only generate defines for new syscalls
// (added after commit 8a1ab3155c2ac on 2012-10-04).
return nr >= 313
},
},
I386: {
VMArch: AMD64,
PtrSize: 4,
PageSize: 4 << 10,
Int64Alignment: 4,
LittleEndian: true,
CFlags: []string{"-m32"},
Triple: "x86_64-linux-gnu",
KernelArch: "i386",
KernelHeaderArch: "x86",
},
ARM64: {
PtrSize: 8,
PageSize: 4 << 10,
LittleEndian: true,
Triple: "aarch64-linux-gnu",
KernelArch: "arm64",
KernelHeaderArch: "arm64",
},
ARM: {
VMArch: ARM64,
PtrSize: 4,
PageSize: 4 << 10,
LittleEndian: true,
CFlags: []string{"-D__LINUX_ARM_ARCH__=6", "-march=armv6"},
Triple: "arm-linux-gnueabi",
KernelArch: "arm",
KernelHeaderArch: "arm",
},
MIPS64LE: {
PtrSize: 8,
PageSize: 4 << 10,
LittleEndian: true,
CFlags: []string{"-march=mips64r2", "-mabi=64", "-EL"},
Triple: "mips64el-linux-gnuabi64",
KernelArch: "mips",
KernelHeaderArch: "mips",
},
PPC64LE: {
PtrSize: 8,
PageSize: 64 << 10,
LittleEndian: true,
CFlags: []string{"-D__powerpc64__"},
Triple: "powerpc64le-linux-gnu",
KernelArch: "powerpc",
KernelHeaderArch: "powerpc",
},
S390x: {
PtrSize: 8,
PageSize: 4 << 10,
DataOffset: 0xfffff000,
LittleEndian: false,
Triple: "s390x-linux-gnu",
KernelArch: "s390",
KernelHeaderArch: "s390",
SyscallTrampolines: map[string]string{
				// The s390x Linux syscall ABI allows for up to 5 input parameters passed in registers, and this is not enough
// for the mmap syscall. Therefore, all input parameters for the mmap syscall are packed into a struct
// on user stack and the pointer to the struct is passed as an input parameter to the syscall.
// To work around this problem we therefore reroute the mmap syscall to the glibc mmap wrapper.
"mmap": "mmap",
},
},
RiscV64: {
PtrSize: 8,
PageSize: 4 << 10,
LittleEndian: true,
Triple: "riscv64-linux-gnu",
KernelArch: "riscv",
KernelHeaderArch: "riscv",
},
},
FreeBSD: {
AMD64: {
PtrSize: 8,
PageSize: 4 << 10,
LittleEndian: true,
CCompiler: "clang",
CFlags: []string{"-m64"},
NeedSyscallDefine: dontNeedSyscallDefine,
},
I386: {
VMArch: AMD64,
PtrSize: 4,
PageSize: 4 << 10,
// The default DataOffset doesn't work with 32-bit
// FreeBSD and using ld.lld due to collisions.
DataOffset: 256 << 20,
Int64Alignment: 4,
LittleEndian: true,
CCompiler: "clang",
CFlags: []string{"-m32"},
NeedSyscallDefine: dontNeedSyscallDefine,
},
},
NetBSD: {
AMD64: {
PtrSize: 8,
PageSize: 4 << 10,
LittleEndian: true,
CFlags: []string{
"-m64",
"-static",
"--sysroot", sourceDirVar + "/dest/",
},
CCompiler: sourceDirVar + "/tools/bin/x86_64--netbsd-g++",
},
},
OpenBSD: {
AMD64: {
PtrSize: 8,
PageSize: 4 << 10,
LittleEndian: true,
CCompiler: "c++",
CFlags: []string{"-m64", "-static", "-lutil"},
NeedSyscallDefine: func(nr uint64) bool {
switch nr {
case 8: // SYS___tfork
return true
case 94: // SYS___thrsleep
return true
case 198: // SYS___syscall
return true
case 295: // SYS___semctl
return true
case 301: // SYS___thrwakeup
return true
case 302: // SYS___threxit
return true
case 303: // SYS___thrsigdivert
return true
case 304: // SYS___getcwd
return true
case 329: // SYS___set_tcb
return true
case 330: // SYS___get_tcb
return true
}
return false
},
},
},
Fuchsia: {
AMD64: {
PtrSize: 8,
PageSize: 4 << 10,
LittleEndian: true,
KernelHeaderArch: "x64",
CCompiler: sourceDirVar + "/prebuilt/third_party/clang/linux-x64/bin/clang",
Objdump: sourceDirVar + "/prebuilt/third_party/clang/linux-x64/bin/llvm-objdump",
CFlags: fuchsiaCFlags("x64", "x86_64"),
},
ARM64: {
PtrSize: 8,
PageSize: 4 << 10,
LittleEndian: true,
KernelHeaderArch: ARM64,
CCompiler: sourceDirVar + "/prebuilt/third_party/clang/linux-x64/bin/clang",
Objdump: sourceDirVar + "/prebuilt/third_party/clang/linux-x64/bin/llvm-objdump",
CFlags: fuchsiaCFlags(ARM64, "aarch64"),
},
},
Windows: {
AMD64: {
PtrSize: 8,
// TODO(dvyukov): what should we do about 4k vs 64k?
PageSize: 4 << 10,
LittleEndian: true,
},
},
Akaros: {
AMD64: {
PtrSize: 8,
PageSize: 4 << 10,
LittleEndian: true,
KernelHeaderArch: "x86",
NeedSyscallDefine: dontNeedSyscallDefine,
CCompiler: sourceDirVar + "/toolchain/x86_64-ucb-akaros-gcc/bin/x86_64-ucb-akaros-g++",
CFlags: []string{
"-static",
},
},
},
Trusty: {
ARM: {
PtrSize: 4,
PageSize: 4 << 10,
LittleEndian: true,
NeedSyscallDefine: dontNeedSyscallDefine,
},
},
}
var oses = map[string]osCommon{
Linux: {
SyscallNumbers: true,
SyscallPrefix: "__NR_",
ExecutorUsesShmem: true,
ExecutorUsesForkServer: true,
KernelObject: "vmlinux",
cflags: []string{"-static"},
},
FreeBSD: {
SyscallNumbers: true,
Int64SyscallArgs: true,
SyscallPrefix: "SYS_",
ExecutorUsesShmem: true,
ExecutorUsesForkServer: true,
KernelObject: "kernel.full",
CPP: "g++",
cflags: []string{"-static", "-lc++"},
},
NetBSD: {
BuildOS: Linux,
SyscallNumbers: true,
SyscallPrefix: "SYS_",
ExecutorUsesShmem: true,
ExecutorUsesForkServer: true,
KernelObject: "netbsd.gdb",
},
OpenBSD: {
SyscallNumbers: true,
SyscallPrefix: "SYS_",
ExecutorUsesShmem: true,
ExecutorUsesForkServer: true,
KernelObject: "bsd.gdb",
CPP: "ecpp",
},
Fuchsia: {
BuildOS: Linux,
SyscallNumbers: false,
ExecutorUsesShmem: false,
ExecutorUsesForkServer: false,
HostFuzzer: true,
SyzExecutorCmd: "syz-executor",
KernelObject: "zircon.elf",
},
Windows: {
SyscallNumbers: false,
ExecutorUsesShmem: false,
ExecutorUsesForkServer: false,
ExeExtension: ".exe",
KernelObject: "vmlinux",
},
Akaros: {
BuildOS: Linux,
SyscallNumbers: true,
SyscallPrefix: "SYS_",
ExecutorUsesShmem: false,
ExecutorUsesForkServer: true,
HostFuzzer: true,
KernelObject: "akaros-kernel-64b",
},
Trusty: {
SyscallNumbers: true,
Int64SyscallArgs: true,
SyscallPrefix: "__NR_",
},
}
var (
commonCFlags = []string{
"-O2",
"-pthread",
"-Wall",
"-Werror",
"-Wparentheses",
"-Wunused-const-variable",
"-Wframe-larger-than=16384", // executor uses stacks of limited size, so no jumbo frames
}
optionalCFlags = map[string]bool{
"-static": true, // some distributions don't have static libraries
"-Wunused-const-variable": true, // gcc 5 does not support this flag
"-fsanitize=address": true, // some OSes don't have ASAN
}
)
func fuchsiaCFlags(arch, clangArch string) []string {
out := sourceDirVar + "/out/" + arch
return []string{
"-Wno-deprecated",
"-target", clangArch + "-fuchsia",
"-ldriver",
"-lfdio",
"-lzircon",
"--sysroot", out + "/zircon_toolchain/obj/zircon/public/sysroot/sysroot",
"-I", sourceDirVar + "/sdk/lib/fdio/include",
"-I", sourceDirVar + "/zircon/system/ulib/fidl/include",
"-I", sourceDirVar + "/src/lib/ddk/include",
"-I", out + "/fidling/gen/sdk/fidl/fuchsia.device",
"-I", out + "/fidling/gen/sdk/fidl/fuchsia.device.manager",
"-I", out + "/fidling/gen/sdk/fidl/fuchsia.hardware.nand",
"-I", out + "/fidling/gen/sdk/fidl/fuchsia.hardware.power.statecontrol",
"-I", out + "/fidling/gen/sdk/fidl/fuchsia.hardware.usb.peripheral",
"-I", out + "/fidling/gen/zircon/vdso/zx",
"-L", out + "/" + arch + "-shared",
}
}
func init() {
for OS, archs := range List {
for arch, target := range archs {
initTarget(target, OS, arch)
}
}
goarch := runtime.GOARCH
goos := runtime.GOOS
if goos == "android" {
goos = Linux
}
for _, target := range List[TestOS] {
if List[goos] != nil {
if host := List[goos][goarch]; host != nil {
target.CCompiler = host.CCompiler
target.CPP = host.CPP
if goos == FreeBSD {
// For some configurations -no-pie is passed to the compiler,
// which is not used by clang.
// Ensure clang does not complain about it.
target.CFlags = append(target.CFlags, "-Wno-unused-command-line-argument")
// When building executor for the test OS, clang needs
// to link against the libc++ library.
target.CFlags = append(target.CFlags, "-lc++")
}
				// In ESA/390 mode, the CPU is able to address only 31 bits of memory, but
				// arithmetic operations are still 32-bit.
				// Fix cflags by replacing the compiler's -m32 option with -m31.
if goarch == S390x {
for i := range target.CFlags {
target.CFlags[i] = strings.Replace(target.CFlags[i], "-m32", "-m31", -1)
}
}
}
if target.PtrSize == 4 && goos == FreeBSD && goarch == AMD64 {
// A hack to let 32-bit "test" target tests run on FreeBSD:
// freebsd/386 requires a non-default DataOffset to avoid
// clobbering mappings created by the C runtime. Since that is the
// only target with this constraint, just special-case it for now.
target.DataOffset = List[goos][I386].DataOffset
}
}
target.BuildOS = goos
}
}
func initTarget(target *Target, OS, arch string) {
if common, ok := oses[OS]; ok {
target.osCommon = common
}
target.init = new(sync.Once)
target.initOther = new(sync.Once)
target.OS = OS
target.Arch = arch
if target.KernelArch == "" {
target.KernelArch = target.Arch
}
if target.NeedSyscallDefine == nil {
target.NeedSyscallDefine = needSyscallDefine
}
if target.DataOffset == 0 {
target.DataOffset = 512 << 20
}
target.NumPages = (16 << 20) / target.PageSize
sourceDir := os.Getenv("SOURCEDIR_" + strings.ToUpper(OS))
if sourceDir == "" {
sourceDir = os.Getenv("SOURCEDIR")
}
for sourceDir != "" && sourceDir[len(sourceDir)-1] == '/' {
sourceDir = sourceDir[:len(sourceDir)-1]
}
target.replaceSourceDir(&target.CCompiler, sourceDir)
target.replaceSourceDir(&target.Objdump, sourceDir)
for i := range target.CFlags {
target.replaceSourceDir(&target.CFlags[i], sourceDir)
}
if OS == Linux && arch == runtime.GOARCH {
// Don't use cross-compiler for native compilation, there are cases when this does not work:
// https://github.com/google/syzkaller/pull/619
// https://github.com/google/syzkaller/issues/387
// https://github.com/google/syzkaller/commit/06db3cec94c54e1cf720cdd5db72761514569d56
target.Triple = ""
}
if target.CCompiler == "" {
target.setCompiler(useClang)
}
if target.CPP == "" {
target.CPP = "cpp"
}
if target.Objdump == "" {
target.Objdump = "objdump"
if target.Triple != "" {
target.Objdump = target.Triple + "-objdump"
}
}
if target.BuildOS == "" {
target.BuildOS = OS
}
if runtime.GOOS != target.BuildOS {
// Spoil native binaries if they are not usable, so that nobody tries to use them later.
target.CCompiler = fmt.Sprintf("cant-build-%v-on-%v", target.OS, runtime.GOOS)
target.CPP = target.CCompiler
}
for _, flags := range [][]string{commonCFlags, target.osCommon.cflags} {
target.CFlags = append(target.CFlags, flags...)
}
if OS == TestOS {
if runtime.GOARCH != S390x {
target.LittleEndian = true
} else {
target.LittleEndian = false
}
}
if target.LittleEndian {
target.HostEndian = binary.LittleEndian
} else {
target.HostEndian = binary.BigEndian
}
}
func (target *Target) setCompiler(clang bool) {
// setCompiler may be called effectively twice for target.other,
// so first we remove flags the previous call may have added.
pos := 0
for _, flag := range target.CFlags {
if flag == "-ferror-limit=0" ||
strings.HasPrefix(flag, "--target=") {
continue
}
target.CFlags[pos] = flag
pos++
}
target.CFlags = target.CFlags[:pos]
if clang {
target.CCompiler = "clang"
target.KernelCompiler = "clang"
target.KernelLinker = "ld.lld"
if target.Triple != "" {
target.CFlags = append(target.CFlags, "--target="+target.Triple)
}
target.CFlags = append(target.CFlags, "-ferror-limit=0")
} else {
target.CCompiler = "gcc"
target.KernelCompiler = ""
target.KernelLinker = ""
if target.Triple != "" {
target.CCompiler = target.Triple + "-" + target.CCompiler
}
}
}
func (target *Target) replaceSourceDir(param *string, sourceDir string) {
if !strings.Contains(*param, sourceDirVar) {
return
}
if sourceDir == "" {
target.BrokenCompiler = "SOURCEDIR is not set"
return
}
*param = strings.Replace(*param, sourceDirVar, sourceDir, -1)
}
func (target *Target) lazyInit() {
if runtime.GOOS != target.BuildOS || target.BrokenCompiler != "" {
return
}
// Only fail on CI for native build.
// On CI we want to fail loudly if cross-compilation breaks.
// Also fail if SOURCEDIR_GOOS is set b/c in that case user probably assumes it will work.
if (target.OS != runtime.GOOS || !runningOnCI) && os.Getenv("SOURCEDIR_"+strings.ToUpper(target.OS)) == "" {
if _, err := exec.LookPath(target.CCompiler); err != nil {
target.BrokenCompiler = fmt.Sprintf("%v is missing (%v)", target.CCompiler, err)
return
}
}
flags := make(map[string]*bool)
var wg sync.WaitGroup
for _, flag := range target.CFlags {
if !optionalCFlags[flag] {
continue
}
res := new(bool)
flags[flag] = res
wg.Add(1)
go func(flag string) {
defer wg.Done()
*res = checkFlagSupported(target, flag)
}(flag)
}
wg.Wait()
for i := 0; i < len(target.CFlags); i++ {
if res := flags[target.CFlags[i]]; res != nil && !*res {
copy(target.CFlags[i:], target.CFlags[i+1:])
target.CFlags = target.CFlags[:len(target.CFlags)-1]
i--
}
}
// Check that the compiler is actually functioning. It may be present, but still broken.
// Common for Linux distros, over time we've seen:
// Error: alignment too large: 15 assumed
// fatal error: asm/unistd.h: No such file or directory
// fatal error: asm/errno.h: No such file or directory
// collect2: error: ld terminated with signal 11 [Segmentation fault]
if runningOnCI || os.Getenv("SOURCEDIR_"+strings.ToUpper(target.OS)) != "" {
return // On CI all compilers are expected to work, so we don't do the following check.
}
args := []string{"-x", "c++", "-", "-o", "/dev/null"}
args = append(args, target.CFlags...)
cmd := exec.Command(target.CCompiler, args...)
cmd.Stdin = strings.NewReader(simpleProg)
if out, err := cmd.CombinedOutput(); err != nil {
target.BrokenCompiler = string(out)
return
}
}
func checkFlagSupported(target *Target, flag string) bool {
cmd := exec.Command(target.CCompiler, "-x", "c++", "-", "-o", "/dev/null", flag)
cmd.Stdin = strings.NewReader(simpleProg)
return cmd.Run() == nil
}
func needSyscallDefine(nr uint64) bool { return true }
func dontNeedSyscallDefine(nr uint64) bool { return false }
var (
runningOnCI = os.Getenv("CI") != ""
useClang = os.Getenv("SYZ_CLANG") != ""
)
const (
sourceDirVar = "${SOURCEDIR}"
simpleProg = `
#include <stdio.h>
#include <dirent.h> // ensures that system headers are installed
#include <algorithm> // ensures that C++ headers are installed
int main() { printf("Hello, World!\n"); }
`
)
| [
"\"SOURCEDIR_\" + strings.ToUpper(OS",
"\"SOURCEDIR\"",
"\"SOURCEDIR_\"+strings.ToUpper(target.OS",
"\"SOURCEDIR_\"+strings.ToUpper(target.OS",
"\"CI\"",
"\"SYZ_CLANG\""
]
| []
| [
"SOURCEDIR_\" + strings.ToUpper(O",
"CI",
"SYZ_CLANG",
"SOURCEDIR",
"SOURCEDIR_\"+strings.ToUpper(target.O"
]
| [] | ["SOURCEDIR_\" + strings.ToUpper(O", "CI", "SYZ_CLANG", "SOURCEDIR", "SOURCEDIR_\"+strings.ToUpper(target.O"] | go | 5 | 0 | |
util/testutil/env.go | package testutil
import (
"errors"
"os"
"strings"
)
type TestEnv struct {
ApiEndpoint string
Owner string
Repo string
Pull string
Token string
}
type TestEnvError []string
func (t TestEnvError) Error() string {
return strings.Join(t, "\n")
}
func GetTestEnv() (*TestEnv, error) {
err := TestEnvError{}
apiEndpoint := os.Getenv("UNITY_META_CHECK_GITHUB_API_ENDPOINT")
if apiEndpoint == "" {
err = append(err, "missing UNITY_META_CHECK_GITHUB_API_ENDPOINT")
}
owner := os.Getenv("UNITY_META_CHECK_GITHUB_OWNER")
if owner == "" {
err = append(err, "missing UNITY_META_CHECK_GITHUB_OWNER")
}
repo := os.Getenv("UNITY_META_CHECK_GITHUB_REPO")
if repo == "" {
err = append(err, "missing UNITY_META_CHECK_GITHUB_REPO")
}
pull := os.Getenv("UNITY_META_CHECK_GITHUB_PULL_NUMBER")
if pull == "" {
err = append(err, "missing UNITY_META_CHECK_GITHUB_PULL_NUMBER")
}
token := os.Getenv("UNITY_META_CHECK_GITHUB_TOKEN")
if token == "" {
return nil, errors.New("missing UNITY_META_CHECK_GITHUB_TOKEN")
}
return &TestEnv{
apiEndpoint,
owner,
repo,
pull,
token,
}, nil
}
| [
"\"UNITY_META_CHECK_GITHUB_API_ENDPOINT\"",
"\"UNITY_META_CHECK_GITHUB_OWNER\"",
"\"UNITY_META_CHECK_GITHUB_REPO\"",
"\"UNITY_META_CHECK_GITHUB_PULL_NUMBER\"",
"\"UNITY_META_CHECK_GITHUB_TOKEN\""
]
| []
| [
"UNITY_META_CHECK_GITHUB_REPO",
"UNITY_META_CHECK_GITHUB_API_ENDPOINT",
"UNITY_META_CHECK_GITHUB_PULL_NUMBER",
"UNITY_META_CHECK_GITHUB_TOKEN",
"UNITY_META_CHECK_GITHUB_OWNER"
]
| [] | ["UNITY_META_CHECK_GITHUB_REPO", "UNITY_META_CHECK_GITHUB_API_ENDPOINT", "UNITY_META_CHECK_GITHUB_PULL_NUMBER", "UNITY_META_CHECK_GITHUB_TOKEN", "UNITY_META_CHECK_GITHUB_OWNER"] | go | 5 | 0 | |
geo_agent/visualiser_geo.py | '''
contains all methods for visualisation tasks
'''
import matplotlib.pyplot as plt
import matplotlib as mpl
import geopandas as gpd
import geoplot.crs as gcrs
import geoplot as gplt
import numpy as np
import pandas as pd
from shapely.geometry import Point, LineString, shape
import ruamel.yaml as yaml
from parse_yaml import parse_config
import os
def set_style(Config):
'''sets the plot style
'''
if Config.plot_style.lower() == 'dark':
mpl.style.use('plot_styles/dark.mplstyle')
def build_fig_geo(Config, shapefile = './postcode_shape_files/osm_north_london.shp', figsize=(5,7)):
# set style parameters for plot
set_style(Config)
    # Set total size of figure (defaults to 5'' x 7'')
    fig = plt.figure(figsize=figsize)
# specify space where plots will be placed. There will be 2 subplots
# one on each row where each plot will stretch across the entire screen
spec = fig.add_gridspec(ncols=1, nrows=2, height_ratios=[10,5])
# read in shapefile for map
map_df = gpd.read_file(shapefile)
#set projection for shapefile in first subplot
ax1 = fig.add_subplot(spec[0,0], projection=gcrs.AlbersEqualArea())
extent = [-0.18, 51.62, -0.10, 51.7]
# plot map
gplt.polyplot(map_df['geometry'], projection=gcrs.AlbersEqualArea(), ax=ax1, extent=extent)
plt.title('Map showing location of infected individuals')
# plt.show(ax1)
# Plot to show SIR curve
ax2 = fig.add_subplot(spec[1,0])
ax2.set_title('number of infected')
#ax2.set_xlim(0, simulation_steps)
ax2.set_ylim(0, Config.pop_size + 100)
return fig, spec, ax1, ax2, map_df
def draw_tstep(Config,
fig, ax1, ax2, shapefile_df):
#construct plot and visualise
#get color palettes
palette = Config.get_palette()
# specify space where plots will be placed. There will be 2 subplots
# one on each row where each plot will stretch across the entire screen
spec = fig.add_gridspec(ncols=1, nrows=2, height_ratios=[10,5])
# clear previous subplots
ax1.clear()
ax2.clear()
extent = [-0.18, 51.62, -0.10, 51.7]
#set projection for shapefile in first subplot
ax1 = gplt.polyplot(shapefile_df, projection=gcrs.AlbersEqualArea(), ax=ax1)
# if Config.self_isolate and Config.isolation_bounds != None:
# build_hospital(Config.isolation_bounds[0], Config.isolation_bounds[2],
# Config.isolation_bounds[1], Config.isolation_bounds[3], ax1,
# addcross = False)
#plot population segments
all_agents = Config.point_plots_matrix
# # Create nd array containing healthy, infected, immune and fatalities in one column and a second column containing label
# healthy = population[population[:,6] == 0][:,1:3]
# healthy_label = ['healthy'] * healthy.shape[0]
# infected = population[population[:,6] == 1][:,1:3]
# infected_label = ['infected'] * infected.shape[0]
# immune = population[population[:,6] == 2][:,1:3]
# immune_label = ['immune'] * immune.shape[0]
# fatalities = population[population[:,6] == 3][:,1:3]
# fatalities_label = ['fatalities'] * fatalities.shape[0]
# # merge all data together in one nd array for plotting
# pop_coordinates = np.concatenate((healthy, infected, immune, fatalities))
# pop_labels = np.concatenate((healthy_label, infected_label, immune_label, fatalities_label))
# pop_matrix = np.vstack((pop_coordinates.T, pop_labels))
# pop_df = pd.DataFrame(pop_matrix.T, columns = ['Longitude', 'Latitude', 'Label'])
pop_df = pd.DataFrame(all_agents, columns = ['geometry', 'label', 'disease_progression'])
# Manually added landmarks for now
# pop_df = pop_df.append({'geometry' : Point(-0.1256032, 51.63368), 'label' : 'supermarket'}, ignore_index=True)
# pop_df = pop_df.append({'geometry' : Point(-0.1713237, 51.6495658), 'label' : 'supermarket'}, ignore_index=True)
# pop_df = pop_df.append({'geometry' : Point(-0.1137062, 51.6347074), 'label' : 'park'}, ignore_index=True)
# pop_df = pop_df.append({'geometry' : Point(-0.1693677, 51.6615703), 'label' : 'home'}, ignore_index=True)
# pop_df = pop_df.append({'geometry' : Point(-0.1705196, 51.6665827), 'label' : 'home'}, ignore_index=True)
pop_gdf = gpd.GeoDataFrame(pop_df, geometry=pop_df.geometry)
# pop_gdf = gpd.GeoDataFrame(all_agents, columns = ['geometry', 'label'])
extent = [-0.18, 51.62, -0.10, 51.7]
extent = [-0.2671, 51.6167, -0.1198, 51.6940]
gplt.pointplot(pop_gdf, ax=ax1, extent=extent, hue='label', s= 2)
# plt.show()
plt.draw()
plt.pause(0.001)
# Convert to geodataframe and plot on axis
# pop_gdf = gpd.GeoDa0taFrame(pop_df, geometry=gpd.points_from_xy(pop_df.Longitude.astype(float).values, pop_df.Latitude.astype(float).values))
# gplt.pointplot(pop_gdf, ax=ax1, extent=shapefile_df.total_bounds, hue = 'Label', legend=True)
# gplt.pointplot(pop_gdf, ax=ax1, hue = 'Label', legend=True)
def parse_agent_location(Config):
#plot population segments
all_agents = Config.point_plots_matrix
pop_df = pd.DataFrame(all_agents, columns = ['geometry', 'label', 'disease_progression'])
# pop_df = pd.DataFrame(all_agents, columns = ['geometry', 'label'])
# pop_df = pop_df.append({'geometry' : Point(-0.1256032, 51.63368), 'label' : 'supermarket'}, ignore_index=True)
# pop_df = pop_df.append({'geometry' : Point(-0.1713237, 51.6495658), 'label' : 'supermarket'}, ignore_index=True)
# pop_df = pop_df.append({'geometry' : Point(-0.1137062, 51.6347074), 'label' : 'park'}, ignore_index=True)
# pop_df = pop_df.append({'geometry' : Point(-0.1693677, 51.6615703), 'label' : 'home'}, ignore_index=True)
# pop_df = pop_df.append({'geometry' : Point(-0.1705196, 51.6665827), 'label' : 'home'}, ignore_index=True)
pop_gdf = gpd.GeoDataFrame(pop_df, geometry=pop_df.geometry)
pop_gdf.index.name = 'id'
# local machine file path
# pop_gdf.to_file("/Users/kevinryan/Documents/City_DSI/population_movement_simulations/openstreetmap-carto/output.json", index=True, driver="GeoJSON")
# docker file path
pop_gdf.to_file("/openstreetmap-carto/output.json", index=True, driver="GeoJSON")
# convert_df_2_string(pop_gdf)
# parse_yaml_result()
# str_out = convert_df_2_string(pop_gdf)
# assign_agent_loc_2_mml_file(updated_locations = str_out)
# return str_out
def convert_df_2_string(df):
"""
Convert data frame rows to string output where each new line is defined as \n
"""
    # initialise string
output = 'agent,wkt\n'
for i, row in df.iterrows():
if i == len(df) - 1:
output += str(row['label']) + ',' + str(row['geometry'])
else:
output += str(row['label']) + ',' + str(row['geometry']) + '\n'
# set environment variable ${AGENTS}
# os.environ['AGENTS'] = output
return output
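# Illustrative usage sketch, not part of the original script: given a data frame with
# 'label' and 'geometry' columns, convert_df_2_string produces a CSV-like string such as
# "agent,wkt\nhome,POINT (-0.17 51.66)\nsupermarket,POINT (-0.12 51.63)".
def example_convert_df_2_string():
    example_df = pd.DataFrame({
        'label': ['home', 'supermarket'],
        'geometry': [Point(-0.17, 51.66), Point(-0.12, 51.63)],
    })
    return convert_df_2_string(example_df)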
# local machine version
# def assign_agent_loc_2_mml_file(file='/Users/kevinryan/Documents/City_DSI/population_movement_simulations/openstreetmap-carto/project.mml', updated_locations = None):
# docker version
def assign_agent_loc_2_mml_file(file='/openstreetmap-carto/project.mml', updated_locations = None):
yml = yaml.YAML()
yml.preserve_quotes = True
yml.width = 4096
with open(file, 'r') as stream:
file_string = yml.load(stream)
# update project.mml file with current agent locations
file_string['Layer'][-1]['Datasource']['inline'] = updated_locations
# write to yaml file
# local machine version
# with open('/Users/kevinryan/Documents/City_DSI/population_movement_simulations/openstreetmap-carto/project.mml', 'w') as file:
# docker version
with open('/openstreetmap-carto/project.mml', 'w') as file:
yml.indent(mapping=2, sequence=4, offset=2)
documents = yml.dump(file_string, file)
def parse_yaml_result():
# local machine version
# conf = parse_config(path="/Users/kevinryan/Documents/City_DSI/population_movement_simulations/openstreetmap-carto/project.mml")
# with open('/Users/kevinryan/Documents/City_DSI/population_movement_simulations/openstreetmap-carto/project.mml', 'w') as file:
# docker version
conf = parse_config(path="/openstreetmap-carto/project.mml")
with open('/openstreetmap-carto/project.mml', 'w') as file:
yaml.dump(conf, file)
| []
| []
| [
"AGENTS"
]
| [] | ["AGENTS"] | python | 1 | 0 | |
src/_pytest/cacheprovider.py | """Implementation of the cache provider."""
# This plugin was not named "cache" to avoid conflicts with the external
# pytest-cache version.
import json
import os
from pathlib import Path
from typing import Dict
from typing import Generator
from typing import Iterable
from typing import List
from typing import Optional
from typing import Set
from typing import Union
import attr
from .pathlib import resolve_from_str
from .pathlib import rm_rf
from .reports import CollectReport
from _pytest import nodes
from _pytest._io import TerminalWriter
from _pytest.compat import final
from _pytest.compat import LEGACY_PATH
from _pytest.compat import legacy_path
from _pytest.config import Config
from _pytest.config import ExitCode
from _pytest.config import hookimpl
from _pytest.config.argparsing import Parser
from _pytest.deprecated import check_ispytest
from _pytest.fixtures import fixture
from _pytest.fixtures import FixtureRequest
from _pytest.main import Session
from _pytest.python import Module
from _pytest.python import Package
from _pytest.reports import TestReport
README_CONTENT = """\
# pytest cache directory #
This directory contains data from the pytest's cache plugin,
which provides the `--lf` and `--ff` options, as well as the `cache` fixture.
**Do not** commit this to version control.
See [the docs](https://docs.pytest.org/en/stable/how-to/cache.html) for more information.
"""
CACHEDIR_TAG_CONTENT = b"""\
Signature: 8a477f597d28d172789f06886806bc55
# This file is a cache directory tag created by pytest.
# For information about cache directory tags, see:
# https://bford.info/cachedir/spec.html
"""
@final
@attr.s(init=False)
class Cache:
_cachedir = attr.ib(type=Path, repr=False)
_config = attr.ib(type=Config, repr=False)
# Sub-directory under cache-dir for directories created by `mkdir()`.
_CACHE_PREFIX_DIRS = "d"
# Sub-directory under cache-dir for values created by `set()`.
_CACHE_PREFIX_VALUES = "v"
def __init__(
self, cachedir: Path, config: Config, *, _ispytest: bool = False
) -> None:
check_ispytest(_ispytest)
self._cachedir = cachedir
self._config = config
@classmethod
def for_config(cls, config: Config, *, _ispytest: bool = False) -> "Cache":
"""Create the Cache instance for a Config.
:meta private:
"""
check_ispytest(_ispytest)
cachedir = cls.cache_dir_from_config(config, _ispytest=True)
if config.getoption("cacheclear") and cachedir.is_dir():
cls.clear_cache(cachedir, _ispytest=True)
return cls(cachedir, config, _ispytest=True)
@classmethod
def clear_cache(cls, cachedir: Path, _ispytest: bool = False) -> None:
"""Clear the sub-directories used to hold cached directories and values.
:meta private:
"""
check_ispytest(_ispytest)
for prefix in (cls._CACHE_PREFIX_DIRS, cls._CACHE_PREFIX_VALUES):
d = cachedir / prefix
if d.is_dir():
rm_rf(d)
@staticmethod
def cache_dir_from_config(config: Config, *, _ispytest: bool = False) -> Path:
"""Get the path to the cache directory for a Config.
:meta private:
"""
check_ispytest(_ispytest)
return resolve_from_str(config.getini("cache_dir"), config.rootpath)
def warn(self, fmt: str, *, _ispytest: bool = False, **args: object) -> None:
"""Issue a cache warning.
:meta private:
"""
check_ispytest(_ispytest)
import warnings
from _pytest.warning_types import PytestCacheWarning
warnings.warn(
PytestCacheWarning(fmt.format(**args) if args else fmt),
self._config.hook,
stacklevel=3,
)
def mkdir(self, name: str) -> Path:
"""Return a directory path object with the given name.
If the directory does not yet exist, it will be created. You can use
it to manage files to e.g. store/retrieve database dumps across test
sessions.
.. versionadded:: 6.3
:param name:
Must be a string not containing a ``/`` separator.
Make sure the name contains your plugin or application
identifiers to prevent clashes with other cache users.
"""
path = Path(name)
if len(path.parts) > 1:
raise ValueError("name is not allowed to contain path separators")
res = self._cachedir.joinpath(self._CACHE_PREFIX_DIRS, path)
res.mkdir(exist_ok=True, parents=True)
return res
def makedir(self, name: str) -> LEGACY_PATH:
"""Return a directory path object with the given name.
Same as :func:`mkdir`, but returns a legacy py path instance.
"""
return legacy_path(self.mkdir(name))
def _getvaluepath(self, key: str) -> Path:
return self._cachedir.joinpath(self._CACHE_PREFIX_VALUES, Path(key))
def get(self, key: str, default):
"""Return the cached value for the given key.
If no value was yet cached or the value cannot be read, the specified
default is returned.
:param key:
Must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param default:
The value to return in case of a cache-miss or invalid cache value.
"""
path = self._getvaluepath(key)
try:
with path.open("r") as f:
return json.load(f)
except (ValueError, OSError):
return default
def set(self, key: str, value: object) -> None:
"""Save value for the given key.
:param key:
Must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param value:
Must be of any combination of basic python types,
including nested types like lists of dictionaries.
"""
path = self._getvaluepath(key)
try:
if path.parent.is_dir():
cache_dir_exists_already = True
else:
cache_dir_exists_already = self._cachedir.exists()
path.parent.mkdir(exist_ok=True, parents=True)
except OSError:
self.warn("could not create cache path {path}", path=path, _ispytest=True)
return
if not cache_dir_exists_already:
self._ensure_supporting_files()
data = json.dumps(value, indent=2, sort_keys=True)
try:
f = path.open("w")
except OSError:
self.warn("cache could not write path {path}", path=path, _ispytest=True)
else:
with f:
f.write(data)
def _ensure_supporting_files(self) -> None:
"""Create supporting files in the cache dir that are not really part of the cache."""
readme_path = self._cachedir / "README.md"
readme_path.write_text(README_CONTENT)
gitignore_path = self._cachedir.joinpath(".gitignore")
msg = "# Created by pytest automatically.\n*\n"
gitignore_path.write_text(msg, encoding="UTF-8")
cachedir_tag_path = self._cachedir.joinpath("CACHEDIR.TAG")
cachedir_tag_path.write_bytes(CACHEDIR_TAG_CONTENT)
class LFPluginCollWrapper:
def __init__(self, lfplugin: "LFPlugin") -> None:
self.lfplugin = lfplugin
self._collected_at_least_one_failure = False
@hookimpl(hookwrapper=True)
def pytest_make_collect_report(self, collector: nodes.Collector):
if isinstance(collector, Session):
out = yield
res: CollectReport = out.get_result()
# Sort any lf-paths to the beginning.
lf_paths = self.lfplugin._last_failed_paths
res.result = sorted(
res.result,
# use stable sort to prioritize last failed
key=lambda x: x.path in lf_paths,
reverse=True,
)
return
elif isinstance(collector, Module):
if collector.path in self.lfplugin._last_failed_paths:
out = yield
res = out.get_result()
result = res.result
lastfailed = self.lfplugin.lastfailed
# Only filter with known failures.
if not self._collected_at_least_one_failure:
if not any(x.nodeid in lastfailed for x in result):
return
self.lfplugin.config.pluginmanager.register(
LFPluginCollSkipfiles(self.lfplugin), "lfplugin-collskip"
)
self._collected_at_least_one_failure = True
session = collector.session
result[:] = [
x
for x in result
if x.nodeid in lastfailed
# Include any passed arguments (not trivial to filter).
or session.isinitpath(x.path)
# Keep all sub-collectors.
or isinstance(x, nodes.Collector)
]
return
yield
class LFPluginCollSkipfiles:
def __init__(self, lfplugin: "LFPlugin") -> None:
self.lfplugin = lfplugin
@hookimpl
def pytest_make_collect_report(
self, collector: nodes.Collector
) -> Optional[CollectReport]:
# Packages are Modules, but _last_failed_paths only contains
# test-bearing paths and doesn't try to include the paths of their
# packages, so don't filter them.
if isinstance(collector, Module) and not isinstance(collector, Package):
if collector.path not in self.lfplugin._last_failed_paths:
self.lfplugin._skipped_files += 1
return CollectReport(
collector.nodeid, "passed", longrepr=None, result=[]
)
return None
class LFPlugin:
"""Plugin which implements the --lf (run last-failing) option."""
def __init__(self, config: Config) -> None:
self.config = config
active_keys = "lf", "failedfirst"
self.active = any(config.getoption(key) for key in active_keys)
assert config.cache
self.lastfailed: Dict[str, bool] = config.cache.get("cache/lastfailed", {})
self._previously_failed_count: Optional[int] = None
self._report_status: Optional[str] = None
self._skipped_files = 0 # count skipped files during collection due to --lf
if config.getoption("lf"):
self._last_failed_paths = self.get_last_failed_paths()
config.pluginmanager.register(
LFPluginCollWrapper(self), "lfplugin-collwrapper"
)
def get_last_failed_paths(self) -> Set[Path]:
"""Return a set with all Paths()s of the previously failed nodeids."""
rootpath = self.config.rootpath
result = {rootpath / nodeid.split("::")[0] for nodeid in self.lastfailed}
return {x for x in result if x.exists()}
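# For illustration: with rootpath /repo and a recorded failure
# "tests/test_a.py::test_x", the derived path is /repo/tests/test_a.py,
# kept only if that file still exists. This restates the mapping above;
# it is not additional behaviour.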
def pytest_report_collectionfinish(self) -> Optional[str]:
if self.active and self.config.getoption("verbose") >= 0:
return "run-last-failure: %s" % self._report_status
return None
def pytest_runtest_logreport(self, report: TestReport) -> None:
if (report.when == "call" and report.passed) or report.skipped:
self.lastfailed.pop(report.nodeid, None)
elif report.failed:
self.lastfailed[report.nodeid] = True
def pytest_collectreport(self, report: CollectReport) -> None:
passed = report.outcome in ("passed", "skipped")
if passed:
if report.nodeid in self.lastfailed:
self.lastfailed.pop(report.nodeid)
self.lastfailed.update((item.nodeid, True) for item in report.result)
else:
self.lastfailed[report.nodeid] = True
@hookimpl(hookwrapper=True, tryfirst=True)
def pytest_collection_modifyitems(
self, config: Config, items: List[nodes.Item]
) -> Generator[None, None, None]:
yield
if not self.active:
return
if self.lastfailed:
previously_failed = []
previously_passed = []
for item in items:
if item.nodeid in self.lastfailed:
previously_failed.append(item)
else:
previously_passed.append(item)
self._previously_failed_count = len(previously_failed)
if not previously_failed:
# Running a subset of all tests, and every recorded failure
# lies outside of that subset.
self._report_status = "%d known failures not in selected tests" % (
len(self.lastfailed),
)
else:
if self.config.getoption("lf"):
items[:] = previously_failed
config.hook.pytest_deselected(items=previously_passed)
else: # --failedfirst
items[:] = previously_failed + previously_passed
noun = "failure" if self._previously_failed_count == 1 else "failures"
suffix = " first" if self.config.getoption("failedfirst") else ""
self._report_status = "rerun previous {count} {noun}{suffix}".format(
count=self._previously_failed_count, suffix=suffix, noun=noun
)
if self._skipped_files > 0:
files_noun = "file" if self._skipped_files == 1 else "files"
self._report_status += " (skipped {files} {files_noun})".format(
files=self._skipped_files, files_noun=files_noun
)
else:
self._report_status = "no previously failed tests, "
if self.config.getoption("last_failed_no_failures") == "none":
self._report_status += "deselecting all items."
config.hook.pytest_deselected(items=items[:])
items[:] = []
else:
self._report_status += "not deselecting items."
def pytest_sessionfinish(self, session: Session) -> None:
config = self.config
if config.getoption("cacheshow") or hasattr(config, "workerinput"):
return
assert config.cache is not None
saved_lastfailed = config.cache.get("cache/lastfailed", {})
if saved_lastfailed != self.lastfailed:
config.cache.set("cache/lastfailed", self.lastfailed)
class NFPlugin:
"""Plugin which implements the --nf (run new-first) option."""
def __init__(self, config: Config) -> None:
self.config = config
self.active = config.option.newfirst
assert config.cache is not None
self.cached_nodeids = set(config.cache.get("cache/nodeids", []))
@hookimpl(hookwrapper=True, tryfirst=True)
def pytest_collection_modifyitems(
self, items: List[nodes.Item]
) -> Generator[None, None, None]:
yield
if self.active:
new_items: Dict[str, nodes.Item] = {}
other_items: Dict[str, nodes.Item] = {}
for item in items:
if item.nodeid not in self.cached_nodeids:
new_items[item.nodeid] = item
else:
other_items[item.nodeid] = item
items[:] = self._get_increasing_order(
new_items.values()
) + self._get_increasing_order(other_items.values())
self.cached_nodeids.update(new_items)
else:
self.cached_nodeids.update(item.nodeid for item in items)
def _get_increasing_order(self, items: Iterable[nodes.Item]) -> List[nodes.Item]:
return sorted(items, key=lambda item: item.path.stat().st_mtime, reverse=True) # type: ignore[no-any-return]
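# Illustrative note: with --nf, items from previously unseen files run
# first, newest file mtime first, followed by the remaining items in the
# same mtime order; e.g. for mtimes {a.py: 100, b.py: 200}, b.py's items
# are ordered before a.py's within each group.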
def pytest_sessionfinish(self) -> None:
config = self.config
if config.getoption("cacheshow") or hasattr(config, "workerinput"):
return
if config.getoption("collectonly"):
return
assert config.cache is not None
config.cache.set("cache/nodeids", sorted(self.cached_nodeids))
def pytest_addoption(parser: Parser) -> None:
group = parser.getgroup("general")
group.addoption(
"--lf",
"--last-failed",
action="store_true",
dest="lf",
help="rerun only the tests that failed "
"at the last run (or all if none failed)",
)
group.addoption(
"--ff",
"--failed-first",
action="store_true",
dest="failedfirst",
help="run all tests, but run the last failures first.\n"
"This may re-order tests and thus lead to "
"repeated fixture setup/teardown.",
)
group.addoption(
"--nf",
"--new-first",
action="store_true",
dest="newfirst",
help="run tests from new files first, then the rest of the tests "
"sorted by file mtime",
)
group.addoption(
"--cache-show",
action="append",
nargs="?",
dest="cacheshow",
help=(
"show cache contents, don't perform collection or tests. "
"Optional argument: glob (default: '*')."
),
)
group.addoption(
"--cache-clear",
action="store_true",
dest="cacheclear",
help="remove all cache contents at start of test run.",
)
cache_dir_default = ".pytest_cache"
if "TOX_ENV_DIR" in os.environ:
cache_dir_default = os.path.join(os.environ["TOX_ENV_DIR"], cache_dir_default)
parser.addini("cache_dir", default=cache_dir_default, help="cache directory path.")
group.addoption(
"--lfnf",
"--last-failed-no-failures",
action="store",
dest="last_failed_no_failures",
choices=("all", "none"),
default="all",
help="which tests to run with no previously (known) failures.",
)
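# Example invocations (illustrative only, using the options registered above):
#
#     pytest --lf            # rerun only the tests that failed last time
#     pytest --ff            # run everything, previous failures first
#     pytest --nf            # run tests from new files first
#     pytest --cache-clear   # drop cached state before the run
#     pytest --cache-show    # print cached values (optional glob argument)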
def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]:
if config.option.cacheshow:
from _pytest.main import wrap_session
return wrap_session(config, cacheshow)
return None
@hookimpl(tryfirst=True)
def pytest_configure(config: Config) -> None:
config.cache = Cache.for_config(config, _ispytest=True)
config.pluginmanager.register(LFPlugin(config), "lfplugin")
config.pluginmanager.register(NFPlugin(config), "nfplugin")
@fixture
def cache(request: FixtureRequest) -> Cache:
"""Return a cache object that can persist state between testing sessions.
cache.get(key, default)
cache.set(key, value)
Keys must be ``/`` separated strings, where the first part is usually the
name of your plugin or application to avoid clashes with other cache users.
Values can be any object handled by the json stdlib module.
"""
assert request.config.cache is not None
return request.config.cache
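# Illustrative sketch (not part of pytest): a test consuming the fixture
# above; the "example/value" key is hypothetical:
#
#     def test_remembers(cache):
#         previous = cache.get("example/value", None)
#         cache.set("example/value", 42)
#         assert previous in (None, 42)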
def pytest_report_header(config: Config) -> Optional[str]:
"""Display cachedir with --cache-show and if non-default."""
if config.option.verbose > 0 or config.getini("cache_dir") != ".pytest_cache":
assert config.cache is not None
cachedir = config.cache._cachedir
# TODO: evaluate generating upward relative paths
# starting with .., ../.. if sensible
try:
displaypath = cachedir.relative_to(config.rootpath)
except ValueError:
displaypath = cachedir
return f"cachedir: {displaypath}"
return None
def cacheshow(config: Config, session: Session) -> int:
from pprint import pformat
assert config.cache is not None
tw = TerminalWriter()
tw.line("cachedir: " + str(config.cache._cachedir))
if not config.cache._cachedir.is_dir():
tw.line("cache is empty")
return 0
glob = config.option.cacheshow[0]
if glob is None:
glob = "*"
dummy = object()
basedir = config.cache._cachedir
vdir = basedir / Cache._CACHE_PREFIX_VALUES
tw.sep("-", "cache values for %r" % glob)
for valpath in sorted(x for x in vdir.rglob(glob) if x.is_file()):
key = str(valpath.relative_to(vdir))
val = config.cache.get(key, dummy)
if val is dummy:
tw.line("%s contains unreadable content, will be ignored" % key)
else:
tw.line("%s contains:" % key)
for line in pformat(val).splitlines():
tw.line(" " + line)
ddir = basedir / Cache._CACHE_PREFIX_DIRS
if ddir.is_dir():
contents = sorted(ddir.rglob(glob))
tw.sep("-", "cache directories for %r" % glob)
for p in contents:
# if p.is_dir():
# print("%s/" % p.relative_to(basedir))
if p.is_file():
key = str(p.relative_to(basedir))
tw.line(f"{key} is a file of length {p.stat().st_size:d}")
return 0
| []
| []
| [
"TOX_ENV_DIR"
]
| [] | ["TOX_ENV_DIR"] | python | 1 | 0 | |
core/commands/sysdiag.go | package commands
import (
"os"
"path"
"runtime"
cmds "github.com/ipfs/go-ipfs/commands"
config "github.com/ipfs/go-ipfs/repo/config"
manet "github.com/multiformats/go-multiaddr-net"
sysi "gx/ipfs/QmZRjKbHa6DenStpQJFiaPcEwkZqrx7TH6xTf342LDU3qM/go-sysinfo"
"github.com/ipfs/go-ipfs-cmdkit"
)
var sysDiagCmd = &cmds.Command{
Helptext: cmdkit.HelpText{
Tagline: "Print system diagnostic information.",
ShortDescription: `
Prints out information about your computer to aid in debugging.
`,
},
Run: func(req cmds.Request, res cmds.Response) {
info := make(map[string]interface{})
err := runtimeInfo(info)
if err != nil {
res.SetError(err, cmdkit.ErrNormal)
return
}
err = envVarInfo(info)
if err != nil {
res.SetError(err, cmdkit.ErrNormal)
return
}
err = diskSpaceInfo(info)
if err != nil {
res.SetError(err, cmdkit.ErrNormal)
return
}
err = memInfo(info)
if err != nil {
res.SetError(err, cmdkit.ErrNormal)
return
}
node, err := req.InvocContext().GetNode()
if err != nil {
res.SetError(err, cmdkit.ErrNormal)
return
}
err = netInfo(node.OnlineMode(), info)
if err != nil {
res.SetError(err, cmdkit.ErrNormal)
return
}
info["ipfs_version"] = config.CurrentVersionNumber
info["ipfs_commit"] = config.CurrentCommit
res.SetOutput(info)
},
}
func runtimeInfo(out map[string]interface{}) error {
rt := make(map[string]interface{})
rt["os"] = runtime.GOOS
rt["arch"] = runtime.GOARCH
rt["compiler"] = runtime.Compiler
rt["version"] = runtime.Version()
rt["numcpu"] = runtime.NumCPU()
rt["gomaxprocs"] = runtime.GOMAXPROCS(0)
rt["numgoroutines"] = runtime.NumGoroutine()
out["runtime"] = rt
return nil
}
func envVarInfo(out map[string]interface{}) error {
ev := make(map[string]interface{})
ev["GOPATH"] = os.Getenv("GOPATH")
ev["IPFS_PATH"] = os.Getenv("IPFS_PATH")
out["environment"] = ev
return nil
}
func ipfsPath() string {
p := os.Getenv("IPFS_PATH")
if p == "" {
p = path.Join(os.Getenv("HOME"), ".ipfs")
}
return p
}
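// Illustrative note (not part of this file): with IPFS_PATH unset and
// HOME=/home/alice, ipfsPath() above returns "/home/alice/.ipfs"; with
// IPFS_PATH=/data/ipfs it returns "/data/ipfs" unchanged.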
func diskSpaceInfo(out map[string]interface{}) error {
di := make(map[string]interface{})
dinfo, err := sysi.DiskUsage(ipfsPath())
if err != nil {
return err
}
di["fstype"] = dinfo.FsType
di["total_space"] = dinfo.Total
di["free_space"] = dinfo.Free
out["diskinfo"] = di
return nil
}
func memInfo(out map[string]interface{}) error {
m := make(map[string]interface{})
meminf, err := sysi.MemoryInfo()
if err != nil {
return err
}
m["swap"] = meminf.Swap
m["virt"] = meminf.Used
out["memory"] = m
return nil
}
func netInfo(online bool, out map[string]interface{}) error {
n := make(map[string]interface{})
addrs, err := manet.InterfaceMultiaddrs()
if err != nil {
return err
}
var straddrs []string
for _, a := range addrs {
straddrs = append(straddrs, a.String())
}
n["interface_addresses"] = straddrs
n["online"] = online
out["net"] = n
return nil
}
| [
"\"GOPATH\"",
"\"IPFS_PATH\"",
"\"IPFS_PATH\"",
"\"HOME\""
]
| []
| [
"GOPATH",
"IPFS_PATH",
"HOME"
]
| [] | ["GOPATH", "IPFS_PATH", "HOME"] | go | 3 | 0 | |
tensorflow/python/keras/backend.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
# pylint: disable=redefined-outer-name
# pylint: disable=redefined-builtin
"""Keras backend API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import json
import os
import threading
import weakref
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_module
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import distribute_coordinator_context as dc_context
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.eager import context
from tensorflow.python.eager import function as eager_function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_module
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend_config
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import ctc_ops as ctc
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients as gradients_module
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import map_fn as map_fn_lib
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables as variables_module
from tensorflow.python.training import server_lib
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
py_all = all
py_sum = sum
# INTERNAL UTILS
# The internal graph maintained by Keras and used by the symbolic Keras APIs
# while executing eagerly (such as the functional API for model-building).
_GRAPH = None
# A graph which is used for constructing functions in eager mode.
_CURRENT_SCRATCH_GRAPH = None
# This is a thread local object that will hold the default internal TF session
# used by Keras. It can be set manually via `set_session(sess)`.
_SESSION = threading.local()
# This dictionary holds a mapping {graph: learning_phase}.
# A learning phase is a bool tensor used to run Keras models in
# either train mode (learning_phase == 1) or test mode (learning_phase == 0).
_GRAPH_LEARNING_PHASES = weakref.WeakKeyDictionary()
# _DUMMY_EAGER_GRAPH is used as a key in _GRAPH_LEARNING_PHASES.
# We keep a separate reference to it to make sure it does not get removed from
# _GRAPH_LEARNING_PHASES.
_DUMMY_EAGER_GRAPH = threading.local()
# This boolean flag can be set to True to leave variable initialization
# up to the user.
# Change its value via `manual_variable_initialization(value)`.
_MANUAL_VAR_INIT = False
# This list holds the available devices.
# It is populated when `_get_available_gpus()` is called for the first time.
# We assume our devices don't change henceforth.
_LOCAL_DEVICES = None
# This dictionary holds a mapping between a graph and variables to initialize
# in the graph.
_GRAPH_VARIABLES = weakref.WeakKeyDictionary()
# This dictionary holds a mapping between a graph and TF optimizers created in
# the graph.
_GRAPH_TF_OPTIMIZERS = weakref.WeakKeyDictionary()
# The below functions are kept accessible from backend for compatibility.
epsilon = backend_config.epsilon
floatx = backend_config.floatx
image_data_format = backend_config.image_data_format
set_epsilon = backend_config.set_epsilon
set_floatx = backend_config.set_floatx
set_image_data_format = backend_config.set_image_data_format
@keras_export('keras.backend.backend')
def backend():
"""Publicly accessible method for determining the current backend.
Only exists for API compatibility with multi-backend Keras.
Returns:
The string "tensorflow".
"""
return 'tensorflow'
@keras_export('keras.backend.cast_to_floatx')
def cast_to_floatx(x):
"""Cast a Numpy array to the default Keras float type.
Arguments:
x: Numpy array.
Returns:
The same Numpy array, cast to its new type.
Example:
```python
>>> from keras import backend as K
>>> K.floatx()
'float32'
>>> arr = numpy.array([1.0, 2.0], dtype='float64')
>>> arr.dtype
dtype('float64')
>>> new_arr = K.cast_to_floatx(arr)
>>> new_arr
array([ 1., 2.], dtype=float32)
>>> new_arr.dtype
dtype('float32')
```
"""
return np.asarray(x, dtype=floatx())
# A global dictionary mapping graph objects to an index of counters used
# for various layer names in each graph.
# Allows to give unique autogenerated names to layers, in a graph-specific way.
PER_GRAPH_LAYER_NAME_UIDS = weakref.WeakKeyDictionary()
@keras_export('keras.backend.get_uid')
def get_uid(prefix=''):
"""Associates a string prefix with an integer counter in a TensorFlow graph.
Arguments:
prefix: String prefix to index.
Returns:
Unique integer ID.
Example:
```
>>> get_uid('dense')
1
>>> get_uid('dense')
2
```
"""
graph = get_graph()
if graph not in PER_GRAPH_LAYER_NAME_UIDS:
PER_GRAPH_LAYER_NAME_UIDS[graph] = collections.defaultdict(int)
layer_name_uids = PER_GRAPH_LAYER_NAME_UIDS[graph]
layer_name_uids[prefix] += 1
return layer_name_uids[prefix]
@keras_export('keras.backend.reset_uids')
def reset_uids():
"""Resets graph identifiers.
"""
per_graph_layer_name_uids = PER_GRAPH_LAYER_NAME_UIDS
keys = list(per_graph_layer_name_uids.keys())
for key in keys:
del per_graph_layer_name_uids[key]
@keras_export('keras.backend.clear_session')
def clear_session():
"""Destroys the current TF graph and creates a new one.
Useful to avoid clutter from old models / layers.
"""
global _SESSION
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
global _GRAPH_VARIABLES # pylint: disable=global-variable-not-assigned
global _GRAPH_TF_OPTIMIZERS # pylint: disable=global-variable-not-assigned
ops.reset_default_graph()
reset_uids()
_SESSION.session = None
graph = get_graph()
with graph.as_default():
with ops.name_scope(''):
phase = array_ops.placeholder_with_default(
False, shape=(), name='keras_learning_phase')
_GRAPH_LEARNING_PHASES = {}
_GRAPH_LEARNING_PHASES[graph] = phase
_GRAPH_VARIABLES.pop(graph, None)
_GRAPH_TF_OPTIMIZERS.pop(graph, None)
@keras_export('keras.backend.manual_variable_initialization')
def manual_variable_initialization(value):
"""Sets the manual variable initialization flag.
This boolean flag determines whether
variables should be initialized
as they are instantiated (default), or if
the user should handle the initialization
(e.g. via `tf.initialize_all_variables()`).
Arguments:
value: Python boolean.
"""
global _MANUAL_VAR_INIT
_MANUAL_VAR_INIT = value
@keras_export('keras.backend.learning_phase')
def learning_phase():
"""Returns the learning phase flag.
The learning phase flag is a bool tensor (0 = test, 1 = train)
to be passed as input to any Keras function
that uses a different behavior at train time and test time.
Returns:
Learning phase (scalar integer tensor or Python integer).
"""
if ops.get_default_graph() is _GRAPH:
# Don't enter an init_scope for the learning phase if eager execution
# is enabled but we're inside the Keras workspace graph.
return symbolic_learning_phase()
with ops.init_scope():
# We always check & set the learning phase inside the init_scope,
# otherwise the wrong default_graph will be used to look up the learning
# phase inside of functions & defuns.
#
# This is because functions & defuns (both in graph & in eager mode)
# will always execute non-eagerly using a function-specific default
# subgraph.
if context.executing_eagerly():
if _DUMMY_EAGER_GRAPH not in _GRAPH_LEARNING_PHASES:
# Fallback to inference mode as default.
return 0
return _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH]
return symbolic_learning_phase()
def symbolic_learning_phase():
graph = get_graph()
with graph.as_default():
if graph not in _GRAPH_LEARNING_PHASES:
with ops.name_scope(''):
phase = array_ops.placeholder_with_default(
False, shape=(), name='keras_learning_phase')
_GRAPH_LEARNING_PHASES[graph] = phase
return _GRAPH_LEARNING_PHASES[graph]
@keras_export('keras.backend.set_learning_phase')
def set_learning_phase(value):
"""Sets the learning phase to a fixed value.
Arguments:
value: Learning phase value, either 0 or 1 (integers).
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
if value not in {0, 1}:
raise ValueError('Expected learning phase to be 0 or 1.')
with ops.init_scope():
if context.executing_eagerly():
# In an eager context, the learning phase values applies to both the eager
# context and the internal Keras graph.
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = value
_GRAPH_LEARNING_PHASES[get_graph()] = value
def set_eager_learning_phase(value):
"""Internal utility that sets the learning phase in eager execution only.
Arguments:
value: Learning phase value, either 0 or 1 (integers).
"""
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
assert value in {0, 1}
assert context.executing_eagerly()
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = value
@keras_export('keras.backend.learning_phase_scope')
@tf_contextlib.contextmanager
def learning_phase_scope(value):
"""Provides a scope within which the learning phase is equal to `value`.
The learning phase gets restored to its original value upon exiting the scope.
Arguments:
value: Learning phase value, either 0 or 1 (integers).
Yields:
None.
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
if value not in {0, 1}:
raise ValueError('Expected learning phase to be 0 or 1.')
with ops.init_scope():
if context.executing_eagerly():
previous_eager_value = _GRAPH_LEARNING_PHASES.get(
_DUMMY_EAGER_GRAPH, None)
previous_graph_value = _GRAPH_LEARNING_PHASES.get(get_graph(), None)
try:
set_learning_phase(value)
yield
finally:
# Restore learning phase to initial value.
with ops.init_scope():
if context.executing_eagerly():
if previous_eager_value is not None:
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = previous_eager_value
elif _DUMMY_EAGER_GRAPH in _GRAPH_LEARNING_PHASES:
del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH]
graph = get_graph()
if previous_graph_value is not None:
_GRAPH_LEARNING_PHASES[graph] = previous_graph_value
elif graph in _GRAPH_LEARNING_PHASES:
del _GRAPH_LEARNING_PHASES[graph]
@tf_contextlib.contextmanager
def eager_learning_phase_scope(value):
"""Internal scope that sets the learning phase in eager execution only.
Arguments:
value: Learning phase value, either 0 or 1 (integers).
Yields:
None.
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
assert value in {0, 1}
assert context.executing_eagerly()
previous_value = learning_phase()
try:
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = value
yield
finally:
# Restore learning phase to initial value.
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = previous_value
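# Illustrative sketch (not part of this module): typical use of the scope
# defined above, e.g. to force train-time behaviour such as dropout;
# `model` and `inputs` are hypothetical:
#
#     import tensorflow as tf
#     K = tf.keras.backend
#     with K.learning_phase_scope(1):
#         train_preds = model(inputs)
#
# On exit, the previous learning phase value is restored as implemented above.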
def _current_graph(op_input_list):
"""Return the graph members of `op_input_list`, or the current graph."""
return ops._get_graph_from_inputs(op_input_list)
def _get_session(op_input_list=()):
"""Returns the session object for the current thread."""
global _SESSION
default_session = ops.get_default_session()
if default_session is not None:
session = default_session
else:
if ops.inside_function():
raise RuntimeError('Cannot get session inside Tensorflow graph function.')
# If we don't have a session, or that session does not match the current
# graph, create and cache a new session.
if (getattr(_SESSION, 'session', None) is None or
_SESSION.session.graph is not _current_graph(op_input_list)):
# If we are creating the Session inside a tf.distribute.Strategy scope,
# we ask the strategy for the right session options to use.
if distribution_strategy_context.has_strategy():
configure_and_create_distributed_session(
distribution_strategy_context.get_strategy())
else:
_SESSION.session = session_module.Session(
config=get_default_session_config())
session = _SESSION.session
return session
@keras_export(v1=['keras.backend.get_session'])
def get_session(op_input_list=()):
"""Returns the TF session to be used by the backend.
If a default TensorFlow session is available, we will return it.
Else, we will return the global Keras session assuming it matches
the current graph.
If no global Keras session exists at this point:
we will create a new global session.
Note that you can manually set the global session
via `K.set_session(sess)`.
Arguments:
op_input_list: An optional sequence of tensors or ops, which will be used
to determine the current graph. Otherwise the default graph will be
used.
Returns:
A TensorFlow session.
"""
session = _get_session(op_input_list)
if not _MANUAL_VAR_INIT:
with session.graph.as_default():
_initialize_variables(session)
return session
def get_graph():
if context.executing_eagerly():
global _GRAPH
if _GRAPH is None:
_GRAPH = func_graph.FuncGraph('keras_graph')
return _GRAPH
else:
return ops.get_default_graph()
@tf_contextlib.contextmanager
def _scratch_graph(graph=None):
"""Retrieve a shared and temporary func graph.
The eager execution path lifts a subgraph from the keras global graph into
a scratch graph in order to create a function. DistributionStrategies, in
turn, constructs multiple functions as well as a final combined function. In
order for that logic to work correctly, all of the functions need to be
created on the same scratch FuncGraph.
Args:
graph: A graph to be used as the current scratch graph. If not set then
a scratch graph will either be retrieved or created.
Yields:
The current scratch graph.
"""
global _CURRENT_SCRATCH_GRAPH
if (_CURRENT_SCRATCH_GRAPH is not None and graph is not None and
_CURRENT_SCRATCH_GRAPH is not graph):
raise ValueError('Multiple scratch graphs specified.')
if _CURRENT_SCRATCH_GRAPH:
yield _CURRENT_SCRATCH_GRAPH
return
graph = graph or func_graph.FuncGraph('keras_scratch_graph')
try:
_CURRENT_SCRATCH_GRAPH = graph
yield graph
finally:
_CURRENT_SCRATCH_GRAPH = None
@keras_export('keras.backend.set_session')
def set_session(session):
"""Sets the global TensorFlow session.
Arguments:
session: A TF Session.
"""
global _SESSION
_SESSION.session = session
def get_default_session_config():
if not os.environ.get('OMP_NUM_THREADS'):
config = config_pb2.ConfigProto(allow_soft_placement=True)
else:
num_thread = int(os.environ.get('OMP_NUM_THREADS'))
config = config_pb2.ConfigProto(
intra_op_parallelism_threads=num_thread,
inter_op_parallelism_threads=num_thread,
allow_soft_placement=True)
return config
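# Illustrative note: with OMP_NUM_THREADS=4 set in the environment, the
# function above returns a ConfigProto with intra_op_parallelism_threads=4
# and inter_op_parallelism_threads=4; when the variable is unset, thread
# counts are left at TensorFlow's defaults and only soft placement is enabled.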
# DEVICE MANIPULATION
class _TfDeviceCaptureOp(object):
"""Class for capturing the TF device scope."""
def __init__(self):
self.device = None
def _set_device(self, device):
"""This method captures TF's explicit device scope setting."""
self.device = device
def _get_current_tf_device():
"""Return explicit device of current context, otherwise returns `None`.
Returns:
If the current device scope is explicitly set, it returns a string with
the device (`CPU` or `GPU`). If the scope is not explicitly set, it will
return `None`.
"""
graph = get_graph()
op = _TfDeviceCaptureOp()
graph._apply_device_functions(op)
return op.device
def _is_current_explicit_device(device_type):
"""Check if the current device is explicitly set on the device type specified.
Arguments:
device_type: A string containing `GPU` or `CPU` (case-insensitive).
Returns:
A boolean indicating if the current device scope is explicitly set on the
device type.
Raises:
ValueError: If the `device_type` string indicates an unsupported device.
"""
device_type = device_type.upper()
if device_type not in ['CPU', 'GPU']:
raise ValueError('`device_type` should be either "CPU" or "GPU".')
device = _get_current_tf_device()
return device is not None and device.device_type == device_type.upper()
def _get_available_gpus():
"""Get a list of available gpu devices (formatted as strings).
Returns:
A list of available GPU devices.
"""
if ops.executing_eagerly_outside_functions():
# Returns names of devices directly.
return [name for name in context.list_devices() if 'GPU' in name]
global _LOCAL_DEVICES
if _LOCAL_DEVICES is None:
_LOCAL_DEVICES = get_session().list_devices()
return [x.name for x in _LOCAL_DEVICES if x.device_type == 'GPU']
def _has_nchw_support():
"""Check whether the current scope supports NCHW ops.
TensorFlow does not support NCHW on CPU. Therefore we check if we are not
explicitly put on
CPU, and have GPUs available. In this case there will be soft-placing on the
GPU device.
Returns:
bool: if the current scope device placement would support nchw
"""
explicitly_on_cpu = _is_current_explicit_device('CPU')
gpus_available = bool(_get_available_gpus())
return not explicitly_on_cpu and gpus_available
# VARIABLE MANIPULATION
def _constant_to_tensor(x, dtype):
"""Convert the input `x` to a tensor of type `dtype`.
This is slightly faster than the _to_tensor function, at the cost of
handling fewer cases.
Arguments:
x: An object to be converted (numpy arrays, floats, ints and lists of
them).
dtype: The destination type.
Returns:
A tensor.
"""
return constant_op.constant(x, dtype=dtype)
def _to_tensor(x, dtype):
"""Convert the input `x` to a tensor of type `dtype`.
Arguments:
x: An object to be converted (numpy array, list, tensors).
dtype: The destination type.
Returns:
A tensor.
"""
return ops.convert_to_tensor(x, dtype=dtype)
@keras_export('keras.backend.is_sparse')
def is_sparse(tensor):
"""Returns whether a tensor is a sparse tensor.
Arguments:
tensor: A tensor instance.
Returns:
A boolean.
Example:
```python
>>> from keras import backend as K
>>> a = K.placeholder((2, 2), sparse=False)
>>> print(K.is_sparse(a))
False
>>> b = K.placeholder((2, 2), sparse=True)
>>> print(K.is_sparse(b))
True
```
"""
return isinstance(tensor, sparse_tensor.SparseTensor)
@keras_export('keras.backend.to_dense')
def to_dense(tensor):
"""Converts a sparse tensor into a dense tensor and returns it.
Arguments:
tensor: A tensor instance (potentially sparse).
Returns:
A dense tensor.
Examples:
```python
>>> from keras import backend as K
>>> b = K.placeholder((2, 2), sparse=True)
>>> print(K.is_sparse(b))
True
>>> c = K.to_dense(b)
>>> print(K.is_sparse(c))
False
```
"""
if is_sparse(tensor):
return sparse_ops.sparse_tensor_to_dense(tensor)
else:
return tensor
name_scope = ops.name_scope
@keras_export('keras.backend.variable')
def variable(value, dtype=None, name=None, constraint=None):
"""Instantiates a variable and returns it.
Arguments:
value: Numpy array, initial value of the tensor.
dtype: Tensor type.
name: Optional name string for the tensor.
constraint: Optional projection function to be
applied to the variable after an optimizer update.
Returns:
A variable instance (with Keras metadata included).
Examples:
```python
>>> import numpy as np
>>> from keras import backend as K
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val, dtype='float64', name='example_var')
>>> K.dtype(kvar)
'float64'
>>> print(kvar)
example_var
>>> kvar.eval()
array([[ 1., 2.],
[ 3., 4.]])
```
"""
if dtype is None:
dtype = floatx()
if hasattr(value, 'tocoo'):
sparse_coo = value.tocoo()
indices = np.concatenate((np.expand_dims(sparse_coo.row, 1), np.expand_dims(
sparse_coo.col, 1)), 1)
v = sparse_tensor.SparseTensor(
indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape)
v._keras_shape = sparse_coo.shape
return v
v = resource_variable_ops.ResourceVariable(
value,
dtype=dtypes_module.as_dtype(dtype),
name=name,
constraint=constraint)
if isinstance(value, np.ndarray):
v._keras_shape = value.shape
elif hasattr(value, 'shape'):
v._keras_shape = int_shape(value)
track_variable(v)
return v
def track_tf_optimizer(tf_optimizer):
"""Tracks the given TF optimizer for initialization of its variables."""
if context.executing_eagerly():
return
graph = get_graph()
optimizers = _GRAPH_TF_OPTIMIZERS.setdefault(graph, weakref.WeakSet())
optimizers.add(tf_optimizer)
def track_variable(v):
"""Tracks the given variable for initialization."""
if context.executing_eagerly():
return
graph = v.graph if hasattr(v, 'graph') else get_graph()
if graph not in _GRAPH_VARIABLES:
_GRAPH_VARIABLES[graph] = weakref.WeakSet()
_GRAPH_VARIABLES[graph].add(v)
def _get_variables(graph=None):
"""Returns variables corresponding to the given graph for initialization."""
assert not context.executing_eagerly()
variables = _GRAPH_VARIABLES.setdefault(graph, weakref.WeakSet())
for opt in _GRAPH_TF_OPTIMIZERS.get(graph, set()):
variables.update(opt.optimizer.variables())
return variables
def _initialize_variables(session):
"""Utility to initialize uninitialized variables on the fly."""
variables = _get_variables(get_graph())
candidate_vars = []
for v in variables:
if not getattr(v, '_keras_initialized', False):
candidate_vars.append(v)
if candidate_vars:
# This step is expensive, so we only run it on variables not already
# marked as initialized.
is_initialized = session.run(
[variables_module.is_variable_initialized(v) for v in candidate_vars])
uninitialized_vars = []
for flag, v in zip(is_initialized, candidate_vars):
if not flag:
uninitialized_vars.append(v)
v._keras_initialized = True
if uninitialized_vars:
session.run(variables_module.variables_initializer(uninitialized_vars))
@keras_export('keras.backend.constant')
def constant(value, dtype=None, shape=None, name=None):
"""Creates a constant tensor.
Arguments:
value: A constant value (or list)
dtype: The type of the elements of the resulting tensor.
shape: Optional dimensions of resulting tensor.
name: Optional name for the tensor.
Returns:
A Constant Tensor.
"""
if dtype is None:
dtype = floatx()
# If the outer context is eager but we are executing under the keras
# FuncGraph, we create EagerTensors and use them as constants.
if (ops.executing_eagerly_outside_functions() and
getattr(get_graph(), 'name', '') == 'keras_graph'):
with ops.init_scope():
return constant_op.constant(value, dtype=dtype, shape=shape, name=name)
return constant_op.constant(value, dtype=dtype, shape=shape, name=name)
def is_keras_tensor(x):
"""Returns whether `x` is a Keras tensor.
A "Keras tensor" is a tensor that was returned by a Keras layer,
(`Layer` class) or by `Input`.
Arguments:
x: A candidate tensor.
Returns:
A boolean: Whether the argument is a Keras tensor.
Raises:
ValueError: In case `x` is not a symbolic tensor.
Examples:
```python
>>> import tensorflow as tf
>>> import numpy
>>> from keras import backend as K
>>> from keras.layers import Input, Dense
>>> np_var = numpy.array([1, 2])
>>> K.is_keras_tensor(np_var) # A numpy array is not a symbolic tensor.
ValueError
>>> k_var = tf.placeholder('float32', shape=(1,1))
>>> K.is_keras_tensor(k_var) # A variable indirectly created outside of
keras is not a Keras tensor.
False
>>> keras_var = K.variable(np_var)
>>> K.is_keras_tensor(keras_var) # A variable created with the keras
backend is not a Keras tensor.
False
>>> keras_placeholder = K.placeholder(shape=(2, 4, 5))
>>> K.is_keras_tensor(keras_placeholder) # A placeholder is not a Keras
tensor.
False
>>> keras_input = Input([10])
>>> K.is_keras_tensor(keras_input) # An Input is a Keras tensor.
True
>>> keras_layer_output = Dense(10)(keras_input)
>>> K.is_keras_tensor(keras_layer_output) # Any Keras layer output is a
Keras tensor.
True
```
"""
if not isinstance(x, (ops.Tensor,
variables_module.Variable,
sparse_tensor.SparseTensor)):
raise ValueError('Unexpectedly found an instance of type `' + str(type(x)) +
'`. Expected a symbolic tensor instance.')
return hasattr(x, '_keras_history')
@keras_export('keras.backend.placeholder')
def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None):
"""Instantiates a placeholder tensor and returns it.
Arguments:
shape: Shape of the placeholder
(integer tuple, may include `None` entries).
ndim: Number of axes of the tensor.
At least one of {`shape`, `ndim`} must be specified.
If both are specified, `shape` is used.
dtype: Placeholder type.
sparse: Boolean, whether the placeholder should have a sparse type.
name: Optional name string for the placeholder.
Raises:
ValueError: If called with eager execution.
Returns:
Tensor instance (with Keras metadata included).
Examples:
```python
>>> from keras import backend as K
>>> input_ph = K.placeholder(shape=(2, 4, 5))
>>> input_ph
<tf.Tensor 'Placeholder_4:0' shape=(2, 4, 5) dtype=float32>
```
"""
if dtype is None:
dtype = floatx()
if not shape:
if ndim:
shape = tuple([None for _ in range(ndim)])
with get_graph().as_default():
if sparse:
x = array_ops.sparse_placeholder(dtype, shape=shape, name=name)
else:
x = array_ops.placeholder(dtype, shape=shape, name=name)
return x
def is_placeholder(x):
"""Returns whether `x` is a placeholder.
Arguments:
x: A candidate placeholder.
Returns:
Boolean.
"""
try:
return x.op.type == 'Placeholder'
except AttributeError:
return False
@keras_export('keras.backend.shape')
def shape(x):
"""Returns the symbolic shape of a tensor or variable.
Arguments:
x: A tensor or variable.
Returns:
A symbolic shape (which is itself a tensor).
Examples:
```python
# TensorFlow example
>>> from keras import backend as K
>>> tf_session = K.get_session()
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> input = keras.backend.placeholder(shape=(2, 4, 5))
>>> K.shape(kvar)
<tf.Tensor 'Shape_8:0' shape=(2,) dtype=int32>
>>> K.shape(input)
<tf.Tensor 'Shape_9:0' shape=(3,) dtype=int32>
# To get integer shape (Instead, you can use K.int_shape(x))
>>> K.shape(kvar).eval(session=tf_session)
array([2, 2], dtype=int32)
>>> K.shape(input).eval(session=tf_session)
array([2, 4, 5], dtype=int32)
```
"""
return array_ops.shape(x)
@keras_export('keras.backend.int_shape')
def int_shape(x):
"""Returns the shape of tensor or variable as a tuple of int or None entries.
Arguments:
x: Tensor or variable.
Returns:
A tuple of integers (or None entries).
Examples:
```python
>>> from keras import backend as K
>>> input = K.placeholder(shape=(2, 4, 5))
>>> K.int_shape(input)
(2, 4, 5)
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> K.int_shape(kvar)
(2, 2)
```
"""
try:
shape = x.shape
if not isinstance(shape, tuple):
shape = tuple(shape.as_list())
return shape
except ValueError:
return None
@keras_export('keras.backend.ndim')
def ndim(x):
"""Returns the number of axes in a tensor, as an integer.
Arguments:
x: Tensor or variable.
Returns:
Integer (scalar), number of axes.
Examples:
```python
>>> from keras import backend as K
>>> input = K.placeholder(shape=(2, 4, 5))
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> K.ndim(input)
3
>>> K.ndim(kvar)
2
```
"""
dims = x.shape._dims
if dims is not None:
return len(dims)
return None
@keras_export('keras.backend.dtype')
def dtype(x):
"""Returns the dtype of a Keras tensor or variable, as a string.
Arguments:
x: Tensor or variable.
Returns:
String, dtype of `x`.
Examples:
```python
>>> from keras import backend as K
>>> K.dtype(K.placeholder(shape=(2,4,5)))
'float32'
>>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float32'))
'float32'
>>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float64'))
'float64'
# Keras variable
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]))
>>> K.dtype(kvar)
'float32'
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32')
>>> K.dtype(kvar)
'float32'
```
"""
return x.dtype.base_dtype.name
@keras_export('keras.backend.eval')
def eval(x):
"""Evaluates the value of a variable.
Arguments:
x: A variable.
Returns:
A Numpy array.
Examples:
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32')
>>> K.eval(kvar)
array([[ 1., 2.],
[ 3., 4.]], dtype=float32)
```
"""
return get_value(to_dense(x))
@keras_export('keras.backend.zeros')
def zeros(shape, dtype=None, name=None):
"""Instantiates an all-zeros variable and returns it.
Arguments:
shape: Tuple of integers, shape of returned Keras variable
dtype: String, data type of returned Keras variable
name: String, name of returned Keras variable
Returns:
A variable (including Keras metadata), filled with `0.0`.
Note that if `shape` was symbolic, we cannot return a variable,
and will return a dynamically-shaped tensor instead.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.zeros((3,4))
>>> K.eval(kvar)
array([[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.]], dtype=float32)
```
"""
with ops.init_scope():
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
v = array_ops.zeros(shape=shape, dtype=tf_dtype, name=name)
if py_all(v.shape.as_list()):
return variable(v, dtype=dtype, name=name)
track_variable(v)
return v
@keras_export('keras.backend.ones')
def ones(shape, dtype=None, name=None):
"""Instantiates an all-ones variable and returns it.
Arguments:
shape: Tuple of integers, shape of returned Keras variable.
dtype: String, data type of returned Keras variable.
name: String, name of returned Keras variable.
Returns:
A Keras variable, filled with `1.0`.
Note that if `shape` was symbolic, we cannot return a variable,
and will return a dynamically-shaped tensor instead.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.ones((3,4))
>>> K.eval(kvar)
array([[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.]], dtype=float32)
```
"""
with ops.init_scope():
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
v = array_ops.ones(shape=shape, dtype=tf_dtype, name=name)
if py_all(v.shape.as_list()):
return variable(v, dtype=dtype, name=name)
track_variable(v)
return v
@keras_export('keras.backend.eye')
def eye(size, dtype=None, name=None):
"""Instantiate an identity matrix and returns it.
Arguments:
size: Integer, number of rows/columns.
dtype: String, data type of returned Keras variable.
name: String, name of returned Keras variable.
Returns:
A Keras variable, an identity matrix.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.eye(3)
>>> K.eval(kvar)
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]], dtype=float32)
```
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
return variable(linalg_ops.eye(size, dtype=tf_dtype), dtype, name)
@keras_export('keras.backend.zeros_like')
def zeros_like(x, dtype=None, name=None):
"""Instantiates an all-zeros variable of the same shape as another tensor.
Arguments:
x: Keras variable or Keras tensor.
dtype: String, dtype of returned Keras variable.
None uses the dtype of x.
name: String, name for the variable to create.
Returns:
A Keras variable with the shape of x filled with zeros.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.random.random((2,3)))
>>> kvar_zeros = K.zeros_like(kvar)
>>> K.eval(kvar_zeros)
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
```
"""
return array_ops.zeros_like(x, dtype=dtype, name=name)
@keras_export('keras.backend.ones_like')
def ones_like(x, dtype=None, name=None):
"""Instantiates an all-ones variable of the same shape as another tensor.
Arguments:
x: Keras variable or tensor.
dtype: String, dtype of returned Keras variable.
None uses the dtype of x.
name: String, name for the variable to create.
Returns:
A Keras variable with the shape of x filled with ones.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.random.random((2,3)))
>>> kvar_ones = K.ones_like(kvar)
>>> K.eval(kvar_ones)
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
```
"""
return array_ops.ones_like(x, dtype=dtype, name=name)
def identity(x, name=None):
"""Returns a tensor with the same content as the input tensor.
Arguments:
x: The input tensor.
name: String, name for the variable to create.
Returns:
A tensor of the same shape, type and content.
"""
return array_ops.identity(x, name=name)
@keras_export('keras.backend.random_uniform_variable')
def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None):
"""Instantiates a variable with values drawn from a uniform distribution.
Arguments:
shape: Tuple of integers, shape of returned Keras variable.
low: Float, lower boundary of the output interval.
high: Float, upper boundary of the output interval.
dtype: String, dtype of returned Keras variable.
name: String, name of returned Keras variable.
seed: Integer, random seed.
Returns:
A Keras variable, filled with drawn samples.
Example:
```python
# TensorFlow example
>>> kvar = K.random_uniform_variable((2,3), 0, 1)
>>> kvar
<tensorflow.python.ops.variables.Variable object at 0x10ab40b10>
>>> K.eval(kvar)
array([[ 0.10940075, 0.10047495, 0.476143 ],
[ 0.66137183, 0.00869417, 0.89220798]], dtype=float32)
```
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e8)
value = init_ops.random_uniform_initializer(
low, high, dtype=tf_dtype, seed=seed)(shape)
return variable(value, dtype=dtype, name=name)
@keras_export('keras.backend.random_normal_variable')
def random_normal_variable(shape, mean, scale, dtype=None, name=None,
seed=None):
"""Instantiates a variable with values drawn from a normal distribution.
Arguments:
shape: Tuple of integers, shape of returned Keras variable.
mean: Float, mean of the normal distribution.
scale: Float, standard deviation of the normal distribution.
dtype: String, dtype of returned Keras variable.
name: String, name of returned Keras variable.
seed: Integer, random seed.
Returns:
A Keras variable, filled with drawn samples.
Example:
```python
# TensorFlow example
>>> kvar = K.random_normal_variable((2,3), 0, 1)
>>> kvar
<tensorflow.python.ops.variables.Variable object at 0x10ab12dd0>
>>> K.eval(kvar)
array([[ 1.19591331, 0.68685907, -0.63814116],
[ 0.92629528, 0.28055015, 1.70484698]], dtype=float32)
```
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e8)
value = init_ops.random_normal_initializer(
mean, scale, dtype=tf_dtype, seed=seed)(shape)
return variable(value, dtype=dtype, name=name)
@keras_export('keras.backend.count_params')
def count_params(x):
"""Returns the static number of elements in a variable or tensor.
Arguments:
x: Variable or tensor.
Returns:
Integer, the number of scalars in `x`.
Example:
```python
>>> kvar = K.zeros((2,3))
>>> K.count_params(kvar)
6
>>> K.eval(kvar)
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
```
"""
return np.prod(x.shape.as_list())
@keras_export('keras.backend.cast')
def cast(x, dtype):
"""Casts a tensor to a different dtype and returns it.
You can cast a Keras variable but it still returns a Keras tensor.
Arguments:
x: Keras tensor (or variable).
dtype: String, either (`'float16'`, `'float32'`, or `'float64'`).
Returns:
Keras tensor with dtype `dtype`.
Example:
```python
>>> from keras import backend as K
>>> input = K.placeholder((2, 3), dtype='float32')
>>> input
<tf.Tensor 'Placeholder_2:0' shape=(2, 3) dtype=float32>
# It doesn't work in-place as below.
>>> K.cast(input, dtype='float16')
<tf.Tensor 'Cast_1:0' shape=(2, 3) dtype=float16>
>>> input
<tf.Tensor 'Placeholder_2:0' shape=(2, 3) dtype=float32>
# you need to assign it.
>>> input = K.cast(input, dtype='float16')
>>> input
<tf.Tensor 'Cast_2:0' shape=(2, 3) dtype=float16>
```
"""
return math_ops.cast(x, dtype)
# UPDATES OPS
@keras_export('keras.backend.update')
def update(x, new_x):
return state_ops.assign(x, new_x)
@keras_export('keras.backend.update_add')
def update_add(x, increment):
"""Update the value of `x` by adding `increment`.
Arguments:
x: A Variable.
increment: A tensor of same shape as `x`.
Returns:
The variable `x` updated.
"""
return state_ops.assign_add(x, increment)
@keras_export('keras.backend.update_sub')
def update_sub(x, decrement):
"""Update the value of `x` by subtracting `decrement`.
Arguments:
x: A Variable.
decrement: A tensor of same shape as `x`.
Returns:
The variable `x` updated.
"""
return state_ops.assign_sub(x, decrement)
@keras_export('keras.backend.moving_average_update')
def moving_average_update(x, value, momentum):
"""Compute the moving average of a variable.
Arguments:
x: A Variable.
value: A tensor with the same shape as `variable`.
momentum: The moving average momentum.
Returns:
An Operation to update the variable.
"""
# `training` is higher-up than the Keras backend in the abstraction hierarchy.
# In particular, `training` depends on layers, and thus on Keras.
# moving_averages, being low-level ops, should not be part of the training
# module.
from tensorflow.python.training import moving_averages # pylint: disable=g-import-not-at-top
return moving_averages.assign_moving_average(
x, value, momentum, zero_debias=True)
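# Illustrative note: assign_moving_average updates the variable roughly as
#     x <- x - (1 - momentum) * (x - value)
# i.e. an exponential moving average x ~ momentum * x + (1 - momentum) * value,
# with zero_debias adding a bias correction for the zero-initialized case.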
# LINEAR ALGEBRA
@keras_export('keras.backend.dot')
def dot(x, y):
"""Multiplies 2 tensors (and/or variables) and returns a *tensor*.
When attempting to multiply an nD tensor
with an nD tensor, it reproduces the Theano behavior.
(e.g. `(2, 3) * (4, 3, 5) -> (2, 4, 5)`)
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A tensor, dot product of `x` and `y`.
Examples:
```python
# dot product between tensors
>>> x = K.placeholder(shape=(2, 3))
>>> y = K.placeholder(shape=(3, 4))
>>> xy = K.dot(x, y)
>>> xy
<tf.Tensor 'MatMul_9:0' shape=(2, 4) dtype=float32>
```
```python
# dot product between tensors
>>> x = K.placeholder(shape=(32, 28, 3))
>>> y = K.placeholder(shape=(3, 4))
>>> xy = K.dot(x, y)
>>> xy
<tf.Tensor 'MatMul_9:0' shape=(32, 28, 4) dtype=float32>
```
```python
# Theano-like behavior example
>>> x = K.random_uniform_variable(shape=(2, 3), low=0, high=1)
>>> y = K.ones((4, 3, 5))
>>> xy = K.dot(x, y)
>>> K.int_shape(xy)
(2, 4, 5)
```
"""
if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2):
x_shape = []
for i, s in zip(int_shape(x), array_ops.unstack(array_ops.shape(x))):
if i is not None:
x_shape.append(i)
else:
x_shape.append(s)
x_shape = tuple(x_shape)
y_shape = []
for i, s in zip(int_shape(y), array_ops.unstack(array_ops.shape(y))):
if i is not None:
y_shape.append(i)
else:
y_shape.append(s)
y_shape = tuple(y_shape)
y_permute_dim = list(range(ndim(y)))
y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim
xt = array_ops.reshape(x, [-1, x_shape[-1]])
yt = array_ops.reshape(
array_ops.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])
return array_ops.reshape(
math_ops.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:])
if is_sparse(x):
out = sparse_ops.sparse_tensor_dense_matmul(x, y)
else:
out = math_ops.matmul(x, y)
return out
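# Illustrative check of the Theano-like case from the docstring (NumPy only,
# not executed as part of this module):
#
#     import numpy as np
#     x = np.random.rand(2, 3)
#     y = np.random.rand(4, 3, 5)
#     out = np.einsum('ij,kjl->ikl', x, y)  # shape (2, 4, 5), matching K.dot(x, y)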
@keras_export('keras.backend.batch_dot')
def batch_dot(x, y, axes=None):
"""Batchwise dot product.
`batch_dot` is used to compute dot product of `x` and `y` when
`x` and `y` are data in batch, i.e. in a shape of
`(batch_size, :)`.
`batch_dot` results in a tensor or variable with less dimensions
than the input. If the number of dimensions is reduced to 1,
we use `expand_dims` to make sure that ndim is at least 2.
Arguments:
x: Keras tensor or variable with `ndim >= 2`.
y: Keras tensor or variable with `ndim >= 2`.
axes: list of (or single) int with target dimensions.
The lengths of `axes[0]` and `axes[1]` should be the same.
Returns:
A tensor with shape equal to the concatenation of `x`'s shape
(less the dimension that was summed over) and `y`'s shape
(less the batch dimension and the dimension that was summed over).
If the final rank is 1, we reshape it to `(batch_size, 1)`.
Examples:
Assume `x = [[1, 2], [3, 4]]` and `y = [[5, 6], [7, 8]]`
`batch_dot(x, y, axes=1) = [[17], [53]]` which holds the main diagonal
of `x.dot(y.T)`, although we never have to calculate the off-diagonal
elements.
Shape inference:
Let `x`'s shape be `(100, 20)` and `y`'s shape be `(100, 30, 20)`.
If `axes` is (1, 2), to find the output shape of resultant tensor,
loop through each dimension in `x`'s shape and `y`'s shape:
* `x.shape[0]` : 100 : append to output shape
* `x.shape[1]` : 20 : do not append to output shape,
dimension 1 of `x` has been summed over. (`dot_axes[0]` = 1)
* `y.shape[0]` : 100 : do not append to output shape,
always ignore first dimension of `y`
* `y.shape[1]` : 30 : append to output shape
* `y.shape[2]` : 20 : do not append to output shape,
dimension 2 of `y` has been summed over. (`dot_axes[1]` = 2)
`output_shape` = `(100, 30)`
```python
>>> x_batch = K.ones(shape=(32, 20, 1))
>>> y_batch = K.ones(shape=(32, 30, 20))
>>> xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=[1, 2])
>>> K.int_shape(xy_batch_dot)
(32, 1, 30)
```
"""
if isinstance(axes, int):
axes = (axes, axes)
x_ndim = ndim(x)
y_ndim = ndim(y)
if axes is None:
# behaves like tf.batch_matmul as default
axes = [x_ndim - 1, y_ndim - 2]
if x_ndim > y_ndim:
diff = x_ndim - y_ndim
y = array_ops.reshape(y,
array_ops.concat(
[array_ops.shape(y), [1] * (diff)], axis=0))
elif y_ndim > x_ndim:
diff = y_ndim - x_ndim
x = array_ops.reshape(x,
array_ops.concat(
[array_ops.shape(x), [1] * (diff)], axis=0))
else:
diff = 0
if ndim(x) == 2 and ndim(y) == 2:
if axes[0] == axes[1]:
out = math_ops.reduce_sum(math_ops.multiply(x, y), axes[0])
else:
out = math_ops.reduce_sum(
math_ops.multiply(array_ops.transpose(x, [1, 0]), y), axes[1])
else:
adj_x = None if axes[0] == ndim(x) - 1 else True
adj_y = True if axes[1] == ndim(y) - 1 else None
out = math_ops.matmul(x, y, adjoint_a=adj_x, adjoint_b=adj_y)
if diff:
if x_ndim > y_ndim:
idx = x_ndim + y_ndim - 3
else:
idx = x_ndim - 1
out = array_ops.squeeze(out, list(range(idx, idx + diff)))
if ndim(out) == 1:
out = expand_dims(out, 1)
return out
@keras_export('keras.backend.transpose')
def transpose(x):
"""Transposes a tensor and returns it.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
Examples:
```python
>>> var = K.variable([[1, 2, 3], [4, 5, 6]])
>>> K.eval(var)
array([[ 1., 2., 3.],
[ 4., 5., 6.]], dtype=float32)
>>> var_transposed = K.transpose(var)
>>> K.eval(var_transposed)
array([[ 1., 4.],
[ 2., 5.],
[ 3., 6.]], dtype=float32)
```
```python
>>> input = K.placeholder((2, 3))
>>> input
<tf.Tensor 'Placeholder_11:0' shape=(2, 3) dtype=float32>
>>> input_transposed = K.transpose(input)
>>> input_transposed
<tf.Tensor 'transpose_4:0' shape=(3, 2) dtype=float32>
```
"""
return array_ops.transpose(x)
@keras_export('keras.backend.gather')
def gather(reference, indices):
"""Retrieves the elements of indices `indices` in the tensor `reference`.
Arguments:
reference: A tensor.
indices: An integer tensor of indices.
Returns:
A tensor of same type as `reference`.
"""
return array_ops.gather(reference, indices)
# ELEMENT-WISE OPERATIONS
@keras_export('keras.backend.max')
def max(x, axis=None, keepdims=False):
"""Maximum value in a tensor.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to find maximum values.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with maximum values of `x`.
"""
return math_ops.reduce_max(x, axis, keepdims)
@keras_export('keras.backend.min')
def min(x, axis=None, keepdims=False):
"""Minimum value in a tensor.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to find minimum values.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with minimum values of `x`.
"""
return math_ops.reduce_min(x, axis, keepdims)
@keras_export('keras.backend.sum')
def sum(x, axis=None, keepdims=False):
"""Sum of the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to sum over.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with sum of `x`.
"""
return math_ops.reduce_sum(x, axis, keepdims)
@keras_export('keras.backend.prod')
def prod(x, axis=None, keepdims=False):
"""Multiplies the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the product.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with the product of elements of `x`.
"""
return math_ops.reduce_prod(x, axis, keepdims)
@keras_export('keras.backend.cumsum')
def cumsum(x, axis=0):
"""Cumulative sum of the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the sum.
Returns:
A tensor of the cumulative sum of values of `x` along `axis`.
"""
return math_ops.cumsum(x, axis=axis)
@keras_export('keras.backend.cumprod')
def cumprod(x, axis=0):
"""Cumulative product of the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the product.
Returns:
A tensor of the cumulative product of values of `x` along `axis`.
"""
return math_ops.cumprod(x, axis=axis)
@keras_export('keras.backend.var')
def var(x, axis=None, keepdims=False):
"""Variance of a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the variance.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with the variance of elements of `x`.
"""
if x.dtype.base_dtype == dtypes_module.bool:
x = math_ops.cast(x, floatx())
return math_ops.reduce_variance(x, axis=axis, keepdims=keepdims)
@keras_export('keras.backend.std')
def std(x, axis=None, keepdims=False):
"""Standard deviation of a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the standard deviation.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with the standard deviation of elements of `x`.
"""
if x.dtype.base_dtype == dtypes_module.bool:
x = math_ops.cast(x, floatx())
return math_ops.reduce_std(x, axis=axis, keepdims=keepdims)
@keras_export('keras.backend.mean')
def mean(x, axis=None, keepdims=False):
"""Mean of a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: A list of integer. Axes to compute the mean.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1 for each entry in `axis`. If `keepdims` is `True`,
the reduced dimensions are retained with length 1.
Returns:
A tensor with the mean of elements of `x`.
"""
if x.dtype.base_dtype == dtypes_module.bool:
x = math_ops.cast(x, floatx())
return math_ops.reduce_mean(x, axis, keepdims)
@keras_export('keras.backend.any')
def any(x, axis=None, keepdims=False):
"""Bitwise reduction (logical OR).
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
      keepdims: whether to drop or broadcast the reduction axes.
  Returns:
      A bool tensor (0s and 1s).
"""
x = math_ops.cast(x, dtypes_module.bool)
return math_ops.reduce_any(x, axis, keepdims)
@keras_export('keras.backend.all')
def all(x, axis=None, keepdims=False):
"""Bitwise reduction (logical AND).
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
      keepdims: whether to drop or broadcast the reduction axes.
  Returns:
      A bool tensor (0s and 1s).
"""
x = math_ops.cast(x, dtypes_module.bool)
return math_ops.reduce_all(x, axis, keepdims)
@keras_export('keras.backend.argmax')
def argmax(x, axis=-1):
"""Returns the index of the maximum value along an axis.
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
Returns:
A tensor.
"""
return math_ops.argmax(x, axis)
@keras_export('keras.backend.argmin')
def argmin(x, axis=-1):
"""Returns the index of the minimum value along an axis.
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
Returns:
A tensor.
"""
return math_ops.argmin(x, axis)
@keras_export('keras.backend.square')
def square(x):
"""Element-wise square.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.square(x)
@keras_export('keras.backend.abs')
def abs(x):
"""Element-wise absolute value.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.abs(x)
@keras_export('keras.backend.sqrt')
def sqrt(x):
"""Element-wise square root.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
zero = _constant_to_tensor(0., x.dtype.base_dtype)
inf = _constant_to_tensor(np.inf, x.dtype.base_dtype)
x = clip_ops.clip_by_value(x, zero, inf)
return math_ops.sqrt(x)
@keras_export('keras.backend.exp')
def exp(x):
"""Element-wise exponential.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.exp(x)
@keras_export('keras.backend.log')
def log(x):
"""Element-wise log.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.log(x)
def logsumexp(x, axis=None, keepdims=False):
"""Computes log(sum(exp(elements across dimensions of a tensor))).
This function is more numerically stable than log(sum(exp(x))).
It avoids overflows caused by taking the exp of large inputs and
underflows caused by taking the log of small inputs.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to reduce over.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`, the reduced dimension is
retained with length 1.
Returns:
The reduced tensor.
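  Example (illustrative sketch of the numerical-stability claim; assumes this
  backend module is imported as `K`):
  ```python
  >>> x = K.constant([1000., 1000.])
  >>> K.eval(K.log(K.sum(K.exp(x))))  # naive version overflows to inf
  >>> K.eval(K.logsumexp(x))          # stable: roughly 1000. + log(2.)
  ```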
"""
return math_ops.reduce_logsumexp(x, axis, keepdims)
@keras_export('keras.backend.round')
def round(x):
"""Element-wise rounding to the closest integer.
In case of tie, the rounding mode used is "half to even".
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.round(x)
@keras_export('keras.backend.sign')
def sign(x):
"""Element-wise sign.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.sign(x)
@keras_export('keras.backend.pow')
def pow(x, a):
"""Element-wise exponentiation.
Arguments:
x: Tensor or variable.
a: Python integer.
Returns:
A tensor.
"""
return math_ops.pow(x, a)
@keras_export('keras.backend.clip')
def clip(x, min_value, max_value):
"""Element-wise value clipping.
Arguments:
x: Tensor or variable.
min_value: Python float or integer.
max_value: Python float or integer.
Returns:
A tensor.
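  Example (illustrative sketch; assumes this backend module is imported as `K`):
  ```python
  >>> x = K.constant([-2., 0.5, 3.])
  >>> K.eval(K.clip(x, min_value=0., max_value=1.))  # -> [0., 0.5, 1.]
  ```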
"""
if max_value is not None and max_value < min_value:
max_value = min_value
if max_value is None:
max_value = np.inf
min_value = _constant_to_tensor(min_value, x.dtype.base_dtype)
max_value = _constant_to_tensor(max_value, x.dtype.base_dtype)
return clip_ops.clip_by_value(x, min_value, max_value)
@keras_export('keras.backend.equal')
def equal(x, y):
"""Element-wise equality between two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.equal(x, y)
@keras_export('keras.backend.not_equal')
def not_equal(x, y):
"""Element-wise inequality between two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.not_equal(x, y)
@keras_export('keras.backend.greater')
def greater(x, y):
"""Element-wise truth value of (x > y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.greater(x, y)
@keras_export('keras.backend.greater_equal')
def greater_equal(x, y):
"""Element-wise truth value of (x >= y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.greater_equal(x, y)
@keras_export('keras.backend.less')
def less(x, y):
"""Element-wise truth value of (x < y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.less(x, y)
@keras_export('keras.backend.less_equal')
def less_equal(x, y):
"""Element-wise truth value of (x <= y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.less_equal(x, y)
@keras_export('keras.backend.maximum')
def maximum(x, y):
"""Element-wise maximum of two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.maximum(x, y)
@keras_export('keras.backend.minimum')
def minimum(x, y):
"""Element-wise minimum of two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.minimum(x, y)
@keras_export('keras.backend.sin')
def sin(x):
"""Computes sin of x element-wise.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.sin(x)
@keras_export('keras.backend.cos')
def cos(x):
"""Computes cos of x element-wise.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.cos(x)
def _regular_normalize_batch_in_training(x,
gamma,
beta,
reduction_axes,
epsilon=1e-3):
"""Non-fused version of `normalize_batch_in_training`.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
mean, var = nn.moments(x, reduction_axes, None, None, False)
normed = nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
return normed, mean, var
def _broadcast_normalize_batch_in_training(x,
gamma,
beta,
reduction_axes,
epsilon=1e-3):
"""Non-fused, broadcast version of `normalize_batch_in_training`.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
mean, var = nn.moments(x, reduction_axes, None, None, False)
target_shape = []
for axis in range(ndim(x)):
if axis in reduction_axes:
target_shape.append(1)
else:
target_shape.append(array_ops.shape(x)[axis])
target_shape = array_ops.stack(target_shape)
broadcast_mean = array_ops.reshape(mean, target_shape)
broadcast_var = array_ops.reshape(var, target_shape)
if gamma is None:
broadcast_gamma = None
else:
broadcast_gamma = array_ops.reshape(gamma, target_shape)
if beta is None:
broadcast_beta = None
else:
broadcast_beta = array_ops.reshape(beta, target_shape)
normed = nn.batch_normalization(x, broadcast_mean, broadcast_var,
broadcast_beta, broadcast_gamma, epsilon)
return normed, mean, var
def _fused_normalize_batch_in_training(x,
gamma,
beta,
reduction_axes,
epsilon=1e-3):
"""Fused version of `normalize_batch_in_training`.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
if list(reduction_axes) == [0, 1, 2]:
normalization_axis = 3
tf_data_format = 'NHWC'
else:
normalization_axis = 1
tf_data_format = 'NCHW'
if gamma is None:
gamma = constant_op.constant(
1.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])
if beta is None:
beta = constant_op.constant(
0.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])
return nn.fused_batch_norm(
x, gamma, beta, epsilon=epsilon, data_format=tf_data_format)
@keras_export('keras.backend.normalize_batch_in_training')
def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3):
"""Computes mean and std for batch then apply batch_normalization on batch.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
if ndim(x) == 4 and list(reduction_axes) in [[0, 1, 2], [0, 2, 3]]:
if not _has_nchw_support() and list(reduction_axes) == [0, 2, 3]:
return _broadcast_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
return _fused_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
else:
if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:
return _regular_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
else:
return _broadcast_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
@keras_export('keras.backend.batch_normalization')
def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3):
"""Applies batch normalization on x given mean, var, beta and gamma.
I.e. returns:
  `output = (x - mean) / sqrt(var + epsilon) * gamma + beta`
Arguments:
x: Input tensor or variable.
mean: Mean of batch.
var: Variance of batch.
beta: Tensor with which to center the input.
gamma: Tensor by which to scale the input.
axis: Integer, the axis that should be normalized.
(typically the features axis).
epsilon: Fuzz factor.
Returns:
A tensor.
"""
if ndim(x) == 4:
# The CPU implementation of `fused_batch_norm` only supports NHWC
if axis == 1 or axis == -3:
tf_data_format = 'NCHW'
elif axis == 3 or axis == -1:
tf_data_format = 'NHWC'
else:
tf_data_format = None
if (tf_data_format == 'NHWC' or
tf_data_format == 'NCHW' and _has_nchw_support()):
# The mean / var / beta / gamma tensors may be broadcasted
# so they may have extra axes of size 1, which should be squeezed.
if ndim(mean) > 1:
mean = array_ops.reshape(mean, [-1])
if ndim(var) > 1:
var = array_ops.reshape(var, [-1])
if beta is None:
beta = zeros_like(mean)
elif ndim(beta) > 1:
beta = array_ops.reshape(beta, [-1])
if gamma is None:
gamma = ones_like(mean)
elif ndim(gamma) > 1:
gamma = array_ops.reshape(gamma, [-1])
y, _, _ = nn.fused_batch_norm(
x,
gamma,
beta,
epsilon=epsilon,
mean=mean,
variance=var,
data_format=tf_data_format,
is_training=False
)
return y
return nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
# SHAPE OPERATIONS
@keras_export('keras.backend.concatenate')
def concatenate(tensors, axis=-1):
"""Concatenates a list of tensors alongside the specified axis.
Arguments:
tensors: list of tensors to concatenate.
axis: concatenation axis.
Returns:
A tensor.
"""
if axis < 0:
rank = ndim(tensors[0])
if rank:
axis %= rank
else:
axis = 0
if py_all(is_sparse(x) for x in tensors):
return sparse_ops.sparse_concat(axis, tensors)
else:
return array_ops.concat([to_dense(x) for x in tensors], axis)
@keras_export('keras.backend.reshape')
def reshape(x, shape):
"""Reshapes a tensor to the specified shape.
Arguments:
x: Tensor or variable.
shape: Target shape tuple.
Returns:
A tensor.
"""
return array_ops.reshape(x, shape)
@keras_export('keras.backend.permute_dimensions')
def permute_dimensions(x, pattern):
"""Permutes axes in a tensor.
Arguments:
x: Tensor or variable.
pattern: A tuple of
dimension indices, e.g. `(0, 2, 1)`.
Returns:
A tensor.
"""
return array_ops.transpose(x, perm=pattern)
@keras_export('keras.backend.resize_images')
def resize_images(x, height_factor, width_factor, data_format,
interpolation='nearest'):
"""Resizes the images contained in a 4D tensor.
Arguments:
x: Tensor or variable to resize.
height_factor: Positive integer.
width_factor: Positive integer.
data_format: One of `"channels_first"`, `"channels_last"`.
interpolation: A string, one of `nearest` or `bilinear`.
Returns:
A tensor.
Raises:
ValueError: in case of incorrect value for
`data_format` or `interpolation`.
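  Example (illustrative shape sketch; assumes this backend module is imported
  as `K` and `channels_last` data):
  ```python
  >>> x = K.ones(shape=(2, 4, 4, 3))  # (batch, rows, cols, channels)
  >>> y = K.resize_images(x, 2, 2, data_format='channels_last')
  >>> K.eval(y).shape
  (2, 8, 8, 3)
  ```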
"""
if data_format == 'channels_first':
rows, cols = 2, 3
elif data_format == 'channels_last':
rows, cols = 1, 2
else:
raise ValueError('Invalid `data_format` argument: %s' % (data_format,))
original_shape = int_shape(x)
new_shape = array_ops.shape(x)[rows:cols + 1]
new_shape *= constant_op.constant(
np.array([height_factor, width_factor], dtype='int32'))
if data_format == 'channels_first':
x = permute_dimensions(x, [0, 2, 3, 1])
if interpolation == 'nearest':
x = image_ops.resize_nearest_neighbor(x, new_shape)
elif interpolation == 'bilinear':
x = image_ops.resize_bilinear(x, new_shape)
else:
raise ValueError('interpolation should be one '
'of "nearest" or "bilinear".')
if data_format == 'channels_first':
x = permute_dimensions(x, [0, 3, 1, 2])
if original_shape[rows] is None:
new_height = None
else:
new_height = original_shape[rows] * height_factor
if original_shape[cols] is None:
new_width = None
else:
new_width = original_shape[cols] * width_factor
if data_format == 'channels_first':
output_shape = (None, None, new_height, new_width)
else:
output_shape = (None, new_height, new_width, None)
x.set_shape(output_shape)
return x
@keras_export('keras.backend.resize_volumes')
def resize_volumes(x, depth_factor, height_factor, width_factor, data_format):
"""Resizes the volume contained in a 5D tensor.
Arguments:
x: Tensor or variable to resize.
depth_factor: Positive integer.
height_factor: Positive integer.
width_factor: Positive integer.
data_format: One of `"channels_first"`, `"channels_last"`.
Returns:
A tensor.
Raises:
ValueError: if `data_format` is neither
          `channels_last` nor `channels_first`.
"""
if data_format == 'channels_first':
output = repeat_elements(x, depth_factor, axis=2)
output = repeat_elements(output, height_factor, axis=3)
output = repeat_elements(output, width_factor, axis=4)
return output
elif data_format == 'channels_last':
output = repeat_elements(x, depth_factor, axis=1)
output = repeat_elements(output, height_factor, axis=2)
output = repeat_elements(output, width_factor, axis=3)
return output
else:
raise ValueError('Invalid data_format: ' + str(data_format))
@keras_export('keras.backend.repeat_elements')
def repeat_elements(x, rep, axis):
"""Repeats the elements of a tensor along an axis, like `np.repeat`.
If `x` has shape `(s1, s2, s3)` and `axis` is `1`, the output
will have shape `(s1, s2 * rep, s3)`.
Arguments:
x: Tensor or variable.
rep: Python integer, number of times to repeat.
axis: Axis along which to repeat.
Returns:
A tensor.
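  Example (illustrative sketch; assumes this backend module is imported as `K`):
  ```python
  >>> x = K.constant([[1., 2.], [3., 4.]])
  >>> K.eval(K.repeat_elements(x, rep=2, axis=1))
  # -> [[1., 1., 2., 2.],
  #     [3., 3., 4., 4.]]
  ```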
"""
x_shape = x.shape.as_list()
# For static axis
if x_shape[axis] is not None:
# slices along the repeat axis
splits = array_ops.split(value=x,
num_or_size_splits=x_shape[axis],
axis=axis)
# repeat each slice the given number of reps
x_rep = [s for s in splits for _ in range(rep)]
return concatenate(x_rep, axis)
# Here we use tf.tile to mimic behavior of np.repeat so that
# we can handle dynamic shapes (that include None).
# To do that, we need an auxiliary axis to repeat elements along
# it and then merge them along the desired axis.
# Repeating
auxiliary_axis = axis + 1
x_shape = array_ops.shape(x)
x_rep = array_ops.expand_dims(x, axis=auxiliary_axis)
reps = np.ones(len(x.shape) + 1)
reps[auxiliary_axis] = rep
x_rep = array_ops.tile(x_rep, reps)
# Merging
reps = np.delete(reps, auxiliary_axis)
reps[axis] = rep
reps = array_ops.constant(reps, dtype='int32')
x_shape *= reps
x_rep = array_ops.reshape(x_rep, x_shape)
# Fix shape representation
x_shape = x.shape.as_list()
x_rep.set_shape(x_shape)
x_rep._keras_shape = tuple(x_shape)
return x_rep
@keras_export('keras.backend.repeat')
def repeat(x, n):
"""Repeats a 2D tensor.
  If `x` has shape `(samples, dim)` and `n` is `2`,
  the output will have shape `(samples, 2, dim)`.
Arguments:
x: Tensor or variable.
n: Python integer, number of times to repeat.
Returns:
A tensor.
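  Example (illustrative sketch; assumes this backend module is imported as `K`):
  ```python
  >>> x = K.constant([[1., 2., 3.], [4., 5., 6.]])  # shape (2, 3)
  >>> K.eval(K.repeat(x, 2)).shape
  (2, 2, 3)
  ```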
"""
assert ndim(x) == 2
x = array_ops.expand_dims(x, 1)
pattern = array_ops.stack([1, n, 1])
return array_ops.tile(x, pattern)
@keras_export('keras.backend.arange')
def arange(start, stop=None, step=1, dtype='int32'):
"""Creates a 1D tensor containing a sequence of integers.
The function arguments use the same convention as
Theano's arange: if only one argument is provided,
it is in fact the "stop" argument and "start" is 0.
The default type of the returned tensor is `'int32'` to
match TensorFlow's default.
Arguments:
start: Start value.
stop: Stop value.
step: Difference between two successive values.
dtype: Integer dtype to use.
Returns:
An integer tensor.
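  Example (illustrative sketch; assumes this backend module is imported as `K`):
  ```python
  >>> K.eval(K.arange(5))         # -> [0, 1, 2, 3, 4]
  >>> K.eval(K.arange(2, 10, 2))  # -> [2, 4, 6, 8]
  ```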
"""
# Match the behavior of numpy and Theano by returning an empty sequence.
if stop is None and start < 0:
start = 0
result = math_ops.range(start, limit=stop, delta=step, name='arange')
if dtype != 'int32':
result = cast(result, dtype)
return result
@keras_export('keras.backend.tile')
def tile(x, n):
"""Creates a tensor by tiling `x` by `n`.
Arguments:
x: A tensor or variable
n: A list of integer. The length must be the same as the number of
dimensions in `x`.
Returns:
A tiled tensor.
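  Example (illustrative sketch; assumes this backend module is imported as `K`):
  ```python
  >>> x = K.constant([[1., 2.], [3., 4.]])
  >>> K.eval(K.tile(x, [2, 1]))
  # -> [[1., 2.],
  #     [3., 4.],
  #     [1., 2.],
  #     [3., 4.]]
  ```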
"""
if isinstance(n, int):
n = [n]
return array_ops.tile(x, n)
@keras_export('keras.backend.flatten')
def flatten(x):
"""Flatten a tensor.
Arguments:
x: A tensor or variable.
Returns:
A tensor, reshaped into 1-D
"""
return array_ops.reshape(x, [-1])
@keras_export('keras.backend.batch_flatten')
def batch_flatten(x):
"""Turn a nD tensor into a 2D tensor with same 0th dimension.
In other words, it flattens each data samples of a batch.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
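  Example (illustrative shape sketch; assumes this backend module is imported
  as `K`):
  ```python
  >>> x = K.ones(shape=(2, 3, 4))
  >>> K.eval(K.batch_flatten(x)).shape
  (2, 12)
  ```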
"""
x = array_ops.reshape(x, array_ops.stack([-1, prod(shape(x)[1:])]))
return x
@keras_export('keras.backend.expand_dims')
def expand_dims(x, axis=-1):
"""Adds a 1-sized dimension at index "axis".
Arguments:
x: A tensor or variable.
axis: Position where to add a new axis.
Returns:
A tensor with expanded dimensions.
"""
return array_ops.expand_dims(x, axis)
@keras_export('keras.backend.squeeze')
def squeeze(x, axis):
"""Removes a 1-dimension from the tensor at index "axis".
Arguments:
x: A tensor or variable.
axis: Axis to drop.
Returns:
A tensor with the same data as `x` but reduced dimensions.
"""
return array_ops.squeeze(x, [axis])
@keras_export('keras.backend.temporal_padding')
def temporal_padding(x, padding=(1, 1)):
"""Pads the middle dimension of a 3D tensor.
Arguments:
x: Tensor or variable.
padding: Tuple of 2 integers, how many zeros to
add at the start and end of dim 1.
Returns:
A padded 3D tensor.
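  Example (illustrative shape sketch; assumes this backend module is imported
  as `K`):
  ```python
  >>> x = K.ones(shape=(2, 3, 4))  # (batch, time, features)
  >>> K.eval(K.temporal_padding(x, (1, 1))).shape
  (2, 5, 4)
  ```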
"""
assert len(padding) == 2
pattern = [[0, 0], [padding[0], padding[1]], [0, 0]]
return array_ops.pad(x, pattern)
@keras_export('keras.backend.spatial_2d_padding')
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
"""Pads the 2nd and 3rd dimensions of a 4D tensor.
Arguments:
x: Tensor or variable.
padding: Tuple of 2 tuples, padding pattern.
data_format: One of `channels_last` or `channels_first`.
Returns:
A padded 4D tensor.
Raises:
ValueError: if `data_format` is neither
          `channels_last` nor `channels_first`.
"""
assert len(padding) == 2
assert len(padding[0]) == 2
assert len(padding[1]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if data_format == 'channels_first':
pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])]
else:
pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]]
return array_ops.pad(x, pattern)
@keras_export('keras.backend.spatial_3d_padding')
def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):
"""Pads 5D tensor with zeros along the depth, height, width dimensions.
Pads these dimensions with respectively
"padding[0]", "padding[1]" and "padding[2]" zeros left and right.
For 'channels_last' data_format,
the 2nd, 3rd and 4th dimension will be padded.
For 'channels_first' data_format,
the 3rd, 4th and 5th dimension will be padded.
Arguments:
x: Tensor or variable.
padding: Tuple of 3 tuples, padding pattern.
data_format: One of `channels_last` or `channels_first`.
Returns:
A padded 5D tensor.
Raises:
ValueError: if `data_format` is neither
          `channels_last` nor `channels_first`.
"""
assert len(padding) == 3
assert len(padding[0]) == 2
assert len(padding[1]) == 2
assert len(padding[2]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if data_format == 'channels_first':
pattern = [[0, 0], [0, 0], [padding[0][0], padding[0][1]],
[padding[1][0], padding[1][1]], [padding[2][0], padding[2][1]]]
else:
pattern = [[0, 0], [padding[0][0], padding[0][1]],
[padding[1][0], padding[1][1]], [padding[2][0],
padding[2][1]], [0, 0]]
return array_ops.pad(x, pattern)
@keras_export('keras.backend.stack')
def stack(x, axis=0):
"""Stacks a list of rank `R` tensors into a rank `R+1` tensor.
Arguments:
x: List of tensors.
axis: Axis along which to perform stacking.
Returns:
A tensor.
"""
return array_ops.stack(x, axis=axis)
@keras_export('keras.backend.one_hot')
def one_hot(indices, num_classes):
"""Computes the one-hot representation of an integer tensor.
Arguments:
indices: nD integer tensor of shape
`(batch_size, dim1, dim2, ... dim(n-1))`
num_classes: Integer, number of classes to consider.
  Returns:
      The (n + 1)D one-hot representation of the input,
      with shape `(batch_size, dim1, dim2, ... dim(n-1), num_classes)`.
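  Example (illustrative sketch; assumes this backend module is imported as `K`):
  ```python
  >>> indices = K.constant([0, 2, 1], dtype='int32')
  >>> K.eval(K.one_hot(indices, num_classes=3))
  # -> [[1., 0., 0.],
  #     [0., 0., 1.],
  #     [0., 1., 0.]]
  ```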
"""
return array_ops.one_hot(indices, depth=num_classes, axis=-1)
@keras_export('keras.backend.reverse')
def reverse(x, axes):
"""Reverse a tensor along the specified axes.
Arguments:
x: Tensor to reverse.
axes: Integer or iterable of integers.
Axes to reverse.
Returns:
A tensor.
"""
if isinstance(axes, int):
axes = [axes]
return array_ops.reverse(x, axes)
# VALUE MANIPULATION
@keras_export('keras.backend.get_value')
def get_value(x):
"""Returns the value of a variable.
Arguments:
x: input variable.
Returns:
A Numpy array.
Raises:
RuntimeError: If this method is called inside defun.
"""
if context.executing_eagerly():
return x.numpy()
elif not getattr(x, '_in_graph_mode', True):
# This is a variable which was created in an eager context, but is being
# evaluated from a Graph.
with context.eager_mode():
return x.numpy()
elif ops.inside_function():
raise RuntimeError('Cannot get value inside Tensorflow graph function.')
return x.eval(session=get_session((x,)))
@keras_export('keras.backend.batch_get_value')
def batch_get_value(tensors):
"""Returns the value of more than one tensor variable.
Arguments:
tensors: list of ops to run.
Returns:
A list of Numpy arrays.
Raises:
RuntimeError: If this method is called inside defun.
"""
if context.executing_eagerly():
return [x.numpy() for x in tensors]
elif ops.inside_function(): # pylint: disable=protected-access
raise RuntimeError('Cannot get value inside Tensorflow graph function.')
if tensors:
return get_session(tensors).run(tensors)
else:
return []
@keras_export('keras.backend.set_value')
def set_value(x, value):
"""Sets the value of a variable, from a Numpy array.
Arguments:
x: Tensor to set to a new value.
value: Value to set the tensor to, as a Numpy array
(of the same shape).
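  Example (illustrative sketch; assumes this backend module is imported as `K`):
  ```python
  >>> v = K.variable(1.)
  >>> K.set_value(v, 2.)
  >>> K.get_value(v)  # -> 2.0
  ```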
"""
value = np.asarray(value, dtype=dtype(x))
if ops.executing_eagerly_outside_functions():
x.assign(value)
else:
with get_graph().as_default():
tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
if hasattr(x, '_assign_placeholder'):
assign_placeholder = x._assign_placeholder
assign_op = x._assign_op
else:
assign_placeholder = array_ops.placeholder(tf_dtype, shape=value.shape)
assign_op = x.assign(assign_placeholder)
x._assign_placeholder = assign_placeholder
x._assign_op = assign_op
get_session().run(assign_op, feed_dict={assign_placeholder: value})
@keras_export('keras.backend.batch_set_value')
def batch_set_value(tuples):
"""Sets the values of many tensor variables at once.
Arguments:
tuples: a list of tuples `(tensor, value)`.
`value` should be a Numpy array.
"""
if ops.executing_eagerly_outside_functions():
for x, value in tuples:
x.assign(np.asarray(value, dtype=dtype(x)))
else:
with get_graph().as_default():
if tuples:
assign_ops = []
feed_dict = {}
for x, value in tuples:
value = np.asarray(value, dtype=dtype(x))
tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
if hasattr(x, '_assign_placeholder'):
assign_placeholder = x._assign_placeholder
assign_op = x._assign_op
else:
assign_placeholder = array_ops.placeholder(tf_dtype,
shape=value.shape)
assign_op = x.assign(assign_placeholder)
x._assign_placeholder = assign_placeholder
x._assign_op = assign_op
assign_ops.append(assign_op)
feed_dict[assign_placeholder] = value
get_session().run(assign_ops, feed_dict=feed_dict)
@keras_export('keras.backend.print_tensor')
def print_tensor(x, message=''):
"""Prints `message` and the tensor value when evaluated.
Note that `print_tensor` returns a new tensor identical to `x`
which should be used in the following code. Otherwise the
print operation is not taken into account during evaluation.
Example:
```python
>>> x = K.print_tensor(x, message="x is: ")
```
Arguments:
x: Tensor to print.
message: Message to print jointly with the tensor.
Returns:
The same tensor `x`, unchanged.
"""
return logging_ops.Print(x, [x], message)
# GRAPH MANIPULATION
class GraphExecutionFunction(object):
"""Runs a computation graph.
  It's possible to pass arguments to `tf.Session.run()` via `session_kwargs`,
  in particular additional operations via the `fetches` argument and additional
  tensor substitutions via the `feed_dict` argument. Note that the given
  substitutions are merged with substitutions from `inputs`. Even though
  `feed_dict` is passed once in the constructor (called in `model.compile()`),
  we can modify the values in the dictionary. Through this `feed_dict` we can
  provide additional substitutions besides Keras inputs.
Arguments:
inputs: Feed placeholders to the computation graph.
outputs: Output tensors to fetch.
updates: Additional update ops to be run at function call.
name: A name to help users identify what this function does.
session_kwargs: Arguments to `tf.Session.run()`:
`fetches`, `feed_dict`, `options`, `run_metadata`.
"""
def __init__(self, inputs, outputs, updates=None, name=None,
**session_kwargs):
updates = updates or []
if not isinstance(updates, (list, tuple)):
raise TypeError('`updates` in a Keras backend function '
'should be a list or tuple.')
self.inputs = nest.flatten(inputs)
self._outputs_structure = outputs
self.outputs = cast_variables_to_tensor(nest.flatten(outputs))
# TODO(b/127668432): Consider using autograph to generate these
# dependencies in call.
# Index 0 = total loss or model output for `predict`.
with ops.control_dependencies([self.outputs[0]]):
updates_ops = []
for update in updates:
if isinstance(update, tuple):
p, new_p = update
updates_ops.append(state_ops.assign(p, new_p))
else:
# assumed already an op
updates_ops.append(update)
self.updates_op = control_flow_ops.group(*updates_ops)
self.name = name
# additional tensor substitutions
self.feed_dict = session_kwargs.pop('feed_dict', None)
# additional operations
self.fetches = session_kwargs.pop('fetches', [])
if not isinstance(self.fetches, list):
self.fetches = [self.fetches]
self.run_options = session_kwargs.pop('options', None)
self.run_metadata = session_kwargs.pop('run_metadata', None)
# The main use case of `fetches` being passed to a model is the ability
# to run custom updates
# This requires us to wrap fetches in `identity` ops.
self.fetches = [array_ops.identity(x) for x in self.fetches]
self.session_kwargs = session_kwargs
# This mapping keeps track of the function that should receive the
# output from a fetch in `fetches`: { fetch: function(fetch_output) }
# A Callback can use this to register a function with access to the
# output values for a fetch it added.
self.fetch_callbacks = {}
if session_kwargs:
raise ValueError('Some keys in session_kwargs are not supported at this '
'time: %s' % (session_kwargs.keys(),))
self._callable_fn = None
self._feed_arrays = None
self._feed_symbols = None
self._symbol_vals = None
self._fetches = None
self._session = None
def _make_callable(self, feed_arrays, feed_symbols, symbol_vals, session):
"""Generates a callable that runs the graph.
Arguments:
feed_arrays: List of input tensors to be fed Numpy arrays at runtime.
feed_symbols: List of input tensors to be fed symbolic tensors at runtime.
symbol_vals: List of symbolic tensors to be fed to `feed_symbols`.
session: Session to use to generate the callable.
Returns:
Function that runs the graph according to the above options.
"""
# Prepare callable options.
callable_opts = config_pb2.CallableOptions()
# Handle external-data feed.
for x in feed_arrays:
callable_opts.feed.append(x.name)
if self.feed_dict:
for key in sorted(self.feed_dict.keys()):
callable_opts.feed.append(key.name)
# Handle symbolic feed.
for x, y in zip(feed_symbols, symbol_vals):
connection = callable_opts.tensor_connection.add()
if x.dtype != y.dtype:
y = math_ops.cast(y, dtype=x.dtype)
from_tensor = ops._as_graph_element(y)
if from_tensor is None:
from_tensor = y
connection.from_tensor = from_tensor.name # Data tensor
connection.to_tensor = x.name # Placeholder
# Handle fetches.
for x in self.outputs + self.fetches:
callable_opts.fetch.append(x.name)
# Handle updates.
callable_opts.target.append(self.updates_op.name)
# Handle run_options.
if self.run_options:
callable_opts.run_options.CopyFrom(self.run_options)
# Create callable.
callable_fn = session._make_callable_from_options(callable_opts)
# Cache parameters corresponding to the generated callable, so that
# we can detect future mismatches and refresh the callable.
self._callable_fn = callable_fn
self._feed_arrays = feed_arrays
self._feed_symbols = feed_symbols
self._symbol_vals = symbol_vals
self._fetches = list(self.fetches)
self._session = session
def _call_fetch_callbacks(self, fetches_output):
for fetch, output in zip(self._fetches, fetches_output):
if fetch in self.fetch_callbacks:
self.fetch_callbacks[fetch](output)
def __call__(self, inputs):
inputs = nest.flatten(inputs)
session = get_session(inputs)
feed_arrays = []
array_vals = []
feed_symbols = []
symbol_vals = []
for tensor, value in zip(self.inputs, inputs):
if value is None:
continue
if is_sparse(tensor):
sparse_coo = value.tocoo()
indices = np.concatenate((np.expand_dims(sparse_coo.row, 1),
np.expand_dims(sparse_coo.col, 1)), 1)
value = (indices, sparse_coo.data, sparse_coo.shape)
if tensor_util.is_tensor(value):
# Case: feeding symbolic tensor.
feed_symbols.append(tensor)
symbol_vals.append(value)
else:
# Case: feeding Numpy array.
feed_arrays.append(tensor)
# We need to do array conversion and type casting at this level, since
# `callable_fn` only supports exact matches.
tensor_type = dtypes_module.as_dtype(tensor.dtype)
array_vals.append(np.asarray(value,
dtype=tensor_type.as_numpy_dtype))
if self.feed_dict:
for key in sorted(self.feed_dict.keys()):
array_vals.append(
np.asarray(self.feed_dict[key], dtype=key.dtype.base_dtype.name))
# Refresh callable if anything has changed.
if (self._callable_fn is None or feed_arrays != self._feed_arrays or
symbol_vals != self._symbol_vals or
feed_symbols != self._feed_symbols or self.fetches != self._fetches or
session != self._session):
self._make_callable(feed_arrays, feed_symbols, symbol_vals, session)
fetched = self._callable_fn(*array_vals,
run_metadata=self.run_metadata)
self._call_fetch_callbacks(fetched[-len(self._fetches):])
return nest.pack_sequence_as(self._outputs_structure,
fetched[:len(self.outputs)])
class EagerExecutionFunction(object):
"""Helper class for constructing a TF graph function from the Keras graph.
Arguments:
inputs: Feed placeholders to the computation graph.
outputs: Output tensors to fetch.
updates: Additional update ops to be run at function call.
name: A name to help users identify what this function does.
session_kwargs: Unsupported.
"""
def __init__(self, inputs, outputs, updates=None, name=None):
self.name = name
self._outputs_structure = outputs
inputs = nest.flatten(inputs)
outputs = nest.flatten(outputs)
updates = updates or []
if not isinstance(updates, (list, tuple)):
raise TypeError('`updates` in a Keras backend function '
'should be a list or tuple.')
if updates and not outputs:
# Edge case; never happens in practice
raise ValueError('Cannot create a Keras backend function with updates'
' but no outputs during eager execution.')
graphs = {i.graph for i in nest.flatten([inputs, outputs, updates])
if hasattr(i, 'graph')}
if len(graphs) > 1:
raise ValueError('Cannot create an execution function which is comprised '
'of elements from multiple graphs.')
source_graph = graphs.pop()
global_graph = get_graph()
updates_ops = []
legacy_update_ops = []
for update in updates:
# For legacy reasons it is allowed to pass an update as a tuple
# `(variable, new_value)` (this maps to an assign op). Otherwise it
# is assumed to already be an op -- we cannot control its execution
# order.
if isinstance(update, tuple):
legacy_update_ops.append(update)
else:
if hasattr(update, 'op'):
update = update.op
updates_ops.append(update)
with _scratch_graph() as exec_graph:
global_graph = get_graph()
if source_graph not in (exec_graph, global_graph):
raise ValueError('Unknown graph. Aborting.')
if source_graph is global_graph and exec_graph is not global_graph:
init_tensors = (
outputs + updates_ops + [p for [p, _] in legacy_update_ops] +
[p_new for [_, p_new] in legacy_update_ops
if isinstance(p_new, ops.Tensor)])
lifted_map = lift_to_graph.lift_to_graph(
init_tensors=init_tensors, graph=exec_graph, sources=inputs,
add_sources=True, handle_captures=True, base_graph=source_graph)
inputs = [lifted_map[i] for i in inputs]
outputs = [lifted_map[i] for i in outputs]
updates_ops = [lifted_map[i] for i in updates_ops]
legacy_update_ops = [(lifted_map[p], lifted_map.get(p_new, p_new))
for p, p_new in legacy_update_ops]
# Consolidate updates
with exec_graph.as_default():
outputs = cast_variables_to_tensor(outputs)
with ops.control_dependencies(outputs):
for p, p_new in legacy_update_ops:
updates_ops.append(state_ops.assign(p, p_new))
self.inputs, self.outputs = inputs, outputs
with ops.control_dependencies(updates_ops):
self.outputs[0] = array_ops.identity(self.outputs[0])
exec_graph.inputs = self.inputs + list(exec_graph.captures.values())
exec_graph.outputs = self.outputs
graph_fn = eager_function.ConcreteFunction(exec_graph)
graph_fn._num_positional_args = len(self.inputs)
graph_fn._arg_keywords = []
self._graph_fn = graph_fn
# Handle placeholders with default
# (treated as required placeholder by graph functions)
self._placeholder_default_values = {}
with exec_graph.as_default():
for x in self.inputs:
if x.op.type == 'PlaceholderWithDefault':
self._placeholder_default_values[x] = tensor_util.constant_value(
x.op.inputs[0])
def __call__(self, inputs):
inputs = nest.flatten(inputs)
converted_inputs = []
for tensor, value in zip(self.inputs, inputs):
if value is None:
# Assume `value` is a placeholder with default
value = self._placeholder_default_values.get(tensor, None)
if value is None:
raise ValueError(
'You must feed a value for placeholder %s' % (tensor,))
if not isinstance(value, ops.Tensor):
value = ops.convert_to_tensor(value, dtype=tensor.dtype)
if value.dtype != tensor.dtype:
# Temporary workaround due to `convert_to_tensor` not casting floats.
# See b/119637405
value = math_ops.cast(value, tensor.dtype)
converted_inputs.append(value)
outputs = self._graph_fn(*converted_inputs)
return nest.pack_sequence_as(self._outputs_structure,
[x.numpy() for x in outputs])
@keras_export('keras.backend.function')
def function(inputs, outputs, updates=None, name=None, **kwargs):
"""Instantiates a Keras function.
Arguments:
inputs: List of placeholder tensors.
outputs: List of output tensors.
updates: List of update ops.
name: String, name of function.
**kwargs: Passed to `tf.Session.run`.
Returns:
      A callable that, given input values, computes and returns the
      corresponding output values as Numpy arrays.
Raises:
ValueError: if invalid kwargs are passed in or if in eager execution.
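  Example (illustrative sketch; assumes this backend module is imported as `K`
  and NumPy as `np`; under graph mode the inputs below are placeholders, while
  under eager execution an `EagerExecutionFunction` is built instead):
  ```python
  >>> x = K.placeholder(shape=(None,))
  >>> f = K.function([x], [x * 2.0, x + 1.0])
  >>> f([np.array([1., 2., 3.])])
  # -> [array([2., 4., 6.], dtype=float32), array([2., 3., 4.], dtype=float32)]
  ```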
"""
if ops.executing_eagerly_outside_functions():
if kwargs:
      raise ValueError('Session keyword arguments are not supported during '
'eager execution. You passed: %s' % (kwargs,))
return EagerExecutionFunction(inputs, outputs, updates=updates, name=name)
if kwargs:
for key in kwargs:
if (key not in tf_inspect.getfullargspec(session_module.Session.run)[0]
and key not in ['inputs', 'outputs', 'updates', 'name']):
msg = ('Invalid argument "%s" passed to K.function with TensorFlow '
'backend') % key
raise ValueError(msg)
return GraphExecutionFunction(inputs, outputs, updates=updates, **kwargs)
@keras_export('keras.backend.gradients')
def gradients(loss, variables):
"""Returns the gradients of `loss` w.r.t. `variables`.
Arguments:
loss: Scalar tensor to minimize.
variables: List of variables.
Returns:
A gradients tensor.
"""
return gradients_module.gradients(
loss, variables, colocate_gradients_with_ops=True)
@keras_export('keras.backend.stop_gradient')
def stop_gradient(variables):
"""Returns `variables` but with zero gradient w.r.t. every other variable.
Arguments:
variables: Tensor or list of tensors to consider constant with respect
to any other variable.
Returns:
A single tensor or a list of tensors (depending on the passed argument)
that has no gradient with respect to any other variable.
"""
if isinstance(variables, (list, tuple)):
return map(array_ops.stop_gradient, variables)
return array_ops.stop_gradient(variables)
# CONTROL FLOW
@keras_export('keras.backend.rnn')
def rnn(step_function,
inputs,
initial_states,
go_backwards=False,
mask=None,
constants=None,
unroll=False,
input_length=None,
time_major=False,
zero_output_for_mask=False):
"""Iterates over the time dimension of a tensor.
Arguments:
step_function: RNN step function.
Args;
input; Tensor with shape `(samples, ...)` (no time dimension),
representing input for the batch of samples at a certain
time step.
states; List of tensors.
Returns;
output; Tensor with shape `(samples, output_dim)`
(no time dimension).
new_states; List of tensors, same length and shapes
as 'states'. The first state in the list must be the
output tensor at the previous timestep.
inputs: Tensor of temporal data of shape `(samples, time, ...)`
(at least 3D), or nested tensors, and each of which has shape
`(samples, time, ...)`.
initial_states: Tensor with shape `(samples, state_size)`
(no time dimension), containing the initial values for the states used
in the step function. In the case that state_size is in a nested
shape, the shape of initial_states will also follow the nested
structure.
go_backwards: Boolean. If True, do the iteration over the time
dimension in reverse order and return the reversed sequence.
mask: Binary tensor with shape `(samples, time, 1)`,
with a zero for every element that is masked.
constants: List of constant values passed at each step.
unroll: Whether to unroll the RNN or to use a symbolic `while_loop`.
input_length: If specified, assume time dimension is of this length.
time_major: Boolean. If true, the inputs and outputs will be in shape
`(timesteps, batch, ...)`, whereas in the False case, it will be
`(batch, timesteps, ...)`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
zero_output_for_mask: Boolean. If True, the output for masked timestep
will be zeros, whereas in the False case, output from previous
timestep is returned.
Returns:
A tuple, `(last_output, outputs, new_states)`.
last_output: the latest output of the rnn, of shape `(samples, ...)`
outputs: tensor with shape `(samples, time, ...)` where each
entry `outputs[s, t]` is the output of the step function
at time `t` for sample `s`.
new_states: list of tensors, latest states returned by
the step function, of shape `(samples, ...)`.
Raises:
ValueError: if input dimension is less than 3.
ValueError: if `unroll` is `True` but input timestep is not a fixed
number.
ValueError: if `mask` is provided (not `None`) but states is not provided
(`len(states)` == 0).
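  Example (minimal illustrative sketch; assumes this backend module is imported
  as `K`; the step function below simply accumulates a running sum over time):
  ```python
  >>> inputs = K.constant([[[1.], [2.], [3.]]])  # (samples=1, time=3, dim=1)
  >>> initial_states = [K.constant([[0.]])]      # one state of shape (1, 1)
  >>> step = lambda inp, states: (inp + states[0], [inp + states[0]])
  >>> last_output, outputs, new_states = K.rnn(step, inputs, initial_states)
  >>> K.eval(last_output)  # -> [[6.]]
  ```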
"""
def swap_batch_timestep(input_t):
# Swap the batch and timestep dim for the incoming tensor.
axes = list(range(len(input_t.shape)))
axes[0], axes[1] = 1, 0
return array_ops.transpose(input_t, axes)
if not time_major:
inputs = nest.map_structure(swap_batch_timestep, inputs)
flatted_inputs = nest.flatten(inputs)
time_steps = flatted_inputs[0].shape[0]
batch = flatted_inputs[0].shape[1]
time_steps_t = array_ops.shape(flatted_inputs[0])[0]
for input_ in flatted_inputs:
input_.shape.with_rank_at_least(3)
if mask is not None:
if mask.dtype != dtypes_module.bool:
mask = math_ops.cast(mask, dtypes_module.bool)
if len(mask.shape) == 2:
mask = expand_dims(mask)
if not time_major:
mask = swap_batch_timestep(mask)
if constants is None:
constants = []
# tf.where needs its condition tensor to be the same shape as its two
# result tensors, but in our case the condition (mask) tensor is
# (nsamples, 1), and inputs are (nsamples, ndimensions) or even more.
# So we need to broadcast the mask to match the shape of inputs.
# That's what the tile call does, it just repeats the mask along its
# second dimension n times.
def _expand_mask(mask_t, input_t, fixed_dim=1):
assert not nest.is_sequence(mask_t)
assert not nest.is_sequence(input_t)
rank_diff = len(input_t.shape) - len(mask_t.shape)
for _ in range(rank_diff):
mask_t = array_ops.expand_dims(mask_t, -1)
multiples = [1] * fixed_dim + input_t.shape.as_list()[fixed_dim:]
return array_ops.tile(mask_t, multiples)
if unroll:
if not time_steps:
raise ValueError('Unrolling requires a fixed number of timesteps.')
states = tuple(initial_states)
successive_states = []
successive_outputs = []
    # Process the input tensors. The input tensors need to be split on the
    # time_step dim, and reversed if go_backwards is True. In the case of nested
    # input, the input is flattened and then transformed individually.
    # The result of this will be a tuple of lists, where each item in the tuple
    # is a list of tensors with shape (batch, feature).
def _process_single_input_t(input_t):
input_t = array_ops.unstack(input_t) # unstack for time_step dim
if go_backwards:
input_t.reverse()
return input_t
if nest.is_sequence(inputs):
processed_input = nest.map_structure(_process_single_input_t, inputs)
else:
processed_input = (_process_single_input_t(inputs),)
def _get_input_tensor(time):
inp = [t_[time] for t_ in processed_input]
return nest.pack_sequence_as(inputs, inp)
if mask is not None:
mask_list = array_ops.unstack(mask)
if go_backwards:
mask_list.reverse()
for i in range(time_steps):
inp = _get_input_tensor(i)
mask_t = mask_list[i]
output, new_states = step_function(inp,
tuple(states) + tuple(constants))
tiled_mask_t = _expand_mask(mask_t, output)
if not successive_outputs:
prev_output = zeros_like(output)
else:
prev_output = successive_outputs[-1]
output = array_ops.where(tiled_mask_t, output, prev_output)
return_states = []
for state, new_state in zip(states, new_states):
# (see earlier comment for tile explanation)
tiled_mask_t = _expand_mask(mask_t, new_state)
return_states.append(array_ops.where(tiled_mask_t, new_state, state))
states = return_states
successive_outputs.append(output)
successive_states.append(states)
last_output = successive_outputs[-1]
new_states = successive_states[-1]
outputs = array_ops.stack(successive_outputs)
if zero_output_for_mask:
last_output = array_ops.where(
_expand_mask(mask_list[-1], last_output),
last_output,
zeros_like(last_output))
outputs = array_ops.where(
_expand_mask(mask, outputs, fixed_dim=2),
outputs,
zeros_like(outputs))
else:
for i in range(time_steps):
inp = _get_input_tensor(i)
output, states = step_function(inp, tuple(states) + tuple(constants))
successive_outputs.append(output)
successive_states.append(states)
last_output = successive_outputs[-1]
new_states = successive_states[-1]
outputs = array_ops.stack(successive_outputs)
else:
states = tuple(initial_states)
    # Create the input TensorArray. If the inputs are nested tensors, they will
    # be flattened first, and one TensorArray will be created per flattened
    # tensor.
input_ta = tuple(
tensor_array_ops.TensorArray(
dtype=inp.dtype,
size=time_steps_t,
tensor_array_name='input_ta_%s' % i)
for i, inp in enumerate(flatted_inputs))
input_ta = tuple(
ta.unstack(input_) if not go_backwards else ta
.unstack(reverse(input_, 0))
for ta, input_ in zip(input_ta, flatted_inputs))
    # Get the time(0) input and compute the output for that; the output will be
    # used to determine the dtype of the output tensor array. Don't read from
    # input_ta because TensorArray's clear_after_read defaults to True.
input_time_zero = nest.pack_sequence_as(inputs,
[inp[0] for inp in flatted_inputs])
    # output_time_zero is used to determine the cell output shape and its dtype;
    # the value itself is discarded.
output_time_zero, _ = step_function(input_time_zero,
initial_states + constants)
output_ta = tuple(
tensor_array_ops.TensorArray(
dtype=out.dtype,
size=time_steps_t,
tensor_array_name='output_ta_%s' % i)
for i, out in enumerate(nest.flatten(output_time_zero)))
time = constant_op.constant(0, dtype='int32', name='time')
while_loop_kwargs = {
'cond': lambda time, *_: time < time_steps_t,
'maximum_iterations': input_length,
'parallel_iterations': 32,
'swap_memory': True,
}
if mask is not None:
if not states:
raise ValueError('No initial states provided! '
'When using masking in an RNN, you should '
'provide initial states '
'(and your step function should return '
'as its first state at time `t` '
'the output at time `t-1`).')
if go_backwards:
mask = reverse(mask, 0)
mask_ta = tensor_array_ops.TensorArray(
dtype=dtypes_module.bool,
size=time_steps_t,
tensor_array_name='mask_ta')
mask_ta = mask_ta.unstack(mask)
      # The mask for the output at time T will be based on the output at time
      # T - 1. In the case T = 0, a zero-filled tensor will be used.
flat_zero_output = tuple(array_ops.zeros_like(o)
for o in nest.flatten(output_time_zero))
def _step(time, output_ta_t, prev_output, *states):
"""RNN step function.
Arguments:
time: Current timestep value.
output_ta_t: TensorArray.
prev_output: tuple of outputs from time - 1.
*states: List of states.
Returns:
Tuple: `(time + 1, output_ta_t, output) + tuple(new_states)`
"""
current_input = tuple(ta.read(time) for ta in input_ta)
# maybe set shape.
current_input = nest.pack_sequence_as(inputs, current_input)
mask_t = mask_ta.read(time)
output, new_states = step_function(current_input,
tuple(states) + tuple(constants))
# mask output
flat_output = nest.flatten(output)
flat_mask_output = (flat_zero_output if zero_output_for_mask
else nest.flatten(prev_output))
tiled_mask_t = tuple(_expand_mask(mask_t, o) for o in flat_output)
flat_new_output = tuple(
array_ops.where(m, o, zo) for m, o, zo in zip(
tiled_mask_t, flat_output, flat_mask_output))
# mask states
flat_state = nest.flatten(states)
flat_new_state = nest.flatten(new_states)
for state, new_state in zip(flat_state, flat_new_state):
if hasattr(new_state, 'set_shape'):
new_state.set_shape(state.shape)
tiled_mask_t = tuple(_expand_mask(mask_t, s) for s in flat_state)
flat_final_state = tuple(
array_ops.where(m, s, ps)
for m, s, ps in zip(tiled_mask_t, flat_new_state, flat_state))
new_states = nest.pack_sequence_as(new_states, flat_final_state)
output_ta_t = tuple(
ta.write(time, out)
for ta, out in zip(output_ta_t, flat_new_output))
return (time + 1, output_ta_t,
tuple(flat_new_output)) + tuple(new_states)
final_outputs = control_flow_ops.while_loop(
body=_step,
loop_vars=(time, output_ta, flat_zero_output) + states,
**while_loop_kwargs)
# Skip final_outputs[2] which is the output for final timestep.
new_states = final_outputs[3:]
else:
def _step(time, output_ta_t, *states):
"""RNN step function.
Arguments:
time: Current timestep value.
output_ta_t: TensorArray.
*states: List of states.
Returns:
Tuple: `(time + 1,output_ta_t) + tuple(new_states)`
"""
current_input = tuple(ta.read(time) for ta in input_ta)
current_input = nest.pack_sequence_as(inputs, current_input)
output, new_states = step_function(current_input,
tuple(states) + tuple(constants))
flat_state = nest.flatten(states)
flat_new_state = nest.flatten(new_states)
for state, new_state in zip(flat_state, flat_new_state):
if hasattr(new_state, 'set_shape'):
new_state.set_shape(state.shape)
flat_output = nest.flatten(output)
output_ta_t = tuple(
ta.write(time, out) for ta, out in zip(output_ta_t, flat_output))
new_states = nest.pack_sequence_as(initial_states, flat_new_state)
return (time + 1, output_ta_t) + tuple(new_states)
final_outputs = control_flow_ops.while_loop(
body=_step,
loop_vars=(time, output_ta) + states,
**while_loop_kwargs)
new_states = final_outputs[2:]
output_ta = final_outputs[1]
outputs = tuple(o.stack() for o in output_ta)
last_output = tuple(o[-1] for o in outputs)
outputs = nest.pack_sequence_as(output_time_zero, outputs)
last_output = nest.pack_sequence_as(output_time_zero, last_output)
# static shape inference
def set_shape(output_):
if hasattr(output_, 'set_shape'):
shape = output_.shape.as_list()
shape[0] = time_steps
shape[1] = batch
output_.set_shape(shape)
return output_
outputs = nest.map_structure(set_shape, outputs)
if not time_major:
outputs = nest.map_structure(swap_batch_timestep, outputs)
return last_output, outputs, new_states
@keras_export('keras.backend.switch')
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value.
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
Arguments:
condition: tensor (`int` or `bool`).
then_expression: either a tensor, or a callable that returns a tensor.
else_expression: either a tensor, or a callable that returns a tensor.
Returns:
The selected tensor.
Raises:
ValueError: If rank of `condition` is greater than rank of expressions.
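  Example (illustrative sketch; assumes this backend module is imported as `K`):
  ```python
  >>> cond = K.greater(K.constant(2.), K.constant(1.))
  >>> K.eval(K.switch(cond, K.constant(10.), K.constant(-10.)))  # -> 10.0
  ```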
"""
if condition.dtype != dtypes_module.bool:
condition = math_ops.cast(condition, 'bool')
cond_ndim = ndim(condition)
if not cond_ndim:
if not callable(then_expression):
def then_expression_fn():
return then_expression
else:
then_expression_fn = then_expression
if not callable(else_expression):
def else_expression_fn():
return else_expression
else:
else_expression_fn = else_expression
x = control_flow_ops.cond(condition, then_expression_fn, else_expression_fn)
else:
# tf.where needs its condition tensor
# to be the same shape as its two
# result tensors
if callable(then_expression):
then_expression = then_expression()
if callable(else_expression):
else_expression = else_expression()
expr_ndim = ndim(then_expression)
if cond_ndim > expr_ndim:
raise ValueError('Rank of `condition` should be less than or'
' equal to rank of `then_expression` and '
'`else_expression`. ndim(condition)=' + str(cond_ndim) +
', ndim(then_expression)'
'=' + str(expr_ndim))
if cond_ndim > 1:
ndim_diff = expr_ndim - cond_ndim
cond_shape = array_ops.concat(
[array_ops.shape(condition), [1] * ndim_diff], axis=0)
condition = array_ops.reshape(condition, cond_shape)
expr_shape = array_ops.shape(then_expression)
shape_diff = expr_shape - cond_shape
tile_shape = array_ops.where(shape_diff > 0, expr_shape,
array_ops.ones_like(expr_shape))
condition = array_ops.tile(condition, tile_shape)
x = array_ops.where(condition, then_expression, else_expression)
return x
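# Illustrative usage sketch for `switch` (assumes the `constant`, `greater` and
# `eval` helpers defined elsewhere in this backend):
#   cond = greater(constant(0.0), constant(1.0))                  # scalar bool tensor
#   result = switch(cond, lambda: constant(1.0), lambda: constant(-1.0))
#   eval(result)  # -> -1.0, since the condition is False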
@keras_export('keras.backend.in_train_phase')
def in_train_phase(x, alt, training=None):
"""Selects `x` in train phase, and `alt` otherwise.
Note that `alt` should have the *same shape* as `x`.
Arguments:
x: What to return in train phase
(tensor or callable that returns a tensor).
alt: What to return otherwise
(tensor or callable that returns a tensor).
training: Optional scalar tensor
(or Python boolean, or Python integer)
specifying the learning phase.
Returns:
Either `x` or `alt` based on the `training` flag.
    The `training` flag defaults to `K.learning_phase()`.
"""
if training is None:
training = learning_phase()
if training == 1 or training is True:
if callable(x):
return x()
else:
return x
elif training == 0 or training is False:
if callable(alt):
return alt()
else:
return alt
# else: assume learning phase is a placeholder tensor.
x = switch(training, x, alt)
return x
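# Illustrative usage sketch for `in_train_phase` (for some tensor `x`):
#   in_train_phase(lambda: dropout(x, 0.5), x, training=1)  # applies dropout
#   in_train_phase(lambda: dropout(x, 0.5), x, training=0)  # returns `x` unchanged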
@keras_export('keras.backend.in_test_phase')
def in_test_phase(x, alt, training=None):
"""Selects `x` in test phase, and `alt` otherwise.
Note that `alt` should have the *same shape* as `x`.
Arguments:
x: What to return in test phase
(tensor or callable that returns a tensor).
alt: What to return otherwise
(tensor or callable that returns a tensor).
training: Optional scalar tensor
(or Python boolean, or Python integer)
specifying the learning phase.
Returns:
Either `x` or `alt` based on `K.learning_phase`.
"""
return in_train_phase(alt, x, training=training)
# NN OPERATIONS
@keras_export('keras.backend.relu')
def relu(x, alpha=0., max_value=None, threshold=0):
"""Rectified linear unit.
With default values, it returns element-wise `max(x, 0)`.
Otherwise, it follows:
`f(x) = max_value` for `x >= max_value`,
`f(x) = x` for `threshold <= x < max_value`,
`f(x) = alpha * (x - threshold)` otherwise.
Arguments:
x: A tensor or variable.
alpha: A scalar, slope of negative section (default=`0.`).
max_value: float. Saturation threshold.
threshold: float. Threshold value for thresholded activation.
Returns:
A tensor.
"""
if alpha != 0.:
if max_value is None and threshold == 0:
return nn.leaky_relu(x, alpha=alpha)
if threshold != 0:
negative_part = nn.relu(-x + threshold)
else:
negative_part = nn.relu(-x)
clip_max = max_value is not None
if threshold != 0:
# computes x for x > threshold else 0
x = x * math_ops.cast(math_ops.greater(x, threshold), floatx())
elif max_value == 6:
# if no threshold, then can use nn.relu6 native TF op for performance
x = nn.relu6(x)
clip_max = False
else:
x = nn.relu(x)
if clip_max:
max_value = _constant_to_tensor(max_value, x.dtype.base_dtype)
zero = _constant_to_tensor(0., x.dtype.base_dtype)
x = clip_ops.clip_by_value(x, zero, max_value)
if alpha != 0.:
alpha = _to_tensor(alpha, x.dtype.base_dtype)
x -= alpha * negative_part
return x
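# Illustrative behaviour of `relu` with the optional arguments, following the
# formula documented above (assumes the `constant` and `eval` helpers from this
# backend):
#   x = constant([-3.0, -1.0, 0.0, 2.0, 7.0])
#   eval(relu(x, alpha=0.1, max_value=6.0, threshold=0.5))
#   # -> [-0.35, -0.15, -0.05, 2.0, 6.0]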
@keras_export('keras.backend.elu')
def elu(x, alpha=1.):
"""Exponential linear unit.
Arguments:
x: A tensor or variable to compute the activation function for.
alpha: A scalar, slope of negative section.
Returns:
A tensor.
"""
res = nn.elu(x)
if alpha == 1:
return res
else:
return array_ops.where(x > 0, res, alpha * res)
@keras_export('keras.backend.softmax')
def softmax(x, axis=-1):
"""Softmax of a tensor.
Arguments:
x: A tensor or variable.
axis: The dimension softmax would be performed on.
The default is -1 which indicates the last dimension.
Returns:
A tensor.
"""
return nn.softmax(x, axis=axis)
@keras_export('keras.backend.softplus')
def softplus(x):
"""Softplus of a tensor.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.softplus(x)
@keras_export('keras.backend.softsign')
def softsign(x):
"""Softsign of a tensor.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.softsign(x)
@keras_export('keras.backend.categorical_crossentropy')
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Categorical crossentropy between an output tensor and a target tensor.
Arguments:
target: A tensor of the same shape as `output`.
output: A tensor resulting from a softmax
(unless `from_logits` is True, in which
case `output` is expected to be the logits).
from_logits: Boolean, whether `output` is the
result of a softmax, or is a tensor of logits.
axis: Int specifying the channels axis. `axis=-1` corresponds to data
      format `channels_last`, and `axis=1` corresponds to data format
`channels_first`.
Returns:
Output tensor.
Raises:
ValueError: if `axis` is neither -1 nor one of the axes of `output`.
"""
if not from_logits:
if (isinstance(output, (ops.EagerTensor, variables_module.Variable)) or
output.op.type != 'Softmax'):
axis = axis % len(output.shape)
# scale preds so that the class probas of each sample sum to 1
output = output / math_ops.reduce_sum(output, axis, True)
# Compute cross entropy from probabilities.
epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
return -math_ops.reduce_sum(target * math_ops.log(output), axis)
else:
# When softmax activation function is used for output operation, we
# use logits from the softmax function directly to compute loss in order
# to prevent collapsing zero when training.
# See b/117284466
assert len(output.op.inputs) == 1
output = output.op.inputs[0]
return nn.softmax_cross_entropy_with_logits_v2(labels=target, logits=output)
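# Illustrative usage sketch for `categorical_crossentropy` with one-hot targets
# and probabilities that already sum to 1 per row:
#   t = constant([[1.0, 0.0, 0.0]])
#   p = constant([[0.9, 0.05, 0.05]])
#   eval(categorical_crossentropy(t, p))  # -> [~0.105], i.e. -log(0.9)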
@keras_export('keras.backend.sparse_categorical_crossentropy')
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Categorical crossentropy with integer targets.
Arguments:
target: An integer tensor.
output: A tensor resulting from a softmax
(unless `from_logits` is True, in which
case `output` is expected to be the logits).
from_logits: Boolean, whether `output` is the
result of a softmax, or is a tensor of logits.
axis: Int specifying the channels axis. `axis=-1` corresponds to data
      format `channels_last`, and `axis=1` corresponds to data format
`channels_first`.
Returns:
Output tensor.
Raises:
ValueError: if `axis` is neither -1 nor one of the axes of `output`.
"""
if not from_logits:
if (isinstance(output, (ops.EagerTensor, variables_module.Variable)) or
output.op.type != 'Softmax'):
epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
output = clip_ops.clip_by_value(output, epsilon_, 1 - epsilon_)
output = math_ops.log(output)
else:
# When softmax activation function is used for output operation, we
# use logits from the softmax function directly to compute loss in order
# to prevent collapsing zero when training.
# See b/117284466
assert len(output.op.inputs) == 1
output = output.op.inputs[0]
rank = len(output.shape)
axis = axis % rank
if axis != rank - 1:
permutation = list(range(axis)) + list(range(axis + 1, rank)) + [axis]
output = array_ops.transpose(output, perm=permutation)
output_shape = output.shape
targets = cast(flatten(target), 'int64')
logits = array_ops.reshape(output, [-1, int(output_shape[-1])])
res = nn.sparse_softmax_cross_entropy_with_logits(
labels=targets, logits=logits)
if len(output_shape) >= 3:
# If our output includes timesteps or spatial dimensions we need to reshape
return array_ops.reshape(res, array_ops.shape(output)[:-1])
else:
return res
@keras_export('keras.backend.binary_crossentropy')
def binary_crossentropy(target, output, from_logits=False):
"""Binary crossentropy between an output tensor and a target tensor.
Arguments:
target: A tensor with the same shape as `output`.
output: A tensor.
from_logits: Whether `output` is expected to be a logits tensor.
By default, we consider that `output`
encodes a probability distribution.
Returns:
A tensor.
"""
if not from_logits:
if (isinstance(output, (ops.EagerTensor, variables_module.Variable)) or
output.op.type != 'Sigmoid'):
epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
# Compute cross entropy from probabilities.
bce = target * math_ops.log(output + epsilon())
bce += (1 - target) * math_ops.log(1 - output + epsilon())
return -bce
else:
# When sigmoid activation function is used for output operation, we
# use logits from the sigmoid function directly to compute loss in order
# to prevent collapsing zero when training.
assert len(output.op.inputs) == 1
output = output.op.inputs[0]
return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
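# Illustrative usage sketch for `binary_crossentropy` with `from_logits=True`,
# where the sigmoid is applied internally by the TF op:
#   t = constant([1.0, 0.0])
#   logits = constant([2.0, -2.0])
#   eval(binary_crossentropy(t, logits, from_logits=True))  # -> [~0.127, ~0.127]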
@keras_export('keras.backend.sigmoid')
def sigmoid(x):
"""Element-wise sigmoid.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.sigmoid(x)
@keras_export('keras.backend.hard_sigmoid')
def hard_sigmoid(x):
"""Segment-wise linear approximation of sigmoid.
Faster than sigmoid.
Returns `0.` if `x < -2.5`, `1.` if `x > 2.5`.
In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
point_two = _constant_to_tensor(0.2, x.dtype.base_dtype)
point_five = _constant_to_tensor(0.5, x.dtype.base_dtype)
x = math_ops.mul(x, point_two)
x = math_ops.add(x, point_five)
x = clip_ops.clip_by_value(x, 0., 1.)
return x
@keras_export('keras.backend.tanh')
def tanh(x):
"""Element-wise tanh.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.tanh(x)
@keras_export('keras.backend.dropout')
def dropout(x, level, noise_shape=None, seed=None):
"""Sets entries in `x` to zero at random, while scaling the entire tensor.
Arguments:
x: tensor
level: fraction of the entries in the tensor
that will be set to 0.
noise_shape: shape for randomly generated keep/drop flags,
must be broadcastable to the shape of `x`
seed: random seed to ensure determinism.
Returns:
A tensor.
"""
if seed is None:
seed = np.random.randint(10e6)
return nn.dropout_v2(x, rate=level, noise_shape=noise_shape, seed=seed)
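# Illustrative usage sketch for `dropout`: surviving entries are rescaled by
# 1 / (1 - level), so for an all-ones input with level=0.5 the result contains
# only 0.0 and 2.0 (assumes the `ones` helper from this backend):
#   y = dropout(ones((3, 4)), level=0.5, seed=1)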
@keras_export('keras.backend.l2_normalize')
def l2_normalize(x, axis=None):
"""Normalizes a tensor wrt the L2 norm alongside the specified axis.
Arguments:
x: Tensor or variable.
axis: axis along which to perform normalization.
Returns:
A tensor.
"""
return nn.l2_normalize(x, axis=axis)
@keras_export('keras.backend.in_top_k')
def in_top_k(predictions, targets, k):
"""Returns whether the `targets` are in the top `k` `predictions`.
Arguments:
predictions: A tensor of shape `(batch_size, classes)` and type `float32`.
targets: A 1D tensor of length `batch_size` and type `int32` or `int64`.
k: An `int`, number of top elements to consider.
Returns:
A 1D tensor of length `batch_size` and type `bool`.
`output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k`
values of `predictions[i]`.
"""
return nn.in_top_k(predictions, targets, k)
# CONVOLUTIONS
def _preprocess_conv1d_input(x, data_format):
"""Transpose and cast the input before the conv1d.
Arguments:
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
A tensor.
"""
tf_data_format = 'NWC' # to pass TF Conv2dNative operations
if data_format == 'channels_first':
if not _has_nchw_support():
x = array_ops.transpose(x, (0, 2, 1)) # NCW -> NWC
else:
tf_data_format = 'NCW'
return x, tf_data_format
def _preprocess_conv2d_input(x, data_format, force_transpose=False):
"""Transpose and cast the input before the conv2d.
Arguments:
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
force_transpose: Boolean. If True, the input will always be transposed
from NCHW to NHWC if `data_format` is `"channels_first"`.
If False, the transposition only occurs on CPU (GPU ops are
assumed to support NCHW).
Returns:
A tensor.
"""
tf_data_format = 'NHWC'
if data_format == 'channels_first':
if not _has_nchw_support() or force_transpose:
x = array_ops.transpose(x, (0, 2, 3, 1)) # NCHW -> NHWC
else:
tf_data_format = 'NCHW'
return x, tf_data_format
def _preprocess_conv3d_input(x, data_format):
"""Transpose and cast the input before the conv3d.
Arguments:
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
A tensor.
"""
tf_data_format = 'NDHWC'
if data_format == 'channels_first':
if not _has_nchw_support():
x = array_ops.transpose(x, (0, 2, 3, 4, 1))
else:
tf_data_format = 'NCDHW'
return x, tf_data_format
def _preprocess_padding(padding):
"""Convert keras' padding to TensorFlow's padding.
Arguments:
    padding: string, one of 'same', 'valid'.
  Returns:
    a string, one of 'SAME', 'VALID'.
  Raises:
    ValueError: if `padding` is invalid.
"""
if padding == 'same':
padding = 'SAME'
elif padding == 'valid':
padding = 'VALID'
else:
raise ValueError('Invalid padding: ' + str(padding))
return padding
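# For example:
#   _preprocess_padding('same')    # -> 'SAME'
#   _preprocess_padding('valid')   # -> 'VALID'
#   _preprocess_padding('causal')  # -> ValueError; `conv1d` rewrites 'causal'
#                                  #    to 'valid' itself before calling this helper.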
@keras_export('keras.backend.conv1d')
def conv1d(x,
kernel,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1):
"""1D convolution.
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
strides: stride integer.
padding: string, `"same"`, `"causal"` or `"valid"`.
data_format: string, one of "channels_last", "channels_first".
dilation_rate: integer dilate rate.
Returns:
A tensor, result of 1D convolution.
Raises:
    ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
kernel_shape = kernel.shape.as_list()
if padding == 'causal':
# causal (dilated) convolution:
left_pad = dilation_rate * (kernel_shape[0] - 1)
x = temporal_padding(x, (left_pad, 0))
padding = 'valid'
padding = _preprocess_padding(padding)
x, tf_data_format = _preprocess_conv1d_input(x, data_format)
x = nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NWC':
x = array_ops.transpose(x, (0, 2, 1)) # NWC -> NCW
return x
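# Illustrative shape behaviour of `conv1d` with causal padding: the input is
# left-padded so the output keeps the input length (assumes `ones` and
# `int_shape` from this backend):
#   x = ones((2, 10, 3))   # (batch, steps, channels)
#   k = ones((4, 3, 8))    # (kernel_size, in_channels, filters)
#   int_shape(conv1d(x, k, padding='causal'))  # -> (2, 10, 8)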
@keras_export('keras.backend.conv2d')
def conv2d(x,
kernel,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D convolution.
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: `"channels_last"` or `"channels_first"`.
Whether to use Theano or TensorFlow data format
for inputs/kernels/outputs.
dilation_rate: tuple of 2 integers.
Returns:
A tensor, result of 2D convolution.
Raises:
    ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
x = nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
@keras_export('keras.backend.conv2d_transpose')
def conv2d_transpose(x,
kernel,
output_shape,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D deconvolution (i.e.
transposed convolution).
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
output_shape: 1D int tensor for the output shape.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
Whether to use Theano or TensorFlow/CNTK data format
for inputs/kernels/outputs.
dilation_rate: Tuple of 2 integers.
Returns:
A tensor, result of transposed 2D convolution.
Raises:
    ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if isinstance(output_shape, (tuple, list)):
output_shape = array_ops.stack(output_shape)
# `atrous_conv2d_transpose` only supports NHWC format, even on GPU.
if data_format == 'channels_first' and dilation_rate != (1, 1):
force_transpose = True
else:
force_transpose = False
x, tf_data_format = _preprocess_conv2d_input(x, data_format, force_transpose)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
output_shape = (output_shape[0], output_shape[2], output_shape[3],
output_shape[1])
if output_shape[0] is None:
output_shape = (array_ops.shape(x)[0],) + tuple(output_shape[1:])
output_shape = array_ops.stack(list(output_shape))
padding = _preprocess_padding(padding)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
if dilation_rate == (1, 1):
x = nn.conv2d_transpose(x, kernel, output_shape, strides,
padding=padding,
data_format=tf_data_format)
else:
assert dilation_rate[0] == dilation_rate[1]
x = nn.atrous_conv2d_transpose(
x,
kernel,
output_shape,
rate=dilation_rate[0],
padding=padding)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
def separable_conv1d(x,
depthwise_kernel,
pointwise_kernel,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1):
"""1D convolution with separable filters.
Arguments:
x: input tensor
depthwise_kernel: convolution kernel for the depthwise convolution.
pointwise_kernel: kernel for the 1x1 convolution.
strides: stride integer.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: integer dilation rate.
Returns:
Output tensor.
Raises:
    ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if isinstance(strides, int):
strides = (strides,)
if isinstance(dilation_rate, int):
dilation_rate = (dilation_rate,)
x, tf_data_format = _preprocess_conv1d_input(x, data_format)
padding = _preprocess_padding(padding)
if not isinstance(strides, tuple):
strides = tuple(strides)
if tf_data_format == 'NWC':
spatial_start_dim = 1
strides = (1,) + strides * 2 + (1,)
else:
spatial_start_dim = 2
strides = (1, 1) + strides * 2
x = array_ops.expand_dims(x, spatial_start_dim)
depthwise_kernel = array_ops.expand_dims(depthwise_kernel, 0)
pointwise_kernel = array_ops.expand_dims(pointwise_kernel, 0)
dilation_rate = (1,) + dilation_rate
x = nn.separable_conv2d(
x,
depthwise_kernel,
pointwise_kernel,
strides=strides,
padding=padding,
rate=dilation_rate,
data_format=tf_data_format)
x = array_ops.squeeze(x, [spatial_start_dim])
if data_format == 'channels_first' and tf_data_format == 'NWC':
x = array_ops.transpose(x, (0, 2, 1)) # NWC -> NCW
return x
@keras_export('keras.backend.separable_conv2d')
def separable_conv2d(x,
depthwise_kernel,
pointwise_kernel,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D convolution with separable filters.
Arguments:
x: input tensor
depthwise_kernel: convolution kernel for the depthwise convolution.
pointwise_kernel: kernel for the 1x1 convolution.
strides: strides tuple (length 2).
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of integers,
dilation rates for the separable convolution.
Returns:
Output tensor.
Raises:
    ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
ValueError: if `strides` is not a tuple of 2 integers.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if len(strides) != 2:
raise ValueError('`strides` must be a tuple of 2 integers.')
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
if not isinstance(strides, tuple):
strides = tuple(strides)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
x = nn.separable_conv2d(
x,
depthwise_kernel,
pointwise_kernel,
strides=strides,
padding=padding,
rate=dilation_rate,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
def depthwise_conv2d(x,
depthwise_kernel,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D convolution with separable filters.
Arguments:
x: input tensor
depthwise_kernel: convolution kernel for the depthwise convolution.
strides: strides tuple (length 2).
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of integers,
dilation rates for the separable convolution.
Returns:
Output tensor.
Raises:
    ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
x = nn.depthwise_conv2d(
x,
depthwise_kernel,
strides=strides,
padding=padding,
rate=dilation_rate,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
@keras_export('keras.backend.conv3d')
def conv3d(x,
kernel,
strides=(1, 1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1, 1)):
"""3D convolution.
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
Whether to use Theano or TensorFlow/CNTK data format
for inputs/kernels/outputs.
dilation_rate: tuple of 3 integers.
Returns:
A tensor, result of 3D convolution.
Raises:
    ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv3d_input(x, data_format)
padding = _preprocess_padding(padding)
x = nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
x = array_ops.transpose(x, (0, 4, 1, 2, 3))
return x
def conv3d_transpose(x,
kernel,
output_shape,
strides=(1, 1, 1),
padding='valid',
data_format=None):
"""3D deconvolution (i.e.
transposed convolution).
Arguments:
x: input tensor.
kernel: kernel tensor.
output_shape: 1D int tensor for the output shape.
strides: strides tuple.
padding: string, "same" or "valid".
data_format: string, `"channels_last"` or `"channels_first"`.
Whether to use Theano or TensorFlow/CNTK data format
for inputs/kernels/outputs.
Returns:
A tensor, result of transposed 3D convolution.
Raises:
    ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if isinstance(output_shape, (tuple, list)):
output_shape = array_ops.stack(output_shape)
x, tf_data_format = _preprocess_conv3d_input(x, data_format)
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
output_shape = (output_shape[0], output_shape[2], output_shape[3],
output_shape[4], output_shape[1])
if output_shape[0] is None:
output_shape = (array_ops.shape(x)[0],) + tuple(output_shape[1:])
output_shape = array_ops.stack(list(output_shape))
padding = _preprocess_padding(padding)
if tf_data_format == 'NDHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
x = nn.conv3d_transpose(
x,
kernel,
output_shape,
strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
x = array_ops.transpose(x, (0, 4, 1, 2, 3))
return x
@keras_export('keras.backend.pool2d')
def pool2d(x,
pool_size,
strides=(1, 1),
padding='valid',
data_format=None,
pool_mode='max'):
"""2D Pooling.
Arguments:
x: Tensor or variable.
pool_size: tuple of 2 integers.
strides: tuple of 2 integers.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
pool_mode: string, `"max"` or `"avg"`.
Returns:
A tensor, result of 2D pooling.
Raises:
    ValueError: if `data_format` is neither `"channels_last"` nor
      `"channels_first"`.
    ValueError: if `pool_size` is not a tuple of 2 integers.
    ValueError: if `strides` is not a tuple of 2 integers.
    ValueError: if `pool_mode` is neither `"max"` nor `"avg"`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if len(pool_size) != 2:
raise ValueError('`pool_size` must be a tuple of 2 integers.')
if len(strides) != 2:
raise ValueError('`strides` must be a tuple of 2 integers.')
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
pool_size = (1,) + pool_size + (1,)
else:
strides = (1, 1) + strides
pool_size = (1, 1) + pool_size
if pool_mode == 'max':
x = nn.max_pool(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
elif pool_mode == 'avg':
x = nn.avg_pool(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
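# Illustrative usage sketch for `pool2d` on a channels_last input:
#   x = ones((1, 4, 4, 3))
#   int_shape(pool2d(x, (2, 2), strides=(2, 2)))                   # -> (1, 2, 2, 3), max pooling
#   int_shape(pool2d(x, (2, 2), strides=(2, 2), pool_mode='avg'))  # same shape, average pooling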
@keras_export('keras.backend.pool3d')
def pool3d(x,
pool_size,
strides=(1, 1, 1),
padding='valid',
data_format=None,
pool_mode='max'):
"""3D Pooling.
Arguments:
x: Tensor or variable.
pool_size: tuple of 3 integers.
strides: tuple of 3 integers.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
pool_mode: string, `"max"` or `"avg"`.
Returns:
A tensor, result of 3D pooling.
Raises:
    ValueError: if `data_format` is neither `"channels_last"` nor
      `"channels_first"`.
    ValueError: if `pool_mode` is neither `"max"` nor `"avg"`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv3d_input(x, data_format)
padding = _preprocess_padding(padding)
if tf_data_format == 'NDHWC':
strides = (1,) + strides + (1,)
pool_size = (1,) + pool_size + (1,)
else:
strides = (1, 1) + strides
pool_size = (1, 1) + pool_size
if pool_mode == 'max':
x = nn.max_pool3d(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
elif pool_mode == 'avg':
x = nn.avg_pool3d(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
x = array_ops.transpose(x, (0, 4, 1, 2, 3))
return x
def local_conv(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format=None):
"""Apply N-D convolution with un-shared weights.
Arguments:
inputs: (N+2)-D tensor with shape
(batch_size, channels_in, d_in1, ..., d_inN)
if data_format='channels_first', or
(batch_size, d_in1, ..., d_inN, channels_in)
if data_format='channels_last'.
kernel: the unshared weight for N-D convolution,
with shape (output_items, feature_dim, channels_out), where
feature_dim = np.prod(kernel_size) * channels_in,
output_items = np.prod(output_shape).
kernel_size: a tuple of N integers, specifying the
spatial dimensions of the N-D convolution window.
strides: a tuple of N integers, specifying the strides
of the convolution along the spatial dimensions.
output_shape: a tuple of (d_out1, ..., d_outN) specifying the spatial
dimensionality of the output.
data_format: string, "channels_first" or "channels_last".
Returns:
An (N+2)-D tensor with shape:
(batch_size, channels_out) + output_shape
if data_format='channels_first', or:
(batch_size,) + output_shape + (channels_out,)
if data_format='channels_last'.
Raises:
ValueError: if `data_format` is neither
`channels_last` nor `channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
kernel_shape = int_shape(kernel)
feature_dim = kernel_shape[1]
channels_out = kernel_shape[-1]
ndims = len(output_shape)
spatial_dimensions = list(range(ndims))
xs = []
output_axes_ticks = [range(axis_max) for axis_max in output_shape]
for position in itertools.product(*output_axes_ticks):
slices = [slice(None)]
if data_format == 'channels_first':
slices.append(slice(None))
slices.extend([slice(position[d] * strides[d],
position[d] * strides[d] + kernel_size[d])
for d in spatial_dimensions])
if data_format == 'channels_last':
slices.append(slice(None))
xs.append(reshape(inputs[slices], (1, -1, feature_dim)))
x_aggregate = concatenate(xs, axis=0)
output = batch_dot(x_aggregate, kernel)
output = reshape(output, output_shape + (-1, channels_out))
if data_format == 'channels_first':
permutation = [ndims, ndims + 1] + spatial_dimensions
else:
permutation = [ndims] + spatial_dimensions + [ndims + 1]
return permute_dimensions(output, permutation)
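# Usage note: the wrappers below call `local_conv` with N=1 (`local_conv1d`)
# and N=2 (`local_conv2d`); in both cases the unshared kernel has shape
# (np.prod(output_shape), np.prod(kernel_size) * channels_in, channels_out).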
@keras_export('keras.backend.local_conv1d')
def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None):
"""Apply 1D conv with un-shared weights.
Arguments:
inputs: 3D tensor with shape:
(batch_size, steps, input_dim)
if data_format is "channels_last" or
(batch_size, input_dim, steps)
if data_format is "channels_first".
kernel: the unshared weight for convolution,
with shape (output_length, feature_dim, filters).
kernel_size: a tuple of a single integer,
specifying the length of the 1D convolution window.
strides: a tuple of a single integer,
specifying the stride length of the convolution.
data_format: the data format, channels_first or channels_last.
Returns:
    A 3D tensor with shape:
    (batch_size, output_length, filters)
    if data_format='channels_last'
    or 3D tensor with shape:
    (batch_size, filters, output_length)
    if data_format='channels_first'.
"""
output_shape = (kernel.shape[0],)
return local_conv(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format)
@keras_export('keras.backend.local_conv2d')
def local_conv2d(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format=None):
"""Apply 2D conv with un-shared weights.
Arguments:
inputs: 4D tensor with shape:
(batch_size, filters, new_rows, new_cols)
if data_format='channels_first'
or 4D tensor with shape:
(batch_size, new_rows, new_cols, filters)
if data_format='channels_last'.
kernel: the unshared weight for convolution,
with shape (output_items, feature_dim, filters).
kernel_size: a tuple of 2 integers, specifying the
width and height of the 2D convolution window.
strides: a tuple of 2 integers, specifying the strides
of the convolution along the width and height.
output_shape: a tuple with (output_row, output_col).
data_format: the data format, channels_first or channels_last.
Returns:
A 4D tensor with shape:
(batch_size, filters, new_rows, new_cols)
if data_format='channels_first'
or 4D tensor with shape:
(batch_size, new_rows, new_cols, filters)
if data_format='channels_last'.
"""
return local_conv(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format)
@keras_export('keras.backend.bias_add')
def bias_add(x, bias, data_format=None):
"""Adds a bias vector to a tensor.
Arguments:
x: Tensor or variable.
bias: Bias tensor to add.
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
Output tensor.
Raises:
ValueError: In one of the two cases below:
1. invalid `data_format` argument.
2. invalid bias shape.
        The bias should be either a vector or
        a tensor with ndim(x) - 1 dimensions.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
bias_shape = int_shape(bias)
if len(bias_shape) != 1 and len(bias_shape) != ndim(x) - 1:
raise ValueError(
'Unexpected bias dimensions %d, expect to be 1 or %d dimensions' %
        (len(bias_shape), ndim(x) - 1))
# pylint: disable=g-no-augmented-assignment
if ndim(x) == 5:
if data_format == 'channels_first':
if len(bias_shape) == 1:
x = x + reshape(bias, (1, bias_shape[0], 1, 1, 1))
else:
x = x + reshape(bias, (1, bias_shape[3]) + bias_shape[:3])
elif data_format == 'channels_last':
if len(bias_shape) == 1:
x = x + reshape(bias, (1, 1, 1, bias_shape[0]))
else:
x = x + reshape(bias, (1,) + bias_shape)
elif ndim(x) == 4:
if data_format == 'channels_first':
if len(bias_shape) == 1:
if _has_nchw_support():
x = nn.bias_add(x, bias, data_format='NCHW')
else:
x = x + reshape(bias, (1, bias_shape[0], 1, 1))
else:
x = x + reshape(bias, (1, bias_shape[2]) + bias_shape[:2])
elif data_format == 'channels_last':
if len(bias_shape) == 1:
x = nn.bias_add(x, bias, data_format='NHWC')
else:
x = x + reshape(bias, (1,) + bias_shape)
elif ndim(x) == 3:
if data_format == 'channels_first':
if len(bias_shape) == 1:
x = x + reshape(bias, (1, bias_shape[0], 1))
else:
x = x + reshape(bias, (1, bias_shape[1], bias_shape[0]))
elif data_format == 'channels_last':
if len(bias_shape) == 1:
x = x + reshape(bias, (1, 1, bias_shape[0]))
else:
x = x + reshape(bias, (1,) + bias_shape)
else:
x = nn.bias_add(x, bias)
# pylint: enable=g-no-augmented-assignment
return x
# RANDOMNESS
@keras_export('keras.backend.random_normal')
def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
"""Returns a tensor with normal distribution of values.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
mean: A float, mean of the normal distribution to draw samples.
stddev: A float, standard deviation of the normal distribution
to draw samples.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return random_ops.random_normal(
shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
@keras_export('keras.backend.random_uniform')
def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
"""Returns a tensor with uniform distribution of values.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
minval: A float, lower boundary of the uniform distribution
to draw samples.
maxval: A float, upper boundary of the uniform distribution
to draw samples.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return random_ops.random_uniform(
shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
@keras_export('keras.backend.random_binomial')
def random_binomial(shape, p=0.0, dtype=None, seed=None):
"""Returns a tensor with random binomial distribution of values.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
p: A float, `0. <= p <= 1`, probability of binomial distribution.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return array_ops.where(
random_ops.random_uniform(shape, dtype=dtype, seed=seed) <= p,
array_ops.ones(shape, dtype=dtype), array_ops.zeros(shape, dtype=dtype))
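# Illustrative usage sketch for `random_binomial`: each entry is 1 with
# probability `p` and 0 otherwise (the exact values depend on the seed):
#   mask = random_binomial((2, 3), p=0.5, seed=1)
#   eval(mask)  # e.g. [[1., 0., 1.], [0., 1., 1.]]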
@keras_export('keras.backend.truncated_normal')
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
"""Returns a tensor with truncated random normal distribution of values.
The generated values follow a normal distribution
with specified mean and standard deviation,
except that values whose magnitude is more than
two standard deviations from the mean are dropped and re-picked.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
mean: Mean of the values.
stddev: Standard deviation of the values.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return random_ops.truncated_normal(
shape, mean, stddev, dtype=dtype, seed=seed)
# CTC
# TensorFlow has a native implementation, but it uses sparse tensors
# and therefore requires a wrapper for Keras. The functions below convert
# dense to sparse tensors and also wraps up the beam search code that is
# in TensorFlow's CTC implementation
@keras_export('keras.backend.ctc_label_dense_to_sparse')
def ctc_label_dense_to_sparse(labels, label_lengths):
"""Converts CTC labels from dense to sparse.
Arguments:
labels: dense CTC labels.
label_lengths: length of the labels.
Returns:
A sparse tensor representation of the labels.
"""
label_shape = array_ops.shape(labels)
num_batches_tns = array_ops.stack([label_shape[0]])
max_num_labels_tns = array_ops.stack([label_shape[1]])
def range_less_than(_, current_input):
return array_ops.expand_dims(
math_ops.range(label_shape[1]), 0) < array_ops.fill(
max_num_labels_tns, current_input)
init = math_ops.cast(
array_ops.fill([1, label_shape[1]], 0), dtypes_module.bool)
dense_mask = functional_ops.scan(
range_less_than, label_lengths, initializer=init, parallel_iterations=1)
dense_mask = dense_mask[:, 0, :]
label_array = array_ops.reshape(
array_ops.tile(math_ops.range(0, label_shape[1]), num_batches_tns),
label_shape)
label_ind = array_ops.boolean_mask(label_array, dense_mask)
batch_array = array_ops.transpose(
array_ops.reshape(
array_ops.tile(math_ops.range(0, label_shape[0]), max_num_labels_tns),
reverse(label_shape, 0)))
batch_ind = array_ops.boolean_mask(batch_array, dense_mask)
indices = array_ops.transpose(
array_ops.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1]))
vals_sparse = array_ops.gather_nd(labels, indices)
return sparse_tensor.SparseTensor(
math_ops.cast(indices, dtypes_module.int64), vals_sparse,
math_ops.cast(label_shape, dtypes_module.int64))
@keras_export('keras.backend.ctc_batch_cost')
def ctc_batch_cost(y_true, y_pred, input_length, label_length):
"""Runs CTC loss algorithm on each batch element.
Arguments:
y_true: tensor `(samples, max_string_length)`
containing the truth labels.
y_pred: tensor `(samples, time_steps, num_categories)`
containing the prediction, or output of the softmax.
input_length: tensor `(samples, 1)` containing the sequence length for
each batch item in `y_pred`.
label_length: tensor `(samples, 1)` containing the sequence length for
each batch item in `y_true`.
Returns:
Tensor with shape (samples,1) containing the
CTC loss of each element.
"""
label_length = math_ops.cast(
array_ops.squeeze(label_length, axis=-1), dtypes_module.int32)
input_length = math_ops.cast(
array_ops.squeeze(input_length, axis=-1), dtypes_module.int32)
sparse_labels = math_ops.cast(
ctc_label_dense_to_sparse(y_true, label_length), dtypes_module.int32)
y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())
return array_ops.expand_dims(
ctc.ctc_loss(
inputs=y_pred, labels=sparse_labels, sequence_length=input_length), 1)
@keras_export('keras.backend.ctc_decode')
def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1):
"""Decodes the output of a softmax.
Can use either greedy search (also known as best path)
or a constrained dictionary search.
Arguments:
y_pred: tensor `(samples, time_steps, num_categories)`
containing the prediction, or output of the softmax.
input_length: tensor `(samples, )` containing the sequence length for
each batch item in `y_pred`.
greedy: perform much faster best-path search if `true`.
This does not use a dictionary.
beam_width: if `greedy` is `false`: a beam search decoder will be used
with a beam of this width.
top_paths: if `greedy` is `false`,
how many of the most probable paths will be returned.
Returns:
Tuple:
List: if `greedy` is `true`, returns a list of one element that
contains the decoded sequence.
If `false`, returns the `top_paths` most probable
decoded sequences.
Important: blank labels are returned as `-1`.
Tensor `(top_paths, )` that contains
the log probability of each decoded sequence.
"""
y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())
input_length = math_ops.cast(input_length, dtypes_module.int32)
if greedy:
(decoded, log_prob) = ctc.ctc_greedy_decoder(
inputs=y_pred, sequence_length=input_length)
else:
(decoded, log_prob) = ctc.ctc_beam_search_decoder(
inputs=y_pred,
sequence_length=input_length,
beam_width=beam_width,
top_paths=top_paths)
decoded_dense = [
sparse_ops.sparse_to_dense(
st.indices, st.dense_shape, st.values, default_value=-1)
for st in decoded
]
return (decoded_dense, log_prob)
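# Illustrative shapes for the CTC helpers above:
#   y_pred: (samples, time_steps, num_categories) softmax output
#   input_length: (samples,) valid time steps per sample
#   decoded, log_prob = ctc_decode(y_pred, input_length, greedy=True)
#   # decoded[0] is (samples, max_decoded_length), padded with -1.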
# HIGH ORDER FUNCTIONS
@keras_export('keras.backend.map_fn')
def map_fn(fn, elems, name=None, dtype=None):
"""Map the function fn over the elements elems and return the outputs.
Arguments:
fn: Callable that will be called upon each element in elems
elems: tensor
name: A string name for the map node in the graph
dtype: Output data type.
Returns:
Tensor with dtype `dtype`.
"""
return map_fn_lib.map_fn(fn, elems, name=name, dtype=dtype)
@keras_export('keras.backend.foldl')
def foldl(fn, elems, initializer=None, name=None):
"""Reduce elems using fn to combine them from left to right.
Arguments:
fn: Callable that will be called upon each element in elems and an
accumulator, for instance `lambda acc, x: acc + x`
elems: tensor
initializer: The first value used (`elems[0]` in case of None)
name: A string name for the foldl node in the graph
Returns:
Tensor with same type and shape as `initializer`.
"""
return functional_ops.foldl(fn, elems, initializer=initializer, name=name)
@keras_export('keras.backend.foldr')
def foldr(fn, elems, initializer=None, name=None):
"""Reduce elems using fn to combine them from right to left.
Arguments:
fn: Callable that will be called upon each element in elems and an
accumulator, for instance `lambda acc, x: acc + x`
elems: tensor
initializer: The first value used (`elems[-1]` in case of None)
name: A string name for the foldr node in the graph
Returns:
Same type and shape as initializer
"""
return functional_ops.foldr(fn, elems, initializer=initializer, name=name)
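# Illustrative usage sketch for `foldl` / `foldr` (assumes `constant` and
# `eval` from this backend):
#   elems = constant([1.0, 2.0, 3.0, 4.0])
#   eval(foldl(lambda acc, x: acc + x, elems))  # -> 10.0
#   eval(foldr(lambda acc, x: acc * x, elems))  # -> 24.0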
# Load Keras default configuration from config file if present.
# Set Keras base dir path given KERAS_HOME env variable, if applicable.
# Otherwise either ~/.keras or /tmp.
if 'KERAS_HOME' in os.environ:
_keras_dir = os.environ.get('KERAS_HOME')
else:
_keras_base_dir = os.path.expanduser('~')
_keras_dir = os.path.join(_keras_base_dir, '.keras')
_config_path = os.path.expanduser(os.path.join(_keras_dir, 'keras.json'))
if os.path.exists(_config_path):
try:
_config = json.load(open(_config_path))
except ValueError:
_config = {}
_floatx = _config.get('floatx', floatx())
assert _floatx in {'float16', 'float32', 'float64'}
_epsilon = _config.get('epsilon', epsilon())
assert isinstance(_epsilon, float)
_image_data_format = _config.get('image_data_format', image_data_format())
assert _image_data_format in {'channels_last', 'channels_first'}
set_floatx(_floatx)
set_epsilon(_epsilon)
set_image_data_format(_image_data_format)
# Save config file.
if not os.path.exists(_keras_dir):
try:
os.makedirs(_keras_dir)
except OSError:
# Except permission denied and potential race conditions
# in multi-threaded environments.
pass
if not os.path.exists(_config_path):
_config = {
'floatx': floatx(),
'epsilon': epsilon(),
'backend': 'tensorflow',
'image_data_format': image_data_format()
}
try:
with open(_config_path, 'w') as f:
f.write(json.dumps(_config, indent=4))
except IOError:
# Except permission denied.
pass
def in_multi_worker_mode():
"""Whether we are operating in a Multi-Worker setting."""
tf_config = json.loads(os.environ.get('TF_CONFIG', '{}'))
cluster_spec = server_lib.ClusterSpec(tf_config.get('cluster', {}))
return tf_config and 'master' not in cluster_spec.jobs
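# Example TF_CONFIG under which `in_multi_worker_mode()` evaluates truthy: a
# cluster that defines worker jobs but no 'master' job, e.g.
#   os.environ['TF_CONFIG'] = json.dumps({
#       'cluster': {'worker': ['host1:port', 'host2:port']},
#       'task': {'type': 'worker', 'index': 0}})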
def configure_and_create_distributed_session(distribution_strategy):
"""Configure session config and create a session with it."""
def _create_session(distribution_strategy):
"""Create the Distributed Strategy session."""
session_config = get_default_session_config()
# If a session already exists, merge in its config; in the case there is a
# conflict, take values of the existing config.
global _SESSION
if getattr(_SESSION, 'session', None) and _SESSION.session._config:
session_config.MergeFrom(_SESSION.session._config)
if is_tpu_strategy(distribution_strategy):
# TODO(priyag, yuefengz): Remove this workaround when Distribute
# Coordinator is integrated with keras and we can create a session from
# there.
distribution_strategy.configure(session_config)
master = distribution_strategy.extended._tpu_cluster_resolver.master() # pylint: disable=protected-access
session = session_module.Session(config=session_config, target=master)
else:
worker_context = dc_context.get_current_worker_context()
if worker_context:
dc_session_config = worker_context.session_config
# Merge the default session config to the one from distribute
# coordinator, which is fine for now since they don't have
# conflicting configurations.
dc_session_config.MergeFrom(session_config)
session = session_module.Session(
config=dc_session_config, target=worker_context.master_target)
else:
distribution_strategy.configure(session_config)
session = session_module.Session(config=session_config)
set_session(session)
if in_multi_worker_mode():
dc.run_distribute_coordinator(
_create_session,
distribution_strategy,
mode=dc.CoordinatorMode.INDEPENDENT_WORKER)
else:
_create_session(distribution_strategy)
def is_tpu_strategy(strategy):
"""We're executing TPU Strategy."""
return strategy is not None and strategy.__class__.__name__ == 'TPUStrategy'
def cast_variables_to_tensor(tensors):
def _cast_variables_to_tensor(tensor):
if isinstance(tensor, variables_module.Variable):
return array_ops.identity(tensor)
return tensor
return nest.map_structure(_cast_variables_to_tensor, tensors)
| [] | [] | ["OMP_NUM_THREADS", "KERAS_HOME", "TF_CONFIG"] | [] | ["OMP_NUM_THREADS", "KERAS_HOME", "TF_CONFIG"] | python | 3 | 0 |
backend/store/postgres/storev2_test.go | package postgres
import (
"context"
"fmt"
"os"
"reflect"
"strings"
"testing"
"github.com/google/uuid"
"github.com/jackc/pgx/v4/pgxpool"
corev3 "github.com/sensu/sensu-go/api/core/v3"
"github.com/sensu/sensu-go/backend/store"
"github.com/sensu/sensu-go/backend/store/patch"
storev2 "github.com/sensu/sensu-go/backend/store/v2"
)
func testWithPostgresStoreV2(t *testing.T, fn func(storev2.Interface)) {
t.Helper()
if testing.Short() {
t.Skip("skipping postgres test")
return
}
pgURL := os.Getenv("PG_URL")
if pgURL == "" {
t.Skip("skipping postgres test")
return
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
db, err := pgxpool.Connect(ctx, pgURL)
if err != nil {
t.Fatal(err)
}
dbName := "sensu" + strings.ReplaceAll(uuid.New().String(), "-", "")
if _, err := db.Exec(ctx, fmt.Sprintf("CREATE DATABASE %s;", dbName)); err != nil {
t.Fatal(err)
}
defer dropAll(context.Background(), dbName, pgURL)
db.Close()
db, err = pgxpool.Connect(ctx, fmt.Sprintf("dbname=%s ", dbName)+pgURL)
if err != nil {
t.Fatal(err)
}
defer db.Close()
if err := upgrade(ctx, db); err != nil {
t.Fatal(err)
}
fn(NewStoreV2(db, nil))
}
func TestStoreCreateOrUpdate(t *testing.T) {
testWithPostgresStoreV2(t, func(s storev2.Interface) {
fixture := corev3.FixtureEntityState("foo")
ctx := context.Background()
req := storev2.NewResourceRequestFromResource(ctx, fixture)
req.UsePostgres = true
wrapper := WrapEntityState(fixture)
if err := s.CreateOrUpdate(req, wrapper); err != nil {
t.Error(err)
}
// Repeating the call to the store should succeed
if err := s.CreateOrUpdate(req, wrapper); err != nil {
t.Error(err)
}
rows, err := s.(*StoreV2).db.Query(context.Background(), "SELECT * FROM entities")
if err != nil {
t.Fatal(err)
}
defer rows.Close()
rowCount := 0
for rows.Next() {
rowCount++
}
if got, want := rowCount, 1; got != want {
t.Errorf("bad row count: got %d, want %d", got, want)
}
})
}
func TestStoreUpdateIfExists(t *testing.T) {
testWithPostgresStoreV2(t, func(s storev2.Interface) {
fixture := corev3.FixtureEntityState("foo")
ctx := context.Background()
req := storev2.NewResourceRequestFromResource(ctx, fixture)
req.UsePostgres = true
wrapper := WrapEntityState(fixture)
// UpdateIfExists should fail
if err := s.UpdateIfExists(req, wrapper); err == nil {
t.Error("expected non-nil error")
} else {
if _, ok := err.(*store.ErrNotFound); !ok {
t.Errorf("wrong error: %s", err)
}
}
if err := s.CreateOrUpdate(req, wrapper); err != nil {
t.Fatal(err)
}
// UpdateIfExists should succeed
if err := s.UpdateIfExists(req, wrapper); err != nil {
t.Error(err)
}
})
}
func TestStoreCreateIfNotExists(t *testing.T) {
testWithPostgresStoreV2(t, func(s storev2.Interface) {
fixture := corev3.FixtureEntityState("foo")
ctx := context.Background()
req := storev2.NewResourceRequestFromResource(ctx, fixture)
req.UsePostgres = true
wrapper := WrapEntityState(fixture)
// CreateIfNotExists should succeed
if err := s.CreateIfNotExists(req, wrapper); err != nil {
t.Fatal(err)
}
// CreateIfNotExists should fail
if err := s.CreateIfNotExists(req, wrapper); err == nil {
t.Error("expected non-nil error")
} else if _, ok := err.(*store.ErrAlreadyExists); !ok {
t.Errorf("wrong error: %s", err)
}
// UpdateIfExists should succeed
if err := s.UpdateIfExists(req, wrapper); err != nil {
t.Error(err)
}
})
}
func TestStoreGet(t *testing.T) {
testWithPostgresStoreV2(t, func(s storev2.Interface) {
fixture := corev3.FixtureEntityState("foo")
ctx := context.Background()
req := storev2.NewResourceRequestFromResource(ctx, fixture)
req.UsePostgres = true
wrapper := WrapEntityState(fixture)
		// CreateOrUpdate should succeed
if err := s.CreateOrUpdate(req, wrapper); err != nil {
t.Fatal(err)
}
got, err := s.Get(req)
if err != nil {
t.Fatal(err)
}
		if want := wrapper; !reflect.DeepEqual(got, want) {
t.Errorf("bad resource; got %#v, want %#v", got, want)
}
})
}
func TestStoreDelete(t *testing.T) {
testWithPostgresStoreV2(t, func(s storev2.Interface) {
fixture := corev3.FixtureEntityState("foo")
ctx := context.Background()
req := storev2.NewResourceRequestFromResource(ctx, fixture)
req.UsePostgres = true
wrapper := WrapEntityState(fixture)
// CreateIfNotExists should succeed
if err := s.CreateIfNotExists(req, wrapper); err != nil {
t.Fatal(err)
}
if err := s.Delete(req); err != nil {
t.Fatal(err)
}
if err := s.Delete(req); err == nil {
t.Error("expected non-nil error")
} else if _, ok := err.(*store.ErrNotFound); !ok {
t.Errorf("expected ErrNotFound: got %s", err)
}
if _, err := s.Get(req); err == nil {
t.Error("expected non-nil error")
} else if _, ok := err.(*store.ErrNotFound); !ok {
t.Errorf("expected ErrNotFound: got %s", err)
}
})
}
func TestStoreList(t *testing.T) {
testWithPostgresStoreV2(t, func(s storev2.Interface) {
for i := 0; i < 10; i++ {
// create 10 resources
fixture := corev3.FixtureEntityState(fmt.Sprintf("foo-%d", i))
ctx := context.Background()
req := storev2.NewResourceRequestFromResource(ctx, fixture)
req.UsePostgres = true
wrapper := WrapEntityState(fixture)
if err := s.CreateIfNotExists(req, wrapper); err != nil {
t.Fatal(err)
}
}
ctx := context.Background()
req := storev2.NewResourceRequest(ctx, "default", "anything", new(corev3.EntityState).StoreName())
req.UsePostgres = true
pred := &store.SelectionPredicate{Limit: 5}
// Test listing with limit of 5
list, err := s.List(req, pred)
if err != nil {
t.Fatal(err)
}
if got, want := list.Len(), 5; got != want {
t.Errorf("wrong number of items: got %d, want %d", got, want)
}
if got, want := pred.Continue, `{"offset":5}`; got != want {
t.Errorf("bad continue token: got %q, want %q", got, want)
}
// get the rest of the list
pred.Limit = 6
list, err = s.List(req, pred)
if err != nil {
t.Fatal(err)
}
if got, want := list.Len(), 5; got != want {
t.Errorf("wrong number of items: got %d, want %d", got, want)
}
if pred.Continue != "" {
t.Error("expected empty continue token")
}
// Test listing from all namespaces
req.Namespace = ""
pred = &store.SelectionPredicate{Limit: 5}
list, err = s.List(req, pred)
if err != nil {
t.Fatal(err)
}
if got, want := list.Len(), 5; got != want {
t.Errorf("wrong number of items: got %d, want %d", got, want)
}
if got, want := pred.Continue, `{"offset":5}`; got != want {
t.Errorf("bad continue token: got %q, want %q", got, want)
}
pred.Limit = 6
// get the rest of the list
list, err = s.List(req, pred)
if err != nil {
t.Fatal(err)
}
if got, want := list.Len(), 5; got != want {
t.Errorf("wrong number of items: got %d, want %d", got, want)
}
if pred.Continue != "" {
t.Error("expected empty continue token")
}
pred.Limit = 5
// Test listing in descending order
pred.Continue = ""
req.SortOrder = storev2.SortDescend
list, err = s.List(req, pred)
if err != nil {
t.Fatal(err)
}
if got := list.Len(); got == 0 {
t.Fatalf("wrong number of items: got %d, want > %d", got, 0)
}
firstObj, err := list.(WrapList)[0].Unwrap()
if err != nil {
t.Fatal(err)
}
if got, want := firstObj.GetMetadata().Name, "foo-9"; got != want {
t.Errorf("unexpected first item in list: got %s, want %s", got, want)
}
// Test listing in ascending order
pred.Continue = ""
req.SortOrder = storev2.SortAscend
list, err = s.List(req, pred)
if err != nil {
t.Fatal(err)
}
if got := list.Len(); got == 0 {
t.Fatalf("wrong number of items: got %d, want > %d", got, 0)
}
firstObj, err = list.(WrapList)[0].Unwrap()
if err != nil {
t.Fatal(err)
}
if got, want := firstObj.GetMetadata().Name, "foo-0"; got != want {
t.Errorf("unexpected first item in list: got %s, want %s", got, want)
}
})
}
func TestStoreExists(t *testing.T) {
testWithPostgresStoreV2(t, func(s storev2.Interface) {
fixture := corev3.FixtureEntityState("foo")
ctx := context.Background()
req := storev2.NewResourceRequestFromResource(ctx, fixture)
req.UsePostgres = true
// Exists should return false
got, err := s.Exists(req)
if err != nil {
t.Fatal(err)
}
if want := false; got != want {
t.Errorf("got true, want false")
}
// Create a resource under the default namespace
wrapper := WrapEntityState(fixture)
// CreateIfNotExists should succeed
if err := s.CreateIfNotExists(req, wrapper); err != nil {
t.Fatal(err)
}
got, err = s.Exists(req)
if err != nil {
t.Fatal(err)
}
if want := true; got != want {
t.Errorf("got false, want true")
}
})
}
func TestStorePatch(t *testing.T) {
testWithPostgresStoreV2(t, func(s storev2.Interface) {
fixture := corev3.FixtureEntityState("foo")
ctx := context.Background()
req := storev2.NewResourceRequestFromResource(ctx, fixture)
req.UsePostgres = true
wrapper := WrapEntityState(fixture)
if err := s.CreateOrUpdate(req, wrapper); err != nil {
t.Error(err)
}
patcher := &patch.Merge{
MergePatch: []byte(`{"metadata":{"labels":{"food":"hummus"}}}`),
}
if err := s.Patch(req, wrapper, patcher, nil); err != nil {
t.Fatal(err)
}
updatedWrapper, err := s.Get(req)
if err != nil {
t.Fatal(err)
}
updated, err := updatedWrapper.Unwrap()
if err != nil {
t.Fatal(err)
}
if got, want := updated.GetMetadata().Labels["food"], "hummus"; got != want {
t.Errorf("bad patched labels: got %q, want %q", got, want)
}
})
}
func TestStoreGetMultiple(t *testing.T) {
testWithPostgresStoreV2(t, func(s storev2.Interface) {
reqs := make([]storev2.ResourceRequest, 0)
for i := 0; i < 10; i++ {
// create 10 resources
fixture := corev3.FixtureEntityState(fmt.Sprintf("foo-%d", i))
ctx := context.Background()
req := storev2.NewResourceRequestFromResource(ctx, fixture)
reqs = append(reqs, req)
req.UsePostgres = true
wrapper := WrapEntityState(fixture)
if err := s.CreateIfNotExists(req, wrapper); err != nil {
t.Fatal(err)
}
}
result, err := s.(*StoreV2).GetMultiple(context.Background(), reqs[:5])
if err != nil {
t.Fatal(err)
}
if got, want := len(result), 5; got != want {
t.Fatalf("bad number of results: got %d, want %d", got, want)
}
for i := 0; i < 5; i++ {
wrapper, ok := result[reqs[i]]
if !ok {
t.Errorf("missing result %d", i)
continue
}
var entity corev3.EntityState
if err := wrapper.UnwrapInto(&entity); err != nil {
t.Error(err)
continue
}
if len(entity.System.Network.Interfaces) != 1 {
t.Error("wrong number of network interfaces")
}
if len(entity.System.Network.Interfaces[0].Addresses) != 1 {
t.Error("wrong number of IP addresses")
}
}
req := reqs[0]
req.Namespace = "notexists"
result, err = s.(*StoreV2).GetMultiple(context.Background(), []storev2.ResourceRequest{req})
if err != nil {
t.Fatal(err)
}
if len(result) != 0 {
t.Fatal("wrong result length")
}
})
}
| [
"\"PG_URL\""
]
| []
| [
"PG_URL"
]
| [] | ["PG_URL"] | go | 1 | 0 | |
pkg/ddevapp/providerDdevLive_test.go | package ddevapp_test
import (
"bufio"
"fmt"
"github.com/drud/ddev/pkg/nodeps"
"github.com/drud/ddev/pkg/output"
"github.com/stretchr/testify/require"
"os"
"path/filepath"
"strings"
"testing"
. "github.com/drud/ddev/pkg/ddevapp"
"github.com/drud/ddev/pkg/testcommon"
"github.com/drud/ddev/pkg/util"
asrt "github.com/stretchr/testify/assert"
)
/**
* These tests rely on an external test account managed by DRUD. To run them, you'll
* need to set an environment variable called "DDEV_DDEVLIVE_API_TOKEN" with credentials for
* this account. If no such environment variable is present, these tests will be skipped.
*
* A valid site (with backups) must be present which matches the test site and environment name
* defined in the constants below.
*/
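// For example (assumed local workflow, not something these tests execute), the
// credentials would typically be provided before running the suite:
//
//   export DDEV_DDEVLIVE_API_TOKEN=<token-for-the-shared-test-account>
//   go test ./pkg/ddevapp -run TestDdevLive -v
//
// The package path and -run pattern above are illustrative.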
const ddevliveTestSite = "ddev-live-test-no-delete"
const ddevLiveOrgName = "ddltest"
// TestDdevLiveConfigCommand tests the interactive config options.
func TestDdevLiveConfigCommand(t *testing.T) {
t.Skip("This feature is about to be removed, so don't bother testing")
if os.Getenv("DDEV_DDEVLIVE_API_TOKEN") == "" {
t.Skipf("No DDEV_DDEVLIVE_API_TOKEN env var has been set. Skipping %v", t.Name())
}
_ = os.Setenv("DDEV_LIVE_NO_ANALYTICS", "true")
// Set up tests and give ourselves a working directory.
assert := asrt.New(t)
testDir := testcommon.CreateTmpDir(t.Name())
// testcommon.Chdir()() and CleanupDir() check their own errors (and exit)
defer testcommon.CleanupDir(testDir)
defer testcommon.Chdir(testDir)()
docroot := "web"
// Create the docroot.
err := os.Mkdir(filepath.Join(testDir, docroot), 0755)
if err != nil {
t.Errorf("Could not create %s directory under %s", docroot, testDir)
}
// Create the ddevapp we'll use for testing.
app, err := NewApp(testDir, true, nodeps.ProviderDdevLive)
assert.NoError(err)
/**
* Do a full interactive configuration for a ddev-live environment.
*
* 1. Provide a valid site name. Ensure there is no error.
* 2. Provide a valid docroot (already tested elsewhere)
* 3. Provide a valid project type (drupal8)
**/
input := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n", ddevliveTestSite, docroot, "drupal8", ddevLiveOrgName, ddevliveTestSite)
scanner := bufio.NewScanner(strings.NewReader(input))
util.SetInputScanner(scanner)
restoreOutput := util.CaptureUserOut()
err = app.PromptForConfig()
assert.NoError(err, t)
out := restoreOutput()
// Get the provider interface and ensure it validates.
provider, err := app.GetProvider("")
assert.NoError(err)
err = provider.Validate()
assert.NoError(err)
// Ensure we have expected string values in output.
assert.Contains(out, testDir)
// Ensure values were properly set on the app struct.
assert.Equal(ddevliveTestSite, app.Name)
assert.Equal(nodeps.AppTypeDrupal8, app.Type)
assert.Equal(docroot, app.Docroot)
require.Equal(t, "*ddevapp.DdevLiveProvider", fmt.Sprintf("%T", provider))
realProvider := provider.(*DdevLiveProvider)
assert.Equal(ddevliveTestSite, realProvider.SiteName)
assert.Equal(ddevLiveOrgName, realProvider.OrgName)
err = PrepDdevDirectory(testDir)
assert.NoError(err)
output.UserOut.Print("")
}
// TestDdevLivePull ensures we can pull backups from ddev-live .
func TestDdevLivePull(t *testing.T) {
if os.Getenv("DDEV_DDEVLIVE_API_TOKEN") == "" {
t.Skipf("No DDEV_DDEVLIVE_API_TOKEN env var has been set. Skipping %v", t.Name())
}
_ = os.Setenv("DDEV_LIVE_NO_ANALYTICS", "true")
// Set up tests and give ourselves a working directory.
assert := asrt.New(t)
testDir := testcommon.CreateTmpDir(t.Name())
// testcommon.Chdir()() and CleanupDir() check their own errors (and exit)
defer testcommon.CleanupDir(testDir)
defer testcommon.Chdir(testDir)()
// Move into the properly named ddev-live site (must match ddev-live sitename)
siteDir := filepath.Join(testDir, ddevliveTestSite)
err := os.MkdirAll(filepath.Join(siteDir, "web", "sites/default"), 0777)
assert.NoError(err)
err = os.Chdir(siteDir)
assert.NoError(err)
app, err := NewApp(siteDir, true, nodeps.ProviderDdevLive)
assert.NoError(err)
// nolint: errcheck
defer app.Stop(true, false)
app.Name = t.Name()
app.Type = nodeps.AppTypeDrupal8
app.Docroot = "web"
_ = os.MkdirAll(filepath.Join(app.AppRoot, app.Docroot, "sites/default/files"), 0755)
app.Hooks = map[string][]YAMLTask{"post-pull": {{"exec-host": "touch hello-post-pull-" + app.Name}}, "pre-pull": {{"exec-host": "touch hello-pre-pull-" + app.Name}}}
err = app.WriteConfig()
assert.NoError(err)
testcommon.ClearDockerEnv()
provider := DdevLiveProvider{}
err = provider.Init(app)
require.NoError(t, err)
provider.SiteName = ddevliveTestSite
provider.OrgName = ddevLiveOrgName
err = provider.Write(app.GetConfigPath("import.yaml"))
require.NoError(t, err)
err = app.WriteConfig()
require.NoError(t, err)
// Ensure we can do a pull on the configured site.
app, err = GetActiveApp("")
assert.NoError(err)
err = app.Pull(&provider, &PullOptions{})
require.NoError(t, err)
// Verify that we got the special file created in this site.
assert.FileExists(filepath.Join(app.AppRoot, "web/sites/default/files/i-exist-in-ddev-pull.txt"))
// Make sure that we have the actual database from the site
stdout, _, err := app.Exec(&ExecOpts{
Service: "db",
Cmd: "mysql -N -e 'select name from users_field_data where uid=2;' | cat",
})
assert.NoError(err)
assert.Equal("test-account-for-ddev-tests", strings.Trim(stdout, "\n"))
assert.FileExists("hello-pre-pull-" + app.Name)
assert.FileExists("hello-post-pull-" + app.Name)
err = os.Remove("hello-pre-pull-" + app.Name)
assert.NoError(err)
err = os.Remove("hello-post-pull-" + app.Name)
assert.NoError(err)
app.Hooks = nil
_ = app.WriteConfig()
err = app.Stop(true, false)
assert.NoError(err)
output.UserOut.Print("")
}
| [
"\"DDEV_DDEVLIVE_API_TOKEN\"",
"\"DDEV_DDEVLIVE_API_TOKEN\""
]
| []
| [
"DDEV_DDEVLIVE_API_TOKEN"
]
| [] | ["DDEV_DDEVLIVE_API_TOKEN"] | go | 1 | 0 | |
x/mongo/driver/integration/compressor_test.go | package integration
import (
"context"
"os"
"testing"
"github.com/stretchr/testify/require"
"github.com/stlimtat/mongo-go-driver/bson"
"github.com/stlimtat/mongo-go-driver/internal/testutil"
"github.com/stlimtat/mongo-go-driver/mongo/writeconcern"
"github.com/stlimtat/mongo-go-driver/x/bsonx/bsoncore"
"github.com/stlimtat/mongo-go-driver/x/mongo/driver/operation"
)
func TestCompression(t *testing.T) {
comp := os.Getenv("MONGO_GO_DRIVER_COMPRESSOR")
if len(comp) == 0 {
t.Skip("Skipping because no compressor specified")
}
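// Illustrative values only: the test harness is normally run with
// MONGO_GO_DRIVER_COMPRESSOR set to a compressor the server supports, e.g.
// "snappy", "zlib" or "zstd"; the same name is looked up further down under
// serverStatus -> network.compression.<name>.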
wc := writeconcern.New(writeconcern.WMajority())
collOne := testutil.ColName(t)
testutil.DropCollection(t, testutil.DBName(t), collOne)
testutil.InsertDocs(t, testutil.DBName(t), collOne, wc,
bsoncore.BuildDocument(nil, bsoncore.AppendStringElement(nil, "name", "compression_test")),
)
cmd := operation.NewCommand(bsoncore.BuildDocument(nil, bsoncore.AppendInt32Element(nil, "serverStatus", 1))).
Deployment(testutil.Topology(t)).
Database(testutil.DBName(t))
ctx := context.Background()
err := cmd.Execute(ctx)
noerr(t, err)
result := cmd.Result()
serverVersion, err := result.LookupErr("version")
noerr(t, err)
if testutil.CompareVersions(t, serverVersion.StringValue(), "3.4") < 0 {
t.Skip("skipping compression test for version < 3.4")
}
networkVal, err := result.LookupErr("network")
noerr(t, err)
require.Equal(t, networkVal.Type, bson.TypeEmbeddedDocument)
compressionVal, err := networkVal.Document().LookupErr("compression")
noerr(t, err)
compressorDoc, err := compressionVal.Document().LookupErr(comp)
noerr(t, err)
compressorKey := "compressor"
compareTo36 := testutil.CompareVersions(t, serverVersion.StringValue(), "3.6")
if compareTo36 < 0 {
compressorKey = "compressed"
}
compressor, err := compressorDoc.Document().LookupErr(compressorKey)
noerr(t, err)
bytesIn, err := compressor.Document().LookupErr("bytesIn")
noerr(t, err)
require.True(t, bytesIn.IsNumber())
require.True(t, bytesIn.Int64() > 0)
}
| [
"\"MONGO_GO_DRIVER_COMPRESSOR\""
]
| []
| [
"MONGO_GO_DRIVER_COMPRESSOR"
]
| [] | ["MONGO_GO_DRIVER_COMPRESSOR"] | go | 1 | 0 | |
tests/unit_test/c/gtest-1.7.0/test/gtest_xml_outfiles_test.py | #!/usr/bin/env python
#
# Copyright (c) 2021 HopeBayTech.
#
# This file is part of Tera.
# See https://github.com/HopeBayMobile for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit test for the gtest_xml_output module."""
__author__ = "[email protected] (Keith Ray)"
import os
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_OUTPUT_SUBDIR = "xml_outfiles"
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyOne" SetUpProp="1" TestSomeProperty="1" TearDownProp="1" />
</testsuite>
</testsuites>
"""
EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyTwo" SetUpProp="2" TestSomeProperty="2" TearDownProp="2" />
</testsuite>
</testsuites>
"""
class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
"""Unit test for Google Test's XML output functionality."""
def setUp(self):
# We want the trailing '/' that the last "" provides in os.path.join, for
# telling Google Test to create an output directory instead of a single file
# for xml output.
self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_OUTPUT_SUBDIR, "")
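# e.g. os.path.join("/tmp/out", "xml_outfiles", "") -> "/tmp/out/xml_outfiles/" (the
# trailing separator is what makes --gtest_output=xml: treat it as a directory).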
self.DeleteFilesAndDir()
def tearDown(self):
self.DeleteFilesAndDir()
def DeleteFilesAndDir(self):
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml"))
except os.error:
pass
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml"))
except os.error:
pass
try:
os.rmdir(self.output_dir_)
except os.error:
pass
def testOutfile1(self):
self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)
def testOutfile2(self):
self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)
def _TestOutFile(self, test_name, expected_xml):
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
p = gtest_test_utils.Subprocess(command,
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
# TODO([email protected]): libtool causes the built test binary to be
# named lt-gtest_xml_outfiles_test_ instead of
# gtest_xml_outfiles_test_. To account for this possibility, we
# allow both names in the following code. We should remove this
# hack when Chandler Carruth's libtool replacement tool is ready.
output_file_name1 = test_name + ".xml"
output_file1 = os.path.join(self.output_dir_, output_file_name1)
output_file_name2 = 'lt-' + output_file_name1
output_file2 = os.path.join(self.output_dir_, output_file_name2)
self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
output_file1)
expected = minidom.parseString(expected_xml)
if os.path.isfile(output_file1):
actual = minidom.parse(output_file1)
else:
actual = minidom.parse(output_file2)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == "__main__":
os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
gtest_test_utils.Main()
| []
| []
| [
"GTEST_STACK_TRACE_DEPTH"
]
| [] | ["GTEST_STACK_TRACE_DEPTH"] | python | 1 | 0 | |
helloworld/helloworld.go |
package main
// If you're just starting out please see the "hello" sample instead. While
// this one is relatively simple, if has quite a few extra flags that can
// be set to control how the code behaves at runtime. So, this is great
// for debugging and exploring those options - but not great if you want
// to see the bare minimum needed to start an app.
// The main purpose of this is to run an App (http server), however, it
// can also be used as a Batch Job if the JOB_INDEX env var is set - which
// is set by the Code Engine batch processor. This can be useful if you want
// the same code to be used for both Apps and Jobs. In this respect it's
// very similar to the app-n-job sample, but this has all of the interesting
// debug/configuration flags that can be tweaked.
import (
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"sort"
"strconv"
"strings"
"time"
)
func Curl(url string) (string, error) {
cmd := exec.Command("curl", "--http0.9", "-s", url)
res, err := cmd.CombinedOutput()
return string(res), err
}
var GlobalDebug = (os.Getenv("DEBUG") != "")
var envs = []string{}
var msg = ""
func Debug(doit bool, format string, args ...interface{}) {
// If either is 'true' then print it
if !GlobalDebug && !doit {
return
}
format = time.Now().Format("2006-01-02 15:04:05 ") + format + "\n"
fmt.Fprintf(os.Stderr, format, args...)
}
// Just print a cool message to the Writer that's passed in
func PrintMessage(w io.Writer) {
fmt.Fprintf(w, `%s:
___ __ ____ ____
/ __)/ \( \( __)
( (__( O )) D ( ) _)
\___)\__/(____/(____)
____ __ _ ___ __ __ _ ____
( __)( ( \ / __)( )( ( \( __)
) _) / /( (_ \ )( / / ) _)
(____)\_)__) \___/(__)\_)__)(____)
`, msg)
fmt.Fprintf(w, "Some Env Vars:\n")
fmt.Fprintf(w, "--------------\n")
for _, env := range envs {
fmt.Fprintf(w, "%s\n", env)
}
}
// This func will handle all incoming HTTP requests
func HandleHTTP(w http.ResponseWriter, r *http.Request) {
body := []byte{}
debug := false
// If there's a body then read it in for later use
if r.Body != nil {
body, _ = ioutil.ReadAll(r.Body)
}
// Turn on debugging if the 'debug' query param is there. Just for
// this request tho - unless global debug is set.
if _, ok := r.URL.Query()["debug"]; ok {
debug = true
}
Debug(debug, "%s:\n%s %s\nHeaders:\n%s\n\nBody:\n%s\n",
time.Now().String(), r.Method, r.URL, r.Header, string(body))
// If the 'sleep' query parameter is passed in then sleep for
// that many seconds
if t := r.URL.Query().Get("sleep"); t != "" {
secs, _ := strconv.Atoi(t)
Debug(debug, "Sleeping %d", secs)
time.Sleep(time.Duration(secs) * time.Second)
}
// If the 'crash' query parameter is passed in then crash!
if r.URL.Query().Get("crash") != "" {
Debug(debug, "Crashing...")
os.Exit(1)
}
// If 'fail' query parameter is there then return its value
// as the HTTP return code, defaults to '500'
if t, ok := r.URL.Query()["fail"]; ok {
status := 500
if t != nil && t[0] != "" {
status, _ = strconv.Atoi(t[0])
}
Debug(debug, "Failing with: %d", status)
w.WriteHeader(status)
}
// If there's no 'body' then just print something neat.
// But if there is a body, echo it back to the client.
if len(body) == 0 {
w.Header().Add("Content-Type", "text/plain")
// http://patorjk.com/software/taag/#p=display&f=Graceful&t=Code%0AEngine
PrintMessage(w)
} else {
fmt.Fprintf(w, string(body)+"\n")
}
}
func main() {
// If env var CRASH is set then crash immediately.
// If its value is of the form HH:MM then crash at the specified time.
// The time is based on the time returned from: http://time.nist.gov:13
// This is useful for testing what happens if the app crashes during
// startup. And the 'time' aspect of it allows for only certain instances
// of the app to crash - for example, we want the app to be created ok
// but then after a minute have any new instances crash.
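// Illustrative value: CRASH=21:30 lets instances started before 21:30 (UTC, per the
// NIST clock used below) come up normally, while any instance started later exits.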
if date := os.Getenv("CRASH"); date != "" { // Just crash!
// get time: curl http://time.nist.gov:13
// result : 58859 20-01-11 21:28:24 00 0 0 129.3 UTC(NIST) *
if len(date) > 3 && date[2] == ':' {
if now, err := Curl("http://time.nist.gov:13"); err == nil {
parts := strings.SplitN(now, " ", 4)
if len(parts) > 3 {
now = parts[2] // Just time part
now = now[:len(date)]
if now > date {
os.Exit(1)
}
}
} else {
Debug(true, "Curl: %s\n%s", now, err)
}
} else {
os.Exit(1)
}
}
// Figure out what message we want to print. You can override this
// via the "MSG" environment variable. Or, you can just change who
// it says hello to via the "TARGET" environment variable
msg = os.Getenv("MSG")
if msg == "" {
target := os.Getenv("TARGET")
if target == "" {
target = "World"
}
msg = "Brad Byer Topol newrev4 Hello " + target + " from"
}
// Get the list of env vars, and sort them for easy reading
envs = os.Environ()
sort.StringSlice(envs).Sort()
Debug(false, "Envs:\n%s", strings.Join(envs, "\n"))
// Real work.
// If we're being run as a Batch Job then the JOB_INDEX env var
// will be set. In which case, just print the message to stdout.
// Otherwise we're an App and we need to start the HTTP server
// to process incoming requests
if jobIndex := os.Getenv("JOB_INDEX"); jobIndex != "" {
fmt.Printf("Hello from helloworld! I'm a batch job! Index: %s\n\n",
jobIndex)
PrintMessage(os.Stdout)
} else {
// Debug the http handler for all requests
http.HandleFunc("/", HandleHTTP)
// HTTP_DELAY will pause for 'delay' seconds before starting the
// HTTP server. This is useful for simulating a long readiness probe
if delay := os.Getenv("HTTP_DELAY"); delay != "" {
if sec, _ := strconv.Atoi(delay); sec != 0 {
Debug(false, "Sleeping %d seconds", sec)
time.Sleep(time.Duration(sec) * time.Second)
}
}
Debug(true, "Listening on port 8080")
http.ListenAndServe(":8080", nil)
}
}
| [
"\"DEBUG\"",
"\"CRASH\"",
"\"MSG\"",
"\"TARGET\"",
"\"JOB_INDEX\"",
"\"HTTP_DELAY\""
]
| []
| [
"MSG",
"CRASH",
"JOB_INDEX",
"DEBUG",
"TARGET",
"HTTP_DELAY"
]
| [] | ["MSG", "CRASH", "JOB_INDEX", "DEBUG", "TARGET", "HTTP_DELAY"] | go | 6 | 0 | |
controllers/finalizers/suit_test.go | // Copyright 2021 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package finalizers
import (
"context"
"os"
"path/filepath"
"testing"
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
"github.com/go-logr/logr"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"go.uber.org/fx"
"github.com/chaos-mesh/chaos-mesh/api/v1alpha1"
"github.com/chaos-mesh/chaos-mesh/controllers/schedule/utils"
"github.com/chaos-mesh/chaos-mesh/controllers/types"
"github.com/chaos-mesh/chaos-mesh/controllers/utils/test"
"k8s.io/client-go/rest"
"k8s.io/kubectl/pkg/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var app *fx.App
var k8sClient client.Client
var lister *utils.ActiveLister
var config *rest.Config
var testEnv *envtest.Environment
var setupLog = ctrl.Log.WithName("setup")
func TestSchedule(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecsWithDefaultAndCustomReporters(t,
"Schedule suit",
[]Reporter{printer.NewlineReporter{}})
}
var _ = BeforeSuite(func() {
logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
By("bootstrapping test environment")
t := true
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
testEnv = &envtest.Environment{
UseExistingCluster: &t,
}
} else {
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")},
}
}
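// Illustrative usage: running the suite with TEST_USE_EXISTING_CLUSTER=true points
// envtest at an existing cluster (via the usual kubeconfig resolution) instead of
// starting a local control plane from the CRD manifests above.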
err := v1alpha1.SchemeBuilder.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
config, err = testEnv.Start()
Expect(err).ToNot(HaveOccurred())
Expect(config).ToNot(BeNil())
k8sClient, err = client.New(config, client.Options{Scheme: scheme.Scheme})
Expect(err).ToNot(HaveOccurred())
Expect(k8sClient).ToNot(BeNil())
app = fx.New(
fx.Options(
test.Module,
fx.Provide(
fx.Annotated{
Group: "controller",
Target: NewController,
},
),
fx.Supply(config),
types.ChaosObjects,
),
fx.Invoke(Run),
)
startCtx, cancel := context.WithTimeout(context.Background(), app.StartTimeout())
defer cancel()
if err := app.Start(startCtx); err != nil {
setupLog.Error(err, "fail to start manager")
}
Expect(err).ToNot(HaveOccurred())
}, 60)
var _ = AfterSuite(func() {
By("tearing down the test environment")
stopCtx, cancel := context.WithTimeout(context.Background(), app.StopTimeout())
defer cancel()
if err := app.Stop(stopCtx); err != nil {
setupLog.Error(err, "fail to stop manager")
}
err := testEnv.Stop()
Expect(err).ToNot(HaveOccurred())
})
type RunParams struct {
fx.In
Mgr ctrl.Manager
Logger logr.Logger
Controllers []types.Controller `group:"controller"`
Objs []types.Object `group:"objs"`
}
func Run(params RunParams) error {
lister = utils.NewActiveLister(k8sClient, params.Logger)
return nil
}
| [
"\"TEST_USE_EXISTING_CLUSTER\""
]
| []
| [
"TEST_USE_EXISTING_CLUSTER"
]
| [] | ["TEST_USE_EXISTING_CLUSTER"] | go | 1 | 0 | |
vips.go | package bimg
/*
#cgo pkg-config: vips
#include "vips.h"
*/
import "C"
import (
"errors"
"fmt"
"math"
"os"
"runtime"
"strings"
"sync"
"unsafe"
)
// VipsVersion exposes the current libvips semantic version
const VipsVersion = string(C.VIPS_VERSION)
// VipsMajorVersion exposes the current libvips major version number
const VipsMajorVersion = int(C.VIPS_MAJOR_VERSION)
// VipsMinorVersion exposes the current libvips minor version number
const VipsMinorVersion = int(C.VIPS_MINOR_VERSION)
const (
maxCacheMem = 100 * 1024 * 1024
maxCacheSize = 500
)
var (
m sync.Mutex
initialized bool
)
// VipsMemoryInfo represents the memory stats provided by libvips.
type VipsMemoryInfo struct {
Memory int64
MemoryHighwater int64
Allocations int64
}
// vipsSaveOptions represents the internal option used to talk with libvips.
type vipsSaveOptions struct {
Speed int
Quality int
Compression int
Type ImageType
Interlace bool
NoProfile bool
StripMetadata bool
Lossless bool
InputICC string // Absolute path to the input ICC profile
OutputICC string // Absolute path to the output ICC profile
Interpretation Interpretation
Palette bool
}
type vipsWatermarkOptions struct {
Width C.int
DPI C.int
Margin C.int
NoReplicate C.int
Opacity C.float
Background [3]C.double
}
type vipsWatermarkImageOptions struct {
Left C.int
Top C.int
Opacity C.float
}
type vipsWatermarkTextOptions struct {
Text *C.char
Font *C.char
}
func init() {
Initialize()
}
// Initialize is used to explicitly start libvips in thread-safe way.
// Only call this function if you have previously turned off libvips.
func Initialize() {
if C.VIPS_MAJOR_VERSION <= 7 && C.VIPS_MINOR_VERSION < 40 {
panic("unsupported libvips version!")
}
m.Lock()
runtime.LockOSThread()
defer m.Unlock()
defer runtime.UnlockOSThread()
err := C.vips_init(C.CString("bimg"))
if err != 0 {
panic("unable to start vips!")
}
// Set libvips cache params
C.vips_cache_set_max_mem(maxCacheMem)
C.vips_cache_set_max(maxCacheSize)
// Define a custom thread concurrency limit in libvips (this may generate thread-unsafe issues)
// See: https://github.com/jcupitt/libvips/issues/261#issuecomment-92850414
if os.Getenv("VIPS_CONCURRENCY") == "" {
C.vips_concurrency_set(1)
}
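// Illustrative override: setting VIPS_CONCURRENCY (e.g. VIPS_CONCURRENCY=4 in the
// environment) skips the single-thread default above and leaves the worker count
// to libvips, which honours that variable itself.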
// Enable libvips cache tracing
if os.Getenv("VIPS_TRACE") != "" {
C.vips_enable_cache_set_trace()
}
initialized = true
}
// Shutdown is used to shutdown libvips in a thread-safe way.
// You can call this to drop caches as well.
// If libvips has not been initialized beforehand, this function is a no-op.
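// A minimal sketch of the manual lifecycle this enables (illustrative only):
//
//   bimg.Shutdown()   // stop libvips and release its caches
//   // ... later, before processing images again ...
//   bimg.Initialize()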
func Shutdown() {
m.Lock()
defer m.Unlock()
if initialized {
C.vips_shutdown()
initialized = false
}
}
// VipsCacheSetMaxMem Sets the maximum amount of tracked memory allowed before the vips operation cache
// begins to drop entries.
func VipsCacheSetMaxMem(maxCacheMem int) {
C.vips_cache_set_max_mem(C.size_t(maxCacheMem))
}
// VipsCacheSetMax sets the maximum number of operations to keep in the vips operation cache.
func VipsCacheSetMax(maxCacheSize int) {
C.vips_cache_set_max(C.int(maxCacheSize))
}
// VipsCacheDropAll drops the vips operation cache, freeing the allocated memory.
func VipsCacheDropAll() {
C.vips_cache_drop_all()
}
// VipsVectorSetEnabled enables or disables SIMD vector instructions. This can give speed-up,
// but can also be unstable on some systems and versions.
func VipsVectorSetEnabled(enable bool) {
flag := 0
if enable {
flag = 1
}
C.vips_vector_set_enabled(C.int(flag))
}
// VipsDebugInfo outputs to stdout libvips collected data. Useful for debugging.
func VipsDebugInfo() {
C.im__print_all()
}
// VipsMemory gets memory info stats from libvips (cache size, memory allocs...)
func VipsMemory() VipsMemoryInfo {
return VipsMemoryInfo{
Memory: int64(C.vips_tracked_get_mem()),
MemoryHighwater: int64(C.vips_tracked_get_mem_highwater()),
Allocations: int64(C.vips_tracked_get_allocs()),
}
}
// VipsIsTypeSupported returns true if the given image type
// is supported by the current libvips compilation.
func VipsIsTypeSupported(t ImageType) bool {
if t == JPEG {
return int(C.vips_type_find_bridge(C.JPEG)) != 0
}
if t == WEBP {
return int(C.vips_type_find_bridge(C.WEBP)) != 0
}
if t == PNG {
return int(C.vips_type_find_bridge(C.PNG)) != 0
}
if t == GIF {
return int(C.vips_type_find_bridge(C.GIF)) != 0
}
if t == PDF {
return int(C.vips_type_find_bridge(C.PDF)) != 0
}
if t == SVG {
return int(C.vips_type_find_bridge(C.SVG)) != 0
}
if t == TIFF {
return int(C.vips_type_find_bridge(C.TIFF)) != 0
}
if t == MAGICK {
return int(C.vips_type_find_bridge(C.MAGICK)) != 0
}
if t == HEIF {
return int(C.vips_type_find_bridge(C.HEIF)) != 0
}
if t == AVIF {
return int(C.vips_type_find_bridge(C.HEIF)) != 0
}
return false
}
// VipsIsTypeSupportedSave returns true if the given image type
// is supported by the current libvips compilation for the
// save operation.
func VipsIsTypeSupportedSave(t ImageType) bool {
if t == JPEG {
return int(C.vips_type_find_save_bridge(C.JPEG)) != 0
}
if t == WEBP {
return int(C.vips_type_find_save_bridge(C.WEBP)) != 0
}
if t == PNG {
return int(C.vips_type_find_save_bridge(C.PNG)) != 0
}
if t == TIFF {
return int(C.vips_type_find_save_bridge(C.TIFF)) != 0
}
if t == HEIF {
return int(C.vips_type_find_save_bridge(C.HEIF)) != 0
}
if t == AVIF {
return int(C.vips_type_find_save_bridge(C.HEIF)) != 0
}
if t == GIF {
return int(C.vips_type_find_save_bridge(C.GIF)) != 0
}
return false
}
func vipsExifStringTag(image *C.VipsImage, tag string) string {
return vipsExifShort(C.GoString(C.vips_exif_tag(image, C.CString(tag))))
}
func vipsExifIntTag(image *C.VipsImage, tag string) int {
return int(C.vips_exif_tag_to_int(image, C.CString(tag)))
}
func vipsExifOrientation(image *C.VipsImage) int {
return int(C.vips_exif_orientation(image))
}
func vipsExifShort(s string) string {
i := strings.Index(s, " (")
if i > 0 {
return s[:i]
}
return s
}
func vipsHasAlpha(image *C.VipsImage) bool {
return int(C.has_alpha_channel(image)) > 0
}
func vipsHasProfile(image *C.VipsImage) bool {
return int(C.has_profile_embed(image)) > 0
}
func vipsWindowSize(name string) float64 {
cname := C.CString(name)
defer C.free(unsafe.Pointer(cname))
return float64(C.interpolator_window_size(cname))
}
func vipsSpace(image *C.VipsImage) string {
return C.GoString(C.vips_enum_nick_bridge(image))
}
func vipsRotate(image *C.VipsImage, angle Angle) (*C.VipsImage, error) {
var out *C.VipsImage
defer C.g_object_unref(C.gpointer(image))
err := C.vips_rotate_bridge(image, &out, C.int(angle))
if err != 0 {
return nil, catchVipsError()
}
return out, nil
}
func vipsAutoRotate(image *C.VipsImage) (*C.VipsImage, error) {
var out *C.VipsImage
defer C.g_object_unref(C.gpointer(image))
err := C.vips_autorot_bridge(image, &out)
if err != 0 {
return nil, catchVipsError()
}
return out, nil
}
func vipsTransformICC(image *C.VipsImage, inputICC string, outputICC string) (*C.VipsImage, error) {
var out *C.VipsImage
defer C.g_object_unref(C.gpointer(image))
outputIccPath := C.CString(outputICC)
defer C.free(unsafe.Pointer(outputIccPath))
inputIccPath := C.CString(inputICC)
defer C.free(unsafe.Pointer(inputIccPath))
err := C.vips_icc_transform_with_default_bridge(image, &out, outputIccPath, inputIccPath)
//err := C.vips_icc_transform_bridge2(image, &outImage, outputIccPath, inputIccPath)
if int(err) != 0 {
return nil, catchVipsError()
}
return out, nil
}
func vipsFlip(image *C.VipsImage, direction Direction) (*C.VipsImage, error) {
var out *C.VipsImage
defer C.g_object_unref(C.gpointer(image))
err := C.vips_flip_bridge(image, &out, C.int(direction))
if err != 0 {
return nil, catchVipsError()
}
return out, nil
}
func vipsZoom(image *C.VipsImage, zoom int) (*C.VipsImage, error) {
var out *C.VipsImage
defer C.g_object_unref(C.gpointer(image))
err := C.vips_zoom_bridge(image, &out, C.int(zoom), C.int(zoom))
if err != 0 {
return nil, catchVipsError()
}
return out, nil
}
func vipsWatermark(image *C.VipsImage, w Watermark) (*C.VipsImage, error) {
var out *C.VipsImage
// Defaults
noReplicate := 0
if w.NoReplicate {
noReplicate = 1
}
text := C.CString(w.Text)
font := C.CString(w.Font)
background := [3]C.double{C.double(w.Background.R), C.double(w.Background.G), C.double(w.Background.B)}
textOpts := vipsWatermarkTextOptions{text, font}
opts := vipsWatermarkOptions{C.int(w.Width), C.int(w.DPI), C.int(w.Margin), C.int(noReplicate), C.float(w.Opacity), background}
defer C.free(unsafe.Pointer(text))
defer C.free(unsafe.Pointer(font))
err := C.vips_watermark(image, &out, (*C.WatermarkTextOptions)(unsafe.Pointer(&textOpts)), (*C.WatermarkOptions)(unsafe.Pointer(&opts)))
if err != 0 {
return nil, catchVipsError()
}
return out, nil
}
func vipsRead(buf []byte) (*C.VipsImage, ImageType, error) {
var image *C.VipsImage
imageType := vipsImageType(buf)
if imageType == UNKNOWN {
return nil, UNKNOWN, errors.New("Unsupported image format")
}
length := C.size_t(len(buf))
imageBuf := unsafe.Pointer(&buf[0])
err := C.vips_init_image(imageBuf, length, C.int(imageType), &image)
if err != 0 {
return nil, UNKNOWN, catchVipsError()
}
return image, imageType, nil
}
func vipsColourspaceIsSupportedBuffer(buf []byte) (bool, error) {
image, _, err := vipsRead(buf)
if err != nil {
return false, err
}
C.g_object_unref(C.gpointer(image))
return vipsColourspaceIsSupported(image), nil
}
func vipsColourspaceIsSupported(image *C.VipsImage) bool {
return int(C.vips_colourspace_issupported_bridge(image)) == 1
}
func vipsInterpretationBuffer(buf []byte) (Interpretation, error) {
image, _, err := vipsRead(buf)
if err != nil {
return InterpretationError, err
}
interp := vipsInterpretation(image)
C.g_object_unref(C.gpointer(image))
return interp, nil
}
func vipsInterpretation(image *C.VipsImage) Interpretation {
return Interpretation(C.vips_image_guess_interpretation_bridge(image))
}
func vipsFlattenBackground(image *C.VipsImage, background Color) (*C.VipsImage, error) {
var outImage *C.VipsImage
backgroundC := [3]C.double{
C.double(background.R),
C.double(background.G),
C.double(background.B),
}
if vipsHasAlpha(image) {
err := C.vips_flatten_background_brigde(image, &outImage,
backgroundC[0], backgroundC[1], backgroundC[2])
if int(err) != 0 {
return nil, catchVipsError()
}
C.g_object_unref(C.gpointer(image))
image = outImage
}
return image, nil
}
func vipsPreSave(image *C.VipsImage, o *vipsSaveOptions) (*C.VipsImage, error) {
var outImage *C.VipsImage
// Remove ICC profile metadata
if o.NoProfile {
C.remove_profile(image)
}
// Use a default interpretation and cast it to C type
if o.Interpretation == 0 {
o.Interpretation = InterpretationSRGB
}
interpretation := C.VipsInterpretation(o.Interpretation)
// Apply the proper colour space
if vipsColourspaceIsSupported(image) {
err := C.vips_colourspace_bridge(image, &outImage, interpretation)
if int(err) != 0 {
return nil, catchVipsError()
}
image = outImage
}
if o.OutputICC != "" && o.InputICC != "" {
outputIccPath := C.CString(o.OutputICC)
defer C.free(unsafe.Pointer(outputIccPath))
inputIccPath := C.CString(o.InputICC)
defer C.free(unsafe.Pointer(inputIccPath))
err := C.vips_icc_transform_with_default_bridge(image, &outImage, outputIccPath, inputIccPath)
if int(err) != 0 {
return nil, catchVipsError()
}
C.g_object_unref(C.gpointer(image))
return outImage, nil
}
if o.OutputICC != "" && vipsHasProfile(image) {
outputIccPath := C.CString(o.OutputICC)
defer C.free(unsafe.Pointer(outputIccPath))
err := C.vips_icc_transform_bridge(image, &outImage, outputIccPath)
if int(err) != 0 {
return nil, catchVipsError()
}
C.g_object_unref(C.gpointer(image))
image = outImage
}
return image, nil
}
func vipsSave(image *C.VipsImage, o vipsSaveOptions) ([]byte, error) {
defer C.g_object_unref(C.gpointer(image))
tmpImage, err := vipsPreSave(image, &o)
if err != nil {
return nil, err
}
// When an image has an unsupported color space, vipsPreSave
// returns the pointer of the image passed to it unmodified.
// When this occurs, we must take care to not dereference the
// original image a second time; we may otherwise erroneously
// free the object twice.
if tmpImage != image {
defer C.g_object_unref(C.gpointer(tmpImage))
}
length := C.size_t(0)
saveErr := C.int(0)
interlace := C.int(boolToInt(o.Interlace))
quality := C.int(o.Quality)
strip := C.int(boolToInt(o.StripMetadata))
lossless := C.int(boolToInt(o.Lossless))
palette := C.int(boolToInt(o.Palette))
speed := C.int(o.Speed)
if o.Type != 0 && !IsTypeSupportedSave(o.Type) {
return nil, fmt.Errorf("VIPS cannot save to %#v", ImageTypes[o.Type])
}
var ptr unsafe.Pointer
switch o.Type {
case WEBP:
saveErr = C.vips_webpsave_bridge(tmpImage, &ptr, &length, strip, quality, lossless)
case PNG:
saveErr = C.vips_pngsave_bridge(tmpImage, &ptr, &length, strip, C.int(o.Compression), quality, interlace, palette, speed)
case TIFF:
saveErr = C.vips_tiffsave_bridge(tmpImage, &ptr, &length)
case HEIF:
saveErr = C.vips_heifsave_bridge(tmpImage, &ptr, &length, strip, quality, lossless)
case AVIF:
saveErr = C.vips_avifsave_bridge(tmpImage, &ptr, &length, strip, quality, lossless, speed)
case GIF:
saveErr = C.vips_gifsave_bridge(tmpImage, &ptr, &length, strip)
default:
saveErr = C.vips_jpegsave_bridge(tmpImage, &ptr, &length, strip, quality, interlace)
}
if int(saveErr) != 0 {
return nil, catchVipsError()
}
buf := C.GoBytes(ptr, C.int(length))
// Clean up
C.g_free(C.gpointer(ptr))
C.vips_error_clear()
return buf, nil
}
func getImageBuffer(image *C.VipsImage) ([]byte, error) {
var ptr unsafe.Pointer
length := C.size_t(0)
interlace := C.int(0)
quality := C.int(100)
err := C.int(0)
err = C.vips_jpegsave_bridge(image, &ptr, &length, 1, quality, interlace)
if int(err) != 0 {
return nil, catchVipsError()
}
defer C.g_free(C.gpointer(ptr))
defer C.vips_error_clear()
return C.GoBytes(ptr, C.int(length)), nil
}
func vipsExtract(image *C.VipsImage, left, top, width, height int) (*C.VipsImage, error) {
var buf *C.VipsImage
defer C.g_object_unref(C.gpointer(image))
if width > maxSize || height > maxSize {
return nil, errors.New("Maximum image size exceeded")
}
top, left = max(top), max(left)
err := C.vips_extract_area_bridge(image, &buf, C.int(left), C.int(top), C.int(width), C.int(height))
if err != 0 {
return nil, catchVipsError()
}
return buf, nil
}
func vipsSmartCrop(image *C.VipsImage, width, height int) (*C.VipsImage, error) {
var buf *C.VipsImage
defer C.g_object_unref(C.gpointer(image))
if width > maxSize || height > maxSize {
return nil, errors.New("Maximum image size exceeded")
}
err := C.vips_smartcrop_bridge(image, &buf, C.int(width), C.int(height))
if err != 0 {
return nil, catchVipsError()
}
return buf, nil
}
func vipsTrim(image *C.VipsImage, background Color, threshold float64) (int, int, int, int, error) {
defer C.g_object_unref(C.gpointer(image))
top := C.int(0)
left := C.int(0)
width := C.int(0)
height := C.int(0)
err := C.vips_find_trim_bridge(image,
&top, &left, &width, &height,
C.double(background.R), C.double(background.G), C.double(background.B),
C.double(threshold))
if err != 0 {
return 0, 0, 0, 0, catchVipsError()
}
return int(top), int(left), int(width), int(height), nil
}
func vipsShrinkJpeg(buf []byte, input *C.VipsImage, shrink int) (*C.VipsImage, error) {
var image *C.VipsImage
var ptr = unsafe.Pointer(&buf[0])
defer C.g_object_unref(C.gpointer(input))
err := C.vips_jpegload_buffer_shrink(ptr, C.size_t(len(buf)), &image, C.int(shrink))
if err != 0 {
return nil, catchVipsError()
}
return image, nil
}
func vipsShrinkWebp(buf []byte, input *C.VipsImage, shrink int) (*C.VipsImage, error) {
var image *C.VipsImage
var ptr = unsafe.Pointer(&buf[0])
defer C.g_object_unref(C.gpointer(input))
err := C.vips_webpload_buffer_shrink(ptr, C.size_t(len(buf)), &image, C.int(shrink))
if err != 0 {
return nil, catchVipsError()
}
return image, nil
}
func vipsShrink(input *C.VipsImage, shrink int) (*C.VipsImage, error) {
var image *C.VipsImage
defer C.g_object_unref(C.gpointer(input))
err := C.vips_shrink_bridge(input, &image, C.double(float64(shrink)), C.double(float64(shrink)))
if err != 0 {
return nil, catchVipsError()
}
return image, nil
}
func vipsReduce(input *C.VipsImage, xshrink float64, yshrink float64) (*C.VipsImage, error) {
var image *C.VipsImage
defer C.g_object_unref(C.gpointer(input))
err := C.vips_reduce_bridge(input, &image, C.double(xshrink), C.double(yshrink))
if err != 0 {
return nil, catchVipsError()
}
return image, nil
}
func vipsEmbed(input *C.VipsImage, left, top, width, height int, extend Extend, background Color) (*C.VipsImage, error) {
var image *C.VipsImage
// Max extend value, see: https://libvips.github.io/libvips/API/current/libvips-conversion.html#VipsExtend
if extend > 5 {
extend = ExtendBackground
}
defer C.g_object_unref(C.gpointer(input))
err := C.vips_embed_bridge(input, &image, C.int(left), C.int(top), C.int(width),
C.int(height), C.int(extend), C.double(background.R), C.double(background.G), C.double(background.B))
if err != 0 {
return nil, catchVipsError()
}
return image, nil
}
func vipsAffine(input *C.VipsImage, residualx, residualy float64, i Interpolator, extend Extend) (*C.VipsImage, error) {
if extend > 5 {
extend = ExtendBackground
}
var image *C.VipsImage
cstring := C.CString(i.String())
interpolator := C.vips_interpolate_new(cstring)
defer C.free(unsafe.Pointer(cstring))
defer C.g_object_unref(C.gpointer(input))
defer C.g_object_unref(C.gpointer(interpolator))
err := C.vips_affine_interpolator(input, &image, C.double(residualx), 0, 0, C.double(residualy), interpolator, C.int(extend))
if err != 0 {
return nil, catchVipsError()
}
return image, nil
}
func vipsImageType(buf []byte) ImageType {
if len(buf) < 12 {
return UNKNOWN
}
if buf[0] == 0xFF && buf[1] == 0xD8 && buf[2] == 0xFF {
return JPEG
}
if IsTypeSupported(GIF) && buf[0] == 0x47 && buf[1] == 0x49 && buf[2] == 0x46 {
return GIF
}
if buf[0] == 0x89 && buf[1] == 0x50 && buf[2] == 0x4E && buf[3] == 0x47 {
return PNG
}
if IsTypeSupported(TIFF) &&
((buf[0] == 0x49 && buf[1] == 0x49 && buf[2] == 0x2A && buf[3] == 0x0) ||
(buf[0] == 0x4D && buf[1] == 0x4D && buf[2] == 0x0 && buf[3] == 0x2A)) {
return TIFF
}
if IsTypeSupported(PDF) && buf[0] == 0x25 && buf[1] == 0x50 && buf[2] == 0x44 && buf[3] == 0x46 {
return PDF
}
if IsTypeSupported(WEBP) && buf[8] == 0x57 && buf[9] == 0x45 && buf[10] == 0x42 && buf[11] == 0x50 {
return WEBP
}
if IsTypeSupported(SVG) && IsSVGImage(buf) {
return SVG
}
if IsTypeSupported(MAGICK) && strings.HasSuffix(readImageType(buf), "MagickBuffer") {
return MAGICK
}
// NOTE: libheif currently only supports heic sub types; see:
// https://github.com/strukturag/libheif/issues/83#issuecomment-421427091
if IsTypeSupported(HEIF) && buf[4] == 0x66 && buf[5] == 0x74 && buf[6] == 0x79 && buf[7] == 0x70 &&
buf[8] == 0x68 && buf[9] == 0x65 && buf[10] == 0x69 && buf[11] == 0x63 {
// This is a HEIC file, ftypheic
return HEIF
}
if IsTypeSupported(HEIF) && buf[4] == 0x66 && buf[5] == 0x74 && buf[6] == 0x79 && buf[7] == 0x70 &&
buf[8] == 0x6d && buf[9] == 0x69 && buf[10] == 0x66 && buf[11] == 0x31 {
// This is a HEIF file, ftypmif1
return HEIF
}
if IsTypeSupported(HEIF) && buf[4] == 0x66 && buf[5] == 0x74 && buf[6] == 0x79 && buf[7] == 0x70 &&
buf[8] == 0x6d && buf[9] == 0x73 && buf[10] == 0x66 && buf[11] == 0x31 {
// This is a HEIFS file, ftypmsf1
return HEIF
}
if IsTypeSupported(HEIF) && buf[4] == 0x66 && buf[5] == 0x74 && buf[6] == 0x79 && buf[7] == 0x70 &&
buf[8] == 0x68 && buf[9] == 0x65 && buf[10] == 0x69 && buf[11] == 0x73 {
// This is a HEIFS file, ftypheis
return HEIF
}
if IsTypeSupported(HEIF) && buf[4] == 0x66 && buf[5] == 0x74 && buf[6] == 0x79 && buf[7] == 0x70 &&
buf[8] == 0x68 && buf[9] == 0x65 && buf[10] == 0x76 && buf[11] == 0x63 {
// This is a HEIFS file, ftyphevc
return HEIF
}
if IsTypeSupported(HEIF) && buf[4] == 0x66 && buf[5] == 0x74 && buf[6] == 0x79 && buf[7] == 0x70 &&
buf[8] == 0x61 && buf[9] == 0x76 && buf[10] == 0x69 && buf[11] == 0x66 {
return AVIF
}
return UNKNOWN
}
func readImageType(buf []byte) string {
length := C.size_t(len(buf))
imageBuf := unsafe.Pointer(&buf[0])
load := C.vips_foreign_find_load_buffer(imageBuf, length)
return C.GoString(load)
}
func catchVipsError() error {
s := C.GoString(C.vips_error_buffer())
C.vips_error_clear()
C.vips_thread_shutdown()
return errors.New(s)
}
func boolToInt(b bool) int {
if b {
return 1
}
return 0
}
func vipsGaussianBlur(image *C.VipsImage, o GaussianBlur) (*C.VipsImage, error) {
var out *C.VipsImage
defer C.g_object_unref(C.gpointer(image))
err := C.vips_gaussblur_bridge(image, &out, C.double(o.Sigma), C.double(o.MinAmpl))
if err != 0 {
return nil, catchVipsError()
}
return out, nil
}
func vipsSharpen(image *C.VipsImage, o Sharpen) (*C.VipsImage, error) {
var out *C.VipsImage
defer C.g_object_unref(C.gpointer(image))
err := C.vips_sharpen_bridge(image, &out, C.int(o.Radius), C.double(o.X1), C.double(o.Y2), C.double(o.Y3), C.double(o.M1), C.double(o.M2))
if err != 0 {
return nil, catchVipsError()
}
return out, nil
}
func max(x int) int {
return int(math.Max(float64(x), 0))
}
func vipsDrawWatermark(image *C.VipsImage, o WatermarkImage) (*C.VipsImage, error) {
var out *C.VipsImage
watermark, _, e := vipsRead(o.Buf)
if e != nil {
return nil, e
}
opts := vipsWatermarkImageOptions{C.int(o.Left), C.int(o.Top), C.float(o.Opacity)}
err := C.vips_watermark_image(image, watermark, &out, (*C.WatermarkImageOptions)(unsafe.Pointer(&opts)))
if err != 0 {
return nil, catchVipsError()
}
return out, nil
}
func vipsGamma(image *C.VipsImage, Gamma float64) (*C.VipsImage, error) {
var out *C.VipsImage
defer C.g_object_unref(C.gpointer(image))
err := C.vips_gamma_bridge(image, &out, C.double(Gamma))
if err != 0 {
return nil, catchVipsError()
}
return out, nil
}
func vipsBrightness(image *C.VipsImage, brightness float64) (*C.VipsImage, error) {
var out *C.VipsImage
defer C.g_object_unref(C.gpointer(image))
err := C.vips_brightness_bridge(image, &out, C.double(brightness))
if err != 0 {
return nil, catchVipsError()
}
return out, nil
}
func vipsContrast(image *C.VipsImage, contrast float64) (*C.VipsImage, error) {
var out *C.VipsImage
defer C.g_object_unref(C.gpointer(image))
err := C.vips_contrast_bridge(image, &out, C.double(contrast))
if err != 0 {
return nil, catchVipsError()
}
return out, nil
}
| [
"\"VIPS_CONCURRENCY\"",
"\"VIPS_TRACE\""
]
| []
| [
"VIPS_TRACE",
"VIPS_CONCURRENCY"
]
| [] | ["VIPS_TRACE", "VIPS_CONCURRENCY"] | go | 2 | 0 | |
cmd/podman/common.go | package main
import (
"context"
"fmt"
"os"
"strings"
"github.com/containers/buildah"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage"
"github.com/fatih/camelcase"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
var (
stores = make(map[storage.Store]struct{})
)
const (
idTruncLength = 12
)
func splitCamelCase(src string) string {
entries := camelcase.Split(src)
return strings.Join(entries, " ")
}
func shortID(id string) string {
if len(id) > idTruncLength {
return id[:idTruncLength]
}
return id
}
// checkAllAndLatest checks that --all and --latest are used correctly
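// For example (illustrative CLI usage): `podman stop --all` and `podman stop <ctr>`
// both pass, while `podman stop --all --latest` and `podman stop --all <ctr>` are
// rejected here unless ignoreArgLen is set.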
func checkAllAndLatest(c *cobra.Command, args []string, ignoreArgLen bool) error {
argLen := len(args)
if c.Flags().Lookup("all") == nil || c.Flags().Lookup("latest") == nil {
return errors.New("unable to lookup values for 'latest' or 'all'")
}
all, _ := c.Flags().GetBool("all")
latest, _ := c.Flags().GetBool("latest")
if all && latest {
return errors.Errorf("--all and --latest cannot be used together")
}
if ignoreArgLen {
return nil
}
if (all || latest) && argLen > 0 {
return errors.Errorf("no arguments are needed with --all or --latest")
}
if argLen < 1 && !all && !latest {
return errors.Errorf("you must provide at least one name or id")
}
return nil
}
// noSubArgs checks that there are no further positional parameters
func noSubArgs(c *cobra.Command, args []string) error {
if len(args) > 0 {
return errors.Errorf("`%s` takes no arguments", c.CommandPath())
}
return nil
}
func commandRunE() func(*cobra.Command, []string) error {
return func(cmd *cobra.Command, args []string) error {
if len(args) > 0 {
return errors.Errorf("unrecognized command `%s %s`\nTry '%s --help' for more information.", cmd.CommandPath(), args[0], cmd.CommandPath())
} else {
return errors.Errorf("missing command '%s COMMAND'\nTry '%s --help' for more information.", cmd.CommandPath(), cmd.CommandPath())
}
}
}
// getAllOrLatestContainers tries to return the correct list of containers
// depending if --all, --latest or <container-id> is used.
// It requires the Context (c) and the Runtime (runtime). As different
// commands are using different container state for the --all option
// the desired state has to be specified in filterState. If no filter
// is desired a -1 can be used to get all containers. For a better
// error message, if the filter fails, a corresponding verb can be
// specified which will then appear in the error message.
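// Illustrative call sites (not taken verbatim from this file):
//
//   running, err := getAllOrLatestContainers(c, runtime, libpod.ContainerStateRunning, "running")
//   anyState, err := getAllOrLatestContainers(c, runtime, -1, "")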
func getAllOrLatestContainers(c *cliconfig.PodmanCommand, runtime *libpod.Runtime, filterState libpod.ContainerStatus, verb string) ([]*libpod.Container, error) {
var containers []*libpod.Container
var lastError error
var err error
if c.Bool("all") {
if filterState != -1 {
var filterFuncs []libpod.ContainerFilter
filterFuncs = append(filterFuncs, func(c *libpod.Container) bool {
state, _ := c.State()
return state == filterState
})
containers, err = runtime.GetContainers(filterFuncs...)
} else {
containers, err = runtime.GetContainers()
}
if err != nil {
return nil, errors.Wrapf(err, "unable to get %s containers", verb)
}
} else if c.Bool("latest") {
lastCtr, err := runtime.GetLatestContainer()
if err != nil {
return nil, errors.Wrapf(err, "unable to get latest container")
}
containers = append(containers, lastCtr)
} else {
args := c.InputArgs
for _, i := range args {
container, err := runtime.LookupContainer(i)
if err != nil {
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
}
lastError = errors.Wrapf(err, "unable to find container %s", i)
}
if container != nil {
// This is here to make sure this does not return [<nil>] but only nil
containers = append(containers, container)
}
}
}
return containers, lastError
}
// getContext returns a non-nil, empty context
func getContext() context.Context {
if Ctx != nil {
return Ctx
}
return context.TODO()
}
func getDefaultNetwork() string {
if rootless.IsRootless() {
return "slirp4netns"
}
return "bridge"
}
func getCreateFlags(c *cliconfig.PodmanCommand) {
createFlags := c.Flags()
createFlags.StringSlice(
"add-host", []string{},
"Add a custom host-to-IP mapping (host:ip) (default [])",
)
createFlags.StringSlice(
"annotation", []string{},
"Add annotations to container (key:value) (default [])",
)
createFlags.StringSliceP(
"attach", "a", []string{},
"Attach to STDIN, STDOUT or STDERR (default [])",
)
createFlags.String(
"blkio-weight", "",
"Block IO weight (relative weight) accepts a weight value between 10 and 1000.",
)
createFlags.StringSlice(
"blkio-weight-device", []string{},
"Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`)",
)
createFlags.StringSlice(
"cap-add", []string{},
"Add capabilities to the container",
)
createFlags.StringSlice(
"cap-drop", []string{},
"Drop capabilities from the container",
)
createFlags.String(
"cgroup-parent", "",
"Optional parent cgroup for the container",
)
createFlags.String(
"cidfile", "",
"Write the container ID to the file",
)
createFlags.String(
"conmon-pidfile", "",
"Path to the file that will receive the PID of conmon",
)
createFlags.Uint64(
"cpu-period", 0,
"Limit the CPU CFS (Completely Fair Scheduler) period",
)
createFlags.Int64(
"cpu-quota", 0,
"Limit the CPU CFS (Completely Fair Scheduler) quota",
)
createFlags.Uint64(
"cpu-rt-period", 0,
"Limit the CPU real-time period in microseconds",
)
createFlags.Int64(
"cpu-rt-runtime", 0,
"Limit the CPU real-time runtime in microseconds",
)
createFlags.Uint64(
"cpu-shares", 0,
"CPU shares (relative weight)",
)
createFlags.Float64(
"cpus", 0,
"Number of CPUs. The default is 0.000 which means no limit",
)
createFlags.String(
"cpuset-cpus", "",
"CPUs in which to allow execution (0-3, 0,1)",
)
createFlags.String(
"cpuset-mems", "",
"Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.",
)
createFlags.BoolP(
"detach", "d", false,
"Run container in background and print container ID",
)
createFlags.String(
"detach-keys", "",
"Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`",
)
createFlags.StringSlice(
"device", []string{},
"Add a host device to the container (default [])",
)
createFlags.StringSlice(
"device-read-bps", []string{},
"Limit read rate (bytes per second) from a device (e.g. --device-read-bps=/dev/sda:1mb)",
)
createFlags.StringSlice(
"device-read-iops", []string{},
"Limit read rate (IO per second) from a device (e.g. --device-read-iops=/dev/sda:1000)",
)
createFlags.StringSlice(
"device-write-bps", []string{},
"Limit write rate (bytes per second) to a device (e.g. --device-write-bps=/dev/sda:1mb)",
)
createFlags.StringSlice(
"device-write-iops", []string{},
"Limit write rate (IO per second) to a device (e.g. --device-write-iops=/dev/sda:1000)",
)
createFlags.StringSlice(
"dns", []string{},
"Set custom DNS servers",
)
createFlags.StringSlice(
"dns-opt", []string{},
"Set custom DNS options",
)
createFlags.StringSlice(
"dns-search", []string{},
"Set custom DNS search domains",
)
createFlags.String(
"entrypoint", "",
"Overwrite the default ENTRYPOINT of the image",
)
createFlags.StringSliceP(
"env", "e", []string{},
"Set environment variables in container",
)
createFlags.StringSlice(
"env-file", []string{},
"Read in a file of environment variables",
)
createFlags.StringSlice(
"expose", []string{},
"Expose a port or a range of ports (default [])",
)
createFlags.StringSlice(
"gidmap", []string{},
"GID map to use for the user namespace",
)
createFlags.StringSlice(
"group-add", []string{},
"Add additional groups to join (default [])",
)
createFlags.Bool(
"help", false, "",
)
createFlags.String(
"healthcheck-command", "",
"set a healthcheck command for the container ('none' disables the existing healthcheck)",
)
createFlags.String(
"healthcheck-interval", "30s",
"set an interval for the healthchecks",
)
createFlags.Uint(
"healthcheck-retries", 3,
"the number of retries allowed before a healthcheck is considered to be unhealthy",
)
createFlags.String(
"healthcheck-start-period", "0s",
"the initialization time needed for a container to bootstrap",
)
createFlags.String(
"healthcheck-timeout", "30s",
"the maximum time allowed to complete the healthcheck before an interval is considered failed",
)
createFlags.StringP(
"hostname", "h", "",
"Set container hostname",
)
createFlags.String(
"image-volume", "bind",
"Tells podman how to handle the builtin image volumes. The options are: 'bind', 'tmpfs', or 'ignore' (default 'bind')",
)
createFlags.Bool(
"init", false,
"Run an init binary inside the container that forwards signals and reaps processes",
)
createFlags.String(
"init-path", "",
// Do not use the Value field for setting the default value to determine user input (i.e., non-empty string)
fmt.Sprintf("Path to the container-init binary (default: %q)", libpod.DefaultInitPath),
)
createFlags.BoolP(
"interactive", "i", false,
"Keep STDIN open even if not attached",
)
createFlags.String(
"ip", "",
"Specify a static IPv4 address for the container",
)
createFlags.String(
"ipc", "",
"IPC namespace to use",
)
createFlags.String(
"kernel-memory", "",
"Kernel memory limit (format: `<number>[<unit>]`, where unit = b, k, m or g)",
)
createFlags.StringArrayP(
"label", "l", []string{},
"Set metadata on container (default [])",
)
createFlags.StringSlice(
"label-file", []string{},
"Read in a line delimited file of labels (default [])",
)
createFlags.String(
"log-driver", "",
"Logging driver for the container",
)
createFlags.StringSlice(
"log-opt", []string{},
"Logging driver options (default [])",
)
createFlags.String(
"mac-address", "",
"Container MAC address (e.g. 92:d0:c6:0a:29:33), not currently supported",
)
createFlags.StringP(
"memory", "m", "",
"Memory limit (format: <number>[<unit>], where unit = b, k, m or g)",
)
createFlags.String(
"memory-reservation", "",
"Memory soft limit (format: <number>[<unit>], where unit = b, k, m or g)",
)
createFlags.String(
"memory-swap", "",
"Swap limit equal to memory plus swap: '-1' to enable unlimited swap",
)
createFlags.Int64(
"memory-swappiness", -1,
"Tune container memory swappiness (0 to 100) (default -1)",
)
createFlags.String(
"name", "",
"Assign a name to the container",
)
createFlags.String(
"net", getDefaultNetwork(),
"Connect a container to a network",
)
createFlags.String(
"network", getDefaultNetwork(),
"Connect a container to a network",
)
createFlags.Bool(
"oom-kill-disable", false,
"Disable OOM Killer",
)
createFlags.Int(
"oom-score-adj", 0,
"Tune the host's OOM preferences (-1000 to 1000)",
)
createFlags.String(
"pid", "",
"PID namespace to use",
)
createFlags.Int64(
"pids-limit", 0,
"Tune container pids limit (set -1 for unlimited)",
)
createFlags.String(
"pod", "",
"Run container in an existing pod",
)
createFlags.Bool(
"privileged", false,
"Give extended privileges to container",
)
createFlags.StringSliceP(
"publish", "p", []string{},
"Publish a container's port, or a range of ports, to the host (default [])",
)
createFlags.BoolP(
"publish-all", "P", false,
"Publish all exposed ports to random ports on the host interface",
)
createFlags.BoolP(
"quiet", "q", false,
"Suppress output information when pulling images",
)
createFlags.Bool(
"read-only", false,
"Make containers root filesystem read-only",
)
createFlags.String(
"restart", "",
"Restart is not supported. Please use a systemd unit file for restart",
)
createFlags.Bool(
"rm", false,
"Remove container (and pod if created) after exit",
)
createFlags.Bool(
"rootfs", false,
"The first argument is not an image but the rootfs to the exploded container",
)
createFlags.StringArray(
"security-opt", []string{},
"Security Options (default [])",
)
createFlags.String(
"shm-size", "65536k",
"Size of `/dev/shm`. The format is `<number><unit>`",
)
createFlags.String(
"stop-signal", "",
"Signal to stop a container. Default is SIGTERM",
)
createFlags.Uint(
"stop-timeout", libpod.CtrRemoveTimeout,
"Timeout (in seconds) to stop a container. Default is 10",
)
createFlags.StringSlice(
"storage-opt", []string{},
"Storage driver options per container (default [])",
)
createFlags.String(
"subgidname", "",
"Name of range listed in /etc/subgid for use in user namespace",
)
createFlags.String(
"subuidname", "",
"Name of range listed in /etc/subuid for use in user namespace",
)
createFlags.StringSlice(
"sysctl", []string{},
"Sysctl options (default [])",
)
createFlags.Bool(
"systemd", true,
"Run container in systemd mode if the command executable is systemd or init",
)
createFlags.StringSlice(
"tmpfs", []string{},
"Mount a temporary filesystem (`tmpfs`) into a container (default [])",
)
createFlags.BoolP(
"tty", "t", false,
"Allocate a pseudo-TTY for container",
)
createFlags.StringSlice(
"uidmap", []string{},
"UID map to use for the user namespace",
)
createFlags.StringSlice(
"ulimit", []string{},
"Ulimit options (default [])",
)
createFlags.StringP(
"user", "u", "",
"Username or UID (format: <name|uid>[:<group|gid>])",
)
createFlags.String(
"userns", "",
"User namespace to use",
)
createFlags.String(
"uts", "",
"UTS namespace to use",
)
createFlags.StringArray(
"mount", []string{},
"Attach a filesystem mount to the container (default [])",
)
createFlags.StringArrayP(
"volume", "v", []string{},
"Bind mount a volume into the container (default [])",
)
createFlags.StringSlice(
"volumes-from", []string{},
"Mount volumes from the specified container(s) (default [])",
)
createFlags.StringP(
"workdir", "w", "",
"Working directory inside the container",
)
}
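// Illustrative sketch (not from this file): a create invocation exercising a few of the flags
// registered above; image, names and values are placeholders.
//
//   podman create --name web --publish 8080:80 --volume /srv/www:/data:ro \
//       --memory 512m --label app=web nginx:alpine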
func getFormat(c *cliconfig.PodmanCommand) (string, error) {
format := strings.ToLower(c.String("format"))
if strings.HasPrefix(format, buildah.OCI) {
return buildah.OCIv1ImageManifest, nil
}
if strings.HasPrefix(format, buildah.DOCKER) {
return buildah.Dockerv2ImageManifest, nil
}
return "", errors.Errorf("unrecognized image type %q", format)
}
func getAuthFile(authfile string) string {
if authfile != "" {
return authfile
}
return os.Getenv("REGISTRY_AUTH_FILE")
}
// scrubServer removes 'http://' or 'https://' from the front of the
// server/registry string if either is there. This will be mostly used
// for user input from 'podman login' and 'podman logout'.
func scrubServer(server string) string {
server = strings.TrimPrefix(server, "https://")
return strings.TrimPrefix(server, "http://")
}
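// Illustrative behaviour of the normalization above (inputs are placeholders):
//
//   scrubServer("https://registry.example.com") // -> "registry.example.com"
//   scrubServer("http://quay.io")               // -> "quay.io"
//   scrubServer("docker.io")                    // -> "docker.io" (unchanged)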
// HelpTemplate returns the help template for podman commands
// This uses the command's Short and Long descriptions.
// The main podman command should not use this.
func HelpTemplate() string {
return `{{.Short}}
Description:
{{.Long}}
{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
}
// UsageTemplate returns the usage template for podman commands
// This blocks the displaying of the global options. The main podman
// command should not use this.
func UsageTemplate() string {
return `Usage:{{if (and .Runnable (not .HasAvailableSubCommands))}}
{{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
{{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
Aliases:
{{.NameAndAliases}}{{end}}{{if .HasExample}}
Examples:
{{.Example}}{{end}}{{if .HasAvailableSubCommands}}
Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
{{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
Flags:
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
{{end}}
`
}
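// Illustrative wiring sketch (assumed, not shown in this file): the two templates above are
// typically attached to the root cobra command, e.g.
//
//   rootCmd.SetHelpTemplate(HelpTemplate())
//   rootCmd.SetUsageTemplate(UsageTemplate())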
| [
"\"REGISTRY_AUTH_FILE\""
]
| []
| [
"REGISTRY_AUTH_FILE"
]
| [] | ["REGISTRY_AUTH_FILE"] | go | 1 | 0 | |
models/base.go | package models
import (
"github.com/joho/godotenv"
"github.com/wilsontwm/user-registration"
"os"
"strconv"
)
var IsActivationRequired = false
// Initialization
func init() {
err := godotenv.Load()
if err != nil {
panic(err)
}
IsActivationRequired, _ = strconv.ParseBool(os.Getenv("is_activation_required"))
// Start of initialization of the user registration module
dbConfig := userreg.DBConfig{
Driver: os.Getenv("db_type"),
Username: os.Getenv("db_user"),
Password: os.Getenv("db_pass"),
Host: os.Getenv("db_host"),
DBName: os.Getenv("db_name"),
InstanceConnectionName: os.Getenv("db_instance"),
}
tableName := "users"
userreg.Initialize(dbConfig)
userreg.Config(userreg.TableName(tableName), userreg.UserActivation(IsActivationRequired), userreg.MigrateDatabase())
// End of initialization of the user registration module
}
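// Illustrative .env sketch consumed by the init above (all values are placeholders):
//
//   is_activation_required=true
//   db_type=postgres
//   db_user=app
//   db_pass=secret
//   db_host=127.0.0.1
//   db_name=userdb
//   db_instance=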
| [
"\"is_activation_required\"",
"\"db_type\"",
"\"db_user\"",
"\"db_pass\"",
"\"db_host\"",
"\"db_name\"",
"\"db_instance\""
]
| []
| [
"db_user",
"db_type",
"db_name",
"db_instance",
"db_pass",
"is_activation_required",
"db_host"
]
| [] | ["db_user", "db_type", "db_name", "db_instance", "db_pass", "is_activation_required", "db_host"] | go | 7 | 0 | |
examples/docker/main.go | package main
import (
"bufio"
"flag"
"fmt"
"log"
"os"
"text/template"
"time"
"github.com/gin-gonic/contrib/cors"
"github.com/gin-gonic/gin"
"github.com/oglinuk/sbh"
)
var (
algorithm = flag.String("a", "", "Hashing algorithm to use, defaults to sha256")
uptimes = flag.Int("ut", 0, "Number of letters to make uppercase")
symbols = flag.String("s", "", "Symbols to add to SBH")
length = flag.Int("l", 0, "Length of the returned string")
web = flag.Bool("w", false, "Serve SBH over http")
tpl *template.Template
)
func SBH() {
scanner := bufio.NewScanner(os.Stdin)
fmt.Printf("Plaintext: ")
scanner.Scan()
plaintext := scanner.Text()
var nrots int64
fmt.Printf("Number of Rotations: ")
fmt.Scan(&nrots)
var seed int64
fmt.Printf("Seed: ")
fmt.Scan(&seed)
secbaehash := sbh.SBH{
Plaintext: plaintext,
NRots: nrots,
Seed: seed,
Algorithm: *algorithm,
UppercaseTimes: *uptimes,
Symbols: *symbols,
Length: *length,
}
if *algorithm == "" {
fmt.Println("No algorithm specified with -a, defaulting to sha256 ...")
}
sTime := time.Now()
fmt.Printf("SBH: %s\nElapsed time: %v\n",
sbh.Generate(secbaehash), time.Since(sTime))
}
func main() {
flag.Parse()
if *web {
PORT := os.Getenv("PORT")
if PORT == "" {
PORT = "9001"
}
tpl = template.Must(template.ParseGlob("templates/*"))
grouter := gin.Default()
corsConfig := cors.DefaultConfig()
corsConfig.AllowAllOrigins = true
grouter.GET("/", indexHandler)
grouter.POST("/", uiHandler)
grouter.POST("/sbh", restHandler)
grouter.Static("/static/", "./static")
log.Printf("Server is running at :%s ...", PORT)
log.Fatal(grouter.Run(fmt.Sprintf(":%s", PORT)), nil)
} else {
SBH()
}
}
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
transport/http/tester/main.go | package main
import (
"fmt"
"os"
"time"
"github.com/suborbital/grav/discovery/local"
"github.com/suborbital/grav/grav"
ghttp "github.com/suborbital/grav/transport/http"
"github.com/suborbital/vektor/vk"
"github.com/suborbital/vektor/vlog"
)
func main() {
logger := vlog.Default(vlog.Level(vlog.LogLevelDebug))
gravhttp := ghttp.New()
locald := local.New()
port := os.Getenv("VK_HTTP_PORT")
g := grav.New(
grav.UseLogger(logger),
grav.UseEndpoint(port, ""),
grav.UseTransport(gravhttp),
grav.UseDiscovery(locald),
)
pod := g.Connect()
pod.On(func(msg grav.Message) error {
fmt.Println("received something:", string(msg.Data()))
return nil
})
vk := vk.New(vk.UseAppName("http tester"))
vk.POST("/meta/message", gravhttp.HandlerFunc())
go func() {
<-time.After(time.Second * time.Duration(10))
pod.Send(grav.NewMsg(grav.MsgTypeDefault, []byte("hello, world")))
<-time.After(time.Second * time.Duration(10))
pod.Send(grav.NewMsg(grav.MsgTypeDefault, []byte("hello, again")))
}()
vk.Start()
}
| [
"\"VK_HTTP_PORT\""
]
| []
| [
"VK_HTTP_PORT"
]
| [] | ["VK_HTTP_PORT"] | go | 1 | 0 | |
lib/apiconfig/load.go | // Copyright (c) 2017-2020 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package apiconfig
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/kelseyhightower/envconfig"
log "github.com/sirupsen/logrus"
yaml "github.com/projectcalico/go-yaml-wrapper"
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
)
// LoadClientConfig loads the ClientConfig from the specified file (if specified)
// or from environment variables (if the file is not specified).
func LoadClientConfig(filename string) (*CalicoAPIConfig, error) {
// Override / merge with values loaded from the specified file.
if filename != "" {
return LoadClientConfigFromFile(filename)
} else {
return LoadClientConfigFromEnvironment()
}
}
// LoadClientConfigFromFile loads the ClientConfig from the specified file, which must exist.
// The datastore type is defaulted if not specified.
func LoadClientConfigFromFile(filename string) (*CalicoAPIConfig, error) {
b, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
c, err := LoadClientConfigFromBytes(b)
if err != nil {
return nil, fmt.Errorf("syntax error in %s: %v", filename, err)
}
return c, nil
}
// LoadClientConfigFromBytes loads the ClientConfig from the supplied bytes containing YAML or JSON format data.
// The datastore type is defaulted if not specified.
func LoadClientConfigFromBytes(b []byte) (*CalicoAPIConfig, error) {
var c CalicoAPIConfig
log.Debug("Loading config from JSON or YAML data")
if err := yaml.UnmarshalStrict(b, &c); err != nil {
return nil, fmt.Errorf("failed to parse config as YAML/JSON: %w", err)
}
// Validate the version and kind.
if c.APIVersion != apiv3.GroupVersionCurrent {
return nil, errors.New("invalid config file: unknown APIVersion '" + c.APIVersion + "'")
}
if c.Kind != KindCalicoAPIConfig {
return nil, errors.New("invalid config file: expected kind '" + KindCalicoAPIConfig + "', got '" + c.Kind + "'")
}
applyConfigDefaults(&c)
log.Debug("Datastore type: ", c.Spec.DatastoreType)
return &c, nil
}
// LoadClientConfigFromEnvironment loads a client config from the environment.
// The datastore type is defaulted if not specified.
func LoadClientConfigFromEnvironment() (*CalicoAPIConfig, error) {
c := NewCalicoAPIConfig()
if err := envconfig.Process("calico", &c.Spec); err != nil {
return nil, fmt.Errorf("failed to load config from env vars: %w", err)
}
applyConfigDefaults(c)
return c, nil
}
// applyConfigDefaults tries to detect the correct datastore type and config parameters.
func applyConfigDefaults(c *CalicoAPIConfig) {
if c.Spec.DatastoreType == "" {
log.Debug("Datastore type isn't set, trying to detect it")
if c.Spec.EtcdEndpoints != "" {
log.Debug("EtcdEndpoints specified, detected etcdv3.")
c.Spec.DatastoreType = EtcdV3
} else {
log.Debug("No EtcdEndpoints specified, defaulting to kubernetes.")
c.Spec.DatastoreType = Kubernetes
}
}
if c.Spec.DatastoreType == Kubernetes {
// Default to using $(HOME)/.kube/config, unless another means has been configured.
switch {
case c.Spec.Kubeconfig != "":
log.WithField("kubeconfig", c.Spec.Kubeconfig).Debug("kubeconfig provided.")
case c.Spec.K8sAPIEndpoint != "":
log.WithField("apiEndpoint", c.Spec.K8sAPIEndpoint).Debug("API endpoint provided.")
case os.Getenv("HOME") == "":
// No home directory, can't build a default config path.
log.Debug("No home directory, default path doesn't apply.")
default:
// Default the kubeconfig.
kubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config")
if _, err := os.Stat(kubeconfig); err == nil {
c.Spec.Kubeconfig = kubeconfig
log.WithField("kubeconfig", c.Spec.Kubeconfig).Debug("Using default kubeconfig path.")
} else {
// The Kubernetes client can try other defaults if we leave the field blank (for example, the
// in cluster config).
log.Debug("No kubeconfig file at default path, leaving blank.")
}
}
}
}
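// Illustrative usage sketch (assumed caller, not part of this file):
//
//   cfg, err := apiconfig.LoadClientConfig("")   // no file given, so read environment variables
//   cfg, err = apiconfig.LoadClientConfigFromFile("/etc/calico/calicoctl.cfg")
//
// Because envconfig.Process is called with the "calico" prefix, spec fields are read from
// variables such as DATASTORE_TYPE / CALICO_DATASTORE_TYPE or ETCD_ENDPOINTS (the exact names
// depend on the struct's envconfig tags).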
| [
"\"HOME\"",
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
internal/serverless/lambda/lambda.go | package lambda
import (
"fmt"
"os"
"time"
"github.com/aws/aws-lambda-go/lambda"
"gopkg.in/yaml.v3"
"github.com/benthosdev/benthos/v4/internal/component/output"
"github.com/benthosdev/benthos/v4/internal/config"
"github.com/benthosdev/benthos/v4/internal/serverless"
)
var handler *serverless.Handler
// Run executes Benthos as an AWS Lambda function. Configuration can be stored
// within the environment variable BENTHOS_CONFIG.
func Run() {
// A list of default config paths to check for if not explicitly defined
defaultPaths := []string{
"./benthos.yaml",
"./config.yaml",
"/benthos.yaml",
"/etc/benthos/config.yaml",
"/etc/benthos.yaml",
}
if path := os.Getenv("BENTHOS_CONFIG_PATH"); path != "" {
defaultPaths = append([]string{path}, defaultPaths...)
}
conf := config.New()
conf.Metrics.Type = "none"
conf.Logger.Format = "json"
conf.Output.Type = "switch"
conf.Output.Switch.RetryUntilSuccess = false
errorCase := output.NewSwitchConfigCase()
errorCase.Check = "errored()"
errorCase.Output.Type = "reject"
errorCase.Output.Reject = "processing failed due to: ${! error() }"
responseCase := output.NewSwitchConfigCase()
responseCase.Output.Type = "sync_response"
conf.Output.Switch.Cases = append(conf.Output.Switch.Cases, errorCase, responseCase)
if confStr := os.Getenv("BENTHOS_CONFIG"); len(confStr) > 0 {
confBytes := config.ReplaceEnvVariables([]byte(confStr))
if err := yaml.Unmarshal(confBytes, &conf); err != nil {
fmt.Fprintf(os.Stderr, "Configuration file read error: %v\n", err)
os.Exit(1)
}
} else {
// Iterate default config paths
for _, path := range defaultPaths {
if _, err := os.Stat(path); err == nil {
if _, err = config.ReadFileLinted(path, false, &conf); err != nil {
fmt.Fprintf(os.Stderr, "Configuration file read error: %v\n", err)
os.Exit(1)
}
break
}
}
}
var err error
if handler, err = serverless.NewHandler(conf); err != nil {
fmt.Fprintf(os.Stderr, "Initialisation error: %v\n", err)
os.Exit(1)
}
lambda.Start(handler.Handle)
if err = handler.Close(time.Second * 30); err != nil {
fmt.Fprintf(os.Stderr, "Shut down error: %v\n", err)
os.Exit(1)
}
}
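// Illustrative deployment sketch (assumed values, not from this file): the loader above accepts
// an inline config through the environment, e.g.
//
//   BENTHOS_CONFIG='pipeline: { processors: [ { bloblang: "root = content().uppercase()" } ] }'
//
// If BENTHOS_CONFIG is unset and none of the default paths exist, the handler runs with the
// defaults configured above (JSON logs, metrics disabled, switch output with sync_response).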
| [
"\"BENTHOS_CONFIG_PATH\"",
"\"BENTHOS_CONFIG\""
]
| []
| [
"BENTHOS_CONFIG_PATH",
"BENTHOS_CONFIG"
]
| [] | ["BENTHOS_CONFIG_PATH", "BENTHOS_CONFIG"] | go | 2 | 0 | |
cmd/box/main.go | package main
import (
"bytes"
"context"
"fmt"
"os"
"strconv"
"strings"
"github.com/YouEclipse/steam-box/pkg/steambox"
"github.com/google/go-github/github"
)
func main() {
var err error
steamAPIKey := os.Getenv("STEAM_API_KEY")
steamID, _ := strconv.ParseUint(os.Getenv("STEAM_ID"), 10, 64)
appIDs := os.Getenv("APP_ID")
appIDList := make([]uint32, 0)
for _, appID := range strings.Split(appIDs, ",") {
appid, err := strconv.ParseUint(appID, 10, 32)
if err != nil {
continue
}
appIDList = append(appIDList, uint32(appid))
}
ghToken := os.Getenv("GH_TOKEN")
ghUsername := os.Getenv("GH_USER")
gistID := os.Getenv("GIST_ID")
steamOption := "RECENT" // options for types of games to list: RECENT <default> (recently played games), ALLTIME (playtime of games in descending order)
if os.Getenv("STEAM_OPTION") != "" {
steamOption = os.Getenv("STEAM_OPTION")
}
multiLined := false // boolean for whether hours should have their own line - YES = true, NO = false
if os.Getenv("MULTILINE") != "" {
lineOption := os.Getenv("MULTILINE")
if lineOption == "YES" {
multiLined = true
}
}
updateOption := os.Getenv("UPDATE_OPTION") // options for update: GIST (Gist only), MARKDOWN (README only), GIST_AND_MARKDOWN (Gist and README)
markdownFile := os.Getenv("MARKDOWN_FILE") // the markdown filename (e.g. MYFILE.md)
var updateGist, updateMarkdown bool
if updateOption == "MARKDOWN" {
updateMarkdown = true
} else if updateOption == "GIST_AND_MARKDOWN" {
updateGist = true
updateMarkdown = true
} else {
updateGist = true
}
box := steambox.NewBox(steamAPIKey, ghUsername, ghToken)
ctx := context.Background()
var (
filename string
lines []string
)
if steamOption == "ALLTIME" {
filename = "🎮 Steam playtime leaderboard"
lines, err = box.GetPlayTime(ctx, steamID, multiLined, appIDList...)
if err != nil {
panic("GetPlayTime err:" + err.Error())
}
} else if steamOption == "RECENT" {
filename = "🎮 Recently played Steam games"
lines, err = box.GetRecentGames(ctx, steamID, multiLined)
if err != nil {
panic("GetRecentGames err:" + err.Error())
}
}
if updateGist {
gist, err := box.GetGist(ctx, gistID)
if err != nil {
panic("GetGist err:" + err.Error())
}
f := gist.Files[github.GistFilename(filename)]
f.Content = github.String(strings.Join(lines, "\n"))
gist.Files[github.GistFilename(filename)] = f
err = box.UpdateGist(ctx, gistID, gist)
if err != nil {
panic("UpdateGist err:" + err.Error())
}
}
if updateMarkdown && markdownFile != "" {
title := filename
if updateGist {
title = fmt.Sprintf(`#### <a href="https://gist.github.com/%s" target="_blank">%s</a>`, gistID, title)
}
content := bytes.NewBuffer(nil)
content.WriteString(strings.Join(lines, "\n"))
err = box.UpdateMarkdown(ctx, title, markdownFile, content.Bytes())
if err != nil {
fmt.Println(err)
}
fmt.Println("updating markdown successfully on ", markdownFile)
}
}
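// Illustrative environment sketch for the options read above (all values are placeholders):
//
//   STEAM_API_KEY=xxxxxxxx  STEAM_ID=7656119xxxxxxxxxx  APP_ID=730,570
//   GH_TOKEN=ghp_xxx        GH_USER=octocat             GIST_ID=abc123
//   STEAM_OPTION=RECENT     UPDATE_OPTION=GIST_AND_MARKDOWN
//   MARKDOWN_FILE=README.md MULTILINE=YES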
| [
"\"STEAM_API_KEY\"",
"\"STEAM_ID\"",
"\"APP_ID\"",
"\"GH_TOKEN\"",
"\"GH_USER\"",
"\"GIST_ID\"",
"\"STEAM_OPTION\"",
"\"STEAM_OPTION\"",
"\"MULTILINE\"",
"\"MULTILINE\"",
"\"UPDATE_OPTION\"",
"\"MARKDOWN_FILE\""
]
| []
| [
"STEAM_OPTION",
"GH_TOKEN",
"GH_USER",
"MARKDOWN_FILE",
"GIST_ID",
"MULTILINE",
"APP_ID",
"STEAM_API_KEY",
"STEAM_ID",
"UPDATE_OPTION"
]
| [] | ["STEAM_OPTION", "GH_TOKEN", "GH_USER", "MARKDOWN_FILE", "GIST_ID", "MULTILINE", "APP_ID", "STEAM_API_KEY", "STEAM_ID", "UPDATE_OPTION"] | go | 10 | 0 | |
tests/test_linenotipy.py | import os
import pytest
import time
from linenotipy import Line
@pytest.fixture(scope="module", autouse=True)
def scope_module():
token = os.environ["line_notify_token"]
yield Line(token=token)
@pytest.fixture(scope="function", autouse=True)
def line(scope_module):
time.sleep(1)
yield scope_module
# @pytest.mark.skip
def test_line_post_message(line):
expected = "ok"
actual = line.post(message="Hello, world.")
assert expected == actual["message"]
# @pytest.mark.skip
def test_line_post_image(line):
expected = "ok"
actual = line.post(message="Hello, image.", imageFile="tests/test.png")
assert expected == actual["message"]
# @pytest.mark.skip
def test_line_post_stamp(line):
expected = "ok"
actual = line.post(message="Hello, stamp.", stickerPackageId=3, stickerId=180)
assert expected == actual["message"]
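# Illustrative setup sketch (token value is a placeholder): the module-scoped fixture above
# expects the LINE Notify token in the environment before the tests run, e.g.
#
#   export line_notify_token=XXXXXXXXXXXXXXXX
#   pytest tests/test_linenotipy.py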
| []
| []
| [
"line_notify_token"
]
| [] | ["line_notify_token"] | python | 1 | 0 | |
backend/cli/watcher/main.go | package main
import (
_ "github.com/joho/godotenv/autoload"
)
import (
"context"
"encoding/json"
"github.com/HydroProtocol/nights-watch/plugin"
"github.com/HydroProtocol/nights-watch/structs"
"strconv"
"github.com/HydroProtocol/hydro-scaffold-dex/backend/cli"
"github.com/HydroProtocol/hydro-scaffold-dex/backend/connection"
"github.com/HydroProtocol/hydro-scaffold-dex/backend/models"
"github.com/HydroProtocol/hydro-sdk-backend/common"
"github.com/HydroProtocol/hydro-sdk-backend/sdk"
"github.com/HydroProtocol/hydro-sdk-backend/utils"
"github.com/HydroProtocol/nights-watch"
"os"
)
type DBTransactionHandler struct {
eventQueue common.IQueue
kvStore common.IKVStore
}
func (handler DBTransactionHandler) TxHandlerFunc(txAndReceipt *structs.RemovableTxAndReceipt) {
tx := txAndReceipt.Tx
txReceipt := txAndReceipt.Receipt
launchLog := models.LaunchLogDao.FindByHash(tx.GetHash())
if launchLog == nil {
utils.Debugf("Skip useless transaction %s", tx.GetHash())
return
}
if launchLog.Status != common.STATUS_PENDING {
utils.Infof("LaunchLog is not pending %s, skip", launchLog.Hash.String)
return
}
txResult := txReceipt.GetResult()
hash := tx.GetHash()
transaction := models.TransactionDao.FindTransactionByID(launchLog.ItemID)
utils.Infof("Transaction %s txResult is %+v", tx.GetHash(), txResult)
var status string
if txResult {
status = common.STATUS_SUCCESSFUL
} else {
status = common.STATUS_FAILED
}
// approve events should not be processed by the engine, so update the launch log and return
if launchLog.ItemType == "hydroApprove" {
launchLog.Status = status
err := models.LaunchLogDao.UpdateLaunchLog(launchLog)
if err != nil {
panic(err)
}
return
}
event := &common.ConfirmTransactionEvent{
Event: common.Event{
Type: common.EventConfirmTransaction,
MarketID: transaction.MarketID,
},
Hash: hash,
Status: status,
//Timestamp: timestamp, //todo
}
bts, _ := json.Marshal(event)
err := handler.eventQueue.Push(bts)
if err != nil {
utils.Errorf("Push event into Queue Error: %v", err)
}
handler.kvStore.Set(common.HYDRO_WATCHER_BLOCK_NUMBER_CACHE_KEY, strconv.FormatUint(tx.GetBlockNumber(), 10), 0)
}
func main() {
ctx, stop := context.WithCancel(context.Background())
go cli.WaitExitSignal(stop)
// Init Database Client
models.Connect(os.Getenv("HSK_DATABASE_URL"))
// Init Redis client
client := connection.NewRedisClient(os.Getenv("HSK_REDIS_URL"))
// init Key/Value Store
kvStore, err := common.InitKVStore(&common.RedisKVStoreConfig{
Ctx: ctx,
Client: client,
})
if err != nil {
panic(err)
}
queue, err := common.InitQueue(&common.RedisQueueConfig{
Name: common.HYDRO_ENGINE_EVENTS_QUEUE_KEY,
Client: client,
Ctx: ctx,
})
if err != nil {
panic(err)
}
// only interested in tx sent by the launcher
filter := func(tx sdk.Transaction) bool {
launchLog := models.LaunchLogDao.FindByHash(tx.GetHash())
if launchLog == nil {
utils.Debugf("Skip useless transaction %s", tx.GetHash())
return false
} else {
return true
}
}
dbTxHandler := DBTransactionHandler{
eventQueue: queue,
kvStore: kvStore,
}
p := plugin.NewTxReceiptPluginWithFilter(dbTxHandler.TxHandlerFunc, filter)
api := os.Getenv("HSK_BLOCKCHAIN_RPC_URL")
w := nights_watch.NewHttpBasedEthWatcher(ctx, api)
w.RegisterTxReceiptPlugin(p)
syncedBlockInCache, err := kvStore.Get(common.HYDRO_WATCHER_BLOCK_NUMBER_CACHE_KEY)
if err != nil && err != common.KVStoreEmpty {
panic(err)
}
var startFromBlock uint64
if b, err := strconv.Atoi(syncedBlockInCache); err == nil {
startFromBlock = uint64(b) + 1
} else {
startFromBlock = 0
}
err = w.RunTillExitFromBlock(startFromBlock)
if err != nil {
utils.Infof("Watcher Exit with err: %s", err)
} else {
utils.Infof("Watcher Exit")
}
}
| [
"\"HSK_DATABASE_URL\"",
"\"HSK_REDIS_URL\"",
"\"HSK_BLOCKCHAIN_RPC_URL\""
]
| []
| [
"HSK_DATABASE_URL",
"HSK_BLOCKCHAIN_RPC_URL",
"HSK_REDIS_URL"
]
| [] | ["HSK_DATABASE_URL", "HSK_BLOCKCHAIN_RPC_URL", "HSK_REDIS_URL"] | go | 3 | 0 | |
vendor/github.com/cloudfoundry/libbuildpack/cutlass/cf.go | package cutlass
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"github.com/blang/semver"
"github.com/tidwall/gjson"
)
type cfConfig struct {
SpaceFields struct {
GUID string
}
}
type cfApps struct {
Resources []struct {
Metadata struct {
GUID string `json:"guid"`
} `json:"metadata"`
} `json:"resources"`
}
type cfInstance struct {
State string `json:"state"`
}
type App struct {
Name string
Path string
Stack string
Buildpacks []string
Memory string
Disk string
StartCommand string
Stdout *Buffer
appGUID string
env map[string]string
logCmd *exec.Cmd
HealthCheck string
}
func New(fixture string) *App {
return &App{
Name: filepath.Base(fixture) + "-" + RandStringRunes(20),
Path: fixture,
Stack: os.Getenv("CF_STACK"),
Buildpacks: []string{},
Memory: DefaultMemory,
Disk: DefaultDisk,
StartCommand: "",
appGUID: "",
env: map[string]string{},
logCmd: nil,
HealthCheck: "",
}
}
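// Illustrative test usage sketch built from the API in this file (fixture and buildpack names
// are placeholders):
//
//   app := cutlass.New(filepath.Join("fixtures", "simple_app"))
//   app.Buildpacks = []string{"go_buildpack"}
//   app.SetEnv("GOVERSION", "go1.17")
//   err := app.Push()
//   defer app.Destroy()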
func ApiVersion() (string, error) {
cmd := exec.Command("cf", "curl", "/v2/info")
cmd.Stderr = DefaultStdoutStderr
bytes, err := cmd.Output()
if err != nil {
return "", err
}
var info struct {
ApiVersion string `json:"api_version"`
}
if err := json.Unmarshal(bytes, &info); err != nil {
return "", err
}
return info.ApiVersion, nil
}
func ApiGreaterThan(version string) (bool, error) {
apiVersionString, err := ApiVersion()
if err != nil {
return false, err
}
apiVersion, err := semver.Make(apiVersionString)
if err != nil {
return false, err
}
reqVersion, err := semver.ParseRange(">= " + version)
if err != nil {
return false, err
}
return reqVersion(apiVersion), nil
}
func Stacks() ([]string, error) {
cmd := exec.Command("cf", "curl", "/v2/stacks")
cmd.Stderr = DefaultStdoutStderr
bytes, err := cmd.Output()
if err != nil {
return nil, err
}
var info struct {
Resources []struct {
Entity struct {
Name string `json:"name"`
} `json:"entity"`
} `json:"resources"`
}
if err := json.Unmarshal(bytes, &info); err != nil {
return nil, err
}
var out []string
for _, r := range info.Resources {
out = append(out, r.Entity.Name)
}
return out, nil
}
func DeleteOrphanedRoutes() error {
command := exec.Command("cf", "delete-orphaned-routes", "-f")
command.Stdout = DefaultStdoutStderr
command.Stderr = DefaultStdoutStderr
if err := command.Run(); err != nil {
return err
}
return nil
}
func DeleteBuildpack(language string) error {
command := exec.Command("cf", "delete-buildpack", "-f", fmt.Sprintf("%s_buildpack", language))
if data, err := command.CombinedOutput(); err != nil {
fmt.Println(string(data))
return err
}
return nil
}
func UpdateBuildpack(language, file, stack string) error {
updateBuildpackArgs := []string{"update-buildpack", fmt.Sprintf("%s_buildpack", language), "-p", file, "--enable"}
stackAssociationSupported, err := ApiGreaterThan("2.113.0")
if err != nil {
return err
}
if stack != "" && stackAssociationSupported {
updateBuildpackArgs = append(updateBuildpackArgs, "-s", stack)
}
command := exec.Command("cf", updateBuildpackArgs...)
if data, err := command.CombinedOutput(); err != nil {
return fmt.Errorf("Failed to update buildpack by running '%s':\n%s\n%v", strings.Join(command.Args, " "), string(data), err)
}
return nil
}
func createBuildpack(language, file string) error {
command := exec.Command("cf", "create-buildpack", fmt.Sprintf("%s_buildpack", language), file, "100", "--enable")
if data, err := command.CombinedOutput(); err != nil {
return fmt.Errorf("Failed to create buildpack by running '%s':\n%s\n%v", strings.Join(command.Args, " "), string(data), err)
}
return nil
}
func CountBuildpack(language string, stack string) (int, error) {
command := exec.Command("cf", "buildpacks")
targetBpname := fmt.Sprintf("%s_buildpack", language)
matches := 0
lines, err := command.CombinedOutput()
if err != nil {
return -1, err
}
for _, line := range strings.Split(string(lines), "\n") {
bpname := strings.SplitN(line, " ", 2)[0]
split := strings.Split(line, " ")
stackval := split[len(split)-1]
if bpname == targetBpname {
if stack == "" || stack == stackval {
matches++
}
}
}
return matches, nil
}
func CreateOrUpdateBuildpack(language, file, stack string) error {
count, err := CountBuildpack(language, stack)
if err != nil {
return err
}
if count == 0 {
return createBuildpack(language, file)
}
return UpdateBuildpack(language, file, stack)
}
func (a *App) ConfirmBuildpack(version string) error {
if !strings.Contains(a.Stdout.String(), fmt.Sprintf("Buildpack version %s\n", version)) {
var versionLine string
for _, line := range strings.Split(a.Stdout.String(), "\n") {
if versionLine == "" && strings.Contains(line, " Buildpack version ") {
versionLine = line
}
}
return fmt.Errorf("Wrong buildpack version. Expected '%s', but this was logged: %s", version, versionLine)
}
return nil
}
func (a *App) RunTask(command string) ([]byte, error) {
cmd := exec.Command("cf", "run-task", a.Name, command)
cmd.Stderr = DefaultStdoutStderr
bytes, err := cmd.Output()
if err != nil {
return bytes, err
}
return bytes, nil
}
func (a *App) Stop() error {
command := exec.Command("cf", "stop", a.Name)
command.Stdout = DefaultStdoutStderr
command.Stderr = DefaultStdoutStderr
if err := command.Run(); err != nil {
return err
}
return nil
}
func (a *App) Restart() error {
command := exec.Command("cf", "restart", a.Name)
command.Stdout = DefaultStdoutStderr
command.Stderr = DefaultStdoutStderr
if err := command.Run(); err != nil {
return err
}
return nil
}
func (a *App) SetEnv(key, value string) {
a.env[key] = value
}
func (a *App) SpaceGUID() (string, error) {
cfHome := os.Getenv("CF_HOME")
if cfHome == "" {
cfHome = os.Getenv("HOME")
}
bytes, err := ioutil.ReadFile(filepath.Join(cfHome, ".cf", "config.json"))
if err != nil {
return "", err
}
var config cfConfig
if err := json.Unmarshal(bytes, &config); err != nil {
return "", err
}
return config.SpaceFields.GUID, nil
}
func (a *App) AppGUID() (string, error) {
if a.appGUID != "" {
return a.appGUID, nil
}
guid, err := a.SpaceGUID()
if err != nil {
return "", err
}
cmd := exec.Command("cf", "curl", "/v2/apps?q=space_guid:"+guid+"&q=name:"+a.Name)
cmd.Stderr = DefaultStdoutStderr
bytes, err := cmd.Output()
if err != nil {
return "", err
}
var apps cfApps
if err := json.Unmarshal(bytes, &apps); err != nil {
return "", err
}
if len(apps.Resources) != 1 {
return "", fmt.Errorf("Expected one app, found %d", len(apps.Resources))
}
a.appGUID = apps.Resources[0].Metadata.GUID
return a.appGUID, nil
}
func (a *App) InstanceStates() ([]string, error) {
guid, err := a.AppGUID()
if err != nil {
return []string{}, err
}
cmd := exec.Command("cf", "curl", "/v2/apps/"+guid+"/instances")
cmd.Stderr = DefaultStdoutStderr
bytes, err := cmd.Output()
if err != nil {
return []string{}, err
}
var data map[string]cfInstance
if err := json.Unmarshal(bytes, &data); err != nil {
return []string{}, err
}
var states []string
for _, value := range data {
states = append(states, value.State)
}
return states, nil
}
func (a *App) PushNoStart() error {
args := []string{"push", a.Name, "--no-start", "-p", a.Path}
if a.Stack != "" {
args = append(args, "-s", a.Stack)
}
for _, buildpack := range a.Buildpacks {
args = append(args, "-b", buildpack)
}
if _, err := os.Stat(filepath.Join(a.Path, "manifest.yml")); err == nil {
args = append(args, "-f", filepath.Join(a.Path, "manifest.yml"))
}
if a.Memory != "" {
args = append(args, "-m", a.Memory)
}
if a.Disk != "" {
args = append(args, "-k", a.Disk)
}
if a.StartCommand != "" {
args = append(args, "-c", a.StartCommand)
}
if a.HealthCheck != "" {
args = append(args, "-u", a.HealthCheck)
}
command := exec.Command("cf", args...)
command.Stdout = DefaultStdoutStderr
command.Stderr = DefaultStdoutStderr
if err := command.Run(); err != nil {
return err
}
for k, v := range a.env {
command := exec.Command("cf", "set-env", a.Name, k, v)
command.Stdout = DefaultStdoutStderr
command.Stderr = DefaultStdoutStderr
if err := command.Run(); err != nil {
return err
}
}
if a.logCmd == nil {
a.logCmd = exec.Command("cf", "logs", a.Name)
a.logCmd.Stderr = DefaultStdoutStderr
a.Stdout = &Buffer{}
a.logCmd.Stdout = a.Stdout
if err := a.logCmd.Start(); err != nil {
return err
}
}
return nil
}
func (a *App) V3Push() error {
if err := a.PushNoStart(); err != nil {
return err
}
args := []string{"v3-push", a.Name, "-p", a.Path}
if len(a.Buildpacks) > 1 {
for _, buildpack := range a.Buildpacks {
args = append(args, "-b", buildpack)
}
}
command := exec.Command("cf", args...)
command.Stdout = DefaultStdoutStderr
command.Stderr = DefaultStdoutStderr
if err := command.Run(); err != nil {
return err
}
return nil
}
func (a *App) Push() error {
if err := a.PushNoStart(); err != nil {
return err
}
command := exec.Command("cf", "start", a.Name)
buf := &bytes.Buffer{}
command.Stdout = buf
command.Stderr = buf
if err := command.Run(); err != nil {
return fmt.Errorf("err: %s\n\nlogs: %s", err, buf)
}
return nil
}
func (a *App) GetUrl(path string) (string, error) {
guid, err := a.AppGUID()
if err != nil {
return "", err
}
cmd := exec.Command("cf", "curl", "/v2/apps/"+guid+"/summary")
cmd.Stderr = DefaultStdoutStderr
data, err := cmd.Output()
if err != nil {
return "", err
}
schema, found := os.LookupEnv("CUTLASS_SCHEMA")
if !found {
schema = "http"
}
host := gjson.Get(string(data), "routes.0.host").String()
domain := gjson.Get(string(data), "routes.0.domain.name").String()
return fmt.Sprintf("%s://%s.%s%s", schema, host, domain, path), nil
}
func (a *App) Get(path string, headers map[string]string) (string, map[string][]string, error) {
url, err := a.GetUrl(path)
if err != nil {
return "", map[string][]string{}, err
}
insecureSkipVerify, _ := os.LookupEnv("CUTLASS_SKIP_TLS_VERIFY")
client := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: insecureSkipVerify == "true"},
},
}
if headers["NoFollow"] == "true" {
client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
delete(headers, "NoFollow")
}
req, _ := http.NewRequest("GET", url, nil)
for k, v := range headers {
req.Header.Add(k, v)
}
if headers["user"] != "" && headers["password"] != "" {
req.SetBasicAuth(headers["user"], headers["password"])
delete(headers, "user")
delete(headers, "password")
}
resp, err := client.Do(req)
if err != nil {
return "", map[string][]string{}, err
}
defer resp.Body.Close()
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", map[string][]string{}, err
}
resp.Header["StatusCode"] = []string{strconv.Itoa(resp.StatusCode)}
return string(data), resp.Header, err
}
func (a *App) GetBody(path string) (string, error) {
body, _, err := a.Get(path, map[string]string{})
// TODO: Non 200 ??
// if !(len(headers["StatusCode"]) == 1 && headers["StatusCode"][0] == "200") {
// return "", fmt.Errorf("non 200 status: %v", headers)
// }
return body, err
}
func (a *App) Files(path string) ([]string, error) {
cmd := exec.Command("cf", "ssh", a.Name, "-c", "find "+path)
cmd.Stderr = DefaultStdoutStderr
output, err := cmd.Output()
if err != nil {
return []string{}, err
}
return strings.Split(string(output), "\n"), nil
}
func (a *App) DownloadDroplet(path string) error {
guid, err := a.AppGUID()
if err != nil {
return err
}
cmd := exec.Command("cf", "curl", "/v2/apps/"+guid+"/droplet/download", "--output", path)
cmd.Stderr = DefaultStdoutStderr
_, err = cmd.Output()
return err
}
func (a *App) Destroy() error {
if a.logCmd != nil && a.logCmd.Process != nil {
if err := a.logCmd.Process.Kill(); err != nil {
return err
}
}
command := exec.Command("cf", "delete", "-f", a.Name)
command.Stdout = DefaultStdoutStderr
command.Stderr = DefaultStdoutStderr
return command.Run()
}
| [
"\"CF_STACK\"",
"\"CF_HOME\"",
"\"HOME\""
]
| []
| [
"CF_STACK",
"HOME",
"CF_HOME"
]
| [] | ["CF_STACK", "HOME", "CF_HOME"] | go | 3 | 0 | |
coral_model/hydrodynamics.py | """
coral_model v3 - hydrodynamics
@author: Gijs G. Hendrickx
"""
import numpy as np
import os
from scipy.optimize import fsolve
# TODO: Check if the BMI-package can be removed from this project; i.e. check if once installed, it is no longer needed.
import bmi.wrapper
import faulthandler
faulthandler.enable()
class Delft3D:
"""Coupling of coral_model to Delft3D using the BMI wrapper."""
def __init__(self, home_dir, mdu_file, config_file=None):
self.home = home_dir
self.mdu = mdu_file
self.config = config_file
self.environment()
self.initialize()
self.timestep = None
def __str__(self):
if self.config:
incl = f'DFlow- and DWaves-modules'
files = f'\n\tDFlow file : {self.mdu}'\
f'\n\tConfiguration file : {self.config}'
else:
incl = f'DFlow-module'
files = f'\n\tDFlow file : {self.mdu}'
msg = (
f'Coupling with Delft3D model (incl. {incl}) with the following '
f'settings:'
f'\n\tDelft3D home dir. : {self.home}'
f'{files}'
)
return msg
def __repr__(self):
msg = (
f'Delft3D(home_dir={self.home}, mdu_file={self.mdu}, '
f'config_file={self.config})'
)
return msg
@property
def dflow_dir(self):
"""Directory to DFlow-dll."""
return os.path.join(self.home, 'dflowfm', 'bin', 'dflowfm.dll')
@dflow_dir.setter
def dflow_dir(self, directory):
"""Set directory to DFlow-dll."""
if isinstance(directory, str):
directory = directory.replace('/', '\\').split('\\')
self.dflow_dir = os.path.join(self.home, *directory)
@property
def dimr_dir(self):
"""Directory to DIMR-dll."""
return os.path.join(self.home, 'dimr', 'bin', 'dimr_dll.dll')
@dimr_dir.setter
def dimr_dir(self, directory):
"""Set directory to DIMR-dll."""
if isinstance(directory, str):
directory = directory.replace('/', '\\').split('\\')
self.dimr_dir = os.path.join(self.home, *directory)
@property
def model(self):
"""Main model-object."""
if self.config:
return self.model_dimr
return self.model_fm
@property
def model_fm(self):
"""Deflt3D-FM model-object."""
return bmi.wrapper.BMIWrapper(
engine=self.dflow_dir,
configfile=self.mdu
)
@property
def model_dimr(self):
"""Delft3D DIMR model-object."""
if not self.config:
return bmi.wrapper.BMIWrapper(
engine=self.dimr_dir,
configfile=self.config
)
def environment(self):
"""Set Python environment to include Delft3D-code."""
dirs = [
os.path.join(self.home, 'share', 'bin'),
os.path.join(self.home, 'dflowfm', 'bin'),
]
if self.config:
dirs.extend([
os.path.join(self.home, 'dimr', 'bin'),
os.path.join(self.home, 'dwaves', 'bin'),
os.path.join(self.home, 'esmf', 'scripts'),
os.path.join(self.home, 'swan', 'scripts'),
])
env = ';'.join(dirs)
os.environ['PATH'] = env
print(f'\nEnvironment \"PATH\":')
[print(f'\t{path}') for path in dirs]
def initialize(self):
"""Initialize the working model."""
self.model.initialize()
def update(self, timestep):
"""Update the working model."""
self.timestep = timestep
self.reset_counters()
self.model.update(self.timestep)
def finalize(self):
"""Finalize the working model."""
self.model.finalize()
def reset_counters(self):
"""Reset properties for next model update."""
sums = self.model_fm.get_var('is_sumvalsnd')
sums.fill(0.)
self.model_fm.set_var('is_sumvalsnd', sums)
maxs = self.model_fm.get_var('is_maxvalsnd')
maxs.fill(0.)
self.model_fm.set_var('is_maxvalsnd', maxs)
def get_var(self, variable):
"""Get variable from DFlow-model."""
return self.model_fm.get_var(variable)
def set_var(self, variable):
"""Set variable to DFlow-model."""
self.model_fm.set_var(variable)
@property
def space(self):
"""Number of non-boundary boxes; i.e. within-domain boxes."""
return self.model_fm.get_var('ndxi')
@property
def x(self):
"""Center of gravity's x-coordinates as part of `space`."""
return self.model_fm.get_var('xzw')[range(self.space)]
@property
def y(self):
"""Center of gravity's y-coordinates as part of `space`."""
return self.model_fm.get_var('yzw')[range(self.space)]
@property
def vel_wave(self):
"""Wave orbital velocity [ms-1] as part of `space`."""
return self.model_fm.get_var('Uorb')[range(self.space)]
@property
def vel_curr_mn(self):
"""Mean current velocity [ms-1] as part of `space`."""
vel_sum = self.model_fm.get_var('is_sumvalsnd')[range(self.space), 1]
return vel_sum / self.timestep
@property
def vel_curr_mx(self):
"""Maximum current velocity [ms-1] as part of `space`."""
return self.model_fm.get_var('is_maxvalsnd')[range(self.space), 1]
@property
def per_wave(self):
"""Peak wave period [s] as part of `space`."""
return self.model_fm.get_var('twav')[range(self.space)]
@property
def depth(self):
"""Water depth [m] as part of `space`"""
dep_sum = self.model_fm.get_var('is_sumvalsnd')[range(self.space), 2]
return dep_sum / self.timestep
@property
def can_dia(self):
"""Representative diameter of the canopy [m] as part of `space`."""
return self.model_fm.get_var('diaveg')[range(self.space)]
@can_dia.setter
def can_dia(self, canopy_diameter):
self.model_fm.set_var('diaveg', canopy_diameter)
@property
def can_height(self):
"""Height of the canopy [m] as part of `space`."""
return self.model_fm.get_var('stemheight')[range(self.space)]
@can_height.setter
def can_height(self, canopy_height):
self.model_fm.set_var('stemheight', canopy_height)
@property
def can_den(self):
"""Density of the canopy [pcs m-2] as part of `space`."""
return self.model_fm.get_var('rnveg')[range(self.space)]
@can_den.setter
def can_den(self, canopy_density):
self.model_fm.set_var('rnveg', canopy_density)
class Reef1D:
"""Simplified one-dimensional hydrodynamic model over a (coral) reef."""
# TODO: Complete the one-dimensional hydrodynamic model
def __init__(self, bathymetry, wave_height, wave_period, dx=1):
"""
Initialisation of `Reef1D`.
Parameters
----------
bathymetry : numeric
Bathymetric cross-shore data with means sea level as reference [m]
and x=0 at the offshore boundary.
wave_height : numeric
Significant wave height [m].
wave_period : numeric
Peak wave period [s].
dx : numeric
Spatial step between bathymetric data points [m].
"""
self.bath = bathymetry
self.Hs = wave_height
self.Tp = wave_period
self.dx = dx
self.z = np.zeros(self.space)
self._diameter = None
self._height = None
self._density = None
def __str__(self):
msg = (
f'One-dimensional simple hydrodynamic model to simulate the '
f'hydrodynamics on a (coral) reef with the following settings:'
f'\n\tBathymetric cross-shore data : {type(self.bath).__name__}'
f'\n\t\trange [m] : {min(self.bath)}-{max(self.bath)}'
f'\n\t\tlength [m] : {self.space * self.dx}'
f'\n\tSignificant wave height [m] : {self.Hs}'
f'\n\tPeak wave period [s] : {self.Tp}'
)
return msg
def __repr__(self):
msg = (
f'Reef1D(bathymetry={self.bath}, wave_height={self.Hs}, '
f'wave_period={self.Tp})'
)
return msg
def update(self, timestep):
pass
def finalize(self):
pass
@property
def space(self):
return len(self.bath)
@property
def x(self):
return np.arange(0, self.space, self.dx)
@property
def y(self):
return 0
@property
def vel_wave(self):
pass
@property
def vel_curr_mn(self):
return 0
@property
def vel_curr_mx(self):
return 0
@property
def per_wav(self):
return self.Tp
@property
def depth(self):
return self.bath + self.z
@property
def can_dia(self):
return self._diameter
@can_dia.setter
def can_dia(self, canopy_diameter):
self._diameter = canopy_diameter
@property
def can_height(self):
return self._height
@can_height.setter
def can_height(self, canopy_height):
self._height = canopy_height
@property
def can_den(self):
return self._density
@can_den.setter
def can_den(self, canopy_density):
self._density = canopy_density
def dispersion(self, wave_length, wave_period, depth, grav_acc):
"""Dispersion relation to determine the wave length based on the
wave period.
"""
func = wave_length - ((grav_acc * wave_period ** 2) / (2 * np.pi)) * \
np.tanh(2 * np.pi * depth / wave_length)
return func
@property
def wave_length(self):
"""Solve the dispersion relation to retrieve the wave length."""
L0 = 9.81 * self.per_wav ** 2
L = np.zeros(len(self.depth))
for i, h in enumerate(self.depth):
if h > 0:
L[i] = fsolve(self.dispersion, L0, args=(self.per_wav, h, 9.81))
return L
@property
def wave_frequnecy(self):
return 2 * np.pi / self.per_wav
@property
def wave_number(self):
k = np.zeros(len(self.wave_length))
k[self.wave_length > 0] = 2 * np.pi / self.wave_length[
self.wave_length > 0]
return k
@property
def wave_celerity(self):
return self.wave_length / self.per_wav
@property
def group_celerity(self):
n = .5 * (1 + (2 * self.wave_number * self.depth) /
(np.sinh(self.wave_number * self.depth)))
return n * self.wave_celerity
if __name__ == '__main__':
import matplotlib.pyplot as plt
model = Reef1D(np.linspace(20, 2), 1, 4)
plt.plot(model.x, model.z)
plt.plot(model.x, -model.depth)
plt.plot(model.x, model.wave_celerity)
plt.plot(model.x, model.group_celerity)
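# Illustrative usage sketch for the Delft3D coupling above (all paths are placeholders):
#
#   model = Delft3D(home_dir=r'C:\dflowfm_dimr', mdu_file=r'C:\model\FlowFM.mdu',
#                   config_file=r'C:\model\dimr_config.xml')
#   model.update(timestep=3600)                 # advance the hydrodynamics by one step [s]
#   depth, u_orb = model.depth, model.vel_wave  # per-cell water depth and orbital velocity
#   model.finalize()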
| []
| []
| [
"PATH"
]
| [] | ["PATH"] | python | 1 | 0 | |
examples/tinygo-outbound-redis/main.go | package main
import (
"net/http"
"os"
spin_http "github.com/fermyon/spin/sdk/go/http"
"github.com/fermyon/spin/sdk/go/redis"
)
func init() {
// handler for the http trigger
spin_http.Handle(func(w http.ResponseWriter, r *http.Request) {
// addr holds the address of the Redis server, read from the REDIS_ADDRESS
// environment variable that is set in `spin.toml`.
addr := os.Getenv("REDIS_ADDRESS")
// channel holds the Redis channel the component publishes to, read from the
// REDIS_CHANNEL environment variable that is set in `spin.toml`.
channel := os.Getenv("REDIS_CHANNEL")
// payload is the data published to the redis channel.
payload := []byte(`Hello redis from tinygo!`)
if err := redis.Publish(addr, channel, payload); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// set redis `mykey` = `myvalue`
if err := redis.Set(addr, "mykey", []byte("myvalue")); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// get redis payload for `mykey`
if payload, err := redis.Get(addr, "mykey"); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
} else {
w.Write([]byte("mykey value was: "))
w.Write(payload)
}
})
}
func main() {}
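// Illustrative spin.toml sketch assumed by the handler above (the exact manifest schema may
// differ between Spin versions; values are placeholders):
//
//   [[component]]
//   id = "tinygo-outbound-redis"
//   environment = { REDIS_ADDRESS = "redis://127.0.0.1:6379", REDIS_CHANNEL = "messages" }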
| [
"\"REDIS_ADDRESS\"",
"\"REDIS_CHANNEL\""
]
| []
| [
"REDIS_CHANNEL",
"REDIS_ADDRESS"
]
| [] | ["REDIS_CHANNEL", "REDIS_ADDRESS"] | go | 2 | 0 | |
main.go | package main
import (
"log"
"net/http"
"os"
"github.com/joho/godotenv"
"github.com/luqmanarifin/kentang/handler"
)
func main() {
if os.Getenv("APP_ENV") != "production" {
err := godotenv.Load()
if err != nil {
log.Fatal("Error loading .env file")
}
}
log.Printf("channel secret %s\n", os.Getenv("CHANNEL_SECRET"))
log.Printf("channel token %s\n", os.Getenv("CHANNEL_TOKEN"))
log.Printf("port %s\n", os.Getenv("PORT"))
handler := handler.NewHandler(
os.Getenv("CHANNEL_SECRET"),
os.Getenv("CHANNEL_TOKEN"),
)
http.HandleFunc("/healthz", handler.Healthz)
http.HandleFunc("/callback", handler.Callback)
// This is just sample code.
// For actual use, you must support HTTPS by using `ListenAndServeTLS`, a reverse proxy or something else.
if err := http.ListenAndServe(":"+os.Getenv("PORT"), nil); err != nil {
log.Fatal(err)
}
}
| [
"\"APP_ENV\"",
"\"CHANNEL_SECRET\"",
"\"CHANNEL_TOKEN\"",
"\"PORT\"",
"\"CHANNEL_SECRET\"",
"\"CHANNEL_TOKEN\"",
"\"PORT\""
]
| []
| [
"APP_ENV",
"PORT",
"CHANNEL_TOKEN",
"CHANNEL_SECRET"
]
| [] | ["APP_ENV", "PORT", "CHANNEL_TOKEN", "CHANNEL_SECRET"] | go | 4 | 0 | |
pystasmvt/mvtcreator.py | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import json
import os
import logging
from pystasmvt import mvtsql
LOGGER = logging.getLogger(__name__)
# Configuration template
CONFIG={
'connection':{
'user':'',#require
'password':'',#require
'host':'',#require
'port':'',#require
'dbname':''#require
},
'groups':[
{
'group_name':'',#require
'layers':{#require
'layername':'',#require
'namespace':[],#option
'tablename':'',#require
'attr_col':'',#require
'where':'',#require
'geometry_col':'',#require
'srid':4236,#require
'geotype':'',#require
'enable_scale':[]#require
},
'time_out':'',#option
'extent':''#option
}
]
}
# Maximum scale level
MAX_SCALE_LEVEL=19
DEFAULT_TIME_OUT=60000#ms
DEFAULT_EXTENT = 16386
DEFAULT_BUFFER = 256
DEFAULT_CLIP=True
class MvtCreator(object):
def __init__(self):
self._GROUP_SQL_LIST= {}
self._ENGINE=None
def init_db_session(self,config):
""" Preparation step:
load the configuration file,
run PREPARE,
and build the list of EXECUTE statements
"""
p_user = config['connection']['user']
p_pw = config['connection']['password']
p_host = config['connection']['host']
p_port = config['connection']['port']
p_dbname = config['connection']['dbname']
#self._ENGINE = create_engine('postgresql://'+os.getenv('POSTGRES_USER','map')+':'+os.getenv('POSTGRES_PASSWORD','map')+'@'+os.getenv('POSTGRES_HOST','localhost')+':'+os.getenv('POSTGRES_PORT','5432')+'/'+os.getenv('POSTGRES_DB','gis_test2'),
self._ENGINE = create_engine('postgresql://'+p_user+':'+p_pw+'@'+p_host+':'+p_port+'/'+p_dbname,pool_size=20, max_overflow=0)
for group in config['groups']:
if group['group_name'] not in self._GROUP_SQL_LIST.keys():
self._GROUP_SQL_LIST[group['group_name']]={}
time_out = DEFAULT_TIME_OUT
if 'time_out' in group.keys():
time_out = group['time_out']
extent = DEFAULT_EXTENT
if 'extent' in group.keys():
extent = group['extent']
buffer = DEFAULT_BUFFER
if 'buffer' in group.keys():
buffer = group['buffer']
clip = DEFAULT_CLIP
if 'clip' in group.keys():
clip = group['clip']
layers = group['layers']
if not layers:
return False
for scale in range(MAX_SCALE_LEVEL):
prepared = mvtsql.MvtSql(layers,scale,scale,time_out,extent,buffer,clip)
if prepared:
self._GROUP_SQL_LIST[group['group_name']][scale] = prepared
return True
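# Illustrative call sketch (group name and tile indices are placeholders; the config dict
# follows the CONFIG template above):
#
#   creator = MvtCreator()
#   creator.init_db_session(config)                    # prepares per-scale SQL for every group
#   tile = creator.get_mvt('roads', 14, 14552, 6451)   # returns the MVT binary, or None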
def get_mvt(self,group_name,zoom,x,y):
""" Generate the vector-tile binary.
"""
try: # Sanitize the inputs
sani_zoom,sani_x,sani_y = int(zoom),int(x),int(y)
del zoom,x,y
except Exception as e:
LOGGER.error('suspicious')
LOGGER.exception(e)
return None
if group_name not in self._GROUP_SQL_LIST.keys():
return None
layergroup = self._GROUP_SQL_LIST[group_name]
if sani_zoom not in layergroup.keys():
return None
if not self._ENGINE:
return None
DBSession = sessionmaker(bind=self._ENGINE)
session = DBSession()
final_query = layergroup[sani_zoom]
if not final_query.is_query():
return None
try:
return final_query.get_mvt_by_query(session,sani_x,sani_y,sani_zoom)
except Exception as e:
LOGGER.error("get_mvt")
LOGGER.exception(e)
raise
finally:
session.connection().close()
| []
| []
| [
"POSTGRES_USER",
"POSTGRES_HOST",
"POSTGRES_PORT",
"POSTGRES_DB",
"POSTGRES_PASSWORD"
]
| [] | ["POSTGRES_USER", "POSTGRES_HOST", "POSTGRES_PORT", "POSTGRES_DB", "POSTGRES_PASSWORD"] | python | 5 | 0 | |
python/cinn/__init__.py | # Copyright (c) 2021 CINN Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
cinndir = os.path.dirname(os.path.abspath(__file__))
runtime_include_dir = os.path.join(cinndir, "libs")
cuhfile = os.path.join(runtime_include_dir, "cinn_cuda_runtime_source.cuh")
if os.path.exists(cuhfile):
os.environ.setdefault('runtime_include_dir', runtime_include_dir)
from .core_api.common import *
from .core_api.backends import *
from .core_api.poly import *
from .core_api.ir import *
from .core_api.lang import *
from .version import full_version as __version__
| []
| []
| []
| [] | [] | python | 0 | 0 | |
acceptance/install/scenario1_test.go | package install_test
import (
"encoding/json"
"os"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/epinio/epinio/acceptance/helpers/catalog"
"github.com/epinio/epinio/acceptance/helpers/epinio"
"github.com/epinio/epinio/acceptance/helpers/proc"
"github.com/epinio/epinio/acceptance/helpers/route53"
"github.com/epinio/epinio/acceptance/testenv"
)
// This test uses AWS route53 to update the system domain's records
var _ = Describe("<Scenario1> GKE, epinio-ca", func() {
var (
flags []string
epinioHelper epinio.Epinio
appName = catalog.NewAppName()
loadbalancer string
domain string
zoneID string
)
BeforeEach(func() {
epinioHelper = epinio.NewEpinioHelper(testenv.EpinioBinaryPath())
domain = os.Getenv("EPINIO_SYSTEM_DOMAIN")
Expect(domain).ToNot(BeEmpty())
zoneID = os.Getenv("AWS_ZONE_ID")
Expect(zoneID).ToNot(BeEmpty())
flags = []string{
"--set", "global.domain=" + domain,
}
})
AfterEach(func() {
out, err := epinioHelper.Uninstall()
Expect(err).NotTo(HaveOccurred(), out)
})
It("Installs with loadbalancer IP, custom domain and pushes an app", func() {
By("Installing Epinio", func() {
out, err := epinioHelper.Install(flags...)
Expect(err).NotTo(HaveOccurred(), out)
Expect(out).To(ContainSubstring("STATUS: deployed"))
out, err = testenv.PatchEpinio()
Expect(err).ToNot(HaveOccurred(), out)
})
By("Extracting Loadbalancer IP", func() {
out, err := proc.RunW("kubectl", "get", "service", "-n", "traefik", "traefik", "-o", "json")
Expect(err).NotTo(HaveOccurred(), out)
status := &testenv.LoadBalancerHostname{}
err = json.Unmarshal([]byte(out), status)
Expect(err).NotTo(HaveOccurred())
Expect(status.Status.LoadBalancer.Ingress).To(HaveLen(1))
loadbalancer = status.Status.LoadBalancer.Ingress[0].IP
Expect(loadbalancer).ToNot(BeEmpty())
})
By("Updating DNS Entries", func() {
change := route53.A(domain, loadbalancer, "UPSERT")
out, err := route53.Update(zoneID, change, nodeTmpDir)
Expect(err).NotTo(HaveOccurred(), out)
change = route53.A("*."+domain, loadbalancer, "UPSERT")
out, err = route53.Update(zoneID, change, nodeTmpDir)
Expect(err).NotTo(HaveOccurred(), out)
})
// Check that DNS entry is correctly propagated
By("Checking that DNS entry is correctly propagated", func() {
Eventually(func() string {
out, err := route53.TestDnsAnswer(zoneID, domain, "A")
Expect(err).NotTo(HaveOccurred(), out)
answer := &route53.DNSAnswer{}
err = json.Unmarshal([]byte(out), answer)
Expect(err).NotTo(HaveOccurred())
if len(answer.RecordData) == 0 {
return ""
}
return answer.RecordData[0]
}, "5m", "2s").Should(Equal(loadbalancer))
})
// Workaround to (try to!) ensure that the DNS is really propagated!
time.Sleep(3 * time.Minute)
By("Updating Epinio config", func() {
out, err := epinioHelper.Run("config", "update")
Expect(err).NotTo(HaveOccurred(), out)
Expect(out).To(ContainSubstring("Ok"))
})
By("Checking Epinio info command", func() {
Eventually(func() string {
out, _ := epinioHelper.Run("info")
return out
}, "2m", "2s").Should(ContainSubstring("Epinio Server Version:"))
})
By("Pushing an app", func() {
out, err := epinioHelper.Run("push",
"--name", appName,
"--path", testenv.AssetPath("sample-app"))
Expect(err).ToNot(HaveOccurred(), out)
// Verify cluster_issuer is used
out, err = proc.RunW("kubectl", "get", "certificate",
"-n", testenv.DefaultWorkspace,
"--selector", "app.kubernetes.io/name="+appName,
"-o", "jsonpath='{.items[*].spec.issuerRef.name}'")
Expect(err).NotTo(HaveOccurred(), out)
Expect(out).To(Equal("'epinio-ca'"))
})
By("Delete an app", func() {
out, err := epinioHelper.Run("apps", "delete", appName)
Expect(err).NotTo(HaveOccurred(), out)
Expect(out).To(Or(ContainSubstring("Application deleted")))
})
By("Cleaning DNS Entries", func() {
change := route53.A(domain, loadbalancer, "DELETE")
out, err := route53.Update(zoneID, change, nodeTmpDir)
Expect(err).NotTo(HaveOccurred(), out)
change = route53.A("*."+domain, loadbalancer, "DELETE")
out, err = route53.Update(zoneID, change, nodeTmpDir)
Expect(err).NotTo(HaveOccurred(), out)
})
})
})
| [
"\"EPINIO_SYSTEM_DOMAIN\"",
"\"AWS_ZONE_ID\""
]
| []
| [
"AWS_ZONE_ID",
"EPINIO_SYSTEM_DOMAIN"
]
| [] | ["AWS_ZONE_ID", "EPINIO_SYSTEM_DOMAIN"] | go | 2 | 0 | |
ipxact2verilog.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import sys
sys.path.append('ipyxact')
from ipyxact.ipyxact import Component
from mako.lookup import TemplateLookup
from mako.template import Template
from mako.runtime import Context
import argparse
import yaml
parser = argparse.ArgumentParser(description='Generate Verilog RTL for Config/Status-Register')
parser.add_argument('xml_path', metavar='<ipcore.xml>', type=file, nargs=1,
help='path to IP-XACT xml-file to be parsed')
args = parser.parse_args()
inputname = os.path.splitext(args.xml_path[0].name)[0] + ".yml"
try:
with open(inputname, 'r') as configfile:
cfg = yaml.load(configfile)
except IOError:
with open("config.yml", 'r') as configfile:
cfg = yaml.load(configfile)
'''debug print of config sections
for section in cfg:
print(section)
'''
clk = str(cfg['General']['Clock'])
rst = str(cfg['General']['Reset'])
rst_level = int(cfg['General']['ResetActiveLevel'])
rst_is_sync = int(cfg['General']['ResetIsSync'])
component = Component()
component.load(args.xml_path[0].name)
addressBlock = component.memoryMaps.memoryMap[0].addressBlock[0]
busByteWidth = component.memoryMaps.memoryMap[0].addressBlock[0].width / 8
busBitWidth = component.memoryMaps.memoryMap[0].addressBlock[0].width
addr_width = component.memoryMaps.memoryMap[0].addressBlock[0].width
data_width = component.memoryMaps.memoryMap[0].addressBlock[0].register[0].size
fileName = component.name.lower() + '_regs.v'
lookup = TemplateLookup(directories=['templates'],
input_encoding='utf-8',
output_encoding='utf-8',
default_filters=['decode.utf8'],
encoding_errors='replace')
buffer = open(fileName, 'w')
for reg in addressBlock.register:
template = lookup.get_template('verilog_regs_wr.mako')
ctx = Context(buffer,
reg = reg,
cfg = cfg,
addr_width = addr_width,
data_width = data_width)
template.render_context(ctx)
template = lookup.get_template('verilog_regs_rd.mako')
ctx = Context(buffer,
register = addressBlock.register,
cfg = cfg,
addr_width = addr_width,
data_width = data_width)
template.render_context(ctx)
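# Illustrative invocation sketch (file names are placeholders):
#
#   python ipxact2verilog.py my_ipcore.xml
#
# which reads my_ipcore.yml (falling back to config.yml), parses the IP-XACT memory map and
# writes <component>_regs.v from the mako templates in ./templates.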
| []
| []
| []
| [] | [] | python | null | null | null |
src/python/sikuli.py | """
Created on 2015-08-19
@author: [email protected]
"""
import socket
import logging
import sys
import os
import glob
import time
import threading
import codecs
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
try:
from xmlrpclib import ProtocolError
except ImportError:
from xmlrpc.client import ProtocolError
from robot.libraries.Process import Process
from robot.libraries.Remote import Remote
from robot.libraries.BuiltIn import BuiltIn
from .version import VERSION
try:
from .keywords import KEYWORDS
except ImportError:
pass
class SikuliLibrary(object):
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
ROBOT_LIBRARY_VERSION = VERSION
def __init__(self, port=0, timeout=3.0, mode='OLD'):
"""
@port: sikuli java process socket port
@timeout: Timeout of waiting java process started
@mode: if set as 'DOC', will stop java process automatically,
if set as 'PYTHON', means library is running out of robot environment
if set as 'CREATE', it is only for mvn package usage, will create keywords.py file
if set as 'OLD'(default), sikuli java process will be started when library is inited
if set as 'NEW', user should use 'start_sikuli_process' to start java process
"""
self.logger = self._init_logger()
self.timeout = float(timeout)
self.port = None
self.remote = None
self.mode = mode.upper().strip()
if mode == 'OLD':
self.start_sikuli_process(port)
if mode.upper().strip() == 'DOC':
self.start_sikuli_process()
self._stop_thread(4)
elif mode.upper().strip() == 'PYTHON':
pass
elif mode.upper().strip() == 'CREATE':
self._create_keywords_file()
elif mode.upper().strip() != 'NEW':
self._check_robot_running()
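    # Example (sketch): with mode='NEW' the Java process is not started by the
    # constructor, so it must be started explicitly, e.g.
    #   lib = SikuliLibrary(mode='NEW')
    #   lib.start_sikuli_process()   # a free port is chosen automatically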
def start_sikuli_process(self, port=None):
"""
This keyword is used to start sikuli java process.
If library is inited with mode "OLD", sikuli java process is started automatically.
If library is inited with mode "NEW", this keyword should be used.
:param port: port of sikuli java process, if value is None or 0, a random free port will be used
:return: None
"""
if port is None or int(port) == 0:
port = self._get_free_tcp_port()
self.port = port
start_retries = 0
started = False
while start_retries < 5:
try:
self._start_sikuli_java_process()
except RuntimeError as err:
print('error........%s' % err)
if self.process:
self.process.terminate_process()
self.port = self._get_free_tcp_port()
start_retries += 1
continue
started = True
break
if not started:
raise RuntimeError('Start sikuli java process failed!')
self.remote = self._connect_remote_library()
def _create_keywords_file(self):
keywordDict = {}
self.start_sikuli_process()
try:
keywordList = self.get_keyword_names()
for keywordName in keywordList:
keywordDict[keywordName] = {}
keywordDict[keywordName]['arg'] = self.get_keyword_arguments(keywordName)
keywordDict[keywordName]['doc'] = self.get_keyword_documentation(keywordName)
with codecs.open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'keywords.py'), 'w',
encoding='utf-8') as f:
f.write('# -*- coding: utf-8 -*-\n')
# keywords = ','.join(['"%s": %s' % (k, keywordDict[k]) for k in keywordDict.keys()])
f.write('KEYWORDS = %s' % keywordDict )
finally:
self._stop_thread(3)
def _check_robot_running(self):
try:
BuiltIn().get_variable_value('${SUITE SOURCE}')
except Exception as err:
            self.logger.warn('Robot may not be running, stop java process: %s' % err)
self._stop_thread(1)
def _init_logger(self):
robotLogLevels = {'TRACE': int(logging.DEBUG / 2),
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'HTML': logging.INFO,
'WARN': logging.WARN}
builtIn = BuiltIn()
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
logger = logging.getLogger('SikuliLibraryLogger')
logger.addHandler(handler)
level = logging.DEBUG
try:
logLevel = builtIn.get_variable_value('${LOG_LEVEL}')
level = robotLogLevels[logLevel]
except Exception:
pass
logger.setLevel(level)
return logger
def _get_free_tcp_port(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 0))
sock.listen(1)
host, port = sock.getsockname()
self.logger.debug('Free TCP port is: %d' % port)
sock.close()
return port
def _start_sikuli_java_process(self):
libFolder = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'lib')
jarList = glob.glob(libFolder + os.sep + '*.jar')
if len(jarList) != 1:
            raise Exception('Sikuli jar package should exist in lib folder')
sikuliJar = jarList[0]
java = 'java'
arguments = ['-jar', sikuliJar, str(self.port), self._get_output_folder()]
self.process = Process()
if os.getenv("DISABLE_SIKULI_LOG"):
self.process.start_process(java, *arguments, shell=True)
else:
self.process.start_process(java, *arguments, shell=True, stdout=self._output_file(),
stderr=self._err_file())
self.logger.info('Start sikuli java process on port %s' % str(self.port))
self._wait_process_started()
self.logger.info('Sikuli java process is started')
def _wait_process_started(self):
url = "http://127.0.0.1:%s/" % str(self.port)
currentTime = startedTime = time.time()
started = False
while (currentTime - startedTime) < self.timeout:
try:
urlopen(url).read()
except Exception:
currentTime = time.time()
time.sleep(1.0)
continue
started = True
break
if not started:
raise RuntimeError('Start sikuli java process failed!')
def _output_file(self):
outputDir = self._get_output_folder()
outputFile = 'Sikuli_java_stdout_' + str(time.time()) + '.txt'
return os.path.join(outputDir, outputFile)
def _err_file(self):
outputDir = self._get_output_folder()
errFile = 'Sikuli_java_stderr_' + str(time.time()) + '.txt'
return os.path.join(outputDir, errFile)
def _get_output_folder(self):
outputDir = os.path.abspath(os.curdir)
try:
outputDir = BuiltIn().get_variable_value('${OUTPUTDIR}')
except Exception:
pass
return outputDir
def _connect_remote_library(self):
remoteUrl = 'http://127.0.0.1:%s/' % str(self.port)
remote = Remote(remoteUrl)
self._test_get_keyword_names(remote)
return remote
def _test_get_keyword_names(self, remote):
currentTime = startedTime = time.time()
started = False
while (currentTime - startedTime) < self.timeout:
try:
remote.get_keyword_names()
except Exception as err:
self.logger.warn("Test get_keyword_names failed! %s" % err)
currentTime = time.time()
time.sleep(1.0)
continue
started = True
break
if not started:
raise RuntimeError('Failed to get_keyword_names!')
def get_keyword_names(self):
if self.mode == 'CREATE':
return self.remote.get_keyword_names() + ['start_sikuli_process']
return list(KEYWORDS.keys()) + ['start_sikuli_process']
# return self.remote.get_keyword_names() + ['start_sikuli_process']
def get_keyword_arguments(self, name):
if name == 'start_sikuli_process':
return ['port=None']
if self.mode == 'CREATE':
return self.remote.get_keyword_arguments(name)
return KEYWORDS[name]['arg']
def get_keyword_documentation(self, name):
if name == 'start_sikuli_process':
return self.start_sikuli_process.__doc__
elif name == '__intro__':
return SikuliLibrary.__doc__
elif name == '__init__':
return getattr(self, name).__doc__
if self.mode == 'CREATE':
return self.remote.get_keyword_documentation(name)
return KEYWORDS[name]['doc']
def run_keyword(self, name, arguments=[], kwargs={}):
if name == 'start_sikuli_process':
return self.start_sikuli_process(*arguments)
return self.remote.run_keyword(name, arguments, kwargs)
def _stop_thread(self, timeout):
def stop():
time.sleep(float(timeout))
self.run_keyword('stop_remote_server')
thread = threading.Thread(target=stop, args=())
thread.start()
| [] | [] | ["DISABLE_SIKULI_LOG"] | [] | ["DISABLE_SIKULI_LOG"] | python | 1 | 0 |
Sensation1/sensation_models.py | import torch
from torch.nn import (
Conv2d,BatchNorm2d,MaxPool2d,#AvgPool2d,
ConvTranspose2d,Upsample,
Linear,LayerNorm,
Module,
)
import os
import random
from .config import config
from typing import Union,Tuple
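# ResBlock2d: a residual-style block. A conv + max-pool stem feeds two
# branches: a 1x1 "channel" branch and a 1x1 -> kxk -> 1x1 bottleneck branch;
# their outputs are summed, batch-normalised and passed through ReLU.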
class ResBlock2d(torch.nn.Module):
def __init__(
self,in_channels:int,out_channels:int,
kernel_size_1st:Union[int,Tuple[int,int]],kernel_size_2nd:Union[int,Tuple[int,int]]) -> None:
super().__init__()
f_ch = int(out_channels//2)
self.convf = Conv2d(in_channels,f_ch,kernel_size=kernel_size_1st)
self.normf = BatchNorm2d(f_ch)
self.pool = MaxPool2d(2)
# fork1
self.conv_ch = Conv2d(f_ch,out_channels,1)
self.norm_ch = BatchNorm2d(out_channels)
# fork2
pad = torch.floor(torch.tensor(kernel_size_2nd)/2).type(torch.long)
if pad.dim() == 0:
pad = int(pad)
else:
pad = tuple(pad)
s_ch = int(out_channels//4)
self.conv_rc1 = Conv2d(f_ch,s_ch,1)
self.norm_rc1 = BatchNorm2d(s_ch)
self.conv_r = Conv2d(s_ch,s_ch,kernel_size_2nd,padding=pad)
self.norm_r = BatchNorm2d(s_ch)
self.conv_rc2 = Conv2d(s_ch,out_channels,1)
self.norm_rc2 = BatchNorm2d(out_channels)
self.norm = BatchNorm2d(out_channels)
def forward(self,x):
x = self.normf(self.convf(x))
x = torch.relu(x)
x = self.pool(x)
# fork1
x1 = self.norm_ch(self.conv_ch(x))
# fork2
x2 = self.norm_rc1(self.conv_rc1(x))
x2 = torch.relu(x2)
x2 = self.norm_r(self.conv_r(x2))
x2 = torch.relu(x2)
x2 = self.norm_rc2(self.conv_rc2(x2))
# merge
x = self.norm(torch.add(x1,x2))
x = torch.relu(x)
return x
class InverseBlock2d(Module):
def __init__(
self,in_channels:int,out_channels:int,
kernel_size_1st:Union[int,Tuple[int,int]],kernel_size_2nd:Union[int,Tuple[int,int]]) -> None:
super().__init__()
self.upsampler = Upsample(scale_factor=2)
self.Dcon1 = ConvTranspose2d(in_channels,out_channels,kernel_size_1st)
self.norm1 = BatchNorm2d(out_channels)
self.Dcon2 = ConvTranspose2d(out_channels,out_channels,kernel_size_2nd)
self.norm2 = BatchNorm2d(out_channels)
def forward(self,x):
x = torch.relu(self.norm1(self.Dcon1(x)))
x = self.upsampler(x)
x = self.norm2(self.Dcon2(x))
return x
class Encoder(Module):
input_size = (1,config.channels,config.width,config.height) # input size
output_size = (1,256,7,3) # output size
def __init__(self):
super().__init__()
self.reset_seed()
# Model layers
self.Conv1 = ResBlock2d(config.channels,8,9,5)
self.Conv2 = ResBlock2d(8,16,7,3)
self.Conv3 = ResBlock2d(16,32,3,3)
self.Conv4 = ResBlock2d(32,64,3,3)
self.Conv5 = ResBlock2d(64,128,3,3)
self.Conv6 = ResBlock2d(128,256,3,3)
self.outconv = Conv2d(256,256,1,1)
def forward(self,x):
x = x.view(-1,config.channels,config.width,config.height)
#x = self.pool(x)
x = self.Conv1(x)
x = self.Conv2(x)
x = self.Conv3(x)
x = self.Conv4(x)
x = self.Conv5(x)
x = self.Conv6(x)
x = torch.tanh(self.outconv(x))
return x
def reset_seed(self,seed=0):
os.environ['PYTHONHASHSEED'] = '0'
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
class Decoder(Module):
input_size = Encoder.output_size
output_size = Encoder.input_size
insize = (-1,) + input_size[1:]
def __init__(self):
super().__init__()
self.reset_seed()
# Model layers
self.Dcon1 = InverseBlock2d(256,128,(7,3),5)
self.Dcon2 = InverseBlock2d(128,64,(6,5),3)
self.Dcon3 = InverseBlock2d(64,32,3,3)
self.Dcon4 = InverseBlock2d(32,16,3,(7,5))
self.Dcon5 = InverseBlock2d(16,3,7,(9,5))
def forward(self,x):
x = x.view(self.insize)
x = self.Dcon1(x)
x = torch.relu(x)
x = self.Dcon2(x)
x = torch.relu(x)
x = self.Dcon3(x)
x = torch.relu(x)
x = self.Dcon4(x)
x = torch.relu(x)
x = self.Dcon5(x)
x = torch.sigmoid(x)
return x
def reset_seed(self,seed=0):
os.environ['PYTHONHASHSEED'] = '0'
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
class AutoEncoder(Module):
def __init__(self):
"""
        This class is used for training only.
How about using it like this?
>>> model = AutoEncoder()
>>> # -- Training Model Process --
>>> torch.save(model.encoder.state_dict(),encoder_name)
>>> torch.save(model.decoder.state_dict(),decoder_name)
"""
super().__init__()
self.encoder = Encoder()
self.decoder = Decoder()
def forward(self,x):
x = self.encoder(x)
x = self.decoder(x)
return x
if __name__ == '__main__':
from torchsummaryX import summary
model = AutoEncoder()
dummy = torch.randn(model.encoder.input_size)
print(summary(model.encoder,dummy))
""" Documentation
""" | []
| []
| [
"PYTHONHASHSEED"
]
| [] | ["PYTHONHASHSEED"] | python | 1 | 0 | |
internal/child/child.go | // The proj/child package manages traversing into child projects.
package child
import (
"fmt"
"github.com/djmitche/proj/internal/config"
"log"
"os"
"path"
)
// a function to recursively "run" proj in a new project
type recurseFunc func(path string) error
// information that is useful to each child function
type childInfo struct {
// child configuration
childConfig *config.ChildConfig
// host configuration
hostConfig *config.HostConfig
// remaining proj path after this child
path string
// a pointer to the top-level "run" function, useful for recursing into a child
// in the same process
recurse recurseFunc
}
// a child function, implemented in other files in this package
type childFunc func(info *childInfo) error
// the map of all known child functions
var childFuncs map[string]childFunc = make(map[string]childFunc)
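// Implementations living in other files of this package are expected to add
// themselves to this map, e.g. (sketch only): childFuncs["cd"] = cdChild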
// load a child configuration given the child project name
func loadChildConfigFor(elt string) (*config.ChildConfig, error) {
// try .proj/<child>.cfg
configFilename := path.Join(".proj", elt+".cfg")
st, err := os.Stat(configFilename)
if err == nil {
return config.LoadChildConfig(configFilename)
}
// try a simple subdirectory
st, err = os.Stat(elt)
if err == nil && st != nil && st.IsDir() {
cfg := config.ChildConfig{Type: "cd"}
cfg.Cd.Dir = elt
return &cfg, nil
}
return nil, fmt.Errorf("No such child %s", elt)
}
// Exists returns true if the given child exists
func Exists(hostConfig *config.HostConfig, elt string) bool {
_, err := loadChildConfigFor(elt)
return err == nil
}
// Start the child named by `elt`
func StartChild(hostConfig *config.HostConfig, elt string, path string, recurse recurseFunc) error {
log.Printf("startChild(%+v, %+v, %+v)\n", hostConfig, elt, path)
childConfig, err := loadChildConfigFor(elt)
if err != nil {
return fmt.Errorf("No such child %s", elt)
}
// apply `prepend` regardless of child type
prepend := childConfig.Common().Prepend
if prepend != "" {
log.Printf("prepending %q", prepend)
if path == "" {
path = prepend
} else {
path = prepend + "/" + path
}
}
f, ok := childFuncs[childConfig.Type]
if !ok {
return fmt.Errorf("No such child type %s", childConfig.Type)
}
err = f(&childInfo{
childConfig: childConfig,
hostConfig: hostConfig,
path: path,
recurse: recurse,
})
if err != nil {
return fmt.Errorf("while starting child %q: %s", elt, err)
}
return nil
}
| [] | [] | [] | [] | [] | go | null | null |
shortcut.go | package shortcut_api
import (
"encoding/json"
"errors"
"io/ioutil"
"net/http"
)
// Shortcut is the primary client for this library.
// To initialize, using the NewShortcut constructor.
//
// Usage example:
//
// api := shortcut_api.NewShortcut(
// shortcut_api.WithShortcutToken(os.Getenv("SHORTCUT_TOKEN")),
// shortcut_api.WithUrl("https://api.app.shortcut.com/api/v3"),
// )
// epics, err := api.ListEpics()
type Shortcut struct {
url string
token string
client *http.Client
}
// NewShortcut is the constructor for Shortcut.
// NewShortcut uses the "Functional options" pattern
// to easy construction.
//
// Usage example:
//
// api := shortcut_api.NewShortcut(
// shortcut_api.WithShortcutToken(os.Getenv("SHORTCUT_TOKEN")),
// shortcut_api.WithUrl("https://api.app.shortcut.com/api/v3"),
// )
// epics, err := api.ListEpics()
func NewShortcut(options ...ShortcutOption) *Shortcut {
s := &Shortcut{url: "https://api.app.shortcut.com/api/v3", client: http.DefaultClient}
for _, option := range options {
option(s)
}
return s
}
type ShortcutOption func(shortcut *Shortcut)
// WithShortcutToken is meant to be used along with
// NewShortcut as a way to pass your "Shortcut-Token" header
// to shortcut api requests.
func WithShortcutToken(token string) ShortcutOption {
return func(shortcut *Shortcut) {
shortcut.token = token
}
}
// WithUrl is meant to be used along with
// NewShortcut as a way to point to the shortcut api.
// By default, it points to "https://api.app.shortcut.com/api/v3".
func WithUrl(url string) ShortcutOption {
return func(shortcut *Shortcut) {
shortcut.url = url
}
}
// WithHttpClient is meant to be used along with
// NewShortcut as a way to override the http client being used.
// By default, it uses http.DefaultClient.
func WithHttpClient(client *http.Client) ShortcutOption {
return func(shortcut *Shortcut) {
shortcut.client = client
}
}
// makeQuery is a function which is the backbone of this library.
// All shortcut calls are made via this function.
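// An endpoint wrapper would typically use it like this (illustrative sketch;
// the "Epic" type and the "/epics" path are assumptions, not defined here):
//
//	req, _ := http.NewRequest("GET", s.url+"/epics", nil)
//	var epics []Epic
//	err := s.makeQuery(req, &epics)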
func (s Shortcut) makeQuery(req *http.Request, t interface{}) error {
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Shortcut-Token", s.token)
resp, err := s.client.Do(req)
if err != nil {
return err
}
rb, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
defer func() { err = resp.Body.Close() }()
if resp.StatusCode != 200 {
return errors.New("request failed [" + resp.Status + "]: " + string(rb))
}
return json.Unmarshal(rb, t)
}
// Entity is used to avoid redundancy in shortcut's api.
// That being said, their api is rather inconsistent.
// Look at how WorkflowState's id is an int. Whereas,
// Member's id is a string.
type Entity struct {
Id int `json:"id"`
Name string `json:"name"`
}
| ["\"SHORTCUT_TOKEN\"", "\"SHORTCUT_TOKEN\""] | [] | ["SHORTCUT_TOKEN"] | [] | ["SHORTCUT_TOKEN"] | go | 1 | 0 |
config/config.go | // Package config collects together all configuration settings
// NOTE: Subject to change, do not rely on this package from outside git-lfs source
package config
import (
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"github.com/git-lfs/git-lfs/fs"
"github.com/git-lfs/git-lfs/git"
"github.com/git-lfs/git-lfs/tools"
"github.com/rubyist/tracerx"
)
var (
ShowConfigWarnings = false
defaultRemote = "origin"
gitConfigWarningPrefix = "lfs."
)
type Configuration struct {
	// Os provides a `*Environment` used to access the system's
// environment through os.Getenv. It is the point of entry for all
// system environment configuration.
Os Environment
	// Git provides a `*Environment` used to access the various levels of
// `.gitconfig`'s. It is the point of entry for all Git environment
// configuration.
Git Environment
currentRemote *string
pushRemote *string
// gitConfig can fetch or modify the current Git config and track the Git
// version.
gitConfig *git.Configuration
ref *git.Ref
remoteRef *git.Ref
fs *fs.Filesystem
gitDir *string
workDir string
loading sync.Mutex // guards initialization of gitConfig and remotes
loadingGit sync.Mutex // guards initialization of local git and working dirs
remotes []string
extensions map[string]Extension
}
func New() *Configuration {
return NewIn("", "")
}
func NewIn(workdir, gitdir string) *Configuration {
gitConf := git.NewConfig(workdir, gitdir)
c := &Configuration{
Os: EnvironmentOf(NewOsFetcher()),
gitConfig: gitConf,
}
if len(gitConf.WorkDir) > 0 {
c.gitDir = &gitConf.GitDir
c.workDir = gitConf.WorkDir
}
c.Git = &delayedEnvironment{
callback: func() Environment {
sources, err := gitConf.Sources(filepath.Join(c.LocalWorkingDir(), ".lfsconfig"))
if err != nil {
fmt.Fprintf(os.Stderr, "Error reading git config: %s\n", err)
}
return c.readGitConfig(sources...)
},
}
return c
}
func (c *Configuration) readGitConfig(gitconfigs ...*git.ConfigurationSource) Environment {
gf, extensions, uniqRemotes := readGitConfig(gitconfigs...)
c.extensions = extensions
c.remotes = make([]string, 0, len(uniqRemotes))
for remote, isOrigin := range uniqRemotes {
if isOrigin {
continue
}
c.remotes = append(c.remotes, remote)
}
return EnvironmentOf(gf)
}
// Values is a convenience type used to call the NewFromValues function. It
// specifies `Git` and `Env` maps to use as mock values, instead of calling out
// to real `.gitconfig`s and the `os.Getenv` function.
type Values struct {
// Git and Os are the stand-in maps used to provide values for their
// respective environments.
Git, Os map[string][]string
}
// NewFrom returns a new `*config.Configuration` that reads both its Git
// and Environment-level values from the ones provided instead of the actual
// `.gitconfig` file or `os.Getenv`, respectively.
//
// This method should only be used during testing.
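// For example (illustrative only):
//
//	cfg := config.NewFrom(config.Values{
//		Git: map[string][]string{"lfs.fetchinclude": {"docs/**"}},
//		Os:  map[string][]string{"GIT_LFS_SKIP_DOWNLOAD_ERRORS": {"true"}},
//	})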
func NewFrom(v Values) *Configuration {
c := &Configuration{
Os: EnvironmentOf(mapFetcher(v.Os)),
gitConfig: git.NewConfig("", ""),
}
c.Git = &delayedEnvironment{
callback: func() Environment {
source := &git.ConfigurationSource{
Lines: make([]string, 0, len(v.Git)),
}
for key, values := range v.Git {
for _, value := range values {
fmt.Printf("Config: %s=%s\n", key, value)
source.Lines = append(source.Lines, fmt.Sprintf("%s=%s", key, value))
}
}
return c.readGitConfig(source)
},
}
return c
}
// BasicTransfersOnly returns whether to only allow "basic" HTTP transfers.
// Default is false, including if the lfs.basictransfersonly is invalid
func (c *Configuration) BasicTransfersOnly() bool {
return c.Git.Bool("lfs.basictransfersonly", false)
}
// TusTransfersAllowed returns whether to only use "tus.io" HTTP transfers.
// Default is false, including if the lfs.tustransfers is invalid
func (c *Configuration) TusTransfersAllowed() bool {
return c.Git.Bool("lfs.tustransfers", false)
}
func (c *Configuration) FetchIncludePaths() []string {
patterns, _ := c.Git.Get("lfs.fetchinclude")
return tools.CleanPaths(patterns, ",")
}
func (c *Configuration) FetchExcludePaths() []string {
patterns, _ := c.Git.Get("lfs.fetchexclude")
return tools.CleanPaths(patterns, ",")
}
func (c *Configuration) CurrentRef() *git.Ref {
c.loading.Lock()
defer c.loading.Unlock()
if c.ref == nil {
r, err := git.CurrentRef()
if err != nil {
tracerx.Printf("Error loading current ref: %s", err)
c.ref = &git.Ref{}
} else {
c.ref = r
}
}
return c.ref
}
func (c *Configuration) IsDefaultRemote() bool {
return c.Remote() == defaultRemote
}
// Remote returns the default remote based on:
// 1. The currently tracked remote branch, if present
// 2. Any other SINGLE remote defined in .git/config
// 3. Use "origin" as a fallback.
// Results are cached after the first hit.
func (c *Configuration) Remote() string {
ref := c.CurrentRef()
c.loading.Lock()
defer c.loading.Unlock()
if c.currentRemote == nil {
if len(ref.Name) == 0 {
c.currentRemote = &defaultRemote
return defaultRemote
}
if remote, ok := c.Git.Get(fmt.Sprintf("branch.%s.remote", ref.Name)); ok {
// try tracking remote
c.currentRemote = &remote
} else if remotes := c.Remotes(); len(remotes) == 1 {
// use only remote if there is only 1
c.currentRemote = &remotes[0]
} else {
// fall back to default :(
c.currentRemote = &defaultRemote
}
}
return *c.currentRemote
}
func (c *Configuration) PushRemote() string {
ref := c.CurrentRef()
c.loading.Lock()
defer c.loading.Unlock()
if c.pushRemote == nil {
if remote, ok := c.Git.Get(fmt.Sprintf("branch.%s.pushRemote", ref.Name)); ok {
c.pushRemote = &remote
} else if remote, ok := c.Git.Get("remote.pushDefault"); ok {
c.pushRemote = &remote
} else {
c.loading.Unlock()
remote := c.Remote()
c.loading.Lock()
c.pushRemote = &remote
}
}
return *c.pushRemote
}
func (c *Configuration) SetValidRemote(name string) error {
if err := git.ValidateRemote(name); err != nil {
return err
}
c.SetRemote(name)
return nil
}
func (c *Configuration) SetRemote(name string) {
c.currentRemote = &name
}
func (c *Configuration) Remotes() []string {
c.loadGitConfig()
return c.remotes
}
func (c *Configuration) Extensions() map[string]Extension {
c.loadGitConfig()
return c.extensions
}
// SortedExtensions gets the list of extensions ordered by Priority
func (c *Configuration) SortedExtensions() ([]Extension, error) {
return SortExtensions(c.Extensions())
}
func (c *Configuration) SkipDownloadErrors() bool {
return c.Os.Bool("GIT_LFS_SKIP_DOWNLOAD_ERRORS", false) || c.Git.Bool("lfs.skipdownloaderrors", false)
}
func (c *Configuration) SetLockableFilesReadOnly() bool {
return c.Os.Bool("GIT_LFS_SET_LOCKABLE_READONLY", true) && c.Git.Bool("lfs.setlockablereadonly", true)
}
func (c *Configuration) HookDir() string {
if git.IsGitVersionAtLeast("2.9.0") {
hp, ok := c.Git.Get("core.hooksPath")
if ok {
return hp
}
}
return filepath.Join(c.LocalGitDir(), "hooks")
}
func (c *Configuration) InRepo() bool {
return len(c.LocalGitDir()) > 0
}
func (c *Configuration) LocalWorkingDir() string {
c.loadGitDirs()
return c.workDir
}
func (c *Configuration) LocalGitDir() string {
c.loadGitDirs()
return *c.gitDir
}
func (c *Configuration) loadGitDirs() {
c.loadingGit.Lock()
defer c.loadingGit.Unlock()
if c.gitDir != nil {
return
}
gitdir, workdir, err := git.GitAndRootDirs()
if err != nil {
errMsg := err.Error()
tracerx.Printf("Error running 'git rev-parse': %s", errMsg)
if !strings.Contains(errMsg, "Not a git repository") {
fmt.Fprintf(os.Stderr, "Error: %s\n", errMsg)
}
		c.gitDir = &gitdir
		return
}
gitdir = tools.ResolveSymlinks(gitdir)
c.gitDir = &gitdir
c.workDir = tools.ResolveSymlinks(workdir)
}
func (c *Configuration) LocalGitStorageDir() string {
return c.Filesystem().GitStorageDir
}
func (c *Configuration) LocalReferenceDir() string {
return c.Filesystem().ReferenceDir
}
func (c *Configuration) LFSStorageDir() string {
return c.Filesystem().LFSStorageDir
}
func (c *Configuration) LFSObjectDir() string {
return c.Filesystem().LFSObjectDir()
}
func (c *Configuration) LFSObjectExists(oid string, size int64) bool {
return c.Filesystem().ObjectExists(oid, size)
}
func (c *Configuration) EachLFSObject(fn func(fs.Object) error) error {
return c.Filesystem().EachObject(fn)
}
func (c *Configuration) LocalLogDir() string {
return c.Filesystem().LogDir()
}
func (c *Configuration) TempDir() string {
return c.Filesystem().TempDir()
}
func (c *Configuration) Filesystem() *fs.Filesystem {
c.loadGitDirs()
c.loading.Lock()
defer c.loading.Unlock()
if c.fs == nil {
lfsdir, _ := c.Git.Get("lfs.storage")
c.fs = fs.New(c.LocalGitDir(), c.LocalWorkingDir(), lfsdir)
}
return c.fs
}
func (c *Configuration) Cleanup() error {
c.loading.Lock()
defer c.loading.Unlock()
return c.fs.Cleanup()
}
func (c *Configuration) OSEnv() Environment {
return c.Os
}
func (c *Configuration) GitEnv() Environment {
return c.Git
}
func (c *Configuration) GitConfig() *git.Configuration {
return c.gitConfig
}
func (c *Configuration) FindGitGlobalKey(key string) string {
return c.gitConfig.FindGlobal(key)
}
func (c *Configuration) FindGitSystemKey(key string) string {
return c.gitConfig.FindSystem(key)
}
func (c *Configuration) FindGitLocalKey(key string) string {
return c.gitConfig.FindLocal(key)
}
func (c *Configuration) SetGitGlobalKey(key, val string) (string, error) {
return c.gitConfig.SetGlobal(key, val)
}
func (c *Configuration) SetGitSystemKey(key, val string) (string, error) {
return c.gitConfig.SetSystem(key, val)
}
func (c *Configuration) SetGitLocalKey(key, val string) (string, error) {
return c.gitConfig.SetLocal(key, val)
}
func (c *Configuration) UnsetGitGlobalSection(key string) (string, error) {
return c.gitConfig.UnsetGlobalSection(key)
}
func (c *Configuration) UnsetGitSystemSection(key string) (string, error) {
return c.gitConfig.UnsetSystemSection(key)
}
func (c *Configuration) UnsetGitLocalSection(key string) (string, error) {
return c.gitConfig.UnsetLocalSection(key)
}
func (c *Configuration) UnsetGitLocalKey(key string) (string, error) {
return c.gitConfig.UnsetLocalKey(key)
}
// loadGitConfig is a temporary measure to support legacy behavior dependent on
// accessing properties set by ReadGitConfig, namely:
// - `c.extensions`
// - `c.uniqRemotes`
// - `c.gitConfig`
//
// Since the *gitEnvironment is responsible for setting these values on the
// (*config.Configuration) instance, we must call that method, if it exists.
//
// loadGitConfig ensures that the delayed Git environment has been loaded, and
// therefore that the properties above have been populated.
func (c *Configuration) loadGitConfig() {
if g, ok := c.Git.(*delayedEnvironment); ok {
g.Load()
}
}
// CurrentCommitter returns the name/email that would be used to author a commit
// with this configuration. In particular, the "user.name" and "user.email"
// configuration values are used
func (c *Configuration) CurrentCommitter() (name, email string) {
name, _ = c.Git.Get("user.name")
email, _ = c.Git.Get("user.email")
return
}
| [] | [] | [] | [] | [] | go | 0 | 0 |
guardiancmd/command.go | package guardiancmd
import (
"errors"
"fmt"
"io"
"net"
"os"
"os/exec"
"path/filepath"
"strconv"
"time"
"code.cloudfoundry.org/clock"
"code.cloudfoundry.org/commandrunner"
"code.cloudfoundry.org/garden"
"code.cloudfoundry.org/garden/server"
"code.cloudfoundry.org/guardian/bindata"
"code.cloudfoundry.org/guardian/gardener"
"code.cloudfoundry.org/guardian/imageplugin"
"code.cloudfoundry.org/guardian/kawasaki"
kawasakifactory "code.cloudfoundry.org/guardian/kawasaki/factory"
"code.cloudfoundry.org/guardian/kawasaki/iptables"
"code.cloudfoundry.org/guardian/kawasaki/mtu"
"code.cloudfoundry.org/guardian/kawasaki/ports"
"code.cloudfoundry.org/guardian/kawasaki/subnets"
"code.cloudfoundry.org/guardian/kubener"
"code.cloudfoundry.org/guardian/logging"
"code.cloudfoundry.org/guardian/metrics"
"code.cloudfoundry.org/guardian/netplugin"
locksmithpkg "code.cloudfoundry.org/guardian/pkg/locksmith"
"code.cloudfoundry.org/guardian/properties"
"code.cloudfoundry.org/guardian/rundmc"
"code.cloudfoundry.org/guardian/rundmc/bundlerules"
"code.cloudfoundry.org/guardian/rundmc/depot"
"code.cloudfoundry.org/guardian/rundmc/goci"
"code.cloudfoundry.org/guardian/rundmc/peas"
"code.cloudfoundry.org/guardian/rundmc/peas/privchecker"
"code.cloudfoundry.org/guardian/rundmc/preparerootfs"
"code.cloudfoundry.org/guardian/rundmc/processes"
"code.cloudfoundry.org/guardian/rundmc/runrunc"
"code.cloudfoundry.org/guardian/rundmc/runrunc/pid"
"code.cloudfoundry.org/guardian/rundmc/stopper"
"code.cloudfoundry.org/guardian/rundmc/users"
"code.cloudfoundry.org/guardian/sysinfo"
"code.cloudfoundry.org/idmapper"
"code.cloudfoundry.org/lager"
"code.cloudfoundry.org/localip"
"github.com/cloudfoundry/dropsonde"
_ "github.com/docker/docker/daemon/graphdriver/aufs" // aufs needed for garden-shed
_ "github.com/docker/docker/pkg/chrootarchive" // allow reexec of docker-applyLayer
"github.com/docker/docker/pkg/reexec"
"github.com/eapache/go-resiliency/retrier"
uuid "github.com/nu7hatch/gouuid"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/tedsuo/ifrit"
"github.com/tedsuo/ifrit/sigmon"
)
const containerdNamespace = "garden"
// These are the maximum caps an unprivileged container process ever gets
// (it may get less if the user is not root, see NonRootMaxCaps)
var unprivilegedMaxCaps = []string{
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE",
}
// These are the maximum caps a privileged container process ever gets
// (it may get less if the user is not root, see NonRootMaxCaps)
var privilegedMaxCaps = []string{
"CAP_AUDIT_CONTROL",
"CAP_AUDIT_READ",
"CAP_AUDIT_WRITE",
"CAP_BLOCK_SUSPEND",
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_DAC_READ_SEARCH",
"CAP_FOWNER",
"CAP_FSETID",
"CAP_IPC_LOCK",
"CAP_IPC_OWNER",
"CAP_KILL",
"CAP_LEASE",
"CAP_LINUX_IMMUTABLE",
"CAP_MAC_ADMIN",
"CAP_MAC_OVERRIDE",
"CAP_MKNOD",
"CAP_NET_ADMIN",
"CAP_NET_BIND_SERVICE",
"CAP_NET_BROADCAST",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_SETUID",
"CAP_SYS_ADMIN",
"CAP_SYS_BOOT",
"CAP_SYS_CHROOT",
"CAP_SYS_MODULE",
"CAP_SYS_NICE",
"CAP_SYS_PACCT",
"CAP_SYS_PTRACE",
"CAP_SYS_RAWIO",
"CAP_SYS_RESOURCE",
"CAP_SYS_TIME",
"CAP_SYS_TTY_CONFIG",
"CAP_SYSLOG",
"CAP_WAKE_ALARM",
}
type GardenFactory interface {
WireResolvConfigurer() kawasaki.DnsResolvConfigurer
WireMkdirer() runrunc.Mkdirer
CommandRunner() commandrunner.CommandRunner
WireVolumizer(logger lager.Logger) gardener.Volumizer
WireCgroupsStarter(logger lager.Logger) gardener.Starter
WireExecRunner(runMode, runcRoot string, containerRootUID, containerRootGID uint32) runrunc.ExecRunner
WireRootfsFileCreator() rundmc.RootfsFileCreator
}
// These are the maximum capabilities a non-root user gets whether privileged or unprivileged
// In other words in a privileged container a non-root user still only gets the unprivileged set
// plus CAP_SYS_ADMIN.
var nonRootMaxCaps = append(unprivilegedMaxCaps, "CAP_SYS_ADMIN")
var PrivilegedContainerNamespaces = []specs.LinuxNamespace{
goci.NetworkNamespace, goci.PIDNamespace, goci.UTSNamespace, goci.IPCNamespace, goci.MountNamespace,
}
var (
worldReadWrite = os.FileMode(0666)
fuseDevice = specs.LinuxDevice{
Path: "/dev/fuse",
Type: "c",
Major: 10,
Minor: 229,
FileMode: &worldReadWrite,
}
allowedDevices = []specs.LinuxDeviceCgroup{
// runc allows these
{Access: "m", Type: "c", Major: deviceWildcard(), Minor: deviceWildcard(), Allow: true},
{Access: "m", Type: "b", Major: deviceWildcard(), Minor: deviceWildcard(), Allow: true},
{Access: "rwm", Type: "c", Major: intRef(1), Minor: intRef(3), Allow: true}, // /dev/null
{Access: "rwm", Type: "c", Major: intRef(1), Minor: intRef(8), Allow: true}, // /dev/random
{Access: "rwm", Type: "c", Major: intRef(1), Minor: intRef(7), Allow: true}, // /dev/full
{Access: "rwm", Type: "c", Major: intRef(5), Minor: intRef(0), Allow: true}, // /dev/tty
{Access: "rwm", Type: "c", Major: intRef(1), Minor: intRef(5), Allow: true}, // /dev/zero
{Access: "rwm", Type: "c", Major: intRef(1), Minor: intRef(9), Allow: true}, // /dev/urandom
{Access: "rwm", Type: "c", Major: intRef(5), Minor: intRef(1), Allow: true}, // /dev/console
{Access: "rwm", Type: "c", Major: intRef(136), Minor: deviceWildcard(), Allow: true}, // /dev/pts/*
{Access: "rwm", Type: "c", Major: intRef(5), Minor: intRef(2), Allow: true}, // /dev/ptmx
{Access: "rwm", Type: "c", Major: intRef(10), Minor: intRef(200), Allow: true}, // /dev/net/tun
// We allow these
{Access: "rwm", Type: fuseDevice.Type, Major: intRef(fuseDevice.Major), Minor: intRef(fuseDevice.Minor), Allow: true},
}
)
type GdnCommand struct {
SetupCommand *SetupCommand `command:"setup"`
ServerCommand *ServerCommand `command:"server"`
// This must be present to stop go-flags complaining, but it's not actually
// used. We parse this flag outside of the go-flags framework.
ConfigFilePath string `long:"config" description:"Config file path."`
}
type ServerCommand struct {
Logger LagerFlag
Server struct {
BindIP IPFlag `long:"bind-ip" description:"Bind with TCP on the given IP."`
BindPort uint16 `long:"bind-port" description:"Bind with TCP on the given port."`
BindSocket string `long:"bind-socket" default:"/tmp/garden.sock" description:"Bind with Unix on the given socket path."`
DebugBindIP IPFlag `long:"debug-bind-ip" description:"Bind the debug server on the given IP."`
DebugBindPort uint16 `long:"debug-bind-port" default:"17013" description:"Bind the debug server to the given port."`
Tag string `hidden:"true" long:"tag" description:"Optional 2-character identifier used for namespacing global configuration."`
SkipSetup bool `long:"skip-setup" description:"Skip the preparation part of the host that requires root privileges"`
} `group:"Server Configuration"`
Containers struct {
Dir string `long:"depot" default:"/var/run/gdn/depot" description:"Directory in which to store container data."`
PropertiesPath string `long:"properties-path" description:"Path in which to store properties."`
ConsoleSocketsPath string `long:"console-sockets-path" description:"Path in which to store temporary sockets"`
		CleanupProcessDirsOnWait bool `long:"cleanup-process-dirs-on-wait" description:"Clean up process dirs on first invocation of wait"`
DisablePrivilgedContainers bool `long:"disable-privileged-containers" description:"Disable creation of privileged containers"`
UIDMapStart uint32 `long:"uid-map-start" default:"1" description:"The lowest numerical subordinate user ID the user is allowed to map"`
UIDMapLength uint32 `long:"uid-map-length" description:"The number of numerical subordinate user IDs the user is allowed to map"`
GIDMapStart uint32 `long:"gid-map-start" default:"1" description:"The lowest numerical subordinate group ID the user is allowed to map"`
GIDMapLength uint32 `long:"gid-map-length" description:"The number of numerical subordinate group IDs the user is allowed to map"`
DefaultRootFS string `long:"default-rootfs" description:"Default rootfs to use when not specified on container creation."`
DefaultGraceTime time.Duration `long:"default-grace-time" description:"Default time after which idle containers should expire."`
DestroyContainersOnStartup bool `long:"destroy-containers-on-startup" description:"Clean up all the existing containers on startup."`
ApparmorProfile string `long:"apparmor" description:"Apparmor profile to use for unprivileged container processes"`
} `group:"Container Lifecycle"`
Bin struct {
AssetsDir string `long:"assets-dir" default:"/var/gdn/assets" description:"Directory in which to extract packaged assets"`
Dadoo FileFlag `long:"dadoo-bin" description:"Path to the 'dadoo' binary."`
NSTar FileFlag `long:"nstar-bin" description:"Path to the 'nstar' binary."`
Tar FileFlag `long:"tar-bin" description:"Path to the 'tar' binary."`
IPTables FileFlag `long:"iptables-bin" default:"/sbin/iptables" description:"path to the iptables binary"`
IPTablesRestore FileFlag `long:"iptables-restore-bin" default:"/sbin/iptables-restore" description:"path to the iptables-restore binary"`
		Init FileFlag `long:"init-bin" description:"Path to execute as pid 1 inside each container."`
} `group:"Binary Tools"`
Runtime struct {
Plugin string `long:"runtime-plugin" default:"runc" description:"Path to the runtime plugin binary."`
PluginExtraArgs []string `long:"runtime-plugin-extra-arg" description:"Extra argument to pass to the runtime plugin. Can be specified multiple times."`
} `group:"Runtime"`
Graph struct {
Dir string `long:"graph" default:"/var/gdn/graph" description:"Directory on which to store imported rootfs graph data."`
CleanupThresholdInMegabytes int `long:"graph-cleanup-threshold-in-megabytes" default:"-1" description:"Disk usage of the graph dir at which cleanup should trigger, or -1 to disable graph cleanup."`
PersistentImages []string `long:"persistent-image" description:"Image that should never be garbage collected. Can be specified multiple times."`
} `group:"Image Graph"`
Image struct {
Plugin FileFlag `long:"image-plugin" description:"Path to image plugin binary."`
PluginExtraArgs []string `long:"image-plugin-extra-arg" description:"Extra argument to pass to the image plugin to create unprivileged images. Can be specified multiple times."`
PrivilegedPlugin FileFlag `long:"privileged-image-plugin" description:"Path to privileged image plugin binary."`
PrivilegedPluginExtraArgs []string `long:"privileged-image-plugin-extra-arg" description:"Extra argument to pass to the image plugin to create privileged images. Can be specified multiple times."`
} `group:"Image"`
Docker struct {
Registry string `long:"docker-registry" default:"registry-1.docker.io" description:"Docker registry API endpoint."`
InsecureRegistries []string `long:"insecure-docker-registry" description:"Docker registry to allow connecting to even if not secure. Can be specified multiple times."`
} `group:"Docker Image Fetching"`
Network struct {
Pool CIDRFlag `long:"network-pool" default:"10.254.0.0/22" description:"Network range to use for dynamically allocated container subnets."`
AllowHostAccess bool `long:"allow-host-access" description:"Allow network access to the host machine."`
DenyNetworks []CIDRFlag `long:"deny-network" description:"Network ranges to which traffic from containers will be denied. Can be specified multiple times."`
DNSServers []IPFlag `long:"dns-server" description:"DNS server IP address to use instead of automatically determined servers. Can be specified multiple times."`
AdditionalDNSServers []IPFlag `long:"additional-dns-server" description:"DNS server IP address to append to the automatically determined servers. Can be specified multiple times."`
AdditionalHostEntries []string `long:"additional-host-entry" description:"Per line hosts entries. Can be specified multiple times and will be appended verbatim in order to /etc/hosts"`
ExternalIP IPFlag `long:"external-ip" description:"IP address to use to reach container's mapped ports. Autodetected if not specified."`
PortPoolStart uint32 `long:"port-pool-start" default:"61001" description:"Start of the ephemeral port range used for mapped container ports."`
PortPoolSize uint32 `long:"port-pool-size" default:"4534" description:"Size of the port pool used for mapped container ports."`
PortPoolPropertiesPath string `long:"port-pool-properties-path" description:"Path in which to store port pool properties."`
Mtu int `long:"mtu" description:"MTU size for container network interfaces. Defaults to the MTU of the interface used for outbound access by the host. Max allowed value is 1500."`
Plugin FileFlag `long:"network-plugin" description:"Path to network plugin binary."`
PluginExtraArgs []string `long:"network-plugin-extra-arg" description:"Extra argument to pass to the network plugin. Can be specified multiple times."`
} `group:"Container Networking"`
Limits struct {
CPUQuotaPerShare uint64 `long:"cpu-quota-per-share" default:"0" description:"Maximum number of microseconds each cpu share assigned to a container allows per quota period"`
TCPMemoryLimit uint64 `long:"tcp-memory-limit" default:"0" description:"Set hard limit for the tcp buf memory, value in bytes"`
DefaultBlockIOWeight uint16 `long:"default-container-blockio-weight" default:"0" description:"Default block IO weight assigned to a container"`
MaxContainers uint64 `long:"max-containers" default:"0" description:"Maximum number of containers that can be created."`
DisableSwapLimit bool `long:"disable-swap-limit" description:"Disable swap memory limit"`
} `group:"Limits"`
Metrics struct {
EmissionInterval time.Duration `long:"metrics-emission-interval" default:"1m" description:"Interval on which to emit metrics."`
DropsondeOrigin string `long:"dropsonde-origin" default:"garden-linux" description:"Origin identifier for Dropsonde-emitted metrics."`
DropsondeDestination string `long:"dropsonde-destination" default:"127.0.0.1:3457" description:"Destination for Dropsonde-emitted metrics."`
} `group:"Metrics"`
Containerd struct {
Socket string `long:"containerd-socket" description:"Path to a containerd socket."`
UseContainerdForProcesses bool `long:"use-containerd-for-processes" description:"Use containerd to run processes in containers."`
} `group:"Containerd"`
Kube struct {
UseKube bool `long:"use-kube" description:"Use kube!"`
} `group:"Kube"`
}
func init() {
if reexec.Init() {
os.Exit(0)
}
}
func (cmd *ServerCommand) Execute([]string) error {
// gdn can be compiled for one of two possible run "modes"
// 1. all-in-one - this is meant for standalone deployments
// 2. bosh-deployed - this is meant for deployment via BOSH
// when compiling an all-in-one gdn, the bindata package will contain a
// number of compiled assets (e.g. iptables, runc, etc.), thus we check to
// see if we have any compiled assets here and perform additional setup
// (e.g. updating bin paths to point to the compiled assets) if required
if len(bindata.AssetNames()) > 0 {
depotDir := cmd.Containers.Dir
err := os.MkdirAll(depotDir, 0755)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
restoredAssetsDir, err := restoreUnversionedAssets(cmd.Bin.AssetsDir)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
cmd.Runtime.Plugin = filepath.Join(restoredAssetsDir, "bin", "runc")
cmd.Bin.Dadoo = FileFlag(filepath.Join(restoredAssetsDir, "bin", "dadoo"))
cmd.Bin.Init = FileFlag(filepath.Join(restoredAssetsDir, "bin", "init"))
cmd.Bin.NSTar = FileFlag(filepath.Join(restoredAssetsDir, "bin", "nstar"))
cmd.Bin.Tar = FileFlag(filepath.Join(restoredAssetsDir, "bin", "tar"))
cmd.Bin.IPTables = FileFlag(filepath.Join(restoredAssetsDir, "sbin", "iptables"))
cmd.Bin.IPTablesRestore = FileFlag(filepath.Join(restoredAssetsDir, "sbin", "iptables-restore"))
cmd.Image.Plugin = FileFlag(filepath.Join(restoredAssetsDir, "bin", "grootfs"))
cmd.Image.PluginExtraArgs = []string{
"--store", "/var/lib/grootfs/store",
"--tardis-bin", FileFlag(filepath.Join(restoredAssetsDir, "bin", "tardis")).Path(),
"--log-level", cmd.Logger.LogLevel,
}
cmd.Image.PrivilegedPlugin = FileFlag(filepath.Join(restoredAssetsDir, "bin", "grootfs"))
cmd.Image.PrivilegedPluginExtraArgs = []string{
"--store", "/var/lib/grootfs/store-privileged",
"--tardis-bin", FileFlag(filepath.Join(restoredAssetsDir, "bin", "tardis")).Path(),
"--log-level", cmd.Logger.LogLevel,
}
cmd.Network.AllowHostAccess = true
maxId := mustGetMaxValidUID()
initStoreCmd := newInitStoreCommand(cmd.Image.Plugin.Path(), cmd.Image.PluginExtraArgs)
initStoreCmd.Args = append(initStoreCmd.Args,
"--uid-mapping", fmt.Sprintf("0:%d:1", maxId),
"--uid-mapping", fmt.Sprintf("1:1:%d", maxId-1),
"--gid-mapping", fmt.Sprintf("0:%d:1", maxId),
"--gid-mapping", fmt.Sprintf("1:1:%d", maxId-1))
runCommand(initStoreCmd)
privInitStoreCmd := newInitStoreCommand(cmd.Image.PrivilegedPlugin.Path(), cmd.Image.PrivilegedPluginExtraArgs)
runCommand(privInitStoreCmd)
}
return <-ifrit.Invoke(sigmon.New(cmd)).Wait()
}
func newInitStoreCommand(pluginPath string, pluginGlobalArgs []string) *exec.Cmd {
return exec.Command(pluginPath, append(pluginGlobalArgs, "init-store", "--store-size-bytes", strconv.Itoa(10*1024*1024*1024))...)
}
func runCommand(cmd *exec.Cmd) {
output, err := cmd.CombinedOutput()
if err != nil {
fmt.Fprintf(os.Stderr, "Err: %v Output: %s", err, string(output))
os.Exit(1)
}
}
func runningAsRoot() bool {
return os.Geteuid() == 0
}
func restoreUnversionedAssets(assetsDir string) (string, error) {
linuxAssetsDir := filepath.Join(assetsDir, "linux")
_, err := os.Stat(linuxAssetsDir)
if err == nil {
return linuxAssetsDir, nil
}
err = bindata.RestoreAssets(assetsDir, "linux")
if err != nil {
return "", err
}
return linuxAssetsDir, nil
}
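// idMappings builds the uid/gid mappings used for user-namespaced containers:
// container root (id 0) maps to the host's maximum valid id (or to the current
// uid/gid when gdn is not running as root), and container ids from 1 upwards
// map onto the configured subordinate id ranges.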
func (cmd *ServerCommand) idMappings() (idmapper.MappingList, idmapper.MappingList) {
containerRootUID := mustGetMaxValidUID()
containerRootGID := mustGetMaxValidUID()
if !runningAsRoot() {
containerRootUID = os.Geteuid()
containerRootGID = os.Getegid()
}
cmd.calculateDefaultMappingLengths(containerRootUID, containerRootGID)
uidMappings := idmapper.MappingList{
{
ContainerID: 0,
HostID: uint32(containerRootUID),
Size: 1,
},
{
ContainerID: 1,
HostID: cmd.Containers.UIDMapStart,
Size: cmd.Containers.UIDMapLength,
},
}
gidMappings := idmapper.MappingList{
{
ContainerID: 0,
HostID: uint32(containerRootGID),
Size: 1,
},
{
ContainerID: 1,
HostID: cmd.Containers.GIDMapStart,
Size: cmd.Containers.GIDMapLength,
},
}
return uidMappings, gidMappings
}
func (cmd *ServerCommand) Run(signals <-chan os.Signal, ready chan<- struct{}) error {
logger, reconfigurableSink := cmd.Logger.Logger("guardian")
factory := cmd.NewGardenFactory()
propManager, err := cmd.loadProperties(logger, cmd.Containers.PropertiesPath)
if err != nil {
return err
}
portPool, err := cmd.wirePortPool(logger)
if err != nil {
return err
}
networker, iptablesStarter, err := cmd.wireNetworker(logger, factory, propManager, portPool)
if err != nil {
logger.Error("failed-to-wire-networker", err)
return err
}
restorer := gardener.NewRestorer(networker)
if cmd.Containers.DestroyContainersOnStartup {
restorer = &gardener.NoopRestorer{}
}
volumizer := factory.WireVolumizer(logger)
starters := []gardener.Starter{}
if !cmd.Server.SkipSetup {
starters = append(starters, factory.WireCgroupsStarter(logger))
}
if cmd.Network.Plugin.Path() == "" {
starters = append(starters, iptablesStarter)
}
var bulkStarter gardener.BulkStarter = gardener.NewBulkStarter(starters)
peaCleaner := cmd.wirePeaCleaner(factory, volumizer)
containerizer, err := cmd.wireContainerizer(logger, factory, propManager, volumizer, peaCleaner)
if err != nil {
logger.Error("failed-to-wire-containerizer", err)
return err
}
var backend garden.Backend
if cmd.Kube.UseKube {
backend = &kubener.Kubener{
Logger: logger,
}
} else {
backend = &gardener.Gardener{
UidGenerator: wireUIDGenerator(),
BulkStarter: bulkStarter,
SysInfoProvider: sysinfo.NewResourcesProvider(cmd.Containers.Dir),
Networker: networker,
Volumizer: volumizer,
Containerizer: containerizer,
PropertyManager: propManager,
MaxContainers: cmd.Limits.MaxContainers,
Restorer: restorer,
PeaCleaner: peaCleaner,
// We want to be able to disable privileged containers independently of
// whether or not gdn is running as root.
AllowPrivilgedContainers: !cmd.Containers.DisablePrivilgedContainers,
Logger: logger,
}
}
var listenNetwork, listenAddr string
if cmd.Server.BindIP != nil {
listenNetwork = "tcp"
listenAddr = fmt.Sprintf("%s:%d", cmd.Server.BindIP.IP(), cmd.Server.BindPort)
} else {
listenNetwork = "unix"
listenAddr = cmd.Server.BindSocket
}
gardenServer := server.New(listenNetwork, listenAddr, cmd.Containers.DefaultGraceTime, backend, logger.Session("api"))
cmd.initializeDropsonde(logger)
metricsProvider := cmd.wireMetricsProvider(logger)
debugServerMetrics := map[string]func() int{
"numCPUS": metricsProvider.NumCPU,
"numGoRoutines": metricsProvider.NumGoroutine,
"loopDevices": metricsProvider.LoopDevices,
"backingStores": metricsProvider.BackingStores,
"depotDirs": metricsProvider.DepotDirs,
}
periodicMetronMetrics := map[string]func() int{
"DepotDirs": metricsProvider.DepotDirs,
}
if cmd.Image.Plugin == "" && cmd.Image.PrivilegedPlugin == "" {
periodicMetronMetrics["LoopDevices"] = metricsProvider.LoopDevices
periodicMetronMetrics["BackingStores"] = metricsProvider.BackingStores
}
metronNotifier := cmd.wireMetronNotifier(logger, periodicMetronMetrics)
metronNotifier.Start()
if cmd.Server.DebugBindIP != nil {
addr := fmt.Sprintf("%s:%d", cmd.Server.DebugBindIP.IP(), cmd.Server.DebugBindPort)
metrics.StartDebugServer(addr, reconfigurableSink, debugServerMetrics)
}
if err := backend.Start(); err != nil {
logger.Error("starting-guardian-backend", err)
return err
}
if err := gardenServer.SetupBomberman(); err != nil {
logger.Error("setting-up-bomberman", err)
return err
}
if err := startServer(gardenServer, logger); err != nil {
return err
}
close(ready)
logger.Info("started", lager.Data{
"network": listenNetwork,
"addr": listenAddr,
})
<-signals
gardenServer.Stop()
cmd.saveProperties(logger, cmd.Containers.PropertiesPath, propManager)
portPoolState := portPool.RefreshState()
ports.SaveState(cmd.Network.PortPoolPropertiesPath, portPoolState)
return nil
}
func (cmd *ServerCommand) wirePeaCleaner(factory GardenFactory, volumizer gardener.Volumizer) gardener.PeaCleaner {
cmdRunner := factory.CommandRunner()
runcLogRunner := runrunc.NewLogRunner(cmdRunner, runrunc.LogDir(os.TempDir()).GenerateLogFile)
runcBinary := goci.RuncBinary{Path: cmd.Runtime.Plugin, Root: cmd.computeRuncRoot()}
runcDeleter := runrunc.NewDeleter(runcLogRunner, runcBinary)
return peas.NewPeaCleaner(runcDeleter, volumizer, cmd.Containers.Dir)
}
func (cmd *ServerCommand) calculateDefaultMappingLengths(containerRootUID, containerRootGID int) {
if cmd.Containers.UIDMapLength == 0 {
cmd.Containers.UIDMapLength = uint32(containerRootUID) - cmd.Containers.UIDMapStart
}
if cmd.Containers.GIDMapLength == 0 {
cmd.Containers.GIDMapLength = uint32(containerRootGID) - cmd.Containers.GIDMapStart
}
}
func wireUIDGenerator() gardener.UidGeneratorFunc {
return gardener.UidGeneratorFunc(func() string { return mustStringify(uuid.NewV4()) })
}
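// startServer serves the Garden API in a background goroutine. If the
// SOCKET2ME_FD environment variable is set, the server listens on that
// already-open file descriptor (marked close-on-exec) instead of binding a
// new listener itself.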
func startServer(gardenServer *server.GardenServer, logger lager.Logger) error {
socketFDStr := os.Getenv("SOCKET2ME_FD")
if socketFDStr == "" {
go func() {
if err := gardenServer.ListenAndServe(); err != nil {
logger.Fatal("failed-to-start-server", err)
}
}()
return nil
}
socketFD, err := strconv.Atoi(socketFDStr)
if err != nil {
return err
}
if err = ensureServerSocketDoesNotLeak(uintptr(socketFD)); err != nil {
logger.Error("failed-to-set-cloexec-on-server-socket", err)
return err
}
listener, err := net.FileListener(os.NewFile(uintptr(socketFD), fmt.Sprintf("/proc/self/fd/%d", socketFD)))
if err != nil {
logger.Error("failed-to-listen-on-socket-fd", err)
return err
}
go func() {
if err := gardenServer.Serve(listener); err != nil {
logger.Fatal("failed-to-start-server", err)
}
}()
return nil
}
func (cmd *ServerCommand) loadProperties(logger lager.Logger, propertiesPath string) (*properties.Manager, error) {
propManager, err := properties.Load(propertiesPath)
if err != nil {
logger.Error("failed-to-load-properties", err, lager.Data{"propertiesPath": propertiesPath})
return &properties.Manager{}, err
}
return propManager, nil
}
func (cmd *ServerCommand) saveProperties(logger lager.Logger, propertiesPath string, propManager *properties.Manager) {
if propertiesPath != "" {
err := properties.Save(propertiesPath, propManager)
if err != nil {
logger.Error("failed-to-save-properties", err, lager.Data{"propertiesPath": propertiesPath})
}
}
}
func (cmd *ServerCommand) wirePortPool(logger lager.Logger) (*ports.PortPool, error) {
portPoolState, err := ports.LoadState(cmd.Network.PortPoolPropertiesPath)
if err != nil {
if _, ok := err.(ports.StateFileNotFoundError); ok {
logger.Info("no-port-pool-state-to-recover-starting-clean")
} else {
logger.Error("failed-to-parse-port-pool-properties", err)
}
}
portPool, err := ports.NewPool(
cmd.Network.PortPoolStart,
cmd.Network.PortPoolSize,
portPoolState,
)
if err != nil {
return nil, fmt.Errorf("invalid pool range: %s", err)
}
return portPool, nil
}
func (cmd *ServerCommand) wireDepot(bundleGenerator depot.BundleGenerator, bundleSaver depot.BundleSaver, bindMountSourceCreator depot.BindMountSourceCreator) *depot.DirectoryDepot {
return depot.New(cmd.Containers.Dir, bundleGenerator, bundleSaver, bindMountSourceCreator)
}
func extractIPs(ipflags []IPFlag) []net.IP {
ips := make([]net.IP, len(ipflags))
for i, ipflag := range ipflags {
ips[i] = ipflag.IP()
}
return ips
}
func (cmd *ServerCommand) wireNetworker(log lager.Logger, factory GardenFactory, propManager kawasaki.ConfigStore, portPool *ports.PortPool) (gardener.Networker, gardener.Starter, error) {
externalIP, err := defaultExternalIP(cmd.Network.ExternalIP)
if err != nil {
return nil, nil, err
}
dnsServers := extractIPs(cmd.Network.DNSServers)
additionalDNSServers := extractIPs(cmd.Network.AdditionalDNSServers)
if cmd.Network.Plugin.Path() != "" {
resolvConfigurer := factory.WireResolvConfigurer()
externalNetworker := netplugin.New(
factory.CommandRunner(),
propManager,
externalIP,
dnsServers,
additionalDNSServers,
resolvConfigurer,
cmd.Network.Plugin.Path(),
cmd.Network.PluginExtraArgs,
)
return externalNetworker, externalNetworker, nil
}
var denyNetworksList []string
for _, network := range cmd.Network.DenyNetworks {
denyNetworksList = append(denyNetworksList, network.String())
}
interfacePrefix := fmt.Sprintf("w%s", cmd.Server.Tag)
chainPrefix := fmt.Sprintf("w-%s-", cmd.Server.Tag)
idGenerator := kawasaki.NewSequentialIDGenerator(time.Now().UnixNano())
locksmith := &locksmithpkg.FileSystem{}
iptRunner := &logging.Runner{CommandRunner: factory.CommandRunner(), Logger: log.Session("iptables-runner")}
ipTables := iptables.New(cmd.Bin.IPTables.Path(), cmd.Bin.IPTablesRestore.Path(), iptRunner, locksmith, chainPrefix)
nonLoggingIPTables := iptables.New(cmd.Bin.IPTables.Path(), cmd.Bin.IPTablesRestore.Path(), factory.CommandRunner(), locksmith, chainPrefix)
ipTablesStarter := iptables.NewStarter(nonLoggingIPTables, cmd.Network.AllowHostAccess, interfacePrefix, denyNetworksList, cmd.Containers.DestroyContainersOnStartup, log)
ruleTranslator := iptables.NewRuleTranslator()
containerMtu := cmd.Network.Mtu
if containerMtu == 0 {
containerMtu, err = mtu.MTU(externalIP.String())
if err != nil {
return nil, nil, err
}
}
networker := kawasaki.New(
kawasaki.SpecParserFunc(kawasaki.ParseSpec),
subnets.NewPool(cmd.Network.Pool.CIDR()),
kawasaki.NewConfigCreator(idGenerator, interfacePrefix, chainPrefix, externalIP, dnsServers, additionalDNSServers, cmd.Network.AdditionalHostEntries, containerMtu),
propManager,
kawasakifactory.NewDefaultConfigurer(ipTables, cmd.Containers.Dir),
portPool,
iptables.NewPortForwarder(ipTables),
iptables.NewFirewallOpener(ruleTranslator, ipTables),
)
return networker, ipTablesStarter, nil
}
func (cmd *ServerCommand) wireImagePlugin(commandRunner commandrunner.CommandRunner, uid, gid int) gardener.Volumizer {
var unprivilegedCommandCreator imageplugin.CommandCreator = &imageplugin.NotImplementedCommandCreator{
Err: errors.New("no image_plugin provided"),
}
var privilegedCommandCreator imageplugin.CommandCreator = &imageplugin.NotImplementedCommandCreator{
Err: errors.New("no privileged_image_plugin provided"),
}
if cmd.Image.Plugin.Path() != "" {
unprivilegedCommandCreator = &imageplugin.DefaultCommandCreator{
BinPath: cmd.Image.Plugin.Path(),
ExtraArgs: cmd.Image.PluginExtraArgs,
}
}
if cmd.Image.PrivilegedPlugin.Path() != "" {
privilegedCommandCreator = &imageplugin.DefaultCommandCreator{
BinPath: cmd.Image.PrivilegedPlugin.Path(),
ExtraArgs: cmd.Image.PrivilegedPluginExtraArgs,
}
}
imagePlugin := &imageplugin.ImagePlugin{
UnprivilegedCommandCreator: unprivilegedCommandCreator,
PrivilegedCommandCreator: privilegedCommandCreator,
ImageSpecCreator: imageplugin.NewOCIImageSpecCreator(cmd.Containers.Dir),
CommandRunner: commandRunner,
DefaultRootfs: cmd.Containers.DefaultRootFS,
}
return gardener.NewVolumeProvider(imagePlugin, imagePlugin, gardener.CommandFactory(preparerootfs.Command), commandRunner, uid, gid)
}
func (cmd *ServerCommand) wireContainerizer(log lager.Logger, factory GardenFactory,
properties gardener.PropertyManager, volumizer peas.Volumizer, peaCleaner gardener.PeaCleaner) (*rundmc.Containerizer, error) {
initMount, initPath := initBindMountAndPath(cmd.Bin.Init.Path())
defaultMounts := append(defaultBindMounts(), initMount)
privilegedMounts := append(defaultMounts, privilegedMounts()...)
unprivilegedMounts := append(defaultMounts, unprivilegedMounts()...)
// TODO centralize knowledge of garden -> runc capability schema translation
baseProcess := specs.Process{
Capabilities: &specs.LinuxCapabilities{
Effective: unprivilegedMaxCaps,
Bounding: unprivilegedMaxCaps,
Inheritable: unprivilegedMaxCaps,
Permitted: unprivilegedMaxCaps,
},
Args: []string{initPath},
Cwd: "/",
ConsoleSize: &specs.Box{},
}
baseBundle := goci.Bundle().
WithNamespaces(PrivilegedContainerNamespaces...).
WithRootFS(cmd.Containers.DefaultRootFS).
WithProcess(baseProcess).
WithRootFSPropagation("private")
uidMappings, gidMappings := cmd.idMappings()
unprivilegedBundle := baseBundle.
WithNamespace(goci.UserNamespace).
WithUIDMappings(uidMappings...).
WithGIDMappings(gidMappings...).
WithMounts(unprivilegedMounts...).
WithMaskedPaths(defaultMaskedPaths())
unprivilegedBundle.Spec.Linux.Seccomp = seccomp
if cmd.Containers.ApparmorProfile != "" {
unprivilegedBundle = unprivilegedBundle.WithApparmorProfile(cmd.Containers.ApparmorProfile)
}
privilegedBundle := baseBundle.
WithMounts(privilegedMounts...).
WithDevices(getPrivilegedDevices()...).
WithCapabilities(privilegedMaxCaps...).
WithDeviceRestrictions(append(
[]specs.LinuxDeviceCgroup{{Allow: false, Access: "rwm"}},
allowedDevices...,
))
log.Debug("base-bundles", lager.Data{
"privileged": privilegedBundle,
"unprivileged": unprivilegedBundle,
})
cgroupRootPath := "garden"
if cmd.Server.Tag != "" {
cgroupRootPath = fmt.Sprintf("%s-%s", cgroupRootPath, cmd.Server.Tag)
}
bundleRules := []rundmc.BundlerRule{
bundlerules.Base{
PrivilegedBase: privilegedBundle,
UnprivilegedBase: unprivilegedBundle,
},
bundlerules.Namespaces{},
bundlerules.CGroupPath{
Path: cgroupRootPath,
},
wireMounts(),
bundlerules.Env{},
bundlerules.Hostname{},
bundlerules.Windows{},
bundlerules.RootFS{},
bundlerules.Limits{
CpuQuotaPerShare: cmd.Limits.CPUQuotaPerShare,
TCPMemoryLimit: int64(cmd.Limits.TCPMemoryLimit),
BlockIOWeight: cmd.Limits.DefaultBlockIOWeight,
DisableSwapLimit: cmd.Limits.DisableSwapLimit,
},
}
template := &rundmc.BundleTemplate{Rules: bundleRules}
bundleSaver := &goci.BundleSaver{}
bindMountSourceCreator := wireBindMountSourceCreator(uidMappings, gidMappings)
depot := cmd.wireDepot(template, bundleSaver, bindMountSourceCreator)
bndlLoader := &goci.BndlLoader{}
processBuilder := processes.NewBuilder(wireEnvFunc(), nonRootMaxCaps)
cmdRunner := factory.CommandRunner()
runcLogRunner := runrunc.NewLogRunner(cmdRunner, runrunc.LogDir(os.TempDir()).GenerateLogFile)
runcRoot := cmd.computeRuncRoot()
runcBinary := goci.RuncBinary{Path: cmd.Runtime.Plugin, Root: runcRoot}
pidFileReader := wirePidfileReader()
privilegeChecker := &privchecker.PrivilegeChecker{BundleLoader: bndlLoader}
runcDeleter := runrunc.NewDeleter(runcLogRunner, runcBinary)
var runner rundmc.OCIRuntime
var pidGetter peas.PidGetter
var peaCreator *peas.PeaCreator
userLookupper := users.LookupFunc(users.LookupUser)
wireExecerFunc := func(pidGetter runrunc.PidGetter) *runrunc.Execer {
return runrunc.NewExecer(bndlLoader, processBuilder, factory.WireMkdirer(),
userLookupper, factory.WireExecRunner("exec", runcRoot, uint32(uidMappings.Map(0)), uint32(gidMappings.Map(0))), pidGetter)
}
statser := runrunc.NewStatser(runcLogRunner, runcBinary)
if cmd.Kube.UseKube {
runner, _ = wireKube()
} else if cmd.useContainerd() {
var err error
runner, pidGetter, err = wireContainerd(cmd.Containerd.Socket, bndlLoader, processBuilder, userLookupper, wireExecerFunc, statser, cmd.Containerd.UseContainerdForProcesses)
if err != nil {
return nil, err
}
} else {
pidGetter = &pid.ContainerPidGetter{Depot: depot, PidFileReader: pidFileReader}
runner = runrunc.New(
cmdRunner,
runcLogRunner,
runcBinary,
cmd.Runtime.Plugin,
cmd.Runtime.PluginExtraArgs,
wireExecerFunc(pidGetter),
statser,
)
}
eventStore := rundmc.NewEventStore(properties)
stateStore := rundmc.NewStateStore(properties)
peaCreator = &peas.PeaCreator{
Volumizer: volumizer,
PidGetter: pidGetter,
PrivilegedGetter: privilegeChecker,
BindMountSourceCreator: bindMountSourceCreator,
BundleGenerator: template,
ProcessBuilder: processBuilder,
BundleSaver: bundleSaver,
ExecRunner: factory.WireExecRunner("run", runcRoot, uint32(uidMappings.Map(0)), uint32(gidMappings.Map(0))),
RuncDeleter: runcDeleter,
PeaCleaner: peaCleaner,
}
peaUsernameResolver := &peas.PeaUsernameResolver{
PidGetter: pidFileReader,
PeaCreator: peaCreator,
Loader: bndlLoader,
UserLookupper: users.LookupFunc(users.LookupUser),
}
nstar := rundmc.NewNstarRunner(cmd.Bin.NSTar.Path(), cmd.Bin.Tar.Path(), cmdRunner)
stopper := stopper.New(stopper.NewRuncStateCgroupPathResolver(runcRoot), nil, retrier.New(retrier.ConstantBackoff(10, 1*time.Second), nil))
return rundmc.New(depot, runner, bndlLoader, nstar, stopper, eventStore, stateStore, factory.WireRootfsFileCreator(), peaCreator, peaUsernameResolver), nil
}
func (cmd *ServerCommand) useContainerd() bool {
return cmd.Containerd.Socket != ""
}
func wirePidfileReader() *pid.FileReader {
return &pid.FileReader{
Clock: clock.NewClock(),
Timeout: 10 * time.Second,
SleepInterval: time.Millisecond * 100,
}
}
func (cmd *ServerCommand) wireMetricsProvider(log lager.Logger) *metrics.MetricsProvider {
var backingStoresPath string
if cmd.Graph.Dir != "" {
backingStoresPath = filepath.Join(cmd.Graph.Dir, "backing_stores")
}
return metrics.NewMetricsProvider(log, backingStoresPath, cmd.Containers.Dir)
}
func (cmd *ServerCommand) wireMetronNotifier(log lager.Logger, metricsProvider metrics.Metrics) *metrics.PeriodicMetronNotifier {
return metrics.NewPeriodicMetronNotifier(
log, metricsProvider, cmd.Metrics.EmissionInterval, clock.NewClock(),
)
}
func wireBindMountSourceCreator(uidMappings, gidMappings idmapper.MappingList) depot.BindMountSourceCreator {
return &depot.DepotBindMountSourceCreator{
BindMountPoints: bindMountPoints(),
Chowner: &depot.OSChowner{},
ContainerRootHostUID: uidMappings.Map(0),
ContainerRootHostGID: gidMappings.Map(0),
}
}
func (cmd *ServerCommand) initializeDropsonde(log lager.Logger) {
err := dropsonde.Initialize(cmd.Metrics.DropsondeDestination, cmd.Metrics.DropsondeOrigin)
if err != nil {
log.Error("failed to initialize dropsonde", err)
}
}
func defaultExternalIP(ip IPFlag) (net.IP, error) {
if ip != nil {
return ip.IP(), nil
}
localIP, err := localip.LocalIP()
if err != nil {
return nil, fmt.Errorf("Couldn't determine local IP to use for --external-ip parameter. You can use the --external-ip flag to pass an external IP explicitly.")
}
return net.ParseIP(localIP), nil
}
func defaultMaskedPaths() []string {
return []string{
"/proc/kcore",
"/proc/latency_stats",
"/proc/sched_debug",
"/proc/scsi",
"/proc/timer_list",
"/proc/timer_stats",
"/proc/keys",
"/sys/firmware",
}
}
func mustStringify(s interface{}, e error) string {
if e != nil {
panic(e)
}
return fmt.Sprintf("%s", s)
}
func mustOpen(path string) io.ReadCloser {
if r, err := os.Open(path); err != nil {
panic(err)
} else {
return r
}
}
func deviceWildcard() *int64 {
return intRef(-1)
}
func intRef(i int64) *int64 {
return &i
}
| ["\"SOCKET2ME_FD\""] | [] | ["SOCKET2ME_FD"] | [] | ["SOCKET2ME_FD"] | go | 1 | 0 |
tests/test_trainer/configs/test_trainer_resnet.py | import os
from pathlib import Path
BATCH_SIZE = 128
IMG_SIZE = 32
num_epochs = 200
# resnet 50
model = dict(
type='VanillaResNet',
block_type='ResNetBottleneck',
layers=[3, 4, 6, 3],
num_cls=10
)
train_data = dict(
dataset=dict(
type='CIFAR10Dataset',
root=Path(os.environ['DATA']),
transform_pipeline=[
dict(type='Resize', size=IMG_SIZE),
dict(type='RandomCrop', size=IMG_SIZE, padding=4),
dict(type='RandomHorizontalFlip'),
dict(type='ToTensor'),
dict(type='Normalize',
mean=[0.4914, 0.4822, 0.4465],
std=[0.2023, 0.1994, 0.2010]),
]
),
dataloader=dict(
batch_size=BATCH_SIZE,
pin_memory=True,
num_workers=4,
shuffle=True
)
)
test_data = dict(
dataset=dict(
type='CIFAR10Dataset',
root=Path(os.environ['DATA']),
train=False,
transform_pipeline=[
dict(type='Resize', size=IMG_SIZE),
dict(type='ToTensor'),
dict(type='Normalize',
mean=[0.4914, 0.4822, 0.4465],
std=[0.2023, 0.1994, 0.2010]
),
]
),
dataloader=dict(
batch_size=BATCH_SIZE,
pin_memory=True,
num_workers=4,
shuffle=True
)
)
optimizer = dict(
type='SGD',
lr=0.2,
momentum=0.9,
weight_decay=5e-4
)
loss = dict(
type='CrossEntropyLoss',
)
parallel = dict(
pipeline=dict(size=1),
tensor=dict(size=1, mode=None),
)
hooks = [
dict(type='LogMetricByEpochHook'),
dict(type='AccuracyHook'),
dict(type='LossHook'),
dict(type='TensorboardHook', log_dir='./tfb_logs'),
dict(
type='LRSchedulerHook',
by_epoch=True,
lr_scheduler_cfg=dict(
type='CosineAnnealingLR',
warmup_steps=5
)
),
dict(type='SaveCheckpointHook', interval=5, checkpoint_dir='./ckpt'),
]
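# Illustrative only (assumed usage, not part of the original config): dict-style configs
# like this one are normally imported as a module and their dicts handed to builder
# functions by the training framework. A minimal, framework-agnostic sketch:
#
#   import importlib.util
#   spec = importlib.util.spec_from_file_location('cfg', 'test_trainer_resnet.py')
#   cfg = importlib.util.module_from_spec(spec)
#   spec.loader.exec_module(cfg)                    # requires the DATA env var to be set
#   print(cfg.model['type'], cfg.optimizer['lr'])   # -> VanillaResNet 0.2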
| [] | [] | ["DATA"] | [] | ["DATA"] | python | 1 | 0 |
cli.go | // cli - An Extensible, POSIX Compatible Command-Line Argument Parser
// Copyright (c) 2017 Fadhli Dzil Ikram
package cli
import (
"os"
)
type Command struct {
Options []Option
Commands []Command
Name string
Description string
Arguments string
Remarks string
Handler Handler
ErrorHandler Handler
}
func Cmd(name string, description string) Command {
return Command{
Name: name,
Description: description,
}
}
func Subcmd(name string, description string, arguments string) Command {
return Command{
Name: name,
Description: description,
Arguments: arguments,
}
}
func (c *Command) SetCommands(cmd ...Command) {
c.Commands = cmd
}
func (c *Command) SetOptions(opt ...Option) {
c.Options = opt
}
func (c *Command) SetHandler(fn HandlerFunc) {
c.Handler = fn
}
func (c *Command) exec(ctx *Context) error {
var err error
// Add current command to context stack
ctx.Stack.Push(c)
// Run option initialisation function if available
for _, opt := range c.Options {
if initOpt, ok := opt.(OptionInit); ok {
initOpt.Init(ctx)
}
}
// Process options until the end-of-option flag is seen or no arguments are left
for len(c.Options) > 0 && err != ErrEndOfOption && len(ctx.Arguments) > 0 {
// Iterate over option list
for _, opt := range c.Options {
if err = ctx.Application.OptionHandler.Parse(ctx, opt); err == nil || err == ErrEndOfOption {
break
} else if err != ErrNextOption {
return err
}
}
// Checks if option not found
if err == ErrNextOption {
var opt string
// Get unknown options
if opt, err = ctx.Arguments.Get(); err != nil {
return err
}
ctx.Arguments.Pop()
// Return syntax error
return NewError("Unknown option '%s'", opt)
}
}
// Run command handler if defined
if c.Handler != nil {
if err = c.Handler.Run(ctx); err != nil {
return err
}
}
// Execute subcommand finder if defined
if len(c.Commands) > 0 {
var cmd string
if cmd, err = ctx.Arguments.Get(); err == ErrEmptyContextArgument {
printUsage(os.Stderr, ctx)
os.Exit(0)
return nil
} else if err != nil {
return err
}
ctx.Arguments.Pop()
// Iterate over subcommand list
for _, command := range c.Commands {
if command.Name == cmd {
// Run subcommand Exec()
return command.Exec(ctx)
}
}
// Tell user that we don't know the command
return NewError("Unknown command '%s'", cmd)
}
// Return execution without error
return nil
}
func (c *Command) Exec(ctx *Context) error {
var err error
// Run the internal executor and get the error result
if err = c.exec(ctx); err != nil {
// Check if an error handler exists
if c.ErrorHandler != nil {
// Write error to application Context
ctx.Error = err
// Run the error handler
return c.ErrorHandler.Run(ctx)
}
}
// No error handler present, pass error to parent
return err
}
type Application struct {
Command
Version string
OptionHandler OptionHandler
}
func NewApp(name string, description string, arguments string, version string) Application {
return Application{
Command: Command{
Name: name,
Description: description,
Arguments: arguments,
},
Version: version,
}
}
func NewCmdApp(name string, description string, version string) Application {
return Application{
Command: Command{
Name: name,
Description: description,
},
Version: version,
}
}
func (a *Application) Run(osArgs []string) error {
// Initialize context
ctx := NewContext(a, osArgs[1:])
// Set the default option handler if not set
if a.OptionHandler == nil {
a.OptionHandler = OptionParser
}
// Set the default error handler if not set
if a.ErrorHandler == nil {
a.ErrorHandler = ErrorHandler
}
// Run the top level Exec()
return a.Exec(ctx)
}
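// Illustrative only (not part of the original file): a minimal sketch of wiring an
// Application from the constructors above, assuming HandlerFunc has the shape
// func(*Context) error as implied by SetHandler.
//
//	app := NewCmdApp("tool", "example tool", "1.0.0")
//	hello := Subcmd("hello", "print a greeting", "[name]")
//	hello.SetHandler(func(ctx *Context) error {
//		fmt.Println("hello")
//		return nil
//	})
//	app.SetCommands(hello)
//	if err := app.Run(os.Args); err != nil {
//		os.Exit(1)
//	}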
| [] | [] | [] | [] | [] | go | null | null | null |
ucloud-sdk-java-uhost/src/test/java/cn/ucloud/uhost/client/ReinstallUHostInstanceTest.java | package cn.ucloud.uhost.client;
import cn.ucloud.uhost.model.ReinstallUHostInstanceParam;
import cn.ucloud.uhost.model.ReinstallUHostInstanceResult;
import cn.ucloud.common.pojo.Account;
import cn.ucloud.uhost.pojo.UhostConfig;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertNull;
/**
* @description:
* @author: codezhang
* @date: 2018-09-18 17:18
**/
public class ReinstallUHostInstanceTest {
private UhostClient client;
private ReinstallUHostInstanceParam param;
@Before
public void initData() {
client = new DefaultUhostClient(new UhostConfig(
new Account(System.getenv("UCloudPrivateKey"),
System.getenv("UCloudPublicKey"))));
param = new ReinstallUHostInstanceParam("cn-sh2","uhost-rjonwb2x");
param.setPassword("123456ab");
param.setProjectId("org-izug1m");
}
@Test
public void reinstallUHostInstance() {
try {
ReinstallUHostInstanceResult result = client.reinstallUHostInstance(param);
JSONComparator.jsonComparator(result);
} catch (Exception e) {
assertNull(e);
}
}
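    // Note (added for clarity, not in the original test): authentication is read from the
    // UCloudPrivateKey and UCloudPublicKey environment variables in initData(), so both
    // must be exported before running this test.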
} | ["\"UCloudPrivateKey\"", "\"UCloudPublicKey\""] | [] | ["UCloudPrivateKey", "UCloudPublicKey"] | [] | ["UCloudPrivateKey", "UCloudPublicKey"] | java | 2 | 0 |
internal/infrastructure/orm/boil_main_test.go | // Code generated by SQLBoiler (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package orm
import (
"database/sql"
"flag"
"fmt"
"math/rand"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/spf13/viper"
"github.com/volatiletech/sqlboiler/boil"
)
var flagDebugMode = flag.Bool("test.sqldebug", false, "Turns on debug mode for SQL statements")
var flagConfigFile = flag.String("test.config", "", "Overrides the default config")
const outputDirDepth = 4
var (
dbMain tester
)
type tester interface {
setup() error
conn() (*sql.DB, error)
teardown() error
}
func TestMain(m *testing.M) {
if dbMain == nil {
fmt.Println("no dbMain tester interface was ready")
os.Exit(-1)
}
rand.Seed(time.Now().UnixNano())
flag.Parse()
var err error
// Load configuration
err = initViper()
if err != nil {
fmt.Println("unable to load config file")
os.Exit(-2)
}
// Set DebugMode so we can see generated sql statements
boil.DebugMode = *flagDebugMode
if err = dbMain.setup(); err != nil {
fmt.Println("Unable to execute setup:", err)
os.Exit(-4)
}
conn, err := dbMain.conn()
if err != nil {
fmt.Println("failed to get connection:", err)
}
var code int
boil.SetDB(conn)
code = m.Run()
if err = dbMain.teardown(); err != nil {
fmt.Println("Unable to execute teardown:", err)
os.Exit(-5)
}
os.Exit(code)
}
func initViper() error {
if flagConfigFile != nil && *flagConfigFile != "" {
viper.SetConfigFile(*flagConfigFile)
if err := viper.ReadInConfig(); err != nil {
return err
}
return nil
}
var err error
viper.SetConfigName("sqlboiler")
configHome := os.Getenv("XDG_CONFIG_HOME")
homePath := os.Getenv("HOME")
wd, err := os.Getwd()
if err != nil {
wd = strings.Repeat("../", outputDirDepth)
} else {
wd = wd + strings.Repeat("/..", outputDirDepth)
}
configPaths := []string{wd}
if len(configHome) > 0 {
configPaths = append(configPaths, filepath.Join(configHome, "sqlboiler"))
} else {
configPaths = append(configPaths, filepath.Join(homePath, ".config/sqlboiler"))
}
for _, p := range configPaths {
viper.AddConfigPath(p)
}
// Ignore errors here, fall back to defaults and validation to provide errs
_ = viper.ReadInConfig()
viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
viper.AutomaticEnv()
return nil
}
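// Illustrative only: given the lookup order in initViper above, the tests can be pointed
// at an explicit config instead of relying on a sqlboiler config being found via the
// working directory, XDG_CONFIG_HOME, or HOME, e.g.:
//
//	go test ./internal/infrastructure/orm -test.config=./sqlboiler.toml -test.sqldebug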
| ["\"XDG_CONFIG_HOME\"", "\"HOME\""] | [] | ["HOME", "XDG_CONFIG_HOME"] | [] | ["HOME", "XDG_CONFIG_HOME"] | go | 2 | 0 |
server/authentication.go | package server
import (
"basis/db"
"bytes"
"crypto/rsa"
"crypto/x509"
"encoding/json"
"encoding/pem"
"fmt"
"io/ioutil"
"net/http"
"os"
"regexp"
"strings"
"time"
"github.com/lestrrat-go/jwx/v2/jwa"
"github.com/lestrrat-go/jwx/v2/jwt"
"golang.org/x/crypto/bcrypt"
)
var jwtKey *rsa.PrivateKey
func InitJwtKey() {
pemString := os.Getenv("JWT_KEY")
pemString = strings.ReplaceAll(pemString, `\n`, "\n")
block, _ := pem.Decode([]byte(pemString))
key, _ := x509.ParsePKCS1PrivateKey(block.Bytes)
jwtKey = key
}
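// Illustrative only: InitJwtKey expects JWT_KEY to hold a PKCS#1 RSA private key in PEM
// form, with real newlines optionally escaped as the two characters `\n` (as is common
// when the key is injected through a single-line environment variable), e.g.:
//
//	export JWT_KEY='-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEA...\n-----END RSA PRIVATE KEY-----'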
type Credentials struct {
Username string `json:"username"`
Password string `json:"password"`
}
func signUp(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "POST")
var creds Credentials
bodyBytes, _ := ioutil.ReadAll(r.Body)
r.Body.Close()
r.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))
err := json.NewDecoder(bytes.NewBuffer(bodyBytes)).Decode(&creds)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
return
}
usernameValid, _ := regexp.MatchString("[a-zA-Z0-9]{3,}", creds.Username)
passwordValid, _ := regexp.MatchString("[a-zA-Z0-9]{3,}", creds.Password)
if !usernameValid || !passwordValid {
w.WriteHeader(http.StatusBadRequest)
return
}
_, userNotFoundErr := db.QueryUserByUsername(creds.Username)
if userNotFoundErr == nil {
w.WriteHeader(http.StatusConflict) // username taken
return
}
hashedPassword, err := bcrypt.GenerateFromPassword([]byte(creds.Password), bcrypt.DefaultCost)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
return
}
err = db.SaveUser(db.User{Username: creds.Username, PasswordHash: string(hashedPassword), LastActive: time.Now()})
if err != nil {
w.WriteHeader(http.StatusInternalServerError) // failed to persist user
return
}
signIn(w, r)
}
func signIn(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "POST")
var creds Credentials
err := json.NewDecoder(r.Body).Decode(&creds)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
return
}
expectedUser, userNotFoundErr := db.QueryUserByUsername(creds.Username)
passwordMismatchErr := bcrypt.CompareHashAndPassword([]byte(expectedUser.PasswordHash), []byte(creds.Password))
if userNotFoundErr != nil || passwordMismatchErr != nil {
w.WriteHeader(http.StatusUnauthorized)
return
}
expirationTime := 100000 * time.Hour
token, err := jwt.NewBuilder().
IssuedAt(time.Now()).
Expiration(time.Now().Add(expirationTime)).
Claim("username", creds.Username).
Build()
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
return
}
signedToken, err := jwt.Sign(token, jwt.WithKey(jwa.RS256, jwtKey))
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
return
}
http.SetCookie(w, &http.Cookie{
Name: "access_token",
Value: string(signedToken),
Expires: time.Now().Add(expirationTime),
})
}
func validateTokenAndExtractUsername(token []byte) (string, error) {
verifiedToken, err := jwt.Parse(token, jwt.WithKey(jwa.RS256, jwtKey))
if err != nil {
return "", err
}
username := fmt.Sprintf("%v", verifiedToken.PrivateClaims()["username"])
return username, nil
}
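// Illustrative only (assumed usage, not part of the original file): a protected handler
// would read the access_token cookie set by signIn and validate it, roughly:
//
//	func profile(w http.ResponseWriter, r *http.Request) {
//		cookie, err := r.Cookie("access_token")
//		if err != nil {
//			w.WriteHeader(http.StatusUnauthorized)
//			return
//		}
//		username, err := validateTokenAndExtractUsername([]byte(cookie.Value))
//		if err != nil {
//			w.WriteHeader(http.StatusUnauthorized)
//			return
//		}
//		fmt.Fprintf(w, "hello %s", username)
//	}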
| ["\"JWT_KEY\""] | [] | ["JWT_KEY"] | [] | ["JWT_KEY"] | go | 1 | 0 |
markets/consts.go | package main
import "os"
import "time"
const (
marketsDB = "altcoin"
count = 10
apiKey = ""
apiPass = ""
interval = 10 * time.Second
)
var (
// EXCHANGES
vPoloniex = false
vBinance = true
vBittrex = true
vCryptopia = true
// MARKETS
vBTC = true
vUSDT = true
// LOGIN
username = os.Getenv("DBUSER")
password = os.Getenv("DBPASS")
serverURL = os.Getenv("SERVERDB")
)
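// Illustrative only: the credentials above are read once at package load, so they must be
// present in the environment before the binary starts, e.g.:
//
//	export DBUSER=collector DBPASS=secret SERVERDB=https://db.example.com:8086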
| ["\"DBUSER\"", "\"DBPASS\"", "\"SERVERDB\""] | [] | ["DBUSER", "SERVERDB", "DBPASS"] | [] | ["DBUSER", "SERVERDB", "DBPASS"] | go | 3 | 0 |
src/genie/libs/parser/iosxr/show_routing.py | '''
show_route.py
'''
import re
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Schema, \
Any, \
Optional
# ====================================================
# schema for show route ipv4
# ====================================================
class ShowRouteIpv4Schema(MetaParser):
"""Schema for show route ipv4"""
schema = {
'vrf': {
Any(): {
'address_family': {
Any(): {
Optional('routes'): {
Any(): {
'route': str,
'active': bool,
Optional('ip'): str,
Optional('mask'): str,
Optional('route_preference'): int,
Optional('metric'): int,
Optional('source_protocol'): str,
Optional('source_protocol_codes'): str,
Optional('known_via'): str,
Optional('distance'): int,
Optional('type'): str,
Optional('tag'): str,
Optional('installed'): {
'date': str,
'for': str,
},
Optional('redist_advertisers'): {
Any(): {
'protoid': int,
'clientid': int,
},
},
'next_hop': {
Optional('outgoing_interface'): {
Any(): {
'outgoing_interface': str,
Optional('updated'): str,
Optional('metric'): int,
}
},
Optional('next_hop_list'): {
Any(): { # index
'index': int,
Optional('next_hop'): str,
Optional('outgoing_interface'): str,
Optional('updated'): str,
Optional('metric'): int,
Optional('from'): str,
Optional('table'): str,
Optional('address_family'): str,
Optional('table_id'): str,
Optional('nexthop_in_vrf'): str,
}
}
}
}
},
},
},
Optional('last_resort'): {
Optional('gateway'): str,
Optional('to_network'): str,
},
},
}
}
# ====================================================
# parser for show route ipv4
# ====================================================
class ShowRouteIpv4(ShowRouteIpv4Schema):
cli_command = [
'show route ipv4',
'show route vrf {vrf} ipv4',
'show route ipv4 {protocol}',
'show route vrf {vrf} ipv4 {protocol}',
'show route ipv4 {route}',
'show route vrf {vrf} ipv4 {route}'
]
"""
Codes: C - connected, S - static, R - RIP, B - BGP, (>) - Diversion path
D - EIGRP, EX - EIGRP external, O - OSPF, IA - OSPF inter area
N1 - OSPF NSSA external type 1, N2 - OSPF NSSA external type 2
E1 - OSPF external type 1, E2 - OSPF external type 2, E - EGP
i - ISIS, L1 - IS-IS level-1, L2 - IS-IS level-2
ia - IS-IS inter area, su - IS-IS summary null, * - candidate default
U - per-user static route, o - ODR, L - local, G - DAGR, l - LISP
A - access/subscriber, a - Application route
M - mobile route, r - RPL, t - Traffic Engineering, (!) - FRR Backup path
"""
source_protocol_dict = {
'ospf': ['O', 'IA', 'N1', 'N2', 'E1', 'E2'],
'odr': ['o'],
'isis': ['i', 'su', 'L1', 'L2', 'ia'],
'eigrp': ['D', 'EX'],
'static': ['S'],
'egp': ['E'],
'dagr': ['G'],
'rpl': ['r'],
'mobile router': ['M'],
'lisp': ['I', 'l'],
'nhrp': ['H'],
'local': ['L'],
'connected': ['C'],
'bgp': ['B'],
'rip': ['R'],
'per-user static route': ['U'],
'access/subscriber': ['A'],
'traffic engineering': ['t'],
}
protocol_set = {'ospf', 'odr', 'isis', 'eigrp', 'static', 'mobile',
'rip', 'lisp', 'nhrp', 'local', 'connected', 'bgp'}
def cli(self, vrf=None, route=None, protocol=None, output=None):
# Check if argument from device.parse is protocol or route
if protocol and protocol not in self.protocol_set:
route = protocol
protocol = None
if output is None:
if vrf and route:
cmd = self.cli_command[5].format(
vrf=vrf,
route=route
)
elif vrf and protocol:
cmd = self.cli_command[3].format(
vrf=vrf,
protocol=protocol
)
elif vrf:
cmd = self.cli_command[1].format(
vrf=vrf
)
elif protocol:
cmd = self.cli_command[2].format(
protocol=protocol
)
elif route:
cmd = self.cli_command[4].format(
route=route
)
else:
cmd = self.cli_command[0]
out = self.device.execute(cmd)
else:
out = output
# VRF: VRF501
# VRF: L:123
p1 = re.compile(r'^\s*VRF: +(?P<vrf>\S+)$')
# R 10.1.0.0/8 [120/1] via 10.12.120.1, 1w0d, GigabitEthernet0/0/0/0.120
# B 10.21.33.33/32 [200/0] via 10.166.13.13, 00:52:31
# i L2 10.154.219.32/32 [115/100030] via 10.4.1.1, 1d06h, HundredGigE0/0/1/1 (!)
# S 10.36.3.3/32 [1/0] via 10.2.3.3, 01:51:13, GigabitEthernet0/0/0/1
# B 10.19.31.31/32 [200/0] via 10.229.11.11, 00:55:14
# i L1 10.76.23.23/32 [115/11] via 10.2.3.3, 00:52:41, GigabitEthernet0/0/0/1
# S* 192.168.4.4/10 [111/10] via 172.16.84.11, 1w0d
# R 10.145.110.10/4 [10/10] via 192.168.10.12, 12:03:42, GigabitEthernet0/0/1/1.1
# B 10.100.3.160/31 [200/0] via 172.23.6.198 (nexthop in vrf default), 5d13h
p2 = re.compile(r'^(?P<code1>[\w](\*)*)\s*(?P<code2>\S+)? +(?P<network>\S+) +'
r'\[(?P<route_preference>\d+)\/(?P<metric>\d+)\] +via +'
r'(?P<next_hop>\S+)( +\(nexthop +in +vrf +\w+\))?,'
r'( +(?P<date>[\w:]+),?)?( +(?P<interface>[\w\/\.\-]+))?'
r'( +(?P<code3>[\w\*\(\>\)\!]+))?$')
# [90/15360] via 10.23.90.3, 1w0d, GigabitEthernet0/0/0/1.90
# [110/2] via 10.1.2.1, 01:50:49, GigabitEthernet0/0/0/3
p3 = re.compile(r'^\[(?P<route_preference>\d+)\/(?P<metric>\d+)\] +via +'
r'(?P<next_hop>\S+),( +(?P<date>[\w:]+))?,? +'
r'(?P<interface>[\w\/\.\-]+)$')
# L 10.16.2.2/32 is directly connected, 3w5d, Loopback0
# is directly connected, 01:51:13, GigabitEthernet0/0/0/3
# S 10.4.1.1/32 is directly connected, 01:51:13, GigabitEthernet0/0/0/0
# S 10.2.2.2/32 is directly connected, 00:06:36, Null0
p4 = re.compile(r'^((?P<code1>[\w])\s*(?P<code2>\S+)?(\s+'
r'(?P<network>\S+)\s+))?(is\s+directly\s+connected,\s+'
r'(?P<date>[\w:]+))?,?\s+(?P<interface>[\w\/\.\-]+)?$')
# Routing entry for 10.151.0.0/24, 1 known subnets
# Routing entry for 0.0.0.0/0, supernet
# Routing entry for 192.168.154.0/24
p5 = re.compile(r'^Routing +entry +for +(?P<network>(?P<ip>[\w\:\.]+)'
r'\/(?P<mask>\d+))(?:, +(?P<net>[\w\s]+))?$')
# Known via "connected", distance 0, metric 0 (connected)
# Known via "eigrp 1", distance 130, metric 10880, type internal
# Known via "bgp 65161", distance 20, metric 0, candidate default path
# Known via "ospf 3", distance 110, metric 32001, type extern 1
p6 = re.compile(r'^Known +via +\"(?P<known_via>[\w ]+)\", +distance +'
r'(?P<distance>\d+), +metric +(?P<metric>\d+)( \(connected\))?'
r'(, +type +(?P<type>[\S\s]+))?(, +candidate +default +path)?$')
# * directly connected, via GigabitEthernet1.120
p7 = re.compile(r'^(\* +)?directly +connected, via +(?P<interface>\S+)$')
# Route metric is 10880, traffic share count is 1
p8 = re.compile(r'^Route +metric +is +(?P<metric>\d+)(, +'
r'traffic +share +count +is +(?P<share_count>\d+))?$')
# eigrp/100 (protoid=5, clientid=22)
p9 = re.compile(r'^(?P<redist_advertiser>\S+) +\(protoid=(?P<protoid>\d+)'
r', +clientid=(?P<clientid>\d+)\)$')
# Installed Oct 23 22:09:38.380 for 5d21h
p10 = re.compile(r'^Installed +(?P<date>[\S\s]+) +for +(?P<for>\S+)$')
# 10.12.90.1, from 10.12.90.1, via GigabitEthernet0/0/0/0.90
# 172.23.6.96, from 172.23.15.196
# 172.25.253.121, from 172.25.253.121, BGP external
p11 = re.compile(r'^(?P<nexthop>\S+),\s+from\s+(?P<from>\S+)(, '
r'+via\s+(?P<interface>\S+))?'
r'(, +BGP external)?$')
# R2_xrv#show route ipv4
# Routing Descriptor Blocks
# No advertising protos.
p12 = re.compile(r'^((\S+#)?(show +route))|(Routing +Descriptor +'
r'Blocks)|(No +advertising +protos\.)|(Redist +Advertisers:)')
# Tag 10584, type internal
p13 = re.compile(r'^Tag\s+(?P<tag>\d+)\,\s+type\s+(?P<type>\w+)$')
# Nexthop in Vrf: "default", Table: "default", IPv4 Unicast, Table Id: 0xe0000000
p14 = re.compile(r'^Nexthop\s+in\s+[V|v]rf\:\s+\"(?P<interface>\w+)\"\, '
r'+[T|t]able\:\s+\"(?P<table>\w+)\"\, '
r'+(?P<address_family>[\w\s]+)\,\s+[T|t]able '
r'+[I|i]d\:\s+(?P<table_id>\S+)$')
# Gateway of last resort is 172.16.0.88 to network 0.0.0.0
p15 = re.compile(r'^Gateway +of +last +resort +is '
r'+(?P<gateway>(not +set)|\S+)( +to +network '
r'+(?P<to_network>\S+))?$')
# initial variables
ret_dict = {}
index = 0
address_family = 'ipv4'
if not vrf:
vrf = 'default'
for line in out.splitlines():
line = line.strip()
# R2_xrv#show route ipv4
# Routing Descriptor Blocks
# No advertising protos.
m = p12.match(line)
if m or not line:
continue
# VRF: VRF501
# VRF: L:123
m = p1.match(line)
if m:
vrf = m.groupdict()['vrf']
continue
# R 10.1.0.0/8 [120/1] via 10.12.120.1, 1w0d, GigabitEthernet0/0/0/0.120
m = p2.match(line)
if m:
group = m.groupdict()
code1 = group['code1']
source_protocol_code = re.split(r'\*|\(\!\)|\(\>\)', code1)[0].strip()
for key,val in self.source_protocol_dict.items():
if source_protocol_code in val:
source_protocol = key
code2 = group['code2']
if code2:
code1 = '{} {}'.format(code1, code2)
code3 = group['code3']
if code3:
code1 = '{} {}'.format(code1, code3)
network = group['network']
route_preference = int(group['route_preference'])
metric = int(group['metric'])
next_hop = group['next_hop']
updated = group['date']
interface = group['interface']
route_dict = ret_dict.setdefault('vrf', {}). \
setdefault(vrf, {}). \
setdefault('address_family', {}). \
setdefault(address_family, {}). \
setdefault('routes', {}). \
setdefault(network, {})
route_dict.update({'route': network})
route_dict.update({'active': True})
route_dict.update({'route_preference': route_preference})
route_dict.update({'metric': metric})
route_dict.update({'source_protocol': source_protocol})
route_dict.update({'source_protocol_codes': code1})
index = 1
next_hop_list_dict = route_dict.setdefault('next_hop', {}). \
setdefault('next_hop_list', {}). \
setdefault(index, {})
next_hop_list_dict.update({'index': index})
next_hop_list_dict.update({'next_hop': next_hop})
if interface:
next_hop_list_dict.update({'outgoing_interface': interface})
if updated:
next_hop_list_dict.update({'updated': updated})
continue
# [90/15360] via 10.23.90.3, 1w0d, GigabitEthernet0/0/0/1.90
m = p3.match(line)
if m:
group = m.groupdict()
route_preference = int(group['route_preference'])
metric = int(group['metric'])
next_hop = group['next_hop']
updated = group['date']
interface = group['interface']
route_dict.update({'route_preference': route_preference})
route_dict.update({'metric': metric})
index += 1
next_hop_list_dict = route_dict.setdefault('next_hop', {}). \
setdefault('next_hop_list', {}). \
setdefault(index, {})
next_hop_list_dict.update({'index': index})
next_hop_list_dict.update({'next_hop': next_hop})
if interface:
next_hop_list_dict.update({'outgoing_interface': interface})
if updated:
next_hop_list_dict.update({'updated': updated})
continue
# L 10.16.2.2/32 is directly connected, 3w5d, Loopback0
# is directly connected, 01:51:13, GigabitEthernet0/0/0/3
# S 10.2.2.2/32 is directly connected, 00:06:36, Null0
m = p4.match(line)
if m:
try:
group = m.groupdict()
code1 = group.get('code1', None)
source_protocol = None
network = group.get('network', None)
updated = group.get('date', None)
interface = group.get('interface', None)
if network:
route_dict = ret_dict.setdefault('vrf', {}). \
setdefault(vrf, {}). \
setdefault('address_family', {}). \
setdefault(address_family, {}). \
setdefault('routes', {}). \
setdefault(network, {})
route_dict.update({'route': network})
route_dict.update({'active': True})
if code1:
source_protocol_code = re.split(r'\*|\(\!\)|\(\>\)', code1)[0].strip()
for key,val in self.source_protocol_dict.items():
if source_protocol_code in val:
source_protocol = key
code2 = group.get('code2', None)
if code2:
code1 = '{} {}'.format(code1, code2)
if source_protocol:
route_dict.update({'source_protocol': source_protocol})
route_dict.update({'source_protocol_codes': code1})
outgoing_interface_dict = route_dict.setdefault('next_hop', {}). \
setdefault('outgoing_interface', {}). \
setdefault(interface, {})
if interface:
outgoing_interface_dict.update({'outgoing_interface': interface})
if updated:
outgoing_interface_dict.update({'updated': updated})
except Exception:
print('--->'+line)
continue
# Routing entry for 10.151.0.0/24, 1 known subnets
# Routing entry for 0.0.0.0/0, supernet
# Routing entry for 192.168.154.0/24
m = p5.match(line)
if m:
group = m.groupdict()
network = group['network']
ip = group['ip']
mask = group['mask']
route_dict = ret_dict.setdefault('vrf', {}). \
setdefault(vrf, {}). \
setdefault('address_family', {}). \
setdefault(address_family, {}). \
setdefault('routes', {}). \
setdefault(network, {})
route_dict.update({'route': network})
route_dict.update({'ip': ip})
route_dict.update({'mask': mask})
route_dict.update({'active': True})
continue
# Known via "static", distance 1, metric 0, candidate default path
# Known via "eigrp 1", distance 130, metric 10880, type internal
# Known via "rip", distance 120, metric 2
# Known via "connected", distance 0, metric 0 (connected)
# Known via "eigrp 1", distance 130, metric 10880, type internal
# Known via "bgp 65161", distance 20, metric 0, candidate default path
# Known via "ospf 3", distance 110, metric 32001, type extern 1
m = p6.match(line)
if m:
group = m.groupdict()
known_via = group['known_via']
metric = int(group['metric'])
distance = int(group['distance'])
_type = group['type']
route_dict.update({'known_via': known_via})
route_dict.update({'metric': metric})
route_dict.update({'distance': distance})
if _type:
route_dict.update({'type': _type})
continue
# * directly connected, via GigabitEthernet1.120
m = p7.match(line)
if m:
group = m.groupdict()
code1 = group.get('code1', None)
source_protocol = None
network = group.get('network', None)
updated = group.get('date', None)
interface = group.get('interface', None)
if network:
route_dict = ret_dict.setdefault('vrf', {}). \
setdefault(vrf, {}). \
setdefault('address_family', {}). \
setdefault(address_family, {}). \
setdefault('routes', {}). \
setdefault(network, {})
route_dict.update({'route': network})
route_dict.update({'active': True})
if code1:
source_protocol_code = re.split(r'\*|\(\!\)|\(\>\)', code1)[0].strip()
for key,val in self.source_protocol_dict.items():
if source_protocol_code in val:
source_protocol = key
code2 = group.get('code2', None)
if code2:
code1 = '{} {}'.format(code1, code2)
route_dict.update({'source_protocol': source_protocol})
route_dict.update({'source_protocol_codes': code1})
if interface:
outgoing_interface_dict = route_dict.setdefault('next_hop', {}). \
setdefault('outgoing_interface', {}). \
setdefault(interface, {})
outgoing_interface_dict.update({'outgoing_interface': interface})
if updated:
outgoing_interface_dict.update({'updated': updated})
# Route metric is 10880, traffic share count is 1
m = p8.match(line)
if m:
group = m.groupdict()
metric = int(group['metric'])
outgoing_interface_dict.update({'metric': metric})
if group.get('share_count', None):
share_count = int(group['share_count'])
outgoing_interface_dict.update({'share_count': share_count})
# outgoing_interface_dict.update({k:v for k,v in group.items() if v})
continue
# eigrp/100 (protoid=5, clientid=22)
m = p9.match(line)
if m:
group = m.groupdict()
redist_advertiser = group['redist_advertiser']
protoid = int(group['protoid'])
clientid = int(group['clientid'])
redist_advertiser_dict = route_dict.setdefault('redist_advertisers', {}). \
setdefault(redist_advertiser, {})
redist_advertiser_dict.update({'protoid': protoid})
redist_advertiser_dict.update({'clientid': clientid})
continue
# Installed Oct 23 22:09:38.380 for 5d21h
m = p10.match(line)
if m:
group = m.groupdict()
installed_dict = route_dict.setdefault('installed', {})
installed_dict.update({k:v for k,v in group.items() if v})
continue
# 10.12.90.1, from 10.12.90.1, via GigabitEthernet0/0/0/0.90
# 172.23.6.96, from 172.23.15.196
m = p11.match(line)
if m:
group = m.groupdict()
nexthop = group['nexthop']
_from = group['from']
interface = group['interface']
index += 1
outgoing_interface_dict = route_dict.setdefault('next_hop', {}). \
setdefault('next_hop_list', {}). \
setdefault(index, {})
outgoing_interface_dict.update({'index': index})
if interface:
outgoing_interface_dict.update({'outgoing_interface': interface})
outgoing_interface_dict.update({'from': _from})
outgoing_interface_dict.update({'next_hop': nexthop})
continue
# Tag 10584, type internal
m13 = p13.match(line)
if m13:
group = m13.groupdict()
route_dict.update({'tag': group['tag']})
route_dict.update({'type': group['type']})
continue
# Nexthop in Vrf: "default", Table: "default", IPv4 Unicast, Table Id: 0xe0000000
m14 = p14.match(line)
if m14:
group = m14.groupdict()
interface = group['interface']
table = group['table']
address_family = group['address_family']
table_id = group['table_id']
if interface:
nexthop_intf_dict = route_dict.setdefault('next_hop', {}).\
setdefault('next_hop_list', {}). \
setdefault(index, {})
nexthop_intf_dict.update({'index': index})
if interface:
nexthop_intf_dict.update({'nexthop_in_vrf': interface})
nexthop_intf_dict.update({'table': table})
nexthop_intf_dict.update({'address_family': address_family})
nexthop_intf_dict.update({'table_id': table_id})
continue
# Gateway of last resort is 172.16.0.88 to network 0.0.0.0
m15 = p15.match(line)
if m15:
group = m15.groupdict()
gw_dict = ret_dict.setdefault('vrf', {}).\
setdefault(vrf, {}).\
setdefault('last_resort', {})
gw_dict.update({'gateway': group['gateway']})
if group['to_network']:
gw_dict.update({'to_network': group['to_network']})
return ret_dict
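# Illustrative only (assumed usage, not part of the original parser): with a connected
# pyATS/Genie device object this parser is normally invoked through device.parse and
# returns a dict matching ShowRouteIpv4Schema, roughly:
#
#   output = device.parse('show route vrf VRF501 ipv4')
#   # output['vrf']['VRF501']['address_family']['ipv4']['routes']['10.1.0.0/8'] ->
#   # {'route': '10.1.0.0/8', 'active': True, 'route_preference': 120, 'metric': 1,
#   #  'source_protocol': 'rip', 'source_protocol_codes': 'R', 'next_hop': {...}}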
# ====================================================
# parser for show route ipv6
# ====================================================
class ShowRouteIpv6(ShowRouteIpv4Schema):
"""Parser for :
show route ipv6
show route vrf <vrf> ipv6"""
cli_command = [
'show route ipv6',
'show route vrf {vrf} ipv6',
'show route ipv6 {protocol}',
'show route vrf {vrf} ipv6 {protocol}',
'show route ipv6 {route}',
'show route vrf {vrf} ipv6 {route}'
]
"""
Codes: C - connected, S - static, R - RIP, B - BGP, (>) - Diversion path
D - EIGRP, EX - EIGRP external, O - OSPF, IA - OSPF inter area
N1 - OSPF NSSA external type 1, N2 - OSPF NSSA external type 2
E1 - OSPF external type 1, E2 - OSPF external type 2, E - EGP
i - ISIS, L1 - IS-IS level-1, L2 - IS-IS level-2
ia - IS-IS inter area, su - IS-IS summary null, * - candidate default
U - per-user static route, o - ODR, L - local, G - DAGR, l - LISP
A - access/subscriber, a - Application route
M - mobile route, r - RPL, t - Traffic Engineering, (!) - FRR Backup path
"""
source_protocol_dict = {
'ospf': ['O', 'IA', 'N1', 'N2', 'E1', 'E2'],
'odr': ['o'],
'isis': ['i', 'su', 'L1', 'L2', 'ia'],
'eigrp': ['D', 'EX'],
'static': ['S'],
'egp': ['E'],
'dagr': ['G'],
'rpl': ['r'],
'mobile router': ['M'],
'lisp': ['I', 'l'],
'nhrp': ['H'],
'local': ['L'],
'connected': ['C'],
'bgp': ['B'],
'rip': ['R'],
'per-user static route': ['U'],
'access/subscriber': ['A'],
'traffic engineering': ['t'],
'application route' : ['a'],
}
protocol_set = {'ospf', 'odr', 'isis', 'eigrp', 'static', 'mobile',
'rip', 'lisp', 'nhrp', 'local', 'connected', 'bgp'}
def cli(self, vrf=None, route=None, protocol=None, output=None):
# Check if argument from device.parse is protocol or route
if protocol and protocol not in self.protocol_set:
route = protocol
protocol = None
if output is None:
if vrf and route:
cmd = self.cli_command[5].format(
vrf=vrf,
route=route
)
elif vrf and protocol:
cmd = self.cli_command[3].format(
vrf=vrf,
protocol=protocol
)
elif vrf:
cmd = self.cli_command[1].format(
vrf=vrf
)
elif protocol:
cmd = self.cli_command[2].format(
protocol=protocol
)
elif route:
cmd = self.cli_command[4].format(
route=route
)
else:
cmd = self.cli_command[0]
out = self.device.execute(cmd)
else:
out = output
# VRF: VRF501
# VRF: L:123
p1 = re.compile(r'^\s*VRF: +(?P<vrf>\S+)$')
# S 2001:1:1:1::1/128
# L 2001:2:2:2::2/128 is directly connected,
# i L2 2001:0:10:204:0:33::/126
# i L1 2001:21:21:21::21/128
# i*L2 ::/0
# a* ::/0
p2 = re.compile(r'^((?P<code1>[\w](\*)*)(\s*)?(?P<code2>\w+)? '
r'+(?P<network>\S+))?( +is +directly +connected\,)?$')
# [1/0] via 2001:20:1:2::1, 01:52:23, GigabitEthernet0/0/0/0
# [200/0] via 2001:13:13:13::13, 00:53:22
# [0/0] via ::, 5w2d
p3 = re.compile(r'^\[(?P<route_preference>\d+)\/(?P<metric>\d+)\] +'
r'via +(?P<next_hop>\S+)( +\(nexthop +in +vrf +\w+\))?,'
r'( +(?P<date>[\w:]+))?,?( +(?P<interface>[\w\/\.\-]+))?$')
# 01:52:24, Loopback0
p5 = re.compile(r'^(?P<date>[\w+:]+), +(?P<interface>\S+)$')
# Routing entry for 2001:1:1:1::1/128, 1 known subnets
# Routing entry for 2001:1:1:1::1/128, supernet
# Routing entry for 2001:1:1:1::1/128
p6 = re.compile(r'^Routing +entry +for +(?P<network>(?P<ip>[\w\:\.]+)'
r'\/(?P<mask>\d+))(?:, +(?P<net>[\w\s]+))?$')
# Known via "connected", distance 0, metric 0 (connected)
# Known via "eigrp 1", distance 130, metric 10880, type internal
# Known via "bgp 65161", distance 20, metric 0, candidate default path
p7 = re.compile(r'^Known +via +\"(?P<known_via>[\w ]+)\", +'
r'distance +(?P<distance>\d+), +metric +(?P<metric>\d+)'
r'( \(connected\))?(, +type +(?P<type>\S+))?(, +candidate +'
r'default +path)?$')
# * directly connected, via GigabitEthernet1.120
p8 = re.compile(r'^(\* +)?directly +connected, via +(?P<interface>\S+)$')
# Route metric is 10880, traffic share count is 1
p9 = re.compile(r'^Route +metric +is +(?P<metric>\d+)(, +'
r'traffic +share +count +is +(?P<share_count>\d+))?$')
# eigrp/100 (protoid=5, clientid=22)
p10 = re.compile(r'^(?P<redist_advertiser>\S+) +\(protoid=(?P<protoid>\d+)'
r', +clientid=(?P<clientid>\d+)\)$')
# Installed Oct 23 22:09:38.380 for 5d21h
p11 = re.compile(r'^Installed +(?P<date>[\S\s]+) +for +(?P<for>\S+)$')
# fe80::f816:3eff:fe76:b56d, from fe80::f816:3eff:fe76:b56d, via GigabitEthernet0/0/0/0.390
p12 = re.compile(r'^(?P<nexthop>\S+), from +(?P<from>\S+), '
r'+via +(?P<interface>\S+)$')
# R2_xrv#show route ipv6
p13 = re.compile(r'^((\S+#)?(show +route))|(Routing +Descriptor +'
r'Blocks)|(No +advertising +protos\.)|(Redist +Advertisers:)')
# Gateway of last resort is fe80::10ff:fe04:209e to network ::
# Gateway of last resort is not set
# Gateway of last resort is 10.50.15.1 to network 0.0.0.0
p14 = re.compile(r'^Gateway +of +last +resort +is '
r'+(?P<gateway>(not +set)|\S+)( +to +network '
r'+(?P<to_network>\S+))?$')
ret_dict = {}
address_family = 'ipv6'
index = 0
if not vrf:
vrf = 'default'
for line in out.splitlines():
line = line.strip()
# R2_xrv#show route ipv6
# Routing Descriptor Blocks
# No advertising protos.
m = p13.match(line)
if m or not line:
continue
# VRF: VRF501
m = p1.match(line)
if m:
vrf = m.groupdict()['vrf']
continue
# S 2001:1:1:1::1/128
# L 2001:2:2:2::2/128 is directly connected,
# i L2 2001:0:10:204:0:33::/126
# i L1 2001:21:21:21::21/128
# i*L2 ::/0
# a* ::/0
m = p2.match(line)
if m:
group = m.groupdict()
code1 = group['code1']
source_protocol_code = re.split(r'\*|\(\!\)|\(\>\)', code1)[0].strip()
for key,val in self.source_protocol_dict.items():
if source_protocol_code in val:
source_protocol = key
code2 = group['code2']
if code2:
code1 = '{} {}'.format(code1, code2)
network = group['network']
route_dict = ret_dict.setdefault('vrf', {}). \
setdefault(vrf, {}). \
setdefault('address_family', {}). \
setdefault(address_family, {}). \
setdefault('routes', {}). \
setdefault(network, {})
route_dict.update({'source_protocol': source_protocol})
route_dict.update({'source_protocol_codes': code1})
route_dict.update({'route': network})
route_dict.update({'active': True})
index = 0
continue
m = p3.match(line)
if m:
group = m.groupdict()
route_preference = int(group['route_preference'])
metric = int(group['metric'])
next_hop = group.get('next_hop', None)
updated = group.get('date', None)
interface = group.get('interface', None)
route_dict.update({'route_preference': route_preference})
route_dict.update({'metric': metric})
index += 1
next_hop_list_dict = route_dict.setdefault('next_hop', {}). \
setdefault('next_hop_list', {}). \
setdefault(index, {})
next_hop_list_dict.update({'index': index})
if next_hop:
next_hop_list_dict.update({'next_hop': next_hop})
if interface:
next_hop_list_dict.update({'outgoing_interface': interface})
if updated:
next_hop_list_dict.update({'updated': updated})
continue
# 01:52:24, Loopback0
m = p5.match(line)
if m:
group = m.groupdict()
updated = group['date']
interface = group['interface']
outgoing_interface_dict = route_dict.setdefault('next_hop', {}). \
setdefault('outgoing_interface', {}). \
setdefault(interface, {})
outgoing_interface_dict.update({'outgoing_interface': interface})
outgoing_interface_dict.update({'updated': updated})
continue
# Routing entry for 2001:1:1:1::1/128, 1 known subnets
# Routing entry for 2001:1:1:1::1/128, supernet
# Routing entry for 2001:1:1:1::1/128
m = p6.match(line)
if m:
group = m.groupdict()
network = group['network']
ip = group['ip']
mask = group['mask']
route_dict = ret_dict.setdefault('vrf', {}). \
setdefault(vrf, {}). \
setdefault('address_family', {}). \
setdefault(address_family, {}). \
setdefault('routes', {}). \
setdefault(network, {})
route_dict.update({'route': network})
route_dict.update({'ip': ip})
route_dict.update({'mask': mask})
route_dict.update({'active': True})
continue
# Known via "static", distance 1, metric 0, candidate default path
# Known via "eigrp 1", distance 130, metric 10880, type internal
# Known via "rip", distance 120, metric 2
# Known via "connected", distance 0, metric 0 (connected)
# Known via "eigrp 1", distance 130, metric 10880, type internal
# Known via "bgp 65161", distance 20, metric 0, candidate default path
m = p7.match(line)
if m:
group = m.groupdict()
known_via = group['known_via']
metric = int(group['metric'])
distance = int(group['distance'])
_type = group['type']
route_dict.update({'known_via': known_via})
route_dict.update({'metric': metric})
route_dict.update({'distance': distance})
if _type:
route_dict.update({'type': _type})
continue
# * directly connected, via GigabitEthernet1.120
m = p8.match(line)
if m:
group = m.groupdict()
code1 = group.get('code1', None)
source_protocol = None
network = group.get('network', None)
updated = group.get('date', None)
interface = group.get('interface', None)
if network:
route_dict = ret_dict.setdefault('vrf', {}). \
setdefault(vrf, {}). \
setdefault('address_family', {}). \
setdefault(address_family, {}). \
setdefault('routes', {}). \
setdefault(network, {})
route_dict.update({'route': network})
route_dict.update({'active': True})
if code1:
source_protocol_code = re.split(r'\*|\(\!\)|\(\>\)', code1)[0].strip()
for key,val in self.source_protocol_dict.items():
if source_protocol_code in val:
source_protocol = key
code2 = group.get('code2', None)
if code2:
code1 = '{} {}'.format(code1, code2)
route_dict.update({'source_protocol': source_protocol})
route_dict.update({'source_protocol_codes': code1})
outgoing_interface_dict = route_dict.setdefault('next_hop', {}). \
setdefault('outgoing_interface', {}). \
setdefault(interface, {})
if interface:
outgoing_interface_dict.update({'outgoing_interface': interface})
if updated:
outgoing_interface_dict.update({'updated': updated})
# Route metric is 10880, traffic share count is 1
m = p9.match(line)
if m:
group = m.groupdict()
metric = int(group['metric'])
outgoing_interface_dict.update({'metric': metric})
if group.get('share_count', None):
share_count = int(group['share_count'])
outgoing_interface_dict.update({'share_count': share_count})
# outgoing_interface_dict.update({k:v for k,v in group.items() if v})
continue
# eigrp/100 (protoid=5, clientid=22)
m = p10.match(line)
if m:
group = m.groupdict()
redist_advertiser = group['redist_advertiser']
protoid = int(group['protoid'])
clientid = int(group['clientid'])
redist_advertiser_dict = route_dict.setdefault('redist_advertisers', {}). \
setdefault(redist_advertiser, {})
redist_advertiser_dict.update({'protoid': protoid})
redist_advertiser_dict.update({'clientid': clientid})
continue
# Installed Oct 23 22:09:38.380 for 5d21h
m = p11.match(line)
if m:
group = m.groupdict()
installed_dict = route_dict.setdefault('installed', {})
installed_dict.update({k:v for k,v in group.items() if v})
continue
# fe80::f816:3eff:fe76:b56d, from fe80::f816:3eff:fe76:b56d, via GigabitEthernet0/0/0/0.390
m = p12.match(line)
if m:
group = m.groupdict()
nexthop = group['nexthop']
_from = group['from']
interface = group['interface']
index += 1
outgoing_interface_dict = route_dict.setdefault('next_hop', {}). \
setdefault('next_hop_list', {}). \
setdefault(index, {})
outgoing_interface_dict.update({'index': index})
outgoing_interface_dict.update({'outgoing_interface': interface})
outgoing_interface_dict.update({'from': _from})
outgoing_interface_dict.update({'next_hop': nexthop})
continue
# Gateway of last resort is fe80::10ff:fe04:209e to network ::
# Gateway of last resort is not set
# Gateway of last resort is 10.50.15.1 to network 0.0.0.0
m14 = p14.match(line)
if m14:
group = m14.groupdict()
gw_dict = ret_dict.setdefault('vrf', {}).\
setdefault(vrf, {}).\
setdefault('last_resort', {})
gw_dict.update({'gateway': group['gateway']})
if group['to_network']:
gw_dict.update({'to_network' : group['to_network']})
continue
return ret_dict
| [] | [] | [] | [] | [] | python | null | null | null |
examples/pwr_run/checkpointing/nonpc_short/k80_only/job47.py | """
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 32
args_lr = 0.0035
args_model = 'densenet201'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_k80_only/' + job_name + '*'
total_epochs = 130
starting_epoch = 0
# first step is to update the PID
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
if '121' in args_model:
base_model = DenseNet121(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '169' in args_model:
base_model = DenseNet169(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '201' in args_model:
base_model = DenseNet201(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
model.add(base_model)
#model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
epoch_waste_dict = {}
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
epoch_waste_dict[job_name] += epoch_waste_time
json_file3 = json.dumps(epoch_waste_dict)
with open('epoch_waste.json', 'w') as fp:
fp.write(json_file3)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_k80_only/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
checkpoint_dict = {}
with open('checkpoint.json', 'r') as fp:
checkpoint_dict = json.load(fp)
checkpoint_dict[job_name] = 1
json_file3 = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp:
fp.write(json_file3)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
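# Illustrative only (assumed invocation, not part of the original script): this job is
# launched by the external scheduler with something like
#   python job47.py --tc <testcase_name> --gpu_num 0 --node <scheduler_host> [--resume]
# and checkpoints itself to /scratch/li.baol/checkpoint_k80_only/ when it receives SIGTERM.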
| [] | [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | python | 2 | 0 |
musicbot/entry.py | import os
import asyncio
import logging
import traceback
import re
import sys
from enum import Enum
from .constructs import Serializable
from .exceptions import ExtractionError
from .utils import get_header, md5sum
log = logging.getLogger(__name__)
class EntryTypes(Enum):
URL = 1
STEAM = 2
FILE = 3
def __str__(self):
return self.name
class BasePlaylistEntry(Serializable):
def __init__(self):
self.filename = None
self._is_downloading = False
self._waiting_futures = []
@property
def is_downloaded(self):
if self._is_downloading:
return False
return bool(self.filename)
async def _download(self):
raise NotImplementedError
def get_ready_future(self):
"""
Returns a future that will fire when the song is ready to be played. The future will either fire with the result (being the entry) or an exception
as to why the song download failed.
"""
future = asyncio.Future()
if self.is_downloaded:
# In the event that we're downloaded, we're already ready for playback.
future.set_result(self)
else:
# If we request a ready future, let's ensure that it'll actually resolve at one point.
self._waiting_futures.append(future)
asyncio.ensure_future(self._download())
log.debug('Created future for {0}'.format(self.filename))
return future
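    # Illustrative only (assumed usage): callers typically await the returned future from a
    # coroutine, e.g.
    #
    #   entry = await playlist_entry.get_ready_future()   # entry.filename is set on success
    #
    # and any download failure surfaces here as the future's exception.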
def _for_each_future(self, cb):
"""
Calls `cb` for each future that is not cancelled. Absorbs and logs any errors that may have occurred.
"""
futures = self._waiting_futures
self._waiting_futures = []
for future in futures:
if future.cancelled():
continue
try:
cb(future)
except:
traceback.print_exc()
def __eq__(self, other):
return self is other
def __hash__(self):
return id(self)
class URLPlaylistEntry(BasePlaylistEntry):
def __init__(self, playlist, url, title, duration=0, expected_filename=None, **meta):
super().__init__()
self.playlist = playlist
self.url = url
self.title = title
self.duration = duration
self.expected_filename = expected_filename
self.meta = meta
self.aoptions = '-vn'
self.filename_thumbnail = None
self.download_folder = self.playlist.downloader.download_folder
def __json__(self):
return self._enclose_json({
'version': 1,
'url': self.url,
'title': self.title,
'duration': self.duration,
'downloaded': self.is_downloaded,
'expected_filename': self.expected_filename,
'filename': self.filename,
'full_filename': os.path.abspath(self.filename) if self.filename else self.filename,
'filename_thumbnail': self.filename_thumbnail,
'meta': {
name: {
'type': obj.__class__.__name__,
'id': obj.id,
'name': obj.name
} for name, obj in self.meta.items() if obj
},
'aoptions': self.aoptions
})
@classmethod
def _deserialize(cls, data, playlist=None):
assert playlist is not None, cls._bad('playlist')
try:
# TODO: version check
url = data['url']
title = data['title']
duration = data['duration']
downloaded = data['downloaded'] if playlist.bot.config.save_videos else False
filename = data['filename'] if downloaded else None
filename_thumbnail = data['filename_thumbnail'] if downloaded else None
expected_filename = data['expected_filename']
meta = {}
# TODO: Better [name] fallbacks
if 'channel' in data['meta']:
# int() it because persistent queue from pre-rewrite days saved ids as strings
meta['channel'] = playlist.bot.get_channel(int(data['meta']['channel']['id']))
if not meta['channel']:
log.warning('Cannot find channel in an entry loaded from persistent queue. Channel id: {}'.format(data['meta']['channel']['id']))
meta.pop('channel')
elif 'author' in data['meta']:
# int() it because persistent queue from pre-rewrite days saved ids as strings
meta['author'] = meta['channel'].guild.get_member(int(data['meta']['author']['id']))
if not meta['author']:
log.warning('Cannot find author in an entry loaded from persistent queue. Author id: {}'.format(data['meta']['author']['id']))
meta.pop('author')
entry = cls(playlist, url, title, duration, expected_filename, **meta)
entry.filename = filename
return entry
except Exception as e:
log.error("Could not load {}".format(cls.__name__), exc_info=e)
# noinspection PyTypeChecker
async def _download(self):
if self._is_downloading:
return
self._is_downloading = True
try:
# Ensure the folder that we're going to move into exists.
if not os.path.exists(self.download_folder):
os.makedirs(self.download_folder)
# self.expected_filename: audio_cache\youtube-9R8aSKwTEMg-NOMA_-_Brain_Power.m4a
extractor = os.path.basename(self.expected_filename).split('-')[0]
# the generic extractor requires special handling
if extractor == 'generic':
# remove thumbnail images from list
imgPattern = re.compile('(\.(jpg|jpeg|png|gif|bmp))$', flags=re.IGNORECASE)
flistdir = [f.rsplit('-', 1)[0] for f in os.listdir(self.download_folder) if not imgPattern.search(f)]
expected_fname_noex, fname_ex = os.path.basename(self.expected_filename).rsplit('.', 1)
if expected_fname_noex in flistdir:
try:
rsize = int(await get_header(self.playlist.bot.aiosession, self.url, 'CONTENT-LENGTH'))
except:
rsize = 0
lfile = os.path.join(
self.download_folder,
os.listdir(self.download_folder)[flistdir.index(expected_fname_noex)]
)
# print("Resolved %s to %s" % (self.expected_filename, lfile))
lsize = os.path.getsize(lfile)
# print("Remote size: %s Local size: %s" % (rsize, lsize))
if lsize != rsize:
await self._really_download(hash=True)
else:
# print("[Download] Cached:", self.url)
self.filename = lfile
else:
# print("File not found in cache (%s)" % expected_fname_noex)
await self._really_download(hash=True)
else:
imgPattern = re.compile('(\.(jpg|jpeg|png|gif|bmp))$', flags=re.IGNORECASE)
ldir = [f for f in os.listdir(self.download_folder) if not imgPattern.search(f)]
flistdir = [f.rsplit('.', 1)[0] for f in ldir]
expected_fname_base = os.path.basename(self.expected_filename)
expected_fname_noex = expected_fname_base.rsplit('.', 1)[0]
# idk wtf this is but its probably legacy code
# or i have youtube to blame for changing shit again
if expected_fname_base in ldir:
self.filename = os.path.join(self.download_folder, expected_fname_base)
log.info("Download cached: {}".format(self.url))
elif expected_fname_noex in flistdir:
log.info("Download cached (different extension): {}".format(self.url))
self.filename = os.path.join(self.download_folder, ldir[flistdir.index(expected_fname_noex)])
log.debug("Expected {}, got {}".format(
self.expected_filename.rsplit('.', 1)[-1],
self.filename.rsplit('.', 1)[-1]
))
else:
await self._really_download()
if self.playlist.bot.config.use_experimental_equalization:
try:
mean, maximum = await self.get_mean_volume(self.filename)
aoptions = '-af "volume={}dB"'.format((maximum * -1))
except Exception as e:
log.error('There was a problem with working out EQ, likely caused by a strange installation of FFmpeg. '
'This has not stopped the bot from working, but it does mean your tracks will not be equalised.')
aoptions = "-vn"
else:
aoptions = "-vn"
self.aoptions = aoptions
# Trigger ready callbacks.
self._for_each_future(lambda future: future.set_result(self))
except Exception as e:
traceback.print_exc()
self._for_each_future(lambda future: future.set_exception(e))
finally:
self._is_downloading = False
async def run_command(self, cmd):
p = await asyncio.create_subprocess_shell(cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
log.debug('Starting asyncio subprocess ({0}) with command: {1}'.format(p, cmd))
stdout, stderr = await p.communicate()
return stdout + stderr
def get(self, program):
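# which()-style helper: returns the path to `program` if it is executable
# (searching PATH when no directory component is given, and also trying a
# ".exe" suffix on Windows); returns None when nothing suitable is found.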
def is_exe(fpath):
found = os.path.isfile(fpath) and os.access(fpath, os.X_OK)
if not found and sys.platform == 'win32':
fpath = fpath + ".exe"
found = os.path.isfile(fpath) and os.access(fpath, os.X_OK)
return found
fpath, __ = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
async def get_mean_volume(self, input_file):
log.debug('Calculating mean volume of {0}'.format(input_file))
cmd = '"' + self.get('ffmpeg') + '" -i "' + input_file + '" -af "volumedetect" -f null /dev/null'
output = await self.run_command(cmd)
output = output.decode("utf-8")
# print('----', output)
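# The regexes below expect ffmpeg volumedetect lines roughly like
# (illustrative): "mean_volume: -17.5 dB" and "max_volume: -2.0 dB";
# if a value is missing, 0.0 is used as the fallback.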
mean_volume_matches = re.findall(r"mean_volume: ([\-\d\.]+) dB", output)
if (mean_volume_matches):
mean_volume = float(mean_volume_matches[0])
else:
mean_volume = float(0)
max_volume_matches = re.findall(r"max_volume: ([\-\d\.]+) dB", output)
if (max_volume_matches):
max_volume = float(max_volume_matches[0])
else:
max_volume = float(0)
log.debug('Calculated mean volume as {0}'.format(mean_volume))
return mean_volume, max_volume
# noinspection PyShadowingBuiltins
async def _really_download(self, *, hash=False):
log.info("Download started: {}".format(self.url))
retry = True
while retry:
try:
result = await self.playlist.downloader.extract_info(self.playlist.loop, self.url, download=True)
break
except Exception as e:
raise ExtractionError(e)
log.info("Download complete: {}".format(self.url))
if result is None:
log.critical("YTDL has failed, everyone panic")
raise ExtractionError("ytdl broke and hell if I know why")
# What the fuck do I do now?
self.filename = unhashed_fname = self.playlist.downloader.ytdl.prepare_filename(result)
imgPattern = re.compile(self.filename.lstrip(self.download_folder + os.sep).rsplit('.', 1)[0] + '(\.(jpg|jpeg|png|gif|bmp))$', re.IGNORECASE)
self.filename_thumbnail = next(os.path.join(self.download_folder, f) for f in os.listdir(self.download_folder) if imgPattern.search(f))
if hash:
# insert the 8 last characters of the file hash to the file name to ensure uniqueness
self.filename = md5sum(unhashed_fname, 8).join('-.').join(unhashed_fname.rsplit('.', 1))
if os.path.isfile(self.filename):
# Oh bother it was actually there.
os.unlink(unhashed_fname)
else:
# Move the temporary file to its final location.
os.rename(unhashed_fname, self.filename)
class StreamPlaylistEntry(BasePlaylistEntry):
def __init__(self, playlist, url, title, *, destination=None, **meta):
super().__init__()
self.playlist = playlist
self.url = url
self.title = title
self.destination = destination
self.duration = 0
self.meta = meta
if self.destination:
self.filename = self.destination
def __json__(self):
return self._enclose_json({
'version': 1,
'url': self.url,
'filename': self.filename,
'title': self.title,
'destination': self.destination,
'meta': {
name: {
'type': obj.__class__.__name__,
'id': obj.id,
'name': obj.name
} for name, obj in self.meta.items() if obj
}
})
@classmethod
def _deserialize(cls, data, playlist=None):
assert playlist is not None, cls._bad('playlist')
try:
# TODO: version check
url = data['url']
title = data['title']
destination = data['destination']
filename = data['filename']
meta = {}
# TODO: Better [name] fallbacks
if 'channel' in data['meta']:
ch = playlist.bot.get_channel(data['meta']['channel']['id'])
meta['channel'] = ch or data['meta']['channel']['name']
if 'author' in data['meta']:
meta['author'] = meta['channel'].guild.get_member(data['meta']['author']['id'])
entry = cls(playlist, url, title, destination=destination, **meta)
if not destination and filename:
entry.filename = destination
return entry
except Exception as e:
log.error("Could not load {}".format(cls.__name__), exc_info=e)
# noinspection PyMethodOverriding
async def _download(self, *, fallback=False):
self._is_downloading = True
url = self.destination if fallback else self.url
try:
result = await self.playlist.downloader.extract_info(self.playlist.loop, url, download=False)
except Exception as e:
if not fallback and self.destination:
return await self._download(fallback=True)
raise ExtractionError(e)
else:
self.filename = result['url']
# I might need some sort of events or hooks or shit
# for when ffmpeg inevitably fucks up and i have to restart
# although maybe that should be at a slightly lower level
finally:
self._is_downloading = False
| []
| []
| [
"PATH"
]
| [] | ["PATH"] | python | 1 | 0 | |
controllers/suite_integration_test.go | //go:build integration
// +build integration
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"os"
"path/filepath"
"testing"
"net/http"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/redhat-cop/operator-utils/pkg/util"
redhatcopv1alpha1 "github.com/redhat-cop/vault-config-operator/api/v1alpha1"
controllertestutils "github.com/redhat-cop/vault-config-operator/controllers/controllertestutils"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
//+kubebuilder:scaffold:imports
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var _ *rest.Config
var k8sIntegrationClient client.Client
var testIntegrationEnv *envtest.Environment
var ctx context.Context
var cancel context.CancelFunc
var vaultTestNamespace *corev1.Namespace
var vaultAdminNamespace *corev1.Namespace
const (
vaultTestNamespaceName = "test-vault-config-operator"
vaultAdminNamespaceName = "vault-admin"
)
func TestIntegrationAPIs(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecsWithDefaultAndCustomReporters(t,
"Controller Integration Suite",
[]Reporter{printer.NewlineReporter{}})
}
var decoder = controllertestutils.NewDecoder()
var _ = BeforeSuite(func() {
logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
ctx, cancel = context.WithCancel(context.TODO())
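// Environment assumptions for this suite (summary): an existing cluster is
// used (USE_EXISTING_CLUSTER is forced to "true"), VAULT_ADDR falls back to
// http://localhost:8200 when unset, and ACCESSOR must be exported by the
// caller before running the tests.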
Expect(os.Setenv("USE_EXISTING_CLUSTER", "true")).To(Succeed())
_, isSet := os.LookupEnv("VAULT_ADDR")
if !isSet {
Expect(os.Setenv("VAULT_ADDR", "http://localhost:8200")).To(Succeed())
}
Expect(os.Getenv("ACCESSOR")).ToNot(BeEmpty())
_, err := http.Get(os.Getenv("VAULT_ADDR"))
Expect(err).To(BeNil())
By("bootstrapping test environment")
testIntegrationEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")},
ErrorIfCRDPathMissing: true,
}
cfg, err := testIntegrationEnv.Start()
Expect(err).NotTo(HaveOccurred())
Expect(cfg).NotTo(BeNil())
err = redhatcopv1alpha1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
//+kubebuilder:scaffold:scheme
k8sIntegrationClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
Expect(err).NotTo(HaveOccurred())
Expect(k8sIntegrationClient).NotTo(BeNil())
mgr, err := ctrl.NewManager(cfg, ctrl.Options{
Scheme: scheme.Scheme,
})
Expect(err).ToNot(HaveOccurred())
err = (&VaultSecretReconciler{
ReconcilerBase: util.NewReconcilerBase(mgr.GetClient(), mgr.GetScheme(), mgr.GetConfig(), mgr.GetEventRecorderFor("VaultSecret"), mgr.GetAPIReader()),
Log: ctrl.Log.WithName("controllers").WithName("VaultSecret"),
ControllerName: "VaultSecret",
}).SetupWithManager(mgr)
Expect(err).ToNot(HaveOccurred())
err = (&PasswordPolicyReconciler{
ReconcilerBase: util.NewReconcilerBase(mgr.GetClient(), mgr.GetScheme(), mgr.GetConfig(), mgr.GetEventRecorderFor("PasswordPolicy"), mgr.GetAPIReader()),
Log: ctrl.Log.WithName("controllers").WithName("PasswordPolicy"),
ControllerName: "PasswordPolicy",
}).SetupWithManager(mgr)
Expect(err).ToNot(HaveOccurred())
err = (&PolicyReconciler{
ReconcilerBase: util.NewReconcilerBase(mgr.GetClient(), mgr.GetScheme(), mgr.GetConfig(), mgr.GetEventRecorderFor("Policy"), mgr.GetAPIReader()),
Log: ctrl.Log.WithName("controllers").WithName("Policy"),
ControllerName: "Policy",
}).SetupWithManager(mgr)
Expect(err).ToNot(HaveOccurred())
err = (&KubernetesAuthEngineRoleReconciler{
ReconcilerBase: util.NewReconcilerBase(mgr.GetClient(), mgr.GetScheme(), mgr.GetConfig(), mgr.GetEventRecorderFor("KubernetesAuthEngineRole"), mgr.GetAPIReader()),
Log: ctrl.Log.WithName("controllers").WithName("KubernetesAuthEngineRole"),
ControllerName: "KubernetesAuthEngineRole",
}).SetupWithManager(mgr)
Expect(err).ToNot(HaveOccurred())
err = (&SecretEngineMountReconciler{
ReconcilerBase: util.NewReconcilerBase(mgr.GetClient(), mgr.GetScheme(), mgr.GetConfig(), mgr.GetEventRecorderFor("SecretEngineMount"), mgr.GetAPIReader()),
Log: ctrl.Log.WithName("controllers").WithName("SecretEngineMount"),
ControllerName: "SecretEngineMount",
}).SetupWithManager(mgr)
Expect(err).ToNot(HaveOccurred())
err = (&RandomSecretReconciler{
ReconcilerBase: util.NewReconcilerBase(mgr.GetClient(), mgr.GetScheme(), mgr.GetConfig(), mgr.GetEventRecorderFor("RandomSecret"), mgr.GetAPIReader()),
Log: ctrl.Log.WithName("controllers").WithName("RandomSecret"),
ControllerName: "RandomSecret",
}).SetupWithManager(mgr)
Expect(err).ToNot(HaveOccurred())
err = (&PKISecretEngineConfigReconciler{
ReconcilerBase: util.NewReconcilerBase(mgr.GetClient(), mgr.GetScheme(), mgr.GetConfig(), mgr.GetEventRecorderFor("PKISecretEngineConfig"), mgr.GetAPIReader()),
Log: ctrl.Log.WithName("controllers").WithName("PKISecretEngineConfig"),
ControllerName: "PKISecretEngineConfig",
}).SetupWithManager(mgr)
Expect(err).ToNot(HaveOccurred())
err = (&PKISecretEngineRoleReconciler{
ReconcilerBase: util.NewReconcilerBase(mgr.GetClient(), mgr.GetScheme(), mgr.GetConfig(), mgr.GetEventRecorderFor("PKISecretEngineRole"), mgr.GetAPIReader()),
Log: ctrl.Log.WithName("controllers").WithName("PKISecretEngineRole"),
ControllerName: "PKISecretEngineRole",
}).SetupWithManager(mgr)
Expect(err).ToNot(HaveOccurred())
By(fmt.Sprintf("Creating the %v namespace", vaultAdminNamespaceName))
vaultAdminNamespace = &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: vaultAdminNamespaceName,
},
}
Expect(k8sIntegrationClient.Create(ctx, vaultAdminNamespace)).Should(Succeed())
By(fmt.Sprintf("Creating the %v namespace", vaultTestNamespaceName))
vaultTestNamespace = &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: vaultTestNamespaceName,
Labels: map[string]string{
"database-engine-admin": "true",
},
},
}
Expect(k8sIntegrationClient.Create(ctx, vaultTestNamespace)).Should(Succeed())
go func() {
defer GinkgoRecover()
err = mgr.Start(ctx)
Expect(err).ToNot(HaveOccurred(), "failed to run manager")
}()
}, 60)
var _ = AfterSuite(func() {
cancel()
By("tearing down the test environment")
err := testIntegrationEnv.Stop()
Expect(err).NotTo(HaveOccurred())
})
| [
"\"ACCESSOR\"",
"\"VAULT_ADDR\""
]
| []
| [
"VAULT_ADDR",
"ACCESSOR"
]
| [] | ["VAULT_ADDR", "ACCESSOR"] | go | 2 | 0 | |
vendor/github.com/centrifugal/centrifuge/internal/prepared/reply.go | package prepared
import (
"sync"
"github.com/centrifugal/protocol"
)
// Reply is structure for encoding reply only once.
type Reply struct {
ProtoType protocol.Type
Reply *protocol.Reply
data []byte
once sync.Once
}
// NewReply initializes Reply.
func NewReply(reply *protocol.Reply, protoType protocol.Type) *Reply {
return &Reply{
Reply: reply,
ProtoType: protoType,
}
}
// Data returns data associated with reply which is only calculated once.
func (r *Reply) Data() []byte {
r.once.Do(func() {
encoder := protocol.GetReplyEncoder(r.ProtoType)
_ = encoder.Encode(r.Reply)
data := encoder.Finish()
protocol.PutReplyEncoder(r.ProtoType, encoder)
r.data = data
})
return r.data
}
| []
| []
| []
| [] | [] | go | null | null | null |
showResults.py | import pymongo
import os
from dotenv import load_dotenv
load_dotenv()
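# Reads FIRST_USER, SECOND_USER, MONGO_USER and MONGO_PASSWORD from the
# environment (typically a local .env file); no credentials are hard-coded.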
first_user_handle = os.getenv("FIRST_USER")
second_user_handle = os.getenv("SECOND_USER")
mongo_user = os.getenv("MONGO_USER")
mongo_password = os.getenv("MONGO_PASSWORD")
mongo_url = "mongodb+srv://" + mongo_user + ":" + mongo_password + \
"@cluster0.ecwwk.mongodb.net/<dbname>?retryWrites=true&w=majority"
client = pymongo.MongoClient(mongo_url)
db = client.twitter_db
followers_collection = db["followers"]
# Statements in console
num_checked = followers_collection.count_documents({first_user_handle + 'Checked': True})
num_overlap = followers_collection.count_documents({first_user_handle: True})
num_of_fails = followers_collection.count_documents({"failed": True})
print("Tweepy failed ", num_of_fails, " times")
print("Followers of ", second_user_handle, " analyzed: ", num_checked)
print("Overlaps with ", first_user_handle, " found: ", num_overlap) | []
| []
| [
"SECOND_USER",
"MONGO_PASSWORD",
"FIRST_USER",
"MONGO_USER"
]
| [] | ["SECOND_USER", "MONGO_PASSWORD", "FIRST_USER", "MONGO_USER"] | python | 4 | 0 | |
chatroom/init.go | package chatroom
import (
"../../antgo"
"../../antgo/multinet"
"../../antgo/protocol"
"bytes"
"strings"
"time"
)
var Timeout time.Duration
var Handlers map[string]func(conn *antgo.Conn, data string, worker *Worker)
func init() {
Timeout = 10 * time.Second
}
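// NewListenDialer builds the lookup key "new<ltype>listendialer" (for example
// "newtcplistendialer" -- illustrative, depending on what multinet registers)
// and calls the matching constructor from multinet.Indexes; NewProtocol below
// does the same against protocol.Indexes with "new<ptype>protocol".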
func NewListenDialer(Ltype string, Transport string, IP string, Port int) antgo.ListenDialer {
var buffer bytes.Buffer
buffer.WriteString("new")
buffer.WriteString(strings.ToLower(Ltype))
buffer.WriteString("listendialer")
// buffer.Reset()
return multinet.Indexes[buffer.String()](Transport, IP, Port)
}
func NewProtocol(Ptype string, listendialer antgo.ListenDialer) antgo.Protocol {
var buffer bytes.Buffer
buffer.WriteString("new")
buffer.WriteString(strings.ToLower(Ptype))
buffer.WriteString("protocol")
// buffer.Reset()
return protocol.Indexes[buffer.String()](listendialer, Ptype)
}
| []
| []
| []
| [] | [] | go | null | null | null |
integration_tests/samples/basic_usage/channels.py | import logging
logging.basicConfig(level=logging.DEBUG)
# export SLACK_API_TOKEN=xoxb-***
# python3 integration_tests/samples/basic_usage/channels.py
import os
from slack_sdk.web import WebClient
client = WebClient(token=os.environ["SLACK_API_TOKEN"])
response = client.conversations_list(exclude_archived=1)
channel_id = response["channels"][0]["id"]
response = client.conversations_info(channel=channel_id)
response = client.conversations_join(channel=channel_id)
response = client.conversations_leave(channel=channel_id)
response = client.conversations_join(channel=channel_id)
| []
| []
| [
"SLACK_API_TOKEN"
]
| [] | ["SLACK_API_TOKEN"] | python | 1 | 0 | |
boto/connection.py | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# Copyright (c) 2010 Google
# Copyright (c) 2008 rPath, Inc.
# Copyright (c) 2009 The Echo Nest Corporation
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# Copyright (c) 2011, Nexenta Systems Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Parts of this code were copied or derived from sample code supplied by AWS.
# The following notice applies to that code.
#
# This software code is made available "AS IS" without warranties of any
# kind. You may copy, display, modify and redistribute the software
# code either by itself or as incorporated into your code; provided that
# you do not remove any proprietary notices. Your use of this software
# code is at your own risk and you waive any claim against Amazon
# Digital Services, Inc. or its affiliates with respect to your use of
# this software code. (c) 2006 Amazon Digital Services, Inc. or its
# affiliates.
"""
Handles basic connections to AWS
"""
from __future__ import with_statement
import base64
import errno
import httplib
import os
import Queue
import random
import re
import socket
import sys
import time
import urllib
import urlparse
import xml.sax
import copy
import auth
import auth_handler
import boto
import boto.utils
import boto.handler
import boto.cacerts
from boto import config, UserAgent
from boto.exception import AWSConnectionError, BotoClientError
from boto.exception import BotoServerError
from boto.provider import Provider
from boto.resultset import ResultSet
HAVE_HTTPS_CONNECTION = False
try:
import ssl
from boto import https_connection
# Google App Engine runs on Python 2.5 so doesn't have ssl.SSLError.
if hasattr(ssl, 'SSLError'):
HAVE_HTTPS_CONNECTION = True
except ImportError:
pass
try:
import threading
except ImportError:
import dummy_threading as threading
ON_APP_ENGINE = all(key in os.environ for key in (
'USER_IS_ADMIN', 'CURRENT_VERSION_ID', 'APPLICATION_ID'))
PORTS_BY_SECURITY = {True: 443,
False: 80}
DEFAULT_CA_CERTS_FILE = os.path.join(os.path.dirname(os.path.abspath(boto.cacerts.__file__)), "cacerts.txt")
class HostConnectionPool(object):
"""
A pool of connections for one remote (host,is_secure).
When connections are added to the pool, they are put into a
pending queue. The _mexe method returns connections to the pool
before the response body has been read, so the connections aren't
ready to send another request yet. They stay in the pending queue
until they are ready for another request, at which point they are
returned to the pool of ready connections.
The pool of ready connections is an ordered list of
(connection,time) pairs, where the time is the time the connection
was returned from _mexe. After a certain period of time,
connections are considered stale, and discarded rather than being
reused. This saves having to wait for the connection to time out
if AWS has decided to close it on the other end because of
inactivity.
Thread Safety:
This class is used only from ConnectionPool while its mutex
is held.
"""
def __init__(self):
self.queue = []
def size(self):
"""
Returns the number of connections in the pool for this host.
Some of the connections may still be in use, and may not be
ready to be returned by get().
"""
return len(self.queue)
def put(self, conn):
"""
Adds a connection to the pool, along with the time it was
added.
"""
self.queue.append((conn, time.time()))
def get(self):
"""
Returns the next connection in this pool that is ready to be
reused. Returns None if there aren't any.
"""
# Discard ready connections that are too old.
self.clean()
# Return the first connection that is ready, and remove it
# from the queue. Connections that aren't ready are returned
# to the end of the queue with an updated time, on the
# assumption that somebody is actively reading the response.
for _ in range(len(self.queue)):
(conn, _) = self.queue.pop(0)
if self._conn_ready(conn):
return conn
else:
self.put(conn)
return None
def _conn_ready(self, conn):
"""
There is a nice state diagram at the top of httplib.py. It
indicates that once the response headers have been read (which
_mexe does before adding the connection to the pool), a
response is attached to the connection, and it stays there
until it's done reading. This isn't entirely true: even after
the client is done reading, the response may be closed, but
not removed from the connection yet.
This is ugly, reading a private instance variable, but the
state we care about isn't available in any public methods.
"""
if ON_APP_ENGINE:
# Google AppEngine implementation of HTTPConnection doesn't contain
# _HTTPConnection__response attribute. Moreover, it's not possible
# to determine if given connection is ready. Reusing connections
# simply doesn't make sense with App Engine urlfetch service.
return False
else:
response = getattr(conn, '_HTTPConnection__response', None)
return (response is None) or response.isclosed()
def clean(self):
"""
Get rid of stale connections.
"""
# Note that we do not close the connection here -- somebody
# may still be reading from it.
while len(self.queue) > 0 and self._pair_stale(self.queue[0]):
self.queue.pop(0)
def _pair_stale(self, pair):
"""
Returns true if the (connection, time) pair is too old to be
used.
"""
(_conn, return_time) = pair
now = time.time()
return return_time + ConnectionPool.STALE_DURATION < now
class ConnectionPool(object):
"""
A connection pool that expires connections after a fixed period of
time. This saves time spent waiting for a connection that AWS has
timed out on the other end.
This class is thread-safe.
"""
#
# The amount of time between calls to clean.
#
CLEAN_INTERVAL = 5.0
#
# How long before a connection becomes "stale" and won't be reused
# again. The intention is that this time is less than the timeout
# period that AWS uses, so we'll never try to reuse a connection
# and find that AWS is timing it out.
#
# Experimentation in July 2011 shows that AWS starts timing things
# out after three minutes. The 60 seconds here is conservative so
# we should never hit that 3-minute timeout.
#
STALE_DURATION = 60.0
def __init__(self):
# Mapping from (host,is_secure) to HostConnectionPool.
# If a pool becomes empty, it is removed.
self.host_to_pool = {}
# The last time the pool was cleaned.
self.last_clean_time = 0.0
self.mutex = threading.Lock()
ConnectionPool.STALE_DURATION = \
config.getfloat('Boto', 'connection_stale_duration',
ConnectionPool.STALE_DURATION)
def __getstate__(self):
pickled_dict = copy.copy(self.__dict__)
pickled_dict['host_to_pool'] = {}
del pickled_dict['mutex']
return pickled_dict
def __setstate__(self, dct):
self.__init__()
def size(self):
"""
Returns the number of connections in the pool.
"""
return sum(pool.size() for pool in self.host_to_pool.values())
def get_http_connection(self, host, is_secure):
"""
Gets a connection from the pool for the named host. Returns
None if there is no connection that can be reused. It's the caller's
responsibility to call close() on the connection when it's no longer
needed.
"""
self.clean()
with self.mutex:
key = (host, is_secure)
if key not in self.host_to_pool:
return None
return self.host_to_pool[key].get()
def put_http_connection(self, host, is_secure, conn):
"""
Adds a connection to the pool of connections that can be
reused for the named host.
"""
with self.mutex:
key = (host, is_secure)
if key not in self.host_to_pool:
self.host_to_pool[key] = HostConnectionPool()
self.host_to_pool[key].put(conn)
def clean(self):
"""
Clean up the stale connections in all of the pools, and then
get rid of empty pools. Pools clean themselves every time a
connection is fetched; this cleaning takes care of pools that
aren't being used any more, so nothing is being gotten from
them.
"""
with self.mutex:
now = time.time()
if self.last_clean_time + self.CLEAN_INTERVAL < now:
to_remove = []
for (host, pool) in self.host_to_pool.items():
pool.clean()
if pool.size() == 0:
to_remove.append(host)
for host in to_remove:
del self.host_to_pool[host]
self.last_clean_time = now
class HTTPRequest(object):
def __init__(self, method, protocol, host, port, path, auth_path,
params, headers, body):
"""Represents an HTTP request.
:type method: string
:param method: The HTTP method name, 'GET', 'POST', 'PUT' etc.
:type protocol: string
:param protocol: The http protocol used, 'http' or 'https'.
:type host: string
:param host: Host to which the request is addressed. eg. abc.com
:type port: int
:param port: port on which the request is being sent. Zero means unset,
in which case default port will be chosen.
:type path: string
:param path: URL path that is being accessed.
:type auth_path: string
:param auth_path: The part of the URL path used when creating the
authentication string.
:type params: dict
:param params: HTTP url query parameters, with key as name of
the param, and value as value of param.
:type headers: dict
:param headers: HTTP headers, with key as name of the header and value
as value of header.
:type body: string
:param body: Body of the HTTP request. If not present, will be None or
empty string ('').
"""
self.method = method
self.protocol = protocol
self.host = host
self.port = port
self.path = path
if auth_path is None:
auth_path = path
self.auth_path = auth_path
self.params = params
# chunked Transfer-Encoding should act only on PUT request.
if headers and 'Transfer-Encoding' in headers and \
headers['Transfer-Encoding'] == 'chunked' and \
self.method != 'PUT':
self.headers = headers.copy()
del self.headers['Transfer-Encoding']
else:
self.headers = headers
self.body = body
def __str__(self):
return (('method:(%s) protocol:(%s) host(%s) port(%s) path(%s) '
'params(%s) headers(%s) body(%s)') % (self.method,
self.protocol, self.host, self.port, self.path, self.params,
self.headers, self.body))
def authorize(self, connection, **kwargs):
for key in self.headers:
val = self.headers[key]
if isinstance(val, unicode):
self.headers[key] = urllib.quote_plus(val.encode('utf-8'))
connection._auth_handler.add_auth(self, **kwargs)
self.headers['User-Agent'] = UserAgent
# I'm not sure if this is still needed, now that add_auth is
# setting the content-length for POST requests.
if 'Content-Length' not in self.headers:
if 'Transfer-Encoding' not in self.headers or \
self.headers['Transfer-Encoding'] != 'chunked':
self.headers['Content-Length'] = str(len(self.body))
class HTTPResponse(httplib.HTTPResponse):
def __init__(self, *args, **kwargs):
httplib.HTTPResponse.__init__(self, *args, **kwargs)
self._cached_response = ''
def read(self, amt=None):
"""Read the response.
This method does not have the same behavior as
httplib.HTTPResponse.read. Instead, if this method is called with
no ``amt`` arg, then the response body will be cached. Subsequent
calls to ``read()`` with no args **will return the cached response**.
"""
if amt is None:
# The reason for doing this is that many places in boto call
# response.read() and except to get the response body that they
# can then process. To make sure this always works as they expect
# we're caching the response so that multiple calls to read()
# will return the full body. Note that this behavior only
# happens if the amt arg is not specified.
if not self._cached_response:
self._cached_response = httplib.HTTPResponse.read(self)
return self._cached_response
else:
return httplib.HTTPResponse.read(self, amt)
class AWSAuthConnection(object):
def __init__(self, host, aws_access_key_id=None,
aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, path='/',
provider='aws', security_token=None,
suppress_consec_slashes=True,
validate_certs=True):
"""
:type host: str
:param host: The host to make the connection to
:keyword str aws_access_key_id: Your AWS Access Key ID (provided by
Amazon). If none is specified, the value in your
``AWS_ACCESS_KEY_ID`` environmental variable is used.
:keyword str aws_secret_access_key: Your AWS Secret Access Key
(provided by Amazon). If none is specified, the value in your
``AWS_SECRET_ACCESS_KEY`` environmental variable is used.
:type is_secure: boolean
:param is_secure: Whether the connection is over SSL
:type https_connection_factory: list or tuple
:param https_connection_factory: A pair of an HTTP connection
factory and the exceptions to catch. The factory should have
a similar interface to L{httplib.HTTPSConnection}.
:param str proxy: Address/hostname for a proxy server
:type proxy_port: int
:param proxy_port: The port to use when connecting over a proxy
:type proxy_user: str
:param proxy_user: The username to connect with on the proxy
:type proxy_pass: str
:param proxy_pass: The password to use when connecting over a proxy.
:type port: int
:param port: The port to use to connect
:type suppress_consec_slashes: bool
:param suppress_consec_slashes: If provided, controls whether
consecutive slashes will be suppressed in key paths.
:type validate_certs: bool
:param validate_certs: Controls whether SSL certificates
will be validated or not. Defaults to True.
"""
self.suppress_consec_slashes = suppress_consec_slashes
self.num_retries = 6
# Override passed-in is_secure setting if value was defined in config.
if config.has_option('Boto', 'is_secure'):
is_secure = config.getboolean('Boto', 'is_secure')
self.is_secure = is_secure
# Whether or not to validate server certificates.
# The default is now to validate certificates. This can be
# overridden in the boto config file or by passing an
# explicit validate_certs parameter to the class constructor.
self.https_validate_certificates = config.getbool(
'Boto', 'https_validate_certificates',
validate_certs)
if self.https_validate_certificates and not HAVE_HTTPS_CONNECTION:
raise BotoClientError(
"SSL server certificate validation is enabled in boto "
"configuration, but Python dependencies required to "
"support this feature are not available. Certificate "
"validation is only supported when running under Python "
"2.6 or later.")
self.ca_certificates_file = config.get_value(
'Boto', 'ca_certificates_file', DEFAULT_CA_CERTS_FILE)
self.handle_proxy(proxy, proxy_port, proxy_user, proxy_pass)
# define exceptions from httplib that we want to catch and retry
self.http_exceptions = (httplib.HTTPException, socket.error,
socket.gaierror)
# define subclasses of the above that are not retryable.
self.http_unretryable_exceptions = []
if HAVE_HTTPS_CONNECTION:
self.http_unretryable_exceptions.append(
https_connection.InvalidCertificateException)
# define values in socket exceptions we don't want to catch
self.socket_exception_values = (errno.EINTR,)
if https_connection_factory is not None:
self.https_connection_factory = https_connection_factory[0]
self.http_exceptions += https_connection_factory[1]
else:
self.https_connection_factory = None
if (is_secure):
self.protocol = 'https'
else:
self.protocol = 'http'
self.host = host
self.path = path
# if the value passed in for debug is not an int, fall back to 0
if not isinstance(debug, (int, long)):
debug = 0
self.debug = config.getint('Boto', 'debug', debug)
if port:
self.port = port
else:
self.port = PORTS_BY_SECURITY[is_secure]
# Timeout used to tell httplib how long to wait for socket timeouts.
# Default is to leave timeout unchanged, which will in turn result in
# the socket's default global timeout being used. To specify a
# timeout, set http_socket_timeout in Boto config. Regardless,
# timeouts will only be applied if Python is 2.6 or greater.
self.http_connection_kwargs = {}
if (sys.version_info[0], sys.version_info[1]) >= (2, 6):
if config.has_option('Boto', 'http_socket_timeout'):
timeout = config.getint('Boto', 'http_socket_timeout')
self.http_connection_kwargs['timeout'] = timeout
if isinstance(provider, Provider):
# Allow overriding Provider
self.provider = provider
else:
self._provider_type = provider
self.provider = Provider(self._provider_type,
aws_access_key_id,
aws_secret_access_key,
security_token)
# allow config file to override default host
if self.provider.host:
self.host = self.provider.host
self._pool = ConnectionPool()
self._connection = (self.server_name(), self.is_secure)
self._last_rs = None
self._auth_handler = auth.get_auth_handler(
host, config, self.provider, self._required_auth_capability())
def __repr__(self):
return '%s:%s' % (self.__class__.__name__, self.host)
def _required_auth_capability(self):
return []
def connection(self):
return self.get_http_connection(*self._connection)
connection = property(connection)
def aws_access_key_id(self):
return self.provider.access_key
aws_access_key_id = property(aws_access_key_id)
gs_access_key_id = aws_access_key_id
access_key = aws_access_key_id
def aws_secret_access_key(self):
return self.provider.secret_key
aws_secret_access_key = property(aws_secret_access_key)
gs_secret_access_key = aws_secret_access_key
secret_key = aws_secret_access_key
def get_path(self, path='/'):
# The default behavior is to suppress consecutive slashes for reasons
# discussed at
# https://groups.google.com/forum/#!topic/boto-dev/-ft0XPUy0y8
# You can override that behavior with the suppress_consec_slashes param.
if not self.suppress_consec_slashes:
return self.path + re.sub('^/*', "", path)
pos = path.find('?')
if pos >= 0:
params = path[pos:]
path = path[:pos]
else:
params = None
if path[-1] == '/':
need_trailing = True
else:
need_trailing = False
path_elements = self.path.split('/')
path_elements.extend(path.split('/'))
path_elements = [p for p in path_elements if p]
path = '/' + '/'.join(path_elements)
if path[-1] != '/' and need_trailing:
path += '/'
if params:
path = path + params
return path
def server_name(self, port=None):
if not port:
port = self.port
if port == 80:
signature_host = self.host
else:
# This unfortunate little hack can be attributed to
# a difference in the 2.6 version of httplib. In old
# versions, it would append ":443" to the hostname sent
# in the Host header and so we needed to make sure we
# did the same when calculating the V2 signature. In 2.6
# (and higher!)
# it no longer does that. Hence, this kludge.
if ((ON_APP_ENGINE and sys.version[:3] == '2.5') or
sys.version[:3] in ('2.6', '2.7')) and port == 443:
signature_host = self.host
else:
signature_host = '%s:%d' % (self.host, port)
return signature_host
def handle_proxy(self, proxy, proxy_port, proxy_user, proxy_pass):
self.proxy = proxy
self.proxy_port = proxy_port
self.proxy_user = proxy_user
self.proxy_pass = proxy_pass
if 'http_proxy' in os.environ and not self.proxy:
pattern = re.compile(
'(?:http://)?' \
'(?:(?P<user>\w+):(?P<pass>.*)@)?' \
'(?P<host>[\w\-\.]+)' \
'(?::(?P<port>\d+))?'
)
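# Matches values such as "http://user:[email protected]:3128"
# (illustrative), capturing the user, pass, host and port groups.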
match = pattern.match(os.environ['http_proxy'])
if match:
self.proxy = match.group('host')
self.proxy_port = match.group('port')
self.proxy_user = match.group('user')
self.proxy_pass = match.group('pass')
else:
if not self.proxy:
self.proxy = config.get_value('Boto', 'proxy', None)
if not self.proxy_port:
self.proxy_port = config.get_value('Boto', 'proxy_port', None)
if not self.proxy_user:
self.proxy_user = config.get_value('Boto', 'proxy_user', None)
if not self.proxy_pass:
self.proxy_pass = config.get_value('Boto', 'proxy_pass', None)
if not self.proxy_port and self.proxy:
print "http_proxy environment variable does not specify " \
"a port, using default"
self.proxy_port = self.port
self.use_proxy = (self.proxy != None)
def get_http_connection(self, host, is_secure):
conn = self._pool.get_http_connection(host, is_secure)
if conn is not None:
return conn
else:
return self.new_http_connection(host, is_secure)
def new_http_connection(self, host, is_secure):
if self.use_proxy:
host = '%s:%d' % (self.proxy, int(self.proxy_port))
if host is None:
host = self.server_name()
if is_secure:
boto.log.debug(
'establishing HTTPS connection: host=%s, kwargs=%s',
host, self.http_connection_kwargs)
if self.use_proxy:
connection = self.proxy_ssl()
elif self.https_connection_factory:
connection = self.https_connection_factory(host)
elif self.https_validate_certificates and HAVE_HTTPS_CONNECTION:
connection = https_connection.CertValidatingHTTPSConnection(
host, ca_certs=self.ca_certificates_file,
**self.http_connection_kwargs)
else:
connection = httplib.HTTPSConnection(host,
**self.http_connection_kwargs)
else:
boto.log.debug('establishing HTTP connection: kwargs=%s' %
self.http_connection_kwargs)
if self.https_connection_factory:
# even though the factory says https, this is too handy
# to not be able to allow overriding for http also.
connection = self.https_connection_factory(host,
**self.http_connection_kwargs)
else:
connection = httplib.HTTPConnection(host,
**self.http_connection_kwargs)
if self.debug > 1:
connection.set_debuglevel(self.debug)
# self.connection must be maintained for backwards-compatibility
# however, it must be dynamically pulled from the connection pool
# set a private variable which will enable that
if host.split(':')[0] == self.host and is_secure == self.is_secure:
self._connection = (host, is_secure)
# Set the response class of the http connection to use our custom
# class.
connection.response_class = HTTPResponse
return connection
def put_http_connection(self, host, is_secure, connection):
self._pool.put_http_connection(host, is_secure, connection)
def proxy_ssl(self):
host = '%s:%d' % (self.host, self.port)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((self.proxy, int(self.proxy_port)))
except:
raise
boto.log.debug("Proxy connection: CONNECT %s HTTP/1.0\r\n", host)
sock.sendall("CONNECT %s HTTP/1.0\r\n" % host)
sock.sendall("User-Agent: %s\r\n" % UserAgent)
if self.proxy_user and self.proxy_pass:
for k, v in self.get_proxy_auth_header().items():
sock.sendall("%s: %s\r\n" % (k, v))
# See discussion about this config option at
# https://groups.google.com/forum/?fromgroups#!topic/boto-dev/teenFvOq2Cc
if config.getbool('Boto', 'send_crlf_after_proxy_auth_headers', False):
sock.sendall("\r\n")
else:
sock.sendall("\r\n")
resp = httplib.HTTPResponse(sock, strict=True, debuglevel=self.debug)
resp.begin()
if resp.status != 200:
# Fake a socket error, use a code that make it obvious it hasn't
# been generated by the socket library
raise socket.error(-71,
"Error talking to HTTP proxy %s:%s: %s (%s)" %
(self.proxy, self.proxy_port,
resp.status, resp.reason))
# We can safely close the response, it duped the original socket
resp.close()
h = httplib.HTTPConnection(host)
if self.https_validate_certificates and HAVE_HTTPS_CONNECTION:
boto.log.debug("wrapping ssl socket for proxied connection; "
"CA certificate file=%s",
self.ca_certificates_file)
key_file = self.http_connection_kwargs.get('key_file', None)
cert_file = self.http_connection_kwargs.get('cert_file', None)
sslSock = ssl.wrap_socket(sock, keyfile=key_file,
certfile=cert_file,
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=self.ca_certificates_file)
cert = sslSock.getpeercert()
hostname = self.host.split(':', 0)[0]
if not https_connection.ValidateCertificateHostname(cert, hostname):
raise https_connection.InvalidCertificateException(
hostname, cert, 'hostname mismatch')
else:
# Fallback for old Python without ssl.wrap_socket
if hasattr(httplib, 'ssl'):
sslSock = httplib.ssl.SSLSocket(sock)
else:
sslSock = socket.ssl(sock, None, None)
sslSock = httplib.FakeSocket(sock, sslSock)
# This is a bit unclean
h.sock = sslSock
return h
def prefix_proxy_to_path(self, path, host=None):
path = self.protocol + '://' + (host or self.server_name()) + path
return path
def get_proxy_auth_header(self):
auth = base64.encodestring(self.proxy_user + ':' + self.proxy_pass)
return {'Proxy-Authorization': 'Basic %s' % auth}
def _mexe(self, request, sender=None, override_num_retries=None,
retry_handler=None):
"""
mexe - Multi-execute inside a loop, retrying multiple times to handle
transient Internet errors by simply trying again.
Also handles redirects.
This code was inspired by the S3Utils classes posted to the boto-users
Google group by Larry Bates. Thanks!
"""
boto.log.debug('Method: %s' % request.method)
boto.log.debug('Path: %s' % request.path)
boto.log.debug('Data: %s' % request.body)
boto.log.debug('Headers: %s' % request.headers)
boto.log.debug('Host: %s' % request.host)
response = None
body = None
e = None
if override_num_retries is None:
num_retries = config.getint('Boto', 'num_retries', self.num_retries)
else:
num_retries = override_num_retries
i = 0
connection = self.get_http_connection(request.host, self.is_secure)
while i <= num_retries:
# Use binary exponential backoff to desynchronize client requests
next_sleep = random.random() * (2 ** i)
try:
# we now re-sign each request before it is retried
boto.log.debug('Token: %s' % self.provider.security_token)
request.authorize(connection=self)
if callable(sender):
response = sender(connection, request.method, request.path,
request.body, request.headers)
else:
connection.request(request.method, request.path,
request.body, request.headers)
response = connection.getresponse()
location = response.getheader('location')
# -- gross hack --
# httplib gets confused with chunked responses to HEAD requests
# so I have to fake it out
if request.method == 'HEAD' and getattr(response,
'chunked', False):
response.chunked = 0
if callable(retry_handler):
status = retry_handler(response, i, next_sleep)
if status:
msg, i, next_sleep = status
if msg:
boto.log.debug(msg)
time.sleep(next_sleep)
continue
if response.status == 500 or response.status == 503:
msg = 'Received %d response. ' % response.status
msg += 'Retrying in %3.1f seconds' % next_sleep
boto.log.debug(msg)
body = response.read()
elif response.status < 300 or response.status >= 400 or \
not location:
self.put_http_connection(request.host, self.is_secure,
connection)
return response
else:
scheme, request.host, request.path, \
params, query, fragment = urlparse.urlparse(location)
if query:
request.path += '?' + query
msg = 'Redirecting: %s' % scheme + '://'
msg += request.host + request.path
boto.log.debug(msg)
connection = self.get_http_connection(request.host,
scheme == 'https')
response = None
continue
except self.http_exceptions, e:
for unretryable in self.http_unretryable_exceptions:
if isinstance(e, unretryable):
boto.log.debug(
'encountered unretryable %s exception, re-raising' %
e.__class__.__name__)
raise e
boto.log.debug('encountered %s exception, reconnecting' % \
e.__class__.__name__)
connection = self.new_http_connection(request.host,
self.is_secure)
time.sleep(next_sleep)
i += 1
# If we made it here, it's because we have exhausted our retries
# and still haven't succeeded. So, if we have a response object,
# use it to raise an exception.
# Otherwise, raise the exception that must have already happened.
if response:
raise BotoServerError(response.status, response.reason, body)
elif e:
raise e
else:
msg = 'Please report this exception as a Boto Issue!'
raise BotoClientError(msg)
def build_base_http_request(self, method, path, auth_path,
params=None, headers=None, data='', host=None):
path = self.get_path(path)
if auth_path is not None:
auth_path = self.get_path(auth_path)
if params == None:
params = {}
else:
params = params.copy()
if headers == None:
headers = {}
else:
headers = headers.copy()
host = host or self.host
if self.use_proxy:
if not auth_path:
auth_path = path
path = self.prefix_proxy_to_path(path, host)
if self.proxy_user and self.proxy_pass and not self.is_secure:
# If is_secure, we don't have to set the proxy authentication
# header here, we did that in the CONNECT to the proxy.
headers.update(self.get_proxy_auth_header())
return HTTPRequest(method, self.protocol, host, self.port,
path, auth_path, params, headers, data)
def make_request(self, method, path, headers=None, data='', host=None,
auth_path=None, sender=None, override_num_retries=None,
params=None):
"""Makes a request to the server, with stock multiple-retry logic."""
if params is None:
params = {}
http_request = self.build_base_http_request(method, path, auth_path,
params, headers, data, host)
return self._mexe(http_request, sender, override_num_retries)
def close(self):
"""(Optional) Close any open HTTP connections. This is non-destructive,
and making a new request will open a connection again."""
boto.log.debug('closing all HTTP connections')
self._connection = None # compat field
class AWSQueryConnection(AWSAuthConnection):
APIVersion = ''
ResponseError = BotoServerError
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, host=None, debug=0,
https_connection_factory=None, path='/', security_token=None,
validate_certs=True):
AWSAuthConnection.__init__(self, host, aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy,
proxy_port, proxy_user, proxy_pass,
debug, https_connection_factory, path,
security_token=security_token,
validate_certs=validate_certs)
def _required_auth_capability(self):
return []
def get_utf8_value(self, value):
return boto.utils.get_utf8_value(value)
def make_request(self, action, params=None, path='/', verb='GET'):
http_request = self.build_base_http_request(verb, path, None,
params, {}, '',
self.server_name())
if action:
http_request.params['Action'] = action
if self.APIVersion:
http_request.params['Version'] = self.APIVersion
return self._mexe(http_request)
def build_list_params(self, params, items, label):
if isinstance(items, str):
items = [items]
for i in range(1, len(items) + 1):
params['%s.%d' % (label, i)] = items[i - 1]
# generics
def get_list(self, action, params, markers, path='/',
parent=None, verb='GET'):
if not parent:
parent = self
response = self.make_request(action, params, path, verb)
body = response.read()
boto.log.debug(body)
if not body:
boto.log.error('Null body %s' % body)
raise self.ResponseError(response.status, response.reason, body)
elif response.status == 200:
rs = ResultSet(markers)
h = boto.handler.XmlHandler(rs, parent)
xml.sax.parseString(body, h)
return rs
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
def get_object(self, action, params, cls, path='/',
parent=None, verb='GET'):
if not parent:
parent = self
response = self.make_request(action, params, path, verb)
body = response.read()
boto.log.debug(body)
if not body:
boto.log.error('Null body %s' % body)
raise self.ResponseError(response.status, response.reason, body)
elif response.status == 200:
obj = cls(parent)
h = boto.handler.XmlHandler(obj, parent)
xml.sax.parseString(body, h)
return obj
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
def get_status(self, action, params, path='/', parent=None, verb='GET'):
if not parent:
parent = self
response = self.make_request(action, params, path, verb)
body = response.read()
boto.log.debug(body)
if not body:
boto.log.error('Null body %s' % body)
raise self.ResponseError(response.status, response.reason, body)
elif response.status == 200:
rs = ResultSet()
h = boto.handler.XmlHandler(rs, parent)
xml.sax.parseString(body, h)
return rs.status
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
| []
| []
| [
"http_proxy"
]
| [] | ["http_proxy"] | python | 1 | 0 | |
internal/test/environment/environment.go | package environment // import "github.com/docker/docker/internal/test/environment"
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
"github.com/docker/docker/internal/test"
"github.com/docker/docker/internal/test/fixtures/load"
"github.com/pkg/errors"
"gotest.tools/assert"
)
// Execution contains information about the current test execution and daemon
// under test
type Execution struct {
client client.APIClient
DaemonInfo types.Info
OSType string
PlatformDefaults PlatformDefaults
protectedElements protectedElements
}
// PlatformDefaults are defaults values for the platform of the daemon under test
type PlatformDefaults struct {
BaseImage string
VolumesConfigPath string
ContainerStoragePath string
}
// New creates a new Execution struct
// This is configured using the env client (see client.FromEnv)
func New() (*Execution, error) {
c, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
return nil, errors.Wrapf(err, "failed to create client")
}
return FromClient(c)
}
// FromClient creates a new Execution environment from the passed in client
func FromClient(c *client.Client) (*Execution, error) {
info, err := c.Info(context.Background())
if err != nil {
return nil, errors.Wrapf(err, "failed to get info from daemon")
}
osType := getOSType(info)
return &Execution{
client: c,
DaemonInfo: info,
OSType: osType,
PlatformDefaults: getPlatformDefaults(info, osType),
protectedElements: newProtectedElements(),
}, nil
}
func getOSType(info types.Info) string {
// Docker EE does not set the OSType so allow the user to override this value.
userOsType := os.Getenv("TEST_OSTYPE")
if userOsType != "" {
return userOsType
}
return info.OSType
}
func getPlatformDefaults(info types.Info, osType string) PlatformDefaults {
volumesPath := filepath.Join(info.DockerRootDir, "volumes")
containersPath := filepath.Join(info.DockerRootDir, "containers")
switch osType {
case "linux":
return PlatformDefaults{
BaseImage: "scratch",
VolumesConfigPath: toSlash(volumesPath),
ContainerStoragePath: toSlash(containersPath),
}
case "windows":
baseImage := "microsoft/windowsservercore"
if overrideBaseImage := os.Getenv("WINDOWS_BASE_IMAGE"); overrideBaseImage != "" {
baseImage = overrideBaseImage
if overrideBaseImageTag := os.Getenv("WINDOWS_BASE_IMAGE_TAG"); overrideBaseImageTag != "" {
baseImage = baseImage + ":" + overrideBaseImageTag
}
}
fmt.Println("INFO: Windows Base image is ", baseImage)
return PlatformDefaults{
BaseImage: baseImage,
VolumesConfigPath: filepath.FromSlash(volumesPath),
ContainerStoragePath: filepath.FromSlash(containersPath),
}
default:
panic(fmt.Sprintf("unknown OSType for daemon: %s", osType))
}
}
// Make sure in context of daemon, not the local platform. Note we can't
// use filepath.FromSlash or ToSlash here as they are a no-op on Unix.
func toSlash(path string) string {
return strings.Replace(path, `\`, `/`, -1)
}
// IsLocalDaemon is true if the daemon under test is on the same
// host as the test process.
//
// Deterministically working out the environment in which CI is running
// to evaluate whether the daemon is local or remote is not possible through
// a build tag.
//
// For example Windows to Linux CI under Jenkins tests the 64-bit
// Windows binary build with the daemon build tag, but calls a remote
// Linux daemon.
//
// We can't just say if Windows then assume the daemon is local as at
// some point, we will be testing the Windows CLI against a Windows daemon.
//
// Similarly, it will be perfectly valid to also run CLI tests from
// a Linux CLI (built with the daemon tag) against a Windows daemon.
func (e *Execution) IsLocalDaemon() bool {
return os.Getenv("DOCKER_REMOTE_DAEMON") == ""
}
// IsRemoteDaemon is true if the daemon under test is on a different host
// from the test process.
func (e *Execution) IsRemoteDaemon() bool {
return !e.IsLocalDaemon()
}
// DaemonAPIVersion returns the negotiated daemon api version
func (e *Execution) DaemonAPIVersion() string {
version, err := e.APIClient().ServerVersion(context.TODO())
if err != nil {
return ""
}
return version.APIVersion
}
// Print the execution details to stdout
// TODO: print everything
func (e *Execution) Print() {
if e.IsLocalDaemon() {
fmt.Println("INFO: Testing against a local daemon")
} else {
fmt.Println("INFO: Testing against a remote daemon")
}
}
// APIClient returns an APIClient connected to the daemon under test
func (e *Execution) APIClient() client.APIClient {
return e.client
}
// IsUserNamespace returns whether the user namespace remapping is enabled
func (e *Execution) IsUserNamespace() bool {
root := os.Getenv("DOCKER_REMAP_ROOT")
return root != ""
}
// HasExistingImage checks whether there is an image with the given reference.
// Note that this is done by filtering and then checking whether there were any
// results -- so ambiguous references might result in false-positives.
func (e *Execution) HasExistingImage(t assert.TestingT, reference string) bool {
if ht, ok := t.(test.HelperT); ok {
ht.Helper()
}
client := e.APIClient()
filter := filters.NewArgs()
filter.Add("dangling", "false")
filter.Add("reference", reference)
imageList, err := client.ImageList(context.Background(), types.ImageListOptions{
All: true,
Filters: filter,
})
assert.NilError(t, err, "failed to list images")
return len(imageList) > 0
}
// EnsureFrozenImagesLinux loads frozen test images into the daemon
// if they aren't already loaded
func EnsureFrozenImagesLinux(testEnv *Execution) error {
if testEnv.OSType == "linux" {
err := load.FrozenImagesLinux(testEnv.APIClient(), frozenImages...)
if err != nil {
return errors.Wrap(err, "error loading frozen images")
}
}
return nil
}
| [
"\"TEST_OSTYPE\"",
"\"WINDOWS_BASE_IMAGE\"",
"\"WINDOWS_BASE_IMAGE_TAG\"",
"\"DOCKER_REMOTE_DAEMON\"",
"\"DOCKER_REMAP_ROOT\""
]
| []
| [
"DOCKER_REMAP_ROOT",
"WINDOWS_BASE_IMAGE_TAG",
"DOCKER_REMOTE_DAEMON",
"WINDOWS_BASE_IMAGE",
"TEST_OSTYPE"
]
| [] | ["DOCKER_REMAP_ROOT", "WINDOWS_BASE_IMAGE_TAG", "DOCKER_REMOTE_DAEMON", "WINDOWS_BASE_IMAGE", "TEST_OSTYPE"] | go | 5 | 0 | |
.test/test/task2/Aufgabe1/python-lib/jetpack_sdk_env.py | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import os
def welcome():
"""
Perform a bunch of sanity tests to make sure the Add-on SDK
environment is sane, and then display a welcome message.
"""
try:
if sys.version_info[0] > 2:
print ("Error: You appear to be using Python %d, but "
"the Add-on SDK only supports the Python 2.x line." %
(sys.version_info[0]))
return
import mozrunner
if 'CUDDLEFISH_ROOT' not in os.environ:
print ("Error: CUDDLEFISH_ROOT environment variable does "
"not exist! It should point to the root of the "
"Add-on SDK repository.")
return
env_root = os.environ['CUDDLEFISH_ROOT']
bin_dir = os.path.join(env_root, 'bin')
python_lib_dir = os.path.join(env_root, 'python-lib')
path = os.environ['PATH'].split(os.path.pathsep)
if bin_dir not in path:
print ("Warning: the Add-on SDK binary directory %s "
"does not appear to be in your PATH. You may "
"not be able to run 'cfx' or other SDK tools." %
bin_dir)
if python_lib_dir not in sys.path:
print ("Warning: the Add-on SDK python-lib directory %s "
"does not appear to be in your sys.path, which "
"is odd because I'm running from it." % python_lib_dir)
if not mozrunner.__path__[0].startswith(env_root):
print ("Warning: your mozrunner package is installed at %s, "
"which does not seem to be located inside the Jetpack "
"SDK. This may cause problems, and you may want to "
"uninstall the other version. See bug 556562 for "
"more information." % mozrunner.__path__[0])
except Exception:
# Apparently we can't get the actual exception object in the
# 'except' clause in a way that's syntax-compatible for both
# Python 2.x and 3.x, so we'll have to use the traceback module.
import traceback
_, e, _ = sys.exc_info()
print ("Verification of Add-on SDK environment failed (%s)." % e)
print ("Your SDK may not work properly.")
return
print ("Welcome to the Add-on SDK. Run 'cfx docs' for assistance.")
if __name__ == '__main__':
welcome()
| []
| []
| [
"CUDDLEFISH_ROOT",
"PATH"
]
| [] | ["CUDDLEFISH_ROOT", "PATH"] | python | 2 | 0 |