filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence
---|---|---|---|---|---|---|---|---|---|---
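Each row pairs a source file (`content`, with its `filename` and `lang`) with the environment variables it reads (`environment`, plus the constant/variable argument breakdowns and their counts). As a rough illustration of what the constant-argument columns capture, the sketch below is an assumption about how such data could be extracted, not the pipeline that actually produced this dataset: it parses a Go file and collects the string-literal arguments passed to `os.Getenv`.

```go
// Sketch only: collect the string-literal (constant) arguments of os.Getenv calls.
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"strconv"
)

// getenvConstArgs returns the constant environment variable names read via os.Getenv.
func getenvConstArgs(src string) ([]string, error) {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "src.go", src, 0)
	if err != nil {
		return nil, err
	}
	var names []string
	ast.Inspect(file, func(n ast.Node) bool {
		call, ok := n.(*ast.CallExpr)
		if !ok {
			return true
		}
		sel, ok := call.Fun.(*ast.SelectorExpr)
		if !ok || sel.Sel.Name != "Getenv" {
			return true
		}
		pkg, ok := sel.X.(*ast.Ident)
		if !ok || pkg.Name != "os" || len(call.Args) != 1 {
			return true
		}
		// Only string literals count as "constant" arguments here.
		if lit, ok := call.Args[0].(*ast.BasicLit); ok && lit.Kind == token.STRING {
			if name, err := strconv.Unquote(lit.Value); err == nil {
				names = append(names, name)
			}
		}
		return true
	})
	return names, nil
}

func main() {
	src := `package demo
import "os"
var ns = os.Getenv("NAMESPACE")`
	names, err := getenvConstArgs(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(names) // [NAMESPACE]
}
```

Run against the `cmd/controller-manager/main.go` row below, such a pass would yield `["NAMESPACE"]`, which matches that row's `constargjson` value.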
cell/cell_suite_test.go
|
package cell_test
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"runtime"
"testing"
"time"
"code.cloudfoundry.org/consuladapter/consulrunner"
"code.cloudfoundry.org/durationjson"
"code.cloudfoundry.org/lager"
"code.cloudfoundry.org/localip"
. "github.com/onsi/ginkgo"
ginkgoconfig "github.com/onsi/ginkgo/config"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
"github.com/tedsuo/ifrit"
"github.com/tedsuo/ifrit/ginkgomon"
"github.com/tedsuo/ifrit/grouper"
"code.cloudfoundry.org/bbs"
bbsconfig "code.cloudfoundry.org/bbs/cmd/bbs/config"
"code.cloudfoundry.org/bbs/serviceclient"
"code.cloudfoundry.org/garden"
"code.cloudfoundry.org/inigo/helpers"
"code.cloudfoundry.org/inigo/helpers/certauthority"
"code.cloudfoundry.org/inigo/helpers/portauthority"
"code.cloudfoundry.org/inigo/inigo_announcement_server"
"code.cloudfoundry.org/inigo/world"
)
var (
componentMaker world.ComponentMaker
plumbing, bbsProcess, gardenProcess ifrit.Process
gardenClient garden.Client
bbsClient bbs.InternalClient
bbsServiceClient serviceclient.ServiceClient
lgr lager.Logger
suiteTempDir string
)
func overrideConvergenceRepeatInterval(conf *bbsconfig.BBSConfig) {
conf.ConvergeRepeatInterval = durationjson.Duration(time.Second)
}
var _ = SynchronizedBeforeSuite(func() []byte {
suiteTempDir = world.TempDir("before-suite")
artifacts := world.BuiltArtifacts{
Lifecycles: world.BuiltLifecycles{},
}
artifacts.Lifecycles.BuildLifecycles("dockerapplifecycle", suiteTempDir)
artifacts.Executables = CompileTestedExecutables()
artifacts.Healthcheck = CompileHealthcheckExecutable(suiteTempDir)
payload, err := json.Marshal(artifacts)
Expect(err).NotTo(HaveOccurred())
return payload
}, func(encodedBuiltArtifacts []byte) {
var builtArtifacts world.BuiltArtifacts
err := json.Unmarshal(encodedBuiltArtifacts, &builtArtifacts)
Expect(err).NotTo(HaveOccurred())
_, dbBaseConnectionString := world.DBInfo()
localIP, err := localip.LocalIP()
Expect(err).NotTo(HaveOccurred())
addresses := world.ComponentAddresses{
Garden: fmt.Sprintf("127.0.0.1:%d", 10000+ginkgoconfig.GinkgoConfig.ParallelNode),
NATS: fmt.Sprintf("127.0.0.1:%d", 11000+ginkgoconfig.GinkgoConfig.ParallelNode),
Consul: fmt.Sprintf("127.0.0.1:%d", 12750+ginkgoconfig.GinkgoConfig.ParallelNode*consulrunner.PortOffsetLength),
Rep: fmt.Sprintf("127.0.0.1:%d", 14000+ginkgoconfig.GinkgoConfig.ParallelNode),
FileServer: fmt.Sprintf("%s:%d", localIP, 17000+ginkgoconfig.GinkgoConfig.ParallelNode),
Router: fmt.Sprintf("127.0.0.1:%d", 18000+ginkgoconfig.GinkgoConfig.ParallelNode),
RouterStatus: fmt.Sprintf("127.0.0.1:%d", 18100+ginkgoconfig.GinkgoConfig.ParallelNode),
BBS: fmt.Sprintf("127.0.0.1:%d", 20500+ginkgoconfig.GinkgoConfig.ParallelNode*2),
Health: fmt.Sprintf("127.0.0.1:%d", 20500+ginkgoconfig.GinkgoConfig.ParallelNode*2+1),
Auctioneer: fmt.Sprintf("127.0.0.1:%d", 23000+ginkgoconfig.GinkgoConfig.ParallelNode),
SSHProxy: fmt.Sprintf("127.0.0.1:%d", 23500+ginkgoconfig.GinkgoConfig.ParallelNode),
SSHProxyHealthCheck: fmt.Sprintf("127.0.0.1:%d", 24500+ginkgoconfig.GinkgoConfig.ParallelNode),
FakeVolmanDriver: fmt.Sprintf("127.0.0.1:%d", 25500+ginkgoconfig.GinkgoConfig.ParallelNode),
Locket: fmt.Sprintf("127.0.0.1:%d", 26500+ginkgoconfig.GinkgoConfig.ParallelNode),
SQL: fmt.Sprintf("%sdiego_%d", dbBaseConnectionString, ginkgoconfig.GinkgoConfig.ParallelNode),
}
node := GinkgoParallelNode()
startPort := 1000 * node
portRange := 950
endPort := startPort + portRange
allocator, err := portauthority.New(startPort, endPort)
Expect(err).NotTo(HaveOccurred())
certDepot := world.TempDirWithParent(suiteTempDir, "cert-depot")
certAuthority, err := certauthority.NewCertAuthority(certDepot, "ca")
Expect(err).NotTo(HaveOccurred())
componentMaker = world.MakeComponentMaker(builtArtifacts, addresses, allocator, certAuthority)
componentMaker.Setup()
})
var _ = AfterSuite(func() {
if componentMaker != nil {
componentMaker.Teardown()
}
deleteSuiteTempDir := func() error { return os.RemoveAll(suiteTempDir) }
Eventually(deleteSuiteTempDir).Should(Succeed())
})
var _ = BeforeEach(func() {
plumbing = ginkgomon.Invoke(grouper.NewOrdered(os.Kill, grouper.Members{
{"initial-services", grouper.NewParallel(os.Kill, grouper.Members{
{"sql", componentMaker.SQL()},
{"nats", componentMaker.NATS()},
{"consul", componentMaker.Consul()},
})},
{"locket", componentMaker.Locket()},
}))
gardenProcess = ginkgomon.Invoke(componentMaker.Garden())
bbsProcess = ginkgomon.Invoke(componentMaker.BBS())
helpers.ConsulWaitUntilReady(componentMaker.Addresses())
lgr = lager.NewLogger("test")
lgr.RegisterSink(lager.NewWriterSink(GinkgoWriter, lager.DEBUG))
gardenClient = componentMaker.GardenClient()
bbsClient = componentMaker.BBSClient()
bbsServiceClient = componentMaker.BBSServiceClient(lgr)
inigo_announcement_server.Start(os.Getenv("EXTERNAL_ADDRESS"))
})
var _ = AfterEach(func() {
inigo_announcement_server.Stop()
destroyContainerErrors := helpers.CleanupGarden(gardenClient)
helpers.StopProcesses(bbsProcess)
helpers.StopProcesses(gardenProcess)
helpers.StopProcesses(plumbing)
Expect(destroyContainerErrors).To(
BeEmpty(),
"%d containers failed to be destroyed!",
len(destroyContainerErrors),
)
})
func TestCell(t *testing.T) {
helpers.RegisterDefaultTimeouts()
RegisterFailHandler(Fail)
RunSpecs(t, "Cell Integration Suite")
}
func CompileHealthcheckExecutable(tmpDir string) string {
healthcheckDir := world.TempDirWithParent(tmpDir, "healthcheck")
healthcheckPath, err := gexec.Build("code.cloudfoundry.org/healthcheck/cmd/healthcheck", "-race")
Expect(err).NotTo(HaveOccurred())
err = os.Rename(healthcheckPath, filepath.Join(healthcheckDir, "healthcheck"))
Expect(err).NotTo(HaveOccurred())
return healthcheckDir
}
func CompileTestedExecutables() world.BuiltExecutables {
var err error
builtExecutables := world.BuiltExecutables{}
cwd, err := os.Getwd()
Expect(err).NotTo(HaveOccurred())
Expect(os.Chdir(os.Getenv("GARDEN_GOPATH"))).To(Succeed())
builtExecutables["garden"], err = gexec.Build("./cmd/gdn", "-race", "-a", "-tags", "daemon")
Expect(err).NotTo(HaveOccurred())
Expect(os.Chdir(cwd)).To(Succeed())
builtExecutables["auctioneer"], err = gexec.Build("code.cloudfoundry.org/auctioneer/cmd/auctioneer", "-race")
Expect(err).NotTo(HaveOccurred())
builtExecutables["rep"], err = gexec.Build("code.cloudfoundry.org/rep/cmd/rep", "-race")
Expect(err).NotTo(HaveOccurred())
builtExecutables["bbs"], err = gexec.Build("code.cloudfoundry.org/bbs/cmd/bbs", "-race")
Expect(err).NotTo(HaveOccurred())
builtExecutables["locket"], err = gexec.Build("code.cloudfoundry.org/locket/cmd/locket", "-race")
Expect(err).NotTo(HaveOccurred())
builtExecutables["file-server"], err = gexec.Build("code.cloudfoundry.org/fileserver/cmd/file-server", "-race")
Expect(err).NotTo(HaveOccurred())
builtExecutables["route-emitter"], err = gexec.Build("code.cloudfoundry.org/route-emitter/cmd/route-emitter", "-race")
Expect(err).NotTo(HaveOccurred())
if runtime.GOOS != "windows" {
Expect(os.Chdir(os.Getenv("ROUTER_GOPATH"))).To(Succeed())
builtExecutables["router"], err = gexec.Build("code.cloudfoundry.org/gorouter", "-race")
Expect(err).NotTo(HaveOccurred())
Expect(os.Chdir(cwd)).To(Succeed())
}
builtExecutables["routing-api"], err = gexec.Build("code.cloudfoundry.org/routing-api/cmd/routing-api", "-race")
Expect(err).NotTo(HaveOccurred())
builtExecutables["ssh-proxy"], err = gexec.Build("code.cloudfoundry.org/diego-ssh/cmd/ssh-proxy", "-race")
Expect(err).NotTo(HaveOccurred())
os.Setenv("CGO_ENABLED", "0")
builtExecutables["sshd"], err = gexec.Build("code.cloudfoundry.org/diego-ssh/cmd/sshd", "-a", "-installsuffix", "static")
os.Unsetenv("CGO_ENABLED")
Expect(err).NotTo(HaveOccurred())
return builtExecutables
}
|
[
"\"EXTERNAL_ADDRESS\"",
"\"GARDEN_GOPATH\"",
"\"ROUTER_GOPATH\""
] |
[] |
[
"ROUTER_GOPATH",
"EXTERNAL_ADDRESS",
"GARDEN_GOPATH"
] |
[]
|
["ROUTER_GOPATH", "EXTERNAL_ADDRESS", "GARDEN_GOPATH"]
|
go
| 3 | 0 | |
cmd/controller-manager/main.go
|
// Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"flag"
"net/http"
_ "net/http/pprof"
"os"
"time"
"github.com/pingcap/tidb-operator/pkg/client/clientset/versioned"
informers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions"
"github.com/pingcap/tidb-operator/pkg/controller"
"github.com/pingcap/tidb-operator/pkg/controller/backup"
"github.com/pingcap/tidb-operator/pkg/controller/backupschedule"
"github.com/pingcap/tidb-operator/pkg/controller/restore"
"github.com/pingcap/tidb-operator/pkg/controller/tidbcluster"
"github.com/pingcap/tidb-operator/pkg/version"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/tools/record"
"k8s.io/component-base/logs"
glog "k8s.io/klog"
)
var (
printVersion bool
workers int
autoFailover bool
pdFailoverPeriod time.Duration
tikvFailoverPeriod time.Duration
tidbFailoverPeriod time.Duration
leaseDuration = 15 * time.Second
renewDuration = 5 * time.Second
retryPeriod = 3 * time.Second
waitDuration = 5 * time.Second
)
func init() {
flag.BoolVar(&printVersion, "V", false, "Show version and quit")
flag.BoolVar(&printVersion, "version", false, "Show version and quit")
flag.IntVar(&workers, "workers", 5, "The number of workers that are allowed to sync concurrently. Larger number = more responsive management, but more CPU (and network) load")
flag.BoolVar(&controller.ClusterScoped, "cluster-scoped", true, "Whether tidb-operator should manage kubernetes cluster wide TiDB Clusters")
flag.StringVar(&controller.DefaultStorageClassName, "default-storage-class-name", "standard", "Default storage class name")
flag.StringVar(&controller.DefaultBackupStorageClassName, "default-backup-storage-class-name", "standard", "Default storage class name for backup and restore")
flag.BoolVar(&autoFailover, "auto-failover", true, "Auto failover")
flag.DurationVar(&pdFailoverPeriod, "pd-failover-period", time.Duration(5*time.Minute), "PD failover period (default 5m)")
flag.DurationVar(&tikvFailoverPeriod, "tikv-failover-period", time.Duration(5*time.Minute), "TiKV failover period (default 5m)")
flag.DurationVar(&tidbFailoverPeriod, "tidb-failover-period", time.Duration(5*time.Minute), "TiDB failover period")
flag.DurationVar(&controller.ResyncDuration, "resync-duration", time.Duration(30*time.Second), "Resync time of informer")
flag.BoolVar(&controller.TestMode, "test-mode", false, "whether tidb-operator runs in test mode")
flag.StringVar(&controller.TidbBackupManagerImage, "tidb-backup-manager-image", "pingcap/tidb-backup-manager:latest", "The image of backup manager tool")
flag.Parse()
}
func main() {
if printVersion {
version.PrintVersionInfo()
os.Exit(0)
}
version.LogVersionInfo()
logs.InitLogs()
defer logs.FlushLogs()
hostName, err := os.Hostname()
if err != nil {
glog.Fatalf("failed to get hostname: %v", err)
}
ns := os.Getenv("NAMESPACE")
if ns == "" {
glog.Fatal("NAMESPACE environment variable not set")
}
cfg, err := rest.InClusterConfig()
if err != nil {
glog.Fatalf("failed to get config: %v", err)
}
cli, err := versioned.NewForConfig(cfg)
if err != nil {
glog.Fatalf("failed to create Clientset: %v", err)
}
kubeCli, err := kubernetes.NewForConfig(cfg)
if err != nil {
glog.Fatalf("failed to get kubernetes Clientset: %v", err)
}
var informerFactory informers.SharedInformerFactory
var kubeInformerFactory kubeinformers.SharedInformerFactory
if controller.ClusterScoped {
informerFactory = informers.NewSharedInformerFactory(cli, controller.ResyncDuration)
kubeInformerFactory = kubeinformers.NewSharedInformerFactory(kubeCli, controller.ResyncDuration)
} else {
options := []informers.SharedInformerOption{
informers.WithNamespace(ns),
}
informerFactory = informers.NewSharedInformerFactoryWithOptions(cli, controller.ResyncDuration, options...)
kubeoptions := []kubeinformers.SharedInformerOption{
kubeinformers.WithNamespace(ns),
}
kubeInformerFactory = kubeinformers.NewSharedInformerFactoryWithOptions(kubeCli, controller.ResyncDuration, kubeoptions...)
}
rl := resourcelock.EndpointsLock{
EndpointsMeta: metav1.ObjectMeta{
Namespace: ns,
Name: "tidb-controller-manager",
},
Client: kubeCli.CoreV1(),
LockConfig: resourcelock.ResourceLockConfig{
Identity: hostName,
EventRecorder: &record.FakeRecorder{},
},
}
tcController := tidbcluster.NewController(kubeCli, cli, informerFactory, kubeInformerFactory, autoFailover, pdFailoverPeriod, tikvFailoverPeriod, tidbFailoverPeriod)
backupController := backup.NewController(kubeCli, cli, informerFactory, kubeInformerFactory)
restoreController := restore.NewController(kubeCli, cli, informerFactory, kubeInformerFactory)
bsController := backupschedule.NewController(kubeCli, cli, informerFactory, kubeInformerFactory)
controllerCtx, cancel := context.WithCancel(context.Background())
defer cancel()
// Start informer factories after all controllers are initialized.
informerFactory.Start(controllerCtx.Done())
kubeInformerFactory.Start(controllerCtx.Done())
// Wait for the caches of all started informers to be synced.
for v, synced := range informerFactory.WaitForCacheSync(wait.NeverStop) {
if !synced {
glog.Fatalf("error syncing informer for %v", v)
}
}
for v, synced := range kubeInformerFactory.WaitForCacheSync(wait.NeverStop) {
if !synced {
glog.Fatalf("error syncing informer for %v", v)
}
}
glog.Infof("cache of informer factories sync successfully")
onStarted := func(ctx context.Context) {
go wait.Forever(func() { backupController.Run(workers, ctx.Done()) }, waitDuration)
go wait.Forever(func() { restoreController.Run(workers, ctx.Done()) }, waitDuration)
go wait.Forever(func() { bsController.Run(workers, ctx.Done()) }, waitDuration)
wait.Forever(func() { tcController.Run(workers, ctx.Done()) }, waitDuration)
}
onStopped := func() {
glog.Fatalf("leader election lost")
}
// leader election for multiple tidb-controller-manager replicas
go wait.Forever(func() {
leaderelection.RunOrDie(controllerCtx, leaderelection.LeaderElectionConfig{
Lock: &rl,
LeaseDuration: leaseDuration,
RenewDeadline: renewDuration,
RetryPeriod: retryPeriod,
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: onStarted,
OnStoppedLeading: onStopped,
},
})
}, waitDuration)
glog.Fatal(http.ListenAndServe(":6060", nil))
}
|
[
"\"NAMESPACE\""
] |
[] |
[
"NAMESPACE"
] |
[]
|
["NAMESPACE"]
|
go
| 1 | 0 | |
test/e2e/provisioning/all_e2e_test.go
|
//go:build e2e
/*
Copyright 2019 The Machine Controller Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package provisioning
import (
"context"
"flag"
"fmt"
"os"
"strings"
"testing"
"time"
clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1"
providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types"
"github.com/kubermatic/machine-controller/pkg/userdata/flatcar"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)
func init() {
klog.InitFlags(nil)
if err := clusterv1alpha1.SchemeBuilder.AddToScheme(scheme.Scheme); err != nil {
klog.Fatalf("failed to add clusterv1alpha1 to scheme: %v", err)
}
}
const (
DOManifest = "./testdata/machinedeployment-digitalocean.yaml"
AWSManifest = "./testdata/machinedeployment-aws.yaml"
AWSSpotInstanceManifest = "./testdata/machinedeployment-aws-spot-instances.yaml"
AWSManifestARM = "./testdata/machinedeployment-aws-arm-machines.yaml"
AWSEBSEncryptedManifest = "./testdata/machinedeployment-aws-ebs-encryption-enabled.yaml"
AzureManifest = "./testdata/machinedeployment-azure.yaml"
AzureRedhatSatelliteManifest = "./testdata/machinedeployment-azure.yaml"
AzureCustomImageReferenceManifest = "./testdata/machinedeployment-azure-custom-image-reference.yaml"
GCEManifest = "./testdata/machinedeployment-gce.yaml"
HZManifest = "./testdata/machinedeployment-hetzner.yaml"
PacketManifest = "./testdata/machinedeployment-packet.yaml"
LinodeManifest = "./testdata/machinedeployment-linode.yaml"
VSPhereManifest = "./testdata/machinedeployment-vsphere.yaml"
VSPhereDSCManifest = "./testdata/machinedeployment-vsphere-datastore-cluster.yaml"
VSPhereResourcePoolManifest = "./testdata/machinedeployment-vsphere-resource-pool.yaml"
ScalewayManifest = "./testdata/machinedeployment-scaleway.yaml"
OSMachineManifest = "./testdata/machine-openstack.yaml"
OSManifest = "./testdata/machinedeployment-openstack.yaml"
OSUpgradeManifest = "./testdata/machinedeployment-openstack-upgrade.yml"
invalidMachineManifest = "./testdata/machine-invalid.yaml"
kubevirtManifest = "./testdata/machinedeployment-kubevirt.yaml"
kubevirtManifestDNSConfig = "./testdata/machinedeployment-kubevirt-dns-config.yaml"
alibabaManifest = "./testdata/machinedeployment-alibaba.yaml"
anexiaManifest = "./testdata/machinedeployment-anexia.yaml"
)
var testRunIdentifier = flag.String("identifier", "local", "The unique identifier for this test run")
func TestInvalidObjectsGetRejected(t *testing.T) {
t.Parallel()
tests := []scenario{
{osName: "invalid", executor: verifyCreateMachineFails},
{osName: "flatcar", executor: verifyCreateMachineFails},
}
for i, test := range tests {
testScenario(t,
test,
fmt.Sprintf("invalid-machine-%v", i),
nil,
invalidMachineManifest,
false)
}
}
// TestCustomCAsAreApplied ensures that the configured CA bundle is actually
// being used by performing a negative test: It purposefully replaces the
// valid CA bundle with a bundle that contains one random self-signed cert
// and then expects openstack provisioning to _fail_.
func TestCustomCAsAreApplied(t *testing.T) {
t.Parallel()
osAuthURL := os.Getenv("OS_AUTH_URL")
osDomain := os.Getenv("OS_DOMAIN")
osPassword := os.Getenv("OS_PASSWORD")
osRegion := os.Getenv("OS_REGION")
osUsername := os.Getenv("OS_USERNAME")
osTenant := os.Getenv("OS_TENANT_NAME")
osNetwork := os.Getenv("OS_NETWORK_NAME")
if osAuthURL == "" || osUsername == "" || osPassword == "" || osDomain == "" || osRegion == "" || osTenant == "" {
t.Fatal("unable to run test suite, all of OS_AUTH_URL, OS_USERNAME, OS_PASSOWRD, OS_REGION, and OS_TENANT OS_DOMAIN must be set!")
}
params := []string{
fmt.Sprintf("<< IDENTITY_ENDPOINT >>=%s", osAuthURL),
fmt.Sprintf("<< USERNAME >>=%s", osUsername),
fmt.Sprintf("<< PASSWORD >>=%s", osPassword),
fmt.Sprintf("<< DOMAIN_NAME >>=%s", osDomain),
fmt.Sprintf("<< REGION >>=%s", osRegion),
fmt.Sprintf("<< TENANT_NAME >>=%s", osTenant),
fmt.Sprintf("<< NETWORK_NAME >>=%s", osNetwork),
}
testScenario(
t,
scenario{
name: "ca-test",
containerRuntime: "docker",
kubernetesVersion: versions[0].String(),
osName: string(providerconfigtypes.OperatingSystemUbuntu),
executor: func(kubeConfig, manifestPath string, parameters []string, d time.Duration) error {
if err := updateMachineControllerForCustomCA(kubeConfig); err != nil {
return fmt.Errorf("failed to add CA: %v", err)
}
return verifyCreateMachineFails(kubeConfig, manifestPath, parameters, d)
},
},
"dummy-machine",
params,
OSMachineManifest,
false,
)
}
func updateMachineControllerForCustomCA(kubeconfig string) error {
cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
return fmt.Errorf("Error building kubeconfig: %v", err)
}
client, err := ctrlruntimeclient.New(cfg, ctrlruntimeclient.Options{})
if err != nil {
return fmt.Errorf("failed to create Client: %v", err)
}
ctx := context.Background()
ns := metav1.NamespaceSystem
// create intentionally valid but useless CA bundle
caBundle := &corev1.ConfigMap{
ObjectMeta: v1.ObjectMeta{
Namespace: ns,
Name: "ca-bundle",
},
Data: map[string]string{
// this certificate was created using `make examples/ca-cert.pem`
"ca-bundle.pem": strings.TrimSpace(`
-----BEGIN CERTIFICATE-----
MIIFezCCA2OgAwIBAgIUV9en2WQLDZ1VzPYgzblnuhrg1sQwDQYJKoZIhvcNAQEL
BQAwTTELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQ0wCwYDVQQKDARBY21lMSIw
IAYDVQQDDBlrOHMtbWFjaGluZS1jb250cm9sbGVyLWNhMB4XDTIwMDgxOTEzNDEw
MloXDTQ4MDEwNTEzNDEwMlowTTELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQ0w
CwYDVQQKDARBY21lMSIwIAYDVQQDDBlrOHMtbWFjaGluZS1jb250cm9sbGVyLWNh
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAtHUcIL+zTd7jmYazQCL0
auCJxbICdthFzW3Hs8FwQ3zhXiqP7bsgMgLsG5lxmRA1iyRRUQklV/Cu6XTWQkPA
Z8WqA06zhoNl7/f5tfhilJS6RP3ftlDJ9UMVb2DaG560VF+31QHZKL8Hr0KgPdz9
WgUFTpD1LpOk0wHJdjc/WzKaTFrZm3UAZRcZIkR0+5LrUudmUPYfbHWtYSYLX2vB
Y0+9oKqcpTtoFE2jGa993dtSPSE7grG3kfKb+IhwHUDXOW0xiT/uue7JAJYc6fDd
RoRdf3vSIESl9+R7lxymcW5R9YrQ26YJ6HlVr14BpT0hNVgvrpJINstYBpj5PbQV
kpIcHmrDOoZEgb+QTAtzga0mZctWWa7U1AJ8KoWejrJgNCAE4nrecFaPQ7aDjSe4
ca0/Gx1TtLPhswMFqQhihK4bxuV1iTTsk++h8rK5ii6jO6ioS+AF9Nqye+1tYuE8
JePXMMkO1pnwKeyiRGs8poJdQEXzu0xYbc/f2FZqP4b9X4TfsVC5WQIO/xhfhaOI
l0cIKTaBn5mWW5gn/ag+AnaTHZ7aX3A4zAuE/riyTFC2GWNLO5PqlTgo6c/+5ynC
x5Q6CUBIMFw4LP8DMC2bWhyJjRaCre9+3bXSXQ8XCWxAyfTjDTcIgBEv0+peGko0
wb697GGWGgiqlRpW8GBZPeUCAwEAAaNTMFEwHQYDVR0OBBYEFO2EDvPI7jRqR6rK
vKkqj8BxCCZvMB8GA1UdIwQYMBaAFO2EDvPI7jRqR6rKvKkqj8BxCCZvMA8GA1Ud
EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggIBAJMXHPxnorj4h+HePA1TaqBs
LPfrARxPGi+/mFWGtT8JpLf8cP1YT3j74sdD9oiDCxDSL+Cg5JY3IKa5U+jnS6Go
a26D4U9MOUl2hPOa/f4BEEN9+6jNvB/jg1Jd1YC1Q7hdWjBZKx1n+qMhq+bwNJZo
du0t/zmgk6sJVa7E50ILv/WmEQDCo0NFpOBSku0M35iA+maMgxq5/7EBybl2Qo2F
j6IPTxGBRbOE13I8virmYz9MdloiKX1GDUDCP3yRSSnPVveKlaGYa/lCNbSEhynb
KZHbzcro71RRAgne1cNaFIqr5oCZMSSx+hlsc/mkenr7Dg1/o6FexFc1IYO85Fs4
VC8Yb5V2oD8IDZlRVo7G74cZqEly8OYHO17zO0ib3S70aPGTUtFXEyMiirWCbVCb
L3I2dvQcO19WTQ8CfujWGtbL2lhBZvJTfa9fzrz3uYQRIeBHIWZvi8sEIQ1pmeOi
9PQkGHHJO+jfJkbOdR9cAmHUyuHH26WzZctg5CR2+f6xA8kO/8tUMEAJ9hUJa1iJ
Br0c+gPd5UmjrHLikc40/CgjmfLkaSJcnmiYP0xxYM3Rqm8ptKJM7asHDDbeBK8m
rh3NiRD903zsNpRiUXKkQs7N382SkRaBTB/rJTONM00pXEQYAivs5nIEfCQzen/Z
C8QmzsMaZhk+mVFr1sGy
-----END CERTIFICATE-----
`),
},
}
if err := client.Create(ctx, caBundle); err != nil {
return fmt.Errorf("failed to create ca-bundle ConfigMap: %v", err)
}
// add CA to deployments
deployments := []string{"machine-controller", "machine-controller-webhook"}
for _, deployment := range deployments {
if err := addCAToDeployment(ctx, client, deployment, ns); err != nil {
return fmt.Errorf("failed to add CA to %s Deployment: %v", deployment, err)
}
}
// wait for deployments to roll out
for _, deployment := range deployments {
if err := wait.Poll(3*time.Second, 30*time.Second, func() (done bool, err error) {
d := &appsv1.Deployment{}
key := types.NamespacedName{Namespace: ns, Name: deployment}
if err := client.Get(ctx, key, d); err != nil {
return false, fmt.Errorf("failed to get Deployment: %v", err)
}
return d.Status.AvailableReplicas > 0, nil
}); err != nil {
return fmt.Errorf("%s Deployment never became ready: %v", deployment, err)
}
}
return nil
}
func addCAToDeployment(ctx context.Context, client ctrlruntimeclient.Client, name string, namespace string) error {
deployment := &appsv1.Deployment{}
key := types.NamespacedName{Namespace: namespace, Name: name}
if err := client.Get(ctx, key, deployment); err != nil {
return fmt.Errorf("failed to get Deployment: %v", err)
}
caVolume := corev1.Volume{
Name: "ca-bundle",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: "ca-bundle",
},
},
},
}
caVolumeMount := corev1.VolumeMount{
Name: "ca-bundle",
ReadOnly: true,
MountPath: "/etc/machine-controller",
}
oldDeployment := deployment.DeepCopy()
deployment.Spec.Template.Spec.Volumes = append(deployment.Spec.Template.Spec.Volumes, caVolume)
container := deployment.Spec.Template.Spec.Containers[0]
container.VolumeMounts = append(container.VolumeMounts, caVolumeMount)
container.Command = append(container.Command, "-ca-bundle=/etc/machine-controller/ca-bundle.pem")
deployment.Spec.Template.Spec.Containers[0] = container
return client.Patch(ctx, deployment, ctrlruntimeclient.MergeFrom(oldDeployment))
}
func TestKubevirtProvisioningE2E(t *testing.T) {
t.Parallel()
kubevirtKubeconfig := os.Getenv("KUBEVIRT_E2E_TESTS_KUBECONFIG")
if kubevirtKubeconfig == "" {
t.Fatalf("Unable to run kubevirt tests, KUBEVIRT_E2E_TESTS_KUBECONFIG must be set")
}
selector := OsSelector("ubuntu", "centos", "flatcar")
params := []string{
fmt.Sprintf("<< KUBECONFIG >>=%s", kubevirtKubeconfig),
}
runScenarios(t, selector, params, kubevirtManifest, fmt.Sprintf("kubevirt-%s", *testRunIdentifier))
}
func TestKubevirtDNSConfigProvisioningE2E(t *testing.T) {
t.Parallel()
kubevirtKubeconfig := os.Getenv("KUBEVIRT_E2E_TESTS_KUBECONFIG")
if kubevirtKubeconfig == "" {
t.Fatalf("Unable to run kubevirt tests, KUBEVIRT_E2E_TESTS_KUBECONFIG must be set")
}
params := []string{
fmt.Sprintf("<< KUBECONFIG >>=%s", kubevirtKubeconfig),
}
scenario := scenario{
name: "Kubevirt with dns config",
osName: "ubuntu",
containerRuntime: "docker",
kubernetesVersion: "v1.22.2",
executor: verifyCreateAndDelete,
}
testScenario(t, scenario, *testRunIdentifier, params, kubevirtManifestDNSConfig, false)
}
func TestOpenstackProvisioningE2E(t *testing.T) {
t.Parallel()
osAuthURL := os.Getenv("OS_AUTH_URL")
osDomain := os.Getenv("OS_DOMAIN")
osPassword := os.Getenv("OS_PASSWORD")
osRegion := os.Getenv("OS_REGION")
osUsername := os.Getenv("OS_USERNAME")
osTenant := os.Getenv("OS_TENANT_NAME")
osNetwork := os.Getenv("OS_NETWORK_NAME")
if osAuthURL == "" || osUsername == "" || osPassword == "" || osDomain == "" || osRegion == "" || osTenant == "" {
t.Fatal("unable to run test suite, all of OS_AUTH_URL, OS_USERNAME, OS_PASSOWRD, OS_REGION, and OS_TENANT OS_DOMAIN must be set!")
}
params := []string{
fmt.Sprintf("<< IDENTITY_ENDPOINT >>=%s", osAuthURL),
fmt.Sprintf("<< USERNAME >>=%s", osUsername),
fmt.Sprintf("<< PASSWORD >>=%s", osPassword),
fmt.Sprintf("<< DOMAIN_NAME >>=%s", osDomain),
fmt.Sprintf("<< REGION >>=%s", osRegion),
fmt.Sprintf("<< TENANT_NAME >>=%s", osTenant),
fmt.Sprintf("<< NETWORK_NAME >>=%s", osNetwork),
}
selector := Not(OsSelector("sles", "rhel", "amzn2"))
runScenarios(t, selector, params, OSManifest, fmt.Sprintf("os-%s", *testRunIdentifier))
}
// TestDigitalOceanProvisioningE2E - a test suite that exercises the DigitalOcean provider
// by requesting nodes with different combinations of container runtime type, container runtime version and the OS flavour.
//
// note that tests require a valid API token that is read from the DO_E2E_TESTS_TOKEN environment variable.
func TestDigitalOceanProvisioningE2E(t *testing.T) {
t.Parallel()
// test data
doToken := os.Getenv("DO_E2E_TESTS_TOKEN")
if len(doToken) == 0 {
t.Fatal("unable to run the test suite, DO_E2E_TESTS_TOKEN environment variable cannot be empty")
}
selector := OsSelector("ubuntu", "centos")
// act
params := []string{fmt.Sprintf("<< DIGITALOCEAN_TOKEN >>=%s", doToken)}
runScenarios(t, selector, params, DOManifest, fmt.Sprintf("do-%s", *testRunIdentifier))
}
// TestAWSProvisioning - a test suite that exercises AWS provider
// by requesting nodes with different combination of container runtime type, container runtime version and the OS flavour.
func TestAWSProvisioningE2E(t *testing.T) {
t.Parallel()
// test data
awsKeyID := os.Getenv("AWS_E2E_TESTS_KEY_ID")
awsSecret := os.Getenv("AWS_E2E_TESTS_SECRET")
if len(awsKeyID) == 0 || len(awsSecret) == 0 {
t.Fatal("unable to run the test suite, AWS_E2E_TESTS_KEY_ID or AWS_E2E_TESTS_SECRET environment variables cannot be empty")
}
selector := Not(OsSelector("sles"))
// act
params := []string{fmt.Sprintf("<< AWS_ACCESS_KEY_ID >>=%s", awsKeyID),
fmt.Sprintf("<< AWS_SECRET_ACCESS_KEY >>=%s", awsSecret),
fmt.Sprintf("<< PROVISIONING_UTILITY >>=%s", flatcar.CloudInit),
}
runScenarios(t, selector, params, AWSManifest, fmt.Sprintf("aws-%s", *testRunIdentifier))
}
// TestAWSSpotInstanceProvisioning - a test suite that exercises AWS provider
// by requesting spot nodes with different combination of container runtime type, container runtime version and the OS flavour.
func TestAWSSpotInstanceProvisioningE2E(t *testing.T) {
t.Parallel()
// test data
awsKeyID := os.Getenv("AWS_E2E_TESTS_KEY_ID")
awsSecret := os.Getenv("AWS_E2E_TESTS_SECRET")
if len(awsKeyID) == 0 || len(awsSecret) == 0 {
t.Fatal("unable to run the test suite, AWS_E2E_TESTS_KEY_ID or AWS_E2E_TESTS_SECRET environment variables cannot be empty")
}
selector := Not(OsSelector("sles"))
// act
params := []string{fmt.Sprintf("<< AWS_ACCESS_KEY_ID >>=%s", awsKeyID),
fmt.Sprintf("<< AWS_SECRET_ACCESS_KEY >>=%s", awsSecret),
fmt.Sprintf("<< PROVISIONING_UTILITY >>=%s", flatcar.CloudInit),
}
runScenarios(t, selector, params, AWSSpotInstanceManifest, fmt.Sprintf("aws-%s", *testRunIdentifier))
}
// TestAWSARMProvisioningE2E - a test suite that exercises AWS provider for arm machines
// by requesting nodes with different combination of container runtime type, container runtime version and the OS flavour.
func TestAWSARMProvisioningE2E(t *testing.T) {
t.Parallel()
// test data
awsKeyID := os.Getenv("AWS_E2E_TESTS_KEY_ID")
awsSecret := os.Getenv("AWS_E2E_TESTS_SECRET")
if len(awsKeyID) == 0 || len(awsSecret) == 0 {
t.Fatal("unable to run the test suite, AWS_E2E_TESTS_KEY_ID or AWS_E2E_TESTS_SECRET environment variables cannot be empty")
}
selector := OsSelector("ubuntu")
// act
params := []string{fmt.Sprintf("<< AWS_ACCESS_KEY_ID >>=%s", awsKeyID),
fmt.Sprintf("<< AWS_SECRET_ACCESS_KEY >>=%s", awsSecret),
fmt.Sprintf("<< PROVISIONING_UTILITY >>=%s", flatcar.Ignition),
}
runScenarios(t, selector, params, AWSManifestARM, fmt.Sprintf("aws-%s", *testRunIdentifier))
}
// TestAWSSLESProvisioningE2E - a test suite that exercises AWS provider
// by requesting nodes with different combination of container runtime type, container runtime version and the OS flavour.
func TestAWSSLESProvisioningE2E(t *testing.T) {
t.Parallel()
// test data
awsKeyID := os.Getenv("AWS_E2E_TESTS_KEY_ID")
awsSecret := os.Getenv("AWS_E2E_TESTS_SECRET")
if len(awsKeyID) == 0 || len(awsSecret) == 0 {
t.Fatal("unable to run the test suite, AWS_E2E_TESTS_KEY_ID or AWS_E2E_TESTS_SECRET environment variables cannot be empty")
}
// act
params := []string{fmt.Sprintf("<< AWS_ACCESS_KEY_ID >>=%s", awsKeyID),
fmt.Sprintf("<< AWS_SECRET_ACCESS_KEY >>=%s", awsSecret),
}
// We would like to test SLES image only in this test as the other images are tested in TestAWSProvisioningE2E
selector := OsSelector("sles")
runScenarios(t, selector, params, AWSManifest, fmt.Sprintf("aws-%s", *testRunIdentifier))
}
func TestAWSFlatcarCoreOSCloudInit8ProvisioningE2E(t *testing.T) {
t.Parallel()
// test data
awsKeyID := os.Getenv("AWS_E2E_TESTS_KEY_ID")
awsSecret := os.Getenv("AWS_E2E_TESTS_SECRET")
if len(awsKeyID) == 0 || len(awsSecret) == 0 {
t.Fatal("unable to run the test suite, AWS_E2E_TESTS_KEY_ID or AWS_E2E_TESTS_SECRET environment variables cannot be empty")
}
params := []string{
fmt.Sprintf("<< AWS_ACCESS_KEY_ID >>=%s", awsKeyID),
fmt.Sprintf("<< AWS_SECRET_ACCESS_KEY >>=%s", awsSecret),
fmt.Sprintf("<< PROVISIONING_UTILITY >>=%s", flatcar.CloudInit),
}
// We would like to test flatcar with CoreOS-cloud-init
selector := OsSelector("flatcar")
runScenarios(t, selector, params, AWSManifest, fmt.Sprintf("aws-%s", *testRunIdentifier))
}
func TestAWSFlatcarContainerdProvisioningE2E(t *testing.T) {
t.Parallel()
// test data
awsKeyID := os.Getenv("AWS_E2E_TESTS_KEY_ID")
awsSecret := os.Getenv("AWS_E2E_TESTS_SECRET")
if len(awsKeyID) == 0 || len(awsSecret) == 0 {
t.Fatal("unable to run the test suite, AWS_E2E_TESTS_KEY_ID or AWS_E2E_TESTS_SECRET environment variables cannot be empty")
}
params := []string{
fmt.Sprintf("<< AWS_ACCESS_KEY_ID >>=%s", awsKeyID),
fmt.Sprintf("<< AWS_SECRET_ACCESS_KEY >>=%s", awsSecret),
fmt.Sprintf("<< PROVISIONING_UTILITY >>=%s", flatcar.CloudInit),
}
scenario := scenario{
name: "flatcar with containerd in AWS",
osName: "flatcar",
containerRuntime: "containerd",
kubernetesVersion: "1.19.9",
executor: verifyCreateAndDelete,
}
testScenario(t, scenario, *testRunIdentifier, params, AWSManifest, false)
}
func TestAWSCentOS8ProvisioningE2E(t *testing.T) {
t.Parallel()
// test data
awsKeyID := os.Getenv("AWS_E2E_TESTS_KEY_ID")
awsSecret := os.Getenv("AWS_E2E_TESTS_SECRET")
if len(awsKeyID) == 0 || len(awsSecret) == 0 {
t.Fatal("unable to run the test suite, AWS_E2E_TESTS_KEY_ID or AWS_E2E_TESTS_SECRET environment variables cannot be empty")
}
amiID := "ami-032025b3afcbb6b34" // official "CentOS 8.2.2004 x86_64"
params := []string{
fmt.Sprintf("<< AWS_ACCESS_KEY_ID >>=%s", awsKeyID),
fmt.Sprintf("<< AWS_SECRET_ACCESS_KEY >>=%s", awsSecret),
fmt.Sprintf("<< AMI >>=%s", amiID),
}
// We would like to test CentOS8 image only in this test as the other images are tested in TestAWSProvisioningE2E
selector := OsSelector("centos")
runScenarios(t, selector, params, AWSManifest, fmt.Sprintf("aws-%s", *testRunIdentifier))
}
// TestAWSEbsEncryptionEnabledProvisioningE2E - a test suite that exercises AWS provider with ebs encryption enabled
// by requesting nodes with different combination of container runtime type, container runtime version and the OS flavour.
func TestAWSEbsEncryptionEnabledProvisioningE2E(t *testing.T) {
t.Parallel()
// test data
awsKeyID := os.Getenv("AWS_E2E_TESTS_KEY_ID")
awsSecret := os.Getenv("AWS_E2E_TESTS_SECRET")
if len(awsKeyID) == 0 || len(awsSecret) == 0 {
t.Fatal("unable to run the test suite, AWS_E2E_TESTS_KEY_ID or AWS_E2E_TESTS_SECRET environment variables cannot be empty")
}
// act
params := []string{fmt.Sprintf("<< AWS_ACCESS_KEY_ID >>=%s", awsKeyID),
fmt.Sprintf("<< AWS_SECRET_ACCESS_KEY >>=%s", awsSecret),
}
scenario := scenario{
name: "AWS with ebs encryption enabled",
osName: "ubuntu",
containerRuntime: "containerd",
kubernetesVersion: "v1.20.1",
executor: verifyCreateAndDelete,
}
testScenario(t, scenario, fmt.Sprintf("aws-%s", *testRunIdentifier), params, AWSEBSEncryptedManifest, false)
}
// TestAzureProvisioningE2E - a test suite that exercises Azure provider
// by requesting nodes with different combination of container runtime type, container runtime version and the OS flavour.
func TestAzureProvisioningE2E(t *testing.T) {
t.Parallel()
// test data
azureTenantID := os.Getenv("AZURE_E2E_TESTS_TENANT_ID")
azureSubscriptionID := os.Getenv("AZURE_E2E_TESTS_SUBSCRIPTION_ID")
azureClientID := os.Getenv("AZURE_E2E_TESTS_CLIENT_ID")
azureClientSecret := os.Getenv("AZURE_E2E_TESTS_CLIENT_SECRET")
if len(azureTenantID) == 0 || len(azureSubscriptionID) == 0 || len(azureClientID) == 0 || len(azureClientSecret) == 0 {
t.Fatal("unable to run the test suite, AZURE_TENANT_ID, AZURE_SUBSCRIPTION_ID, AZURE_CLIENT_ID and AZURE_CLIENT_SECRET environment variables cannot be empty")
}
selector := Not(OsSelector("sles", "amzn2"))
// act
params := []string{
fmt.Sprintf("<< AZURE_TENANT_ID >>=%s", azureTenantID),
fmt.Sprintf("<< AZURE_SUBSCRIPTION_ID >>=%s", azureSubscriptionID),
fmt.Sprintf("<< AZURE_CLIENT_ID >>=%s", azureClientID),
fmt.Sprintf("<< AZURE_CLIENT_SECRET >>=%s", azureClientSecret),
}
runScenarios(t, selector, params, AzureManifest, fmt.Sprintf("azure-%s", *testRunIdentifier))
}
// TestAzureCustomImageReferenceProvisioningE2E - a test suite that exercises Azure provider
// by requesting nodes with different combination of container runtime type, container runtime version and custom Image reference.
func TestAzureCustomImageReferenceProvisioningE2E(t *testing.T) {
t.Parallel()
// test data
azureTenantID := os.Getenv("AZURE_E2E_TESTS_TENANT_ID")
azureSubscriptionID := os.Getenv("AZURE_E2E_TESTS_SUBSCRIPTION_ID")
azureClientID := os.Getenv("AZURE_E2E_TESTS_CLIENT_ID")
azureClientSecret := os.Getenv("AZURE_E2E_TESTS_CLIENT_SECRET")
if len(azureTenantID) == 0 || len(azureSubscriptionID) == 0 || len(azureClientID) == 0 || len(azureClientSecret) == 0 {
t.Fatal("unable to run the test suite, AZURE_TENANT_ID, AZURE_SUBSCRIPTION_ID, AZURE_CLIENT_ID and AZURE_CLIENT_SECRET environment variables cannot be empty")
}
selector := OsSelector("ubuntu")
// act
params := []string{
fmt.Sprintf("<< AZURE_TENANT_ID >>=%s", azureTenantID),
fmt.Sprintf("<< AZURE_SUBSCRIPTION_ID >>=%s", azureSubscriptionID),
fmt.Sprintf("<< AZURE_CLIENT_ID >>=%s", azureClientID),
fmt.Sprintf("<< AZURE_CLIENT_SECRET >>=%s", azureClientSecret),
}
runScenarios(t, selector, params, AzureCustomImageReferenceManifest, fmt.Sprintf("azure-%s", *testRunIdentifier))
}
// TestAzureRedhatSatelliteProvisioningE2E - a test suite that exercises Azure provider
// by requesting rhel node and subscribe to redhat satellite server.
func TestAzureRedhatSatelliteProvisioningE2E(t *testing.T) {
t.Parallel()
t.Skip()
// test data
azureTenantID := os.Getenv("AZURE_E2E_TESTS_TENANT_ID")
azureSubscriptionID := os.Getenv("AZURE_E2E_TESTS_SUBSCRIPTION_ID")
azureClientID := os.Getenv("AZURE_E2E_TESTS_CLIENT_ID")
azureClientSecret := os.Getenv("AZURE_E2E_TESTS_CLIENT_SECRET")
if len(azureTenantID) == 0 || len(azureSubscriptionID) == 0 || len(azureClientID) == 0 || len(azureClientSecret) == 0 {
t.Fatal("unable to run the test suite, AZURE_TENANT_ID, AZURE_SUBSCRIPTION_ID, AZURE_CLIENT_ID and AZURE_CLIENT_SECRET environment variables cannot be empty")
}
// act
params := []string{
fmt.Sprintf("<< AZURE_TENANT_ID >>=%s", azureTenantID),
fmt.Sprintf("<< AZURE_SUBSCRIPTION_ID >>=%s", azureSubscriptionID),
fmt.Sprintf("<< AZURE_CLIENT_ID >>=%s", azureClientID),
fmt.Sprintf("<< AZURE_CLIENT_SECRET >>=%s", azureClientSecret),
}
scenario := scenario{
name: "Azure redhat satellite server subscription",
osName: "rhel",
containerRuntime: "docker",
kubernetesVersion: "v1.17.0",
executor: verifyCreateAndDelete,
}
testScenario(t, scenario, *testRunIdentifier, params, AzureRedhatSatelliteManifest, false)
}
// TestGCEProvisioningE2E - a test suite that exercises Google Cloud provider
// by requesting nodes with different combination of container runtime type,
// container runtime version and the OS flavour.
func TestGCEProvisioningE2E(t *testing.T) {
t.Parallel()
// Test data.
googleServiceAccount := os.Getenv("GOOGLE_SERVICE_ACCOUNT")
if len(googleServiceAccount) == 0 {
t.Fatal("unable to run the test suite, GOOGLE_SERVICE_ACCOUNT environment variable cannot be empty")
}
// Act. GCE does not support CentOS.
selector := OsSelector("ubuntu")
params := []string{
fmt.Sprintf("<< GOOGLE_SERVICE_ACCOUNT >>=%s", googleServiceAccount),
}
runScenarios(t, selector, params, GCEManifest, fmt.Sprintf("gce-%s", *testRunIdentifier))
}
// TestHetznerProvisioning - a test suite that exercises Hetzner provider
// by requesting nodes with different combination of container runtime type, container runtime version and the OS flavour.
func TestHetznerProvisioningE2E(t *testing.T) {
t.Parallel()
// test data
hzToken := os.Getenv("HZ_E2E_TOKEN")
if len(hzToken) == 0 {
t.Fatal("unable to run the test suite, HZ_E2E_TOKEN environment variable cannot be empty")
}
selector := OsSelector("ubuntu", "centos")
// act
params := []string{fmt.Sprintf("<< HETZNER_TOKEN >>=%s", hzToken)}
runScenarios(t, selector, params, HZManifest, fmt.Sprintf("hz-%s", *testRunIdentifier))
}
// TestPacketProvisioning - a test suite that exercises Packet provider
// by requesting nodes with different combination of container runtime type, container runtime version and the OS flavour.
func TestPacketProvisioningE2E(t *testing.T) {
t.Parallel()
// test data
apiKey := os.Getenv("PACKET_API_KEY")
if len(apiKey) == 0 {
t.Fatal("unable to run the test suite, PACKET_API_KEY environment variable cannot be empty")
}
projectID := os.Getenv("PACKET_PROJECT_ID")
if len(projectID) == 0 {
t.Fatal("unable to run the test suite, PACKET_PROJECT_ID environment variable cannot be empty")
}
selector := Not(OsSelector("sles", "rhel", "amzn2"))
// act
params := []string{
fmt.Sprintf("<< PACKET_API_KEY >>=%s", apiKey),
fmt.Sprintf("<< PACKET_PROJECT_ID >>=%s", projectID),
}
runScenarios(t, selector, params, PacketManifest, fmt.Sprintf("packet-%s", *testRunIdentifier))
}
func TestAlibabaProvisioningE2E(t *testing.T) {
t.Parallel()
// test data
accessKeyID := os.Getenv("ALIBABA_ACCESS_KEY_ID")
if len(accessKeyID) == 0 {
t.Fatal("unable to run the test suite, ALIBABA_ACCESS_KEY_ID environment variable cannot be empty")
}
accessKeySecret := os.Getenv("ALIBABA_ACCESS_KEY_SECRET")
if len(accessKeySecret) == 0 {
t.Fatal("unable to run the test suite, ALIBABA_ACCESS_KEY_SECRET environment variable cannot be empty")
}
selector := Not(OsSelector("sles", "rhel", "flatcar"))
// act
params := []string{
fmt.Sprintf("<< ALIBABA_ACCESS_KEY_ID >>=%s", accessKeyID),
fmt.Sprintf("<< ALIBABA_ACCESS_KEY_SECRET >>=%s", accessKeySecret),
}
runScenarios(t, selector, params, alibabaManifest, fmt.Sprintf("alibaba-%s", *testRunIdentifier))
}
// TestLinodeProvisioningE2E - a test suite that exercises the Linode provider
// by requesting nodes with different combinations of container runtime type, container runtime version and the OS flavour.
//
// note that tests require a valid API token that is read from the LINODE_E2E_TESTS_TOKEN environment variable.
func TestLinodeProvisioningE2E(t *testing.T) {
t.Parallel()
// test data
linodeToken := os.Getenv("LINODE_E2E_TESTS_TOKEN")
if len(linodeToken) == 0 {
t.Fatal("unable to run the test suite, LINODE_E2E_TESTS_TOKEN environment variable cannot be empty")
}
// we're shimming userdata through Linode stackscripts and the stackscript hasn't been verified for use with centos
selector := OsSelector("ubuntu")
// act
params := []string{fmt.Sprintf("<< LINODE_TOKEN >>=%s", linodeToken)}
runScenarios(t, selector, params, LinodeManifest, fmt.Sprintf("linode-%s", *testRunIdentifier))
}
func getVSphereTestParams(t *testing.T) []string {
// test data
vsPassword := os.Getenv("VSPHERE_E2E_PASSWORD")
vsUsername := os.Getenv("VSPHERE_E2E_USERNAME")
vsCluster := os.Getenv("VSPHERE_E2E_CLUSTER")
vsAddress := os.Getenv("VSPHERE_E2E_ADDRESS")
if vsPassword == "" || vsUsername == "" || vsAddress == "" || vsCluster == "" {
t.Fatal("unable to run the test suite, VSPHERE_E2E_PASSWORD, VSPHERE_E2E_USERNAME, VSPHERE_E2E_CLUSTER " +
"or VSPHERE_E2E_ADDRESS environment variables cannot be empty")
}
// act
params := []string{fmt.Sprintf("<< VSPHERE_PASSWORD >>=%s", vsPassword),
fmt.Sprintf("<< VSPHERE_USERNAME >>=%s", vsUsername),
fmt.Sprintf("<< VSPHERE_ADDRESS >>=%s", vsAddress),
fmt.Sprintf("<< VSPHERE_CLUSTER >>=%s", vsCluster),
}
return params
}
// TestVsphereProvisioning - a test suite that exercises vsphere provider
// by requesting nodes with different combination of container runtime type, container runtime version and the OS flavour.
func TestVsphereProvisioningE2E(t *testing.T) {
t.Parallel()
selector := Not(OsSelector("sles", "amzn2", "rhel"))
params := getVSphereTestParams(t)
runScenarios(t, selector, params, VSPhereManifest, fmt.Sprintf("vs-%s", *testRunIdentifier))
}
// TestVsphereDatastoreClusterProvisioning - is the same as the TestVsphereProvisioning suite but specifies a DatastoreCluster
// instead of the Datastore in the provider specs.
func TestVsphereDatastoreClusterProvisioningE2E(t *testing.T) {
t.Parallel()
selector := OsSelector("ubuntu", "centos")
params := getVSphereTestParams(t)
runScenarios(t, selector, params, VSPhereDSCManifest, fmt.Sprintf("vs-dsc-%s", *testRunIdentifier))
}
// TestVsphereResourcePoolProvisioning - creates a machine deployment using a
// resource pool.
func TestVsphereResourcePoolProvisioningE2E(t *testing.T) {
t.Parallel()
params := getVSphereTestParams(t)
// We do not need to test all combinations.
scenario := scenario{
name: "vSphere resource pool provisioning",
osName: "flatcar",
containerRuntime: "docker",
kubernetesVersion: "1.22.2",
executor: verifyCreateAndDelete,
}
testScenario(t, scenario, *testRunIdentifier, params, VSPhereResourcePoolManifest, false)
}
// TestScalewayProvisioning - a test suite that exercises scaleway provider
// by requesting nodes with different combination of container runtime type, container runtime version and the OS flavour.
//
// note that tests require the following environment variables:
// - SCW_ACCESS_KEY -> the Scaleway Access Key
// - SCW_SECRET_KEY -> the Scaleway Secret Key
// - SCW_DEFAULT_PROJECT_ID -> the Scaleway Project ID
func TestScalewayProvisioningE2E(t *testing.T) {
t.Parallel()
// test data
scwAccessKey := os.Getenv("SCW_ACCESS_KEY")
if len(scwAccessKey) == 0 {
t.Fatal("unable to run the test suite, SCW_E2E_TEST_ACCESS_KEY environment variable cannot be empty")
}
scwSecretKey := os.Getenv("SCW_SECRET_KEY")
if len(scwSecretKey) == 0 {
t.Fatal("unable to run the test suite, SCW_E2E_TEST_SECRET_KEY environment variable cannot be empty")
}
scwProjectID := os.Getenv("SCW_DEFAULT_PROJECT_ID")
if len(scwProjectID) == 0 {
t.Fatal("unable to run the test suite, SCW_E2E_TEST_PROJECT_ID environment variable cannot be empty")
}
selector := Not(OsSelector("sles", "rhel", "flatcar"))
// act
params := []string{
fmt.Sprintf("<< SCW_ACCESS_KEY >>=%s", scwAccessKey),
fmt.Sprintf("<< SCW_SECRET_KEY >>=%s", scwSecretKey),
fmt.Sprintf("<< SCW_DEFAULT_PROJECT_ID >>=%s", scwProjectID),
}
runScenarios(t, selector, params, ScalewayManifest, fmt.Sprintf("scw-%s", *testRunIdentifier))
}
// TestUbuntuProvisioningWithUpgradeE2E will create an instance from an old Ubuntu 16.04
// image and upgrade it prior to joining the cluster
func TestUbuntuProvisioningWithUpgradeE2E(t *testing.T) {
t.Parallel()
osAuthURL := os.Getenv("OS_AUTH_URL")
osDomain := os.Getenv("OS_DOMAIN")
osPassword := os.Getenv("OS_PASSWORD")
osRegion := os.Getenv("OS_REGION")
osUsername := os.Getenv("OS_USERNAME")
osTenant := os.Getenv("OS_TENANT_NAME")
osNetwork := os.Getenv("OS_NETWORK_NAME")
if osAuthURL == "" || osUsername == "" || osPassword == "" || osDomain == "" || osRegion == "" || osTenant == "" {
t.Fatal("unable to run test suite, all of OS_AUTH_URL, OS_USERNAME, OS_PASSOWRD, OS_REGION, and OS_TENANT OS_DOMAIN must be set!")
}
params := []string{
fmt.Sprintf("<< IDENTITY_ENDPOINT >>=%s", osAuthURL),
fmt.Sprintf("<< USERNAME >>=%s", osUsername),
fmt.Sprintf("<< PASSWORD >>=%s", osPassword),
fmt.Sprintf("<< DOMAIN_NAME >>=%s", osDomain),
fmt.Sprintf("<< REGION >>=%s", osRegion),
fmt.Sprintf("<< TENANT_NAME >>=%s", osTenant),
fmt.Sprintf("<< NETWORK_NAME >>=%s", osNetwork),
}
scenario := scenario{
name: "Ubuntu upgrade",
osName: "ubuntu",
containerRuntime: "docker",
kubernetesVersion: "1.22.2",
executor: verifyCreateAndDelete,
}
testScenario(t, scenario, *testRunIdentifier, params, OSUpgradeManifest, false)
}
// TestDeploymentControllerUpgradesMachineE2E verifies the machineDeployment controller correctly
// rolls over machines on changes in the machineDeployment
func TestDeploymentControllerUpgradesMachineE2E(t *testing.T) {
t.Parallel()
// test data
hzToken := os.Getenv("HZ_E2E_TOKEN")
if len(hzToken) == 0 {
t.Fatal("unable to run the test suite, HZ_E2E_TOKEN environment variable cannot be empty")
}
// act
params := []string{fmt.Sprintf("<< HETZNER_TOKEN >>=%s", hzToken)}
scenario := scenario{
name: "MachineDeployment upgrade",
osName: "ubuntu",
containerRuntime: "docker",
kubernetesVersion: "1.19.1",
executor: verifyCreateUpdateAndDelete,
}
testScenario(t, scenario, *testRunIdentifier, params, HZManifest, false)
}
func TestAnexiaProvisioningE2E(t *testing.T) {
t.Parallel()
token := os.Getenv("ANEXIA_TOKEN")
if token == "" {
t.Fatal("unable to run the test suite, ANEXIA_TOKEN environment variable cannot be empty")
}
selector := OsSelector("flatcar")
params := []string{
fmt.Sprintf("<< ANEXIA_TOKEN >>=%s", token),
}
runScenarios(t, selector, params, anexiaManifest, fmt.Sprintf("anexia-%s", *testRunIdentifier))
}
|
[
"\"OS_AUTH_URL\"",
"\"OS_DOMAIN\"",
"\"OS_PASSWORD\"",
"\"OS_REGION\"",
"\"OS_USERNAME\"",
"\"OS_TENANT_NAME\"",
"\"OS_NETWORK_NAME\"",
"\"KUBEVIRT_E2E_TESTS_KUBECONFIG\"",
"\"KUBEVIRT_E2E_TESTS_KUBECONFIG\"",
"\"OS_AUTH_URL\"",
"\"OS_DOMAIN\"",
"\"OS_PASSWORD\"",
"\"OS_REGION\"",
"\"OS_USERNAME\"",
"\"OS_TENANT_NAME\"",
"\"OS_NETWORK_NAME\"",
"\"DO_E2E_TESTS_TOKEN\"",
"\"AWS_E2E_TESTS_KEY_ID\"",
"\"AWS_E2E_TESTS_SECRET\"",
"\"AWS_E2E_TESTS_KEY_ID\"",
"\"AWS_E2E_TESTS_SECRET\"",
"\"AWS_E2E_TESTS_KEY_ID\"",
"\"AWS_E2E_TESTS_SECRET\"",
"\"AWS_E2E_TESTS_KEY_ID\"",
"\"AWS_E2E_TESTS_SECRET\"",
"\"AWS_E2E_TESTS_KEY_ID\"",
"\"AWS_E2E_TESTS_SECRET\"",
"\"AWS_E2E_TESTS_KEY_ID\"",
"\"AWS_E2E_TESTS_SECRET\"",
"\"AWS_E2E_TESTS_KEY_ID\"",
"\"AWS_E2E_TESTS_SECRET\"",
"\"AWS_E2E_TESTS_KEY_ID\"",
"\"AWS_E2E_TESTS_SECRET\"",
"\"AZURE_E2E_TESTS_TENANT_ID\"",
"\"AZURE_E2E_TESTS_SUBSCRIPTION_ID\"",
"\"AZURE_E2E_TESTS_CLIENT_ID\"",
"\"AZURE_E2E_TESTS_CLIENT_SECRET\"",
"\"AZURE_E2E_TESTS_TENANT_ID\"",
"\"AZURE_E2E_TESTS_SUBSCRIPTION_ID\"",
"\"AZURE_E2E_TESTS_CLIENT_ID\"",
"\"AZURE_E2E_TESTS_CLIENT_SECRET\"",
"\"AZURE_E2E_TESTS_TENANT_ID\"",
"\"AZURE_E2E_TESTS_SUBSCRIPTION_ID\"",
"\"AZURE_E2E_TESTS_CLIENT_ID\"",
"\"AZURE_E2E_TESTS_CLIENT_SECRET\"",
"\"GOOGLE_SERVICE_ACCOUNT\"",
"\"HZ_E2E_TOKEN\"",
"\"PACKET_API_KEY\"",
"\"PACKET_PROJECT_ID\"",
"\"ALIBABA_ACCESS_KEY_ID\"",
"\"ALIBABA_ACCESS_KEY_SECRET\"",
"\"LINODE_E2E_TESTS_TOKEN\"",
"\"VSPHERE_E2E_PASSWORD\"",
"\"VSPHERE_E2E_USERNAME\"",
"\"VSPHERE_E2E_CLUSTER\"",
"\"VSPHERE_E2E_ADDRESS\"",
"\"SCW_ACCESS_KEY\"",
"\"SCW_SECRET_KEY\"",
"\"SCW_DEFAULT_PROJECT_ID\"",
"\"OS_AUTH_URL\"",
"\"OS_DOMAIN\"",
"\"OS_PASSWORD\"",
"\"OS_REGION\"",
"\"OS_USERNAME\"",
"\"OS_TENANT_NAME\"",
"\"OS_NETWORK_NAME\"",
"\"HZ_E2E_TOKEN\"",
"\"ANEXIA_TOKEN\""
] |
[] |
[
"AZURE_E2E_TESTS_CLIENT_SECRET",
"ALIBABA_ACCESS_KEY_SECRET",
"OS_USERNAME",
"OS_TENANT_NAME",
"OS_NETWORK_NAME",
"ALIBABA_ACCESS_KEY_ID",
"OS_REGION",
"OS_DOMAIN",
"AWS_E2E_TESTS_SECRET",
"AZURE_E2E_TESTS_SUBSCRIPTION_ID",
"VSPHERE_E2E_USERNAME",
"ANEXIA_TOKEN",
"AZURE_E2E_TESTS_CLIENT_ID",
"DO_E2E_TESTS_TOKEN",
"AZURE_E2E_TESTS_TENANT_ID",
"PACKET_PROJECT_ID",
"VSPHERE_E2E_CLUSTER",
"SCW_DEFAULT_PROJECT_ID",
"VSPHERE_E2E_PASSWORD",
"AWS_E2E_TESTS_KEY_ID",
"OS_AUTH_URL",
"OS_PASSWORD",
"SCW_ACCESS_KEY",
"VSPHERE_E2E_ADDRESS",
"KUBEVIRT_E2E_TESTS_KUBECONFIG",
"GOOGLE_SERVICE_ACCOUNT",
"SCW_SECRET_KEY",
"PACKET_API_KEY",
"LINODE_E2E_TESTS_TOKEN",
"HZ_E2E_TOKEN"
] |
[]
|
["AZURE_E2E_TESTS_CLIENT_SECRET", "ALIBABA_ACCESS_KEY_SECRET", "OS_USERNAME", "OS_TENANT_NAME", "OS_NETWORK_NAME", "ALIBABA_ACCESS_KEY_ID", "OS_REGION", "OS_DOMAIN", "AWS_E2E_TESTS_SECRET", "AZURE_E2E_TESTS_SUBSCRIPTION_ID", "VSPHERE_E2E_USERNAME", "ANEXIA_TOKEN", "AZURE_E2E_TESTS_CLIENT_ID", "DO_E2E_TESTS_TOKEN", "AZURE_E2E_TESTS_TENANT_ID", "PACKET_PROJECT_ID", "VSPHERE_E2E_CLUSTER", "SCW_DEFAULT_PROJECT_ID", "VSPHERE_E2E_PASSWORD", "AWS_E2E_TESTS_KEY_ID", "OS_AUTH_URL", "OS_PASSWORD", "SCW_ACCESS_KEY", "VSPHERE_E2E_ADDRESS", "KUBEVIRT_E2E_TESTS_KUBECONFIG", "GOOGLE_SERVICE_ACCOUNT", "SCW_SECRET_KEY", "PACKET_API_KEY", "LINODE_E2E_TESTS_TOKEN", "HZ_E2E_TOKEN"]
|
go
| 30 | 0 | |
config/config.go
|
package config
import (
"flag"
"github.com/rakyll/globalconf"
"os"
)
type config struct {
CAdvisorUrl string
DockerUrl string
Systemd bool
NumStats int
Auth bool
Key string
HostUuid string
Port int
Ip string
}
var Config config
func Parse() error {
flag.IntVar(&Config.Port, "port", 8080, "Listen port")
flag.StringVar(&Config.Ip, "ip", "", "Listen IP, defaults to all IPs")
flag.StringVar(&Config.CAdvisorUrl, "cadvisor-url", "http://localhost:8081", "cAdvisor URL")
flag.StringVar(&Config.DockerUrl, "docker-host", "unix:///var/run/docker.sock", "Docker host URL")
flag.IntVar(&Config.NumStats, "num-stats", 600, "Number of stats to show by default")
flag.BoolVar(&Config.Auth, "auth", false, "Authenticate requests")
flag.StringVar(&Config.HostUuid, "host-uuid", "", "Host UUID")
flag.StringVar(&Config.Key, "public-key", "", "Public Key for Authentication")
confOptions := &globalconf.Options{
EnvPrefix: "HOST_API_",
}
filename := os.Getenv("HOST_API_CONFIG_FILE")
if len(filename) > 0 {
confOptions.Filename = filename
}
conf, err := globalconf.NewWithOptions(confOptions)
if err != nil {
return err
}
conf.ParseAll()
s, err := os.Stat("/run/systemd/system")
if err != nil || !s.IsDir() {
Config.Systemd = false
} else {
Config.Systemd = true
}
return nil
}
|
[
"\"HOST_API_CONFIG_FILE\""
] |
[] |
[
"HOST_API_CONFIG_FILE"
] |
[]
|
["HOST_API_CONFIG_FILE"]
|
go
| 1 | 0 | |
api/api_test.go
|
package api_test
import (
"fmt"
"log"
"os"
"reflect"
"sync"
"testing"
"github.com/SevereCloud/vksdk/api"
"github.com/SevereCloud/vksdk/api/errors"
"github.com/SevereCloud/vksdk/object"
"github.com/stretchr/testify/assert"
)
func noError(t *testing.T, err error) bool {
t.Helper()
switch errors.GetType(err) {
// case errors.TooMany:
// t.Skip("Too many requests per second")
case errors.Server:
t.Skip("Internal server error")
case errors.Permission:
t.Skip("Permission to perform this action is denied")
case errors.Captcha:
t.Skip("Captcha needed")
}
if err != nil {
ctx := errors.GetErrorContext(err)
if ctx.Code != 0 {
s := "\n"
s += fmt.Sprintf("code: %d\n", ctx.Code)
s += fmt.Sprintf("text: %s\n", ctx.Text)
s += fmt.Sprintf("message: %s\n", ctx.Message)
s += "params:\n"
for _, param := range ctx.RequestParams {
s += fmt.Sprintf("\t%s: %s\n", param.Key, param.Value)
}
t.Log(s)
} else {
t.Log(fmt.Sprintf("\n%#v", err))
}
}
return assert.NoError(t, err)
}
func needUserToken(t *testing.T) {
t.Helper()
if vkUser.AccessToken == "" {
t.Skip("USER_TOKEN empty")
}
}
func needGroupToken(t *testing.T) {
t.Helper()
if vkGroup.AccessToken == "" {
t.Skip("GROUP_TOKEN empty")
}
}
func needServiceToken(t *testing.T) {
t.Helper()
if vkService.AccessToken == "" {
t.Skip("SERVICE_TOKEN empty")
}
}
func needWidgetToken(t *testing.T) {
t.Helper()
if vkWidget.AccessToken == "" {
t.Skip("WIDGET_TOKEN empty")
}
}
func needChatID(t *testing.T) int {
mux.Lock()
defer mux.Unlock()
t.Helper()
needUserToken(t)
if vkChatID == 0 {
var err error
vkChatID, err = vkUser.MessagesCreateChat(api.Params{
"title": "TestChat",
})
if err != nil {
t.Skip("Get chatID", err)
}
}
return vkChatID
}
var vkGroup, vkService, vkUser, vkWidget *api.VK // nolint:gochecknoglobals
var vkUserID, vkGroupID, vkChatID int // nolint:gochecknoglobals
var mux sync.Mutex // nolint:gochecknoglobals
func TestMain(m *testing.M) {
vkGroup = api.NewVK(os.Getenv("GROUP_TOKEN"))
if vkGroup.AccessToken != "" {
group, err := vkGroup.GroupsGetByID(api.Params{})
if err != nil {
log.Fatalf("GROUP_TOKEN bad: %v", err)
}
vkGroupID = group[0].ID
}
vkWidget = api.NewVK(os.Getenv("WIDGET_TOKEN"))
vkService = api.NewVK(os.Getenv("SERVICE_TOKEN"))
vkService.Limit = 3
vkUser = api.NewVK(os.Getenv("USER_TOKEN"))
vkUser.Limit = 3
if vkUser.AccessToken != "" {
user, err := vkUser.UsersGet(api.Params{})
if err != nil {
log.Fatalf("USER_TOKEN bad: %v", err)
}
vkUserID = user[0].ID
}
runTests := m.Run()
os.Exit(runTests)
}
func TestVK_Request(t *testing.T) {
t.Parallel()
groupToken := os.Getenv("GROUP_TOKEN")
if groupToken == "" {
t.Skip("GROUP_TOKEN empty")
}
vk := api.NewVK(groupToken)
t.Run("Request 403 error", func(t *testing.T) {
_, err := vk.Request("", api.Params{})
if err == nil {
t.Errorf("VK.Request() got1 = %v, want -1", err)
}
})
vk.MethodURL = ""
t.Run("Client error", func(t *testing.T) {
_, err := vk.Request("test", api.Params{"test": "test"})
if err == nil {
t.Errorf("VK.Request() got1 = %v, want -1", err)
}
})
}
func TestVK_RequestLimit(t *testing.T) {
needUserToken(t)
vkUser.Limit = 4
var wg sync.WaitGroup
for i := 0; i < 20; i++ {
wg.Add(1)
go func() {
_, err := vkUser.UsersGet(api.Params{})
assert.NoError(t, err)
wg.Done()
}()
}
wg.Wait()
vkUser.Limit = 3
}
func TestVK_Execute_error(t *testing.T) {
t.Parallel()
needGroupToken(t)
var response int
err := vkGroup.Execute(`API.users.get({user_id:-1});return 1;`, &response)
assert.Error(t, err)
assert.Equal(t, 1, response)
}
func TestVK_Execute_object(t *testing.T) {
t.Parallel()
needGroupToken(t)
var response struct {
Text string `json:"text"`
}
err := vkGroup.Execute(`return {text: "hello"};`, &response)
assert.NoError(t, err)
assert.Equal(t, "hello", response.Text)
}
func TestVK_InvalidContentType(t *testing.T) {
t.Parallel()
needGroupToken(t)
var testObj string
err := vkGroup.RequestUnmarshal("t/t", api.Params{}, testObj)
if err.Error() != "invalid content-type" {
t.Errorf("VK.RequestUnmarshal() error = %v", err)
}
}
type renamedBool bool
func Test_FmtValue(t *testing.T) {
t.Parallel()
f := func(value interface{}, want string) {
t.Helper()
got := api.FmtValue(value, 0)
assert.Equal(t, got, want)
}
f(nil, "")
f(reflect.ValueOf(nil), "")
f(reflect.Value{}, "")
f(true, "1")
f(false, "0")
f(renamedBool(true), "1")
f(123, "123")
f(1.1, "1.1")
f("abc", "abc")
// Attachment
photo := object.PhotosPhoto{
OwnerID: 321,
ID: 123,
}
f(photo, "photo321_123")
// Keyboard
keyboard := object.NewMessagesKeyboard(true)
f(keyboard, keyboard.ToJSON())
// Slice
intSlice := []int{1, 2, 3}
f(intSlice, "1,2,3")
f([]object.PhotosPhoto{photo, photo}, "photo321_123,photo321_123")
// Pointer
f(&intSlice, "1,2,3")
f(&photo, "photo321_123")
}
func TestVK_CaptchaForce(t *testing.T) {
t.Parallel()
needUserToken(t)
_, err := vkUser.CaptchaForce(api.Params{})
if errors.GetType(err) != errors.Captcha {
t.Errorf("VK.CaptchaForce() err=%v, want 14", err)
}
}
// FIXME: v2 remove TestInit.
func TestInit(t *testing.T) {
t.Parallel()
vk := api.Init("")
assert.NotNil(t, vk)
}
func TestParams_methods(t *testing.T) {
t.Parallel()
p := api.Params{}
p.Lang(1)
p.TestMode(true)
p.CaptchaSID("text")
p.CaptchaKey("text")
p.Confirm(true)
assert.Equal(t, p["lang"], 1)
assert.Equal(t, p["test_mode"], true)
assert.Equal(t, p["captcha_sid"], "text")
assert.Equal(t, p["captcha_key"], "text")
assert.Equal(t, p["confirm"], true)
}
|
[
"\"GROUP_TOKEN\"",
"\"WIDGET_TOKEN\"",
"\"SERVICE_TOKEN\"",
"\"USER_TOKEN\"",
"\"GROUP_TOKEN\""
] |
[] |
[
"USER_TOKEN",
"SERVICE_TOKEN",
"WIDGET_TOKEN",
"GROUP_TOKEN"
] |
[]
|
["USER_TOKEN", "SERVICE_TOKEN", "WIDGET_TOKEN", "GROUP_TOKEN"]
|
go
| 4 | 0 | |
Product/TrendManager/TrendScoreToDatabase.py
|
"""
Author: John Andree Lidquist, Marten Bolin
Date:
Last update: 2017/11/21 Albin Bergvall
Purpose: Gets movies from the database and stores a trending score for each
"""
from datetime import datetime
import threading
import os
from apscheduler.schedulers.background import BackgroundScheduler
from Product.TrendManager.TwitterAPI import TwitterAPI
from Product.Database.DatabaseManager.Retrieve.RetrieveMovie import RetrieveMovie
from Product.Database.DatabaseManager.Insert.InsertTrending import InsertTrending
from Product.Database.DatabaseManager.Retrieve.RetrieveTrending import RetrieveTrending
from Product.Database.DatabaseManager.Update.UpdateTrending import UpdateTrending
from Product.TrendManager.TrendingController import TrendingController
TIME_LIMIT_TWITTER_STREAM = 43200 # Time limit for twitter stream uptime in seconds
# Time limit for twitter stream if there is no file to load data from
TIME_LIMIT_TWITTER_STREAM_NO_FILE = 7200
class TrendingToDB(object):
"""
Author: John Andree Lidquist, Marten Bolin
Date: 2017-10-12
Last update: 2017-11-13
Purpose: This class handles collecting all the trending scores so that they can
be stored in the database.
The class uses threads and is able to run in the background continuously
"""
def __init__(self, daemon=False, daily=False):
"""
Author: John Andree Lidquist, Marten Bolin
Date:2017-10-12
Last update: 2017-11-17
Purpose: Instantiates the class; based on the params it can be run in different ways.
:param daemon: True - makes the process terminate when app is finished.
False - The process will not terminate until finished or terminated.
:param daily: True - Will make the process run once every day.
False - Will only run the process once.
"""
# self.daemon = daemon
self.stop = False
self.daily = daily
self.insert_trend = InsertTrending()
self.retrieve_trend = RetrieveTrending()
self.alter_trend = UpdateTrending()
self.retrieve_movie = RetrieveMovie()
if daily:
# if set to daily, it creates a scheduler and sets the interval to 1 day
self.scheduled = BackgroundScheduler()
if not daemon:
self.scheduled._daemon = False
self.scheduled.add_job(self.run, 'interval', days=1, id="1")
self.scheduled.start()
self.scheduled.modify_job(job_id="1", next_run_time=datetime.now())
else:
# creates the thread that will make the method run parallel.
# Sets daemon to true so that it will allow
# the app to be terminated and will terminate with it.
thread = threading.Thread(target=self.run, args=())
thread.daemon = daemon
thread.start()
def run(self):
"""
Author: John Andree Lidquist, Marten Bolin
Date: 2017-10-28
Last update:2017-11-21 Albin Bergvall
Purpose: Fetches all the scores via the TrendingController, which
communicates with the YouTube and Twitter APIs.
"""
# Following steps are done:
# 1. Query movies from database
# 2. Get new score for that movie
# 3. Save the highest scores from the different trending sources
# 4. Iterate though list of scored movies and normalize,
# weight and add the scores to a total score
# 5. If current total score is different from the newly
# fetched score - Update score in database, else go to step 1
# 6. Go to step 1
trend_controller = TrendingController()
res_movie = self.retrieve_movie.retrieve_movie()
scored_movies = []
twitter_max = 1
youtube_max = 1
for movie in res_movie:
if self.stop:
break
scored_movie = trend_controller.get_trending_content(movie.title)
scored_movie.id = movie.id
if scored_movie.youtube_score > youtube_max:
youtube_max = scored_movie.youtube_score
if scored_movie.twitter_score > twitter_max:
twitter_max = scored_movie.twitter_score
scored_movies.append(scored_movie)
print("Movie ID:", scored_movie.id)
print("Inserting scored movies into database...")
for scored_movie in scored_movies:
res_score = self.retrieve_trend.retrieve_trend_score(scored_movie.id)
scored_movie.total_score = (scored_movie.youtube_score * 0.7 / youtube_max) + \
(scored_movie.twitter_score * 0.3 / twitter_max)
if res_score:
if scored_movie.total_score != res_score.total_score:
# If score is new
self.alter_trend.update_trend_score(movie_id=scored_movie.id,
total_score=scored_movie.total_score,
youtube_score=scored_movie.youtube_score,
twitter_score=scored_movie.twitter_score)
else:
# If movie is not in TrendingScore table
self.insert_trend.add_trend_score(movie_id=scored_movie.id,
total_score=scored_movie.total_score,
youtube_score=scored_movie.youtube_score,
twitter_score=scored_movie.twitter_score)
# The commit is in the loop for now due to high waiting time but
# could be moved outside to lower total run time
# Open twitter stream after titles have been scored, to gather new data
# The os.environ checks if the run config has a variable named "TWITTERSTREAM"
# and only starts stream if it is set to 1. This is to make sure that the stream
# isn't opened during testing.
try:
if os.environ["TWITTERSTREAM"] == "1":
TwitterAPI().open_twitter_stream(TIME_LIMIT_TWITTER_STREAM)
print("Opened Twitter Stream")
except KeyError:
pass
print("Waiting until next day to update")
# Used to stop the thread if background is false
# or for any other reason it needs to be stopped.
def terminate(self):
"""
Author: John Andree Lidquist, Marten Bolin
Date:
Last update:
Purpose: Terminates the process
"""
print("Shutting down TrendScoreToDatabase")
self.stop = True
if self.daily:
self.scheduled.shutdown()
|
[] |
[] |
[
"TWITTERSTREAM"
] |
[]
|
["TWITTERSTREAM"]
|
python
| 1 | 0 | |
registry/storage/driver/swift/swift_test.go
|
package swift
import (
"io/ioutil"
"os"
"reflect"
"strconv"
"strings"
"testing"
"github.com/ncw/swift/swifttest"
"github.com/sequix/distribution/context"
storagedriver "github.com/sequix/distribution/registry/storage/driver"
"github.com/sequix/distribution/registry/storage/driver/testsuites"
"gopkg.in/check.v1"
)
// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { check.TestingT(t) }
var swiftDriverConstructor func(prefix string) (*Driver, error)
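// init builds the driver constructor used by the shared storage driver test suite, reading connection settings from the SWIFT_* environment variables and falling back to an in-process swifttest server when no credentials are provided.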
func init() {
var (
username string
password string
authURL string
tenant string
tenantID string
domain string
domainID string
tenantDomain string
tenantDomainID string
trustID string
container string
region string
AuthVersion int
endpointType string
insecureSkipVerify bool
secretKey string
accessKey string
containerKey bool
tempURLMethods []string
swiftServer *swifttest.SwiftServer
err error
)
username = os.Getenv("SWIFT_USERNAME")
password = os.Getenv("SWIFT_PASSWORD")
authURL = os.Getenv("SWIFT_AUTH_URL")
tenant = os.Getenv("SWIFT_TENANT_NAME")
tenantID = os.Getenv("SWIFT_TENANT_ID")
domain = os.Getenv("SWIFT_DOMAIN_NAME")
domainID = os.Getenv("SWIFT_DOMAIN_ID")
tenantDomain = os.Getenv("SWIFT_DOMAIN_NAME")
tenantDomainID = os.Getenv("SWIFT_DOMAIN_ID")
trustID = os.Getenv("SWIFT_TRUST_ID")
container = os.Getenv("SWIFT_CONTAINER_NAME")
region = os.Getenv("SWIFT_REGION_NAME")
AuthVersion, _ = strconv.Atoi(os.Getenv("SWIFT_AUTH_VERSION"))
endpointType = os.Getenv("SWIFT_ENDPOINT_TYPE")
insecureSkipVerify, _ = strconv.ParseBool(os.Getenv("SWIFT_INSECURESKIPVERIFY"))
secretKey = os.Getenv("SWIFT_SECRET_KEY")
accessKey = os.Getenv("SWIFT_ACCESS_KEY")
containerKey, _ = strconv.ParseBool(os.Getenv("SWIFT_TEMPURL_CONTAINERKEY"))
tempURLMethods = strings.Split(os.Getenv("SWIFT_TEMPURL_METHODS"), ",")
if username == "" || password == "" || authURL == "" || container == "" {
if swiftServer, err = swifttest.NewSwiftServer("localhost"); err != nil {
panic(err)
}
username = "swifttest"
password = "swifttest"
authURL = swiftServer.AuthURL
container = "test"
}
prefix, err := ioutil.TempDir("", "driver-")
if err != nil {
panic(err)
}
defer os.Remove(prefix)
swiftDriverConstructor = func(root string) (*Driver, error) {
parameters := Parameters{
username,
password,
authURL,
tenant,
tenantID,
domain,
domainID,
tenantDomain,
tenantDomainID,
trustID,
region,
AuthVersion,
container,
root,
endpointType,
insecureSkipVerify,
defaultChunkSize,
secretKey,
accessKey,
containerKey,
tempURLMethods,
}
return New(parameters)
}
driverConstructor := func() (storagedriver.StorageDriver, error) {
return swiftDriverConstructor(prefix)
}
testsuites.RegisterSuite(driverConstructor, testsuites.NeverSkip)
}
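// TestEmptyRootList verifies that listing with an empty or "/" root prefix only returns valid paths and that deleting an object also removes objects nested under it.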
func TestEmptyRootList(t *testing.T) {
validRoot, err := ioutil.TempDir("", "driver-")
if err != nil {
t.Fatalf("unexpected error creating temporary directory: %v", err)
}
defer os.Remove(validRoot)
rootedDriver, err := swiftDriverConstructor(validRoot)
if err != nil {
t.Fatalf("unexpected error creating rooted driver: %v", err)
}
emptyRootDriver, err := swiftDriverConstructor("")
if err != nil {
t.Fatalf("unexpected error creating empty root driver: %v", err)
}
slashRootDriver, err := swiftDriverConstructor("/")
if err != nil {
t.Fatalf("unexpected error creating slash root driver: %v", err)
}
filename := "/test"
contents := []byte("contents")
ctx := context.Background()
err = rootedDriver.PutContent(ctx, filename, contents)
if err != nil {
t.Fatalf("unexpected error creating content: %v", err)
}
keys, _ := emptyRootDriver.List(ctx, "/")
for _, path := range keys {
if !storagedriver.PathRegexp.MatchString(path) {
t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp)
}
}
keys, _ = slashRootDriver.List(ctx, "/")
for _, path := range keys {
if !storagedriver.PathRegexp.MatchString(path) {
t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp)
}
}
// Create an object with a path nested under the existing object
err = rootedDriver.PutContent(ctx, filename+"/file1", contents)
if err != nil {
t.Fatalf("unexpected error creating content: %v", err)
}
err = rootedDriver.Delete(ctx, filename)
if err != nil {
t.Fatalf("failed to delete: %v", err)
}
keys, err = rootedDriver.List(ctx, "/")
if err != nil {
t.Fatalf("failed to list objects after deletion: %v", err)
}
if len(keys) != 0 {
t.Fatal("delete did not remove nested objects")
}
}
func TestFilenameChunking(t *testing.T) {
// Test valid input and sizes
input := []string{"a", "b", "c", "d", "e"}
expecteds := [][][]string{
{
{"a"},
{"b"},
{"c"},
{"d"},
{"e"},
},
{
{"a", "b"},
{"c", "d"},
{"e"},
},
{
{"a", "b", "c"},
{"d", "e"},
},
{
{"a", "b", "c", "d"},
{"e"},
},
{
{"a", "b", "c", "d", "e"},
},
{
{"a", "b", "c", "d", "e"},
},
}
for i, expected := range expecteds {
actual, err := chunkFilenames(input, i+1)
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("chunk %v didn't match expected value %v", actual, expected)
}
if err != nil {
t.Fatalf("unexpected error chunking filenames: %v", err)
}
}
// Test nil input
actual, err := chunkFilenames(nil, 5)
if len(actual) != 0 {
t.Fatal("chunks were returned when passed nil")
}
if err != nil {
t.Fatalf("unexpected error chunking filenames: %v", err)
}
// Test 0 and < 0 sizes
_, err = chunkFilenames(nil, 0)
if err == nil {
t.Fatal("expected error for size = 0")
}
_, err = chunkFilenames(nil, -1)
if err == nil {
t.Fatal("expected error for size = -1")
}
}
func TestSwiftSegmentPath(t *testing.T) {
d := &driver{
Prefix: "/test/segment/path",
}
s1, err := d.swiftSegmentPath("foo-baz")
if err != nil {
t.Fatalf("unexpected error generating segment path: %v", err)
}
s2, err := d.swiftSegmentPath("foo-baz")
if err != nil {
t.Fatalf("unexpected error generating segment path: %v", err)
}
if !strings.HasPrefix(s1, "test/segment/path/segments/") {
t.Fatalf("expected to be prefixed: %s", s1)
}
if !strings.HasPrefix(s2, "test/segment/path/segments/") {
t.Fatalf("expected to be prefixed: %s", s2)
}
if len(s1) != 68 {
t.Fatalf("unexpected segment path length, %d != %d", len(s1), 68)
}
if len(s2) != 68 {
t.Fatalf("unexpected segment path length, %d != %d", len(s2), 68)
}
if s1 == s2 {
t.Fatalf("expected segment paths to differ, %s == %s", s1, s2)
}
}
|
[
"\"SWIFT_USERNAME\"",
"\"SWIFT_PASSWORD\"",
"\"SWIFT_AUTH_URL\"",
"\"SWIFT_TENANT_NAME\"",
"\"SWIFT_TENANT_ID\"",
"\"SWIFT_DOMAIN_NAME\"",
"\"SWIFT_DOMAIN_ID\"",
"\"SWIFT_DOMAIN_NAME\"",
"\"SWIFT_DOMAIN_ID\"",
"\"SWIFT_TRUST_ID\"",
"\"SWIFT_CONTAINER_NAME\"",
"\"SWIFT_REGION_NAME\"",
"\"SWIFT_AUTH_VERSION\"",
"\"SWIFT_ENDPOINT_TYPE\"",
"\"SWIFT_INSECURESKIPVERIFY\"",
"\"SWIFT_SECRET_KEY\"",
"\"SWIFT_ACCESS_KEY\"",
"\"SWIFT_TEMPURL_CONTAINERKEY\"",
"\"SWIFT_TEMPURL_METHODS\""
] |
[] |
[
"SWIFT_AUTH_URL",
"SWIFT_TEMPURL_CONTAINERKEY",
"SWIFT_DOMAIN_ID",
"SWIFT_AUTH_VERSION",
"SWIFT_ACCESS_KEY",
"SWIFT_DOMAIN_NAME",
"SWIFT_TENANT_NAME",
"SWIFT_REGION_NAME",
"SWIFT_INSECURESKIPVERIFY",
"SWIFT_TEMPURL_METHODS",
"SWIFT_PASSWORD",
"SWIFT_CONTAINER_NAME",
"SWIFT_SECRET_KEY",
"SWIFT_ENDPOINT_TYPE",
"SWIFT_USERNAME",
"SWIFT_TRUST_ID",
"SWIFT_TENANT_ID"
] |
[]
|
["SWIFT_AUTH_URL", "SWIFT_TEMPURL_CONTAINERKEY", "SWIFT_DOMAIN_ID", "SWIFT_AUTH_VERSION", "SWIFT_ACCESS_KEY", "SWIFT_DOMAIN_NAME", "SWIFT_TENANT_NAME", "SWIFT_REGION_NAME", "SWIFT_INSECURESKIPVERIFY", "SWIFT_TEMPURL_METHODS", "SWIFT_PASSWORD", "SWIFT_CONTAINER_NAME", "SWIFT_SECRET_KEY", "SWIFT_ENDPOINT_TYPE", "SWIFT_USERNAME", "SWIFT_TRUST_ID", "SWIFT_TENANT_ID"]
|
go
| 17 | 0 | |
plugins/sms/provider/tencentcloud/tencentcloud.go
|
package tencentcloud
import (
"github.com/gogf/gf/errors/gerror"
"github.com/gogf/gf/util/gconv"
"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common"
"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors"
"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile"
sms "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/sms/v20190711" //引入sms
)
type Sender struct {
SdkAppid string
SecretId string
SecretKey string
SignName string
}
// New initializes a Sender with the given credentials and SMS sign name.
func New(secretId, secretKey, signName string) *Sender {
sd := &Sender{}
sd.SecretId = secretId
sd.SecretKey = secretKey
sd.SignName = signName
return sd
}
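// Request sends a templated SMS: TemplateCode is the approved template ID, TemplateParam holds the template parameters, and phoneNumbers are the E.164 recipients. It returns the gateway's status message.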
func (sd *Sender) Request(TemplateCode string, TemplateParam, phoneNumbers []string) (string, error) {
/* Required step:
* Instantiate a credential object. It takes the Tencent Cloud account key pair secretId and secretKey.
* Here the values passed to New are used; they could also be read from environment variables.
* Be careful not to copy, upload or share code that hard-codes the key pair,
* to avoid leaking it and endangering your account.
* CAM key management: https://console.cloud.tencent.com/cam/capi*/
credential := common.NewCredential(
// os.Getenv("TENCENTCLOUD_SECRET_ID"),
// os.Getenv("TENCENTCLOUD_SECRET_KEY"),
sd.SecretId,
sd.SecretKey,
)
/* Optional step:
* Instantiate a client profile object; settings such as the request timeout can be configured here */
cpf := profile.NewClientProfile()
/* The SDK uses the POST method by default.
* If you really need GET you can set it here, but GET cannot handle some larger requests */
cpf.HttpProfile.ReqMethod = "POST"
/* The SDK has a default timeout; please do not adjust it unless necessary.
* If needed, check the SDK source for the latest default value */
//cpf.HttpProfile.ReqTimeout = 5
/* The SDK picks the endpoint automatically. Normally there is no need to set it explicitly, but if you access a finance-region service
* you must specify the endpoint manually, e.g. the Shanghai finance region endpoint for sms: sms.ap-shanghai-fsi.tencentcloudapi.com */
cpf.HttpProfile.Endpoint = "sms.tencentcloudapi.com"
/* The SDK signs with TC3-HMAC-SHA256 by default; please do not modify this field unless necessary */
cpf.SignMethod = "HmacSHA1"
/* Instantiate a client object for the product to request (sms in this example).
* The second parameter is the region; a plain string such as ap-guangzhou or one of the preset constants can be used */
client, _ := sms.NewClient(credential, "ap-beijing", cpf)
/* Instantiate a request object; depending on the API being called you can set further request parameters.
* You can check the SDK source directly to see which attributes an API supports.
* Attributes may be basic types or references to other data structures.
* Using an IDE is recommended, as it makes it easy to jump to the documentation of each API and data structure */
request := sms.NewSendSmsRequest()
/* Setting basic-type parameters:
* The SDK uses pointer-style parameters, so even basic types must be assigned through pointers.
* The SDK provides wrapper functions that return pointers to basic types.
* Helpful links:
* SMS console: https://console.cloud.tencent.com/sms/smslist
* sms helper: https://cloud.tencent.com/document/product/382/3773 */
/* SMS application ID: the actual SdkAppid generated after adding an application in the [SMS console], e.g. 1400006666 */
request.SmsSdkAppid = common.StringPtr(sd.SdkAppid)
/* SMS sign content: UTF-8 encoded; must be an approved sign, which can be viewed in the [SMS console] */
request.Sign = common.StringPtr(sd.SignName)
/* International/Hong Kong/Macao/Taiwan SMS senderid: leave empty for domestic SMS; disabled by default, contact [sms helper] to enable */
request.SenderId = common.StringPtr("")
/* User session content: may carry context such as a user-side ID; the server returns it unchanged */
request.SessionContext = common.StringPtr("")
/* SMS number extension code: disabled by default, contact [sms helper] to enable */
request.ExtendCode = common.StringPtr("0")
/* Template parameters: set to empty if the template has no parameters */
request.TemplateParamSet = common.StringPtrs(TemplateParam)
/* Template ID: must be an approved template ID, which can be viewed in the [SMS console] */
request.TemplateID = common.StringPtr(TemplateCode)
/* Destination phone numbers in E.164 format: +[country or region code][phone number],
* e.g. +8613711112222, where the leading + is required, 86 is the country code and 13711112222 is the number; at most 200 numbers */
request.PhoneNumberSet = common.StringPtrs(phoneNumbers)
// Call the target API through the client object, passing in the request object
response, err := client.SendSms(request)
// Handle SDK exceptions
if _, ok := err.(*errors.TencentCloudSDKError); ok {
return "", err
}
// Non-SDK errors fail directly; real code could add further handling here.
if err != nil {
return "", err
}
//b, _ := json.Marshal(response.Response)
//fmt.Printf("%s", b)
// Check the send result
if *(response.Response.SendStatusSet[0].Message) != "send success" {
if *(response.Response.SendStatusSet[0].Message) == "incorrect number format" {
return gconv.String(*(response.Response.SendStatusSet[0].Message)), gerror.New(gconv.String(TemplateParam) + gconv.String(*(response.Response.SendStatusSet[0].PhoneNumber)+": send failed, invalid phone number format"))
} else {
return gconv.String(*(response.Response.SendStatusSet[0].Message)), gerror.New(gconv.String(TemplateParam) + gconv.String(*(response.Response.SendStatusSet[0].PhoneNumber)+": send failed, number is vacant, suspended or powered off"))
}
}
return gconv.String(*(response.Response.SendStatusSet[0].Message)), nil
}
|
[
"\"TENCENTCLOUD_SECRET_ID\"",
"\"TENCENTCLOUD_SECRET_KEY\""
] |
[] |
[
"TENCENTCLOUD_SECRET_ID",
"TENCENTCLOUD_SECRET_KEY"
] |
[]
|
["TENCENTCLOUD_SECRET_ID", "TENCENTCLOUD_SECRET_KEY"]
|
go
| 2 | 0 | |
com_007_rw1_fdi_regulatory_restrictiveness_index/com_007_rw1_fdi_regulatory_restrictiveness_index_processing.py
|
import pandas as pd
import os
import sys
utils_path = os.path.join(os.path.abspath(os.getenv('PROCESSING_DIR')),'utils')
if utils_path not in sys.path:
sys.path.append(utils_path)
import util_files
import util_cloud
import util_carto
from zipfile import ZipFile
import shutil
import logging
import glob
import re
import datetime
# Set up logging
# Get the top-level logger object
logger = logging.getLogger()
for handler in logger.handlers: logger.removeHandler(handler)
logger.setLevel(logging.INFO)
# make it print to the console.
console = logging.StreamHandler()
logger.addHandler(console)
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# name of table on Carto where you want to upload data
# this should be a table name that is not currently in use
dataset_name = 'com_007_rw1_fdi_regulatory_restrictiveness_index' #check
logger.info('Executing script for dataset: ' + dataset_name)
# create a new sub-directory within your specified dir called 'data'
# within this directory, create files to store raw and processed data
data_dir = util_files.prep_dirs(dataset_name)
'''
Download data and save to your data directory
Data can be downloaded at the following link:
http://stats.oecd.org/Index.aspx?datasetcode=FDIINDEX#
Above the table, there is an 'export' button that opens a dropdown menu with different export options.
Once you select 'Text file (CSV)' from the menu, a new window will appear and let you download the data as a csv file to your Downloads folder.
'''
logger.info('Downloading raw data')
download = glob.glob(os.path.join(os.path.expanduser("~"), 'Downloads', 'FDIINDEX_*.csv'))[0]
# Move this file into your data directory
raw_data_file = os.path.join(data_dir, os.path.basename(download))
shutil.move(download,raw_data_file)
# read in data to pandas dataframe
df = pd.read_csv(raw_data_file)
'''
Process data
'''
# remove 'TIME' column because it is identical to the 'year' column
df = df.drop(columns = 'TIME')
# remove the column 'SECTOR' since it contains the same information as the column 'Sector/Industry'
# (SECTOR is a numeric column, while Sector/Industry' is text, so we will keep the more informative column)
df = df.drop(columns = 'SECTOR')
# remove columns with no values
df = df.dropna(axis=1, how='all')
# remove columns with only one unique value
for col in df.columns:
if len(df[col].unique()) == 1:
df.drop(col,inplace=True,axis=1)
# reformat the dataframe so each sector becomes a new column
pivoted = pd.pivot_table(df, index = ['LOCATION', 'Country','Year'], columns = 'Sector / Industry', values = 'Value').reset_index()
# convert the years in the 'Year' column to datetime objects and store them in a new column 'datetime'
pivoted['datetime'] = [datetime.datetime(x, 1, 1) for x in pivoted.Year]
# rename columns, replacing or removing symbols and spaces, and
# making them all lowercase so that it matches Carto column name requirements
pivoted.columns = [re.sub('[().]', '', col.lower().replace('&', 'and').replace(' ', '_')) for col in pivoted.columns]
# save processed dataset to csv
processed_data_file = os.path.join(data_dir, dataset_name+'_edit.csv')
pivoted.to_csv(processed_data_file, index=False)
'''
Upload processed data to Carto
'''
logger.info('Uploading processed data to Carto.')
util_carto.upload_to_carto(processed_data_file, 'LINK')
'''
Upload original data and processed data to Amazon S3 storage
'''
# initialize AWS variables
aws_bucket = 'wri-public-data'
s3_prefix = 'resourcewatch/'
logger.info('Uploading original data to S3.')
# Upload raw data file to S3
# Copy the raw data into a zipped file to upload to S3
raw_data_dir = os.path.join(data_dir, dataset_name+'.zip')
with ZipFile(raw_data_dir,'w') as zip:
zip.write(raw_data_file, os.path.basename(raw_data_file))
# Upload raw data file to S3
uploaded = util_cloud.aws_upload(raw_data_dir, aws_bucket, s3_prefix+os.path.basename(raw_data_dir))
logger.info('Uploading processed data to S3.')
# Copy the processed data into a zipped file to upload to S3
processed_data_dir = os.path.join(data_dir, dataset_name+'_edit.zip')
with ZipFile(processed_data_dir,'w') as zip:
zip.write(processed_data_file, os.path.basename(processed_data_file))
# Upload processed data file to S3
uploaded = util_cloud.aws_upload(processed_data_dir, aws_bucket, s3_prefix+os.path.basename(processed_data_dir))
|
[] |
[] |
[
"PROCESSING_DIR"
] |
[]
|
["PROCESSING_DIR"]
|
python
| 1 | 0 | |
controllers/precache.go
|
package controllers
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
ranv1alpha1 "github.com/openshift-kni/cluster-group-upgrades-operator/api/v1alpha1"
"github.com/openshift-kni/cluster-group-upgrades-operator/controllers/utils"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// reconcilePrecaching provides the main precaching entry point
// returns: error
func (r *ClusterGroupUpgradeReconciler) reconcilePrecaching(
ctx context.Context,
clusterGroupUpgrade *ranv1alpha1.ClusterGroupUpgrade) error {
if clusterGroupUpgrade.Spec.PreCaching {
// Pre-caching is required
if clusterGroupUpgrade.Status.Precaching == nil {
clusterGroupUpgrade.Status.Precaching = &ranv1alpha1.PrecachingStatus{
Spec: &ranv1alpha1.PrecachingSpec{
PlatformImage: "",
OperatorsIndexes: []string{},
OperatorsPackagesAndChannels: []string{},
},
Status: make(map[string]string),
Clusters: []string{},
}
}
doneCondition := meta.FindStatusCondition(
clusterGroupUpgrade.Status.Conditions, "PrecachingDone")
r.Log.Info("[reconcilePrecaching]",
"FindStatusCondition PrecachingDone", doneCondition)
if doneCondition != nil && doneCondition.Status == metav1.ConditionTrue {
// Precaching is done
return nil
}
// Precaching is required and not marked as done
return r.precachingFsm(ctx, clusterGroupUpgrade)
}
// No precaching required
return nil
}
// getImageForVersionFromUpdateGraph gets the image for the given version
// by traversing the update graph.
// Connecting to the upstream URL with the channel passed as a parameter
// the update graph is returned as JSON. This function then traverses
// the nodes list from that JSON to find the version and if found
// then returns the image
func (r *ClusterGroupUpgradeReconciler) getImageForVersionFromUpdateGraph(
upstream string, channel string, version string) (string, error) {
updateGraphURL := upstream + "?channel=" + channel
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client := &http.Client{Transport: tr}
req, _ := http.NewRequest("GET", updateGraphURL, nil)
req.Header.Add("Accept", "application/json")
res, err := client.Do(req)
if err != nil {
return "", fmt.Errorf("unable to request update graph on url %s: %w", updateGraphURL, err)
}
defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", fmt.Errorf("unable to read body from response: %w", err)
}
var graph map[string]interface{}
err = json.Unmarshal(body, &graph)
if err != nil {
return "", fmt.Errorf("unable to unmarshal body: %w", err)
}
nodes := graph["nodes"].([]interface{})
for _, n := range nodes {
node := n.(map[string]interface{})
if node["version"] == version {
return node["payload"].(string), nil
}
}
return "", fmt.Errorf("unable to find version %s on update graph on url %s", version, updateGraphURL)
}
// extractPrecachingSpecFromPolicies extracts the software spec to be pre-cached
// from policies.
// There are three object types to look at in the policies:
// - ClusterVersion: release image must be specified to be pre-cached
// - Subscription: provides the list of operator packages and channels
// - CatalogSource: must be explicitly configured to be precached.
// All the clusters in the CGU must have same catalog source(s)
// returns: precachingSpec, error
func (r *ClusterGroupUpgradeReconciler) extractPrecachingSpecFromPolicies(
clusterGroupUpgrade *ranv1alpha1.ClusterGroupUpgrade,
policies []*unstructured.Unstructured) (ranv1alpha1.PrecachingSpec, error) {
var spec ranv1alpha1.PrecachingSpec
for _, policy := range policies {
objects, err := r.stripPolicy(policy.Object)
if err != nil {
return spec, err
}
for _, object := range objects {
kind := object["kind"]
switch kind {
case utils.PolicyTypeClusterVersion:
cvSpec := object["spec"].(map[string]interface{})
desiredUpdate, found := cvSpec["desiredUpdate"]
if !found {
continue
}
image, found := desiredUpdate.(map[string]interface{})["image"]
if found && image != "" {
if len(spec.PlatformImage) > 0 && spec.PlatformImage != image {
msg := fmt.Sprintf("Platform image must be set once, but %s and %s were given",
spec.PlatformImage, image)
meta.SetStatusCondition(&clusterGroupUpgrade.Status.Conditions, metav1.Condition{
Type: utils.PrecacheSpecValidCondition,
Status: metav1.ConditionFalse,
Reason: "PlatformImageConflict",
Message: msg})
return *new(ranv1alpha1.PrecachingSpec), nil
}
spec.PlatformImage = fmt.Sprintf("%s", image)
} else {
upstream := object["spec"].(map[string]interface {
})["upstream"].(string)
channel := object["spec"].(map[string]interface {
})["channel"].(string)
version := object["spec"].(map[string]interface {
})["desiredUpdate"].(map[string]interface{})["version"].(string)
image, err = r.getImageForVersionFromUpdateGraph(upstream, channel, version)
if err != nil {
meta.SetStatusCondition(&clusterGroupUpgrade.Status.Conditions, metav1.Condition{
Type: utils.PrecacheSpecValidCondition,
Status: metav1.ConditionFalse,
Reason: "PlatformImageInvalid",
Message: err.Error()})
return *new(ranv1alpha1.PrecachingSpec), nil
}
spec.PlatformImage = image.(string)
}
r.Log.Info("[extractPrecachingSpecFromPolicies]", "ClusterVersion image", spec.PlatformImage)
case utils.PolicyTypeSubscription:
packChan := fmt.Sprintf("%s:%s", object["spec"].(map[string]interface{})["name"],
object["spec"].(map[string]interface{})["channel"])
spec.OperatorsPackagesAndChannels = append(spec.OperatorsPackagesAndChannels, packChan)
r.Log.Info("[extractPrecachingSpecFromPolicies]", "Operator package:channel", packChan)
continue
case utils.PolicyTypeCatalogSource:
index := fmt.Sprintf("%s", object["spec"].(map[string]interface{})["image"])
spec.OperatorsIndexes = append(spec.OperatorsIndexes, index)
r.Log.Info("[extractPrecachingSpecFromPolicies]", "CatalogSource", index)
continue
default:
continue
}
}
}
return spec, nil
}
// stripPolicy strips policy information and returns the underlying objects
// returns: []interface{} - list of the underlying objects in the policy
// error
func (r *ClusterGroupUpgradeReconciler) stripPolicy(
policyObject map[string]interface{}) ([]map[string]interface{}, error) {
var objects []map[string]interface{}
policyTemplates, exists, err := unstructured.NestedFieldCopy(
policyObject, "spec", "policy-templates")
if err != nil {
return nil, err
}
if !exists {
return nil, fmt.Errorf("[stripPolicy] spec -> policy-templates not found")
}
for _, policyTemplate := range policyTemplates.([]interface{}) {
objTemplates := policyTemplate.(map[string]interface {
})["objectDefinition"].(map[string]interface {
})["spec"].(map[string]interface{})["object-templates"]
if objTemplates == nil {
return nil, fmt.Errorf("[stripPolicy] can't find object-templates in policyTemplate")
}
for _, objTemplate := range objTemplates.([]interface{}) {
spec := objTemplate.(map[string]interface{})["objectDefinition"]
if spec == nil {
return nil, fmt.Errorf("[stripPolicy] can't find any objectDefinition")
}
objects = append(objects, spec.(map[string]interface{}))
}
}
return objects, nil
}
// deployDependencies deploys precaching workload dependencies
// returns: ok (bool)
// error
func (r *ClusterGroupUpgradeReconciler) deployDependencies(
ctx context.Context,
clusterGroupUpgrade *ranv1alpha1.ClusterGroupUpgrade,
cluster string) (bool, error) {
spec := r.getPrecacheSpecTemplateData(clusterGroupUpgrade)
spec.Cluster = cluster
msg := fmt.Sprintf("%v", spec)
r.Log.Info("[deployDependencies]", "getPrecacheSpecTemplateData",
cluster, "status", "success", "content", msg)
err := r.createResourcesFromTemplates(ctx, spec, precacheDependenciesCreateTemplates)
if err != nil {
return false, err
}
spec.ViewUpdateIntervalSec = utils.ViewUpdateSec * len(clusterGroupUpgrade.Status.Precaching.Clusters)
err = r.createResourcesFromTemplates(ctx, spec, precacheDependenciesViewTemplates)
if err != nil {
return false, err
}
return true, nil
}
// getPrecacheimagePullSpec gets the precaching workload image pull spec.
// returns: image - pull spec string
// error
func (r *ClusterGroupUpgradeReconciler) getPrecacheimagePullSpec(
ctx context.Context,
clusterGroupUpgrade *ranv1alpha1.ClusterGroupUpgrade) (
string, error) {
overrides, err := r.getOverrides(ctx, clusterGroupUpgrade)
if err != nil {
r.Log.Error(err, "getOverrides failed ")
return "", err
}
image := overrides["precache.image"]
if image == "" {
image = os.Getenv("PRECACHE_IMG")
r.Log.Info("[getPrecacheimagePullSpec]", "workload image", image)
if image == "" {
return "", fmt.Errorf(
"can't find pre-caching image pull spec in environment or overrides")
}
}
return image, nil
}
// getPrecacheSpecTemplateData: Converts precaching payload spec to template data
// returns: precacheTemplateData (softwareSpec)
// error
func (r *ClusterGroupUpgradeReconciler) getPrecacheSpecTemplateData(
clusterGroupUpgrade *ranv1alpha1.ClusterGroupUpgrade) *templateData {
rv := new(templateData)
spec := clusterGroupUpgrade.Status.Precaching.Spec
rv.PlatformImage = spec.PlatformImage
rv.Operators.Indexes = spec.OperatorsIndexes
rv.Operators.PackagesAndChannels = spec.OperatorsPackagesAndChannels
return rv
}
// includeSoftwareSpecOverrides includes software spec overrides if present
// Overrides can be used to force a specific pre-cache workload or payload
// irrespective of the configured policies or the operator csv. This can be done
// by creating a Configmap object named "cluster-group-upgrade-overrides"
// in the CGU namespace with zero or more of the following "data" entries:
// 1. "precache.image" - pre-caching workload image pull spec. Normally derived
// from the operator ClusterServiceVersion object.
// 2. "platform.image" - OCP release image pull URI
// 3. "operators.indexes" - OLM index images (list of index image URIs)
// 4. "operators.packagesAndChannels" - operator packages and channels
// (list of <package:channel> string entries)
// If overrides are used, the configmap must be created before the CGU
// returns: *ranv1alpha1.PrecachingSpec, error
func (r *ClusterGroupUpgradeReconciler) includeSoftwareSpecOverrides(
ctx context.Context,
clusterGroupUpgrade *ranv1alpha1.ClusterGroupUpgrade, spec *ranv1alpha1.PrecachingSpec) (
ranv1alpha1.PrecachingSpec, error) {
rv := new(ranv1alpha1.PrecachingSpec)
overrides, err := r.getOverrides(ctx, clusterGroupUpgrade)
if err != nil {
return *rv, err
}
platformImage := overrides["platform.image"]
operatorsIndexes := strings.Split(overrides["operators.indexes"], "\n")
operatorsPackagesAndChannels := strings.Split(overrides["operators.packagesAndChannels"], "\n")
if platformImage == "" {
platformImage = spec.PlatformImage
}
rv.PlatformImage = platformImage
if overrides["operators.indexes"] == "" {
operatorsIndexes = spec.OperatorsIndexes
}
rv.OperatorsIndexes = operatorsIndexes
if overrides["operators.packagesAndChannels"] == "" {
operatorsPackagesAndChannels = spec.OperatorsPackagesAndChannels
}
rv.OperatorsPackagesAndChannels = operatorsPackagesAndChannels
if err != nil {
return *rv, err
}
return *rv, err
}
// checkPreCacheSpecConsistency checks software spec can be precached
// returns: consistent (bool), message (string)
func (r *ClusterGroupUpgradeReconciler) checkPreCacheSpecConsistency(
spec ranv1alpha1.PrecachingSpec) (consistent bool, message string) {
var operatorsRequested, platformRequested bool = true, true
if len(spec.OperatorsIndexes) == 0 {
operatorsRequested = false
}
if spec.PlatformImage == "" {
platformRequested = false
}
if operatorsRequested && len(spec.OperatorsPackagesAndChannels) == 0 {
return false, "inconsistent precaching configuration: olm index provided, but no packages"
}
if !operatorsRequested && !platformRequested {
return false, "inconsistent precaching configuration: no software spec provided"
}
return true, ""
}
|
[
"\"PRECACHE_IMG\""
] |
[] |
[
"PRECACHE_IMG"
] |
[]
|
["PRECACHE_IMG"]
|
go
| 1 | 0 | |
backend_api/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "backend_api.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
.mage/go.go
|
// Copyright © 2019 The Things Network Foundation, The Things Industries B.V.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ttnmage
import (
"bufio"
"bytes"
"fmt"
"os"
"strconv"
"strings"
"github.com/blang/semver"
"github.com/magefile/mage/mg"
"github.com/magefile/mage/sh"
)
// Go namespace.
type Go mg.Namespace
var minGoVersion = "1.11.4"
var goModuleEnv = map[string]string{
"GO111MODULE": "on",
}
var goTags = os.Getenv("GO_TAGS")
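// buildGoArgs prepends the optional -tags flag from GO_TAGS and the go subcommand to the argument list.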
func buildGoArgs(cmd string, args ...string) []string {
if goTags != "" {
args = append([]string{fmt.Sprintf("-tags=%s", goTags)}, args...)
}
return append([]string{cmd}, args...)
}
func execGo(cmd string, args ...string) error {
_, err := sh.Exec(goModuleEnv, os.Stdout, os.Stderr, "go", buildGoArgs(cmd, args...)...)
return err
}
func outputGo(cmd string, args ...string) (string, error) {
var buf bytes.Buffer
_, err := sh.Exec(goModuleEnv, &buf, os.Stderr, "go", buildGoArgs(cmd, args...)...)
if err != nil {
return "", err
}
return buf.String(), nil
}
// CheckVersion checks the installed Go version against the minimum version we support.
func (Go) CheckVersion() error {
if mg.Verbose() {
fmt.Println("Checking Go version")
}
versionStr, err := outputGo("version")
if err != nil {
return err
}
version := strings.Split(strings.TrimPrefix(strings.Fields(versionStr)[2], "go"), ".")
major, _ := strconv.Atoi(version[0])
minor, _ := strconv.Atoi(version[1])
var patch int
if len(version) > 2 {
patch, _ = strconv.Atoi(version[2])
}
current := semver.Version{Major: uint64(major), Minor: uint64(minor), Patch: uint64(patch)}
min, _ := semver.Parse(minGoVersion)
if current.LT(min) {
return fmt.Errorf("Your version of Go (%s) is not supported. Please install Go %s or later", versionStr, minGoVersion)
}
return nil
}
var goPackageDirs []string
func (Go) packageDirs() (packageDirs []string, err error) {
if goPackageDirs != nil {
return goPackageDirs, nil
}
defer func() {
goPackageDirs = packageDirs
}()
dirs, err := outputGo("list", "-f", "{{.Dir}}", "./...")
if err != nil {
return nil, err
}
all := strings.Split(strings.TrimSpace(dirs), "\n")
if selectedDirs == nil {
return all, nil
}
selected := make([]string, 0, len(all))
for _, dir := range all {
if selectedDirs[dir] {
selected = append(selected, dir)
}
}
return selected, nil
}
// Fmt formats and simplifies all Go files.
func (g Go) Fmt() error {
dirs, err := g.packageDirs()
if err != nil {
return err
}
if len(dirs) == 0 {
return nil
}
if mg.Verbose() {
fmt.Printf("Formatting and simplifying %d Go packages\n", len(dirs))
}
return sh.RunCmd("gofmt", "-w", "-s")(dirs...)
}
// Lint lints all Go files.
func (g Go) Lint() error {
dirs, err := g.packageDirs()
if err != nil {
return err
}
if len(dirs) == 0 {
return nil
}
if mg.Verbose() {
fmt.Printf("Linting %d Go packages\n", len(dirs))
}
return execGo("run", append([]string{"github.com/mgechev/revive", "-config=.revive.toml", "-formatter=stylish"}, dirs...)...)
}
// Misspell fixes common spelling mistakes in Go files.
func (g Go) Misspell() error {
dirs, err := g.packageDirs()
if err != nil {
return err
}
if len(dirs) == 0 {
return nil
}
if mg.Verbose() {
fmt.Printf("Fixing common spelling mistakes in %d Go packages\n", len(dirs))
}
return execGo("run", append([]string{"github.com/client9/misspell/cmd/misspell", "-w"}, dirs...)...)
}
// Unconvert removes unnecessary type conversions from Go files.
func (g Go) Unconvert() error {
dirs, err := g.packageDirs()
if err != nil {
return err
}
if len(dirs) == 0 {
return nil
}
if mg.Verbose() {
fmt.Printf("Removing unnecessary type conversions from %d Go packages\n", len(dirs))
}
return execGo("run", append([]string{"github.com/mdempsky/unconvert", "-safe", "-apply"}, dirs...)...)
}
// Quality runs code quality checks on Go files.
func (g Go) Quality() {
mg.Deps(g.Fmt, g.Misspell, g.Unconvert)
g.Lint() // Errors are allowed.
}
func init() {
preCommitChecks = append(preCommitChecks, Go.Quality)
}
func execGoTest(args ...string) error {
return execGo("test", append([]string{"-timeout=5m", "-failfast"}, args...)...)
}
// Test tests all Go packages.
func (Go) Test() error {
if mg.Verbose() {
fmt.Println("Testing all Go packages")
}
return execGoTest("./...")
}
var goBinaries = []string{"./cmd/ttn-lw-cli", "./cmd/ttn-lw-stack"}
// TestBinaries tests the Go binaries by executing them with the --help flag.
func (Go) TestBinaries() error {
if mg.Verbose() {
fmt.Println("Testing Go binaries")
}
for _, binary := range goBinaries {
_, err := outputGo("run", binary, "--help")
if err != nil {
return err
}
}
return nil
}
const goCoverageFile = "coverage.out"
// Cover tests all Go packages and writes test coverage into the coverage file.
func (Go) Cover() error {
if mg.Verbose() {
fmt.Println("Testing all Go packages with coverage")
}
return execGoTest("-cover", "-covermode=atomic", "-coverprofile="+goCoverageFile, "./...")
}
var coverallsIgnored = []string{
".fm.go:",
".pb.go:",
".pb.gw.go:",
".pb.validate.go",
}
// Coveralls sends the test coverage to Coveralls.
func (Go) Coveralls() error {
mg.Deps(Go.Cover)
if mg.Verbose() {
fmt.Println("Filtering Go coverage output")
}
inFile, err := os.Open(goCoverageFile)
if err != nil {
return err
}
outFile, err := os.OpenFile("coveralls_"+goCoverageFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
return err
}
defer func() {
outFile.Close()
os.Remove("coveralls_" + goCoverageFile)
}()
s := bufio.NewScanner(inFile)
nextLine:
for s.Scan() {
line := s.Text()
for _, suffix := range coverallsIgnored {
if strings.Contains(line, suffix) {
continue nextLine
}
}
if _, err = fmt.Fprintln(outFile, line); err != nil {
return err
}
}
if err = outFile.Close(); err != nil {
return err
}
service := os.Getenv("COVERALLS_SERVICE")
if service == "" {
service = "travis-ci"
}
if mg.Verbose() {
fmt.Println("Sending Go coverage to Coveralls")
}
return execGo("run", "github.com/mattn/goveralls", "-coverprofile=coveralls_"+goCoverageFile, "-service="+service, "-repotoken="+os.Getenv("COVERALLS_TOKEN"))
}
// Messages builds the file with translatable messages in Go code.
func (g Go) Messages() error {
return execGo("run", "./cmd/internal/generate_i18n.go")
}
|
[
"\"GO_TAGS\"",
"\"COVERALLS_SERVICE\"",
"\"COVERALLS_TOKEN\""
] |
[] |
[
"GO_TAGS",
"COVERALLS_SERVICE",
"COVERALLS_TOKEN"
] |
[]
|
["GO_TAGS", "COVERALLS_SERVICE", "COVERALLS_TOKEN"]
|
go
| 3 | 0 | |
pkg/store/ocistore/oci_store.go
|
package ocistore
import (
"context"
"os"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/pkg/errors"
"github.com/replicatedhq/kots/pkg/k8sutil"
kotsadmobjects "github.com/replicatedhq/kots/pkg/kotsadm/objects"
"github.com/replicatedhq/kots/pkg/util"
corev1 "k8s.io/api/core/v1"
kuberneteserrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
/* OCIStore stores most data in an OCI compatible image repository,
but does not make guarantees that everything is stored there.
Some data is stored locally in Kubernetes ConfigMaps and Secrets
to speed up retrieval.
A note about "transactions": in the pg store, there were a few
places that relied on transactions to ensure integrity.
Here, this is stored in configmaps and secrets, and this integrity
is provided by the Kubernetes API's enforcement of puts.
If a caller GETs a configmap, updates it and then tries to PUT that
configmap, but another process has modified it, the PUT will
be rejected. This level of consistency is all that's needed for KOTS.
*/
var (
ErrNotFound = errors.New("not found")
ErrNotImplemented = errors.New("not implemented in ocistore")
)
type cachedTaskStatus struct {
expirationTime time.Time
taskStatus taskStatus
}
type OCIStore struct {
BaseURI string
PlainHTTP bool
sessionSecret *corev1.Secret
sessionExpiration time.Time
cachedTaskStatus map[string]*cachedTaskStatus
}
func (s *OCIStore) Init() error {
return nil
}
func (s *OCIStore) WaitForReady(ctx context.Context) error {
return nil
}
func (s *OCIStore) IsNotFound(err error) bool {
if err == nil {
return false
}
cause := errors.Cause(err)
if cause == ErrNotFound {
return true
}
if err, ok := cause.(awserr.Error); ok {
switch err.Code() {
case "NotFound", "NoSuchKey":
return true
default:
return false
}
}
if kuberneteserrors.IsNotFound(cause) {
return true
}
return false
}
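// canIgnoreEtcdError reports whether an etcd error is transient (connection refused, timeout, EOF) and can safely be ignored.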
func canIgnoreEtcdError(err error) bool {
if err == nil {
return true
}
if strings.Contains(err.Error(), "connection refused") {
return true
}
if strings.Contains(err.Error(), "request timed out") {
return true
}
if strings.Contains(err.Error(), "EOF") {
return true
}
return false
}
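// StoreFromEnv builds an OCIStore from the STORAGE_BASEURI and STORAGE_BASEURI_PLAINHTTP environment variables.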
func StoreFromEnv() *OCIStore {
return &OCIStore{
BaseURI: os.Getenv("STORAGE_BASEURI"),
PlainHTTP: os.Getenv("STORAGE_BASEURI_PLAINHTTP") == "true",
}
}
func (s *OCIStore) getSecret(name string) (*corev1.Secret, error) {
clientset, err := k8sutil.GetClientset()
if err != nil {
return nil, errors.Wrap(err, "failed to get clientset")
}
existingSecret, err := clientset.CoreV1().Secrets(util.PodNamespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil && !kuberneteserrors.IsNotFound(err) {
return nil, errors.Wrap(err, "failed to get secret")
} else if kuberneteserrors.IsNotFound(err) {
secret := corev1.Secret{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Secret",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: util.PodNamespace,
Labels: map[string]string{
"owner": "kotsadm",
},
},
Data: map[string][]byte{},
}
createdSecret, err := clientset.CoreV1().Secrets(util.PodNamespace).Create(context.TODO(), &secret, metav1.CreateOptions{})
if err != nil {
return nil, errors.Wrap(err, "failed to create secret")
}
return createdSecret, nil
}
return existingSecret, nil
}
func (s *OCIStore) getConfigmap(name string) (*corev1.ConfigMap, error) {
clientset, err := k8sutil.GetClientset()
if err != nil {
return nil, errors.Wrap(err, "failed to get clientset")
}
existingConfigmap, err := clientset.CoreV1().ConfigMaps(util.PodNamespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil && !kuberneteserrors.IsNotFound(err) {
return nil, errors.Wrap(err, "failed to get configmap")
} else if kuberneteserrors.IsNotFound(err) {
configmap := corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ConfigMap",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: util.PodNamespace,
Labels: map[string]string{
"owner": "kotsadm",
},
},
Data: map[string]string{},
}
createdConfigmap, err := clientset.CoreV1().ConfigMaps(util.PodNamespace).Create(context.TODO(), &configmap, metav1.CreateOptions{})
if err != nil {
return nil, errors.Wrap(err, "failed to create configmap")
}
return createdConfigmap, nil
}
return existingConfigmap, nil
}
func (s *OCIStore) updateConfigmap(configmap *corev1.ConfigMap) error {
clientset, err := k8sutil.GetClientset()
if err != nil {
return errors.Wrap(err, "failed to get clientset")
}
_, err = clientset.CoreV1().ConfigMaps(util.PodNamespace).Update(context.Background(), configmap, metav1.UpdateOptions{})
if err != nil {
return errors.Wrap(err, "failed to update config map")
}
return nil
}
func (s *OCIStore) ensureApplicationMetadata(applicationMetadata string, namespace string, upstreamURI string) error {
clientset, err := k8sutil.GetClientset()
if err != nil {
return errors.Wrap(err, "failed to get clientset")
}
existingConfigMap, err := clientset.CoreV1().ConfigMaps(namespace).Get(context.TODO(), "kotsadm-application-metadata", metav1.GetOptions{})
if err != nil {
if !kuberneteserrors.IsNotFound(err) {
return errors.Wrap(err, "failed to get existing metadata config map")
}
metadata := []byte(applicationMetadata)
_, err := clientset.CoreV1().ConfigMaps(namespace).Create(context.TODO(), kotsadmobjects.ApplicationMetadataConfig(metadata, namespace, upstreamURI), metav1.CreateOptions{})
if err != nil {
return errors.Wrap(err, "failed to create metadata config map")
}
return nil
}
if existingConfigMap.Data == nil {
existingConfigMap.Data = map[string]string{}
}
existingConfigMap.Data["application.yaml"] = applicationMetadata
existingConfigMap.Data["upstreamUri"] = upstreamURI
_, err = clientset.CoreV1().ConfigMaps(util.PodNamespace).Update(context.Background(), existingConfigMap, metav1.UpdateOptions{})
if err != nil {
return errors.Wrap(err, "failed to update config map")
}
return nil
}
|
[
"\"STORAGE_BASEURI\"",
"\"STORAGE_BASEURI_PLAINHTTP\""
] |
[] |
[
"STORAGE_BASEURI",
"STORAGE_BASEURI_PLAINHTTP"
] |
[]
|
["STORAGE_BASEURI", "STORAGE_BASEURI_PLAINHTTP"]
|
go
| 2 | 0 | |
main.go
|
package main
import (
"fmt"
"github.com/prometheus/client_golang/prometheus/promhttp"
"log"
"net/http"
"os"
"time"
)
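// main reads the bot token from the TOKEN environment variable, launches the recommended number of shards, and serves Prometheus metrics on :8080.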
func main() {
token := os.Getenv("TOKEN")
client := Shard{Token: token}
GatewayData, err := client.GatewayBot()
m := new(MetricsEngine)
m.Init()
if err != nil {
log.Fatal("Unable to get GatewayBot data")
}
if GatewayData.Shards < 1 {
log.Fatal("Failed to get recommended shard count from Discord")
}
log.Printf("Launching %d shards...", GatewayData.Shards)
var shards []Shard
for i := 0; i < GatewayData.Shards; i++ {
shard := NewShard(GatewayData.URL, token, GatewayData.Shards, i, m)
err = shard.Open()
if err != nil {
log.Fatal("Unable to connect to Discord: ", err)
}
shards = append(shards, shard)
time.Sleep(5 * time.Second)
}
// Wait here until CTRL-C or other term signal is received.
fmt.Println("Bot is now running. Press CTRL-C to exit.")
http.Handle("/metrics", promhttp.Handler())
log.Fatal(http.ListenAndServe(":8080", nil))
// Cleanly close down the Discord session.
_ = client.Close()
}
|
[
"\"TOKEN\""
] |
[] |
[
"TOKEN"
] |
[]
|
["TOKEN"]
|
go
| 1 | 0 | |
http-server/main.go
|
package main
import (
"fmt"
"log"
"net/http"
"os"
)
func main() {
// register hello function to handle all requests
mux := http.NewServeMux()
mux.HandleFunc("/", hello)
// use PORT environment variable, or default to 8080
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
// start the web server on port and accept requests
log.Printf("Server listening on port %s", port)
log.Fatal(http.ListenAndServe(":"+port, mux))
}
// hello responds to the request with a plain-text "Hello, world" message.
func hello(w http.ResponseWriter, r *http.Request) {
log.Printf("Serving request: %s", r.URL.Path)
host, _ := os.Hostname()
fmt.Fprintf(w, "Hello, world!\n")
fmt.Fprintf(w, "Version: 1.0.0\n")
fmt.Fprintf(w, "Hostname: %s\n", host)
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
edge/cmd/edgecore/app/server.go
|
package app
import (
"errors"
"fmt"
"os"
"github.com/mitchellh/go-ps"
"github.com/spf13/cobra"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/cli/globalflag"
"k8s.io/component-base/term"
"k8s.io/klog/v2"
"github.com/kubeedge/beehive/pkg/core"
"github.com/kubeedge/kubeedge/edge/cmd/edgecore/app/options"
"github.com/kubeedge/kubeedge/edge/pkg/common/dbm"
"github.com/kubeedge/kubeedge/edge/pkg/devicetwin"
"github.com/kubeedge/kubeedge/edge/pkg/edged"
"github.com/kubeedge/kubeedge/edge/pkg/edgehub"
"github.com/kubeedge/kubeedge/edge/pkg/edgestream"
"github.com/kubeedge/kubeedge/edge/pkg/eventbus"
"github.com/kubeedge/kubeedge/edge/pkg/metamanager"
"github.com/kubeedge/kubeedge/edge/pkg/servicebus"
"github.com/kubeedge/kubeedge/edge/test"
"github.com/kubeedge/kubeedge/pkg/apis/componentconfig/edgecore/v1alpha1"
"github.com/kubeedge/kubeedge/pkg/apis/componentconfig/edgecore/v1alpha1/validation"
"github.com/kubeedge/kubeedge/pkg/util"
"github.com/kubeedge/kubeedge/pkg/util/flag"
"github.com/kubeedge/kubeedge/pkg/version"
"github.com/kubeedge/kubeedge/pkg/version/verflag"
)
// NewEdgeCoreCommand create edgecore cmd
func NewEdgeCoreCommand() *cobra.Command {
opts := options.NewEdgeCoreOptions()
cmd := &cobra.Command{
Use: "edgecore",
Long: `Edgecore is the core edge part of KubeEdge, which contains six modules: devicetwin, edged,
edgehub, eventbus, metamanager, and servicebus. DeviceTwin is responsible for storing device status
and syncing device status to the cloud. It also provides query interfaces for applications. Edged is an
agent that runs on edge nodes and manages containerized applications and devices. Edgehub is a web socket
client responsible for interacting with the cloud service for edge computing (like Edge Controller in the KubeEdge
architecture). This includes syncing cloud-side resource updates to the edge, and reporting
edge-side host and device status changes to the cloud. EventBus is an MQTT client to interact with MQTT
servers (mosquitto), offering publish and subscribe capabilities to other components. MetaManager
is the message processor between edged and edgehub. It is also responsible for storing/retrieving metadata
to/from a lightweight database (SQLite). ServiceBus is an HTTP client to interact with HTTP servers (REST),
offering HTTP client capabilities to cloud components to reach HTTP servers running at the edge.`,
Run: func(cmd *cobra.Command, args []string) {
verflag.PrintAndExitIfRequested()
flag.PrintMinConfigAndExitIfRequested(v1alpha1.NewMinEdgeCoreConfig())
flag.PrintDefaultConfigAndExitIfRequested(v1alpha1.NewDefaultEdgeCoreConfig())
flag.PrintFlags(cmd.Flags())
if errs := opts.Validate(); len(errs) > 0 {
klog.Fatal(util.SpliceErrors(errs))
}
config, err := opts.Config()
if err != nil {
klog.Fatal(err)
}
if errs := validation.ValidateEdgeCoreConfiguration(config); len(errs) > 0 {
klog.Fatal(util.SpliceErrors(errs.ToAggregate().Errors()))
}
// To help debugging, immediately log version
klog.Infof("Version: %+v", version.Get())
// Check the running environment by default
checkEnv := os.Getenv("CHECK_EDGECORE_ENVIRONMENT")
// Force skip check if enable metaserver
if config.Modules.MetaManager.MetaServer.Enable {
checkEnv = "false"
}
if checkEnv != "false" {
// Check running environment before run edge core
if err := environmentCheck(); err != nil {
klog.Fatal(fmt.Errorf("Failed to check the running environment: %v", err))
}
}
// get edge node local ip
if config.Modules.Edged.NodeIP == "" {
hostnameOverride := util.GetHostname()
localIP, _ := util.GetLocalIP(hostnameOverride)
config.Modules.Edged.NodeIP = localIP
}
registerModules(config)
// start all modules
core.Run()
},
}
fs := cmd.Flags()
namedFs := opts.Flags()
flag.AddFlags(namedFs.FlagSet("global"))
verflag.AddFlags(namedFs.FlagSet("global"))
globalflag.AddGlobalFlags(namedFs.FlagSet("global"), cmd.Name())
for _, f := range namedFs.FlagSets {
fs.AddFlagSet(f)
}
usageFmt := "Usage:\n %s\n"
cols, _, _ := term.TerminalSize(cmd.OutOrStdout())
cmd.SetUsageFunc(func(cmd *cobra.Command) error {
fmt.Fprintf(cmd.OutOrStderr(), usageFmt, cmd.UseLine())
cliflag.PrintSections(cmd.OutOrStderr(), namedFs, cols)
return nil
})
cmd.SetHelpFunc(func(cmd *cobra.Command, args []string) {
fmt.Fprintf(cmd.OutOrStdout(), "%s\n\n"+usageFmt, cmd.Long, cmd.UseLine())
cliflag.PrintSections(cmd.OutOrStdout(), namedFs, cols)
})
return cmd
}
// environmentCheck check the environment before edgecore start
// if Check failed, return errors
func environmentCheck() error {
processes, err := ps.Processes()
if err != nil {
return err
}
for _, process := range processes {
// if kubelet is running, return error
if process.Executable() == "kubelet" {
return errors.New("kubelet should not running on edge node when running edgecore")
}
// if kube-proxy is running, return error
if process.Executable() == "kube-proxy" {
return errors.New("kube-proxy should not running on edge node when running edgecore")
}
}
return nil
}
// registerModules register all the modules started in edgecore
func registerModules(c *v1alpha1.EdgeCoreConfig) {
devicetwin.Register(c.Modules.DeviceTwin, c.Modules.Edged.HostnameOverride)
edged.Register(c.Modules.Edged)
edgehub.Register(c.Modules.EdgeHub, c.Modules.Edged.HostnameOverride)
eventbus.Register(c.Modules.EventBus, c.Modules.Edged.HostnameOverride)
metamanager.Register(c.Modules.MetaManager)
servicebus.Register(c.Modules.ServiceBus)
edgestream.Register(c.Modules.EdgeStream, c.Modules.Edged.HostnameOverride, c.Modules.Edged.NodeIP)
test.Register(c.Modules.DBTest)
// Note: this must come last, so that all modules have registered before it executes
dbm.InitDBConfig(c.DataBase.DriverName, c.DataBase.AliasName, c.DataBase.DataSource)
}
|
[
"\"CHECK_EDGECORE_ENVIRONMENT\""
] |
[] |
[
"CHECK_EDGECORE_ENVIRONMENT"
] |
[]
|
["CHECK_EDGECORE_ENVIRONMENT"]
|
go
| 1 | 0 | |
backend/auth_system/asgi.py
|
"""
ASGI config for auth_system project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'auth_system.settings')
application = get_asgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
worker/main.go
|
package worker
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"time"
"github.com/otiai10/daap"
"github.com/seqpod/seqpod-api/models"
mgo "gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
// Enqueue enqueues a job to the worker queue.
// TODO:
// For now, the worker runtime is spawned on a big worker instance machine called "Elephant".
// It should be spawned on an instance that is automatically created and terminated by "Algnome".
// TODO:
// For now, result files are placed in the API server's /tmp directory.
// They should be placed in an S3 bucket.
func Enqueue(job *models.Job) {
session, err := mgo.Dial(os.Getenv("MONGODB_URI"))
if err != nil {
logInternalError("DB SESSION", err)
return
}
defer func() {
if err := recover(); err != nil {
failed(session, job, fmt.Errorf("%v", err))
}
session.Close()
}()
c := models.Jobs(session)
if err = c.UpdateId(job.ID, bson.M{
"$set": bson.M{
"status": models.Running,
"started_at": time.Now(),
},
}); err != nil {
failed(session, job, err)
return
}
if err = c.FindId(job.ID).One(job); err != nil {
failed(session, job, err)
return
}
machine, err := fetchMachineConfig()
if err != nil {
failed(session, job, err)
return
}
if len(job.Workflow) == 0 {
failed(session, job, fmt.Errorf("No any workflow specified"))
return
}
img := job.Workflow[0]
env := []string{
fmt.Sprintf("REFERENCE=%s", "GRCh37.fa"),
}
for key, input := range job.Resource.Inputs {
env = append(env, fmt.Sprintf("%s=%s", key, input))
}
for key, param := range job.Parameters {
env = append(env, fmt.Sprintf("%s=%s", key, param))
}
// Ensure the outputs directory exists.
os.MkdirAll(filepath.Join(job.Resource.URL, "out"), os.ModePerm)
arg := daap.Args{
Machine: machine,
Mounts: []daap.Mount{
// Mount the inputs and outputs directories.
daap.Volume(job.Resource.URL, "/var/data"),
},
Env: env,
}
process := daap.NewProcess(img, arg)
ctx := context.Background()
if err = process.Run(ctx); err != nil {
failed(session, job, err)
return
}
out, err := ioutil.ReadAll(process.Stdout)
if err != nil {
failed(session, job, err)
return
}
serr, err := ioutil.ReadAll(process.Stderr)
if err != nil {
failed(session, job, err)
return
}
applog, err := ioutil.ReadAll(process.Log)
if err != nil {
failed(session, job, err)
return
}
err = models.Jobs(session).UpdateId(job.ID, bson.M{
"$set": bson.M{
"stdout": string(out),
"stderr": string(serr),
"applog": string(applog),
},
})
if err != nil {
failed(session, job, err)
return
}
// TODO: Use "Salamander"
results, err := detectResultFiles(job)
if err != nil {
failed(session, job, err)
return
}
if err := c.UpdateId(job.ID, bson.M{
"$set": bson.M{
"status": models.Completed,
"results": results,
"finished_at": time.Now(),
},
}); err != nil {
failed(session, job, err)
}
}
func failed(session *mgo.Session, job *models.Job, err error) {
if e := models.Jobs(session).UpdateId(job.ID, bson.M{
"$push": bson.M{
"errors": err.Error(),
},
"$set": bson.M{
"status": models.Errored,
"finished_at": time.Now(),
},
}); e != nil {
logInternalError("Update", e)
}
}
func logInternalError(prefix string, err error) {
log.Printf("[WORKER][%s] %v\n", prefix, err.Error())
}
// TODO: Result files are NOT ALWAYS at the root level of the directory.
// In the future, this should be managed by "Salamander"
// to place everything on S3 buckets.
func detectResultFiles(job *models.Job) ([]string, error) {
files, err := ioutil.ReadDir(filepath.Join(job.Resource.URL, "out"))
if err != nil {
return nil, err
}
results := []string{}
for _, f := range files {
results = append(results, f.Name())
}
return results, nil
}
// fetchMachineConfig fetches machine configs
// from the mounted "/var/machine" directory
// so that the user can specify the machine at the docker-compose CLI layer.
// TODO: The machine should be provided by "Algnome";
// for now, it provides "Elephant".
func fetchMachineConfig() (*daap.MachineConfig, error) {
// This directory is bound by docker-compose; check docker-compose.yaml.
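// An illustrative (hypothetical) docker-compose volume entry that would provide this directory:
//   volumes:
//     - ./machine:/var/machine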
p := "/var/machine"
f, err := os.Open(filepath.Join(p, "config.json"))
if err != nil {
return nil, err
}
defer f.Close()
config := new(models.MachineConfig)
if err := json.NewDecoder(f).Decode(config); err != nil {
return nil, err
}
return &daap.MachineConfig{
Host: fmt.Sprintf("tcp://%s:2376", config.Driver.IPAddress),
CertPath: p,
}, nil
}
|
[
"\"MONGODB_URI\""
] |
[] |
[
"MONGODB_URI"
] |
[]
|
["MONGODB_URI"]
|
go
| 1 | 0 | |
examples/service/chat/role/delete/role_delete_example.go
|
package main
import (
"log"
"os"
"github.com/RJPearson94/twilio-sdk-go"
v2 "github.com/RJPearson94/twilio-sdk-go/service/chat/v2"
"github.com/RJPearson94/twilio-sdk-go/session/credentials"
)
var chatClient *v2.Chat
func init() {
creds, err := credentials.New(credentials.Account{
Sid: os.Getenv("TWILIO_ACCOUNT_SID"),
AuthToken: os.Getenv("TWILIO_AUTH_TOKEN"),
})
if err != nil {
log.Panicf("%s", err.Error())
}
chatClient = twilio.NewWithCredentials(creds).Chat.V2
}
func main() {
err := chatClient.
Service("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").
Role("RLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").
Delete()
if err != nil {
log.Panicf("%s", err.Error())
}
log.Println("Role deleted")
}
|
[
"\"TWILIO_ACCOUNT_SID\"",
"\"TWILIO_AUTH_TOKEN\""
] |
[] |
[
"TWILIO_AUTH_TOKEN",
"TWILIO_ACCOUNT_SID"
] |
[]
|
["TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID"]
|
go
| 2 | 0 | |
kms/api-client/snippets_test.py
|
# Copyright 2017 Google, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import os
import time
import uuid
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding, utils
from google.cloud import kms
from google.cloud.kms_v1.proto import resources_pb2
import pytest
from create_key_asymmetric_decrypt import create_key_asymmetric_decrypt
from create_key_asymmetric_sign import create_key_asymmetric_sign
from create_key_hsm import create_key_hsm
from create_key_labels import create_key_labels
from create_key_ring import create_key_ring
from create_key_rotation_schedule import create_key_rotation_schedule
from create_key_symmetric_encrypt_decrypt import create_key_symmetric_encrypt_decrypt
from create_key_version import create_key_version
from decrypt_asymmetric import decrypt_asymmetric
from decrypt_symmetric import decrypt_symmetric
from destroy_key_version import destroy_key_version
from disable_key_version import disable_key_version
from enable_key_version import enable_key_version
from encrypt_asymmetric import encrypt_asymmetric
from encrypt_symmetric import encrypt_symmetric
from get_key_labels import get_key_labels
from get_key_version_attestation import get_key_version_attestation
from get_public_key import get_public_key
from iam_add_member import iam_add_member
from iam_get_policy import iam_get_policy
from iam_remove_member import iam_remove_member
from quickstart import quickstart
from restore_key_version import restore_key_version
from sign_asymmetric import sign_asymmetric
from update_key_add_rotation import update_key_add_rotation
from update_key_remove_labels import update_key_remove_labels
from update_key_remove_rotation import update_key_remove_rotation
from update_key_set_primary import update_key_set_primary
from update_key_update_labels import update_key_update_labels
from verify_asymmetric_ec import verify_asymmetric_ec
from verify_asymmetric_rsa import verify_asymmetric_rsa
@pytest.fixture(scope="module")
def client():
return kms.KeyManagementServiceClient()
@pytest.fixture(scope="module")
def project_id():
return os.environ['GCLOUD_PROJECT']
@pytest.fixture(scope="module")
def location_id():
return "us-east1"
@pytest.fixture(scope="module")
def key_ring_id(client, project_id, location_id):
location_name = client.location_path(project_id, location_id)
key_ring_id = '{}'.format(uuid.uuid4())
key_ring = client.create_key_ring(location_name, key_ring_id, {})
yield key_ring_id
for key in client.list_crypto_keys(key_ring.name):
if key.rotation_period.seconds > 0 or key.next_rotation_time.seconds > 0:
# https://github.com/googleapis/gapic-generator-python/issues/364
updated_key = resources_pb2.CryptoKey()
updated_key.name = key.name
update_mask = {'paths': ['rotation_period', 'next_rotation_time']}
client.update_crypto_key(updated_key, update_mask)
f = 'state != DESTROYED AND state != DESTROY_SCHEDULED'
for version in client.list_crypto_key_versions(key.name, filter_=f):
client.destroy_crypto_key_version(version.name)
@pytest.fixture(scope="module")
def asymmetric_decrypt_key_id(client, project_id, location_id, key_ring_id):
key_ring_name = client.key_ring_path(project_id, location_id, key_ring_id)
key_id = '{}'.format(uuid.uuid4())
key = client.create_crypto_key(key_ring_name, key_id, {
'purpose': kms.enums.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_DECRYPT,
'version_template': {
'algorithm': kms.enums.CryptoKeyVersion.CryptoKeyVersionAlgorithm.RSA_DECRYPT_OAEP_2048_SHA256
},
'labels': {'foo': 'bar', 'zip': 'zap'}
})
wait_for_ready(client, '{}/cryptoKeyVersions/1'.format(key.name))
return key_id
@pytest.fixture(scope="module")
def asymmetric_sign_ec_key_id(client, project_id, location_id, key_ring_id):
key_ring_name = client.key_ring_path(project_id, location_id, key_ring_id)
key_id = '{}'.format(uuid.uuid4())
key = client.create_crypto_key(key_ring_name, key_id, {
'purpose': kms.enums.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN,
'version_template': {
'algorithm': kms.enums.CryptoKeyVersion.CryptoKeyVersionAlgorithm.EC_SIGN_P256_SHA256
},
'labels': {'foo': 'bar', 'zip': 'zap'}
})
wait_for_ready(client, '{}/cryptoKeyVersions/1'.format(key.name))
return key_id
@pytest.fixture(scope="module")
def asymmetric_sign_rsa_key_id(client, project_id, location_id, key_ring_id):
key_ring_name = client.key_ring_path(project_id, location_id, key_ring_id)
key_id = '{}'.format(uuid.uuid4())
key = client.create_crypto_key(key_ring_name, key_id, {
'purpose': kms.enums.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN,
'version_template': {
'algorithm': kms.enums.CryptoKeyVersion.CryptoKeyVersionAlgorithm.RSA_SIGN_PKCS1_2048_SHA256
},
'labels': {'foo': 'bar', 'zip': 'zap'}
})
wait_for_ready(client, '{}/cryptoKeyVersions/1'.format(key.name))
return key_id
@pytest.fixture(scope="module")
def hsm_key_id(client, project_id, location_id, key_ring_id):
key_ring_name = client.key_ring_path(project_id, location_id, key_ring_id)
key_id = '{}'.format(uuid.uuid4())
key = client.create_crypto_key(key_ring_name, key_id, {
'purpose': kms.enums.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT,
'version_template': {
'algorithm': kms.enums.CryptoKeyVersion.CryptoKeyVersionAlgorithm.GOOGLE_SYMMETRIC_ENCRYPTION,
'protection_level': kms.enums.ProtectionLevel.HSM
},
'labels': {'foo': 'bar', 'zip': 'zap'}
})
wait_for_ready(client, '{}/cryptoKeyVersions/1'.format(key.name))
return key_id
@pytest.fixture(scope="module")
def symmetric_key_id(client, project_id, location_id, key_ring_id):
key_ring_name = client.key_ring_path(project_id, location_id, key_ring_id)
key_id = '{}'.format(uuid.uuid4())
key = client.create_crypto_key(key_ring_name, key_id, {
'purpose': kms.enums.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT,
'version_template': {
'algorithm': kms.enums.CryptoKeyVersion.CryptoKeyVersionAlgorithm.GOOGLE_SYMMETRIC_ENCRYPTION
},
'labels': {'foo': 'bar', 'zip': 'zap'}
})
wait_for_ready(client, '{}/cryptoKeyVersions/1'.format(key.name))
return key_id
def wait_for_ready(client, key_version_name):
for i in range(5):
key_version = client.get_crypto_key_version(key_version_name)
if key_version.state == kms.enums.CryptoKeyVersion.CryptoKeyVersionState.ENABLED:
return
time.sleep(0.1*(i**2))
pytest.fail('{} not ready'.format(key_version_name))
def test_create_key_asymmetric_decrypt(project_id, location_id, key_ring_id):
key_id = '{}'.format(uuid.uuid4())
key = create_key_asymmetric_decrypt(project_id, location_id, key_ring_id, key_id)
assert key.purpose == kms.enums.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_DECRYPT
assert key.version_template.algorithm == kms.enums.CryptoKeyVersion.CryptoKeyVersionAlgorithm.RSA_DECRYPT_OAEP_2048_SHA256
def test_create_key_asymmetric_sign(project_id, location_id, key_ring_id):
key_id = '{}'.format(uuid.uuid4())
key = create_key_asymmetric_sign(project_id, location_id, key_ring_id, key_id)
assert key.purpose == kms.enums.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN
assert key.version_template.algorithm == kms.enums.CryptoKeyVersion.CryptoKeyVersionAlgorithm.RSA_SIGN_PKCS1_2048_SHA256
def test_create_key_hsm(project_id, location_id, key_ring_id):
key_id = '{}'.format(uuid.uuid4())
key = create_key_hsm(project_id, location_id, key_ring_id, key_id)
assert key.purpose == kms.enums.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT
assert key.version_template.algorithm == kms.enums.CryptoKeyVersion.CryptoKeyVersionAlgorithm.GOOGLE_SYMMETRIC_ENCRYPTION
assert key.version_template.protection_level == kms.enums.ProtectionLevel.HSM
def test_create_key_labels(project_id, location_id, key_ring_id):
key_id = '{}'.format(uuid.uuid4())
key = create_key_labels(project_id, location_id, key_ring_id, key_id)
assert key.purpose == kms.enums.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT
assert key.version_template.algorithm == kms.enums.CryptoKeyVersion.CryptoKeyVersionAlgorithm.GOOGLE_SYMMETRIC_ENCRYPTION
assert key.labels == {'team': 'alpha', 'cost_center': 'cc1234'}
def test_create_key_ring(project_id, location_id):
key_ring_id = '{}'.format(uuid.uuid4())
key_ring = create_key_ring(project_id, location_id, key_ring_id)
assert key_ring
def test_create_key_rotation_schedule(project_id, location_id, key_ring_id):
key_id = '{}'.format(uuid.uuid4())
key = create_key_rotation_schedule(project_id, location_id, key_ring_id, key_id)
assert key.rotation_period.seconds == 60*60*24*30
assert key.next_rotation_time.seconds > 0
def test_create_key_symmetric_encrypt_decrypt(project_id, location_id, key_ring_id):
key_id = '{}'.format(uuid.uuid4())
key = create_key_symmetric_encrypt_decrypt(project_id, location_id, key_ring_id, key_id)
assert key.purpose == kms.enums.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT
assert key.version_template.algorithm == kms.enums.CryptoKeyVersion.CryptoKeyVersionAlgorithm.GOOGLE_SYMMETRIC_ENCRYPTION
def test_create_key_version(project_id, location_id, key_ring_id, symmetric_key_id):
version = create_key_version(project_id, location_id, key_ring_id, symmetric_key_id)
assert version
def test_decrypt_asymmetric(client, project_id, location_id, key_ring_id, asymmetric_decrypt_key_id):
message = 'my message'.encode('utf-8')
key_version_name = client.crypto_key_version_path(project_id, location_id, key_ring_id, asymmetric_decrypt_key_id, '1')
public_key = client.get_public_key(key_version_name)
pem = public_key.pem.encode('utf-8')
rsa_key = serialization.load_pem_public_key(pem, default_backend())
pad = padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None)
ciphertext = rsa_key.encrypt(message, pad)
response = decrypt_asymmetric(project_id, location_id, key_ring_id, asymmetric_decrypt_key_id, '1', ciphertext)
assert response.plaintext == message
def test_decrypt_symmetric(client, project_id, location_id, key_ring_id, symmetric_key_id):
plaintext = 'my message'.encode('utf-8')
key_version_name = client.crypto_key_path(project_id, location_id, key_ring_id, symmetric_key_id)
encrypt_response = client.encrypt(key_version_name, plaintext)
ciphertext = encrypt_response.ciphertext
decrypt_response = decrypt_symmetric(project_id, location_id, key_ring_id, symmetric_key_id, ciphertext)
assert decrypt_response.plaintext == plaintext
def test_destroy_restore_key_version(client, project_id, location_id, key_ring_id, asymmetric_decrypt_key_id):
key_name = client.crypto_key_path(project_id, location_id, key_ring_id, asymmetric_decrypt_key_id)
version = client.create_crypto_key_version(key_name, {})
version_id = version.name.split('/')[-1]
wait_for_ready(client, version.name)
destroyed_version = destroy_key_version(project_id, location_id, key_ring_id, asymmetric_decrypt_key_id, version_id)
assert destroyed_version.state == kms.enums.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED
restored_version = restore_key_version(project_id, location_id, key_ring_id, asymmetric_decrypt_key_id, version_id)
assert restored_version.state == kms.enums.CryptoKeyVersion.CryptoKeyVersionState.DISABLED
def test_disable_enable_key_version(client, project_id, location_id, key_ring_id, asymmetric_decrypt_key_id):
key_name = client.crypto_key_path(project_id, location_id, key_ring_id, asymmetric_decrypt_key_id)
version = client.create_crypto_key_version(key_name, {})
version_id = version.name.split('/')[-1]
wait_for_ready(client, version.name)
disabled_version = disable_key_version(project_id, location_id, key_ring_id, asymmetric_decrypt_key_id, version_id)
assert disabled_version.state == kms.enums.CryptoKeyVersion.CryptoKeyVersionState.DISABLED
enabled_version = enable_key_version(project_id, location_id, key_ring_id, asymmetric_decrypt_key_id, version_id)
assert enabled_version.state == kms.enums.CryptoKeyVersion.CryptoKeyVersionState.ENABLED
def test_encrypt_asymmetric(client, project_id, location_id, key_ring_id, asymmetric_decrypt_key_id):
plaintext = 'my message'
ciphertext = encrypt_asymmetric(project_id, location_id, key_ring_id, asymmetric_decrypt_key_id, '1', plaintext)
key_version_name = client.crypto_key_version_path(project_id, location_id, key_ring_id, asymmetric_decrypt_key_id, '1')
response = client.asymmetric_decrypt(key_version_name, ciphertext)
assert response.plaintext == plaintext.encode('utf-8')
def test_encrypt_symmetric(client, project_id, location_id, key_ring_id, symmetric_key_id):
plaintext = 'my message'
encrypt_response = encrypt_symmetric(project_id, location_id, key_ring_id, symmetric_key_id, plaintext)
key_name = client.crypto_key_path(project_id, location_id, key_ring_id, symmetric_key_id)
decrypt_response = client.decrypt(key_name, encrypt_response.ciphertext)
assert decrypt_response.plaintext == plaintext.encode('utf-8')
def test_get_key_labels(project_id, location_id, key_ring_id, symmetric_key_id):
key = get_key_labels(project_id, location_id, key_ring_id, symmetric_key_id)
assert key.labels == {'foo': 'bar', 'zip': 'zap'}
def test_get_key_version_attestation(project_id, location_id, key_ring_id, hsm_key_id):
attestation = get_key_version_attestation(project_id, location_id, key_ring_id, hsm_key_id, '1')
assert attestation.format
assert attestation.content
def test_get_public_key(project_id, location_id, key_ring_id, asymmetric_decrypt_key_id):
public_key = get_public_key(project_id, location_id, key_ring_id, asymmetric_decrypt_key_id, '1')
assert public_key.pem
def test_iam_add_member(project_id, location_id, key_ring_id, symmetric_key_id):
member = 'group:[email protected]'
policy = iam_add_member(project_id, location_id, key_ring_id, symmetric_key_id, member)
assert any(member in b.members for b in policy.bindings)
def test_iam_get_policy(project_id, location_id, key_ring_id, symmetric_key_id):
policy = iam_get_policy(project_id, location_id, key_ring_id, symmetric_key_id)
assert policy
def test_iam_remove_member(client, project_id, location_id, key_ring_id, asymmetric_sign_rsa_key_id):
resource_name = client.crypto_key_path(project_id, location_id, key_ring_id, asymmetric_sign_rsa_key_id)
policy = client.get_iam_policy(resource_name)
policy.bindings.add(
role='roles/cloudkms.cryptoKeyEncrypterDecrypter',
members=['group:[email protected]', 'group:[email protected]'])
client.set_iam_policy(resource_name, policy)
policy = iam_remove_member(project_id, location_id, key_ring_id, asymmetric_sign_rsa_key_id, 'group:[email protected]')
assert not any('group:[email protected]' in b.members for b in policy.bindings)
assert any('group:[email protected]' in b.members for b in policy.bindings)
def test_sign_asymmetric(client, project_id, location_id, key_ring_id, asymmetric_sign_rsa_key_id):
message = 'my message'
sign_response = sign_asymmetric(project_id, location_id, key_ring_id, asymmetric_sign_rsa_key_id, '1', message)
assert sign_response.signature
key_version_name = client.crypto_key_version_path(project_id, location_id, key_ring_id, asymmetric_sign_rsa_key_id, '1')
public_key = client.get_public_key(key_version_name)
pem = public_key.pem.encode('utf-8')
rsa_key = serialization.load_pem_public_key(pem, default_backend())
hash_ = hashlib.sha256(message.encode('utf-8')).digest()
try:
sha256 = hashes.SHA256()
pad = padding.PKCS1v15()
rsa_key.verify(sign_response.signature, hash_, pad, utils.Prehashed(sha256))
except InvalidSignature:
pytest.fail('invalid signature')
def test_update_key_add_rotation(project_id, location_id, key_ring_id, symmetric_key_id):
key = update_key_add_rotation(project_id, location_id, key_ring_id, symmetric_key_id)
assert key.rotation_period.seconds == 60*60*24*30
assert key.next_rotation_time.seconds > 0
def test_update_key_remove_labels(project_id, location_id, key_ring_id, symmetric_key_id):
key = update_key_remove_labels(project_id, location_id, key_ring_id, symmetric_key_id)
assert key.labels == {}
def test_update_key_remove_rotation(project_id, location_id, key_ring_id, symmetric_key_id):
key = update_key_remove_rotation(project_id, location_id, key_ring_id, symmetric_key_id)
assert key.rotation_period.seconds == 0
assert key.next_rotation_time.seconds == 0
def test_update_key_set_primary(project_id, location_id, key_ring_id, symmetric_key_id):
key = update_key_set_primary(project_id, location_id, key_ring_id, symmetric_key_id, '1')
assert '1' in key.primary.name
def test_update_key_update_labels(project_id, location_id, key_ring_id, symmetric_key_id):
key = update_key_update_labels(project_id, location_id, key_ring_id, symmetric_key_id)
assert key.labels == {'new_label': 'new_value'}
def test_verify_asymmetric_ec(client, project_id, location_id, key_ring_id, asymmetric_sign_ec_key_id):
message = 'my message'
key_version_name = client.crypto_key_version_path(project_id, location_id, key_ring_id, asymmetric_sign_ec_key_id, '1')
hash_ = hashlib.sha256(message.encode('utf-8')).digest()
sign_response = client.asymmetric_sign(key_version_name, {'sha256': hash_})
verified = verify_asymmetric_ec(project_id, location_id, key_ring_id, asymmetric_sign_ec_key_id, '1', message, sign_response.signature)
assert verified
def test_verify_asymmetric_rsa(client, project_id, location_id, key_ring_id, asymmetric_sign_rsa_key_id):
message = 'my message'
key_version_name = client.crypto_key_version_path(project_id, location_id, key_ring_id, asymmetric_sign_rsa_key_id, '1')
hash_ = hashlib.sha256(message.encode('utf-8')).digest()
sign_response = client.asymmetric_sign(key_version_name, {'sha256': hash_})
verified = verify_asymmetric_rsa(project_id, location_id, key_ring_id, asymmetric_sign_rsa_key_id, '1', message, sign_response.signature)
assert verified
def test_quickstart(project_id, location_id):
key_rings = quickstart(project_id, location_id)
assert key_rings
|
[] |
[] |
[
"GCLOUD_PROJECT"
] |
[]
|
["GCLOUD_PROJECT"]
|
python
| 1 | 0 | |
pyspch/display_mpl.py
|
import os,sys,io
import scipy.signal
from urllib.request import urlopen
from IPython.display import display, Audio, HTML, clear_output
import math
import numpy as np
import pandas as pd
import librosa
from .constants import EPS_FLOAT, LOG10, SIGEPS_FLOAT
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.figure import Figure
os.environ['PYSPCH_BACKEND'] = "mpl"
#######################################################################################
# Define the SpchFig class as a superclass of matplotlib Figure
#######################################################################################
class SpchFig(Figure):
def __init__(self,row_heights=[1.,1.],col_widths=[1.],**kwargs):
fig_kwargs={'constrained_layout':True,'figsize':(12,6),'dpi':72}
fig_kwargs.update(kwargs)
super().__init__(**fig_kwargs)
self.nrows = len(row_heights)
self.ncols = len(col_widths)
gs = self.add_gridspec(nrows=self.nrows,ncols=self.ncols,
height_ratios=row_heights,width_ratios=col_widths)
for i in range(0,self.nrows):
for j in range(0,self.ncols):
ii = i*self.ncols + j
self.add_subplot(gs[i,j])
self.axes[ii].Init = False
# we work 100% object oriented and therefore close the plot immediately
# the figure will only show on demand
plt.close()
if self.ncols == 1:
self.align_ylabels(self.axes)
# convert list of axis to axis number
def get_axis(self,iax):
if isinstance(iax,list): # row*col spec
ii = iax[0]*self.axes[0].numCols + iax[1]
ax = self.axes[ii]
else: # rows only
ax = self.axes[iax]
return(ax)
#######################################################################################
# Low level API for mpl backend
#######################################################################################
def add_line_plot(self,y,iax=0,x=None,x0=0.,dx=1.,xrange='tight',yrange=None,grid=False,title=None,xlabel=None,ylabel=None,xticks=True,yticks=True,**kwargs):
"""
Add a line plot to an existing axis
Required Parameters
--------------------
iax(int): row index (Numbering: 0=top row)
y (numpy array): data as (npts,) or (nftrs,npts) numpy array
Optional Parameters
--------------------
x : x-axis as (1-D) numpy array (default=None, use sample indices)
x0: x-axis offset
dx : sampling period on x-axis (default = 1)
xrange : 'tight'(default) or xrange-values
yrange : None(default), 'tight' or yrange-values. 'tight' on the Y-axis creates 20% headroom
grid : False (default)
xlabel : default=None
ylabel : default=None
"""
ax = self.get_axis(iax)
ax.Init = True
if(y.ndim == 1): y=y.reshape(-1,y.size)
nftrs,npts= y.shape
if x is None:
x = x0+np.arange(npts) * dx
ax.plot(x,y.T,**kwargs)
if xrange is None: pass
elif xrange == 'tight':
ddx = (x[-1]-x[0])/len(x)
ax.set_xlim([x[0],x[-1]])
else: ax.set_xlim(xrange)
if yrange is None: pass
elif yrange == 'tight':
yy = 1.2 * np.max(np.abs(y)+SIGEPS_FLOAT)
ax.set_ylim(-yy,yy)
else:
ax.set_ylim(yrange)
ax.grid(grid)
if title is not None: ax.set_title(title)
if xlabel is not None: ax.set_xlabel(xlabel)
if ylabel is not None: ax.set_ylabel(ylabel)
if(yticks): ax.tick_params(axis='y',labelleft=True, left=True)
else: ax.tick_params(axis='y',labelleft=False, left=False)
if(xticks): ax.tick_params(axis='x',labelbottom=True)
else: ax.tick_params(axis='x',labelrotation=0.0,labelbottom=False,bottom=True)
def add_img_plot(self,img,iax=0,x0=None,y0=None,dx=1,dy=1,x=None,y=None,xticks=True,xlabel=None,ylabel=None,**kwargs):
''' Add an image plot (spectrogram style)
Parameters
----------
iax : axis number (default=0)
img : image, a (nrows x ncols) numpy array
x,y: coordinates for X and Y axis points, if None dx,dy are used
x0, y0: starting values on x and y axis; if None use dx/2 and dy/2 to center
dx, dy : int/float (default = 1)
xticks : (boolean) - label the x-axis ticks
xlabel : string (default=None)
ylabel : string (default=None)
(Numbering: iax=0 is the top row)
**kwargs: extra arguments to pass / override defaults in ax.colormesh()
'''
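# Illustrative usage sketch (spec is assumed to be an (nrows x ncols) spectrogram array):
#   fig.add_img_plot(spec, iax=1, dx=0.01, dy=100., xlabel='time (s)', ylabel='freq (Hz)')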
ax = self.get_axis(iax)
ax.Init = True
(nr,nc)= img.shape
params={'cmap':'jet','shading':'auto'}
params.update(kwargs)
# Use x & y center coordinates with same dimensions and centered positions
if x is None:
if x0 is None : x0 = 0.5*dx
x = np.arange(nc) * dx + x0
if y is None:
if y0 is None : y0 = 0.5*dy
y= np.arange(nr) * dy + y0
ax.pcolormesh(x,y,img,**params)
if(xticks): ax.tick_params(axis='x',labelbottom=True)
else: ax.tick_params(axis='x',labelrotation=0.0,labelbottom=False,bottom=True)
if xlabel is not None: ax.set_xlabel(xlabel)
if ylabel is not None: ax.set_ylabel(ylabel)
def add_waterfall2D(self,X,iax=0,ax_ref=0,x0=None,dx=.01,scale=None,colors=['r','g','b','k','y','m']):
'''
adds basic 2D waterfall plot in axis iax
'''
ax = self.get_axis(iax)
# check if new axis and do some initialization if needed
#if ax.Init == False:
# ax.set_xlim(self.axes[ax_ref].get_xlim())
# ax.set_ylim([0.,1.])
# ax.Init = True
# Axis_is_new = True
#else:
# Axis_is_new = False
xmin = np.min(X)
xmax = np.max(X)
if scale is None: scale = -dx/(xmax-xmin)
if x0 is None: x0 = dx/2
(nparam,nfr)=X.shape
ax.set_xlim([x0,x0+nfr*dx])
for i in range(0,nfr):
ax.plot(scale*(X[:,i]-xmin)+x0+i*dx,np.arange(nparam),color=colors[i%6])
def add_seg_plot(self,seg,iax=0,xrange=None,yrange=None,ypos=0.5,Lines=True,Labels=False,color='#FF0000',size=16,ax_ref=0,txtargs={},lineargs={}):
'''adds a segmentation to an axis
This can be an axis without prior info; in this case at least xrange should be given to scale the x-axis correctly.
Alternatively the segmentation can be overlaid on an existing plot, in which case the x and y limits can be inherited from the previous plot.
Required Parameters
-------------------
iax : axis number
seg: segmentation DataFrame
Optional Parameters
-------------------
xrange: X-axis range, if None keep existing
yrange: Y-axis range, if None keep existing
ypos: relative height to print the segmentation labels (default= 0.5)
if None, do not write out segmentation labels
ax_ref: reference axis for timing information (default=0)
Lines: boolean, to plot segmentation lines (default=True)
Labels: boolean, the segmentation is a label stream (default=False) [not used]
color: text and line color (default=#FF0000 (red))
size: text size (default=16)
**txtargs: extra arguments for labeling text, passed to ax.text()
such as color, fontsize, ..
**lineargs: extra arguments for the label lines, passed to ax.vlines()
such as linestyles, color, ..
'''
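# Illustrative usage sketch (assumes a pandas DataFrame with 't0', 't1' and 'seg' columns):
#   seg = pd.DataFrame({'t0': [0.0, 0.5], 't1': [0.5, 1.2], 'seg': ['sil', 'speech']})
#   fig.add_seg_plot(seg, iax=1, ypos=0.8, color='#00F')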
if seg is None: return
ax = self.get_axis(iax)
# check if new axis and do some initialization if needed
if ax.Init == False:
ax.set_xlim(self.axes[ax_ref].get_xlim())
ax.set_ylim([0.,1.])
ax.Init = True
Axis_is_new = True
else:
Axis_is_new = False
if xrange is not None: ax.set_xlim(xrange)
else: xrange = ax.get_xlim()
if yrange is not None: ax.set_ylim(yrange)
else: yrange = ax.get_ylim()
if ypos is not None:
ypos = yrange[0] + ypos * (yrange[1]-yrange[0])
_lineargs={'linestyles':'dashed','colors':color}
_lineargs.update(lineargs)
_txtargs={'horizontalalignment':'center','verticalalignment':'center',
'color':color,'fontsize':size}
_txtargs.update(txtargs)
if seg is None:
# just create the axis for future use
return
elif 'seg' in seg.columns:
# a segmentation dataframe with begin and end times i.e. with (t0,t1,seg) entries
for iseg in seg.index:
t0= seg['t0'][iseg]
t1= seg['t1'][iseg]
txt = seg['seg'][iseg]
mid_seg = (t1+t0)/2.
if ( (xrange[0] < mid_seg) and (mid_seg < xrange[1]) ) :
if (ypos is not None) :
xpos = float(t0+(t1-t0)/2.0)
ax.text(xpos,ypos,txt,**_txtargs)
if ( (xrange[0] < t0) and Lines ) :
ax.vlines([t0],yrange[0],yrange[1],**_lineargs)
if ( (t1 < xrange[1]) and Lines ) :
ax.vlines([t1],yrange[0],yrange[1],**_lineargs)
elif 'lbl' in seg.columns:
# a label DataFrame with (t,lbl) entries
for iseg in seg.index:
xpos=seg['t'][iseg]
if (xpos > xrange[0]) and (xpos < xrange[1]) :
ax.text(xpos,ypos,seg['lbl'][iseg],**_txtargs)
# for a new axis, just provide tick marks at the bottom
if(Axis_is_new):
ax.tick_params(axis='y',labelleft=False,left=False)
ax.tick_params(axis='x',labelbottom=False,bottom=True)
def add_vlines(self,x,iax=0,color='#F00',linestyle='dashed'):
'''
add vertical lines at positions x over the full height of the axis
'''
ax = self.get_axis(iax)
y = ax.get_ylim()
ax.vlines(x,y[0],y[1],colors=color,linestyles=linestyle)
def add_vrect(self,x0,x1,iax=0,color='#888',alpha=0.2):
'''
add vertical rectangle between x0 and x1
'''
ax = self.get_axis(iax)
ax.axvspan(x0, x1, color=color, alpha=alpha )
################################ OLDDDDDDDDDDDDDDDDDDDDD ############################
def make_subplots(row_heights=[1.,1.],col_widths=[1.],figsize=(12,6),dpi=72,**kwargs):
""" Create a figure and axis for a multi-row plot
make_subplots has an analogous interface to the make_subplots() in the plotly package
This routine lets you specify the respective row heights.
Note that some defaults deviate from the mpl defaults such as figsize and dpi
Parameters
----------
row_heights : height ratios for different subplots (array of floats)
figsize : figsize in inch. default=(12,6)
dpi : scaling factor. default=72
**kwargs : kwargs to be passed to plt.figure()
constrained_layout=True
Returns
-------
fig : Figure
"""
fig_kwargs={'clear':True,'constrained_layout':True,'figsize':figsize,'dpi':dpi}
fig_kwargs.update(kwargs)
# we like tight x-axis in all situations
# plt.rcParams['axes.xmargin'] = 0
nrows = len(row_heights)
ncols = len(col_widths)
fig,_ = plt.subplots(nrows=nrows,ncols=ncols,gridspec_kw={'height_ratios':row_heights,'width_ratios':col_widths},
**fig_kwargs)
plt.close()
return(fig)
def make_rows(row_heights=[1.,1.],figsize=(12,6),dpi=72,**kwargs):
""" Create a figure and axis for a multi-row plot
make_rows has an analogous interface to the make_subplots() in the plotly package
This routine lets you specify the respective row heights.
Note that some defaults deviate from the mpl defaults such as figsize and dpi
Parameters
----------
row_heights : height ratios for different subplots (array of floats)
figsize : figsize in inch. default=(12,6)
dpi : scaling factor. default=72
**kwargs : kwargs to be passed to plt.figure()
constrained_layout=True
Returns
-------
fig : Figure
"""
fig_kwargs={'constrained_layout':True,'figsize':figsize,'dpi':dpi}
fig_kwargs.update(kwargs)
# we like tight x-axis in all situations
plt.rcParams['axes.xmargin'] = 0
fig = plt.figure(**fig_kwargs)
nrows = len(row_heights)
gs = fig.add_gridspec(nrows=nrows,ncols=1,height_ratios=row_heights)
for i in range(0,nrows):
fig.add_subplot(gs[i,0])
fig.axes[i].Init = False
plt.close()
return(fig)
def update_fig(fig,kv):
for k,v in kv.items():
if k == 'title': fig.suptitle(v)
elif k == 'titlemargin': pass
def update_axis(fig,row,kv={}):
ax = fig.axes[row]
for k,v in kv.items():
if k == 'xlabel': ax.set_xlabel(v)
elif k == 'ylabel': ax.set_ylabel(v)
elif k == 'xlim': ax.set_xlim(v)
elif k == 'ylim': ax.set_ylim(v)
elif k == 'grid': ax.grid(v)
#elif k == 'box': ax.axis(v)
def close_plot(fig):
fig.align_ylabels(fig.axes)
plt.close()
|
[] |
[] |
[
"PYSPCH_BACKEND"
] |
[]
|
["PYSPCH_BACKEND"]
|
python
| 1 | 0 | |
dev-tools/mage/settings.go
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package mage
import (
"fmt"
"go/build"
"io/ioutil"
"log"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/magefile/mage/sh"
"github.com/pkg/errors"
"golang.org/x/tools/go/vcs"
"github.com/elastic/beats/v7/dev-tools/mage/gotool"
)
const (
fpmVersion = "1.11.0"
// Docker images. See https://github.com/elastic/golang-crossbuild.
beatsFPMImage = "docker.elastic.co/beats-dev/fpm"
// BeatsCrossBuildImage is the image used for crossbuilding Beats.
BeatsCrossBuildImage = "docker.elastic.co/beats-dev/golang-crossbuild"
elasticBeatsImportPath = "github.com/elastic/beats"
elasticBeatsModulePath = "github.com/elastic/beats/v7"
)
// Common settings with defaults derived from files, CWD, and environment.
var (
GOOS = build.Default.GOOS
GOARCH = build.Default.GOARCH
GOARM = EnvOr("GOARM", "")
Platform = MakePlatformAttributes(GOOS, GOARCH, GOARM)
BinaryExt = ""
XPackDir = "../x-pack"
RaceDetector = false
TestCoverage = false
// CrossBuildMountModcache, if true, mounts $GOPATH/pkg/mod into
// the crossbuild images at /go/pkg/mod, read-only.
CrossBuildMountModcache = true
BeatName = EnvOr("BEAT_NAME", filepath.Base(CWD()))
BeatServiceName = EnvOr("BEAT_SERVICE_NAME", BeatName)
BeatIndexPrefix = EnvOr("BEAT_INDEX_PREFIX", BeatName)
BeatDescription = EnvOr("BEAT_DESCRIPTION", "")
BeatVendor = EnvOr("BEAT_VENDOR", "Elastic")
BeatLicense = EnvOr("BEAT_LICENSE", "ASL 2.0")
BeatURL = EnvOr("BEAT_URL", "https://www.elastic.co/products/beats/"+BeatName)
BeatUser = EnvOr("BEAT_USER", "root")
BeatProjectType ProjectType
Snapshot bool
versionQualified bool
versionQualifier string
FuncMap = map[string]interface{}{
"beat_doc_branch": BeatDocBranch,
"beat_version": BeatQualifiedVersion,
"commit": CommitHash,
"date": BuildDate,
"elastic_beats_dir": ElasticBeatsDir,
"go_version": GoVersion,
"repo": GetProjectRepoInfo,
"title": strings.Title,
"tolower": strings.ToLower,
"contains": strings.Contains,
}
)
func init() {
if GOOS == "windows" {
BinaryExt = ".exe"
}
var err error
RaceDetector, err = strconv.ParseBool(EnvOr("RACE_DETECTOR", "false"))
if err != nil {
panic(errors.Wrap(err, "failed to parse RACE_DETECTOR env value"))
}
TestCoverage, err = strconv.ParseBool(EnvOr("TEST_COVERAGE", "false"))
if err != nil {
panic(errors.Wrap(err, "failed to parse TEST_COVERAGE env value"))
}
Snapshot, err = strconv.ParseBool(EnvOr("SNAPSHOT", "false"))
if err != nil {
panic(errors.Wrap(err, "failed to parse SNAPSHOT env value"))
}
versionQualifier, versionQualified = os.LookupEnv("VERSION_QUALIFIER")
}
// ProjectType specifies the type of project (OSS vs X-Pack).
type ProjectType uint8
// Project types.
const (
OSSProject ProjectType = iota
XPackProject
CommunityProject
)
// ErrUnknownProjectType is returned if an unknown ProjectType value is used.
var ErrUnknownProjectType = fmt.Errorf("unknown ProjectType")
// EnvMap returns map containing the common settings variables and all variables
// from the environment. args are appended to the output prior to adding the
// environment variables (so env vars have the highest precedence).
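// For example (illustrative only), EnvMap(map[string]interface{}{"BeatName": "custom"}) returns the
// common settings with BeatName set to "custom", plus every variable from os.Environ(), which takes
// the highest precedence.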
func EnvMap(args ...map[string]interface{}) map[string]interface{} {
envMap := varMap(args...)
// Add the environment (highest precedence).
for _, e := range os.Environ() {
env := strings.SplitN(e, "=", 2)
envMap[env[0]] = env[1]
}
return envMap
}
func varMap(args ...map[string]interface{}) map[string]interface{} {
data := map[string]interface{}{
"GOOS": GOOS,
"GOARCH": GOARCH,
"GOARM": GOARM,
"Platform": Platform,
"BinaryExt": BinaryExt,
"XPackDir": XPackDir,
"BeatName": BeatName,
"BeatServiceName": BeatServiceName,
"BeatIndexPrefix": BeatIndexPrefix,
"BeatDescription": BeatDescription,
"BeatVendor": BeatVendor,
"BeatLicense": BeatLicense,
"BeatURL": BeatURL,
"BeatUser": BeatUser,
"Snapshot": Snapshot,
"Qualifier": versionQualifier,
}
// Add the extra args to the map.
for _, m := range args {
for k, v := range m {
data[k] = v
}
}
return data
}
func dumpVariables() (string, error) {
var dumpTemplate = `## Variables
GOOS = {{.GOOS}}
GOARCH = {{.GOARCH}}
GOARM = {{.GOARM}}
Platform = {{.Platform}}
BinaryExt = {{.BinaryExt}}
XPackDir = {{.XPackDir}}
BeatName = {{.BeatName}}
BeatServiceName = {{.BeatServiceName}}
BeatIndexPrefix = {{.BeatIndexPrefix}}
BeatDescription = {{.BeatDescription}}
BeatVendor = {{.BeatVendor}}
BeatLicense = {{.BeatLicense}}
BeatURL = {{.BeatURL}}
BeatUser = {{.BeatUser}}
VersionQualifier = {{.Qualifier}}
## Functions
beat_doc_branch = {{ beat_doc_branch }}
beat_version = {{ beat_version }}
commit = {{ commit }}
date = {{ date }}
elastic_beats_dir = {{ elastic_beats_dir }}
go_version = {{ go_version }}
repo.RootImportPath = {{ repo.RootImportPath }}
repo.CanonicalRootImportPath = {{ repo.CanonicalRootImportPath }}
repo.RootDir = {{ repo.RootDir }}
repo.ImportPath = {{ repo.ImportPath }}
repo.SubDir = {{ repo.SubDir }}
`
return Expand(dumpTemplate)
}
// DumpVariables writes the template variables and values to stdout.
func DumpVariables() error {
out, err := dumpVariables()
if err != nil {
return err
}
fmt.Println(out)
return nil
}
var (
commitHash string
commitHashOnce sync.Once
)
// CommitHash returns the full length git commit hash.
func CommitHash() (string, error) {
var err error
commitHashOnce.Do(func() {
commitHash, err = sh.Output("git", "rev-parse", "HEAD")
})
return commitHash, err
}
var (
elasticBeatsDirValue string
elasticBeatsDirErr error
elasticBeatsDirLock sync.Mutex
)
// SetElasticBeatsDir sets the internal elastic beats dir to a preassigned value
func SetElasticBeatsDir(path string) {
elasticBeatsDirLock.Lock()
defer elasticBeatsDirLock.Unlock()
elasticBeatsDirValue = path
}
// ElasticBeatsDir returns the path to Elastic beats dir.
func ElasticBeatsDir() (string, error) {
elasticBeatsDirLock.Lock()
defer elasticBeatsDirLock.Unlock()
if elasticBeatsDirValue != "" || elasticBeatsDirErr != nil {
return elasticBeatsDirValue, elasticBeatsDirErr
}
elasticBeatsDirValue, elasticBeatsDirErr = findElasticBeatsDir()
if elasticBeatsDirErr == nil {
log.Println("Found Elastic Beats dir at", elasticBeatsDirValue)
}
return elasticBeatsDirValue, elasticBeatsDirErr
}
// findElasticBeatsDir returns the root directory of the Elastic Beats module, using "go list".
//
// When running within the Elastic Beats repo, this will return the repo root. Otherwise,
// it will return the root directory of the module from within the module cache or vendor
// directory.
func findElasticBeatsDir() (string, error) {
repo, err := GetProjectRepoInfo()
if err != nil {
return "", err
}
if repo.IsElasticBeats() {
return repo.RootDir, nil
}
return gotool.ListModuleCacheDir(elasticBeatsModulePath)
}
var (
buildDate = time.Now().UTC().Format(time.RFC3339)
)
// BuildDate returns the time that the build started.
func BuildDate() string {
return buildDate
}
var (
goVersionValue string
goVersionErr error
goVersionOnce sync.Once
)
// GoVersion returns the version of Go defined in the project's .go-version
// file.
func GoVersion() (string, error) {
goVersionOnce.Do(func() {
goVersionValue = os.Getenv("BEAT_GO_VERSION")
if goVersionValue != "" {
return
}
goVersionValue, goVersionErr = getBuildVariableSources().GetGoVersion()
})
return goVersionValue, goVersionErr
}
var (
beatVersionRegex = regexp.MustCompile(`(?m)^const defaultBeatVersion = "(.+)"\r?$`)
beatVersionValue string
beatVersionErr error
beatVersionOnce sync.Once
)
// BeatQualifiedVersion returns the Beat's qualified version. The value can be overwritten by
// setting VERSION_QUALIFIER in the environment.
func BeatQualifiedVersion() (string, error) {
version, err := beatVersion()
if err != nil {
return "", err
}
// version qualifier can intentionally be set to "" to override build time var
if !versionQualified || versionQualifier == "" {
return version, nil
}
return version + "-" + versionQualifier, nil
}
// beatVersion returns the Beat's version. The value can be overridden by
// setting BEAT_VERSION in the environment.
func beatVersion() (string, error) {
beatVersionOnce.Do(func() {
beatVersionValue = os.Getenv("BEAT_VERSION")
if beatVersionValue != "" {
return
}
beatVersionValue, beatVersionErr = getBuildVariableSources().GetBeatVersion()
})
return beatVersionValue, beatVersionErr
}
var (
beatDocBranchRegex = regexp.MustCompile(`(?m)doc-branch:\s*([^\s]+)\r?$`)
beatDocBranchValue string
beatDocBranchErr error
beatDocBranchOnce sync.Once
)
// BeatDocBranch returns the documentation branch name associated with the
// Beat branch.
func BeatDocBranch() (string, error) {
beatDocBranchOnce.Do(func() {
beatDocBranchValue = os.Getenv("BEAT_DOC_BRANCH")
if beatDocBranchValue != "" {
return
}
beatDocBranchValue, beatDocBranchErr = getBuildVariableSources().GetDocBranch()
})
return beatDocBranchValue, beatDocBranchErr
}
// --- BuildVariableSources
var (
// DefaultBeatBuildVariableSources contains the default locations build
// variables are read from by Elastic Beats.
DefaultBeatBuildVariableSources = &BuildVariableSources{
BeatVersion: "{{ elastic_beats_dir }}/libbeat/version/version.go",
GoVersion: "{{ elastic_beats_dir }}/.go-version",
DocBranch: "{{ elastic_beats_dir }}/libbeat/docs/version.asciidoc",
}
buildVariableSources *BuildVariableSources
buildVariableSourcesLock sync.Mutex
)
// SetBuildVariableSources sets the BuildVariableSources that defines where
// certain build data should be sourced from. Community Beats must call this.
func SetBuildVariableSources(s *BuildVariableSources) {
buildVariableSourcesLock.Lock()
defer buildVariableSourcesLock.Unlock()
buildVariableSources = s
}
func getBuildVariableSources() *BuildVariableSources {
buildVariableSourcesLock.Lock()
defer buildVariableSourcesLock.Unlock()
if buildVariableSources != nil {
return buildVariableSources
}
repo, err := GetProjectRepoInfo()
if err != nil {
panic(err)
}
if repo.IsElasticBeats() {
buildVariableSources = DefaultBeatBuildVariableSources
return buildVariableSources
}
panic(errors.Errorf("magefile must call devtools.SetBuildVariableSources() "+
"because it is not an elastic beat (repo=%+v)", repo.RootImportPath))
}
// BuildVariableSources is used to explicitly define what files contain build
// variables and how to parse the values from that file. This removes ambiguity
// about where the data is sourced from and allows a degree of customization for
// community Beats.
//
// Default parsers are used if one is not defined.
type BuildVariableSources struct {
// File containing the Beat version.
BeatVersion string
// Parses the Beat version from the BeatVersion file.
BeatVersionParser func(data []byte) (string, error)
// File containing the Go version to be used in cross-builds.
GoVersion string
// Parses the Go version from the GoVersion file.
GoVersionParser func(data []byte) (string, error)
// File containing the documentation branch.
DocBranch string
// Parses the documentation branch from the DocBranch file.
DocBranchParser func(data []byte) (string, error)
}
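// For example (illustrative only; the file paths below are hypothetical), a community Beat might call:
// devtools.SetBuildVariableSources(&devtools.BuildVariableSources{
//     BeatVersion: "version/version.go",
//     GoVersion: ".go-version",
//     DocBranch: "docs/version.asciidoc",
// })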
func (s *BuildVariableSources) expandVar(in string) (string, error) {
return expandTemplate("inline", in, map[string]interface{}{
"elastic_beats_dir": ElasticBeatsDir,
})
}
// GetBeatVersion reads the BeatVersion file and parses the version from it.
func (s *BuildVariableSources) GetBeatVersion() (string, error) {
file, err := s.expandVar(s.BeatVersion)
if err != nil {
return "", err
}
data, err := ioutil.ReadFile(file)
if err != nil {
return "", errors.Wrapf(err, "failed to read beat version file=%v", file)
}
if s.BeatVersionParser == nil {
s.BeatVersionParser = parseBeatVersion
}
return s.BeatVersionParser(data)
}
// GetGoVersion reads the GoVersion file and parses the version from it.
func (s *BuildVariableSources) GetGoVersion() (string, error) {
file, err := s.expandVar(s.GoVersion)
if err != nil {
return "", err
}
data, err := ioutil.ReadFile(file)
if err != nil {
return "", errors.Wrapf(err, "failed to read go version file=%v", file)
}
if s.GoVersionParser == nil {
s.GoVersionParser = parseGoVersion
}
return s.GoVersionParser(data)
}
// GetDocBranch reads the DocBranch file and parses the branch from it.
func (s *BuildVariableSources) GetDocBranch() (string, error) {
file, err := s.expandVar(s.DocBranch)
if err != nil {
return "", err
}
data, err := ioutil.ReadFile(file)
if err != nil {
return "", errors.Wrapf(err, "failed to read doc branch file=%v", file)
}
if s.DocBranchParser == nil {
s.DocBranchParser = parseDocBranch
}
return s.DocBranchParser(data)
}
func parseBeatVersion(data []byte) (string, error) {
matches := beatVersionRegex.FindSubmatch(data)
if len(matches) == 2 {
return string(matches[1]), nil
}
return "", errors.New("failed to parse beat version file")
}
func parseGoVersion(data []byte) (string, error) {
return strings.TrimSpace(string(data)), nil
}
func parseDocBranch(data []byte) (string, error) {
matches := beatDocBranchRegex.FindSubmatch(data)
if len(matches) == 2 {
return string(matches[1]), nil
}
return "", errors.New("failed to parse beat doc branch")
}
// --- ProjectRepoInfo
// ProjectRepoInfo contains information about the project's repo.
type ProjectRepoInfo struct {
RootImportPath string // Import path at the project root.
CanonicalRootImportPath string // Pre-modules root import path (does not contain semantic import version identifier).
RootDir string // Root directory of the project.
ImportPath string // Import path of the current directory.
SubDir string // Relative path from the root dir to the current dir.
}
// IsElasticBeats returns true if the current project is
// github.com/elastic/beats.
func (r *ProjectRepoInfo) IsElasticBeats() bool {
return r.CanonicalRootImportPath == elasticBeatsImportPath
}
var (
repoInfoValue *ProjectRepoInfo
repoInfoErr error
repoInfoOnce sync.Once
)
// GetProjectRepoInfo returns information about the repo including the root
// import path and the current directory's import path.
func GetProjectRepoInfo() (*ProjectRepoInfo, error) {
repoInfoOnce.Do(func() {
if isUnderGOPATH() {
repoInfoValue, repoInfoErr = getProjectRepoInfoUnderGopath()
} else {
repoInfoValue, repoInfoErr = getProjectRepoInfoWithModules()
}
})
return repoInfoValue, repoInfoErr
}
func isUnderGOPATH() bool {
underGOPATH := false
srcDirs, err := listSrcGOPATHs()
if err != nil {
return false
}
for _, srcDir := range srcDirs {
rel, err := filepath.Rel(srcDir, CWD())
if err != nil {
continue
}
if !strings.Contains(rel, "..") {
underGOPATH = true
}
}
return underGOPATH
}
func getProjectRepoInfoWithModules() (*ProjectRepoInfo, error) {
var (
cwd = CWD()
rootDir string
subDir string
)
possibleRoot := cwd
var errs []string
for {
isRoot, err := isGoModRoot(possibleRoot)
if err != nil {
errs = append(errs, err.Error())
}
if isRoot {
rootDir = possibleRoot
subDir, err = filepath.Rel(rootDir, cwd)
if err != nil {
errs = append(errs, err.Error())
}
break
}
possibleRoot = filepath.Dir(possibleRoot)
}
if rootDir == "" {
return nil, errors.Errorf("failed to find root dir of module file: %v", errs)
}
rootImportPath, err := gotool.GetModuleName()
if err != nil {
return nil, err
}
return &ProjectRepoInfo{
RootImportPath: rootImportPath,
CanonicalRootImportPath: filepath.ToSlash(extractCanonicalRootImportPath(rootImportPath)),
RootDir: rootDir,
SubDir: subDir,
ImportPath: filepath.ToSlash(filepath.Join(rootImportPath, subDir)),
}, nil
}
func isGoModRoot(path string) (bool, error) {
gomodPath := filepath.Join(path, "go.mod")
_, err := os.Stat(gomodPath)
if os.IsNotExist(err) {
return false, nil
}
if err != nil {
return false, err
}
return true, nil
}
func getProjectRepoInfoUnderGopath() (*ProjectRepoInfo, error) {
var (
cwd = CWD()
errs []string
rootDir string
)
srcDirs, err := listSrcGOPATHs()
if err != nil {
return nil, err
}
for _, srcDir := range srcDirs {
_, root, err := vcs.FromDir(cwd, srcDir)
if err != nil {
// Try the next gopath.
errs = append(errs, err.Error())
continue
}
rootDir = filepath.Join(srcDir, root)
break
}
if rootDir == "" {
return nil, errors.Errorf("error while determining root directory: %v", errs)
}
subDir, err := filepath.Rel(rootDir, cwd)
if err != nil {
return nil, errors.Wrap(err, "failed to get relative path to repo root")
}
rootImportPath, err := gotool.GetModuleName()
if err != nil {
return nil, err
}
return &ProjectRepoInfo{
RootImportPath: rootImportPath,
CanonicalRootImportPath: filepath.ToSlash(extractCanonicalRootImportPath(rootImportPath)),
RootDir: rootDir,
SubDir: subDir,
ImportPath: filepath.ToSlash(filepath.Join(rootImportPath, subDir)),
}, nil
}
func extractCanonicalRootImportPath(rootImportPath string) string {
// In order to be compatible with go modules, the root import
// path of any module at major version v2 or higher must include
// the major version.
// Ref: https://github.com/golang/go/wiki/Modules#semantic-import-versioning
//
// Thus, Beats has to include the major version as well.
// This regex removes the major version from the import path.
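// For example, "github.com/elastic/beats/v7" becomes "github.com/elastic/beats".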
re := regexp.MustCompile(`(/v[1-9][0-9]*)$`)
return re.ReplaceAllString(rootImportPath, "")
}
func listSrcGOPATHs() ([]string, error) {
var (
cwd = CWD()
errs []string
srcDirs []string
)
for _, gopath := range filepath.SplitList(build.Default.GOPATH) {
gopath = filepath.Clean(gopath)
if !strings.HasPrefix(cwd, gopath) {
// Fixes an issue on macOS when /var is actually /private/var.
var err error
gopath, err = filepath.EvalSymlinks(gopath)
if err != nil {
errs = append(errs, err.Error())
continue
}
}
srcDirs = append(srcDirs, filepath.Join(gopath, "src"))
}
if len(srcDirs) == 0 {
return srcDirs, errors.Errorf("failed to find any GOPATH %v", errs)
}
return srcDirs, nil
}
|
[
"\"BEAT_GO_VERSION\"",
"\"BEAT_VERSION\"",
"\"BEAT_DOC_BRANCH\""
] |
[] |
[
"BEAT_VERSION",
"BEAT_GO_VERSION",
"BEAT_DOC_BRANCH"
] |
[]
|
["BEAT_VERSION", "BEAT_GO_VERSION", "BEAT_DOC_BRANCH"]
|
go
| 3 | 0 | |
XY_Model_propare_state3_chi64_A0.py
|
import torch as tc
import numpy as np
import copy
import os,sys
import Circle_Function_Class_A0 as ev
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
from torch.optim.lr_scheduler import StepLR
import BasicFunSJR as bfs
from CNNBTN import Paras_VL_CNN_BTN_Collected1chg1
import BasicFun as bf
tmp = sys.argv[0][sys.argv[0].rfind(os.sep) + 1:] # extract the file name
mark = tmp[-5]
which_gpu = tmp[-4] # the GPU to use, fixed by the file name
para = Paras_VL_CNN_BTN_Collected1chg1()
para['dataset'] = 'fashion-mnist'
para['device'] = bf.choose_device(which_gpu)
para['log_name'] = './record' + mark + which_gpu
start = tc.cuda.Event(enable_timing=True)
end = tc.cuda.Event(enable_timing=True)
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
os.environ['CUDA_VISIBLE_DEVICE'] = '0'
# tc.manual_seed(7) # fix the random seed so the generated random numbers are reproducible
dtype = tc.float32 # float; monitor the norm
mps_num = 48
lr = 1e-2
it_time = 50
pt_time = 50 # number of alternating-optimization rounds
dt_print = 10
step_size = it_time * pt_time // 5 # epoch interval for decaying the learning rate lr
x1_axis = list() # x-axis for plotting: number of optimization steps
identity_4 = tc.eye(4, dtype=dtype).to(para['device']) # identity quantum gate used for small perturbations of the second-layer evolution
vol = tc.tensor(1e-3, dtype=dtype).to(para['device']) # magnitude of the small perturbation; has little effect on the number of optimization steps
con_vol = tc.tensor(1e-5, dtype=dtype).to(para['device'])
entropy_list = list()
average = tc.tensor(0, dtype=dtype).to(para['device']) # initial value used when computing the entanglement entropy
k_bood = 64
file_name = r'./tar_data.npz'
out_file_name = r'./layer_out_data.npz'
Loss_accuracy_range = 0.0001 # loss-accuracy threshold; the loop exits automatically once this accuracy is reached
base_it_time = it_time//3 # minimum number of optimization iterations, related to the layer-wise optimization
center_position = 24
layer_num = 3 # controls which layers of gates are optimized
gatenum = (mps_num - 1)*layer_num # number of variational quantum gates
tar_mpslist = list()
ini_state = list()
y_loss_layer = list() # per-layer loss for the alternating layer-wise optimization
y_loss_conda = list() # loss for the joint optimization
read_gatenum = (mps_num - 1)*(layer_num -1)
zero_gatetensor = tc.zeros(gatenum, 4, 4)
conba_gatalist = list()
layer_gatelist = list() # later reshaped into rank-3 tensors of shape (2, 4, 2)
layer_gatelist_0 = list() # store the gates layer by layer
layer_gatelist_1 = list() # store the gates layer by layer
layer_gatelist_2 = list() # store the gates layer by layer
layer_gatelist_3 = list() # store the gates layer by layer
layer_gatelist_4 = list() # store the gates layer by layer
layer_gatelist_5 = list() # store the gates layer by layer
layer_optimize = list() # store the optimizers per layer
loss_ = list([list([]), list([]), list([]), list([]), list([]), list([]), list([]), list([]), list([]), list([]),
list([]), list([]), list([])])
half_entropy_list = list([]) # used to build a heat map
half_entropy_list.append(tc.zeros([pt_time+1, mps_num-1])) # the last row is the target entanglement entropy
number_list = list([0])
print('The number of quantum circuit layers is ' + str(layer_num))
print('lr=:' + str(lr) + ', k_bood=: ' + str(k_bood) + ', the small perturbation vol per identity gate is: ' + str(vol))
data = np.load(file_name)
for i in range(mps_num): tar_mpslist.append(tc.from_numpy(data['tar_mpslist' + str(i)]).to(para['device']))  # load the target MPS tensors tar_mpslist0 ... tar_mpslist47
def fprint(content, file=None, print_screen=True, append=True):
if file is None:
file = './record.log'
if append:
way = 'ab'
else:
way = 'wb'
with open(file, way, buffering=0) as log:
log.write((content + '\n').encode(encoding='utf-8'))
if print_screen:
print(content)
def mps_norm(tar_tensor_): # normalize the target quantum state (log-style normalization)
tv = tc.einsum('asb,asd->bd', tar_tensor_[0].data, tar_tensor_[0].data)
t_norm = tc.norm(tv)
tv = tv / t_norm
tar_tensor_[0] = tar_tensor_[0].data / tc.sqrt(t_norm)
for gt in range(1, mps_num):
        tv = tc.einsum('ac,asb,csd->bd', tv, tar_tensor_[gt].data, tar_tensor_[gt].data)
norm_t = tc.norm(tv)
tv = tv / norm_t
tar_tensor_[gt] = tar_tensor_[gt] / tc.sqrt(norm_t)
def qr_left_and_right_location(MPS_list, location, vol, feature_num=2): # orthogonalize the target MPS and compute its entanglement entropy
# print('location', location)
for k in range(location):
# print('k', k)
q, r = tc.qr(MPS_list[k].reshape(-1, MPS_list[k].shape[2]))
MPS_list[k] = q.reshape(-1, feature_num, q.shape[1])
MPS_list[k + 1] = tc.einsum('nl, lmk-> nmk', [r, MPS_list[k + 1]])
for i in range(len(MPS_list) - 1, location, -1):
# print('i', i)
q, r = tc.qr(MPS_list[i].reshape(MPS_list[i].shape[0], -1).t())
q_shape = q.t().shape
MPS_list[i] = q.t().reshape(q_shape[0], feature_num, -1)
MPS_list[i - 1] = tc.einsum('ldk, nk-> ldn', [MPS_list[i - 1], r])
MPS_list[location] = MPS_list[location]/tc.norm(MPS_list[location])
# u, s, v = tc.svd(MPS_list[location].reshape(-1, MPS_list[location].shape[2]))
u, s, v = tc.svd(MPS_list[location].reshape(MPS_list[location].shape[0], -1))
s = s[s > vol]
y = (-1) * tc.sum(tc.pow(s, 2) * tc.log(tc.pow(s, 2)), dim=0).item()
    return y, MPS_list # y: the entanglement entropy; MPS_list: the orthogonalized target MPS
def half_entropy(out_mps):
for ht in range(1, mps_num):
h_entropy = qr_left_and_right_location(out_mps, ht, 1e-16)[0]
half_entropy_list[0][number_list[0], ht-1] = h_entropy
number_list[0] = number_list[0] + 1
entro_tar = copy.deepcopy(tar_mpslist)
for et in range(1, mps_num):
entropy = qr_left_and_right_location(entro_tar, et, 1e-16)[0]
entropy_list.append(entropy)
for m in range(mps_num - 2):
average_ = entropy_list[m]
average = average + average_
average = average / (mps_num - 1) # average entanglement entropy
center_entropy = qr_left_and_right_location(entro_tar, center_position, 1e-16)[0]
print('The average entanglement entropy is: {}'.format(average))
print('The MPS entanglement entropy with the orthogonality center at tensor ' + str(center_position) + ' is: {}'.format(center_entropy))
for nn in range(mps_num): # initial vacuum (all-zero) product state
ini_state.append(tc.tensor([1, 0], dtype=dtype).reshape(1, 2, 1).to(para['device']))
read_memory_gate = bfs.load('read_memory_gate_data', 'gate')
for vt in range(read_gatenum): # initialize as identity plus a small random perturbation, so that adding a layer improves on the single-layer result
unitary_gate = read_memory_gate[vt].to(para['device'])
unitary_gate.requires_grad = True
layer_gatelist.append(unitary_gate)
for jt in range(gatenum//layer_num):
vol_gate = tc.mul(tc.rand((4, 4), dtype=dtype).to(para['device']), vol)
unitary_gate = tc.add(vol_gate, identity_4)
unitary_gate.requires_grad = True
layer_gatelist.append(unitary_gate)
mps_norm(ini_state) # normalize the initial quantum state
# lay_optimize_1 = tc.optim.Adam(layer_gatelist, lr=lr) # gate parameters for the layer-wise optimization; joint optimization follows once the layer-wise stage is done
print('Storing the per-layer optimizers in a list')
for it in range(gatenum): # split the gates into per-layer lists for the layer-wise optimization
    if it < (gatenum//layer_num)*1:
        layer_gatelist_0.append(layer_gatelist[it])
    elif it < (gatenum//layer_num)*2:
        layer_gatelist_1.append(layer_gatelist[it])
    elif it < (gatenum//layer_num)*3:
        layer_gatelist_2.append(layer_gatelist[it])
    elif it < (gatenum//layer_num)*4:
        layer_gatelist_3.append(layer_gatelist[it])
    elif it < (gatenum//layer_num)*5:
        layer_gatelist_4.append(layer_gatelist[it])
    else:
        layer_gatelist_5.append(layer_gatelist[it])
lay_optimize_0 = tc.optim.Adam(layer_gatelist_0, lr=lr) # per-layer gate parameters; joint optimization follows after the layer-wise stage
lay_optimize_1 = tc.optim.Adam(layer_gatelist_1, lr=lr)
lay_optimize_2 = tc.optim.Adam(layer_gatelist_2, lr=lr)
layer_optimize.append(lay_optimize_0) # collect the three per-layer optimizers
layer_optimize.append(lay_optimize_1)
layer_optimize.append(lay_optimize_2)
scheduler_0 = StepLR(lay_optimize_0, step_size=step_size, gamma=0.1)
scheduler_1 = StepLR(lay_optimize_1, step_size=step_size, gamma=0.1)
scheduler_2 = StepLR(lay_optimize_2, step_size=step_size, gamma=0.1)
scheduler = list()
scheduler.append(scheduler_0)
scheduler.append(scheduler_1)
scheduler.append(scheduler_2)
evo = ev.Evolve(mps_num, k_bood, 2, gatenum, layer_num)
evo.init_tensor_list(copy.deepcopy(ini_state))
for bt in range(layer_num):
    print('Initial learning rate of layer ' + str(bt) + ':', layer_optimize[bt].defaults['lr'])
start.record() # start timing the computation
for pt in range(pt_time): # alternating-optimization rounds
    fprint('Circle optimization round ' + str(pt), file=para['log_name'])
for lay_num in range(layer_num):
        fprint('Circle optimization at layer ' + str(lay_num), file=para['log_name'])
for vt in range(it_time):
            for llt in range(lay_num, lay_num + 1): # evolve the layer being optimized; once finished, store it as the initialization for the next layer
evo.layered_evolve_mps(layer_gatelist, llt)
if vt == it_time - 1:
evo.storage_layer_out_optimization(llt, 0)
            for at in range(lay_num + 1, layer_num): # evolve the non-variational gates into the circuit
evo.layered_evolve_mps(layer_gatelist, at)
            lay_loss = evo.log_fidelity(tar_mpslist) # the MPS form sidesteps the exponential cost of the full state
if ((vt + 1) % dt_print) == 0:
if vt == 0:
fprint('block')
else:
fprint('At t = ' + str(vt) + ', loss = ' + str(lay_loss.item()), file=para['log_name'])
loss_[lay_num].append(lay_loss.item())
lay_loss.backward()
layer_optimize[lay_num].step()
layer_optimize[lay_num].zero_grad()
if ((vt + 1) % dt_print) == 0:
                fprint("Learning rate at epoch %d: %f" % (vt, layer_optimize[lay_num].param_groups[0]['lr']),
file=para['log_name'])
scheduler[lay_num].step()
            tc.cuda.empty_cache() # free unused cached GPU memory
if lay_num == layer_num-1:
if vt == it_time - 1:
half_entropy(evo.out_optimization())
if vt == it_time - 1:
evo.read_layer_out_optimization(lay_num, 0)
else:
evo.read_layer_out_optimization(lay_num, 1)
half_entropy(tar_mpslist) # the last row of the heat map holds the target-state entanglement
bfs.save('.', 'out_memory_half_entropy_data', [half_entropy_list], ['half_entropy'])
for dt in range(gatenum):
zero_gatetensor[dt, :, :] = layer_gatelist[dt].data
bfs.save('.', 'out_memory_gate_data', [zero_gatetensor], ['gate'])
out_layer = evo.out_optimization()
out_layer_numpy = list()
for nt in range(mps_num): # convert the output MPS into numpy arrays
    out_layer_numpy.append(out_layer[nt].detach().cpu().numpy())
np.savez(out_file_name,
         **{'tar_mpslist' + str(i): out_layer_numpy[i] for i in range(mps_num)})
for nt in range(mps_num): # convert the target MPS into numpy arrays
tar_mpslist[nt] = tar_mpslist[nt].cpu().numpy()
end.record() # stop timing the computation
# Waits for everything to finish running
tc.cuda.synchronize() # wait until all kernels in all streams on the current device have finished
print('Runtime: ', start.elapsed_time(end))
for i in range(pt_time*5):
x1_axis.append(i*10)
color_list = list(['deeppink', 'red', 'gold', 'black', 'lime', 'peru', 'purple', 'blue'])
plt.figure(num=1, figsize=(16, 12), dpi=100)
plt.tick_params(labelsize=16)
plt.xlabel("number of optimization steps", fontsize=20) # x-axis label
plt.ylabel("negative-logarithmic fidelities (NLFs) per site", fontsize=20)
plt.grid(axis='x', c='g', linestyle='--', alpha=0.5)
for kt in range(layer_num):
plt.plot(x1_axis, loss_[kt], color=color_list[kt], linewidth=3, label=' Circle layered Optimize' + str(kt))
plt.legend(prop={'family': 'Times New Roman', 'size': 16}, loc='upper right')
plt.savefig('./MPS_Step_3layer_Circle.jpg')
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICE",
"KMP_DUPLICATE_LIB_OK"
] |
[]
|
["CUDA_VISIBLE_DEVICE", "KMP_DUPLICATE_LIB_OK"]
|
python
| 2 | 0 | |
htxaarhuslan/wsgi.py
|
"""
WSGI config for htxaarhuslan project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "htxaarhuslan.settings")
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
env/Lib/site-packages/ghp_import.py
|
#! /usr/bin/env python
#
# This file is part of the ghp-import package released under
# the Tumbolia Public License. See the LICENSE file for more
# information.
import errno
import optparse as op
import os
import subprocess as sp
import sys
import time
import unicodedata
from dateutil import tz
from datetime import datetime
try:
from shlex import quote
except ImportError:
from pipes import quote
__all__ = ['ghp_import']
__usage__ = "%prog [OPTIONS] DIRECTORY"
class GhpError(Exception):
def __init__(self, message):
self.message = message
if sys.version_info[0] == 3:
def enc(text):
if isinstance(text, bytes):
return text
return text.encode()
def dec(text):
if isinstance(text, bytes):
return text.decode('utf-8')
return text
def write(pipe, data):
try:
pipe.stdin.write(data)
except IOError as e:
if e.errno != errno.EPIPE:
raise
else:
def enc(text):
if isinstance(text, unicode): # noqa F821
return text.encode('utf-8')
return text
def dec(text):
if isinstance(text, unicode): # noqa F821
return text
return text.decode('utf-8')
def write(pipe, data):
pipe.stdin.write(data)
class Git(object):
def __init__(self, use_shell=False):
self.use_shell = use_shell
self.cmd = None
self.pipe = None
self.stderr = None
self.stdout = None
def check_repo(self):
if self.call('rev-parse') != 0:
error = self.stderr
if not error:
error = "Unknown Git error"
error = dec(error)
if error.startswith("fatal: "):
error = error[len("fatal: "):]
raise GhpError(error)
def try_rebase(self, remote, branch):
rc = self.call('rev-list', '--max-count=1', '%s/%s' % (remote, branch))
if rc != 0:
return True
rev = dec(self.stdout.strip())
rc = self.call('update-ref', 'refs/heads/%s' % branch, rev)
if rc != 0:
return False
return True
def get_config(self, key):
self.call('config', key)
return self.stdout.strip()
def get_prev_commit(self, branch):
rc = self.call('rev-list', '--max-count=1', branch, '--')
if rc != 0:
return None
return dec(self.stdout).strip()
def open(self, *args, **kwargs):
if self.use_shell:
self.cmd = 'git ' + ' '.join(map(quote, args))
else:
self.cmd = ['git'] + list(args)
if sys.version_info >= (3, 2, 0):
kwargs['universal_newlines'] = False
for k in 'stdin stdout stderr'.split():
kwargs.setdefault(k, sp.PIPE)
kwargs['shell'] = self.use_shell
self.pipe = sp.Popen(self.cmd, **kwargs)
return self.pipe
def call(self, *args, **kwargs):
self.open(*args, **kwargs)
(self.stdout, self.stderr) = self.pipe.communicate()
return self.pipe.wait()
def check_call(self, *args, **kwargs):
kwargs["shell"] = self.use_shell
sp.check_call(['git'] + list(args), **kwargs)
def normalize_path(path):
# Fix unicode pathnames on OS X
# See: http://stackoverflow.com/a/5582439/44289
if sys.platform == "darwin":
return unicodedata.normalize("NFKC", dec(path))
return path
def mk_when(timestamp=None):
if timestamp is None:
timestamp = int(time.time())
currtz = datetime.now(tz.tzlocal()).strftime('%z')
return "%s %s" % (timestamp, currtz)
def start_commit(pipe, git, branch, message):
uname = os.getenv('GIT_COMMITTER_NAME', dec(git.get_config('user.name')))
email = os.getenv('GIT_COMMITTER_EMAIL', dec(git.get_config('user.email')))
when = os.getenv('GIT_COMMITTER_DATE', mk_when())
write(pipe, enc('commit refs/heads/%s\n' % branch))
write(pipe, enc('committer %s <%s> %s\n' % (uname, email, when)))
write(pipe, enc('data %d\n%s\n' % (len(enc(message)), message)))
head = git.get_prev_commit(branch)
if head:
write(pipe, enc('from %s\n' % head))
write(pipe, enc('deleteall\n'))
def add_file(pipe, srcpath, tgtpath):
with open(srcpath, "rb") as handle:
if os.access(srcpath, os.X_OK):
write(pipe, enc('M 100755 inline %s\n' % tgtpath))
else:
write(pipe, enc('M 100644 inline %s\n' % tgtpath))
data = handle.read()
write(pipe, enc('data %d\n' % len(data)))
write(pipe, enc(data))
write(pipe, enc('\n'))
def add_nojekyll(pipe):
write(pipe, enc('M 100644 inline .nojekyll\n'))
write(pipe, enc('data 0\n'))
write(pipe, enc('\n'))
def add_cname(pipe, cname):
write(pipe, enc('M 100644 inline CNAME\n'))
write(pipe, enc('data %d\n%s\n' % (len(enc(cname)), cname)))
def gitpath(fname):
norm = os.path.normpath(fname)
return "/".join(norm.split(os.path.sep))
def run_import(git, srcdir, **opts):
pipe = git.open('fast-import', '--date-format=raw', '--quiet',
stdin=sp.PIPE, stdout=None, stderr=None)
start_commit(pipe, git, opts['branch'], opts['mesg'])
for path, _, fnames in os.walk(srcdir, followlinks=opts['followlinks']):
for fn in fnames:
fpath = os.path.join(path, fn)
fpath = normalize_path(fpath)
gpath = gitpath(os.path.relpath(fpath, start=srcdir))
if opts['prefix']:
gpath = os.path.join(opts['prefix'], gpath)
add_file(pipe, fpath, gpath)
if opts['nojekyll']:
add_nojekyll(pipe)
if opts['cname'] is not None:
add_cname(pipe, opts['cname'])
write(pipe, enc('\n'))
pipe.stdin.close()
if pipe.wait() != 0:
sys.stdout.write(enc("Failed to process commit.\n"))
def options():
return [
op.make_option(
'-n', '--no-jekyll', dest='nojekyll', default=False,
action="store_true",
help='Include a .nojekyll file in the branch.'),
op.make_option(
'-c', '--cname', dest='cname', default=None,
help='Write a CNAME file with the given CNAME.'),
op.make_option(
'-m', '--message', dest='mesg',
default='Update documentation',
help='The commit message to use on the target branch.'),
op.make_option(
'-p', '--push', dest='push', default=False,
action='store_true',
help='Push the branch to origin/{branch} after committing.'),
op.make_option(
'-x', '--prefix', dest='prefix', default=None,
help='The prefix to add to each file that gets pushed to the '
'remote. [%default]'),
op.make_option(
'-f', '--force', dest='force',
default=False, action='store_true',
help='Force the push to the repository'),
op.make_option(
'-r', '--remote', dest='remote', default='origin',
help='The name of the remote to push to. [%default]'),
op.make_option(
'-b', '--branch', dest='branch', default='gh-pages',
help='Name of the branch to write to. [%default]'),
op.make_option(
'-s', '--shell', dest='use_shell', default=False,
action='store_true',
help='Use the shell when invoking Git. [%default]'),
op.make_option(
'-l', '--follow-links', dest='followlinks',
default=False, action='store_true',
help='Follow symlinks when adding files. [%default]')
]
def ghp_import(srcdir, **kwargs):
if not os.path.isdir(srcdir):
raise GhpError("Not a directory: %s" % srcdir)
opts = {opt.dest: opt.default for opt in options()}
opts.update(kwargs)
git = Git(use_shell=opts['use_shell'])
git.check_repo()
if not git.try_rebase(opts['remote'], opts['branch']):
raise GhpError("Failed to rebase %s branch." % opts['branch'])
run_import(git, srcdir, **opts)
if opts['push']:
if opts['force']:
git.check_call('push', opts['remote'], opts['branch'], '--force')
else:
git.check_call('push', opts['remote'], opts['branch'])
def main():
parser = op.OptionParser(usage=__usage__, option_list=options())
opts, args = parser.parse_args()
if len(args) == 0:
parser.error("No import directory specified.")
if len(args) > 1:
parser.error("Unknown arguments specified: %s" % ', '.join(args[1:]))
try:
ghp_import(args[0], **opts.__dict__)
except GhpError as e:
parser.error(e.message)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"GIT_COMMITTER_EMAIL",
"GIT_COMMITTER_NAME",
"GIT_COMMITTER_DATE"
] |
[]
|
["GIT_COMMITTER_EMAIL", "GIT_COMMITTER_NAME", "GIT_COMMITTER_DATE"]
|
python
| 3 | 0 | |
examples/insert_test.go
|
package examples
import (
"context"
"fmt"
"os"
"testing"
"time"
"github.com/pborman/uuid"
btrdb "github.com/BTrDB/btrdb/v5"
)
func TestInsertingProceduralData(t *testing.T) {
//First connect to the cluster. In BTrDB v4 we are advocating that all
//programs use environment variables to specify the endpoint rather
//than assuming specific addresses:
//Set $BTRDB_ENDPOINTS to
//"server1:4410;server2:4410..."
//Note that not all endpoints need be listed, but it will make this
//program more resilient if you specify more or all of the endpoints
db, err := btrdb.ConnectAuth(context.TODO(), os.Getenv("BTRDB_APIKEY"), btrdb.EndpointsFromEnv()...)
if err != nil {
t.Fatalf("Unexpected connection error: %v", err)
}
//Streams must be created before use
uu := uuid.NewRandom()
//A collection is a small group of streams (<100 is best) generally associated
//with a single device or service. BTrDB is designed for lots of small collections
//not small numbers of big collections
collection := fmt.Sprintf("test/inserting_procedural_data.%d", time.Now().UnixNano())
//The annotation is used to store (mutable) extra data with the stream. It
//is technically just a byte array, but we prefer people use msgpacked objects.
	//the tooling is not quite there to make this easy, so it's ok to make this nil
//for now
var annotation map[string]*string = nil
stream, err := db.Create(context.TODO(), uu, collection, btrdb.OptKV("name", "test"), annotation)
if err != nil {
t.Fatalf("Unexpected creation error: %v", err)
}
//Now you manipulate the stream:
err = stream.InsertTV(context.TODO(),
[]int64{100e6, 200e6, 300e6, 400e6},
[]float64{1.1, 2.2, 3.3, 4.4})
if err != nil {
t.Fatalf("Unexpected insert error: %v", err)
}
//Start = -1000ns, End = 1000ns, Width = 150ns, Depth = 2^0 (all the way), Version = latest
rvchan, ver, errc := stream.Windows(context.TODO(), -1000e6, 1000e6, 150e6, 0, btrdb.LatestVersion)
_ = ver //don't use this, that's ok
for result := range rvchan {
fmt.Printf("Window @%d min=%.2f mean=%.2f max=%.2f count=%d\n",
result.Time, result.Min, result.Mean, result.Max, result.Count)
}
if e := <-errc; e != nil {
t.Fatalf("Got an error: %v", e)
}
}
|
[
"\"BTRDB_APIKEY\""
] |
[] |
[
"BTRDB_APIKEY"
] |
[]
|
["BTRDB_APIKEY"]
|
go
| 1 | 0 | |
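Since the test above relies entirely on environment variables for its connection settings, a small pre-flight check can make missing configuration fail fast. The sketch below is an assumption, not part of the BTrDB repo: it uses only the standard library, and the variable names BTRDB_APIKEY and BTRDB_ENDPOINTS are taken from the comments in the test.

package main

import (
    "fmt"
    "os"
    "strings"
)

func main() {
    // Both variable names are assumptions drawn from the test's comments above.
    apiKey := os.Getenv("BTRDB_APIKEY")
    endpoints := strings.Split(os.Getenv("BTRDB_ENDPOINTS"), ";")
    if apiKey == "" || len(endpoints) == 0 || endpoints[0] == "" {
        fmt.Fprintln(os.Stderr, "BTRDB_APIKEY and BTRDB_ENDPOINTS must be set")
        os.Exit(1)
    }
    fmt.Printf("would connect using %d endpoint(s)\n", len(endpoints))
}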
tests/peer_fixture_base.py
|
import os
import unittest
import odil
class PeerFixtureBase(unittest.TestCase):
def setUp(self, contexts):
self.association = odil.Association()
self.association.set_peer_host(os.environ["ODIL_PEER_HOST_NAME"])
self.association.set_peer_port(int(os.environ["ODIL_PEER_PORT"]))
self.association.update_parameters()\
.set_calling_ae_title(os.environ["ODIL_OWN_AET"])\
.set_called_ae_title(os.environ["ODIL_PEER_AET"]) \
.set_presentation_contexts(contexts)
self.association.associate()
def tearDown(self):
self.association.release()
|
[] |
[] |
[
"ODIL_OWN_AET",
"ODIL_PEER_HOST_NAME",
"ODIL_PEER_AET",
"ODIL_PEER_PORT"
] |
[]
|
["ODIL_OWN_AET", "ODIL_PEER_HOST_NAME", "ODIL_PEER_AET", "ODIL_PEER_PORT"]
|
python
| 4 | 0 | |
exporter/src/main/java/io/zeebe/monitor/SimpleMonitorExporter.java
|
/*
* Copyright © 2017 camunda services GmbH ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.zeebe.monitor;
import io.zeebe.exporter.context.Context;
import io.zeebe.exporter.context.Controller;
import io.zeebe.exporter.record.Record;
import io.zeebe.exporter.record.RecordMetadata;
import io.zeebe.exporter.record.value.DeploymentRecordValue;
import io.zeebe.exporter.record.value.IncidentRecordValue;
import io.zeebe.exporter.record.value.JobRecordValue;
import io.zeebe.exporter.record.value.MessageRecordValue;
import io.zeebe.exporter.record.value.MessageSubscriptionRecordValue;
import io.zeebe.exporter.record.value.TimerRecordValue;
import io.zeebe.exporter.record.value.WorkflowInstanceRecordValue;
import io.zeebe.exporter.record.value.deployment.DeployedWorkflow;
import io.zeebe.exporter.record.value.deployment.DeploymentResource;
import io.zeebe.exporter.spi.Exporter;
import io.zeebe.protocol.Protocol;
import io.zeebe.protocol.clientapi.RecordType;
import io.zeebe.protocol.clientapi.ValueType;
import io.zeebe.protocol.intent.DeploymentIntent;
import io.zeebe.protocol.intent.IncidentIntent;
import io.zeebe.protocol.intent.Intent;
import io.zeebe.protocol.intent.JobIntent;
import io.zeebe.protocol.intent.MessageIntent;
import io.zeebe.protocol.intent.MessageSubscriptionIntent;
import io.zeebe.protocol.intent.TimerIntent;
import io.zeebe.protocol.intent.WorkflowInstanceIntent;
import java.io.BufferedReader;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
import java.time.Duration;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.UUID;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import org.slf4j.Logger;
public class SimpleMonitorExporter implements Exporter {
private static final String ENV_PREFIX = "SIMPLE_MONITOR_EXPORTER_";
private static final String ENV_JDBC_URL = ENV_PREFIX + "JDBC_URL";
private static final String ENV_JDBC_DRIVER = ENV_PREFIX + "JDBC_DRIVER";
private static final String ENV_JDBC_USER = ENV_PREFIX + "JDBC_USER";
private static final String ENV_JDBC_PASSWORD = ENV_PREFIX + "JDBC_PASSWORD";
private static final String ENV_JDBC_CREATE_SCHEMA = ENV_PREFIX + "JDBC_CREATE_SCHEMA";
private static final String INSERT_WORKFLOW =
"INSERT INTO WORKFLOW (ID_, KEY_, BPMN_PROCESS_ID_, VERSION_, RESOURCE_, TIMESTAMP_) VALUES ('%s', %d, '%s', %d, '%s', %d);";
private static final String INSERT_WORKFLOW_INSTANCE =
"INSERT INTO WORKFLOW_INSTANCE"
+ " (ID_, PARTITION_ID_, KEY_, BPMN_PROCESS_ID_, VERSION_, WORKFLOW_KEY_, STATE_, START_)"
+ " VALUES "
+ "('%s', %d, %d, '%s', %d, %d, '%s', %d);";
private static final String UPDATE_WORKFLOW_INSTANCE =
"UPDATE WORKFLOW_INSTANCE SET END_ = %d, STATE_ = '%s' WHERE KEY_ = %d;";
private static final String INSERT_ACTIVITY_INSTANCE =
"INSERT INTO ACTIVITY_INSTANCE"
+ " (ID_, PARTITION_ID_, KEY_, INTENT_, WORKFLOW_INSTANCE_KEY_, ACTIVITY_ID_, SCOPE_INSTANCE_KEY_, PAYLOAD_, WORKFLOW_KEY_, TIMESTAMP_)"
+ " VALUES "
+ "('%s', %d, %d, '%s', %d, '%s', %d, '%s', %d, %d);";
private static final String INSERT_INCIDENT =
"INSERT INTO INCIDENT"
+ " (ID_, KEY_, WORKFLOW_INSTANCE_KEY_, ACTIVITY_INSTANCE_KEY_, JOB_KEY_, ERROR_TYPE_, ERROR_MSG_, CREATED_)"
+ " VALUES "
+ "('%s', %d, %d, %d, %d, '%s', '%s', %d)";
private static final String UPDATE_INCIDENT =
"UPDATE INCIDENT SET RESOLVED_ = %d WHERE KEY_ = %d;";
private static final String INSERT_JOB =
"INSERT INTO JOB"
+ " (ID_, KEY_, JOB_TYPE_, WORKFLOW_INSTANCE_KEY_, ACTIVITY_INSTANCE_KEY_, STATE_, RETRIES_, TIMESTAMP_)"
+ " VALUES "
+ "('%s', %d, '%s', %d, %d, '%s', %d, %d)";
private static final String UPDATE_JOB =
"UPDATE JOB SET STATE_ = '%s', WORKER_ = '%s', RETRIES_ = %d, TIMESTAMP_ = %d WHERE KEY_ = %d;";
private static final String INSERT_MESSAGE =
"INSERT INTO MESSAGE"
+ " (ID_, KEY_, NAME_, CORRELATION_KEY_, MESSAGE_ID_, PAYLOAD_, STATE_, TIMESTAMP_)"
+ " VALUES "
+ "('%s', %d, '%s', '%s', '%s', '%s', '%s', %d)";
private static final String UPDATE_MESSAGE =
"UPDATE MESSAGE SET STATE_ = '%s', TIMESTAMP_ = %d WHERE KEY_ = %d;";
private static final String INSERT_MESSAGE_SUBSCRIPTION =
"INSERT INTO MESSAGE_SUBSCRIPTION"
+ " (ID_, WORKFLOW_INSTANCE_KEY_, ACTIVITY_INSTANCE_KEY_, MESSAGE_NAME_, CORRELATION_KEY_, STATE_, TIMESTAMP_)"
+ " VALUES "
+ "('%s', %d, %d, '%s', '%s', '%s', %d)";
private static final String UPDATE_MESSAGE_SUBSCRIPTION =
"UPDATE MESSAGE_SUBSCRIPTION SET STATE_ = '%s', TIMESTAMP_ = %d WHERE ACTIVITY_INSTANCE_KEY_ = %d and MESSAGE_NAME_ = '%s';";
private static final String INSERT_TIMER =
"INSERT INTO TIMER"
+ " (ID_, KEY_, ACTIVITY_INSTANCE_KEY_, HANDLER_NODE_ID_, DUE_DATE_, STATE_, TIMESTAMP_)"
+ " VALUES "
+ "('%s', %d, %d, '%s', %d, '%s', %d)";
private static final String UPDATE_TIMER =
"UPDATE TIMER SET STATE_ = '%s', TIMESTAMP_ = %d WHERE KEY_ = %d;";
public static final String CREATE_SCHEMA_SQL_PATH = "/CREATE_SCHEMA.sql";
private final Map<ValueType, Consumer<Record>> insertCreatorPerType = new HashMap<>();
private final List<String> sqlStatements;
private Logger log;
private Controller controller;
private SimpleMonitorExporterConfiguration configuration;
private Connection connection;
private int batchSize;
private int batchTimerMilli;
private Duration batchExecutionTimer;
private long lastPosition;
public SimpleMonitorExporter() {
insertCreatorPerType.put(ValueType.DEPLOYMENT, this::exportDeploymentRecord);
insertCreatorPerType.put(ValueType.WORKFLOW_INSTANCE, this::exportWorkflowInstanceRecord);
insertCreatorPerType.put(ValueType.INCIDENT, this::exportIncidentRecord);
insertCreatorPerType.put(ValueType.JOB, this::exportJobRecord);
insertCreatorPerType.put(ValueType.MESSAGE, this::exportMessageRecord);
insertCreatorPerType.put(ValueType.MESSAGE_SUBSCRIPTION, this::exportMessageSubscriptionRecord);
insertCreatorPerType.put(ValueType.TIMER, this::exportTimerRecord);
sqlStatements = new ArrayList<>();
}
@Override
public void configure(final Context context) {
log = context.getLogger();
configuration =
context.getConfiguration().instantiate(SimpleMonitorExporterConfiguration.class);
applyEnvironmentVariables(configuration);
batchSize = configuration.batchSize;
batchTimerMilli = configuration.batchTimerMilli;
log.debug("Exporter configured with {}", configuration);
try {
Class.forName(configuration.driverName);
} catch (final ClassNotFoundException e) {
throw new RuntimeException("Driver not found in class path", e);
}
}
private void applyEnvironmentVariables(final SimpleMonitorExporterConfiguration configuration) {
final Map<String, String> environment = System.getenv();
Optional.ofNullable(environment.get(ENV_JDBC_URL))
.ifPresent(url -> configuration.jdbcUrl = url);
Optional.ofNullable(environment.get(ENV_JDBC_DRIVER))
.ifPresent(driver -> configuration.driverName = driver);
Optional.ofNullable(environment.get(ENV_JDBC_USER))
.ifPresent(user -> configuration.userName = user);
Optional.ofNullable(environment.get(ENV_JDBC_PASSWORD))
.ifPresent(password -> configuration.password = password);
Optional.ofNullable(environment.get(ENV_JDBC_CREATE_SCHEMA))
.ifPresent(createSchema -> configuration.createSchema = createSchema);
}
@Override
public void open(final Controller controller) {
try {
connection =
DriverManager.getConnection(
configuration.jdbcUrl, configuration.userName, configuration.password);
connection.setAutoCommit(true);
} catch (final SQLException e) {
throw new RuntimeException(
String.format("Error on opening database with configuration %s.", configuration), e);
}
createTables();
log.info("Start exporting to {}.", configuration.jdbcUrl);
this.controller = controller;
if (batchTimerMilli > 0) {
batchExecutionTimer = Duration.ofMillis(batchTimerMilli);
this.controller.scheduleTask(batchExecutionTimer, this::batchTimerExecution);
}
}
private void createTables() {
if (configuration.createSchema != null && !configuration.createSchema.equals("-")) {
try (final Statement statement = connection.createStatement()) {
final InputStream resourceAsStream =
SimpleMonitorExporter.class.getResourceAsStream(configuration.createSchema);
final String sql =
new BufferedReader(new InputStreamReader(resourceAsStream))
.lines()
.collect(Collectors.joining(System.lineSeparator()));
/**
* MySQL does not allow semicolon batching by executeUpdate(sql), split using naive approach
*/
for (String s: sql.split("\\;")) {
log.info("Create tables:\n{}", s);
statement.executeUpdate(s);
}
} catch (final Exception e) {
throw new RuntimeException(e);
}
}
}
@Override
public void close() {
try {
connection.close();
} catch (final Exception e) {
log.warn("Failed to close jdbc connection", e);
}
log.info("Exporter closed");
}
@Override
public void export(final Record record) {
lastPosition = record.getPosition();
if (record.getMetadata().getRecordType() != RecordType.EVENT) {
return;
}
final Consumer<Record> recordConsumer =
insertCreatorPerType.get(record.getMetadata().getValueType());
if (recordConsumer != null) {
recordConsumer.accept(record);
if (sqlStatements.size() > batchSize) {
executeSqlStatementBatch();
}
}
}
private void batchTimerExecution() {
executeSqlStatementBatch();
controller.scheduleTask(batchExecutionTimer, this::batchTimerExecution);
}
private void executeSqlStatementBatch() {
try (final Statement statement = connection.createStatement()) {
for (final String insert : sqlStatements) {
statement.addBatch(insert);
}
statement.executeBatch();
sqlStatements.clear();
} catch (final Exception e) {
log.error("Batch insert failed!", e);
}
controller.updateLastExportedRecordPosition(lastPosition);
}
private void exportDeploymentRecord(final Record record) {
final RecordMetadata metadata = record.getMetadata();
if (metadata.getIntent() != DeploymentIntent.CREATED
|| metadata.getPartitionId() != Protocol.DEPLOYMENT_PARTITION) {
// ignore deployment event on other partitions to avoid duplicates
return;
}
final long timestamp = record.getTimestamp().toEpochMilli();
final DeploymentRecordValue deploymentRecordValue = (DeploymentRecordValue) record.getValue();
final List<DeploymentResource> resources = deploymentRecordValue.getResources();
for (final DeploymentResource resource : resources) {
final List<DeployedWorkflow> deployedWorkflows =
deploymentRecordValue
.getDeployedWorkflows()
.stream()
.filter(w -> w.getResourceName().equals(resource.getResourceName()))
.collect(Collectors.toList());
for (final DeployedWorkflow deployedWorkflow : deployedWorkflows) {
final String insertStatement =
String.format(
INSERT_WORKFLOW,
createId(),
deployedWorkflow.getWorkflowKey(),
getCleanString(deployedWorkflow.getBpmnProcessId()),
deployedWorkflow.getVersion(),
getCleanString(new String(resource.getResource())),
timestamp);
sqlStatements.add(insertStatement);
}
}
}
private boolean isWorkflowInstance(
final Record record, final WorkflowInstanceRecordValue workflowInstanceRecordValue) {
return workflowInstanceRecordValue.getWorkflowInstanceKey() == record.getKey();
}
private void exportWorkflowInstanceRecord(final Record record) {
final long key = record.getKey();
final int partitionId = record.getMetadata().getPartitionId();
final Intent intent = record.getMetadata().getIntent();
final long timestamp = record.getTimestamp().toEpochMilli();
final WorkflowInstanceRecordValue workflowInstanceRecordValue =
(WorkflowInstanceRecordValue) record.getValue();
if (isWorkflowInstance(record, workflowInstanceRecordValue)) {
exportWorkflowInstance(key, partitionId, intent, timestamp, workflowInstanceRecordValue);
} else {
exportActivityInstance(key, partitionId, intent, timestamp, workflowInstanceRecordValue);
}
}
private void exportWorkflowInstance(
final long key,
final int partitionId,
final Intent intent,
final long timestamp,
final WorkflowInstanceRecordValue workflowInstanceRecordValue) {
if (intent == WorkflowInstanceIntent.ELEMENT_ACTIVATED) {
final String bpmnProcessId = getCleanString(workflowInstanceRecordValue.getBpmnProcessId());
final int version = workflowInstanceRecordValue.getVersion();
final long workflowKey = workflowInstanceRecordValue.getWorkflowKey();
final String insertWorkflowInstanceStatement =
String.format(
INSERT_WORKFLOW_INSTANCE,
createId(),
partitionId,
key,
bpmnProcessId,
version,
workflowKey,
"Active",
timestamp);
sqlStatements.add(insertWorkflowInstanceStatement);
} else if (intent == WorkflowInstanceIntent.ELEMENT_COMPLETED) {
final String updateWorkflowInstanceStatement =
String.format(UPDATE_WORKFLOW_INSTANCE, timestamp, "Completed", key);
sqlStatements.add(updateWorkflowInstanceStatement);
} else if (intent == WorkflowInstanceIntent.ELEMENT_TERMINATED) {
final String updateWorkflowInstanceStatement =
String.format(UPDATE_WORKFLOW_INSTANCE, timestamp, "Terminated", key);
sqlStatements.add(updateWorkflowInstanceStatement);
}
}
private void exportActivityInstance(
final long key,
final int partitionId,
final Intent intent,
final long timestamp,
final WorkflowInstanceRecordValue workflowInstanceRecordValue) {
final long workflowInstanceKey = workflowInstanceRecordValue.getWorkflowInstanceKey();
final String elementId = getCleanString(workflowInstanceRecordValue.getElementId());
final long scopeInstanceKey = workflowInstanceRecordValue.getScopeInstanceKey();
final String payload = getCleanString(workflowInstanceRecordValue.getPayload());
final long workflowKey = workflowInstanceRecordValue.getWorkflowKey();
final String insertActivityInstanceStatement =
String.format(
INSERT_ACTIVITY_INSTANCE,
createId(),
partitionId,
key,
intent,
workflowInstanceKey,
elementId,
scopeInstanceKey,
payload,
workflowKey,
timestamp);
sqlStatements.add(insertActivityInstanceStatement);
}
private void exportIncidentRecord(final Record record) {
final long key = record.getKey();
final Intent intent = record.getMetadata().getIntent();
final long timestamp = record.getTimestamp().toEpochMilli();
final IncidentRecordValue incidentRecordValue = (IncidentRecordValue) record.getValue();
final long workflowInstanceKey = incidentRecordValue.getWorkflowInstanceKey();
final long elementInstanceKey = incidentRecordValue.getElementInstanceKey();
final long jobKey = incidentRecordValue.getJobKey();
final String errorType = getCleanString(incidentRecordValue.getErrorType());
final String errorMessage = getCleanString(incidentRecordValue.getErrorMessage());
if (intent == IncidentIntent.CREATED) {
final String insertStatement =
String.format(
INSERT_INCIDENT,
createId(),
key,
workflowInstanceKey,
elementInstanceKey,
jobKey,
errorType,
errorMessage,
timestamp);
sqlStatements.add(insertStatement);
} else if (intent == IncidentIntent.RESOLVED) {
final String updateIncidentStatement = String.format(UPDATE_INCIDENT, timestamp, key);
sqlStatements.add(updateIncidentStatement);
}
}
private void exportJobRecord(final Record record) {
final long key = record.getKey();
final Intent intent = record.getMetadata().getIntent();
final long timestamp = record.getTimestamp().toEpochMilli();
final String state = intent.name().toLowerCase();
final JobRecordValue jobRecord = (JobRecordValue) record.getValue();
final String jobType = jobRecord.getType();
final long workflowInstanceKey = jobRecord.getHeaders().getWorkflowInstanceKey();
final long elementInstanceKey = jobRecord.getHeaders().getElementInstanceKey();
final int retries = jobRecord.getRetries();
final String worker = jobRecord.getWorker();
if (intent == JobIntent.CREATED) {
final String insertStatement =
String.format(
INSERT_JOB,
createId(),
key,
jobType,
workflowInstanceKey,
elementInstanceKey,
state,
retries,
timestamp);
sqlStatements.add(insertStatement);
} else {
final String updateStatement =
String.format(UPDATE_JOB, state, worker, retries, timestamp, key);
sqlStatements.add(updateStatement);
}
}
private void exportMessageRecord(final Record record) {
final long key = record.getKey();
final Intent intent = record.getMetadata().getIntent();
final long timestamp = record.getTimestamp().toEpochMilli();
final String state = intent.name().toLowerCase();
final MessageRecordValue messageRecord = (MessageRecordValue) record.getValue();
final String name = messageRecord.getName();
final String correlationKey = messageRecord.getCorrelationKey();
final String messageId = messageRecord.getMessageId();
final String payload = messageRecord.getPayload();
if (intent == MessageIntent.PUBLISHED) {
final String insertStatement =
String.format(
INSERT_MESSAGE,
createId(),
key,
name,
correlationKey,
messageId,
payload,
state,
timestamp);
sqlStatements.add(insertStatement);
} else {
final String updateStatement = String.format(UPDATE_MESSAGE, state, timestamp, key);
sqlStatements.add(updateStatement);
}
}
private void exportMessageSubscriptionRecord(final Record record) {
final Intent intent = record.getMetadata().getIntent();
final long timestamp = record.getTimestamp().toEpochMilli();
final String state = intent.name().toLowerCase();
final MessageSubscriptionRecordValue subscriptionRecord =
(MessageSubscriptionRecordValue) record.getValue();
final String messageName = subscriptionRecord.getMessageName();
final String correlationKey = subscriptionRecord.getCorrelationKey();
final long workflowInstanceKey = subscriptionRecord.getWorkflowInstanceKey();
final long elementInstanceKey = subscriptionRecord.getElementInstanceKey();
if (intent == MessageSubscriptionIntent.OPENED) {
final String insertStatement =
String.format(
INSERT_MESSAGE_SUBSCRIPTION,
createId(),
workflowInstanceKey,
elementInstanceKey,
messageName,
correlationKey,
state,
timestamp);
sqlStatements.add(insertStatement);
} else {
final String updateStatement =
String.format(
UPDATE_MESSAGE_SUBSCRIPTION, state, timestamp, elementInstanceKey, messageName);
sqlStatements.add(updateStatement);
}
}
private void exportTimerRecord(final Record record) {
final long key = record.getKey();
final Intent intent = record.getMetadata().getIntent();
final long timestamp = record.getTimestamp().toEpochMilli();
final String state = intent.name().toLowerCase();
final TimerRecordValue timerRecord = (TimerRecordValue) record.getValue();
final long elementInstanceKey = timerRecord.getElementInstanceKey();
final String handlerNodeId = timerRecord.getHandlerFlowNodeId();
final long dueDate = timerRecord.getDueDate();
if (intent == TimerIntent.CREATED) {
final String insertStatement =
String.format(
INSERT_TIMER,
createId(),
key,
elementInstanceKey,
handlerNodeId,
dueDate,
state,
timestamp);
sqlStatements.add(insertStatement);
} else {
final String updateStatement = String.format(UPDATE_TIMER, state, timestamp, key);
sqlStatements.add(updateStatement);
}
}
private String getCleanString(final String string) {
return string.trim().replaceAll("'", "`");
}
private String createId() {
return UUID.randomUUID().toString();
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
example_json_test.go
|
package pgx_test
import (
"context"
"fmt"
"os"
"github.com/jackc/pgx/v4"
)
func Example_JSON() {
conn, err := pgx.Connect(context.Background(), os.Getenv("PGX_TEST_DATABASE"))
if err != nil {
fmt.Printf("Unable to establish connection: %v", err)
return
}
type person struct {
Name string `json:"name"`
Age int `json:"age"`
}
input := person{
Name: "John",
Age: 42,
}
var output person
err = conn.QueryRow(context.Background(), "select $1::json", input).Scan(&output)
if err != nil {
fmt.Println(err)
return
}
fmt.Println(output.Name, output.Age)
// Output:
// John 42
}
|
[
"\"PGX_TEST_DATABASE\""
] |
[] |
[
"PGX_TEST_DATABASE"
] |
[]
|
["PGX_TEST_DATABASE"]
|
go
| 1 | 0 | |
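The pgx example above reads its connection string from PGX_TEST_DATABASE and simply prints the error when it is unset. In a test suite, one common alternative is to skip rather than fail; the sketch below is a hypothetical illustration of that pattern and is not part of pgx.

package example

import (
    "os"
    "testing"
)

func TestNeedsDatabase(t *testing.T) {
    // Skip the integration test when the connection string is not configured.
    dsn := os.Getenv("PGX_TEST_DATABASE")
    if dsn == "" {
        t.Skip("PGX_TEST_DATABASE not set; skipping integration test")
    }
    _ = dsn // a real test would pass dsn to pgx.Connect(context.Background(), dsn)
}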
test/integration/persistentvolumes/persistent_volumes_test.go
|
// +build integration,!no-etcd
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolumes
import (
"fmt"
"math/rand"
"net/http/httptest"
"os"
"strconv"
"testing"
"time"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
storageutil "k8s.io/kubernetes/pkg/apis/storage/v1beta1/util"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient"
fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
persistentvolumecontroller "k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
"k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/watch"
"k8s.io/kubernetes/test/integration/framework"
"github.com/golang/glog"
)
// Several tests in this file are configurable by environment variables:
// KUBE_INTEGRATION_PV_OBJECTS - nr. of PVs/PVCs to be created
// (100 by default)
// KUBE_INTEGRATION_PV_SYNC_PERIOD - volume controller sync period
// (10s by default)
// KUBE_INTEGRATION_PV_END_SLEEP - for how long should
// TestPersistentVolumeMultiPVsPVCs sleep when it's finished (0s by
// default). This is useful to test how long does it take for periodic sync
// to process bound PVs/PVCs.
//
const defaultObjectCount = 100
const defaultSyncPeriod = 10 * time.Second
const provisionerPluginName = "kubernetes.io/mock-provisioner"
func getObjectCount() int {
objectCount := defaultObjectCount
if s := os.Getenv("KUBE_INTEGRATION_PV_OBJECTS"); s != "" {
var err error
objectCount, err = strconv.Atoi(s)
if err != nil {
glog.Fatalf("cannot parse value of KUBE_INTEGRATION_PV_OBJECTS: %v", err)
}
}
glog.V(2).Infof("using KUBE_INTEGRATION_PV_OBJECTS=%d", objectCount)
return objectCount
}
func getSyncPeriod(syncPeriod time.Duration) time.Duration {
period := syncPeriod
if s := os.Getenv("KUBE_INTEGRATION_PV_SYNC_PERIOD"); s != "" {
var err error
period, err = time.ParseDuration(s)
if err != nil {
glog.Fatalf("cannot parse value of KUBE_INTEGRATION_PV_SYNC_PERIOD: %v", err)
}
}
glog.V(2).Infof("using KUBE_INTEGRATION_PV_SYNC_PERIOD=%v", period)
return period
}
func testSleep() {
var period time.Duration
if s := os.Getenv("KUBE_INTEGRATION_PV_END_SLEEP"); s != "" {
var err error
period, err = time.ParseDuration(s)
if err != nil {
glog.Fatalf("cannot parse value of KUBE_INTEGRATION_PV_END_SLEEP: %v", err)
}
}
glog.V(2).Infof("using KUBE_INTEGRATION_PV_END_SLEEP=%v", period)
if period != 0 {
time.Sleep(period)
glog.V(2).Infof("sleep finished")
}
}
func TestPersistentVolumeRecycler(t *testing.T) {
glog.V(2).Infof("TestPersistentVolumeRecycler started")
_, s := framework.RunAMaster(nil)
defer s.Close()
ns := framework.CreateTestingNamespace("pv-recycler", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
testClient, ctrl, watchPV, watchPVC := createClients(ns, t, s, defaultSyncPeriod)
defer watchPV.Stop()
defer watchPVC.Stop()
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (PersistenceVolumes).
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})
stopCh := make(chan struct{})
ctrl.Run(stopCh)
defer close(stopCh)
// This PV will be claimed, released, and recycled.
pv := createPV("fake-pv-recycler", "/tmp/foo", "10G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRecycle)
pvc := createPVC("fake-pvc-recycler", ns.Name, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})
_, err := testClient.PersistentVolumes().Create(pv)
if err != nil {
t.Errorf("Failed to create PersistentVolume: %v", err)
}
glog.V(2).Infof("TestPersistentVolumeRecycler pvc created")
_, err = testClient.PersistentVolumeClaims(ns.Name).Create(pvc)
if err != nil {
t.Errorf("Failed to create PersistentVolumeClaim: %v", err)
}
glog.V(2).Infof("TestPersistentVolumeRecycler pvc created")
// wait until the controller pairs the volume and claim
waitForPersistentVolumePhase(testClient, pv.Name, watchPV, v1.VolumeBound)
glog.V(2).Infof("TestPersistentVolumeRecycler pv bound")
waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound)
glog.V(2).Infof("TestPersistentVolumeRecycler pvc bound")
// deleting a claim releases the volume, after which it can be recycled
if err := testClient.PersistentVolumeClaims(ns.Name).Delete(pvc.Name, nil); err != nil {
t.Errorf("error deleting claim %s", pvc.Name)
}
glog.V(2).Infof("TestPersistentVolumeRecycler pvc deleted")
waitForPersistentVolumePhase(testClient, pv.Name, watchPV, v1.VolumeReleased)
glog.V(2).Infof("TestPersistentVolumeRecycler pv released")
waitForPersistentVolumePhase(testClient, pv.Name, watchPV, v1.VolumeAvailable)
glog.V(2).Infof("TestPersistentVolumeRecycler pv available")
}
func TestPersistentVolumeDeleter(t *testing.T) {
glog.V(2).Infof("TestPersistentVolumeDeleter started")
_, s := framework.RunAMaster(nil)
defer s.Close()
ns := framework.CreateTestingNamespace("pv-deleter", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
testClient, ctrl, watchPV, watchPVC := createClients(ns, t, s, defaultSyncPeriod)
defer watchPV.Stop()
defer watchPVC.Stop()
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (PersistenceVolumes).
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})
stopCh := make(chan struct{})
ctrl.Run(stopCh)
defer close(stopCh)
// This PV will be claimed, released, and deleted.
pv := createPV("fake-pv-deleter", "/tmp/foo", "10G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimDelete)
pvc := createPVC("fake-pvc-deleter", ns.Name, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})
_, err := testClient.PersistentVolumes().Create(pv)
if err != nil {
t.Errorf("Failed to create PersistentVolume: %v", err)
}
glog.V(2).Infof("TestPersistentVolumeDeleter pv created")
_, err = testClient.PersistentVolumeClaims(ns.Name).Create(pvc)
if err != nil {
t.Errorf("Failed to create PersistentVolumeClaim: %v", err)
}
glog.V(2).Infof("TestPersistentVolumeDeleter pvc created")
waitForPersistentVolumePhase(testClient, pv.Name, watchPV, v1.VolumeBound)
glog.V(2).Infof("TestPersistentVolumeDeleter pv bound")
waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound)
glog.V(2).Infof("TestPersistentVolumeDeleter pvc bound")
// deleting a claim releases the volume, after which it can be recycled
if err := testClient.PersistentVolumeClaims(ns.Name).Delete(pvc.Name, nil); err != nil {
t.Errorf("error deleting claim %s", pvc.Name)
}
glog.V(2).Infof("TestPersistentVolumeDeleter pvc deleted")
waitForPersistentVolumePhase(testClient, pv.Name, watchPV, v1.VolumeReleased)
glog.V(2).Infof("TestPersistentVolumeDeleter pv released")
for {
event := <-watchPV.ResultChan()
if event.Type == watch.Deleted {
break
}
}
glog.V(2).Infof("TestPersistentVolumeDeleter pv deleted")
}
func TestPersistentVolumeBindRace(t *testing.T) {
// Test a race binding many claims to a PV that is pre-bound to a specific
// PVC. Only this specific PVC should get bound.
glog.V(2).Infof("TestPersistentVolumeBindRace started")
_, s := framework.RunAMaster(nil)
defer s.Close()
ns := framework.CreateTestingNamespace("pv-bind-race", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
testClient, ctrl, watchPV, watchPVC := createClients(ns, t, s, defaultSyncPeriod)
defer watchPV.Stop()
defer watchPVC.Stop()
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (PersistenceVolumes).
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})
stopCh := make(chan struct{})
ctrl.Run(stopCh)
defer close(stopCh)
pv := createPV("fake-pv-race", "/tmp/foo", "10G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRetain)
pvc := createPVC("fake-pvc-race", ns.Name, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})
counter := 0
maxClaims := 100
claims := []*v1.PersistentVolumeClaim{}
for counter <= maxClaims {
counter += 1
clone, _ := conversion.NewCloner().DeepCopy(pvc)
newPvc, _ := clone.(*v1.PersistentVolumeClaim)
newPvc.ObjectMeta = v1.ObjectMeta{Name: fmt.Sprintf("fake-pvc-race-%d", counter)}
claim, err := testClient.PersistentVolumeClaims(ns.Name).Create(newPvc)
if err != nil {
t.Fatalf("Error creating newPvc: %v", err)
}
claims = append(claims, claim)
}
glog.V(2).Infof("TestPersistentVolumeBindRace claims created")
// putting a bind manually on a pv should only match the claim it is bound to
rand.Seed(time.Now().Unix())
claim := claims[rand.Intn(maxClaims-1)]
claimRef, err := v1.GetReference(claim)
if err != nil {
t.Fatalf("Unexpected error getting claimRef: %v", err)
}
pv.Spec.ClaimRef = claimRef
pv.Spec.ClaimRef.UID = ""
pv, err = testClient.PersistentVolumes().Create(pv)
if err != nil {
t.Fatalf("Unexpected error creating pv: %v", err)
}
glog.V(2).Infof("TestPersistentVolumeBindRace pv created, pre-bound to %s", claim.Name)
waitForPersistentVolumePhase(testClient, pv.Name, watchPV, v1.VolumeBound)
glog.V(2).Infof("TestPersistentVolumeBindRace pv bound")
waitForAnyPersistentVolumeClaimPhase(watchPVC, v1.ClaimBound)
glog.V(2).Infof("TestPersistentVolumeBindRace pvc bound")
pv, err = testClient.PersistentVolumes().Get(pv.Name)
if err != nil {
t.Fatalf("Unexpected error getting pv: %v", err)
}
if pv.Spec.ClaimRef == nil {
t.Fatalf("Unexpected nil claimRef")
}
if pv.Spec.ClaimRef.Namespace != claimRef.Namespace || pv.Spec.ClaimRef.Name != claimRef.Name {
t.Fatalf("Bind mismatch! Expected %s/%s but got %s/%s", claimRef.Namespace, claimRef.Name, pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name)
}
}
// TestPersistentVolumeClaimLabelSelector test binding using label selectors
func TestPersistentVolumeClaimLabelSelector(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
ns := framework.CreateTestingNamespace("pvc-label-selector", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
testClient, controller, watchPV, watchPVC := createClients(ns, t, s, defaultSyncPeriod)
defer watchPV.Stop()
defer watchPVC.Stop()
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (PersistenceVolumes).
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})
stopCh := make(chan struct{})
controller.Run(stopCh)
defer close(stopCh)
var (
err error
modes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
reclaim = v1.PersistentVolumeReclaimRetain
pv_true = createPV("pv-true", "/tmp/foo-label", "1G", modes, reclaim)
pv_false = createPV("pv-false", "/tmp/foo-label", "1G", modes, reclaim)
pvc = createPVC("pvc-ls-1", ns.Name, "1G", modes)
)
pv_true.ObjectMeta.SetLabels(map[string]string{"foo": "true"})
pv_false.ObjectMeta.SetLabels(map[string]string{"foo": "false"})
_, err = testClient.PersistentVolumes().Create(pv_true)
if err != nil {
t.Fatalf("Failed to create PersistentVolume: %v", err)
}
_, err = testClient.PersistentVolumes().Create(pv_false)
if err != nil {
t.Fatalf("Failed to create PersistentVolume: %v", err)
}
t.Log("volumes created")
pvc.Spec.Selector = &unversioned.LabelSelector{
MatchLabels: map[string]string{
"foo": "true",
},
}
_, err = testClient.PersistentVolumeClaims(ns.Name).Create(pvc)
if err != nil {
t.Fatalf("Failed to create PersistentVolumeClaim: %v", err)
}
t.Log("claim created")
waitForAnyPersistentVolumePhase(watchPV, v1.VolumeBound)
t.Log("volume bound")
waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound)
t.Log("claim bound")
pv, err := testClient.PersistentVolumes().Get("pv-false")
if err != nil {
t.Fatalf("Unexpected error getting pv: %v", err)
}
if pv.Spec.ClaimRef != nil {
t.Fatalf("False PV shouldn't be bound")
}
pv, err = testClient.PersistentVolumes().Get("pv-true")
if err != nil {
t.Fatalf("Unexpected error getting pv: %v", err)
}
if pv.Spec.ClaimRef == nil {
t.Fatalf("True PV should be bound")
}
if pv.Spec.ClaimRef.Namespace != pvc.Namespace || pv.Spec.ClaimRef.Name != pvc.Name {
t.Fatalf("Bind mismatch! Expected %s/%s but got %s/%s", pvc.Namespace, pvc.Name, pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name)
}
}
// TestPersistentVolumeClaimLabelSelectorMatchExpressions test binding using
// MatchExpressions label selectors
func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
ns := framework.CreateTestingNamespace("pvc-match-expresssions", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
testClient, controller, watchPV, watchPVC := createClients(ns, t, s, defaultSyncPeriod)
defer watchPV.Stop()
defer watchPVC.Stop()
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (PersistenceVolumes).
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})
stopCh := make(chan struct{})
controller.Run(stopCh)
defer close(stopCh)
var (
err error
modes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
reclaim = v1.PersistentVolumeReclaimRetain
pv_true = createPV("pv-true", "/tmp/foo-label", "1G", modes, reclaim)
pv_false = createPV("pv-false", "/tmp/foo-label", "1G", modes, reclaim)
pvc = createPVC("pvc-ls-1", ns.Name, "1G", modes)
)
pv_true.ObjectMeta.SetLabels(map[string]string{"foo": "valA", "bar": ""})
pv_false.ObjectMeta.SetLabels(map[string]string{"foo": "valB", "baz": ""})
_, err = testClient.PersistentVolumes().Create(pv_true)
if err != nil {
t.Fatalf("Failed to create PersistentVolume: %v", err)
}
_, err = testClient.PersistentVolumes().Create(pv_false)
if err != nil {
t.Fatalf("Failed to create PersistentVolume: %v", err)
}
t.Log("volumes created")
pvc.Spec.Selector = &unversioned.LabelSelector{
MatchExpressions: []unversioned.LabelSelectorRequirement{
{
Key: "foo",
Operator: unversioned.LabelSelectorOpIn,
Values: []string{"valA"},
},
{
Key: "foo",
Operator: unversioned.LabelSelectorOpNotIn,
Values: []string{"valB"},
},
{
Key: "bar",
Operator: unversioned.LabelSelectorOpExists,
Values: []string{},
},
{
Key: "baz",
Operator: unversioned.LabelSelectorOpDoesNotExist,
Values: []string{},
},
},
}
_, err = testClient.PersistentVolumeClaims(ns.Name).Create(pvc)
if err != nil {
t.Fatalf("Failed to create PersistentVolumeClaim: %v", err)
}
t.Log("claim created")
waitForAnyPersistentVolumePhase(watchPV, v1.VolumeBound)
t.Log("volume bound")
waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound)
t.Log("claim bound")
pv, err := testClient.PersistentVolumes().Get("pv-false")
if err != nil {
t.Fatalf("Unexpected error getting pv: %v", err)
}
if pv.Spec.ClaimRef != nil {
t.Fatalf("False PV shouldn't be bound")
}
pv, err = testClient.PersistentVolumes().Get("pv-true")
if err != nil {
t.Fatalf("Unexpected error getting pv: %v", err)
}
if pv.Spec.ClaimRef == nil {
t.Fatalf("True PV should be bound")
}
if pv.Spec.ClaimRef.Namespace != pvc.Namespace || pv.Spec.ClaimRef.Name != pvc.Name {
t.Fatalf("Bind mismatch! Expected %s/%s but got %s/%s", pvc.Namespace, pvc.Name, pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name)
}
}
// TestPersistentVolumeMultiPVs tests binding of one PVC to 100 PVs with
// different sizes.
func TestPersistentVolumeMultiPVs(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
ns := framework.CreateTestingNamespace("multi-pvs", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
testClient, controller, watchPV, watchPVC := createClients(ns, t, s, defaultSyncPeriod)
defer watchPV.Stop()
defer watchPVC.Stop()
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (PersistentVolumes).
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})
stopCh := make(chan struct{})
controller.Run(stopCh)
defer close(stopCh)
maxPVs := getObjectCount()
pvs := make([]*v1.PersistentVolume, maxPVs)
for i := 0; i < maxPVs; i++ {
// This PV will be claimed, released, and deleted
pvs[i] = createPV("pv-"+strconv.Itoa(i), "/tmp/foo"+strconv.Itoa(i), strconv.Itoa(i)+"G",
[]v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRetain)
}
pvc := createPVC("pvc-2", ns.Name, strconv.Itoa(maxPVs/2)+"G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})
for i := 0; i < maxPVs; i++ {
_, err := testClient.PersistentVolumes().Create(pvs[i])
if err != nil {
t.Errorf("Failed to create PersistentVolume %d: %v", i, err)
}
waitForPersistentVolumePhase(testClient, pvs[i].Name, watchPV, v1.VolumeAvailable)
}
t.Log("volumes created")
_, err := testClient.PersistentVolumeClaims(ns.Name).Create(pvc)
if err != nil {
t.Errorf("Failed to create PersistentVolumeClaim: %v", err)
}
t.Log("claim created")
// wait until the binder pairs the claim with a volume
waitForAnyPersistentVolumePhase(watchPV, v1.VolumeBound)
t.Log("volume bound")
waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound)
t.Log("claim bound")
// only one PV is bound
bound := 0
for i := 0; i < maxPVs; i++ {
pv, err := testClient.PersistentVolumes().Get(pvs[i].Name)
if err != nil {
t.Fatalf("Unexpected error getting pv: %v", err)
}
if pv.Spec.ClaimRef == nil {
continue
}
// found a bound PV
p := pv.Spec.Capacity[v1.ResourceStorage]
pvCap := p.Value()
expectedCap := resource.MustParse(strconv.Itoa(maxPVs/2) + "G")
expectedCapVal := expectedCap.Value()
if pv.Spec.ClaimRef.Name != pvc.Name || pvCap != expectedCapVal {
t.Fatalf("Bind mismatch! Expected %s capacity %d but got %s capacity %d", pvc.Name, expectedCapVal, pv.Spec.ClaimRef.Name, pvCap)
}
t.Logf("claim bounded to %s capacity %v", pv.Name, pv.Spec.Capacity[v1.ResourceStorage])
bound += 1
}
t.Log("volumes checked")
if bound != 1 {
t.Fatalf("Only 1 PV should be bound but got %d", bound)
}
// deleting a claim releases the volume
if err := testClient.PersistentVolumeClaims(ns.Name).Delete(pvc.Name, nil); err != nil {
t.Errorf("error deleting claim %s", pvc.Name)
}
t.Log("claim deleted")
waitForAnyPersistentVolumePhase(watchPV, v1.VolumeReleased)
t.Log("volumes released")
}
// TestPersistentVolumeMultiPVsPVCs tests binding of 100 PVCs to 100 PVs.
// This test is configurable by KUBE_INTEGRATION_PV_* variables.
func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
ns := framework.CreateTestingNamespace("multi-pvs-pvcs", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
testClient, binder, watchPV, watchPVC := createClients(ns, t, s, defaultSyncPeriod)
defer watchPV.Stop()
defer watchPVC.Stop()
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (PersistentVolumes).
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})
controllerStopCh := make(chan struct{})
binder.Run(controllerStopCh)
defer close(controllerStopCh)
objCount := getObjectCount()
pvs := make([]*v1.PersistentVolume, objCount)
pvcs := make([]*v1.PersistentVolumeClaim, objCount)
for i := 0; i < objCount; i++ {
// This PV will be claimed, released, and deleted
pvs[i] = createPV("pv-"+strconv.Itoa(i), "/tmp/foo"+strconv.Itoa(i), "1G",
[]v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRetain)
pvcs[i] = createPVC("pvc-"+strconv.Itoa(i), ns.Name, "1G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})
}
// Create PVs first
glog.V(2).Infof("TestPersistentVolumeMultiPVsPVCs: start")
// Create the volumes in a separate goroutine to pop events from
// watchPV early - it seems it has limited capacity and it gets stuck
// with >3000 volumes.
go func() {
for i := 0; i < objCount; i++ {
_, _ = testClient.PersistentVolumes().Create(pvs[i])
}
}()
// Wait for them to get Available
for i := 0; i < objCount; i++ {
waitForAnyPersistentVolumePhase(watchPV, v1.VolumeAvailable)
glog.V(1).Infof("%d volumes available", i+1)
}
glog.V(2).Infof("TestPersistentVolumeMultiPVsPVCs: volumes are Available")
// Start a separate goroutine that randomly modifies PVs and PVCs while the
// binder is working. We test that the binder can bind volumes despite
// users modifying objects underneath.
stopCh := make(chan struct{}, 0)
go func() {
for {
// Roll a dice and decide a PV or PVC to modify
if rand.Intn(2) == 0 {
// Modify PV
i := rand.Intn(objCount)
name := "pv-" + strconv.Itoa(i)
pv, err := testClient.PersistentVolumes().Get(name)
if err != nil {
// Silently ignore the error, the PV may have been deleted already
// or may not exist yet.
glog.V(4).Infof("Failed to read PV %s: %v", name, err)
continue
}
if pv.Annotations == nil {
pv.Annotations = map[string]string{"TestAnnotation": fmt.Sprint(rand.Int())}
} else {
pv.Annotations["TestAnnotation"] = fmt.Sprint(rand.Int())
}
_, err = testClient.PersistentVolumes().Update(pv)
if err != nil {
// Silently ignore error, the PV may have been updated by
// the controller.
glog.V(4).Infof("Failed to update PV %s: %v", pv.Name, err)
continue
}
glog.V(4).Infof("Updated PV %s", pv.Name)
} else {
// Modify PVC
i := rand.Intn(objCount)
name := "pvc-" + strconv.Itoa(i)
pvc, err := testClient.PersistentVolumeClaims(v1.NamespaceDefault).Get(name)
if err != nil {
// Silently ignore the error, the PVC may have been deleted
// already or may not exist yet.
glog.V(4).Infof("Failed to read PVC %s: %v", name, err)
continue
}
if pvc.Annotations == nil {
pvc.Annotations = map[string]string{"TestAnnotation": fmt.Sprint(rand.Int())}
} else {
pvc.Annotations["TestAnnotation"] = fmt.Sprint(rand.Int())
}
_, err = testClient.PersistentVolumeClaims(v1.NamespaceDefault).Update(pvc)
if err != nil {
// Silently ignore error, the PVC may have been updated by
// the controller.
glog.V(4).Infof("Failed to update PVC %s: %v", pvc.Name, err)
continue
}
glog.V(4).Infof("Updated PVC %s", pvc.Name)
}
select {
case <-stopCh:
return
default:
continue
}
}
}()
// Create the claims, again in a separate goroutine.
go func() {
for i := 0; i < objCount; i++ {
_, _ = testClient.PersistentVolumeClaims(ns.Name).Create(pvcs[i])
}
}()
// wait until the binder pairs all claims
for i := 0; i < objCount; i++ {
waitForAnyPersistentVolumeClaimPhase(watchPVC, v1.ClaimBound)
glog.V(1).Infof("%d claims bound", i+1)
}
// wait until the binder pairs all volumes
for i := 0; i < objCount; i++ {
waitForPersistentVolumePhase(testClient, pvs[i].Name, watchPV, v1.VolumeBound)
glog.V(1).Infof("%d claims bound", i+1)
}
glog.V(2).Infof("TestPersistentVolumeMultiPVsPVCs: claims are bound")
stopCh <- struct{}{}
// check that everything is bound to something
for i := 0; i < objCount; i++ {
pv, err := testClient.PersistentVolumes().Get(pvs[i].Name)
if err != nil {
t.Fatalf("Unexpected error getting pv: %v", err)
}
if pv.Spec.ClaimRef == nil {
t.Fatalf("PV %q is not bound", pv.Name)
}
glog.V(2).Infof("PV %q is bound to PVC %q", pv.Name, pv.Spec.ClaimRef.Name)
pvc, err := testClient.PersistentVolumeClaims(ns.Name).Get(pvcs[i].Name)
if err != nil {
t.Fatalf("Unexpected error getting pvc: %v", err)
}
if pvc.Spec.VolumeName == "" {
t.Fatalf("PVC %q is not bound", pvc.Name)
}
glog.V(2).Infof("PVC %q is bound to PV %q", pvc.Name, pvc.Spec.VolumeName)
}
testSleep()
}
// TestPersistentVolumeControllerStartup tests startup of the controller.
// The controller should not unbind any volumes when it starts.
func TestPersistentVolumeControllerStartup(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
ns := framework.CreateTestingNamespace("controller-startup", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
objCount := getObjectCount()
const shortSyncPeriod = 2 * time.Second
syncPeriod := getSyncPeriod(shortSyncPeriod)
testClient, binder, watchPV, watchPVC := createClients(ns, t, s, shortSyncPeriod)
defer watchPV.Stop()
defer watchPVC.Stop()
// Create *bound* volumes and PVCs
pvs := make([]*v1.PersistentVolume, objCount)
pvcs := make([]*v1.PersistentVolumeClaim, objCount)
for i := 0; i < objCount; i++ {
pvName := "pv-startup-" + strconv.Itoa(i)
pvcName := "pvc-startup-" + strconv.Itoa(i)
pvc := createPVC(pvcName, ns.Name, "1G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})
pvc.Annotations = map[string]string{"annBindCompleted": ""}
pvc.Spec.VolumeName = pvName
newPVC, err := testClient.PersistentVolumeClaims(ns.Name).Create(pvc)
if err != nil {
t.Fatalf("Cannot create claim %q: %v", pvc.Name, err)
}
// Save Bound status as a separate transaction
newPVC.Status.Phase = v1.ClaimBound
newPVC, err = testClient.PersistentVolumeClaims(ns.Name).UpdateStatus(newPVC)
if err != nil {
t.Fatalf("Cannot update claim status %q: %v", pvc.Name, err)
}
pvcs[i] = newPVC
// Drain watchPVC with all events generated by the PVC until it's bound
// We don't want to catch "PVC craated with Status.Phase == Pending"
// later in this test.
waitForAnyPersistentVolumeClaimPhase(watchPVC, v1.ClaimBound)
pv := createPV(pvName, "/tmp/foo"+strconv.Itoa(i), "1G",
[]v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRetain)
claimRef, err := v1.GetReference(newPVC)
if err != nil {
glog.V(3).Infof("unexpected error getting claim reference: %v", err)
return
}
pv.Spec.ClaimRef = claimRef
newPV, err := testClient.PersistentVolumes().Create(pv)
if err != nil {
t.Fatalf("Cannot create volume %q: %v", pv.Name, err)
}
// Save Bound status as a separate transaction
newPV.Status.Phase = v1.VolumeBound
newPV, err = testClient.PersistentVolumes().UpdateStatus(newPV)
if err != nil {
t.Fatalf("Cannot update volume status %q: %v", pv.Name, err)
}
pvs[i] = newPV
// Drain watchPV with all events generated by the PV until it's bound
// We don't want to catch "PV craated with Status.Phase == Pending"
// later in this test.
waitForAnyPersistentVolumePhase(watchPV, v1.VolumeBound)
}
// Start the controller when all PVs and PVCs are already saved in etcd
stopCh := make(chan struct{})
binder.Run(stopCh)
defer close(stopCh)
// wait for at least two sync periods for changes. No volume should be
// Released and no claim should be Lost during this time.
timer := time.NewTimer(2 * syncPeriod)
defer timer.Stop()
finished := false
for !finished {
select {
case volumeEvent := <-watchPV.ResultChan():
volume, ok := volumeEvent.Object.(*v1.PersistentVolume)
if !ok {
continue
}
if volume.Status.Phase != v1.VolumeBound {
t.Errorf("volume %s unexpectedly changed state to %s", volume.Name, volume.Status.Phase)
}
case claimEvent := <-watchPVC.ResultChan():
claim, ok := claimEvent.Object.(*v1.PersistentVolumeClaim)
if !ok {
continue
}
if claim.Status.Phase != v1.ClaimBound {
t.Errorf("claim %s unexpectedly changed state to %s", claim.Name, claim.Status.Phase)
}
case <-timer.C:
// Wait finished
glog.V(2).Infof("Wait finished")
finished = true
}
}
// check that everything is bound to something
for i := 0; i < objCount; i++ {
pv, err := testClient.PersistentVolumes().Get(pvs[i].Name)
if err != nil {
t.Fatalf("Unexpected error getting pv: %v", err)
}
if pv.Spec.ClaimRef == nil {
t.Fatalf("PV %q is not bound", pv.Name)
}
glog.V(2).Infof("PV %q is bound to PVC %q", pv.Name, pv.Spec.ClaimRef.Name)
pvc, err := testClient.PersistentVolumeClaims(ns.Name).Get(pvcs[i].Name)
if err != nil {
t.Fatalf("Unexpected error getting pvc: %v", err)
}
if pvc.Spec.VolumeName == "" {
t.Fatalf("PVC %q is not bound", pvc.Name)
}
glog.V(2).Infof("PVC %q is bound to PV %q", pvc.Name, pvc.Spec.VolumeName)
}
}
// TestPersistentVolumeProvisionMultiPVCs tests provisioning of many PVCs.
// This test is configurable by KUBE_INTEGRATION_PV_* variables.
func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
ns := framework.CreateTestingNamespace("provision-multi-pvs", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
testClient, binder, watchPV, watchPVC := createClients(ns, t, s, defaultSyncPeriod)
defer watchPV.Stop()
defer watchPVC.Stop()
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (PersistentVolumes and StorageClasses).
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})
defer testClient.Storage().StorageClasses().DeleteCollection(nil, v1.ListOptions{})
storageClass := storage.StorageClass{
TypeMeta: unversioned.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: v1.ObjectMeta{
Name: "gold",
},
Provisioner: provisionerPluginName,
}
testClient.Storage().StorageClasses().Create(&storageClass)
stopCh := make(chan struct{})
binder.Run(stopCh)
defer close(stopCh)
objCount := getObjectCount()
pvcs := make([]*v1.PersistentVolumeClaim, objCount)
for i := 0; i < objCount; i++ {
pvc := createPVC("pvc-provision-"+strconv.Itoa(i), ns.Name, "1G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})
pvc.Annotations = map[string]string{
storageutil.StorageClassAnnotation: "gold",
}
pvcs[i] = pvc
}
glog.V(2).Infof("TestPersistentVolumeProvisionMultiPVCs: start")
// Create the claims in a separate goroutine to pop events from watchPVC
// early. It gets stuck with >3000 claims.
go func() {
for i := 0; i < objCount; i++ {
_, _ = testClient.PersistentVolumeClaims(ns.Name).Create(pvcs[i])
}
}()
// Wait until the controller provisions and binds all of them
for i := 0; i < objCount; i++ {
waitForAnyPersistentVolumeClaimPhase(watchPVC, v1.ClaimBound)
glog.V(1).Infof("%d claims bound", i+1)
}
glog.V(2).Infof("TestPersistentVolumeProvisionMultiPVCs: claims are bound")
// check that we have enough bound PVs
pvList, err := testClient.PersistentVolumes().List(v1.ListOptions{})
if err != nil {
t.Fatalf("Failed to list volumes: %s", err)
}
if len(pvList.Items) != objCount {
t.Fatalf("Expected to get %d volumes, got %d", objCount, len(pvList.Items))
}
for i := 0; i < objCount; i++ {
pv := &pvList.Items[i]
if pv.Status.Phase != v1.VolumeBound {
t.Fatalf("Expected volume %s to be bound, is %s instead", pv.Name, pv.Status.Phase)
}
glog.V(2).Infof("PV %q is bound to PVC %q", pv.Name, pv.Spec.ClaimRef.Name)
}
// Delete the claims
for i := 0; i < objCount; i++ {
_ = testClient.PersistentVolumeClaims(ns.Name).Delete(pvcs[i].Name, nil)
}
// Wait for the PVs to get deleted by listing remaining volumes
// (delete events were unreliable)
for {
volumes, err := testClient.PersistentVolumes().List(v1.ListOptions{})
if err != nil {
t.Fatalf("Failed to list volumes: %v", err)
}
glog.V(1).Infof("%d volumes remaining", len(volumes.Items))
if len(volumes.Items) == 0 {
break
}
time.Sleep(time.Second)
}
glog.V(2).Infof("TestPersistentVolumeProvisionMultiPVCs: volumes are deleted")
}
// TestPersistentVolumeMultiPVsDiffAccessModes tests binding of one PVC to two
// PVs with different access modes.
func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
ns := framework.CreateTestingNamespace("multi-pvs-diff-access", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
testClient, controller, watchPV, watchPVC := createClients(ns, t, s, defaultSyncPeriod)
defer watchPV.Stop()
defer watchPVC.Stop()
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (PersistentVolumes).
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})
stopCh := make(chan struct{})
controller.Run(stopCh)
defer close(stopCh)
// This PV will be claimed, released, and deleted
pv_rwo := createPV("pv-rwo", "/tmp/foo", "10G",
[]v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRetain)
pv_rwm := createPV("pv-rwm", "/tmp/bar", "10G",
[]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, v1.PersistentVolumeReclaimRetain)
pvc := createPVC("pvc-rwm", ns.Name, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteMany})
_, err := testClient.PersistentVolumes().Create(pv_rwm)
if err != nil {
t.Errorf("Failed to create PersistentVolume: %v", err)
}
_, err = testClient.PersistentVolumes().Create(pv_rwo)
if err != nil {
t.Errorf("Failed to create PersistentVolume: %v", err)
}
t.Log("volumes created")
_, err = testClient.PersistentVolumeClaims(ns.Name).Create(pvc)
if err != nil {
t.Errorf("Failed to create PersistentVolumeClaim: %v", err)
}
t.Log("claim created")
// wait until the controller pairs the volume and claim
waitForAnyPersistentVolumePhase(watchPV, v1.VolumeBound)
t.Log("volume bound")
waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound)
t.Log("claim bound")
// only RWM PV is bound
pv, err := testClient.PersistentVolumes().Get("pv-rwo")
if err != nil {
t.Fatalf("Unexpected error getting pv: %v", err)
}
if pv.Spec.ClaimRef != nil {
t.Fatalf("ReadWriteOnce PV shouldn't be bound")
}
pv, err = testClient.PersistentVolumes().Get("pv-rwm")
if err != nil {
t.Fatalf("Unexpected error getting pv: %v", err)
}
if pv.Spec.ClaimRef == nil {
t.Fatalf("ReadWriteMany PV should be bound")
}
if pv.Spec.ClaimRef.Name != pvc.Name {
t.Fatalf("Bind mismatch! Expected %s but got %s", pvc.Name, pv.Spec.ClaimRef.Name)
}
// deleting a claim releases the volume
if err := testClient.PersistentVolumeClaims(ns.Name).Delete(pvc.Name, nil); err != nil {
t.Errorf("error deleting claim %s", pvc.Name)
}
t.Log("claim deleted")
waitForAnyPersistentVolumePhase(watchPV, v1.VolumeReleased)
t.Log("volume released")
}
func waitForPersistentVolumePhase(client *clientset.Clientset, pvName string, w watch.Interface, phase v1.PersistentVolumePhase) {
// Check if the volume is already in requested phase
volume, err := client.Core().PersistentVolumes().Get(pvName)
if err == nil && volume.Status.Phase == phase {
return
}
// Wait for the phase
for {
event := <-w.ResultChan()
volume, ok := event.Object.(*v1.PersistentVolume)
if !ok {
continue
}
if volume.Status.Phase == phase && volume.Name == pvName {
glog.V(2).Infof("volume %q is %s", volume.Name, phase)
break
}
}
}
func waitForPersistentVolumeClaimPhase(client *clientset.Clientset, claimName, namespace string, w watch.Interface, phase v1.PersistentVolumeClaimPhase) {
// Check if the claim is already in requested phase
claim, err := client.Core().PersistentVolumeClaims(namespace).Get(claimName)
if err == nil && claim.Status.Phase == phase {
return
}
// Wait for the phase
for {
event := <-w.ResultChan()
claim, ok := event.Object.(*v1.PersistentVolumeClaim)
if !ok {
continue
}
if claim.Status.Phase == phase && claim.Name == claimName {
glog.V(2).Infof("claim %q is %s", claim.Name, phase)
break
}
}
}
func waitForAnyPersistentVolumePhase(w watch.Interface, phase v1.PersistentVolumePhase) {
for {
event := <-w.ResultChan()
volume, ok := event.Object.(*v1.PersistentVolume)
if !ok {
continue
}
if volume.Status.Phase == phase {
glog.V(2).Infof("volume %q is %s", volume.Name, phase)
break
}
}
}
func waitForAnyPersistentVolumeClaimPhase(w watch.Interface, phase v1.PersistentVolumeClaimPhase) {
for {
event := <-w.ResultChan()
claim, ok := event.Object.(*v1.PersistentVolumeClaim)
if !ok {
continue
}
if claim.Status.Phase == phase {
glog.V(2).Infof("claim %q is %s", claim.Name, phase)
break
}
}
}
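// Example (sketch, not part of the original tests): the wait helpers above are
// typically combined with the clients returned by createClients below, e.g.:
//
//	testClient, _, watchPV, watchPVC := createClients(ns, t, s, defaultSyncPeriod)
//	waitForPersistentVolumePhase(testClient, "pv-0", watchPV, v1.VolumeBound)
//	waitForAnyPersistentVolumeClaimPhase(watchPVC, v1.ClaimBound)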
func createClients(ns *v1.Namespace, t *testing.T, s *httptest.Server, syncPeriod time.Duration) (*clientset.Clientset, *persistentvolumecontroller.PersistentVolumeController, watch.Interface, watch.Interface) {
// Use higher QPS and Burst: there is a race-condition test that creates
// many objects, and the default values were too low.
binderClient := clientset.NewForConfigOrDie(&restclient.Config{
Host: s.URL,
ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion},
QPS: 1000000,
Burst: 1000000,
})
testClient := clientset.NewForConfigOrDie(&restclient.Config{
Host: s.URL,
ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion},
QPS: 1000000,
Burst: 1000000,
})
host := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil)
plugin := &volumetest.FakeVolumePlugin{
PluginName: provisionerPluginName,
Host: host,
Config: volume.VolumeConfig{},
LastProvisionerOptions: volume.VolumeOptions{},
NewAttacherCallCount: 0,
NewDetacherCallCount: 0,
Mounters: nil,
Unmounters: nil,
Attachers: nil,
Detachers: nil,
}
plugins := []volume.VolumePlugin{plugin}
cloud := &fakecloud.FakeCloud{}
ctrl := persistentvolumecontroller.NewController(
persistentvolumecontroller.ControllerParameters{
KubeClient: binderClient,
SyncPeriod: getSyncPeriod(syncPeriod),
VolumePlugins: plugins,
Cloud: cloud,
EnableDynamicProvisioning: true,
})
watchPV, err := testClient.PersistentVolumes().Watch(v1.ListOptions{})
if err != nil {
t.Fatalf("Failed to watch PersistentVolumes: %v", err)
}
watchPVC, err := testClient.PersistentVolumeClaims(ns.Name).Watch(v1.ListOptions{})
if err != nil {
t.Fatalf("Failed to watch PersistentVolumeClaimss: %v", err)
}
return testClient, ctrl, watchPV, watchPVC
}
func createPV(name, path, cap string, mode []v1.PersistentVolumeAccessMode, reclaim v1.PersistentVolumeReclaimPolicy) *v1.PersistentVolume {
return &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{Name: name},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: path}},
Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse(cap)},
AccessModes: mode,
PersistentVolumeReclaimPolicy: reclaim,
},
}
}
func createPVC(name, namespace, cap string, mode []v1.PersistentVolumeAccessMode) *v1.PersistentVolumeClaim {
return &v1.PersistentVolumeClaim{
ObjectMeta: v1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: v1.PersistentVolumeClaimSpec{
Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse(cap)}},
AccessModes: mode,
},
}
}
|
[
"\"KUBE_INTEGRATION_PV_OBJECTS\"",
"\"KUBE_INTEGRATION_PV_SYNC_PERIOD\"",
"\"KUBE_INTEGRATION_PV_END_SLEEP\""
] |
[] |
[
"KUBE_INTEGRATION_PV_SYNC_PERIOD",
"KUBE_INTEGRATION_PV_OBJECTS",
"KUBE_INTEGRATION_PV_END_SLEEP"
] |
[]
|
["KUBE_INTEGRATION_PV_SYNC_PERIOD", "KUBE_INTEGRATION_PV_OBJECTS", "KUBE_INTEGRATION_PV_END_SLEEP"]
|
go
| 3 | 0 | |
vendor/github.com/avinetworks/sdk/go/examples/test/create_profiles_test.go
|
package test
import (
"fmt"
"os"
"testing"
"github.com/avinetworks/sdk/go/clients"
"github.com/avinetworks/sdk/go/models"
"github.com/avinetworks/sdk/go/session"
)
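// NOTE (not part of the original example): the test below reads the Avi
// controller address from the "controller" environment variable, so a typical
// (hypothetical) invocation would look like:
//
//	export controller=<controller-ip-or-hostname>
//	go test -run TestCreateProfiles ./go/examples/test/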
func TestCreateProfiles(t *testing.T) {
aviClient, err := clients.NewAviClient(os.Getenv("controller"), "admin",
session.SetPassword("fr3sca$%^"),
session.SetTenant("admin"),
session.SetVersion("17.2.8"),
session.SetInsecure)
if err != nil {
fmt.Println("Couldn't create session: ", err)
t.Fail()
}
cv, err := aviClient.AviSession.GetControllerVersion()
fmt.Printf("Avi Controller Version: %v:%v\n", cv, err)
// Create an application persistence profile in the admin tenant
profileobj := models.ApplicationPersistenceProfile{}
ifed := false
profileobj.IsFederated = &ifed
pt := "PERSISTENCE_TYPE_CLIENT_IP_ADDRESS"
profileobj.PersistenceType = &pt
name := "Test-Persistece-Profile"
profileobj.Name = &name
tr := "/api/tenant?name=admin"
profileobj.TenantRef = &tr
ipt := (int32)(5)
ipobj := models.IPPersistenceProfile{IPPersistentTimeout: &ipt}
profileobj.IPPersistenceProfile = &ipobj
npobj, err := aviClient.ApplicationPersistenceProfile.Create(&profileobj)
if err != nil {
fmt.Println("\n Application persistence profile creation failed: ", err)
t.Fail()
}
fmt.Println("\n Application persistence profile ", npobj)
// Create an SSL profile in the admin tenant
sslobj := models.SSLProfile{}
name = "Test-Ssl-Profile"
sslobj.Name = &name
tr = "/api/tenant?name=admin"
sslobj.TenantRef = &tr
essr := true
sslobj.EnableSslSessionReuse = &essr
sst := (int32)(86400)
sslobj.SslSessionTimeout = &sst
pcco := false
sslobj.PreferClientCipherOrdering = &pcco
scn := true
sslobj.SendCloseNotify = &scn
ac := "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-ECDSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA"
sslobj.AcceptedCiphers = &ac
Type := "SSL_VERSION_TLS1"
vobj := models.SSLVersion{Type: &Type}
sslobj.AcceptedVersions = append(sslobj.AcceptedVersions, &vobj)
profobj, err := aviClient.SSLProfile.Create(&sslobj)
if err != nil {
fmt.Println("\n Ssl profile creation failed: ", err)
t.Fail()
} else {
fmt.Println("Ssl profile ", profobj)
}
}
|
[
"\"controller\""
] |
[] |
[
"controller"
] |
[]
|
["controller"]
|
go
| 1 | 0 | |
lasso/ansa/rest/server_ansa.py
|
import os
import sys
import ansa
import json
import signal
import inspect
import logging
sys.path.append(os.path.dirname(__file__))
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
from lasso.logging import str_error, str_warn, str_info
from lasso.utils.ConsoleColoring import ConsoleColoring
from settings import (
FUNCTION_NAME,
DEFAULT_REST_SERVER_PORT,
SERVER_NAME,
ENTITY_ID,
ENTITY_ANSA_TYPE,
)
from server_html import SERVER_HTML_TEMPLATE
from materialize_min_js import (
WEB_MATERIALIZE_MIN_JS,
WEB_MATERIALIZE_MIN_CSS,
)
# messages
_msg_site_packages_dir_not_set = "Environment variable '{0}' was not specified. Assuming all required packages were already installed somewhere else."
_msg_import_error = '''
ImportError: {0}
Please install the package for ANSA as follows:
(1) Create and activate a conda environment with python 3.3
- 'conda create -n py33 python=3.3'
- Win: 'activate py33' or Linux: 'source activate py33'
(2) Install required packages
- 'conda install flask'
(3) Run the server with the option '--python33-path'
- 'python -m lasso.ansa.rest.server --python33-path path/to/anaconda/envs/py33'
Or set the environment variable 'ANSA_REST_SITE_PACKAGES_PATH'
- csh : 'setenv ANSA_REST_SITE_PACKAGES_PATH path/to/anaconda/envs/py33'
- bash: 'export ANSA_REST_SITE_PACKAGES_PATH="path/to/anaconda/envs/py33"'
- ps : '$env:ANSA_REST_SITE_PACKAGES_PATH = "path/to/anaconda/envs/py33"'
- cmd : 'setx ANSA_REST_SITE_PACKAGES_PATH "path/to/anaconda/envs/py33"'
(4) Enjoy life ♥
'''
_msg_missing_json_data = '''Missing json data.
A json object with three entries is expected:
- {function_name}: str (required, ansa function name e.g. ansa.base.GetEntity)
- args: list (arguments to the function)
- kwargs: obj (named arguments for the function)
'''.format(function_name=FUNCTION_NAME)
_msg_missing_function_name = "message json data requires the attribute '{0}'".format(
FUNCTION_NAME)
_msg_wrong_type = "{0} must be of type {1}"
_msg_shutdown = "Shutting down ANSA server."
_msg_only_method_post = "Only method post is supported for this address."
_msg_missing_entity_id = "An entity of ansa_type '{0}' is missing an id."
_msg_entity_not_found = "An ANSA entity with id '{0}' and type {1} can not be found."
# set the path for the site packages folder
try:
sys.path.append(os.environ["ANSA_REST_SITE_PACKAGES_PATH"])
except KeyError as err:
print(str_warn(_msg_site_packages_dir_not_set.format(
"ANSA_REST_SITE_PACKAGES_PATH")))
# try importing flask here
# if the environment is set up correctly, the package should be
# found in sys.path
try:
import flask
from flask import (
Flask,
json,
jsonify,
request,
)
except ImportError as err:
raise RuntimeError(str_error(_msg_import_error.format(str(err))))
class AnsaJsonEncoder(json.JSONEncoder):
''' Custom JSON encoder for ANSA
'''
def default(self, obj: object):
''' encoder function
'''
if isinstance(obj, ansa.base.Entity):
ansa_dict = {
prop_name: prop_value
for prop_name, prop_value in inspect.getmembers(obj)
if prop_name and not callable(prop_value)
}
ansa_dict["id"] = obj._id
ansa_dict["ansa_type"] = obj.ansa_type(ansa.base.CurrentDeck())
return ansa_dict
elif isinstance(obj, bytes):
return obj.decode("utf-8")
return json.JSONEncoder.default(self, obj)
def _dict_is_ansa_entity(obj: dict):
''' Checks if a dictionary is an ansa entity
Parameters
----------
obj: `dict`
object to check
Returns
-------
is_ansa_entity: `bool`
'''
if isinstance(obj, dict) and ENTITY_ANSA_TYPE in obj:
return True
else:
return False
def _deserialize_ansa_entity(dict_entity: dict):
''' deserializes an ansa entity
Parameters
----------
dict_entity: `dict`
deserializes an ansa entity from a dict
Returns
-------
entity: `ansa.base.Entity`
Raises
------
ValueError: if entity was not found
'''
# we assume that this is given
ansa_type = dict_entity[ENTITY_ANSA_TYPE]
# check if an id is there
if ENTITY_ID not in dict_entity:
raise ValueError(_msg_missing_entity_id.format(
dict_entity[ENTITY_ANSA_TYPE]))
entity_id = dict_entity[ENTITY_ID]
# in case you wonder, yes all of this is expensive ...
entity = ansa.base.GetEntity(
ansa.base.CurrentDeck(),
ansa_type,
entity_id
)
if entity is None:
raise ValueError(_msg_entity_not_found.format(entity_id, ansa_type))
return entity
def _deserialize_obj_rest(obj: object):
''' Deserializes an object from the REST API such as Lists or ANSA entities
Parameters
----------
obj: `object`
object to deserialize
Returns
-------
obj: `object`
deserialized object
'''
# DICT
if isinstance(obj, dict):
# sir, we have an entity
if _dict_is_ansa_entity(obj):
return _deserialize_ansa_entity(obj)
# just another dict
else:
return {
_deserialize_obj_rest(key): _deserialize_obj_rest(value)
for key, value in obj.items()
}
# LIST
elif isinstance(obj, (list, tuple)):
return [_deserialize_obj_rest(value) for value in obj]
# all fine
else:
return obj
class AnsaFunction:
''' Utility class for managing ansa function data
'''
def __init__(self, function_name: str, args: list, kwargs: dict):
''' Initialize an ansa function
Parameters
----------
function_name : `str`
name of the function to execute with full module path
args : `list`
argument list
kwargs : `dict`
dictionary of named args
Returns
-------
ansa_function : `AnsaFunction`
ansa function wrapper
'''
# set stuff
self.function_name = function_name
self.args = args
self.kwargs = kwargs
self.result = None
def run(self):
''' Run the ansa function
Returns
-------
result : `object`
whatever came out of the function
'''
# separate the module path from the function name
module_name, function_name = self.function_name.rsplit('.', 1)
# import module
my_module = __import__(module_name, globals(),
locals(), (function_name, ), 0)
# get function from module
my_function = getattr(my_module, function_name)
# run function
self.result = my_function(*self.args, **self.kwargs)
return self.result
def parse_json_rest(json_data: dict):
''' Parse JSON data originating from REST
Parameters
----------
json_data : `dict`
json data as dict
Returns
-------
ansa_function : `AnsaFunction`
ansa function wrapper object
Raises
------
ValueError: if anything was missing or wrong
'''
if json_data is None:
raise ValueError(_msg_missing_json_data)
if FUNCTION_NAME not in json_data:
raise ValueError(_msg_missing_function_name)
# parse args
args = json_data["args"] if "args" in json_data else []
if not isinstance(args, (list, tuple)):
raise ValueError(_msg_wrong_type.format("args", "list"))
# parse kwargs
kwargs = json_data["kwargs"] if "kwargs" in json_data else {}
if not isinstance(kwargs, dict):
raise ValueError(_msg_wrong_type.format("kwargs", "dict"))
# find and deserialize ansa entities in json data
args = _deserialize_obj_rest(args)
kwargs = _deserialize_obj_rest(kwargs)
return AnsaFunction(
function_name=json_data[FUNCTION_NAME],
args=args,
kwargs=kwargs,
)
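# Example (sketch, not part of the original module): the kind of JSON body that
# parse_json_rest accepts. The function name matches the docs above; the
# argument values are purely illustrative.
_EXAMPLE_REST_PAYLOAD = {
    FUNCTION_NAME: "ansa.base.GetEntity",  # full module path of the ANSA function
    "args": [1, "SHELL", 100],             # hypothetical deck, ansa type and id
    "kwargs": {},
}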
def print_header():
header = '''
ANSA Remote Scripting Server by {0}
------------------------------------------
'''.format(ConsoleColoring.blue("LASSO GmbH", light=True))
print(header)
##############################################
# FLASK
##############################################
# initiate flask app
app = Flask(SERVER_NAME)
app.json_encoder = AnsaJsonEncoder
@app.route("/shutdown")
def handle_terminate():
''' This function terminates the REST server remotely
'''
# this was a little too rude, even for me
# os.kill(os.getpid(), signal.SIGTERM)
# return "Shutting down server"
func = request.environ.get('werkzeug.server.shutdown')
func()
return _msg_shutdown
@app.route("/")
def infoPage():
''' Display base info page
'''
try:
return SERVER_HTML_TEMPLATE.format(
address=request.base_url,
address_run=request.base_url + "run/",
address_shutdown=request.base_url + "shutdown",
function_name=FUNCTION_NAME,
args="args",
kwargs="kwargs",
materialize_css=WEB_MATERIALIZE_MIN_CSS,
materialize_js=WEB_MATERIALIZE_MIN_JS,
)
except Exception as err:
return str(err)
@app.route('/run/', methods=['POST'])
def runAnsaFunction():
''' Runs an ansa function from REST
'''
try:
if request.method == 'POST':
# json request
json_data = request.get_json()
# form request
if not json_data:
function_name = request.form.get(FUNCTION_NAME, None)
if function_name is None:
raise ValueError(_msg_missing_function_name)
args = request.form.get("args", "[]")
if not args:
args = []
print(args, type(args))
args = flask.json.loads(args)
print(args, type(args))
kwargs = request.form.get("kwargs")
if not kwargs:
kwargs = "{}"
print(kwargs, type(kwargs))
kwargs = flask.json.loads(kwargs)
print(kwargs, type(kwargs))
json_data = {
FUNCTION_NAME: request.form[FUNCTION_NAME],
"args": args,
"kwargs": kwargs,
}
print(json_data)
# parse arguments
ansa_function = parse_json_rest(json_data)
# run function
ansa_function.run()
# return the thing
return jsonify({
"success": True,
"payload": ansa_function.result,
})
else:
return _msg_only_method_post
except Exception as err:
return jsonify({
"success": False,
"payload": str(err),
})
def serve(port: int = DEFAULT_REST_SERVER_PORT):
''' Initializes the flask REST service
'''
print_header()
app.run(port=port)
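# Minimal launch sketch (assumption: this module is executed inside ANSA's
# embedded Python interpreter, where the `ansa` package and flask are importable):
if __name__ == "__main__":
    serve(port=DEFAULT_REST_SERVER_PORT)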
|
[] |
[] |
[
"ANSA_REST_SITE_PACKAGES_PATH"
] |
[]
|
["ANSA_REST_SITE_PACKAGES_PATH"]
|
python
| 1 | 0 | |
setup.py
|
import codecs
import os
import platform
import re
import sys
from setuptools import Extension, setup
NO_EXTENSIONS = bool(os.environ.get("MULTIDICT_NO_EXTENSIONS"))
if sys.implementation.name != "cpython":
NO_EXTENSIONS = True
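# The C extension can also be skipped explicitly via the environment, e.g.
# (assuming a POSIX shell): MULTIDICT_NO_EXTENSIONS=1 pip install .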
CFLAGS = ["-O2"]
# CFLAGS = ['-g']
if platform.system() != "Windows":
CFLAGS.extend(
[
"-std=c99",
"-Wall",
"-Wsign-compare",
"-Wconversion",
"-fno-strict-aliasing",
"-pedantic",
]
)
extensions = [
Extension(
"multidict._multidict", ["multidict/_multidict.c"], extra_compile_args=CFLAGS,
),
]
with codecs.open(
os.path.join(
os.path.abspath(os.path.dirname(__file__)), "multidict", "__init__.py"
),
"r",
"latin1",
) as fp:
try:
version = re.findall(r'^__version__ = "([^"]+)"\r?$', fp.read(), re.M)[0]
except IndexError:
raise RuntimeError("Unable to determine version.")
def read(f):
return open(os.path.join(os.path.dirname(__file__), f)).read().strip()
args = dict(
name="multidict",
version=version,
description=("multidict implementation"),
long_description=read("README.rst"),
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Development Status :: 5 - Production/Stable",
],
author="Andrew Svetlov",
author_email="[email protected]",
url="https://github.com/aio-libs/multidict",
project_urls={
"Chat: Gitter": "https://gitter.im/aio-libs/Lobby",
"CI: Azure Pipelines": "https://dev.azure.com/aio-libs/multidict/_build",
"Coverage: codecov": "https://codecov.io/github/aio-libs/multidict",
"Docs: RTD": "https://multidict.readthedocs.io",
"GitHub: issues": "https://github.com/aio-libs/multidict/issues",
"GitHub: repo": "https://github.com/aio-libs/multidict",
},
license="Apache 2",
packages=["multidict"],
python_requires=">=3.5",
include_package_data=True,
)
if not NO_EXTENSIONS:
print("**********************")
print("* Accellerated build *")
print("**********************")
setup(ext_modules=extensions, **args)
else:
print("*********************")
print("* Pure Python build *")
print("*********************")
setup(**args)
|
[] |
[] |
[
"MULTIDICT_NO_EXTENSIONS"
] |
[]
|
["MULTIDICT_NO_EXTENSIONS"]
|
python
| 1 | 0 | |
pkg/utils/charts_test.go
|
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"io/ioutil"
"os"
"testing"
)
func TestPathExists(t *testing.T) {
path := os.TempDir()
if !PathExists(path) {
t.Errorf("result of checking if the path exists is wrong")
}
if PathExists(path + "test/") {
t.Errorf("result of checking if the path exists is wrong")
}
}
func TestGetChartsDirectory(t *testing.T) {
f, err := ioutil.TempFile("", "test")
if err != nil {
t.Errorf("MkdirTemp failed due to %v", err)
}
testDir := f.Name()
home := os.Getenv("HOME")
defer os.Setenv("HOME", home) // recover
os.Setenv("HOME", testDir)
if GetChartsDirectory() != "/charts" {
t.Errorf("ChartsDirectory should be /charts if ~/charts not exist")
}
homeChartsFolder := os.Getenv("HOME") + "/charts"
// Make Directory if it doesn't exist.
_ = os.Mkdir(homeChartsFolder, 0600)
if GetChartsDirectory() != "/charts" {
t.Errorf("ChartsDirectory should be ~/charts if ~/charts exist")
}
}
|
[
"\"HOME\"",
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "JobGenie.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
mmpose/apis/inference.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import warnings
import cv2
import mmcv
import numpy as np
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from PIL import Image
from mmpose.core.post_processing import oks_nms
from mmpose.datasets.dataset_info import DatasetInfo
from mmpose.datasets.pipelines import Compose
from mmpose.models import build_posenet
from mmpose.utils.hooks import OutputHook
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
def init_pose_model(config, checkpoint=None, device='cuda:0'):
"""Initialize a pose model from config file.
Args:
config (str or :obj:`mmcv.Config`): Config file path or the config
object.
checkpoint (str, optional): Checkpoint path. If left as None, the model
will not load any weights.
Returns:
nn.Module: The constructed detector.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
config.model.pretrained = None
model = build_posenet(config.model)
if checkpoint is not None:
# load model checkpoint
load_checkpoint(model, checkpoint, map_location=device)
# save the config in the model for convenience
model.cfg = config
model.to(device)
model.eval()
return model
def _xyxy2xywh(bbox_xyxy):
"""Transform the bbox format from x1y1x2y2 to xywh.
Args:
bbox_xyxy (np.ndarray): Bounding boxes (with scores), shaped (n, 4) or
(n, 5). (left, top, right, bottom, [score])
Returns:
np.ndarray: Bounding boxes (with scores),
shaped (n, 4) or (n, 5). (left, top, width, height, [score])
"""
bbox_xywh = bbox_xyxy.copy()
bbox_xywh[:, 2] = bbox_xywh[:, 2] - bbox_xywh[:, 0] + 1
bbox_xywh[:, 3] = bbox_xywh[:, 3] - bbox_xywh[:, 1] + 1
return bbox_xywh
def _xywh2xyxy(bbox_xywh):
"""Transform the bbox format from xywh to x1y1x2y2.
Args:
bbox_xywh (ndarray): Bounding boxes (with scores),
shaped (n, 4) or (n, 5). (left, top, width, height, [score])
Returns:
np.ndarray: Bounding boxes (with scores), shaped (n, 4) or
(n, 5). (left, top, right, bottom, [score])
"""
bbox_xyxy = bbox_xywh.copy()
bbox_xyxy[:, 2] = bbox_xyxy[:, 2] + bbox_xyxy[:, 0] - 1
bbox_xyxy[:, 3] = bbox_xyxy[:, 3] + bbox_xyxy[:, 1] - 1
return bbox_xyxy
def _box2cs(cfg, box):
"""This encodes bbox(x,y,w,h) into (center, scale)
Args:
x, y, w, h
Returns:
tuple: A tuple containing center and scale.
- np.ndarray[float32](2,): Center of the bbox (x, y).
- np.ndarray[float32](2,): Scale of the bbox w & h.
"""
x, y, w, h = box[:4]
input_size = cfg.data_cfg['image_size']
aspect_ratio = input_size[0] / input_size[1]
center = np.array([x + w * 0.5, y + h * 0.5], dtype=np.float32)
if w > aspect_ratio * h:
h = w * 1.0 / aspect_ratio
elif w < aspect_ratio * h:
w = h * aspect_ratio
# pixel std is 200.0
scale = np.array([w / 200.0, h / 200.0], dtype=np.float32)
scale = scale * 1.25
return center, scale
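# Worked example (sketch, assuming a typical cfg.data_cfg['image_size'] of
# [192, 256], i.e. aspect ratio 0.75): for box = (10, 20, 30, 60) the width is
# padded to 60 * 0.75 = 45, so center == (25.0, 50.0) and
# scale == (45 / 200.0, 60 / 200.0) * 1.25 == (0.28125, 0.375).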
class LoadImage:
"""A simple pipeline to load image."""
def __init__(self, color_type='color', channel_order='rgb'):
self.color_type = color_type
self.channel_order = channel_order
def __call__(self, results):
"""Call function to load images into results.
Args:
results (dict): A result dict contains the img_or_path.
Returns:
dict: ``results`` will be returned containing loaded image.
"""
if isinstance(results['img_or_path'], str):
results['image_file'] = results['img_or_path']
img = mmcv.imread(results['img_or_path'], self.color_type,
self.channel_order)
elif isinstance(results['img_or_path'], np.ndarray):
results['image_file'] = ''
if self.color_type == 'color' and self.channel_order == 'rgb':
img = cv2.cvtColor(results['img_or_path'], cv2.COLOR_BGR2RGB)
else:
img = results['img_or_path']
else:
raise TypeError('"img_or_path" must be a numpy array or a str or '
'a pathlib.Path object')
results['img'] = img
return results
def _inference_single_pose_model(model,
img_or_path,
bboxes,
dataset='TopDownCocoDataset',
dataset_info=None,
return_heatmap=False):
"""Inference human bounding boxes.
num_bboxes: N
num_keypoints: K
Args:
model (nn.Module): The loaded pose model.
img_or_path (str | np.ndarray): Image filename or loaded image.
bboxes (list | np.ndarray): All bounding boxes (with scores),
shaped (N, 4) or (N, 5). (left, top, width, height, [score])
where N is number of bounding boxes.
dataset (str): Dataset name. Deprecated.
dataset_info (DatasetInfo): A class containing all dataset info.
outputs (list[str] | tuple[str]): Names of layers whose output is
to be returned, default: None
Returns:
ndarray[NxKx3]: Predicted pose x, y, score.
heatmap[N, K, H, W]: Model output heatmap.
"""
cfg = model.cfg
device = next(model.parameters()).device
# build the data pipeline
channel_order = cfg.test_pipeline[0].get('channel_order', 'rgb')
test_pipeline = [LoadImage(channel_order=channel_order)
] + cfg.test_pipeline[1:]
test_pipeline = Compose(test_pipeline)
assert len(bboxes[0]) in [4, 5]
if dataset_info is not None:
dataset_name = dataset_info.dataset_name
flip_pairs = dataset_info.flip_pairs
else:
warnings.warn(
'dataset is deprecated.'
'Please set `dataset_info` in the config.'
'Check https://github.com/open-mmlab/mmpose/pull/663 for details.',
DeprecationWarning)
# TODO: These will be removed in the later versions.
if dataset in ('TopDownCocoDataset', 'TopDownOCHumanDataset',
'AnimalMacaqueDataset'):
flip_pairs = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12],
[13, 14], [15, 16]]
elif dataset == 'TopDownCocoWholeBodyDataset':
body = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12],
[13, 14], [15, 16]]
foot = [[17, 20], [18, 21], [19, 22]]
face = [[23, 39], [24, 38], [25, 37], [26, 36], [27, 35], [28, 34],
[29, 33], [30, 32], [40, 49], [41, 48], [42, 47], [43, 46],
[44, 45], [54, 58], [55, 57], [59, 68], [60, 67], [61, 66],
[62, 65], [63, 70], [64, 69], [71, 77], [72, 76], [73, 75],
[78, 82], [79, 81], [83, 87], [84, 86], [88, 90]]
hand = [[91, 112], [92, 113], [93, 114], [94, 115], [95, 116],
[96, 117], [97, 118], [98, 119], [99, 120], [100, 121],
[101, 122], [102, 123], [103, 124], [104, 125], [105, 126],
[106, 127], [107, 128], [108, 129], [109, 130], [110, 131],
[111, 132]]
flip_pairs = body + foot + face + hand
elif dataset == 'TopDownAicDataset':
flip_pairs = [[0, 3], [1, 4], [2, 5], [6, 9], [7, 10], [8, 11]]
elif dataset == 'TopDownMpiiDataset':
flip_pairs = [[0, 5], [1, 4], [2, 3], [10, 15], [11, 14], [12, 13]]
elif dataset == 'TopDownMpiiTrbDataset':
flip_pairs = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11],
[14, 15], [16, 22], [28, 34], [17, 23], [29, 35],
[18, 24], [30, 36], [19, 25], [31, 37], [20, 26],
[32, 38], [21, 27], [33, 39]]
elif dataset in ('OneHand10KDataset', 'FreiHandDataset',
'PanopticDataset', 'InterHand2DDataset'):
flip_pairs = []
elif dataset in 'Face300WDataset':
flip_pairs = [[0, 16], [1, 15], [2, 14], [3, 13], [4, 12], [5, 11],
[6, 10], [7, 9], [17, 26], [18, 25], [19, 24],
[20, 23], [21, 22], [31, 35], [32, 34], [36, 45],
[37, 44], [38, 43], [39, 42], [40, 47], [41, 46],
[48, 54], [49, 53], [50, 52], [61, 63], [60, 64],
[67, 65], [58, 56], [59, 55]]
elif dataset in 'FaceAFLWDataset':
flip_pairs = [[0, 5], [1, 4], [2, 3], [6, 11], [7, 10], [8, 9],
[12, 14], [15, 17]]
elif dataset in 'FaceCOFWDataset':
flip_pairs = [[0, 1], [4, 6], [2, 3], [5, 7], [8, 9], [10, 11],
[12, 14], [16, 17], [13, 15], [18, 19], [22, 23]]
elif dataset in 'FaceWFLWDataset':
flip_pairs = [[0, 32], [1, 31], [2, 30], [3, 29], [4, 28], [5, 27],
[6, 26], [7, 25], [8, 24], [9, 23], [10, 22],
[11, 21], [12, 20], [13, 19], [14, 18], [15, 17],
[33, 46], [34, 45], [35, 44], [36, 43], [37, 42],
[38, 50], [39, 49], [40, 48], [41, 47], [60, 72],
[61, 71], [62, 70], [63, 69], [64, 68], [65, 75],
[66, 74], [67, 73], [55, 59], [56, 58], [76, 82],
[77, 81], [78, 80], [87, 83], [86, 84], [88, 92],
[89, 91], [95, 93], [96, 97]]
elif dataset in 'AnimalFlyDataset':
flip_pairs = [[1, 2], [6, 18], [7, 19], [8, 20], [9, 21], [10, 22],
[11, 23], [12, 24], [13, 25], [14, 26], [15, 27],
[16, 28], [17, 29], [30, 31]]
elif dataset in 'AnimalHorse10Dataset':
flip_pairs = []
elif dataset in 'AnimalLocustDataset':
flip_pairs = [[5, 20], [6, 21], [7, 22], [8, 23], [9, 24],
[10, 25], [11, 26], [12, 27], [13, 28], [14, 29],
[15, 30], [16, 31], [17, 32], [18, 33], [19, 34]]
elif dataset in 'AnimalZebraDataset':
flip_pairs = [[3, 4], [5, 6]]
elif dataset in 'AnimalPoseDataset':
flip_pairs = [[0, 1], [2, 3], [8, 9], [10, 11], [12, 13], [14, 15],
[16, 17], [18, 19]]
else:
raise NotImplementedError()
dataset_name = dataset
batch_data = []
for bbox in bboxes:
center, scale = _box2cs(cfg, bbox)
# prepare data
data = {
'img_or_path':
img_or_path,
'center':
center,
'scale':
scale,
'bbox_score':
bbox[4] if len(bbox) == 5 else 1,
'bbox_id':
0, # needs to be assigned if batch_size > 1
'dataset':
dataset_name,
'joints_3d':
np.zeros((cfg.data_cfg.num_joints, 3), dtype=np.float32),
'joints_3d_visible':
np.zeros((cfg.data_cfg.num_joints, 3), dtype=np.float32),
'rotation':
0,
'ann_info': {
'image_size': np.array(cfg.data_cfg['image_size']),
'num_joints': cfg.data_cfg['num_joints'],
'flip_pairs': flip_pairs
}
}
data = test_pipeline(data)
batch_data.append(data)
batch_data = collate(batch_data, samples_per_gpu=1)
if next(model.parameters()).is_cuda:
# scatter does not work here, so just move the image to the cuda device
batch_data['img'] = batch_data['img'].to(device)
# get all img_metas of each bounding box
batch_data['img_metas'] = [
img_metas[0] for img_metas in batch_data['img_metas'].data
]
# forward the model
with torch.no_grad():
result = model(
img=batch_data['img'],
img_metas=batch_data['img_metas'],
return_loss=False,
return_heatmap=return_heatmap)
return result['preds'], result['output_heatmap']
def inference_top_down_pose_model(model,
img_or_path,
person_results=None,
bbox_thr=None,
format='xywh',
dataset='TopDownCocoDataset',
dataset_info=None,
return_heatmap=False,
outputs=None):
"""Inference a single image with a list of person bounding boxes.
num_people: P
num_keypoints: K
bbox height: H
bbox width: W
Args:
model (nn.Module): The loaded pose model.
img_or_path (str| np.ndarray): Image filename or loaded image.
person_results (List(dict), optional): a list of detected persons that
contains following items:
- 'bbox' and/or 'track_id'.
- 'bbox' (4, ) or (5, ): The person bounding box, which contains
4 box coordinates (and score).
- 'track_id' (int): The unique id for each human instance.
If not provided, a dummy person result with a bbox covering the
entire image will be used. Default: None.
bbox_thr: Threshold for bounding boxes. Only bboxes with higher scores
will be fed into the pose detector. If bbox_thr is None, ignore it.
format: bbox format ('xyxy' | 'xywh'). Default: 'xywh'.
'xyxy' means (left, top, right, bottom),
'xywh' means (left, top, width, height).
dataset (str): Dataset name, e.g. 'TopDownCocoDataset'.
It is deprecated. Please use dataset_info instead.
dataset_info (DatasetInfo): A class containing all dataset info.
return_heatmap (bool) : Flag to return heatmap, default: False
outputs (list(str) | tuple(str)) : Names of layers whose outputs
need to be returned, default: None
Returns:
list[dict]: The bbox & pose info,
Each item in the list is a dictionary,
containing the bbox: (left, top, right, bottom, [score])
and the pose (ndarray[Kx3]): x, y, score
list[dict[np.ndarray[N, K, H, W] | torch.tensor[N, K, H, W]]]:
Output feature maps from layers specified in `outputs`.
Includes 'heatmap' if `return_heatmap` is True.
"""
# get dataset info
if (dataset_info is None and hasattr(model, 'cfg')
and 'dataset_info' in model.cfg):
dataset_info = DatasetInfo(model.cfg.dataset_info)
if dataset_info is None:
warnings.warn(
'dataset is deprecated.'
'Please set `dataset_info` in the config.'
'Check https://github.com/open-mmlab/mmpose/pull/663'
' for details.', DeprecationWarning)
# only two kinds of bbox format are supported.
assert format in ['xyxy', 'xywh']
pose_results = []
returned_outputs = []
if person_results is None:
# create dummy person results
if isinstance(img_or_path, str):
width, height = Image.open(img_or_path).size
else:
height, width = img_or_path.shape[:2]
person_results = [{'bbox': np.array([0, 0, width, height])}]
if len(person_results) == 0:
return pose_results, returned_outputs
# Instead of preprocessing each bbox in a for-loop, preprocess all bboxes at once.
bboxes = np.array([box['bbox'] for box in person_results])
# Select bboxes by score threshold
if bbox_thr is not None:
assert bboxes.shape[1] == 5
valid_idx = np.where(bboxes[:, 4] > bbox_thr)[0]
bboxes = bboxes[valid_idx]
person_results = [person_results[i] for i in valid_idx]
if format == 'xyxy':
bboxes_xyxy = bboxes
bboxes_xywh = _xyxy2xywh(bboxes)
else:
# format is already 'xywh'
bboxes_xywh = bboxes
bboxes_xyxy = _xywh2xyxy(bboxes)
# if bbox_thr filtered out all bounding boxes
if len(bboxes_xywh) == 0:
return [], []
with OutputHook(model, outputs=outputs, as_tensor=False) as h:
# poses is result['preds'] # N x 17 x 3
poses, heatmap = _inference_single_pose_model(
model,
img_or_path,
bboxes_xywh,
dataset=dataset,
dataset_info=dataset_info,
return_heatmap=return_heatmap)
if return_heatmap:
h.layer_outputs['heatmap'] = heatmap
returned_outputs.append(h.layer_outputs)
assert len(poses) == len(person_results), print(
len(poses), len(person_results), len(bboxes_xyxy))
for pose, person_result, bbox_xyxy in zip(poses, person_results,
bboxes_xyxy):
pose_result = person_result.copy()
pose_result['keypoints'] = pose
pose_result['bbox'] = bbox_xyxy
pose_results.append(pose_result)
return pose_results, returned_outputs
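# Typical usage sketch (not part of the original module; the config/checkpoint
# paths, image name and detector output below are assumptions):
#
#   pose_model = init_pose_model('topdown_config.py', 'topdown_checkpoint.pth')
#   person_results = [{'bbox': np.array([50, 50, 200, 400, 0.99])}]
#   pose_results, _ = inference_top_down_pose_model(
#       pose_model, 'demo.jpg', person_results, bbox_thr=0.3, format='xyxy')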
def inference_bottom_up_pose_model(model,
img_or_path,
dataset='BottomUpCocoDataset',
dataset_info=None,
pose_nms_thr=0.9,
return_heatmap=False,
outputs=None):
"""Inference a single image.
num_people: P
num_keypoints: K
bbox height: H
bbox width: W
Args:
model (nn.Module): The loaded pose model.
img_or_path (str| np.ndarray): Image filename or loaded image.
dataset (str): Dataset name, e.g. 'BottomUpCocoDataset'.
It is deprecated. Please use dataset_info instead.
dataset_info (DatasetInfo): A class containing all dataset info.
pose_nms_thr (float): retain oks overlap < pose_nms_thr, default: 0.9.
return_heatmap (bool) : Flag to return heatmap, default: False.
outputs (list(str) | tuple(str)) : Names of layers whose outputs
need to be returned, default: None.
Returns:
list[ndarray]: The predicted pose info.
The length of the list is the number of people (P).
Each item in the list is a ndarray, containing each person's
pose (ndarray[Kx3]): x, y, score.
list[dict[np.ndarray[N, K, H, W] | torch.tensor[N, K, H, W]]]:
Output feature maps from layers specified in `outputs`.
Includes 'heatmap' if `return_heatmap` is True.
"""
# get dataset info
if (dataset_info is None and hasattr(model, 'cfg')
and 'dataset_info' in model.cfg):
dataset_info = DatasetInfo(model.cfg.dataset_info)
if dataset_info is not None:
dataset_name = dataset_info.dataset_name
flip_index = dataset_info.flip_index
else:
warnings.warn(
'dataset is deprecated.'
'Please set `dataset_info` in the config.'
'Check https://github.com/open-mmlab/mmpose/pull/663 for details.',
DeprecationWarning)
assert (dataset == 'BottomUpCocoDataset')
dataset_name = dataset
flip_index = [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
pose_results = []
returned_outputs = []
cfg = model.cfg
device = next(model.parameters()).device
# build the data pipeline
channel_order = cfg.test_pipeline[0].get('channel_order', 'rgb')
test_pipeline = [LoadImage(channel_order=channel_order)
] + cfg.test_pipeline[1:]
test_pipeline = Compose(test_pipeline)
# prepare data
data = {
'img_or_path': img_or_path,
'dataset': dataset_name,
'ann_info': {
'image_size': np.array(cfg.data_cfg['image_size']),
'num_joints': cfg.data_cfg['num_joints'],
'flip_index': flip_index,
}
}
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
else:
# just get the actual data from DataContainer
data['img_metas'] = data['img_metas'].data[0]
with OutputHook(model, outputs=outputs, as_tensor=False) as h:
# forward the model
with torch.no_grad():
result = model(
img=data['img'],
img_metas=data['img_metas'],
return_loss=False,
return_heatmap=return_heatmap)
if return_heatmap:
h.layer_outputs['heatmap'] = result['output_heatmap']
returned_outputs.append(h.layer_outputs)
for idx, pred in enumerate(result['preds']):
area = (np.max(pred[:, 0]) - np.min(pred[:, 0])) * (
np.max(pred[:, 1]) - np.min(pred[:, 1]))
pose_results.append({
'keypoints': pred[:, :3],
'score': result['scores'][idx],
'area': area,
})
# pose nms
keep = oks_nms(pose_results, pose_nms_thr, sigmas=None)
pose_results = [pose_results[_keep] for _keep in keep]
return pose_results, returned_outputs
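# Illustrative usage sketch (not part of the original file): it shows how the
# bottom-up API above is typically called. The config/checkpoint paths are
# placeholders, and `init_pose_model` is assumed to be the model-loading helper
# defined earlier in this module (as in mmpose.apis).
def _example_bottom_up_inference(img_path, config_file, checkpoint_file):
    """Minimal bottom-up inference sketch; returns per-person keypoints."""
    pose_model = init_pose_model(config_file, checkpoint_file)
    pose_results, _ = inference_bottom_up_pose_model(
        pose_model, img_path, pose_nms_thr=0.9, return_heatmap=False)
    return pose_results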
def vis_pose_result(model,
img,
result,
radius=4,
thickness=1,
kpt_score_thr=0.3,
bbox_color='green',
dataset='TopDownCocoDataset',
dataset_info=None,
show=False,
out_file=None):
"""Visualize the detection results on the image.
Args:
        model (nn.Module): The loaded pose model.
img (str | np.ndarray): Image filename or loaded image.
result (list[dict]): The results to draw over `img`
(bbox_result, pose_result).
radius (int): Radius of circles.
thickness (int): Thickness of lines.
kpt_score_thr (float): The threshold to visualize the keypoints.
        bbox_color (str): Color of the bounding boxes. Default: 'green'.
        dataset (str): Dataset name, e.g. 'TopDownCocoDataset'.
            It is deprecated. Please use dataset_info instead.
        dataset_info (DatasetInfo): A class containing all dataset info.
        show (bool): Whether to show the image. Default: False.
out_file (str|None): The filename of the output visualization image.
"""
# get dataset info
if (dataset_info is None and hasattr(model, 'cfg')
and 'dataset_info' in model.cfg):
dataset_info = DatasetInfo(model.cfg.dataset_info)
if dataset_info is not None:
skeleton = dataset_info.skeleton
pose_kpt_color = dataset_info.pose_kpt_color
pose_link_color = dataset_info.pose_link_color
else:
warnings.warn(
            'dataset is deprecated. '
            'Please set `dataset_info` in the config. '
'Check https://github.com/open-mmlab/mmpose/pull/663 for details.',
DeprecationWarning)
# TODO: These will be removed in the later versions.
palette = np.array([[255, 128, 0], [255, 153, 51], [255, 178, 102],
[230, 230, 0], [255, 153, 255], [153, 204, 255],
[255, 102, 255], [255, 51, 255], [102, 178, 255],
[51, 153, 255], [255, 153, 153], [255, 102, 102],
[255, 51, 51], [153, 255, 153], [102, 255, 102],
[51, 255, 51], [0, 255, 0], [0, 0, 255],
[255, 0, 0], [255, 255, 255]])
if dataset in ('TopDownCocoDataset', 'BottomUpCocoDataset',
'TopDownOCHumanDataset', 'AnimalMacaqueDataset'):
# show the results
skeleton = [[15, 13], [13, 11], [16, 14], [14, 12], [11, 12],
[5, 11], [6, 12], [5, 6], [5, 7], [6, 8], [7, 9],
[8, 10], [1, 2], [0, 1], [0, 2], [1, 3], [2, 4],
[3, 5], [4, 6]]
pose_link_color = palette[[
0, 0, 0, 0, 7, 7, 7, 9, 9, 9, 9, 9, 16, 16, 16, 16, 16, 16, 16
]]
pose_kpt_color = palette[[
16, 16, 16, 16, 16, 9, 9, 9, 9, 9, 9, 0, 0, 0, 0, 0, 0
]]
elif dataset == 'TopDownCocoWholeBodyDataset':
# show the results
skeleton = [[15, 13], [13, 11], [16, 14], [14, 12], [11, 12],
[5, 11], [6, 12], [5, 6], [5, 7], [6, 8], [7, 9],
[8, 10], [1, 2], [0, 1], [0, 2],
[1, 3], [2, 4], [3, 5], [4, 6], [15, 17], [15, 18],
[15, 19], [16, 20], [16, 21], [16, 22], [91, 92],
[92, 93], [93, 94], [94, 95], [91, 96], [96, 97],
[97, 98], [98, 99], [91, 100], [100, 101], [101, 102],
[102, 103], [91, 104], [104, 105], [105, 106],
[106, 107], [91, 108], [108, 109], [109, 110],
[110, 111], [112, 113], [113, 114], [114, 115],
[115, 116], [112, 117], [117, 118], [118, 119],
[119, 120], [112, 121], [121, 122], [122, 123],
[123, 124], [112, 125], [125, 126], [126, 127],
[127, 128], [112, 129], [129, 130], [130, 131],
[131, 132]]
pose_link_color = palette[[
0, 0, 0, 0, 7, 7, 7, 9, 9, 9, 9, 9, 16, 16, 16, 16, 16, 16, 16
] + [16, 16, 16, 16, 16, 16] + [
0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12, 16, 16, 16,
16
] + [
0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12, 16, 16, 16,
16
]]
pose_kpt_color = palette[
[16, 16, 16, 16, 16, 9, 9, 9, 9, 9, 9, 0, 0, 0, 0, 0, 0] +
[0, 0, 0, 0, 0, 0] + [19] * (68 + 42)]
elif dataset == 'TopDownAicDataset':
skeleton = [[2, 1], [1, 0], [0, 13], [13, 3], [3, 4], [4, 5],
[8, 7], [7, 6], [6, 9], [9, 10], [10, 11], [12, 13],
[0, 6], [3, 9]]
pose_link_color = palette[[
9, 9, 9, 9, 9, 9, 16, 16, 16, 16, 16, 0, 7, 7
]]
pose_kpt_color = palette[[
9, 9, 9, 9, 9, 9, 16, 16, 16, 16, 16, 16, 0, 0
]]
elif dataset == 'TopDownMpiiDataset':
skeleton = [[0, 1], [1, 2], [2, 6], [6, 3], [3, 4], [4, 5], [6, 7],
[7, 8], [8, 9], [8, 12], [12, 11], [11, 10], [8, 13],
[13, 14], [14, 15]]
pose_link_color = palette[[
16, 16, 16, 16, 16, 16, 7, 7, 0, 9, 9, 9, 9, 9, 9
]]
pose_kpt_color = palette[[
16, 16, 16, 16, 16, 16, 7, 7, 0, 0, 9, 9, 9, 9, 9, 9
]]
elif dataset == 'TopDownMpiiTrbDataset':
skeleton = [[12, 13], [13, 0], [13, 1], [0, 2], [1, 3], [2, 4],
[3, 5], [0, 6], [1, 7], [6, 7], [6, 8], [7,
9], [8, 10],
[9, 11], [14, 15], [16, 17], [18, 19], [20, 21],
[22, 23], [24, 25], [26, 27], [28, 29], [30, 31],
[32, 33], [34, 35], [36, 37], [38, 39]]
pose_link_color = palette[[16] * 14 + [19] * 13]
pose_kpt_color = palette[[16] * 14 + [0] * 26]
elif dataset in ('OneHand10KDataset', 'FreiHandDataset',
'PanopticDataset'):
skeleton = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7],
[7, 8], [0, 9], [9, 10], [10, 11], [11, 12], [0, 13],
[13, 14], [14, 15], [15, 16], [0, 17], [17, 18],
[18, 19], [19, 20]]
pose_link_color = palette[[
0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12, 16, 16, 16,
16
]]
pose_kpt_color = palette[[
0, 0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12, 16, 16,
16, 16
]]
elif dataset == 'InterHand2DDataset':
skeleton = [[0, 1], [1, 2], [2, 3], [4, 5], [5, 6], [6, 7], [8, 9],
[9, 10], [10, 11], [12, 13], [13, 14], [14, 15],
[16, 17], [17, 18], [18, 19], [3, 20], [7, 20],
[11, 20], [15, 20], [19, 20]]
pose_link_color = palette[[
0, 0, 0, 4, 4, 4, 8, 8, 8, 12, 12, 12, 16, 16, 16, 0, 4, 8, 12,
16
]]
pose_kpt_color = palette[[
0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12, 16, 16, 16,
16, 0
]]
elif dataset == 'Face300WDataset':
# show the results
skeleton = []
pose_link_color = palette[[]]
pose_kpt_color = palette[[19] * 68]
kpt_score_thr = 0
elif dataset == 'FaceAFLWDataset':
# show the results
skeleton = []
pose_link_color = palette[[]]
pose_kpt_color = palette[[19] * 19]
kpt_score_thr = 0
elif dataset == 'FaceCOFWDataset':
# show the results
skeleton = []
pose_link_color = palette[[]]
pose_kpt_color = palette[[19] * 29]
kpt_score_thr = 0
elif dataset == 'FaceWFLWDataset':
# show the results
skeleton = []
pose_link_color = palette[[]]
pose_kpt_color = palette[[19] * 98]
kpt_score_thr = 0
elif dataset == 'AnimalHorse10Dataset':
skeleton = [[0, 1], [1, 12], [12, 16], [16, 21], [21, 17],
[17, 11], [11, 10], [10, 8], [8, 9], [9, 12], [2, 3],
[3, 4], [5, 6], [6, 7], [13, 14], [14, 15], [18, 19],
[19, 20]]
pose_link_color = palette[[4] * 10 + [6] * 2 + [6] * 2 + [7] * 2 +
[7] * 2]
pose_kpt_color = palette[[
4, 4, 6, 6, 6, 6, 6, 6, 4, 4, 4, 4, 4, 7, 7, 7, 4, 4, 7, 7, 7,
4
]]
elif dataset == 'AnimalFlyDataset':
skeleton = [[1, 0], [2, 0], [3, 0], [4, 3], [5, 4], [7, 6], [8, 7],
[9, 8], [11, 10], [12, 11], [13, 12], [15, 14],
[16, 15], [17, 16], [19, 18], [20, 19], [21, 20],
[23, 22], [24, 23], [25, 24], [27, 26], [28, 27],
[29, 28], [30, 3], [31, 3]]
pose_link_color = palette[[0] * 25]
pose_kpt_color = palette[[0] * 32]
elif dataset == 'AnimalLocustDataset':
skeleton = [[1, 0], [2, 1], [3, 2], [4, 3], [6, 5], [7, 6], [9, 8],
[10, 9], [11, 10], [13, 12], [14, 13], [15, 14],
[17, 16], [18, 17], [19, 18], [21, 20], [22, 21],
[24, 23], [25, 24], [26, 25], [28, 27], [29, 28],
[30, 29], [32, 31], [33, 32], [34, 33]]
pose_link_color = palette[[0] * 26]
pose_kpt_color = palette[[0] * 35]
elif dataset == 'AnimalZebraDataset':
skeleton = [[1, 0], [2, 1], [3, 2], [4, 2], [5, 7], [6, 7], [7, 2],
[8, 7]]
pose_link_color = palette[[0] * 8]
pose_kpt_color = palette[[0] * 9]
    elif dataset == 'AnimalPoseDataset':
skeleton = [[0, 1], [0, 2], [1, 3], [0, 4], [1, 4], [4, 5], [5, 7],
[6, 7], [5, 8], [8, 12], [12, 16], [5, 9], [9, 13],
[13, 17], [6, 10], [10, 14], [14, 18], [6, 11],
[11, 15], [15, 19]]
pose_link_color = palette[[0] * 20]
pose_kpt_color = palette[[0] * 20]
else:
        raise NotImplementedError()
if hasattr(model, 'module'):
model = model.module
img = model.show_result(
img,
result,
skeleton,
radius=radius,
thickness=thickness,
pose_kpt_color=pose_kpt_color,
pose_link_color=pose_link_color,
kpt_score_thr=kpt_score_thr,
bbox_color=bbox_color,
show=show,
out_file=out_file)
return img
def process_mmdet_results(mmdet_results, cat_id=1):
"""Process mmdet results, and return a list of bboxes.
Args:
mmdet_results (list|tuple): mmdet results.
cat_id (int): category id (default: 1 for human)
Returns:
person_results (list): a list of detected bounding boxes
"""
if isinstance(mmdet_results, tuple):
det_results = mmdet_results[0]
else:
det_results = mmdet_results
bboxes = det_results[cat_id - 1]
person_results = []
for bbox in bboxes:
person = {}
person['bbox'] = bbox
person_results.append(person)
return person_results
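# Illustrative end-to-end sketch (not part of the original file): it chains the
# helpers in this module for the top-down workflow described in the docstrings
# above. The pose model is assumed to be built elsewhere (e.g. with
# `init_pose_model`), `mmdet_results` come from an mmdet detector, and the
# top-down inference function defined earlier in this file is assumed to be
# named `inference_top_down_pose_model`, as in mmpose.
def _example_top_down_pipeline(pose_model, mmdet_results, img_path):
    """Detector boxes -> top-down pose estimation -> visualization."""
    person_results = process_mmdet_results(mmdet_results, cat_id=1)
    pose_results, _ = inference_top_down_pose_model(
        pose_model, img_path, person_results, bbox_thr=0.3, format='xyxy')
    return vis_pose_result(
        pose_model, img_path, pose_results, kpt_score_thr=0.3, show=False)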
|
[] |
[] |
[
"KMP_DUPLICATE_LIB_OK"
] |
[]
|
["KMP_DUPLICATE_LIB_OK"]
|
python
| 1 | 0 | |
share/lib/python/neuron/__init__.py
|
"""
neuron
======
For empirically-based simulations of neurons and networks of neurons in Python.
This is the top-level module of the official python interface to
the NEURON simulation environment (http://neuron.yale.edu/neuron/).
Documentation is available in the docstrings.
For a list of available names, try dir(neuron).
Example:
$ ipython
In [1]: import neuron
NEURON -- VERSION 6.2 2008-08-22
Duke, Yale, and the BlueBrain Project -- Copyright 1984-2007
See http://neuron.yale.edu/credits.html
In [2]: neuron.h ?
Important names and sub-packages
---------------------
For help on these useful functions, see their docstrings:
neuron.init, run, psection, load_mechanisms
neuron.h
The top-level Hoc interpreter.
Execute Hoc commands by calling h with a string argument:
>>> h('objref myobj')
>>> h('myobj = new Vector(10)')
All Hoc defined variables are accessible by attribute access to h.
Example:
>>> print(h.myobj.x[9])
Hoc Classes are also defined, for example:
>>> v = h.Vector([1,2,3])
>>> soma = h.Section()
More help is available for the respective class by looking in the object docstring:
>>> help(h.Vector)
neuron.gui
Import this package if you are using NEURON as an extension to Python,
and you would like to use the NEURON GUI.
If you are using NEURON with embedded Python ("nrniv -python") and would like
to use the NEURON GUI, use "nrngui -python" instead.
$Id: __init__.py,v 1.1 2008/05/26 11:39:44 hines Exp hines $
"""
## With Python launched under Linux, shared libraries are apparently imported
## using RTLD_LOCAL. For --with-paranrn=dynamic, this caused a failure when
## libnrnmpi.so is dynamically loaded because nrnmpi_myid (and other global
## variables in src/nrnmpi/nrnmpi_def_cinc) were not resolved --- even though
## all those variables are defined in src/oc/nrnmpi_dynam.c and that
## does a dlopen("libnrnmpi.so", RTLD_NOW | RTLD_GLOBAL) .
## In this case setting the dlopenflags below fixes the problem. But it
## seems that DLFCN is often not available.
## This situation is conceptually puzzling because there
## never seems to be a problem dynamically loading libnrnmech.so, though it
## obviously makes use of many names in the rest of NEURON. Anyway,
## we make the following available in case it is ever needed at least to
## verify that some import problem is traceable to this issue.
## The problem can be resolved in two ways. 1) see src/nrnmpi/nrnmpi_dynam.c
## which promotes liboc.so and libnrniv.so to RTLD_GLOBAL (commented out).
## 2) The better way of specifying those libraries to libnrnmpi_la_LIBADD
## in src/nrnmpi/Makefile.am . This latter also explains why libnrnmech.so
## does not have this problem.
#try:
# import sys
# import DLFCN
# sys.setdlopenflags(DLFCN.RTLD_NOW | DLFCN.RTLD_GLOBAL)
#except:
# pass
import sys
import logging
embedded = True if 'hoc' in sys.modules else False
try:
import hoc
except:
try:
#Python3.1 extending needs to look into the module explicitly
import neuron.hoc
except: # mingw name strategy
exec("import neuron.hoc%d%d as hoc" % (sys.version_info[0], sys.version_info[1]))
import nrn
import _neuron_section
h = hoc.HocObject()
version = h.nrnversion(5)
__version__ = version
_original_hoc_file = None
if not hasattr(hoc, "__file__"):
import platform
import os
p = h.nrnversion(6)
if "--prefix=" in p:
p = p[p.find('--prefix=') + 9:]
p = p[:p.find("'")]
else:
p = "/usr/local/nrn"
if sys.version_info >= (3, 0):
import sysconfig
phoc = p + "/lib/python/neuron/hoc%s" % sysconfig.get_config_var('SO')
else:
phoc = p + "/lib/python/neuron/hoc.so"
if not os.path.isfile(phoc):
phoc = p + "/%s/lib/libnrnpython%d.so" % (platform.machine(), sys.version_info[0])
if not os.path.isfile(phoc):
phoc = p + "/%s/lib/libnrnpython.so" % platform.machine()
setattr(hoc, "__file__", phoc)
else:
_original_hoc_file = hoc.__file__
# As a workaround to importing doc at neuron import time
# (which leads to chicken and egg issues on some platforms)
# define a dummy help function which imports doc,
# calls the real help function, and reassigns neuron.help to doc.help
# (thus replacing the dummy)
def help(request=None):
global help
from neuron import doc
doc.help(request)
help = doc.help
try:
import pydoc
pydoc.help = help
except:
pass
# Global test-suite function
def test():
""" Runs a global battery of unit tests on the neuron module."""
import neuron.tests
import unittest
runner = unittest.TextTestRunner(verbosity=2)
runner.run(neuron.tests.suite())
def test_rxd():
""" Runs a tests on the rxd and crxd modules."""
import neuron.tests
import unittest
runner = unittest.TextTestRunner(verbosity=2)
runner.run(neuron.tests.test_rxd.suite())
# ------------------------------------------------------------------------------
# class factory for subclassing h.anyclass
# h.anyclass methods may be overridden. If so the base method can be called
# using the idiom self.basemethod = self.baseattr('methodname')
# ------------------------------------------------------------------------------
if sys.version_info[0] == 2:
from neuron.hclass2 import hclass
else:
from neuron.hclass3 import hclass
# global list of paths already loaded by load_mechanisms
nrn_dll_loaded = []
def load_mechanisms(path):
"""
load_mechanisms(path)
Search for and load NMODL mechanisms from the path given.
This function will not load a mechanism path twice.
The path should specify the directory in which nrnivmodl or mknrndll was run,
and in which the directory 'i686' (or 'x86_64' or 'powerpc' depending on your platform)
was created"""
import platform
global nrn_dll_loaded
if path in nrn_dll_loaded:
print("Mechanisms already loaded from path: %s. Aborting." % path)
return True
# in case NEURON is assuming a different architecture to Python,
# we try multiple possibilities
libname = 'libnrnmech.so'
libsubdir = '.libs'
arch_list = [platform.machine(), 'i686', 'x86_64', 'powerpc', 'umac']
# windows loads nrnmech.dll
if h.unix_mac_pc() == 3:
libname = 'nrnmech.dll'
libsubdir = ''
arch_list = ['']
for arch in arch_list:
lib_path = os.path.join(path, arch, libsubdir, libname)
if os.path.exists(lib_path):
h.nrn_load_dll(lib_path)
nrn_dll_loaded.append(path)
return True
print("NEURON mechanisms not found in %s." % path)
return False
import os,sys
if 'NRN_NMODL_PATH' in os.environ:
nrn_nmodl_path = os.environ['NRN_NMODL_PATH'].split(':')
print('Auto-loading mechanisms:')
print('NRN_NMODL_PATH=%s' % os.environ['NRN_NMODL_PATH'])
for x in nrn_nmodl_path:
#print "from path %s:" % x
load_mechanisms(x)
#print "\n"
print("Done.\n")
# ------------------------------------------------------------------------------
# Python classes and functions without a Hoc equivalent, mainly for internal
# use within this file.
# ------------------------------------------------------------------------------
class HocError(Exception): pass
class Wrapper(object):
"""Base class to provide attribute access for HocObjects."""
def __getattr__(self, name):
if name == 'hoc_obj':
return self.__dict__['hoc_obj']
else:
try:
return self.__getattribute__(name)
except AttributeError:
return self.hoc_obj.__getattribute__(name)
def __setattr__(self, name, value):
try:
self.hoc_obj.__setattr__(name, value)
except LookupError:
object.__setattr__(self, name, value)
def new_point_process(name,doc=None):
"""
Returns a Python-wrapped hoc class where the object needs to be associated
with a section.
doc - specify a docstring for the new pointprocess class
"""
h('obfunc new_%s() { return new %s($1) }' % (name, name))
class someclass(Wrapper):
__doc__ = doc
def __init__(self, section, position=0.5):
assert 0 <= position <= 1
section.push()
self.__dict__['hoc_obj'] = getattr(h, 'new_%s' % name)(position) # have to put directly in __dict__ to avoid infinite recursion with __getattr__
h.pop_section()
someclass.__name__ = name
return someclass
def new_hoc_class(name,doc=None):
"""
Returns a Python-wrapped hoc class where the object does not need to be
associated with a section.
doc - specify a docstring for the new hoc class
"""
h('obfunc new_%s() { return new %s() }' % (name, name))
class someclass(Wrapper):
__doc__ = doc
def __init__(self, **kwargs):
self.__dict__['hoc_obj'] = getattr(h, 'new_%s' % name)()
for k,v in list(kwargs.items()):
setattr(self.hoc_obj, k, v)
someclass.__name__ = name
return someclass
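# Illustrative sketch only (not part of the original file): shows how the two
# factory functions above can wrap standard hoc classes. ExpSyn and NetStim are
# built-in NEURON mechanisms.
def _example_wrapped_hoc_classes():
    """Wrap hoc classes with new_point_process / new_hoc_class."""
    ExpSyn = new_point_process('ExpSyn', doc='hoc ExpSyn wrapped for Python')
    NetStim = new_hoc_class('NetStim', doc='hoc NetStim wrapped for Python')
    soma = h.Section(name='soma')
    syn = ExpSyn(soma, position=0.5)  # point process attached to soma(0.5)
    stim = NetStim(interval=10, number=5)  # keyword args set hoc attributes
    return syn, stim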
# ------------------------------------------------------------------------------
# Python equivalents to Hoc functions
# ------------------------------------------------------------------------------
xopen = h.xopen
quit = h.quit
def hoc_execute(hoc_commands, comment=None):
assert isinstance(hoc_commands,list)
if comment:
logging.debug(comment)
for cmd in hoc_commands:
logging.debug(cmd)
success = hoc.execute(cmd)
if not success:
raise HocError('Error produced by hoc command "%s"' % cmd)
def hoc_comment(comment):
logging.debug(comment)
def psection(section):
"""
function psection(section):
Print info about section in a hoc format which is executable.
(length, parent, diameter, membrane information)
Use section.psection() instead to get a data structure that
contains the same information and more.
See:
https://www.neuron.yale.edu/neuron/static/py_doc/modelspec/programmatic/topology.html?#psection
"""
h.psection(sec=section)
def init():
"""
function init():
Initialize the simulation kernel. This should be called before a run(tstop) call.
Use h.finitialize() instead, which allows you to specify the membrane potential
    to initialize to, e.g. h.finitialize(-65)
https://www.neuron.yale.edu/neuron/static/py_doc/simctrl/programmatic.html?#finitialize
"""
h.finitialize()
def run(tstop):
"""
function run(tstop)
Run the simulation (advance the solver) until tstop [ms]
"""
h('tstop = %g' % tstop)
h('while (t < tstop) { fadvance() }')
# what about pc.psolve(tstop)?
_nrn_dll = None
_nrn_hocobj_ptr = None
_double_ptr = None
_double_size = None
def numpy_element_ref(numpy_array, index):
"""Return a HOC reference into a numpy array.
Parameters
----------
numpy_array : :class:`numpy.ndarray`
the numpy array
index : int
the index into the numpy array
.. warning::
No bounds checking.
.. warning::
Assumes a contiguous array of doubles. In particular, be careful when
using slices. If the array is multi-dimensional,
the user must figure out the integer index to the desired element.
"""
global _nrn_dll, _double_ptr, _double_size, _nrn_hocobj_ptr
import ctypes
if _nrn_hocobj_ptr is None:
_nrn_hocobj_ptr = nrn_dll_sym('nrn_hocobj_ptr')
_nrn_hocobj_ptr.restype = ctypes.py_object
_double_ptr = ctypes.POINTER(ctypes.c_double)
_double_size = ctypes.sizeof(ctypes.c_double)
void_p = ctypes.cast(numpy_array.ctypes.data_as(_double_ptr), ctypes.c_voidp).value + index * _double_size
return _nrn_hocobj_ptr(ctypes.cast(void_p, _double_ptr))
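# Illustrative sketch only (not part of the original file): the returned object
# behaves as a hoc double pointer aliasing one element of the numpy buffer, so
# it can be handed to hoc calls that expect a pointer (e.g. h.setpointer).
def _example_numpy_element_ref():
    """Get a hoc pointer into element 3 of a numpy array."""
    import numpy
    a = numpy.zeros(10)
    ref = numpy_element_ref(a, 3)  # pointer object aliasing a[3]
    a[3] = -65.0                   # the change is visible through ref
    return a, ref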
def nrn_dll_sym(name, type=None):
"""return the specified object from the NEURON dlls.
Parameters
----------
name : string
the name of the object (function, integer, etc...)
type : None or ctypes type (e.g. ctypes.c_int)
the type of the object (if None, assumes function pointer)
"""
# TODO: this won't work under Windows; will need to search through until
    # we can find the right dll (should we cache the results of the search?)
import os
if os.name == 'nt':
return nrn_dll_sym_nt(name, type)
dll = nrn_dll()
if type is None:
return dll.__getattr__(name)
else:
return type.in_dll(dll, name)
nt_dlls = []
def nrn_dll_sym_nt(name, type):
"""return the specified object from the NEURON dlls.
helper for nrn_dll_sym(name, type). Try to find the name in either
nrniv.dll or libnrnpython1013.dll
"""
global nt_dlls
import ctypes
import os
    if len(nt_dlls) == 0:
b = 'bin'
        if h.nrnversion(8).find('i686') == 0:
b = 'bin'
path = os.path.join(h.neuronhome().replace('/','\\'), b)
p = sys.version_info[0]*10 + sys.version_info[1]
for dllname in ['nrniv.dll', 'libnrnpython%d.dll'%p]:
p = os.path.join(path, dllname)
nt_dlls.append(ctypes.cdll[p])
for dll in nt_dlls:
try:
a = dll.__getattr__(name)
except:
a = None
if a:
if type is None:
return a
else:
return type.in_dll(dll, name)
raise Exception('unable to connect to the NEURON library containing '+name)
def nrn_dll(printpath=False):
"""Return a ctypes object corresponding to the NEURON library.
.. warning::
This provides access to the C-language internals of NEURON and should
be used with care.
"""
import ctypes
import os
import platform
import glob
try:
#extended? if there is a __file__, then use that
if printpath: print ("hoc.__file__ %s" % _original_hoc_file)
the_dll = ctypes.cdll[_original_hoc_file]
return the_dll
except:
pass
success = False
if sys.platform == 'msys' or sys.platform == 'win32':
p = 'hoc%d%d' % (sys.version_info[0], sys.version_info[1])
else:
p = 'hoc'
try:
# maybe hoc.so in this neuron module
base_path = os.path.join(os.path.split(__file__)[0], p)
dlls = glob.glob(base_path + '*.*')
for dll in dlls:
try:
the_dll = ctypes.cdll[dll]
if printpath : print(dll)
return the_dll
except:
pass
except:
pass
# maybe old default module location
neuron_home = os.path.split(os.path.split(h.neuronhome())[0])[0]
base_path = os.path.join(neuron_home, 'lib' , 'python', 'neuron', p)
for extension in ['', '.dll', '.so', '.dylib']:
dlls = glob.glob(base_path + '*' + extension)
for dll in dlls:
try:
the_dll = ctypes.cdll[dll]
if printpath : print(dll)
success = True
except:
pass
if success: break
if success: break
else:
raise Exception('unable to connect to the NEURON library')
return the_dll
# TODO: put this someplace else
# can't be in rxd because that would break things if no scipy
_sec_db = {}
def _declare_contour(secobj, secname):
j = secobj.first
center_vec = secobj.contourcenter(secobj.raw.getrow(0), secobj.raw.getrow(1), secobj.raw.getrow(2))
x0, y0, z0 = [center_vec.x[i] for i in range(3)]
# (is_stack, x, y, z, xcenter, ycenter, zcenter)
_sec_db[secname] = (True if secobj.contour_list else False, secobj.raw.getrow(0).c(j), secobj.raw.getrow(1).c(j), secobj.raw.getrow(2).c(j), x0, y0, z0)
def _create_all_list(obj):
# used by import3d
obj.all = []
def _create_sections_in_obj(obj, name, numsecs):
# used by import3d to instantiate inside of a Python object
setattr(obj, name, [h.Section(name="%s[%d]" % (name, i), cell=obj) for i in range(int(numsecs))])
def _connect_sections_in_obj(obj, childsecname, childx, parentsecname, parentx):
# used by import3d
childarray, childi = _parse_import3d_name(childsecname)
parentarray, parenti = _parse_import3d_name(parentsecname)
getattr(obj, childarray)[childi].connect(getattr(obj, parentarray)[parenti](parentx), childx)
def _parse_import3d_name(name):
if '[' in name:
import re
array, i = re.search(r'(.*)\[(\d*)\]', name).groups()
i = int(i)
else:
array = name
i = 0
return array, i
def _pt3dstyle_in_obj(obj, name, x, y, z):
# used by import3d
array, i = _parse_import3d_name(name)
h.pt3dstyle(1, x, y, z, sec=getattr(obj, array)[i])
def _pt3dadd_in_obj(obj, name, x, y, z, d):
array, i = _parse_import3d_name(name)
h.pt3dadd(x, y, z, d, sec=getattr(obj, array)[i])
def numpy_from_pointer(cpointer, size):
if sys.version_info.major < 3:
return numpy.frombuffer(numpy.core.multiarray.int_asbuffer(
ctypes.addressof(cpointer.contents),
size * numpy.dtype(float).itemsize))
else:
buf_from_mem = ctypes.pythonapi.PyMemoryView_FromMemory
buf_from_mem.restype = ctypes.py_object
buf_from_mem.argtypes = (ctypes.c_void_p, ctypes.c_int, ctypes.c_int)
cbuffer = buf_from_mem(
cpointer, size * numpy.dtype(float).itemsize, 0x200)
        return numpy.ndarray((size,), numpy.float64, cbuffer, order='C')
try:
import ctypes
import numpy
import traceback
vec_to_numpy_prototype = ctypes.CFUNCTYPE(ctypes.py_object, ctypes.c_int, ctypes.POINTER(ctypes.c_double))
def vec2numpy(size, data):
try:
return numpy_from_pointer(data, size)
except:
traceback.print_exc()
return None
vec_to_numpy_callback = vec_to_numpy_prototype(vec2numpy)
set_vec_as_numpy = nrn_dll_sym('nrnpy_set_vec_as_numpy')
set_vec_as_numpy(vec_to_numpy_callback)
except:
pass
class _WrapperPlot:
def __init__(self, data):
'''do not call directly'''
self._data = data
def __repr__(self):
return '{}.plot()'.format(repr(self._data))
class _RangeVarPlot(_WrapperPlot):
"""Plots the current state of the RangeVarPlot on the graph.
Additional arguments and keyword arguments are passed to the graph's
plotting method.
Example, showing plotting to NEURON graphics, bokeh, and matplotlib:
.. code::
from matplotlib import pyplot
from neuron import h, gui
import bokeh.plotting as b
import math
dend = h.Section(name='dend')
dend.nseg = 55
dend.L = 6.28
# looping over dend.allseg instead of dend to set 0 and 1 ends
for seg in dend.allseg():
seg.v = math.sin(dend.L * seg.x)
r = h.RangeVarPlot('v', dend(0), dend(1))
# matplotlib
graph = pyplot.gca()
r.plot(graph, linewidth=10, color='r')
# NEURON Interviews graph
g = h.Graph()
r.plot(g, 2, 3)
g.exec_menu('View = plot')
# Bokeh
bg = b.Figure()
r.plot(bg, line_width=10)
b.show(bg)
pyplot.show()"""
def __call__(self, graph, *args, **kwargs):
yvec = h.Vector()
xvec = h.Vector()
self._data.to_vector(yvec, xvec)
if isinstance(graph, hoc.HocObject):
return yvec.line(graph, xvec, *args)
if hasattr(graph, 'plot'):
# works with e.g. pyplot or a matplotlib axis
return graph.plot(xvec, yvec, *args, **kwargs)
if hasattr(graph, 'line'):
# works with e.g. bokeh
return graph.line(xvec, yvec, *args, **kwargs)
if str(type(graph)) == "<class 'matplotlib.figure.Figure'>":
            raise Exception('plot to a matplotlib axis, not a matplotlib figure')
raise Exception('Unable to plot to graphs of type {}'.format(type(graph)))
class _PlotShapePlot(_WrapperPlot):
'''Plots the currently selected data on an object.
Currently only pyplot is supported, e.g.
from matplotlib import pyplot
ps = h.PlotShape(False)
ps.variable('v')
ps.plot(pyplot)
pyplot.show()
Limitations: many. Currently only supports plotting a full cell colored based on a variable.'''
# TODO: handle pointmark, specified sections, color
def __call__(self, graph, *args, **kwargs):
def _get_pyplot_axis3d(fig):
'''requires matplotlib'''
from matplotlib.pyplot import cm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from neuron.gui2.utilities import _segment_3d_pts
class Axis3DWithNEURON(Axes3D):
def auto_aspect(self):
"""sets the x, y, and z range symmetric around the center
Probably needs a square figure to preserve lengths as you rotate."""
bounds = [self.get_xlim(), self.get_ylim(), self.get_zlim()]
half_delta_max = max([(item[1] - item[0]) / 2 for item in bounds])
xmid = sum(bounds[0]) / 2
ymid = sum(bounds[1]) / 2
zmid = sum(bounds[2]) / 2
self.auto_scale_xyz([xmid - half_delta_max, xmid + half_delta_max],
[ymid - half_delta_max, ymid + half_delta_max],
[zmid - half_delta_max, zmid + half_delta_max])
def mark(self, segment, marker='or', **kwargs):
"""plot a marker on a segment
Args:
segment = the segment to mark
marker = matplotlib marker
**kwargs = passed to matplotlib's plot
"""
# TODO: there has to be a better way to do this
sec = segment.sec
n3d = sec.n3d()
arc3d = [sec.arc3d(i) for i in range(n3d)]
x3d = np.array([sec.x3d(i) for i in range(n3d)])
y3d = np.array([sec.y3d(i) for i in range(n3d)])
z3d = np.array([sec.z3d(i) for i in range(n3d)])
seg_l = sec.L * segment.x
x = np.interp(seg_l, arc3d, x3d)
y = np.interp(seg_l, arc3d, y3d)
z = np.interp(seg_l, arc3d, z3d)
self.plot([x], [y], [z], marker)
return self
def _do_plot(self, val_min, val_max,
sections,
variable,
cmap=cm.cool,
**kwargs):
"""
Plots a 3D shapeplot
Args:
sections = list of h.Section() objects to be plotted
**kwargs passes on to matplotlib (e.g. linewidth=2 for thick lines)
Returns:
lines = list of line objects making up shapeplot
"""
# Adapted from
# https://github.com/ahwillia/PyNeuron-Toolbox/blob/master/PyNeuronToolbox/morphology.py
# Accessed 2019-04-11, which had an MIT license
# Default is to plot all sections.
if sections is None:
sections = list(h.allsec())
h.define_shape()
# default color is black
kwargs.setdefault('color', 'black')
                # Plot each segment as a line
lines = {}
lines_list = []
vals = []
for sec in sections:
all_seg_pts = _segment_3d_pts(sec)
for seg, (xs, ys, zs, _, _) in zip(sec, all_seg_pts):
line, = self.plot(xs, ys, zs, '-', **kwargs)
if variable is not None:
try:
if '.' in variable:
mech, var = variable.split('.')
val = getattr(getattr(seg, mech), var)
else:
val = getattr(seg, variable)
except AttributeError:
# leave default color if no variable found
val = None
vals.append(val)
lines[line] = '%s at %s' % (val, seg)
lines_list.append(line)
if variable is not None:
val_range = val_max - val_min
if val_range:
for sec in sections:
for line, val in zip(lines_list, vals):
if val is not None:
col = cmap(int(255 * (val - val_min) / (val_range)))
line.set_color(col)
return lines
return Axis3DWithNEURON(fig)
def _do_plot_on_matplotlib_figure(fig):
import ctypes
get_plotshape_data = nrn_dll_sym('get_plotshape_data')
get_plotshape_data.restype = ctypes.py_object
variable, lo, hi, secs = get_plotshape_data(ctypes.py_object(self._data))
kwargs.setdefault('picker', 2)
result = _get_pyplot_axis3d(fig)
_lines = result._do_plot(lo, hi, secs, variable, *args, **kwargs)
result._mouseover_text = ''
def _onpick(event):
if event.artist in _lines:
result._mouseover_text = _lines[event.artist]
else:
result._mouseover_text = ''
return True
result.auto_aspect()
fig.canvas.mpl_connect('pick_event', _onpick)
def format_coord(*args):
return result._mouseover_text
result.format_coord = format_coord
return result
if hasattr(graph, '__name__') and graph.__name__ == 'matplotlib.pyplot':
fig = graph.figure()
return _do_plot_on_matplotlib_figure(fig)
elif str(type(graph)) == "<class 'matplotlib.figure.Figure'>":
return _do_plot_on_matplotlib_figure(graph)
else:
raise NotImplementedError
try:
import ctypes
def _rvp_plot(rvp):
return _RangeVarPlot(rvp)
def _plotshape_plot(ps):
return _PlotShapePlot(ps)
set_graph_plots = nrn_dll_sym('nrnpy_set_graph_plots')
_rvp_plot_callback = ctypes.py_object(_rvp_plot)
_plotshape_plot_callback = ctypes.py_object(_plotshape_plot)
set_graph_plots(_rvp_plot_callback, _plotshape_plot_callback)
except:
pass
def _has_scipy():
"""
to check for scipy:
has_scipy = 0
objref p
if (nrnpython("import neuron")) {
p = new PythonObject()
has_scipy = p.neuron._has_scipy()
}
"""
try:
import scipy
except:
return 0
return 1
def _pkl(arg):
#print 'neuron._pkl arg is ', arg
return h.Vector(0)
def nrnpy_pass():
return 1
def nrnpy_pr(stdoe, s):
if stdoe == 1:
sys.stdout.write(s.decode())
else:
sys.stderr.write(s.decode())
return 0
if not embedded:
try:
# nrnpy_pr callback in place of hoc printf
# ensures consistent with python stdout even with jupyter notebook.
# nrnpy_pass callback used by h.doNotify() in MINGW when not called from
# gui thread in order to allow the gui thread to run.
nrnpy_set_pr_etal = nrn_dll_sym('nrnpy_set_pr_etal')
nrnpy_pr_proto = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.c_char_p)
nrnpy_pass_proto = ctypes.CFUNCTYPE(ctypes.c_int)
nrnpy_set_pr_etal.argtypes = [nrnpy_pr_proto, nrnpy_pass_proto]
nrnpy_pr_callback = nrnpy_pr_proto(nrnpy_pr)
nrnpy_pass_callback = nrnpy_pass_proto(nrnpy_pass)
nrnpy_set_pr_etal(nrnpy_pr_callback, nrnpy_pass_callback)
except:
print("Failed to setup nrnpy_pr")
pass
def nrnpy_vec_math(op, flag, arg1, arg2=None):
import numbers
valid_types = (numbers.Number, hoc.HocObject)
if isinstance(arg1, valid_types):
if flag == 2:
# unary
arg1 = arg1.c()
if op == 'uneg':
return arg1.mul(-1)
if op == 'upos':
return arg1
if op == 'uabs':
return arg1.abs()
elif isinstance(arg2, valid_types):
if flag == 1:
# either reversed (flag=1) or unary (flag=2)
arg2 = arg2.c()
if op in ('mul', 'add'):
return getattr(arg2, op)(arg1)
if op == 'div':
return arg2.pow(-1).mul(arg1)
if op == 'sub':
return arg2.mul(-1).add(arg1)
else:
arg1 = arg1.c()
return getattr(arg1, op)(arg2)
return NotImplemented
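# Illustrative sketch only (not part of the original file): once the callback
# below is registered, hoc Vectors support Python-style arithmetic that is
# routed through nrnpy_vec_math.
def _example_vector_math():
    """Arithmetic on hoc Vectors dispatched via nrnpy_vec_math."""
    v = h.Vector([1, 2, 3])
    w = v * 2   # elementwise multiply -> Vector [2, 4, 6]
    u = 1 - v   # reversed operands handled by the flag == 1 branch
    return w, u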
try:
nrnpy_vec_math_register = nrn_dll_sym('nrnpy_vec_math_register')
nrnpy_vec_math_register(ctypes.py_object(nrnpy_vec_math))
except:
print("Failed to setup nrnpy_vec_math")
try:
from neuron.psection import psection
nrn.set_psection(psection)
except:
print("Failed to setup nrn.Section.psection")
pass
|
[] |
[] |
[
"NRN_NMODL_PATH"
] |
[]
|
["NRN_NMODL_PATH"]
|
python
| 1 | 0 | |
main.go
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
)
// generateJSON creates the spire info and certificate bundle that is returned
// by the web server
func generateJSON() (string, error) {
// Bundle contains the struct used to generate the spire info json
type Bundle struct {
Domain string
Server string
CertBundle string
}
certBundle, err := ioutil.ReadFile(os.Getenv("BUNDLE"))
if err != nil {
return "", err
}
s := Bundle{os.Getenv("DOMAIN"), os.Getenv("SERVER"), string(certBundle)}
j, err := json.Marshal(s)
if err != nil {
return "", err
}
return string(j), nil
}
// handler returns the generated json spire info
func handler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
j, err := generateJSON()
if err != nil {
log.Fatal(err)
}
fmt.Fprintln(w, j)
}
// main starts the http server
func main() {
http.HandleFunc("/", handler)
log.Fatal(http.ListenAndServe(":8080", nil))
}
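// exampleLocalRun is an illustrative sketch only (not part of the original
// program): it shows the environment variables generateJSON expects before the
// handler can serve the bundle. The values below are placeholders.
func exampleLocalRun() {
 os.Setenv("DOMAIN", "example.org")
 os.Setenv("SERVER", "spire-server.example.org:8081")
 os.Setenv("BUNDLE", "/run/spire/bundle/bundle.crt")
 j, err := generateJSON()
 if err != nil {
  log.Println(err)
  return
 }
 fmt.Println(j)
}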
|
[
"\"BUNDLE\"",
"\"DOMAIN\"",
"\"SERVER\""
] |
[] |
[
"DOMAIN",
"SERVER",
"BUNDLE"
] |
[]
|
["DOMAIN", "SERVER", "BUNDLE"]
|
go
| 3 | 0 | |
main.go
|
/**
*
* Copyright 2021 Victor Shinya
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package main
import (
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"github.com/joho/godotenv"
)
func Handler(w http.ResponseWriter, r *http.Request) {
params := map[string]interface{}{
"username": os.Getenv("SOFTLAYER_USERNAME"),
"apikey": os.Getenv("SOFTLAYER_APIKEY"),
"name": os.Getenv("VSIS_NAME"),
"power": os.Getenv("POWER"),
}
res := Main(params)
b, err := json.Marshal(res)
if err != nil {
  log.Printf("failed to marshal Function response to []byte: %v", err)
  http.Error(w, "Internal Server Error", http.StatusInternalServerError)
  return
}
w.Write(b)
}
func main() {
godotenv.Load()
http.HandleFunc("/", Handler)
fmt.Println("Listening on port 8080")
 log.Fatal(http.ListenAndServe(":8080", nil))
}
|
[
"\"SOFTLAYER_USERNAME\"",
"\"SOFTLAYER_APIKEY\"",
"\"VSIS_NAME\"",
"\"POWER\""
] |
[] |
[
"VSIS_NAME",
"SOFTLAYER_USERNAME",
"POWER",
"SOFTLAYER_APIKEY"
] |
[]
|
["VSIS_NAME", "SOFTLAYER_USERNAME", "POWER", "SOFTLAYER_APIKEY"]
|
go
| 4 | 0 | |
tests/integration/sqs_consumer_tests.py
|
import os
import time
import pytest
from razemax.consumers import MessageConsumer
from razemax.drivers import SQSDriver, Message
from razemax.event_manager import EventManager
from razemax.publisher import SNSMessagePublisher
# events.py
class FollowCreatedEvent:
def __init__(self, from_user_id, to_user_id, is_suggested, timestamp):
self.from_user_id = from_user_id
self.to_user_id = to_user_id
self.is_suggested = is_suggested
self.timestamp = timestamp
def follow_created_subscriber(event: FollowCreatedEvent):
assert event.from_user_id == "amancioortega"
assert event.is_suggested is False
# apps.py
event_bus = EventManager.subscribe(follow_created_subscriber, FollowCreatedEvent)
# mappers.py
def follow_created_mapper(message: Message):
body = message.body.get('body')
event_dict = {
'from_user_id': body.get('source_user'),
'to_user_id': body.get('target_user'),
'is_suggested': body.get('is_suggested'),
'timestamp': body.get('timestamp')
}
return FollowCreatedEvent(**event_dict)
@pytest.mark.integration
def test_integration_sqs():
message_factory = {
'follow_created': follow_created_mapper
}
aws_settings = {
'region_name': os.environ['AWS_REGION'],
'aws_access_key_id': os.environ['AWS_ACCESS_KEY_ID'],
'aws_secret_access_key': os.environ['AWS_SECRET_ACCESS_KEY'],
'endpoint_url': os.environ.get('AWS_ENDPOINT_URL')
}
queue_name = os.environ['SQS_QUEUE_NAME']
topic_arn = os.environ['SNS_TOPIC_ARN']
driver = SQSDriver.build(queue_name=queue_name, aws_settings=aws_settings)
publisher = SNSMessagePublisher.build(topic_arn=topic_arn, aws_settings=aws_settings)
publisher.publish("follow_created", {
'source_user': 'amancioortega',
'target_user': 'jairo',
'is_suggested': False,
'timestamp': "2018-12-01T11:23:23.0000"
})
    time.sleep(1)  # Wait for delivery
consumer = MessageConsumer(mapper_factory=message_factory, event_manager=EventManager(), queue_driver=driver)
consumer.process_message()
|
[] |
[] |
[
"AWS_SECRET_ACCESS_KEY",
"AWS_REGION",
"SNS_TOPIC_ARN",
"AWS_ENDPOINT_URL",
"SQS_QUEUE_NAME",
"AWS_ACCESS_KEY_ID"
] |
[]
|
["AWS_SECRET_ACCESS_KEY", "AWS_REGION", "SNS_TOPIC_ARN", "AWS_ENDPOINT_URL", "SQS_QUEUE_NAME", "AWS_ACCESS_KEY_ID"]
|
python
| 6 | 0 | |
vendor/github.com/openshift/origin/tools/rebasehelpers/godepchecker/godepchecker.go
|
package main
import (
"encoding/json"
"flag"
"fmt"
"go/parser"
"go/token"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"k8s.io/apimachinery/pkg/util/sets"
"github.com/openshift/origin/tools/rebasehelpers/util"
)
var gopath = os.Getenv("GOPATH")
func main() {
fmt.Println(`
Assumes the following:
- $GOPATH is set to a single directory (not the godepsified path)
- "godeps save ./..." has not yet been run on origin
- The desired level of kubernetes is checked out
`)
var self, other string
var checkoutNewer, examineForks bool
flag.StringVar(&self, "self", filepath.Join(gopath, "src/github.com/openshift/origin/Godeps/Godeps.json"), "The first file to compare")
flag.StringVar(&other, "other", filepath.Join(gopath, "src/k8s.io/kubernetes/Godeps/Godeps.json"), "The other file to compare")
flag.BoolVar(&checkoutNewer, "checkout", checkoutNewer, "Check out the newer commit when there is a mismatch between the Godeps")
flag.BoolVar(&examineForks, "examine-forks", examineForks, "Print out git logs from OpenShift forks or upstream dependencies when there is a mismatch in revisions between Kubernetes and Origin")
flag.Parse()
// List packages imported by origin Godeps
originGodeps, err := loadGodeps(self)
if err != nil {
exit(fmt.Sprintf("Error loading %s:", self), err)
}
// List packages imported by kubernetes Godeps
k8sGodeps, err := loadGodeps(other)
if err != nil {
exit(fmt.Sprintf("Error loading %s:", other), err)
}
// List packages imported by origin
_, errs := loadImports(".")
if len(errs) > 0 {
exit("Error loading imports:", errs...)
}
mine := []string{}
yours := []string{}
ours := []string{}
for k := range originGodeps {
if _, exists := k8sGodeps[k]; exists {
ours = append(ours, k)
} else {
mine = append(mine, k)
}
}
for k := range k8sGodeps {
if _, exists := originGodeps[k]; !exists {
yours = append(yours, k)
}
}
sort.Strings(mine)
sort.Strings(yours)
sort.Strings(ours)
// Check for missing k8s deps
if len(yours) > 0 {
fmt.Println("k8s-only godep imports (may need adding to origin):")
for _, k := range yours {
fmt.Println(k)
}
fmt.Printf("\n\n\n")
}
// Check `mine` for unused local deps (might be used transitively by other Godeps)
// Check `ours` for different levels
openshiftForks := sets.NewString(
"github.com/docker/distribution",
"github.com/skynetservices/skydns",
"github.com/coreos/etcd",
"github.com/emicklei/go-restful",
"github.com/golang/glog",
"github.com/cloudflare/cfssl",
"github.com/google/certificate-transparency",
"github.com/RangelReale/osin",
"github.com/google/cadvisor",
)
lastMismatch := ""
for _, k := range ours {
if oRev, kRev := originGodeps[k].Rev, k8sGodeps[k].Rev; oRev != kRev {
if lastMismatch == oRev {
// don't show consecutive mismatches if oRev is the same
continue
}
lastMismatch = oRev
fmt.Printf("Mismatch on %s:\n", k)
newerCommit := ""
repoPath := filepath.Join(gopath, "src", k)
oDecorator := ""
kDecorator := ""
currentRev, err := util.CurrentRev(repoPath)
if err == nil {
if currentRev == oRev {
kDecorator = " "
oDecorator = "*"
}
if currentRev == kRev {
kDecorator = "*"
oDecorator = " "
}
}
oDate, oDateErr := util.CommitDate(oRev, repoPath)
if oDateErr != nil {
oDate = "unknown"
}
kDate, kDateErr := util.CommitDate(kRev, repoPath)
if kDateErr != nil {
kDate = "unknown"
}
if err := util.FetchRepo(repoPath); err != nil {
fmt.Printf(" Error fetching %q: %v\n", repoPath, err)
}
openShiftNewer := false
if older, err := util.IsAncestor(oRev, kRev, repoPath); older && err == nil {
fmt.Printf(" Origin: %s%s (%s)\n", oDecorator, oRev, oDate)
fmt.Printf(" K8s: %s%s (%s, fast-forward)\n", kDecorator, kRev, kDate)
newerCommit = kRev
} else if newer, err := util.IsAncestor(kRev, oRev, repoPath); newer && err == nil {
fmt.Printf(" Origin: %s%s (%s, fast-forward)\n", oDecorator, oRev, oDate)
fmt.Printf(" K8s: %s%s (%s)\n", kDecorator, kRev, kDate)
newerCommit = oRev
openShiftNewer = true
} else if oDateErr == nil && kDateErr == nil {
fmt.Printf(" Origin: %s%s (%s, discontinuous)\n", oDecorator, oRev, oDate)
fmt.Printf(" K8s: %s%s (%s, discontinuous)\n", kDecorator, kRev, kDate)
if oDate > kDate {
newerCommit = oRev
} else {
newerCommit = kRev
}
} else {
fmt.Printf(" Origin: %s%s (%s)\n", oDecorator, oRev, oDate)
fmt.Printf(" K8s: %s%s (%s)\n", kDecorator, kRev, kDate)
if oDateErr != nil {
fmt.Printf(" %s\n", oDateErr)
}
if kDateErr != nil {
fmt.Printf(" %s\n", kDateErr)
}
}
if len(newerCommit) > 0 && newerCommit != currentRev {
if checkoutNewer {
fmt.Printf(" Checking out:\n")
fmt.Printf(" cd %s && git checkout %s\n", repoPath, newerCommit)
if err := util.Checkout(newerCommit, repoPath); err != nil {
fmt.Printf(" %s\n", err)
}
} else {
fmt.Printf(" To check out newest:\n")
fmt.Printf(" cd %s && git checkout %s\n", repoPath, newerCommit)
}
}
if !openShiftNewer {
// only proceed to examine forks if OpenShift's commit is newer than
// Kube's
continue
}
if !examineForks {
continue
}
if currentRev == oRev {
// we're at OpenShift's commit, so there's not really any need to show
// the commits
continue
}
if !strings.HasPrefix(k, "github.com/") {
continue
}
parts := strings.SplitN(k, "/", 4)
repo := fmt.Sprintf("github.com/%s/%s", parts[1], parts[2])
if !openshiftForks.Has(repo) {
continue
}
fmt.Printf("\n Fork info:\n")
if err := func() error {
cwd, err := os.Getwd()
if err != nil {
return err
}
defer os.Chdir(cwd)
if err := os.Chdir(repoPath); err != nil {
return err
}
commits, err := util.CommitsBetween(kRev+"^", oRev)
if err != nil {
return err
}
for _, commit := range commits {
fmt.Printf(" %s %s\n", commit.Sha, commit.Summary)
}
return nil
}(); err != nil {
fmt.Printf(" Error examining fork: %v\n", err)
}
}
}
}
func exit(reason string, errors ...error) {
fmt.Fprintf(os.Stderr, "%s\n", reason)
for _, err := range errors {
fmt.Fprintln(os.Stderr, err.Error())
}
os.Exit(2)
}
func loadImports(root string) (map[string]bool, []error) {
imports := map[string]bool{}
errs := []error{}
fset := &token.FileSet{}
filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
// Don't walk godeps
if info.Name() == "Godeps" && info.IsDir() {
return filepath.SkipDir
}
if strings.HasSuffix(info.Name(), ".go") && info.Mode().IsRegular() {
if fileAST, err := parser.ParseFile(fset, path, nil, parser.ImportsOnly); err != nil {
errs = append(errs, err)
} else {
for i := range fileAST.Imports {
     pkg := fileAST.Imports[i].Path.Value
     if importPath, uerr := strconv.Unquote(pkg); uerr == nil {
      imports[importPath] = true
     }
}
}
}
return nil
})
return imports, errs
}
type Godep struct {
Deps []Dep
}
type Dep struct {
ImportPath string
Comment string
Rev string
}
func loadGodeps(file string) (map[string]Dep, error) {
data, err := ioutil.ReadFile(file)
if err != nil {
return nil, err
}
godeps := &Godep{}
if err := json.Unmarshal(data, godeps); err != nil {
return nil, err
}
depmap := map[string]Dep{}
for i := range godeps.Deps {
dep := godeps.Deps[i]
if _, exists := depmap[dep.ImportPath]; exists {
   return nil, fmt.Errorf("%s imports %q multiple times", file, dep.ImportPath)
}
depmap[dep.ImportPath] = dep
}
return depmap, nil
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
libbeat/processors/add_kubernetes_metadata/kubernetes.go
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// +build linux darwin windows
package add_kubernetes_metadata
import (
"fmt"
"os"
"time"
k8sclient "k8s.io/client-go/kubernetes"
"github.com/elastic/beats/libbeat/beat"
"github.com/elastic/beats/libbeat/common"
"github.com/elastic/beats/libbeat/common/kubernetes"
"github.com/elastic/beats/libbeat/logp"
"github.com/elastic/beats/libbeat/processors"
jsprocessor "github.com/elastic/beats/libbeat/processors/script/javascript/module/processor"
)
const (
timeout = time.Second * 5
)
type kubernetesAnnotator struct {
watcher kubernetes.Watcher
indexers *Indexers
matchers *Matchers
cache *cache
kubernetesAvailable bool
}
func init() {
processors.RegisterPlugin("add_kubernetes_metadata", New)
jsprocessor.RegisterPlugin("AddKubernetesMetadata", New)
// Register default indexers
Indexing.AddIndexer(PodNameIndexerName, NewPodNameIndexer)
Indexing.AddIndexer(PodUIDIndexerName, NewPodUIDIndexer)
Indexing.AddIndexer(ContainerIndexerName, NewContainerIndexer)
Indexing.AddIndexer(IPPortIndexerName, NewIPPortIndexer)
Indexing.AddMatcher(FieldMatcherName, NewFieldMatcher)
Indexing.AddMatcher(FieldFormatMatcherName, NewFieldFormatMatcher)
}
func isKubernetesAvailable(client k8sclient.Interface) bool {
server, err := client.Discovery().ServerVersion()
if err != nil {
logp.Info("%v: could not detect kubernetes env: %v", "add_kubernetes_metadata", err)
return false
}
logp.Info("%v: kubernetes env detected, with version: %v", "add_kubernetes_metadata", server)
return true
}
// New constructs a new add_kubernetes_metadata processor.
func New(cfg *common.Config) (processors.Processor, error) {
config := defaultKubernetesAnnotatorConfig()
err := cfg.Unpack(&config)
if err != nil {
return nil, fmt.Errorf("fail to unpack the kubernetes configuration: %s", err)
}
//Load default indexer configs
 if config.DefaultIndexers.Enabled {
Indexing.RLock()
for key, cfg := range Indexing.GetDefaultIndexerConfigs() {
config.Indexers = append(config.Indexers, map[string]common.Config{key: cfg})
}
Indexing.RUnlock()
}
//Load default matcher configs
 if config.DefaultMatchers.Enabled {
Indexing.RLock()
for key, cfg := range Indexing.GetDefaultMatcherConfigs() {
config.Matchers = append(config.Matchers, map[string]common.Config{key: cfg})
}
Indexing.RUnlock()
}
metaGen, err := kubernetes.NewMetaGenerator(cfg)
if err != nil {
return nil, err
}
processor := &kubernetesAnnotator{
cache: newCache(config.CleanupTimeout),
kubernetesAvailable: false,
}
client, err := kubernetes.GetKubernetesClient(config.KubeConfig)
if err != nil {
if kubernetes.IsInCluster(config.KubeConfig) {
logp.Debug("kubernetes", "%v: could not create kubernetes client using in_cluster config", "add_kubernetes_metadata")
} else if config.KubeConfig == "" {
logp.Debug("kubernetes", "%v: could not create kubernetes client using config: %v", "add_kubernetes_metadata", os.Getenv("KUBECONFIG"))
} else {
logp.Debug("kubernetes", "%v: could not create kubernetes client using config: %v", "add_kubernetes_metadata", config.KubeConfig)
}
return processor, nil
}
if !isKubernetesAvailable(client) {
return processor, nil
}
processor.indexers = NewIndexers(config.Indexers, metaGen)
matchers := NewMatchers(config.Matchers)
if matchers.Empty() {
logp.Debug("kubernetes", "%v: could not initialize kubernetes plugin with zero matcher plugins", "add_kubernetes_metadata")
return processor, nil
}
processor.matchers = matchers
config.Host = kubernetes.DiscoverKubernetesNode(config.Host, kubernetes.IsInCluster(config.KubeConfig), client)
logp.Debug("kubernetes", "Initializing a new Kubernetes watcher using host: %s", config.Host)
watcher, err := kubernetes.NewWatcher(client, &kubernetes.Pod{}, kubernetes.WatchOptions{
SyncTimeout: config.SyncPeriod,
Node: config.Host,
Namespace: config.Namespace,
})
if err != nil {
logp.Err("kubernetes: Couldn't create watcher for %T", &kubernetes.Pod{})
return nil, err
}
processor.watcher = watcher
processor.kubernetesAvailable = true
watcher.AddEventHandler(kubernetes.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
processor.addPod(obj.(*kubernetes.Pod))
},
UpdateFunc: func(obj interface{}) {
processor.removePod(obj.(*kubernetes.Pod))
processor.addPod(obj.(*kubernetes.Pod))
},
DeleteFunc: func(obj interface{}) {
processor.removePod(obj.(*kubernetes.Pod))
},
})
if err := watcher.Start(); err != nil {
return nil, err
}
return processor, nil
}
func (k *kubernetesAnnotator) Run(event *beat.Event) (*beat.Event, error) {
if !k.kubernetesAvailable {
return event, nil
}
index := k.matchers.MetadataIndex(event.Fields)
if index == "" {
return event, nil
}
metadata := k.cache.get(index)
if metadata == nil {
return event, nil
}
event.Fields.DeepUpdate(common.MapStr{
"kubernetes": metadata.Clone(),
})
return event, nil
}
func (k *kubernetesAnnotator) addPod(pod *kubernetes.Pod) {
metadata := k.indexers.GetMetadata(pod)
for _, m := range metadata {
k.cache.set(m.Index, m.Data)
}
}
func (k *kubernetesAnnotator) removePod(pod *kubernetes.Pod) {
indexes := k.indexers.GetIndexes(pod)
for _, idx := range indexes {
k.cache.delete(idx)
}
}
func (*kubernetesAnnotator) String() string {
return "add_kubernetes_metadata"
}
|
[
"\"KUBECONFIG\""
] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
go
| 1 | 0 | |
runsc/main.go
|
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Binary runsc is an implementation of the Open Container Initiative Runtime
// that runs applications inside a sandbox.
package main
import (
"fmt"
"io"
"os"
"path/filepath"
"strings"
"syscall"
"time"
"context"
"flag"
"github.com/google/subcommands"
"gvisor.googlesource.com/gvisor/pkg/log"
"gvisor.googlesource.com/gvisor/runsc/boot"
"gvisor.googlesource.com/gvisor/runsc/cmd"
)
var (
// Although these flags are not part of the OCI spec, they are used by
// Docker, and thus should not be changed.
rootDir = flag.String("root", "", "root directory for storage of container state")
logFilename = flag.String("log", "", "file path where internal debug information is written, default is stdout")
logFormat = flag.String("log-format", "text", "log format: text (default) or json")
debug = flag.Bool("debug", false, "enable debug logging")
// These flags are unique to runsc, and are used to configure parts of the
// system that are not covered by the runtime spec.
// Debugging flags.
debugLogDir = flag.String("debug-log-dir", "", "additional location for logs. It creates individual log files per command")
logPackets = flag.Bool("log-packets", false, "enable network packet logging")
// Debugging flags: strace related
strace = flag.Bool("strace", false, "enable strace")
straceSyscalls = flag.String("strace-syscalls", "", "comma-separated list of syscalls to trace. If --strace is true and this list is empty, then all syscalls will be traced.")
straceLogSize = flag.Uint("strace-log-size", 1024, "default size (in bytes) to log data argument blobs")
// Flags that control sandbox runtime behavior.
platform = flag.String("platform", "ptrace", "specifies which platform to use: ptrace (default), kvm")
network = flag.String("network", "sandbox", "specifies which network to use: sandbox (default), host, none. Using network inside the sandbox is more secure because it's isolated from the host network.")
 fileAccess = flag.String("file-access", "proxy", "specifies which filesystem to use: proxy (default), direct. Using a proxy is more secure because it disallows the sandbox from opening files directly in the host.")
overlay = flag.Bool("overlay", false, "wrap filesystem mounts with writable overlay. All modifications are stored in memory inside the sandbox.")
)
var gitRevision = ""
func main() {
// Help and flags commands are generated automatically.
subcommands.Register(subcommands.HelpCommand(), "")
subcommands.Register(subcommands.FlagsCommand(), "")
// Register user-facing runsc commands.
subcommands.Register(new(cmd.Create), "")
subcommands.Register(new(cmd.Delete), "")
subcommands.Register(new(cmd.Events), "")
subcommands.Register(new(cmd.Exec), "")
subcommands.Register(new(cmd.Gofer), "")
subcommands.Register(new(cmd.Kill), "")
subcommands.Register(new(cmd.List), "")
subcommands.Register(new(cmd.PS), "")
subcommands.Register(new(cmd.Run), "")
subcommands.Register(new(cmd.Start), "")
subcommands.Register(new(cmd.State), "")
// Register internal commands with the internal group name. This causes
// them to be sorted below the user-facing commands with empty group.
// The string below will be printed above the commands.
const internalGroup = "internal use only"
subcommands.Register(new(cmd.Boot), internalGroup)
subcommands.Register(new(cmd.Gofer), internalGroup)
// All subcommands must be registered before flag parsing.
flag.Parse()
platformType, err := boot.MakePlatformType(*platform)
if err != nil {
cmd.Fatalf("%v", err)
}
fsAccess, err := boot.MakeFileAccessType(*fileAccess)
if err != nil {
cmd.Fatalf("%v", err)
}
netType, err := boot.MakeNetworkType(*network)
if err != nil {
cmd.Fatalf("%v", err)
}
// Create a new Config from the flags.
conf := &boot.Config{
RootDir: *rootDir,
FileAccess: fsAccess,
Overlay: *overlay,
Network: netType,
LogPackets: *logPackets,
Platform: platformType,
Strace: *strace,
StraceLogSize: *straceLogSize,
}
if len(*straceSyscalls) != 0 {
conf.StraceSyscalls = strings.Split(*straceSyscalls, ",")
}
// Set up logging.
if *debug {
log.SetLevel(log.Debug)
}
var logFile io.Writer = os.Stderr
if *logFilename != "" {
// We must set O_APPEND and not O_TRUNC because Docker passes
// the same log file for all commands (and also parses these
// log files), so we can't destroy them on each command.
f, err := os.OpenFile(*logFilename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
if err != nil {
cmd.Fatalf("error opening log file %q: %v", *logFilename, err)
}
logFile = f
}
var e log.Emitter
switch *logFormat {
case "text":
e = log.GoogleEmitter{&log.Writer{Next: logFile}}
case "json":
e = log.JSONEmitter{log.Writer{Next: logFile}}
default:
cmd.Fatalf("invalid log format %q, must be 'json' or 'text'", *logFormat)
}
if *debugLogDir != "" {
if err := os.MkdirAll(*debugLogDir, 0775); err != nil {
cmd.Fatalf("error creating dir %q: %v", *debugLogDir, err)
}
// Format: <debug-log-dir>/runsc.log.<yyyymmdd-hhmmss.uuuuuu>.<command>
scmd := flag.CommandLine.Arg(0)
filename := fmt.Sprintf("runsc.log.%s.%s", time.Now().Format("20060102-150405.000000"), scmd)
path := filepath.Join(*debugLogDir, filename)
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0664)
if err != nil {
cmd.Fatalf("error opening log file %q: %v", filename, err)
}
e = log.MultiEmitter{e, log.GoogleEmitter{&log.Writer{Next: f}}}
}
log.SetTarget(e)
log.Infof("***************************")
log.Infof("Args: %s", os.Args)
log.Infof("Git Revision: %s", gitRevision)
log.Infof("PID: %d", os.Getpid())
log.Infof("UID: %d, GID: %d", os.Getuid(), os.Getgid())
log.Infof("Configuration:")
log.Infof("\t\tRootDir: %s", conf.RootDir)
log.Infof("\t\tPlatform: %v", conf.Platform)
log.Infof("\t\tFileAccess: %v, overlay: %t", conf.FileAccess, conf.Overlay)
log.Infof("\t\tNetwork: %v, logging: %t", conf.Network, conf.LogPackets)
log.Infof("\t\tStrace: %t, max size: %d, syscalls: %s", conf.Strace, conf.StraceLogSize, conf.StraceSyscalls)
log.Infof("***************************")
// Call the subcommand and pass in the configuration.
var ws syscall.WaitStatus
subcmdCode := subcommands.Execute(context.Background(), conf, &ws)
if subcmdCode == subcommands.ExitSuccess {
log.Infof("Exiting with status: %v", ws)
if ws.Signaled() {
// No good way to return it, emulate what the shell does. Maybe raise
// signal to self?
os.Exit(128 + int(ws.Signal()))
}
os.Exit(ws.ExitStatus())
}
// Return an error that is unlikely to be used by the application.
log.Warningf("Failure to execute command, err: %v", subcmdCode)
os.Exit(128)
}
func init() {
// Set default root dir to something (hopefully) user-writeable.
*rootDir = "/var/run/runsc"
if runtimeDir := os.Getenv("XDG_RUNTIME_DIR"); runtimeDir != "" {
*rootDir = filepath.Join(runtimeDir, "runsc")
}
}
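// Illustrative note (not part of the original source): with XDG_RUNTIME_DIR=/run/user/1000 the
// default root directory resolves to /run/user/1000/runsc; when the variable is unset it falls
// back to /var/run/runsc. A root directory passed on the command line still overrides either
// default, because flag.Parse runs after init.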
|
[
"\"XDG_RUNTIME_DIR\""
] |
[] |
[
"XDG_RUNTIME_DIR"
] |
[]
|
["XDG_RUNTIME_DIR"]
|
go
| 1 | 0 | |
pkg/gits/git_cli.go
|
package gits
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/url"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/jenkins-x/jx/pkg/auth"
"github.com/jenkins-x/jx/pkg/log"
"github.com/jenkins-x/jx/pkg/util"
gitcfg "gopkg.in/src-d/go-git.v4/config"
)
const (
replaceInvalidBranchChars = '_'
)
var (
numberRegex = regexp.MustCompile("[0-9]")
splitDescribeRegex = regexp.MustCompile(`(?:~|\^|-g)`)
)
// GitCLI implements common git actions based on git CLI
type GitCLI struct{}
// NewGitCLI creates a new GitCLI instance
func NewGitCLI() *GitCLI {
return &GitCLI{}
}
// FindGitConfigDir tries to find the `.git` directory either in the current directory or in parent directories
func (g *GitCLI) FindGitConfigDir(dir string) (string, string, error) {
d := dir
var err error
if d == "" {
d, err = os.Getwd()
if err != nil {
return "", "", err
}
}
for {
gitDir := filepath.Join(d, ".git/config")
f, err := util.FileExists(gitDir)
if err != nil {
return "", "", err
}
if f {
return d, gitDir, nil
}
dirPath := strings.TrimSuffix(d, "/")
if dirPath == "" {
return "", "", nil
}
p, _ := filepath.Split(dirPath)
if d == "/" || p == d {
return "", "", nil
}
d = p
}
}
// Clone clones the given git URL into the given directory
func (g *GitCLI) Clone(url string, dir string) error {
return g.clone(dir, url, "", false, false, "", "", "")
}
// ShallowCloneBranch clones a single branch of the given git URL into the given directory
func (g *GitCLI) ShallowCloneBranch(gitURL string, branch string, dir string) error {
var err error
verbose := true
remoteName := "origin"
shallow := true
err = g.Init(dir)
if err != nil {
return errors.Wrapf(err, "failed to init a new git repository in directory %s", dir)
}
if verbose {
log.Logger().Infof("ran git init in %s", dir)
}
err = g.AddRemote(dir, "origin", gitURL)
if err != nil {
return errors.Wrapf(err, "failed to add remote %s with url %s in directory %s", remoteName, gitURL, dir)
}
if verbose {
log.Logger().Infof("ran git add remote %s %s in %s", remoteName, gitURL, dir)
}
err = g.fetchBranch(dir, remoteName, false, shallow, verbose, branch)
if err != nil {
return errors.Wrapf(err, "failed to fetch %s from %s in directory %s", branch, gitURL,
dir)
}
err = g.gitCmd(dir, "checkout", "-t", fmt.Sprintf("%s/%s", remoteName, branch))
if err != nil {
log.Logger().Warnf("failed to checkout remote tracking branch %s/%s in directory %s due to: %s", remoteName,
branch, dir, err.Error())
if branch != "master" {
// git init checks out the master branch by default
err = g.CreateBranch(dir, branch)
if err != nil {
return errors.Wrapf(err, "failed to create branch %s in directory %s", branch, dir)
}
if verbose {
log.Logger().Infof("ran git branch %s in directory %s", branch, dir)
}
}
err = g.Reset(dir, fmt.Sprintf("%s/%s", remoteName, branch), true)
if err != nil {
return errors.Wrapf(err, "failed to reset hard to %s in directory %s", branch, dir)
}
err = g.gitCmd(dir, "branch", "--set-upstream-to", fmt.Sprintf("%s/%s", remoteName, branch), branch)
if err != nil {
return errors.Wrapf(err, "failed to set tracking information to %s/%s %s in directory %s", remoteName,
branch, branch, dir)
}
}
return nil
}
// ShallowClone shallow clones the repo at url from the specified commitish or pull request to a local master branch
func (g *GitCLI) ShallowClone(dir string, url string, commitish string, pullRequest string) error {
return g.clone(dir, url, "", true, false, "master", commitish, pullRequest)
}
// clone is a safer implementation of the `git clone` method
func (g *GitCLI) clone(dir string, gitURL string, remoteName string, shallow bool, verbose bool, localBranch string,
commitish string, pullRequest string) error {
var err error
if verbose {
log.Logger().Infof("cloning repository %s to dir %s", gitURL, dir)
}
if remoteName == "" {
remoteName = "origin"
}
if commitish == "" {
if pullRequest == "" {
commitish = "master"
} else {
pullRequestNumber, err := strconv.Atoi(strings.TrimPrefix(pullRequest, "PR-"))
if err != nil {
return errors.Wrapf(err, "converting %s to a pull request number", pullRequest)
}
commitish = fmt.Sprintf("refs/pull/%d/head", pullRequestNumber)
}
} else if pullRequest != "" {
return errors.Errorf("cannot specify both pull request and commitish")
}
if localBranch == "" {
localBranch = commitish
}
err = g.Init(dir)
if err != nil {
return errors.Wrapf(err, "failed to init a new git repository in directory %s", dir)
}
if verbose {
log.Logger().Infof("ran git init in %s", dir)
}
err = g.AddRemote(dir, "origin", gitURL)
if err != nil {
return errors.Wrapf(err, "failed to add remote %s with url %s in directory %s", remoteName, gitURL, dir)
}
if verbose {
log.Logger().Infof("ran git add remote %s %s in %s", remoteName, gitURL, dir)
}
err = g.fetchBranch(dir, remoteName, false, shallow, verbose, commitish)
if err != nil {
return errors.Wrapf(err, "failed to fetch %s from %s in directory %s", commitish, gitURL,
dir)
}
if localBranch != "master" {
// git init checks out the master branch by default
err = g.CreateBranch(dir, localBranch)
if err != nil {
return errors.Wrapf(err, "failed to create branch %s in directory %s", localBranch, dir)
}
if verbose {
log.Logger().Infof("ran git branch %s in directory %s", localBranch, dir)
}
}
if commitish == "" {
commitish = localBranch
if commitish == "" {
commitish = "master"
}
}
err = g.Reset(dir, fmt.Sprintf("%s/%s", remoteName, commitish), true)
if err != nil {
return errors.Wrapf(err, "failed to reset hard to %s in directory %s", commitish, dir)
}
if verbose {
log.Logger().Infof("ran git reset --hard %s in directory %s", commitish, dir)
}
err = g.gitCmd(dir, "branch", "--set-upstream-to", fmt.Sprintf("%s/%s", remoteName, commitish), localBranch)
if err != nil {
return errors.Wrapf(err, "failed to set tracking information to %s/%s %s in directory %s", remoteName,
commitish, localBranch, dir)
}
if verbose {
log.Logger().Infof("ran git branch --set-upstream-to %s/%s %s in directory %s", remoteName, commitish,
localBranch, dir)
}
return nil
}
// Pull pulls the Git repository in the given directory
func (g *GitCLI) Pull(dir string) error {
return g.gitCmd(dir, "pull")
}
// PullRemoteBranches pulls from all remotes of the Git repository in the given directory
func (g *GitCLI) PullRemoteBranches(dir string) error {
return g.gitCmd(dir, "pull", "--all")
}
// DeleteRemoteBranch deletes the remote branch in the given directory
func (g *GitCLI) DeleteRemoteBranch(dir string, remoteName string, branch string) error {
return g.gitCmd(dir, "push", remoteName, "--delete", branch)
}
// DeleteLocalBranch deletes the local branch in the given directory
func (g *GitCLI) DeleteLocalBranch(dir string, branch string) error {
return g.gitCmd(dir, "branch", "-D", branch)
}
// CloneOrPull clones the given git URL or pull if it already exists
func (g *GitCLI) CloneOrPull(url string, dir string) error {
empty, err := util.IsEmpty(dir)
if err != nil {
return err
}
if !empty {
return g.Pull(dir)
}
return g.Clone(url, dir)
}
// PullUpstream pulls (with rebase) the master branch from the upstream remote into the given directory
func (g *GitCLI) PullUpstream(dir string) error {
return g.gitCmd(dir, "pull", "-r", "upstream", "master")
}
// ResetToUpstream resets the given branch to the upstream version
func (g *GitCLI) ResetToUpstream(dir string, branch string) error {
err := g.gitCmd(dir, "fetch", "upstream")
if err != nil {
return err
}
return g.gitCmd(dir, "reset", "--hard", "upstream/"+branch)
}
// AddRemote adds a remote repository at the given URL and with the given name
func (g *GitCLI) AddRemote(dir string, name string, url string) error {
err := g.gitCmd(dir, "remote", "add", name, url)
if err != nil {
err = g.gitCmd(dir, "remote", "set-url", name, url)
if err != nil {
return err
}
}
return nil
}
// UpdateRemote updates the URL of the remote repository
func (g *GitCLI) UpdateRemote(dir, url string) error {
return g.gitCmd(dir, "remote", "set-url", "origin", url)
}
// RemoteUpdate performs a git remote update
func (g *GitCLI) RemoteUpdate(dir string) error {
return g.gitCmd(dir, "remote", "update")
}
// StashPush stashes the current changes from the given directory
func (g *GitCLI) StashPush(dir string) error {
return g.gitCmd(dir, "stash", "push")
}
// StashPop applies the last stash; it will error if there is no stash. The error can be checked using IsNoStashEntriesError
func (g *GitCLI) StashPop(dir string) error {
return g.gitCmd(dir, "stash", "pop")
}
// CheckoutRemoteBranch checks out the given remote tracking branch
func (g *GitCLI) CheckoutRemoteBranch(dir string, branch string) error {
remoteBranch := "origin/" + branch
remoteBranches, err := g.RemoteBranches(dir)
if err != nil {
return err
}
if util.StringArrayIndex(remoteBranches, remoteBranch) < 0 {
return g.gitCmd(dir, "checkout", "-t", remoteBranch)
}
cur, err := g.Branch(dir)
if err != nil {
return err
}
if cur == branch {
return nil
}
return g.Checkout(dir, branch)
}
// RemoteBranches returns the remote branches
func (g *GitCLI) RemoteBranches(dir string) ([]string, error) {
answer := []string{}
text, err := g.gitCmdWithOutput(dir, "branch", "-r")
if err != nil {
return answer, err
}
lines := strings.Split(text, "\n")
for _, line := range lines {
columns := strings.Split(line, " ")
for _, col := range columns {
if col != "" {
answer = append(answer, col)
break
}
}
}
return answer, nil
}
// LocalBranches will list all local branches
func (g *GitCLI) LocalBranches(dir string) ([]string, error) {
text, err := g.gitCmdWithOutput(dir, "branch")
answer := make([]string, 0)
if err != nil {
return nil, err
}
lines := strings.Split(text, "\n")
for _, line := range lines {
columns := strings.Split(line, " ")
for _, col := range columns {
if col != "" && col != "*" {
answer = append(answer, col)
break
}
}
}
return answer, nil
}
// Checkout checks out the given branch
func (g *GitCLI) Checkout(dir string, branch string) error {
return g.gitCmd(dir, "checkout", branch)
}
// CheckoutCommitFiles checks out the given files to a commit
func (g *GitCLI) CheckoutCommitFiles(dir string, commit string, files []string) error {
var err error
for _, file := range files {
err = g.gitCmd(dir, "checkout", commit, "--", file)
if err != nil {
return err
}
}
return err
}
// CheckoutOrphan checks out a new orphan branch with the given name
func (g *GitCLI) CheckoutOrphan(dir string, branch string) error {
return g.gitCmd(dir, "checkout", "--orphan", branch)
}
// Init inits a git repository into the given directory
func (g *GitCLI) Init(dir string) error {
return g.gitCmd(dir, "init")
}
// Remove removes the given file from a Git repository located at the given directory
func (g *GitCLI) Remove(dir, fileName string) error {
return g.gitCmd(dir, "rm", "-r", fileName)
}
// Remove force removes the given file from a git repository located at the given directory
func (g *GitCLI) RemoveForce(dir, fileName string) error {
return g.gitCmd(dir, "rm", "-rf", fileName)
}
// Clean force cleans a git repository located at a given directory
func (g *GitCLI) CleanForce(dir, fileName string) error {
return g.gitCmd(dir, "clean", "-fd", fileName)
}
// Status returns the status of the git repository at the given directory
func (g *GitCLI) Status(dir string) error {
return g.gitCmd(dir, "status")
}
// Branch returns the current branch of the repository located at the given directory
func (g *GitCLI) Branch(dir string) (string, error) {
return g.gitCmdWithOutput(dir, "rev-parse", "--abbrev-ref", "HEAD")
}
// WriteOperation performs a generic write operation, with nicer error handling
func (g *GitCLI) WriteOperation(dir string, args ...string) error {
return errors.Wrap(g.gitCmd(dir, args...),
"Have you set up a git credential helper? See https://help.github.com/articles/caching-your-github-password-in-git/\n")
}
// Push pushes the changes from the repository at the given directory
func (g *GitCLI) Push(dir string, remote string, force bool, refspec ...string) error {
args := []string{"push", remote}
if force {
args = append(args, "--force")
}
args = append(args, refspec...)
return g.WriteOperation(dir, args...)
}
// ForcePushBranch does a force push of the local branch into the remote branch of the repository at the given directory
func (g *GitCLI) ForcePushBranch(dir string, localBranch string, remoteBranch string) error {
return g.Push(dir, "origin", true, fmt.Sprintf("%s:%s", localBranch, remoteBranch))
}
// PushMaster pushes the master branch to the origin remote
func (g *GitCLI) PushMaster(dir string) error {
return g.Push(dir, "origin", false, "master")
}
// PushTag pushes the given tag to the origin remote
func (g *GitCLI) PushTag(dir string, tag string) error {
return g.Push(dir, "origin", false, tag)
}
// Add does a git add for all the given arguments
func (g *GitCLI) Add(dir string, args ...string) error {
add := append([]string{"add"}, args...)
return g.gitCmd(dir, add...)
}
// HasChanges indicates if there are any changes in the repository from the given directory
func (g *GitCLI) HasChanges(dir string) (bool, error) {
text, err := g.gitCmdWithOutput(dir, "status", "-s")
if err != nil {
return false, err
}
text = strings.TrimSpace(text)
return len(text) > 0, nil
}
// CommitIfChanges does a commit if there are any changes in the repository at the given directory
func (g *GitCLI) CommitIfChanges(dir string, message string) error {
changed, err := g.HasChanges(dir)
if err != nil {
return err
}
if !changed {
return nil
}
return g.CommitDir(dir, message)
}
// GetCommits returns the commits in a range, exclusive of startSha and inclusive of endSha
func (g *GitCLI) GetCommits(dir string, startSha string, endSha string) ([]GitCommit, error) {
return g.getCommits(dir, fmt.Sprintf("%s..%s", startSha, endSha))
}
func (g *GitCLI) getCommits(dir string, args ...string) ([]GitCommit, error) {
// use a custom format to get commits, using %x1e to separate commits and %x1f to separate fields
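// Illustrative shape of the raw output (assumed, based on the format string below): each commit is
// emitted as "<sha>\x1f<author name>\x1f<author email>\x1f<committer name>\x1f<committer email>\x1f<subject>\n<body>\x1e",
// so splitting on \x1e yields one raw commit per element, and splitting each element on \x1f yields
// the six fields parsed below.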
args = append([]string{"log", "--format=%H%x1f%an%x1f%ae%x1f%cn%x1f%ce%x1f%s%n%b%x1e"}, args...)
out, err := g.gitCmdWithOutput(dir, args...)
if err != nil {
return nil, errors.Wrapf(err, "running git %s", strings.Join(args, " "))
}
answer := make([]GitCommit, 0)
commits := strings.Split(out, "\x1e")
for _, rawCommit := range commits {
rawCommit = strings.TrimSpace(rawCommit)
if rawCommit == "" {
continue
}
fields := strings.Split(rawCommit, "\x1f")
commit := GitCommit{}
commit.SHA = fields[0]
commit.Author = &GitUser{
Name: fields[1],
Email: fields[2],
}
commit.Committer = &GitUser{
Name: fields[3],
Email: fields[4],
}
commit.Message = fields[5]
answer = append(answer, commit)
}
return answer, nil
}
// GetCommitsNotOnAnyRemote returns a list of commits which are on the branch but not present on any remote
func (g *GitCLI) GetCommitsNotOnAnyRemote(dir string, branch string) ([]GitCommit, error) {
return g.getCommits(dir, branch, "--not", "--remotes")
}
// CommitDir commits all changes from the given directory
func (g *GitCLI) CommitDir(dir string, message string) error {
return g.gitCmd(dir, "commit", "-m", message)
}
// AddCommit performs an add and commit of the changes from the repository at the given directory with the given message
func (g *GitCLI) AddCommit(dir string, msg string) error {
return g.gitCmd(dir, "commit", "-a", "-m", msg, "--allow-empty")
}
// AddCommitFiles performs an add and commit of the selected files from the repository at the given directory with the given message
func (g *GitCLI) AddCommitFiles(dir string, msg string, files []string) error {
fileString := strings.Trim(fmt.Sprintf("%v", files), "[]")
return g.gitCmd(dir, "commit", "-m", msg, "--", fileString)
}
func (g *GitCLI) gitCmd(dir string, args ...string) error {
cmd := util.Command{
Dir: dir,
Name: "git",
Args: args,
}
output, err := cmd.RunWithoutRetry()
return errors.Wrapf(err, "git output: %s", output)
}
func (g *GitCLI) gitCmdWithOutput(dir string, args ...string) (string, error) {
cmd := util.Command{
Dir: dir,
Name: "git",
Args: args,
}
return cmd.RunWithoutRetry()
}
// CreateAuthenticatedURL creates the Git repository URL with the username and password encoded for HTTPS based URLs
func (g *GitCLI) CreateAuthenticatedURL(cloneURL string, userAuth *auth.UserAuth) (string, error) {
u, err := url.Parse(cloneURL)
if err != nil {
// already a git/ssh url?
return cloneURL, nil
}
// The file scheme doesn't support auth
if u.Scheme == "file" {
return cloneURL, nil
}
if userAuth.Username != "" || userAuth.ApiToken != "" {
u.User = url.UserPassword(userAuth.Username, userAuth.ApiToken)
return u.String(), nil
}
return cloneURL, nil
}
// RepoName formats the repository names based on the organization
func (g *GitCLI) RepoName(org, repoName string) string {
if org != "" {
return org + "/" + repoName
}
return repoName
}
// Server returns the Git server of the repository at the given directory
func (g *GitCLI) Server(dir string) (string, error) {
repo, err := g.Info(dir)
if err != nil {
return "", err
}
return repo.HostURL(), err
}
// Info returns the git info of the repository at the given directory
func (g *GitCLI) Info(dir string) (*GitRepository, error) {
text, err := g.gitCmdWithOutput(dir, "status")
var rUrl string
if err != nil && strings.Contains(text, "Not a git repository") {
rUrl = os.Getenv("SOURCE_URL")
if rUrl == "" {
// Relevant in a Jenkins pipeline triggered by a PR
rUrl = os.Getenv("CHANGE_URL")
if rUrl == "" {
return nil, fmt.Errorf("you are not in a Git repository - promotion command should be executed from an application directory")
}
}
} else {
text, err = g.gitCmdWithOutput(dir, "config", "--get", "remote.origin.url")
rUrl = strings.TrimSpace(text)
}
repo, err := ParseGitURL(rUrl)
if err != nil {
return nil, fmt.Errorf("failed to parse Git URL %s due to %s", rUrl, err)
}
return repo, err
}
// ConvertToValidBranchName converts the given branch name into a valid git branch string
// replacing any dodgy characters
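// Illustrative example (not from the original docs): "feature branch:name" becomes
// "feature_branch_name", and any run of consecutive invalid characters collapses to a single '_'.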
func (g *GitCLI) ConvertToValidBranchName(name string) string {
name = strings.TrimSuffix(name, "/")
name = strings.TrimSuffix(name, ".lock")
var buffer bytes.Buffer
last := ' '
for _, ch := range name {
if ch <= 32 {
ch = replaceInvalidBranchChars
}
switch ch {
case '~':
ch = replaceInvalidBranchChars
case '^':
ch = replaceInvalidBranchChars
case ':':
ch = replaceInvalidBranchChars
case ' ':
ch = replaceInvalidBranchChars
case '\n':
ch = replaceInvalidBranchChars
case '\r':
ch = replaceInvalidBranchChars
case '\t':
ch = replaceInvalidBranchChars
}
if ch != replaceInvalidBranchChars || last != replaceInvalidBranchChars {
buffer.WriteString(string(ch))
}
last = ch
}
return buffer.String()
}
// FetchBranch fetches the refspecs from the repo
func (g *GitCLI) FetchBranch(dir string, repo string, refspecs ...string) error {
return g.fetchBranch(dir, repo, false, false, false, refspecs...)
}
// FetchBranchShallow fetches the refspecs from the repo
func (g *GitCLI) FetchBranchShallow(dir string, repo string, refspecs ...string) error {
return g.fetchBranch(dir, repo, false, true, false, refspecs...)
}
// FetchBranchUnshallow fetches the refspecs from the repo, converting a shallow clone into a full one
func (g *GitCLI) FetchBranchUnshallow(dir string, repo string, refspecs ...string) error {
return g.fetchBranch(dir, repo, true, false, false, refspecs...)
}
// fetchBranch fetches the refspecs from the repo, optionally with --depth=1 (shallow) or --unshallow
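// Illustrative example (assumed arguments): fetchBranch(dir, "origin", false, true, false, "master")
// runs `git fetch origin --depth=1 master` in dir, while unshallow=true with shallow=false runs
// `git fetch origin --unshallow master`.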
func (g *GitCLI) fetchBranch(dir string, repo string, unshallow bool, shallow bool,
verbose bool, refspecs ...string) error {
args := []string{"fetch", repo}
if shallow && unshallow {
return errors.Errorf("cannot use --depth=1 and --unshallow at the same time")
}
if shallow {
args = append(args, "--depth=1")
}
if unshallow {
args = append(args, "--unshallow")
}
for _, refspec := range refspecs {
args = append(args, refspec)
}
err := g.gitCmd(dir, args...)
if err != nil {
return errors.WithStack(err)
}
if verbose {
if shallow {
log.Logger().Infof("ran git fetch %s --depth=1 %s in dir %s", repo, strings.Join(refspecs, " "), dir)
} else if unshallow {
log.Logger().Infof("ran git fetch %s --unshallow %s in dir %s", repo, strings.Join(refspecs, " "), dir)
} else {
log.Logger().Infof("ran git fetch %s %s in dir %s", repo, strings.Join(refspecs, " "), dir)
}
}
return nil
}
// GetAuthorEmailForCommit returns the author email for the commit with the given SHA
func (g *GitCLI) GetAuthorEmailForCommit(dir string, sha string) (string, error) {
text, err := g.gitCmdWithOutput(dir, "show", "-s", "--format=%aE", sha)
if err != nil {
return "", fmt.Errorf("failed to invoke git %s in %s due to %s", "show "+sha, dir, err)
}
return strings.TrimSpace(text), nil
}
// SetRemoteURL sets the remote URL of the remote with the given name
func (g *GitCLI) SetRemoteURL(dir string, name string, gitURL string) error {
err := g.gitCmd(dir, "remote", "add", name, gitURL)
if err != nil {
err = g.gitCmd(dir, "remote", "set-url", name, gitURL)
if err != nil {
return err
}
}
return nil
}
func (g *GitCLI) parseGitConfig(gitConf string) (*gitcfg.Config, error) {
if gitConf == "" {
return nil, fmt.Errorf("no GitConfDir defined")
}
cfg := gitcfg.NewConfig()
data, err := ioutil.ReadFile(gitConf)
if err != nil {
return nil, fmt.Errorf("failed to load %s due to %s", gitConf, err)
}
err = cfg.Unmarshal(data)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal %s due to %s", gitConf, err)
}
return cfg, nil
}
// DiscoverRemoteGitURL discovers the remote git URL from the given git configuration
func (g *GitCLI) DiscoverRemoteGitURL(gitConf string) (string, error) {
cfg, err := g.parseGitConfig(gitConf)
if err != nil {
return "", fmt.Errorf("failed to unmarshal %s due to %s", gitConf, err)
}
remotes := cfg.Remotes
if len(remotes) == 0 {
return "", nil
}
rUrl := g.GetRemoteUrl(cfg, "origin")
if rUrl == "" {
rUrl = g.GetRemoteUrl(cfg, "upstream")
}
return rUrl, nil
}
// DiscoverUpstreamGitURL discovers the upstream git URL from the given git configuration
func (g *GitCLI) DiscoverUpstreamGitURL(gitConf string) (string, error) {
cfg, err := g.parseGitConfig(gitConf)
if err != nil {
return "", fmt.Errorf("failed to unmarshal %s due to %s", gitConf, err)
}
remotes := cfg.Remotes
if len(remotes) == 0 {
return "", nil
}
rUrl := g.GetRemoteUrl(cfg, "upstream")
if rUrl == "" {
rUrl = g.GetRemoteUrl(cfg, "origin")
}
return rUrl, nil
}
func (g *GitCLI) firstRemoteUrl(remote *gitcfg.RemoteConfig) string {
if remote != nil {
urls := remote.URLs
if urls != nil && len(urls) > 0 {
return urls[0]
}
}
return ""
}
// GetRemoteUrl returns the remote URL from the given git config
func (g *GitCLI) GetRemoteUrl(config *gitcfg.Config, name string) string {
if config.Remotes != nil {
return g.firstRemoteUrl(config.Remotes[name])
}
return ""
}
// RemoteBranchNames returns all remote branch names with the given prefix
func (g *GitCLI) RemoteBranchNames(dir string, prefix string) ([]string, error) {
answer := []string{}
text, err := g.gitCmdWithOutput(dir, "branch", "-a")
if err != nil {
return answer, err
}
lines := strings.Split(text, "\n")
for _, line := range lines {
line = strings.TrimSpace(strings.TrimPrefix(line, "* "))
if prefix != "" {
if strings.HasPrefix(line, prefix) {
line = strings.TrimPrefix(line, prefix)
answer = append(answer, line)
}
} else {
answer = append(answer, line)
}
}
return answer, nil
}
// GetCommitPointedToByPreviousTag return the SHA of the commit pointed to by the latest-but-1 git tag as well as the tag
// name for the git repo in dir
func (g *GitCLI) GetCommitPointedToByPreviousTag(dir string) (string, string, error) {
tagSHA, tagName, err := g.NthTag(dir, 2)
if err != nil {
return "", "", errors.Wrapf(err, "getting commit pointed to by previous tag in %s", dir)
}
if tagSHA == "" {
return tagSHA, tagName, nil
}
commitSHA, err := g.gitCmdWithOutput(dir, "rev-list", "-n", "1", tagSHA)
if err != nil {
return "", "", errors.Wrapf(err, "running for git rev-list -n 1 %s", tagSHA)
}
return commitSHA, tagName, err
}
// GetRevisionBeforeDate returns the revision before the given date
func (g *GitCLI) GetRevisionBeforeDate(dir string, t time.Time) (string, error) {
dateText := util.FormatDate(t)
return g.GetRevisionBeforeDateText(dir, dateText)
}
// GetRevisionBeforeDateText returns the revision before the given date in format "MonthName dayNumber year"
func (g *GitCLI) GetRevisionBeforeDateText(dir string, dateText string) (string, error) {
branch, err := g.Branch(dir)
if err != nil {
return "", err
}
return g.gitCmdWithOutput(dir, "rev-list", "-1", "--before=\""+dateText+"\"", "--max-count=1", branch)
}
// GetCommitPointedToByLatestTag return the SHA of the commit pointed to by the latest git tag as well as the tag name
// for the git repo in dir
func (g *GitCLI) GetCommitPointedToByLatestTag(dir string) (string, string, error) {
tagSHA, tagName, err := g.NthTag(dir, 1)
if err != nil {
return "", "", errors.Wrapf(err, "getting commit pointed to by latest tag in %s", dir)
}
if tagSHA == "" {
return tagSHA, tagName, nil
}
commitSHA, err := g.gitCmdWithOutput(dir, "rev-list", "-n", "1", tagSHA)
if err != nil {
return "", "", errors.Wrapf(err, "running for git rev-list -n 1 %s", tagSHA)
}
return commitSHA, tagName, err
}
// GetCommitPointedToByTag return the SHA of the commit pointed to by the given git tag
func (g *GitCLI) GetCommitPointedToByTag(dir string, tag string) (string, error) {
commitSHA, err := g.gitCmdWithOutput(dir, "rev-list", "-n", "1", tag)
if err != nil {
return "", errors.Wrapf(err, "running for git rev-list -n 1 %s", tag)
}
return commitSHA, err
}
// GetLatestCommitMessage returns the latest git commit message
func (g *GitCLI) GetLatestCommitMessage(dir string) (string, error) {
return g.gitCmdWithOutput(dir, "log", "-1", "--pretty=%B")
}
// FetchTags fetches all the tags
func (g *GitCLI) FetchTags(dir string) error {
return g.gitCmd(dir, "fetch", "--tags")
}
// Tags returns all tags from the repository at the given directory
func (g *GitCLI) Tags(dir string) ([]string, error) {
return g.FilterTags(dir, "")
}
// FilterTags returns all tags from the repository at the given directory that match the filter
func (g *GitCLI) FilterTags(dir string, filter string) ([]string, error) {
args := []string{"tag"}
if filter != "" {
args = append(args, "--list", filter)
}
text, err := g.gitCmdWithOutput(dir, args...)
if err != nil {
return nil, err
}
text = strings.TrimSuffix(text, "\n")
split := strings.Split(text, "\n")
// Split will return the original string if it can't split it, and it may be empty
if len(split) == 1 && split[0] == "" {
return make([]string, 0), nil
}
return split, nil
}
// CreateTag creates a tag with the given name and message in the repository at the given directory
func (g *GitCLI) CreateTag(dir string, tag string, msg string) error {
return g.gitCmd(dir, "tag", "-fa", tag, "-m", msg)
}
// PrintCreateRepositoryGenerateAccessToken prints the access token URL of a Git repository
func (g *GitCLI) PrintCreateRepositoryGenerateAccessToken(server *auth.AuthServer, username string, o io.Writer) {
tokenUrl := ProviderAccessTokenURL(server.Kind, server.URL, username)
fmt.Fprintf(o, "To be able to create a repository on %s we need an API Token\n", server.Label())
fmt.Fprintf(o, "Please click this URL and generate a token \n%s\n\n", util.ColorInfo(tokenUrl))
fmt.Fprint(o, "Then COPY the token and enter it below:\n\n")
}
// IsFork indicates if the repository at the given directory is a fork
func (g *GitCLI) IsFork(dir string) (bool, error) {
// lets ignore errors as that just means there's no config
originUrl, _ := g.gitCmdWithOutput(dir, "config", "--get", "remote.origin.url")
upstreamUrl, _ := g.gitCmdWithOutput(dir, "config", "--get", "remote.upstream.url")
if originUrl != upstreamUrl && originUrl != "" && upstreamUrl != "" {
return true, nil
}
return false, fmt.Errorf("could not confirm the repo is a fork")
}
// Version returns the git version
func (g *GitCLI) Version() (string, error) {
out, err := g.gitCmdWithOutput("", "version")
idxs := numberRegex.FindStringIndex(out)
if len(idxs) > 0 {
return out[idxs[0]:], err
}
return out, err
}
// Username return the username from the git configuration
func (g *GitCLI) Username(dir string) (string, error) {
return g.gitCmdWithOutput(dir, "config", "--get", "user.name")
}
// SetUsername sets the username in the git configuration
func (g *GitCLI) SetUsername(dir string, username string) error {
// Will return status 1 silently if the user is not set.
_, err := g.gitCmdWithOutput(dir, "config", "--get", "user.name")
if err != nil {
return g.gitCmd(dir, "config", "--global", "--add", "user.name", username)
}
return nil
}
// Email returns the email from the git configuration
func (g *GitCLI) Email(dir string) (string, error) {
return g.gitCmdWithOutput(dir, "config", "--get", "user.email")
}
// SetEmail sets the given email in the git configuration
func (g *GitCLI) SetEmail(dir string, email string) error {
// Will return status 1 silently if the email is not set.
_, err := g.gitCmdWithOutput(dir, "config", "--get", "user.email")
if err != nil {
return g.gitCmd(dir, "config", "--global", "--add", "user.email", email)
}
return nil
}
// CreateBranch creates a branch with the given name in the Git repository from the given directory
func (g *GitCLI) CreateBranch(dir string, branch string) error {
return g.gitCmd(dir, "branch", branch)
}
// Diff runs git diff
func (g *GitCLI) Diff(dir string) (string, error) {
return g.gitCmdWithOutput(dir, "diff")
}
// ListChangedFilesFromBranch lists files changed between branches
func (g *GitCLI) ListChangedFilesFromBranch(dir string, branch string) (string, error) {
return g.gitCmdWithOutput(dir, "diff", "--name-status", branch)
}
// LoadFileFromBranch returns a file's contents from a branch
func (g *GitCLI) LoadFileFromBranch(dir string, branch string, file string) (string, error) {
return g.gitCmdWithOutput(dir, "show", branch+":"+file)
}
// FetchUnshallow runs git fetch --unshallow in dir
func (g *GitCLI) FetchUnshallow(dir string) error {
err := g.gitCmd(dir, "fetch", "--unshallow")
if err != nil {
return errors.Wrapf(err, "running git fetch --unshallow %s", dir)
}
return nil
}
// IsShallow runs git rev-parse --is-shallow-repository in dir and returns the result
func (g *GitCLI) IsShallow(dir string) (bool, error) {
out, err := g.gitCmdWithOutput(dir, "rev-parse", "--is-shallow-repository")
if err != nil {
return false, errors.Wrapf(err, "running git rev-parse --is-shallow-repository %s", dir)
}
if out == "--is-shallow-repository" {
// Newer git has a method to do it, but we use an old git in our builders, so resort to doing it manually
gitDir, _, err := g.FindGitConfigDir(dir)
if err != nil {
return false, errors.Wrapf(err, "finding .git for %s", dir)
}
if _, err := os.Stat(filepath.Join(gitDir, ".git", "shallow")); os.IsNotExist(err) {
return false, nil
} else if err != nil {
return false, err
}
return true, nil
}
b, err := util.ParseBool(out)
if err != nil {
return false, errors.Wrapf(err, "converting %v to bool", b)
}
return b, nil
}
// CreateBranchFrom creates a new branch called branchName from startPoint
func (g *GitCLI) CreateBranchFrom(dir string, branchName string, startPoint string) error {
return g.gitCmd(dir, "branch", branchName, startPoint)
}
// Merge merges the commitish into the current branch
func (g *GitCLI) Merge(dir string, commitish string) error {
return g.gitCmd(dir, "merge", commitish)
}
// GetLatestCommitSha returns the sha of the last commit
func (g *GitCLI) GetLatestCommitSha(dir string) (string, error) {
return g.gitCmdWithOutput(dir, "rev-parse", "HEAD")
}
// GetFirstCommitSha returns the sha of the first commit
func (g *GitCLI) GetFirstCommitSha(dir string) (string, error) {
return g.gitCmdWithOutput(dir, "rev-list", "--max-parents=0", "HEAD")
}
// Reset performs a git reset --hard back to the commitish specified
func (g *GitCLI) Reset(dir string, commitish string, hard bool) error {
args := []string{"reset"}
if hard {
args = append(args, "--hard")
}
if commitish != "" {
args = append(args, commitish)
}
return g.gitCmd(dir, args...)
}
// MergeTheirs will do a recursive merge of commitish with the strategy option theirs
func (g *GitCLI) MergeTheirs(dir string, commitish string) error {
return g.gitCmd(dir, "merge", "--strategy-option=theirs", commitish)
}
// RebaseTheirs runs git rebase upstream branch with the strategy option theirs
func (g *GitCLI) RebaseTheirs(dir string, upstream string, branch string, skipEmpty bool) error {
args := []string{
"rebase",
"--strategy-option=theirs",
upstream,
}
if branch != "" {
args = append(args, branch)
}
err := g.gitCmd(dir, args...)
if skipEmpty {
// If skipEmpty is passed, then if the failure is due to an empty commit, run `git rebase --skip` to move on
// Weirdly git has no option on rebase to just do this
for err != nil && IsEmptyCommitError(err) {
err = g.gitCmd(dir, "rebase", "--skip")
}
}
if err != nil {
return errors.WithStack(err)
}
return nil
}
// RevParse runs git rev-parse on rev
func (g *GitCLI) RevParse(dir string, rev string) (string, error) {
return g.gitCmdWithOutput(dir, "rev-parse", rev)
}
// SetUpstreamTo will set the given branch to track the origin branch with the same name
func (g *GitCLI) SetUpstreamTo(dir string, branch string) error {
return g.gitCmd(dir, "branch", "--set-upstream-to", fmt.Sprintf("origin/%s", branch), branch)
}
// NthTag return the SHA and tag name of nth tag in reverse chronological order from the repository at the given directory.
// If the nth tag does not exist empty strings without an error are returned.
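// Illustrative example (hypothetical tags): with tags v1.1.0 (newest) and v1.0.0, the command below
// prints one "<sha>\x00<tag>" line per tag in reverse chronological order, so NthTag(dir, 2) parses
// the second line and returns the SHA and name of v1.0.0.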
func (g *GitCLI) NthTag(dir string, n int) (string, string, error) {
args := []string{
"for-each-ref",
"--sort=-creatordate",
"--format=%(objectname)%00%(refname:short)",
fmt.Sprintf("--count=%d", n),
"refs/tags",
}
out, err := g.gitCmdWithOutput(dir, args...)
if err != nil {
return "", "", errors.Wrapf(err, "running git %s", strings.Join(args, " "))
}
tagList := strings.Split(out, "\n")
if len(tagList) < n {
return "", "", nil
}
fields := strings.Split(tagList[n-1], "\x00")
if len(fields) != 2 {
return "", "", errors.Errorf("Unexpected format for returned tag and sha: '%s'", tagList[n-1])
}
return fields[0], fields[1], nil
}
// Remotes will list the names of the remotes
func (g *GitCLI) Remotes(dir string) ([]string, error) {
out, err := g.gitCmdWithOutput(dir, "remote")
if err != nil {
return nil, errors.Wrapf(err, "running git remote")
}
return strings.Split(out, "\n"), nil
}
// CloneBare will create a bare clone of url
func (g *GitCLI) CloneBare(dir string, url string) error {
err := g.gitCmd(dir, "clone", "--bare", url, dir)
if err != nil {
return errors.Wrapf(err, "running git clone --bare %s", url)
}
return nil
}
// PushMirror will push the dir as a mirror to url
func (g *GitCLI) PushMirror(dir string, url string) error {
err := g.gitCmd(dir, "push", "--mirror", url)
if err != nil {
return errors.Wrapf(err, "running git push --mirror %s", url)
}
return nil
}
// CherryPick does a git cherry-pick of commit
func (g *GitCLI) CherryPick(dir string, commitish string) error {
return g.gitCmd(dir, "cherry-pick", commitish)
}
// CherryPickTheirs does a git cherry-pick of commit using the "theirs" strategy option
func (g *GitCLI) CherryPickTheirs(dir string, commitish string) error {
return g.gitCmd(dir, "cherry-pick", commitish, "--strategy=recursive", "-X", "theirs")
}
// Describe does a git describe of commitish, optionally adding the abbrev arg if not empty, falling back to just the commit ref if it's untagged
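// Illustrative example (hypothetical output): `git describe` output such as "v1.0.0-2-gabc1234"
// is split by splitDescribeRegex at "-g", returning "v1.0.0-2" as the first value and "abc1234"
// as the second.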
func (g *GitCLI) Describe(dir string, contains bool, commitish string, abbrev string, fallback bool) (string, string, error) {
args := []string{"describe", commitish}
if abbrev != "" {
args = append(args, fmt.Sprintf("--abbrev=%s", abbrev))
}
if contains {
args = append(args, "--contains")
}
out, err := g.gitCmdWithOutput(dir, args...)
if err != nil {
if fallback {
// If the commit-ish is untagged, it'll fail with "fatal: cannot describe '<commit-ish>'". In those cases, just return
// the original commit-ish.
if strings.Contains(err.Error(), "fatal: cannot describe") {
return commitish, "", nil
}
}
log.Logger().Warnf("err: %s", err.Error())
return "", "", errors.Wrapf(err, "running git %s", strings.Join(args, " "))
}
trimmed := strings.TrimSpace(strings.Trim(out, "\n"))
parts := splitDescribeRegex.Split(trimmed, -1)
if len(parts) == 2 {
return parts[0], parts[1], nil
}
return parts[0], "", nil
}
|
[
"\"SOURCE_URL\"",
"\"CHANGE_URL\""
] |
[] |
[
"CHANGE_URL",
"SOURCE_URL"
] |
[]
|
["CHANGE_URL", "SOURCE_URL"]
|
go
| 2 | 0 | |
db/db.go
|
package db
import (
"context"
"fmt"
"os"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/mongo/readpref"
)
var (
client *mongo.Client
col *mongo.Collection
database string
collection string
username string
password string
host string
port string
authSource string
ctx = context.Background()
)
// CrawlabItem should be embedded by any struct that is inserted into MongoDB
type CrawlabItem struct {
TaskID string `json:"task_id"`
}
// Init reads the MongoDB connection settings from environment variables
func Init() {
database = os.Getenv("CRAWLAB_MONGO_DB")
collection = os.Getenv("CRAWLAB_COLLECTION")
username = os.Getenv("CRAWLAB_MONGO_USERNAME")
password = os.Getenv("CRAWLAB_MONGO_PASSWORD")
host = os.Getenv("CRAWLAB_MONGO_HOST")
if len(host) == 0 {
host = "localhost"
}
port = os.Getenv("CRAWLAB_MONGO_PORT")
if len(port) == 0 {
port = "27017"
}
authSource = os.Getenv("CRAWLAB_MONGO_AUTHSOURCE")
if len(authSource) == 0 && len(username) > 0 {
authSource = "admin"
}
}
// NewCollection creates the MongoDB client if needed and returns the configured collection
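// Illustrative example (hypothetical credentials): with CRAWLAB_MONGO_USERNAME=user and
// CRAWLAB_MONGO_PASSWORD=pass the connection URI becomes "mongodb://user:pass@localhost:27017"
// (given the default host and port); without a username it is "mongodb://localhost:27017".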
func NewCollection() (*mongo.Collection, context.Context, error) {
var err error
applyURI := ""
if client == nil {
Init()
if len(username) > 0 {
applyURI = fmt.Sprintf(`mongodb://%s:%s@%s:%s`,
username, password, host, port)
} else {
applyURI = fmt.Sprintf(`mongodb://%s:%s`,
host, port)
}
client, err = mongo.Connect(ctx, options.Client().ApplyURI(applyURI))
}
err = client.Ping(ctx, readpref.Primary())
if err != nil {
panic(fmt.Sprintf("mongo uri %s connect failed: %s", applyURI, err))
}
if col == nil {
col = client.Database(database).Collection(collection)
}
return col, ctx, err
}
// Close closes the database connection
// TODO: the connection does not actually appear to be closed here
func Close() {
if client == nil {
return
}
}
|
[
"\"CRAWLAB_MONGO_DB\"",
"\"CRAWLAB_COLLECTION\"",
"\"CRAWLAB_MONGO_USERNAME\"",
"\"CRAWLAB_MONGO_PASSWORD\"",
"\"CRAWLAB_MONGO_HOST\"",
"\"CRAWLAB_MONGO_PORT\"",
"\"CRAWLAB_MONGO_AUTHSOURCE\""
] |
[] |
[
"CRAWLAB_MONGO_USERNAME",
"CRAWLAB_MONGO_PASSWORD",
"CRAWLAB_MONGO_PORT",
"CRAWLAB_MONGO_DB",
"CRAWLAB_COLLECTION",
"CRAWLAB_MONGO_HOST",
"CRAWLAB_MONGO_AUTHSOURCE"
] |
[]
|
["CRAWLAB_MONGO_USERNAME", "CRAWLAB_MONGO_PASSWORD", "CRAWLAB_MONGO_PORT", "CRAWLAB_MONGO_DB", "CRAWLAB_COLLECTION", "CRAWLAB_MONGO_HOST", "CRAWLAB_MONGO_AUTHSOURCE"]
|
go
| 7 | 0 | |
main_superpixels_graph_classification.py
|
"""
IMPORTING LIBS
"""
import dgl
import numpy as np
import os
import socket
import time
import random
import glob
import argparse, json
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from tqdm import tqdm
class DotDict(dict):
def __init__(self, **kwds):
self.update(kwds)
self.__dict__ = self
"""
IMPORTING CUSTOM MODULES/METHODS
"""
from nets.superpixels_graph_classification.load_net import gnn_model # import all GNNS
from data.data import LoadData # import dataset
from train.train_superpixels_graph_classification import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network # import train functions for GCNs
from utils.result import load_model
"""
GPU Setup
"""
def gpu_setup(use_gpu, gpu_id):
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
if torch.cuda.is_available() and use_gpu:
print('cuda available with GPU:',torch.cuda.get_device_name(0))
device = torch.device("cuda")
else:
print('cuda not available')
device = torch.device("cpu")
return device
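# Illustrative usage (assumed call site): gpu_setup(True, 0) pins CUDA_VISIBLE_DEVICES to GPU 0 and
# returns torch.device("cuda") when a GPU is available, otherwise torch.device("cpu").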
"""
Random Seed Setup
"""
def set_random_seed(seed, device=None):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if device and device.type == 'cuda':
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
"""
VIEWING MODEL CONFIG AND PARAMS
"""
def view_model_param(MODEL_NAME, net_params, verbose=False):
model = gnn_model(MODEL_NAME, net_params)
total_param = 0
print("MODEL DETAILS:\n")
#print(model)
for param in model.parameters():
# print(param.data.size())
total_param += np.prod(list(param.data.size()))
print('MODEL/Total parameters:', MODEL_NAME, total_param)
if verbose:
print('\n== Net Params:')
print(net_params)
print('\n== Model Structure:')
print(model)
return total_param
"""
TESTING CODE
"""
def test_pipeline(MODEL_NAME, dataset, device, verbose, out_dir):
# Load models
print('\n>> Loading models...')
model_ls = load_model(out_dir, device=device, only_best=False, verbose=verbose,
filter=lambda df: df[df['model'] == MODEL_NAME][df['dataset'] == dataset.name])
# Preparing dataset
print('\n>> Preparing data...')
if MODEL_NAME in ['GCN']:
if model_ls[0]['net_params']['self_loop']:
print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
dataset._add_self_loops()
if MODEL_NAME in ['SoGCN']:
if model_ls[0]['net_params']['undirected']:
print("[!] Converting directed graphs to undirected graphs for SoGCN model.")
dataset._to_undirected()
testset = dataset.test
print("Test Graphs: ", len(testset))
# Batching test data
test_loader = DataLoader(testset, batch_size=model_ls[0]['net_params']['batch_size'], shuffle=False, drop_last=False, collate_fn=dataset.collate)
# Test models
print('\n>> Testing models...')
acc_ls = []
for i, item in enumerate(model_ls):
model = item['model']
net_params = item['net_params']
# Set random seed
set_random_seed(item['seed'], device)
# Evaluate model
_, test_acc = evaluate_network(model, device, test_loader, 0)
acc_ls.append(test_acc)
if verbose:
print('\nModel #%s' % i)
print('Test Accuracy: %s' % acc_ls[-1])
print('\n')
print('AVG Test Accuracy: %s, s.d.: %s' % (np.mean(acc_ls), np.std(acc_ls)))
"""
TRAINING CODE
"""
def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
t0 = time.time()
per_epoch_time = []
DATASET_NAME = dataset.name
if MODEL_NAME in ['GCN']:
if net_params['self_loop']:
print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
dataset._add_self_loops()
if MODEL_NAME in ['SoGCN']:
if net_params['undirected']:
print("[!] Converting directed graphs to undirected graphs for SoGCN model.")
dataset._to_undirected()
trainset, valset, testset = dataset.train, dataset.val, dataset.test
root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs
device = net_params['device']
# Write the network and optimization hyper-parameters in folder config/
with open(write_config_file + '.txt', 'w') as f:
f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n\nTotal Parameters: {}\n\n""".format(DATASET_NAME, MODEL_NAME, params, net_params, net_params['total_param']))
log_dir = os.path.join(root_log_dir, "RUN_" + str(0))
writer = SummaryWriter(log_dir=log_dir)
print("Training Graphs: ", len(trainset))
print("Validation Graphs: ", len(valset))
print("Test Graphs: ", len(testset))
print("Number of Classes: ", net_params['n_classes'])
model = gnn_model(MODEL_NAME, net_params)
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=params['init_lr'], weight_decay=params['weight_decay'])
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
factor=params['lr_reduce_factor'],
patience=params['lr_schedule_patience'],
verbose=True)
epoch_train_losses, epoch_val_losses = [], []
epoch_train_accs, epoch_val_accs = [], []
# Data loaders
train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, drop_last=False, collate_fn=dataset.collate)
val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, drop_last=False, collate_fn=dataset.collate)
test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, drop_last=False, collate_fn=dataset.collate)
# At any point you can hit Ctrl + C to break out of training early.
try:
with tqdm(range(params['epochs']), ascii=True) as t:
for epoch in t:
t.set_description('Epoch %d' % epoch)
start = time.time()
epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)
epoch_val_loss, epoch_val_acc = evaluate_network(model, device, val_loader, epoch)
_, epoch_test_acc = evaluate_network(model, device, test_loader, epoch)
epoch_train_losses.append(epoch_train_loss)
epoch_val_losses.append(epoch_val_loss)
epoch_train_accs.append(epoch_train_acc)
epoch_val_accs.append(epoch_val_acc)
writer.add_scalar('train/_loss', epoch_train_loss, epoch)
writer.add_scalar('val/_loss', epoch_val_loss, epoch)
writer.add_scalar('train/_acc', epoch_train_acc, epoch)
writer.add_scalar('val/_acc', epoch_val_acc, epoch)
writer.add_scalar('test/_acc', epoch_test_acc, epoch)
writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],
train_loss=epoch_train_loss, val_loss=epoch_val_loss,
train_acc=epoch_train_acc, val_acc=epoch_val_acc,
test_acc=epoch_test_acc)
per_epoch_time.append(time.time()-start)
# Saving checkpoint
ckpt_dir = os.path.join(root_ckpt_dir, "RUN_")
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
torch.save(model.state_dict(), '{}.pkl'.format(ckpt_dir + "/epoch_" + str(epoch)))
files = glob.glob(ckpt_dir + '/*.pkl')
for file in files:
epoch_nb = file.split('_')[-1]
epoch_nb = int(epoch_nb.split('.')[0])
if epoch_nb < epoch-1:
os.remove(file)
scheduler.step(epoch_val_loss)
if optimizer.param_groups[0]['lr'] < params['min_lr']:
print("\n!! LR EQUAL TO MIN LR SET.")
break
# Stop training after params['max_time'] hours
if time.time()-t0 > params['max_time']*3600:
print('-' * 89)
print("Max_time for training elapsed {:.2f} hours, so stopping".format(params['max_time']))
break
except KeyboardInterrupt:
print('-' * 89)
print('Exiting from training early because of KeyboardInterrupt')
_, test_acc = evaluate_network(model, device, test_loader, epoch)
_, train_acc = evaluate_network(model, device, train_loader, epoch)
print("Test Accuracy: {:.4f}".format(test_acc))
print("Train Accuracy: {:.4f}".format(train_acc))
print("Convergence Time (Epochs): {:.4f}".format(epoch))
print("TOTAL TIME TAKEN: {:.4f}s".format(time.time()-t0))
print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))
writer.close()
"""
Write the results in out_dir/results folder
"""
with open(write_file_name + '.txt', 'w') as f:
f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
FINAL RESULTS\nTEST ACCURACY: {:.4f}\nTRAIN ACCURACY: {:.4f}\n\n
Convergence Time (Epochs): {:.4f}\nTotal Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
.format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
np.mean(np.array(test_acc))*100, np.mean(np.array(train_acc))*100, epoch, (time.time()-t0)/3600, np.mean(per_epoch_time)))
def main():
start_time_str = time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
"""
USER CONTROLS
"""
parser = argparse.ArgumentParser()
parser.add_argument('--config', help="Please give a config.json file with training/model/data/param details")
parser.add_argument('--gpu_id', help="Please give a value for gpu id")
parser.add_argument('--model', help="Please give a value for model name")
parser.add_argument('--dataset', help="Please give a value for dataset name")
parser.add_argument('--out_dir', help="Please give a value for out_dir")
parser.add_argument('--seed', help="Please give a value for seed")
parser.add_argument('--epochs', help="Please give a value for epochs")
parser.add_argument('--batch_size', help="Please give a value for batch_size")
parser.add_argument('--init_lr', help="Please give a value for init_lr")
parser.add_argument('--lr_reduce_factor', help="Please give a value for lr_reduce_factor")
parser.add_argument('--lr_schedule_patience', help="Please give a value for lr_schedule_patience")
parser.add_argument('--min_lr', help="Please give a value for min_lr")
parser.add_argument('--weight_decay', help="Please give a value for weight_decay")
parser.add_argument('--print_epoch_interval', help="Please give a value for print_epoch_interval")
parser.add_argument('--L', help="Please give a value for L")
parser.add_argument('--hidden_dim', help="Please give a value for hidden_dim")
parser.add_argument('--out_dim', help="Please give a value for out_dim")
parser.add_argument('--residual', help="Please give a value for residual")
parser.add_argument('--readout', help="Please give a value for readout")
parser.add_argument('--gated', help="Please give a value for gated")
parser.add_argument('--in_feat_dropout', help="Please give a value for in_feat_dropout")
parser.add_argument('--dropout', help="Please give a value for dropout")
parser.add_argument('--batch_norm', help="Please give a value for batch_norm")
parser.add_argument('--self_loop', help="Please give a value for self_loop")
parser.add_argument('--max_time', help="Please give a value for max_time")
parser.add_argument('--verbose', help="Please give a value for verbose")
parser.add_argument('--only_view_params', help="Please give a value for only_view_params")
parser.add_argument('--undirected', help="Please give a value for undirected")
parser.add_argument('--max_order', help="Please give a value for max_order")
parser.add_argument('--gru', help="Please give a value for gru")
parser.add_argument('--activation', help="Please give a value for activation")
parser.add_argument('--test', help="Please give a value for test")
args = parser.parse_args()
if args.config is not None:
with open(args.config) as f:
config = json.load(f)
else:
config = {'gpu': {'use': False, 'id': 0}, 'params': {}, 'net_params': {}}
only_view_params = False
if args.only_view_params is not None:
only_view_params = True if args.only_view_params=='True' else False
test_mode = False
if args.test is not None:
test_mode = True if args.test=='True' else False
verbose_mode = False
if args.verbose is not None:
verbose_mode = True if args.verbose=='True' else False
# device
if args.gpu_id is not None:
config['gpu']['id'] = int(args.gpu_id)
config['gpu']['use'] = True
device = gpu_setup(config['gpu']['use'], config['gpu']['id'])
# Model name
if args.model is not None:
MODEL_NAME = args.model
elif 'model' in config:
MODEL_NAME = config['model']
else:
raise Exception('No specified model (--model)')
# Dataset name
if args.dataset is not None:
DATASET_NAME = args.dataset
elif 'dataset' in config:
DATASET_NAME = config['dataset']
else:
raise Exception('No specified dataset (--dataset)')
# Out directory
if args.out_dir is not None:
out_dir = args.out_dir
elif 'out_dir' in config:
out_dir = config['out_dir']
else:
raise Exception('No specified out directory (--out_dir)')
'''
Load dataset
'''
# Superpixels dataset
dataset = LoadData(DATASET_NAME)
'''
TEST model pipeline
'''
if test_mode:
print ('=' * 10 + ' TEST mode ' + '=' * 10)
test_pipeline(MODEL_NAME, dataset, device, verbose_mode, out_dir)
return
'''
TRAIN model pipeline
'''
# parameters
params = config['params']
if not 'verbose' in params:
params['verbose'] = False
if args.seed is not None:
params['seed'] = int(args.seed)
if args.epochs is not None:
params['epochs'] = int(args.epochs)
if args.batch_size is not None:
params['batch_size'] = int(args.batch_size)
if args.init_lr is not None:
params['init_lr'] = float(args.init_lr)
if args.lr_reduce_factor is not None:
params['lr_reduce_factor'] = float(args.lr_reduce_factor)
if args.lr_schedule_patience is not None:
params['lr_schedule_patience'] = int(args.lr_schedule_patience)
if args.min_lr is not None:
params['min_lr'] = float(args.min_lr)
if args.weight_decay is not None:
params['weight_decay'] = float(args.weight_decay)
if args.print_epoch_interval is not None:
params['print_epoch_interval'] = int(args.print_epoch_interval)
if args.max_time is not None:
params['max_time'] = float(args.max_time)
if args.verbose is not None:
params['verbose'] = True if args.verbose=='True' else False
# network parameters
net_params = config['net_params']
net_params['device'] = device
net_params['gpu_id'] = config['gpu']['id']
net_params['batch_size'] = params['batch_size']
if not 'max_order' in net_params:
net_params['max_order'] = 2
if not 'gru' in net_params:
net_params['gru'] = False
if not 'undirected' in net_params:
net_params['undirected'] = False
if not 'activation' in net_params:
net_params['activation'] = 'relu'
if args.L is not None:
net_params['L'] = int(args.L)
if args.hidden_dim is not None:
net_params['hidden_dim'] = int(args.hidden_dim)
if args.out_dim is not None:
net_params['out_dim'] = int(args.out_dim)
if args.residual is not None:
net_params['residual'] = True if args.residual=='True' else False
if args.readout is not None:
net_params['readout'] = args.readout
if args.gated is not None:
net_params['gated'] = True if args.gated=='True' else False
if args.in_feat_dropout is not None:
net_params['in_feat_dropout'] = float(args.in_feat_dropout)
if args.dropout is not None:
net_params['dropout'] = float(args.dropout)
if args.batch_norm is not None:
net_params['batch_norm'] = True if args.batch_norm=='True' else False
if args.self_loop is not None:
net_params['self_loop'] = True if args.self_loop=='True' else False
if args.undirected is not None:
net_params['undirected'] = True if args.undirected=='True' else False
if args.max_order is not None:
net_params['max_order'] = int(args.max_order)
if args.gru is not None:
net_params['gru'] = True if args.gru=='True' else False
if args.activation is not None:
net_params['activation'] = args.activation
net_params['in_dim'] = dataset.train[0][0].ndata['feat'][0].size(0)
net_params['in_dim_edge'] = dataset.train[0][0].edata['feat'][0].size(0)
num_classes = len(np.unique(np.array(dataset.train[:][1])))
net_params['n_classes'] = num_classes
# Set random seed
set_random_seed(params['seed'], device)
# View parameters
net_params['total_param'] = view_model_param(MODEL_NAME, net_params, params['verbose'])
if only_view_params:
print('== View Parameters only ==')
return
root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + start_time_str
root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + start_time_str
write_file_name = out_dir + 'results/result_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + start_time_str
write_config_file = out_dir + 'configs/config_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + start_time_str
dirs = root_log_dir, root_ckpt_dir, write_file_name, write_config_file
if not os.path.exists(out_dir + 'results'):
os.makedirs(out_dir + 'results')
if not os.path.exists(out_dir + 'configs'):
os.makedirs(out_dir + 'configs')
print('\nResult output:', write_file_name)
train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs)
main()
|
[] |
[] |
[
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"]
|
python
| 2 | 0 | |
Tests/test_Emboss.py
|
# Copyright 2009 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Runs a few EMBOSS tools to check our wrappers and parsers."""
import os
import sys
import unittest
import subprocess
from Bio.Application import generic_run
from Bio.Emboss.Applications import WaterCommandline, NeedleCommandline
from Bio.Emboss.Applications import SeqretCommandline
from Bio import SeqIO
from Bio import AlignIO
from Bio import MissingExternalDependencyError
from Bio.Alphabet import generic_protein, generic_dna, generic_nucleotide
from Bio.Seq import Seq, translate
from Bio.SeqRecord import SeqRecord
#from Bio.Data.IUPACData import ambiguous_dna_letters
#################################################################
exes_wanted = ["water", "needle", "seqret", "transeq"]
exes = dict() #Dictionary mapping from names to exe locations
if sys.platform=="win32" :
#The default installation path is C:\mEMBOSS which contains the exes.
#EMBOSS also sets an environment variable which we will check for.
try :
path = os.environ["EMBOSS_ROOT"]
except KeyError :
#print >> sys.stderr, "Missing EMBOSS_ROOT environment variable!"
raise MissingExternalDependencyError(\
"Install EMBOSS if you want to use Bio.EMBOSS.")
if os.path.isdir(path) :
for name in exes_wanted :
if os.path.isfile(os.path.join(path, name+".exe")) :
exes[name] = os.path.join(path, name+".exe")
del path, name
else :
import commands
for name in exes_wanted :
#This will "just work" if installed on the path as normal on Unix
if "not found" not in commands.getoutput("%s -help" % name) :
exes[name] = name
del name
if len(exes) < len(exes_wanted) :
raise MissingExternalDependencyError(\
"Install EMBOSS if you want to use Bio.EMBOSS.")
#################################################################
#Top level function as this makes it easier to use for debugging:
def emboss_convert(filename, old_format, new_format):
"""Run seqret, returns handle."""
#Setup, this assumes for all the format names used
#Biopython and EMBOSS names are consistent!
cline = SeqretCommandline(exes["seqret"],
sequence = filename,
sformat = old_format,
osformat = new_format,
auto = True, #no prompting
stdout = True)
#Run the tool,
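    #Note: on Windows the command string is passed to the process directly,
    #elsewhere shell=True lets the shell split the arguments (see below).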
child = subprocess.Popen(str(cline),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=(sys.platform!="win32"))
child.stdin.close()
return child.stdout
#Top level function as this makes it easier to use for debugging:
def emboss_piped_SeqIO_convert(records, old_format, new_format):
"""Run seqret, returns records (as a generator)."""
#Setup, this assumes for all the format names used
#Biopython and EMBOSS names are consistent!
cline = SeqretCommandline(exes["seqret"],
sformat = old_format,
osformat = new_format,
auto = True, #no prompting
filter = True)
#Run the tool,
child = subprocess.Popen(str(cline),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=(sys.platform!="win32"))
SeqIO.write(records, child.stdin, old_format)
child.stdin.close()
return SeqIO.parse(child.stdout, new_format)
#Top level function as this makes it easier to use for debugging:
def emboss_piped_AlignIO_convert(alignments, old_format, new_format):
"""Run seqret, returns alignments (as a generator)."""
#Setup, this assumes for all the format names used
#Biopython and EMBOSS names are consistent!
cline = SeqretCommandline(exes["seqret"],
sformat = old_format,
osformat = new_format,
auto = True, #no prompting
filter = True)
#Run the tool,
child = subprocess.Popen(str(cline),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=(sys.platform!="win32"))
AlignIO.write(alignments, child.stdin, old_format)
child.stdin.close()
return AlignIO.parse(child.stdout, new_format)
#Top level function as this makes it easier to use for debugging:
def compare_records(old_list, new_list) :
"""Check two lists of SeqRecords agree, raises a ValueError if mismatch."""
if len(old_list) != len(new_list) :
raise ValueError("%i vs %i records" % (len(old_list), len(new_list)))
for old, new in zip(old_list, new_list) :
#Note the name matching is a bit fuzzy, e.g. truncation and
#no spaces in PHYLIP files.
if old.id != new.id and old.name != new.name \
and (old.id not in new.id) and (new.id not in old.id) \
and (old.id.replace(" ","_") != new.id.replace(" ","_")) :
raise ValueError("'%s' or '%s' vs '%s' or '%s' records" \
% (old.id, old.name, new.id, new.name))
if len(old.seq) != len(new.seq) :
raise ValueError("%i vs %i" % (len(old.seq), len(new.seq)))
if str(old.seq).upper() != str(new.seq).upper() :
if len(old.seq) < 200 :
raise ValueError("'%s' vs '%s'" % (old.seq, new.seq))
else :
raise ValueError("'%s...' vs '%s...'" % (old.seq[:100], new.seq[:100]))
if old.features and new.features \
and len(old.features) != len(new.features) :
raise ValueError("%i vs %i features" \
                             % (len(old.features), len(new.features)))
#TODO - check annotation
return True
#Top level function as this makes it easier to use for debugging:
def compare_alignments(old_list, new_list) :
"""Check two lists of Alignments agree, raises a ValueError if mismatch."""
if len(old_list) != len(new_list) :
raise ValueError("%i vs %i alignments" % (len(old_list), len(new_list)))
for old, new in zip(old_list, new_list) :
if len(old) != len(new) :
raise ValueError("Alignment with %i vs %i records" \
% (len(old), len(new)))
compare_records(old,new)
return True
class SeqRetSeqIOTests(unittest.TestCase):
"""Check EMBOSS seqret against Bio.SeqIO for converting files."""
def tearDown(self) :
clean_up()
def check_SeqIO_to_EMBOSS(self, in_filename, in_format, skip_formats=[],
alphabet=None) :
"""Can Bio.SeqIO write files seqret can read back?"""
if alphabet :
records = list(SeqIO.parse(open(in_filename), in_format, alphabet))
else :
records = list(SeqIO.parse(open(in_filename), in_format))
for temp_format in ["genbank","fasta"] :
if temp_format in skip_formats :
continue
new_records = list(emboss_piped_SeqIO_convert(records, temp_format, "fasta"))
try :
self.assert_(compare_records(records, new_records))
except ValueError, err :
raise ValueError("Disagree on file %s %s in %s format: %s" \
% (in_format, in_filename, temp_format, err))
def check_EMBOSS_to_SeqIO(self, filename, old_format,
skip_formats=[]) :
"""Can Bio.SeqIO read seqret's conversion of the file?"""
#TODO: Why can't we read EMBOSS's swiss output?
self.assert_(os.path.isfile(filename))
old_records = list(SeqIO.parse(open(filename), old_format))
for new_format in ["genbank","fasta","pir","embl", "ig"] :
if new_format in skip_formats :
continue
handle = emboss_convert(filename, old_format, new_format)
new_records = list(SeqIO.parse(handle, new_format))
try :
self.assert_(compare_records(old_records, new_records))
except ValueError, err:
raise ValueError("Disagree on %s file %s in %s format: %s" \
% (old_format, filename, new_format, err))
def check_SeqIO_with_EMBOSS(self, filename, old_format, skip_formats=[],
alphabet=None):
#Check EMBOSS can read Bio.SeqIO output...
self.check_SeqIO_to_EMBOSS(filename, old_format, skip_formats,
alphabet)
#Check Bio.SeqIO can read EMBOSS seqret output...
self.check_EMBOSS_to_SeqIO(filename, old_format, skip_formats)
def test_genbank(self) :
"""SeqIO & EMBOSS reading each other's conversions of a GenBank file."""
self.check_SeqIO_with_EMBOSS("GenBank/cor6_6.gb", "genbank")
def test_genbank2(self) :
"""SeqIO & EMBOSS reading each other's conversions of another GenBank file."""
self.check_SeqIO_with_EMBOSS("GenBank/NC_000932.gb", "genbank")
def test_embl(self) :
"""SeqIO & EMBOSS reading each other's conversions of an EMBL file."""
self.check_SeqIO_with_EMBOSS("EMBL/U87107.embl", "embl")
def test_ig(self) :
"""SeqIO & EMBOSS reading each other's conversions of an ig file."""
self.check_SeqIO_to_EMBOSS("IntelliGenetics/VIF_mase-pro.txt", "ig",
alphabet=generic_protein)
#TODO - What does a % in an ig sequence mean?
#e.g. "IntelliGenetics/vpu_nucaligned.txt"
#and "IntelliGenetics/TAT_mase_nuc.txt"
#EMBOSS seems to ignore them.
def test_pir(self) :
"""SeqIO & EMBOSS reading each other's conversions of a PIR file."""
#Skip genbank here, EMBOSS mangles the LOCUS line:
self.check_SeqIO_with_EMBOSS("NBRF/clustalw.pir", "pir",
skip_formats=["genbank"])
#Skip EMBL here, EMBOSS mangles the ID line
#Skip GenBank, EMBOSS 6.0.1 on Windows won't output proteins as GenBank
self.check_SeqIO_with_EMBOSS("NBRF/DMB_prot.pir", "pir",
skip_formats=["embl","genbank"])
def test_clustalw(self) :
"""SeqIO & EMBOSS reading each other's conversions of a Clustalw file."""
self.check_SeqIO_with_EMBOSS("Clustalw/hedgehog.aln", "clustal",
skip_formats=["embl","genbank"])
self.check_SeqIO_with_EMBOSS("Clustalw/opuntia.aln", "clustal",
skip_formats=["embl","genbank"])
class SeqRetAlignIOTests(unittest.TestCase):
"""Check EMBOSS seqret against Bio.SeqIO for converting files."""
def tearDown(self) :
clean_up()
def check_EMBOSS_to_AlignIO(self, filename, old_format,
skip_formats=[]) :
"""Can AlignIO read seqret's conversion of the file?"""
self.assert_(os.path.isfile(filename), filename)
old_aligns = list(AlignIO.parse(open(filename), old_format))
formats = ["clustal", "phylip", "ig"]
if len(old_aligns) == 1 :
formats.extend(["fasta","nexus"])
for new_format in formats :
if new_format in skip_formats :
continue
handle = emboss_convert(filename, old_format, new_format)
try :
new_aligns = list(AlignIO.parse(handle, new_format))
except :
raise ValueError("Can't parse %s file %s in %s format." \
% (old_format, filename, new_format))
try :
self.assert_(compare_alignments(old_aligns, new_aligns))
except ValueError, err :
raise ValueError("Disagree on %s file %s in %s format: %s" \
% (old_format, filename, new_format, err))
def check_AlignIO_to_EMBOSS(self, in_filename, in_format, skip_formats=[],
alphabet=None) :
"""Can Bio.AlignIO write files seqret can read back?"""
if alphabet :
old_aligns = list(AlignIO.parse(open(in_filename), in_format,
alphabet))
else :
old_aligns = list(AlignIO.parse(open(in_filename), in_format))
formats = ["clustal", "phylip"]
if len(old_aligns) == 1 :
formats.extend(["fasta","nexus"])
for temp_format in formats :
if temp_format in skip_formats :
continue
#PHYLIP is a simple format which explicitly supports
#multiple alignments (unlike FASTA).
try :
new_aligns = list(emboss_piped_AlignIO_convert(old_aligns,
temp_format,
"phylip"))
except ValueError, e :
#e.g. ValueError: Need a DNA, RNA or Protein alphabet
#from writing Nexus files...
continue
try :
self.assert_(compare_alignments(old_aligns, new_aligns))
except ValueError, err :
raise ValueError("Disagree on file %s %s in %s format: %s" \
% (in_format, in_filename, temp_format, err))
def check_AlignIO_with_EMBOSS(self, filename, old_format, skip_formats=[],
alphabet=None):
#Check EMBOSS can read Bio.AlignIO output...
self.check_AlignIO_to_EMBOSS(filename, old_format, skip_formats,
alphabet)
#Check Bio.AlignIO can read EMBOSS seqret output...
self.check_EMBOSS_to_AlignIO(filename, old_format, skip_formats)
def test_align_clustalw(self) :
"""AlignIO & EMBOSS reading each other's conversions of a ClustalW file."""
self.check_AlignIO_with_EMBOSS("Clustalw/hedgehog.aln", "clustal")
self.check_AlignIO_with_EMBOSS("Clustalw/opuntia.aln", "clustal")
self.check_AlignIO_with_EMBOSS("Clustalw/odd_consensus.aln", "clustal",
skip_formats=["nexus"]) #TODO - why not nexus?
self.check_AlignIO_with_EMBOSS("Clustalw/protein.aln", "clustal")
self.check_AlignIO_with_EMBOSS("Clustalw/promals3d.aln", "clustal")
def test_clustalw(self) :
"""AlignIO & EMBOSS reading each other's conversions of a PHYLIP file."""
self.check_AlignIO_with_EMBOSS("Phylip/horses.phy", "phylip")
self.check_AlignIO_with_EMBOSS("Phylip/hennigian.phy", "phylip")
self.check_AlignIO_with_EMBOSS("Phylip/reference_dna.phy", "phylip")
self.check_AlignIO_with_EMBOSS("Phylip/reference_dna2.phy", "phylip")
self.check_AlignIO_with_EMBOSS("Phylip/interlaced.phy", "phylip")
self.check_AlignIO_with_EMBOSS("Phylip/interlaced2.phy", "phylip")
self.check_AlignIO_with_EMBOSS("Phylip/random.phy", "phylip")
class PairwiseAlignmentTests(unittest.TestCase):
"""Run pairwise alignments with water and needle, and parse them."""
def tearDown(self) :
clean_up()
def pairwise_alignment_check(self, query_seq,
targets, alignments,
local=True) :
"""Check pairwise alignment data is sane."""
#The datasets should be small, so making iterators into lists is OK
targets = list(targets)
alignments = list(alignments)
self.assertEqual(len(targets), len(alignments))
for target, alignment in zip(targets, alignments) :
self.assertEqual(len(alignment), 2)
#self.assertEqual(target.id, alignment[1].id) #too strict
if alignment[1].id not in target.id \
and alignment[1].id not in target.name :
raise AssertionError("%s vs %s or %s" \
% (alignment[1].id , target.id, target.name))
if local :
#Local alignment
self.assert_(str(alignment[0].seq).replace("-","") \
in query_seq)
self.assert_(str(alignment[1].seq).replace("-","").upper() \
in str(target.seq).upper())
else :
#Global alignment
self.assertEqual(str(query_seq), str(alignment[0].seq).replace("-",""))
self.assertEqual(str(target.seq).upper(), \
str(alignment[1].seq).replace("-","").upper())
return True
def test_water_file(self):
"""water with the asis trick, output to a file."""
#Setup, try a mixture of keyword arguments and later additions:
cline = WaterCommandline(cmd=exes["water"],
gapopen="10", gapextend="0.5")
#Try using both human readable names, and the literal ones:
cline.set_parameter("asequence", "asis:ACCCGGGCGCGGT")
cline.set_parameter("-bsequence", "asis:ACCCGAGCGCGGT")
#Try using a property set here:
cline.outfile = "Emboss/temp with space.water"
self.assertEqual(str(eval(repr(cline))), str(cline))
#Run the tool,
result, out, err = generic_run(cline)
#Check it worked,
errors = err.read().strip()
self.assert_(errors.startswith("Smith-Waterman local alignment"), errors)
self.assertEqual(out.read().strip(), "")
if result.return_code != 0 : print >> sys.stderr, "\n%s"%cline
self.assertEqual(result.return_code, 0)
filename = result.get_result("outfile")
self.assertEqual(filename, "Emboss/temp with space.water")
assert os.path.isfile(filename)
#Check we can parse the output...
align = AlignIO.read(open(filename),"emboss")
self.assertEqual(len(align), 2)
self.assertEqual(str(align[0].seq), "ACCCGGGCGCGGT")
self.assertEqual(str(align[1].seq), "ACCCGAGCGCGGT")
#Clean up,
os.remove(filename)
def test_water_piped(self):
"""water with asis trick, output piped to stdout."""
cline = WaterCommandline(cmd=exes["water"],
asequence="asis:ACCCGGGCGCGGT",
bsequence="asis:ACCCGAGCGCGGT",
gapopen=10,
gapextend=0.5,
auto=True, filter=True)
self.assertEqual(str(cline),
exes["water"] + " -auto -filter" \
+ " -asequence=asis:ACCCGGGCGCGGT" \
+ " -bsequence=asis:ACCCGAGCGCGGT" \
+ " -gapopen=10 -gapextend=0.5")
#Run the tool,
child = subprocess.Popen(str(cline),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=(sys.platform!="win32"))
child.stdin.close()
        #Check we could read its output
align = AlignIO.read(child.stdout, "emboss")
self.assertEqual(len(align), 2)
self.assertEqual(str(align[0].seq), "ACCCGGGCGCGGT")
self.assertEqual(str(align[1].seq), "ACCCGAGCGCGGT")
#Check no error output:
assert child.stderr.read() == ""
assert 0 == child.wait()
def test_needle_file(self):
"""needle with the asis trick, output to a file."""
#Setup,
cline = NeedleCommandline(cmd=exes["needle"])
cline.set_parameter("-asequence", "asis:ACCCGGGCGCGGT")
cline.set_parameter("-bsequence", "asis:ACCCGAGCGCGGT")
cline.set_parameter("-gapopen", "10")
cline.set_parameter("-gapextend", "0.5")
#EMBOSS would guess this, but let's be explicit:
cline.set_parameter("-snucleotide", "True")
cline.set_parameter("-outfile", "Emboss/temp with space.needle")
self.assertEqual(str(eval(repr(cline))), str(cline))
#Run the tool,
result, out, err = generic_run(cline)
#Check it worked,
errors = err.read().strip()
self.assert_(errors.startswith("Needleman-Wunsch global alignment"), errors)
self.assertEqual(out.read().strip(), "")
if result.return_code != 0 : print >> sys.stderr, "\n%s"%cline
self.assertEqual(result.return_code, 0)
filename = result.get_result("outfile")
self.assertEqual(filename, "Emboss/temp with space.needle")
assert os.path.isfile(filename)
#Check we can parse the output...
align = AlignIO.read(open(filename),"emboss")
self.assertEqual(len(align), 2)
self.assertEqual(str(align[0].seq), "ACCCGGGCGCGGT")
self.assertEqual(str(align[1].seq), "ACCCGAGCGCGGT")
#Clean up,
os.remove(filename)
def test_needle_piped(self):
"""needle with asis trick, output piped to stdout."""
cline = NeedleCommandline(cmd=exes["needle"],
asequence="asis:ACCCGGGCGCGGT",
bsequence="asis:ACCCGAGCGCGGT",
gapopen=10,
gapextend=0.5,
auto=True, filter=True)
self.assertEqual(str(cline),
exes["needle"] + " -auto -filter" \
+ " -asequence=asis:ACCCGGGCGCGGT" \
+ " -bsequence=asis:ACCCGAGCGCGGT" \
+ " -gapopen=10 -gapextend=0.5")
#Run the tool,
child = subprocess.Popen(str(cline),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=(sys.platform!="win32"))
child.stdin.close()
        #Check we could read its output
align = AlignIO.read(child.stdout, "emboss")
self.assertEqual(len(align), 2)
self.assertEqual(str(align[0].seq), "ACCCGGGCGCGGT")
self.assertEqual(str(align[1].seq), "ACCCGAGCGCGGT")
#Check no error output:
assert child.stderr.read() == ""
assert 0 == child.wait()
def test_water_file2(self):
"""water with the asis trick and nucleotide FASTA file, output to a file."""
#Setup,
query = "ACACACTCACACACACTTGGTCAGAGATGCTGTGCTTCTTGGAAGCAAGGNCTCAAAGGCAAGGTGCACGCAGAGGGACGTTTGAGTCTGGGATGAAGCATGTNCGTATTATTTATATGATGGAATTTCACGTTTTTATG"
out_file = "Emboss/temp_test2.water"
in_file = "Fasta/f002"
self.assert_(os.path.isfile(in_file))
if os.path.isfile(out_file) :
os.remove(out_file)
cline = WaterCommandline(cmd=exes["water"])
cline.set_parameter("-asequence", "asis:%s" % query)
cline.set_parameter("-bsequence", in_file)
cline.set_parameter("-gapopen", "10")
cline.set_parameter("-gapextend", "0.5")
cline.set_parameter("-outfile", out_file)
self.assertEqual(str(eval(repr(cline))), str(cline))
#Run the tool,
result, out, err = generic_run(cline)
#Check it worked,
errors = err.read().strip()
self.assert_(errors.startswith("Smith-Waterman local alignment"), errors)
self.assertEqual(out.read().strip(), "")
if result.return_code != 0 : print >> sys.stderr, "\n%s"%cline
self.assertEqual(result.return_code, 0)
self.assertEqual(result.get_result("outfile"), out_file)
assert os.path.isfile(out_file)
#Check we can parse the output and it is sensible...
self.pairwise_alignment_check(query,
SeqIO.parse(open(in_file),"fasta"),
AlignIO.parse(open(out_file),"emboss"),
local=True)
#Clean up,
os.remove(out_file)
def test_water_file3(self):
"""water with the asis trick and GenBank file, output to a file."""
#Setup,
query = "TGTTGTAATGTTTTAATGTTTCTTCTCCCTTTAGATGTACTACGTTTGGA"
out_file = "Emboss/temp_test3.water"
in_file = "GenBank/cor6_6.gb"
self.assert_(os.path.isfile(in_file))
if os.path.isfile(out_file) :
os.remove(out_file)
cline = WaterCommandline(cmd=exes["water"])
cline.set_parameter("asequence", "asis:%s" % query)
cline.set_parameter("bsequence", in_file)
#TODO - Tell water this is a GenBank file!
cline.set_parameter("gapopen", "1")
cline.set_parameter("gapextend", "0.5")
cline.set_parameter("outfile", out_file)
self.assertEqual(str(eval(repr(cline))), str(cline))
#Run the tool,
result, out, err = generic_run(cline)
#Check it worked,
errors = err.read().strip()
self.assert_(errors.startswith("Smith-Waterman local alignment"), errors)
self.assertEqual(out.read().strip(), "")
if result.return_code != 0 : print >> sys.stderr, "\n%s"%cline
self.assertEqual(result.return_code, 0)
self.assertEqual(result.get_result("outfile"), out_file)
assert os.path.isfile(out_file)
#Check we can parse the output and it is sensible...
self.pairwise_alignment_check(query,
SeqIO.parse(open(in_file),"genbank"),
AlignIO.parse(open(out_file),"emboss"),
local=True)
#Clean up,
os.remove(out_file)
def test_water_file4(self):
"""water with the asis trick and SwissProt file, output to a file."""
#Setup,
query = "DVCTGKALCDPVTQNIKTYPVKIENLRVMI"
out_file = "Emboss/temp_test4.water"
in_file = "SwissProt/sp004"
self.assert_(os.path.isfile(in_file))
if os.path.isfile(out_file) :
os.remove(out_file)
cline = WaterCommandline(cmd=exes["water"])
cline.set_parameter("-asequence", "asis:%s" % query)
cline.set_parameter("-bsequence", in_file)
#EMBOSS should work this out, but let's be explicit:
cline.set_parameter("-sprotein", True)
#TODO - Tell water this is a SwissProt file!
cline.set_parameter("-gapopen", "20")
cline.set_parameter("-gapextend", "5")
cline.set_parameter("-outfile", out_file)
self.assertEqual(str(eval(repr(cline))), str(cline))
#Run the tool,
result, out, err = generic_run(cline)
#Check it worked,
errors = err.read().strip()
self.assert_(errors.startswith("Smith-Waterman local alignment"), errors)
self.assertEqual(out.read().strip(), "")
if result.return_code != 0 : print >> sys.stderr, "\n%s"%cline
self.assertEqual(result.return_code, 0)
#Should be able to access this via any alias:
self.assertEqual(result.get_result("-outfile"), out_file)
assert os.path.isfile(out_file)
#Check we can parse the output and it is sensible...
self.pairwise_alignment_check(query,
SeqIO.parse(open(in_file),"swiss"),
AlignIO.parse(open(out_file),"emboss"),
local=True)
#Clean up,
os.remove(out_file)
def test_needle_piped2(self):
"""needle with asis trick, and nucleotide FASTA file, output piped to stdout."""
#TODO - Support needle in Bio.Emboss.Applications
#(ideally with the -auto and -filter arguments)
#Setup,
query = "ACACACTCACACACACTTGGTCAGAGATGCTGTGCTTCTTGGAA"
cline = exes["needle"]
cline += " -asequence asis:" + query
cline += " -bsequence Fasta/f002"
cline += " -auto" #no prompting
cline += " -filter" #use stdout
#Run the tool,
child = subprocess.Popen(str(cline),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=(sys.platform!="win32"))
child.stdin.close()
#Check we can parse the output and it is sensible...
self.pairwise_alignment_check(query,
SeqIO.parse(open("Fasta/f002"),"fasta"),
AlignIO.parse(child.stdout,"emboss"),
local=False)
#Check no error output:
assert child.stderr.read() == ""
assert 0 == child.wait()
def test_water_needs_output(self):
"""water without output file or stdout/filter should give error."""
cline = WaterCommandline(cmd=exes["water"],
asequence="asis:ACCCGGGCGCGGT",
bsequence="asis:ACCCGAGCGCGGT",
gapopen=10,
gapextend=0.5,
auto=True)
self.assert_(cline.auto)
self.assert_(not cline.stdout)
self.assert_(not cline.filter)
self.assertEqual(cline.outfile, None)
self.assertRaises(ValueError, str, cline)
def test_needle_needs_output(self):
"""needle without output file or stdout/filter should give error."""
cline = NeedleCommandline(cmd=exes["needle"],
asequence="asis:ACCCGGGCGCGGT",
bsequence="asis:ACCCGAGCGCGGT",
gapopen=10,
gapextend=0.5,
auto=True)
self.assert_(cline.auto)
self.assert_(not cline.stdout)
self.assert_(not cline.filter)
self.assertEqual(cline.outfile, None)
self.assertRaises(ValueError, str, cline)
#Top level function as this makes it easier to use for debugging:
def emboss_translate(sequence, table=None, frame=None) :
"""Call transeq, returns protein sequence as string."""
#TODO - Support transeq in Bio.Emboss.Applications?
#(doesn't seem worthwhile as Biopython can do translations)
if not sequence :
raise ValueError(sequence)
#Setup,
cline = exes["transeq"]
if len(sequence) < 100 :
filename = None
cline += " -sequence asis:%s" % sequence
else :
#There are limits on command line string lengths...
#use a temp file instead.
filename = "Emboss/temp_transeq.txt"
handle = open(filename,"w")
SeqIO.write([SeqRecord(sequence, id="Test")], handle, "fasta")
handle.flush()
handle.close()
cline += " -sequence %s" % filename
cline += " -auto" #no prompting
cline += " -filter" #use stdout
if table is not None:
cline += " -table %s" % str(table)
if frame is not None:
cline += " -frame %s" % str(frame)
#Run the tool,
child = subprocess.Popen(str(cline),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=(sys.platform!="win32"))
child.stdin.close()
#Check no error output:
err = child.stderr.read()
if err != "" :
raise ValueError(str(cline) + "\n" + err)
    #Check we could read its output
record = SeqIO.read(child.stdout, "fasta")
if 0 != child.wait() :
raise ValueError(str(cline))
if filename :
os.remove(filename)
if not record.id.startswith("Test") :
raise ValueError(str(cline))
else :
if not record.id.startswith("asis") :
raise ValueError(str(cline))
return str(record.seq)
#Top level function as this makes it easier to use for debugging:
def check_translation(sequence, translation, table=None) :
if table is None :
t = 1
else :
t = table
if translation != str(sequence.translate(t)) \
or translation != str(translate(sequence,t)) \
or translation != translate(str(sequence),t) :
#More details...
for i, amino in enumerate(translation) :
codon = sequence[i*3:i*3+3]
if amino != str(codon.translate(t)) :
raise ValueError("%s -> %s not %s (table %s)" \
% (codon, amino, codon.translate(t), t))
#Shouldn't reach this line:
raise ValueError("%s -> %s (table %s)" \
% (sequence, translation, t))
return True
class TranslationTests(unittest.TestCase):
"""Run pairwise alignments with water and needle, and parse them."""
def tearDown(self) :
clean_up()
def test_simple(self) :
"""transeq vs Bio.Seq for simple translations (including alt tables)."""
examples = [Seq("ACGTGACTGACGTAGCATGCCACTAGG"),
                    #Unambiguous TA? codons:
Seq("TAATACTATTAG", generic_dna),
#Most of the ambiguous TA? codons:
Seq("TANTARTAYTAMTAKTAHTABTADTAV", generic_dna),
#Problem cases,
#
#Seq("TAW", generic_dna),
#W = A or T, but EMBOSS does TAW -> X
#TAA -> Y, TAT ->Y, so in Biopython TAW -> Y
#
#Seq("TAS", generic_dna),
#S = C or G, but EMBOSS does TAS -> Y
#TAG -> *, TAC ->Y, so in Biopython TAS -> X (Y or *)
#
#Seq("AAS", generic_dna),
#On table 9, EMBOSS gives N, we give X.
#S = C or G, so according to my reading of
#table 9 on the NCBI page, AAC=N, AAG=K
#suggesting this is a bug in EMBOSS.
#
Seq("ACGGGGGGGGTAAGTGGTGTGTGTGTAGT", generic_dna),
]
for sequence in examples :
#EMBOSS treats spare residues differently... avoid this issue
if len(sequence) % 3 != 0 :
sequence = sequence[:-(len(sequence)%3)]
self.assertEqual(len(sequence) % 3, 0)
self.assert_(len(sequence) > 0)
self.check(sequence)
def check(self, sequence) :
"""Compare our translation to EMBOSS's using all tables.
        Takes a Seq object."""
translation = emboss_translate(sequence)
self.assert_(check_translation(sequence, translation))
for table in [1,2,3,4,5,6,9,10,11,12,13,14,15] :
translation = emboss_translate(sequence, table)
self.assert_(check_translation(sequence, translation, table))
return True
def translate_all_codons(self, letters) :
sequence = Seq("".join([c1+c3+c3 \
for c1 in letters \
for c2 in letters \
for c3 in letters]),
generic_nucleotide)
self.check(sequence)
#def test_all_ambig_dna_codons(self) :
# """transeq vs Bio.Seq on ambiguous DNA codons (inc. alt tables)."""
# self.translate_all_codons(ambiguous_dna_letters)
def test_all_unambig_dna_codons(self) :
"""transeq vs Bio.Seq on unambiguous DNA codons (inc. alt tables)."""
self.translate_all_codons("ATCGatcg")
def test_all_unambig_rna_codons(self) :
"""transeq vs Bio.Seq on unambiguous RNA codons (inc. alt tables)."""
self.translate_all_codons("AUCGaucg")
def test_mixed_unambig_rna_codons(self) :
"""transeq vs Bio.Seq on unambiguous DNA/RNA codons (inc. alt tables)."""
self.translate_all_codons("ATUCGatucg")
def clean_up() :
"""Fallback clean up method to remove temp files."""
for filename in os.listdir("Emboss") :
if filename.startswith("temp_") :
try :
                os.remove(os.path.join("Emboss", filename))
except :
pass
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity = 2)
unittest.main(testRunner=runner)
clean_up()
|
[] |
[] |
[
"EMBOSS_ROOT"
] |
[]
|
["EMBOSS_ROOT"]
|
python
| 1 | 0 | |
model_zoo/official/nlp/bert/run_squad_equivalence.py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''
Bert finetune and evaluation script.
'''
import argparse
import collections
import json
import os
import mindspore.common.dtype as mstype
import mindspore.ops.operations.kungfu_comm_ops as kfops
from kungfu.python.elastic import create_tf_records
from mindspore import context
from mindspore import log as logger
from mindspore._c_expression import kungfu_nccl_finalize, kungfu_nccl_init
from mindspore.common import set_seed
from mindspore.common.tensor import Tensor
from mindspore.nn.optim import AdamWeightDecay, Lamb, Momentum
from mindspore.nn.wrap.loss_scale import DynamicLossScaleUpdateCell
from mindspore.train.callback import (CheckpointConfig, ModelCheckpoint,
SummaryCollector, TimeMonitor)
from mindspore.train.model import Model
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from src.bert_for_finetune import BertSquad, BertSquadCell
from src.callback import (CheckpointCallback, ElasticScheduleCallback,
GlobalStepProgressCallback, KungFuSummaryCallback)
from src.dataset import create_squad_dataset
from src.elastic_state import ElasticCallback, ElasticState
from src.finetune_eval_config import bert_net_cfg, optimizer_cfg
from src.kungfu_mindspore_optimizer import KungFuLamb
from src.utils import BertLearningRate, LoadNewestCkpt, make_directory
_cur_dir = os.getcwd()
# HACK
DROPPED = 0
GLOBAL_BATCH_SIZE = 0
SEED = 1
def save_env_vars():
env_dict = {}
for k, v in os.environ.items():
env_dict[k] = v
with open("environment_variables.json", "w") as json_file:
json.dump(env_dict, json_file, indent=4)
def save_python_args(args):
arg_dict = {}
arg_var = vars(args)
for k, v in arg_var.items():
arg_dict[k] = v
with open("python_arguments.json", "w") as json_file:
json.dump(arg_dict, json_file, indent=4)
def do_train(dataset=None, network=None, load_checkpoint_path="", save_checkpoint_path="",
epoch_num=1, distributed=False):
""" do train """
if load_checkpoint_path == "":
raise ValueError("Pretrain model missed, finetune task must load pretrain model!")
# steps_per_epoch = dataset.get_dataset_size()
steps_per_epoch = 2770 # HARDCODED 88641//32
print("Dataset size {}".format(dataset.get_dataset_size()))
print("Optimiser {}".format(optimizer_cfg.optimizer))
# optimizer
if optimizer_cfg.optimizer == 'AdamWeightDecay':
lr_schedule = BertLearningRate(learning_rate=optimizer_cfg.AdamWeightDecay.learning_rate,
end_learning_rate=optimizer_cfg.AdamWeightDecay.end_learning_rate,
warmup_steps=int(steps_per_epoch * epoch_num * 0.1),
decay_steps=steps_per_epoch * epoch_num,
power=optimizer_cfg.AdamWeightDecay.power)
params = network.trainable_params()
decay_params = list(filter(optimizer_cfg.AdamWeightDecay.decay_filter, params))
other_params = list(filter(lambda x: not optimizer_cfg.AdamWeightDecay.decay_filter(x), params))
group_params = [{'params': decay_params, 'weight_decay': optimizer_cfg.AdamWeightDecay.weight_decay},
{'params': other_params, 'weight_decay': 0.0}]
optimizer = AdamWeightDecay(group_params, lr_schedule, eps=optimizer_cfg.AdamWeightDecay.eps)
elif optimizer_cfg.optimizer == 'Lamb':
print("=== LEARNING RATE ===")
print("learning rate: {}".format(optimizer_cfg.Lamb.learning_rate))
print("end learning rate: {}".format(optimizer_cfg.Lamb.end_learning_rate))
print("step per epoch: {}".format(steps_per_epoch))
print("number of epochs: {}".format(epoch_num))
warmup_steps = int(steps_per_epoch * epoch_num * 0.1)
print("warmup steps: {}".format(warmup_steps))
decay_steps = steps_per_epoch * epoch_num
print("decay steps: {}".format(decay_steps))
print("power: {}".format(optimizer_cfg.Lamb.power))
print("=== LEARNING RATE ===")
lr_schedule = BertLearningRate(learning_rate=optimizer_cfg.Lamb.learning_rate,
end_learning_rate=optimizer_cfg.Lamb.end_learning_rate,
warmup_steps=warmup_steps,
decay_steps=decay_steps,
power=optimizer_cfg.Lamb.power)
optimizer = KungFuLamb(network.trainable_params(), learning_rate=lr_schedule)
# from src.kungfu_mindspore_optimizer import KungFuLambDebug
# optimizer = KungFuLambDebug(network.trainable_params(), learning_rate=lr_schedule)
# from src.kungfu_mindspore_optimizer import KungFuLambDebugModel
# optimizer = KungFuLambDebugModel(network.trainable_params(), learning_rate=lr_schedule)
elif optimizer_cfg.optimizer == 'Momentum':
optimizer = Momentum(network.trainable_params(), learning_rate=optimizer_cfg.Momentum.learning_rate,
momentum=optimizer_cfg.Momentum.momentum)
else:
raise Exception("Optimizer not supported. support: [AdamWeightDecay, Lamb, Momentum]")
# load checkpoint into network
ckpt_config = CheckpointConfig(save_checkpoint_steps=500, keep_checkpoint_max=10)
# ckpt_config = CheckpointConfig(save_checkpoint_steps=steps_per_epoch, keep_checkpoint_max=1)
ckpoint_cb = ModelCheckpoint(prefix="squad",
directory=None if save_checkpoint_path == "" else save_checkpoint_path,
config=ckpt_config)
param_dict = load_checkpoint(load_checkpoint_path)
load_param_into_net(network, param_dict)
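    # dynamic loss scale for mixed precision: start at 2**32, halve on overflow,
    # double again after 1000 consecutive overflow-free steps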
update_cell = DynamicLossScaleUpdateCell(loss_scale_value=2**32,
scale_factor=2,
scale_window=1000)
netwithgrads = BertSquadCell(network,
optimizer=optimizer,
scale_update_cell=update_cell)
model = Model(netwithgrads)
# callbacks = [TimeMonitor(dataset.get_dataset_size()), LossCallBack(dataset.get_dataset_size()), ckpoint_cb]
callbacks = []
# Summary (loss)
if distributed:
rank = kfops.kungfu_current_rank()
summary_path = "./summary_{}.csv".format(rank)
else:
summary_path = "./summary.csv"
callbacks.append(KungFuSummaryCallback(summary_path))
# ELASTIC
max_progress = 88641
print("max_progress {}".format(max_progress))
es = ElasticState(max_progress - DROPPED, True)
callbacks.append(GlobalStepProgressCallback(model, es, GLOBAL_BATCH_SIZE))
path = "./checkpoint"
callbacks.append(CheckpointCallback(es, model, path))
# schedule = {8320: 2, 22400: 1, 32640: 2, 38400: 1, 46080: 2, 64640: 1, 75520: 2}
# schedule = {5120: 2, 12800: 1, 23680: 2, 30080: 1, 40320: 2, 67840: 1, 79360: 2}
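    # maps a progress point (global step) to a target cluster size; empty means no elastic rescaling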
schedule = {}
print("schedule {}".format(schedule))
schedule_cb = ElasticScheduleCallback(es, schedule, model)
callbacks.append(schedule_cb)
callbacks.append(ElasticCallback(es, GLOBAL_BATCH_SIZE))
model.train(100, # really high so that it does not stop too early, callback stops training
dataset,
callbacks=callbacks,
dataset_sink_mode=False)
def do_eval(dataset=None, load_checkpoint_path="", eval_batch_size=1):
""" do eval """
if load_checkpoint_path == "":
raise ValueError("Finetune model missed, evaluation task must load finetune model!")
net = BertSquad(bert_net_cfg, False, 2)
net.set_train(False)
param_dict = load_checkpoint(load_checkpoint_path)
load_param_into_net(net, param_dict)
model = Model(net)
output = []
RawResult = collections.namedtuple("RawResult", ["unique_id", "start_logits", "end_logits"])
columns_list = ["input_ids", "input_mask", "segment_ids", "unique_ids"]
for data in dataset.create_dict_iterator(num_epochs=1):
input_data = []
for i in columns_list:
input_data.append(data[i])
input_ids, input_mask, segment_ids, unique_ids = input_data
start_positions = Tensor([1], mstype.float32)
end_positions = Tensor([1], mstype.float32)
is_impossible = Tensor([1], mstype.float32)
logits = model.predict(input_ids, input_mask, segment_ids, start_positions,
end_positions, unique_ids, is_impossible)
ids = logits[0].asnumpy()
start = logits[1].asnumpy()
end = logits[2].asnumpy()
for i in range(eval_batch_size):
unique_id = int(ids[i])
start_logits = [float(x) for x in start[i].flat]
end_logits = [float(x) for x in end[i].flat]
output.append(RawResult(
unique_id=unique_id,
start_logits=start_logits,
end_logits=end_logits))
return output
def run_squad():
"""run squad task"""
parser = argparse.ArgumentParser(description="run squad")
parser.add_argument("--device_target", type=str, default="Ascend", choices=["Ascend", "GPU"],
help="Device type, default is Ascend")
parser.add_argument("--distribute", type=str, default="false", choices=["true", "false"],
help="Run distribute, default is false.")
parser.add_argument("--do_train", type=str, default="false", choices=["true", "false"],
help="Eable train, default is false")
parser.add_argument("--do_eval", type=str, default="false", choices=["true", "false"],
help="Eable eval, default is false")
parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.")
parser.add_argument("--epoch_num", type=int, default=1, help="Epoch number, default is 1.")
parser.add_argument("--num_class", type=int, default=2, help="The number of class, default is 2.")
parser.add_argument("--train_data_shuffle", type=str, default="true", choices=["true", "false"],
help="Enable train data shuffle, default is true")
parser.add_argument("--eval_data_shuffle", type=str, default="false", choices=["true", "false"],
help="Enable eval data shuffle, default is false")
parser.add_argument("--train_batch_size", type=int, default=32, help="Train batch size, default is 32")
parser.add_argument("--eval_batch_size", type=int, default=1, help="Eval batch size, default is 1")
parser.add_argument("--vocab_file_path", type=str, default="", help="Vocab file path")
parser.add_argument("--eval_json_path", type=str, default="", help="Evaluation json file path, can be eval.json")
parser.add_argument("--save_finetune_checkpoint_path", type=str, default="", help="Save checkpoint path")
parser.add_argument("--load_pretrain_checkpoint_path", type=str, default="", help="Load checkpoint file path")
parser.add_argument("--load_finetune_checkpoint_path", type=str, default="", help="Load checkpoint file path")
parser.add_argument("--train_data_file_path", type=str, default="",
help="Data path, it is better to use absolute path")
parser.add_argument("--schema_file_path", type=str, default="",
help="Schema path, it is better to use absolute path")
args_opt = parser.parse_args()
epoch_num = args_opt.epoch_num
load_pretrain_checkpoint_path = args_opt.load_pretrain_checkpoint_path
save_finetune_checkpoint_path = args_opt.save_finetune_checkpoint_path
load_finetune_checkpoint_path = args_opt.load_finetune_checkpoint_path
save_python_args(args_opt)
if args_opt.do_train.lower() == "false" and args_opt.do_eval.lower() == "false":
raise ValueError("At least one of 'do_train' or 'do_eval' must be true")
if args_opt.do_train.lower() == "true" and args_opt.train_data_file_path == "":
raise ValueError("'train_data_file_path' must be set when do finetune task")
if args_opt.do_eval.lower() == "true":
if args_opt.vocab_file_path == "":
raise ValueError("'vocab_file_path' must be set when do evaluation task")
if args_opt.eval_json_path == "":
raise ValueError("'tokenization_file_path' must be set when do evaluation task")
""" distributed """
if args_opt.distribute.lower() == "true":
distributed = True
else:
distributed = False
if distributed:
kfops.init(args_opt.device_target)
kungfu_nccl_init()
device_num = kfops.kungfu_current_cluster_size()
rank = kfops.kungfu_current_rank()
print("kungfu rank={}, size={}".format(rank, device_num))
save_finetune_checkpoint_path = os.path.join(save_finetune_checkpoint_path,
"ckpt_" + str(rank))
else:
device_num = 1
rank = 0
target = args_opt.device_target
if target == "Ascend":
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=args_opt.device_id)
elif target == "GPU":
# context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
if bert_net_cfg.compute_type != mstype.float32:
logger.warning('GPU only support fp32 temporarily, run with fp32.')
bert_net_cfg.compute_type = mstype.float32
else:
raise Exception("Target error, GPU or Ascend is supported.")
# dropout_prob is unused
netwithloss = BertSquad(bert_net_cfg, True, 2, dropout_prob=0.1)
# ELASTICITY
index_path = "/data/squad1/tf-index-1.idx.txt"
global GLOBAL_BATCH_SIZE
GLOBAL_BATCH_SIZE = args_opt.train_batch_size
print("before create_tf_records")
shard = create_tf_records(index_path, SEED, GLOBAL_BATCH_SIZE)
filenames = shard['filenames']
print("file names {}".format(filenames))
batch_size, _ = shard['batch_sizes'][0]
global DROPPED
DROPPED = shard['dropped']
if args_opt.do_train.lower() == "true":
ds = create_squad_dataset(batch_size=batch_size, repeat_count=1,
data_file_path=filenames,
schema_file_path=args_opt.schema_file_path,
do_shuffle=False)
do_train(ds, netwithloss, load_pretrain_checkpoint_path, save_finetune_checkpoint_path,
epoch_num, distributed)
if args_opt.do_eval.lower() == "true":
if save_finetune_checkpoint_path == "":
load_finetune_checkpoint_dir = _cur_dir
else:
load_finetune_checkpoint_dir = make_directory(save_finetune_checkpoint_path)
load_finetune_checkpoint_path = LoadNewestCkpt(load_finetune_checkpoint_dir,
ds.get_dataset_size(), epoch_num, "squad")
if args_opt.do_eval.lower() == "true":
from src import tokenization
from src.create_squad_data import (convert_examples_to_features,
read_squad_examples)
from src.squad_get_predictions import write_predictions
from src.squad_postprocess import SQuad_postprocess
tokenizer = tokenization.FullTokenizer(vocab_file=args_opt.vocab_file_path, do_lower_case=True)
eval_examples = read_squad_examples(args_opt.eval_json_path, False)
eval_features = convert_examples_to_features(
examples=eval_examples,
tokenizer=tokenizer,
max_seq_length=bert_net_cfg.seq_length,
doc_stride=128,
max_query_length=64,
is_training=False,
output_fn=None,
vocab_file=args_opt.vocab_file_path)
ds = create_squad_dataset(batch_size=args_opt.eval_batch_size, repeat_count=1,
data_file_path=eval_features,
schema_file_path=args_opt.schema_file_path, is_training=False,
do_shuffle=(args_opt.eval_data_shuffle.lower() == "true"))
outputs = do_eval(ds, load_finetune_checkpoint_path, args_opt.eval_batch_size)
all_predictions = write_predictions(eval_examples, eval_features, outputs, 20, 30, True)
if distributed:
output_path = "./output_{}.json".format(rank)
else:
output_path = "./output.json"
SQuad_postprocess(args_opt.eval_json_path, all_predictions, output_metrics=output_path)
kfops.finalize(args_opt.device_target)
kungfu_nccl_finalize()
if __name__ == "__main__":
save_env_vars()
set_seed(SEED)
run_squad()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
sqlite_object_browser/sob_main.py
|
#!/usr/bin/env python
import datetime
import math
import operator
import optparse
import os
import uuid
import re
import sys
import threading
import time
import webbrowser
from collections import namedtuple, OrderedDict, defaultdict
from functools import wraps
from getpass import getpass
from io import TextIOWrapper
# Py2k compat.
if sys.version_info[0] == 2:
PY2 = True
binary_types = (buffer, bytes, bytearray)
decode_handler = 'replace'
numeric = (int, long, float)
unicode_type = unicode
from StringIO import StringIO
else:
PY2 = False
binary_types = (bytes, bytearray)
decode_handler = 'backslashreplace'
numeric = (int, float)
unicode_type = str
from io import StringIO
try:
from flask import (
Flask, abort, escape, flash, jsonify, make_response, Markup, redirect,
render_template, request, session, url_for)
except ImportError:
raise RuntimeError('Unable to import flask module. Install by running '
'pip install flask')
try:
from pygments import formatters, highlight, lexers
except ImportError:
import warnings
warnings.warn('pygments library not found.', ImportWarning)
syntax_highlight = lambda data: '<pre>%s</pre>' % data
else:
def syntax_highlight(data):
if not data:
return ''
lexer = lexers.get_lexer_by_name('sql')
formatter = formatters.HtmlFormatter(linenos=False)
return highlight(data, lexer, formatter)
try:
from peewee import __version__
peewee_version = tuple([int(p) for p in __version__.split('.')])
except ImportError:
raise RuntimeError('Unable to import peewee module. Install by running '
'pip install peewee')
else:
if peewee_version <= (3, 0, 0):
raise RuntimeError('Peewee >= 3.0.0 is required. Found version %s. '
                           'Please update by running pip install --upgrade '
'peewee' % __version__)
from peewee import *
from peewee import IndexMetadata
from peewee import sqlite3
from playhouse.dataset import DataSet
from playhouse.migrate import migrate
CUR_DIR = os.path.realpath(os.path.dirname(__file__))
DEBUG = False
MAX_RESULT_SIZE = 1000
ROWS_PER_PAGE = 50
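# Maximum nesting depth when recursively rendering rows that follow foreign keys.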
MAXDEPTH = 5
SECRET_KEY = 'sqlite-database-browser-0.1.0'
app = Flask(
__name__,
static_folder=os.path.join(CUR_DIR, 'static'),
template_folder=os.path.join(CUR_DIR, 'templates'))
app.config.from_object(__name__)
dataset = None
migrator = None
class PluginContainer():
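    # Collects plugins and fans each optional hook (format_title, format_entry,
    # add_content, process_link) out to every registered plugin.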
def __init__(self):
self.plugins = []
def register(self, plugin):
self.plugins.append(plugin)
def format_title(self, title, table_name, row, is_toplevel):
for plugin in self.plugins:
if hasattr(plugin, "format_title"):
title = plugin.format_title(title, table_name, row, is_toplevel)
return title
def format_entry(self, table_name, row_dict):
repres = ", ".join("{}: {}".format(k, v) for k, v in row_dict.items())
title = "{} entry ({})".format(table_name, repres)
for plugin in self.plugins:
if hasattr(plugin, "format_entry"):
title = plugin.format_entry(title, table_name, row_dict)
if len(title) > 75:
title = title[:72] + "..."
return title
def add_content(self, table_name, row, is_toplevel):
extra_content = ""
for plugin in self.plugins:
if hasattr(plugin, "add_content"):
extra_content += plugin.add_content(table_name, row, is_toplevel)
return extra_content
def process_link(self, source_foreign_key, link_tuple, source_name, rowvalue):
for plugin in self.plugins:
if hasattr(plugin, "process_link"):
link_tuple = plugin.process_link(source_foreign_key, link_tuple, source_name, rowvalue)
return link_tuple
plugins = PluginContainer()
# Database metadata objects.
#
TriggerMetadata = namedtuple('TriggerMetadata', ('name', 'sql'))
ViewMetadata = namedtuple('ViewMetadata', ('name', 'sql'))
#
# Database helpers.
#
class SqliteDataSet(DataSet):
@property
def filename(self):
db_file = dataset._database.database
if db_file.startswith('file:'):
db_file = db_file[5:]
return os.path.realpath(db_file.rsplit('?', 1)[0])
@property
def is_readonly(self):
db_file = dataset._database.database
return db_file.endswith('?mode=ro')
@property
def base_name(self):
return os.path.basename(self.filename)
@property
def created(self):
stat = os.stat(self.filename)
return datetime.datetime.fromtimestamp(stat.st_ctime)
@property
def modified(self):
stat = os.stat(self.filename)
return datetime.datetime.fromtimestamp(stat.st_mtime)
@property
def size_on_disk(self):
stat = os.stat(self.filename)
return stat.st_size
def get_indexes(self, table):
return dataset._database.get_indexes(table)
def get_all_indexes(self):
cursor = self.query(
'SELECT name, sql FROM sqlite_master '
'WHERE type = ? ORDER BY name',
('index',))
return [IndexMetadata(row[0], row[1], None, None, None)
for row in cursor.fetchall()]
def get_columns(self, table):
return dataset._database.get_columns(table)
def get_foreign_keys(self, table):
return dataset._database.get_foreign_keys(table)
def get_primary_keys(self, table):
return dataset._database.get_primary_keys(table)
def get_triggers(self, table):
cursor = self.query(
'SELECT name, sql FROM sqlite_master '
'WHERE type = ? AND tbl_name = ?',
('trigger', table))
return [TriggerMetadata(*row) for row in cursor.fetchall()]
def get_all_triggers(self):
cursor = self.query(
'SELECT name, sql FROM sqlite_master '
'WHERE type = ? ORDER BY name',
('trigger',))
return [TriggerMetadata(*row) for row in cursor.fetchall()]
def get_all_views(self):
cursor = self.query(
'SELECT name, sql FROM sqlite_master '
'WHERE type = ? ORDER BY name',
('view',))
return [ViewMetadata(*row) for row in cursor.fetchall()]
def get_virtual_tables(self):
cursor = self.query(
'SELECT name FROM sqlite_master '
'WHERE type = ? AND sql LIKE ? '
'ORDER BY name',
('table', 'CREATE VIRTUAL TABLE%'))
return set([row[0] for row in cursor.fetchall()])
def get_corollary_virtual_tables(self):
virtual_tables = self.get_virtual_tables()
suffixes = ['content', 'docsize', 'segdir', 'segments', 'stat']
return set(
'%s_%s' % (virtual_table, suffix) for suffix in suffixes
for virtual_table in virtual_tables)
#
# Flask views.
#
@app.route('/')
def index():
return render_template('index.html', sqlite=sqlite3)
@app.route('/login/', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
if request.form.get('password') == app.config['PASSWORD']:
session['authorized'] = True
return redirect(session.get('next_url') or url_for('index'))
flash('The password you entered is incorrect.', 'danger')
return render_template('login.html')
@app.route('/logout/', methods=['GET'])
def logout():
session.pop('authorized', None)
return redirect(url_for('login'))
def require_table(fn):
@wraps(fn)
def inner(table, *args, **kwargs):
if table not in dataset.tables:
abort(404)
return fn(table, *args, **kwargs)
return inner
@app.route('/create-table/', methods=['POST'])
def table_create():
table = (request.form.get('table_name') or '').strip()
if not table:
flash('Table name is required.', 'danger')
return redirect(request.form.get('redirect') or url_for('index'))
dataset[table]
return redirect(url_for('table_import', table=table))
@app.route('/<table>/')
@require_table
def table_structure(table):
ds_table = dataset[table]
model_class = ds_table.model_class
table_sql = dataset.query(
'SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = ?',
[table, 'table']).fetchone()[0]
return render_template(
'table_structure.html',
columns=dataset.get_columns(table),
ds_table=ds_table,
foreign_keys=dataset.get_foreign_keys(table),
indexes=dataset.get_indexes(table),
model_class=model_class,
table=table,
table_sql=table_sql,
triggers=dataset.get_triggers(table))
def get_request_data():
if request.method == 'POST':
return request.form
return request.args
@app.route('/<table>/add-column/', methods=['GET', 'POST'])
@require_table
def add_column(table):
column_mapping = OrderedDict((
('VARCHAR', CharField),
('TEXT', TextField),
('INTEGER', IntegerField),
('REAL', FloatField),
('BOOL', BooleanField),
('BLOB', BlobField),
('DATETIME', DateTimeField),
('DATE', DateField),
('TIME', TimeField),
('DECIMAL', DecimalField)))
request_data = get_request_data()
col_type = request_data.get('type')
name = request_data.get('name', '')
if request.method == 'POST':
if name and col_type in column_mapping:
migrate(
migrator.add_column(
table,
name,
column_mapping[col_type](null=True)))
flash('Column "%s" was added successfully!' % name, 'success')
dataset.update_cache(table)
return redirect(url_for('table_structure', table=table))
else:
flash('Name and column type are required.', 'danger')
return render_template(
'add_column.html',
col_type=col_type,
column_mapping=column_mapping,
name=name,
table=table)
@app.route('/<table>/drop-column/', methods=['GET', 'POST'])
@require_table
def drop_column(table):
request_data = get_request_data()
name = request_data.get('name', '')
columns = dataset.get_columns(table)
column_names = [column.name for column in columns]
if request.method == 'POST':
if name in column_names:
migrate(migrator.drop_column(table, name))
flash('Column "%s" was dropped successfully!' % name, 'success')
dataset.update_cache(table)
return redirect(url_for('table_structure', table=table))
else:
flash('Name is required.', 'danger')
return render_template(
'drop_column.html',
columns=columns,
column_names=column_names,
name=name,
table=table)
@app.route('/<table>/rename-column/', methods=['GET', 'POST'])
@require_table
def rename_column(table):
request_data = get_request_data()
rename = request_data.get('rename', '')
rename_to = request_data.get('rename_to', '')
columns = dataset.get_columns(table)
column_names = [column.name for column in columns]
if request.method == 'POST':
if (rename in column_names) and (rename_to not in column_names):
migrate(migrator.rename_column(table, rename, rename_to))
flash('Column "%s" was renamed successfully!' % rename, 'success')
dataset.update_cache(table)
return redirect(url_for('table_structure', table=table))
else:
flash('Column name is required and cannot conflict with an '
'existing column\'s name.', 'danger')
return render_template(
'rename_column.html',
columns=columns,
column_names=column_names,
rename=rename,
rename_to=rename_to,
table=table)
@app.route('/<table>/add-index/', methods=['GET', 'POST'])
@require_table
def add_index(table):
request_data = get_request_data()
indexed_columns = request_data.getlist('indexed_columns')
unique = bool(request_data.get('unique'))
columns = dataset.get_columns(table)
if request.method == 'POST':
if indexed_columns:
migrate(
migrator.add_index(
table,
indexed_columns,
unique))
flash('Index created successfully.', 'success')
return redirect(url_for('table_structure', table=table))
else:
flash('One or more columns must be selected.', 'danger')
return render_template(
'add_index.html',
columns=columns,
indexed_columns=indexed_columns,
table=table,
unique=unique)
@app.route('/<table>/drop-index/', methods=['GET', 'POST'])
@require_table
def drop_index(table):
request_data = get_request_data()
name = request_data.get('name', '')
indexes = dataset.get_indexes(table)
index_names = [index.name for index in indexes]
if request.method == 'POST':
if name in index_names:
migrate(migrator.drop_index(table, name))
flash('Index "%s" was dropped successfully!' % name, 'success')
return redirect(url_for('table_structure', table=table))
else:
flash('Index name is required.', 'danger')
return render_template(
'drop_index.html',
indexes=indexes,
index_names=index_names,
name=name,
table=table)
@app.route('/<table>/drop-trigger/', methods=['GET', 'POST'])
@require_table
def drop_trigger(table):
request_data = get_request_data()
name = request_data.get('name', '')
triggers = dataset.get_triggers(table)
trigger_names = [trigger.name for trigger in triggers]
if request.method == 'POST':
if name in trigger_names:
dataset.query('DROP TRIGGER "%s";' % name)
flash('Trigger "%s" was dropped successfully!' % name, 'success')
return redirect(url_for('table_structure', table=table))
else:
flash('Trigger name is required.', 'danger')
return render_template(
'drop_trigger.html',
triggers=triggers,
trigger_names=trigger_names,
name=name,
table=table)
@app.route('/<table>/content/')
@require_table
def table_content(table):
page_number = request.args.get('page') or ''
page_number = int(page_number) if page_number.isdigit() else 1
dataset.update_cache(table)
ds_table = dataset[table]
total_rows = ds_table.all().count()
rows_per_page = app.config['ROWS_PER_PAGE']
total_pages = int(math.ceil(total_rows / float(rows_per_page)))
# Restrict bounds.
page_number = min(page_number, total_pages)
page_number = max(page_number, 1)
previous_page = page_number - 1 if page_number > 1 else None
next_page = page_number + 1 if page_number < total_pages else None
query = ds_table.all().paginate(page_number, rows_per_page)
ordering = request.args.get('ordering')
if ordering:
field = ds_table.model_class._meta.columns[ordering.lstrip('-')]
if ordering.startswith('-'):
field = field.desc()
query = query.order_by(field)
field_names = ds_table.columns
columns = [f.column_name for f in ds_table.model_class._meta.sorted_fields]
table_sql = dataset.query(
'SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = ?',
[table, 'table']).fetchone()[0]
print("Rendering table plain")
return render_template(
'table_content.html',
columns=columns,
ds_table=ds_table,
field_names=field_names,
next_page=next_page,
ordering=ordering,
page=page_number,
previous_page=previous_page,
query=query,
table=table,
total_pages=total_pages,
total_rows=total_rows,
)
def get_foreign_key_lookup(table_name):
foreign_keys = dataset.get_foreign_keys(table_name)
return {f.column: f for f in foreign_keys}
def get_renderer(foreignkey_lookup, field_names, suppress, is_outermost_level, table_name, i):
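    # Returns a closure that renders a single row to HTML: reverse relations
    # ("*_rel" attributes) become links, and foreign-key fields are rendered
    # recursively up to MAXDEPTH levels deep.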
def renderer(row, template_name="rowplus.html"):
outrow = {}
if row is None:
return ""
links = defaultdict(list)
# Let's get all relations:
for attr in dir(row):
if attr.endswith("_rel"):
relation = getattr(row, attr)
print(type(relation))
source_table = relation.model.__name__
_, _, source_name = attr.partition(source_table)
source_name = source_name.rpartition("_")[0][1:]
print("ST", source_table, "SN", source_name)
print("===")
try:
source_foreign_key = get_foreign_key_lookup(source_table)[source_name]
except KeyError as e:
print("Relation unresolved", attr, e, get_foreign_key_lookup(source_table))
pass
else:
rowvalue = getattr(row, source_foreign_key.dest_column)
query = url_for("table_content_plus", table=source_table,
filters="{}:{}".format(source_name, rowvalue))
link_tuple = (source_table, query)
link_tuple = plugins.process_link(source_foreign_key, link_tuple, source_name, rowvalue)
links[source_foreign_key.dest_column].append(link_tuple)
filtered_field_names = [fn for fn in field_names if fn not in suppress]
for field in filtered_field_names:
value = getattr(row, field)
if hasattr(value, "validate_model"): # A Model
inner_row = getattr(row, field)
inner_table_name = type(inner_row).__name__
target_table = dataset[inner_table_name]
foreignkey_lookup_inner = get_foreign_key_lookup(inner_table_name)
if i < app.config['MAXDEPTH']:
outrow[field] = "{}: ".format(inner_row) + get_renderer(foreignkey_lookup_inner,
target_table.columns,
[foreignkey_lookup[field].dest_column],
is_outermost_level=False,
table_name=inner_table_name,
i=i + 1)(inner_row)
else:
print(foreignkey_lookup)
f = "{}:{}".format(foreignkey_lookup[field].dest_column,
getattr(inner_row, foreignkey_lookup[field].dest_column))
link = url_for("table_content_plus", table=inner_table_name, filters=f)
outrow[field] = "{} ".format(inner_row) + render_template("row_truncated.html", link=link)
else:
outrow[field] = value_filter(value)
title = plugins.format_title(table_name, table_name, row, is_outermost_level)
f = []
primary_keys = dataset.get_primary_keys(table_name)
if not primary_keys:
primary_keys = field_names
for field in primary_keys:
f.append("{}:{}".format(field, getattr(row, field)))
main_link = url_for("table_content_plus", table=table_name, filters=",".join(f))
extra_content = plugins.add_content(table_name, row, is_outermost_level)
return render_template(template_name, field_names=filtered_field_names, row=outrow, links=links,
is_outermost_level=is_outermost_level, col_id=str(uuid.uuid4()).replace("-", "_"),
title=title, main_link=main_link, extra_content=extra_content)
return renderer
def get_insert_renderer(foreignkey_lookup, field_names, table_class, generation=0, identifier="0"):
def renderer():
input_attribs = defaultdict(dict)
choices = {}
for field in field_names:
print("table class", type(table_class))
print(dir(table_class))
field_type = getattr(table_class, field).field_type
print("FIELD TYPE", field, repr(field_type))
input_attribs[field]["_SKIP__"] = False
if field_type == "AUTO":
input_attribs[field]["__SKIP__"]=True
elif field_type == "INT":
input_attribs[field]["type"] = "number"
elif field_type == "DECIMAL":
input_attribs[field]["type"] = "number"
input_attribs[field]["step"] = "0.001"
else:
input_attribs[field]["type"] = "text"
if field in foreignkey_lookup:
dest_table = dataset[foreignkey_lookup[field].dest_table]
rows = dest_table.all()
choices[field] = [(row[foreignkey_lookup[field].dest_column],
plugins.format_entry(foreignkey_lookup[field].dest_table,
row)) for row in rows]
foreignkey_lookup_inner = get_foreign_key_lookup(foreignkey_lookup[field].dest_table)
if generation < app.config['MAXDEPTH']:
inner_identifier=str(uuid.uuid4()).replace("-", "_").replace("%", "_")
choices[field].append(("NEW", get_insert_renderer(foreignkey_lookup_inner,
dest_table.columns,
dest_table.model_class,
generation=generation + 1,
identifier=inner_identifier
)(), inner_identifier))
prefix = "{}%{}%{}".format(table_class.__name__, generation, identifier)
main_link = url_for("table_content_plus", table=table_class.__name__, page=10000000)
return render_template("insert_row.html", field_names=field_names, choices=choices,
prefix=prefix, types=input_attribs, main_link=main_link,
table_name=table_class.__name__, is_outermost_level=generation==0,
col_id=str(uuid.uuid4()).replace("-", "_") if identifier=="0" else identifier)
return renderer
def insert_elements(request):
to_write = defaultdict(dict)
for k in request.form:
table, _, rest = k.partition("%")
generation, _, rest = rest.partition("%")
identifier, _, field = rest.partition("%")
to_write[identifier, table][field] = request.form[k]
print(to_write)
# "0" is the topmost entry
insert_for_identifier("0", to_write)
def insert_for_identifier(identifier, to_write):
for key in to_write:
if key[0]==identifier:
break
else:
assert False
print("INSERTING FOR IDENTIFIER {}".format(identifier))
identifier, table_name = key
dataset.update_cache(table_name)
ds_table = dataset[table_name]
table_class = ds_table.model_class
for field, value in to_write[key].items():
if value.startswith("NEW"):
inner_identifier = value.lstrip("NEW")
to_write[key][field] = insert_for_identifier(inner_identifier, to_write)
query = table_class.insert(**to_write[key])
print(query, type(query))
inserted_id = query.execute()
print("RETURNED", inserted_id)
dataset.update_cache(table_name)
return inserted_id
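# Illustrative sketch (not part of the original application): how a posted form
# key produced by get_insert_renderer() decomposes inside insert_elements().
# The key below is hypothetical; it follows the "<table>%<generation>%<identifier>%<field>"
# prefix that get_insert_renderer() builds above.
def _demo_parse_insert_key(key="Book%0%0%title"):
    table, _, rest = key.partition("%")
    generation, _, rest = rest.partition("%")
    identifier, _, field = rest.partition("%")
    # For the example key this returns ('Book', '0', '0', 'title'); identifier "0"
    # marks the topmost row, while nested "NEW" rows carry uuid-based identifiers.
    return table, generation, identifier, field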
@app.route('/<table>/recursive_content/', methods=["GET", "POST"])
@require_table
def table_content_plus(table):
if request.method == 'POST':
insert_elements(request)
page_number = request.args.get('page') or ''
page_number = int(page_number) if page_number.isdigit() else 1
dataset.update_cache(table)
ds_table = dataset[table]
field_names = ds_table.columns
query = ds_table.model_class.select()
header_stri = []
filters = request.args.get('filters')
if filters:
for filterstri in filters.split(","):
try:
colname, _, value = filterstri.partition(":")
except:
print("Skipping filter ", filterstri)
continue
if colname not in field_names:
print("Invalid filter", colname)
continue
query = query.where(getattr(ds_table.model_class, colname) == value)
header_stri.append("{}=='{}'".format(colname, value))
total_rows = query.count()
print(total_rows, "Rows")
rows_per_page = app.config['ROWS_PER_PAGE']
total_pages = int(math.ceil(total_rows / float(rows_per_page)))
# Restrict bounds.
page_number = min(page_number, total_pages)
page_number = max(page_number, 1)
previous_page = page_number - 1 if page_number > 1 else None
next_page = page_number + 1 if page_number < total_pages else None
foreign_keys = dataset.get_foreign_keys(table)
foreignkey_lookup = {f.column: f for f in foreign_keys}
for col, foreign_key in foreignkey_lookup.items():
query.prefetch(dataset[foreign_key.dest_table].model_class)
ordering = request.args.get('ordering')
if ordering:
field = ds_table.model_class._meta.columns[ordering.lstrip('-')]
if ordering.startswith('-'):
field = field.desc()
query = query.order_by(field)
print(query, type(query))
query = query.paginate(page_number, rows_per_page)
print(query, type(query))
columns = [f.column_name for f in ds_table.model_class._meta.sorted_fields]
# table_sql = dataset.query(
# 'SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = ?',
# [table, 'table']).fetchone()[0]
return render_template(
'table_content_plus.html',
title=" & ".join(header_stri),
filters=filters,
columns=columns,
ds_table=ds_table,
field_names=field_names,
next_page=next_page,
ordering=ordering,
page=page_number,
previous_page=previous_page,
query=query,
table=table,
total_pages=total_pages,
total_rows=total_rows,
renderrow=get_renderer(foreignkey_lookup, field_names, [], True, table_name=table, i=0),
insert_row=get_insert_renderer(foreignkey_lookup, field_names, ds_table.model_class)
)
@app.route('/<table>/query/', methods=['GET', 'POST'])
@require_table
def table_query(table):
data = []
data_description = error = row_count = sql = None
if request.method == 'POST':
sql = request.form['sql']
if 'export_json' in request.form:
return export(table, sql, 'json')
elif 'export_csv' in request.form:
return export(table, sql, 'csv')
try:
cursor = dataset.query(sql)
except Exception as exc:
error = str(exc)
else:
data = cursor.fetchall()[:app.config['MAX_RESULT_SIZE']]
data_description = cursor.description
row_count = cursor.rowcount
else:
if request.args.get('sql'):
sql = request.args.get('sql')
else:
sql = 'SELECT *\nFROM "%s"' % (table)
table_sql = dataset.query(
'SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = ?',
[table, 'table']).fetchone()[0]
return render_template(
'table_query.html',
data=data,
data_description=data_description,
error=error,
query_images=get_query_images(),
row_count=row_count,
sql=sql,
table=table,
table_sql=table_sql)
@app.route('/table-definition/', methods=['POST'])
def set_table_definition_preference():
key = 'show'
show = False
if request.form.get(key) and request.form.get(key) != 'false':
session[key] = show = True
elif key in session:
del session[key]
return jsonify({key: show})
def export(table, sql, export_format):
model_class = dataset[table].model_class
query = model_class.raw(sql).dicts()
buf = StringIO()
if export_format == 'json':
kwargs = {'indent': 2}
filename = '%s-export.json' % table
mimetype = 'text/javascript'
else:
kwargs = {}
filename = '%s-export.csv' % table
mimetype = 'text/csv'
dataset.freeze(query, export_format, file_obj=buf, **kwargs)
response_data = buf.getvalue()
response = make_response(response_data)
response.headers['Content-Length'] = len(response_data)
response.headers['Content-Type'] = mimetype
response.headers['Content-Disposition'] = 'attachment; filename=%s' % (
filename)
response.headers['Expires'] = 0
response.headers['Pragma'] = 'public'
return response
@app.route('/<table>/import/', methods=['GET', 'POST'])
@require_table
def table_import(table):
count = None
request_data = get_request_data()
strict = bool(request_data.get('strict'))
if request.method == 'POST':
file_obj = request.files.get('file')
if not file_obj:
flash('Please select an import file.', 'danger')
elif not file_obj.filename.lower().endswith(('.csv', '.json')):
flash('Unsupported file-type. Must be a .json or .csv file.',
'danger')
else:
if file_obj.filename.lower().endswith('.json'):
format = 'json'
else:
format = 'csv'
# Here we need to translate the file stream. Werkzeug uses a
# spooled temporary file opened in wb+ mode, which is not
# compatible with Python's CSV module. We'd need to reach pretty
# far into Flask's internals to modify this behavior, so instead
# we'll just translate the stream into utf8-decoded unicode.
if not PY2:
try:
stream = TextIOWrapper(file_obj, encoding='utf8')
except AttributeError:
# The SpooledTemporaryFile used by werkzeug does not
# implement an API that the TextIOWrapper expects, so we'll
# just consume the whole damn thing and decode it.
# Fixed in werkzeug 0.15.
stream = StringIO(file_obj.read().decode('utf8'))
else:
stream = file_obj.stream
try:
with dataset.transaction():
count = dataset.thaw(
table,
format=format,
file_obj=stream,
strict=strict)
except Exception as exc:
flash('Error importing file: %s' % exc, 'danger')
else:
flash(
'Successfully imported %s objects from %s.' % (
count, file_obj.filename),
'success')
return redirect(url_for('table_content', table=table))
return render_template(
'table_import.html',
count=count,
strict=strict,
table=table)
@app.route('/<table>/drop/', methods=['GET', 'POST'])
@require_table
def drop_table(table):
if request.method == 'POST':
model_class = dataset[table].model_class
model_class.drop_table()
dataset.update_cache() # Update all tables.
flash('Table "%s" dropped successfully.' % table, 'success')
return redirect(url_for('index'))
return render_template('drop_table.html', table=table)
@app.template_filter('format_index')
def format_index(index_sql):
split_regex = re.compile(r'\bon\b', re.I)
if not split_regex.search(index_sql):
return index_sql
create, definition = split_regex.split(index_sql)
return '\nON '.join((create.strip(), definition.strip()))
@app.template_filter('value_filter')
def value_filter(value, max_length=50):
if isinstance(value, numeric):
return value
if isinstance(value, binary_types):
if not isinstance(value, (bytes, bytearray)):
value = bytes(value) # Handle `buffer` type.
value = value.decode('utf-8', decode_handler)
if isinstance(value, unicode_type):
value = escape(value)
if len(value) > max_length:
return ('<span class="truncated">%s</span> '
'<span class="full" style="display:none;">%s</span>'
'<a class="toggle-value" href="#">...</a>') % (
value[:max_length],
value)
return value
@app.template_filter('value_filter_plus')
def value_filter_plus(value, max_length=50):
if isinstance(value, numeric):
return value
if isinstance(value, binary_types):
if not isinstance(value, (bytes, bytearray)):
value = bytes(value) # Handle `buffer` type.
value = value.decode('utf-8', decode_handler)
if isinstance(value, unicode_type):
value = escape(value)
if len(value) > max_length:
return ('<span class="truncated">%s</span> '
'<span class="full" style="display:none;">%s</span>'
'<a class="toggle-value" href="#">...</a>') % (
value[:max_length],
value)
return value
column_re = re.compile(r'(.+?)\((.+)\)', re.S)
column_split_re = re.compile(r'(?:[^,(]|\([^)]*\))+')
def _format_create_table(sql):
create_table, column_list = column_re.search(sql).groups()
columns = [' %s' % column.strip()
for column in column_split_re.findall(column_list)
if column.strip()]
return '%s (\n%s\n)' % (
create_table,
',\n'.join(columns))
@app.template_filter()
def format_create_table(sql):
try:
return _format_create_table(sql)
except:
return sql
@app.template_filter('highlight')
def highlight_filter(data):
return Markup(syntax_highlight(data))
def get_query_images():
accum = []
image_dir = os.path.join(app.static_folder, 'img')
if not os.path.exists(image_dir):
return accum
for filename in sorted(os.listdir(image_dir)):
basename = os.path.splitext(os.path.basename(filename))[0]
parts = basename.split('-')
accum.append((parts, 'img/' + filename))
return accum
#
# Flask application helpers.
#
@app.context_processor
def _general():
return {
'dataset': dataset,
'login_required': bool(app.config.get('PASSWORD')),
}
@app.context_processor
def _now():
return {'now': datetime.datetime.now()}
@app.before_request
def _connect_db():
dataset.connect()
@app.teardown_request
def _close_db(exc):
if not dataset._database.is_closed():
dataset.close()
class PrefixMiddleware(object):
def __init__(self, app, prefix):
self.app = app
self.prefix = '/%s' % prefix.strip('/')
self.prefix_len = len(self.prefix)
def __call__(self, environ, start_response):
if environ['PATH_INFO'].startswith(self.prefix):
environ['PATH_INFO'] = environ['PATH_INFO'][self.prefix_len:]
environ['SCRIPT_NAME'] = self.prefix
return self.app(environ, start_response)
else:
start_response('404 Not Found', [('Content-Type', 'text/plain')])
return ['URL does not match application prefix.'.encode()]
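# Illustrative sketch (not part of the original application): how PrefixMiddleware
# splits a request path into SCRIPT_NAME / PATH_INFO when the app is mounted under
# a URL prefix. The environ values below are hypothetical.
def _demo_prefix_middleware():
    captured = {}

    def fake_app(environ, start_response):
        # Record what the wrapped WSGI application would see.
        captured['path_info'] = environ['PATH_INFO']
        captured['script_name'] = environ['SCRIPT_NAME']
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'ok']

    wrapped = PrefixMiddleware(fake_app, prefix='sqlite')
    environ = {'PATH_INFO': '/sqlite/mytable/content/', 'SCRIPT_NAME': ''}
    wrapped(environ, lambda status, headers: None)
    # captured == {'path_info': '/mytable/content/', 'script_name': '/sqlite'}
    return captured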
#
# Script options.
#
def get_option_parser():
parser = optparse.OptionParser()
parser.add_option(
'-p',
'--port',
default=8080,
help='Port for web interface, default=8080',
type='int')
parser.add_option(
'-H',
'--host',
default='127.0.0.1',
help='Host for web interface, default=127.0.0.1')
parser.add_option(
'-d',
'--debug',
action='store_true',
help='Run server in debug mode')
parser.add_option(
'-x',
'--no-browser',
action='store_false',
default=True,
dest='browser',
help='Do not automatically open browser page.')
parser.add_option(
'-P',
'--password',
action='store_true',
dest='prompt_password',
help='Prompt for password to access database browser.')
parser.add_option(
'-r',
'--read-only',
action='store_true',
dest='read_only',
help='Open database in read-only mode.')
parser.add_option(
'-u',
'--url-prefix',
dest='url_prefix',
help='URL prefix for application.')
parser.add_option(
'-c',
'--customization',
dest='customization',
help='Folder with a file plugin.py.')
return parser
def die(msg, exit_code=1):
sys.stderr.write('%s\n' % msg)
sys.stderr.flush()
sys.exit(exit_code)
def open_browser_tab(host, port):
url = 'http://%s:%s/' % (host, port)
def _open_tab(url):
time.sleep(1.5)
webbrowser.open_new_tab(url)
thread = threading.Thread(target=_open_tab, args=(url,))
thread.daemon = True
thread.start()
def install_auth_handler(password):
app.config['PASSWORD'] = password
@app.before_request
def check_password():
if not session.get('authorized') and request.path != '/login/' and \
not request.path.startswith(('/static/', '/favicon')):
flash('You must log-in to view the database browser.', 'danger')
session['next_url'] = request.base_url
return redirect(url_for('login'))
def load_customization_plugin(folder):
import importlib.util
spec = importlib.util.spec_from_file_location("customization_plugin", os.path.join(folder, "plugin.py"))
customizer = importlib.util.module_from_spec(spec)
spec.loader.exec_module(customizer)
plugin = customizer.Plugin()
plugins.register(plugin)
def initialize_app(filename, read_only=False, password=None, url_prefix=None):
global dataset
global migrator
if password:
install_auth_handler(password)
if read_only:
if sys.version_info < (3, 4, 0):
die('Python 3.4.0 or newer is required for read-only access.')
if peewee_version < (3, 5, 1):
die('Peewee 3.5.1 or newer is required for read-only access.')
db = SqliteDatabase('file:%s?mode=ro' % filename, uri=True)
try:
db.connect()
except OperationalError:
die('Unable to open database file in read-only mode. Ensure that '
'the database exists in order to use read-only mode.')
db.close()
dataset = SqliteDataSet(db, bare_fields=True)
else:
dataset = SqliteDataSet('sqlite:///%s' % filename, bare_fields=True)
if url_prefix:
app.wsgi_app = PrefixMiddleware(app.wsgi_app, prefix=url_prefix)
migrator = dataset._migrator
dataset.close()
def main():
# This function exists to act as a console script entry-point.
parser = get_option_parser()
options, args = parser.parse_args()
if not args:
die('Error: missing required path to database file.')
password = None
if options.prompt_password:
if os.environ.get('SQLITE_WEB_PASSWORD'):
password = os.environ['SQLITE_WEB_PASSWORD']
else:
while True:
password = getpass('Enter password: ')
password_confirm = getpass('Confirm password: ')
if password != password_confirm:
print('Passwords did not match!')
else:
break
# Initialize the dataset instance and (optionally) authentication handler.
initialize_app(args[0], options.read_only, password, options.url_prefix)
if options.browser:
open_browser_tab(options.host, options.port)
if options.customization:
load_customization_plugin(options.customization)
app.run(host=options.host, port=options.port, debug=options.debug)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"SQLITE_WEB_PASSWORD"
] |
[]
|
["SQLITE_WEB_PASSWORD"]
|
python
| 1 | 0 | |
modin/experimental/cloud/base.py
|
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
from typing import NamedTuple
import os
import sys
class ClusterError(Exception):
"""
Generic cluster operating exception
"""
def __init__(self, *args, cause: BaseException = None, traceback: str = None, **kw):
self.cause = cause
self.traceback = traceback
super().__init__(*args, **kw)
def __str__(self):
if self.cause:
return f"cause: {self.cause}\n{super().__str__()}"
return super().__str__()
class CannotSpawnCluster(ClusterError):
"""
Raised when cluster cannot be spawned in the cloud
"""
class CannotDestroyCluster(ClusterError):
"""
Raised when cluster cannot be destroyed in the cloud
"""
class ConnectionDetails(NamedTuple):
user_name: str = "modin"
key_file: str = None
address: str = None
port: int = 22
_EXT = (".exe", ".com", ".cmd", ".bat", "") if sys.platform == "win32" else ("",)
def _which(prog):
for entry in os.environ["PATH"].split(os.pathsep):
for ext in _EXT:
path = os.path.join(entry, prog + ext)
if os.access(path, os.X_OK):
return path
return None
def _get_ssh_proxy_command():
socks_proxy = os.environ.get("MODIN_SOCKS_PROXY", None)
if socks_proxy is None:
return None
if _which("nc"):
return f"nc -x {socks_proxy} %h %p"
elif _which("connect"):
return f"connect -S {socks_proxy} %h %p"
raise ClusterError(
"SSH through proxy required but no supported proxying tools found"
)
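# Illustrative sketch (not part of the original module): resolving the optional
# SSH SOCKS proxy command. The proxy address below is hypothetical; the result
# depends on whether "nc" or "connect" is available on PATH.
def _demo_proxy_command():
    os.environ.setdefault("MODIN_SOCKS_PROXY", "localhost:1080")
    try:
        # e.g. "nc -x localhost:1080 %h %p" when netcat is installed
        return _get_ssh_proxy_command()
    except ClusterError:
        return None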
|
[] |
[] |
[
"MODIN_SOCKS_PROXY",
"PATH"
] |
[]
|
["MODIN_SOCKS_PROXY", "PATH"]
|
python
| 2 | 0 | |
turtlebot3_gazebo/launch/turtlebot3_dqn_stage1.launch.py
|
#!/usr/bin/env python3
#
# Copyright 2019 ROBOTIS CO., LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Ryan Shim
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import ThisLaunchFileDir
from launch.actions import ExecuteProcess
from launch.substitutions import LaunchConfiguration
TURTLEBOT3_MODEL = os.environ['TURTLEBOT3_MODEL']
def generate_launch_description():
use_sim_time = LaunchConfiguration('use_sim_time', default='true')
world_file_name = 'turtlebot3_dqn_stage1/' + TURTLEBOT3_MODEL + '.model'
world = os.path.join(get_package_share_directory('turtlebot3_gazebo'), 'worlds', world_file_name)
launch_file_dir = os.path.join(get_package_share_directory('turtlebot3_gazebo'), 'launch')
return LaunchDescription([
ExecuteProcess(
cmd=['gazebo', '--verbose', world, '-s', 'libgazebo_ros_init.so', '-s', 'libgazebo_ros_factory.so'],
output='screen'),
IncludeLaunchDescription(
PythonLaunchDescriptionSource([launch_file_dir, '/robot_state_publisher.launch.py']),
launch_arguments={'use_sim_time': use_sim_time}.items(),
),
])
|
[] |
[] |
[
"TURTLEBOT3_MODEL"
] |
[]
|
["TURTLEBOT3_MODEL"]
|
python
| 1 | 0 | |
spinesTS/utils/_set_seed.py
|
import os
import numpy as np
import random
def seed_everything(seed=None):
import tensorflow as tf
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
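# Illustrative usage sketch (not part of the original module): seed every RNG
# source once at program start so python-random, NumPy and TensorFlow draws are
# reproducible. Note that PYTHONHASHSEED only affects interpreters started after
# the variable is set, so it mainly matters for spawned subprocesses.
def _demo_reproducible_draw(seed=42):
    seed_everything(seed)
    return np.random.rand(3)  # identical vector on every call with the same seed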
|
[] |
[] |
[
"PYTHONHASHSEED"
] |
[]
|
["PYTHONHASHSEED"]
|
python
| 1 | 0 | |
numpy/distutils/ccompiler_opt.py
|
"""Provides the `CCompilerOpt` class, used for handling the CPU/hardware
optimization, starting from parsing the command arguments, to managing the
relation between the CPU baseline and dispatch-able features,
also generating the required C headers and ending with compiling
the sources with proper compiler's flags.
`CCompilerOpt` doesn't provide runtime detection for the CPU features,
instead only focuses on the compiler side, but it creates abstract C headers
that can be used later for the final runtime dispatching process."""
import atexit
import inspect
import os
import pprint
import re
import subprocess
import textwrap
class _Config:
"""An abstract class holds all configurable attributes of `CCompilerOpt`,
these class attributes can be used to change the default behavior
of `CCompilerOpt` in order to fit other requirements.
Attributes
----------
conf_nocache : bool
Set True to disable memory and file cache.
Default is False.
conf_noopt : bool
Set True to force the optimization to be disabled;
in this case `CCompilerOpt` still generates all the
expected headers in order not to break the build.
Default is False.
conf_cache_factors : list
Add extra factors to the primary caching factors. The caching factors
are used to determine whether any changes have happened that
require discarding the cache and rebuilding it. The primary factors
are the arguments of `CCompilerOpt` and `CCompiler`'s properties (type, flags, etc).
Default is list of two items, containing the time of last modification
of `ccompiler_opt` and value of attribute "conf_noopt"
conf_tmp_path : str,
The path of temporary directory. Default is auto-created
temporary directory via ``tempfile.mkdtemp()``.
conf_check_path : str
The path of the testing files. Each added CPU feature must have a
**C** source file that contains at least one intrinsic or instruction
related to this feature, so it can be tested against the compiler.
Default is ``./distutils/checks``.
conf_target_groups : dict
Extra tokens that can be reached from dispatch-able sources through
the special mark ``@targets``. Default is an empty dictionary.
**Notes**:
- case-insensitive for tokens and group names
- the sign '#' must be at the beginning of the group name and is only valid within ``@targets``
**Example**:
.. code-block:: console
$ "@targets #avx_group other_tokens" > group_inside.c
>>> CCompilerOpt.conf_target_groups["avx_group"] = \\
"$werror $maxopt avx2 avx512f avx512_skx"
>>> cco = CCompilerOpt(cc_instance)
>>> cco.try_dispatch(["group_inside.c"])
conf_c_prefix : str
The prefix of public C definitions. Default is ``"NPY_"``.
conf_c_prefix_ : str
The prefix of internal C definitions. Default is ``"NPY__"``.
conf_cc_flags : dict
Nested dictionaries defining several compiler flags
that are linked to some major functions; the main key
represents the compiler name and the sub-keys represent
flag names. The default already covers all supported
**C** compilers.
Sub-keys explained as follows:
"native": str or None
used by the argument option `native` to detect what the current
machine supports via the compiler.
"werror": str or None
used to treat warnings as errors when testing CPU features
against the compiler, and also for the target policy `$werror`
in dispatch-able sources.
"maxopt": str or None
used for the target policy '$maxopt'; the value should
contain the maximum acceptable optimization level for the compiler,
e.g. `'-O3'` for gcc.
**Notes**:
* case-sensitive for compiler names and flags
* use space to separate multiple flags
* every flag is tested against the compiler and skipped
if it's not applicable.
conf_min_features : dict
A dictionary that defines the CPU features used for the
argument option `'min'`; the key represents the CPU architecture
name, e.g. `'x86'`. The default values provide a best effort
across a wide range of user platforms.
**Note**: case-sensitive for architecture names.
conf_features : dict
Nested dictionaries used for identifying the CPU features;
the primary key is a feature name, or a group name
that gathers several features. The default values cover all
supported features but leave out the major options like "flags";
those undefined options are filled in by the method `conf_features_partial()`.
The default value covers almost all CPU features for *X86*, *IBM/Power64*
and *ARM 7/8*.
Sub-keys explained as follows:
"implies" : str or list, optional,
List of CPU feature names to be implied by it,
the feature name must be defined within `conf_features`.
Default is None.
"flags": str or list, optional
List of compiler flags. Default is None.
"detect": str or list, optional
List of CPU feature names that are required to be detected
at runtime. By default, it is the feature name, or the features
in "group" if that is specified.
"implies_detect": bool, optional
If True, all "detect" of implied features will be combined.
Default is True. see `feature_detect()`.
"group": str or list, optional
Same as "implies" but doesn't require the feature name to be
defined within `conf_features`.
"interest": int, required
a key for sorting CPU features
"headers": str or list, optional
intrinsics C header file
"disable": str, optional
force-disable the feature; the string value should contain the
reason for disabling it.
"autovec": bool or None, optional
True or False to declare that CPU feature can be auto-vectorized
by the compiler.
By default (None), it is treated as True if the feature contains at
least one applicable flag; see `feature_can_autovec()`.
"extra_checks": str or list, optional
Extra test case names for the CPU feature that need to be tested
against the compiler.
Each test case must have a C file named ``extra_xxxx.c``, where
``xxxx`` is the case name in lower case, under 'conf_check_path'.
It should contain at least one intrinsic or function related to the test case.
If the compiler is able to successfully compile the C file, then `CCompilerOpt`
will add a C ``#define`` for it into the main dispatch header, e.g.
``#define {conf_c_prefix}_XXXX`` where ``XXXX`` is the case name in upper case.
**NOTES**:
* space can be used as a separator with options that support "str or list"
* case-sensitive for all values, and feature names must be in upper-case.
* if flags aren't applicable, they are skipped rather than disabling the
CPU feature
* the CPU feature is disabled if the compiler fails to compile
the test file
"""
conf_nocache = False
conf_noopt = False
conf_cache_factors = None
conf_tmp_path = None
conf_check_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "checks"
)
conf_target_groups = {}
conf_c_prefix = 'NPY_'
conf_c_prefix_ = 'NPY__'
conf_cc_flags = dict(
gcc = dict(
# native should always fail on arm and ppc64,
# native usually works only with x86
native = '-march=native',
opt = '-O3',
werror = '-Werror',
),
clang = dict(
native = '-march=native',
opt = "-O3",
# One of the following flags needs to be applicable for Clang to
# guarantee the sanity of the testing process, however in certain
# cases `-Werror` gets skipped during the availability test due to
# "unused arguments" warnings.
# see https://github.com/numpy/numpy/issues/19624
werror = '-Werror=switch -Werror',
),
icc = dict(
native = '-xHost',
opt = '-O3',
werror = '-Werror',
),
iccw = dict(
native = '/QxHost',
opt = '/O3',
werror = '/Werror',
),
msvc = dict(
native = None,
opt = '/O2',
werror = '/WX',
)
)
conf_min_features = dict(
x86 = "SSE SSE2",
x64 = "SSE SSE2 SSE3",
ppc64 = '', # play it safe
ppc64le = "VSX VSX2",
s390x = '',
armhf = '', # play it safe
aarch64 = "NEON NEON_FP16 NEON_VFPV4 ASIMD"
)
conf_features = dict(
# X86
SSE = dict(
interest=1, headers="xmmintrin.h",
# enabling SSE without SSE2 is useless also
# it's non-optional for x86_64
implies="SSE2"
),
SSE2 = dict(interest=2, implies="SSE", headers="emmintrin.h"),
SSE3 = dict(interest=3, implies="SSE2", headers="pmmintrin.h"),
SSSE3 = dict(interest=4, implies="SSE3", headers="tmmintrin.h"),
SSE41 = dict(interest=5, implies="SSSE3", headers="smmintrin.h"),
POPCNT = dict(interest=6, implies="SSE41", headers="popcntintrin.h"),
SSE42 = dict(interest=7, implies="POPCNT"),
AVX = dict(
interest=8, implies="SSE42", headers="immintrin.h",
implies_detect=False
),
XOP = dict(interest=9, implies="AVX", headers="x86intrin.h"),
FMA4 = dict(interest=10, implies="AVX", headers="x86intrin.h"),
F16C = dict(interest=11, implies="AVX"),
FMA3 = dict(interest=12, implies="F16C"),
AVX2 = dict(interest=13, implies="F16C"),
AVX512F = dict(
interest=20, implies="FMA3 AVX2", implies_detect=False,
extra_checks="AVX512F_REDUCE"
),
AVX512CD = dict(interest=21, implies="AVX512F"),
AVX512_KNL = dict(
interest=40, implies="AVX512CD", group="AVX512ER AVX512PF",
detect="AVX512_KNL", implies_detect=False
),
AVX512_KNM = dict(
interest=41, implies="AVX512_KNL",
group="AVX5124FMAPS AVX5124VNNIW AVX512VPOPCNTDQ",
detect="AVX512_KNM", implies_detect=False
),
AVX512_SKX = dict(
interest=42, implies="AVX512CD", group="AVX512VL AVX512BW AVX512DQ",
detect="AVX512_SKX", implies_detect=False,
extra_checks="AVX512BW_MASK AVX512DQ_MASK"
),
AVX512_CLX = dict(
interest=43, implies="AVX512_SKX", group="AVX512VNNI",
detect="AVX512_CLX"
),
AVX512_CNL = dict(
interest=44, implies="AVX512_SKX", group="AVX512IFMA AVX512VBMI",
detect="AVX512_CNL", implies_detect=False
),
AVX512_ICL = dict(
interest=45, implies="AVX512_CLX AVX512_CNL",
group="AVX512VBMI2 AVX512BITALG AVX512VPOPCNTDQ",
detect="AVX512_ICL", implies_detect=False
),
# IBM/Power
## Power7/ISA 2.06
VSX = dict(interest=1, headers="altivec.h", extra_checks="VSX_ASM"),
## Power8/ISA 2.07
VSX2 = dict(interest=2, implies="VSX", implies_detect=False),
## Power9/ISA 3.00
VSX3 = dict(interest=3, implies="VSX2", implies_detect=False),
## Power10/ISA 3.1
VSX4 = dict(interest=4, implies="VSX3", implies_detect=False,
extra_checks="VSX4_MMA"),
# IBM/Z
## VX(z13) support
VX = dict(interest=1, headers="vecintrin.h"),
## Vector-Enhancements Facility
VXE = dict(interest=2, implies="VX", implies_detect=False),
## Vector-Enhancements Facility 2
VXE2 = dict(interest=3, implies="VXE", implies_detect=False),
# ARM
NEON = dict(interest=1, headers="arm_neon.h"),
NEON_FP16 = dict(interest=2, implies="NEON"),
## FMA
NEON_VFPV4 = dict(interest=3, implies="NEON_FP16"),
## Advanced SIMD
ASIMD = dict(interest=4, implies="NEON_FP16 NEON_VFPV4", implies_detect=False),
## ARMv8.2 half-precision & vector arithm
ASIMDHP = dict(interest=5, implies="ASIMD"),
## ARMv8.2 dot product
ASIMDDP = dict(interest=6, implies="ASIMD"),
## ARMv8.2 Single & half-precision Multiply
ASIMDFHM = dict(interest=7, implies="ASIMDHP"),
)
def conf_features_partial(self):
"""Return a dictionary of supported CPU features by the platform,
and accumulate the rest of undefined options in `conf_features`,
the returned dict has same rules and notes in
class attribute `conf_features`, also its override
any options that been set in 'conf_features'.
"""
if self.cc_noopt:
# optimization is disabled
return {}
on_x86 = self.cc_on_x86 or self.cc_on_x64
is_unix = self.cc_is_gcc or self.cc_is_clang
if on_x86 and is_unix: return dict(
SSE = dict(flags="-msse"),
SSE2 = dict(flags="-msse2"),
SSE3 = dict(flags="-msse3"),
SSSE3 = dict(flags="-mssse3"),
SSE41 = dict(flags="-msse4.1"),
POPCNT = dict(flags="-mpopcnt"),
SSE42 = dict(flags="-msse4.2"),
AVX = dict(flags="-mavx"),
F16C = dict(flags="-mf16c"),
XOP = dict(flags="-mxop"),
FMA4 = dict(flags="-mfma4"),
FMA3 = dict(flags="-mfma"),
AVX2 = dict(flags="-mavx2"),
AVX512F = dict(flags="-mavx512f"),
AVX512CD = dict(flags="-mavx512cd"),
AVX512_KNL = dict(flags="-mavx512er -mavx512pf"),
AVX512_KNM = dict(
flags="-mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq"
),
AVX512_SKX = dict(flags="-mavx512vl -mavx512bw -mavx512dq"),
AVX512_CLX = dict(flags="-mavx512vnni"),
AVX512_CNL = dict(flags="-mavx512ifma -mavx512vbmi"),
AVX512_ICL = dict(
flags="-mavx512vbmi2 -mavx512bitalg -mavx512vpopcntdq"
)
)
if on_x86 and self.cc_is_icc: return dict(
SSE = dict(flags="-msse"),
SSE2 = dict(flags="-msse2"),
SSE3 = dict(flags="-msse3"),
SSSE3 = dict(flags="-mssse3"),
SSE41 = dict(flags="-msse4.1"),
POPCNT = {},
SSE42 = dict(flags="-msse4.2"),
AVX = dict(flags="-mavx"),
F16C = {},
XOP = dict(disable="Intel Compiler doesn't support it"),
FMA4 = dict(disable="Intel Compiler doesn't support it"),
# Intel Compiler doesn't support AVX2 or FMA3 independently
FMA3 = dict(
implies="F16C AVX2", flags="-march=core-avx2"
),
AVX2 = dict(implies="FMA3", flags="-march=core-avx2"),
# Intel Compiler doesn't support AVX512F or AVX512CD independently
AVX512F = dict(
implies="AVX2 AVX512CD", flags="-march=common-avx512"
),
AVX512CD = dict(
implies="AVX2 AVX512F", flags="-march=common-avx512"
),
AVX512_KNL = dict(flags="-xKNL"),
AVX512_KNM = dict(flags="-xKNM"),
AVX512_SKX = dict(flags="-xSKYLAKE-AVX512"),
AVX512_CLX = dict(flags="-xCASCADELAKE"),
AVX512_CNL = dict(flags="-xCANNONLAKE"),
AVX512_ICL = dict(flags="-xICELAKE-CLIENT"),
)
if on_x86 and self.cc_is_iccw: return dict(
SSE = dict(flags="/arch:SSE"),
SSE2 = dict(flags="/arch:SSE2"),
SSE3 = dict(flags="/arch:SSE3"),
SSSE3 = dict(flags="/arch:SSSE3"),
SSE41 = dict(flags="/arch:SSE4.1"),
POPCNT = {},
SSE42 = dict(flags="/arch:SSE4.2"),
AVX = dict(flags="/arch:AVX"),
F16C = {},
XOP = dict(disable="Intel Compiler doesn't support it"),
FMA4 = dict(disable="Intel Compiler doesn't support it"),
# Intel Compiler doesn't support FMA3 or AVX2 independently
FMA3 = dict(
implies="F16C AVX2", flags="/arch:CORE-AVX2"
),
AVX2 = dict(
implies="FMA3", flags="/arch:CORE-AVX2"
),
# Intel Compiler doesn't support AVX512F or AVX512CD independently
AVX512F = dict(
implies="AVX2 AVX512CD", flags="/Qx:COMMON-AVX512"
),
AVX512CD = dict(
implies="AVX2 AVX512F", flags="/Qx:COMMON-AVX512"
),
AVX512_KNL = dict(flags="/Qx:KNL"),
AVX512_KNM = dict(flags="/Qx:KNM"),
AVX512_SKX = dict(flags="/Qx:SKYLAKE-AVX512"),
AVX512_CLX = dict(flags="/Qx:CASCADELAKE"),
AVX512_CNL = dict(flags="/Qx:CANNONLAKE"),
AVX512_ICL = dict(flags="/Qx:ICELAKE-CLIENT")
)
if on_x86 and self.cc_is_msvc: return dict(
SSE = dict(flags="/arch:SSE") if self.cc_on_x86 else {},
SSE2 = dict(flags="/arch:SSE2") if self.cc_on_x86 else {},
SSE3 = {},
SSSE3 = {},
SSE41 = {},
POPCNT = dict(headers="nmmintrin.h"),
SSE42 = {},
AVX = dict(flags="/arch:AVX"),
F16C = {},
XOP = dict(headers="ammintrin.h"),
FMA4 = dict(headers="ammintrin.h"),
# MSVC doesn't support FMA3 or AVX2 independently
FMA3 = dict(
implies="F16C AVX2", flags="/arch:AVX2"
),
AVX2 = dict(
implies="F16C FMA3", flags="/arch:AVX2"
),
# MSVC doesn't support AVX512F or AVX512CD independently,
# always generate instructions belong to (VL/VW/DQ)
AVX512F = dict(
implies="AVX2 AVX512CD AVX512_SKX", flags="/arch:AVX512"
),
AVX512CD = dict(
implies="AVX512F AVX512_SKX", flags="/arch:AVX512"
),
AVX512_KNL = dict(
disable="MSVC compiler doesn't support it"
),
AVX512_KNM = dict(
disable="MSVC compiler doesn't support it"
),
AVX512_SKX = dict(flags="/arch:AVX512"),
AVX512_CLX = {},
AVX512_CNL = {},
AVX512_ICL = {}
)
on_power = self.cc_on_ppc64le or self.cc_on_ppc64
if on_power:
partial = dict(
VSX = dict(
implies=("VSX2" if self.cc_on_ppc64le else ""),
flags="-mvsx"
),
VSX2 = dict(
flags="-mcpu=power8", implies_detect=False
),
VSX3 = dict(
flags="-mcpu=power9 -mtune=power9", implies_detect=False
),
VSX4 = dict(
flags="-mcpu=power10 -mtune=power10", implies_detect=False
)
)
if self.cc_is_clang:
partial["VSX"]["flags"] = "-maltivec -mvsx"
partial["VSX2"]["flags"] = "-mpower8-vector"
partial["VSX3"]["flags"] = "-mpower9-vector"
partial["VSX4"]["flags"] = "-mpower10-vector"
return partial
on_zarch = self.cc_on_s390x
if on_zarch:
partial = dict(
VX = dict(
flags="-march=arch11 -mzvector"
),
VXE = dict(
flags="-march=arch12", implies_detect=False
),
VXE2 = dict(
flags="-march=arch13", implies_detect=False
)
)
return partial
if self.cc_on_aarch64 and is_unix: return dict(
NEON = dict(
implies="NEON_FP16 NEON_VFPV4 ASIMD", autovec=True
),
NEON_FP16 = dict(
implies="NEON NEON_VFPV4 ASIMD", autovec=True
),
NEON_VFPV4 = dict(
implies="NEON NEON_FP16 ASIMD", autovec=True
),
ASIMD = dict(
implies="NEON NEON_FP16 NEON_VFPV4", autovec=True
),
ASIMDHP = dict(
flags="-march=armv8.2-a+fp16"
),
ASIMDDP = dict(
flags="-march=armv8.2-a+dotprod"
),
ASIMDFHM = dict(
flags="-march=armv8.2-a+fp16fml"
),
)
if self.cc_on_armhf and is_unix: return dict(
NEON = dict(
flags="-mfpu=neon"
),
NEON_FP16 = dict(
flags="-mfpu=neon-fp16 -mfp16-format=ieee"
),
NEON_VFPV4 = dict(
flags="-mfpu=neon-vfpv4",
),
ASIMD = dict(
flags="-mfpu=neon-fp-armv8 -march=armv8-a+simd",
),
ASIMDHP = dict(
flags="-march=armv8.2-a+fp16"
),
ASIMDDP = dict(
flags="-march=armv8.2-a+dotprod",
),
ASIMDFHM = dict(
flags="-march=armv8.2-a+fp16fml"
)
)
# TODO: ARM MSVC
return {}
def __init__(self):
if self.conf_tmp_path is None:
import shutil
import tempfile
tmp = tempfile.mkdtemp()
def rm_temp():
try:
shutil.rmtree(tmp)
except OSError:
pass
atexit.register(rm_temp)
self.conf_tmp_path = tmp
if self.conf_cache_factors is None:
self.conf_cache_factors = [
os.path.getmtime(__file__),
self.conf_nocache
]
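# Illustrative sketch (an assumption, not part of NumPy): the class attributes of
# `_Config` are meant to be overridden, e.g. by subclassing, to change the default
# behavior described in the docstring above. Nothing below is instantiated.
class _DemoConfig(_Config):
    conf_nocache = True  # disable memory and file cache
    conf_target_groups = {
        "avx_group": "$werror $maxopt avx2 avx512f avx512_skx",
    }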
class _Distutils:
"""A helper class that provides a collection of fundamental methods
implemented on top of Python and NumPy Distutils.
The idea behind this class is to gather all the methods that may
need to be overridden when reusing 'CCompilerOpt' in an environment
different from the one NumPy provides.
Parameters
----------
ccompiler : `CCompiler`
The compiler instance returned by `distutils.ccompiler.new_compiler()`.
"""
def __init__(self, ccompiler):
self._ccompiler = ccompiler
def dist_compile(self, sources, flags, ccompiler=None, **kwargs):
"""Wrap CCompiler.compile()"""
assert(isinstance(sources, list))
assert(isinstance(flags, list))
flags = kwargs.pop("extra_postargs", []) + flags
if not ccompiler:
ccompiler = self._ccompiler
return ccompiler.compile(sources, extra_postargs=flags, **kwargs)
def dist_test(self, source, flags, macros=[]):
"""Return True if 'CCompiler.compile()' able to compile
a source file with certain flags.
"""
assert(isinstance(source, str))
from distutils.errors import CompileError
cc = self._ccompiler;
bk_spawn = getattr(cc, 'spawn', None)
if bk_spawn:
cc_type = getattr(self._ccompiler, "compiler_type", "")
if cc_type in ("msvc",):
setattr(cc, 'spawn', self._dist_test_spawn_paths)
else:
setattr(cc, 'spawn', self._dist_test_spawn)
test = False
try:
self.dist_compile(
[source], flags, macros=macros, output_dir=self.conf_tmp_path
)
test = True
except CompileError as e:
self.dist_log(str(e), stderr=True)
if bk_spawn:
setattr(cc, 'spawn', bk_spawn)
return test
def dist_info(self):
"""
Return a tuple containing info about (platform, compiler, extra_args),
required by the abstract class '_CCompiler' for discovering the
platform environment. This is also used as a cache factor in order
to detect any changes happening from outside.
"""
if hasattr(self, "_dist_info"):
return self._dist_info
cc_type = getattr(self._ccompiler, "compiler_type", '')
if cc_type in ("intelem", "intelemw"):
platform = "x86_64"
elif cc_type in ("intel", "intelw", "intele"):
platform = "x86"
else:
from distutils.util import get_platform
platform = get_platform()
cc_info = getattr(self._ccompiler, "compiler", getattr(self._ccompiler, "compiler_so", ''))
if not cc_type or cc_type == "unix":
if hasattr(cc_info, "__iter__"):
compiler = cc_info[0]
else:
compiler = str(cc_info)
else:
compiler = cc_type
if hasattr(cc_info, "__iter__") and len(cc_info) > 1:
extra_args = ' '.join(cc_info[1:])
else:
extra_args = os.environ.get("CFLAGS", "")
extra_args += os.environ.get("CPPFLAGS", "")
self._dist_info = (platform, compiler, extra_args)
return self._dist_info
@staticmethod
def dist_error(*args):
"""Raise a compiler error"""
from distutils.errors import CompileError
raise CompileError(_Distutils._dist_str(*args))
@staticmethod
def dist_fatal(*args):
"""Raise a distutils error"""
from distutils.errors import DistutilsError
raise DistutilsError(_Distutils._dist_str(*args))
@staticmethod
def dist_log(*args, stderr=False):
"""Print a console message"""
from numpy.distutils import log
out = _Distutils._dist_str(*args)
if stderr:
log.warn(out)
else:
log.info(out)
@staticmethod
def dist_load_module(name, path):
"""Load a module from file, required by the abstract class '_Cache'."""
from .misc_util import exec_mod_from_location
try:
return exec_mod_from_location(name, path)
except Exception as e:
_Distutils.dist_log(e, stderr=True)
return None
@staticmethod
def _dist_str(*args):
"""Return a string to print by log and errors."""
def to_str(arg):
if not isinstance(arg, str) and hasattr(arg, '__iter__'):
ret = []
for a in arg:
ret.append(to_str(a))
return '('+ ' '.join(ret) + ')'
return str(arg)
stack = inspect.stack()[2]
start = "CCompilerOpt.%s[%d] : " % (stack.function, stack.lineno)
out = ' '.join([
to_str(a)
for a in (*args,)
])
return start + out
def _dist_test_spawn_paths(self, cmd, display=None):
"""
Fix the msvc SDK ENV path the same way distutils does;
without it we get: c1: fatal error C1356: unable to find mspdbcore.dll
"""
if not hasattr(self._ccompiler, "_paths"):
self._dist_test_spawn(cmd)
return
old_path = os.getenv("path")
try:
os.environ["path"] = self._ccompiler._paths
self._dist_test_spawn(cmd)
finally:
os.environ["path"] = old_path
_dist_warn_regex = re.compile(
# intel and msvc compilers don't raise
# fatal errors when flags are wrong or unsupported
".*("
"warning D9002|" # msvc, it should be work with any language.
"invalid argument for option" # intel
").*"
)
@staticmethod
def _dist_test_spawn(cmd, display=None):
try:
o = subprocess.check_output(cmd, stderr=subprocess.STDOUT,
universal_newlines=True)
if o and re.match(_Distutils._dist_warn_regex, o):
_Distutils.dist_error(
"Flags in command", cmd ,"aren't supported by the compiler"
", output -> \n%s" % o
)
except subprocess.CalledProcessError as exc:
o = exc.output
s = exc.returncode
except OSError as e:
o = e
s = 127
else:
return None
_Distutils.dist_error(
"Command", cmd, "failed with exit status %d output -> \n%s" % (
s, o
))
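# Illustrative check (not part of NumPy): the `_dist_warn_regex` above is meant to
# catch diagnostics like MSVC's "warning D9002" and ICC's "invalid argument for
# option", which these compilers emit instead of failing on unknown flags. The
# sample output below is hypothetical.
def _demo_warn_regex():
    sample = "cl : Command line warning D9002 : ignoring unknown option '/arch:FOO'"
    return bool(re.match(_Distutils._dist_warn_regex, sample))  # True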
_share_cache = {}
class _Cache:
"""An abstract class handles caching functionality, provides two
levels of caching, in-memory by share instances attributes among
each other and by store attributes into files.
**Note**:
any attributes that start with ``_`` or ``conf_`` will be ignored.
Parameters
----------
cache_path: str or None
The path of the cache file; if None, the file cache is disabled.
*factors:
The caching factors to use in addition to `conf_cache_factors`.
Attributes
----------
cache_private: set
Holds the attributes that need to be skipped by the "in-memory cache".
cache_infile: bool
Used while initializing this class to determine whether the cache could
be loaded from the cache path specified in 'cache_path'.
"""
# skip attributes from cache
_cache_ignore = re.compile("^(_|conf_)")
def __init__(self, cache_path=None, *factors):
self.cache_me = {}
self.cache_private = set()
self.cache_infile = False
self._cache_path = None
if self.conf_nocache:
self.dist_log("cache is disabled by `Config`")
return
self._cache_hash = self.cache_hash(*factors, *self.conf_cache_factors)
self._cache_path = cache_path
if cache_path:
if os.path.exists(cache_path):
self.dist_log("load cache from file ->", cache_path)
cache_mod = self.dist_load_module("cache", cache_path)
if not cache_mod:
self.dist_log(
"unable to load the cache file as a module",
stderr=True
)
elif not hasattr(cache_mod, "hash") or \
not hasattr(cache_mod, "data"):
self.dist_log("invalid cache file", stderr=True)
elif self._cache_hash == cache_mod.hash:
self.dist_log("hit the file cache")
for attr, val in cache_mod.data.items():
setattr(self, attr, val)
self.cache_infile = True
else:
self.dist_log("miss the file cache")
if not self.cache_infile:
other_cache = _share_cache.get(self._cache_hash)
if other_cache:
self.dist_log("hit the memory cache")
for attr, val in other_cache.__dict__.items():
if attr in other_cache.cache_private or \
re.match(self._cache_ignore, attr):
continue
setattr(self, attr, val)
_share_cache[self._cache_hash] = self
atexit.register(self.cache_flush)
def __del__(self):
for h, o in _share_cache.items():
if o == self:
_share_cache.pop(h)
break
def cache_flush(self):
"""
Force update the cache.
"""
if not self._cache_path:
return
# TODO: don't write if the cache doesn't change
self.dist_log("write cache to path ->", self._cache_path)
cdict = self.__dict__.copy()
for attr in self.__dict__.keys():
if re.match(self._cache_ignore, attr):
cdict.pop(attr)
d = os.path.dirname(self._cache_path)
if not os.path.exists(d):
os.makedirs(d)
repr_dict = pprint.pformat(cdict, compact=True)
with open(self._cache_path, "w") as f:
f.write(textwrap.dedent("""\
# AUTOGENERATED DON'T EDIT
# Please make changes to the code generator \
(distutils/ccompiler_opt.py)
hash = {}
data = \\
""").format(self._cache_hash))
f.write(repr_dict)
def cache_hash(self, *factors):
# is there a built-in non-crypto hash?
# sdbm
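# The loop below is the classic sdbm string hash
# (h = ch + (h << 6) + (h << 16) - h), folded to 32 bits by the mask so
# the value stays stable and platform-independent across runs.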
chash = 0
for f in factors:
for char in str(f):
chash = ord(char) + (chash << 6) + (chash << 16) - chash
chash &= 0xFFFFFFFF
return chash
@staticmethod
def me(cb):
"""
A static method that can be treated as a decorator to
dynamically cache certain methods.
"""
def cache_wrap_me(self, *args, **kwargs):
# good for normal args
cache_key = str((
cb.__name__, *args, *kwargs.keys(), *kwargs.values()
))
if cache_key in self.cache_me:
return self.cache_me[cache_key]
ccb = cb(self, *args, **kwargs)
self.cache_me[cache_key] = ccb
return ccb
return cache_wrap_me
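# Illustrative sketch (not part of NumPy): how the `_Cache.me` decorator memoizes a
# method per instance. It relies only on the instance exposing a `cache_me` dict,
# as `_Cache.__init__` does above; the demo class below is hypothetical.
class _DemoCached:
    def __init__(self):
        self.cache_me = {}  # the attribute `_Cache.me` stores results in
        self.calls = 0

    @_Cache.me
    def square(self, x):
        self.calls += 1
        return x * x
# e.g. d = _DemoCached(); d.square(3); d.square(3)  -> d.calls == 1, both return 9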
class _CCompiler:
"""A helper class for `CCompilerOpt` containing all utilities that
related to the fundamental compiler's functions.
Attributes
----------
cc_on_x86 : bool
True when the target architecture is 32-bit x86
cc_on_x64 : bool
True when the target architecture is 64-bit x86
cc_on_ppc64 : bool
True when the target architecture is 64-bit big-endian powerpc
cc_on_ppc64le : bool
True when the target architecture is 64-bit little-endian powerpc
cc_on_s390x : bool
True when the target architecture is IBM/ZARCH on linux
cc_on_armhf : bool
True when the target architecture is 32-bit ARMv7+
cc_on_aarch64 : bool
True when the target architecture is 64-bit Armv8-a+
cc_on_noarch : bool
True when the target architecture is unknown or not supported
cc_is_gcc : bool
True if the compiler is GNU or
if the compiler is unknown
cc_is_clang : bool
True if the compiler is Clang
cc_is_icc : bool
True if the compiler is Intel compiler (unix like)
cc_is_iccw : bool
True if the compiler is Intel compiler (msvc like)
cc_is_nocc : bool
True if the compiler isn't supported directly;
note that this causes a fall-back to gcc
cc_has_debug : bool
True if the compiler has debug flags
cc_has_native : bool
True if the compiler has native flags
cc_noopt : bool
True if the compiler has definition 'DISABLE_OPT*',
or 'cc_on_noarch' is True
cc_march : str
The target architecture name, or "unknown" if
the architecture isn't supported
cc_name : str
The compiler name, or "unknown" if the compiler isn't supported
cc_flags : dict
Dictionary containing the initialized flags of `_Config.conf_cc_flags`
"""
def __init__(self):
if hasattr(self, "cc_is_cached"):
return
# attr regex
detect_arch = (
("cc_on_x64", ".*(x|x86_|amd)64.*"),
("cc_on_x86", ".*(win32|x86|i386|i686).*"),
("cc_on_ppc64le", ".*(powerpc|ppc)64(el|le).*"),
("cc_on_ppc64", ".*(powerpc|ppc)64.*"),
("cc_on_aarch64", ".*(aarch64|arm64).*"),
("cc_on_armhf", ".*arm.*"),
("cc_on_s390x", ".*s390x.*"),
# undefined platform
("cc_on_noarch", ""),
)
detect_compiler = (
("cc_is_gcc", r".*(gcc|gnu\-g).*"),
("cc_is_clang", ".*clang.*"),
("cc_is_iccw", ".*(intelw|intelemw|iccw).*"), # intel msvc like
("cc_is_icc", ".*(intel|icc).*"), # intel unix like
("cc_is_msvc", ".*msvc.*"),
# an undefined compiler will be treated as gcc
("cc_is_nocc", ""),
)
detect_args = (
("cc_has_debug", ".*(O0|Od|ggdb|coverage|debug:full).*"),
("cc_has_native", ".*(-march=native|-xHost|/QxHost).*"),
# in case if the class run with -DNPY_DISABLE_OPTIMIZATION
("cc_noopt", ".*DISABLE_OPT.*"),
)
dist_info = self.dist_info()
platform, compiler_info, extra_args = dist_info
# set False to all attrs
for section in (detect_arch, detect_compiler, detect_args):
for attr, rgex in section:
setattr(self, attr, False)
for detect, searchin in ((detect_arch, platform), (detect_compiler, compiler_info)):
for attr, rgex in detect:
if rgex and not re.match(rgex, searchin, re.IGNORECASE):
continue
setattr(self, attr, True)
break
for attr, rgex in detect_args:
if rgex and not re.match(rgex, extra_args, re.IGNORECASE):
continue
setattr(self, attr, True)
if self.cc_on_noarch:
self.dist_log(
"unable to detect CPU architecture which lead to disable the optimization. "
f"check dist_info:<<\n{dist_info}\n>>",
stderr=True
)
self.cc_noopt = True
if self.conf_noopt:
self.dist_log("Optimization is disabled by the Config", stderr=True)
self.cc_noopt = True
if self.cc_is_nocc:
"""
mingw can be treated as gcc, and so can xlc even though it is based on clang,
since it still accepts the same gcc optimization flags.
"""
self.dist_log(
"unable to detect compiler type which leads to treating it as GCC. "
"this is a normal behavior if you're using gcc-like compiler such as MinGW or IBM/XLC."
f"check dist_info:<<\n{dist_info}\n>>",
stderr=True
)
self.cc_is_gcc = True
self.cc_march = "unknown"
for arch in ("x86", "x64", "ppc64", "ppc64le",
"armhf", "aarch64", "s390x"):
if getattr(self, "cc_on_" + arch):
self.cc_march = arch
break
self.cc_name = "unknown"
for name in ("gcc", "clang", "iccw", "icc", "msvc"):
if getattr(self, "cc_is_" + name):
self.cc_name = name
break
self.cc_flags = {}
compiler_flags = self.conf_cc_flags.get(self.cc_name)
if compiler_flags is None:
self.dist_fatal(
"undefined flag for compiler '%s', "
"leave an empty dict instead" % self.cc_name
)
for name, flags in compiler_flags.items():
self.cc_flags[name] = nflags = []
if flags:
assert(isinstance(flags, str))
flags = flags.split()
for f in flags:
if self.cc_test_flags([f]):
nflags.append(f)
self.cc_is_cached = True
@_Cache.me
def cc_test_flags(self, flags):
"""
Returns True if the compiler supports 'flags'.
"""
assert(isinstance(flags, list))
self.dist_log("testing flags", flags)
test_path = os.path.join(self.conf_check_path, "test_flags.c")
test = self.dist_test(test_path, flags)
if not test:
self.dist_log("testing failed", stderr=True)
return test
def cc_normalize_flags(self, flags):
"""
Remove the conflicts caused by gathering the implied features' flags.
Parameters
----------
'flags' list, compiler flags
flags should be sorted from the lowest to the highest interest.
Returns
-------
list, filtered from any conflicts.
Examples
--------
>>> self.cc_normalize_flags(['-march=armv8.2-a+fp16', '-march=armv8.2-a+dotprod'])
['armv8.2-a+fp16+dotprod']
>>> self.cc_normalize_flags(
['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', '-mavx', '-march=core-avx2']
)
['-march=core-avx2']
"""
assert(isinstance(flags, list))
if self.cc_is_gcc or self.cc_is_clang or self.cc_is_icc:
return self._cc_normalize_unix(flags)
if self.cc_is_msvc or self.cc_is_iccw:
return self._cc_normalize_win(flags)
return flags
_cc_normalize_unix_mrgx = re.compile(
# 1- to check the highest of
r"^(-mcpu=|-march=|-x[A-Z0-9\-])"
)
_cc_normalize_unix_frgx = re.compile(
# 2- to remove any flags starts with
# -march, -mcpu, -x(INTEL) and '-m' without '='
r"^(?!(-mcpu=|-march=|-x[A-Z0-9\-]|-m[a-z0-9\-\.]*.$))|"
# exclude:
r"(?:-mzvector)"
)
_cc_normalize_unix_krgx = re.compile(
# 3- keep only the highest of
r"^(-mfpu|-mtune)"
)
_cc_normalize_arch_ver = re.compile(
r"[0-9.]"
)
def _cc_normalize_unix(self, flags):
def ver_flags(f):
# arch ver subflag
# -march=armv8.2-a+fp16fml
tokens = f.split('+')
ver = float('0' + ''.join(
re.findall(self._cc_normalize_arch_ver, tokens[0])
))
return ver, tokens[0], tokens[1:]
if len(flags) <= 1:
return flags
# get the highest matched flag
for i, cur_flag in enumerate(reversed(flags)):
if not re.match(self._cc_normalize_unix_mrgx, cur_flag):
continue
lower_flags = flags[:-(i+1)]
upper_flags = flags[-i:]
filterd = list(filter(
self._cc_normalize_unix_frgx.search, lower_flags
))
# gather subflags
ver, arch, subflags = ver_flags(cur_flag)
if ver > 0 and len(subflags) > 0:
for xflag in lower_flags:
xver, _, xsubflags = ver_flags(xflag)
if ver == xver:
subflags = xsubflags + subflags
cur_flag = arch + '+' + '+'.join(subflags)
flags = filterd + [cur_flag]
if i > 0:
flags += upper_flags
break
# to remove overridable flags
final_flags = []
matched = set()
for f in reversed(flags):
match = re.match(self._cc_normalize_unix_krgx, f)
if not match:
pass
elif match[0] in matched:
continue
else:
matched.add(match[0])
final_flags.insert(0, f)
return final_flags
_cc_normalize_win_frgx = re.compile(
r"^(?!(/arch\:|/Qx\:))"
)
_cc_normalize_win_mrgx = re.compile(
r"^(/arch|/Qx:)"
)
def _cc_normalize_win(self, flags):
for i, f in enumerate(reversed(flags)):
if not re.match(self._cc_normalize_win_mrgx, f):
continue
i += 1
return list(filter(
self._cc_normalize_win_frgx.search, flags[:-i]
)) + flags[-i:]
return flags
class _Feature:
"""A helper class for `CCompilerOpt` that managing CPU features.
Attributes
----------
feature_supported : dict
Dictionary containing all the CPU features supported
by the platform, according to the values specified in the attribute
`_Config.conf_features` and `_Config.conf_features_partial()`
feature_min : set
The minimum set of supported CPU features, according to
the values specified in the attribute `_Config.conf_min_features`.
"""
def __init__(self):
if hasattr(self, "feature_is_cached"):
return
self.feature_supported = pfeatures = self.conf_features_partial()
for feature_name in list(pfeatures.keys()):
feature = pfeatures[feature_name]
cfeature = self.conf_features[feature_name]
feature.update({
k:v for k,v in cfeature.items() if k not in feature
})
disabled = feature.get("disable")
if disabled is not None:
pfeatures.pop(feature_name)
self.dist_log(
"feature '%s' is disabled," % feature_name,
disabled, stderr=True
)
continue
# list is used internally for these options
for option in (
"implies", "group", "detect", "headers", "flags", "extra_checks"
) :
oval = feature.get(option)
if isinstance(oval, str):
feature[option] = oval.split()
self.feature_min = set()
min_f = self.conf_min_features.get(self.cc_march, "")
for F in min_f.upper().split():
if F in self.feature_supported:
self.feature_min.add(F)
self.feature_is_cached = True
def feature_names(self, names=None, force_flags=None, macros=[]):
"""
Returns a set of CPU feature names supported by the platform and the **C** compiler.
Parameters
----------
names: sequence or None, optional
Specify certain CPU features to test against the **C** compiler.
If None (default), all currently supported features are tested.
**Note**: feature names must be in upper-case.
force_flags: list or None, optional
If None(default), default compiler flags for every CPU feature will
be used during the test.
macros : list of tuples, optional
A list of C macro definitions.
"""
assert(
names is None or (
not isinstance(names, str) and
hasattr(names, "__iter__")
)
)
assert(force_flags is None or isinstance(force_flags, list))
if names is None:
names = self.feature_supported.keys()
supported_names = set()
for f in names:
if self.feature_is_supported(
f, force_flags=force_flags, macros=macros
):
supported_names.add(f)
return supported_names
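# A hedged illustration of feature_names(): on a typical x86 toolchain the
# unrelated ARM feature is simply dropped. The exact result depends on the
# platform and compiler, so treat the values below as an assumption:
#   >>> sorted(self.feature_names(["SSE2", "AVX2", "NEON"]))
#   ['AVX2', 'SSE2']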
def feature_is_exist(self, name):
"""
Returns True if a certain feature exists and is covered within
`_Config.conf_features`.
Parameters
----------
'name': str
feature name in uppercase.
"""
assert(name.isupper())
return name in self.conf_features
def feature_sorted(self, names, reverse=False):
"""
Sort a list of CPU features in ascending order of interest (lowest first).
Parameters
----------
'names': sequence
sequence of supported feature names in uppercase.
'reverse': bool, optional
If True, the sort order is reversed (highest interest first).
Returns
-------
list, sorted CPU features
"""
def sort_cb(k):
if isinstance(k, str):
return self.feature_supported[k]["interest"]
# multiple features
rank = max([self.feature_supported[f]["interest"] for f in k])
# FIXME: that's not a safe way to increase the rank for
# multi targets
rank += len(k) -1
return rank
return sorted(names, reverse=reverse, key=sort_cb)
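# A short hedged example of feature_sorted(); the exact order follows the
# "interest" values in `_Config.conf_features`, so it is only illustrative:
#   >>> self.feature_sorted(["AVX2", "SSE2", "SSE41"])
#   ['SSE2', 'SSE41', 'AVX2']
#   >>> self.feature_sorted(["AVX2", "SSE2", "SSE41"], reverse=True)
#   ['AVX2', 'SSE41', 'SSE2']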
def feature_implies(self, names, keep_origins=False):
"""
Return a set of CPU features implied by 'names'.
Parameters
----------
names: str or sequence of str
CPU feature name(s) in uppercase.
keep_origins: bool
If False (default), the returned set will not contain any
features from 'names'. Keeping the origins only matters when
two features imply each other.
Examples
--------
>>> self.feature_implies("SSE3")
{'SSE', 'SSE2'}
>>> self.feature_implies("SSE2")
{'SSE'}
>>> self.feature_implies("SSE2", keep_origins=True)
# 'SSE2' found here since 'SSE' and 'SSE2' imply each other
{'SSE', 'SSE2'}
"""
def get_implies(name, _caller=set()):
implies = set()
d = self.feature_supported[name]
for i in d.get("implies", []):
implies.add(i)
if i in _caller:
# infinite recursion guard, since
# features can imply each other
continue
_caller.add(name)
implies = implies.union(get_implies(i, _caller))
return implies
if isinstance(names, str):
implies = get_implies(names)
names = [names]
else:
assert(hasattr(names, "__iter__"))
implies = set()
for n in names:
implies = implies.union(get_implies(n))
if not keep_origins:
implies.difference_update(names)
return implies
def feature_implies_c(self, names):
"""same as feature_implies() but combining 'names'"""
if isinstance(names, str):
names = set((names,))
else:
names = set(names)
return names.union(self.feature_implies(names))
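# A brief hedged example, mirroring the feature_implies() docstring above:
#   >>> self.feature_implies("SSE3")     # origins are excluded
#   {'SSE', 'SSE2'}
#   >>> self.feature_implies_c("SSE3")   # origins are combined with the implied set
#   {'SSE', 'SSE2', 'SSE3'}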
def feature_ahead(self, names):
"""
Return the list of features in 'names' after removing any
implied features, keeping the origins.
Parameters
----------
'names': sequence
sequence of CPU feature names in uppercase.
Returns
-------
list of CPU features, sorted in the same order as 'names'
Examples
--------
>>> self.feature_ahead(["SSE2", "SSE3", "SSE41"])
["SSE41"]
# assume AVX2 and FMA3 imply each other and AVX2
# is the highest interest
>>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"])
["AVX2"]
# assume AVX2 and FMA3 don't imply each other
>>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"])
["AVX2", "FMA3"]
"""
assert(
not isinstance(names, str)
and hasattr(names, '__iter__')
)
implies = self.feature_implies(names, keep_origins=True)
ahead = [n for n in names if n not in implies]
if len(ahead) == 0:
# return the highest interested feature
# if all features imply each other
ahead = self.feature_sorted(names, reverse=True)[:1]
return ahead
def feature_untied(self, names):
"""
Same as 'feature_ahead()', but when two features imply each other
only the one with the highest interest is kept.
Parameters
----------
'names': sequence
sequence of CPU feature names in uppercase.
Returns
-------
list of CPU features sorted as-is 'names'
Examples
--------
>>> self.feature_untied(["SSE2", "SSE3", "SSE41"])
["SSE2", "SSE3", "SSE41"]
# assume AVX2 and FMA3 imply each other
>>> self.feature_untied(["SSE2", "SSE3", "SSE41", "FMA3", "AVX2"])
["SSE2", "SSE3", "SSE41", "AVX2"]
"""
assert(
not isinstance(names, str)
and hasattr(names, '__iter__')
)
final = []
for n in names:
implies = self.feature_implies(n)
tied = [
nn for nn in final
if nn in implies and n in self.feature_implies(nn)
]
if tied:
tied = self.feature_sorted(tied + [n])
if n not in tied[1:]:
continue
final.remove(tied[:1][0])
final.append(n)
return final
def feature_get_til(self, names, keyisfalse):
"""
Same as `feature_implies_c()`, but stop collecting implied
features when the feature option named by parameter
'keyisfalse' is False; the returned features are also sorted.
"""
def til(tnames):
# sort from highest to lowest interest then cut if "key" is False
tnames = self.feature_implies_c(tnames)
tnames = self.feature_sorted(tnames, reverse=True)
for i, n in enumerate(tnames):
if not self.feature_supported[n].get(keyisfalse, True):
tnames = tnames[:i+1]
break
return tnames
if isinstance(names, str) or len(names) <= 1:
names = til(names)
# normalize the sort
names.reverse()
return names
names = self.feature_ahead(names)
names = {t for n in names for t in til(n)}
return self.feature_sorted(names)
def feature_detect(self, names):
"""
Return a list of CPU features that need to be detected,
sorted from the lowest to the highest interest.
"""
names = self.feature_get_til(names, "implies_detect")
detect = []
for n in names:
d = self.feature_supported[n]
detect += d.get("detect", d.get("group", [n]))
return detect
@_Cache.me
def feature_flags(self, names):
"""
Return a list of CPU feature flags, sorted from the lowest
to the highest interest.
"""
names = self.feature_sorted(self.feature_implies_c(names))
flags = []
for n in names:
d = self.feature_supported[n]
f = d.get("flags", [])
if not f or not self.cc_test_flags(f):
continue
flags += f
return self.cc_normalize_flags(flags)
@_Cache.me
def feature_test(self, name, force_flags=None, macros=[]):
"""
Test a certain CPU feature against the compiler through its own
check file.
Parameters
----------
name: str
Supported CPU feature name.
force_flags: list or None, optional
If None(default), the returned flags from `feature_flags()`
will be used.
macros : list of tuples, optional
A list of C macro definitions.
"""
if force_flags is None:
force_flags = self.feature_flags(name)
self.dist_log(
"testing feature '%s' with flags (%s)" % (
name, ' '.join(force_flags)
))
# Each CPU feature must have a C source file that contains at
# least one intrinsic or instruction related to this feature.
test_path = os.path.join(
self.conf_check_path, "cpu_%s.c" % name.lower()
)
if not os.path.exists(test_path):
self.dist_fatal("feature test file is not exist", test_path)
test = self.dist_test(
test_path, force_flags + self.cc_flags["werror"], macros=macros
)
if not test:
self.dist_log("testing failed", stderr=True)
return test
@_Cache.me
def feature_is_supported(self, name, force_flags=None, macros=[]):
"""
Check if a certain CPU feature is supported by the platform and compiler.
Parameters
----------
name: str
CPU feature name in uppercase.
force_flags: list or None, optional
If None(default), default compiler flags for every CPU feature will
be used during test.
macros : list of tuples, optional
A list of C macro definitions.
"""
assert(name.isupper())
assert(force_flags is None or isinstance(force_flags, list))
supported = name in self.feature_supported
if supported:
for impl in self.feature_implies(name):
if not self.feature_test(impl, force_flags, macros=macros):
return False
if not self.feature_test(name, force_flags, macros=macros):
return False
return supported
@_Cache.me
def feature_can_autovec(self, name):
"""
check if the feature can be auto-vectorized by the compiler
"""
assert(isinstance(name, str))
d = self.feature_supported[name]
can = d.get("autovec", None)
if can is None:
valid_flags = [
self.cc_test_flags([f]) for f in d.get("flags", [])
]
can = valid_flags and any(valid_flags)
return can
@_Cache.me
def feature_extra_checks(self, name):
"""
Return a list of supported extra checks after testing them against
the compiler.
Parameters
----------
name: str
CPU feature name in uppercase.
"""
assert isinstance(name, str)
d = self.feature_supported[name]
extra_checks = d.get("extra_checks", [])
if not extra_checks:
return []
self.dist_log("Testing extra checks for feature '%s'" % name, extra_checks)
flags = self.feature_flags(name)
available = []
not_available = []
for chk in extra_checks:
test_path = os.path.join(
self.conf_check_path, "extra_%s.c" % chk.lower()
)
if not os.path.exists(test_path):
self.dist_fatal("extra check file does not exist", test_path)
is_supported = self.dist_test(test_path, flags + self.cc_flags["werror"])
if is_supported:
available.append(chk)
else:
not_available.append(chk)
if not_available:
self.dist_log("testing failed for checks", not_available, stderr=True)
return available
def feature_c_preprocessor(self, feature_name, tabs=0):
"""
Generate C preprocessor definitions and include headers of a CPU feature.
Parameters
----------
'feature_name': str
CPU feature name in uppercase.
'tabs': int
if > 0, prefix each generated line with this number of tabs.
Returns
-------
str, generated C preprocessor
Examples
--------
>>> self.feature_c_preprocessor("SSE3")
/** SSE3 **/
#define NPY_HAVE_SSE3 1
#include <pmmintrin.h>
"""
assert(feature_name.isupper())
feature = self.feature_supported.get(feature_name)
assert(feature is not None)
prepr = [
"/** %s **/" % feature_name,
"#define %sHAVE_%s 1" % (self.conf_c_prefix, feature_name)
]
prepr += [
"#include <%s>" % h for h in feature.get("headers", [])
]
extra_defs = feature.get("group", [])
extra_defs += self.feature_extra_checks(feature_name)
for edef in extra_defs:
# Guard extra definitions in case of duplicate with
# another feature
prepr += [
"#ifndef %sHAVE_%s" % (self.conf_c_prefix, edef),
"\t#define %sHAVE_%s 1" % (self.conf_c_prefix, edef),
"#endif",
]
if tabs > 0:
prepr = [('\t'*tabs) + l for l in prepr]
return '\n'.join(prepr)
class _Parse:
"""A helper class that parsing main arguments of `CCompilerOpt`,
also parsing configuration statements in dispatch-able sources.
Parameters
----------
cpu_baseline: str or None
minimal set of required CPU features or special options.
cpu_dispatch: str or None
dispatched set of additional CPU features or special options.
Special options can be:
- **MIN**: Enables the minimum CPU features defined via `_Config.conf_min_features`
- **MAX**: Enables all CPU features supported by the compiler and platform.
- **NATIVE**: Enables all CPU features supported by the current machine.
- **NONE**: Enables nothing.
- **Operand +/-**: removes or adds features, useful with options **MAX**, **MIN** and **NATIVE**.
NOTE: operand + exists only for symmetry, since appending is the default.
NOTES:
- Case-insensitive among all CPU features and special options.
- Comma or space can be used as a separator.
- If a CPU feature is not supported by the user's platform or compiler,
it will be skipped rather than raising a fatal error.
- Any CPU features specified in 'cpu_dispatch' will be skipped if they are part of the CPU baseline features.
- 'cpu_baseline' force-enables implied features.
Attributes
----------
parse_baseline_names : list
Final CPU baseline feature names (sorted from low to high)
parse_baseline_flags : list
Compiler flags of baseline features
parse_dispatch_names : list
Final CPU dispatch-able feature names (sorted from low to high)
parse_target_groups : dict
Dictionary containing the initialized target groups configured
through the class attribute `conf_target_groups`.
The key represents the group name and the value is a tuple
containing three items:
- bool, True if group has the 'baseline' option.
- list, list of CPU features.
- list, list of extra compiler flags.
"""
def __init__(self, cpu_baseline, cpu_dispatch):
self._parse_policies = dict(
# POLICY NAME, (HAVE, NOT HAVE, [DEB])
KEEP_BASELINE = (
None, self._parse_policy_not_keepbase,
[]
),
KEEP_SORT = (
self._parse_policy_keepsort,
self._parse_policy_not_keepsort,
[]
),
MAXOPT = (
self._parse_policy_maxopt, None,
[]
),
WERROR = (
self._parse_policy_werror, None,
[]
),
AUTOVEC = (
self._parse_policy_autovec, None,
["MAXOPT"]
)
)
if hasattr(self, "parse_is_cached"):
return
self.parse_baseline_names = []
self.parse_baseline_flags = []
self.parse_dispatch_names = []
self.parse_target_groups = {}
if self.cc_noopt:
# skip parsing baseline and dispatch args and keep parsing target groups
cpu_baseline = cpu_dispatch = None
self.dist_log("check requested baseline")
if cpu_baseline is not None:
cpu_baseline = self._parse_arg_features("cpu_baseline", cpu_baseline)
baseline_names = self.feature_names(cpu_baseline)
self.parse_baseline_flags = self.feature_flags(baseline_names)
self.parse_baseline_names = self.feature_sorted(
self.feature_implies_c(baseline_names)
)
self.dist_log("check requested dispatch-able features")
if cpu_dispatch is not None:
cpu_dispatch_ = self._parse_arg_features("cpu_dispatch", cpu_dispatch)
cpu_dispatch = {
f for f in cpu_dispatch_
if f not in self.parse_baseline_names
}
conflict_baseline = cpu_dispatch_.difference(cpu_dispatch)
self.parse_dispatch_names = self.feature_sorted(
self.feature_names(cpu_dispatch)
)
if len(conflict_baseline) > 0:
self.dist_log(
"skip features", conflict_baseline, "since its part of baseline"
)
self.dist_log("initialize targets groups")
for group_name, tokens in self.conf_target_groups.items():
self.dist_log("parse target group", group_name)
GROUP_NAME = group_name.upper()
if not tokens or not tokens.strip():
# allow empty groups, useful in case if there's a need
# to disable certain group since '_parse_target_tokens()'
# requires at least one valid target
self.parse_target_groups[GROUP_NAME] = (
False, [], []
)
continue
has_baseline, features, extra_flags = \
self._parse_target_tokens(tokens)
self.parse_target_groups[GROUP_NAME] = (
has_baseline, features, extra_flags
)
self.parse_is_cached = True
def parse_targets(self, source):
"""
Fetch and parse the configuration statements required for
defining the targeted CPU features. Statements should be declared
at the top of the source, inside a **C** comment, and start
with the special mark **@targets** (a sketch of such a header
follows this docstring).
Configuration statements are keywords representing CPU feature
names, groups of statements and policies, combined together to
determine the required optimization.
Parameters
----------
source: str
the path of **C** source file.
Returns
-------
- bool, True if group has the 'baseline' option
- list, list of CPU features
- list, list of extra compiler flags
"""
self.dist_log("looking for '@targets' inside -> ", source)
# get lines between /*@targets and */
with open(source) as fd:
tokens = ""
max_to_reach = 1000 # good enough, isn't it?
start_with = "@targets"
start_pos = -1
end_with = "*/"
end_pos = -1
for current_line, line in enumerate(fd):
if current_line == max_to_reach:
self.dist_fatal("reached the max of lines")
break
if start_pos == -1:
start_pos = line.find(start_with)
if start_pos == -1:
continue
start_pos += len(start_with)
tokens += line
end_pos = line.find(end_with)
if end_pos != -1:
end_pos += len(tokens) - len(line)
break
if start_pos == -1:
self.dist_fatal("expected to find '%s' within a C comment" % start_with)
if end_pos == -1:
self.dist_fatal("expected to end with '%s'" % end_with)
tokens = tokens[start_pos:end_pos]
return self._parse_target_tokens(tokens)
_parse_regex_arg = re.compile(r'\s|,|([+-])')
def _parse_arg_features(self, arg_name, req_features):
if not isinstance(req_features, str):
self.dist_fatal("expected a string in '%s'" % arg_name)
final_features = set()
# space and comma can be used as a separator
tokens = list(filter(None, re.split(self._parse_regex_arg, req_features)))
append = True # append is the default
for tok in tokens:
if tok[0] in ("#", "$"):
self.dist_fatal(
arg_name, "target groups and policies "
"aren't allowed from arguments, "
"only from dispatch-able sources"
)
if tok == '+':
append = True
continue
if tok == '-':
append = False
continue
TOK = tok.upper() # we use upper-case internally
features_to = set()
if TOK == "NONE":
pass
elif TOK == "NATIVE":
native = self.cc_flags["native"]
if not native:
self.dist_fatal(arg_name,
"native option isn't supported by the compiler"
)
features_to = self.feature_names(
force_flags=native, macros=[("DETECT_FEATURES", 1)]
)
elif TOK == "MAX":
features_to = self.feature_supported.keys()
elif TOK == "MIN":
features_to = self.feature_min
else:
if TOK in self.feature_supported:
features_to.add(TOK)
else:
if not self.feature_is_exist(TOK):
self.dist_fatal(arg_name,
", '%s' isn't a known feature or option" % tok
)
if append:
final_features = final_features.union(features_to)
else:
final_features = final_features.difference(features_to)
append = True # back to default
return final_features
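# Hedged examples of strings this parser accepts (availability of the named
# features still depends on the compiler, so the outcomes are illustrative):
#   "min avx2"       -> the minimum feature set plus AVX2
#   "max -avx512f"   -> all supported features except AVX512F
#   "native"         -> whatever the build machine itself reports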
_parse_regex_target = re.compile(r'\s|[*,/]|([()])')
def _parse_target_tokens(self, tokens):
assert(isinstance(tokens, str))
final_targets = [] # to keep it sorted as specified
extra_flags = []
has_baseline = False
skipped = set()
policies = set()
multi_target = None
tokens = list(filter(None, re.split(self._parse_regex_target, tokens)))
if not tokens:
self.dist_fatal("expected one token at least")
for tok in tokens:
TOK = tok.upper()
ch = tok[0]
if ch in ('+', '-'):
self.dist_fatal(
"+/- are 'not' allowed from target's groups or @targets, "
"only from cpu_baseline and cpu_dispatch parms"
)
elif ch == '$':
if multi_target is not None:
self.dist_fatal(
"policies aren't allowed inside multi-target '()'"
", only CPU features"
)
policies.add(self._parse_token_policy(TOK))
elif ch == '#':
if multi_target is not None:
self.dist_fatal(
"target groups aren't allowed inside multi-target '()'"
", only CPU features"
)
has_baseline, final_targets, extra_flags = \
self._parse_token_group(TOK, has_baseline, final_targets, extra_flags)
elif ch == '(':
if multi_target is not None:
self.dist_fatal("unclosed multi-target, missing ')'")
multi_target = set()
elif ch == ')':
if multi_target is None:
self.dist_fatal("multi-target opener '(' wasn't found")
targets = self._parse_multi_target(multi_target)
if targets is None:
skipped.add(tuple(multi_target))
else:
if len(targets) == 1:
targets = targets[0]
if targets and targets not in final_targets:
final_targets.append(targets)
multi_target = None # back to default
else:
if TOK == "BASELINE":
if multi_target is not None:
self.dist_fatal("baseline isn't allowed inside multi-target '()'")
has_baseline = True
continue
if multi_target is not None:
multi_target.add(TOK)
continue
if not self.feature_is_exist(TOK):
self.dist_fatal("invalid target name '%s'" % TOK)
is_enabled = (
TOK in self.parse_baseline_names or
TOK in self.parse_dispatch_names
)
if is_enabled:
if TOK not in final_targets:
final_targets.append(TOK)
continue
skipped.add(TOK)
if multi_target is not None:
self.dist_fatal("unclosed multi-target, missing ')'")
if skipped:
self.dist_log(
"skip targets", skipped,
"not part of baseline or dispatch-able features"
)
final_targets = self.feature_untied(final_targets)
# add polices dependencies
for p in list(policies):
_, _, deps = self._parse_policies[p]
for d in deps:
if d in policies:
continue
self.dist_log(
"policy '%s' force enables '%s'" % (
p, d
))
policies.add(d)
# release policies filtrations
for p, (have, nhave, _) in self._parse_policies.items():
func = None
if p in policies:
func = have
self.dist_log("policy '%s' is ON" % p)
else:
func = nhave
if not func:
continue
has_baseline, final_targets, extra_flags = func(
has_baseline, final_targets, extra_flags
)
return has_baseline, final_targets, extra_flags
def _parse_token_policy(self, token):
"""validate policy token"""
if len(token) <= 1 or token[-1:] == token[0]:
self.dist_fatal("'$' must stuck in the begin of policy name")
token = token[1:]
if token not in self._parse_policies:
self.dist_fatal(
"'%s' is an invalid policy name, available policies are" % token,
self._parse_policies.keys()
)
return token
def _parse_token_group(self, token, has_baseline, final_targets, extra_flags):
"""validate group token"""
if len(token) <= 1 or token[-1:] == token[0]:
self.dist_fatal("'#' must stuck in the begin of group name")
token = token[1:]
ghas_baseline, gtargets, gextra_flags = self.parse_target_groups.get(
token, (False, None, [])
)
if gtargets is None:
self.dist_fatal(
"'%s' is an invalid target group name, " % token + \
"available target groups are",
self.parse_target_groups.keys()
)
if ghas_baseline:
has_baseline = True
# always keep sorting as specified
final_targets += [f for f in gtargets if f not in final_targets]
extra_flags += [f for f in gextra_flags if f not in extra_flags]
return has_baseline, final_targets, extra_flags
def _parse_multi_target(self, targets):
"""validate multi targets that defined between parentheses()"""
# remove any implied features and keep the origins
if not targets:
self.dist_fatal("empty multi-target '()'")
if not all([
self.feature_is_exist(tar) for tar in targets
]) :
self.dist_fatal("invalid target name in multi-target", targets)
if not all([
(
tar in self.parse_baseline_names or
tar in self.parse_dispatch_names
)
for tar in targets
]) :
return None
targets = self.feature_ahead(targets)
if not targets:
return None
# force sort multi targets, so it can be comparable
targets = self.feature_sorted(targets)
targets = tuple(targets) # hashable
return targets
def _parse_policy_not_keepbase(self, has_baseline, final_targets, extra_flags):
"""skip all baseline features"""
skipped = []
for tar in final_targets[:]:
is_base = False
if isinstance(tar, str):
is_base = tar in self.parse_baseline_names
else:
# multi targets
is_base = all([
f in self.parse_baseline_names
for f in tar
])
if is_base:
skipped.append(tar)
final_targets.remove(tar)
if skipped:
self.dist_log("skip baseline features", skipped)
return has_baseline, final_targets, extra_flags
def _parse_policy_keepsort(self, has_baseline, final_targets, extra_flags):
"""leave a notice that $keep_sort is on"""
self.dist_log(
"policy 'keep_sort' is on, dispatch-able targets", final_targets, "\n"
"are 'not' sorted depend on the highest interest but"
"as specified in the dispatch-able source or the extra group"
)
return has_baseline, final_targets, extra_flags
def _parse_policy_not_keepsort(self, has_baseline, final_targets, extra_flags):
"""sorted depend on the highest interest"""
final_targets = self.feature_sorted(final_targets, reverse=True)
return has_baseline, final_targets, extra_flags
def _parse_policy_maxopt(self, has_baseline, final_targets, extra_flags):
"""append the compiler optimization flags"""
if self.cc_has_debug:
self.dist_log("debug mode is detected, policy 'maxopt' is skipped.")
elif self.cc_noopt:
self.dist_log("optimization is disabled, policy 'maxopt' is skipped.")
else:
flags = self.cc_flags["opt"]
if not flags:
self.dist_log(
"current compiler doesn't support optimization flags, "
"policy 'maxopt' is skipped", stderr=True
)
else:
extra_flags += flags
return has_baseline, final_targets, extra_flags
def _parse_policy_werror(self, has_baseline, final_targets, extra_flags):
"""force warnings to treated as errors"""
flags = self.cc_flags["werror"]
if not flags:
self.dist_log(
"current compiler doesn't support werror flags, "
"warnings will 'not' treated as errors", stderr=True
)
else:
self.dist_log("compiler warnings are treated as errors")
extra_flags += flags
return has_baseline, final_targets, extra_flags
def _parse_policy_autovec(self, has_baseline, final_targets, extra_flags):
"""skip features that has no auto-vectorized support by compiler"""
skipped = []
for tar in final_targets[:]:
if isinstance(tar, str):
can = self.feature_can_autovec(tar)
else: # multiple target
can = all([
self.feature_can_autovec(t)
for t in tar
])
if not can:
final_targets.remove(tar)
skipped.append(tar)
if skipped:
self.dist_log("skip non auto-vectorized features", skipped)
return has_baseline, final_targets, extra_flags
class CCompilerOpt(_Config, _Distutils, _Cache, _CCompiler, _Feature, _Parse):
"""
A helper class for `CCompiler` that aims to provide extra build options
to effectively control compiler optimizations that are directly
related to CPU features.
"""
def __init__(self, ccompiler, cpu_baseline="min", cpu_dispatch="max", cache_path=None):
_Config.__init__(self)
_Distutils.__init__(self, ccompiler)
_Cache.__init__(self, cache_path, self.dist_info(), cpu_baseline, cpu_dispatch)
_CCompiler.__init__(self)
_Feature.__init__(self)
if not self.cc_noopt and self.cc_has_native:
self.dist_log(
"native flag is specified through environment variables. "
"force cpu-baseline='native'"
)
cpu_baseline = "native"
_Parse.__init__(self, cpu_baseline, cpu_dispatch)
# keep the requested features untouched, need it later for report
# and trace purposes
self._requested_baseline = cpu_baseline
self._requested_dispatch = cpu_dispatch
# key is the dispatch-able source and value is a tuple
# contains two items (has_baseline[boolean], dispatched-features[list])
self.sources_status = getattr(self, "sources_status", {})
# every instance should have a separate one
self.cache_private.add("sources_status")
# set it at the end to make sure the cache writing is done after
# initializing this class
self.hit_cache = hasattr(self, "hit_cache")
def is_cached(self):
"""
Returns True if the class was loaded from the cache file
"""
return self.cache_infile and self.hit_cache
def cpu_baseline_flags(self):
"""
Returns a list of final CPU baseline compiler flags
"""
return self.parse_baseline_flags
def cpu_baseline_names(self):
"""
return a list of final CPU baseline feature names
"""
return self.parse_baseline_names
def cpu_dispatch_names(self):
"""
return a list of final CPU dispatch feature names
"""
return self.parse_dispatch_names
def try_dispatch(self, sources, src_dir=None, ccompiler=None, **kwargs):
"""
Compile one or more dispatch-able sources and generate object files,
also generating abstract C config headers and macros that are
used later for the final runtime dispatching process.
The mechanism behind it is to take each source file specified
in 'sources' and branch it into several files, depending on the
special configuration statements that must be declared at the
top of each source and that contain the targeted CPU features,
then compile every branched source with the proper compiler flags.
Parameters
----------
sources : list
Must be a list of dispatch-able sources file paths,
and configuration statements must be declared inside
each file.
src_dir : str
Path of parent directory for the generated headers and wrapped sources.
If None (default), the files will be generated in-place.
ccompiler: CCompiler
Distutils `CCompiler` instance to be used for compilation.
If None (default), the provided instance during the initialization
will be used instead.
**kwargs : any
Arguments to pass on to the `CCompiler.compile()`
Returns
-------
list : generated object files
Raises
------
CompileError
Raised by `CCompiler.compile()` on compilation failure.
DistutilsError
Raised on errors while checking the sanity of configuration statements.
See Also
--------
parse_targets :
Parsing the configuration statements of dispatch-able sources.
"""
to_compile = {}
baseline_flags = self.cpu_baseline_flags()
include_dirs = kwargs.setdefault("include_dirs", [])
for src in sources:
output_dir = os.path.dirname(src)
if src_dir:
if not output_dir.startswith(src_dir):
output_dir = os.path.join(src_dir, output_dir)
if output_dir not in include_dirs:
# To allow including the generated config header(*.dispatch.h)
# by the dispatch-able sources
include_dirs.append(output_dir)
has_baseline, targets, extra_flags = self.parse_targets(src)
nochange = self._generate_config(output_dir, src, targets, has_baseline)
for tar in targets:
tar_src = self._wrap_target(output_dir, src, tar, nochange=nochange)
flags = tuple(extra_flags + self.feature_flags(tar))
to_compile.setdefault(flags, []).append(tar_src)
if has_baseline:
flags = tuple(extra_flags + baseline_flags)
to_compile.setdefault(flags, []).append(src)
self.sources_status[src] = (has_baseline, targets)
# For these reasons, the sources are compiled in a separate loop:
# - Gathering all sources with the same flags to benefit from
# the parallel compiling as much as possible.
# - To generate all config headers of the dispatchable sources,
# before the compilation, in case there are dependency relationships
# among them.
objects = []
for flags, srcs in to_compile.items():
objects += self.dist_compile(
srcs, list(flags), ccompiler=ccompiler, **kwargs
)
return objects
def generate_dispatch_header(self, header_path):
"""
Generate the dispatch header which contains the #definitions and headers
for platform-specific instruction-sets for the enabled CPU baseline and
dispatch-able features.
It's highly recommended to take a look at the generated header
and at the source files generated via `try_dispatch()`
in order to get the full picture.
"""
self.dist_log("generate CPU dispatch header: (%s)" % header_path)
baseline_names = self.cpu_baseline_names()
dispatch_names = self.cpu_dispatch_names()
baseline_len = len(baseline_names)
dispatch_len = len(dispatch_names)
header_dir = os.path.dirname(header_path)
if not os.path.exists(header_dir):
self.dist_log(
f"dispatch header dir {header_dir} does not exist, creating it",
stderr=True
)
os.makedirs(header_dir)
with open(header_path, 'w') as f:
baseline_calls = ' \\\n'.join([
(
"\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))"
) % (self.conf_c_prefix, f)
for f in baseline_names
])
dispatch_calls = ' \\\n'.join([
(
"\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))"
) % (self.conf_c_prefix, f)
for f in dispatch_names
])
f.write(textwrap.dedent("""\
/*
* AUTOGENERATED DON'T EDIT
* Please make changes to the code generator (distutils/ccompiler_opt.py)
*/
#define {pfx}WITH_CPU_BASELINE "{baseline_str}"
#define {pfx}WITH_CPU_DISPATCH "{dispatch_str}"
#define {pfx}WITH_CPU_BASELINE_N {baseline_len}
#define {pfx}WITH_CPU_DISPATCH_N {dispatch_len}
#define {pfx}WITH_CPU_EXPAND_(X) X
#define {pfx}WITH_CPU_BASELINE_CALL(MACRO_TO_CALL, ...) \\
{baseline_calls}
#define {pfx}WITH_CPU_DISPATCH_CALL(MACRO_TO_CALL, ...) \\
{dispatch_calls}
""").format(
pfx=self.conf_c_prefix, baseline_str=" ".join(baseline_names),
dispatch_str=" ".join(dispatch_names), baseline_len=baseline_len,
dispatch_len=dispatch_len, baseline_calls=baseline_calls,
dispatch_calls=dispatch_calls
))
baseline_pre = ''
for name in baseline_names:
baseline_pre += self.feature_c_preprocessor(name, tabs=1) + '\n'
dispatch_pre = ''
for name in dispatch_names:
dispatch_pre += textwrap.dedent("""\
#ifdef {pfx}CPU_TARGET_{name}
{pre}
#endif /*{pfx}CPU_TARGET_{name}*/
""").format(
pfx=self.conf_c_prefix_, name=name, pre=self.feature_c_preprocessor(
name, tabs=1
))
f.write(textwrap.dedent("""\
/******* baseline features *******/
{baseline_pre}
/******* dispatch features *******/
{dispatch_pre}
""").format(
pfx=self.conf_c_prefix_, baseline_pre=baseline_pre,
dispatch_pre=dispatch_pre
))
def report(self, full=False):
report = []
platform_rows = []
baseline_rows = []
dispatch_rows = []
report.append(("Platform", platform_rows))
report.append(("", ""))
report.append(("CPU baseline", baseline_rows))
report.append(("", ""))
report.append(("CPU dispatch", dispatch_rows))
########## platform ##########
platform_rows.append(("Architecture", (
"unsupported" if self.cc_on_noarch else self.cc_march)
))
platform_rows.append(("Compiler", (
"unix-like" if self.cc_is_nocc else self.cc_name)
))
########## baseline ##########
if self.cc_noopt:
baseline_rows.append(("Requested", "optimization disabled"))
else:
baseline_rows.append(("Requested", repr(self._requested_baseline)))
baseline_names = self.cpu_baseline_names()
baseline_rows.append((
"Enabled", (' '.join(baseline_names) if baseline_names else "none")
))
baseline_flags = self.cpu_baseline_flags()
baseline_rows.append((
"Flags", (' '.join(baseline_flags) if baseline_flags else "none")
))
extra_checks = []
for name in baseline_names:
extra_checks += self.feature_extra_checks(name)
baseline_rows.append((
"Extra checks", (' '.join(extra_checks) if extra_checks else "none")
))
########## dispatch ##########
if self.cc_noopt:
baseline_rows.append(("Requested", "optimization disabled"))
else:
dispatch_rows.append(("Requested", repr(self._requested_dispatch)))
dispatch_names = self.cpu_dispatch_names()
dispatch_rows.append((
"Enabled", (' '.join(dispatch_names) if dispatch_names else "none")
))
########## Generated ##########
# TODO:
# - collect object names from 'try_dispatch()'
# then get the size of each object and print it
# - give more details about the features that were not
# generated due to lack of compiler support
# - find a better output design.
#
target_sources = {}
for source, (_, targets) in self.sources_status.items():
for tar in targets:
target_sources.setdefault(tar, []).append(source)
if not full or not target_sources:
generated = ""
for tar in self.feature_sorted(target_sources):
sources = target_sources[tar]
name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar)
generated += name + "[%d] " % len(sources)
dispatch_rows.append(("Generated", generated[:-1] if generated else "none"))
else:
dispatch_rows.append(("Generated", ''))
for tar in self.feature_sorted(target_sources):
sources = target_sources[tar]
pretty_name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar)
flags = ' '.join(self.feature_flags(tar))
implies = ' '.join(self.feature_sorted(self.feature_implies(tar)))
detect = ' '.join(self.feature_detect(tar))
extra_checks = []
for name in ((tar,) if isinstance(tar, str) else tar):
extra_checks += self.feature_extra_checks(name)
extra_checks = (' '.join(extra_checks) if extra_checks else "none")
dispatch_rows.append(('', ''))
dispatch_rows.append((pretty_name, implies))
dispatch_rows.append(("Flags", flags))
dispatch_rows.append(("Extra checks", extra_checks))
dispatch_rows.append(("Detect", detect))
for src in sources:
dispatch_rows.append(("", src))
###############################
# TODO: add support for 'markdown' format
text = []
secs_len = [len(secs) for secs, _ in report]
cols_len = [len(col) for _, rows in report for col, _ in rows]
tab = ' ' * 2
pad = max(max(secs_len), max(cols_len))
for sec, rows in report:
if not sec:
text.append("") # empty line
continue
sec += ' ' * (pad - len(sec))
text.append(sec + tab + ': ')
for col, val in rows:
col += ' ' * (pad - len(col))
text.append(tab + col + ': ' + val)
return '\n'.join(text)
def _wrap_target(self, output_dir, dispatch_src, target, nochange=False):
assert(isinstance(target, (str, tuple)))
if isinstance(target, str):
ext_name = target_name = target
else:
# multi-target
ext_name = '.'.join(target)
target_name = '__'.join(target)
wrap_path = os.path.join(output_dir, os.path.basename(dispatch_src))
wrap_path = "{0}.{2}{1}".format(*os.path.splitext(wrap_path), ext_name.lower())
if nochange and os.path.exists(wrap_path):
return wrap_path
self.dist_log("wrap dispatch-able target -> ", wrap_path)
# sorting for readability
features = self.feature_sorted(self.feature_implies_c(target))
target_join = "#define %sCPU_TARGET_" % self.conf_c_prefix_
target_defs = [target_join + f for f in features]
target_defs = '\n'.join(target_defs)
with open(wrap_path, "w") as fd:
fd.write(textwrap.dedent("""\
/**
* AUTOGENERATED DON'T EDIT
* Please make changes to the code generator \
(distutils/ccompiler_opt.py)
*/
#define {pfx}CPU_TARGET_MODE
#define {pfx}CPU_TARGET_CURRENT {target_name}
{target_defs}
#include "{path}"
""").format(
pfx=self.conf_c_prefix_, target_name=target_name,
path=os.path.abspath(dispatch_src), target_defs=target_defs
))
return wrap_path
def _generate_config(self, output_dir, dispatch_src, targets, has_baseline=False):
config_path = os.path.basename(dispatch_src)
config_path = os.path.splitext(config_path)[0] + '.h'
config_path = os.path.join(output_dir, config_path)
# check if targets didn't change to avoid recompiling
cache_hash = self.cache_hash(targets, has_baseline)
try:
with open(config_path) as f:
last_hash = f.readline().split("cache_hash:")
if len(last_hash) == 2 and int(last_hash[1]) == cache_hash:
return True
except OSError:
pass
os.makedirs(os.path.dirname(config_path), exist_ok=True)
self.dist_log("generate dispatched config -> ", config_path)
dispatch_calls = []
for tar in targets:
if isinstance(tar, str):
target_name = tar
else: # multi target
target_name = '__'.join([t for t in tar])
req_detect = self.feature_detect(tar)
req_detect = '&&'.join([
"CHK(%s)" % f for f in req_detect
])
dispatch_calls.append(
"\t%sCPU_DISPATCH_EXPAND_(CB((%s), %s, __VA_ARGS__))" % (
self.conf_c_prefix_, req_detect, target_name
))
dispatch_calls = ' \\\n'.join(dispatch_calls)
if has_baseline:
baseline_calls = (
"\t%sCPU_DISPATCH_EXPAND_(CB(__VA_ARGS__))"
) % self.conf_c_prefix_
else:
baseline_calls = ''
with open(config_path, "w") as fd:
fd.write(textwrap.dedent("""\
// cache_hash:{cache_hash}
/**
* AUTOGENERATED DON'T EDIT
* Please make changes to the code generator (distutils/ccompiler_opt.py)
*/
#ifndef {pfx}CPU_DISPATCH_EXPAND_
#define {pfx}CPU_DISPATCH_EXPAND_(X) X
#endif
#undef {pfx}CPU_DISPATCH_BASELINE_CALL
#undef {pfx}CPU_DISPATCH_CALL
#define {pfx}CPU_DISPATCH_BASELINE_CALL(CB, ...) \\
{baseline_calls}
#define {pfx}CPU_DISPATCH_CALL(CHK, CB, ...) \\
{dispatch_calls}
""").format(
pfx=self.conf_c_prefix_, baseline_calls=baseline_calls,
dispatch_calls=dispatch_calls, cache_hash=cache_hash
))
return False
def new_ccompiler_opt(compiler, dispatch_hpath, **kwargs):
"""
Create a new instance of 'CCompilerOpt' and generate the dispatch header
which contains the #definitions and headers of platform-specific instruction-sets for
the enabled CPU baseline and dispatch-able features.
Parameters
----------
compiler : CCompiler instance
dispatch_hpath : str
path of the dispatch header
**kwargs: passed as-is to `CCompilerOpt(...)`
Returns
-------
new instance of CCompilerOpt
"""
opt = CCompilerOpt(compiler, **kwargs)
if not os.path.exists(dispatch_hpath) or not opt.is_cached():
opt.generate_dispatch_header(dispatch_hpath)
return opt
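# A hedged end-to-end sketch of the entry point defined above; the compiler
# object and the header path are placeholders, and in practice a fully
# customized distutils compiler may be required:
#
#   from distutils.ccompiler import new_compiler
#   cc = new_compiler()
#   opt = new_ccompiler_opt(cc, "build/npy_cpu_dispatch_config.h",
#                           cpu_baseline="min", cpu_dispatch="max")
#   print(opt.report(full=True))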
|
[] |
[] |
[
"CFLAGS",
"path",
"CPPFLAGS"
] |
[]
|
["CFLAGS", "path", "CPPFLAGS"]
|
python
| 3 | 0 | |
integration-cli/requirements.go
|
package main
import (
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"strings"
"time"
"github.com/go-check/check"
)
type testCondition func() bool
type testRequirement struct {
Condition testCondition
SkipMessage string
}
// List test requirements
var (
DaemonIsWindows = testRequirement{
func() bool { return daemonPlatform == "windows" },
"Test requires a Windows daemon",
}
DaemonIsLinux = testRequirement{
func() bool { return daemonPlatform == "linux" },
"Test requires a Linux daemon",
}
NotArm = testRequirement{
func() bool { return os.Getenv("DOCKER_ENGINE_GOARCH") == "arm" },
"Test requires a daemon not running on ARM",
}
SameHostDaemon = testRequirement{
func() bool { return isLocalDaemon },
"Test requires docker daemon to run on the same machine as CLI",
}
UnixCli = testRequirement{
func() bool { return isUnixCli },
"Test requires posix utilities or functionality to run.",
}
ExecSupport = testRequirement{
func() bool { return supportsExec },
"Test requires 'docker exec' capabilities on the tested daemon.",
}
Network = testRequirement{
func() bool {
// Set a timeout on the GET at 15s
var timeout = time.Duration(15 * time.Second)
var url = "https://hub.docker.com"
client := http.Client{
Timeout: timeout,
}
resp, err := client.Get(url)
if err != nil && strings.Contains(err.Error(), "use of closed network connection") {
panic(fmt.Sprintf("Timeout for GET request on %s", url))
}
if resp != nil {
resp.Body.Close()
}
return err == nil
},
"Test requires network availability, environment variable set to none to run in a non-network enabled mode.",
}
Apparmor = testRequirement{
func() bool {
buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled")
return err == nil && len(buf) > 1 && buf[0] == 'Y'
},
"Test requires apparmor is enabled.",
}
RegistryHosting = testRequirement{
func() bool {
// for now registry binary is built only if we're running inside
// container through `make test`. Figure that out by testing if
// registry binary is in PATH.
_, err := exec.LookPath(v2binary)
return err == nil
},
fmt.Sprintf("Test requires an environment that can host %s in the same host", v2binary),
}
NotaryHosting = testRequirement{
func() bool {
// for now notary binary is built only if we're running inside
// container through `make test`. Figure that out by testing if
// notary-server binary is in PATH.
_, err := exec.LookPath(notaryBinary)
return err == nil
},
fmt.Sprintf("Test requires an environment that can host %s in the same host", notaryBinary),
}
NotOverlay = testRequirement{
func() bool {
cmd := exec.Command("grep", "^overlay / overlay", "/proc/mounts")
if err := cmd.Run(); err != nil {
return true
}
return false
},
"Test requires underlying root filesystem not be backed by overlay.",
}
IPv6 = testRequirement{
func() bool {
cmd := exec.Command("test", "-f", "/proc/net/if_inet6")
if err := cmd.Run(); err != nil {
return false
}
return true
},
"Test requires support for IPv6",
}
NotGCCGO = testRequirement{
func() bool {
out, err := exec.Command("go", "version").Output()
if err == nil && strings.Contains(string(out), "gccgo") {
return false
}
return true
},
"Test requires native Golang compiler instead of GCCGO",
}
NotUserNamespace = testRequirement{
func() bool {
root := os.Getenv("DOCKER_REMAP_ROOT")
if root != "" {
return false
}
return true
},
"Test cannot be run when remapping root",
}
)
// testRequires checks if the environment satisfies the requirements
// for the test to run or skips the tests.
func testRequires(c *check.C, requirements ...testRequirement) {
for _, r := range requirements {
if !r.Condition() {
c.Skip(r.SkipMessage)
}
}
}
|
[
"\"DOCKER_ENGINE_GOARCH\"",
"\"DOCKER_REMAP_ROOT\""
] |
[] |
[
"DOCKER_ENGINE_GOARCH",
"DOCKER_REMAP_ROOT"
] |
[]
|
["DOCKER_ENGINE_GOARCH", "DOCKER_REMAP_ROOT"]
|
go
| 2 | 0 | |
server/gps_tracker/wsgi.py
|
"""
WSGI config for gps_tracker project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gps_tracker.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
main.go
|
package main
import (
"fmt"
"net/http"
"os"
"time"
f "github.com/fauna/faunadb-go/v4/faunadb"
"github.com/gin-gonic/gin"
"github.com/joho/godotenv"
)
type User struct {
ID string `json:"id,omitempty" fauna:"id"`
Name string `json:"name" fauna:"name"`
DOB time.Time `json:"dob" fauna:"dob"`
Address string `json:"address" fauna:"address"`
Description string `json:"description" fauna:"description"`
CreatedAt time.Time `json:"createdAt,omitempty" fauna:"createdAt"`
}
// Global variable to access admin-client to faunaDB
var adminClient *f.FaunaClient
func init() {
var err error = godotenv.Load()
if err != nil {
panic("[ERROR]: Unable to load <.env> file!")
}
// Get a new fauna client with access key
adminClient = f.NewFaunaClient(os.Getenv("FAUNADB_ADMIN_SECRET"))
// Create a persistent database node for the API
result, err := adminClient.Query(
f.CreateDatabase(f.Obj{"name": "golang-api"}))
handleError(result, err)
// Create a collection (table) inside the database
result, err = adminClient.Query(
f.CreateCollection(f.Obj{"name": "Users"}))
handleError(result, err)
// Create an index to access documents easily
result, err = adminClient.Query(
f.CreateIndex(
f.Obj{
"name": "users_by_id",
"source": f.Collection("Users"),
"terms": f.Arr{f.Obj{"field": f.Arr{"data", "id"}}},
},
))
handleError(result, err)
}
// Handles the error and prompts the result accordingly
func handleError(result f.Value, err error) {
if err != nil {
fmt.Printf("[FAUNADB-WARN]: ")
fmt.Fprintln(os.Stderr, err)
} else {
fmt.Printf("[FAUNADB-DEBUG]: ")
fmt.Println(result)
}
}
// Fetch new ID for a document on call
func newID() (id string, err error) {
result, err := adminClient.Query(f.NewId())
if err != nil {
return "", err
}
err = result.Get(&id)
if err != nil {
return "", err
}
return id, nil
}
// getUser responds to GET requests with `id` parameter to API
func getUser(ctx *gin.Context) {
var id string = ctx.Param("id")
result, err := adminClient.Query(
f.Get(f.Ref(f.Collection("Users"), id)))
// In case user not found
if err != nil {
ctx.IndentedJSON(http.StatusNotFound, gin.H{"message": "user not found"})
return
}
// Shape the retrieved data into <User> type
var user User
if err = result.At(f.ObjKey("data")).Get(&user); err != nil {
ctx.IndentedJSON(http.StatusNotFound, gin.H{"message": "unable to fetch user details"})
return
}
// Respond to client on read
ctx.IndentedJSON(http.StatusOK, user)
}
// createUser responds to POST requests to API
func createUser(ctx *gin.Context) {
var newUser User
if err := ctx.ShouldBindJSON(&newUser); err != nil {
ctx.IndentedJSON(
http.StatusNotAcceptable,
gin.H{"message": "invalid JSON data sent"},
)
return
}
// Get new id for document
id, err := newID()
if err != nil {
ctx.IndentedJSON(
http.StatusInternalServerError,
gin.H{"message": "unable to generate an id for the user"},
)
return
}
// Adding the user's id & ctime
newUser.ID = id
newUser.CreatedAt = time.Now()
// Committing user details to DB
_, err = adminClient.Query(
f.Create(
f.Ref(f.Collection("Users"), id),
f.Obj{
"data": f.Obj{
"id": newUser.ID,
"name": newUser.Name,
"dob": newUser.DOB,
"address": newUser.Address,
"description": newUser.Description,
"createdAt": newUser.CreatedAt,
},
},
))
if err != nil {
ctx.IndentedJSON(
http.StatusInternalServerError,
gin.H{"message": "unable to create document"},
)
return
}
// Respond to client on create
ctx.IndentedJSON(http.StatusCreated, newUser)
}
// updateUser responds to PATCH requests to API
func updateUser(ctx *gin.Context) {
var id string = ctx.Param("id")
var updatedUser User
if err := ctx.ShouldBindJSON(&updatedUser); err != nil {
return
}
/// Similar to getUser func ///
result, err := adminClient.Query(
f.Get(f.Ref(f.Collection("Users"), id)))
// In case user not found
if err != nil {
ctx.IndentedJSON(http.StatusNotFound, gin.H{"message": "user not found"})
return
}
var user User
if err = result.At(f.ObjKey("data")).Get(&user); err != nil {
ctx.IndentedJSON(http.StatusNotFound, gin.H{"message": "unable to fetch user details"})
return
}
//////////////////////////////
checkString := func(f1, f2 string) string {
if f1 != "" {
return f1
}
return f2
}
// ID, DOB & CreatedAt fields can't be changed in future after registration
result, err = adminClient.Query(
f.Update(
f.Ref(f.Collection("Users"), id),
f.Obj{
"data": f.Obj{
"name": checkString(updatedUser.Name, user.Name),
"address": checkString(updatedUser.Address, user.Address),
"description": checkString(updatedUser.Description, user.Description),
},
},
))
if err != nil {
ctx.IndentedJSON(http.StatusNotFound, gin.H{"message": "user not found"})
return
}
// Fetch the updated version of user details
if err = result.At(f.ObjKey("data")).Get(&updatedUser); err != nil {
ctx.IndentedJSON(http.StatusNotFound, gin.H{"message": "unable to fetch user details"})
return
}
// Respond the updated details to client
ctx.IndentedJSON(http.StatusPartialContent, updatedUser)
}
// deleteUser responds to DELETE requests to API
func deleteUser(ctx *gin.Context) {
var id string = ctx.Param("id")
_, err := adminClient.Query(
f.Delete(f.Ref(f.Collection("Users"), id)))
// In case user not found
if err != nil {
ctx.IndentedJSON(http.StatusNotFound, gin.H{"message": "user not found"})
return
}
// Respond to client on delete
ctx.IndentedJSON(http.StatusAccepted, gin.H{"message": id + ": user successfully deleted"})
}
func main() {
// Creates a gin router with default middleware:
// logger and recovery (crash-free) middleware
router := gin.Default()
router.GET("/api/users/:id", getUser)
router.POST("/api/users", createUser)
router.PATCH("/api/users/:id", updateUser)
router.DELETE("/api/users/:id", deleteUser)
router.Run(":3000")
}
|
[
"\"FAUNADB_ADMIN_SECRET\""
] |
[] |
[
"FAUNADB_ADMIN_SECRET"
] |
[]
|
["FAUNADB_ADMIN_SECRET"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
"context"
"fmt"
"os"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/elbv2"
"github.com/aws/aws-sdk-go/service/rds"
"github.com/aws/aws-sdk-go/service/elasticache"
"github.com/aws/aws-sdk-go/service/sts"
)
type Response events.APIGatewayProxyResponse
var (
ec2Svc *ec2.EC2
elbSvc *elbv2.ELBV2
rdsSvc *rds.RDS
stsSvc *sts.STS
elcSvc *elasticache.ElastiCache
)
func Handler(ctx context.Context) error {
//Handle
ec2Handle()
albHandle()
rdsHandle()
elasticacheHandle()
return nil
}
func main() {
region := os.Getenv("REGION")
//Prepare
sess, err := getAWSSession(region)
if err != nil {
panic(err)
}
// Services
ec2Svc = ec2.New(sess)
elbSvc = elbv2.New(sess)
rdsSvc = rds.New(sess)
stsSvc = sts.New(sess)
elcSvc = elasticache.New(sess)
// Handle Lambda
lambda.Start(Handler)
// elasticacheHandle()
}
func ec2Handle() {
fmt.Println("Searching for EC2 Instances")
instances, err := getEc2Instances()
if err != nil {
panic(err)
}
terminateInstances(instances)
fmt.Println("Searching for Snapshots")
snapshots, err := getEc2Snapshots()
if err != nil {
fmt.Println(err)
}
terminateSnapShots(snapshots)
fmt.Println("Searching for EBS Volumes")
volumes, err := getEBSVolumes()
if err != nil {
panic(err)
}
terminateEBS(volumes)
}
func albHandle() {
fmt.Println("Searching for ALB / ELB / NLBs Instances")
instances, err := getLoadBalancersInstances()
if err != nil {
panic(err)
}
terminateLoadBalancers(instances)
}
func rdsHandle() {
fmt.Println("Searching for RDS Instances")
instances, err := getRDSInstances()
if err != nil {
panic(err)
}
clusters, err := getRDSClusters()
if err != nil {
panic(err)
}
terminateRDSInstances(instances)
terminateRDSClusters(clusters)
}
func elasticacheHandle() {
fmt.Println("Searching for Elasticache Clusters")
clusters, err := getElasticacheClusters()
if err != nil {
panic(err)
}
terminateElasticacheClusters(clusters)
fmt.Println("Searching for Elasticache Repliocation Groups")
groups, err := getReplicationGroups()
if err != nil {
panic(err)
}
terminateReplicationGroups(groups)
}
func getElasticacheClusters() ([]*string, error) {
var instances []*string
input := &elasticache.DescribeCacheClustersInput{}
result, err := elcSvc.DescribeCacheClusters(input)
if err != nil {
return nil, err
}
for _, cluster := range result.CacheClusters {
if *cluster.CacheClusterStatus == "available" {
instances = append(instances, cluster.CacheClusterId)
}
}
return instances, nil
}
func getReplicationGroups() ([]*string, error) {
var instances []*string
input := &elasticache.DescribeReplicationGroupsInput{}
result, err := elcSvc.DescribeReplicationGroups(input)
if err != nil {
return nil, err
}
for _, rpg := range result.ReplicationGroups {
instances = append(instances, rpg.ReplicationGroupId)
}
return instances, nil
}
func getLoadBalancersInstances() ([]*string, error) {
var instances []*string
input := &elbv2.DescribeLoadBalancersInput{}
result, err := elbSvc.DescribeLoadBalancers(input)
if err != nil {
return nil, err
}
for _, lb := range result.LoadBalancers {
instances = append(instances, lb.LoadBalancerArn)
}
return instances, nil
}
func getEc2Instances() ([]*string, error) {
var instances []*string
input := &ec2.DescribeInstancesInput{
Filters: []*ec2.Filter{
{
Name: aws.String("instance-state-name"),
Values: []*string{aws.String("running"), aws.String("pending")},
},
},
}
result, err := ec2Svc.DescribeInstances(input)
if err != nil {
return nil, err
}
for _, reservation := range result.Reservations {
for _, instance := range reservation.Instances {
instances = append(instances, instance.InstanceId)
}
}
return instances, nil
}
func getEc2Snapshots() ([]*string, error) {
var snapshots []*string
account, err := getAWSAccount()
if err != nil {
return nil, err
}
filters := []*ec2.Filter{
{
Name: aws.String("owner-id"),
Values: []*string{
aws.String(account),
},
},
}
input := &ec2.DescribeSnapshotsInput{Filters: filters}
result, err := ec2Svc.DescribeSnapshots(input)
if err != nil {
return nil, err
}
for _, snapshot := range result.Snapshots {
snapshots = append(snapshots, snapshot.SnapshotId)
}
return snapshots, nil
}
func getEBSVolumes() ([]*string, error) {
var volumes []*string
result, err := ec2Svc.DescribeVolumes(nil)
if err != nil {
return nil, err
}
for _, volume := range result.Volumes {
volumes = append(volumes, volume.VolumeId)
}
return volumes, nil
}
func getRDSInstances() ([]*string, error) {
var instances []*string
result, err := rdsSvc.DescribeDBInstances(nil)
if err != nil {
return nil, err
}
for _, rds := range result.DBInstances {
instances = append(instances, rds.DBInstanceIdentifier)
}
return instances, nil
}
func getRDSClusters() ([]*string, error) {
var instances []*string
result, err := rdsSvc.DescribeDBClusters(nil)
if err != nil {
return nil, err
}
for _, rds := range result.DBClusters {
instances = append(instances, rds.DBClusterIdentifier)
}
return instances, nil
}
func terminateInstances(instances []*string) {
if len(instances) == 0 {
fmt.Println("No more EC2 instances to destroy")
} else {
params := &ec2.TerminateInstancesInput{
InstanceIds: instances,
}
resp, err := ec2Svc.TerminateInstances(params)
if err != nil {
fmt.Printf("Failed to terminate instance", err)
}
for _, ti := range resp.TerminatingInstances {
fmt.Printf("Instance: %s \n\nStatus: %s", *ti.InstanceId, ti.CurrentState.String())
}
}
}
func terminateSnapShots(snapshots []*string) {
for _, snapshot := range snapshots {
input := &ec2.DeleteSnapshotInput{
SnapshotId: snapshot,
}
_, err := ec2Svc.DeleteSnapshot(input)
if err != nil {
fmt.Printf("Failed to terminate snapshot", err)
}
}
}
func terminateEBS(volumes []*string) {
for _, volume := range volumes {
fmt.Println(*volume)
params := &ec2.DeleteVolumeInput{
VolumeId: volume,
}
_, err := ec2Svc.DeleteVolume(params)
if err != nil {
fmt.Printf("Failed to terminate EBS", err)
}
}
}
func terminateLoadBalancers(instances []*string) {
for _, instance := range instances {
params := &elbv2.DeleteLoadBalancerInput{
LoadBalancerArn: instance,
}
_, err := elbSvc.DeleteLoadBalancer(params)
if err != nil {
fmt.Printf("Failed to terminate lb", err)
}
}
}
func terminateRDSInstances(instances []*string) {
for _, instance := range instances {
params := &rds.DeleteDBInstanceInput{
DBInstanceIdentifier: instance,
SkipFinalSnapshot: aws.Bool(true),
}
_, err := rdsSvc.DeleteDBInstance(params)
if err != nil {
fmt.Printf("Failed to terminate RDS", err)
}
}
}
func terminateRDSClusters(instances []*string) {
for _, instance := range instances {
params := &rds.DeleteDBClusterInput{
DBClusterIdentifier: instance,
SkipFinalSnapshot: aws.Bool(true),
}
_, err := rdsSvc.DeleteDBCluster(params)
if err != nil {
fmt.Printf("Failed to terminate RDS Cluster", err)
}
}
}
func terminateElasticacheClusters(clusters []*string) {
for _, cluster := range clusters {
params := &elasticache.DeleteCacheClusterInput{
CacheClusterId: cluster,
}
_, err := elcSvc.DeleteCacheCluster(params)
if err != nil {
fmt.Printf("Failed to terminate Replication Group", err)
}
}
}
func terminateReplicationGroups(groups []*string) {
for _, group := range groups {
params := &elasticache.DeleteReplicationGroupInput{
ReplicationGroupId: group,
}
_, err := elcSvc.DeleteReplicationGroup(params)
if err != nil {
fmt.Printf("Failed to terminate Cache Cluster", err)
}
}
}
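// getAWSSession creates an AWS session for the given region with verbose credential-chain errors enabled.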
func getAWSSession(region string) (*session.Session, error) {
awsConfig := &aws.Config{
Region: aws.String(region),
}
awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true)
return session.NewSession(awsConfig)
}
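// getAWSAccount returns the AWS account ID of the current caller via STS.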
func getAWSAccount() (string, error) {
callerInput := &sts.GetCallerIdentityInput{}
output, err := stsSvc.GetCallerIdentity(callerInput)
if err != nil {
return "", err
}
return *output.Account, nil
}
| ["\"REGION\""] | [] | ["REGION"] | [] | ["REGION"] | go | 1 | 0 | |
pkg/discord/webhook.go
|
package discord
import (
"os"
"log"
"encoding/json"
"net/http"
"github.com/dli-invest/finreddit/pkg/types"
"bytes"
)
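// SendWebhook posts the given payload to the Discord webhook URL read from the DISCORD_WEBHOOK environment variable.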
func SendWebhook(discordWebhook types.DiscordPayload) (*http.Response, error) {
discordUrl := os.Getenv("DISCORD_WEBHOOK")
if discordUrl == "" {
log.Fatal("DISCORD_WEBHOOK not set")
}
webhookData, err := json.Marshal(discordWebhook)
if err != nil {
log.Fatal(err)
}
resp, err := http.Post(discordUrl, "application/json", bytes.NewBuffer(webhookData))
return resp, err
}
| ["\"DISCORD_WEBHOOK\""] | [] | ["DISCORD_WEBHOOK"] | [] | ["DISCORD_WEBHOOK"] | go | 1 | 0 | |
statistics/selectivity_test.go
|
// Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package statistics_test
import (
"context"
"fmt"
"math"
"os"
"runtime/pprof"
"strings"
"testing"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/log"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/kv"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/statistics/handle"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/collate"
"github.com/pingcap/tidb/util/ranger"
"github.com/pingcap/tidb/util/testkit"
"github.com/pingcap/tidb/util/testleak"
"github.com/pingcap/tidb/util/testutil"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
const eps = 1e-9
var _ = SerialSuites(&testStatsSuite{})
type testStatsSuite struct {
store kv.Storage
do *domain.Domain
hook *logHook
testData testutil.TestData
}
func (s *testStatsSuite) SetUpSuite(c *C) {
testleak.BeforeTest()
// Add the hook here to avoid data race.
s.registerHook()
var err error
s.store, s.do, err = newStoreWithBootstrap()
c.Assert(err, IsNil)
s.testData, err = testutil.LoadTestSuiteData("testdata", "stats_suite")
c.Assert(err, IsNil)
}
func (s *testStatsSuite) TearDownSuite(c *C) {
s.do.Close()
c.Assert(s.store.Close(), IsNil)
testleak.AfterTest(c)()
c.Assert(s.testData.GenerateOutputIfNeeded(), IsNil)
}
func (s *testStatsSuite) registerHook() {
conf := &log.Config{Level: os.Getenv("log_level"), File: log.FileLogConfig{}}
_, r, _ := log.InitLogger(conf)
s.hook = &logHook{r.Core, ""}
lg := zap.New(s.hook)
log.ReplaceGlobals(lg, r)
}
type logHook struct {
zapcore.Core
results string
}
func (h *logHook) Write(entry zapcore.Entry, fields []zapcore.Field) error {
message := entry.Message
if idx := strings.Index(message, "[stats"); idx != -1 {
h.results = h.results + message
for _, f := range fields {
h.results = h.results + ", " + f.Key + "=" + h.field2String(f)
}
}
return nil
}
func (h *logHook) field2String(field zapcore.Field) string {
switch field.Type {
case zapcore.StringType:
return field.String
case zapcore.Int64Type, zapcore.Int32Type, zapcore.Uint32Type:
return fmt.Sprintf("%v", field.Integer)
case zapcore.Float64Type:
return fmt.Sprintf("%v", math.Float64frombits(uint64(field.Integer)))
case zapcore.StringerType:
return field.Interface.(fmt.Stringer).String()
}
return "not support"
}
func (h *logHook) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
if h.Enabled(e.Level) {
return ce.AddCore(e, h)
}
return ce
}
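// newStoreWithBootstrap creates a mock store and bootstraps a session domain for testing.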
func newStoreWithBootstrap() (kv.Storage, *domain.Domain, error) {
store, err := mockstore.NewMockStore()
if err != nil {
return nil, nil, errors.Trace(err)
}
session.SetSchemaLease(0)
session.DisableStats4Test()
domain.RunAutoAnalyze = false
do, err := session.BootstrapSession(store)
do.SetStatsUpdating(true)
return store, do, errors.Trace(err)
}
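// cleanEnv drops all tables in the test database and clears the collected statistics.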
func cleanEnv(c *C, store kv.Storage, do *domain.Domain) {
tk := testkit.NewTestKit(c, store)
tk.MustExec("use test")
r := tk.MustQuery("show tables")
for _, tb := range r.Rows() {
tableName := tb[0]
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
tk.MustExec("delete from mysql.stats_meta")
tk.MustExec("delete from mysql.stats_histograms")
tk.MustExec("delete from mysql.stats_buckets")
do.StatsHandle().Clear()
}
// generateIntDatum generates a sorted datum slice in which every dimension begins at 0 and ends with num - 1.
// If the dimension is x and num is y, the total number of datums is y^x.
func (s *testStatsSuite) generateIntDatum(dimension, num int) ([]types.Datum, error) {
length := int(math.Pow(float64(num), float64(dimension)))
ret := make([]types.Datum, length)
if dimension == 1 {
for i := 0; i < num; i++ {
ret[i] = types.NewIntDatum(int64(i))
}
} else {
sc := &stmtctx.StatementContext{TimeZone: time.Local}
// In this way, we can guarantee the datum is in order.
for i := 0; i < length; i++ {
data := make([]types.Datum, dimension)
j := i
for k := 0; k < dimension; k++ {
data[dimension-k-1].SetInt64(int64(j % num))
j = j / num
}
bytes, err := codec.EncodeKey(sc, nil, data...)
if err != nil {
return nil, err
}
ret[i].SetBytes(bytes)
}
}
return ret, nil
}
// mockStatsHistogram creates a statistics.Histogram whose data follows a uniform distribution.
func mockStatsHistogram(id int64, values []types.Datum, repeat int64, tp *types.FieldType) *statistics.Histogram {
ndv := len(values)
histogram := statistics.NewHistogram(id, int64(ndv), 0, 0, tp, ndv, 0)
for i := 0; i < ndv; i++ {
histogram.AppendBucket(&values[i], &values[i], repeat*int64(i+1), repeat)
}
return histogram
}
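// mockStatsTable builds an empty statistics.Table for the given table info with the given row count.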
func mockStatsTable(tbl *model.TableInfo, rowCount int64) *statistics.Table {
histColl := statistics.HistColl{
PhysicalID: tbl.ID,
HavePhysicalID: true,
Count: rowCount,
Columns: make(map[int64]*statistics.Column, len(tbl.Columns)),
Indices: make(map[int64]*statistics.Index, len(tbl.Indices)),
}
statsTbl := &statistics.Table{
HistColl: histColl,
}
return statsTbl
}
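// prepareSelectivity creates table t and mocks its column and index statistics for the selectivity tests.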
func (s *testStatsSuite) prepareSelectivity(testKit *testkit.TestKit, c *C) *statistics.Table {
testKit.MustExec("use test")
testKit.MustExec("drop table if exists t")
testKit.MustExec("create table t(a int primary key, b int, c int, d int, e int, index idx_cd(c, d), index idx_de(d, e))")
is := s.do.InfoSchema()
tb, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
tbl := tb.Meta()
// mock the statistic table
statsTbl := mockStatsTable(tbl, 540)
// Set the value of columns' histogram.
colValues, err := s.generateIntDatum(1, 54)
c.Assert(err, IsNil)
for i := 1; i <= 5; i++ {
statsTbl.Columns[int64(i)] = &statistics.Column{Histogram: *mockStatsHistogram(int64(i), colValues, 10, types.NewFieldType(mysql.TypeLonglong)), Info: tbl.Columns[i-1]}
}
// Set the value of two indices' histograms.
idxValues, err := s.generateIntDatum(2, 3)
c.Assert(err, IsNil)
tp := types.NewFieldType(mysql.TypeBlob)
statsTbl.Indices[1] = &statistics.Index{Histogram: *mockStatsHistogram(1, idxValues, 60, tp), Info: tbl.Indices[0]}
statsTbl.Indices[2] = &statistics.Index{Histogram: *mockStatsHistogram(2, idxValues, 60, tp), Info: tbl.Indices[1]}
return statsTbl
}
func (s *testStatsSuite) TestSelectivity(c *C) {
defer cleanEnv(c, s.store, s.do)
testKit := testkit.NewTestKit(c, s.store)
statsTbl := s.prepareSelectivity(testKit, c)
is := s.do.InfoSchema()
longExpr := "0 < a and a = 1 "
for i := 1; i < 64; i++ {
longExpr += fmt.Sprintf(" and a > %d ", i)
}
tests := []struct {
exprs string
selectivity float64
}{
{
exprs: "a > 0 and a < 2",
selectivity: 0.01851851851,
},
{
exprs: "a >= 1 and a < 2",
selectivity: 0.01851851851,
},
{
exprs: "a >= 1 and b > 1 and a < 2",
selectivity: 0.01783264746,
},
{
exprs: "a >= 1 and c > 1 and a < 2",
selectivity: 0.00617283950,
},
{
exprs: "a >= 1 and c >= 1 and a < 2",
selectivity: 0.01234567901,
},
{
exprs: "d = 0 and e = 1",
selectivity: 0.11111111111,
},
{
exprs: "b > 1",
selectivity: 0.96296296296,
},
{
exprs: "a > 1 and b < 2 and c > 3 and d < 4 and e > 5",
selectivity: 0,
},
{
exprs: longExpr,
selectivity: 0.001,
},
}
ctx := context.Background()
for _, tt := range tests {
sql := "select * from t where " + tt.exprs
comment := Commentf("for %s", tt.exprs)
sctx := testKit.Se.(sessionctx.Context)
stmts, err := session.Parse(sctx, sql)
c.Assert(err, IsNil, Commentf("error %v, for expr %s", err, tt.exprs))
c.Assert(stmts, HasLen, 1)
err = plannercore.Preprocess(sctx, stmts[0], is)
c.Assert(err, IsNil, comment)
p, _, err := plannercore.BuildLogicalPlan(ctx, sctx, stmts[0], is)
c.Assert(err, IsNil, Commentf("error %v, for building plan, expr %s", err, tt.exprs))
sel := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection)
ds := sel.Children()[0].(*plannercore.DataSource)
histColl := statsTbl.GenerateHistCollFromColumnInfo(ds.Columns, ds.Schema().Columns)
ratio, _, err := histColl.Selectivity(sctx, sel.Conditions, nil)
c.Assert(err, IsNil, comment)
c.Assert(math.Abs(ratio-tt.selectivity) < eps, IsTrue, Commentf("for %s, needed: %v, got: %v", tt.exprs, tt.selectivity, ratio))
histColl.Count *= 10
ratio, _, err = histColl.Selectivity(sctx, sel.Conditions, nil)
c.Assert(err, IsNil, comment)
c.Assert(math.Abs(ratio-tt.selectivity) < eps, IsTrue, Commentf("for %s, needed: %v, got: %v", tt.exprs, tt.selectivity, ratio))
}
}
// TestDiscreteDistribution tests the estimation for a discrete data distribution. This is more common when the index
// consists of several columns and the first column has a small NDV.
func (s *testStatsSuite) TestDiscreteDistribution(c *C) {
defer cleanEnv(c, s.store, s.do)
testKit := testkit.NewTestKit(c, s.store)
testKit.MustExec("use test")
testKit.MustExec("drop table if exists t")
testKit.MustExec("create table t(a char(10), b int, key idx(a, b))")
for i := 0; i < 499; i++ {
testKit.MustExec(fmt.Sprintf("insert into t values ('cn', %d)", i))
}
for i := 0; i < 10; i++ {
testKit.MustExec("insert into t values ('tw', 0)")
}
testKit.MustExec("analyze table t")
var (
input []string
output [][]string
)
s.testData.GetTestCases(c, &input, &output)
for i, tt := range input {
s.testData.OnRecord(func() {
output[i] = s.testData.ConvertRowsToStrings(testKit.MustQuery(tt).Rows())
})
testKit.MustQuery(tt).Check(testkit.Rows(output[i]...))
}
}
func (s *testStatsSuite) TestSelectCombinedLowBound(c *C) {
defer cleanEnv(c, s.store, s.do)
testKit := testkit.NewTestKit(c, s.store)
testKit.MustExec("use test")
testKit.MustExec("drop table if exists t")
testKit.MustExec("create table t(id int auto_increment, kid int, pid int, primary key(id), key(kid, pid))")
testKit.MustExec("insert into t (kid, pid) values (1,2), (1,3), (1,4),(1, 11), (1, 12), (1, 13), (1, 14), (2, 2), (2, 3), (2, 4)")
testKit.MustExec("analyze table t")
var (
input []string
output [][]string
)
s.testData.GetTestCases(c, &input, &output)
for i, tt := range input {
s.testData.OnRecord(func() {
output[i] = s.testData.ConvertRowsToStrings(testKit.MustQuery(tt).Rows())
})
testKit.MustQuery(tt).Check(testkit.Rows(output[i]...))
}
}
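// getRange builds a single integer range [start, end] used for row count estimation.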
func getRange(start, end int64) []*ranger.Range {
ran := &ranger.Range{
LowVal: []types.Datum{types.NewIntDatum(start)},
HighVal: []types.Datum{types.NewIntDatum(end)},
}
return []*ranger.Range{ran}
}
func (s *testStatsSuite) TestOutOfRangeEQEstimation(c *C) {
defer cleanEnv(c, s.store, s.do)
testKit := testkit.NewTestKit(c, s.store)
testKit.MustExec("use test")
testKit.MustExec("drop table if exists t")
testKit.MustExec("create table t(a int)")
for i := 0; i < 1000; i++ {
testKit.MustExec(fmt.Sprintf("insert into t values (%v)", i/4)) // 0 ~ 249
}
testKit.MustExec("analyze table t")
h := s.do.StatsHandle()
table, err := s.do.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
statsTbl := h.GetTableStats(table.Meta())
sc := &stmtctx.StatementContext{}
col := statsTbl.Columns[table.Meta().Columns[0].ID]
count, err := col.GetColumnRowCount(sc, getRange(250, 250), 0, false)
c.Assert(err, IsNil)
c.Assert(count, Equals, float64(0))
for i := 0; i < 8; i++ {
count, err := col.GetColumnRowCount(sc, getRange(250, 250), int64(i+1), false)
c.Assert(err, IsNil)
c.Assert(count, Equals, math.Min(float64(i+1), 4)) // estRows must be less than modifyCnt
}
}
func (s *testStatsSuite) TestEstimationForUnknownValues(c *C) {
defer cleanEnv(c, s.store, s.do)
testKit := testkit.NewTestKit(c, s.store)
testKit.MustExec("use test")
testKit.MustExec("drop table if exists t")
testKit.MustExec("create table t(a int, b int, key idx(a, b))")
testKit.MustExec("analyze table t")
for i := 0; i < 10; i++ {
testKit.MustExec(fmt.Sprintf("insert into t values (%d, %d)", i, i))
}
h := s.do.StatsHandle()
c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil)
testKit.MustExec("analyze table t")
for i := 0; i < 10; i++ {
testKit.MustExec(fmt.Sprintf("insert into t values (%d, %d)", i+10, i+10))
}
c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil)
c.Assert(h.Update(s.do.InfoSchema()), IsNil)
table, err := s.do.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
statsTbl := h.GetTableStats(table.Meta())
sc := &stmtctx.StatementContext{}
colID := table.Meta().Columns[0].ID
count, err := statsTbl.GetRowCountByColumnRanges(sc, colID, getRange(30, 30))
c.Assert(err, IsNil)
c.Assert(count, Equals, 0.2)
count, err = statsTbl.GetRowCountByColumnRanges(sc, colID, getRange(9, 30))
c.Assert(err, IsNil)
c.Assert(count, Equals, 2.4000000000000004)
count, err = statsTbl.GetRowCountByColumnRanges(sc, colID, getRange(9, math.MaxInt64))
c.Assert(err, IsNil)
c.Assert(count, Equals, 2.4000000000000004)
idxID := table.Meta().Indices[0].ID
count, err = statsTbl.GetRowCountByIndexRanges(sc, idxID, getRange(30, 30))
c.Assert(err, IsNil)
c.Assert(count, Equals, 0.2)
count, err = statsTbl.GetRowCountByIndexRanges(sc, idxID, getRange(9, 30))
c.Assert(err, IsNil)
c.Assert(count, Equals, 2.2)
testKit.MustExec("truncate table t")
testKit.MustExec("insert into t values (null, null)")
testKit.MustExec("analyze table t")
table, err = s.do.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
statsTbl = h.GetTableStats(table.Meta())
colID = table.Meta().Columns[0].ID
count, err = statsTbl.GetRowCountByColumnRanges(sc, colID, getRange(1, 30))
c.Assert(err, IsNil)
c.Assert(count, Equals, 0.0)
testKit.MustExec("drop table t")
testKit.MustExec("create table t(a int, b int, index idx(b))")
testKit.MustExec("insert into t values (1,1)")
testKit.MustExec("analyze table t")
table, err = s.do.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
statsTbl = h.GetTableStats(table.Meta())
colID = table.Meta().Columns[0].ID
count, err = statsTbl.GetRowCountByColumnRanges(sc, colID, getRange(2, 2))
c.Assert(err, IsNil)
c.Assert(count, Equals, 0.0)
idxID = table.Meta().Indices[0].ID
count, err = statsTbl.GetRowCountByIndexRanges(sc, idxID, getRange(2, 2))
c.Assert(err, IsNil)
c.Assert(count, Equals, 0.0)
}
func (s *testStatsSuite) TestEstimationUniqueKeyEqualConds(c *C) {
defer cleanEnv(c, s.store, s.do)
testKit := testkit.NewTestKit(c, s.store)
testKit.MustExec("use test")
testKit.MustExec("drop table if exists t")
testKit.MustExec("create table t(a int, b int, c int, unique key(b))")
testKit.MustExec("insert into t values (1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7)")
testKit.MustExec("analyze table t with 4 cmsketch width, 1 cmsketch depth;")
table, err := s.do.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
statsTbl := s.do.StatsHandle().GetTableStats(table.Meta())
sc := &stmtctx.StatementContext{}
idxID := table.Meta().Indices[0].ID
count, err := statsTbl.GetRowCountByIndexRanges(sc, idxID, getRange(7, 7))
c.Assert(err, IsNil)
c.Assert(count, Equals, 1.0)
count, err = statsTbl.GetRowCountByIndexRanges(sc, idxID, getRange(6, 6))
c.Assert(err, IsNil)
c.Assert(count, Equals, 1.0)
colID := table.Meta().Columns[0].ID
count, err = statsTbl.GetRowCountByIntColumnRanges(sc, colID, getRange(7, 7))
c.Assert(err, IsNil)
c.Assert(count, Equals, 1.0)
count, err = statsTbl.GetRowCountByIntColumnRanges(sc, colID, getRange(6, 6))
c.Assert(err, IsNil)
c.Assert(count, Equals, 1.0)
}
func (s *testStatsSuite) TestPrimaryKeySelectivity(c *C) {
defer cleanEnv(c, s.store, s.do)
testKit := testkit.NewTestKit(c, s.store)
testKit.MustExec("use test")
testKit.MustExec("drop table if exists t")
testKit.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly
testKit.MustExec("create table t(a char(10) primary key, b int)")
var input, output [][]string
s.testData.GetTestCases(c, &input, &output)
for i, ts := range input {
for j, tt := range ts {
if j != len(ts)-1 {
testKit.MustExec(tt)
}
s.testData.OnRecord(func() {
if j == len(ts)-1 {
output[i] = s.testData.ConvertRowsToStrings(testKit.MustQuery(tt).Rows())
}
})
if j == len(ts)-1 {
testKit.MustQuery(tt).Check(testkit.Rows(output[i]...))
}
}
}
}
func BenchmarkSelectivity(b *testing.B) {
c := &C{}
s := &testStatsSuite{}
s.SetUpSuite(c)
defer s.TearDownSuite(c)
testKit := testkit.NewTestKit(c, s.store)
statsTbl := s.prepareSelectivity(testKit, c)
is := s.do.InfoSchema()
exprs := "a > 1 and b < 2 and c > 3 and d < 4 and e > 5"
sql := "select * from t where " + exprs
comment := Commentf("for %s", exprs)
sctx := testKit.Se.(sessionctx.Context)
stmts, err := session.Parse(sctx, sql)
c.Assert(err, IsNil, Commentf("error %v, for expr %s", err, exprs))
c.Assert(stmts, HasLen, 1)
err = plannercore.Preprocess(sctx, stmts[0], is)
c.Assert(err, IsNil, comment)
p, _, err := plannercore.BuildLogicalPlan(context.Background(), sctx, stmts[0], is)
c.Assert(err, IsNil, Commentf("error %v, for building plan, expr %s", err, exprs))
file, err := os.Create("cpu.profile")
c.Assert(err, IsNil)
defer file.Close()
pprof.StartCPUProfile(file)
b.Run("Selectivity", func(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, _, err := statsTbl.Selectivity(sctx, p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection).Conditions, nil)
c.Assert(err, IsNil)
}
b.ReportAllocs()
})
pprof.StopCPUProfile()
}
func (s *testStatsSuite) TestStatsVer2(c *C) {
defer cleanEnv(c, s.store, s.do)
testKit := testkit.NewTestKit(c, s.store)
testKit.MustExec("use test")
testKit.MustExec("set tidb_analyze_version=2")
testKit.MustExec("drop table if exists tint")
testKit.MustExec("create table tint(a int, b int, c int, index singular(a), index multi(b, c))")
testKit.MustExec("insert into tint values (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5), (6, 6, 6), (7, 7, 7), (8, 8, 8)")
testKit.MustExec("analyze table tint with 2 topn, 3 buckets")
testKit.MustExec("drop table if exists tdouble")
testKit.MustExec("create table tdouble(a double, b double, c double, index singular(a), index multi(b, c))")
testKit.MustExec("insert into tdouble values (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5), (6, 6, 6), (7, 7, 7), (8, 8, 8)")
testKit.MustExec("analyze table tdouble with 2 topn, 3 buckets")
testKit.MustExec("drop table if exists tdecimal")
testKit.MustExec("create table tdecimal(a decimal(40, 20), b decimal(40, 20), c decimal(40, 20), index singular(a), index multi(b, c))")
testKit.MustExec("insert into tdecimal values (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5), (6, 6, 6), (7, 7, 7), (8, 8, 8)")
testKit.MustExec("analyze table tdecimal with 2 topn, 3 buckets")
testKit.MustExec("drop table if exists tstring")
testKit.MustExec("create table tstring(a varchar(64), b varchar(64), c varchar(64), index singular(a), index multi(b, c))")
testKit.MustExec("insert into tstring values ('1', '1', '1'), ('2', '2', '2'), ('3', '3', '3'), ('4', '4', '4'), ('5', '5', '5'), ('6', '6', '6'), ('7', '7', '7'), ('8', '8', '8')")
testKit.MustExec("analyze table tstring with 2 topn, 3 buckets")
testKit.MustExec("drop table if exists tdatetime")
testKit.MustExec("create table tdatetime(a datetime, b datetime, c datetime, index singular(a), index multi(b, c))")
testKit.MustExec("insert into tdatetime values ('2001-01-01', '2001-01-01', '2001-01-01'), ('2001-01-02', '2001-01-02', '2001-01-02'), ('2001-01-03', '2001-01-03', '2001-01-03'), ('2001-01-04', '2001-01-04', '2001-01-04')")
testKit.MustExec("analyze table tdatetime with 2 topn, 3 buckets")
testKit.MustExec("drop table if exists tprefix")
testKit.MustExec("create table tprefix(a varchar(64), b varchar(64), index prefixa(a(2)))")
testKit.MustExec("insert into tprefix values ('111', '111'), ('222', '222'), ('333', '333'), ('444', '444'), ('555', '555'), ('666', '666')")
testKit.MustExec("analyze table tprefix with 2 topn, 3 buckets")
// test with clustered index
testKit.MustExec("drop table if exists ct1")
testKit.MustExec("create table ct1 (a int, pk varchar(10), primary key(pk) clustered)")
testKit.MustExec("insert into ct1 values (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8')")
testKit.MustExec("analyze table ct1 with 2 topn, 3 buckets")
testKit.MustExec("drop table if exists ct2")
testKit.MustExec("create table ct2 (a int, b int, c int, primary key(a, b) clustered)")
testKit.MustExec("insert into ct2 values (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5), (6, 6, 6), (7, 7, 7), (8, 8, 8)")
testKit.MustExec("analyze table ct2 with 2 topn, 3 buckets")
rows := testKit.MustQuery("select stats_ver from mysql.stats_histograms").Rows()
for _, r := range rows {
// ensure statsVer = 2
c.Assert(fmt.Sprintf("%v", r[0]), Equals, "2")
}
var (
input []string
output [][]string
)
s.testData.GetTestCases(c, &input, &output)
for i := range input {
s.testData.OnRecord(func() {
output[i] = s.testData.ConvertRowsToStrings(testKit.MustQuery(input[i]).Rows())
})
testKit.MustQuery(input[i]).Check(testkit.Rows(output[i]...))
}
}
func (s *testStatsSuite) TestColumnIndexNullEstimation(c *C) {
defer cleanEnv(c, s.store, s.do)
testKit := testkit.NewTestKit(c, s.store)
testKit.MustExec("use test")
testKit.MustExec("drop table if exists t")
testKit.MustExec("create table t(a int, b int, c int, index idx_b(b), index idx_c_a(c, a))")
testKit.MustExec("insert into t values(1,null,1),(2,null,2),(3,3,3),(4,null,4),(null,null,null);")
h := s.do.StatsHandle()
c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil)
testKit.MustExec("analyze table t")
var (
input []string
output [][]string
)
s.testData.GetTestCases(c, &input, &output)
for i := 0; i < 5; i++ {
s.testData.OnRecord(func() {
output[i] = s.testData.ConvertRowsToStrings(testKit.MustQuery(input[i]).Rows())
})
testKit.MustQuery(input[i]).Check(testkit.Rows(output[i]...))
}
// Make sure column stats has been loaded.
testKit.MustExec(`explain select * from t where a is null`)
c.Assert(h.LoadNeededHistograms(), IsNil)
for i := 5; i < len(input); i++ {
s.testData.OnRecord(func() {
output[i] = s.testData.ConvertRowsToStrings(testKit.MustQuery(input[i]).Rows())
})
testKit.MustQuery(input[i]).Check(testkit.Rows(output[i]...))
}
}
func (s *testStatsSuite) TestUniqCompEqualEst(c *C) {
defer cleanEnv(c, s.store, s.do)
testKit := testkit.NewTestKit(c, s.store)
testKit.MustExec("use test")
testKit.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn
testKit.MustExec("drop table if exists t")
testKit.MustExec("create table t(a int, b int, primary key(a, b))")
testKit.MustExec("insert into t values(1,1),(1,2),(1,3),(1,4),(1,5),(1,6),(1,7),(1,8),(1,9),(1,10)")
h := s.do.StatsHandle()
c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil)
testKit.MustExec("analyze table t")
var (
input []string
output [][]string
)
s.testData.GetTestCases(c, &input, &output)
for i := 0; i < 1; i++ {
s.testData.OnRecord(func() {
output[i] = s.testData.ConvertRowsToStrings(testKit.MustQuery(input[i]).Rows())
})
testKit.MustQuery(input[i]).Check(testkit.Rows(output[i]...))
}
}
func (s *testStatsSuite) TestSelectivityGreedyAlgo(c *C) {
nodes := make([]*statistics.StatsNode, 3)
nodes[0] = statistics.MockStatsNode(1, 3, 2)
nodes[1] = statistics.MockStatsNode(2, 5, 2)
nodes[2] = statistics.MockStatsNode(3, 9, 2)
// Sets should not overlap on mask, so only nodes[0] is chosen.
usedSets := statistics.GetUsableSetsByGreedy(nodes)
c.Assert(len(usedSets), Equals, 1)
c.Assert(usedSets[0].ID, Equals, int64(1))
nodes[0], nodes[1] = nodes[1], nodes[0]
// Sets chosen should be stable, so the returned node is still the one with ID 1.
usedSets = statistics.GetUsableSetsByGreedy(nodes)
c.Assert(len(usedSets), Equals, 1)
c.Assert(usedSets[0].ID, Equals, int64(1))
}
func (s *testStatsSuite) TestCollationColumnEstimate(c *C) {
defer cleanEnv(c, s.store, s.do)
tk := testkit.NewTestKit(c, s.store)
collate.SetNewCollationEnabledForTest(true)
defer collate.SetNewCollationEnabledForTest(false)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a varchar(20) collate utf8mb4_general_ci)")
tk.MustExec("insert into t values('aaa'), ('bbb'), ('AAA'), ('BBB')")
h := s.do.StatsHandle()
c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil)
tk.MustExec("analyze table t")
tk.MustExec("explain select * from t where a = 'aaa'")
c.Assert(h.LoadNeededHistograms(), IsNil)
var (
input []string
output [][]string
)
s.testData.GetTestCases(c, &input, &output)
for i := 0; i < len(input); i++ {
s.testData.OnRecord(func() {
output[i] = s.testData.ConvertRowsToStrings(tk.MustQuery(input[i]).Rows())
})
tk.MustQuery(input[i]).Check(testkit.Rows(output[i]...))
}
}
// TestDNFCondSelectivity tests selectivity calculation for DNF conditions, which are covered by using the independence assumption.
func (s *testStatsSuite) TestDNFCondSelectivity(c *C) {
defer cleanEnv(c, s.store, s.do)
testKit := testkit.NewTestKit(c, s.store)
testKit.MustExec("use test")
testKit.MustExec("drop table if exists t")
testKit.MustExec("create table t(a int, b int, c int, d int)")
testKit.MustExec("insert into t value(1,5,4,4),(3,4,1,8),(4,2,6,10),(6,7,2,5),(7,1,4,9),(8,9,8,3),(9,1,9,1),(10,6,6,2)")
testKit.MustExec("alter table t add index (b)")
testKit.MustExec("alter table t add index (d)")
testKit.MustExec(`analyze table t`)
ctx := context.Background()
is := s.do.InfoSchema()
h := s.do.StatsHandle()
tb, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
tblInfo := tb.Meta()
statsTbl := h.GetTableStats(tblInfo)
var (
input []string
output []struct {
SQL string
Selectivity float64
}
)
s.testData.GetTestCases(c, &input, &output)
for i, tt := range input {
sctx := testKit.Se.(sessionctx.Context)
stmts, err := session.Parse(sctx, tt)
c.Assert(err, IsNil, Commentf("error %v, for sql %s", err, tt))
c.Assert(stmts, HasLen, 1)
err = plannercore.Preprocess(sctx, stmts[0], is)
c.Assert(err, IsNil, Commentf("error %v, for sql %s", err, tt))
p, _, err := plannercore.BuildLogicalPlan(ctx, sctx, stmts[0], is)
c.Assert(err, IsNil, Commentf("error %v, for building plan, sql %s", err, tt))
sel := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection)
ds := sel.Children()[0].(*plannercore.DataSource)
histColl := statsTbl.GenerateHistCollFromColumnInfo(ds.Columns, ds.Schema().Columns)
ratio, _, err := histColl.Selectivity(sctx, sel.Conditions, nil)
c.Assert(err, IsNil, Commentf("error %v, for expr %s", err, tt))
s.testData.OnRecord(func() {
output[i].SQL = tt
output[i].Selectivity = ratio
})
c.Assert(math.Abs(ratio-output[i].Selectivity) < eps, IsTrue,
Commentf("for %s, needed: %v, got: %v", tt, output[i].Selectivity, ratio))
}
// Test issue 19981
testKit.MustExec("select * from t where _tidb_rowid is null or _tidb_rowid > 7")
// Test issue 22134
// Information about column n will not be in stats immediately after this SQL is executed.
// If we don't have a check against this, the DNF condition could lead to infinite recursion in Selectivity().
testKit.MustExec("alter table t add column n timestamp;")
testKit.MustExec("select * from t where n = '2000-01-01' or n = '2000-01-02';")
}
func (s *testStatsSuite) TestIndexEstimationCrossValidate(c *C) {
defer cleanEnv(c, s.store, s.do)
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, key(a,b))")
tk.MustExec("insert into t values(1, 1), (1, 2), (1, 3), (2, 2)")
tk.MustExec("analyze table t")
c.Assert(failpoint.Enable("github.com/pingcap/tidb/statistics/table/mockQueryBytesMaxUint64", `return(100000)`), IsNil)
tk.MustQuery("explain select * from t where a = 1 and b = 2").Check(testkit.Rows(
"IndexReader_6 1.00 root index:IndexRangeScan_5",
"└─IndexRangeScan_5 1.00 cop[tikv] table:t, index:a(a, b) range:[1 2,1 2], keep order:false"))
c.Assert(failpoint.Disable("github.com/pingcap/tidb/statistics/table/mockQueryBytesMaxUint64"), IsNil)
// Test issue 22466
tk.MustExec("drop table if exists t2")
tk.MustExec("create table t2(a int, b int, key b(b))")
tk.MustExec("insert into t2 values(1, 1), (2, 2), (3, 3), (4, 4), (5,5)")
// This select marks column b's stats as needed, and an invalid (empty) stats object for column b
// will be loaded at the next analyze line, which triggers the bug.
tk.MustQuery("select * from t2 where b=2")
tk.MustExec("analyze table t2 index b")
tk.MustQuery("explain select * from t2 where b=2").Check(testkit.Rows(
"TableReader_7 1.00 root data:Selection_6",
"└─Selection_6 1.00 cop[tikv] eq(test.t2.b, 2)",
" └─TableFullScan_5 5.00 cop[tikv] table:t2 keep order:false"))
}
| ["\"log_level\""] | [] | ["log_level"] | [] | ["log_level"] | go | 1 | 0 | |
app/app/settings.py
|
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%2#m5_i^d%9$e4(y26n#$dl$8b%^civ(o*=jst!e$vzny!gji+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'users',
'recipe',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATIC_ROOT = '/vol/web/static'
MEDIA_ROOT = '/vol/web/media'
AUTH_USER_MODEL = "core.User"
| [] | [] | ["DB_PASS", "DB_USER", "DB_NAME", "DB_HOST"] | [] | ["DB_PASS", "DB_USER", "DB_NAME", "DB_HOST"] | python | 4 | 0 | |
contrib/google.golang.org/api/make_endpoints.go
|
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-2020 Datadog, Inc.
// +build ignore
// This program generates a tree of endpoints for span tagging based on the
// API definitions in github.com/google/google-api-go-client.
package main
import (
"encoding/json"
"log"
"net/url"
"os"
"path/filepath"
"sort"
"strings"
"text/template"
"github.com/yosida95/uritemplate"
"github.com/bmermet/dd-trace-go/contrib/google.golang.org/api/internal"
)
type (
APIDefinition struct {
ID string `json:"id"`
Name string `json:"name"`
CanonicalName string `json:"canonicalName"`
BaseURL string `json:"baseUrl"`
BasePath string `json:"basePath"`
Resources map[string]*APIResource `json:"resources"`
RootURL string `json:"rootUrl"`
}
APIResource struct {
Methods map[string]*APIMethod `json:"methods"`
Resources map[string]*APIResource `json:"resources"`
}
APIMethod struct {
ID string `json:"id"`
FlatPath string `json:"flatPath"`
Path string `json:"path"`
HTTPMethod string `json:"httpMethod"`
}
)
var cnt int
func main() {
var es []internal.Endpoint
root := filepath.Join(os.Getenv("GOPATH"), "src", "google.golang.org", "api")
err := filepath.Walk(root, func(p string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
if fi.IsDir() {
return nil
}
if filepath.Ext(p) == ".json" {
var def APIDefinition
f, err := os.Open(p)
if err != nil {
return err
}
defer f.Close()
err = json.NewDecoder(f).Decode(&def)
if err != nil {
return err
}
for _, resource := range def.Resources {
res, err := handleResource(&def, resource)
if err != nil {
return err
}
es = append(es, res...)
}
}
return nil
})
if err != nil {
log.Fatalln(err)
}
f, err := os.Create("endpoints_gen.go")
if err != nil {
log.Fatalln(err)
}
defer f.Close()
sort.Slice(es, func(i, j int) bool {
return es[i].String() < es[j].String()
})
template.Must(template.New("").Parse(tpl)).Execute(f, map[string]interface{}{
"Endpoints": es,
})
}
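// handleResource collects endpoints from a resource's methods and, recursively, from its child resources.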
func handleResource(def *APIDefinition, resource *APIResource) ([]internal.Endpoint, error) {
var es []internal.Endpoint
if resource.Methods != nil {
for _, method := range resource.Methods {
mes, err := handleMethod(def, resource, method)
if err != nil {
return nil, err
}
es = append(es, mes...)
}
}
if resource.Resources != nil {
for _, child := range resource.Resources {
res, err := handleResource(def, child)
if err != nil {
return nil, err
}
es = append(es, res...)
}
}
return es, nil
}
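// handleMethod converts a single API method definition into an internal.Endpoint.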
func handleMethod(def *APIDefinition, resource *APIResource, method *APIMethod) ([]internal.Endpoint, error) {
u, err := url.Parse(def.RootURL)
if err != nil {
return nil, err
}
hostname := u.Hostname()
path := method.FlatPath
if path == "" {
path = method.Path
}
path = def.BasePath + path
if !strings.HasPrefix(path, "/") {
path = "/" + path
}
uritpl, err := uritemplate.New(path)
if err != nil {
return nil, err
}
return []internal.Endpoint{{
Hostname: hostname,
HTTPMethod: method.HTTPMethod,
PathTemplate: path,
PathMatcher: uritpl.Regexp(),
ServiceName: "google." + def.Name,
ResourceName: method.ID,
}}, nil
}
var tpl = `// Code generated by make_endpoints.go DO NOT EDIT
package api
import (
"regexp"
"github.com/bmermet/dd-trace-go/contrib/google.golang.org/api/internal"
)
func init() {
apiEndpoints = internal.NewTree([]internal.Endpoint{
{{- range .Endpoints }}
{{ . }},
{{- end }}
}...)
}
`
| ["\"GOPATH\""] | [] | ["GOPATH"] | [] | ["GOPATH"] | go | 1 | 0 | |
go/src/github.com/maximilien/softlayer-go/services/softlayer_security_ssh_key_test.go
|
package services_test
import (
"os"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
slclientfakes "github.com/maximilien/softlayer-go/client/fakes"
datatypes "github.com/maximilien/softlayer-go/data_types"
softlayer "github.com/maximilien/softlayer-go/softlayer"
testhelpers "github.com/maximilien/softlayer-go/test_helpers"
)
var _ = Describe("SoftLayer_Ssh_Key_Service", func() {
var (
username, apiKey string
err error
fakeClient *slclientfakes.FakeSoftLayerClient
sshKeyService softlayer.SoftLayer_Security_Ssh_Key_Service
sshKey datatypes.SoftLayer_Security_Ssh_Key
sshKeyTemplate datatypes.SoftLayer_Security_Ssh_Key
)
BeforeEach(func() {
username = os.Getenv("SL_USERNAME")
Expect(username).ToNot(Equal(""))
apiKey = os.Getenv("SL_API_KEY")
Expect(apiKey).ToNot(Equal(""))
fakeClient = slclientfakes.NewFakeSoftLayerClient(username, apiKey)
Expect(fakeClient).ToNot(BeNil())
sshKeyService, err = fakeClient.GetSoftLayer_Security_Ssh_Key_Service()
Expect(err).ToNot(HaveOccurred())
Expect(sshKeyService).ToNot(BeNil())
sshKey = datatypes.SoftLayer_Security_Ssh_Key{}
sshKeyTemplate = datatypes.SoftLayer_Security_Ssh_Key{}
})
Context("#GetName", func() {
It("returns the name for the service", func() {
name := sshKeyService.GetName()
Expect(name).To(Equal("SoftLayer_Security_Ssh_Key"))
})
})
Context("#CreateObject", func() {
BeforeEach(func() {
fakeClient.DoRawHttpRequestResponse, err = testhelpers.ReadJsonTestFixtures("services", "SoftLayer_Security_Ssh_Key_Service_createObject.json")
Expect(err).ToNot(HaveOccurred())
})
It("creates a new SoftLayer_Ssh_Key instance", func() {
sshKeyTemplate = datatypes.SoftLayer_Security_Ssh_Key{
Fingerprint: "fake-fingerprint",
Key: "fake-key",
Label: "fake-label",
Notes: "fake-notes",
}
sshKey, err = sshKeyService.CreateObject(sshKeyTemplate)
Expect(err).ToNot(HaveOccurred())
Expect(sshKey.Fingerprint).To(Equal("fake-fingerprint"))
Expect(sshKey.Key).To(Equal("fake-key"))
Expect(sshKey.Label).To(Equal("fake-label"))
Expect(sshKey.Notes).To(Equal("fake-notes"))
})
})
Context("#GetObject", func() {
BeforeEach(func() {
sshKey.Id = 1337
fakeClient.DoRawHttpRequestResponse, err = testhelpers.ReadJsonTestFixtures("services", "SoftLayer_Security_Ssh_Key_Service_getObject.json")
Expect(err).ToNot(HaveOccurred())
})
It("gets an SSH key", func() {
key, err := sshKeyService.GetObject(sshKey.Id)
Expect(err).ToNot(HaveOccurred())
Expect(key.Id).To(Equal(1337))
Expect(key.Fingerprint).To(Equal("e9:56:6d:b1:f3:8b:f1:2a:dd:a3:24:73:4f:d3:1b:3c"))
Expect(key.Key).To(Equal("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAA/DczU7Wj4hgAgy14LfjOvVKtDBOfwFgHFwsXQ7Efp0pQRBOIwWoQfQ3hHMWw1X5Q7Mhwl9Gbat9V7tu985Hf5h9BOrq9D/ZIFQ1yhsvt6klZYoHHbM5kHFUegx9lgn3mHcfLNcvahDHpQAFXCPknc1VNpn0VP0RPhqZ8pubP7r9/Uczmit1ipy43SGzlxM46cyyqNPgDDRJepDla6coJJGuWVZMZaTXc3fNNFTSIi1ODDQgXxaYWcz5ThcQ1CT/MLSzAz7IDNNjAr5W40ZUmxxHzA5nPmLcKKqqXrxbnCyw+SrVjhIsKSoz41caYdSz2Bpw00ZxzVO9dCnHsEw=="))
Expect(key.Label).To(Equal("packer-53ead4c1-df11-9023-1173-eef40a291b7e"))
Expect(key.Notes).To(Equal("My test key"))
})
})
Context("#EditObject", func() {
BeforeEach(func() {
sshKey.Id = 1338
fakeClient.DoRawHttpRequestResponse, err = testhelpers.ReadJsonTestFixtures("services", "SoftLayer_Security_Ssh_Key_Service_editObject.json")
Expect(err).ToNot(HaveOccurred())
})
It("edits an existing SSH key", func() {
edited := datatypes.SoftLayer_Security_Ssh_Key{
Label: "edited-label",
}
result, err := sshKeyService.EditObject(sshKey.Id, edited)
Expect(err).ToNot(HaveOccurred())
Expect(result).To(BeTrue())
})
})
Context("#DeleteObject", func() {
BeforeEach(func() {
sshKey.Id = 1234567
})
It("sucessfully deletes the SoftLayer_Ssh_Key instance", func() {
fakeClient.DoRawHttpRequestResponse = []byte("true")
deleted, err := sshKeyService.DeleteObject(sshKey.Id)
Expect(err).ToNot(HaveOccurred())
Expect(deleted).To(BeTrue())
})
It("fails to delete the SoftLayer_Ssh_Key instance", func() {
fakeClient.DoRawHttpRequestResponse = []byte("false")
deleted, err := sshKeyService.DeleteObject(sshKey.Id)
Expect(err).To(HaveOccurred())
Expect(deleted).To(BeFalse())
})
})
Context("#GetSoftwarePasswords", func() {
BeforeEach(func() {
fakeClient.DoRawHttpRequestResponse, err = testhelpers.ReadJsonTestFixtures("services", "SoftLayer_Security_Ssh_Key_Service_getSoftwarePasswords.json")
Expect(err).ToNot(HaveOccurred())
sshKey.Id = 1234567
})
It("retrieves the software passwords associated with this virtual guest", func() {
passwords, err := sshKeyService.GetSoftwarePasswords(sshKey.Id)
Expect(err).ToNot(HaveOccurred())
Expect(len(passwords)).To(Equal(1))
password := passwords[0]
Expect(password.CreateDate).ToNot(BeNil())
Expect(password.Id).To(Equal(4244148))
Expect(password.ModifyDate).ToNot(BeNil())
Expect(password.Notes).To(Equal(""))
Expect(password.Password).To(Equal("QJ95Gznz"))
Expect(password.Port).To(Equal(0))
Expect(password.SoftwareId).To(Equal(4181746))
Expect(password.Username).To(Equal("root"))
Expect(password.Software.HardwareId).To(Equal(0))
Expect(password.Software.Id).To(Equal(4181746))
Expect(password.Software.ManufacturerLicenseInstance).To(Equal(""))
})
})
})
| ["\"SL_USERNAME\"", "\"SL_API_KEY\""] | [] | ["SL_API_KEY", "SL_USERNAME"] | [] | ["SL_API_KEY", "SL_USERNAME"] | go | 2 | 0 | |
cmd/main.go
|
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddycmd
import (
"bufio"
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"os"
"path/filepath"
"runtime"
"runtime/debug"
"strconv"
"strings"
"time"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/certmagic"
"go.uber.org/zap"
)
func init() {
// set a fitting User-Agent for ACME requests
goModule := caddy.GoModule()
cleanModVersion := strings.TrimPrefix(goModule.Version, "v")
certmagic.UserAgent = "Caddy/" + cleanModVersion
// by using Caddy, user indicates agreement to CA terms
// (very important, or ACME account creation will fail!)
certmagic.DefaultACME.Agreed = true
}
// Main implements the main function of the caddy command.
// Call this if Caddy is to be the main() of your program.
func Main() {
switch len(os.Args) {
case 0:
fmt.Printf("[FATAL] no arguments provided by OS; args[0] must be command\n")
os.Exit(caddy.ExitCodeFailedStartup)
case 1:
os.Args = append(os.Args, "help")
}
subcommandName := os.Args[1]
subcommand, ok := commands[subcommandName]
if !ok {
if strings.HasPrefix(os.Args[1], "-") {
// user probably forgot to type the subcommand
fmt.Println("[ERROR] first argument must be a subcommand; see 'caddy help'")
} else {
fmt.Printf("[ERROR] '%s' is not a recognized subcommand; see 'caddy help'\n", os.Args[1])
}
os.Exit(caddy.ExitCodeFailedStartup)
}
fs := subcommand.Flags
if fs == nil {
fs = flag.NewFlagSet(subcommand.Name, flag.ExitOnError)
}
err := fs.Parse(os.Args[2:])
if err != nil {
fmt.Println(err)
os.Exit(caddy.ExitCodeFailedStartup)
}
exitCode, err := subcommand.Func(Flags{fs})
if err != nil {
fmt.Fprintf(os.Stderr, "%s: %v\n", subcommand.Name, err)
}
os.Exit(exitCode)
}
// handlePingbackConn reads from conn and ensures it matches
// the bytes in expect, or returns an error if it doesn't.
func handlePingbackConn(conn net.Conn, expect []byte) error {
defer conn.Close()
confirmationBytes, err := ioutil.ReadAll(io.LimitReader(conn, 32))
if err != nil {
return err
}
if !bytes.Equal(confirmationBytes, expect) {
return fmt.Errorf("wrong confirmation: %x", confirmationBytes)
}
return nil
}
// loadConfig loads the config from configFile and adapts it
// using adapterName. If adapterName is specified, configFile
// must be also. If no configFile is specified, it tries
// loading a default config file. The lack of a config file is
// not treated as an error, but false will be returned if
// there is no config available. It prints any warnings to stderr,
// and returns the resulting JSON config bytes along with
// whether a config file was loaded or not.
func loadConfig(configFile, adapterName string) ([]byte, string, error) {
// specifying an adapter without a config file is ambiguous
if adapterName != "" && configFile == "" {
return nil, "", fmt.Errorf("cannot adapt config without config file (use --config)")
}
// load initial config and adapter
var config []byte
var cfgAdapter caddyconfig.Adapter
var err error
if configFile != "" {
config, err = ioutil.ReadFile(configFile)
if err != nil {
return nil, "", fmt.Errorf("reading config file: %v", err)
}
caddy.Log().Info("using provided configuration",
zap.String("config_file", configFile),
zap.String("config_adapter", adapterName))
} else if adapterName == "" {
// as a special case when no config file or adapter
// is specified, see if the Caddyfile adapter is
// plugged in, and if so, try using a default Caddyfile
cfgAdapter = caddyconfig.GetAdapter("caddyfile")
if cfgAdapter != nil {
config, err = ioutil.ReadFile("Caddyfile")
if os.IsNotExist(err) {
// okay, no default Caddyfile; pretend like this never happened
cfgAdapter = nil
} else if err != nil {
// default Caddyfile exists, but error reading it
return nil, "", fmt.Errorf("reading default Caddyfile: %v", err)
} else {
// success reading default Caddyfile
configFile = "Caddyfile"
caddy.Log().Info("using adjacent Caddyfile")
}
}
}
// as a special case, if a config file called "Caddyfile" was
// specified, and no adapter is specified, assume caddyfile adapter
// for convenience
if strings.HasPrefix(filepath.Base(configFile), "Caddyfile") &&
filepath.Ext(configFile) != ".json" &&
adapterName == "" {
adapterName = "caddyfile"
}
// load config adapter
if adapterName != "" {
cfgAdapter = caddyconfig.GetAdapter(adapterName)
if cfgAdapter == nil {
return nil, "", fmt.Errorf("unrecognized config adapter: %s", adapterName)
}
}
// adapt config
if cfgAdapter != nil {
adaptedConfig, warnings, err := cfgAdapter.Adapt(config, map[string]interface{}{
"filename": configFile,
})
if err != nil {
return nil, "", fmt.Errorf("adapting config using %s: %v", adapterName, err)
}
for _, warn := range warnings {
msg := warn.Message
if warn.Directive != "" {
msg = fmt.Sprintf("%s: %s", warn.Directive, warn.Message)
}
fmt.Printf("[WARNING][%s] %s:%d: %s\n", adapterName, warn.File, warn.Line, msg)
}
config = adaptedConfig
}
return config, configFile, nil
}
// watchConfigFile watches the config file at filename for changes
// and reloads the config if the file was updated. This function
// blocks indefinitely; it only quits if the poller has errors for
// long enough time. The filename passed in must be the actual
// config file used, not one to be discovered.
func watchConfigFile(filename, adapterName string) {
defer func() {
if err := recover(); err != nil {
log.Printf("[PANIC] watching config file: %v\n%s", err, debug.Stack())
}
}()
// make our logger; since config reloads can change the
// default logger, we need to get it dynamically each time
logger := func() *zap.Logger {
return caddy.Log().
Named("watcher").
With(zap.String("config_file", filename))
}
// get the initial timestamp on the config file
info, err := os.Stat(filename)
if err != nil {
logger().Error("cannot watch config file", zap.Error(err))
return
}
lastModified := info.ModTime()
logger().Info("watching config file for changes")
// if the file disappears or something, we can
// stop polling if the error lasts long enough
var lastErr time.Time
finalError := func(err error) bool {
if lastErr.IsZero() {
lastErr = time.Now()
return false
}
if time.Since(lastErr) > 30*time.Second {
logger().Error("giving up watching config file; too many errors",
zap.Error(err))
return true
}
return false
}
// begin poller
for range time.Tick(1 * time.Second) {
// get the file info
info, err := os.Stat(filename)
if err != nil {
if finalError(err) {
return
}
continue
}
lastErr = time.Time{} // no error, so clear any memory of one
// if it hasn't changed, nothing to do
if !info.ModTime().After(lastModified) {
continue
}
logger().Info("config file changed; reloading")
// remember this timestamp
lastModified = info.ModTime()
// load the contents of the file
config, _, err := loadConfig(filename, adapterName)
if err != nil {
logger().Error("unable to load latest config", zap.Error(err))
continue
}
// apply the updated config
err = caddy.Load(config, false)
if err != nil {
logger().Error("applying latest config", zap.Error(err))
continue
}
}
}
// Flags wraps a FlagSet so that typed values
// from flags can be easily retrieved.
type Flags struct {
*flag.FlagSet
}
// String returns the string representation of the
// flag given by name. It panics if the flag is not
// in the flag set.
func (f Flags) String(name string) string {
return f.FlagSet.Lookup(name).Value.String()
}
// Bool returns the boolean representation of the
// flag given by name. It returns false if the flag
// is not a boolean type. It panics if the flag is
// not in the flag set.
func (f Flags) Bool(name string) bool {
val, _ := strconv.ParseBool(f.String(name))
return val
}
// Int returns the integer representation of the
// flag given by name. It returns 0 if the flag
// is not an integer type. It panics if the flag is
// not in the flag set.
func (f Flags) Int(name string) int {
val, _ := strconv.ParseInt(f.String(name), 0, strconv.IntSize)
return int(val)
}
// Float64 returns the float64 representation of the
// flag given by name. It returns 0 if the flag
// is not a float64 type. It panics if the flag is
// not in the flag set.
func (f Flags) Float64(name string) float64 {
val, _ := strconv.ParseFloat(f.String(name), 64)
return val
}
// Duration returns the duration representation of the
// flag given by name. It returns 0 if the flag
// is not a duration type. It panics if the flag is
// not in the flag set.
func (f Flags) Duration(name string) time.Duration {
val, _ := caddy.ParseDuration(f.String(name))
return val
}
// flagHelp returns the help text for fs.
func flagHelp(fs *flag.FlagSet) string {
if fs == nil {
return ""
}
// temporarily redirect output
out := fs.Output()
defer fs.SetOutput(out)
buf := new(bytes.Buffer)
fs.SetOutput(buf)
fs.PrintDefaults()
return buf.String()
}
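// loadEnvFromFile reads KEY=VALUE pairs from envFile and sets them as environment variables.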
func loadEnvFromFile(envFile string) error {
file, err := os.Open(envFile)
if err != nil {
return fmt.Errorf("reading environment file: %v", err)
}
defer file.Close()
envMap, err := parseEnvFile(file)
if err != nil {
return fmt.Errorf("parsing environment file: %v", err)
}
for k, v := range envMap {
if err := os.Setenv(k, v); err != nil {
return fmt.Errorf("setting environment variables: %v", err)
}
}
return nil
}
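// parseEnvFile parses KEY=VALUE lines from envInput, skipping comments and blank lines.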
func parseEnvFile(envInput io.Reader) (map[string]string, error) {
envMap := make(map[string]string)
scanner := bufio.NewScanner(envInput)
var line string
lineNumber := 0
for scanner.Scan() {
line = strings.TrimSpace(scanner.Text())
lineNumber++
// skip lines starting with comment
if strings.HasPrefix(line, "#") {
continue
}
// skip empty line
if len(line) == 0 {
continue
}
fields := strings.SplitN(line, "=", 2)
if len(fields) != 2 {
return nil, fmt.Errorf("can't parse line %d; line should be in KEY=VALUE format", lineNumber)
}
if strings.Contains(fields[0], " ") {
return nil, fmt.Errorf("bad key on line %d: contains whitespace", lineNumber)
}
key := fields[0]
val := fields[1]
if key == "" {
return nil, fmt.Errorf("missing or empty key on line %d", lineNumber)
}
envMap[key] = val
}
if err := scanner.Err(); err != nil {
return nil, err
}
return envMap, nil
}
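// printEnvironment prints Caddy paths, runtime information, and all environment variables.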
func printEnvironment() {
fmt.Printf("caddy.HomeDir=%s\n", caddy.HomeDir())
fmt.Printf("caddy.AppDataDir=%s\n", caddy.AppDataDir())
fmt.Printf("caddy.AppConfigDir=%s\n", caddy.AppConfigDir())
fmt.Printf("caddy.ConfigAutosavePath=%s\n", caddy.ConfigAutosavePath)
fmt.Printf("caddy.Version=%s\n", caddy.GoModule().Version)
fmt.Printf("runtime.GOOS=%s\n", runtime.GOOS)
fmt.Printf("runtime.GOARCH=%s\n", runtime.GOARCH)
fmt.Printf("runtime.Compiler=%s\n", runtime.Compiler)
fmt.Printf("runtime.NumCPU=%d\n", runtime.NumCPU())
fmt.Printf("runtime.GOMAXPROCS=%d\n", runtime.GOMAXPROCS(0))
fmt.Printf("runtime.Version=%s\n", runtime.Version())
cwd, err := os.Getwd()
if err != nil {
cwd = fmt.Sprintf("<error: %v>", err)
}
fmt.Printf("os.Getwd=%s\n\n", cwd)
for _, v := range os.Environ() {
fmt.Println(v)
}
}
// moveStorage moves the old default dataDir to the new default dataDir.
// TODO: This is TEMPORARY until the release candidates.
func moveStorage() {
// get the home directory (the old way)
oldHome := os.Getenv("HOME")
if oldHome == "" && runtime.GOOS == "windows" {
drive := os.Getenv("HOMEDRIVE")
path := os.Getenv("HOMEPATH")
oldHome = drive + path
if drive == "" || path == "" {
oldHome = os.Getenv("USERPROFILE")
}
}
if oldHome == "" {
oldHome = "."
}
oldDataDir := filepath.Join(oldHome, ".local", "share", "caddy")
// nothing to do if old data dir doesn't exist
_, err := os.Stat(oldDataDir)
if os.IsNotExist(err) {
return
}
// nothing to do if the new data dir is the same as the old one
newDataDir := caddy.AppDataDir()
if oldDataDir == newDataDir {
return
}
logger := caddy.Log().Named("automigrate").With(
zap.String("old_dir", oldDataDir),
zap.String("new_dir", newDataDir))
logger.Info("beginning one-time data directory migration",
zap.String("details", "https://github.com/caddyserver/caddy/issues/2955"))
// if new data directory exists, avoid auto-migration as a conservative safety measure
_, err = os.Stat(newDataDir)
if !os.IsNotExist(err) {
logger.Error("new data directory already exists; skipping auto-migration as conservative safety measure",
zap.Error(err),
zap.String("instructions", "https://github.com/caddyserver/caddy/issues/2955#issuecomment-570000333"))
return
}
// construct the new data directory's parent folder
err = os.MkdirAll(filepath.Dir(newDataDir), 0700)
if err != nil {
logger.Error("unable to make new datadirectory - follow link for instructions",
zap.String("instructions", "https://github.com/caddyserver/caddy/issues/2955#issuecomment-570000333"),
zap.Error(err))
return
}
// folder structure is same, so just try to rename (move) it;
// this fails if the new path is on a separate device
err = os.Rename(oldDataDir, newDataDir)
if err != nil {
logger.Error("new data directory already exists; skipping auto-migration as conservative safety measure - follow link for instructions",
zap.String("instructions", "https://github.com/caddyserver/caddy/issues/2955#issuecomment-570000333"),
zap.Error(err))
}
logger.Info("successfully completed one-time migration of data directory",
zap.String("details", "https://github.com/caddyserver/caddy/issues/2955"))
}
| ["\"HOME\"", "\"HOMEDRIVE\"", "\"HOMEPATH\"", "\"USERPROFILE\""] | [] | ["USERPROFILE", "HOME", "HOMEPATH", "HOMEDRIVE"] | [] | ["USERPROFILE", "HOME", "HOMEPATH", "HOMEDRIVE"] | go | 4 | 0 | |
backend/internal/storage/companies.go
|
package storage
import (
"os"
"github.com/elhmn/camerdevs/pkg/models/v1beta"
)
//GetCompanies returns the list of companies, creating the default Cameroonian companies if they do not exist yet
func (db DB) GetCompanies() ([]v1beta.Company, error) {
companies := []v1beta.Company{}
//Check if the first Cameroonian company already exists
cameroonianCompany := v1beta.Company{}
ret := db.c.Table("companies").Where("name = ?", CameroonianCompanies[0]).Find(&cameroonianCompany)
if ret.Error != nil {
return companies, ret.Error
}
//If we did not find the first cameroonian company, we create every cameroonian companies
if cameroonianCompany.Name == "" {
tmpCameroonianCompanies := CameroonianCompanies
//If we are running tests we use a short list of companies
if os.Getenv("ENVIRONMENT") == "test" {
tmpCameroonianCompanies = []string{"Tester"}
}
for _, c := range tmpCameroonianCompanies {
tmpCompany := v1beta.Company{}
//I the company exists we don't create it
ret := db.c.Table("companies").Where("name = ?", c).Find(&tmpCompany)
if tmpCompany.Name != "" || ret.Error != nil {
continue
}
company := v1beta.Company{
Name: c,
}
db.c.Table("companies").Create(&company)
}
}
//Get the list of companies again
ret = db.c.Table("companies").Order("name").Find(&companies)
if ret.Error != nil {
return companies, ret.Error
}
return companies, nil
}
//GetCompanyByID get company by `id`
func (db DB) GetCompanyByID(id int64) (v1beta.Company, error) {
company := v1beta.Company{}
ret := db.c.First(&company, "id = ?", id)
if ret.Error != nil {
return company, ret.Error
}
return company, nil
}
//CameroonianCompanies is collection of default cameroonian companies
var CameroonianCompanies = []string{
"CCAA",
"ADC",
"ADER",
"Alpicam Industries",
"Alucam",
"AMPCI",
"Aberec Limited Machines électriques et matériels électroniques",
"Afriland First Bank",
"AFRILANE",
"AFROLOGIX",
"AFOUP",
"ANAFOR",
"ASMAP",
"AT graphiline",
"AFROSPHINX",
"APN",
"Brasseries du Cameroun",
"Business Facilities Corporation S.A",
"BT ROUTES S.A",
"BICEC",
"B.E.S Best Engineering System Sarl",
"Boh Plantations Limited",
"BUNS Sarl",
"BYOOS sarl",
"Bendo sarl",
"BVS",
"Camair",
"Camair-Co",
"Cambuild-BTP",
"Camdev emploi et formation",
"Camerinside Web Agency",
"Camerounaise des Eaux",
"Cameroon Water Utilities Corporation",
"Cameroon Development Corporation",
"Camercrack",
"Camlait",
"Campost",
"Camtel",
"Camrail",
"Cameroon Chemical Company",
"Cameroon Development Corporation",
"Cameroon Tea Estates",
"CamerPages",
"CAWAD",
"CCAA",
"CCPC Finance",
"Chanas assurances",
"Chantier naval industriel du Cameroun",
"CICAM",
"Clarans Afrique",
"Crédit lyonnais du Cameroun",
"Cimenterie du Cameroun",
"Crédit Foncier du Cameroun",
"CNIC",
"Commercial Bank of Cameroon",
"Compagnie d'Opérations Pétrolières Schlumberger",
"Conseil National des Chargeurs du Cameroun",
"Coordonnerie du 3e Millénaire",
"CRTV",
"CONGELCAM",
"Corlay Cameroun",
"CCA",
"CDS",
"CA Integrated Systems & consulting",
"CNPS",
"Douala Stock Exchange",
"DIPMAN",
"DANAY EXPRESS",
"DIGIT CAMEROON",
"EBA GROUP",
"E-Business Cameroon SARL",
"Electricity Development Corporation",
"Express Union",
"EIF-CONSULTING AND SERVICES SARL",
"Enéo Cameroon",
"EVA Corporation SARL",
"EWONGA SARL",
"ETS KOT SUD DOUALA",
"FRINT GROUP",
"Fonds national de l'emploi",
"Foongon Corporation",
"FEICOM",
"FB-Building",
"GSEC",
"Gaz du Cameroun",
"Geovic Cameroon",
"GETRACOM-INTER",
"GEQUIPS",
"GFBC",
"Guinness Cameroun sa",
"GNO Solutions",
"GESSIIA SARL",
"Haltech",
"Hévécam",
"Hope Management & Consulting",
"Hope Music Group",
"Hope Music Publishing",
"HSC Cameroun",
"IFATI",
"ITGStore",
"ICCSOFT",
"IMS",
"INSTRUMELEC",
"INSURMIND TECH",
"IELT CAMEROUN",
"IFRIQUIYA Conseil Sarl",
"Joy Bonapriso",
"KEN Technology Sarl",
"KPMG",
"KIMPA SAS",
"LANAVET",
"LIGHTECH",
"LAPIN237",
"LOCAGRUES CAMEROUN",
"LUMEN SARL",
"MIDEPECAM",
"MTN Cameroun",
"MIRE WORLD",
"Megasoft",
"Maguysama Technologies Solaires",
"MY WAY Sarl",
"Mezadi SARL",
"MEN TRAVEL",
"NACYDATE",
"Ndawara Tea Estates",
"Nestlé Cameroun",
"Nexttel cameroun",
"NTARRA",
"Nexa Industries",
"Orange Cameroun",
"Ok Plast Cam",
"Opticam",
"PERFITCOM",
"Plantations Pamol du Cameroun",
"PHP",
"Philjohn Technologies",
"PMUC",
"Port autonome de Douala",
"Port autonome de Kribi",
"Port autonome de Limbé",
"Plasticam",
"Pro Services Emploi",
"Projects Experts Consulting",
"Prosygma Cameroun",
"PwC Cameroun",
"Quezil Language Services traduction",
"ETS RIVER",
"SABC",
"SACONETS",
"SARMETAL",
"SBS Smart Business Solutions",
"SCDP",
"Semry",
"SGBC",
"SNH",
"Sodecoton",
"Socatral",
"Socapalm",
"SOCATUR",
"SONARA",
"Sopecam",
"SANCOMS",
"SOCOSER",
"SODEPA",
"SOTRACOM",
"SUDCAM",
"Secours auto",
"Societé Camerounaise d'équipement",
"SimaLap",
"SYSCOM SARL",
"S&P",
"Touristiques Express",
"Third",
"Tradex",
"The House of services",
"Uccao",
"UPTIMA.CM Sarl",
"WAT&CO",
}
|
[
"\"ENVIRONMENT\""
] |
[] |
[
"ENVIRONMENT"
] |
[]
|
["ENVIRONMENT"]
|
go
| 1 | 0 | |
examples/noreply.go
|
package main
import (
"context"
"os"
"strconv"
"time"
"github.com/rs/zerolog/log"
"github.com/wzhliang/xing"
"github.com/wzhliang/xing/examples/hello"
)
func _assert(err error) {
if err != nil {
log.Error().Msgf("Client: %v", err)
}
}
func _assertReturn(req, resp string) {
if req != resp {
log.Error().Msgf("Client: %s != %s", req, resp)
}
}
func main() {
url := "amqp://guest:guest@localhost:5672/"
mq := os.Getenv("RABBITMQ")
if mq == "" {
mq = url
}
producer, err := xing.NewClient("orchestration.controller", mq,
xing.SetIdentifier(&xing.NoneIdentifier{}),
xing.SetSerializer(&xing.JSONSerializer{}),
)
if err != nil {
log.Error().Msg("failed to create new client")
return
}
cli := hello.NewGreeterClient("host.server", producer)
n, err := strconv.Atoi(os.Args[1])
if err != nil {
log.Error().Str("#", os.Args[1]).Msg("Wrong argument")
return
}
for i := 0; i < n; i++ {
ctx, cancel := context.WithTimeout(context.Background(), 5000*time.Millisecond)
_, err = cli.Nihao(ctx, &hello.HelloRequest{
Name: "虚竹",
})
_assert(err)
cancel()
time.Sleep(1 * time.Second)
}
producer.Close()
}
|
[
"\"RABBITMQ\""
] |
[] |
[
"RABBITMQ"
] |
[]
|
["RABBITMQ"]
|
go
| 1 | 0 | |
photofolio/wsgi.py
|
"""
WSGI config for photofolio project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'photofolio.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
old_code/python/googleSpeech2Text.py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Script for google's new speech-to-text api, using long_running_recognition.
Notes:
- Only one alternative is queried (max_alternatives of the config is not set)
- You need to edit the path for your own credentials file
- Only files in a bucket in google cloud storage can be used
MANDATORY INPUT:
AUDIO: Path to an object (file) in google storage, including the bucket
and object names. E.g.: audio = "my_bucket/my_audio_file".
The full URI (request endpoint) will then look like this:
"gs://my_bucket/my_audio_file",
and that's enough for the speech-to-text app.
Using a bucket allows for long audio (> 1 min).
WARNING: You need to know in advance the encoding / sample rate / etc
of the audio file, as here we only point to it. If you are not using
a default LINEAR16 encoded, 16 kHz wav file, set the appropriate
optional arguments.
OPTIONAL INPUT:
CONTEXT: Speech context. Phrases that could help the recognition, especially
helpful with idiosyncratic names. Google expects a list of phrases
for the speech context object, where each phrase is max 100 chars,
and there are no more than 500 phrases altogether (there is also
an overall cap of 10 000 chars though).
CONTEXT is a text file, with each line as one phrase.
Default is no context.
LANGUAGE: Speech-to-text can do many different languages now and also
many dialects. Specifying the dialect can help the transcription.
LANGUAGE should be a language code (string).
For a list of supported language codes see:
https://cloud.google.com/speech/docs/languages
Default is 'en-US'
ENCODING: Audio file encoding, we need to supply this to the
speech-to-text engine.
String, defaults to 'LINEAR16'.
For a list of supported values see:
https://cloud.google.com/speech/reference/rest/v1beta1/RecognitionConfig#AudioEncoding
SAMPRATE: Audio file sampling rate, we need to supply this to the
speech-to-text engine.
Int, defaults to 16000.
Values between 8000-48000 are supported, but 16000 is advised.
OUTPUTS:
TRANSCRIPT FILE: txt file containing the transcript. No punctuation.
WORDLIST FILE: csv file listing all the words. Each row has the format:
[startTime, endTime, word]
WARNING: endTime is unreliable. It always equals the
startTime of the next word, meaning that pauses count
into the length of the last word
Created on Sat Jan 20 15:29:19 2018
@author: [email protected]
"""
#%% IMPORTS
import argparse
import csv
import time
import os
from google.cloud import speech
#%% Set GOOGLE_APPLICATION_CREDENTIALS environment variable
def setCredentials():
credPath = '/home/adamb/Documents/Python/google_serv_Account/\
SpeechToText-e9f958389223.json'
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credPath
#%% Write out results
# Response object levels seem to be:
#
# response,
# result in response.results,
# alternative in result.alternatives,
# transcript in alternative.transcript,
# confidence in alternative.confidence
# word in alternative.words
# start_time in word.start_time
# end_time in word.end_time
# word in word.word
#
# Write out one file with only the transcripts,
# and another one with timing data for each word
def writeResponse(saveFileBase, response):
# First write out the pure transcript parts
saveFile = saveFileBase + '_transcript.txt'
with open(saveFile, 'w') as f:
for part in range(len(response.results)):
f.write(response.results[part].alternatives[0].transcript +
' ')
# write out each word with start and end times into a csv,
# first column: start time, second: end time, third: word
saveFile = saveFileBase + '_words.csv'
with open(saveFile, 'w') as file:
writer = csv.writer(file, delimiter=',')
# iterate over all parts of results
for part in range(len(response.results)):
# iterate over all words
for word in range(len(response.results[part].alternatives[0].words)):
# derive start and end times as floats,
# plus the word itself as string
wordInfo = response.results[part].alternatives[0].words[word]
# nanos are converted to fractional seconds
startTime = (wordInfo.start_time.seconds +
wordInfo.start_time.nanos * 1e-9)
endTime = (wordInfo.end_time.seconds +
wordInfo.end_time.nanos * 1e-9)
currentWord = wordInfo.word
writer.writerow([startTime, endTime, currentWord])
#%% Check input arguments
def checkInputs(args):
# Check speechcontext
if args.speechcontext:
# if file, try to read it in, and create a list of phrases
if os.path.exists(args.speechcontext):
with open(args.speechcontext, 'r') as speechFile:
context = [line.strip() for line in speechFile.readlines()]
print('\nLoaded speech context file, found',
len(context), 'phrases')
else:
print('\nFound no file with the name given for speechcontext')
# if there was no speechcontext argument, or was empty
else:
print('\nReceived no speechcontext, '
'we trust google to do magic on its own')
context = []
# Check language
if not args.language:
language = 'en-US'
else:
language = args.language
print('\nLanguage code is set to ' + language)
# Check encoding
if not args.encoding:
encoding = 'LINEAR16'
else:
encoding = args.encoding
print('\nEncoding is set to ' + encoding)
# Check sampling rate
if not args.samprate:
samprate = 16000
else:
samprate = args.samprate
print('\nSampling rate is set to', samprate)
return context, language, encoding, samprate
#%% Use speech-to-text service
def transcribe(audio, context, language, encoding, samprate):
# set start time
startTime = time.time()
# init service
client = speech.SpeechClient()
# use long_running_recognize, aka asynchronous service,
# supply parameters in audio and config objects
operation = client.long_running_recognize(
audio=speech.types.RecognitionAudio(
uri='gs://'+audio
),
config=speech.types.RecognitionConfig(
encoding=encoding,
language_code=language,
sample_rate_hertz=samprate,
enable_word_time_offsets=True,
speech_contexts=[speech.types.SpeechContext(
phrases=context)]
),
)
# results
response = operation.result()
# print(results)
# feedback
print('\nReceived response, transcription took '
'{0:.2f} secs'.format(time.time()-startTime))
return response
#%% Main
def main():
# Argument parsing
parser = argparse.ArgumentParser(description='Script for connecting '
'to Google\'s speech-to-text service. '
'Read the docstring in the script.')
# Adding arguments. 'Audio' is mandatory, 'speechcontext', 'language',
# 'encoding' and 'samprate' are optional
# Audio
parser.add_argument(
'audio',
type=str,
help='URI to audio file in google storage '
'(e.g. \'my_bucket/my_audio_object\').')
# Speechcontext
parser.add_argument(
'-s',
'--speechcontext',
type=str,
const=None,
default=None,
help='Text file with the speechcontext,'
'each line is a phrase supplied to the engine. '
'Defaults to empty list')
# Language
parser.add_argument(
'-l',
'--language',
type=str,
const=None,
default=None,
help='Language/dialect code supplied to the engine. '
'Defaults to \'en-US\'')
# Encoding
parser.add_argument(
'-e',
'--encoding',
type=str,
const=None,
default=None,
help='Audio encoding type supplied to the engine. '
'Defaults to \'LINEAR16\'')
# Sampling rate
parser.add_argument(
'-r',
'--samprate',
type=int,
const=None,
default=None,
help='Audio sampling rate supplied to the engine. '
'Defaults to 16000')
# parse arguments
args = parser.parse_args()
# check inputs
context, language, encoding, samprate = checkInputs(args)
# set env variable for authentication
setCredentials()
# use speech-to-text
response = transcribe(args.audio, context, language, encoding, samprate)
# write out results,
# first create saveFile name
saveFileBase = os.path.splitext(os.path.split(args.audio)[1])[0]
writeResponse(saveFileBase, response)
#%% GO
if __name__ == '__main__':
main()
|
[] |
[] |
[
"GOOGLE_APPLICATION_CREDENTIALS"
] |
[]
|
["GOOGLE_APPLICATION_CREDENTIALS"]
|
python
| 1 | 0 | |
contrib/gitian-build.py
|
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Copyright (c) 2019 The EncoCoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import argparse
import os
import subprocess
import sys
def setup_linux():
global args, workdir
if os.path.isfile('/usr/bin/apt-get'):
programs = ['ruby', 'git', 'make', 'wget', 'curl']
if args.kvm:
programs += ['apt-cacher-ng', 'python-vm-builder', 'qemu-kvm', 'qemu-utils']
elif args.docker:
if not os.path.isfile('/lib/systemd/system/docker.service'):
dockers = ['docker.io', 'docker-ce']
for i in dockers:
return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i])
if return_code == 0:
subprocess.check_call(['sudo', 'usermod', '-aG', 'docker', os.environ['USER']])
print('Docker installed, restart your computer and re-run this script to continue the setup process.')
sys.exit(0)
if return_code != 0:
print('Cannot find any way to install Docker.', file=sys.stderr)
sys.exit(1)
else:
programs += ['apt-cacher-ng', 'lxc', 'debootstrap']
subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs)
setup_repos()
elif args.is_fedora:
pkgmgr = 'dnf'
repourl = 'https://download.docker.com/linux/fedora/docker-ce.repo'
elif args.is_centos:
pkgmgr = 'yum'
repourl = 'https://download.docker.com/linux/centos/docker-ce.repo'
if args.is_fedora or args.is_centos:
programs = ['ruby', 'make', 'wget', 'curl']
if args.kvm:
print('KVM not supported with Fedora/CentOS yet.')
sys.exit(1)
elif args.docker:
if not os.path.isfile('/lib/systemd/system/docker.service'):
user = os.environ['USER']
dockers = ['docker-ce', 'docker-ce-cli', 'containerd.io']
if args.is_fedora:
subprocess.check_call(['sudo', pkgmgr, 'install', '-y', 'dnf-plugins-core'])
subprocess.check_call(['sudo', pkgmgr, 'config-manager', '--add-repo', repourl])
elif args.is_centos:
reqs = ['yum-utils', 'device-mapper-persistent-data', 'lvm2']
subprocess.check_call(['sudo', pkgmgr, 'install', '-y'] + reqs)
subprocess.check_call(['sudo', 'yum-config-manager', '--add-repo', repourl])
subprocess.check_call(['sudo', pkgmgr, 'install', '-y'] + dockers)
subprocess.check_call(['sudo', 'usermod', '-aG', 'docker', user])
subprocess.check_call(['sudo', 'systemctl', 'enable', 'docker'])
print('Docker installed, restart your computer and re-run this script to continue the setup process.')
sys.exit(0)
subprocess.check_call(['sudo', 'systemctl', 'start', 'docker'])
else:
print('LXC not supported with Fedora/CentOS yet.')
sys.exit(1)
if args.is_fedora:
programs += ['git']
if args.is_centos:
# CentOS ships with an insanely outdated version of git that is no longer compatible with gitian builds
# Check current version and update if necessary
oldgit = b'2.' not in subprocess.check_output(['git', '--version'])
if oldgit:
subprocess.check_call(['sudo', pkgmgr, 'remove', '-y', 'git*'])
subprocess.check_call(['sudo', pkgmgr, 'install', '-y', 'https://centos7.iuscommunity.org/ius-release.rpm'])
programs += ['git2u-all']
subprocess.check_call(['sudo', pkgmgr, 'install', '-y'] + programs)
setup_repos()
else:
print('Unsupported system/OS type.')
sys.exit(1)
def setup_darwin():
global args, workdir
programs = []
if not os.path.isfile('/usr/local/bin/wget'):
programs += ['wget']
if not os.path.isfile('/usr/local/bin/git'):
programs += ['git']
if not os.path.isfile('/usr/local/bin/gsha256sum'):
programs += ['coreutils']
if args.docker:
print('Experimental setup for macOS host')
if len(programs) > 0:
subprocess.check_call(['brew', 'install'] + programs)
os.environ['PATH'] = '/usr/local/opt/coreutils/libexec/gnubin' + os.pathsep + os.environ['PATH']
elif args.kvm or not args.docker:
print('KVM and LXC are not supported under macOS at this time.')
sys.exit(0)
setup_repos()
def setup_repos():
if not os.path.isdir('gitian.sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/encocoin-Project/gitian.sigs.git'])
if not os.path.isdir('encocoin-detached-sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/encocoin-Project/encocoin-detached-sigs.git'])
if not os.path.isdir('gitian-builder'):
subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git'])
if not os.path.isdir('encocoin'):
subprocess.check_call(['git', 'clone', 'https://github.com/encocoin-Project/encocoin.git'])
os.chdir('gitian-builder')
make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64']
if args.docker:
make_image_prog += ['--docker']
elif not args.kvm:
make_image_prog += ['--lxc']
if args.host_os == 'darwin':
subprocess.check_call(['sed', '-i.old', '/50cacher/d', 'bin/make-base-vm'])
if args.host_os == 'linux':
if args.is_fedora or args.is_centos:
subprocess.check_call(['sed', '-i', '/50cacher/d', 'bin/make-base-vm'])
subprocess.check_call(make_image_prog)
subprocess.check_call(['git', 'checkout', 'bin/make-base-vm'])
os.chdir(workdir)
if args.host_os == 'linux':
if args.is_bionic and not args.kvm and not args.docker:
subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net'])
print('Reboot is required')
print('Setup complete!')
sys.exit(0)
def build():
global args, workdir
os.makedirs('encocoin-binaries/' + args.version, exist_ok=True)
print('\nBuilding Dependencies\n')
os.chdir('gitian-builder')
os.makedirs('inputs', exist_ok=True)
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://downloads.sourceforge.net/project/osslsigncode/osslsigncode/osslsigncode-1.7.1.tar.gz'])
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://bitcoincore.org/cfields/osslsigncode-Backports-to-1.7.1.patch'])
subprocess.check_call(["echo 'a8c4e9cafba922f89de0df1f2152e7be286aba73f78505169bc351a7938dd911 inputs/osslsigncode-Backports-to-1.7.1.patch' | sha256sum -c"], shell=True)
subprocess.check_call(["echo 'f9a8cdb38b9c309326764ebc937cba1523a3a751a7ab05df3ecc99d18ae466c9 inputs/osslsigncode-1.7.1.tar.gz' | sha256sum -c"], shell=True)
subprocess.check_call(['make', '-C', '../encocoin/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common'])
if args.linux:
print('\nCompiling ' + args.version + ' Linux')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'encocoin='+args.commit, '--url', 'encocoin='+args.url, '../encocoin/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs/', '../encocoin/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call('mv build/out/encocoin-*.tar.gz build/out/src/encocoin-*.tar.gz ../encocoin-binaries/'+args.version, shell=True)
if args.windows:
print('\nCompiling ' + args.version + ' Windows')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'encocoin='+args.commit, '--url', 'encocoin='+args.url, '../encocoin/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-unsigned', '--destination', '../gitian.sigs/', '../encocoin/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call('mv build/out/encocoin-*-win-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/encocoin-*.zip build/out/encocoin-*.exe build/out/src/encocoin-*.tar.gz ../encocoin-binaries/'+args.version, shell=True)
if args.macos:
print('\nCompiling ' + args.version + ' MacOS')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'encocoin='+args.commit, '--url', 'encocoin='+args.url, '../encocoin/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs/', '../encocoin/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call('mv build/out/encocoin-*-osx-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/encocoin-*.tar.gz build/out/encocoin-*.dmg build/out/src/encocoin-*.tar.gz ../encocoin-binaries/'+args.version, shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Unsigned Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer])
subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer])
os.chdir(workdir)
def sign():
global args, workdir
os.chdir('gitian-builder')
# TODO: Skip making signed windows sigs until we actually start producing signed windows binaries
#print('\nSigning ' + args.version + ' Windows')
#subprocess.check_call('cp inputs/encocoin-' + args.version + '-win-unsigned.tar.gz inputs/encocoin-win-unsigned.tar.gz', shell=True)
#subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature='+args.commit, '../encocoin/contrib/gitian-descriptors/gitian-win-signer.yml'])
#subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', '../gitian.sigs/', '../encocoin/contrib/gitian-descriptors/gitian-win-signer.yml'])
#subprocess.check_call('mv build/out/encocoin-*win64-setup.exe ../encocoin-binaries/'+args.version, shell=True)
#subprocess.check_call('mv build/out/encocoin-*win32-setup.exe ../encocoin-binaries/'+args.version, shell=True)
print('\nSigning ' + args.version + ' MacOS')
subprocess.check_call('cp inputs/encocoin-' + args.version + '-osx-unsigned.tar.gz inputs/encocoin-osx-unsigned.tar.gz', shell=True)
subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature='+args.commit, '../encocoin/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-signed', '--destination', '../gitian.sigs/', '../encocoin/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call('mv build/out/encocoin-osx-signed.dmg ../encocoin-binaries/'+args.version+'/encocoin-'+args.version+'-osx.dmg', shell=True)
os.chdir(workdir)
if args.commit_files:
os.chdir('gitian.sigs')
commit = False
if os.path.isfile(args.version+'-win-signed/'+args.signer+'/encocoin-win-signer-build.assert.sig'):
subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer])
commit = True
if os.path.isfile(args.version+'-osx-signed/'+args.signer+'/encocoin-dmg-signer-build.assert.sig'):
subprocess.check_call(['git', 'add', args.version+'-osx-signed/'+args.signer])
commit = True
if commit:
print('\nCommitting '+args.version+' Signed Sigs\n')
subprocess.check_call(['git', 'commit', '-a', '-m', 'Add '+args.version+' signed binary sigs for '+args.signer])
else:
print('\nNothing to commit\n')
os.chdir(workdir)
def verify():
global args, workdir
rc = 0
os.chdir('gitian-builder')
print('\nVerifying v'+args.version+' Linux\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-linux', '../encocoin/contrib/gitian-descriptors/gitian-linux.yml']):
print('Verifying v'+args.version+' Linux FAILED\n')
rc = 1
print('\nVerifying v'+args.version+' Windows\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-unsigned', '../encocoin/contrib/gitian-descriptors/gitian-win.yml']):
print('Verifying v'+args.version+' Windows FAILED\n')
rc = 1
print('\nVerifying v'+args.version+' MacOS\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-unsigned', '../encocoin/contrib/gitian-descriptors/gitian-osx.yml']):
print('Verifying v'+args.version+' MacOS FAILED\n')
rc = 1
# TODO: Skip checking signed windows sigs until we actually start producing signed windows binaries
#print('\nVerifying v'+args.version+' Signed Windows\n')
#if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-signed', '../encocoin/contrib/gitian-descriptors/gitian-win-signer.yml']):
# print('Verifying v'+args.version+' Signed Windows FAILED\n')
# rc = 1
print('\nVerifying v'+args.version+' Signed MacOS\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-signed', '../encocoin/contrib/gitian-descriptors/gitian-osx-signer.yml']):
print('Verifying v'+args.version+' Signed MacOS FAILED\n')
rc = 1
os.chdir(workdir)
return rc
def main():
global args, workdir
parser = argparse.ArgumentParser(description='Script for running full Gitian builds.')
parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch')
parser.add_argument('-p', '--pull', action='store_true', dest='pull', help='Indicate that the version argument is the number of a github repository pull request')
parser.add_argument('-u', '--url', dest='url', default='https://github.com/encocoin-Project/encocoin', help='Specify the URL of the repository. Default is %(default)s')
parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build')
parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build')
parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS')
parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries')
parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS')
parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s')
parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s')
parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC')
parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC')
parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Only works on Debian-based systems (Ubuntu, Debian)')
parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.')
parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git')
parser.add_argument('signer', nargs='?', help='GPG signer to sign each build assert file')
parser.add_argument('version', nargs='?', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified')
args = parser.parse_args()
workdir = os.getcwd()
args.host_os = sys.platform
if args.host_os == 'win32' or args.host_os == 'cygwin':
raise Exception('Error: Native Windows is not supported by this script, use WSL')
if args.host_os == 'linux':
if os.environ['USER'] == 'root':
raise Exception('Error: Do not run this script as the root user')
args.is_bionic = False
args.is_fedora = False
args.is_centos = False
if os.path.isfile('/usr/bin/lsb_release'):
args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs'])
if os.path.isfile('/etc/fedora-release'):
args.is_fedora = True
if os.path.isfile('/etc/centos-release'):
args.is_centos = True
if args.kvm and args.docker:
raise Exception('Error: cannot have both kvm and docker')
# Ensure no more than one environment variable for gitian-builder (USE_LXC, USE_VBOX, USE_DOCKER) is set as they
# can interfere (e.g., USE_LXC being set shadows USE_DOCKER; for details see gitian-builder/libexec/make-clean-vm).
os.environ['USE_LXC'] = ''
os.environ['USE_VBOX'] = ''
os.environ['USE_DOCKER'] = ''
if args.docker:
os.environ['USE_DOCKER'] = '1'
elif not args.kvm:
os.environ['USE_LXC'] = '1'
if 'GITIAN_HOST_IP' not in os.environ.keys():
os.environ['GITIAN_HOST_IP'] = '10.0.3.1'
if 'LXC_GUEST_IP' not in os.environ.keys():
os.environ['LXC_GUEST_IP'] = '10.0.3.5'
if args.setup:
if args.host_os == 'linux':
setup_linux()
elif args.host_os == 'darwin':
setup_darwin()
if args.buildsign:
args.build = True
args.sign = True
if not args.build and not args.sign and not args.verify:
sys.exit(0)
if args.host_os == 'darwin':
os.environ['PATH'] = '/usr/local/opt/coreutils/libexec/gnubin' + os.pathsep + os.environ['PATH']
args.linux = 'l' in args.os
args.windows = 'w' in args.os
args.macos = 'm' in args.os
# Disable for MacOS if no SDK found
if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.11.sdk.tar.gz'):
print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
args.macos = False
args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign'
if args.detach_sign:
args.commit_files = False
script_name = os.path.basename(sys.argv[0])
if not args.signer:
print(script_name+': Missing signer')
print('Try '+script_name+' --help for more information')
sys.exit(1)
if not args.version:
print(script_name+': Missing version')
print('Try '+script_name+' --help for more information')
sys.exit(1)
# Add leading 'v' for tags
if args.commit and args.pull:
raise Exception('Cannot have both commit and pull')
args.commit = ('' if args.commit else 'v') + args.version
os.chdir('encocoin')
if args.pull:
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
if not os.path.isdir('../gitian-builder/inputs/encocoin'):
os.makedirs('../gitian-builder/inputs/encocoin')
os.chdir('../gitian-builder/inputs/encocoin')
if not os.path.isdir('.git'):
subprocess.check_call(['git', 'init'])
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
args.commit = subprocess.check_output(['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True, encoding='utf8').strip()
args.version = 'pull-' + args.version
print(args.commit)
subprocess.check_call(['git', 'fetch'])
subprocess.check_call(['git', 'checkout', args.commit])
os.chdir(workdir)
os.chdir('gitian-builder')
subprocess.check_call(['git', 'pull'])
os.chdir(workdir)
if args.build:
build()
if args.sign:
sign()
if args.verify:
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'pull'])
os.chdir(workdir)
sys.exit(verify())
if __name__ == '__main__':
main()
|
[] |
[] |
[
"LXC_GUEST_IP",
"USE_DOCKER",
"USE_LXC",
"USER",
"USE_VBOX",
"GITIAN_HOST_IP",
"PATH"
] |
[]
|
["LXC_GUEST_IP", "USE_DOCKER", "USE_LXC", "USER", "USE_VBOX", "GITIAN_HOST_IP", "PATH"]
|
python
| 7 | 0 | |
molecule/macos-catalina/tests/test_default.py
|
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ["MOLECULE_INVENTORY_FILE"]
).get_hosts("all")
def test_java_toolchain(host):
f = host.file("/tmp/java")
assert f.exists
cmd = host.run("/tmp/java/jdk5/bin/java -version")
assert cmd.stderr == """java version \"1.5.0_22\"
Java(TM) 2 Runtime Environment, Standard Edition (build 1.5.0_22-b03)
Java HotSpot(TM) 64-Bit Server VM (build 1.5.0_22-b03, mixed mode)
"""
cmd = host.run("/tmp/java/jdk6/bin/java -version")
assert cmd.stderr == """java version \"1.6.0_45\"
Java(TM) SE Runtime Environment (build 1.6.0_45-b06)
Java HotSpot(TM) 64-Bit Server VM (build 20.45-b01, mixed mode)
"""
cmd = host.run("/tmp/java/jdk7/bin/java -version")
assert cmd.stderr == """java version \"1.7.0_79\"
Java(TM) SE Runtime Environment (build 1.7.0_79-b15)
Java HotSpot(TM) 64-Bit Server VM (build 24.79-b02, mixed mode)
"""
cmd = host.run("/tmp/java/jdk8/bin/java -version")
assert cmd.stderr == """java version \"1.8.0_162\"
Java(TM) SE Runtime Environment (build 1.8.0_162-b12)
Java HotSpot(TM) 64-Bit Server VM (build 25.162-b12, mixed mode)
"""
cmd = host.run("/tmp/java/jdk9/bin/java -version")
assert cmd.stderr == """java version \"9\"
Java(TM) SE Runtime Environment (build 9+181)
Java HotSpot(TM) 64-Bit Server VM (build 9+181, mixed mode)
"""
cmd = host.run("/tmp/java/jdk11/bin/java -version")
assert cmd.stderr == """java version \"11.0.1\" 2018-10-16 LTS
Java(TM) SE Runtime Environment 18.9 (build 11.0.1+13-LTS)
Java HotSpot(TM) 64-Bit Server VM 18.9 (build 11.0.1+13-LTS, mixed mode)
"""
|
[] |
[] |
[
"MOLECULE_INVENTORY_FILE"
] |
[]
|
["MOLECULE_INVENTORY_FILE"]
|
python
| 1 | 0 | |
dbcontrollers/context.go
|
package dbcontrollers
import (
"errors"
"fmt"
"os"
"github.com/artofimagination/mysql-user-db-go-interface/models"
"github.com/artofimagination/mysql-user-db-go-interface/mysqldb"
"github.com/google/uuid"
)
type DBControllerCommon interface {
CreateProduct(name string, owner *uuid.UUID, generateAssetPath func(assetID *uuid.UUID) (string, error)) (*models.Product, error)
DeleteProduct(productID *uuid.UUID) error
GetProduct(productID *uuid.UUID) (*models.ProductData, error)
UpdateProductDetails(details *models.Asset) error
UpdateProductAssets(assets *models.Asset) error
CreateUser(
name string,
email string,
passwd []byte,
generateAssetPath func(assetID *uuid.UUID) string,
encryptPassword func(password []byte) ([]byte, error)) (*models.User, error)
DeleteUser(ID *uuid.UUID, nominatedOwners map[uuid.UUID]uuid.UUID) error
GetUser(userID *uuid.UUID) (*models.UserData, error)
UpdateUserSettings(settings *models.Asset) error
UpdateUserAssets(assets *models.Asset) error
Authenticate(email string, passwd []byte, authenticate func(string, []byte, *models.User) error) error
}
type MYSQLController struct {
DBFunctions mysqldb.FunctionsCommon
DBConnector mysqldb.ConnectorCommon
ModelFunctions models.ModelFunctionsCommon
}
func NewDBController() (*MYSQLController, error) {
address := os.Getenv("MYSQL_DB_ADDRESS")
if address == "" {
return nil, errors.New("MYSQL DB address not defined")
}
port := os.Getenv("MYSQL_DB_PORT")
if address == "" {
return nil, errors.New("MYSQL DB port not defined")
}
username := os.Getenv("MYSQL_DB_USER")
if address == "" {
return nil, errors.New("MYSQL DB username not defined")
}
pass := os.Getenv("MYSQL_DB_PASSWORD")
if address == "" {
return nil, errors.New("MYSQL DB password not defined")
}
dbName := os.Getenv("MYSQL_DB_NAME")
if address == "" {
return nil, errors.New("MYSQL DB name not defined")
}
migrationDirectory := os.Getenv("MYSQL_DB_MIGRATION_DIR")
if migrationDirectory == "" {
return nil, errors.New("MYSQL DB migration folder not defined")
}
dbConnection := fmt.Sprintf(
"%s:%s@tcp(%s:%s)/%s?parseTime=true",
username,
pass,
address,
port,
dbName)
dbConnector := &mysqldb.MYSQLConnector{
DBConnection: dbConnection,
MigrationDirectory: migrationDirectory,
}
uuidImpl := &models.RepoUUID{}
controller := &MYSQLController{
DBFunctions: &mysqldb.MYSQLFunctions{
DBConnector: dbConnector,
UUIDImpl: uuidImpl,
},
DBConnector: dbConnector,
ModelFunctions: &models.RepoFunctions{
UUIDImpl: uuidImpl,
},
}
if err := controller.DBConnector.BootstrapSystem(); err != nil {
return nil, err
}
return controller, nil
}
|
[
"\"MYSQL_DB_ADDRESS\"",
"\"MYSQL_DB_PORT\"",
"\"MYSQL_DB_USER\"",
"\"MYSQL_DB_PASSWORD\"",
"\"MYSQL_DB_NAME\"",
"\"MYSQL_DB_MIGRATION_DIR\""
] |
[] |
[
"MYSQL_DB_PASSWORD",
"MYSQL_DB_NAME",
"MYSQL_DB_MIGRATION_DIR",
"MYSQL_DB_USER",
"MYSQL_DB_PORT",
"MYSQL_DB_ADDRESS"
] |
[]
|
["MYSQL_DB_PASSWORD", "MYSQL_DB_NAME", "MYSQL_DB_MIGRATION_DIR", "MYSQL_DB_USER", "MYSQL_DB_PORT", "MYSQL_DB_ADDRESS"]
|
go
| 6 | 0 | |
vendor/github.com/mozillazg/go-cos/examples/object/put.go
|
package main
import (
"context"
"net/url"
"os"
"strings"
"net/http"
"github.com/mozillazg/go-cos"
"github.com/mozillazg/go-cos/examples"
)
func main() {
u, _ := url.Parse("https://test-1253846586.cn-north.myqcloud.com")
b := &cos.BaseURL{BucketURL: u}
c := cos.NewClient(b, &http.Client{
Transport: &cos.AuthorizationTransport{
SecretID: os.Getenv("COS_SECRETID"),
SecretKey: os.Getenv("COS_SECRETKEY"),
Transport: &examples.DebugRequestTransport{
RequestHeader: true,
RequestBody: true,
ResponseHeader: true,
ResponseBody: true,
},
},
})
name := "test/objectPut.go"
f := strings.NewReader("test")
_, err := c.Object.Put(context.Background(), name, f, nil)
if err != nil {
panic(err)
}
name = "test/put_option.go"
f = strings.NewReader("test xxx")
opt := &cos.ObjectPutOptions{
ObjectPutHeaderOptions: &cos.ObjectPutHeaderOptions{
ContentType: "text/html",
},
ACLHeaderOptions: &cos.ACLHeaderOptions{
//XCosACL: "public-read",
XCosACL: "private",
},
}
_, err = c.Object.Put(context.Background(), name, f, opt)
if err != nil {
panic(err)
}
}
|
[
"\"COS_SECRETID\"",
"\"COS_SECRETKEY\""
] |
[] |
[
"COS_SECRETKEY",
"COS_SECRETID"
] |
[]
|
["COS_SECRETKEY", "COS_SECRETID"]
|
go
| 2 | 0 | |
testutils_test.go
|
// Copyright 2017-2019 Lei Ni ([email protected])
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dragonboat
import (
"context"
"fmt"
"math/rand"
"os"
"path/filepath"
"strconv"
"sync/atomic"
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/lni/dragonboat/client"
"github.com/lni/dragonboat/config"
serverConfig "github.com/lni/dragonboat/config"
"github.com/lni/dragonboat/internal/rsm"
"github.com/lni/dragonboat/internal/tests"
"github.com/lni/dragonboat/internal/tests/kvpb"
"github.com/lni/dragonboat/internal/utils/lang"
"github.com/lni/dragonboat/internal/utils/random"
)
type multiraftMonkeyTestAddrList struct {
addrList []string
}
func newMultiraftMonkeyTestAddrList() *multiraftMonkeyTestAddrList {
return &multiraftMonkeyTestAddrList{addrList: make([]string, 0)}
}
func (m *multiraftMonkeyTestAddrList) fill(base uint64) {
for i := uint64(1); i <= uint64(5); i++ {
addr := fmt.Sprintf("localhost:%d", base+i)
m.addrList = append(m.addrList, addr)
}
}
func (m *multiraftMonkeyTestAddrList) Size() uint64 {
return uint64(len(m.addrList))
}
func (m *multiraftMonkeyTestAddrList) Addresses() []string {
return m.addrList
}
func getMultiraftMonkeyTestAddrList() *multiraftMonkeyTestAddrList {
base := uint64(5400)
v := os.Getenv("MULTIRAFTMTPORT")
if len(v) > 0 {
iv, err := strconv.Atoi(v)
if err != nil {
panic(err)
}
base = uint64(iv)
plog.Infof("using port base from env %d", base)
} else {
plog.Infof("using default port base %d", base)
}
dl := newMultiraftMonkeyTestAddrList()
dl.fill(base)
return dl
}
const (
mtNumOfClusters uint64 = 64
mtWorkingDirectory = "monkey_testing_nh_dir_safe_to_delete"
)
type mtNodeHost struct {
listIndex uint64
dir string
nh *NodeHost
stopped bool
next int64
addresses []string
}
func (n *mtNodeHost) setNext(low int64, high int64) {
if high <= low {
panic("high <= low")
}
v := (low + rand.Int63()%(high-low)) * 1000000
plog.Infof("next event for node %d is scheduled in %d second",
n.listIndex+1, v/1000000000)
n.next = time.Now().UnixNano() + v
}
func (n *mtNodeHost) Running() bool {
return n.nh != nil && !n.stopped
}
func (n *mtNodeHost) Stop() {
if n.stopped {
panic("already stopped")
}
done := uint32(0)
go func() {
plog.Infof("going to stop the test node host, i %d", n.listIndex)
n.nh.Stop()
plog.Infof("test node host stopped, i %d", n.listIndex)
atomic.StoreUint32(&done, 1)
}()
count := 0
for {
time.Sleep(100 * time.Millisecond)
if atomic.LoadUint32(&done) == 1 {
break
}
count++
if count == 200 {
panic("failed to stop the node host")
}
}
n.stopped = true
}
func (n *mtNodeHost) Start() {
if !n.stopped {
panic("already running")
}
rc, nhc := getMTConfig()
config := nhc
config.NodeHostDir = filepath.Join(n.dir, nhc.NodeHostDir)
config.WALDir = filepath.Join(n.dir, nhc.WALDir)
config.RaftAddress = n.addresses[n.listIndex]
nh := NewNodeHost(config)
n.nh = nh
peers := make(map[uint64]string)
for idx, v := range n.addresses {
peers[uint64(idx+1)] = v
}
createStateMachine := func(clusterID uint64, nodeID uint64,
done <-chan struct{}) rsm.IManagedStateMachine {
ds := tests.NewKVTest(clusterID, nodeID)
return rsm.NewNativeStateMachine(ds, done)
}
for i := uint64(1); i <= mtNumOfClusters; i++ {
rc.ClusterID = i
rc.NodeID = n.listIndex + 1
plog.Infof("starting cluster %d node %d", rc.ClusterID, rc.NodeID)
if err := n.nh.startCluster(peers,
false, createStateMachine, make(chan struct{}), rc); err != nil {
panic(err)
}
}
n.stopped = false
}
func (n *mtNodeHost) RestartCluster(clusterID uint64) {
if n.stopped {
panic("already stopped")
}
rc, _ := getMTConfig()
createStateMachine := func(clusterID uint64, nodeID uint64,
done <-chan struct{}) rsm.IManagedStateMachine {
ds := tests.NewKVTest(clusterID, nodeID)
return rsm.NewNativeStateMachine(ds, done)
}
rc.ClusterID = clusterID
// the extra 5 is to make it different from the initial
rc.NodeID = n.listIndex + 1 + 5 // make it different from the initial
plog.Infof("starting cluster %d node %d on node with list index %d",
rc.ClusterID, rc.NodeID, n.listIndex)
if err := n.nh.startCluster(nil,
true, createStateMachine, make(chan struct{}), rc); err != nil {
panic(err)
}
}
func getMTConfig() (config.Config, serverConfig.NodeHostConfig) {
rc := config.Config{
ElectionRTT: 20,
HeartbeatRTT: 1,
CheckQuorum: true,
SnapshotEntries: 5,
CompactionOverhead: 5,
}
nhc := serverConfig.NodeHostConfig{
WALDir: "nhmt",
NodeHostDir: "nhmt",
RTTMillisecond: 50,
}
return rc, nhc
}
func removeMTDirs() {
os.RemoveAll(mtWorkingDirectory)
}
func prepareMTDirs() []string {
removeMTDirs()
dl := getMultiraftMonkeyTestAddrList()
dirList := make([]string, 0)
for i := uint64(1); i <= dl.Size(); i++ {
nn := fmt.Sprintf("node%d", i)
nd := filepath.Join(mtWorkingDirectory, nn)
if err := os.MkdirAll(nd, 0755); err != nil {
panic(err)
}
dirList = append(dirList, nd)
}
return dirList
}
func createMTNodeHostList() []*mtNodeHost {
dirList := prepareMTDirs()
dl := getMultiraftMonkeyTestAddrList()
result := make([]*mtNodeHost, dl.Size())
for i := uint64(0); i < dl.Size(); i++ {
result[i] = &mtNodeHost{
listIndex: i,
stopped: true,
dir: dirList[i],
addresses: dl.Addresses(),
}
}
return result
}
func startAllClusters(nhList []*mtNodeHost) {
for _, node := range nhList {
node.Start()
}
}
func stopMTNodeHosts(nhList []*mtNodeHost) {
for _, v := range nhList {
v.Stop()
}
}
func waitForStableClusters(t *testing.T, nhList []*mtNodeHost, waitSeconds uint64) {
time.Sleep(3000 * time.Millisecond)
for {
done := tryWaitForStableClusters(t, nhList, waitSeconds)
if !done {
panic("failed to get a stable network")
}
time.Sleep(3 * time.Second)
done = tryWaitForStableClusters(t, nhList, waitSeconds)
if done {
return
}
time.Sleep(3 * time.Second)
}
}
func tryWaitForStableClusters(t *testing.T, nhList []*mtNodeHost,
waitSeconds uint64) bool {
waitMilliseconds := waitSeconds * 1000
totalWait := uint64(0)
var nodeReady bool
var leaderReady bool
for !nodeReady || !leaderReady {
nodeReady = true
leaderReady = true
leaderMap := make(map[uint64]bool)
time.Sleep(100 * time.Millisecond)
totalWait += 100
if totalWait >= waitMilliseconds {
return false
}
for _, n := range nhList {
if n == nil || n.nh == nil {
continue
}
nh := n.nh
nh.forEachCluster(func(cid uint64, node *node) bool {
leaderID, ok, err := nh.GetLeaderID(node.clusterID)
if err != nil {
nodeReady = false
return true
}
if ok && leaderID == node.nodeID {
leaderMap[node.clusterID] = true
}
return true
})
}
if uint64(len(leaderMap)) != mtNumOfClusters {
leaderReady = false
}
}
return true
}
func getRandomStringBytes(sz int) []byte {
return []byte(random.String(sz))
}
func makeRandomProposal(nhList []*mtNodeHost, count int) {
hasRunningNode := false
for _, n := range nhList {
if n.Running() {
hasRunningNode = true
}
}
if !hasRunningNode {
return
}
for i := 0; i < count; i++ {
idx := rand.Int() % len(nhList)
if nhList[idx].Running() {
valsz := rand.Int()%16 + 3
key := fmt.Sprintf("key-%d", rand.Int())
value := string(getRandomStringBytes(valsz))
kv := &kvpb.PBKV{
Key: &key,
Val: &value,
}
rec, err := proto.Marshal(kv)
if err != nil {
panic(err)
}
clusterID := rand.Uint64()%mtNumOfClusters + 1
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
session, err := nhList[idx].nh.GetNewSession(ctx, clusterID)
if err != nil {
plog.Errorf("couldn't get a proposal client for %d", clusterID)
continue
}
ctx, cancel = context.WithTimeout(context.Background(), time.Second)
defer cancel()
repeat := (rand.Int() % 3) + 1
for j := 0; j < repeat; j++ {
session.ProposalCompleted()
result, err := nhList[idx].nh.SyncPropose(ctx, session, rec)
if err != nil {
plog.Infof(err.Error())
continue
} else {
if result != uint64(len(rec)) {
plog.Panicf("result %d, want %d", result, len(rec))
}
ctx, cancel = context.WithTimeout(context.Background(), time.Second)
defer cancel()
readIdx := rand.Int() % len(nhList)
if nhList[readIdx].Running() {
resp, err := nhList[readIdx].nh.SyncRead(ctx, clusterID, []byte(*kv.Key))
if err == nil {
if string(resp) != string(*kv.Val) {
plog.Panicf("got %s, want %s", string(resp), kv.Val)
}
}
}
}
}
if (rand.Int() % 10) > 2 {
f := func() {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
nhList[idx].nh.CloseSession(ctx, session)
}
f()
}
} else {
plog.Infof("got a dead node, skipping it, %d", i)
i--
}
}
}
func waitLastAppliedToSync(t *testing.T, smList []*mtNodeHost) {
count := 0
for {
appliedMap := make(map[uint64]uint64)
notSynced := make([]uint64, 0)
for _, n := range smList {
nh := n.nh
nh.forEachCluster(func(clusterID uint64, rn *node) bool {
lastApplied := rn.sm.GetLastApplied()
existingLastApplied, ok := appliedMap[clusterID]
if !ok {
appliedMap[clusterID] = lastApplied
} else {
if existingLastApplied != lastApplied {
notSynced = append(notSynced, clusterID)
}
}
return true
})
}
if len(notSynced) > 0 {
time.Sleep(100 * time.Millisecond)
count++
} else {
return
}
if count == 1200 {
for _, n := range smList {
nh := n.nh
nh.forEachCluster(func(clusterID uint64, rn *node) bool {
if lang.ContainsUint64(clusterID, notSynced) {
plog.Infof("%s rn.lastApplied %d", rn.describe(), rn.sm.GetLastApplied())
rn.dumpRaftInfoToLog()
}
return true
})
}
t.Fatalf("failed to sync last applied")
}
}
}
func checkStateMachine(t *testing.T, smList []*mtNodeHost) {
hashMap := make(map[uint64]uint64)
sessionHashMap := make(map[uint64]uint64)
waitLastAppliedToSync(t, smList)
for _, n := range smList {
nh := n.nh
nh.forEachCluster(func(clusterID uint64, rn *node) bool {
hash := rn.getStateMachineHash()
sessionHash := rn.getSessionHash()
// check hash
existingHash, ok := hashMap[clusterID]
if !ok {
hashMap[clusterID] = hash
} else {
if existingHash != hash {
t.Errorf("hash mismatch, existing %d, new %d",
existingHash, hash)
}
}
// check session hash
existingHash, ok = sessionHashMap[clusterID]
if !ok {
sessionHashMap[clusterID] = sessionHash
} else {
if existingHash != sessionHash {
t.Errorf("session hash mismatch, existing %d, new %d",
existingHash, sessionHash)
}
}
return true
})
}
}
func testProposalCanBeMade(t *testing.T, node *mtNodeHost, data []byte) {
session := tryGetSession(node.nh, 1)
if session == nil {
t.Fatalf("failed to get client session")
}
defer func() {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
node.nh.CloseSession(ctx, session)
}()
retry := 0
for retry < 5 {
retry++
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
_, err := node.nh.SyncPropose(ctx, session, data)
cancel()
if err != nil {
session.ProposalCompleted()
if retry == 5 {
t.Fatalf("failed to make proposal %v", err)
}
} else {
break
}
}
}
func testLinearizableReadReturnExpectedResult(t *testing.T, node *mtNodeHost,
query []byte, expected []byte) {
retry := 0
for retry < 5 {
retry++
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
result, err := node.nh.SyncRead(ctx, 1, query)
cancel()
if err != nil {
if retry == 5 {
t.Fatalf("failed to read, %v", err)
}
} else {
if string(result) != string(expected) {
t.Errorf("got size %d want size %d", len(result), len(expected))
}
break
}
}
}
func tryGetSession(nh *NodeHost, clusterID uint64) *client.Session {
var err error
var session *client.Session
for i := 0; i < 5; i++ {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
session, err = nh.GetNewSession(ctx, clusterID)
if err == nil {
return session
}
}
return nil
}
|
[
"\"MULTIRAFTMTPORT\""
] |
[] |
[
"MULTIRAFTMTPORT"
] |
[]
|
["MULTIRAFTMTPORT"]
|
go
| 1 | 0 | |
chat_server/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'chat_server.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
cmd/main.go
|
package main
import (
"flag"
"github.com/davecgh/go-spew/spew"
"github.com/golang/glog"
"github.com/turbonomic/data-ingestion-framework/pkg"
"github.com/turbonomic/data-ingestion-framework/pkg/conf"
"os"
)
func parseFlags() {
flag.Parse()
}
func main() {
// Ignore errors
_ = flag.Set("logtostderr", "false")
_ = flag.Set("alsologtostderr", "true")
_ = flag.Set("log_dir", "/var/log")
defer glog.Flush()
// Config pretty print for debugging
spew.Config = spew.ConfigState{
Indent: " ",
MaxDepth: 0,
DisableMethods: true,
DisablePointerMethods: true,
ContinueOnMethod: false,
SortKeys: true,
SpewKeys: false,
}
// Parse command line flags
//parseFlags()
glog.Info("Starting DIF Turbo...")
glog.Infof("GIT_COMMIT: %s", os.Getenv("GIT_COMMIT"))
args := conf.NewDIFProbeArgs(flag.CommandLine)
flag.Parse()
s, err := pkg.NewDIFTAPService(args)
if err != nil {
glog.Fatalf("Failed creating DIFTurbo: %v", err)
}
s.Start()
return
}
|
[
"\"GIT_COMMIT\""
] |
[] |
[
"GIT_COMMIT"
] |
[]
|
["GIT_COMMIT"]
|
go
| 1 | 0 | |
restarter.go
|
// Package restarter runs a command with arguments and restarts it whenever a
// value is sent on a restart channel.
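//
// A minimal usage sketch (the command, arguments and watchConfig helper are
// hypothetical; the caller is assumed to signal restarts from elsewhere,
// e.g. on a config change):
//
//	restart := make(chan bool)
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//	go watchConfig(restart) // hypothetical helper that sends on restart
//	err := restarter.DoWithContext(ctx, "/usr/bin/myserver",
//		[]string{"-config", "/etc/myserver.conf"}, restart)
//
// DoWithContext restarts the command after a restart signal or a clean exit,
// and returns when the context is cancelled or the command fails.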
package restarter
import (
"fmt"
"log"
"os"
"os/exec"
"time"
"context"
)
func DoWithContext(ctx context.Context, name string, args []string, restart <-chan bool) error {
var cmd *exec.Cmd
var err error
errc := make(chan error)
for {
select {
case <-ctx.Done():
debug("parent done")
return nil
default:
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
debug("starting cmd:")
debug(name)
cmd = exec.CommandContext(ctx, name, args...)
cmd.Env = os.Environ()
cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
err = cmd.Start()
if err != nil {
debug(err)
time.Sleep(time.Second)
continue
}
go func() {
fmt.Fprintf(os.Stderr, "Running binary '%s' with arguments '%s'\n", name, args)
errc <- cmd.Wait()
}()
debug("waiting for restart")
select {
case err = <-errc:
debug("got error")
if err != nil {
debug("got error from cmd")
debug(err)
return err
}
case <-restart:
debug("got restart")
cancel()
err := <-errc
if err != nil {
debug("got error out of errc")
debug(err)
}
case <-ctx.Done():
debug("ctx Done")
return nil
}
}
}
var debug func(...interface{})
func noop(v ...interface{}) {}
func init() {
if os.Getenv("RESTARTER_DEBUG") == "" {
debug = noop
} else {
debug = log.New(os.Stderr, "RESTARTER: ", log.LstdFlags).Println
}
}
|
[
"\"RESTARTER_DEBUG\""
] |
[] |
[
"RESTARTER_DEBUG"
] |
[]
|
["RESTARTER_DEBUG"]
|
go
| 1 | 0 | |
src/cmd/compile/internal/ssa/rewrite.go
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssa
import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
"fmt"
"io"
"math"
"math/bits"
"os"
"path/filepath"
)
func applyRewrite(f *Func, rb blockRewriter, rv valueRewriter) {
// repeat rewrites until we find no more rewrites
pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block
pendingLines.clear()
for {
change := false
for _, b := range f.Blocks {
if b.Control != nil && b.Control.Op == OpCopy {
for b.Control.Op == OpCopy {
b.SetControl(b.Control.Args[0])
}
}
if rb(b) {
change = true
}
for j, v := range b.Values {
change = phielimValue(v) || change
// Eliminate copy inputs.
// If any copy input becomes unused, mark it
// as invalid and discard its argument. Repeat
// recursively on the discarded argument.
// This phase helps remove phantom "dead copy" uses
// of a value so that a x.Uses==1 rule condition
// fires reliably.
for i, a := range v.Args {
if a.Op != OpCopy {
continue
}
aa := copySource(a)
v.SetArg(i, aa)
// If a, a copy, has a line boundary indicator, attempt to find a new value
// to hold it. The first candidate is the value that will replace a (aa),
// if it shares the same block and line and is eligible.
// The second option is v, which has a as an input. Because aa is earlier in
// the data flow, it is the better choice.
if a.Pos.IsStmt() == src.PosIsStmt {
if aa.Block == a.Block && aa.Pos.Line() == a.Pos.Line() && aa.Pos.IsStmt() != src.PosNotStmt {
aa.Pos = aa.Pos.WithIsStmt()
} else if v.Block == a.Block && v.Pos.Line() == a.Pos.Line() && v.Pos.IsStmt() != src.PosNotStmt {
v.Pos = v.Pos.WithIsStmt()
} else {
// Record the lost line and look for a new home after all rewrites are complete.
// TODO: it's possible (in FOR loops, in particular) for statement boundaries for the same
// line to appear in more than one block, but only one block is stored, so if both end
// up here, then one will be lost.
pendingLines.set(a.Pos.Line(), int32(a.Block.ID))
}
a.Pos = a.Pos.WithNotStmt()
}
change = true
for a.Uses == 0 {
b := a.Args[0]
a.reset(OpInvalid)
a = b
}
}
// apply rewrite function
if rv(v) {
change = true
// If value changed to a poor choice for a statement boundary, move the boundary
if v.Pos.IsStmt() == src.PosIsStmt {
if k := nextGoodStatementIndex(v, j, b); k != j {
v.Pos = v.Pos.WithNotStmt()
b.Values[k].Pos = b.Values[k].Pos.WithIsStmt()
}
}
}
}
}
if !change {
break
}
}
// remove clobbered values
for _, b := range f.Blocks {
j := 0
for i, v := range b.Values {
vl := v.Pos.Line()
if v.Op == OpInvalid {
if v.Pos.IsStmt() == src.PosIsStmt {
pendingLines.set(vl, int32(b.ID))
}
f.freeValue(v)
continue
}
if v.Pos.IsStmt() != src.PosNotStmt && pendingLines.get(vl) == int32(b.ID) {
pendingLines.remove(vl)
v.Pos = v.Pos.WithIsStmt()
}
if i != j {
b.Values[j] = v
}
j++
}
if pendingLines.get(b.Pos.Line()) == int32(b.ID) {
b.Pos = b.Pos.WithIsStmt()
pendingLines.remove(b.Pos.Line())
}
if j != len(b.Values) {
tail := b.Values[j:]
for j := range tail {
tail[j] = nil
}
b.Values = b.Values[:j]
}
}
}
// Common functions called from rewriting rules
func is64BitFloat(t *types.Type) bool {
return t.Size() == 8 && t.IsFloat()
}
func is32BitFloat(t *types.Type) bool {
return t.Size() == 4 && t.IsFloat()
}
func is64BitInt(t *types.Type) bool {
return t.Size() == 8 && t.IsInteger()
}
func is32BitInt(t *types.Type) bool {
return t.Size() == 4 && t.IsInteger()
}
func is16BitInt(t *types.Type) bool {
return t.Size() == 2 && t.IsInteger()
}
func is8BitInt(t *types.Type) bool {
return t.Size() == 1 && t.IsInteger()
}
func isPtr(t *types.Type) bool {
return t.IsPtrShaped()
}
func isSigned(t *types.Type) bool {
return t.IsSigned()
}
// mergeSym merges two symbolic offsets. There is no real merging of
// offsets, we just pick the non-nil one.
func mergeSym(x, y interface{}) interface{} {
if x == nil {
return y
}
if y == nil {
return x
}
panic(fmt.Sprintf("mergeSym with two non-nil syms %s %s", x, y))
}
func canMergeSym(x, y interface{}) bool {
return x == nil || y == nil
}
// canMergeLoad reports whether the load can be merged into target without
// invalidating the schedule.
// It also checks that the other non-load argument x is something we
// are ok with clobbering (all our current load+op instructions clobber
// their input register).
func canMergeLoad(target, load, x *Value) bool {
if target.Block.ID != load.Block.ID {
// If the load is in a different block do not merge it.
return false
}
// We can't merge the load into the target if the load
// has more than one use.
if load.Uses != 1 {
return false
}
// The register containing x is going to get clobbered.
// Don't merge if we still need the value of x.
// We don't have liveness information here, but we can
// approximate x dying with:
// 1) target is x's only use.
// 2) target is not in a deeper loop than x.
if x.Uses != 1 {
return false
}
loopnest := x.Block.Func.loopnest()
loopnest.calculateDepths()
if loopnest.depth(target.Block.ID) > loopnest.depth(x.Block.ID) {
return false
}
mem := load.MemoryArg()
// We need the load's memory arg to still be alive at target. That
// can't be the case if one of target's args depends on a memory
// state that is a successor of load's memory arg.
//
// For example, it would be invalid to merge load into target in
// the following situation because newmem has killed oldmem
// before target is reached:
// load = read ... oldmem
// newmem = write ... oldmem
// arg0 = read ... newmem
// target = add arg0 load
//
// If the argument comes from a different block then we can exclude
// it immediately because it must dominate load (which is in the
// same block as target).
var args []*Value
for _, a := range target.Args {
if a != load && a.Block.ID == target.Block.ID {
args = append(args, a)
}
}
// memPreds contains memory states known to be predecessors of load's
// memory state. It is lazily initialized.
var memPreds map[*Value]bool
search:
for i := 0; len(args) > 0; i++ {
const limit = 100
if i >= limit {
// Give up if we have done a lot of iterations.
return false
}
v := args[len(args)-1]
args = args[:len(args)-1]
if target.Block.ID != v.Block.ID {
// Since target and load are in the same block
// we can stop searching when we leave the block.
continue search
}
if v.Op == OpPhi {
// A Phi implies we have reached the top of the block.
// The memory phi, if it exists, is always
// the first logical store in the block.
continue search
}
if v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
// We could handle this situation however it is likely
// to be very rare.
return false
}
if v.Type.IsMemory() {
if memPreds == nil {
// Initialise a map containing memory states
// known to be predecessors of load's memory
// state.
memPreds = make(map[*Value]bool)
m := mem
const limit = 50
for i := 0; i < limit; i++ {
if m.Op == OpPhi {
// The memory phi, if it exists, is always
// the first logical store in the block.
break
}
if m.Block.ID != target.Block.ID {
break
}
if !m.Type.IsMemory() {
break
}
memPreds[m] = true
if len(m.Args) == 0 {
break
}
m = m.MemoryArg()
}
}
// We can merge if v is a predecessor of mem.
//
// For example, we can merge load into target in the
// following scenario:
// x = read ... v
// mem = write ... v
// load = read ... mem
// target = add x load
if memPreds[v] {
continue search
}
return false
}
if len(v.Args) > 0 && v.Args[len(v.Args)-1] == mem {
// If v takes mem as an input then we know mem
// is valid at this point.
continue search
}
for _, a := range v.Args {
if target.Block.ID == a.Block.ID {
args = append(args, a)
}
}
}
return true
}
// isSameSym returns whether sym is the same as the given named symbol
func isSameSym(sym interface{}, name string) bool {
s, ok := sym.(fmt.Stringer)
return ok && s.String() == name
}
// nlz returns the number of leading zeros.
func nlz(x int64) int64 {
return int64(bits.LeadingZeros64(uint64(x)))
}
// ntz returns the number of trailing zeros.
func ntz(x int64) int64 {
return int64(bits.TrailingZeros64(uint64(x)))
}
func oneBit(x int64) bool {
return bits.OnesCount64(uint64(x)) == 1
}
// nlo returns the number of leading ones.
func nlo(x int64) int64 {
return nlz(^x)
}
// nto returns the number of trailing ones.
func nto(x int64) int64 {
return ntz(^x)
}
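// Illustrative values for the bit-counting helpers above (not part of the original source):
//   nlz(1) == 63, ntz(8) == 3, nto(7) == 3, oneBit(8) == true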
// log2 returns logarithm in base 2 of uint64(n), with log2(0) = -1.
// Rounds down.
func log2(n int64) int64 {
return int64(bits.Len64(uint64(n))) - 1
}
// log2uint32 returns logarithm in base 2 of uint32(n), with log2(0) = -1.
// Rounds down.
func log2uint32(n int64) int64 {
return int64(bits.Len32(uint32(n))) - 1
}
// isPowerOfTwo reports whether n is a power of 2.
func isPowerOfTwo(n int64) bool {
return n > 0 && n&(n-1) == 0
}
// isUint64PowerOfTwo reports whether uint64(n) is a power of 2.
func isUint64PowerOfTwo(in int64) bool {
n := uint64(in)
return n > 0 && n&(n-1) == 0
}
// isUint32PowerOfTwo reports whether uint32(n) is a power of 2.
func isUint32PowerOfTwo(in int64) bool {
n := uint64(uint32(in))
return n > 0 && n&(n-1) == 0
}
// is32Bit reports whether n can be represented as a signed 32 bit integer.
func is32Bit(n int64) bool {
return n == int64(int32(n))
}
// is16Bit reports whether n can be represented as a signed 16 bit integer.
func is16Bit(n int64) bool {
return n == int64(int16(n))
}
// isU12Bit reports whether n can be represented as an unsigned 12 bit integer.
func isU12Bit(n int64) bool {
return 0 <= n && n < (1<<12)
}
// isU16Bit reports whether n can be represented as an unsigned 16 bit integer.
func isU16Bit(n int64) bool {
return n == int64(uint16(n))
}
// isU32Bit reports whether n can be represented as an unsigned 32 bit integer.
func isU32Bit(n int64) bool {
return n == int64(uint32(n))
}
// is20Bit reports whether n can be represented as a signed 20 bit integer.
func is20Bit(n int64) bool {
return -(1<<19) <= n && n < (1<<19)
}
// b2i translates a boolean value to 0 or 1 for assigning to auxInt.
func b2i(b bool) int64 {
if b {
return 1
}
return 0
}
// shiftIsBounded reports whether (left/right) shift Value v is known to be bounded.
// A shift is bounded if it is shifting by less than the width of the shifted value.
func shiftIsBounded(v *Value) bool {
return v.AuxInt != 0
}
// i2f is used in rules for converting from an AuxInt to a float.
func i2f(i int64) float64 {
return math.Float64frombits(uint64(i))
}
// i2f32 is used in rules for converting from an AuxInt to a float32.
func i2f32(i int64) float32 {
return float32(math.Float64frombits(uint64(i)))
}
// f2i is used in the rules for storing a float in AuxInt.
func f2i(f float64) int64 {
return int64(math.Float64bits(f))
}
// uaddOvf returns true if unsigned a+b would overflow.
func uaddOvf(a, b int64) bool {
return uint64(a)+uint64(b) < uint64(a)
}
// de-virtualize an InterCall
// 'sym' is the symbol for the itab
func devirt(v *Value, sym interface{}, offset int64) *obj.LSym {
f := v.Block.Func
n, ok := sym.(*obj.LSym)
if !ok {
return nil
}
lsym := f.fe.DerefItab(n, offset)
if f.pass.debug > 0 {
if lsym != nil {
f.Warnl(v.Pos, "de-virtualizing call")
} else {
f.Warnl(v.Pos, "couldn't de-virtualize call")
}
}
return lsym
}
// isSamePtr reports whether p1 and p2 point to the same address.
func isSamePtr(p1, p2 *Value) bool {
if p1 == p2 {
return true
}
if p1.Op != p2.Op {
return false
}
switch p1.Op {
case OpOffPtr:
return p1.AuxInt == p2.AuxInt && isSamePtr(p1.Args[0], p2.Args[0])
case OpAddr:
// OpAddr's 0th arg is either OpSP or OpSB, which means that it is uniquely identified by its Op.
// Checking for value equality only works after [z]cse has run.
return p1.Aux == p2.Aux && p1.Args[0].Op == p2.Args[0].Op
case OpAddPtr:
return p1.Args[1] == p2.Args[1] && isSamePtr(p1.Args[0], p2.Args[0])
}
return false
}
// disjoint reports whether the memory region specified by [p1:p1+n1)
// does not overlap with [p2:p2+n2).
// A return value of false does not imply the regions overlap.
func disjoint(p1 *Value, n1 int64, p2 *Value, n2 int64) bool {
if n1 == 0 || n2 == 0 {
return true
}
if p1 == p2 {
return false
}
baseAndOffset := func(ptr *Value) (base *Value, offset int64) {
base, offset = ptr, 0
if base.Op == OpOffPtr {
offset += base.AuxInt
base = base.Args[0]
}
return base, offset
}
p1, off1 := baseAndOffset(p1)
p2, off2 := baseAndOffset(p2)
if isSamePtr(p1, p2) {
return !overlap(off1, n1, off2, n2)
}
// p1 and p2 are not the same, so if they are both OpAddrs then
// they point to different variables.
// If one pointer is on the stack and the other is an argument
// then they can't overlap.
switch p1.Op {
case OpAddr:
if p2.Op == OpAddr || p2.Op == OpSP {
return true
}
return p2.Op == OpArg && p1.Args[0].Op == OpSP
case OpArg:
if p2.Op == OpSP {
return true
}
return p2.Op == OpAddr && p2.Args[0].Op == OpSP
case OpSP:
return p2.Op == OpAddr || p2.Op == OpArg || p2.Op == OpSP
}
return false
}
// moveSize returns the number of bytes an aligned MOV instruction moves
func moveSize(align int64, c *Config) int64 {
switch {
case align%8 == 0 && c.PtrSize == 8:
return 8
case align%4 == 0:
return 4
case align%2 == 0:
return 2
}
return 1
}
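// For example (illustrative, assuming a Config with PtrSize == 8):
//   moveSize(8, c) == 8, moveSize(4, c) == 4, moveSize(1, c) == 1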
// mergePoint finds a block among a's blocks which dominates b and is itself
// dominated by all of a's blocks. Returns nil if it can't find one.
// Might return nil even if one does exist.
func mergePoint(b *Block, a ...*Value) *Block {
// Walk backward from b looking for one of the a's blocks.
// Max distance
d := 100
for d > 0 {
for _, x := range a {
if b == x.Block {
goto found
}
}
if len(b.Preds) > 1 {
// Don't know which way to go back. Abort.
return nil
}
b = b.Preds[0].b
d--
}
return nil // too far away
found:
// At this point, b is the block of the first value in a that we found by walking backward.
// If we return anything, r will be it.
r := b
// Keep going, counting the other a's that we find. They must all dominate r.
na := 0
for d > 0 {
for _, x := range a {
if b == x.Block {
na++
}
}
if na == len(a) {
// Found all of a in a backwards walk. We can return r.
return r
}
if len(b.Preds) > 1 {
return nil
}
b = b.Preds[0].b
d--
}
return nil // too far away
}
// clobber invalidates v. Returns true.
// clobber is used by rewrite rules to:
// A) make sure v is really dead and never used again.
// B) decrement use counts of v's args.
func clobber(v *Value) bool {
v.reset(OpInvalid)
// Note: leave v.Block intact. The Block field is used after clobber.
return true
}
// clobberIfDead resets v when use count is 1. Returns true.
// clobberIfDead is used by rewrite rules to decrement
// use counts of v's args when v is dead and never used.
func clobberIfDead(v *Value) bool {
if v.Uses == 1 {
v.reset(OpInvalid)
}
// Note: leave v.Block intact. The Block field is used after clobberIfDead.
return true
}
// noteRule is an easy way to track if a rule is matched when writing
// new ones. Make the rule of interest also conditional on
// noteRule("note to self: rule of interest matched")
// and that message will print when the rule matches.
func noteRule(s string) bool {
fmt.Println(s)
return true
}
// warnRule generates a compiler debug output with string s when
// cond is true and the rule is fired.
func warnRule(cond bool, v *Value, s string) bool {
if cond {
v.Block.Func.Warnl(v.Pos, s)
}
return true
}
// for a pseudo-op like (LessThan x), extract x
func flagArg(v *Value) *Value {
if len(v.Args) != 1 || !v.Args[0].Type.IsFlags() {
return nil
}
return v.Args[0]
}
// arm64Negate finds the complement to an ARM64 condition code,
// for example Equal -> NotEqual or LessThan -> GreaterEqual
//
// TODO: add floating-point conditions
func arm64Negate(op Op) Op {
switch op {
case OpARM64LessThan:
return OpARM64GreaterEqual
case OpARM64LessThanU:
return OpARM64GreaterEqualU
case OpARM64GreaterThan:
return OpARM64LessEqual
case OpARM64GreaterThanU:
return OpARM64LessEqualU
case OpARM64LessEqual:
return OpARM64GreaterThan
case OpARM64LessEqualU:
return OpARM64GreaterThanU
case OpARM64GreaterEqual:
return OpARM64LessThan
case OpARM64GreaterEqualU:
return OpARM64LessThanU
case OpARM64Equal:
return OpARM64NotEqual
case OpARM64NotEqual:
return OpARM64Equal
default:
panic("unreachable")
}
}
// arm64Invert evaluates (InvertFlags op), which
// is the same as altering the condition codes such
// that the same result would be produced if the arguments
// to the flag-generating instruction were reversed, e.g.
// (InvertFlags (CMP x y)) -> (CMP y x)
//
// TODO: add floating-point conditions
func arm64Invert(op Op) Op {
switch op {
case OpARM64LessThan:
return OpARM64GreaterThan
case OpARM64LessThanU:
return OpARM64GreaterThanU
case OpARM64GreaterThan:
return OpARM64LessThan
case OpARM64GreaterThanU:
return OpARM64LessThanU
case OpARM64LessEqual:
return OpARM64GreaterEqual
case OpARM64LessEqualU:
return OpARM64GreaterEqualU
case OpARM64GreaterEqual:
return OpARM64LessEqual
case OpARM64GreaterEqualU:
return OpARM64LessEqualU
case OpARM64Equal, OpARM64NotEqual:
return op
default:
panic("unreachable")
}
}
// evaluate an ARM64 op against a flags value
// that is potentially constant; return 1 for true,
// -1 for false, and 0 for not constant.
func ccARM64Eval(cc interface{}, flags *Value) int {
op := cc.(Op)
fop := flags.Op
switch fop {
case OpARM64InvertFlags:
return -ccARM64Eval(op, flags.Args[0])
case OpARM64FlagEQ:
switch op {
case OpARM64Equal, OpARM64GreaterEqual, OpARM64LessEqual,
OpARM64GreaterEqualU, OpARM64LessEqualU:
return 1
default:
return -1
}
case OpARM64FlagLT_ULT:
switch op {
case OpARM64LessThan, OpARM64LessThanU,
OpARM64LessEqual, OpARM64LessEqualU:
return 1
default:
return -1
}
case OpARM64FlagLT_UGT:
switch op {
case OpARM64LessThan, OpARM64GreaterThanU,
OpARM64LessEqual, OpARM64GreaterEqualU:
return 1
default:
return -1
}
case OpARM64FlagGT_ULT:
switch op {
case OpARM64GreaterThan, OpARM64LessThanU,
OpARM64GreaterEqual, OpARM64LessEqualU:
return 1
default:
return -1
}
case OpARM64FlagGT_UGT:
switch op {
case OpARM64GreaterThan, OpARM64GreaterThanU,
OpARM64GreaterEqual, OpARM64GreaterEqualU:
return 1
default:
return -1
}
default:
return 0
}
}
// logRule logs the use of the rule s. This will only be enabled if
// rewrite rules were generated with the -log option, see gen/rulegen.go.
func logRule(s string) {
if ruleFile == nil {
// Open a log file to write log to. We open in append
// mode because all.bash runs the compiler lots of times,
// and we want the concatenation of all of those logs.
// This means, of course, that users need to rm the old log
// to get fresh data.
// TODO: all.bash runs compilers in parallel. Need to synchronize logging somehow?
w, err := os.OpenFile(filepath.Join(os.Getenv("GOROOT"), "src", "rulelog"),
os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
if err != nil {
panic(err)
}
ruleFile = w
}
_, err := fmt.Fprintf(ruleFile, "rewrite %s\n", s)
if err != nil {
panic(err)
}
}
var ruleFile io.Writer
func min(x, y int64) int64 {
if x < y {
return x
}
return y
}
func isConstZero(v *Value) bool {
switch v.Op {
case OpConstNil:
return true
case OpConst64, OpConst32, OpConst16, OpConst8, OpConstBool, OpConst32F, OpConst64F:
return v.AuxInt == 0
}
return false
}
// reciprocalExact64 reports whether 1/c is exactly representable.
func reciprocalExact64(c float64) bool {
b := math.Float64bits(c)
man := b & (1<<52 - 1)
if man != 0 {
return false // not a power of 2, denormal, or NaN
}
exp := b >> 52 & (1<<11 - 1)
// exponent bias is 0x3ff. So taking the reciprocal of a number
// changes the exponent to 0x7fe-exp.
switch exp {
case 0:
return false // ±0
case 0x7ff:
return false // ±inf
case 0x7fe:
return false // exponent is not representable
default:
return true
}
}
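// Illustrative cases (not part of the original source):
//   reciprocalExact64(0.25) == true  // mantissa bits are zero and the exponent is in range, so 1/0.25 == 4 is exact
//   reciprocalExact64(3.0) == false  // 3.0 has non-zero mantissa bits, so 1/3 cannot be exact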
// reciprocalExact32 reports whether 1/c is exactly representable.
func reciprocalExact32(c float32) bool {
b := math.Float32bits(c)
man := b & (1<<23 - 1)
if man != 0 {
return false // not a power of 2, denormal, or NaN
}
exp := b >> 23 & (1<<8 - 1)
// exponent bias is 0x7f. So taking the reciprocal of a number
// changes the exponent to 0xfe-exp.
switch exp {
case 0:
return false // ±0
case 0xff:
return false // ±inf
case 0xfe:
return false // exponent is not representable
default:
return true
}
}
// check if an immediate can be directly encoded into an ARM instruction
func isARMImmRot(v uint32) bool {
for i := 0; i < 16; i++ {
if v&^0xff == 0 {
return true
}
v = v<<2 | v>>30
}
return false
}
// overlap reports whether the ranges given by the given offset and
// size pairs overlap.
func overlap(offset1, size1, offset2, size2 int64) bool {
if offset1 >= offset2 && offset2+size2 > offset1 {
return true
}
if offset2 >= offset1 && offset1+size1 > offset2 {
return true
}
return false
}
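// For example (illustrative, not part of the original source):
//   overlap(0, 8, 4, 4) == true  // [0,8) and [4,8) intersect
//   overlap(0, 4, 4, 4) == false // [0,4) and [4,8) are adjacent but disjoint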
func areAdjacentOffsets(off1, off2, size int64) bool {
return off1+size == off2 || off1 == off2+size
}
// check if value zeroes out the upper 32 bits of a 64-bit register.
// depth limits recursion depth. In AMD64.rules 3 is used as the limit,
// because it catches the same number of cases as 4.
func zeroUpper32Bits(x *Value, depth int) bool {
switch x.Op {
case OpAMD64MOVLconst, OpAMD64MOVLload, OpAMD64MOVLQZX, OpAMD64MOVLloadidx1,
OpAMD64MOVWload, OpAMD64MOVWloadidx1, OpAMD64MOVBload, OpAMD64MOVBloadidx1,
OpAMD64MOVLloadidx4, OpAMD64ADDLload, OpAMD64SUBLload, OpAMD64ANDLload,
OpAMD64ORLload, OpAMD64XORLload, OpAMD64CVTTSD2SL,
OpAMD64ADDL, OpAMD64ADDLconst, OpAMD64SUBL, OpAMD64SUBLconst,
OpAMD64ANDL, OpAMD64ANDLconst, OpAMD64ORL, OpAMD64ORLconst,
OpAMD64XORL, OpAMD64XORLconst, OpAMD64NEGL, OpAMD64NOTL:
return true
case OpArg:
return x.Type.Width == 4
case OpPhi, OpSelect0, OpSelect1:
// Phis can use each other as arguments; instead of tracking visited values,
// just limit recursion depth.
if depth <= 0 {
return false
}
for i := range x.Args {
if !zeroUpper32Bits(x.Args[i], depth-1) {
return false
}
}
return true
}
return false
}
// isInlinableMemmove reports whether the given arch performs a Move of the given size
// faster than memmove. It will only return true if replacing the memmove with a Move is
// safe, either because Move is small or because the arguments are disjoint.
// This is used as a check for replacing memmove with Move ops.
func isInlinableMemmove(dst, src *Value, sz int64, c *Config) bool {
// It is always safe to convert memmove into Move when its arguments are disjoint.
// Move ops may or may not be faster for large sizes depending on how the platform
// lowers them, so we only perform this optimization on platforms that we know to
// have fast Move ops.
switch c.arch {
case "amd64", "amd64p32":
return sz <= 16
case "386", "ppc64", "ppc64le", "arm64":
return sz <= 8
case "s390x":
return sz <= 8 || disjoint(dst, sz, src, sz)
case "arm", "mips", "mips64", "mipsle", "mips64le":
return sz <= 4
}
return false
}
// encodes the lsb and width for arm64 bitfield ops into the expected auxInt format.
func arm64BFAuxInt(lsb, width int64) int64 {
if lsb < 0 || lsb > 63 {
panic("ARM64 bit field lsb constant out of range")
}
if width < 1 || width > 64 {
panic("ARM64 bit field width constant out of range")
}
return width | lsb<<8
}
// returns the lsb part of the auxInt field of arm64 bitfield ops.
func getARM64BFlsb(bfc int64) int64 {
return int64(uint64(bfc) >> 8)
}
// returns the width part of the auxInt field of arm64 bitfield ops.
func getARM64BFwidth(bfc int64) int64 {
return bfc & 0xff
}
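// Illustrative round trip for the auxInt encoding above (not part of the original source):
//   bfc := arm64BFAuxInt(8, 16) // == 16 | 8<<8 == 0x0810
//   getARM64BFlsb(bfc)          // == 8
//   getARM64BFwidth(bfc)        // == 16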
// checks if mask >> rshift applied at lsb is a valid arm64 bitfield op mask.
func isARM64BFMask(lsb, mask, rshift int64) bool {
shiftedMask := int64(uint64(mask) >> uint64(rshift))
return shiftedMask != 0 && isPowerOfTwo(shiftedMask+1) && nto(shiftedMask)+lsb < 64
}
// returns the bitfield width of mask >> rshift for arm64 bitfield ops
func arm64BFWidth(mask, rshift int64) int64 {
shiftedMask := int64(uint64(mask) >> uint64(rshift))
if shiftedMask == 0 {
panic("ARM64 BF mask is zero")
}
return nto(shiftedMask)
}
// sizeof returns the size of t in bytes.
// It will panic if t is not a *types.Type.
func sizeof(t interface{}) int64 {
return t.(*types.Type).Size()
}
// alignof returns the alignment of t in bytes.
// It will panic if t is not a *types.Type.
func alignof(t interface{}) int64 {
return t.(*types.Type).Alignment()
}
// registerizable reports whether t is a primitive type that fits in
// a register. It assumes float64 values will always fit into registers
// even if that isn't strictly true.
// It will panic if t is not a *types.Type.
func registerizable(b *Block, t interface{}) bool {
typ := t.(*types.Type)
if typ.IsPtrShaped() || typ.IsFloat() {
return true
}
if typ.IsInteger() {
return typ.Size() <= b.Func.Config.RegSize
}
return false
}
|
[
"\"GOROOT\""
] |
[] |
[
"GOROOT"
] |
[]
|
["GOROOT"]
|
go
| 1 | 0 | |
qa/rpc-tests/maxuploadtarget.py
|
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
'''
Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to send
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
self.block_receive_map = {}
def add_connection(self, conn):
self.connection = conn
self.peer_disconnected = False
def on_inv(self, conn, message):
pass
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
def on_block(self, conn, message):
message.block.calc_sha256()
try:
self.block_receive_map[message.block.sha256] += 1
except KeyError as e:
self.block_receive_map[message.block.sha256] = 1
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
def veracked():
return self.verack_received
return wait_until(veracked, timeout=10)
def wait_for_disconnect(self):
def disconnected():
return self.peer_disconnected
return wait_until(disconnected, timeout=10)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
def on_close(self, conn):
self.peer_disconnected = True
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
def received_pong():
return (self.last_pong.nonce == self.ping_counter)
self.connection.send_message(msg_ping(nonce=self.ping_counter))
success = wait_until(received_pong, timeout)
self.ping_counter += 1
return success
class MaxUploadTest(BitcoinTestFramework):
def __init__(self):
self.utxo = []
self.txouts = gen_return_txouts()
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("XTPD", "trustplusd"),
help="trustplusd binary to test")
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
# Start a node with maxuploadtarget of 200 MB (/24h)
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxuploadtarget=200", "-blockmaxsize=999000"]))
def mine_full_block(self, node, address):
# Want to create a full block
# We'll generate a 66k transaction below, and 14 of them together come close to the 1MB block limit
for j in xrange(14):
if len(self.utxo) < 14:
self.utxo = node.listunspent()
inputs=[]
outputs = {}
t = self.utxo.pop()
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
remchange = t["amount"] - Decimal("0.001000")
outputs[address]=remchange
# Create a basic transaction that will send change back to ourself after accounting for a fee,
# and then insert the 128 generated transaction outs in the middle. rawtx[92] is where the number
# of txouts is stored and is the only thing we overwrite from the original transaction
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + self.txouts
newtx = newtx + rawtx[94:]
# Appears to be ever so slightly faster to sign with SIGHASH_NONE
signresult = node.signrawtransaction(newtx,None,None,"NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
# Mine a full-sized block containing the transactions we just created
node.generate(1)
def run_test(self):
# Before we connect anything, we first set the time on the node
# to be in the past, otherwise things break because the CNode
# time counters can't be reset backward after initialization
old_time = int(time.time() - 2*60*60*24*7)
self.nodes[0].setmocktime(old_time)
# Generate some old blocks
self.nodes[0].generate(130)
# test_nodes[0] will only request old blocks
# test_nodes[1] will only request new blocks
# test_nodes[2] will test resetting the counters
test_nodes = []
connections = []
for i in xrange(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
# Test logic begins here
# Now mine a big block
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# Store the hash; we'll request this later
big_old_block = self.nodes[0].getbestblockhash()
old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
big_old_block = int(big_old_block, 16)
# Advance to two days ago
self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
# Mine one more block, so that the prior block looks old
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# We'll be requesting this new block too
big_new_block = self.nodes[0].getbestblockhash()
new_block_size = self.nodes[0].getblock(big_new_block)['size']
big_new_block = int(big_new_block, 16)
# test_nodes[0] will test what happens if we just keep requesting
# the same big old block too many times (expect: disconnect)
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, big_old_block))
max_bytes_per_day = 200*1024*1024
daily_buffer = 144 * MAX_BLOCK_SIZE
max_bytes_available = max_bytes_per_day - daily_buffer
success_count = max_bytes_available // old_block_size
# 144MB will be reserved for relaying new blocks, so expect this to
# succeed for ~70 tries.
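# Rough arithmetic behind that estimate (illustrative; assumes ~1MB blocks from -blockmaxsize=999000):
# 200*1024*1024 bytes/day minus the 144MB new-block buffer leaves ~65MB,
# i.e. roughly 65-70 serves of the big old block before the target is hit.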
for i in xrange(success_count):
test_nodes[0].send_message(getdata_request)
test_nodes[0].sync_with_ping()
assert_equal(test_nodes[0].block_receive_map[big_old_block], i+1)
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
# At most a couple more tries should succeed (depending on how long
# the test has been running so far).
for i in xrange(3):
test_nodes[0].send_message(getdata_request)
test_nodes[0].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
print "Peer 0 disconnected after downloading old block too many times"
# Requesting the current block on test_nodes[1] should succeed indefinitely,
# even when over the max upload target.
# We'll try 200 times
getdata_request.inv = [CInv(2, big_new_block)]
for i in xrange(200):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
print "Peer 1 able to repeatedly download new block"
# But if test_nodes[1] tries for an old block, it gets disconnected too.
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 1)
print "Peer 1 disconnected after trying to download old block"
print "Advancing system time on node to clear counters..."
# If we advance the time by 24 hours, then the counters should reset,
# and test_nodes[2] should be able to retrieve the old block.
self.nodes[0].setmocktime(int(time.time()))
test_nodes[2].sync_with_ping()
test_nodes[2].send_message(getdata_request)
test_nodes[2].sync_with_ping()
assert_equal(test_nodes[2].block_receive_map[big_old_block], 1)
print "Peer 2 able to download old block"
[c.disconnect_node() for c in connections]
#stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
print "Restarting nodes with -whitelist=127.0.0.1"
stop_node(self.nodes[0], 0)
self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"])
#recreate/reconnect 3 test nodes
test_nodes = []
connections = []
for i in xrange(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
#retrieve 20 blocks which should be enough to break the 1MB limit
getdata_request.inv = [CInv(2, big_new_block)]
for i in xrange(20):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 3) #node is still connected because of the whitelist
print "Peer 1 still connected after trying to download old block (whitelisted)"
[c.disconnect_node() for c in connections]
if __name__ == '__main__':
MaxUploadTest().main()
|
[] |
[] |
[
"XTPD"
] |
[]
|
["XTPD"]
|
python
| 1 | 0 | |
gdal-1.11.0/ogr/ogrsf_frmts/s57/s57tables.py
|
#!/usr/bin/env python
#******************************************************************************
# $Id: s57tables.py 2780 2001-12-17 22:33:06Z warmerda $
#
# Project: S-57 OGR Translator
# Purpose: Script to translate s57 .csv files into C code "data" statements.
# Author: Frank Warmerdam, [email protected]
#
#******************************************************************************
# Copyright (c) 2001, Frank Warmerdam
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#******************************************************************************
#
# $Log$
# Revision 1.1 2001/12/17 22:33:06 warmerda
# New
#
import sys
import os
import string
# -----------------------------------------------------------------------------
# EscapeLine - escape anything C-problematic in a line.
# -----------------------------------------------------------------------------
def EscapeLine( line ):
line_out = ''
for lchar in line:
if lchar == '"':
line_out += '\\"'
else:
line_out += lchar
return line_out
# -----------------------------------------------------------------------------
#
if __name__ != '__main__':
print 'This module should only be used as a mainline.'
sys.exit( 1 )
if len(sys.argv) < 2:
directory = os.environ['S57_CSV']
else:
directory = sys.argv[1]
print 'char *gpapszS57Classes[] = {'
classes = open( directory + '/s57objectclasses.csv' ).readlines()
for line in classes:
print '"%s",' % EscapeLine(string.strip(line))
print 'NULL };'
print 'char *gpapszS57attributes[] = {'
classes = open( directory + '/s57attributes.csv' ).readlines()
for line in classes:
print '"%s",' % EscapeLine(string.strip(line))
print 'NULL };'
|
[] |
[] |
[
"S57_CSV"
] |
[]
|
["S57_CSV"]
|
python
| 1 | 0 | |
gocd/provider.go
|
package gocd
import (
"fmt"
"github.com/drewsonne/go-gocd/gocd"
"github.com/hashicorp/terraform/helper/logging"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
"net/http"
"os"
"runtime"
)
func Provider() terraform.ResourceProvider {
return SchemaProvider()
}
// SchemaProvider describes the required configuration for interacting with a GoCD server. Environment variables can also be set:
// baseurl - GOCD_URL
// username - GOCD_USERNAME
// password - GOCD_PASSWORD
// skip_ssl_check - GOCD_SKIP_SSL_CHECK
func SchemaProvider() *schema.Provider {
return &schema.Provider{
DataSourcesMap: map[string]*schema.Resource{
//"gocd_stage_definition": dataSourceGocdStageTemplate(),
"gocd_job_definition": dataSourceGocdJobTemplate(),
"gocd_task_definition": dataSourceGocdTaskDefinition(),
},
ResourcesMap: map[string]*schema.Resource{
"gocd_environment": resourceEnvironment(),
"gocd_environment_association": resourceEnvironmentAssociation(),
"gocd_pipeline_template": resourcePipelineTemplate(),
"gocd_pipeline": resourcePipeline(),
"gocd_pipeline_stage": resourcePipelineStage(),
},
Schema: map[string]*schema.Schema{
"baseurl": {
Type: schema.TypeString,
Required: true,
Description: descriptions["gocd_baseurl"],
DefaultFunc: envDefault("GOCD_URL"),
},
"username": {
Type: schema.TypeString,
Optional: true,
Description: descriptions["username"],
DefaultFunc: envDefault("GOCD_USERNAME"),
},
"password": {
Type: schema.TypeString,
Optional: true,
Description: descriptions["password"],
DefaultFunc: envDefault("GOCD_PASSWORD"),
},
"skip_ssl_check": {
Type: schema.TypeBool,
Optional: true,
Description: descriptions["skip_ssl_check"],
DefaultFunc: envDefault("GOCD_SKIP_SSL_CHECK"),
},
},
ConfigureFunc: providerConfigure,
}
}
var descriptions map[string]string
func init() {
descriptions = map[string]string{
"baseurl": "URL for the GoCD Server",
"username": "User to interact with the GoCD API with.",
"password": "Password for User for GoCD API interaction.",
}
}
func providerConfigure(d *schema.ResourceData) (interface{}, error) {
var url, u, p string
var rUrl, rU, rP, rB interface{}
var ok, nossl, b bool
var cfg *gocd.Configuration
if rUrl, ok = d.GetOk("baseurl"); ok {
if url, ok = rUrl.(string); !ok || url == "" {
url = os.Getenv("GOCD_URL")
}
}
if rU, ok = d.GetOk("username"); ok {
if u, ok = rU.(string); !ok || u == "" {
u = os.Getenv("GOCD_USERNAME")
}
}
if rP, ok = d.GetOk("password"); ok {
if p, ok = rP.(string); !ok || p == "" {
p = os.Getenv("GOCD_PASSWORD")
}
}
if rB, ok = d.GetOk("skip_ssl_check"); ok {
if b, ok = rB.(bool); !ok {
nossl = false
} else {
nossl = b
}
}
cfg = &gocd.Configuration{
Server: url,
Username: u,
Password: p,
SkipSslCheck: nossl,
}
hClient := &http.Client{
Transport: http.DefaultTransport,
}
// Add API logging
hClient.Transport = logging.NewTransport("GoCD", hClient.Transport)
gc := gocd.NewClient(cfg, hClient)
versionString := terraform.VersionString()
gc.UserAgent = fmt.Sprintf("(%s %s) Terraform/%s", runtime.GOOS, runtime.GOARCH, versionString)
return gc, nil
}
func envDefault(e string) schema.SchemaDefaultFunc {
return schema.MultiEnvDefaultFunc([]string{
e,
}, nil)
}
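// Illustrative Terraform configuration for this provider (assumed shape, not part of this file);
// each attribute can instead be supplied via the corresponding environment variable listed on SchemaProvider:
//
//   provider "gocd" {
//     baseurl  = "https://gocd.example.com/go" # or GOCD_URL
//     username = "admin"                       # or GOCD_USERNAME
//     password = "secret"                      # or GOCD_PASSWORD
//   }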
|
[
"\"GOCD_URL\"",
"\"GOCD_USERNAME\"",
"\"GOCD_PASSWORD\""
] |
[] |
[
"GOCD_URL",
"GOCD_USERNAME",
"GOCD_PASSWORD"
] |
[]
|
["GOCD_URL", "GOCD_USERNAME", "GOCD_PASSWORD"]
|
go
| 3 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cgroup_manager.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
config.go
|
package notes
import (
"github.com/pkg/errors"
"os"
"os/exec"
"os/user"
"path/filepath"
"runtime"
"strings"
)
// Config represents user configuration of notes command
type Config struct {
// HomePath is a file path to directory of home of notes command. If $NOTES_CLI_HOME is set, it is used.
// Otherwise, notes-cli directory in XDG data directory is used. This directory is automatically created
// when config is created
HomePath string
// GitPath is a file path to `git` executable. If $NOTES_CLI_GIT is set, it is used.
// Otherwise, `git` is used by default. This is optional and can be empty. When empty, some command
// and functionality which require Git don't work
GitPath string
// EditorCmd is a command of your favorite editor. If $NOTES_CLI_EDITOR is set, it is used. This value is
// similar to $EDITOR environment variable and can contain command arguments like "vim -g". Otherwise,
// this value will be empty. When empty, some functionality which requires an editor to open note doesn't
// work
EditorCmd string
// PagerCmd is a command for paging output from 'list' subcommand. If $NOTES_CLI_PAGER is set, it is used.
PagerCmd string
}
func homePath() (string, error) {
u, err := user.Current()
if err != nil {
return "", errors.Wrap(err, "Cannot locate home directory. Please set $NOTES_CLI_HOME")
}
if env := os.Getenv("NOTES_CLI_HOME"); env != "" {
if strings.HasPrefix(env, "~"+string(filepath.Separator)) {
env = filepath.Join(u.HomeDir, env[2:])
}
return filepath.Clean(env), nil
}
if xdg := os.Getenv("XDG_DATA_HOME"); xdg != "" {
return filepath.Join(xdg, "notes-cli"), nil
}
if runtime.GOOS == "windows" {
if env := os.Getenv("APPLOCALDATA"); env != "" {
return filepath.Join(env, "notes-cli"), nil
}
}
return filepath.Join(u.HomeDir, ".local", "share", "notes-cli"), nil
}
func gitPath() string {
c := "git"
if env, ok := os.LookupEnv("NOTES_CLI_GIT"); ok {
c = filepath.Clean(env)
}
exe, err := exec.LookPath(c)
if err != nil {
// Git is optional
return ""
}
return exe
}
func editorCmd() string {
if env, ok := os.LookupEnv("NOTES_CLI_EDITOR"); ok {
return env
}
if env, ok := os.LookupEnv("EDITOR"); ok {
return env
}
return ""
}
func pagerCmd() string {
if env, ok := os.LookupEnv("NOTES_CLI_PAGER"); ok {
return env
}
if env, ok := os.LookupEnv("PAGER"); ok {
return env
}
if _, err := exec.LookPath("less"); err == nil {
return "less -R -F -X"
}
return ""
}
// NewConfig creates a new Config instance by looking the user's environment. GitPath and EditorPath
// may be empty when proper configuration is not found. When home directory path cannot be located,
// this function returns an error
func NewConfig() (*Config, error) {
h, err := homePath()
if err != nil {
return nil, err
}
// Ensure home directory exists
if err := os.MkdirAll(h, 0755); err != nil {
return nil, errors.Wrapf(err, "Could not create home '%s'", h)
}
return &Config{h, gitPath(), editorCmd(), pagerCmd()}, nil
}
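// Illustrative usage (assumed caller, not part of this file):
//
//   cfg, err := NewConfig()
//   if err != nil {
//       // no home directory could be located; set $NOTES_CLI_HOME
//   }
//   // With no overrides set, cfg.HomePath falls back to ~/.local/share/notes-cli.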
|
[
"\"NOTES_CLI_HOME\"",
"\"XDG_DATA_HOME\"",
"\"APPLOCALDATA\""
] |
[] |
[
"APPLOCALDATA",
"NOTES_CLI_HOME",
"XDG_DATA_HOME"
] |
[]
|
["APPLOCALDATA", "NOTES_CLI_HOME", "XDG_DATA_HOME"]
|
go
| 3 | 0 | |
v3/process/process_test.go
|
package process
import (
"fmt"
"io/ioutil"
"net"
"os"
"os/exec"
"os/user"
"path/filepath"
"reflect"
"runtime"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/cosnicolaou/gopsutil/v3/internal/common"
"github.com/stretchr/testify/assert"
)
var mu sync.Mutex
func skipIfNotImplementedErr(t *testing.T, err error) {
if err == common.ErrNotImplementedError {
t.Skip("not implemented")
}
}
func testGetProcess() Process {
checkPid := os.Getpid() // process.test
ret, _ := NewProcess(int32(checkPid))
return *ret
}
func Test_Pids(t *testing.T) {
ret, err := Pids()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("error %v", err)
}
if len(ret) == 0 {
t.Errorf("could not get pids %v", ret)
}
}
func Test_Pids_Fail(t *testing.T) {
if runtime.GOOS != "darwin" {
t.Skip("darwin only")
}
mu.Lock()
defer mu.Unlock()
invoke = common.FakeInvoke{Suffix: "fail"}
ret, err := Pids()
skipIfNotImplementedErr(t, err)
invoke = common.Invoke{}
if err != nil {
t.Errorf("error %v", err)
}
if len(ret) != 9 {
t.Errorf("wrong getted pid nums: %v/%d", ret, len(ret))
}
}
func Test_Pid_exists(t *testing.T) {
checkPid := os.Getpid()
ret, err := PidExists(int32(checkPid))
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("error %v", err)
}
if ret == false {
t.Errorf("could not get process exists: %v", ret)
}
}
func Test_NewProcess(t *testing.T) {
checkPid := os.Getpid()
ret, err := NewProcess(int32(checkPid))
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("error %v", err)
}
empty := &Process{}
if runtime.GOOS != "windows" { // Windows pid is 0
if empty == ret {
t.Errorf("error %v", ret)
}
}
}
func Test_Process_memory_maps(t *testing.T) {
checkPid := os.Getpid()
ret, err := NewProcess(int32(checkPid))
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("error %v", err)
}
// ungrouped memory maps
mmaps, err := ret.MemoryMaps(false)
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("memory map get error %v", err)
}
empty := MemoryMapsStat{}
for _, m := range *mmaps {
if m == empty {
t.Errorf("memory map get error %v", m)
}
}
// grouped memory maps
mmaps, err = ret.MemoryMaps(true)
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("memory map get error %v", err)
}
if len(*mmaps) != 1 {
t.Errorf("grouped memory maps length (%v) is not equal to 1", len(*mmaps))
}
if (*mmaps)[0] == empty {
t.Errorf("memory map is empty")
}
}
func Test_Process_MemoryInfo(t *testing.T) {
p := testGetProcess()
v, err := p.MemoryInfo()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("getting memory info error %v", err)
}
empty := MemoryInfoStat{}
if v == nil || *v == empty {
t.Errorf("could not get memory info %v", v)
}
}
func Test_Process_CmdLine(t *testing.T) {
p := testGetProcess()
v, err := p.Cmdline()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("getting cmdline error %v", err)
}
if !strings.Contains(v, "process.test") {
t.Errorf("invalid cmd line %v", v)
}
}
func Test_Process_CmdLineSlice(t *testing.T) {
p := testGetProcess()
v, err := p.CmdlineSlice()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Fatalf("getting cmdline slice error %v", err)
}
if !reflect.DeepEqual(v, os.Args) {
t.Errorf("returned cmdline slice not as expected:\nexp: %v\ngot: %v", os.Args, v)
}
}
func Test_Process_Ppid(t *testing.T) {
p := testGetProcess()
v, err := p.Ppid()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("getting ppid error %v", err)
}
if v == 0 {
t.Errorf("return value is 0 %v", v)
}
expected := os.Getppid()
if v != int32(expected) {
t.Errorf("return value is %v, expected %v", v, expected)
}
}
func Test_Process_Status(t *testing.T) {
p := testGetProcess()
v, err := p.Status()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("getting status error %v", err)
}
if len(v) == 0 {
t.Errorf("could not get state")
}
if v[0] != Running && v[0] != Sleep {
t.Errorf("got wrong state, %v", v)
}
}
func Test_Process_Terminal(t *testing.T) {
p := testGetProcess()
_, err := p.Terminal()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("getting terminal error %v", err)
}
}
func Test_Process_IOCounters(t *testing.T) {
p := testGetProcess()
v, err := p.IOCounters()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("getting iocounter error %v", err)
return
}
empty := &IOCountersStat{}
if v == empty {
t.Errorf("error %v", v)
}
}
func Test_Process_NumCtx(t *testing.T) {
p := testGetProcess()
_, err := p.NumCtxSwitches()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("getting numctx error %v", err)
return
}
}
func Test_Process_Nice(t *testing.T) {
p := testGetProcess()
n, err := p.Nice()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("getting nice error %v", err)
}
if runtime.GOOS != "windows" && n != 0 && n != 20 && n != 8 {
t.Errorf("invalid nice: %d", n)
}
}
func Test_Process_Groups(t *testing.T) {
p := testGetProcess()
v, err := p.Groups()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("getting groups error %v", err)
}
if len(v) == 0 {
t.Skip("Groups is empty")
}
if v[0] < 0 {
t.Errorf("invalid Groups: %v", v)
}
}
func Test_Process_NumThread(t *testing.T) {
p := testGetProcess()
n, err := p.NumThreads()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("getting NumThread error %v", err)
}
if n < 0 {
t.Errorf("invalid NumThread: %d", n)
}
}
func Test_Process_Threads(t *testing.T) {
p := testGetProcess()
n, err := p.NumThreads()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("getting NumThread error %v", err)
}
if n < 0 {
t.Errorf("invalid NumThread: %d", n)
}
ts, err := p.Threads()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("getting Threads error %v", err)
}
if len(ts) != int(n) {
t.Errorf("unexpected number of threads: %v vs %v", len(ts), n)
}
}
func Test_Process_Name(t *testing.T) {
p := testGetProcess()
n, err := p.Name()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("getting name error %v", err)
}
if !strings.Contains(n, "process.test") {
t.Errorf("invalid Exe %s", n)
}
}
func Test_Process_Long_Name_With_Spaces(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("unable to create temp dir %v", err)
}
defer os.RemoveAll(tmpdir) // clean up
tmpfilepath := filepath.Join(tmpdir, "loooong name with spaces.go")
tmpfile, err := os.Create(tmpfilepath)
if err != nil {
t.Fatalf("unable to create temp file %v", err)
}
tmpfilecontent := []byte("package main\nimport(\n\"time\"\n)\nfunc main(){\nfor range time.Tick(time.Second) {}\n}")
if _, err := tmpfile.Write(tmpfilecontent); err != nil {
tmpfile.Close()
t.Fatalf("unable to write temp file %v", err)
}
if err := tmpfile.Close(); err != nil {
t.Fatalf("unable to close temp file %v", err)
}
err = exec.Command("go", "build", "-o", tmpfile.Name()+".exe", tmpfile.Name()).Run()
if err != nil {
t.Fatalf("unable to build temp file %v", err)
}
cmd := exec.Command(tmpfile.Name() + ".exe")
assert.Nil(t, cmd.Start())
time.Sleep(100 * time.Millisecond)
p, err := NewProcess(int32(cmd.Process.Pid))
skipIfNotImplementedErr(t, err)
assert.Nil(t, err)
n, err := p.Name()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Fatalf("getting name error %v", err)
}
basename := filepath.Base(tmpfile.Name() + ".exe")
if basename != n {
t.Fatalf("%s != %s", basename, n)
}
cmd.Process.Kill()
}
func Test_Process_Long_Name(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("unable to create temp dir %v", err)
}
defer os.RemoveAll(tmpdir) // clean up
tmpfilepath := filepath.Join(tmpdir, "looooooooooooooooooooong.go")
tmpfile, err := os.Create(tmpfilepath)
if err != nil {
t.Fatalf("unable to create temp file %v", err)
}
tmpfilecontent := []byte("package main\nimport(\n\"time\"\n)\nfunc main(){\nfor range time.Tick(time.Second) {}\n}")
if _, err := tmpfile.Write(tmpfilecontent); err != nil {
tmpfile.Close()
t.Fatalf("unable to write temp file %v", err)
}
if err := tmpfile.Close(); err != nil {
t.Fatalf("unable to close temp file %v", err)
}
err = exec.Command("go", "build", "-o", tmpfile.Name()+".exe", tmpfile.Name()).Run()
if err != nil {
t.Fatalf("unable to build temp file %v", err)
}
cmd := exec.Command(tmpfile.Name() + ".exe")
assert.Nil(t, cmd.Start())
time.Sleep(100 * time.Millisecond)
p, err := NewProcess(int32(cmd.Process.Pid))
skipIfNotImplementedErr(t, err)
assert.Nil(t, err)
n, err := p.Name()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Fatalf("getting name error %v", err)
}
basename := filepath.Base(tmpfile.Name() + ".exe")
if basename != n {
t.Fatalf("%s != %s", basename, n)
}
cmd.Process.Kill()
}
func Test_Process_Exe(t *testing.T) {
p := testGetProcess()
n, err := p.Exe()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("getting Exe error %v", err)
}
if !strings.Contains(n, "process.test") {
t.Errorf("invalid Exe %s", n)
}
}
func Test_Process_CpuPercent(t *testing.T) {
p := testGetProcess()
percent, err := p.Percent(0)
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("error %v", err)
}
duration := time.Duration(1000) * time.Microsecond
time.Sleep(duration)
percent, err = p.Percent(0)
if err != nil {
t.Errorf("error %v", err)
}
numcpu := runtime.NumCPU()
// if percent < 0.0 || percent > 100.0*float64(numcpu) { // TODO
if percent < 0.0 {
t.Fatalf("CPUPercent value is invalid: %f, %d", percent, numcpu)
}
}
func Test_Process_CpuPercentLoop(t *testing.T) {
p := testGetProcess()
numcpu := runtime.NumCPU()
for i := 0; i < 2; i++ {
duration := time.Duration(100) * time.Microsecond
percent, err := p.Percent(duration)
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("error %v", err)
}
// if percent < 0.0 || percent > 100.0*float64(numcpu) { // TODO
if percent < 0.0 {
t.Fatalf("CPUPercent value is invalid: %f, %d", percent, numcpu)
}
}
}
func Test_Process_CreateTime(t *testing.T) {
if os.Getenv("CIRCLECI") == "true" {
t.Skip("Skip CI")
}
p := testGetProcess()
c, err := p.CreateTime()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("error %v", err)
}
if c < 1420000000 {
t.Errorf("process created time is wrong.")
}
gotElapsed := time.Since(time.Unix(int64(c/1000), 0))
maxElapsed := time.Duration(5 * time.Second)
if gotElapsed >= maxElapsed {
t.Errorf("this process has not been running for %v", gotElapsed)
}
}
func Test_Parent(t *testing.T) {
p := testGetProcess()
c, err := p.Parent()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Fatalf("error %v", err)
}
if c == nil {
t.Fatalf("could not get parent")
}
if c.Pid == 0 {
t.Fatalf("wrong parent pid")
}
}
func Test_Connections(t *testing.T) {
p := testGetProcess()
ch0 := make(chan string)
ch1 := make(chan string)
go func() { // TCP listening goroutine
addr, err := net.ResolveTCPAddr("tcp", "localhost:0") // dynamically get a random open port from OS
if err != nil {
t.Skip("unable to resolve localhost:", err)
}
l, err := net.ListenTCP(addr.Network(), addr)
if err != nil {
t.Skip(fmt.Sprintf("unable to listen on %v: %v", addr, err))
}
defer l.Close()
ch0 <- l.Addr().String()
for {
conn, err := l.Accept()
if err != nil {
t.Skip("unable to accept connection:", err)
}
ch1 <- l.Addr().String()
defer conn.Close()
}
}()
go func() { // TCP client goroutine
tcpServerAddr := <-ch0
net.Dial("tcp", tcpServerAddr)
}()
tcpServerAddr := <-ch1
tcpServerAddrIP := strings.Split(tcpServerAddr, ":")[0]
tcpServerAddrPort, err := strconv.ParseUint(strings.Split(tcpServerAddr, ":")[1], 10, 32)
if err != nil {
t.Errorf("unable to parse tcpServerAddr port: %v", err)
}
c, err := p.Connections()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("error %v", err)
}
if len(c) == 0 {
t.Errorf("no connections found")
}
found := 0
for _, connection := range c {
if connection.Status == "ESTABLISHED" && (connection.Laddr.IP == tcpServerAddrIP && connection.Laddr.Port == uint32(tcpServerAddrPort)) || (connection.Raddr.IP == tcpServerAddrIP && connection.Raddr.Port == uint32(tcpServerAddrPort)) {
found++
}
}
if found != 2 { // two established connections, one for the server, the other for the client
t.Errorf(fmt.Sprintf("wrong connections: %+v", c))
}
}
func Test_Children(t *testing.T) {
p := testGetProcess()
var cmd *exec.Cmd
if runtime.GOOS == "windows" {
cmd = exec.Command("ping", "localhost", "-n", "4")
} else {
cmd = exec.Command("sleep", "3")
}
assert.Nil(t, cmd.Start())
time.Sleep(100 * time.Millisecond)
c, err := p.Children()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Fatalf("error %v", err)
}
if len(c) == 0 {
t.Fatalf("children is empty")
}
found := false
for _, child := range c {
if child.Pid == int32(cmd.Process.Pid) {
found = true
break
}
}
if !found {
t.Errorf("could not find child %d", cmd.Process.Pid)
}
}
func Test_Username(t *testing.T) {
myPid := os.Getpid()
currentUser, _ := user.Current()
myUsername := currentUser.Username
process, _ := NewProcess(int32(myPid))
pidUsername, err := process.Username()
skipIfNotImplementedErr(t, err)
assert.Equal(t, myUsername, pidUsername)
t.Log(pidUsername)
}
func Test_CPUTimes(t *testing.T) {
pid := os.Getpid()
process, err := NewProcess(int32(pid))
skipIfNotImplementedErr(t, err)
assert.Nil(t, err)
spinSeconds := 0.2
cpuTimes0, err := process.Times()
skipIfNotImplementedErr(t, err)
assert.Nil(t, err)
// Spin for a duration of spinSeconds
t0 := time.Now()
tGoal := t0.Add(time.Duration(spinSeconds*1000) * time.Millisecond)
assert.Nil(t, err)
for time.Now().Before(tGoal) {
// This block intentionally left blank
}
cpuTimes1, err := process.Times()
assert.Nil(t, err)
if cpuTimes0 == nil || cpuTimes1 == nil {
t.FailNow()
}
measuredElapsed := cpuTimes1.Total() - cpuTimes0.Total()
message := fmt.Sprintf("Measured %fs != spun time of %fs\ncpuTimes0=%v\ncpuTimes1=%v",
measuredElapsed, spinSeconds, cpuTimes0, cpuTimes1)
assert.True(t, measuredElapsed > float64(spinSeconds)/5, message)
assert.True(t, measuredElapsed < float64(spinSeconds)*5, message)
}
func Test_OpenFiles(t *testing.T) {
pid := os.Getpid()
p, err := NewProcess(int32(pid))
skipIfNotImplementedErr(t, err)
assert.Nil(t, err)
v, err := p.OpenFiles()
skipIfNotImplementedErr(t, err)
assert.Nil(t, err)
assert.NotEmpty(t, v) // test always open files.
for _, vv := range v {
assert.NotEqual(t, "", vv.Path)
}
}
func Test_Kill(t *testing.T) {
var cmd *exec.Cmd
if runtime.GOOS == "windows" {
cmd = exec.Command("ping", "localhost", "-n", "4")
} else {
cmd = exec.Command("sleep", "3")
}
assert.Nil(t, cmd.Start())
time.Sleep(100 * time.Millisecond)
p, err := NewProcess(int32(cmd.Process.Pid))
skipIfNotImplementedErr(t, err)
assert.Nil(t, err)
err = p.Kill()
skipIfNotImplementedErr(t, err)
assert.Nil(t, err)
cmd.Wait()
}
func Test_IsRunning(t *testing.T) {
var cmd *exec.Cmd
if runtime.GOOS == "windows" {
cmd = exec.Command("ping", "localhost", "-n", "2")
} else {
cmd = exec.Command("sleep", "1")
}
cmd.Start()
p, err := NewProcess(int32(cmd.Process.Pid))
skipIfNotImplementedErr(t, err)
assert.Nil(t, err)
running, err := p.IsRunning()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Fatalf("IsRunning error: %v", err)
}
if !running {
t.Fatalf("process should be found running")
}
cmd.Wait()
running, err = p.IsRunning()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Fatalf("IsRunning error: %v", err)
}
if running {
t.Fatalf("process should NOT be found running")
}
}
func Test_AllProcesses_cmdLine(t *testing.T) {
procs, err := Processes()
if err == nil {
for _, proc := range procs {
var exeName string
var cmdLine string
exeName, _ = proc.Exe()
cmdLine, err = proc.Cmdline()
if err != nil {
cmdLine = "Error: " + err.Error()
}
t.Logf("Process #%v: Name: %v / CmdLine: %v\n", proc.Pid, exeName, cmdLine)
}
}
}
func BenchmarkNewProcess(b *testing.B) {
checkPid := os.Getpid()
for i := 0; i < b.N; i++ {
NewProcess(int32(checkPid))
}
}
func BenchmarkProcessName(b *testing.B) {
p := testGetProcess()
for i := 0; i < b.N; i++ {
p.Name()
}
}
func BenchmarkProcessPpid(b *testing.B) {
p := testGetProcess()
for i := 0; i < b.N; i++ {
p.Ppid()
}
}
|
[
"\"CIRCLECI\""
] |
[] |
[
"CIRCLECI"
] |
[]
|
["CIRCLECI"]
|
go
| 1 | 0 | |
src/php/integration/deploy_a_php_app_with_newrelic_test.go
|
package integration_test
import (
"os"
"path/filepath"
"github.com/cloudfoundry/libbuildpack/cutlass"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("CF PHP Buildpack", func() {
var app *cutlass.App
AfterEach(func() { app = DestroyApp(app) })
Context("in offline mode", func() {
BeforeEach(func() {
SkipUnlessCached()
})
It("succeeds", func() {
app = cutlass.New(filepath.Join(bpDir, "fixtures", "with_newrelic"))
app.SetEnv("COMPOSER_GITHUB_OAUTH_TOKEN", os.Getenv("COMPOSER_GITHUB_OAUTH_TOKEN"))
app.SetEnv("BP_DEBUG", "true")
PushAppAndConfirm(app)
By("downloads the binaries directly from the buildpack")
Eventually(app.Stdout.String).Should(MatchRegexp(`Downloaded \[file://.*/dependencies/https___download.newrelic.com_php_agent_archive_[\d\.]+_newrelic-php5-[\d\.]+-linux\.tar\.gz\] to \[/tmp\]`))
By("sets up New Relic")
Eventually(app.Stdout.String).Should(ContainSubstring("Installing NewRelic"))
Eventually(app.Stdout.String).Should(ContainSubstring("NewRelic Installed"))
By("installs the default version of newrelic")
Eventually(app.Stdout.String).Should(ContainSubstring("Using NewRelic default version:"))
})
AssertNoInternetTraffic("with_newrelic")
})
})
|
[
"\"COMPOSER_GITHUB_OAUTH_TOKEN\""
] |
[] |
[
"COMPOSER_GITHUB_OAUTH_TOKEN"
] |
[]
|
["COMPOSER_GITHUB_OAUTH_TOKEN"]
|
go
| 1 | 0 | |
owllook/fetcher/novels_schedule.py
|
#!/usr/bin/env python
import asyncio
import os
import schedule
import sys
import time
import uvloop
os.environ['MODE'] = 'PRO'
sys.path.append('../../')
from owllook.fetcher.cache import update_all_books
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
loop = asyncio.get_event_loop()
def update_all_books_schedule():
task = asyncio.ensure_future(update_all_books(loop))
loop.run_until_complete(task)
return task.result() or None
# python novels_schedule.py
schedule.every(90).minutes.do(update_all_books_schedule)
while True:
schedule.run_pending()
time.sleep(1)
|
[] |
[] |
[
"MODE"
] |
[]
|
["MODE"]
|
python
| 1 | 0 | |
src/app/api/email/kubernetes.py
|
import os
import sib_api_v3_sdk
from sib_api_v3_sdk.rest import ApiException
from pydantic import EmailStr
configuration = sib_api_v3_sdk.Configuration()
configuration.api_key['api-key'] = os.getenv("SENDINBLUE_API_KEY")
api_instance = sib_api_v3_sdk.TransactionalEmailsApi(sib_api_v3_sdk.ApiClient(configuration))
async def mail_kubernetes_new_kubeconfig(e_mail: EmailStr):
sender = {"name": "Scraiber", "email": "[email protected]"}
to = [{"email": e_mail}]
subject = "A new kubeconfig has been created for you"
html_content = """Hi,
a new kubeconfig has been created for you.
In case it does not work, or it was not you who did this, please generate a new one."""
send_smtp_email = sib_api_v3_sdk.SendSmtpEmail(sender=sender, to=to, subject=subject, html_content=html_content)
try:
api_instance.send_transac_email(send_smtp_email)
except ApiException as e:
print("Exception when calling SMTPApi->send_transac_email: %s\n" % e)
|
[] |
[] |
[
"SENDINBLUE_API_KEY"
] |
[]
|
["SENDINBLUE_API_KEY"]
|
python
| 1 | 0 | |
pkg/builds/build_number.go
|
package builds
import (
"github.com/jenkins-x/jx/pkg/log"
"github.com/jenkins-x/jx/pkg/util"
"io/ioutil"
"os"
"regexp"
"strings"
)
var (
numericStringRegex = regexp.MustCompile("[0-9]+")
)
// GetBuildNumber returns the build number using environment variables and/or pod Downward API files
func GetBuildNumber() string {
buildNumber := os.Getenv("JX_BUILD_NUMBER")
if buildNumber != "" {
return buildNumber
}
buildNumber = os.Getenv("BUILD_NUMBER")
if buildNumber != "" {
return buildNumber
}
buildID := os.Getenv("BUILD_ID")
if buildID != "" {
return buildID
}
// if we are in a knative build pod we can discover it via the downward API if the `/etc/podinfo/labels` file exists
const podInfoLabelsFile = "/etc/podinfo/labels"
exists, err := util.FileExists(podInfoLabelsFile)
if err != nil {
log.Warnf("failed to detect if the file %s exists: %s\n", podInfoLabelsFile, err)
} else if exists {
data, err := ioutil.ReadFile(podInfoLabelsFile)
if err != nil {
log.Warnf("failed to load downward API pod labels from %s due to: %s\n", podInfoLabelsFile, err)
} else {
text := strings.TrimSpace(string(data))
if text != "" {
return GetBuildNumberFromLabelsFileData(text)
}
}
}
return ""
}
// GetBuildNumberFromLabelsFileData parses the /etc/podinfo/labels style downward API file for a pods labels
// and returns the build number if it can be discovered
func GetBuildNumberFromLabelsFileData(text string) string {
m := LoadDownwardAPILabels(text)
return GetBuildNumberFromLabels(m)
}
// GetBuildNumberFromLabels returns the build number from the given Pod labels
func GetBuildNumberFromLabels(m map[string]string) string {
if m == nil {
return ""
}
answer := ""
for _, key := range []string{LabelBuildName, "build-number", LabelOldBuildName, LabelPipelineRunName} {
answer = m[key]
if answer != "" {
break
}
}
if answer != "" {
return lastNumberFrom(answer)
}
return ""
}
// lastNumberFrom splits a string such as "jstrachan-mynodething-master-1-build" via "-" and returns the last
// numeric string
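// e.g. lastNumberFrom("jstrachan-mynodething-master-1-build") returns "1"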
func lastNumberFrom(text string) string {
// let's remove any whitespace or double quotes
text = strings.TrimSpace(text)
text = strings.TrimPrefix(text, "\"")
text = strings.TrimSuffix(text, "\"")
paths := strings.Split(text, "-")
for i := len(paths) - 1; i >= 0; i-- {
path := paths[i]
if numericStringRegex.MatchString(path) {
return path
}
}
return ""
}
// LoadDownwardAPILabels parses the /etc/podinfo/labels text into a map of label values
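// A typical labels file holds one key=value pair per line (the keys and values below are purely illustrative), e.g.:
//   build-number="4"
//   pipeline="jstrachan-mynodething-master"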
func LoadDownwardAPILabels(text string) map[string]string {
m := map[string]string{}
lines := strings.Split(text, "\n")
for _, line := range lines {
l := strings.TrimSpace(line)
paths := strings.SplitN(l, "=", 2)
if len(paths) == 2 {
m[paths[0]] = paths[1]
}
}
return m
}
|
[
"\"JX_BUILD_NUMBER\"",
"\"BUILD_NUMBER\"",
"\"BUILD_ID\""
] |
[] |
[
"BUILD_ID",
"JX_BUILD_NUMBER",
"BUILD_NUMBER"
] |
[]
|
["BUILD_ID", "JX_BUILD_NUMBER", "BUILD_NUMBER"]
|
go
| 3 | 0 | |
modules/openapi-generator/src/main/java/org/openapitools/codegen/languages/Swift5ClientCodegen.java
|
/*
* Copyright 2018 OpenAPI-Generator Contributors (https://openapi-generator.tech)
* Copyright 2018 SmartBear Software
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openapitools.codegen.languages;
import io.swagger.v3.oas.models.media.ArraySchema;
import io.swagger.v3.oas.models.media.Schema;
import org.apache.commons.io.FilenameUtils;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.text.WordUtils;
import org.openapitools.codegen.*;
import org.openapitools.codegen.meta.GeneratorMetadata;
import org.openapitools.codegen.meta.Stability;
import org.openapitools.codegen.utils.ModelUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.time.OffsetDateTime;
import java.time.Instant;
import java.time.temporal.ChronoField;
import java.util.concurrent.TimeUnit;
import static org.openapitools.codegen.utils.StringUtils.camelize;
public class Swift5ClientCodegen extends DefaultCodegen implements CodegenConfig {
private static final Logger LOGGER = LoggerFactory.getLogger(Swift5ClientCodegen.class);
public static final String PROJECT_NAME = "projectName";
public static final String RESPONSE_AS = "responseAs";
public static final String OBJC_COMPATIBLE = "objcCompatible";
public static final String POD_SOURCE = "podSource";
public static final String POD_AUTHORS = "podAuthors";
public static final String POD_SOCIAL_MEDIA_URL = "podSocialMediaURL";
public static final String POD_LICENSE = "podLicense";
public static final String POD_HOMEPAGE = "podHomepage";
public static final String POD_SUMMARY = "podSummary";
public static final String POD_DESCRIPTION = "podDescription";
public static final String POD_SCREENSHOTS = "podScreenshots";
public static final String POD_DOCUMENTATION_URL = "podDocumentationURL";
public static final String READONLY_PROPERTIES = "readonlyProperties";
public static final String SWIFT_USE_API_NAMESPACE = "swiftUseApiNamespace";
public static final String DEFAULT_POD_AUTHORS = "OpenAPI Generator";
public static final String LENIENT_TYPE_CAST = "lenientTypeCast";
protected static final String LIBRARY_ALAMOFIRE = "alamofire";
protected static final String LIBRARY_URLSESSION = "urlsession";
protected static final String RESPONSE_LIBRARY_PROMISE_KIT = "PromiseKit";
protected static final String RESPONSE_LIBRARY_RX_SWIFT = "RxSwift";
protected static final String RESPONSE_LIBRARY_RESULT = "Result";
protected static final String RESPONSE_LIBRARY_COMBINE = "Combine";
protected static final String[] RESPONSE_LIBRARIES = {RESPONSE_LIBRARY_PROMISE_KIT, RESPONSE_LIBRARY_RX_SWIFT, RESPONSE_LIBRARY_RESULT, RESPONSE_LIBRARY_COMBINE};
protected String projectName = "OpenAPIClient";
protected boolean nonPublicApi = false;
protected boolean objcCompatible = false;
protected boolean lenientTypeCast = false;
protected boolean readonlyProperties = false;
protected boolean swiftUseApiNamespace;
protected String[] responseAs = new String[0];
protected String sourceFolder = "Classes" + File.separator + "OpenAPIs";
protected HashSet objcReservedWords;
protected String apiDocPath = "docs/";
protected String modelDocPath = "docs/";
/**
* Constructor for the swift5 language codegen module.
*/
public Swift5ClientCodegen() {
super();
generatorMetadata = GeneratorMetadata.newBuilder(generatorMetadata)
.stability(Stability.BETA)
.build();
outputFolder = "generated-code" + File.separator + "swift";
modelTemplateFiles.put("model.mustache", ".swift");
apiTemplateFiles.put("api.mustache", ".swift");
embeddedTemplateDir = templateDir = "swift5";
apiPackage = File.separator + "APIs";
modelPackage = File.separator + "Models";
modelDocTemplateFiles.put("model_doc.mustache", ".md");
apiDocTemplateFiles.put("api_doc.mustache", ".md");
languageSpecificPrimitives = new HashSet<>(
Arrays.asList(
"Int",
"Int32",
"Int64",
"Float",
"Double",
"Bool",
"Void",
"String",
"URL",
"Data",
"Date",
"Character",
"UUID",
"URL",
"AnyObject",
"Any",
"Decimal")
);
defaultIncludes = new HashSet<>(
Arrays.asList(
"Data",
"Date",
"URL", // for file
"UUID",
"Array",
"Dictionary",
"Set",
"Any",
"Empty",
"AnyObject",
"Any",
"Decimal")
);
objcReservedWords = new HashSet<>(
Arrays.asList(
// Added for Objective-C compatibility
"id", "description", "NSArray", "NSURL", "CGFloat", "NSSet", "NSString", "NSInteger", "NSUInteger",
"NSError", "NSDictionary",
// Cannot override with a stored property 'className'
"className"
)
);
reservedWords = new HashSet<>(
Arrays.asList(
// name used by swift client
"ErrorResponse", "Response",
// Swift keywords. This list is taken from here:
// https://developer.apple.com/library/content/documentation/Swift/Conceptual/Swift_Programming_Language/LexicalStructure.html#//apple_ref/doc/uid/TP40014097-CH30-ID410
//
// Keywords used in declarations
"associatedtype", "class", "deinit", "enum", "extension", "fileprivate", "func", "import", "init",
"inout", "internal", "let", "open", "operator", "private", "protocol", "public", "static", "struct",
"subscript", "typealias", "var",
// Keywords used in statements
"break", "case", "continue", "default", "defer", "do", "else", "fallthrough", "for", "guard", "if",
"in", "repeat", "return", "switch", "where", "while",
// Keywords used in expressions and types
"as", "Any", "catch", "false", "is", "nil", "rethrows", "super", "self", "Self", "throw", "throws", "true", "try",
// Keywords used in patterns
"_",
// Keywords that begin with a number sign
"#available", "#colorLiteral", "#column", "#else", "#elseif", "#endif", "#file", "#fileLiteral", "#function", "#if",
"#imageLiteral", "#line", "#selector", "#sourceLocation",
// Keywords reserved in particular contexts
"associativity", "convenience", "dynamic", "didSet", "final", "get", "infix", "indirect", "lazy", "left",
"mutating", "none", "nonmutating", "optional", "override", "postfix", "precedence", "prefix", "Protocol",
"required", "right", "set", "Type", "unowned", "weak", "willSet",
//
// Swift Standard Library types
// https://developer.apple.com/documentation/swift
//
// Numbers and Basic Values
"Bool", "Int", "Double", "Float", "Range", "ClosedRange", "Error", "Optional",
// Special-Use Numeric Types
"UInt", "UInt8", "UInt16", "UInt32", "UInt64", "Int8", "Int16", "Int32", "Int64", "Float80", "Float32", "Float64",
// Strings and Text
"String", "Character", "Unicode", "StaticString",
// Collections
"Array", "Dictionary", "Set", "OptionSet", "CountableRange", "CountableClosedRange",
// The following are commonly-used Foundation types
"URL", "Data", "Codable", "Encodable", "Decodable",
// The following are other words we want to reserve
"Void", "AnyObject", "Class", "dynamicType", "COLUMN", "FILE", "FUNCTION", "LINE"
)
);
typeMapping = new HashMap<>();
typeMapping.put("array", "Array");
typeMapping.put("List", "Array");
typeMapping.put("map", "Dictionary");
typeMapping.put("set", "Set");
typeMapping.put("date", "Date");
typeMapping.put("Date", "Date");
typeMapping.put("DateTime", "Date");
typeMapping.put("boolean", "Bool");
typeMapping.put("string", "String");
typeMapping.put("char", "Character");
typeMapping.put("short", "Int");
typeMapping.put("int", "Int");
typeMapping.put("long", "Int64");
typeMapping.put("integer", "Int");
typeMapping.put("Integer", "Int");
typeMapping.put("float", "Float");
typeMapping.put("number", "Double");
typeMapping.put("double", "Double");
typeMapping.put("file", "URL");
typeMapping.put("binary", "URL");
typeMapping.put("ByteArray", "Data");
typeMapping.put("UUID", "UUID");
typeMapping.put("URI", "String");
typeMapping.put("BigDecimal", "Decimal");
typeMapping.put("object", "Any");
typeMapping.put("AnyType", "Any");
importMapping = new HashMap<>();
cliOptions.add(new CliOption(PROJECT_NAME, "Project name in Xcode"));
cliOptions.add(new CliOption(RESPONSE_AS,
"Optionally use libraries to manage response. Currently "
+ StringUtils.join(RESPONSE_LIBRARIES, ", ")
+ " are available."));
cliOptions.add(new CliOption(CodegenConstants.NON_PUBLIC_API,
CodegenConstants.NON_PUBLIC_API_DESC
+ "(default: false)"));
cliOptions.add(new CliOption(OBJC_COMPATIBLE,
"Add additional properties and methods for Objective-C "
+ "compatibility (default: false)"));
cliOptions.add(new CliOption(POD_SOURCE, "Source information used for Podspec"));
cliOptions.add(new CliOption(CodegenConstants.POD_VERSION, "Version used for Podspec"));
cliOptions.add(new CliOption(POD_AUTHORS, "Authors used for Podspec"));
cliOptions.add(new CliOption(POD_SOCIAL_MEDIA_URL, "Social Media URL used for Podspec"));
cliOptions.add(new CliOption(POD_LICENSE, "License used for Podspec"));
cliOptions.add(new CliOption(POD_HOMEPAGE, "Homepage used for Podspec"));
cliOptions.add(new CliOption(POD_SUMMARY, "Summary used for Podspec"));
cliOptions.add(new CliOption(POD_DESCRIPTION, "Description used for Podspec"));
cliOptions.add(new CliOption(POD_SCREENSHOTS, "Screenshots used for Podspec"));
cliOptions.add(new CliOption(POD_DOCUMENTATION_URL,
"Documentation URL used for Podspec"));
cliOptions.add(new CliOption(READONLY_PROPERTIES, "Make properties "
+ "readonly (default: false)"));
cliOptions.add(new CliOption(SWIFT_USE_API_NAMESPACE,
"Flag to make all the API classes inner-class "
+ "of {{projectName}}API"));
cliOptions.add(new CliOption(CodegenConstants.HIDE_GENERATION_TIMESTAMP,
CodegenConstants.HIDE_GENERATION_TIMESTAMP_DESC)
.defaultValue(Boolean.TRUE.toString()));
cliOptions.add(new CliOption(LENIENT_TYPE_CAST,
"Accept and cast values for simple types (string->bool, "
+ "string->int, int->string)")
.defaultValue(Boolean.FALSE.toString()));
cliOptions.add(new CliOption(CodegenConstants.API_NAME_PREFIX, CodegenConstants.API_NAME_PREFIX_DESC));
supportedLibraries.put(LIBRARY_URLSESSION, "[DEFAULT] HTTP client: URLSession");
supportedLibraries.put(LIBRARY_ALAMOFIRE, "HTTP client: Alamofire");
CliOption libraryOption = new CliOption(CodegenConstants.LIBRARY, "Library template (sub-template) to use");
libraryOption.setEnum(supportedLibraries);
libraryOption.setDefault(LIBRARY_URLSESSION);
cliOptions.add(libraryOption);
setLibrary(LIBRARY_URLSESSION);
}
private static CodegenModel reconcileProperties(CodegenModel codegenModel,
CodegenModel parentCodegenModel) {
// To support inheritance in this generator, we will analyze
// the parent and child models, look for properties that match, and remove
// them from the child models and leave them in the parent.
// Because the child models extend the parents, the properties
// will be available via the parent.
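// For example, if both the parent and the child schema declare a "name" property,
// the child's copy is removed here and "name" is inherited from the parent model.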
// Get the properties for the parent and child models
final List<CodegenProperty> parentModelCodegenProperties = parentCodegenModel.vars;
List<CodegenProperty> codegenProperties = codegenModel.vars;
codegenModel.allVars = new ArrayList<CodegenProperty>(codegenProperties);
codegenModel.parentVars = parentCodegenModel.allVars;
// Iterate over all of the parent model properties
boolean removedChildProperty = false;
for (CodegenProperty parentModelCodegenProperty : parentModelCodegenProperties) {
// Now that we have found a property in the parent class,
// search the child class for the same property.
Iterator<CodegenProperty> iterator = codegenProperties.iterator();
while (iterator.hasNext()) {
CodegenProperty codegenProperty = iterator.next();
if (codegenProperty.baseName.equals(parentModelCodegenProperty.baseName)) {
// We found a property in the child class that is
// a duplicate of the one in the parent, so remove it.
iterator.remove();
removedChildProperty = true;
}
}
}
if (removedChildProperty) {
// If we removed an entry from this model's vars, we need to ensure hasMore is updated
int count = 0;
int numVars = codegenProperties.size();
for (CodegenProperty codegenProperty : codegenProperties) {
count += 1;
codegenProperty.hasMore = count < numVars;
}
codegenModel.vars = codegenProperties;
}
return codegenModel;
}
@Override
public CodegenType getTag() {
return CodegenType.CLIENT;
}
@Override
public String getName() {
return "swift5";
}
@Override
public String getHelp() {
return "Generates a Swift 5.x client library.";
}
@Override
protected void addAdditionPropertiesToCodeGenModel(CodegenModel codegenModel,
Schema schema) {
final Schema additionalProperties = getAdditionalProperties(schema);
if (additionalProperties != null) {
codegenModel.additionalPropertiesType = getSchemaType(additionalProperties);
}
}
@Override
public void processOpts() {
super.processOpts();
if (StringUtils.isEmpty(System.getenv("SWIFT_POST_PROCESS_FILE"))) {
LOGGER.info("Environment variable SWIFT_POST_PROCESS_FILE not defined so the Swift code may not be properly formatted. To define it, try 'export SWIFT_POST_PROCESS_FILE=/usr/local/bin/swiftformat' (Linux/Mac)");
LOGGER.info("NOTE: To enable file post-processing, 'enablePostProcessFile' must be set to `true` (--enable-post-process-file for CLI).");
}
// Setup project name
if (additionalProperties.containsKey(PROJECT_NAME)) {
setProjectName((String) additionalProperties.get(PROJECT_NAME));
} else {
additionalProperties.put(PROJECT_NAME, projectName);
}
sourceFolder = projectName + File.separator + sourceFolder;
// Setup nonPublicApi option, which generates code with reduced access
// modifiers; allows embedding elsewhere without exposing non-public API calls
// to consumers
if (additionalProperties.containsKey(CodegenConstants.NON_PUBLIC_API)) {
setNonPublicApi(convertPropertyToBooleanAndWriteBack(CodegenConstants.NON_PUBLIC_API));
}
additionalProperties.put(CodegenConstants.NON_PUBLIC_API, nonPublicApi);
// Setup objcCompatible option, which adds additional properties
// and methods for Objective-C compatibility
if (additionalProperties.containsKey(OBJC_COMPATIBLE)) {
setObjcCompatible(convertPropertyToBooleanAndWriteBack(OBJC_COMPATIBLE));
}
additionalProperties.put(OBJC_COMPATIBLE, objcCompatible);
// add objc reserved words
if (Boolean.TRUE.equals(objcCompatible)) {
reservedWords.addAll(objcReservedWords);
}
if (additionalProperties.containsKey(RESPONSE_AS)) {
Object responseAsObject = additionalProperties.get(RESPONSE_AS);
if (responseAsObject instanceof String) {
setResponseAs(((String) responseAsObject).split(","));
} else {
setResponseAs((String[]) responseAsObject);
}
}
additionalProperties.put(RESPONSE_AS, responseAs);
if (ArrayUtils.contains(responseAs, RESPONSE_LIBRARY_PROMISE_KIT)) {
additionalProperties.put("usePromiseKit", true);
}
if (ArrayUtils.contains(responseAs, RESPONSE_LIBRARY_RX_SWIFT)) {
additionalProperties.put("useRxSwift", true);
}
if (ArrayUtils.contains(responseAs, RESPONSE_LIBRARY_RESULT)) {
additionalProperties.put("useResult", true);
}
if (ArrayUtils.contains(responseAs, RESPONSE_LIBRARY_COMBINE)) {
additionalProperties.put("useCombine", true);
}
// Setup readonlyProperties option, which declares properties so they can only
// be set at initialization
if (additionalProperties.containsKey(READONLY_PROPERTIES)) {
setReadonlyProperties(convertPropertyToBooleanAndWriteBack(READONLY_PROPERTIES));
}
additionalProperties.put(READONLY_PROPERTIES, readonlyProperties);
// Setup swiftUseApiNamespace option, which makes all the API
// classes inner-class of {{projectName}}API
if (additionalProperties.containsKey(SWIFT_USE_API_NAMESPACE)) {
setSwiftUseApiNamespace(convertPropertyToBooleanAndWriteBack(SWIFT_USE_API_NAMESPACE));
}
if (!additionalProperties.containsKey(POD_AUTHORS)) {
additionalProperties.put(POD_AUTHORS, DEFAULT_POD_AUTHORS);
}
setLenientTypeCast(convertPropertyToBooleanAndWriteBack(LENIENT_TYPE_CAST));
// make api and model doc path available in mustache template
additionalProperties.put("apiDocPath", apiDocPath);
additionalProperties.put("modelDocPath", modelDocPath);
supportingFiles.add(new SupportingFile("Podspec.mustache",
"",
projectName + ".podspec"));
supportingFiles.add(new SupportingFile("Cartfile.mustache",
"",
"Cartfile"));
supportingFiles.add(new SupportingFile("Package.swift.mustache",
"",
"Package.swift"));
supportingFiles.add(new SupportingFile("APIHelper.mustache",
sourceFolder,
"APIHelper.swift"));
supportingFiles.add(new SupportingFile("Configuration.mustache",
sourceFolder,
"Configuration.swift"));
supportingFiles.add(new SupportingFile("Extensions.mustache",
sourceFolder,
"Extensions.swift"));
supportingFiles.add(new SupportingFile("Models.mustache",
sourceFolder,
"Models.swift"));
supportingFiles.add(new SupportingFile("APIs.mustache",
sourceFolder,
"APIs.swift"));
supportingFiles.add(new SupportingFile("CodableHelper.mustache",
sourceFolder,
"CodableHelper.swift"));
supportingFiles.add(new SupportingFile("OpenISO8601DateFormatter.mustache",
sourceFolder,
"OpenISO8601DateFormatter.swift"));
supportingFiles.add(new SupportingFile("JSONDataEncoding.mustache",
sourceFolder,
"JSONDataEncoding.swift"));
supportingFiles.add(new SupportingFile("JSONEncodingHelper.mustache",
sourceFolder,
"JSONEncodingHelper.swift"));
supportingFiles.add(new SupportingFile("git_push.sh.mustache",
"",
"git_push.sh"));
supportingFiles.add(new SupportingFile("SynchronizedDictionary.mustache",
sourceFolder,
"SynchronizedDictionary.swift"));
supportingFiles.add(new SupportingFile("gitignore.mustache",
"",
".gitignore"));
supportingFiles.add(new SupportingFile("README.mustache",
"",
"README.md"));
supportingFiles.add(new SupportingFile("XcodeGen.mustache",
"",
"project.yml"));
switch (getLibrary()) {
case LIBRARY_ALAMOFIRE:
additionalProperties.put("useAlamofire", true);
supportingFiles.add(new SupportingFile("AlamofireImplementations.mustache",
sourceFolder,
"AlamofireImplementations.swift"));
break;
case LIBRARY_URLSESSION:
additionalProperties.put("useURLSession", true);
supportingFiles.add(new SupportingFile("URLSessionImplementations.mustache",
sourceFolder,
"URLSessionImplementations.swift"));
break;
default:
break;
}
}
@Override
protected boolean isReservedWord(String word) {
return word != null && reservedWords.contains(word); //don't lowercase as super does
}
@Override
public String escapeReservedWord(String name) {
if (this.reservedWordsMappings().containsKey(name)) {
return this.reservedWordsMappings().get(name);
}
return "_" + name; // add an underscore to the name
}
@Override
public String modelFileFolder() {
return outputFolder + File.separator + sourceFolder
+ modelPackage().replace('.', File.separatorChar);
}
@Override
public String apiFileFolder() {
return outputFolder + File.separator + sourceFolder
+ apiPackage().replace('.', File.separatorChar);
}
@Override
public String getTypeDeclaration(Schema p) {
if (ModelUtils.isArraySchema(p)) {
ArraySchema ap = (ArraySchema) p;
Schema inner = ap.getItems();
return ModelUtils.isSet(p) ? "Set<" + getTypeDeclaration(inner) + ">" : "[" + getTypeDeclaration(inner) + "]";
} else if (ModelUtils.isMapSchema(p)) {
Schema inner = getAdditionalProperties(p);
return "[String:" + getTypeDeclaration(inner) + "]";
}
return super.getTypeDeclaration(p);
}
@Override
public String getSchemaType(Schema p) {
String openAPIType = super.getSchemaType(p);
String type;
if (typeMapping.containsKey(openAPIType)) {
type = typeMapping.get(openAPIType);
if (languageSpecificPrimitives.contains(type) || defaultIncludes.contains(type)) {
return type;
}
} else {
type = openAPIType;
}
return toModelName(type);
}
@Override
public boolean isDataTypeFile(String dataType) {
return dataType != null && dataType.equals("URL");
}
@Override
public boolean isDataTypeBinary(final String dataType) {
return dataType != null && dataType.equals("Data");
}
/**
* Output the proper model name (capitalized).
*
* @param name the name of the model
* @return capitalized model name
*/
@Override
public String toModelName(String name) {
// FIXME parameter should not be assigned. Also declare it as "final"
name = sanitizeName(name);
if (!StringUtils.isEmpty(modelNameSuffix)) { // set model suffix
name = name + "_" + modelNameSuffix;
}
if (!StringUtils.isEmpty(modelNamePrefix)) { // set model prefix
name = modelNamePrefix + "_" + name;
}
// camelize the model name
// phone_number => PhoneNumber
name = camelize(name);
// model name cannot use reserved keyword, e.g. return
if (isReservedWord(name)) {
String modelName = "Model" + name;
LOGGER.warn(name + " (reserved word) cannot be used as model name. Renamed to "
+ modelName);
return modelName;
}
// model name starts with number
if (name.matches("^\\d.*")) {
// e.g. 200Response => Model200Response (after camelize)
String modelName = "Model" + name;
LOGGER.warn(name
+ " (model name starts with number) cannot be used as model name."
+ " Renamed to " + modelName);
return modelName;
}
return name;
}
/**
* Return the capitalized file name of the model.
*
* @param name the model name
* @return the file name of the model
*/
@Override
public String toModelFilename(String name) {
// should be the same as the model name
return toModelName(name);
}
@Override
public String toDefaultValue(Schema p) {
if (p.getEnum() != null && !p.getEnum().isEmpty()) {
if (p.getDefault() != null) {
if (ModelUtils.isStringSchema(p)) {
return "." + toEnumVarName(escapeText((String) p.getDefault()), p.getType());
} else {
return "." + toEnumVarName(escapeText(p.getDefault().toString()), p.getType());
}
}
}
if (p.getDefault() != null) {
if (ModelUtils.isIntegerSchema(p) || ModelUtils.isNumberSchema(p) || ModelUtils.isBooleanSchema(p)) {
return p.getDefault().toString();
} else if (ModelUtils.isDateTimeSchema(p)) {
// Datetime time stamps in Swift are expressed as Seconds with Microsecond precision.
// In Java, we need to be creative to get the Timestamp in Microseconds as a long.
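// For example (hypothetical default), 1970-01-01T00:00:01.000001Z gives epochMicro = 1_000_001,
// which renders as "Date(timeIntervalSince1970: 1000001.0 / 1_000_000)".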
Instant instant = ((OffsetDateTime) p.getDefault()).toInstant();
long epochMicro = TimeUnit.SECONDS.toMicros(instant.getEpochSecond()) + ((long) instant.get(ChronoField.MICRO_OF_SECOND));
return "Date(timeIntervalSince1970: " + String.valueOf(epochMicro) + ".0 / 1_000_000)";
} else if (ModelUtils.isStringSchema(p)) {
return "\"" + escapeText((String) p.getDefault()) + "\"";
}
// TODO: Handle more cases from `ModelUtils`, such as Date
}
return null;
}
@Override
public String toInstantiationType(Schema p) {
if (ModelUtils.isMapSchema(p)) {
return getSchemaType(getAdditionalProperties(p));
} else if (ModelUtils.isArraySchema(p)) {
ArraySchema ap = (ArraySchema) p;
String inner = getSchemaType(ap.getItems());
return ModelUtils.isSet(p) ? "Set<" + inner + ">" : "[" + inner + "]";
}
return null;
}
@Override
public String toApiName(String name) {
if (name.length() == 0) {
return "DefaultAPI";
}
return camelize(apiNamePrefix + "_" + name) + "API";
}
@Override
public String apiDocFileFolder() {
return (outputFolder + "/" + apiDocPath).replace("/", File.separator);
}
@Override
public String modelDocFileFolder() {
return (outputFolder + "/" + modelDocPath).replace("/", File.separator);
}
@Override
public String toModelDocFilename(String name) {
return toModelName(name);
}
@Override
public String toApiDocFilename(String name) {
return toApiName(name);
}
@Override
public String toOperationId(String operationId) {
operationId = camelize(sanitizeName(operationId), true);
// Throw exception if method name is empty.
// This should not happen but keep the check just in case
if (StringUtils.isEmpty(operationId)) {
throw new RuntimeException("Empty method name (operationId) not allowed");
}
// method name cannot use reserved keyword, e.g. return
if (isReservedWord(operationId)) {
String newOperationId = camelize(("call_" + operationId), true);
LOGGER.warn(operationId + " (reserved word) cannot be used as method name."
+ " Renamed to " + newOperationId);
return newOperationId;
}
// operationId starts with a number
if (operationId.matches("^\\d.*")) {
LOGGER.warn(operationId + " (starting with a number) cannot be used as method name. Renamed to " + camelize(sanitizeName("call_" + operationId), true));
operationId = camelize(sanitizeName("call_" + operationId), true);
}
return operationId;
}
@Override
public String toVarName(String name) {
// sanitize name
name = sanitizeName(name);
// if it's all upper case, do nothing
if (name.matches("^[A-Z_]*$")) {
return name;
}
// camelize the variable name
// pet_id => petId
name = camelize(name, true);
// for reserved word or word starting with number, append _
if (isReservedWord(name) || name.matches("^\\d.*")) {
name = escapeReservedWord(name);
}
return name;
}
@Override
public String toParamName(String name) {
// sanitize name
name = sanitizeName(name);
// replace - with _ e.g. created-at => created_at
name = name.replaceAll("-", "_");
// if it's all upper case, do nothing
if (name.matches("^[A-Z_]*$")) {
return name;
}
// camelize(lower) the variable name
// pet_id => petId
name = camelize(name, true);
// for reserved word or word starting with number, append _
if (isReservedWord(name) || name.matches("^\\d.*")) {
name = escapeReservedWord(name);
}
return name;
}
@Override
public CodegenModel fromModel(String name, Schema model) {
Map<String, Schema> allDefinitions = ModelUtils.getSchemas(this.openAPI);
CodegenModel codegenModel = super.fromModel(name, model);
if (codegenModel.description != null) {
codegenModel.imports.add("ApiModel");
}
if (allDefinitions != null) {
String parentSchema = codegenModel.parentSchema;
// multilevel inheritance: reconcile properties of all the parents
while (parentSchema != null) {
final Schema parentModel = allDefinitions.get(parentSchema);
final CodegenModel parentCodegenModel = super.fromModel(codegenModel.parent,
parentModel);
codegenModel = Swift5ClientCodegen.reconcileProperties(codegenModel, parentCodegenModel);
// get the next parent
parentSchema = parentCodegenModel.parentSchema;
}
}
return codegenModel;
}
public void setProjectName(String projectName) {
this.projectName = projectName;
}
public void setNonPublicApi(boolean nonPublicApi) {
this.nonPublicApi = nonPublicApi;
}
public void setObjcCompatible(boolean objcCompatible) {
this.objcCompatible = objcCompatible;
}
public void setLenientTypeCast(boolean lenientTypeCast) {
this.lenientTypeCast = lenientTypeCast;
}
public void setReadonlyProperties(boolean readonlyProperties) {
this.readonlyProperties = readonlyProperties;
}
public void setResponseAs(String[] responseAs) {
this.responseAs = responseAs;
}
public void setSwiftUseApiNamespace(boolean swiftUseApiNamespace) {
this.swiftUseApiNamespace = swiftUseApiNamespace;
}
@Override
public String toEnumValue(String value, String datatype) {
// for string, array of string
if ("String".equals(datatype) || "[String]".equals(datatype) || "[String:String]".equals(datatype)) {
return "\"" + String.valueOf(value) + "\"";
} else {
return String.valueOf(value);
}
}
@Override
public String toEnumDefaultValue(String value, String datatype) {
return datatype + "_" + value;
}
@Override
public String toEnumVarName(String name, String datatype) {
if (name.length() == 0) {
return "empty";
}
Pattern startWithNumberPattern = Pattern.compile("^\\d+");
Matcher startWithNumberMatcher = startWithNumberPattern.matcher(name);
if (startWithNumberMatcher.find()) {
String startingNumbers = startWithNumberMatcher.group(0);
String nameWithoutStartingNumbers = name.substring(startingNumbers.length());
return "_" + startingNumbers + camelize(nameWithoutStartingNumbers, true);
}
// for symbol, e.g. $, #
if (getSymbolName(name) != null) {
return camelize(WordUtils.capitalizeFully(getSymbolName(name).toUpperCase(Locale.ROOT)), true);
}
// Camelize only when we have a structure defined below
Boolean camelized = false;
if (name.matches("[A-Z][a-z0-9]+[a-zA-Z0-9]*")) {
name = camelize(name, true);
camelized = true;
}
// Reserved Name
String nameLowercase = StringUtils.lowerCase(name);
if (isReservedWord(nameLowercase)) {
return escapeReservedWord(nameLowercase);
}
// Check for numerical conversions
if ("Int".equals(datatype) || "Int32".equals(datatype) || "Int64".equals(datatype)
|| "Float".equals(datatype) || "Double".equals(datatype)) {
String varName = "number" + camelize(name);
varName = varName.replaceAll("-", "minus");
varName = varName.replaceAll("\\+", "plus");
varName = varName.replaceAll("\\.", "dot");
return varName;
}
// If we have already camelized the word, don't progress
// any further
if (camelized) {
return name;
}
char[] separators = {'-', '_', ' ', ':', '(', ')'};
return camelize(WordUtils.capitalizeFully(StringUtils.lowerCase(name), separators)
.replaceAll("[-_ :\\(\\)]", ""),
true);
}
@Override
public String toEnumName(CodegenProperty property) {
String enumName = toModelName(property.name);
// Ensure that the enum type doesn't match a reserved word or
// the variable name doesn't match the generated enum type or the
// Swift compiler will generate an error
if (isReservedWord(property.datatypeWithEnum)
|| toVarName(property.name).equals(property.datatypeWithEnum)) {
enumName = property.datatypeWithEnum + "Enum";
}
// TODO: toModelName already does something for names starting with number,
// so this code is probably never called
if (enumName.matches("\\d.*")) { // starts with number
return "_" + enumName;
} else {
return enumName;
}
}
@Override
public Map<String, Object> postProcessModels(Map<String, Object> objs) {
Map<String, Object> postProcessedModelsEnum = postProcessModelsEnum(objs);
// We iterate through the list of models, and also iterate through each of the
// properties for each model. For each property, if:
//
// CodegenProperty.name != CodegenProperty.baseName
//
// then we set
//
// CodegenProperty.vendorExtensions["x-codegen-escaped-property-name"] = true
//
// Also, if any property in the model has x-codegen-escaped-property-name=true, then we mark:
//
// CodegenModel.vendorExtensions["x-codegen-has-escaped-property-names"] = true
//
List<Object> models = (List<Object>) postProcessedModelsEnum.get("models");
for (Object _mo : models) {
Map<String, Object> mo = (Map<String, Object>) _mo;
CodegenModel cm = (CodegenModel) mo.get("model");
boolean modelHasPropertyWithEscapedName = false;
for (CodegenProperty prop : cm.allVars) {
if (!prop.name.equals(prop.baseName)) {
prop.vendorExtensions.put("x-codegen-escaped-property-name", true);
modelHasPropertyWithEscapedName = true;
}
}
if (modelHasPropertyWithEscapedName) {
cm.vendorExtensions.put("x-codegen-has-escaped-property-names", true);
}
}
return postProcessedModelsEnum;
}
@Override
public void postProcessModelProperty(CodegenModel model, CodegenProperty property) {
super.postProcessModelProperty(model, property);
boolean isSwiftScalarType = property.isInteger || property.isLong || property.isFloat
|| property.isDouble || property.isBoolean;
if ((!property.required || property.isNullable) && isSwiftScalarType) {
// Optional scalar types like Int?, Int64?, Float?, Double?, and Bool?
// do not translate to Objective-C. So we want to flag those
// properties in case we want to put special code in the templates
// which provide Objective-C compatibility.
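// (For instance, an optional integer becomes Int? in Swift, which has no direct Objective-C counterpart.)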
property.vendorExtensions.put("x-swift-optional-scalar", true);
}
}
@Override
public String escapeQuotationMark(String input) {
// remove " to avoid code injection
return input.replace("\"", "");
}
@Override
public String escapeUnsafeCharacters(String input) {
return input.replace("*/", "*_/").replace("/*", "/_*");
}
@Override
public void postProcessFile(File file, String fileType) {
if (file == null) {
return;
}
String swiftPostProcessFile = System.getenv("SWIFT_POST_PROCESS_FILE");
if (StringUtils.isEmpty(swiftPostProcessFile)) {
return; // skip if SWIFT_POST_PROCESS_FILE env variable is not defined
}
// only process files with swift extension
if ("swift".equals(FilenameUtils.getExtension(file.toString()))) {
String command = swiftPostProcessFile + " " + file.toString();
try {
Process p = Runtime.getRuntime().exec(command);
int exitValue = p.waitFor();
if (exitValue != 0) {
LOGGER.error("Error running the command ({}). Exit value: {}", command, exitValue);
} else {
LOGGER.info("Successfully executed: " + command);
}
} catch (Exception e) {
LOGGER.error("Error running the command ({}). Exception: {}", command, e.getMessage());
}
}
}
@Override
public Map<String, Object> postProcessOperationsWithModels(Map<String, Object> objs, List<Object> allModels) {
Map<String, Object> objectMap = (Map<String, Object>) objs.get("operations");
HashMap<String, CodegenModel> modelMaps = new HashMap<String, CodegenModel>();
for (Object o : allModels) {
HashMap<String, Object> h = (HashMap<String, Object>) o;
CodegenModel m = (CodegenModel) h.get("model");
modelMaps.put(m.classname, m);
}
List<CodegenOperation> operations = (List<CodegenOperation>) objectMap.get("operation");
for (CodegenOperation operation : operations) {
for (CodegenParameter cp : operation.allParams) {
cp.vendorExtensions.put("x-swift-example", constructExampleCode(cp, modelMaps, new ExampleCodeGenerationContext()));
}
}
return objs;
}
public String constructExampleCode(CodegenParameter codegenParameter, HashMap<String, CodegenModel> modelMaps, ExampleCodeGenerationContext context) {
if (codegenParameter.isListContainer) { // array
return "[" + constructExampleCode(codegenParameter.items, modelMaps, context) + "]";
} else if (codegenParameter.isMap) { // TODO: map, file type
return "\"TODO\"";
} else if (languageSpecificPrimitives.contains(codegenParameter.dataType)) { // primitive type
if ("String".equals(codegenParameter.dataType) || "Character".equals(codegenParameter.dataType)) {
if (StringUtils.isEmpty(codegenParameter.example)) {
// no example given: fall back to a "<paramName>_example" placeholder
return "\"" + codegenParameter.paramName + "_example\"";
} else {
return "\"" + codegenParameter.example + "\"";
}
} else if ("Bool".equals(codegenParameter.dataType)) { // boolean
if (Boolean.parseBoolean(codegenParameter.example)) {
return "true";
} else {
return "false";
}
} else if ("URL".equals(codegenParameter.dataType)) { // URL
return "URL(string: \"https://example.com\")!";
} else if ("Date".equals(codegenParameter.dataType)) { // date
return "Date()";
} else { // numeric
if (StringUtils.isEmpty(codegenParameter.example)) {
// no example given: fall back to a fixed numeric placeholder
return "987";
} else {
return codegenParameter.example;
}
}
} else { // model
// look up the model
if (modelMaps.containsKey(codegenParameter.dataType)) {
return constructExampleCode(modelMaps.get(codegenParameter.dataType), modelMaps, context);
} else {
//LOGGER.error("Error in constructing examples. Failed to look up the model " + codegenParameter.dataType);
return "TODO";
}
}
}
private String constructExampleCode(CodegenProperty codegenProperty, HashMap<String, CodegenModel> modelMaps, ExampleCodeGenerationContext context) {
if (codegenProperty.isListContainer) { // array
return "[" + constructExampleCode(codegenProperty.items, modelMaps, context) + "]";
} else if (codegenProperty.isMap) { // TODO: map, file type
return "\"TODO\"";
} else if (languageSpecificPrimitives.contains(codegenProperty.dataType)) { // primitive type
if ("String".equals(codegenProperty.dataType) || "Character".equals(codegenProperty.dataType)) {
if (StringUtils.isEmpty(codegenProperty.example)) {
// no example given: fall back to a "<name>_example" placeholder
return "\"" + codegenProperty.name + "_example\"";
} else {
return "\"" + codegenProperty.example + "\"";
}
} else if ("Bool".equals(codegenProperty.dataType)) { // boolean
if (Boolean.parseBoolean(codegenProperty.example)) {
return "true";
} else {
return "false";
}
} else if ("URL".equals(codegenProperty.dataType)) { // URL
return "URL(string: \"https://example.com\")!";
} else if ("Date".equals(codegenProperty.dataType)) { // date
return "Date()";
} else { // numeric
if (StringUtils.isEmpty(codegenProperty.example)) {
// no example given: fall back to a fixed numeric placeholder
return "123";
} else {
return codegenProperty.example;
}
}
} else {
// look up the model
if (modelMaps.containsKey(codegenProperty.dataType)) {
return constructExampleCode(modelMaps.get(codegenProperty.dataType), modelMaps, context);
} else {
//LOGGER.error("Error in constructing examples. Failed to look up the model " + codegenProperty.dataType);
return "\"TODO\"";
}
}
}
private String constructExampleCode(CodegenModel codegenModel, HashMap<String, CodegenModel> modelMaps, ExampleCodeGenerationContext context) {
if (context.isTypeVisted(codegenModel.dataType)) {
String exampleCode = context.getExampleCode(codegenModel.dataType);
if (exampleCode != null) {
// Reuse already generated exampleCode
return exampleCode;
} else {
// Visited but no example code yet: break the cycle here to avoid infinite recursion (stack overflow).
return "{...}";
}
} else {
context.visitType(codegenModel.dataType);
String example = codegenModel.name + "(";
List<String> propertyExamples = new ArrayList<>();
for (CodegenProperty codegenProperty : codegenModel.vars) {
String propertyExample = constructExampleCode(codegenProperty, modelMaps, context);
propertyExamples.add(codegenProperty.name + ": " + propertyExample);
}
example += StringUtils.join(propertyExamples, ", ");
example += ")";
context.setExampleCode(codegenModel.dataType, example);
return example;
}
}
private static class ExampleCodeGenerationContext {
private Map<String, String> modelExampleCode = new HashMap<>();
public boolean isTypeVisted(String type) {
return modelExampleCode.containsKey(type);
}
public void visitType(String type) {
modelExampleCode.put(type, null);
}
public void setExampleCode(String type, String code) {
modelExampleCode.put(type, code);
}
public String getExampleCode(String type) {
return modelExampleCode.get(type);
}
}
}
|
[
"\"SWIFT_POST_PROCESS_FILE\"",
"\"SWIFT_POST_PROCESS_FILE\""
] |
[] |
[
"SWIFT_POST_PROCESS_FILE"
] |
[]
|
["SWIFT_POST_PROCESS_FILE"]
|
java
| 1 | 0 | |
link_test.go
|
// +build linux
package netlink
import (
"bytes"
"fmt"
"net"
"os"
"os/exec"
"syscall"
"testing"
"time"
"github.com/vishvananda/netlink/nl"
"github.com/vishvananda/netns"
"golang.org/x/sys/unix"
)
const (
testTxQLen int = 100
defaultTxQLen int = 1000
testTxQueues int = 4
testRxQueues int = 8
)
func testLinkAddDel(t *testing.T, link Link) {
links, err := LinkList()
if err != nil {
t.Fatal(err)
}
if err := LinkAdd(link); err != nil {
t.Fatal(err)
}
base := link.Attrs()
result, err := LinkByName(base.Name)
if err != nil {
t.Fatal(err)
}
rBase := result.Attrs()
if base.Index != 0 {
if base.Index != rBase.Index {
t.Fatalf("index is %d, should be %d", rBase.Index, base.Index)
}
}
if base.Group > 0 {
if base.Group != rBase.Group {
t.Fatalf("group is %d, should be %d", rBase.Group, base.Group)
}
}
if vlan, ok := link.(*Vlan); ok {
other, ok := result.(*Vlan)
if !ok {
t.Fatal("Result of create is not a vlan")
}
if vlan.VlanId != other.VlanId {
t.Fatal("Link.VlanId id doesn't match")
}
}
if veth, ok := result.(*Veth); ok {
if rBase.TxQLen != base.TxQLen {
t.Fatalf("qlen is %d, should be %d", rBase.TxQLen, base.TxQLen)
}
if rBase.NumTxQueues != base.NumTxQueues {
t.Fatalf("txQueues is %d, should be %d", rBase.NumTxQueues, base.NumTxQueues)
}
if rBase.NumRxQueues != base.NumRxQueues {
t.Fatalf("rxQueues is %d, should be %d", rBase.NumRxQueues, base.NumRxQueues)
}
if rBase.MTU != base.MTU {
t.Fatalf("MTU is %d, should be %d", rBase.MTU, base.MTU)
}
if original, ok := link.(*Veth); ok {
if original.PeerName != "" {
var peer *Veth
other, err := LinkByName(original.PeerName)
if err != nil {
t.Fatalf("Peer %s not created", veth.PeerName)
}
if peer, ok = other.(*Veth); !ok {
t.Fatalf("Peer %s is incorrect type", veth.PeerName)
}
if peer.TxQLen != testTxQLen {
t.Fatalf("TxQLen of peer is %d, should be %d", peer.TxQLen, testTxQLen)
}
if peer.NumTxQueues != testTxQueues {
t.Fatalf("NumTxQueues of peer is %d, should be %d", peer.NumTxQueues, testTxQueues)
}
if peer.NumRxQueues != testRxQueues {
t.Fatalf("NumRxQueues of peer is %d, should be %d", peer.NumRxQueues, testRxQueues)
}
if !bytes.Equal(peer.Attrs().HardwareAddr, original.PeerHardwareAddr) {
t.Fatalf("Peer MAC addr is %s, should be %s", peer.Attrs().HardwareAddr, original.PeerHardwareAddr)
}
}
}
} else {
// recent kernels set the parent index for veths in the response
if rBase.ParentIndex == 0 && base.ParentIndex != 0 {
t.Fatalf("Created link doesn't have parent %d but it should", base.ParentIndex)
} else if rBase.ParentIndex != 0 && base.ParentIndex == 0 {
t.Fatalf("Created link has parent %d but it shouldn't", rBase.ParentIndex)
} else if rBase.ParentIndex != 0 && base.ParentIndex != 0 {
if rBase.ParentIndex != base.ParentIndex {
t.Fatalf("Link.ParentIndex doesn't match %d != %d", rBase.ParentIndex, base.ParentIndex)
}
}
}
if _, ok := link.(*Wireguard); ok {
_, ok := result.(*Wireguard)
if !ok {
t.Fatal("Result of create is not a wireguard")
}
}
if vxlan, ok := link.(*Vxlan); ok {
other, ok := result.(*Vxlan)
if !ok {
t.Fatal("Result of create is not a vxlan")
}
compareVxlan(t, vxlan, other)
}
if ipv, ok := link.(*IPVlan); ok {
other, ok := result.(*IPVlan)
if !ok {
t.Fatal("Result of create is not a ipvlan")
}
if ipv.Mode != other.Mode {
t.Fatalf("Got unexpected mode: %d, expected: %d", other.Mode, ipv.Mode)
}
if ipv.Flag != other.Flag {
t.Fatalf("Got unexpected flag: %d, expected: %d", other.Flag, ipv.Flag)
}
}
if macv, ok := link.(*Macvlan); ok {
other, ok := result.(*Macvlan)
if !ok {
t.Fatal("Result of create is not a macvlan")
}
if macv.Mode != other.Mode {
t.Fatalf("Got unexpected mode: %d, expected: %d", other.Mode, macv.Mode)
}
}
if macv, ok := link.(*Macvtap); ok {
other, ok := result.(*Macvtap)
if !ok {
t.Fatal("Result of create is not a macvtap")
}
if macv.Mode != other.Mode {
t.Fatalf("Got unexpected mode: %d, expected: %d", other.Mode, macv.Mode)
}
}
if _, ok := link.(*Vti); ok {
_, ok := result.(*Vti)
if !ok {
t.Fatal("Result of create is not a vti")
}
}
if bond, ok := link.(*Bond); ok {
other, ok := result.(*Bond)
if !ok {
t.Fatal("Result of create is not a bond")
}
if bond.Mode != other.Mode {
t.Fatalf("Got unexpected mode: %d, expected: %d", other.Mode, bond.Mode)
}
if bond.ArpIpTargets != nil {
if other.ArpIpTargets == nil {
t.Fatalf("Got unexpected ArpIpTargets: nil")
}
if len(bond.ArpIpTargets) != len(other.ArpIpTargets) {
t.Fatalf("Got unexpected ArpIpTargets len: %d, expected: %d",
len(other.ArpIpTargets), len(bond.ArpIpTargets))
}
for i := range bond.ArpIpTargets {
if !bond.ArpIpTargets[i].Equal(other.ArpIpTargets[i]) {
t.Fatalf("Got unexpected ArpIpTargets: %s, expected: %s",
other.ArpIpTargets[i], bond.ArpIpTargets[i])
}
}
}
// Mode specific checks
if os.Getenv("TRAVIS_BUILD_DIR") != "" {
t.Log("Kernel in travis is too old for this check")
} else {
switch mode := bondModeToString[bond.Mode]; mode {
case "802.3ad":
if bond.AdSelect != other.AdSelect {
t.Fatalf("Got unexpected AdSelect: %d, expected: %d", other.AdSelect, bond.AdSelect)
}
if bond.AdActorSysPrio != other.AdActorSysPrio {
t.Fatalf("Got unexpected AdActorSysPrio: %d, expected: %d", other.AdActorSysPrio, bond.AdActorSysPrio)
}
if bond.AdUserPortKey != other.AdUserPortKey {
t.Fatalf("Got unexpected AdUserPortKey: %d, expected: %d", other.AdUserPortKey, bond.AdUserPortKey)
}
if !bytes.Equal(bond.AdActorSystem, other.AdActorSystem) {
t.Fatalf("Got unexpected AdActorSystem: %d, expected: %d", other.AdActorSystem, bond.AdActorSystem)
}
case "balance-tlb":
if bond.TlbDynamicLb != other.TlbDynamicLb {
t.Fatalf("Got unexpected TlbDynamicLb: %d, expected: %d", other.TlbDynamicLb, bond.TlbDynamicLb)
}
}
}
}
if _, ok := link.(*Iptun); ok {
_, ok := result.(*Iptun)
if !ok {
t.Fatal("Result of create is not a iptun")
}
}
if _, ok := link.(*Ip6tnl); ok {
_, ok := result.(*Ip6tnl)
if !ok {
t.Fatal("Result of create is not a ip6tnl")
}
}
if _, ok := link.(*Sittun); ok {
_, ok := result.(*Sittun)
if !ok {
t.Fatal("Result of create is not a sittun")
}
}
if geneve, ok := link.(*Geneve); ok {
other, ok := result.(*Geneve)
if !ok {
t.Fatal("Result of create is not a Geneve")
}
compareGeneve(t, geneve, other)
}
if gretap, ok := link.(*Gretap); ok {
other, ok := result.(*Gretap)
if !ok {
t.Fatal("Result of create is not a Gretap")
}
compareGretap(t, gretap, other)
}
if gretun, ok := link.(*Gretun); ok {
other, ok := result.(*Gretun)
if !ok {
t.Fatal("Result of create is not a Gretun")
}
compareGretun(t, gretun, other)
}
if xfrmi, ok := link.(*Xfrmi); ok {
other, ok := result.(*Xfrmi)
if !ok {
t.Fatal("Result of create is not a xfrmi")
}
compareXfrmi(t, xfrmi, other)
}
if tuntap, ok := link.(*Tuntap); ok {
other, ok := result.(*Tuntap)
if !ok {
t.Fatal("Result of create is not a tuntap")
}
compareTuntap(t, tuntap, other)
}
if err = LinkDel(link); err != nil {
t.Fatal(err)
}
links, err = LinkList()
if err != nil {
t.Fatal(err)
}
for _, l := range links {
if l.Attrs().Name == link.Attrs().Name {
t.Fatal("Link not removed properly")
}
}
}
func compareGeneve(t *testing.T, expected, actual *Geneve) {
if actual.ID != expected.ID {
t.Fatalf("Geneve.ID doesn't match: %d %d", actual.ID, expected.ID)
}
// set the Dport to 6081 (the linux default) if it wasn't specified at creation
if expected.Dport == 0 {
expected.Dport = 6081
}
if actual.Dport != expected.Dport {
t.Fatal("Geneve.Dport doesn't match")
}
if actual.Ttl != expected.Ttl {
t.Fatal("Geneve.Ttl doesn't match")
}
if actual.Tos != expected.Tos {
t.Fatal("Geneve.Tos doesn't match")
}
if !actual.Remote.Equal(expected.Remote) {
t.Fatalf("Geneve.Remote is not equal: %s!=%s", actual.Remote, expected.Remote)
}
// TODO: we should implement the rest of the geneve methods
}
func compareGretap(t *testing.T, expected, actual *Gretap) {
if actual.IKey != expected.IKey {
t.Fatal("Gretap.IKey doesn't match")
}
if actual.OKey != expected.OKey {
t.Fatal("Gretap.OKey doesn't match")
}
if actual.EncapSport != expected.EncapSport {
t.Fatal("Gretap.EncapSport doesn't match")
}
if actual.EncapDport != expected.EncapDport {
t.Fatal("Gretap.EncapDport doesn't match")
}
if expected.Local != nil && !actual.Local.Equal(expected.Local) {
t.Fatal("Gretap.Local doesn't match")
}
if expected.Remote != nil && !actual.Remote.Equal(expected.Remote) {
t.Fatal("Gretap.Remote doesn't match")
}
if actual.IFlags != expected.IFlags {
t.Fatal("Gretap.IFlags doesn't match")
}
if actual.OFlags != expected.OFlags {
t.Fatal("Gretap.OFlags doesn't match")
}
if actual.PMtuDisc != expected.PMtuDisc {
t.Fatal("Gretap.PMtuDisc doesn't match")
}
if actual.Ttl != expected.Ttl {
t.Fatal("Gretap.Ttl doesn't match")
}
if actual.Tos != expected.Tos {
t.Fatal("Gretap.Tos doesn't match")
}
if actual.EncapType != expected.EncapType {
t.Fatal("Gretap.EncapType doesn't match")
}
if actual.EncapFlags != expected.EncapFlags {
t.Fatal("Gretap.EncapFlags doesn't match")
}
if actual.Link != expected.Link {
t.Fatal("Gretap.Link doesn't match")
}
/*
* NOTE: setting the FlowBased flag doesn't seem to work, but for lack of
* a proper way to debug this, this check is disabled for now
if actual.FlowBased != expected.FlowBased {
t.Fatal("Gretap.FlowBased doesn't match")
}
*/
}
func compareGretun(t *testing.T, expected, actual *Gretun) {
if actual.Link != expected.Link {
t.Fatal("Gretun.Link doesn't match")
}
if actual.IFlags != expected.IFlags {
t.Fatal("Gretun.IFlags doesn't match")
}
if actual.OFlags != expected.OFlags {
t.Fatal("Gretun.OFlags doesn't match")
}
if actual.IKey != expected.IKey {
t.Fatal("Gretun.IKey doesn't match")
}
if actual.OKey != expected.OKey {
t.Fatal("Gretun.OKey doesn't match")
}
if expected.Local != nil && !actual.Local.Equal(expected.Local) {
t.Fatal("Gretun.Local doesn't match")
}
if expected.Remote != nil && !actual.Remote.Equal(expected.Remote) {
t.Fatal("Gretun.Remote doesn't match")
}
if actual.Ttl != expected.Ttl {
t.Fatal("Gretun.Ttl doesn't match")
}
if actual.Tos != expected.Tos {
t.Fatal("Gretun.Tos doesn't match")
}
if actual.PMtuDisc != expected.PMtuDisc {
t.Fatal("Gretun.PMtuDisc doesn't match")
}
if actual.EncapType != expected.EncapType {
t.Fatal("Gretun.EncapType doesn't match")
}
if actual.EncapFlags != expected.EncapFlags {
t.Fatal("Gretun.EncapFlags doesn't match")
}
if actual.EncapSport != expected.EncapSport {
t.Fatal("Gretun.EncapSport doesn't match")
}
if actual.EncapDport != expected.EncapDport {
t.Fatal("Gretun.EncapDport doesn't match")
}
}
func compareVxlan(t *testing.T, expected, actual *Vxlan) {
if actual.VxlanId != expected.VxlanId {
t.Fatal("Vxlan.VxlanId doesn't match")
}
if expected.SrcAddr != nil && !actual.SrcAddr.Equal(expected.SrcAddr) {
t.Fatal("Vxlan.SrcAddr doesn't match")
}
if expected.Group != nil && !actual.Group.Equal(expected.Group) {
t.Fatal("Vxlan.Group doesn't match")
}
if expected.TTL != -1 && actual.TTL != expected.TTL {
t.Fatal("Vxlan.TTL doesn't match")
}
if expected.TOS != -1 && actual.TOS != expected.TOS {
t.Fatal("Vxlan.TOS doesn't match")
}
if actual.Learning != expected.Learning {
t.Fatal("Vxlan.Learning doesn't match")
}
if actual.Proxy != expected.Proxy {
t.Fatal("Vxlan.Proxy doesn't match")
}
if actual.RSC != expected.RSC {
t.Fatal("Vxlan.RSC doesn't match")
}
if actual.L2miss != expected.L2miss {
t.Fatal("Vxlan.L2miss doesn't match")
}
if actual.L3miss != expected.L3miss {
t.Fatal("Vxlan.L3miss doesn't match")
}
if actual.GBP != expected.GBP {
t.Fatal("Vxlan.GBP doesn't match")
}
if actual.FlowBased != expected.FlowBased {
t.Fatal("Vxlan.FlowBased doesn't match")
}
if actual.UDP6ZeroCSumTx != expected.UDP6ZeroCSumTx {
t.Fatal("Vxlan.UDP6ZeroCSumTx doesn't match")
}
if actual.UDP6ZeroCSumRx != expected.UDP6ZeroCSumRx {
t.Fatal("Vxlan.UDP6ZeroCSumRx doesn't match")
}
if expected.NoAge {
if !actual.NoAge {
t.Fatal("Vxlan.NoAge doesn't match")
}
} else if expected.Age > 0 && actual.Age != expected.Age {
t.Fatal("Vxlan.Age doesn't match")
}
if expected.Limit > 0 && actual.Limit != expected.Limit {
t.Fatal("Vxlan.Limit doesn't match")
}
if expected.Port > 0 && actual.Port != expected.Port {
t.Fatal("Vxlan.Port doesn't match")
}
if expected.PortLow > 0 || expected.PortHigh > 0 {
if actual.PortLow != expected.PortLow {
t.Fatal("Vxlan.PortLow doesn't match")
}
if actual.PortHigh != expected.PortHigh {
t.Fatal("Vxlan.PortHigh doesn't match")
}
}
}
func compareXfrmi(t *testing.T, expected, actual *Xfrmi) {
if expected.Ifid != actual.Ifid {
t.Fatal("Xfrmi.Ifid doesn't match")
}
}
func compareTuntap(t *testing.T, expected, actual *Tuntap) {
if expected.Mode != actual.Mode {
t.Fatalf("Tuntap.Mode doesn't match: expected : %+v, got %+v", expected.Mode, actual.Mode)
}
if expected.Owner != actual.Owner {
t.Fatal("Tuntap.Owner doesn't match")
}
if expected.Group != actual.Group {
t.Fatal("Tuntap.Group doesn't match")
}
if expected.NonPersist != actual.NonPersist {
t.Fatal("Tuntap.Group doesn't match")
}
}
func TestLinkAddDelWithIndex(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
testLinkAddDel(t, &Dummy{LinkAttrs{Index: 1000, Name: "foo"}})
}
func TestLinkAddDelDummy(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
testLinkAddDel(t, &Dummy{LinkAttrs{Name: "foo"}})
}
func TestLinkAddDelDummyWithGroup(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
testLinkAddDel(t, &Dummy{LinkAttrs{Name: "foo", Group: 42}})
}
func TestLinkModify(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
linkName := "foo"
originalMTU := 1500
updatedMTU := 1442
link := &Dummy{LinkAttrs{Name: linkName, MTU: originalMTU}}
base := link.Attrs()
if err := LinkAdd(link); err != nil {
t.Fatal(err)
}
link.MTU = updatedMTU
if err := pkgHandle.LinkModify(link); err != nil {
t.Fatal(err)
}
result, err := LinkByName(linkName)
if err != nil {
t.Fatal(err)
}
rBase := result.Attrs()
if rBase.MTU != updatedMTU {
t.Fatalf("MTU is %d, should be %d", rBase.MTU, base.MTU)
}
}
func TestLinkAddDelIfb(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
testLinkAddDel(t, &Ifb{LinkAttrs{Name: "foo"}})
}
func TestLinkAddDelBridge(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
testLinkAddDel(t, &Bridge{LinkAttrs: LinkAttrs{Name: "foo", MTU: 1400}})
}
func TestLinkAddDelGeneve(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
testLinkAddDel(t, &Geneve{
LinkAttrs: LinkAttrs{Name: "foo4", EncapType: "geneve"},
ID: 0x1000,
Remote: net.IPv4(127, 0, 0, 1)})
testLinkAddDel(t, &Geneve{
LinkAttrs: LinkAttrs{Name: "foo6", EncapType: "geneve"},
ID: 0x1000,
Remote: net.ParseIP("2001:db8:ef33::2")})
}
func TestGeneveCompareToIP(t *testing.T) {
ns, tearDown := setUpNamedNetlinkTest(t)
defer tearDown()
expected := &Geneve{
ID: 0x764332, // 23 bits
Remote: net.ParseIP("1.2.3.4"),
Dport: 6081,
}
// Create interface
cmd := exec.Command("ip", "netns", "exec", ns,
"ip", "link", "add", "gen0",
"type", "geneve",
"vni", fmt.Sprint(expected.ID),
"remote", expected.Remote.String(),
// TODO: unit tests are currently done on ubuntu 16, and the version of iproute2 there doesn't support dstport
// We can still do most of the testing by verifying that we do read the default port
// "dstport", fmt.Sprint(expected.Dport),
)
out := &bytes.Buffer{}
cmd.Stdout = out
cmd.Stderr = out
if rc := cmd.Run(); rc != nil {
t.Fatal("failed creating link:", rc, out.String())
}
link, err := LinkByName("gen0")
if err != nil {
t.Fatal("Failed getting link: ", err)
}
actual, ok := link.(*Geneve)
if !ok {
t.Fatalf("resulted interface is not geneve: %T", link)
}
compareGeneve(t, expected, actual)
}
func TestLinkAddDelGretap(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
testLinkAddDel(t, &Gretap{
LinkAttrs: LinkAttrs{Name: "foo4"},
IKey: 0x101,
OKey: 0x101,
PMtuDisc: 1,
Local: net.IPv4(127, 0, 0, 1),
Remote: net.IPv4(127, 0, 0, 1)})
testLinkAddDel(t, &Gretap{
LinkAttrs: LinkAttrs{Name: "foo6"},
IKey: 0x101,
OKey: 0x101,
Local: net.ParseIP("2001:db8:abcd::1"),
Remote: net.ParseIP("2001:db8:ef33::2")})
}
func TestLinkAddDelGretun(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
testLinkAddDel(t, &Gretun{
LinkAttrs: LinkAttrs{Name: "foo4"},
Local: net.IPv4(127, 0, 0, 1),
Remote: net.IPv4(127, 0, 0, 1)})
testLinkAddDel(t, &Gretun{
LinkAttrs: LinkAttrs{Name: "foo6"},
Local: net.ParseIP("2001:db8:abcd::1"),
Remote: net.ParseIP("2001:db8:ef33::2")})
}
func TestLinkAddDelGretunPointToMultiPoint(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
testLinkAddDel(t, &Gretun{
LinkAttrs: LinkAttrs{Name: "foo"},
Local: net.IPv4(127, 0, 0, 1),
IKey: 1234,
OKey: 1234})
testLinkAddDel(t, &Gretun{
LinkAttrs: LinkAttrs{Name: "foo6"},
Local: net.ParseIP("2001:db8:1234::4"),
IKey: 5678,
OKey: 7890})
}
func TestLinkAddDelGretapFlowBased(t *testing.T) {
minKernelRequired(t, 4, 3)
tearDown := setUpNetlinkTest(t)
defer tearDown()
testLinkAddDel(t, &Gretap{
LinkAttrs: LinkAttrs{Name: "foo"},
FlowBased: true})
}
func TestLinkAddDelVlan(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
parent := &Dummy{LinkAttrs{Name: "foo"}}
if err := LinkAdd(parent); err != nil {
t.Fatal(err)
}
testLinkAddDel(t, &Vlan{LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index}, 900, VLAN_PROTOCOL_8021Q})
if err := LinkDel(parent); err != nil {
t.Fatal(err)
}
}
func TestLinkAddDelMacvlan(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
parent := &Dummy{LinkAttrs{Name: "foo"}}
if err := LinkAdd(parent); err != nil {
t.Fatal(err)
}
testLinkAddDel(t, &Macvlan{
LinkAttrs: LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index},
Mode: MACVLAN_MODE_PRIVATE,
})
testLinkAddDel(t, &Macvlan{
LinkAttrs: LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index},
Mode: MACVLAN_MODE_BRIDGE,
})
testLinkAddDel(t, &Macvlan{
LinkAttrs: LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index},
Mode: MACVLAN_MODE_VEPA,
})
if err := LinkDel(parent); err != nil {
t.Fatal(err)
}
}
func TestLinkAddDelMacvtap(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
parent := &Dummy{LinkAttrs{Name: "foo"}}
if err := LinkAdd(parent); err != nil {
t.Fatal(err)
}
testLinkAddDel(t, &Macvtap{
Macvlan: Macvlan{
LinkAttrs: LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index},
Mode: MACVLAN_MODE_PRIVATE,
},
})
testLinkAddDel(t, &Macvtap{
Macvlan: Macvlan{
LinkAttrs: LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index},
Mode: MACVLAN_MODE_BRIDGE,
},
})
testLinkAddDel(t, &Macvtap{
Macvlan: Macvlan{
LinkAttrs: LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index},
Mode: MACVLAN_MODE_VEPA,
},
})
if err := LinkDel(parent); err != nil {
t.Fatal(err)
}
}
func TestLinkAddDelVeth(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
peerMAC, _ := net.ParseMAC("00:12:34:56:78:02")
veth := &Veth{
LinkAttrs: LinkAttrs{
Name: "foo",
TxQLen: testTxQLen,
MTU: 1400,
NumTxQueues: testTxQueues,
NumRxQueues: testRxQueues,
},
PeerName: "bar",
PeerHardwareAddr: peerMAC,
}
testLinkAddDel(t, veth)
}
func TestLinkAddDelBond(t *testing.T) {
minKernelRequired(t, 3, 13)
tearDown := setUpNetlinkTest(t)
defer tearDown()
modes := []string{"802.3ad", "balance-tlb"}
for _, mode := range modes {
bond := NewLinkBond(LinkAttrs{Name: "foo"})
bond.Mode = StringToBondModeMap[mode]
switch mode {
case "802.3ad":
bond.AdSelect = BondAdSelect(BOND_AD_SELECT_BANDWIDTH)
bond.AdActorSysPrio = 1
bond.AdUserPortKey = 1
bond.AdActorSystem, _ = net.ParseMAC("06:aa:bb:cc:dd:ee")
bond.ArpIpTargets = []net.IP{net.ParseIP("1.1.1.1"), net.ParseIP("1.1.1.2")}
case "balance-tlb":
bond.TlbDynamicLb = 1
bond.ArpIpTargets = []net.IP{net.ParseIP("1.1.1.2"), net.ParseIP("1.1.1.1")}
}
testLinkAddDel(t, bond)
}
}
func TestLinkAddVethWithDefaultTxQLen(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
la := NewLinkAttrs()
la.Name = "foo"
veth := &Veth{LinkAttrs: la, PeerName: "bar"}
if err := LinkAdd(veth); err != nil {
t.Fatal(err)
}
link, err := LinkByName("foo")
if err != nil {
t.Fatal(err)
}
if veth, ok := link.(*Veth); !ok {
t.Fatalf("unexpected link type: %T", link)
} else {
if veth.TxQLen != defaultTxQLen {
t.Fatalf("TxQLen is %d, should be %d", veth.TxQLen, defaultTxQLen)
}
}
peer, err := LinkByName("bar")
if err != nil {
t.Fatal(err)
}
if veth, ok := peer.(*Veth); !ok {
t.Fatalf("unexpected link type: %T", link)
} else {
if veth.TxQLen != defaultTxQLen {
t.Fatalf("TxQLen is %d, should be %d", veth.TxQLen, defaultTxQLen)
}
}
}
func TestLinkAddVethWithZeroTxQLen(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
la := NewLinkAttrs()
la.Name = "foo"
la.TxQLen = 0
veth := &Veth{LinkAttrs: la, PeerName: "bar"}
if err := LinkAdd(veth); err != nil {
t.Fatal(err)
}
link, err := LinkByName("foo")
if err != nil {
t.Fatal(err)
}
if veth, ok := link.(*Veth); !ok {
t.Fatalf("unexpected link type: %T", link)
} else {
if veth.TxQLen != 0 {
t.Fatalf("TxQLen is %d, should be %d", veth.TxQLen, 0)
}
}
peer, err := LinkByName("bar")
if err != nil {
t.Fatal(err)
}
if veth, ok := peer.(*Veth); !ok {
t.Fatalf("unexpected link type: %T", link)
} else {
if veth.TxQLen != 0 {
t.Fatalf("TxQLen is %d, should be %d", veth.TxQLen, 0)
}
}
}
func TestLinkAddDelDummyWithGSO(t *testing.T) {
const (
gsoMaxSegs = 16
gsoMaxSize = 1 << 14
)
minKernelRequired(t, 4, 16)
tearDown := setUpNetlinkTest(t)
defer tearDown()
dummy := &Dummy{LinkAttrs: LinkAttrs{Name: "foo", GSOMaxSize: gsoMaxSize, GSOMaxSegs: gsoMaxSegs}}
if err := LinkAdd(dummy); err != nil {
t.Fatal(err)
}
link, err := LinkByName("foo")
if err != nil {
t.Fatal(err)
}
dummy, ok := link.(*Dummy)
if !ok {
t.Fatalf("unexpected link type: %T", link)
}
if dummy.GSOMaxSize != gsoMaxSize {
t.Fatalf("GSOMaxSize is %d, should be %d", dummy.GSOMaxSize, gsoMaxSize)
}
if dummy.GSOMaxSegs != gsoMaxSegs {
t.Fatalf("GSOMaxSeg is %d, should be %d", dummy.GSOMaxSegs, gsoMaxSegs)
}
}
func TestLinkAddDummyWithTxQLen(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
la := NewLinkAttrs()
la.Name = "foo"
la.TxQLen = 1500
dummy := &Dummy{LinkAttrs: la}
if err := LinkAdd(dummy); err != nil {
t.Fatal(err)
}
link, err := LinkByName("foo")
if err != nil {
t.Fatal(err)
}
if dummy, ok := link.(*Dummy); !ok {
t.Fatalf("unexpected link type: %T", link)
} else {
if dummy.TxQLen != 1500 {
t.Fatalf("TxQLen is %d, should be %d", dummy.TxQLen, 1500)
}
}
}
func TestLinkAddDelBridgeMaster(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
master := &Bridge{LinkAttrs: LinkAttrs{Name: "foo"}}
if err := LinkAdd(master); err != nil {
t.Fatal(err)
}
testLinkAddDel(t, &Dummy{LinkAttrs{Name: "bar", MasterIndex: master.Attrs().Index}})
if err := LinkDel(master); err != nil {
t.Fatal(err)
}
}
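// testLinkSetUnsetResetMaster enslaves a dummy link to master, re-parents it to newmaster,
// then clears the master entirely, checking MasterIndex after each step.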
func testLinkSetUnsetResetMaster(t *testing.T, master, newmaster Link) {
slave := &Dummy{LinkAttrs{Name: "baz"}}
if err := LinkAdd(slave); err != nil {
t.Fatal(err)
}
nonexistsmaster := &Bridge{LinkAttrs: LinkAttrs{Name: "foobar"}}
if err := LinkSetMaster(slave, nonexistsmaster); err == nil {
t.Fatal("error expected")
}
if err := LinkSetMaster(slave, master); err != nil {
t.Fatal(err)
}
link, err := LinkByName("baz")
if err != nil {
t.Fatal(err)
}
if link.Attrs().MasterIndex != master.Attrs().Index {
t.Fatal("Master not set properly")
}
if err := LinkSetMaster(slave, newmaster); err != nil {
t.Fatal(err)
}
link, err = LinkByName("baz")
if err != nil {
t.Fatal(err)
}
if link.Attrs().MasterIndex != newmaster.Attrs().Index {
t.Fatal("Master not reset properly")
}
if err := LinkSetNoMaster(slave); err != nil {
t.Fatal(err)
}
link, err = LinkByName("baz")
if err != nil {
t.Fatal(err)
}
if link.Attrs().MasterIndex != 0 {
t.Fatal("Master not unset properly")
}
if err := LinkDel(slave); err != nil {
t.Fatal(err)
}
}
func TestLinkSetUnsetResetMaster(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
master := &Bridge{LinkAttrs: LinkAttrs{Name: "foo"}}
if err := LinkAdd(master); err != nil {
t.Fatal(err)
}
newmaster := &Bridge{LinkAttrs: LinkAttrs{Name: "bar"}}
if err := LinkAdd(newmaster); err != nil {
t.Fatal(err)
}
testLinkSetUnsetResetMaster(t, master, newmaster)
if err := LinkDel(newmaster); err != nil {
t.Fatal(err)
}
if err := LinkDel(master); err != nil {
t.Fatal(err)
}
}
func TestLinkSetUnsetResetMasterBond(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
master := NewLinkBond(LinkAttrs{Name: "foo"})
master.Mode = BOND_MODE_BALANCE_RR
if err := LinkAdd(master); err != nil {
t.Fatal(err)
}
newmaster := NewLinkBond(LinkAttrs{Name: "bar"})
newmaster.Mode = BOND_MODE_BALANCE_RR
if err := LinkAdd(newmaster); err != nil {
t.Fatal(err)
}
testLinkSetUnsetResetMaster(t, master, newmaster)
if err := LinkDel(newmaster); err != nil {
t.Fatal(err)
}
if err := LinkDel(master); err != nil {
t.Fatal(err)
}
}
func TestLinkSetNs(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
basens, err := netns.Get()
if err != nil {
t.Fatal("Failed to get basens")
}
defer basens.Close()
newns, err := netns.New()
if err != nil {
t.Fatal("Failed to create newns")
}
defer newns.Close()
link := &Veth{LinkAttrs{Name: "foo"}, "bar", nil, nil}
if err := LinkAdd(link); err != nil {
t.Fatal(err)
}
peer, err := LinkByName("bar")
if err != nil {
t.Fatal(err)
}
err = LinkSetNsFd(peer, int(basens))
if err != nil {
t.Fatal("Failed to move link to basens")
}
_, err = LinkByName("bar")
if err == nil {
t.Fatal("Link bar is still in newns")
}
err = netns.Set(basens)
if err != nil {
t.Fatal("Failed to set basens")
}
peer, err = LinkByName("bar")
if err != nil {
t.Fatal("Link is not in basens")
}
if err := LinkDel(peer); err != nil {
t.Fatal(err)
}
err = netns.Set(newns)
if err != nil {
t.Fatal("Failed to set newns")
}
_, err = LinkByName("foo")
if err == nil {
t.Fatal("Other half of veth pair not deleted")
}
}
func TestLinkAddDelWireguard(t *testing.T) {
minKernelRequired(t, 5, 6)
tearDown := setUpNetlinkTest(t)
defer tearDown()
testLinkAddDel(t, &Wireguard{LinkAttrs: LinkAttrs{Name: "wg0"}})
}
func TestVethPeerNs(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
basens, err := netns.Get()
if err != nil {
t.Fatal("Failed to get basens")
}
defer basens.Close()
newns, err := netns.New()
if err != nil {
t.Fatal("Failed to create newns")
}
defer newns.Close()
link := &Veth{LinkAttrs{Name: "foo"}, "bar", nil, NsFd(basens)}
if err := LinkAdd(link); err != nil {
t.Fatal(err)
}
_, err = LinkByName("bar")
if err == nil {
t.Fatal("Link bar is in newns")
}
err = netns.Set(basens)
if err != nil {
t.Fatal("Failed to set basens")
}
_, err = LinkByName("bar")
if err != nil {
t.Fatal("Link bar is not in basens")
}
err = netns.Set(newns)
if err != nil {
t.Fatal("Failed to set newns")
}
_, err = LinkByName("foo")
if err != nil {
t.Fatal("Link foo is not in newns")
}
}
func TestVethPeerNs2(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
basens, err := netns.Get()
if err != nil {
t.Fatal("Failed to get basens")
}
defer basens.Close()
onens, err := netns.New()
if err != nil {
t.Fatal("Failed to create newns")
}
defer onens.Close()
twons, err := netns.New()
if err != nil {
t.Fatal("Failed to create twons")
}
defer twons.Close()
link := &Veth{LinkAttrs{Name: "foo", Namespace: NsFd(onens)}, "bar", nil, NsFd(basens)}
if err := LinkAdd(link); err != nil {
t.Fatal(err)
}
_, err = LinkByName("foo")
if err == nil {
t.Fatal("Link foo is in twons")
}
_, err = LinkByName("bar")
if err == nil {
t.Fatal("Link bar is in twons")
}
err = netns.Set(basens)
if err != nil {
t.Fatal("Failed to set basens")
}
_, err = LinkByName("bar")
if err != nil {
t.Fatal("Link bar is not in basens")
}
err = netns.Set(onens)
if err != nil {
t.Fatal("Failed to set onens")
}
_, err = LinkByName("foo")
if err != nil {
t.Fatal("Link foo is not in onens")
}
}
func TestLinkAddDelVxlan(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
parent := &Dummy{
LinkAttrs{Name: "foo"},
}
if err := LinkAdd(parent); err != nil {
t.Fatal(err)
}
vxlan := Vxlan{
LinkAttrs: LinkAttrs{
Name: "bar",
},
VxlanId: 10,
VtepDevIndex: parent.Index,
Learning: true,
L2miss: true,
L3miss: true,
}
testLinkAddDel(t, &vxlan)
if err := LinkDel(parent); err != nil {
t.Fatal(err)
}
}
func TestLinkAddDelVxlanUdpCSum6(t *testing.T) {
minKernelRequired(t, 3, 16)
tearDown := setUpNetlinkTest(t)
defer tearDown()
parent := &Dummy{
LinkAttrs{Name: "foo"},
}
if err := LinkAdd(parent); err != nil {
t.Fatal(err)
}
vxlan := Vxlan{
LinkAttrs: LinkAttrs{
Name: "bar",
},
VxlanId: 10,
VtepDevIndex: parent.Index,
Learning: true,
L2miss: true,
L3miss: true,
UDP6ZeroCSumTx: true,
UDP6ZeroCSumRx: true,
}
testLinkAddDel(t, &vxlan)
if err := LinkDel(parent); err != nil {
t.Fatal(err)
}
}
func TestLinkAddDelVxlanGbp(t *testing.T) {
minKernelRequired(t, 4, 0)
tearDown := setUpNetlinkTest(t)
defer tearDown()
parent := &Dummy{
LinkAttrs{Name: "foo"},
}
if err := LinkAdd(parent); err != nil {
t.Fatal(err)
}
vxlan := Vxlan{
LinkAttrs: LinkAttrs{
Name: "bar",
},
VxlanId: 10,
VtepDevIndex: parent.Index,
Learning: true,
L2miss: true,
L3miss: true,
UDP6ZeroCSumTx: true,
UDP6ZeroCSumRx: true,
GBP: true,
}
testLinkAddDel(t, &vxlan)
if err := LinkDel(parent); err != nil {
t.Fatal(err)
}
}
func TestLinkAddDelVxlanFlowBased(t *testing.T) {
minKernelRequired(t, 4, 3)
tearDown := setUpNetlinkTest(t)
defer tearDown()
vxlan := Vxlan{
LinkAttrs: LinkAttrs{
Name: "foo",
},
Learning: false,
FlowBased: true,
}
testLinkAddDel(t, &vxlan)
}
func TestLinkAddDelIPVlanL2(t *testing.T) {
minKernelRequired(t, 4, 2)
tearDown := setUpNetlinkTest(t)
defer tearDown()
parent := &Dummy{LinkAttrs{Name: "foo"}}
if err := LinkAdd(parent); err != nil {
t.Fatal(err)
}
ipv := IPVlan{
LinkAttrs: LinkAttrs{
Name: "bar",
ParentIndex: parent.Index,
},
Mode: IPVLAN_MODE_L2,
}
testLinkAddDel(t, &ipv)
}
func TestLinkAddDelIPVlanL3(t *testing.T) {
minKernelRequired(t, 4, 2)
tearDown := setUpNetlinkTest(t)
defer tearDown()
parent := &Dummy{LinkAttrs{Name: "foo"}}
if err := LinkAdd(parent); err != nil {
t.Fatal(err)
}
ipv := IPVlan{
LinkAttrs: LinkAttrs{
Name: "bar",
ParentIndex: parent.Index,
},
Mode: IPVLAN_MODE_L3,
}
testLinkAddDel(t, &ipv)
}
func TestLinkAddDelIPVlanVepa(t *testing.T) {
minKernelRequired(t, 4, 15)
tearDown := setUpNetlinkTest(t)
defer tearDown()
parent := &Dummy{LinkAttrs{Name: "foo"}}
if err := LinkAdd(parent); err != nil {
t.Fatal(err)
}
ipv := IPVlan{
LinkAttrs: LinkAttrs{
Name: "bar",
ParentIndex: parent.Index,
},
Mode: IPVLAN_MODE_L3,
Flag: IPVLAN_FLAG_VEPA,
}
testLinkAddDel(t, &ipv)
}
func TestLinkAddDelIPVlanNoParent(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
ipv := IPVlan{
LinkAttrs: LinkAttrs{
Name: "bar",
},
Mode: IPVLAN_MODE_L3,
}
err := LinkAdd(&ipv)
if err == nil {
t.Fatal("Add should fail if ipvlan creating without ParentIndex")
}
if err.Error() != "Can't create ipvlan link without ParentIndex" {
t.Fatalf("Error should be about missing ParentIndex, got %q", err)
}
}
func TestLinkByIndex(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
dummy := &Dummy{LinkAttrs{Name: "dummy"}}
if err := LinkAdd(dummy); err != nil {
t.Fatal(err)
}
found, err := LinkByIndex(dummy.Index)
if err != nil {
t.Fatal(err)
}
if found.Attrs().Index != dummy.Attrs().Index {
t.Fatalf("Indices don't match: %v != %v", found.Attrs().Index, dummy.Attrs().Index)
}
if err := LinkDel(dummy); err != nil {
t.Fatal(err)
}
// test not found
_, err = LinkByIndex(dummy.Attrs().Index)
if err == nil {
t.Fatalf("LinkByIndex(%v) found deleted link", err)
}
}
func TestLinkSet(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
iface := &Dummy{LinkAttrs{Name: "foo"}}
if err := LinkAdd(iface); err != nil {
t.Fatal(err)
}
link, err := LinkByName("foo")
if err != nil {
t.Fatal(err)
}
err = LinkSetName(link, "bar")
if err != nil {
t.Fatalf("Could not change interface name: %v", err)
}
link, err = LinkByName("bar")
if err != nil {
t.Fatalf("Interface name not changed: %v", err)
}
err = LinkSetMTU(link, 1400)
if err != nil {
t.Fatalf("Could not set MTU: %v", err)
}
link, err = LinkByName("bar")
if err != nil {
t.Fatal(err)
}
if link.Attrs().MTU != 1400 {
t.Fatal("MTU not changed")
}
err = LinkSetTxQLen(link, 500)
if err != nil {
t.Fatalf("Could not set txqlen: %v", err)
}
link, err = LinkByName("bar")
if err != nil {
t.Fatal(err)
}
if link.Attrs().TxQLen != 500 {
t.Fatal("txqlen not changed")
}
addr, err := net.ParseMAC("00:12:34:56:78:AB")
if err != nil {
t.Fatal(err)
}
err = LinkSetHardwareAddr(link, addr)
if err != nil {
t.Fatal(err)
}
link, err = LinkByName("bar")
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(link.Attrs().HardwareAddr, addr) {
t.Fatalf("hardware address not changed")
}
err = LinkSetAlias(link, "barAlias")
if err != nil {
t.Fatalf("Could not set alias: %v", err)
}
link, err = LinkByName("bar")
if err != nil {
t.Fatal(err)
}
if link.Attrs().Alias != "barAlias" {
t.Fatalf("alias not changed")
}
link, err = LinkByAlias("barAlias")
if err != nil {
t.Fatal(err)
}
err = LinkSetGroup(link, 42)
if err != nil {
t.Fatalf("Could not set group: %v", err)
}
link, err = LinkByName("bar")
if err != nil {
t.Fatal(err)
}
if link.Attrs().Group != 42 {
t.Fatal("Link group not changed")
}
}
func TestLinkSetARP(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
iface := &Veth{LinkAttrs: LinkAttrs{Name: "foo", TxQLen: testTxQLen, MTU: 1500}, PeerName: "banana"}
if err := LinkAdd(iface); err != nil {
t.Fatal(err)
}
link, err := LinkByName("foo")
if err != nil {
t.Fatal(err)
}
err = LinkSetARPOff(link)
if err != nil {
t.Fatal(err)
}
link, err = LinkByName("foo")
if err != nil {
t.Fatal(err)
}
if link.Attrs().RawFlags&unix.IFF_NOARP != uint32(unix.IFF_NOARP) {
t.Fatalf("NOARP was not set")
}
err = LinkSetARPOn(link)
if err != nil {
t.Fatal(err)
}
link, err = LinkByName("foo")
if err != nil {
t.Fatal(err)
}
if link.Attrs().RawFlags&unix.IFF_NOARP != 0 {
t.Fatalf("NOARP is still set")
}
}
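// expectLinkUpdate waits up to one minute for a LinkUpdate whose interface name and IFF_UP state match the given values.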
func expectLinkUpdate(ch <-chan LinkUpdate, ifaceName string, up bool) bool {
timeout := time.After(time.Minute)
for {
select {
case update := <-ch:
if ifaceName == update.Link.Attrs().Name && (update.IfInfomsg.Flags&unix.IFF_UP != 0) == up {
return true
}
case <-timeout:
return false
}
}
}
func TestLinkSubscribe(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
ch := make(chan LinkUpdate)
done := make(chan struct{})
defer close(done)
if err := LinkSubscribe(ch, done); err != nil {
t.Fatal(err)
}
link := &Veth{LinkAttrs{Name: "foo", TxQLen: testTxQLen, MTU: 1400}, "bar", nil, nil}
if err := LinkAdd(link); err != nil {
t.Fatal(err)
}
if !expectLinkUpdate(ch, "foo", false) {
t.Fatal("Add update not received as expected")
}
if err := LinkSetUp(link); err != nil {
t.Fatal(err)
}
if !expectLinkUpdate(ch, "foo", true) {
t.Fatal("Link Up update not received as expected")
}
if err := LinkDel(link); err != nil {
t.Fatal(err)
}
if !expectLinkUpdate(ch, "foo", false) {
t.Fatal("Del update not received as expected")
}
}
func TestLinkSubscribeWithOptions(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
ch := make(chan LinkUpdate)
done := make(chan struct{})
defer close(done)
var lastError error
defer func() {
if lastError != nil {
t.Fatalf("Fatal error received during subscription: %v", lastError)
}
}()
if err := LinkSubscribeWithOptions(ch, done, LinkSubscribeOptions{
ErrorCallback: func(err error) {
lastError = err
},
}); err != nil {
t.Fatal(err)
}
link := &Veth{LinkAttrs{Name: "foo", TxQLen: testTxQLen, MTU: 1400}, "bar", nil, nil}
if err := LinkAdd(link); err != nil {
t.Fatal(err)
}
if !expectLinkUpdate(ch, "foo", false) {
t.Fatal("Add update not received as expected")
}
}
func TestLinkSubscribeAt(t *testing.T) {
skipUnlessRoot(t)
// Create a handle on a custom netns
newNs, err := netns.New()
if err != nil {
t.Fatal(err)
}
defer newNs.Close()
nh, err := NewHandleAt(newNs)
if err != nil {
t.Fatal(err)
}
defer nh.Delete()
// Subscribe for Link events on the custom netns
ch := make(chan LinkUpdate)
done := make(chan struct{})
defer close(done)
if err := LinkSubscribeAt(newNs, ch, done); err != nil {
t.Fatal(err)
}
link := &Veth{LinkAttrs{Name: "test", TxQLen: testTxQLen, MTU: 1400}, "bar", nil, nil}
if err := nh.LinkAdd(link); err != nil {
t.Fatal(err)
}
if !expectLinkUpdate(ch, "test", false) {
t.Fatal("Add update not received as expected")
}
if err := nh.LinkSetUp(link); err != nil {
t.Fatal(err)
}
if !expectLinkUpdate(ch, "test", true) {
t.Fatal("Link Up update not received as expected")
}
if err := nh.LinkDel(link); err != nil {
t.Fatal(err)
}
if !expectLinkUpdate(ch, "test", false) {
t.Fatal("Del update not received as expected")
}
}
func TestLinkSubscribeListExisting(t *testing.T) {
skipUnlessRoot(t)
// Create a handle on a custom netns
newNs, err := netns.New()
if err != nil {
t.Fatal(err)
}
defer newNs.Close()
nh, err := NewHandleAt(newNs)
if err != nil {
t.Fatal(err)
}
defer nh.Delete()
link := &Veth{LinkAttrs{Name: "test", TxQLen: testTxQLen, MTU: 1400}, "bar", nil, nil}
if err := nh.LinkAdd(link); err != nil {
t.Fatal(err)
}
// Subscribe for Link events on the custom netns
ch := make(chan LinkUpdate)
done := make(chan struct{})
defer close(done)
if err := LinkSubscribeWithOptions(ch, done, LinkSubscribeOptions{
Namespace: &newNs,
ListExisting: true},
); err != nil {
t.Fatal(err)
}
if !expectLinkUpdate(ch, "test", false) {
t.Fatal("Add update not received as expected")
}
if err := nh.LinkSetUp(link); err != nil {
t.Fatal(err)
}
if !expectLinkUpdate(ch, "test", true) {
t.Fatal("Link Up update not received as expected")
}
if err := nh.LinkDel(link); err != nil {
t.Fatal(err)
}
if !expectLinkUpdate(ch, "test", false) {
t.Fatal("Del update not received as expected")
}
}
func TestLinkStats(t *testing.T) {
defer setUpNetlinkTest(t)()
// Create a veth pair and verify the cross-stats once both
// ends are brought up and some ICMPv6 packets are exchanged
v0 := "v0"
v1 := "v1"
vethLink := &Veth{LinkAttrs: LinkAttrs{Name: v0}, PeerName: v1}
if err := LinkAdd(vethLink); err != nil {
t.Fatal(err)
}
veth0, err := LinkByName(v0)
if err != nil {
t.Fatal(err)
}
if err := LinkSetUp(veth0); err != nil {
t.Fatal(err)
}
veth1, err := LinkByName(v1)
if err != nil {
t.Fatal(err)
}
if err := LinkSetUp(veth1); err != nil {
t.Fatal(err)
}
time.Sleep(2 * time.Second)
// verify statistics
veth0, err = LinkByName(v0)
if err != nil {
t.Fatal(err)
}
veth1, err = LinkByName(v1)
if err != nil {
t.Fatal(err)
}
v0Stats := veth0.Attrs().Statistics
v1Stats := veth1.Attrs().Statistics
if v0Stats.RxPackets != v1Stats.TxPackets || v0Stats.TxPackets != v1Stats.RxPackets ||
v0Stats.RxBytes != v1Stats.TxBytes || v0Stats.TxBytes != v1Stats.RxBytes {
t.Fatalf("veth ends counters differ:\n%v\n%v", v0Stats, v1Stats)
}
}
func TestLinkXdp(t *testing.T) {
links, err := LinkList()
if err != nil {
t.Fatal(err)
}
var testXdpLink Link
for _, link := range links {
if link.Attrs().Xdp != nil && !link.Attrs().Xdp.Attached {
testXdpLink = link
break
}
}
if testXdpLink == nil {
t.Skipf("No link supporting XDP found")
}
fd, err := loadSimpleBpf(BPF_PROG_TYPE_XDP, 2 /*XDP_PASS*/)
if err != nil {
t.Skipf("Loading bpf program failed: %s", err)
}
if err := LinkSetXdpFd(testXdpLink, fd); err != nil {
t.Fatal(err)
}
if err := LinkSetXdpFdWithFlags(testXdpLink, fd, nl.XDP_FLAGS_UPDATE_IF_NOEXIST); err != unix.EBUSY {
t.Fatal(err)
}
if err := LinkSetXdpFd(testXdpLink, -1); err != nil {
t.Fatal(err)
}
}
func TestLinkAddDelIptun(t *testing.T) {
minKernelRequired(t, 4, 9)
tearDown := setUpNetlinkTest(t)
defer tearDown()
testLinkAddDel(t, &Iptun{
LinkAttrs: LinkAttrs{Name: "iptunfoo"},
PMtuDisc: 1,
Local: net.IPv4(127, 0, 0, 1),
Remote: net.IPv4(127, 0, 0, 1)})
}
func TestLinkAddDelIp6tnl(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
testLinkAddDel(t, &Ip6tnl{
LinkAttrs: LinkAttrs{Name: "ip6tnltest"},
Local: net.ParseIP("2001:db8::100"),
Remote: net.ParseIP("2001:db8::200"),
})
}
func TestLinkAddDelSittun(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
testLinkAddDel(t, &Sittun{
LinkAttrs: LinkAttrs{Name: "sittunfoo"},
PMtuDisc: 1,
Local: net.IPv4(127, 0, 0, 1),
Remote: net.IPv4(127, 0, 0, 1)})
}
func TestLinkAddDelVti(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
testLinkAddDel(t, &Vti{
LinkAttrs: LinkAttrs{Name: "vtifoo"},
IKey: 0x101,
OKey: 0x101,
Local: net.IPv4(127, 0, 0, 1),
Remote: net.IPv4(127, 0, 0, 1)})
testLinkAddDel(t, &Vti{
LinkAttrs: LinkAttrs{Name: "vtibar"},
IKey: 0x101,
OKey: 0x101,
Local: net.IPv6loopback,
Remote: net.IPv6loopback})
}
func TestBridgeCreationWithMulticastSnooping(t *testing.T) {
minKernelRequired(t, 4, 4)
tearDown := setUpNetlinkTest(t)
defer tearDown()
bridgeWithDefaultMcastSnoopName := "foo"
bridgeWithDefaultMcastSnoop := &Bridge{LinkAttrs: LinkAttrs{Name: bridgeWithDefaultMcastSnoopName}}
if err := LinkAdd(bridgeWithDefaultMcastSnoop); err != nil {
t.Fatal(err)
}
expectMcastSnooping(t, bridgeWithDefaultMcastSnoopName, true)
if err := LinkDel(bridgeWithDefaultMcastSnoop); err != nil {
t.Fatal(err)
}
mcastSnoop := true
bridgeWithMcastSnoopOnName := "bar"
bridgeWithMcastSnoopOn := &Bridge{LinkAttrs: LinkAttrs{Name: bridgeWithMcastSnoopOnName}, MulticastSnooping: &mcastSnoop}
if err := LinkAdd(bridgeWithMcastSnoopOn); err != nil {
t.Fatal(err)
}
expectMcastSnooping(t, bridgeWithMcastSnoopOnName, true)
if err := LinkDel(bridgeWithMcastSnoopOn); err != nil {
t.Fatal(err)
}
mcastSnoop = false
bridgeWithMcastSnoopOffName := "foobar"
bridgeWithMcastSnoopOff := &Bridge{LinkAttrs: LinkAttrs{Name: bridgeWithMcastSnoopOffName}, MulticastSnooping: &mcastSnoop}
if err := LinkAdd(bridgeWithMcastSnoopOff); err != nil {
t.Fatal(err)
}
expectMcastSnooping(t, bridgeWithMcastSnoopOffName, false)
if err := LinkDel(bridgeWithMcastSnoopOff); err != nil {
t.Fatal(err)
}
}
func TestBridgeSetMcastSnoop(t *testing.T) {
minKernelRequired(t, 4, 4)
tearDown := setUpNetlinkTest(t)
defer tearDown()
bridgeName := "foo"
bridge := &Bridge{LinkAttrs: LinkAttrs{Name: bridgeName}}
if err := LinkAdd(bridge); err != nil {
t.Fatal(err)
}
expectMcastSnooping(t, bridgeName, true)
if err := BridgeSetMcastSnoop(bridge, false); err != nil {
t.Fatal(err)
}
expectMcastSnooping(t, bridgeName, false)
if err := BridgeSetMcastSnoop(bridge, true); err != nil {
t.Fatal(err)
}
expectMcastSnooping(t, bridgeName, true)
if err := LinkDel(bridge); err != nil {
t.Fatal(err)
}
}
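// expectMcastSnooping fails the test unless the named bridge reports the expected multicast snooping state.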
func expectMcastSnooping(t *testing.T, linkName string, expected bool) {
bridge, err := LinkByName(linkName)
if err != nil {
t.Fatal(err)
}
if actual := *bridge.(*Bridge).MulticastSnooping; actual != expected {
t.Fatalf("expected %t got %t", expected, actual)
}
}
func TestBridgeSetVlanFiltering(t *testing.T) {
minKernelRequired(t, 4, 4)
tearDown := setUpNetlinkTest(t)
defer tearDown()
bridgeName := "foo"
bridge := &Bridge{LinkAttrs: LinkAttrs{Name: bridgeName}}
if err := LinkAdd(bridge); err != nil {
t.Fatal(err)
}
expectVlanFiltering(t, bridgeName, false)
if err := BridgeSetVlanFiltering(bridge, true); err != nil {
t.Fatal(err)
}
expectVlanFiltering(t, bridgeName, true)
if err := BridgeSetVlanFiltering(bridge, false); err != nil {
t.Fatal(err)
}
expectVlanFiltering(t, bridgeName, false)
if err := LinkDel(bridge); err != nil {
t.Fatal(err)
}
}
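// expectVlanFiltering fails the test unless the named bridge reports the expected VLAN filtering state.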
func expectVlanFiltering(t *testing.T, linkName string, expected bool) {
bridge, err := LinkByName(linkName)
if err != nil {
t.Fatal(err)
}
if actual := *bridge.(*Bridge).VlanFiltering; actual != expected {
t.Fatalf("expected %t got %t", expected, actual)
}
}
func TestBridgeCreationWithAgeingTime(t *testing.T) {
minKernelRequired(t, 3, 18)
tearDown := setUpNetlinkTest(t)
defer tearDown()
bridgeWithSpecifiedAgeingTimeName := "foo"
ageingTime := uint32(20000)
bridgeWithSpecifiedAgeingTime := &Bridge{LinkAttrs: LinkAttrs{Name: bridgeWithSpecifiedAgeingTimeName}, AgeingTime: &ageingTime}
if err := LinkAdd(bridgeWithSpecifiedAgeingTime); err != nil {
t.Fatal(err)
}
retrievedBridge, err := LinkByName(bridgeWithSpecifiedAgeingTimeName)
if err != nil {
t.Fatal(err)
}
actualAgeingTime := *retrievedBridge.(*Bridge).AgeingTime
if actualAgeingTime != ageingTime {
t.Fatalf("expected %d got %d", ageingTime, actualAgeingTime)
}
if err := LinkDel(bridgeWithSpecifiedAgeingTime); err != nil {
t.Fatal(err)
}
bridgeWithDefaultAgeingTimeName := "bar"
bridgeWithDefaultAgeingTime := &Bridge{LinkAttrs: LinkAttrs{Name: bridgeWithDefaultAgeingTimeName}}
if err := LinkAdd(bridgeWithDefaultAgeingTime); err != nil {
t.Fatal(err)
}
retrievedBridge, err = LinkByName(bridgeWithDefaultAgeingTimeName)
if err != nil {
t.Fatal(err)
}
actualAgeingTime = *retrievedBridge.(*Bridge).AgeingTime
if actualAgeingTime != 30000 {
t.Fatalf("expected %d got %d", 30000, actualAgeingTime)
}
if err := LinkDel(bridgeWithDefaultAgeingTime); err != nil {
t.Fatal(err)
}
}
func TestBridgeCreationWithHelloTime(t *testing.T) {
minKernelRequired(t, 3, 18)
tearDown := setUpNetlinkTest(t)
defer tearDown()
bridgeWithSpecifiedHelloTimeName := "foo"
helloTime := uint32(300)
bridgeWithSpecifiedHelloTime := &Bridge{LinkAttrs: LinkAttrs{Name: bridgeWithSpecifiedHelloTimeName}, HelloTime: &helloTime}
if err := LinkAdd(bridgeWithSpecifiedHelloTime); err != nil {
t.Fatal(err)
}
retrievedBridge, err := LinkByName(bridgeWithSpecifiedHelloTimeName)
if err != nil {
t.Fatal(err)
}
actualHelloTime := *retrievedBridge.(*Bridge).HelloTime
if actualHelloTime != helloTime {
t.Fatalf("expected %d got %d", helloTime, actualHelloTime)
}
if err := LinkDel(bridgeWithSpecifiedHelloTime); err != nil {
t.Fatal(err)
}
bridgeWithDefaultHelloTimeName := "bar"
bridgeWithDefaultHelloTime := &Bridge{LinkAttrs: LinkAttrs{Name: bridgeWithDefaultHelloTimeName}}
if err := LinkAdd(bridgeWithDefaultHelloTime); err != nil {
t.Fatal(err)
}
retrievedBridge, err = LinkByName(bridgeWithDefaultHelloTimeName)
if err != nil {
t.Fatal(err)
}
actualHelloTime = *retrievedBridge.(*Bridge).HelloTime
if actualHelloTime != 200 {
t.Fatalf("expected %d got %d", 200, actualHelloTime)
}
if err := LinkDel(bridgeWithDefaultHelloTime); err != nil {
t.Fatal(err)
}
}
func TestBridgeCreationWithVlanFiltering(t *testing.T) {
minKernelRequired(t, 3, 18)
tearDown := setUpNetlinkTest(t)
defer tearDown()
bridgeWithVlanFilteringEnabledName := "foo"
vlanFiltering := true
bridgeWithVlanFilteringEnabled := &Bridge{LinkAttrs: LinkAttrs{Name: bridgeWithVlanFilteringEnabledName}, VlanFiltering: &vlanFiltering}
if err := LinkAdd(bridgeWithVlanFilteringEnabled); err != nil {
t.Fatal(err)
}
retrievedBridge, err := LinkByName(bridgeWithVlanFilteringEnabledName)
if err != nil {
t.Fatal(err)
}
retrievedVlanFilteringState := *retrievedBridge.(*Bridge).VlanFiltering
if retrievedVlanFilteringState != vlanFiltering {
t.Fatalf("expected %t got %t", vlanFiltering, retrievedVlanFilteringState)
}
if err := LinkDel(bridgeWithVlanFilteringEnabled); err != nil {
t.Fatal(err)
}
bridgeWithDefaultVlanFilteringName := "bar"
bridgeWIthDefaultVlanFiltering := &Bridge{LinkAttrs: LinkAttrs{Name: bridgeWithDefaultVlanFilteringName}}
if err := LinkAdd(bridgeWIthDefaultVlanFiltering); err != nil {
t.Fatal(err)
}
retrievedBridge, err = LinkByName(bridgeWithDefaultVlanFilteringName)
if err != nil {
t.Fatal(err)
}
retrievedVlanFilteringState = *retrievedBridge.(*Bridge).VlanFiltering
if retrievedVlanFilteringState != false {
t.Fatalf("expected %t got %t", false, retrievedVlanFilteringState)
}
if err := LinkDel(bridgeWIthDefaultVlanFiltering); err != nil {
t.Fatal(err)
}
}
func TestLinkSubscribeWithProtinfo(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
master := &Bridge{LinkAttrs: LinkAttrs{Name: "foo"}}
if err := LinkAdd(master); err != nil {
t.Fatal(err)
}
slave := &Veth{
LinkAttrs: LinkAttrs{
Name: "bar",
TxQLen: testTxQLen,
MTU: 1400,
MasterIndex: master.Attrs().Index,
},
PeerName: "bar-peer",
}
if err := LinkAdd(slave); err != nil {
t.Fatal(err)
}
ch := make(chan LinkUpdate)
done := make(chan struct{})
defer close(done)
if err := LinkSubscribe(ch, done); err != nil {
t.Fatal(err)
}
if err := LinkSetHairpin(slave, true); err != nil {
t.Fatal(err)
}
select {
case update := <-ch:
if !(update.Attrs().Name == "bar" && update.Attrs().Protinfo != nil &&
update.Attrs().Protinfo.Hairpin) {
t.Fatal("Hairpin update not received as expected")
}
case <-time.After(time.Minute):
t.Fatal("Hairpin update timed out")
}
if err := LinkDel(slave); err != nil {
t.Fatal(err)
}
if err := LinkDel(master); err != nil {
t.Fatal(err)
}
}
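// testGTPLink opens the UDP sockets on ports 3386 and 2152 used by the kernel GTP module
// and returns a GTP link referencing their file descriptors.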
func testGTPLink(t *testing.T) *GTP {
conn1, err := net.ListenUDP("udp", &net.UDPAddr{
IP: net.ParseIP("0.0.0.0"),
Port: 3386,
})
if err != nil {
t.Fatal(err)
}
conn2, err := net.ListenUDP("udp", &net.UDPAddr{
IP: net.ParseIP("0.0.0.0"),
Port: 2152,
})
if err != nil {
t.Fatal(err)
}
fd1, _ := conn1.File()
fd2, _ := conn2.File()
return >P{
LinkAttrs: LinkAttrs{
Name: "gtp0",
},
FD0: int(fd1.Fd()),
FD1: int(fd2.Fd()),
}
}
func TestLinkAddDelGTP(t *testing.T) {
tearDown := setUpNetlinkTestWithKModule(t, "gtp")
defer tearDown()
gtp := testGTPLink(t)
testLinkAddDel(t, gtp)
}
func TestLinkAddDelXfrmi(t *testing.T) {
minKernelRequired(t, 4, 19)
defer setUpNetlinkTest(t)()
lo, _ := LinkByName("lo")
testLinkAddDel(t, &Xfrmi{
LinkAttrs: LinkAttrs{Name: "xfrm123", ParentIndex: lo.Attrs().Index},
Ifid: 123})
}
func TestLinkAddDelXfrmiNoId(t *testing.T) {
minKernelRequired(t, 4, 19)
defer setUpNetlinkTest(t)()
lo, _ := LinkByName("lo")
testLinkAddDel(t, &Xfrmi{
LinkAttrs: LinkAttrs{Name: "xfrm0", ParentIndex: lo.Attrs().Index}})
}
func TestLinkByNameWhenLinkIsNotFound(t *testing.T) {
_, err := LinkByName("iammissing")
if err == nil {
t.Fatal("Link not expected to found")
}
_, ok := err.(LinkNotFoundError)
if !ok {
t.Errorf("Error returned expected to of LinkNotFoundError type: %v", err)
}
}
func TestLinkByAliasWhenLinkIsNotFound(t *testing.T) {
_, err := LinkByAlias("iammissing")
if err == nil {
t.Fatal("Link not expected to found")
}
_, ok := err.(LinkNotFoundError)
if !ok {
t.Errorf("Error returned expected to of LinkNotFoundError type: %v", err)
}
}
func TestLinkAddDelTuntap(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
// Mount sysfs so that sysfs gets the namespace tag of the current network namespace
// This is necessary so that /sys shows the network interfaces of the current namespace.
if err := syscall.Mount("sysfs", "/sys", "sysfs", syscall.MS_RDONLY, ""); err != nil {
t.Fatal("Cannot mount sysfs")
}
defer func() {
if err := syscall.Unmount("/sys", 0); err != nil {
t.Fatal("Cannot umount /sys")
}
}()
testLinkAddDel(t, &Tuntap{
LinkAttrs: LinkAttrs{Name: "foo"},
Mode: TUNTAP_MODE_TAP})
}
func TestLinkAddDelTuntapMq(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
if err := syscall.Mount("sysfs", "/sys", "sysfs", syscall.MS_RDONLY, ""); err != nil {
t.Fatal("Cannot mount sysfs")
}
defer func() {
if err := syscall.Unmount("/sys", 0); err != nil {
t.Fatal("Cannot umount /sys")
}
}()
testLinkAddDel(t, &Tuntap{
LinkAttrs: LinkAttrs{Name: "foo"},
Mode: TUNTAP_MODE_TAP,
Queues: 4})
testLinkAddDel(t, &Tuntap{
LinkAttrs: LinkAttrs{Name: "foo"},
Mode: TUNTAP_MODE_TAP,
Queues: 4,
Flags: TUNTAP_MULTI_QUEUE_DEFAULTS | TUNTAP_VNET_HDR})
}
func TestLinkAddDelTuntapOwnerGroup(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
if err := syscall.Mount("sysfs", "/sys", "sysfs", syscall.MS_RDONLY, ""); err != nil {
t.Fatal("Cannot mount sysfs")
}
defer func() {
if err := syscall.Unmount("/sys", 0); err != nil {
t.Fatal("Cannot umount /sys")
}
}()
testLinkAddDel(t, &Tuntap{
LinkAttrs: LinkAttrs{Name: "foo"},
Mode: TUNTAP_MODE_TAP,
Owner: 0,
Group: 0,
})
}
func TestVethPeerIndex(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
const (
vethPeer1 = "vethOne"
vethPeer2 = "vethTwo"
)
link := &Veth{
LinkAttrs: LinkAttrs{
Name: vethPeer1,
MTU: 1500,
Flags: net.FlagUp,
},
PeerName: vethPeer2,
}
if err := LinkAdd(link); err != nil {
t.Fatal(err)
}
linkOne, err := LinkByName("vethOne")
if err != nil {
t.Fatal(err)
}
linkTwo, err := LinkByName("vethTwo")
if err != nil {
t.Fatal(err)
}
peerIndexOne, err := VethPeerIndex(&Veth{LinkAttrs: *linkOne.Attrs()})
if err != nil {
t.Fatal(err)
}
peerIndexTwo, err := VethPeerIndex(&Veth{LinkAttrs: *linkTwo.Attrs()})
if err != nil {
t.Fatal(err)
}
if peerIndexOne != linkTwo.Attrs().Index {
t.Errorf("VethPeerIndex(%s) mismatch %d != %d", linkOne.Attrs().Name, peerIndexOne, linkTwo.Attrs().Index)
}
if peerIndexTwo != linkOne.Attrs().Index {
t.Errorf("VethPeerIndex(%s) mismatch %d != %d", linkTwo.Attrs().Name, peerIndexTwo, linkOne.Attrs().Index)
}
}
func TestLinkSlaveBond(t *testing.T) {
minKernelRequired(t, 3, 13)
tearDown := setUpNetlinkTest(t)
defer tearDown()
const (
bondName = "foo"
slaveName = "fooFoo"
)
bond := NewLinkBond(LinkAttrs{Name: bondName})
bond.Mode = BOND_MODE_BALANCE_RR
if err := LinkAdd(bond); err != nil {
t.Fatal(err)
}
defer LinkDel(bond)
slaveDummy := &Dummy{LinkAttrs{Name: slaveName}}
if err := LinkAdd(slaveDummy); err != nil {
t.Fatal(err)
}
defer LinkDel(slaveDummy)
if err := LinkSetBondSlave(slaveDummy, bond); err != nil {
t.Fatal(err)
}
slaveLink, err := LinkByName(slaveName)
if err != nil {
t.Fatal(err)
}
slave := slaveLink.Attrs().Slave
if slave == nil {
t.Errorf("for %s expected slave is not nil.", slaveName)
}
if slaveType := slave.SlaveType(); slaveType != "bond" {
t.Errorf("for %s expected slave type is 'bond', but '%s'", slaveName, slaveType)
}
}
func TestLinkSetBondSlaveQueueId(t *testing.T) {
minKernelRequired(t, 3, 13)
tearDown := setUpNetlinkTest(t)
defer tearDown()
const (
bondName = "foo"
slave1Name = "fooFoo"
)
bond := NewLinkBond(LinkAttrs{Name: bondName})
if err := LinkAdd(bond); err != nil {
t.Fatal(err)
}
defer LinkDel(bond)
slave := &Dummy{LinkAttrs{Name: slave1Name}}
if err := LinkAdd(slave); err != nil {
t.Fatal(err)
}
defer LinkDel(slave)
if err := LinkSetBondSlave(slave, bond); err != nil {
t.Fatal(err)
}
if err := pkgHandle.LinkSetBondSlaveQueueId(slave, 1); err != nil {
t.Fatal(err)
}
}
func TestLinkSetBondSlave(t *testing.T) {
minKernelRequired(t, 3, 13)
tearDown := setUpNetlinkTest(t)
defer tearDown()
const (
bondName = "foo"
slaveOneName = "fooFoo"
slaveTwoName = "fooBar"
)
bond := NewLinkBond(LinkAttrs{Name: bondName})
bond.Mode = StringToBondModeMap["802.3ad"]
bond.AdSelect = BondAdSelect(BOND_AD_SELECT_BANDWIDTH)
bond.AdActorSysPrio = 1
bond.AdUserPortKey = 1
bond.AdActorSystem, _ = net.ParseMAC("06:aa:bb:cc:dd:ee")
if err := LinkAdd(bond); err != nil {
t.Fatal(err)
}
bondLink, err := LinkByName(bondName)
if err != nil {
t.Fatal(err)
}
defer LinkDel(bondLink)
if err := LinkAdd(&Dummy{LinkAttrs{Name: slaveOneName}}); err != nil {
t.Fatal(err)
}
slaveOneLink, err := LinkByName(slaveOneName)
if err != nil {
t.Fatal(err)
}
defer LinkDel(slaveOneLink)
if err := LinkAdd(&Dummy{LinkAttrs{Name: slaveTwoName}}); err != nil {
t.Fatal(err)
}
slaveTwoLink, err := LinkByName(slaveTwoName)
if err != nil {
t.Fatal(err)
}
defer LinkDel(slaveTwoLink)
if err := LinkSetBondSlave(slaveOneLink, &Bond{LinkAttrs: *bondLink.Attrs()}); err != nil {
t.Fatal(err)
}
if err := LinkSetBondSlave(slaveTwoLink, &Bond{LinkAttrs: *bondLink.Attrs()}); err != nil {
t.Fatal(err)
}
// Update info about interfaces
slaveOneLink, err = LinkByName(slaveOneName)
if err != nil {
t.Fatal(err)
}
slaveTwoLink, err = LinkByName(slaveTwoName)
if err != nil {
t.Fatal(err)
}
if slaveOneLink.Attrs().MasterIndex != bondLink.Attrs().Index {
t.Errorf("For %s expected %s to be master", slaveOneLink.Attrs().Name, bondLink.Attrs().Name)
}
if slaveTwoLink.Attrs().MasterIndex != bondLink.Attrs().Index {
t.Errorf("For %s expected %s to be master", slaveTwoLink.Attrs().Name, bondLink.Attrs().Name)
}
}
func TestLinkSetAllmulticast(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
iface := &Veth{LinkAttrs: LinkAttrs{Name: "foo"}, PeerName: "bar"}
if err := LinkAdd(iface); err != nil {
t.Fatal(err)
}
link, err := LinkByName("foo")
if err != nil {
t.Fatal(err)
}
if err := LinkSetUp(link); err != nil {
t.Fatal(err)
}
link, err = LinkByName("foo")
if err != nil {
t.Fatal(err)
}
if err := LinkSetAllmulticastOn(link); err != nil {
t.Fatal(err)
}
link, err = LinkByName("foo")
if err != nil {
t.Fatal(err)
}
if link.Attrs().Allmulti != 1 {
t.Fatal("IFF_ALLMULTI was not set")
}
if err := LinkSetAllmulticastOff(link); err != nil {
t.Fatal(err)
}
link, err = LinkByName("foo")
if err != nil {
t.Fatal(err)
}
if link.Attrs().Allmulti != 0 {
t.Fatal("IFF_ALLMULTI is still set")
}
}
func TestLinkSetMacvlanMode(t *testing.T) {
tearDown := setUpNetlinkTest(t)
defer tearDown()
const (
parentName = "foo"
macvlanName = "fooFoo"
macvtapName = "fooBar"
)
parent := &Dummy{LinkAttrs{Name: parentName}}
if err := LinkAdd(parent); err != nil {
t.Fatal(err)
}
defer LinkDel(parent)
testMacvlanMode := func(link Link, mode MacvlanMode) {
if err := LinkSetMacvlanMode(link, mode); err != nil {
t.Fatal(err)
}
name := link.Attrs().Name
result, err := LinkByName(name)
if err != nil {
t.Fatal(err)
}
var actual MacvlanMode
switch l := result.(type) {
case *Macvlan:
actual = l.Mode
case *Macvtap:
actual = l.Macvlan.Mode
}
if actual != mode {
t.Fatalf("expected %v got %v for %+v", mode, actual, link)
}
}
macvlan := &Macvlan{
LinkAttrs: LinkAttrs{Name: macvlanName, ParentIndex: parent.Attrs().Index},
Mode: MACVLAN_MODE_BRIDGE,
}
if err := LinkAdd(macvlan); err != nil {
t.Fatal(err)
}
defer LinkDel(macvlan)
testMacvlanMode(macvlan, MACVLAN_MODE_VEPA)
testMacvlanMode(macvlan, MACVLAN_MODE_PRIVATE)
testMacvlanMode(macvlan, MACVLAN_MODE_SOURCE)
testMacvlanMode(macvlan, MACVLAN_MODE_BRIDGE)
macvtap := &Macvtap{
Macvlan: Macvlan{
LinkAttrs: LinkAttrs{Name: macvtapName, ParentIndex: parent.Attrs().Index},
Mode: MACVLAN_MODE_BRIDGE,
},
}
if err := LinkAdd(macvtap); err != nil {
t.Fatal(err)
}
defer LinkDel(macvtap)
testMacvlanMode(macvtap, MACVLAN_MODE_VEPA)
testMacvlanMode(macvtap, MACVLAN_MODE_PRIVATE)
testMacvlanMode(macvtap, MACVLAN_MODE_SOURCE)
testMacvlanMode(macvtap, MACVLAN_MODE_BRIDGE)
}
| ["\"TRAVIS_BUILD_DIR\""] | [] | ["TRAVIS_BUILD_DIR"] | [] | ["TRAVIS_BUILD_DIR"] | go | 1 | 0 | |
share/qt/extract_strings_qt.py
|
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/pepcashstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *pepcash_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("pepcash-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| [] | [] | ["XGETTEXT"] | [] | ["XGETTEXT"] | python | 1 | 0 | |
internal/aws_oidc.go
|
package internal
import (
"context"
"encoding/json"
"fmt"
"log"
"os"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/aws/defaults"
"github.com/aws/aws-sdk-go-v2/service/sts"
)
const expiryDelta = 10 * time.Second
type AWSCredentials struct {
AWSAccessKey string
AWSSecretKey string
AWSSessionToken string
AWSSecurityToken string
PrincipalARN string
Expires time.Time
}
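// Valid reports whether the credentials are set and will not expire within expiryDelta.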
func (cred AWSCredentials) Valid() bool {
if cred.Expires.IsZero() {
return false
}
return !cred.Expires.Add(-expiryDelta).Before(time.Now())
}
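// GetCredentialsWithOIDC returns cached AWS credentials for the role while they are still valid;
// otherwise it assumes the role with the given OIDC ID token and refreshes the cache.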
func GetCredentialsWithOIDC(client *OIDCClient, idToken string, roleARN string, durationSeconds int64) (*AWSCredentials, error) {
role := strings.SplitN(roleARN, "/", 2)[1]
var awsCredsBag AWSCredentials
jsonString, err := getAWSTokenCache(role)
if err != nil {
if err != ErrNotFound {
return nil, err
}
} else {
if err := json.Unmarshal([]byte(jsonString), &awsCredsBag); err != nil {
return nil, err
}
}
awsCreds := awsCredsBag
if awsCreds.Valid() {
return &awsCreds, nil
}
token, err := assumeRoleWithWebIdentity(client, idToken, roleARN, durationSeconds)
if err != nil {
return nil, err
}
awsCredsBag = *token
awsCredsBagJSON, err := json.Marshal(awsCredsBag)
if err != nil {
return nil, err
}
if err := saveAWSTokenCache(string(awsCredsBagJSON), role); err != nil {
return nil, err
}
return token, err
}
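// assumeRoleWithWebIdentity exchanges the OIDC ID token for temporary AWS credentials via STS
// AssumeRoleWithWebIdentity, using "<username>@<rolename>" as the session name.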
func assumeRoleWithWebIdentity(client *OIDCClient, idToken string, roleARN string, durationSeconds int64) (*AWSCredentials, error) {
var username string
if strings.Contains(os.Getenv("USER"), "\\") {
username = strings.ToUpper(strings.SplitN(os.Getenv("USER"), "\\", 2)[1])
} else {
username = os.Getenv("USER")
}
split := strings.SplitN(roleARN, "/", 2)
rolename := client.name
if len(split) == 2 {
rolename = split[1]
}
log.Println("Requesting AWS credentials using ID Token")
cfg := defaults.Config()
cfg.Region = "eu-central-1"
req := sts.New(cfg).AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{
RoleArn: aws.String(roleARN),
RoleSessionName: aws.String(username + "@" + rolename),
WebIdentityToken: aws.String(idToken),
DurationSeconds: aws.Int64(durationSeconds),
})
resp, err := req.Send(context.Background())
if err != nil {
return nil, fmt.Errorf("error retrieving STS credentials using ID Token: %v", err)
}
return &AWSCredentials{
AWSAccessKey: aws.StringValue(resp.Credentials.AccessKeyId),
AWSSecretKey: aws.StringValue(resp.Credentials.SecretAccessKey),
AWSSessionToken: aws.StringValue(resp.Credentials.SessionToken),
AWSSecurityToken: aws.StringValue(resp.Credentials.SessionToken),
PrincipalARN: aws.StringValue(resp.AssumedRoleUser.Arn),
Expires: resp.Credentials.Expiration.Local(),
}, nil
}
| ["\"USER\"", "\"USER\"", "\"USER\""] | [] | ["USER"] | [] | ["USER"] | go | 1 | 0 | |
main.go
|
package main
import (
"log"
"net/http"
"strconv"
"os"
// "fmt"
"github.com/ant0ine/go-json-rest/rest"
"github.com/PuerkitoBio/goquery"
"github.com/k0kubun/pp"
)
type PostData struct {
Url string
}
type List struct {
Url []string
User []User
}
type User struct {
Name string
Image string
CancelCount int
JoinCount int
}
func main() {
api := rest.NewApi()
api.Use(rest.DefaultDevStack...)
router, err := rest.MakeRouter(
rest.Post("/join", PostJoin),
rest.Post("/cancel", PostCancel),
)
api.Use(&rest.CorsMiddleware{
RejectNonCorsRequests: false,
OriginValidator: func(origin string, request *rest.Request) bool {
// allow every origin (for now)
return true
},
AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
AllowedHeaders: []string{
"Accept", "Content-Type", "X-Custom-Header", "Origin",
},
AccessControlAllowCredentials: true,
AccessControlMaxAge: 3600,
})
if err != nil {
log.Fatal(err)
}
port := os.Getenv("PORT")
api.SetApp(router)
log.Fatal(http.ListenAndServe(":" + port, api.MakeHandler()))
// log.Fatal(http.ListenAndServe(":8080", api.MakeHandler()))
}
func PostJoin(w rest.ResponseWriter, r *rest.Request) {
post_data := PostData{}
err := r.DecodeJsonPayload(&post_data)
if err != nil {
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if post_data.Url == "" {
rest.Error(w, "url required", 400)
}
list := List{}
GetPageToConnpass(post_data.Url, &list)
w.WriteJson(list.Url)
}
func PostCancel(w rest.ResponseWriter, r *rest.Request) {
post_data := PostData{}
err := r.DecodeJsonPayload(&post_data)
if err != nil {
rest.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if post_data.Url == "" {
rest.Error(w, "url required", 400)
}
url := post_data.Url
user := GetUserPageToConnpass(url)
pp.Println(user)
w.WriteJson(user)
}
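// GetPageToConnpass collects the profile URLs of every participant listed on a connpass event participation page.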
func GetPageToConnpass(url string, list *List) {
doc, _ := goquery.NewDocument(url + "participation/#participants")
doc.Find(".user").Each(func(_ int, s *goquery.Selection) {
s.Find(".image_link").Each(func(_ int, s *goquery.Selection) {
url, _ := s.Attr("href")
list.Url = append(list.Url, url)
})
})
}
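// GetUserPageToConnpass scrapes a connpass user page (including any additional result pages)
// and tallies the user's joined and cancelled events.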
func GetUserPageToConnpass(url string) User {
user := User{"", "", 0, 0}
// Skip users whose profile URL cannot be retrieved (e.g. deactivated accounts)
if url != "" {
doc, _ := goquery.NewDocument(url)
image_elm := doc.Find("#side_area > div.mb_20.text_center img")
user.Name, _ = image_elm.Attr("title")
user.Image, _ = image_elm.Attr("src")
doc.Find("#main > div.event_area.mb_10 > div.event_list.vevent").Each(func(_ int, s *goquery.Selection) {
join_status := s.Find("p.label_status_tag").Text()
if join_status == "キャンセル" {
user.CancelCount++
} else {
user.JoinCount++
}
})
// If the results span more than one page, scrape the remaining pages as well
if (doc.Find("#main > div.paging_area > ul > li").Length() - 1) > 1 {
total_page := doc.Find("#main > div.paging_area > ul > li").Length() - 1
for i := 2; i <= total_page; i++ {
doc, _ := goquery.NewDocument(url + "?page=" + strconv.Itoa(i))
doc.Find("#main > div.event_area.mb_10 > div.event_list.vevent").Each(func(_ int, s *goquery.Selection) {
join_status := s.Find("p.label_status_tag").Text()
if join_status == "キャンセル" {
user.CancelCount++
} else {
user.JoinCount++
}
})
}
}
}
return user
}
| ["\"PORT\""] | [] | ["PORT"] | [] | ["PORT"] | go | 1 | 0 | |
service/omega365.py
|
import requests
from flask import Flask, Response, request
import os
import logger
import cherrypy
import json
from datetime import datetime
app = Flask(__name__)
logger = logger.Logger('Omega365 client service')
url = os.environ.get("base_url")
username = os.environ.get("username")
pw = os.environ.get("password")
remove_namespaces = os.environ.get("remove_namespaces", True)
headers = json.loads('{"Content-Type": "application/json"}')
resources_config = json.loads(os.environ.get("resources", '[]'))
resources = {}
class BasicUrlSystem:
def __init__(self, config):
self._config = config
def make_session(self):
session = requests.Session()
session.headers = self._config["headers"]
session.verify = True
return session
session_factory = BasicUrlSystem({"headers": headers})
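# Authenticate against the Omega365 login endpoint so later requests reuse the session cookie.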
def authenticate(s):
auth_url = url + "/login?mobile_login=true"
auth_content = {
"username_user": username,
"password": pw,
"remember": "false"
}
try:
auth_resp = s.request("POST", auth_url, json=auth_content)
except Exception as e:
logger.warning("Exception occurred when authenticating the user: '%s'", e)
def stream_json(clean, since_property_name, id_property_name):
first = True
yield '['
for i, row in enumerate(clean):
if not first:
yield ','
else:
first = False
if since_property_name is not None:
row["_updated"] = row[since_property_name]
if id_property_name is not None:
row["_id"] = str(row[id_property_name])
yield json.dumps(row)
yield ']'
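# Recursively strip namespace prefixes ("ns:key" -> "key") from dictionary keys.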
def remove_ns(keys):
if isinstance(keys, list):
for key in keys:
remove_ns(key)
if isinstance(keys, dict):
for key in keys.keys():
if ":" in key:
new_key = key.split(":")[1]
keys[new_key] = keys.pop(key)
for val in keys.values():
remove_ns(val)
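# Build the resource lookup table from the "resources" environment configuration.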
def populate_resources():
for resource in resources_config:
since_property_name = None
id_property_name = None
if "since_property_name" in resource:
since_property_name = resource["since_property_name"]
if "id_property_name" in resource:
id_property_name = resource["id_property_name"]
resources[resource["resource_name"]] = \
{
"fields": resource["fields"],
"since_property_name": since_property_name,
"id_property_name": id_property_name
}
@app.route("/<path:path>", methods=["GET"])
def get(path):
request_url = "{0}{1}".format(url, "/api/data")
logger.info("Request url: %s", request_url)
if path not in resources:
raise Exception("Resource with name '{0}' not found!".format(path))
where_clause = None
if request.args.get('since') is not None and resources[path]["since_property_name"] is not None:
logger.info("Since marker found: {0}".format(request.args.get('since')))
since = request.args.get('since').split(".")[0]
where_clause = "{0} >= '{1}'".format(resources[path]["since_property_name"], datetime.strptime(since, "%Y-%m-%dT%H:%M:%S"))
get_template = {
"maxRecords": -1,
"operation": "retrieve",
"resourceName": path,
"fields": resources[path]["fields"],
"whereClause": where_clause
}
logger.info("Request data: %s", get_template)
with session_factory.make_session() as s:
authenticate(s)
response = s.request("POST", request_url, json=get_template, headers=headers)
if response.status_code != 200:
raise Exception(response.reason + ": " + response.text)
result = json.loads(response.text)
return Response(
stream_json(result['success'], resources[path]["since_property_name"], resources[path]["id_property_name"]),
mimetype='application/json'
)
@app.route("/<path:path>", methods=["POST"])
def post(path):
request_url = "{0}{1}".format(url, "/api/data")
logger.info("Request url: %s", request_url)
if path not in resources:
raise Exception("Resource with name '{0}' not found!".format(path))
request_data = json.loads(request.data)
logger.info("Request data: %s", request_data)
create_template = {
"maxRecords": -1,
"operation": "create",
"resourceName": path,
"uniqueName": path,
"excludeFieldNames": False,
"fields": resources[path]["fields"]
}
delete_template = {
"operation": "destroy",
"resourceName": path,
"uniqueName": path
}
update_template = {
"operation": "update",
"resourceName": path,
"uniqueName": path,
"excludeFieldNames": False,
"fields": resources[path]["fields"]
}
def generate(entities):
yield "["
with session_factory.make_session() as s:
authenticate(s)
for index, entity in enumerate(entities):
if index > 0:
yield ","
post_entity = entity.copy()
if "_deleted" in entity and entity["_deleted"] is True:
logger.info("Deleting entity: {0}!".format(entity["_id"]))
post_entity.update(delete_template)
else:
if resources[path]["id_property_name"] in entity:
logger.info("Updating entity: {0}!".format(entity["_id"]))
post_entity.update(update_template)
else:
logger.info("Creating entity: {0}!".format(entity["_id"]))
post_entity.update(create_template)
response = s.request("POST", request_url, json=post_entity, headers=headers)
if response.status_code != 200:
logger.warning("An error occurred: {0}. {1}".format(response.reason, response.text))
raise Exception(response.reason + ": " + response.text)
result = json.loads(response.text)
yield json.dumps(result['success'])
yield "]"
response_data_generator = generate(request_data)
response_data = response_data_generator
return Response(response=response_data, mimetype="application/json")
if __name__ == '__main__':
cherrypy.tree.graft(app, '/')
populate_resources()
# Set the configuration of the web server to production mode
cherrypy.config.update({
'environment': 'production',
'engine.autoreload_on': False,
'log.screen': True,
'server.socket_port': 5002,
'server.socket_host': '0.0.0.0'
})
# Start the CherryPy WSGI web server
cherrypy.engine.start()
cherrypy.engine.block()
|
[] |
[] |
[
"username",
"base_url",
"remove_namespaces",
"password",
"resources"
] |
[]
|
["username", "base_url", "remove_namespaces", "password", "resources"]
|
python
| 5 | 0 | |
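The two handlers above form a small proxy: GET /<resource> streams back a JSON array (optionally filtered by a `since` marker that is translated into a whereClause), while POST /<resource> creates, updates, or destroys entities depending on whether the configured id property or a `_deleted` flag is present. A minimal client sketch against that contract follows; the base URL matches the CherryPy config above, but the resource name "contacts" and the "firstname" field are illustrative assumptions, not taken from the source.

# Client sketch (assumptions: service on localhost:5002 as configured above;
# resource name "contacts" and field "firstname" are illustrative only).
import requests

BASE = "http://localhost:5002"

# Incremental read: the GET handler turns "since" into a whereClause on since_property_name.
rows = requests.get(BASE + "/contacts", params={"since": "2021-01-01T00:00:00"}).json()
print(len(rows), "rows")

# Write-back: entities carrying the configured id property are updated, ones without it
# are created, and entities with "_deleted": true are destroyed.
entities = [{"_id": "1", "firstname": "Ada"}, {"_id": "2", "_deleted": True}]
print(requests.post(BASE + "/contacts", json=entities).json())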
Src/CycleGan/options/test_options.py
|
from .base_options import BaseOptions
class TestOptions(BaseOptions):
def initialize(self):
BaseOptions.initialize(self)
self.parser.add_argument('--ntest', type=int, default=float("inf"), help='# of test examples.')
self.parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
self.parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
self.parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
self.parser.add_argument('--how_many', type=int, default=50, help='how many test images to run')
self.parser.add_argument('--only_one', action='store_true',
help='if specified, print more debugging information')
self.parser.add_argument('--show_by', type=str, default='A2B',
help='A|B')
self.parser.add_argument('--side', type=str, default='A2B',
help='A2B|B2A')
self.isTrain = False
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
hack/release/pkg/builder/builder.go
|
// Copyright (c) 2021 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package builder
import (
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/coreos/go-semver/semver"
"github.com/sirupsen/logrus"
)
// Global configuration for releases.
var (
// Registries to which all release images are pushed.
registries = []string{
"docker.io/calico",
"quay.io/calico",
"gcr.io/projectcalico-org",
"eu.gcr.io/projectcalico-org",
"asia.gcr.io/projectcalico-org",
"us.gcr.io/projectcalico-org",
}
// Git configuration for publishing to GitHub.
organization = "projectcalico"
repo = "calico"
origin = "origin"
)
func NewReleaseBuilder(runner CommandRunner) *ReleaseBuilder {
return &ReleaseBuilder{
runner: runner,
}
}
type ReleaseBuilder struct {
// Allow specification of command runner so it can be overridden in tests.
runner CommandRunner
}
// BuildRelease creates a Calico release.
func (r *ReleaseBuilder) BuildRelease() error {
// Check that we're not already on a git tag.
out, err := r.git("describe", "--exact-match", "--tags", "HEAD")
if err == nil {
// On a current tag.
return fmt.Errorf("Already on a tag (%s), refusing to create release", out)
}
// Check that the repository is not a shallow clone. We need correct history.
out, err = r.git("rev-parse", "--is-shallow-repository")
if err != nil {
return err
}
if strings.TrimSpace(out) == "true" {
return fmt.Errorf("Attempt to release from a shallow clone is not possible")
}
// Check that the environment has the necessary prereqs.
if err := r.releasePrereqs(); err != nil {
return err
}
// Determine the last tag on this branch.
out, err = r.git("describe", "--tags", "--dirty", "--always", "--abbrev=12")
if err != nil {
logrus.WithError(err).Fatal("Failed to git describe")
}
logrus.WithField("out", out).Info("Current git describe")
// Determine the release version to use based on the last tag, and then tag the branch.
ver, err := r.determineReleaseVersion(out)
if err != nil {
return err
}
branch := r.determineBranch()
logrus.WithFields(logrus.Fields{"branch": branch, "version": ver}).Infof("Creating Calico release from branch")
_, err = r.git("tag", ver)
if err != nil {
return fmt.Errorf("Failed to tag release: %s", err)
}
// Successfully tagged. If we fail to release after this stage, we need to delete the tag.
defer func() {
if err != nil {
logrus.Warn("Failed to release, cleaning up tag")
r.git("tag", "-d", ver)
}
}()
// Build container images for the release.
if err = r.buildContainerImages(ver); err != nil {
return err
}
// TODO: Assert the produced images are OK. e.g., have correct
// commit and version information compiled in.
// Build artifacts to upload to github.
if err = r.collectGithubArtifacts(ver); err != nil {
return err
}
return nil
}
func (r *ReleaseBuilder) PublishRelease() error {
// Determine the currently checked-out tag.
ver, err := r.git("describe", "--exact-match", "--tags", "HEAD")
if err != nil {
return fmt.Errorf("failed to get tag for checked-out commit, is there one? %s", err)
}
// Check that the environment has the necessary prereqs.
if err = r.publishPrereqs(ver); err != nil {
return err
}
// Publish container images.
if err = r.publishContainerImages(ver); err != nil {
return fmt.Errorf("failed to publish container images: %s", err)
}
// If all else is successful, push the git tag.
if _, err = r.git("push", origin, ver); err != nil {
return fmt.Errorf("failed to push git tag: %s", err)
}
// Publish the release to github.
if err = r.publishGithubRelease(ver); err != nil {
return fmt.Errorf("failed to publish github release: %s", err)
}
return nil
}
// Check general prerequisites for cutting and publishing a release.
func (r *ReleaseBuilder) releasePrereqs() error {
// Check that we're not on the master branch. We never cut releases from master.
branch := r.determineBranch()
if branch == "master" {
return fmt.Errorf("Cannot cut release from branch: %s", branch)
}
// Make sure we have a github token - needed for publishing to GH.
// Strictly only needed for publishing, but we check during release anyway so
// that we don't get all the way through the build to find out we're missing it!
if token := os.Getenv("GITHUB_TOKEN"); token == "" {
return fmt.Errorf("No GITHUB_TOKEN present in environment")
}
// TODO: Make sure the environment isn't dirty.
return nil
}
// Prerequisites specific to publishing a release.
func (r *ReleaseBuilder) publishPrereqs(ver string) error {
// TODO: Verify all required artifacts are present.
return r.releasePrereqs()
}
// We include the following GitHub artifacts on each release. This function assumes
// that they have already been built, and simply wraps them up.
// - release-vX.Y.Z.tgz: contains images, manifests, and binaries.
// - tigera-operator-vX.Y.Z.tgz: contains the helm v3 chart.
// - calico-windows-vX.Y.Z.zip: Calico for Windows.
func (r *ReleaseBuilder) collectGithubArtifacts(ver string) error {
// Final artifacts will be moved here.
uploadDir := r.uploadDir(ver)
// TODO: Delete if already exists.
err := os.MkdirAll(uploadDir, os.ModePerm)
if err != nil {
return fmt.Errorf("Failed to create dir: %s", err)
}
// We attach calicoctl binaries directly to the release as well.
files, err := ioutil.ReadDir("calicoctl/bin/")
if err != nil {
return err
}
for _, b := range files {
if _, err := r.runner.Run("cp", []string{fmt.Sprintf("calicoctl/bin/%s", b.Name()), uploadDir}, nil); err != nil {
return err
}
}
// Build and add in the complete release tarball.
if err = r.buildReleaseTar(ver, uploadDir); err != nil {
return err
}
	// Add in the already-built windows zip archive and helm chart.
if _, err := r.runner.Run("cp", []string{fmt.Sprintf("node/dist/calico-windows-%s.zip", ver), uploadDir}, nil); err != nil {
return err
}
if _, err := r.runner.Run("cp", []string{fmt.Sprintf("calico/bin/tigera-operator-%s.tgz", ver), uploadDir}, nil); err != nil {
return err
}
return nil
}
func (r *ReleaseBuilder) uploadDir(ver string) string {
return fmt.Sprintf("_output/upload/%s", ver)
}
// Builds the complete release tar for upload to github.
// - release-vX.Y.Z.tgz: contains images, manifests, and binaries.
// TODO: We should produce a tar per architecture that we ship.
func (r *ReleaseBuilder) buildReleaseTar(ver string, targetDir string) error {
// Create tar files for container image that are shipped.
releaseBase := fmt.Sprintf("_output/release-%s", ver)
err := os.MkdirAll(releaseBase+"/images", os.ModePerm)
if err != nil {
return fmt.Errorf("Failed to create images dir: %s", err)
}
outFmt := "_output/release-%s/images/%s"
registry := registries[0]
images := map[string]string{
fmt.Sprintf("%s/node:%s", registry, ver): fmt.Sprintf(outFmt, ver, "calico-node.tar"),
fmt.Sprintf("%s/typha:%s", registry, ver): fmt.Sprintf(outFmt, ver, "calico-typha.tar"),
fmt.Sprintf("%s/cni:%s", registry, ver): fmt.Sprintf(outFmt, ver, "calico-cni.tar"),
fmt.Sprintf("%s/kube-controllers:%s", registry, ver): fmt.Sprintf(outFmt, ver, "calico-kube-controllers.tar"),
fmt.Sprintf("%s/pod2daemon-flexvol:%s", registry, ver): fmt.Sprintf(outFmt, ver, "calico-pod2daemon.tar"),
fmt.Sprintf("%s/dikastes:%s", registry, ver): fmt.Sprintf(outFmt, ver, "calico-dikastes.tar"),
fmt.Sprintf("%s/flannel-migration-controller:%s", registry, ver): fmt.Sprintf(outFmt, ver, "calico-flannel-migration-controller.tar"),
}
for img, out := range images {
err = r.archiveContainerImage(out, img)
if err != nil {
return err
}
}
// Add in release binaries that we ship.
binDir := fmt.Sprintf("%s/bin", releaseBase)
err = os.MkdirAll(binDir, os.ModePerm)
if err != nil {
return fmt.Errorf("Failed to create images dir: %s", err)
}
binaries := map[string]string{
// CNI plugin binaries are all placed in github dir.
"cni-plugin/bin/": binDir + "/cni",
// Calicoctl binaries.
"calicoctl/bin/": binDir + "/calicoctl",
// Felix binaries.
"felix/bin/calico-bpf": binDir,
}
for src, dst := range binaries {
if _, err := r.runner.Run("cp", []string{"-r", src, dst}, nil); err != nil {
return err
}
}
// Add in manifests directory generated from the docs.
if _, err := r.runner.Run("cp", []string{"-r", "calico/_site/manifests", releaseBase}, nil); err != nil {
return err
}
// tar up the whole thing.
if _, err := r.runner.Run("tar", []string{"-czvf", fmt.Sprintf("_output/release-%s.tgz", ver), "-C", "_output", fmt.Sprintf("release-%s", ver)}, nil); err != nil {
return err
}
if _, err := r.runner.Run("cp", []string{fmt.Sprintf("_output/release-%s.tgz", ver), targetDir}, nil); err != nil {
return err
}
return nil
}
func (r *ReleaseBuilder) buildContainerImages(ver string) error {
releaseDirs := []string{
"node",
"pod2daemon",
"cni-plugin",
"apiserver",
"kube-controllers",
"calicoctl",
"app-policy",
"typha",
"felix",
"calico", // Technically not a container image, but a helm chart.
}
// Build env.
// TODO: Pass CHART_RELEASE to calico repo if needed.
env := append(os.Environ(),
fmt.Sprintf("VERSION=%s", ver),
fmt.Sprintf("DEV_REGISTRIES=%s", strings.Join(registries, " ")),
)
for _, dir := range releaseDirs {
out, err := r.makeInDirectoryWithOutput(dir, "release-build", env...)
if err != nil {
logrus.Error(out)
return fmt.Errorf("Failed to build %s: %s", dir, err)
}
logrus.Info(out)
}
return nil
}
func (r *ReleaseBuilder) publishGithubRelease(ver string) error {
releaseNoteTemplate := `
Release notes can be found at https://projectcalico.docs.tigera.io/archive/{release_stream}/release-notes/
Attached to this release are the following artifacts:
- {release_tar}: container images, binaries, and kubernetes manifests.
- {calico_windows_zip}: Calico for Windows.
- {helm_chart}: Calico Helm v3 chart.
`
sv, err := semver.NewVersion(strings.TrimPrefix(ver, "v"))
if err != nil {
return err
}
formatters := []string{
// Alternating placeholder / filler. We can't use backticks in the multiline string above,
// so we replace anything that needs to be backticked into it here.
"{version}", ver,
"{release_stream}", fmt.Sprintf("v%d.%d", sv.Major, sv.Minor),
"{release_tar}", fmt.Sprintf("`release-%s.tgz`", ver),
"{calico_windows_zip}", fmt.Sprintf("`calico-windows-%s.zip`", ver),
"{helm_chart}", fmt.Sprintf("`tigera-operator-%s.tgz`", ver),
}
replacer := strings.NewReplacer(formatters...)
releaseNote := replacer.Replace(releaseNoteTemplate)
args := []string{
"-username", organization,
"-repository", repo,
"-name", ver,
"-body", releaseNote,
ver,
r.uploadDir(ver),
}
_, err = r.runner.Run("ghr", args, nil)
return err
}
func (r *ReleaseBuilder) publishContainerImages(ver string) error {
releaseDirs := []string{
"pod2daemon",
"cni-plugin",
"apiserver",
"kube-controllers",
"calicoctl",
"app-policy",
"typha",
"node",
}
env := append(os.Environ(),
fmt.Sprintf("IMAGETAG=%s", ver),
fmt.Sprintf("VERSION=%s", ver),
"RELEASE=true",
"CONFIRM=true",
fmt.Sprintf("DEV_REGISTRIES=%s", strings.Join(registries, " ")),
)
// We allow for a certain number of retries when publishing each directory, since
// network flakes can occasionally result in images failing to push.
maxRetries := 1
for _, dir := range releaseDirs {
attempt := 0
for {
out, err := r.makeInDirectoryWithOutput(dir, "release-publish", env...)
if err != nil {
if attempt < maxRetries {
logrus.WithField("attempt", attempt).WithError(err).Warn("Publish failed, retrying")
attempt++
continue
}
logrus.Error(out)
return fmt.Errorf("Failed to publish %s: %s", dir, err)
}
// Success - move on to the next directory.
logrus.Info(out)
break
}
}
return nil
}
// determineReleaseVersion uses historical clues to figure out the next semver
// release number to use for this release.
func (r *ReleaseBuilder) determineReleaseVersion(previousTag string) (string, error) {
// There are two types of tag that this might be - either it was a previous patch release,
	// or it was a "vX.Y.Z-0.dev" tag produced when cutting the release branch.
if strings.Contains(previousTag, "-0.dev") {
// This is the first release from this branch - we can simply extract the version from
// the dev tag.
return strings.Split(previousTag, "-0.dev")[0], nil
} else {
// This is a patch release - we need to parse the previous, and
// bump the patch version.
previousVersion := strings.Split(previousTag, "-")[0]
logrus.WithField("previousVersion", previousVersion).Info("Previous version")
v, err := semver.NewVersion(strings.TrimPrefix(previousVersion, "v"))
if err != nil {
logrus.WithField("previousVersion", previousVersion).WithError(err).Error("Failed to parse git version as semver")
return "", fmt.Errorf("failed to parse git version as semver: %s", err)
}
v.BumpPatch()
return fmt.Sprintf("v%s", v.String()), nil
}
}
// determineBranch returns the current checked out branch.
func (r *ReleaseBuilder) determineBranch() string {
out, err := r.git("rev-parse", "--abbrev-ref", "HEAD")
if err != nil {
logrus.WithError(err).Fatal("Error determining branch")
} else if strings.TrimSpace(out) == "HEAD" {
logrus.Fatal("Not on a branch, refusing to cut release")
}
return strings.TrimSpace(out)
}
// Uses docker to build a tgz archive of the specified container image.
func (r *ReleaseBuilder) archiveContainerImage(out, image string) error {
_, err := r.runner.Run("docker", []string{"save", "--output", out, image}, nil)
return err
}
func (r *ReleaseBuilder) git(args ...string) (string, error) {
return r.runner.Run("git", args, nil)
}
func (r *ReleaseBuilder) makeInDirectory(dir, target string, env ...string) error {
_, err := r.runner.Run("make", []string{"-C", dir, target}, env)
return err
}
func (r *ReleaseBuilder) makeInDirectoryWithOutput(dir, target string, env ...string) (string, error) {
return r.runner.Run("make", []string{"-C", dir, target}, env)
}
|
[
"\"GITHUB_TOKEN\""
] |
[] |
[
"GITHUB_TOKEN"
] |
[]
|
["GITHUB_TOKEN"]
|
go
| 1 | 0 | |
data/bookcorpus/sentence_segmentation_nltk.py
|
# NVIDIA
import nltk
import os
nltk.download('punkt')
input_file = os.environ['WORKING_DIR'] + '/intermediate_files/bookcorpus.txt'
output_file = os.environ['WORKING_DIR'] + '/final_text_file_single/bookcorpus.segmented.nltk.txt'
doc_seperator = "\n"
with open(input_file) as ifile:
with open(output_file, "w") as ofile:
for line in ifile:
if line != "\n":
sent_list = nltk.tokenize.sent_tokenize(line)
for sent in sent_list:
ofile.write(sent + "\n")
ofile.write(doc_seperator)
|
[] |
[] |
[
"WORKING_DIR"
] |
[]
|
["WORKING_DIR"]
|
python
| 1 | 0 | |
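For reference, the segmentation the script performs is plain NLTK sentence tokenisation. A standalone sketch of that step (not part of the original script):

# Standalone sketch of the sentence segmentation used by the script above.
import nltk

nltk.download('punkt')
line = "BERT pretraining expects one sentence per line. This step produces exactly that."
for sent in nltk.tokenize.sent_tokenize(line):
    print(sent)
# BERT pretraining expects one sentence per line.
# This step produces exactly that.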
cmd/root.go
|
package cmd
import (
"fmt"
"os"
"path/filepath"
"github.com/inconshreveable/log15"
"github.com/knqyf263/gost/util"
homedir "github.com/mitchellh/go-homedir"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var cfgFile string
// RootCmd represents the base command when called without any subcommands
var RootCmd = &cobra.Command{
Use: "gost",
Short: "Security Tracker",
Long: `Security Tracker`,
SilenceErrors: true,
SilenceUsage: true,
}
func init() {
cobra.OnInitialize(initConfig)
RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.gost.yaml)")
RootCmd.PersistentFlags().String("log-dir", "", "/path/to/log")
viper.BindPFlag("log-dir", RootCmd.PersistentFlags().Lookup("log-dir"))
viper.SetDefault("log-dir", util.GetDefaultLogDir())
RootCmd.PersistentFlags().Bool("log-json", false, "output log as JSON")
viper.BindPFlag("log-json", RootCmd.PersistentFlags().Lookup("log-json"))
viper.SetDefault("log-json", false)
RootCmd.PersistentFlags().Bool("debug", false, "debug mode (default: false)")
viper.BindPFlag("debug", RootCmd.PersistentFlags().Lookup("debug"))
viper.SetDefault("debug", false)
RootCmd.PersistentFlags().Bool("debug-sql", false, "SQL debug mode")
viper.BindPFlag("debug-sql", RootCmd.PersistentFlags().Lookup("debug-sql"))
viper.SetDefault("debug-sql", false)
RootCmd.PersistentFlags().String("dbpath", "", "/path/to/sqlite3 or SQL connection string")
viper.BindPFlag("dbpath", RootCmd.PersistentFlags().Lookup("dbpath"))
pwd := os.Getenv("PWD")
viper.SetDefault("dbpath", filepath.Join(pwd, "gost.sqlite3"))
RootCmd.PersistentFlags().String("dbtype", "", "Database type to store data in (sqlite3, mysql or postgres supported)")
viper.BindPFlag("dbtype", RootCmd.PersistentFlags().Lookup("dbtype"))
viper.SetDefault("dbtype", "sqlite3")
RootCmd.PersistentFlags().String("http-proxy", "", "http://proxy-url:port (default: empty)")
viper.BindPFlag("http-proxy", RootCmd.PersistentFlags().Lookup("http-proxy"))
viper.SetDefault("http-proxy", "")
}
// initConfig reads in config file and ENV variables if set.
func initConfig() {
if cfgFile != "" {
viper.SetConfigFile(cfgFile)
} else {
// Find home directory.
home, err := homedir.Dir()
if err != nil {
log15.Error("Failed to find home directory.", "err", err)
os.Exit(1)
}
// Search config in home directory with name ".gost" (without extension).
viper.AddConfigPath(home)
viper.SetConfigName(".gost")
}
viper.AutomaticEnv() // read in environment variables that match
// If a config file is found, read it in.
if err := viper.ReadInConfig(); err == nil {
fmt.Println("Using config file:", viper.ConfigFileUsed())
}
logDir := viper.GetString("log-dir")
debug := viper.GetBool("debug")
logJSON := viper.GetBool("log-json")
util.SetLogger(logDir, debug, logJSON)
}
|
[
"\"PWD\""
] |
[] |
[
"PWD"
] |
[]
|
["PWD"]
|
go
| 1 | 0 | |
standup/settings.py
|
"""
Django settings for standup project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(os.getenv('DEBUG'))
ALLOWED_HOSTS = [os.getenv('DEFAULT_HOST', '127.0.0.1')]
# Application definition
INSTALLED_APPS = [
'multi_captcha_admin',
'snowpenguin.django.recaptcha2',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'timezone_field',
'ordered_model',
'standup',
]
MULTI_CAPTCHA_ADMIN = {
'engine': 'recaptcha2',
}
RECAPTCHA_PUBLIC_KEY = os.getenv('RECAPTCHA_PUBLIC')
RECAPTCHA_PRIVATE_KEY = os.getenv('RECAPTCHA_PRIVATE')
SITE_ID = 1
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'standup.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'standup.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': os.getenv('DATABASE_ENGINE', 'django.db.backends.sqlite3'),
'NAME': os.getenv('DATABASE_NAME', os.path.join(BASE_DIR, 'db.sqlite3')),
'USER': os.getenv('DATABASE_USER'),
'PASSWORD': os.getenv('DATABASE_PASSWORD'),
'HOST': os.getenv('DATABASE_HOST'),
'PORT': os.getenv('DATABASE_PORT'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Amsterdam'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static')
AUTH_USER_MODEL = 'standup.User'
SECRET_KEY = os.getenv('DISCORD_TOKEN')
DISCORD_TOKEN = os.getenv('DISCORD_TOKEN')
|
[] |
[] |
[
"RECAPTCHA_PUBLIC",
"DISCORD_TOKEN",
"DATABASE_PASSWORD",
"RECAPTCHA_PRIVATE",
"DATABASE_NAME",
"DATABASE_HOST",
"DEFAULT_HOST",
"DATABASE_ENGINE",
"DATABASE_USER",
"DEBUG",
"DATABASE_PORT"
] |
[]
|
["RECAPTCHA_PUBLIC", "DISCORD_TOKEN", "DATABASE_PASSWORD", "RECAPTCHA_PRIVATE", "DATABASE_NAME", "DATABASE_HOST", "DEFAULT_HOST", "DATABASE_ENGINE", "DATABASE_USER", "DEBUG", "DATABASE_PORT"]
|
python
| 11 | 0 | |
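All secrets and connection details in this settings module are read from the environment, so it can be exercised outside a full deployment by seeding those variables first. A minimal sketch, assuming the project's dependencies (Django plus the listed third-party apps) are installed; the values are placeholders, not taken from the source.

# Minimal sketch: seed the environment, then load the settings module standalone.
import os

os.environ.setdefault('DEBUG', '1')
os.environ.setdefault('DISCORD_TOKEN', 'placeholder-token')   # also used as SECRET_KEY above
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'standup.settings')

import django
django.setup()

from django.conf import settings
print(settings.DEBUG, settings.DATABASES['default']['ENGINE'])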
lib/ManyHellos/ManyHellosServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import requests as _requests
import random as _random
import os
from ManyHellos.authclient import KBaseAuth as _KBaseAuth
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-server-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'ManyHellos'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from ManyHellos.ManyHellosImpl import ManyHellos # noqa @IgnorePep8
impl_ManyHellos = ManyHellos(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
newerr.data = e.message
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # noqa @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'ManyHellos'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_ManyHellos.manyHellos,
name='ManyHellos.manyHellos',
types=[dict])
self.method_authentication['ManyHellos.manyHellos'] = 'required' # noqa
self.rpc_service.add(impl_ManyHellos.manyHellos_prepare,
name='ManyHellos.manyHellos_prepare',
types=[dict])
self.method_authentication['ManyHellos.manyHellos_prepare'] = 'required' # noqa
self.rpc_service.add(impl_ManyHellos.manyHellos_runEach,
name='ManyHellos.manyHellos_runEach',
types=[dict])
self.method_authentication['ManyHellos.manyHellos_runEach'] = 'required' # noqa
self.rpc_service.add(impl_ManyHellos.manyHellos_collect,
name='ManyHellos.manyHellos_collect',
types=[dict])
self.method_authentication['ManyHellos.manyHellos_collect'] = 'required' # noqa
self.rpc_service.add(impl_ManyHellos.hi,
name='ManyHellos.hi',
types=[basestring])
self.method_authentication['ManyHellos.hi'] = 'required' # noqa
self.rpc_service.add(impl_ManyHellos.status,
name='ManyHellos.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'ManyHellos ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'Request method was %s\n' % environ['REQUEST_METHOD']
# print 'Environment dictionary is:\n%s\n' % pprint.pformat(environ)
# print 'Request body was: %s' % request_body
# print 'Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP server, which listens
# on port 9999 by default, execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
[] |
[] |
[
"SDK_CALLBACK_URL"
] |
[]
|
["SDK_CALLBACK_URL"]
|
python
| 1 | 0 | |
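For local testing, the start_server/stop_server helpers defined at the bottom of the module can run the WSGI app in a child process. A minimal sketch, assuming the lib directory is on PYTHONPATH and the KB_DEPLOYMENT_CONFIG/auth prerequisites the module reads at import time are in place:

# Minimal local-test sketch using the helpers above (Python 2, like the module itself).
from ManyHellos.ManyHellosServer import start_server, stop_server

port = start_server(host='localhost', port=0, newprocess=True)   # system-assigned port
print("ManyHellos test server listening on port %s" % port)
# ... issue JSON-RPC 1.1 POST requests against the returned port here ...
stop_server()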
server.go
|
package main
import (
"github.com/99designs/gqlgen/graphql/handler"
"github.com/99designs/gqlgen/graphql/playground"
"graphql_demo/demo/demo3/graph"
"graphql_demo/demo/demo3/graph/generated"
"log"
"net/http"
"os"
)
const defaultPort = "8080"
func main() {
port := os.Getenv("PORT")
if port == "" {
port = defaultPort
}
srv := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: &graph.Resolver{}}))
http.Handle("/", playground.Handler("GraphQL playground", "/query"))
http.Handle("/query", srv)
log.Printf("connect to http://localhost:%s/ for GraphQL playground", port)
log.Fatal(http.ListenAndServe(":"+port, nil))
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
spicystrings/hotstrings.py
|
#!/usr/bin/env python
from __future__ import annotations
import argparse
from collections import deque
import json
import logging
import os
import signal
import sys
from typing import Any
from pygtrie import CharTrie
import Xlib
import Xlib.X
import Xlib.XK
import Xlib.display
import Xlib.ext.record
import Xlib.protocol
from .actions import Action
EXIT_FAILURE = 1
RECORD_CONTEXT_ARGUMENTS = (
0,
(Xlib.ext.record.AllClients,),
({
'core_requests': (0, 0),
'core_replies': (0, 0),
'ext_requests': (0, 0, 0, 0),
'ext_replies': (0, 0, 0, 0),
'delivered_events': (0, 0),
'device_events': (Xlib.X.KeyPress, Xlib.X.KeyRelease),
'errors': (0, 0),
'client_started': False,
'client_died': False
},)
)
# Load xkb to access XK_ISO_Level3_Shift
Xlib.XK.load_keysym_group('xkb')
event_field = Xlib.protocol.rq.EventField(None)
def main():
argument_parser = argparse.ArgumentParser()
argument_parser.add_argument(
'path', metavar='PATH', nargs='?',
help='Path to JSON file containing hotstring definitions. '
'Default: %(default)s',
default=os.path.join(get_xdg_config_home(), 'hotstrings.json'))
argument_parser.add_argument('--verbose', '-v', action='store_true')
arguments = argument_parser.parse_args()
if arguments.verbose:
logging.basicConfig(level=logging.INFO)
path = os.path.expanduser(arguments.path)
if not os.path.exists(path):
argument_parser.exit(EXIT_FAILURE,
path + ': No such file or directory.\n')
connection = Xlib.display.Display()
record_connection = Xlib.display.Display()
if not record_connection.has_extension('RECORD'):
argument_parser.exit(EXIT_FAILURE,
'X Record Extension Library not found.\n')
with open(path) as file:
hotstrings_json = json.load(file)
if not hotstrings_json:
argument_parser.exit(EXIT_FAILURE, 'No hotstrings defined.\n')
hotstring_mapping = hotstring_lookup_from_json(hotstrings_json)
record_context = record_connection.record_create_context(
*RECORD_CONTEXT_ARGUMENTS)
    # Keep at most as many characters as the longest hotstring
    # in the HotstringProcessor queue
hotstring_processor = HotstringProcessor(
hotstring_mapping,
connection
)
record_handler = RecordHandler(connection, record_connection,
hotstring_processor)
def clean_up(*args):
record_connection.record_free_context(record_context)
record_connection.close()
connection.close()
argument_parser.exit()
# Make sure to free structs and close connections
for signal_ in signal.SIGINT, signal.SIGTERM:
signal.signal(signal_, clean_up)
logging.info('Listening for hotstrings...')
record_connection.record_enable_context(record_context, record_handler)
def parse_event_fields(data, display):
while data:
event, data = event_field.parse_binary_value(data, display, None, None)
yield event
def get_xdg_config_home():
xdg_config_home = os.getenv('XDG_CONFIG_HOME')
if xdg_config_home is not None and os.path.isabs(xdg_config_home):
return xdg_config_home
return os.path.expanduser('~/.config')
class RecordHandler:
MODIFIER_KEY_MASKS = {
'Shift': Xlib.X.ShiftMask,
'Lock': Xlib.X.LockMask,
'Control': Xlib.X.ControlMask,
'Alt': Xlib.X.Mod1Mask,
'Mod1': Xlib.X.Mod1Mask,
'Mod2': Xlib.X.Mod2Mask,
'Mod3': Xlib.X.Mod3Mask,
'Mod4': Xlib.X.Mod4Mask,
'Mod5': Xlib.X.Mod5Mask
}
def __init__(self, connection, record_connection, callback):
self.connection = connection
self.record_connection = record_connection
self.callback = callback
# Support for XK_ISO_Level3_Shift/AltGr:
self.alt_gr_pressed = False
self.alt_gr_keycodes = set(i[0] for i in self.connection.keysym_to_keycodes(Xlib.XK.XK_ISO_Level3_Shift))
def get_modifier_state_index(self, state):
# None = 0, Shift = 1, Alt = 2, Alt + Shift = 3, AltGr = 4, AltGr + Shift = 5
pressed = {n: (state & m) == m for n, m in self.MODIFIER_KEY_MASKS.items()}
index = 0
if pressed['Shift']:
index += 1
if pressed['Alt']:
index += 2
if self.alt_gr_pressed:
index += 4
return index
def key_pressed(self, event):
# Manually keep track of AltGr state because it is not encoded in the event.state byte
if event.detail in self.alt_gr_keycodes:
self.alt_gr_pressed = True
keysym = self.connection.keycode_to_keysym(event.detail, self.get_modifier_state_index(event.state))
character = self.connection.lookup_string(keysym)
if character:
self.callback(character)
def key_released(self, event):
if event.detail in self.alt_gr_keycodes:
self.alt_gr_pressed = False
def __call__(self, reply):
# Ignore all replies that can't be parsed by parse_event_fields
if not reply.category == Xlib.ext.record.FromServer:
return
for event in parse_event_fields(reply.data, self.record_connection.display):
if event.type == Xlib.X.KeyPress:
self.key_pressed(event)
else:
self.key_released(event)
class HotstringProcessor:
BACKSPACE_CHARACTER = '\x08'
def __init__(self, hotstring_lookup: CharTrie[str, Action], connection):
self.hotstring_mapping = hotstring_lookup
self.connection = connection
maxlen = max(len(k) for k in hotstring_lookup.keys())
self.char_stack: deque[str] = deque(maxlen=maxlen)
self.root_window = self.connection.screen().root
# These stay the same for all requests, so just keep a local copy
self._default_key_press_event_arguments = dict(
time=Xlib.X.CurrentTime,
root=self.root_window,
child=Xlib.X.NONE,
root_x=0, root_y=0, event_x=0, event_y=0,
same_screen=1
)
self._default_key_release_event_arguments = self._default_key_press_event_arguments # noqa: E501
def make_key_press_event(self, detail, state, window, **kwargs):
arguments = self._default_key_press_event_arguments.copy()
arguments.update(kwargs)
return Xlib.protocol.event.KeyPress(detail=detail, state=state, window=window, **arguments)
def make_key_release_event(self, detail, state, window, **kwargs):
arguments = self._default_key_release_event_arguments.copy()
arguments.update(kwargs)
return Xlib.protocol.event.KeyRelease(detail=detail, state=state, window=window, **arguments)
# TODO: Figure out a way to find keycodes not assigned in the current keyboard mapping
def string_to_keycodes(self, string_):
for character in string_:
code_point = ord(character)
# TODO: Take a look at other projects using python-xlib to improve this
# See Xlib.XK.keysym_to_string
keycodes = tuple(self.connection.keysym_to_keycodes(code_point) or
self.connection.keysym_to_keycodes(0xFF00 | code_point))
keycode = keycodes[0] if keycodes else None
# TODO: Remap missing characters to available keycodes
if not keycode:
                logging.info('No keycode found for: %r.', character)
continue
yield keycode
def type_keycode(self, keycode, window):
detail, state = keycode
window.send_event(self.make_key_press_event(detail, state, window))
window.send_event(self.make_key_release_event(detail, state, window))
def type_keycodes(self, keycodes, window):
for keycode in keycodes:
self.type_keycode(keycode, window)
self.connection.flush()
def __call__(self, character):
self.update_char_stack(character)
window = self.connection.get_input_focus().focus
hotstring, action = self.hotstring_mapping.longest_prefix(
''.join(self.char_stack))
if hotstring:
replacement = action.replacement()
self.type_backspaces(len(hotstring), window)
# Linefeeds don't seem to be sent by Xlib, so replace them with
# carriage returns: normalize \r\n to \r
# first, then replace all remaining \n with \r
replacement = replacement.replace('\r\n', '\r').replace('\n', '\r')
self.type_keycodes(self.string_to_keycodes(replacement), window)
self.char_stack.clear()
logging.info(f'HotstringProcessor.char_stack: {self.char_stack}')
def update_char_stack(self, character):
"""Append or delete characters from buffer"""
if character == self.BACKSPACE_CHARACTER:
try:
self.char_stack.popleft()
except IndexError:
pass # deque was empty
else:
self.char_stack.appendleft(character)
logging.info(f'HotstringProcessor.char_stack: {self.char_stack}')
def type_backspaces(self, num_times, window):
backspace = tuple(
self.string_to_keycodes(self.BACKSPACE_CHARACTER)
)
self.type_keycodes(backspace * num_times, window)
def hotstring_lookup_from_json(hotstrings: Any) -> CharTrie[str, Action]:
"""Returns a CharTrie mapping a reversed hotstring to an Action."""
def get_key_value():
for hotstring, action_code in hotstrings.items():
# require space to trigger hotstring
key = ' ' + ''.join(reversed(hotstring))
value = Action.from_list(action_code)
yield key, value
return CharTrie(get_key_value())
if __name__ == '__main__':
main()
|
[] |
[] |
[
"XDG_CONFIG_HOME"
] |
[]
|
["XDG_CONFIG_HOME"]
|
python
| 1 | 0 |
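The matching trick above is that every hotstring is stored reversed with a leading space, so the most recently typed characters (pushed onto the left of char_stack) can be resolved with a single longest_prefix lookup. A standalone sketch of that lookup, using a plain replacement string where the real code stores an Action:

# Standalone sketch of the reversed-hotstring lookup used by HotstringProcessor.
from collections import deque
from pygtrie import CharTrie

hotstrings = {'btw': 'by the way', 'omw': 'on my way'}
# Keys are reversed and prefixed with a space so a trailing space triggers the match.
trie = CharTrie({' ' + h[::-1]: repl for h, repl in hotstrings.items()})

char_stack = deque(maxlen=max(len(h) for h in hotstrings) + 1)
for ch in 'btw ':                 # user types "btw" followed by a space
    char_stack.appendleft(ch)

match, replacement = trie.longest_prefix(''.join(char_stack))
if match:
    print(replacement)            # -> by the way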